| partition (stringclasses, 3 values) | func_name (stringlengths 1-134) | docstring (stringlengths 1-46.9k) | path (stringlengths 4-223) | original_string (stringlengths 75-104k) | docstring_tokens (listlengths 1-1.97k) | repo (stringlengths 7-55) | language (stringclasses, 1 value) | url (stringlengths 87-315) | code_tokens (listlengths 19-28.4k) | sha (stringlengths 40) |
|---|---|---|---|---|---|---|---|---|---|---|
valid
|
expand_dims
|
Insert a new axis, corresponding to a given position in the array shape
Args:
a (array_like): Input array.
axis (int): Position (amongst axes) where new axis is to be inserted.
|
distob/arrays.py
|
def expand_dims(a, axis):
"""Insert a new axis, corresponding to a given position in the array shape
Args:
a (array_like): Input array.
axis (int): Position (amongst axes) where new axis is to be inserted.
"""
if hasattr(a, 'expand_dims') and hasattr(type(a), '__array_interface__'):
return a.expand_dims(axis)
else:
return np.expand_dims(a, axis)
|
[
"Insert",
"a",
"new",
"axis",
"corresponding",
"to",
"a",
"given",
"position",
"in",
"the",
"array",
"shape"
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/arrays.py#L1508-L1518
|
[
"def",
"expand_dims",
"(",
"a",
",",
"axis",
")",
":",
"if",
"hasattr",
"(",
"a",
",",
"'expand_dims'",
")",
"and",
"hasattr",
"(",
"type",
"(",
"a",
")",
",",
"'__array_interface__'",
")",
":",
"return",
"a",
".",
"expand_dims",
"(",
"axis",
")",
"else",
":",
"return",
"np",
".",
"expand_dims",
"(",
"a",
",",
"axis",
")"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
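For plain ndarrays the `expand_dims` wrapper above simply defers to `np.expand_dims`; a minimal sketch of the behaviour being preserved (plain numpy, no distob required):

```python
import numpy as np

a = np.arange(6).reshape(2, 3)   # shape (2, 3)
b = np.expand_dims(a, axis=1)    # insert a new axis at position 1
print(b.shape)                   # -> (2, 1, 3)
```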
valid
|
concatenate
|
Join a sequence of arrays together.
Will aim to join `ndarray`, `RemoteArray`, and `DistArray` without moving
their data, if they happen to be on different engines.
Args:
tup (sequence of array_like): Arrays to be concatenated. They must have
the same shape, except in the dimension corresponding to `axis`.
axis (int, optional): The axis along which the arrays will be joined.
Returns:
res: `ndarray`, if inputs were all local
`RemoteArray`, if inputs were all on the same remote engine
`DistArray`, if inputs were already scattered on different engines
|
distob/arrays.py
|
def concatenate(tup, axis=0):
"""Join a sequence of arrays together.
Will aim to join `ndarray`, `RemoteArray`, and `DistArray` without moving
their data, if they happen to be on different engines.
Args:
tup (sequence of array_like): Arrays to be concatenated. They must have
the same shape, except in the dimension corresponding to `axis`.
axis (int, optional): The axis along which the arrays will be joined.
Returns:
res: `ndarray`, if inputs were all local
`RemoteArray`, if inputs were all on the same remote engine
`DistArray`, if inputs were already scattered on different engines
"""
from distob import engine
    if len(tup) == 0:
raise ValueError('need at least one array to concatenate')
first = tup[0]
others = tup[1:]
# allow subclasses to provide their own implementations of concatenate:
if (hasattr(first, 'concatenate') and
hasattr(type(first), '__array_interface__')):
return first.concatenate(others, axis)
# convert all arguments to arrays/RemoteArrays if they are not already:
arrays = []
for ar in tup:
if isinstance(ar, DistArray):
if axis == ar._distaxis:
arrays.extend(ar._subarrays)
else:
                # Arrays distributed over more than one axis are not yet
                # implemented, so fetch and re-scatter on the new axis:
arrays.append(gather(ar))
elif isinstance(ar, RemoteArray):
arrays.append(ar)
elif isinstance(ar, Remote):
arrays.append(_remote_to_array(ar))
elif hasattr(type(ar), '__array_interface__'):
# then treat as a local ndarray
arrays.append(ar)
else:
arrays.append(np.array(ar))
if all(isinstance(ar, np.ndarray) for ar in arrays):
return np.concatenate(arrays, axis)
total_length = 0
# validate dimensions are same, except for axis of concatenation:
commonshape = list(arrays[0].shape)
commonshape[axis] = None # ignore this axis for shape comparison
for ar in arrays:
total_length += ar.shape[axis]
shp = list(ar.shape)
shp[axis] = None
if shp != commonshape:
raise ValueError('incompatible shapes for concatenation')
# set sensible target block size if splitting subarrays further:
blocksize = ((total_length - 1) // engine.nengines) + 1
rarrays = []
for ar in arrays:
if isinstance(ar, DistArray):
rarrays.extend(ar._subarrays)
elif isinstance(ar, RemoteArray):
rarrays.append(ar)
else:
da = _scatter_ndarray(ar, axis, blocksize)
for ra in da._subarrays:
rarrays.append(ra)
del da
del arrays
# At this point rarrays is a list of RemoteArray to be concatenated
eid = rarrays[0]._id.engine
if all(ra._id.engine == eid for ra in rarrays):
# Arrays to be joined are all on the same engine
if eid == engine.eid:
# Arrays are all local
return concatenate([gather(r) for r in rarrays], axis)
else:
return call(concatenate, rarrays, axis)
else:
# Arrays to be joined are on different engines.
# TODO: consolidate any consecutive arrays already on same engine
return DistArray(rarrays, axis)
|
[
"Join",
"a",
"sequence",
"of",
"arrays",
"together",
".",
"Will",
"aim",
"to",
"join",
"ndarray",
"RemoteArray",
"and",
"DistArray",
"without",
"moving",
"their",
"data",
"if",
"they",
"happen",
"to",
"be",
"on",
"different",
"engines",
"."
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/arrays.py#L1529-L1610
|
[
"def",
"concatenate",
"(",
"tup",
",",
"axis",
"=",
"0",
")",
":",
"from",
"distob",
"import",
"engine",
"if",
"len",
"(",
"tup",
")",
"is",
"0",
":",
"raise",
"ValueError",
"(",
"'need at least one array to concatenate'",
")",
"first",
"=",
"tup",
"[",
"0",
"]",
"others",
"=",
"tup",
"[",
"1",
":",
"]",
"# allow subclasses to provide their own implementations of concatenate:",
"if",
"(",
"hasattr",
"(",
"first",
",",
"'concatenate'",
")",
"and",
"hasattr",
"(",
"type",
"(",
"first",
")",
",",
"'__array_interface__'",
")",
")",
":",
"return",
"first",
".",
"concatenate",
"(",
"others",
",",
"axis",
")",
"# convert all arguments to arrays/RemoteArrays if they are not already:",
"arrays",
"=",
"[",
"]",
"for",
"ar",
"in",
"tup",
":",
"if",
"isinstance",
"(",
"ar",
",",
"DistArray",
")",
":",
"if",
"axis",
"==",
"ar",
".",
"_distaxis",
":",
"arrays",
".",
"extend",
"(",
"ar",
".",
"_subarrays",
")",
"else",
":",
"# Since not yet implemented arrays distributed on more than",
"# one axis, will fetch and re-scatter on the new axis:",
"arrays",
".",
"append",
"(",
"gather",
"(",
"ar",
")",
")",
"elif",
"isinstance",
"(",
"ar",
",",
"RemoteArray",
")",
":",
"arrays",
".",
"append",
"(",
"ar",
")",
"elif",
"isinstance",
"(",
"ar",
",",
"Remote",
")",
":",
"arrays",
".",
"append",
"(",
"_remote_to_array",
"(",
"ar",
")",
")",
"elif",
"hasattr",
"(",
"type",
"(",
"ar",
")",
",",
"'__array_interface__'",
")",
":",
"# then treat as a local ndarray",
"arrays",
".",
"append",
"(",
"ar",
")",
"else",
":",
"arrays",
".",
"append",
"(",
"np",
".",
"array",
"(",
"ar",
")",
")",
"if",
"all",
"(",
"isinstance",
"(",
"ar",
",",
"np",
".",
"ndarray",
")",
"for",
"ar",
"in",
"arrays",
")",
":",
"return",
"np",
".",
"concatenate",
"(",
"arrays",
",",
"axis",
")",
"total_length",
"=",
"0",
"# validate dimensions are same, except for axis of concatenation:",
"commonshape",
"=",
"list",
"(",
"arrays",
"[",
"0",
"]",
".",
"shape",
")",
"commonshape",
"[",
"axis",
"]",
"=",
"None",
"# ignore this axis for shape comparison",
"for",
"ar",
"in",
"arrays",
":",
"total_length",
"+=",
"ar",
".",
"shape",
"[",
"axis",
"]",
"shp",
"=",
"list",
"(",
"ar",
".",
"shape",
")",
"shp",
"[",
"axis",
"]",
"=",
"None",
"if",
"shp",
"!=",
"commonshape",
":",
"raise",
"ValueError",
"(",
"'incompatible shapes for concatenation'",
")",
"# set sensible target block size if splitting subarrays further:",
"blocksize",
"=",
"(",
"(",
"total_length",
"-",
"1",
")",
"//",
"engine",
".",
"nengines",
")",
"+",
"1",
"rarrays",
"=",
"[",
"]",
"for",
"ar",
"in",
"arrays",
":",
"if",
"isinstance",
"(",
"ar",
",",
"DistArray",
")",
":",
"rarrays",
".",
"extend",
"(",
"ar",
".",
"_subarrays",
")",
"elif",
"isinstance",
"(",
"ar",
",",
"RemoteArray",
")",
":",
"rarrays",
".",
"append",
"(",
"ar",
")",
"else",
":",
"da",
"=",
"_scatter_ndarray",
"(",
"ar",
",",
"axis",
",",
"blocksize",
")",
"for",
"ra",
"in",
"da",
".",
"_subarrays",
":",
"rarrays",
".",
"append",
"(",
"ra",
")",
"del",
"da",
"del",
"arrays",
"# At this point rarrays is a list of RemoteArray to be concatenated",
"eid",
"=",
"rarrays",
"[",
"0",
"]",
".",
"_id",
".",
"engine",
"if",
"all",
"(",
"ra",
".",
"_id",
".",
"engine",
"==",
"eid",
"for",
"ra",
"in",
"rarrays",
")",
":",
"# Arrays to be joined are all on the same engine",
"if",
"eid",
"==",
"engine",
".",
"eid",
":",
"# Arrays are all local",
"return",
"concatenate",
"(",
"[",
"gather",
"(",
"r",
")",
"for",
"r",
"in",
"rarrays",
"]",
",",
"axis",
")",
"else",
":",
"return",
"call",
"(",
"concatenate",
",",
"rarrays",
",",
"axis",
")",
"else",
":",
"# Arrays to be joined are on different engines.",
"# TODO: consolidate any consecutive arrays already on same engine",
"return",
"DistArray",
"(",
"rarrays",
",",
"axis",
")"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
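The `blocksize` formula in `concatenate` above is an integer ceiling division: it splits the total concatenated length as evenly as possible across engines. A quick check with hypothetical numbers:

```python
total_length, nengines = 10, 4
blocksize = ((total_length - 1) // nengines) + 1   # same as ceil(10 / 4)
print(blocksize)                                   # -> 3, giving blocks of 3, 3, 3, 1
```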
valid
|
vstack
|
Stack arrays in sequence vertically (row wise),
handling ``RemoteArray`` and ``DistArray`` without moving data.
Args:
tup (sequence of array_like)
Returns:
res: `ndarray`, if inputs were all local
`RemoteArray`, if inputs were all on the same remote engine
`DistArray`, if inputs were already scattered on different engines
|
distob/arrays.py
|
def vstack(tup):
"""Stack arrays in sequence vertically (row wise),
handling ``RemoteArray`` and ``DistArray`` without moving data.
Args:
tup (sequence of array_like)
Returns:
res: `ndarray`, if inputs were all local
`RemoteArray`, if inputs were all on the same remote engine
`DistArray`, if inputs were already scattered on different engines
"""
# Follow numpy.vstack behavior for 1D arrays:
arrays = list(tup)
for i in range(len(arrays)):
        if arrays[i].ndim == 1:
            arrays[i] = arrays[i][np.newaxis, :]
    return concatenate(arrays, axis=0)
|
[
"Stack",
"arrays",
"in",
"sequence",
"vertically",
"(",
"row",
"wise",
")",
"handling",
"RemoteArray",
"and",
"DistArray",
"without",
"moving",
"data",
"."
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/arrays.py#L1613-L1630
|
[
"def",
"vstack",
"(",
"tup",
")",
":",
"# Follow numpy.vstack behavior for 1D arrays:",
"arrays",
"=",
"list",
"(",
"tup",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"arrays",
")",
")",
":",
"if",
"arrays",
"[",
"i",
"]",
".",
"ndim",
"is",
"1",
":",
"arrays",
"[",
"i",
"]",
"=",
"arrays",
"[",
"i",
"]",
"[",
"np",
".",
"newaxis",
",",
":",
"]",
"return",
"concatenate",
"(",
"tup",
",",
"axis",
"=",
"0",
")"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
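`vstack` above promotes 1-D inputs to rows before concatenating on axis 0, matching numpy's behaviour; a plain-numpy illustration:

```python
import numpy as np

a = np.array([1, 2, 3])
b = np.array([4, 5, 6])
print(np.vstack((a, b)).shape)   # -> (2, 3): each 1-D input becomes a row
```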
valid
|
hstack
|
Stack arrays in sequence horizontally (column wise),
handling ``RemoteArray`` and ``DistArray`` without moving data.
Args:
tup (sequence of array_like)
Returns:
res: `ndarray`, if inputs were all local
`RemoteArray`, if inputs were all on the same remote engine
`DistArray`, if inputs were already scattered on different engines
|
distob/arrays.py
|
def hstack(tup):
"""Stack arrays in sequence horizontally (column wise),
handling ``RemoteArray`` and ``DistArray`` without moving data.
Args:
tup (sequence of array_like)
Returns:
res: `ndarray`, if inputs were all local
`RemoteArray`, if inputs were all on the same remote engine
`DistArray`, if inputs were already scattered on different engines
"""
# Follow numpy.hstack behavior for 1D arrays:
    if all(ar.ndim == 1 for ar in tup):
return concatenate(tup, axis=0)
else:
return concatenate(tup, axis=1)
|
[
"Stack",
"arrays",
"in",
"sequence",
"horizontally",
"(",
"column",
"wise",
")",
"handling",
"RemoteArray",
"and",
"DistArray",
"without",
"moving",
"data",
"."
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/arrays.py#L1633-L1649
|
[
"def",
"hstack",
"(",
"tup",
")",
":",
"# Follow numpy.hstack behavior for 1D arrays:",
"if",
"all",
"(",
"ar",
".",
"ndim",
"is",
"1",
"for",
"ar",
"in",
"tup",
")",
":",
"return",
"concatenate",
"(",
"tup",
",",
"axis",
"=",
"0",
")",
"else",
":",
"return",
"concatenate",
"(",
"tup",
",",
"axis",
"=",
"1",
")"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
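The axis choice in `hstack` mirrors numpy: 1-D inputs join end to end along axis 0, while higher-dimensional inputs join along axis 1:

```python
import numpy as np

print(np.hstack((np.array([1, 2]), np.array([3, 4]))))        # -> [1 2 3 4]
print(np.hstack((np.ones((2, 2)), np.zeros((2, 1)))).shape)   # -> (2, 3)
```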
valid
|
dstack
|
Stack arrays in sequence depth wise (along third dimension),
handling ``RemoteArray`` and ``DistArray`` without moving data.
Args:
tup (sequence of array_like)
Returns:
res: `ndarray`, if inputs were all local
`RemoteArray`, if inputs were all on the same remote engine
`DistArray`, if inputs were already scattered on different engines
|
distob/arrays.py
|
def dstack(tup):
"""Stack arrays in sequence depth wise (along third dimension),
handling ``RemoteArray`` and ``DistArray`` without moving data.
Args:
tup (sequence of array_like)
Returns:
res: `ndarray`, if inputs were all local
`RemoteArray`, if inputs were all on the same remote engine
`DistArray`, if inputs were already scattered on different engines
"""
# Follow numpy.dstack behavior for 1D and 2D arrays:
arrays = list(tup)
for i in range(len(arrays)):
        if arrays[i].ndim == 1:
            arrays[i] = arrays[i][np.newaxis, :]
        if arrays[i].ndim == 2:
            arrays[i] = arrays[i][:, :, np.newaxis]
return concatenate(arrays, axis=2)
|
[
"Stack",
"arrays",
"in",
"sequence",
"depth",
"wise",
"(",
"along",
"third",
"dimension",
")",
"handling",
"RemoteArray",
"and",
"DistArray",
"without",
"moving",
"data",
"."
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/arrays.py#L1652-L1671
|
[
"def",
"dstack",
"(",
"tup",
")",
":",
"# Follow numpy.dstack behavior for 1D and 2D arrays:",
"arrays",
"=",
"list",
"(",
"tup",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"arrays",
")",
")",
":",
"if",
"arrays",
"[",
"i",
"]",
".",
"ndim",
"is",
"1",
":",
"arrays",
"[",
"i",
"]",
"=",
"arrays",
"[",
"i",
"]",
"[",
"np",
".",
"newaxis",
",",
":",
"]",
"if",
"arrays",
"[",
"i",
"]",
".",
"ndim",
"is",
"2",
":",
"arrays",
"[",
"i",
"]",
"=",
"arrays",
"[",
"i",
"]",
"[",
":",
",",
":",
",",
"np",
".",
"newaxis",
"]",
"return",
"concatenate",
"(",
"arrays",
",",
"axis",
"=",
"2",
")"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
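`dstack` promotes a 1-D array twice, first to shape (1, N) and then to (1, N, 1), so 1-D inputs stack along a new third axis, again matching numpy:

```python
import numpy as np

a = np.array([1, 2, 3])
print(np.dstack((a, a)).shape)   # -> (1, 3, 2)
```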
valid
|
_broadcast_shape
|
Return the shape that would result from broadcasting the inputs
|
distob/arrays.py
|
def _broadcast_shape(*args):
"""Return the shape that would result from broadcasting the inputs"""
#TODO: currently incorrect result if a Sequence is provided as an input
shapes = [a.shape if hasattr(type(a), '__array_interface__')
else () for a in args]
ndim = max(len(sh) for sh in shapes) # new common ndim after broadcasting
for i, sh in enumerate(shapes):
if len(sh) < ndim:
shapes[i] = (1,)*(ndim - len(sh)) + sh
return tuple(max(sh[ax] for sh in shapes) for ax in range(ndim))
|
[
"Return",
"the",
"shape",
"that",
"would",
"result",
"from",
"broadcasting",
"the",
"inputs"
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/arrays.py#L1708-L1717
|
[
"def",
"_broadcast_shape",
"(",
"*",
"args",
")",
":",
"#TODO: currently incorrect result if a Sequence is provided as an input",
"shapes",
"=",
"[",
"a",
".",
"shape",
"if",
"hasattr",
"(",
"type",
"(",
"a",
")",
",",
"'__array_interface__'",
")",
"else",
"(",
")",
"for",
"a",
"in",
"args",
"]",
"ndim",
"=",
"max",
"(",
"len",
"(",
"sh",
")",
"for",
"sh",
"in",
"shapes",
")",
"# new common ndim after broadcasting",
"for",
"i",
",",
"sh",
"in",
"enumerate",
"(",
"shapes",
")",
":",
"if",
"len",
"(",
"sh",
")",
"<",
"ndim",
":",
"shapes",
"[",
"i",
"]",
"=",
"(",
"1",
",",
")",
"*",
"(",
"ndim",
"-",
"len",
"(",
"sh",
")",
")",
"+",
"sh",
"return",
"tuple",
"(",
"max",
"(",
"sh",
"[",
"ax",
"]",
"for",
"sh",
"in",
"shapes",
")",
"for",
"ax",
"in",
"range",
"(",
"ndim",
")",
")"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
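Assuming the `_broadcast_shape` helper above is in scope, a quick sanity check of the broadcasting rule it implements (objects without an array interface, such as scalars, contribute an empty shape):

```python
import numpy as np

# a (3, 1) array against a (4,) array broadcasts to (3, 4); the scalar is ignored
print(_broadcast_shape(np.ones((3, 1)), np.ones(4), 5.0))   # -> (3, 4)
```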
valid
|
mean
|
Compute the arithmetic mean along the specified axis.
Returns the average of the array elements. The average is taken over
the flattened array by default, otherwise over the specified axis.
`float64` intermediate and return values are used for integer inputs.
Parameters
----------
a : array_like
Array containing numbers whose mean is desired. If `a` is not an
array, a conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to
compute the mean of the flattened array.
If this is a tuple of ints, a mean is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the mean. For integer inputs, the default
is `float64`; for floating point inputs, it is the same as the
input dtype.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary.
See `doc.ufuncs` for details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
m : ndarray, see dtype parameter above
Notes
-----
np.mean fails to pass the keepdims parameter to ndarray subclasses.
That is the main reason we implement this function.
|
distob/arrays.py
|
def mean(a, axis=None, dtype=None, out=None, keepdims=False):
"""
Compute the arithmetic mean along the specified axis.
Returns the average of the array elements. The average is taken over
the flattened array by default, otherwise over the specified axis.
`float64` intermediate and return values are used for integer inputs.
Parameters
----------
a : array_like
Array containing numbers whose mean is desired. If `a` is not an
array, a conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to
compute the mean of the flattened array.
If this is a tuple of ints, a mean is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the mean. For integer inputs, the default
is `float64`; for floating point inputs, it is the same as the
input dtype.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary.
See `doc.ufuncs` for details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
m : ndarray, see dtype parameter above
Notes
-----
np.mean fails to pass the keepdims parameter to ndarray subclasses.
That is the main reason we implement this function.
"""
if (isinstance(a, np.ndarray) or
isinstance(a, RemoteArray) or
isinstance(a, DistArray)):
return a.mean(axis=axis, dtype=dtype, out=out, keepdims=keepdims)
else:
return np.mean(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
|
[
"Compute",
"the",
"arithmetic",
"mean",
"along",
"the",
"specified",
"axis",
"."
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/arrays.py#L1756-L1802
|
[
"def",
"mean",
"(",
"a",
",",
"axis",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"out",
"=",
"None",
",",
"keepdims",
"=",
"False",
")",
":",
"if",
"(",
"isinstance",
"(",
"a",
",",
"np",
".",
"ndarray",
")",
"or",
"isinstance",
"(",
"a",
",",
"RemoteArray",
")",
"or",
"isinstance",
"(",
"a",
",",
"DistArray",
")",
")",
":",
"return",
"a",
".",
"mean",
"(",
"axis",
"=",
"axis",
",",
"dtype",
"=",
"dtype",
",",
"out",
"=",
"out",
",",
"keepdims",
"=",
"keepdims",
")",
"else",
":",
"return",
"np",
".",
"mean",
"(",
"a",
",",
"axis",
"=",
"axis",
",",
"dtype",
"=",
"dtype",
",",
"out",
"=",
"out",
",",
"keepdims",
"=",
"keepdims",
")"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
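The Notes above concern the `keepdims` flag; on a plain ndarray it keeps the reduced axis as size one, so the result still broadcasts against the input:

```python
import numpy as np

a = np.arange(12).reshape(3, 4)
print(a.mean(axis=0).shape)                  # -> (4,)
print(a.mean(axis=0, keepdims=True).shape)   # -> (1, 4); broadcasts against a
```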
valid
|
DistArray._fetch
|
forces update of a local cached copy of the real object
(regardless of the preference setting self.cache)
|
distob/arrays.py
|
def _fetch(self):
"""forces update of a local cached copy of the real object
(regardless of the preference setting self.cache)"""
if not self._obcache_current:
from distob import engine
ax = self._distaxis
self._obcache = concatenate([ra._ob for ra in self._subarrays], ax)
# let subarray obcaches and main obcache be views on same memory:
for i in range(self._n):
ix = [slice(None)] * self.ndim
ix[ax] = slice(self._si[i], self._si[i+1])
self._subarrays[i]._obcache = self._obcache[tuple(ix)]
self._obcache_current = True
# now prefer local processing:
self.__engine_affinity__ = (
engine.eid, self.__engine_affinity__[1])
|
[
"forces",
"update",
"of",
"a",
"local",
"cached",
"copy",
"of",
"the",
"real",
"object",
"(",
"regardless",
"of",
"the",
"preference",
"setting",
"self",
".",
"cache",
")"
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/arrays.py#L424-L439
|
[
"def",
"_fetch",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_obcache_current",
":",
"from",
"distob",
"import",
"engine",
"ax",
"=",
"self",
".",
"_distaxis",
"self",
".",
"_obcache",
"=",
"concatenate",
"(",
"[",
"ra",
".",
"_ob",
"for",
"ra",
"in",
"self",
".",
"_subarrays",
"]",
",",
"ax",
")",
"# let subarray obcaches and main obcache be views on same memory:",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"_n",
")",
":",
"ix",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"self",
".",
"ndim",
"ix",
"[",
"ax",
"]",
"=",
"slice",
"(",
"self",
".",
"_si",
"[",
"i",
"]",
",",
"self",
".",
"_si",
"[",
"i",
"+",
"1",
"]",
")",
"self",
".",
"_subarrays",
"[",
"i",
"]",
".",
"_obcache",
"=",
"self",
".",
"_obcache",
"[",
"tuple",
"(",
"ix",
")",
"]",
"self",
".",
"_obcache_current",
"=",
"True",
"# now prefer local processing:",
"self",
".",
"__engine_affinity__",
"=",
"(",
"engine",
".",
"eid",
",",
"self",
".",
"__engine_affinity__",
"[",
"1",
"]",
")"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
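`_fetch` above makes each subarray cache a basic slice of the main cache, relying on numpy basic indexing returning views rather than copies; a small demonstration of that property:

```python
import numpy as np

whole = np.arange(10)
part = whole[2:5]    # basic slicing returns a view on the same memory
part[:] = 0
print(whole)         # -> [0 1 0 0 0 5 6 7 8 9]
```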
valid
|
DistArray._tosubslices
|
Maps a slice object for the whole array to slice objects for the subarrays.
Returns a pair (ss, ms) where ss is a list of subarray indices and ms is a
list giving the slice object to apply to each subarray.
|
distob/arrays.py
|
def _tosubslices(self, sl):
"""Maps a slice object for whole array to slice objects for subarrays.
Returns pair (ss, ms) where ss is a list of subarrays and ms is a list
giving the slice object that should be applied to each subarray.
"""
N = self.shape[self._distaxis]
start, stop, step = sl.start, sl.stop, sl.step
if step is None:
step = 1
ss = []
ms = []
if step > 0:
if start is None:
start = 0
if stop is None:
stop = N
subs = range(0, self._n)
for s in subs:
low = self._si[s]
high = self._si[s + 1]
first = low + ((low - start) % step)
last = high + ((high - start) % step)
if start < high and stop > low and first < high:
ss.append(s)
substart = max(first, start) - low
substop = min(last, stop) - low
ms.append(slice(substart, substop, step))
elif step < 0:
if start is None:
start = N - 1
if stop is None:
stop = -1
subs = range(self._n - 1, -1, -1)
for s in subs:
low = self._si[s]
high = self._si[s + 1]
first = high + step + ((high - start) % step)
last = low + step + ((low - start) % step)
if start >= low and stop < high and first >= low:
ss.append(s)
substart = min(first, start) - low
substop = max(last + step, stop) - low
if substop < 0:
substop = None
ms.append(slice(substart, substop, step))
else:
raise ValueError('slice step cannot be zero')
return ss, ms
|
[
"Maps",
"a",
"slice",
"object",
"for",
"whole",
"array",
"to",
"slice",
"objects",
"for",
"subarrays",
".",
"Returns",
"pair",
"(",
"ss",
"ms",
")",
"where",
"ss",
"is",
"a",
"list",
"of",
"subarrays",
"and",
"ms",
"is",
"a",
"list",
"giving",
"the",
"slice",
"object",
"that",
"should",
"be",
"applied",
"to",
"each",
"subarray",
"."
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/arrays.py#L673-L720
|
[
"def",
"_tosubslices",
"(",
"self",
",",
"sl",
")",
":",
"N",
"=",
"self",
".",
"shape",
"[",
"self",
".",
"_distaxis",
"]",
"start",
",",
"stop",
",",
"step",
"=",
"sl",
".",
"start",
",",
"sl",
".",
"stop",
",",
"sl",
".",
"step",
"if",
"step",
"is",
"None",
":",
"step",
"=",
"1",
"ss",
"=",
"[",
"]",
"ms",
"=",
"[",
"]",
"if",
"step",
">",
"0",
":",
"if",
"start",
"is",
"None",
":",
"start",
"=",
"0",
"if",
"stop",
"is",
"None",
":",
"stop",
"=",
"N",
"subs",
"=",
"range",
"(",
"0",
",",
"self",
".",
"_n",
")",
"for",
"s",
"in",
"subs",
":",
"low",
"=",
"self",
".",
"_si",
"[",
"s",
"]",
"high",
"=",
"self",
".",
"_si",
"[",
"s",
"+",
"1",
"]",
"first",
"=",
"low",
"+",
"(",
"(",
"low",
"-",
"start",
")",
"%",
"step",
")",
"last",
"=",
"high",
"+",
"(",
"(",
"high",
"-",
"start",
")",
"%",
"step",
")",
"if",
"start",
"<",
"high",
"and",
"stop",
">",
"low",
"and",
"first",
"<",
"high",
":",
"ss",
".",
"append",
"(",
"s",
")",
"substart",
"=",
"max",
"(",
"first",
",",
"start",
")",
"-",
"low",
"substop",
"=",
"min",
"(",
"last",
",",
"stop",
")",
"-",
"low",
"ms",
".",
"append",
"(",
"slice",
"(",
"substart",
",",
"substop",
",",
"step",
")",
")",
"elif",
"step",
"<",
"0",
":",
"if",
"start",
"is",
"None",
":",
"start",
"=",
"N",
"-",
"1",
"if",
"stop",
"is",
"None",
":",
"stop",
"=",
"-",
"1",
"subs",
"=",
"range",
"(",
"self",
".",
"_n",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
"for",
"s",
"in",
"subs",
":",
"low",
"=",
"self",
".",
"_si",
"[",
"s",
"]",
"high",
"=",
"self",
".",
"_si",
"[",
"s",
"+",
"1",
"]",
"first",
"=",
"high",
"+",
"step",
"+",
"(",
"(",
"high",
"-",
"start",
")",
"%",
"step",
")",
"last",
"=",
"low",
"+",
"step",
"+",
"(",
"(",
"low",
"-",
"start",
")",
"%",
"step",
")",
"if",
"start",
">=",
"low",
"and",
"stop",
"<",
"high",
"and",
"first",
">=",
"low",
":",
"ss",
".",
"append",
"(",
"s",
")",
"substart",
"=",
"min",
"(",
"first",
",",
"start",
")",
"-",
"low",
"substop",
"=",
"max",
"(",
"last",
"+",
"step",
",",
"stop",
")",
"-",
"low",
"if",
"substop",
"<",
"0",
":",
"substop",
"=",
"None",
"ms",
".",
"append",
"(",
"slice",
"(",
"substart",
",",
"substop",
",",
"step",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'slice step cannot be zero'",
")",
"return",
"ss",
",",
"ms"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
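The invariant `_tosubslices` maintains: applying the returned per-subarray slices and concatenating must select the same elements as applying the original slice to the whole array. A sketch of that property with hypothetical boundaries (the `si` list below is made up for illustration, and the per-part slices shown are the ones the method should produce for these inputs):

```python
import numpy as np

si = [0, 4, 8, 12]                 # hypothetical subarray boundaries
whole = np.arange(12)
parts = [whole[si[i]:si[i + 1]] for i in range(3)]
sl = slice(2, 10, 2)
print(whole[sl])                    # -> [2 4 6 8]
# the piecewise selection reproduces it:
print(np.concatenate([parts[0][2:4:2], parts[1][0:4:2], parts[2][0:2:2]]))
```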
valid
|
DistArray._valid_distaxis
|
`ax` is a valid candidate for a distributed axis if the given
subarray shapes are all the same when ignoring axis `ax`
|
distob/arrays.py
|
def _valid_distaxis(shapes, ax):
"""`ax` is a valid candidate for a distributed axis if the given
subarray shapes are all the same when ignoring axis `ax`"""
compare_shapes = np.vstack(shapes)
if ax < compare_shapes.shape[1]:
compare_shapes[:, ax] = -1
return np.count_nonzero(compare_shapes - compare_shapes[0]) == 0
|
[
"ax",
"is",
"a",
"valid",
"candidate",
"for",
"a",
"distributed",
"axis",
"if",
"the",
"given",
"subarray",
"shapes",
"are",
"all",
"the",
"same",
"when",
"ignoring",
"axis",
"ax"
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/arrays.py#L917-L923
|
[
"def",
"_valid_distaxis",
"(",
"shapes",
",",
"ax",
")",
":",
"compare_shapes",
"=",
"np",
".",
"vstack",
"(",
"shapes",
")",
"if",
"ax",
"<",
"compare_shapes",
".",
"shape",
"[",
"1",
"]",
":",
"compare_shapes",
"[",
":",
",",
"ax",
"]",
"=",
"-",
"1",
"return",
"np",
".",
"count_nonzero",
"(",
"compare_shapes",
"-",
"compare_shapes",
"[",
"0",
"]",
")",
"==",
"0"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
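Assuming `_valid_distaxis` above is in scope, two quick checks: shapes that agree everywhere except the candidate axis are valid, anything else is not:

```python
import numpy as np

print(_valid_distaxis([(4, 5), (3, 5)], 0))   # -> True: only axis 0 differs
print(_valid_distaxis([(4, 5), (4, 6)], 0))   # -> False: axis 1 differs too
```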
valid
|
DistArray.expand_dims
|
Insert a new axis, at a given position in the array shape
Args:
axis (int): Position (amongst axes) where new axis is to be inserted.
|
distob/arrays.py
|
def expand_dims(self, axis):
"""Insert a new axis, at a given position in the array shape
Args:
axis (int): Position (amongst axes) where new axis is to be inserted.
"""
if axis == -1:
axis = self.ndim
if axis <= self._distaxis:
subaxis = axis
new_distaxis = self._distaxis + 1
else:
subaxis = axis - 1
new_distaxis = self._distaxis
new_subarrays = [expand_dims(ra, subaxis) for ra in self._subarrays]
return DistArray(new_subarrays, new_distaxis)
|
[
"Insert",
"a",
"new",
"axis",
"at",
"a",
"given",
"position",
"in",
"the",
"array",
"shape",
"Args",
":",
"axis",
"(",
"int",
")",
":",
"Position",
"(",
"amongst",
"axes",
")",
"where",
"new",
"axis",
"is",
"to",
"be",
"inserted",
"."
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/arrays.py#L1014-L1028
|
[
"def",
"expand_dims",
"(",
"self",
",",
"axis",
")",
":",
"if",
"axis",
"==",
"-",
"1",
":",
"axis",
"=",
"self",
".",
"ndim",
"if",
"axis",
"<=",
"self",
".",
"_distaxis",
":",
"subaxis",
"=",
"axis",
"new_distaxis",
"=",
"self",
".",
"_distaxis",
"+",
"1",
"else",
":",
"subaxis",
"=",
"axis",
"-",
"1",
"new_distaxis",
"=",
"self",
".",
"_distaxis",
"new_subarrays",
"=",
"[",
"expand_dims",
"(",
"ra",
",",
"subaxis",
")",
"for",
"ra",
"in",
"self",
".",
"_subarrays",
"]",
"return",
"DistArray",
"(",
"new_subarrays",
",",
"new_distaxis",
")"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
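The branch in `DistArray.expand_dims` shifts the distributed axis right whenever the new axis is inserted at or before it; a standalone sketch of that bookkeeping with made-up values:

```python
distaxis = 1                     # hypothetical distributed axis
for axis in (0, 1, 2):           # position of the new axis
    if axis <= distaxis:
        subaxis, new_distaxis = axis, distaxis + 1
    else:
        subaxis, new_distaxis = axis - 1, distaxis
    print(axis, subaxis, new_distaxis)   # -> 0 0 2 / 1 1 2 / 2 1 1
```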
valid
|
DistArray.mean
|
Compute the arithmetic mean along the specified axis.
See np.mean() for details.
|
distob/arrays.py
|
def mean(self, axis=None, dtype=None, out=None, keepdims=False):
"""Compute the arithmetic mean along the specified axis.
See np.mean() for details."""
if axis == -1:
axis = self.ndim
if axis is None:
results = vectorize(mean)(self, axis, dtype, keepdims=False)
weights = self._sublengths
res = np.average(results, axis=None, weights=weights)
if keepdims:
for i in range(self.ndim):
res = expand_dims(res, res.ndim)
elif axis == self._distaxis:
results = vectorize(mean)(self, axis, dtype, keepdims=True)
results = gather(results)
# Average manually (np.average doesn't preserve ndarray subclasses)
weights = (np.array(self._sublengths, dtype=np.float64) /
sum(self._sublengths))
        ix = [slice(None)] * self.ndim
        ix[axis] = 0
        res = results[tuple(ix)] * weights[0]
        for i in range(1, self._n):
            ix[axis] = i
            res = res + results[tuple(ix)] * weights[i]
if keepdims:
res = expand_dims(res, axis)
else:
res = vectorize(mean)(self, axis, dtype, keepdims=False)
if keepdims:
res = expand_dims(res, axis)
if out is not None:
out[:] = res
return res
|
[
"Compute",
"the",
"arithmetic",
"mean",
"along",
"the",
"specified",
"axis",
".",
"See",
"np",
".",
"mean",
"()",
"for",
"details",
"."
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/arrays.py#L1030-L1062
|
[
"def",
"mean",
"(",
"self",
",",
"axis",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"out",
"=",
"None",
",",
"keepdims",
"=",
"False",
")",
":",
"if",
"axis",
"==",
"-",
"1",
":",
"axis",
"=",
"self",
".",
"ndim",
"if",
"axis",
"is",
"None",
":",
"results",
"=",
"vectorize",
"(",
"mean",
")",
"(",
"self",
",",
"axis",
",",
"dtype",
",",
"keepdims",
"=",
"False",
")",
"weights",
"=",
"self",
".",
"_sublengths",
"res",
"=",
"np",
".",
"average",
"(",
"results",
",",
"axis",
"=",
"None",
",",
"weights",
"=",
"weights",
")",
"if",
"keepdims",
":",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"ndim",
")",
":",
"res",
"=",
"expand_dims",
"(",
"res",
",",
"res",
".",
"ndim",
")",
"elif",
"axis",
"==",
"self",
".",
"_distaxis",
":",
"results",
"=",
"vectorize",
"(",
"mean",
")",
"(",
"self",
",",
"axis",
",",
"dtype",
",",
"keepdims",
"=",
"True",
")",
"results",
"=",
"gather",
"(",
"results",
")",
"# Average manually (np.average doesn't preserve ndarray subclasses)",
"weights",
"=",
"(",
"np",
".",
"array",
"(",
"self",
".",
"_sublengths",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"/",
"sum",
"(",
"self",
".",
"_sublengths",
")",
")",
"ix",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"self",
".",
"ndim",
"ix",
"[",
"axis",
"]",
"=",
"0",
"res",
"=",
"results",
"[",
"ix",
"]",
"*",
"weights",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"self",
".",
"_n",
")",
":",
"ix",
"[",
"axis",
"]",
"=",
"i",
"res",
"=",
"res",
"+",
"results",
"[",
"ix",
"]",
"*",
"weights",
"[",
"i",
"]",
"if",
"keepdims",
":",
"res",
"=",
"expand_dims",
"(",
"res",
",",
"axis",
")",
"else",
":",
"res",
"=",
"vectorize",
"(",
"mean",
")",
"(",
"self",
",",
"axis",
",",
"dtype",
",",
"keepdims",
"=",
"False",
")",
"if",
"keepdims",
":",
"res",
"=",
"expand_dims",
"(",
"res",
",",
"axis",
")",
"if",
"out",
"is",
"not",
"None",
":",
"out",
"[",
":",
"]",
"=",
"res",
"return",
"res"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
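`DistArray.mean` combines per-subarray means weighted by subarray length; the identity it relies on is easy to check with plain numpy (the block sizes here are made up):

```python
import numpy as np

blocks = [np.arange(4), np.arange(4, 10)]        # hypothetical subarrays
per_block = [b.mean() for b in blocks]           # [1.5, 6.5]
weights = [len(b) for b in blocks]               # [4, 6]
print(np.average(per_block, weights=weights))    # -> 4.5
print(np.concatenate(blocks).mean())             # -> 4.5, the same
```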
valid
|
run
|
Returns True if successful, False if failure
|
cpenv/shell.py
|
def run(*args, **kwargs):
'''Returns True if successful, False if failure'''
kwargs.setdefault('env', os.environ)
kwargs.setdefault('shell', True)
try:
subprocess.check_call(' '.join(args), **kwargs)
return True
except subprocess.CalledProcessError:
logger.debug('Error running: {}'.format(args))
return False
|
[
"Returns",
"True",
"if",
"successful",
"False",
"if",
"failure"
] |
cpenv/cpenv
|
python
|
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/shell.py#L9-L20
|
[
"def",
"run",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"setdefault",
"(",
"'env'",
",",
"os",
".",
"environ",
")",
"kwargs",
".",
"setdefault",
"(",
"'shell'",
",",
"True",
")",
"try",
":",
"subprocess",
".",
"check_call",
"(",
"' '",
".",
"join",
"(",
"args",
")",
",",
"*",
"*",
"kwargs",
")",
"return",
"True",
"except",
"subprocess",
".",
"CalledProcessError",
":",
"logger",
".",
"debug",
"(",
"'Error running: {}'",
".",
"format",
"(",
"args",
")",
")",
"return",
"False"
] |
afbb569ae04002743db041d3629a5be8c290bd89
|
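Assuming the `run` helper above is importable, usage is just positional command parts; it returns a bool rather than raising:

```python
ok = run('echo', 'hello')   # joined to "echo hello" and run in a shell
print(ok)                   # -> True when the command exits with status 0
```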
valid
|
cmd
|
Return a command to launch a subshell
|
cpenv/shell.py
|
def cmd():
'''Return a command to launch a subshell'''
if platform == 'win':
return ['cmd.exe', '/K']
elif platform == 'linux':
ppid = os.getppid()
ppid_cmdline_file = '/proc/{0}/cmdline'.format(ppid)
try:
with open(ppid_cmdline_file) as f:
cmd = f.read()
if cmd.endswith('\x00'):
cmd = cmd[:-1]
cmd = cmd.split('\x00')
return cmd + [binpath('subshell.sh')]
        except (IOError, OSError):
cmd = 'bash'
else:
cmd = 'bash'
return [cmd, binpath('subshell.sh')]
|
[
"Return",
"a",
"command",
"to",
"launch",
"a",
"subshell"
] |
cpenv/cpenv
|
python
|
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/shell.py#L23-L45
|
[
"def",
"cmd",
"(",
")",
":",
"if",
"platform",
"==",
"'win'",
":",
"return",
"[",
"'cmd.exe'",
",",
"'/K'",
"]",
"elif",
"platform",
"==",
"'linux'",
":",
"ppid",
"=",
"os",
".",
"getppid",
"(",
")",
"ppid_cmdline_file",
"=",
"'/proc/{0}/cmdline'",
".",
"format",
"(",
"ppid",
")",
"try",
":",
"with",
"open",
"(",
"ppid_cmdline_file",
")",
"as",
"f",
":",
"cmd",
"=",
"f",
".",
"read",
"(",
")",
"if",
"cmd",
".",
"endswith",
"(",
"'\\x00'",
")",
":",
"cmd",
"=",
"cmd",
"[",
":",
"-",
"1",
"]",
"cmd",
"=",
"cmd",
".",
"split",
"(",
"'\\x00'",
")",
"return",
"cmd",
"+",
"[",
"binpath",
"(",
"'subshell.sh'",
")",
"]",
"except",
":",
"cmd",
"=",
"'bash'",
"else",
":",
"cmd",
"=",
"'bash'",
"return",
"[",
"cmd",
",",
"binpath",
"(",
"'subshell.sh'",
")",
"]"
] |
afbb569ae04002743db041d3629a5be8c290bd89
|
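The `/proc/<pid>/cmdline` parsing in `cmd` deals with NUL-separated arguments and a trailing NUL; a standalone sketch of that step with example contents:

```python
raw = 'bash\x00--login\x00'   # example /proc/<pid>/cmdline contents
if raw.endswith('\x00'):
    raw = raw[:-1]            # drop the trailing NUL
print(raw.split('\x00'))      # -> ['bash', '--login']
```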
valid
|
prompt
|
Generate a prompt with a given prefix
linux/osx: [prefix] user@host cwd $
win: [prefix] cwd:
|
cpenv/shell.py
|
def prompt(prefix=None, colored=True):
'''Generate a prompt with a given prefix
linux/osx: [prefix] user@host cwd $
win: [prefix] cwd:
'''
if platform == 'win':
return '[{0}] $P$G'.format(prefix)
else:
if colored:
return (
'[{0}] ' # White prefix
'\\[\\033[01;32m\\]\\u@\\h\\[\\033[00m\\] ' # Green user@host
'\\[\\033[01;34m\\]\\w $ \\[\\033[00m\\]' # Blue cwd $
).format(prefix)
return '[{0}] \\u@\\h \\w $ '.format(prefix)
|
[
"Generate",
"a",
"prompt",
"with",
"a",
"given",
"prefix"
] |
cpenv/cpenv
|
python
|
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/shell.py#L48-L64
|
[
"def",
"prompt",
"(",
"prefix",
"=",
"None",
",",
"colored",
"=",
"True",
")",
":",
"if",
"platform",
"==",
"'win'",
":",
"return",
"'[{0}] $P$G'",
".",
"format",
"(",
"prefix",
")",
"else",
":",
"if",
"colored",
":",
"return",
"(",
"'[{0}] '",
"# White prefix",
"'\\\\[\\\\033[01;32m\\\\]\\\\u@\\\\h\\\\[\\\\033[00m\\\\] '",
"# Green user@host",
"'\\\\[\\\\033[01;34m\\\\]\\\\w $ \\\\[\\\\033[00m\\\\]'",
"# Blue cwd $",
")",
".",
"format",
"(",
"prefix",
")",
"return",
"'[{0}] \\\\u@\\\\h \\\\w $ '",
".",
"format",
"(",
"prefix",
")"
] |
afbb569ae04002743db041d3629a5be8c290bd89
|
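Assuming `prompt` above is in scope and the POSIX branch is taken, the uncolored variant produces a plain bash PS1 string:

```python
print(prompt('myenv', colored=False))   # -> [myenv] \u@\h \w $
```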
valid
|
launch
|
Launch a subshell
|
cpenv/shell.py
|
def launch(prompt_prefix=None):
'''Launch a subshell'''
if prompt_prefix:
os.environ['PROMPT'] = prompt(prompt_prefix)
    subprocess.call(cmd(), env=dict(os.environ))
|
[
"Launch",
"a",
"subshell"
] |
cpenv/cpenv
|
python
|
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/shell.py#L67-L73
|
[
"def",
"launch",
"(",
"prompt_prefix",
"=",
"None",
")",
":",
"if",
"prompt_prefix",
":",
"os",
".",
"environ",
"[",
"'PROMPT'",
"]",
"=",
"prompt",
"(",
"prompt_prefix",
")",
"subprocess",
".",
"call",
"(",
"cmd",
"(",
")",
",",
"env",
"=",
"os",
".",
"environ",
".",
"data",
")"
] |
afbb569ae04002743db041d3629a5be8c290bd89
|
valid
|
FileModificationMonitor.add_file
|
Append a file to the file repository.
For file monitoring, the monitor instance needs files.
Pass the name of the file as the `file` argument.
:param file: the name of the file you want to monitor.
|
mdfmonitor.py
|
def add_file(self, file, **kwargs):
"""Append a file to file repository.
For file monitoring, monitor instance needs file.
Please put the name of file to `file` argument.
:param file: the name of file you want monitor.
"""
if os.access(file, os.F_OK):
if file in self.f_repository:
raise DuplicationError("file already added.")
self.f_repository.append(file)
else:
raise IOError("file not found.")
|
[
"Append",
"a",
"file",
"to",
"file",
"repository",
"."
] |
alice1017/mdfmonitor
|
python
|
https://github.com/alice1017/mdfmonitor/blob/a414ed3d486b92ed31d30e23de823b05b0381f55/mdfmonitor.py#L83-L101
|
[
"def",
"add_file",
"(",
"self",
",",
"file",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"os",
".",
"access",
"(",
"file",
",",
"os",
".",
"F_OK",
")",
":",
"if",
"file",
"in",
"self",
".",
"f_repository",
":",
"raise",
"DuplicationError",
"(",
"\"file already added.\"",
")",
"self",
".",
"f_repository",
".",
"append",
"(",
"file",
")",
"else",
":",
"raise",
"IOError",
"(",
"\"file not found.\"",
")"
] |
a414ed3d486b92ed31d30e23de823b05b0381f55
|
valid
|
FileModificationMonitor.add_files
|
Append files to the file repository.
ModificationMonitor can append multiple files to the repository this way.
Pass the list of file names as the `filelist` argument.
:param filelist: the list of file names
|
mdfmonitor.py
|
def add_files(self, filelist, **kwargs):
"""Append files to file repository.
ModificationMonitor can append files to repository using this.
Please put the list of file names to `filelist` argument.
:param filelist: the list of file nmaes
"""
# check filelist is list type
if not isinstance(filelist, list):
raise TypeError("request the list type.")
for file in filelist:
self.add_file(file)
|
[
"Append",
"files",
"to",
"file",
"repository",
".",
"ModificationMonitor",
"can",
"append",
"files",
"to",
"repository",
"using",
"this",
".",
"Please",
"put",
"the",
"list",
"of",
"file",
"names",
"to",
"filelist",
"argument",
"."
] |
alice1017/mdfmonitor
|
python
|
https://github.com/alice1017/mdfmonitor/blob/a414ed3d486b92ed31d30e23de823b05b0381f55/mdfmonitor.py#L104-L118
|
[
"def",
"add_files",
"(",
"self",
",",
"filelist",
",",
"*",
"*",
"kwargs",
")",
":",
"# check filelist is list type",
"if",
"not",
"isinstance",
"(",
"filelist",
",",
"list",
")",
":",
"raise",
"TypeError",
"(",
"\"request the list type.\"",
")",
"for",
"file",
"in",
"filelist",
":",
"self",
".",
"add_file",
"(",
"file",
")"
] |
a414ed3d486b92ed31d30e23de823b05b0381f55
|
valid
|
FileModificationMonitor.monitor
|
Run the file modification monitor.
The monitor detects modifications by comparing each file's timestamp and
body. It records the original timestamp and body of every file before
entering the loop. On each pass it reads the current timestamp and body and
compares them with the originals; if either differs, the change is treated
as a `modification`. The monitor then creates a FileModificationObject,
adds it to a FileModificationObjectManager, and yields the object.
:param sleep: how many seconds to sleep on each pass of the loop.
|
mdfmonitor.py
|
def monitor(self, sleep=5):
"""Run file modification monitor.
The monitor can catch file modification using timestamp and file body.
Monitor has timestamp data and file body data. And insert timestamp
data and file body data before into while roop. In while roop, monitor
get new timestamp and file body, and then monitor compare new timestamp
to originaltimestamp. If new timestamp and file body differ original,
monitor regard thease changes as `modification`. Then monitor create
instance of FileModificationObjectManager and FileModificationObject,
and monitor insert FileModificationObject to FileModificationObject-
Manager. Then, yield this object.
:param sleep: How times do you sleep in while roop.
"""
manager = FileModificationObjectManager()
timestamps = {}
filebodies = {}
# register original timestamp and filebody to dict
for file in self.f_repository:
timestamps[file] = self._get_mtime(file)
filebodies[file] = open(file).read()
while True:
for file in self.f_repository:
mtime = timestamps[file]
fbody = filebodies[file]
modified = self._check_modify(file, mtime, fbody)
            # file not modified -> continue
            if not modified:
                continue
            # file modified -> create the modification object
new_mtime = self._get_mtime(file)
new_fbody = open(file).read()
obj = FileModificationObject(
file,
(mtime, new_mtime),
(fbody, new_fbody) )
# overwrite new timestamp and filebody
timestamps[file] = new_mtime
filebodies[file] = new_fbody
# append file modification object to manager
manager.add_object(obj)
# return new modification object
yield obj
time.sleep(sleep)
|
[
"Run",
"file",
"modification",
"monitor",
"."
] |
alice1017/mdfmonitor
|
python
|
https://github.com/alice1017/mdfmonitor/blob/a414ed3d486b92ed31d30e23de823b05b0381f55/mdfmonitor.py#L120-L182
|
[
"def",
"monitor",
"(",
"self",
",",
"sleep",
"=",
"5",
")",
":",
"manager",
"=",
"FileModificationObjectManager",
"(",
")",
"timestamps",
"=",
"{",
"}",
"filebodies",
"=",
"{",
"}",
"# register original timestamp and filebody to dict",
"for",
"file",
"in",
"self",
".",
"f_repository",
":",
"timestamps",
"[",
"file",
"]",
"=",
"self",
".",
"_get_mtime",
"(",
"file",
")",
"filebodies",
"[",
"file",
"]",
"=",
"open",
"(",
"file",
")",
".",
"read",
"(",
")",
"while",
"True",
":",
"for",
"file",
"in",
"self",
".",
"f_repository",
":",
"mtime",
"=",
"timestamps",
"[",
"file",
"]",
"fbody",
"=",
"filebodies",
"[",
"file",
"]",
"modified",
"=",
"self",
".",
"_check_modify",
"(",
"file",
",",
"mtime",
",",
"fbody",
")",
"# file not modify -> continue",
"if",
"not",
"modified",
":",
"continue",
"# file modifies -> create the modification object",
"new_mtime",
"=",
"self",
".",
"_get_mtime",
"(",
"file",
")",
"new_fbody",
"=",
"open",
"(",
"file",
")",
".",
"read",
"(",
")",
"obj",
"=",
"FileModificationObject",
"(",
"file",
",",
"(",
"mtime",
",",
"new_mtime",
")",
",",
"(",
"fbody",
",",
"new_fbody",
")",
")",
"# overwrite new timestamp and filebody",
"timestamps",
"[",
"file",
"]",
"=",
"new_mtime",
"filebodies",
"[",
"file",
"]",
"=",
"new_fbody",
"# append file modification object to manager",
"manager",
".",
"add_object",
"(",
"obj",
")",
"# return new modification object",
"yield",
"obj",
"time",
".",
"sleep",
"(",
"sleep",
")"
] |
a414ed3d486b92ed31d30e23de823b05b0381f55
|
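To make the polling technique above concrete, here is a stripped-down, self-contained sketch of the same idea in plain Python: record each file's timestamp and body up front, then loop, re-read, compare, and yield a description of any change. The name `watch` is illustrative and is not part of the mdfmonitor API.

import os
import time

def watch(paths, sleep=5):
    """Yield (path, old_mtime, new_mtime) whenever a watched file changes."""
    # record the original timestamp and body of every file
    mtimes = {p: os.path.getmtime(p) for p in paths}
    bodies = {p: open(p).read() for p in paths}
    while True:
        for p in paths:
            new_mtime = os.path.getmtime(p)
            new_body = open(p).read()
            # a change in either the timestamp or the body counts as a modification
            if new_mtime != mtimes[p] or new_body != bodies[p]:
                yield p, mtimes[p], new_mtime
                mtimes[p], bodies[p] = new_mtime, new_body
        time.sleep(sleep)

# usage (the watched file must exist):
# for path, old, new in watch(["config.ini"]):
#     print(path, "changed at", new)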
valid
|
RestPoints.init_app
|
Initialize a :class:`~flask.Flask` application for use with
this extension.
|
flask_restpoints/base.py
|
def init_app(self, app):
"""Initialize a :class:`~flask.Flask` application for use with
this extension.
"""
self._jobs = []
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['restpoints'] = self
app.restpoints_instance = self
app.add_url_rule('/ping', 'ping', ping)
app.add_url_rule('/time', 'time', time)
app.add_url_rule('/status', 'status', status(self._jobs))
|
def init_app(self, app):
"""Initialize a :class:`~flask.Flask` application for use with
this extension.
"""
self._jobs = []
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['restpoints'] = self
app.restpoints_instance = self
app.add_url_rule('/ping', 'ping', ping)
app.add_url_rule('/time', 'time', time)
app.add_url_rule('/status', 'status', status(self._jobs))
|
[
"Initialize",
"a",
":",
"class",
":",
"~flask",
".",
"Flask",
"application",
"for",
"use",
"with",
"this",
"extension",
"."
] |
juztin/flask-restpoints
|
python
|
https://github.com/juztin/flask-restpoints/blob/1833e1aeed6139c3b130d4e7497526c78c063a0f/flask_restpoints/base.py#L24-L37
|
[
"def",
"init_app",
"(",
"self",
",",
"app",
")",
":",
"self",
".",
"_jobs",
"=",
"[",
"]",
"if",
"not",
"hasattr",
"(",
"app",
",",
"'extensions'",
")",
":",
"app",
".",
"extensions",
"=",
"{",
"}",
"app",
".",
"extensions",
"[",
"'restpoints'",
"]",
"=",
"self",
"app",
".",
"restpoints_instance",
"=",
"self",
"app",
".",
"add_url_rule",
"(",
"'/ping'",
",",
"'ping'",
",",
"ping",
")",
"app",
".",
"add_url_rule",
"(",
"'/time'",
",",
"'time'",
",",
"time",
")",
"app",
".",
"add_url_rule",
"(",
"'/status'",
",",
"'status'",
",",
"status",
"(",
"self",
".",
"_jobs",
")",
")"
] |
1833e1aeed6139c3b130d4e7497526c78c063a0f
|
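A short usage sketch for the record above, following the standard Flask extension pattern. The import path is an assumption based on the repository layout (flask_restpoints/base.py):

from flask import Flask
from flask_restpoints.base import RestPoints  # assumed import path

app = Flask(__name__)
status = RestPoints()
status.init_app(app)  # registers the /ping, /time and /status routes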
valid
|
RestPoints.add_status_job
|
Adds a job to be included during calls to the `/status` endpoint.
:param job_func: the status function.
:param name: the name used in the JSON response for the given status
function. The name of the function is the default.
:param timeout: the time limit before the job status is set to
"timeout exceeded".
|
flask_restpoints/base.py
|
def add_status_job(self, job_func, name=None, timeout=3):
"""Adds a job to be included during calls to the `/status` endpoint.
:param job_func: the status function.
:param name: the name used in the JSON response for the given status
function. The name of the function is the default.
:param timeout: the time limit before the job status is set to
"timeout exceeded".
"""
job_name = job_func.__name__ if name is None else name
job = (job_name, timeout, job_func)
self._jobs.append(job)
|
def add_status_job(self, job_func, name=None, timeout=3):
"""Adds a job to be included during calls to the `/status` endpoint.
:param job_func: the status function.
:param name: the name used in the JSON response for the given status
function. The name of the function is the default.
:param timeout: the time limit before the job status is set to
"timeout exceeded".
"""
job_name = job_func.__name__ if name is None else name
job = (job_name, timeout, job_func)
self._jobs.append(job)
|
[
"Adds",
"a",
"job",
"to",
"be",
"included",
"during",
"calls",
"to",
"the",
"/",
"status",
"endpoint",
"."
] |
juztin/flask-restpoints
|
python
|
https://github.com/juztin/flask-restpoints/blob/1833e1aeed6139c3b130d4e7497526c78c063a0f/flask_restpoints/base.py#L39-L50
|
[
"def",
"add_status_job",
"(",
"self",
",",
"job_func",
",",
"name",
"=",
"None",
",",
"timeout",
"=",
"3",
")",
":",
"job_name",
"=",
"job_func",
".",
"__name__",
"if",
"name",
"is",
"None",
"else",
"name",
"job",
"=",
"(",
"job_name",
",",
"timeout",
",",
"job_func",
")",
"self",
".",
"_jobs",
".",
"append",
"(",
"job",
")"
] |
1833e1aeed6139c3b130d4e7497526c78c063a0f
|
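Jobs are stored as (name, timeout, callable) tuples on the instance, so a direct, non-decorator registration looks like the following sketch; `status` is a RestPoints instance and `postgres_ping` a hypothetical health check:

def postgres_ping():
    # hypothetical check: raise (or hang past the timeout) to be reported unhealthy
    ...

status.add_status_job(postgres_ping)                        # reported as "postgres_ping"
status.add_status_job(postgres_ping, name="db", timeout=5)  # explicit name and timeout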
valid
|
RestPoints.status_job
|
Decorator that invokes `add_status_job`.
::
@app.status_job
def postgresql():
# query/ping postgres
@app.status_job(name="Active Directory")
def active_directory():
# query active directory
@app.status_job(timeout=5)
def paypal():
# query paypal, timeout after 5 seconds
|
flask_restpoints/base.py
|
def status_job(self, fn=None, name=None, timeout=3):
"""Decorator that invokes `add_status_job`.
::
@app.status_job
def postgresql():
# query/ping postgres
@app.status_job(name="Active Directory")
def active_directory():
# query active directory
@app.status_job(timeout=5)
def paypal():
# query paypal, timeout after 5 seconds
"""
if fn is None:
def decorator(fn):
self.add_status_job(fn, name, timeout)
return decorator
else:
self.add_status_job(fn, name, timeout)
|
def status_job(self, fn=None, name=None, timeout=3):
"""Decorator that invokes `add_status_job`.
::
@app.status_job
def postgresql():
# query/ping postgres
@app.status_job(name="Active Directory")
def active_directory():
# query active directory
@app.status_job(timeout=5)
def paypal():
# query paypal, timeout after 5 seconds
"""
if fn is None:
def decorator(fn):
self.add_status_job(fn, name, timeout)
return decorator
else:
self.add_status_job(fn, name, timeout)
|
[
"Decorator",
"that",
"invokes",
"add_status_job",
"."
] |
juztin/flask-restpoints
|
python
|
https://github.com/juztin/flask-restpoints/blob/1833e1aeed6139c3b130d4e7497526c78c063a0f/flask_restpoints/base.py#L52-L75
|
[
"def",
"status_job",
"(",
"self",
",",
"fn",
"=",
"None",
",",
"name",
"=",
"None",
",",
"timeout",
"=",
"3",
")",
":",
"if",
"fn",
"is",
"None",
":",
"def",
"decorator",
"(",
"fn",
")",
":",
"self",
".",
"add_status_job",
"(",
"fn",
",",
"name",
",",
"timeout",
")",
"return",
"decorator",
"else",
":",
"self",
".",
"add_status_job",
"(",
"fn",
",",
"name",
",",
"timeout",
")"
] |
1833e1aeed6139c3b130d4e7497526c78c063a0f
|
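One subtlety in the record above: neither branch returns `fn` (the inner `decorator` also returns None), so while both decorator forms register the job correctly, the decorated module-level name ends up rebound to None. That is harmless when the function is only ever invoked through the /status endpoint, but worth knowing:

@app.status_job                 # registered under "postgresql" ...
def postgresql():
    ...                         # ... but the name `postgresql` is now None

@app.status_job(name="Active Directory", timeout=5)
def active_directory():
    ...                         # same: registered, then rebound to None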
valid
|
_pipepager
|
Page through text by feeding it to another program. Invoking a
pager through this might support colors.
|
cpenv/packages/click/_termui_impl.py
|
def _pipepager(text, cmd, color):
"""Page through text by feeding it to another program. Invoking a
pager through this might support colors.
"""
import subprocess
env = dict(os.environ)
# If we're piping to less we might support colors under the
# condition that
cmd_detail = cmd.rsplit('/', 1)[-1].split()
if color is None and cmd_detail[0] == 'less':
less_flags = os.environ.get('LESS', '') + ' '.join(cmd_detail[1:])
if not less_flags:
env['LESS'] = '-R'
color = True
elif 'r' in less_flags or 'R' in less_flags:
color = True
if not color:
text = strip_ansi(text)
c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
env=env)
encoding = get_best_encoding(c.stdin)
try:
c.stdin.write(text.encode(encoding, 'replace'))
c.stdin.close()
except (IOError, KeyboardInterrupt):
pass
# Less doesn't respect ^C, but catches it for its own UI purposes (aborting
# search or other commands inside less).
#
# That means when the user hits ^C, the parent process (click) terminates,
# but less is still alive, paging the output and messing up the terminal.
#
# If the user wants to make the pager exit on ^C, they should set
# `LESS='-K'`. It's not our decision to make.
while True:
try:
c.wait()
except KeyboardInterrupt:
pass
else:
break
|
def _pipepager(text, cmd, color):
"""Page through text by feeding it to another program. Invoking a
pager through this might support colors.
"""
import subprocess
env = dict(os.environ)
# If we're piping to less we might support colors under the
# condition that
cmd_detail = cmd.rsplit('/', 1)[-1].split()
if color is None and cmd_detail[0] == 'less':
less_flags = os.environ.get('LESS', '') + ' '.join(cmd_detail[1:])
if not less_flags:
env['LESS'] = '-R'
color = True
elif 'r' in less_flags or 'R' in less_flags:
color = True
if not color:
text = strip_ansi(text)
c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
env=env)
encoding = get_best_encoding(c.stdin)
try:
c.stdin.write(text.encode(encoding, 'replace'))
c.stdin.close()
except (IOError, KeyboardInterrupt):
pass
# Less doesn't respect ^C, but catches it for its own UI purposes (aborting
# search or other commands inside less).
#
# That means when the user hits ^C, the parent process (click) terminates,
# but less is still alive, paging the output and messing up the terminal.
#
# If the user wants to make the pager exit on ^C, they should set
# `LESS='-K'`. It's not our decision to make.
while True:
try:
c.wait()
except KeyboardInterrupt:
pass
else:
break
|
[
"Page",
"through",
"text",
"by",
"feeding",
"it",
"to",
"another",
"program",
".",
"Invoking",
"a",
"pager",
"through",
"this",
"might",
"support",
"colors",
"."
] |
cpenv/cpenv
|
python
|
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/packages/click/_termui_impl.py#L302-L346
|
[
"def",
"_pipepager",
"(",
"text",
",",
"cmd",
",",
"color",
")",
":",
"import",
"subprocess",
"env",
"=",
"dict",
"(",
"os",
".",
"environ",
")",
"# If we're piping to less we might support colors under the",
"# condition that",
"cmd_detail",
"=",
"cmd",
".",
"rsplit",
"(",
"'/'",
",",
"1",
")",
"[",
"-",
"1",
"]",
".",
"split",
"(",
")",
"if",
"color",
"is",
"None",
"and",
"cmd_detail",
"[",
"0",
"]",
"==",
"'less'",
":",
"less_flags",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'LESS'",
",",
"''",
")",
"+",
"' '",
".",
"join",
"(",
"cmd_detail",
"[",
"1",
":",
"]",
")",
"if",
"not",
"less_flags",
":",
"env",
"[",
"'LESS'",
"]",
"=",
"'-R'",
"color",
"=",
"True",
"elif",
"'r'",
"in",
"less_flags",
"or",
"'R'",
"in",
"less_flags",
":",
"color",
"=",
"True",
"if",
"not",
"color",
":",
"text",
"=",
"strip_ansi",
"(",
"text",
")",
"c",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"shell",
"=",
"True",
",",
"stdin",
"=",
"subprocess",
".",
"PIPE",
",",
"env",
"=",
"env",
")",
"encoding",
"=",
"get_best_encoding",
"(",
"c",
".",
"stdin",
")",
"try",
":",
"c",
".",
"stdin",
".",
"write",
"(",
"text",
".",
"encode",
"(",
"encoding",
",",
"'replace'",
")",
")",
"c",
".",
"stdin",
".",
"close",
"(",
")",
"except",
"(",
"IOError",
",",
"KeyboardInterrupt",
")",
":",
"pass",
"# Less doesn't respect ^C, but catches it for its own UI purposes (aborting",
"# search or other commands inside less).",
"#",
"# That means when the user hits ^C, the parent process (click) terminates,",
"# but less is still alive, paging the output and messing up the terminal.",
"#",
"# If the user wants to make the pager exit on ^C, they should set",
"# `LESS='-K'`. It's not our decision to make.",
"while",
"True",
":",
"try",
":",
"c",
".",
"wait",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"pass",
"else",
":",
"break"
] |
afbb569ae04002743db041d3629a5be8c290bd89
|
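The color decision above boils down to a small string heuristic; this isolated, self-contained sketch reproduces just that part so it can be tested without spawning a pager (the helper name is illustrative):

import os

def less_keeps_color(cmd, environ=None):
    """True when the pager is `less` and color codes can survive:
    either no flags are set (so LESS=-R can be injected) or the
    combined flags already contain r/R."""
    environ = os.environ if environ is None else environ
    # strip any leading path, keep the command word plus its flags
    cmd_detail = cmd.rsplit('/', 1)[-1].split()
    if not cmd_detail or cmd_detail[0] != 'less':
        return False
    less_flags = environ.get('LESS', '') + ' '.join(cmd_detail[1:])
    return not less_flags or 'r' in less_flags or 'R' in less_flags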
valid
|
_get_funky
|
Renvoie une fonction numpy correspondant au nom passé en paramètre,
sinon renvoie la fonction elle-même
|
pyair/date.py
|
def _get_funky(func):
"""Renvoie une fonction numpy correspondant au nom passé en paramètre,
sinon renvoie la fonction elle-même"""
if isinstance(func, str):
try:
func = getattr(np, func)
except:
raise NameError("Nom de fonction non comprise")
return func
|
def _get_funky(func):
"""Renvoie une fonction numpy correspondant au nom passé en paramètre,
sinon renvoie la fonction elle-même"""
if isinstance(func, str):
try:
func = getattr(np, func)
except:
raise NameError("Nom de fonction non comprise")
return func
|
[
"Renvoie",
"une",
"fonction",
"numpy",
"correspondant",
"au",
"nom",
"passé",
"en",
"paramètre",
"sinon",
"renvoie",
"la",
"fonction",
"elle",
"-",
"même"
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/date.py#L20-L29
|
[
"def",
"_get_funky",
"(",
"func",
")",
":",
"if",
"isinstance",
"(",
"func",
",",
"str",
")",
":",
"try",
":",
"func",
"=",
"getattr",
"(",
"np",
",",
"func",
")",
"except",
":",
"raise",
"NameError",
"(",
"\"Nom de fonction non comprise\"",
")",
"return",
"func"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
|
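In English: `_get_funky` resolves a function name to the matching numpy function and passes callables through unchanged, raising NameError for unknown names. For example:

import numpy as np

assert _get_funky('mean') is np.mean   # a name is looked up on the numpy module
assert _get_funky(np.max) is np.max    # a callable passes through untouched
# _get_funky('no_such_func')           # raises NameError("Nom de fonction non comprise")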
valid
|
profil_journalier
|
Calcul du profil journalier
Paramètres:
df: DataFrame de données dont l'index est une série temporelle
(cf module xair par exemple)
func: function permettant le calcul. Soit un nom de fonction numpy ('mean', 'max', ...)
soit la fonction elle-même (np.mean, np.max, ...)
Retourne:
Un DataFrame de moyennes par heure sur une journée
|
pyair/date.py
|
def profil_journalier(df, func='mean'):
"""
Calcul du profil journalier
Paramètres:
df: DataFrame de données dont l'index est une série temporelle
(cf module xair par exemple)
func: function permettant le calcul. Soit un nom de fonction numpy ('mean', 'max', ...)
soit la fonction elle-même (np.mean, np.max, ...)
Retourne:
Un DataFrame de moyennes par heure sur une journée
"""
func = _get_funky(func)
res = df.groupby(lambda x: x.hour).aggregate(func)
return res
|
def profil_journalier(df, func='mean'):
"""
Calcul du profil journalier
Paramètres:
df: DataFrame de données dont l'index est une série temporelle
(cf module xair par exemple)
func: function permettant le calcul. Soit un nom de fonction numpy ('mean', 'max', ...)
soit la fonction elle-même (np.mean, np.max, ...)
Retourne:
Un DataFrame de moyennes par heure sur une journée
"""
func = _get_funky(func)
res = df.groupby(lambda x: x.hour).aggregate(func)
return res
|
[
"Calcul",
"du",
"profil",
"journalier"
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/date.py#L32-L47
|
[
"def",
"profil_journalier",
"(",
"df",
",",
"func",
"=",
"'mean'",
")",
":",
"func",
"=",
"_get_funky",
"(",
"func",
")",
"res",
"=",
"df",
".",
"groupby",
"(",
"lambda",
"x",
":",
"x",
".",
"hour",
")",
".",
"aggregate",
"(",
"func",
")",
"return",
"res"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
|
valid
|
profil_hebdo
|
Calcul du profil hebdomadaire
Paramètres:
df: DataFrame de données dont l'index est une série temporelle
(cf module xair par exemple)
func: function permettant le calcul. Soit un nom de fonction numpy ('mean', 'max', ...)
soit la fonction elle-même (np.mean, np.max, ...)
Retourne:
Un DataFrame de moyennes par journée sur la semaine
|
pyair/date.py
|
def profil_hebdo(df, func='mean'):
"""
Calcul du profil hebdomadaire
Paramètres:
df: DataFrame de données dont l'index est une série temporelle
(cf module xair par exemple)
func: function permettant le calcul. Soit un nom de fonction numpy ('mean', 'max', ...)
soit la fonction elle-même (np.mean, np.max, ...)
Retourne:
Un DataFrame de moyennes par journée sur la semaine
"""
func = _get_funky(func)
res = df.groupby(lambda x: x.weekday).aggregate(func)
# On met des noms de jour à la place des numéros dans l'index
res.index = [cal.day_name[i] for i in range(0,7)]
return res
|
def profil_hebdo(df, func='mean'):
"""
Calcul du profil hebdomadaire
Paramètres:
df: DataFrame de données dont l'index est une série temporelle
(cf module xair par exemple)
func: function permettant le calcul. Soit un nom de fonction numpy ('mean', 'max', ...)
soit la fonction elle-même (np.mean, np.max, ...)
Retourne:
Un DataFrame de moyennes par journée sur la semaine
"""
func = _get_funky(func)
res = df.groupby(lambda x: x.weekday).aggregate(func)
# On met des noms de jour à la place des numéros dans l'index
res.index = [cal.day_name[i] for i in range(0,7)]
return res
|
[
"Calcul",
"du",
"profil",
"journalier"
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/date.py#L50-L67
|
[
"def",
"profil_hebdo",
"(",
"df",
",",
"func",
"=",
"'mean'",
")",
":",
"func",
"=",
"_get_funky",
"(",
"func",
")",
"res",
"=",
"df",
".",
"groupby",
"(",
"lambda",
"x",
":",
"x",
".",
"weekday",
")",
".",
"aggregate",
"(",
"func",
")",
"# On met des noms de jour à la place des numéros dans l'index",
"res",
".",
"index",
"=",
"[",
"cal",
".",
"day_name",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"7",
")",
"]",
"return",
"res"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
|
valid
|
profil_annuel
|
Calcul du profil annuel
Paramètres:
df: DataFrame de données dont l'index est une série temporelle
(cf module xair par exemple)
func: function permettant le calcul. Soit un nom de fonction numpy ('mean', 'max', ...)
soit la fonction elle-même (np.mean, np.max, ...)
Retourne:
Un DataFrame de moyennes par mois
|
pyair/date.py
|
def profil_annuel(df, func='mean'):
"""
Calcul du profil annuel
Paramètres:
df: DataFrame de données dont l'index est une série temporelle
(cf module xair par exemple)
func: function permettant le calcul. Soit un nom de fonction numpy ('mean', 'max', ...)
soit la fonction elle-même (np.mean, np.max, ...)
Retourne:
Un DataFrame de moyennes par mois
"""
func = _get_funky(func)
res = df.groupby(lambda x: x.month).aggregate(func)
# On met des noms de mois à la place des numéros dans l'index
res.index = [cal.month_name[i] for i in range(1,13)]
return res
|
def profil_annuel(df, func='mean'):
"""
Calcul du profil annuel
Paramètres:
df: DataFrame de données dont l'index est une série temporelle
(cf module xair par exemple)
func: function permettant le calcul. Soit un nom de fonction numpy ('mean', 'max', ...)
soit la fonction elle-même (np.mean, np.max, ...)
Retourne:
Un DataFrame de moyennes par mois
"""
func = _get_funky(func)
res = df.groupby(lambda x: x.month).aggregate(func)
# On met des noms de mois à la place des numéros dans l'index
res.index = [cal.month_name[i] for i in range(1,13)]
return res
|
[
"Calcul",
"du",
"profil",
"annuel"
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/date.py#L70-L87
|
[
"def",
"profil_annuel",
"(",
"df",
",",
"func",
"=",
"'mean'",
")",
":",
"func",
"=",
"_get_funky",
"(",
"func",
")",
"res",
"=",
"df",
".",
"groupby",
"(",
"lambda",
"x",
":",
"x",
".",
"month",
")",
".",
"aggregate",
"(",
"func",
")",
"# On met des noms de mois à la place des numéros dans l'index",
"res",
".",
"index",
"=",
"[",
"cal",
".",
"month_name",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"13",
")",
"]",
"return",
"res"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
|
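All three profile helpers above share one pattern: group a time-indexed DataFrame by a calendar component of its DatetimeIndex (hour of day, weekday, month) and apply a numpy reduction. In English, they compute the daily, weekly and annual profiles respectively. A self-contained illustration of the underlying pattern:

import numpy as np
import pandas as pd

# two days of hourly measurements
idx = pd.date_range('2013-01-01', periods=48, freq='H')
df = pd.DataFrame({'NO2': np.arange(48.0)}, index=idx)

# same pattern as profil_journalier: one aggregate per hour of day (24 rows)
hourly_profile = df.groupby(lambda ts: ts.hour).aggregate(np.mean)
print(hourly_profile)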
valid
|
to_date
|
Transforme un champ date vers un objet python datetime
Paramètres:
date:
- si None, renvoie la date du jour
- si de type str, renvoie un objet python datetime
- si de type datetime, le retourne sans modification
dayfirst: Si True, aide l'analyse du champ date de type str en informant
le décrypteur que le jour se situe en début de chaîne (ex:11/09/2012
pourrait être interpreté comme le 09 novembre si dayfirst=False)
format: chaîne de caractère décrivant précisement le champ date de type
str. Voir la documentation officielle de python.datetime pour description
|
pyair/xair.py
|
def to_date(date, dayfirst=False, format=None):
"""
Transforme un champ date vers un objet python datetime
Paramètres:
date:
- si None, renvoie la date du jour
- si de type str, renvoie un objet python datetime
- si de type datetime, le retourne sans modification
dayfirst: Si True, aide l'analyse du champ date de type str en informant
le décrypteur que le jour se situe en début de chaîne (ex:11/09/2012
pourrait être interpreté comme le 09 novembre si dayfirst=False)
format: chaîne de caractère décrivant précisement le champ date de type
str. Voir la documentation officielle de python.datetime pour description
"""
## TODO: voir si pd.tseries.api ne peut pas remplacer tout ca
if not date:
return dt.datetime.fromordinal(
dt.date.today().toordinal()) # mieux que dt.datetime.now() car ca met les heures, minutes et secondes à zéro
elif isinstance(date, dt.datetime):
return date
elif isinstance(date, str):
return pd.to_datetime(date, dayfirst=dayfirst, format=format)
elif isinstance(date, dt.date):
return dt.datetime.fromordinal(date.toordinal())
else:
raise ValueError("Les dates doivent être de type None, str, datetime.date ou datetime.datetime")
|
def to_date(date, dayfirst=False, format=None):
"""
Transforme un champ date vers un objet python datetime
Paramètres:
date:
- si None, renvoie la date du jour
- si de type str, renvoie un objet python datetime
- si de type datetime, le retourne sans modification
dayfirst: Si True, aide l'analyse du champ date de type str en informant
le décrypteur que le jour se situe en début de chaîne (ex:11/09/2012
pourrait être interpreté comme le 09 novembre si dayfirst=False)
format: chaîne de caractère décrivant précisement le champ date de type
str. Voir la documentation officielle de python.datetime pour description
"""
## TODO: voir si pd.tseries.api ne peut pas remplacer tout ca
if not date:
return dt.datetime.fromordinal(
dt.date.today().toordinal()) # mieux que dt.datetime.now() car ca met les heures, minutes et secondes à zéro
elif isinstance(date, dt.datetime):
return date
elif isinstance(date, str):
return pd.to_datetime(date, dayfirst=dayfirst, format=format)
elif isinstance(date, dt.date):
return dt.datetime.fromordinal(date.toordinal())
else:
raise ValueError("Les dates doivent être de type None, str, datetime.date ou datetime.datetime")
|
[
"Transforme",
"un",
"champ",
"date",
"vers",
"un",
"objet",
"python",
"datetime",
"Paramètres",
":",
"date",
":",
"-",
"si",
"None",
"renvoie",
"la",
"date",
"du",
"jour",
"-",
"si",
"de",
"type",
"str",
"renvoie",
"un",
"objet",
"python",
"datetime",
"-",
"si",
"de",
"type",
"datetime",
"le",
"retourne",
"sans",
"modification",
"dayfirst",
":",
"Si",
"True",
"aide",
"l",
"analyse",
"du",
"champ",
"date",
"de",
"type",
"str",
"en",
"informant",
"le",
"décrypteur",
"que",
"le",
"jour",
"se",
"situe",
"en",
"début",
"de",
"chaîne",
"(",
"ex",
":",
"11",
"/",
"09",
"/",
"2012",
"pourrait",
"être",
"interpreté",
"comme",
"le",
"09",
"novembre",
"si",
"dayfirst",
"=",
"False",
")",
"format",
":",
"chaîne",
"de",
"caractère",
"décrivant",
"précisement",
"le",
"champ",
"date",
"de",
"type",
"str",
".",
"Voir",
"la",
"documentation",
"officielle",
"de",
"python",
".",
"datetime",
"pour",
"description"
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/xair.py#L44-L70
|
[
"def",
"to_date",
"(",
"date",
",",
"dayfirst",
"=",
"False",
",",
"format",
"=",
"None",
")",
":",
"## TODO: voir si pd.tseries.api ne peut pas remplacer tout ca",
"if",
"not",
"date",
":",
"return",
"dt",
".",
"datetime",
".",
"fromordinal",
"(",
"dt",
".",
"date",
".",
"today",
"(",
")",
".",
"toordinal",
"(",
")",
")",
"# mieux que dt.datetime.now() car ca met les heures, minutes et secondes à zéro",
"elif",
"isinstance",
"(",
"date",
",",
"dt",
".",
"datetime",
")",
":",
"return",
"date",
"elif",
"isinstance",
"(",
"date",
",",
"str",
")",
":",
"return",
"pd",
".",
"to_datetime",
"(",
"date",
",",
"dayfirst",
"=",
"dayfirst",
",",
"format",
"=",
"format",
")",
"elif",
"isinstance",
"(",
"date",
",",
"dt",
".",
"date",
")",
":",
"return",
"dt",
".",
"datetime",
".",
"fromordinal",
"(",
"date",
".",
"toordinal",
"(",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Les dates doivent être de type None, str, datetime.date ou datetime.datetime\")",
""
] |
467e8a843ca9f882f8bb2958805b7293591996ad
|
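In short, `to_date` normalizes its input to a `datetime.datetime`: None becomes today at midnight, strings are parsed (optionally day-first, or with an explicit format), datetimes pass through unchanged, and plain dates are promoted to midnight. For example:

import datetime as dt

to_date(None)                         # today at 00:00:00
to_date('2013-01-28')                 # 2013-01-28 00:00:00 (a pandas Timestamp)
to_date('11/09/2012', dayfirst=True)  # parsed as 11 September 2012
to_date(dt.date(2012, 9, 11))         # promoted to datetime(2012, 9, 11, 0, 0)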
valid
|
_format
|
Formate une donnée d'entrée pour être exploitable dans les fonctions liste_*
et get_*.
Paramètres:
noms: chaîne de caractère, liste ou tuples de chaînes de caractères ou
pandas.Series de chaînes de caractères.
Retourne:
Une chaînes de caractères dont chaque élément est séparé du suivant par les
caractères ',' (simples quotes comprises)
|
pyair/xair.py
|
def _format(noms):
"""
Formate une donnée d'entrée pour être exploitable dans les fonctions liste_*
et get_*.
Paramètres:
noms: chaîne de caractère, liste ou tuples de chaînes de caractères ou
pandas.Series de chaînes de caractères.
Retourne:
Une chaînes de caractères dont chaque élément est séparé du suivant par les
caractères ',' (simples quotes comprises)
"""
if isinstance(noms, (list, tuple, pd.Series)):
noms = ','.join(noms)
noms = noms.replace(",", "','")
return noms
|
def _format(noms):
"""
Formate une donnée d'entrée pour être exploitable dans les fonctions liste_*
et get_*.
Paramètres:
noms: chaîne de caractère, liste ou tuples de chaînes de caractères ou
pandas.Series de chaînes de caractères.
Retourne:
Une chaînes de caractères dont chaque élément est séparé du suivant par les
caractères ',' (simples quotes comprises)
"""
if isinstance(noms, (list, tuple, pd.Series)):
noms = ','.join(noms)
noms = noms.replace(",", "','")
return noms
|
[
"Formate",
"une",
"donnée",
"d",
"entrée",
"pour",
"être",
"exploitable",
"dans",
"les",
"fonctions",
"liste_",
"*",
"et",
"get_",
"*",
"."
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/xair.py#L73-L90
|
[
"def",
"_format",
"(",
"noms",
")",
":",
"if",
"isinstance",
"(",
"noms",
",",
"(",
"list",
",",
"tuple",
",",
"pd",
".",
"Series",
")",
")",
":",
"noms",
"=",
"','",
".",
"join",
"(",
"noms",
")",
"noms",
"=",
"noms",
".",
"replace",
"(",
"\",\"",
",",
"\"','\"",
")",
"return",
"noms"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
|
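The replacement looks odd in isolation: `_format` builds only the inner part of an SQL IN list, leaving the outermost quotes to the caller's `IN ('%s')` template. For example:

_format("NO2")                  # -> "NO2"
_format(["NO2", "O3", "PM10"])  # -> "NO2','O3','PM10"

# wrapped by a caller:
# "WHERE IDENTIFIANT IN ('%s')" % _format(["NO2", "O3", "PM10"])
# -> "WHERE IDENTIFIANT IN ('NO2','O3','PM10')"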
valid
|
date_range
|
Génère une liste de date en tenant compte des heures de début et fin d'une journée.
La date de début sera toujours calée à 0h, et celle de fin à 23h
Paramètres:
debut: datetime représentant la date de début
fin: datetime représentant la date de fin
freq: freq de temps. Valeurs possibles : T (minute), H (heure), D (jour),
M (mois), A (année). Peut prendre des cycles, comme 15T pour 15 minutes
|
pyair/xair.py
|
def date_range(debut, fin, freq):
"""
Génère une liste de date en tenant compte des heures de début et fin d'une journée.
La date de début sera toujours calée à 0h, et celle de fin à 23h
Paramètres:
debut: datetime représentant la date de début
fin: datetime représentant la date de fin
freq: freq de temps. Valeurs possibles : T (minute), H (heure), D (jour),
M (mois), A (année). Peut prendre des cycles, comme 15T pour 15 minutes
"""
debut_dt = debut.replace(hour=0, minute=0, second=0, microsecond=0)
fin_dt = fin.replace(hour=23, minute=59, second=0, microsecond=0)
if freq in ('M', 'A'): # Calle la fréquence sur le début de mois/année
freq += 'S'
debut_dt = debut_dt.replace(day=1, minute=0, second=0, microsecond=0)
fin_dt = fin_dt.replace(day=1, minute=0, second=0, microsecond=0)
dates_completes = pd.date_range(start=debut_dt, end=fin_dt, freq=freq)
return dates_completes
|
def date_range(debut, fin, freq):
"""
Génère une liste de date en tenant compte des heures de début et fin d'une journée.
La date de début sera toujours calée à 0h, et celle de fin à 23h
Paramètres:
debut: datetime représentant la date de début
fin: datetime représentant la date de fin
freq: freq de temps. Valeurs possibles : T (minute), H (heure), D (jour),
M (mois), A (année). Peut prendre des cycles, comme 15T pour 15 minutes
"""
debut_dt = debut.replace(hour=0, minute=0, second=0, microsecond=0)
fin_dt = fin.replace(hour=23, minute=59, second=0, microsecond=0)
if freq in ('M', 'A'): # Calle la fréquence sur le début de mois/année
freq += 'S'
debut_dt = debut_dt.replace(day=1, minute=0, second=0, microsecond=0)
fin_dt = fin_dt.replace(day=1, minute=0, second=0, microsecond=0)
dates_completes = pd.date_range(start=debut_dt, end=fin_dt, freq=freq)
return dates_completes
|
[
"Génère",
"une",
"liste",
"de",
"date",
"en",
"tenant",
"compte",
"des",
"heures",
"de",
"début",
"et",
"fin",
"d",
"une",
"journée",
".",
"La",
"date",
"de",
"début",
"sera",
"toujours",
"calée",
"à",
"0h",
"et",
"celle",
"de",
"fin",
"à",
"23h"
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/xair.py#L93-L113
|
[
"def",
"date_range",
"(",
"debut",
",",
"fin",
",",
"freq",
")",
":",
"debut_dt",
"=",
"debut",
".",
"replace",
"(",
"hour",
"=",
"0",
",",
"minute",
"=",
"0",
",",
"second",
"=",
"0",
",",
"microsecond",
"=",
"0",
")",
"fin_dt",
"=",
"fin",
".",
"replace",
"(",
"hour",
"=",
"23",
",",
"minute",
"=",
"59",
",",
"second",
"=",
"0",
",",
"microsecond",
"=",
"0",
")",
"if",
"freq",
"in",
"(",
"'M'",
",",
"'A'",
")",
":",
"# Calle la fréquence sur le début de mois/année",
"freq",
"+=",
"'S'",
"debut_dt",
"=",
"debut_dt",
".",
"replace",
"(",
"day",
"=",
"1",
",",
"minute",
"=",
"0",
",",
"second",
"=",
"0",
",",
"microsecond",
"=",
"0",
")",
"fin_dt",
"=",
"fin_dt",
".",
"replace",
"(",
"day",
"=",
"1",
",",
"minute",
"=",
"0",
",",
"second",
"=",
"0",
",",
"microsecond",
"=",
"0",
")",
"dates_completes",
"=",
"pd",
".",
"date_range",
"(",
"start",
"=",
"debut_dt",
",",
"end",
"=",
"fin_dt",
",",
"freq",
"=",
"freq",
")",
"return",
"dates_completes"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
|
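In English: `date_range` widens debut/fin to whole days (00:00 through 23:59) and, for monthly or annual frequencies, snaps both ends to period starts by appending 'S' to get pandas' anchored 'MS'/'AS' offsets. For example:

import datetime as dt

# hourly stamps spanning both full days: 2013-01-01 00:00 .. 2013-01-02 23:00
stamps = date_range(dt.datetime(2013, 1, 1, 15, 30),
                    dt.datetime(2013, 1, 2, 8, 0), freq='H')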
valid
|
XAIR._connect
|
Connexion à la base XAIR
|
pyair/xair.py
|
def _connect(self):
"""
Connexion à la base XAIR
"""
try:
# On passe par Oracle Instant Client avec le TNS ORA_FULL
self.conn = cx_Oracle.connect(self._ORA_FULL)
self.cursor = self.conn.cursor()
print('XAIR: Connexion établie')
except cx_Oracle.Error as e:
print("Erreur: %s" % (e))
raise cx_Oracle.Error('Echec de connexion')
|
def _connect(self):
"""
Connexion à la base XAIR
"""
try:
# On passe par Oracle Instant Client avec le TNS ORA_FULL
self.conn = cx_Oracle.connect(self._ORA_FULL)
self.cursor = self.conn.cursor()
print('XAIR: Connexion établie')
except cx_Oracle.Error as e:
print("Erreur: %s" % (e))
raise cx_Oracle.Error('Echec de connexion')
|
[
"Connexion",
"à",
"la",
"base",
"XAIR"
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/xair.py#L132-L144
|
[
"def",
"_connect",
"(",
"self",
")",
":",
"try",
":",
"# On passe par Oracle Instant Client avec le TNS ORA_FULL",
"self",
".",
"conn",
"=",
"cx_Oracle",
".",
"connect",
"(",
"self",
".",
"_ORA_FULL",
")",
"self",
".",
"cursor",
"=",
"self",
".",
"conn",
".",
"cursor",
"(",
")",
"print",
"(",
"'XAIR: Connexion établie')",
"",
"except",
"cx_Oracle",
".",
"Error",
"as",
"e",
":",
"print",
"(",
"\"Erreur: %s\"",
"%",
"(",
"e",
")",
")",
"raise",
"cx_Oracle",
".",
"Error",
"(",
"'Echec de connexion'",
")"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
|
valid
|
XAIR.liste_parametres
|
Liste des paramètres
Paramètres:
parametre: si fourni, retourne l'entrée pour ce parametre uniquement
|
pyair/xair.py
|
def liste_parametres(self, parametre=None):
"""
Liste des paramètres
Paramètres:
parametre: si fourni, retourne l'entrée pour ce parametre uniquement
"""
condition = ""
if parametre:
condition = "WHERE CCHIM='%s'" % parametre
_sql = """SELECT CCHIM AS PARAMETRE,
NCON AS LIBELLE,
NOPOL AS CODE
FROM NOM_MESURE %s ORDER BY CCHIM""" % condition
return psql.read_sql(_sql, self.conn)
|
def liste_parametres(self, parametre=None):
"""
Liste des paramètres
Paramètres:
parametre: si fourni, retourne l'entrée pour ce parametre uniquement
"""
condition = ""
if parametre:
condition = "WHERE CCHIM='%s'" % parametre
_sql = """SELECT CCHIM AS PARAMETRE,
NCON AS LIBELLE,
NOPOL AS CODE
FROM NOM_MESURE %s ORDER BY CCHIM""" % condition
return psql.read_sql(_sql, self.conn)
|
[
"Liste",
"des",
"paramètres"
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/xair.py#L160-L175
|
[
"def",
"liste_parametres",
"(",
"self",
",",
"parametre",
"=",
"None",
")",
":",
"condition",
"=",
"\"\"",
"if",
"parametre",
":",
"condition",
"=",
"\"WHERE CCHIM='%s'\"",
"%",
"parametre",
"_sql",
"=",
"\"\"\"SELECT CCHIM AS PARAMETRE,\n NCON AS LIBELLE,\n NOPOL AS CODE\n FROM NOM_MESURE %s ORDER BY CCHIM\"\"\"",
"%",
"condition",
"return",
"psql",
".",
"read_sql",
"(",
"_sql",
",",
"self",
".",
"conn",
")"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
|
valid
|
XAIR.liste_mesures
|
Décrit les mesures:
- d'un ou des reseaux,
- d'une ou des stations,
- d'un ou des parametres
ou décrit une (des) mesures suivant son (leur) identifiant(s)
Chaque attribut peut être étendu en rajoutant des noms séparés par des
virgules ou en les mettant dans une liste/tuple/pandas.Series.
Ainsi pour avoir la liste des mesures en vitesse et direction de vent:
parametre="VV,DV" ou = ["VV", "DV"]
Les arguments sont combinés ensemble pour la sélection des mesures.
Paramètres:
reseau : nom du reseau dans lequel lister les mesures
station: nom de la station où lister les mesures
parametre: Code chimique du parametre à lister
mesure: nom de la mesure à décrire
|
pyair/xair.py
|
def liste_mesures(self, reseau=None, station=None, parametre=None, mesure=None):
"""
Décrit les mesures:
- d'un ou des reseaux,
- d'une ou des stations,
- d'un ou des parametres
ou décrit une (des) mesures suivant son (leur) identifiant(s)
Chaque attribut peut être étendu en rajoutant des noms séparés par des
virgules ou en les mettant dans une liste/tuple/pandas.Series.
Ainsi pour avoir la liste des mesures en vitesse et direction de vent:
parametre="VV,DV" ou = ["VV", "DV"]
Les arguments sont combinés ensemble pour la sélection des mesures.
Paramètres:
reseau : nom du reseau dans lequel lister les mesures
station: nom de la station où lister les mesures
parametre: Code chimique du parametre à lister
mesure: nom de la mesure à décrire
"""
tbreseau = ""
conditions = []
if reseau:
reseau = _format(reseau)
tbreseau = """INNER JOIN RESEAUMES R USING (NOM_COURT_MES) """
conditions.append("""R.NOM_COURT_RES IN ('%s') """ % reseau)
if parametre:
parametre = _format(parametre)
conditions.append("""N.CCHIM IN ('%s')""" % parametre)
if station:
station = _format(station)
conditions.append("""S.IDENTIFIANT IN ('%s')""" % station)
if mesure:
mesure = _format(mesure)
conditions.append("""M.IDENTIFIANT IN ('%s')""" % mesure)
condition = "WHERE %s" % " and ".join(conditions) if conditions else ""
_sql = """SELECT
M.IDENTIFIANT AS MESURE,
M.NOM_MES AS LIBELLE,
M.UNITE AS UNITE,
S.IDENTIFIANT AS STATION,
N.CCHIM AS CODE_PARAM,
N.NCON AS PARAMETRE
FROM MESURE M
INNER JOIN NOM_MESURE N USING (NOPOL)
INNER JOIN STATION S USING (NOM_COURT_SIT)
%s
%s
ORDER BY M.IDENTIFIANT""" % (tbreseau, condition)
return psql.read_sql(_sql, self.conn)
|
def liste_mesures(self, reseau=None, station=None, parametre=None, mesure=None):
"""
Décrit les mesures:
- d'un ou des reseaux,
- d'une ou des stations,
- d'un ou des parametres
ou décrit une (des) mesures suivant son (leur) identifiant(s)
Chaque attribut peut être étendu en rajoutant des noms séparés par des
virgules ou en les mettant dans une liste/tuple/pandas.Series.
Ainsi pour avoir la liste des mesures en vitesse et direction de vent:
parametre="VV,DV" ou = ["VV", "DV"]
Les arguments sont combinés ensemble pour la sélection des mesures.
Paramètres:
reseau : nom du reseau dans lequel lister les mesures
station: nom de la station où lister les mesures
parametre: Code chimique du parametre à lister
mesure: nom de la mesure à décrire
"""
tbreseau = ""
conditions = []
if reseau:
reseau = _format(reseau)
tbreseau = """INNER JOIN RESEAUMES R USING (NOM_COURT_MES) """
conditions.append("""R.NOM_COURT_RES IN ('%s') """ % reseau)
if parametre:
parametre = _format(parametre)
conditions.append("""N.CCHIM IN ('%s')""" % parametre)
if station:
station = _format(station)
conditions.append("""S.IDENTIFIANT IN ('%s')""" % station)
if mesure:
mesure = _format(mesure)
conditions.append("""M.IDENTIFIANT IN ('%s')""" % mesure)
condition = "WHERE %s" % " and ".join(conditions) if conditions else ""
_sql = """SELECT
M.IDENTIFIANT AS MESURE,
M.NOM_MES AS LIBELLE,
M.UNITE AS UNITE,
S.IDENTIFIANT AS STATION,
N.CCHIM AS CODE_PARAM,
N.NCON AS PARAMETRE
FROM MESURE M
INNER JOIN NOM_MESURE N USING (NOPOL)
INNER JOIN STATION S USING (NOM_COURT_SIT)
%s
%s
ORDER BY M.IDENTIFIANT""" % (tbreseau, condition)
return psql.read_sql(_sql, self.conn)
|
[
"Décrit",
"les",
"mesures",
":",
"-",
"d",
"un",
"ou",
"des",
"reseaux",
"-",
"d",
"une",
"ou",
"des",
"stations",
"-",
"d",
"un",
"ou",
"des",
"parametres",
"ou",
"décrit",
"une",
"(",
"des",
")",
"mesures",
"suivant",
"son",
"(",
"leur",
")",
"identifiant",
"(",
"s",
")",
"Chaque",
"attribut",
"peut",
"être",
"étendu",
"en",
"rajoutant",
"des",
"noms",
"séparés",
"par",
"des",
"virgules",
"ou",
"en",
"les",
"mettant",
"dans",
"une",
"liste",
"/",
"tuple",
"/",
"pandas",
".",
"Series",
".",
"Ainsi",
"pour",
"avoir",
"la",
"liste",
"des",
"mesures",
"en",
"vitesse",
"et",
"direction",
"de",
"vent",
":",
"parametre",
"=",
"VV",
"DV",
"ou",
"=",
"[",
"VV",
"DV",
"]",
"Les",
"arguments",
"sont",
"combinés",
"ensemble",
"pour",
"la",
"sélection",
"des",
"mesures",
"."
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/xair.py#L177-L235
|
[
"def",
"liste_mesures",
"(",
"self",
",",
"reseau",
"=",
"None",
",",
"station",
"=",
"None",
",",
"parametre",
"=",
"None",
",",
"mesure",
"=",
"None",
")",
":",
"tbreseau",
"=",
"\"\"",
"conditions",
"=",
"[",
"]",
"if",
"reseau",
":",
"reseau",
"=",
"_format",
"(",
"reseau",
")",
"tbreseau",
"=",
"\"\"\"INNER JOIN RESEAUMES R USING (NOM_COURT_MES) \"\"\"",
"conditions",
".",
"append",
"(",
"\"\"\"R.NOM_COURT_RES IN ('%s') \"\"\"",
"%",
"reseau",
")",
"if",
"parametre",
":",
"parametre",
"=",
"_format",
"(",
"parametre",
")",
"conditions",
".",
"append",
"(",
"\"\"\"N.CCHIM IN ('%s')\"\"\"",
"%",
"parametre",
")",
"if",
"station",
":",
"station",
"=",
"_format",
"(",
"station",
")",
"conditions",
".",
"append",
"(",
"\"\"\"S.IDENTIFIANT IN ('%s')\"\"\"",
"%",
"station",
")",
"if",
"mesure",
":",
"mesure",
"=",
"_format",
"(",
"mesure",
")",
"conditions",
".",
"append",
"(",
"\"\"\"M.IDENTIFIANT IN ('%s')\"\"\"",
"%",
"mesure",
")",
"condition",
"=",
"\"WHERE %s\"",
"%",
"\" and \"",
".",
"join",
"(",
"conditions",
")",
"if",
"conditions",
"else",
"\"\"",
"_sql",
"=",
"\"\"\"SELECT\n M.IDENTIFIANT AS MESURE,\n M.NOM_MES AS LIBELLE,\n M.UNITE AS UNITE,\n S.IDENTIFIANT AS STATION,\n N.CCHIM AS CODE_PARAM,\n N.NCON AS PARAMETRE\n FROM MESURE M\n INNER JOIN NOM_MESURE N USING (NOPOL)\n INNER JOIN STATION S USING (NOM_COURT_SIT)\n %s\n %s\n ORDER BY M.IDENTIFIANT\"\"\"",
"%",
"(",
"tbreseau",
",",
"condition",
")",
"return",
"psql",
".",
"read_sql",
"(",
"_sql",
",",
"self",
".",
"conn",
")"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
|
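A brief usage sketch: the four filters are combined with AND in the WHERE clause. This assumes an already-connected XAIR instance `xr`; the station name is illustrative:

# all wind speed and direction measures declared on one station
mes = xr.liste_mesures(station="STRASBOURG", parametre=["VV", "DV"])
# same thing, using the comma-separated string form accepted by _format
mes = xr.liste_mesures(station="STRASBOURG", parametre="VV,DV")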
valid
|
XAIR.liste_stations
|
Liste des stations
Paramètres:
station : un nom de station valide (si vide, liste toutes les stations)
detail : si True, affiche plus de détail sur la (les) station(s).
|
pyair/xair.py
|
def liste_stations(self, station=None, detail=False):
"""
Liste des stations
Paramètres:
station : un nom de station valide (si vide, liste toutes les stations)
detail : si True, affiche plus de détail sur la (les) station(s).
"""
condition = ""
if station:
station = _format(station)
condition = "WHERE IDENTIFIANT IN ('%s')" % station
select = ""
if detail:
select = """,
ISIT AS DESCRIPTION,
NO_TELEPHONE AS TELEPHONE,
ADRESSE_IP,
LONGI AS LONGITUDE,
LATI AS LATITUDE,
ALTI AS ALTITUDE,
AXE AS ADR,
CODE_POSTAL AS CP,
FLAG_VALID AS VALID"""
_sql = """SELECT
NSIT AS NUMERO,
IDENTIFIANT AS STATION %s
FROM STATION
%s
ORDER BY NSIT""" % (select, condition)
return psql.read_sql(_sql, self.conn)
|
def liste_stations(self, station=None, detail=False):
"""
Liste des stations
Paramètres:
station : un nom de station valide (si vide, liste toutes les stations)
detail : si True, affiche plus de détail sur la (les) station(s).
"""
condition = ""
if station:
station = _format(station)
condition = "WHERE IDENTIFIANT IN ('%s')" % station
select = ""
if detail:
select = """,
ISIT AS DESCRIPTION,
NO_TELEPHONE AS TELEPHONE,
ADRESSE_IP,
LONGI AS LONGITUDE,
LATI AS LATITUDE,
ALTI AS ALTITUDE,
AXE AS ADR,
CODE_POSTAL AS CP,
FLAG_VALID AS VALID"""
_sql = """SELECT
NSIT AS NUMERO,
IDENTIFIANT AS STATION %s
FROM STATION
%s
ORDER BY NSIT""" % (select, condition)
return psql.read_sql(_sql, self.conn)
|
[
"Liste",
"des",
"stations"
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/xair.py#L249-L282
|
[
"def",
"liste_stations",
"(",
"self",
",",
"station",
"=",
"None",
",",
"detail",
"=",
"False",
")",
":",
"condition",
"=",
"\"\"",
"if",
"station",
":",
"station",
"=",
"_format",
"(",
"station",
")",
"condition",
"=",
"\"WHERE IDENTIFIANT IN ('%s')\"",
"%",
"station",
"select",
"=",
"\"\"",
"if",
"detail",
":",
"select",
"=",
"\"\"\",\n ISIT AS DESCRIPTION,\n NO_TELEPHONE AS TELEPHONE,\n ADRESSE_IP,\n LONGI AS LONGITUDE,\n LATI AS LATITUDE,\n ALTI AS ALTITUDE,\n AXE AS ADR,\n CODE_POSTAL AS CP,\n FLAG_VALID AS VALID\"\"\"",
"_sql",
"=",
"\"\"\"SELECT\n NSIT AS NUMERO,\n IDENTIFIANT AS STATION %s\n FROM STATION\n %s\n ORDER BY NSIT\"\"\"",
"%",
"(",
"select",
",",
"condition",
")",
"return",
"psql",
".",
"read_sql",
"(",
"_sql",
",",
"self",
".",
"conn",
")"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
|
valid
|
XAIR.liste_campagnes
|
Liste des campagnes de mesure et des stations associées
Paramètres:
campagne: Si définie, liste des stations que pour cette campagne
|
pyair/xair.py
|
def liste_campagnes(self, campagne=None):
"""
Liste des campagnes de mesure et des stations associées
Paramètres:
campagne: Si définie, liste des stations que pour cette campagne
"""
condition = ""
if campagne:
condition = "WHERE NOM_COURT_CM='%s' """ % campagne
_sql = """SELECT
NOM_COURT_CM AS CAMPAGNE,
IDENTIFIANT AS STATION,
LIBELLE AS LIBELLE_CM,
DATEDEB AS DEBUT,
DATEFIN AS FIN
FROM CAMPMES
INNER JOIN CAMPMES_STATION USING (NOM_COURT_CM)
INNER JOIN STATION USING (NOM_COURT_SIT)
%s ORDER BY DATEDEB DESC""" % condition
return psql.read_sql(_sql, self.conn)
|
def liste_campagnes(self, campagne=None):
"""
Liste des campagnes de mesure et des stations associées
Paramètres:
campagne: Si définie, liste des stations que pour cette campagne
"""
condition = ""
if campagne:
condition = "WHERE NOM_COURT_CM='%s' """ % campagne
_sql = """SELECT
NOM_COURT_CM AS CAMPAGNE,
IDENTIFIANT AS STATION,
LIBELLE AS LIBELLE_CM,
DATEDEB AS DEBUT,
DATEFIN AS FIN
FROM CAMPMES
INNER JOIN CAMPMES_STATION USING (NOM_COURT_CM)
INNER JOIN STATION USING (NOM_COURT_SIT)
%s ORDER BY DATEDEB DESC""" % condition
return psql.read_sql(_sql, self.conn)
|
[
"Liste",
"des",
"campagnes",
"de",
"mesure",
"et",
"des",
"stations",
"associées"
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/xair.py#L293-L315
|
[
"def",
"liste_campagnes",
"(",
"self",
",",
"campagne",
"=",
"None",
")",
":",
"condition",
"=",
"\"\"",
"if",
"campagne",
":",
"condition",
"=",
"\"WHERE NOM_COURT_CM='%s' \"",
"\"\"",
"%",
"campagne",
"_sql",
"=",
"\"\"\"SELECT\n NOM_COURT_CM AS CAMPAGNE,\n IDENTIFIANT AS STATION,\n LIBELLE AS LIBELLE_CM,\n DATEDEB AS DEBUT,\n DATEFIN AS FIN\n FROM CAMPMES\n INNER JOIN CAMPMES_STATION USING (NOM_COURT_CM)\n INNER JOIN STATION USING (NOM_COURT_SIT)\n %s ORDER BY DATEDEB DESC\"\"\"",
"%",
"condition",
"return",
"psql",
".",
"read_sql",
"(",
"_sql",
",",
"self",
".",
"conn",
")"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
|
valid
|
XAIR.get_mesures
|
Récupération des données de mesure.
Paramètres:
mes: Un nom de mesure ou plusieurs séparées par des virgules, une liste
(list, tuple, pandas.Series) de noms
debut: Chaine de caractère ou objet datetime décrivant la date de début.
Défaut=date du jour
fin: Chaine de caractère ou objet datetime décrivant la date de fin.
Défaut=date de début
freq: fréquence de temps. '15T' | 'H' | 'D' | 'M' | 'A' (15T pour quart-horaire)
format: chaine de caractère décrivant le format des dates (ex:"%Y-%m-%d"
pour debut/fin="2013-01-28"). Appeler pyair.date.strtime_help() pour
obtenir la liste des codes possibles.
Defaut="%Y-%m-%d"
dayfirst: Si aucun format n'est fourni et que les dates sont des chaines
de caractères, aide le décrypteur à transformer la date en objet datetime
en spécifiant que les dates commencent par le jour (ex:11/09/2012
pourrait être interpreté comme le 09 novembre si dayfirst=False)
brut: si oui ou non renvoyer le dataframe brut, non invalidé, et les
codes d'état des mesures
Defaut=False
Retourne:
Un dataframe contenant toutes les mesures demandées.
Si brut=True, renvoie le dataframe des mesures brutes non invalidées et
le dataframe des codes d'états.
Le dataframe valide (net) peut être alors recalculé en faisant:
brut, etats = xr.get_mesure(..., brut=True)
invalides = etats_to_invalid(etats)
net = brut.mask(invalides)
|
pyair/xair.py
|
def get_mesures(self, mes, debut=None, fin=None, freq='H', format=None,
dayfirst=False, brut=False):
"""
Récupération des données de mesure.
Paramètres:
mes: Un nom de mesure ou plusieurs séparées par des virgules, une liste
(list, tuple, pandas.Series) de noms
debut: Chaine de caractère ou objet datetime décrivant la date de début.
Défaut=date du jour
fin: Chaine de caractère ou objet datetime décrivant la date de fin.
Défaut=date de début
freq: fréquence de temps. '15T' | 'H' | 'D' | 'M' | 'A' (15T pour quart-horaire)
format: chaine de caractère décrivant le format des dates (ex:"%Y-%m-%d"
pour debut/fin="2013-01-28"). Appeler pyair.date.strtime_help() pour
obtenir la liste des codes possibles.
Defaut="%Y-%m-%d"
dayfirst: Si aucun format n'est fourni et que les dates sont des chaines
de caractères, aide le décrypteur à transformer la date en objet datetime
en spécifiant que les dates commencent par le jour (ex:11/09/2012
pourrait être interpreté comme le 09 novembre si dayfirst=False)
brut: si oui ou non renvoyer le dataframe brut, non invalidé, et les
codes d'état des mesures
Defaut=False
Retourne:
Un dataframe contenant toutes les mesures demandées.
Si brut=True, renvoie le dataframe des mesures brutes non invalidées et
le dataframe des codes d'états.
Le dataframe valide (net) peut être alors recalculé en faisant:
brut, etats = xr.get_mesure(..., brut=True)
invalides = etats_to_invalid(etats)
net = brut.mask(invalides)
"""
def create_index(index, freq):
"""
Nouvel index [id, date] avec date formaté suivant le pas de temps voulu
index: index de l'ancien dataframe, tel que [date à minuit, date à ajouter]
"""
decalage = 1 # sert à compenser l'aberration des temps qui veut qu'on marque sur la fin d'une période (ex: à 24h, la pollution de 23 à minuit)
if freq == 'T' or freq == '15T':
f = pd.tseries.offsets.Minute
decalage = 15
if freq == 'H':
f = pd.tseries.offsets.Hour
if freq == 'D':
f = pd.tseries.offsets.Day
if freq == 'M':
f = pd.tseries.offsets.MonthBegin
if freq == 'A':
f = pd.tseries.offsets.YearBegin
else:
f = pd.tseries.offsets.Hour
new_index = [date + f(int(delta) - decalage) for date, delta in index]
return new_index
# Reformatage du champ des noms de mesure
mes = _format(mes)
# Analyse des champs dates
debut = to_date(debut, dayfirst, format)
if not fin:
fin = debut
else:
fin = to_date(fin, dayfirst, format)
# La freq de temps Q n'existe pas, on passe d'abord par une fréquence 15 minutes
if freq in ('Q', 'T'):
freq = '15T'
# Sélection des champs et de la table en fonctions de la fréquence de temps souhaitée
if freq == '15T':
diviseur = 96
champ_val = ','.join(['Q_M%02i AS "%i"' % (x, x * 15) for x in range(1, diviseur + 1)])
champ_code = 'Q_ETATV'
table = 'JOURNALIER'
elif freq == 'H':
diviseur = 24
champ_val = ','.join(['H_M%02i AS "%i"' % (x, x) for x in range(1, diviseur + 1)])
champ_code = 'H_ETAT'
table = 'JOURNALIER'
elif freq == 'D':
diviseur = 1
champ_val = 'J_M01 AS "1"'
champ_code = 'J_ETAT'
table = 'JOURNALIER'
elif freq == 'M':
diviseur = 12
champ_val = ','.join(['M_M%02i AS "%i"' % (x, x) for x in range(1, diviseur + 1)])
champ_code = 'M_ETAT'
table = 'MOIS'
elif freq == 'A':
diviseur = 1
champ_val = 'A_M01 AS "1"'
champ_code = 'A_ETAT'
table = 'MOIS'
else:
raise ValueError("freq doit être T, H, D, M ou A")
if table == 'JOURNALIER':
champ_date = 'J_DATE'
debut_db = debut
fin_db = fin
else:
champ_date = 'M_DATE'
# Pour les freq='M' et 'A', la table contient toutes les valeurs sur une
# année entière. Pour ne pas perturber la récupération si on passait des
# dates en milieu d'année, on transforme les dates pour être calées en début
# et en fin d'année. Le recadrage se fera plus loin dans le code, lors du reindex
debut_db = debut.replace(month=1, day=1, hour=0, minute=0)
fin_db = fin.replace(month=12, day=31, hour=23, minute=0)
debut_db = debut_db.strftime("%Y-%m-%d")
fin_db = fin_db.strftime("%Y-%m-%d")
# Récupération des valeurs et codes d'états associés
_sql = """SELECT
IDENTIFIANT as "id",
{champ_date} as "date",
{champ_code} as "etat",
{champ_val}
FROM {table}
INNER JOIN MESURE USING (NOM_COURT_MES)
WHERE IDENTIFIANT IN ('{mes}')
AND {champ_date} BETWEEN TO_DATE('{debut}', 'YYYY-MM-DD') AND TO_DATE('{fin}', 'YYYY-MM-DD')
ORDER BY IDENTIFIANT, {champ_date} ASC""".format(champ_date=champ_date,
table=table,
champ_code=champ_code,
mes=mes,
champ_val=champ_val,
debut=debut_db,
fin=fin_db)
## TODO : A essayer quand la base sera en version 11g
# _sql = """SELECT *
# FROM ({selection})
# UNPIVOT (IDENTIFIANT FOR VAL IN ({champ_as}))""".format(selection=_sql,
# champ_date=champ_date,
# champ_as=champ_as)
# On recupere les valeurs depuis la freq dans une dataframe
rep = psql.read_sql(_sql, self.conn)
# On créait un multiindex pour manipuler plus facilement le dataframe
df = rep.set_index(['id', 'date'])
# Stack le dataframe pour mettre les colonnes en lignes, en supprimant la colonne des états
# puis on unstack suivant l'id pour avoir les polluants en colonnes
etats = df['etat']
df = df.drop('etat', axis=1)
df_stack = df.stack(dropna=False)
df = df_stack.unstack('id')
# Calcul d'un nouvel index avec les bonnes dates. L'index du df est
# formé du champ date à minuit, et des noms des champs de valeurs
# qui sont aliassés de 1 à 24 pour les heures, ... voir champ_val.
# On aggrève alors ces 2 valeurs pour avoir des dates alignées qu'on utilise alors comme index final
index = create_index(df.index, freq)
df.reset_index(inplace=True, drop=True)
df['date'] = index
df = df.set_index(['date'])
# Traitement des codes d'état
# On concatène les codes d'état pour chaque polluant
# etats = etats.sum(level=0)
# etats = pd.DataFrame(zip(*etats.apply(list)))
etats = etats.unstack('id')
etats.fillna(value=MISSING_CODE * diviseur, inplace=True)
etats = etats.sum(axis=0)
etats = pd.DataFrame(list(zip(*etats.apply(list))))
etats.index = df.index
etats.columns = df.columns
# Remplacement des valeurs aux dates manquantes par des NaN
dates_completes = date_range(debut, fin, freq)
df = df.reindex(dates_completes)
etats = etats.reindex(dates_completes)
# Invalidation par codes d'état
# Pour chaque code d'état, regarde si oui ou non il est invalidant en le remplacant par un booléen
invalid = etats_to_invalid(etats)
if not brut:
# dans le dataframe, masque toute valeur invalide par NaN
dfn = df.mask(invalid) # DataFrame net
return dfn
else:
return df, etats
|
def get_mesures(self, mes, debut=None, fin=None, freq='H', format=None,
dayfirst=False, brut=False):
"""
Récupération des données de mesure.
Paramètres:
mes: Un nom de mesure ou plusieurs séparées par des virgules, une liste
(list, tuple, pandas.Series) de noms
debut: Chaine de caractère ou objet datetime décrivant la date de début.
Défaut=date du jour
fin: Chaine de caractère ou objet datetime décrivant la date de fin.
Défaut=date de début
freq: fréquence de temps. '15T' | 'H' | 'D' | 'M' | 'A' (15T pour quart-horaire)
format: chaine de caractère décrivant le format des dates (ex:"%Y-%m-%d"
pour debut/fin="2013-01-28"). Appeler pyair.date.strtime_help() pour
obtenir la liste des codes possibles.
Defaut="%Y-%m-%d"
dayfirst: Si aucun format n'est fourni et que les dates sont des chaines
de caractères, aide le décrypteur à transformer la date en objet datetime
en spécifiant que les dates commencent par le jour (ex:11/09/2012
pourrait être interpreté comme le 09 novembre si dayfirst=False)
brut: si oui ou non renvoyer le dataframe brut, non invalidé, et les
codes d'état des mesures
Defaut=False
Retourne:
Un dataframe contenant toutes les mesures demandées.
Si brut=True, renvoie le dataframe des mesures brutes non invalidées et
le dataframe des codes d'états.
Le dataframe valide (net) peut être alors recalculé en faisant:
brut, etats = xr.get_mesure(..., brut=True)
invalides = etats_to_invalid(etats)
net = brut.mask(invalides)
"""
def create_index(index, freq):
"""
Nouvel index [id, date] avec date formaté suivant le pas de temps voulu
index: index de l'ancien dataframe, tel que [date à minuit, date à ajouter]
"""
decalage = 1 # sert à compenser l'aberration des temps qui veut qu'on marque sur la fin d'une période (ex: à 24h, la pollution de 23 à minuit)
if freq == 'T' or freq == '15T':
f = pd.tseries.offsets.Minute
decalage = 15
if freq == 'H':
f = pd.tseries.offsets.Hour
if freq == 'D':
f = pd.tseries.offsets.Day
if freq == 'M':
f = pd.tseries.offsets.MonthBegin
if freq == 'A':
f = pd.tseries.offsets.YearBegin
else:
f = pd.tseries.offsets.Hour
new_index = [date + f(int(delta) - decalage) for date, delta in index]
return new_index
# Reformatage du champ des noms de mesure
mes = _format(mes)
# Analyse des champs dates
debut = to_date(debut, dayfirst, format)
if not fin:
fin = debut
else:
fin = to_date(fin, dayfirst, format)
# La freq de temps Q n'existe pas, on passe d'abord par une fréquence 15 minutes
if freq in ('Q', 'T'):
freq = '15T'
# Sélection des champs et de la table en fonctions de la fréquence de temps souhaitée
if freq == '15T':
diviseur = 96
champ_val = ','.join(['Q_M%02i AS "%i"' % (x, x * 15) for x in range(1, diviseur + 1)])
champ_code = 'Q_ETATV'
table = 'JOURNALIER'
elif freq == 'H':
diviseur = 24
champ_val = ','.join(['H_M%02i AS "%i"' % (x, x) for x in range(1, diviseur + 1)])
champ_code = 'H_ETAT'
table = 'JOURNALIER'
elif freq == 'D':
diviseur = 1
champ_val = 'J_M01 AS "1"'
champ_code = 'J_ETAT'
table = 'JOURNALIER'
elif freq == 'M':
diviseur = 12
champ_val = ','.join(['M_M%02i AS "%i"' % (x, x) for x in range(1, diviseur + 1)])
champ_code = 'M_ETAT'
table = 'MOIS'
elif freq == 'A':
diviseur = 1
champ_val = 'A_M01 AS "1"'
champ_code = 'A_ETAT'
table = 'MOIS'
else:
raise ValueError("freq doit être T, H, D, M ou A")
if table == 'JOURNALIER':
champ_date = 'J_DATE'
debut_db = debut
fin_db = fin
else:
champ_date = 'M_DATE'
# Pour les freq='M' et 'A', la table contient toutes les valeurs sur une
# année entière. Pour ne pas perturber la récupération si on passait des
# dates en milieu d'année, on transforme les dates pour être calées en début
# et en fin d'année. Le recadrage se fera plus loin dans le code, lors du reindex
debut_db = debut.replace(month=1, day=1, hour=0, minute=0)
fin_db = fin.replace(month=12, day=31, hour=23, minute=0)
debut_db = debut_db.strftime("%Y-%m-%d")
fin_db = fin_db.strftime("%Y-%m-%d")
# Récupération des valeurs et codes d'états associés
_sql = """SELECT
IDENTIFIANT as "id",
{champ_date} as "date",
{champ_code} as "etat",
{champ_val}
FROM {table}
INNER JOIN MESURE USING (NOM_COURT_MES)
WHERE IDENTIFIANT IN ('{mes}')
AND {champ_date} BETWEEN TO_DATE('{debut}', 'YYYY-MM-DD') AND TO_DATE('{fin}', 'YYYY-MM-DD')
ORDER BY IDENTIFIANT, {champ_date} ASC""".format(champ_date=champ_date,
table=table,
champ_code=champ_code,
mes=mes,
champ_val=champ_val,
debut=debut_db,
fin=fin_db)
## TODO : A essayer quand la base sera en version 11g
# _sql = """SELECT *
# FROM ({selection})
# UNPIVOT (IDENTIFIANT FOR VAL IN ({champ_as}))""".format(selection=_sql,
# champ_date=champ_date,
# champ_as=champ_as)
# On recupere les valeurs depuis la freq dans une dataframe
rep = psql.read_sql(_sql, self.conn)
# On créait un multiindex pour manipuler plus facilement le dataframe
df = rep.set_index(['id', 'date'])
# Stack le dataframe pour mettre les colonnes en lignes, en supprimant la colonne des états
# puis on unstack suivant l'id pour avoir les polluants en colonnes
etats = df['etat']
df = df.drop('etat', axis=1)
df_stack = df.stack(dropna=False)
df = df_stack.unstack('id')
# Calcul d'un nouvel index avec les bonnes dates. L'index du df est
# formé du champ date à minuit, et des noms des champs de valeurs
# qui sont aliassés de 1 à 24 pour les heures, ... voir champ_val.
# On aggrève alors ces 2 valeurs pour avoir des dates alignées qu'on utilise alors comme index final
index = create_index(df.index, freq)
df.reset_index(inplace=True, drop=True)
df['date'] = index
df = df.set_index(['date'])
# Traitement des codes d'état
# On concatène les codes d'état pour chaque polluant
# etats = etats.sum(level=0)
# etats = pd.DataFrame(zip(*etats.apply(list)))
etats = etats.unstack('id')
etats.fillna(value=MISSING_CODE * diviseur, inplace=True)
etats = etats.sum(axis=0)
etats = pd.DataFrame(list(zip(*etats.apply(list))))
etats.index = df.index
etats.columns = df.columns
# Remplacement des valeurs aux dates manquantes par des NaN
dates_completes = date_range(debut, fin, freq)
df = df.reindex(dates_completes)
etats = etats.reindex(dates_completes)
# Invalidation par codes d'état
# Pour chaque code d'état, regarde si oui ou non il est invalidant en le remplacant par un booléen
invalid = etats_to_invalid(etats)
if not brut:
# dans le dataframe, masque toute valeur invalide par NaN
dfn = df.mask(invalid) # DataFrame net
return dfn
else:
return df, etats
|
[
"Récupération",
"des",
"données",
"de",
"mesure",
"."
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/xair.py#L328-L517
|
[
"def",
"get_mesures",
"(",
"self",
",",
"mes",
",",
"debut",
"=",
"None",
",",
"fin",
"=",
"None",
",",
"freq",
"=",
"'H'",
",",
"format",
"=",
"None",
",",
"dayfirst",
"=",
"False",
",",
"brut",
"=",
"False",
")",
":",
"def",
"create_index",
"(",
"index",
",",
"freq",
")",
":",
"\"\"\"\n Nouvel index [id, date] avec date formaté suivant le pas de temps voulu\n index: index de l'ancien dataframe, tel que [date à minuit, date à ajouter]\n\n \"\"\"",
"decalage",
"=",
"1",
"# sert à compenser l'aberration des temps qui veut qu'on marque sur la fin d'une période (ex: à 24h, la pollution de 23 à minuit)",
"if",
"freq",
"==",
"'T'",
"or",
"freq",
"==",
"'15T'",
":",
"f",
"=",
"pd",
".",
"tseries",
".",
"offsets",
".",
"Minute",
"decalage",
"=",
"15",
"if",
"freq",
"==",
"'H'",
":",
"f",
"=",
"pd",
".",
"tseries",
".",
"offsets",
".",
"Hour",
"if",
"freq",
"==",
"'D'",
":",
"f",
"=",
"pd",
".",
"tseries",
".",
"offsets",
".",
"Day",
"if",
"freq",
"==",
"'M'",
":",
"f",
"=",
"pd",
".",
"tseries",
".",
"offsets",
".",
"MonthBegin",
"if",
"freq",
"==",
"'A'",
":",
"f",
"=",
"pd",
".",
"tseries",
".",
"offsets",
".",
"YearBegin",
"else",
":",
"f",
"=",
"pd",
".",
"tseries",
".",
"offsets",
".",
"Hour",
"new_index",
"=",
"[",
"date",
"+",
"f",
"(",
"int",
"(",
"delta",
")",
"-",
"decalage",
")",
"for",
"date",
",",
"delta",
"in",
"index",
"]",
"return",
"new_index",
"# Reformatage du champ des noms de mesure",
"mes",
"=",
"_format",
"(",
"mes",
")",
"# Analyse des champs dates",
"debut",
"=",
"to_date",
"(",
"debut",
",",
"dayfirst",
",",
"format",
")",
"if",
"not",
"fin",
":",
"fin",
"=",
"debut",
"else",
":",
"fin",
"=",
"to_date",
"(",
"fin",
",",
"dayfirst",
",",
"format",
")",
"# La freq de temps Q n'existe pas, on passe d'abord par une fréquence 15 minutes",
"if",
"freq",
"in",
"(",
"'Q'",
",",
"'T'",
")",
":",
"freq",
"=",
"'15T'",
"# Sélection des champs et de la table en fonctions de la fréquence de temps souhaitée",
"if",
"freq",
"==",
"'15T'",
":",
"diviseur",
"=",
"96",
"champ_val",
"=",
"','",
".",
"join",
"(",
"[",
"'Q_M%02i AS \"%i\"'",
"%",
"(",
"x",
",",
"x",
"*",
"15",
")",
"for",
"x",
"in",
"range",
"(",
"1",
",",
"diviseur",
"+",
"1",
")",
"]",
")",
"champ_code",
"=",
"'Q_ETATV'",
"table",
"=",
"'JOURNALIER'",
"elif",
"freq",
"==",
"'H'",
":",
"diviseur",
"=",
"24",
"champ_val",
"=",
"','",
".",
"join",
"(",
"[",
"'H_M%02i AS \"%i\"'",
"%",
"(",
"x",
",",
"x",
")",
"for",
"x",
"in",
"range",
"(",
"1",
",",
"diviseur",
"+",
"1",
")",
"]",
")",
"champ_code",
"=",
"'H_ETAT'",
"table",
"=",
"'JOURNALIER'",
"elif",
"freq",
"==",
"'D'",
":",
"diviseur",
"=",
"1",
"champ_val",
"=",
"'J_M01 AS \"1\"'",
"champ_code",
"=",
"'J_ETAT'",
"table",
"=",
"'JOURNALIER'",
"elif",
"freq",
"==",
"'M'",
":",
"diviseur",
"=",
"12",
"champ_val",
"=",
"','",
".",
"join",
"(",
"[",
"'M_M%02i AS \"%i\"'",
"%",
"(",
"x",
",",
"x",
")",
"for",
"x",
"in",
"range",
"(",
"1",
",",
"diviseur",
"+",
"1",
")",
"]",
")",
"champ_code",
"=",
"'M_ETAT'",
"table",
"=",
"'MOIS'",
"elif",
"freq",
"==",
"'A'",
":",
"diviseur",
"=",
"1",
"champ_val",
"=",
"'A_M01 AS \"1\"'",
"champ_code",
"=",
"'A_ETAT'",
"table",
"=",
"'MOIS'",
"else",
":",
"raise",
"ValueError",
"(",
"\"freq doit être T, H, D, M ou A\")",
"",
"if",
"table",
"==",
"'JOURNALIER'",
":",
"champ_date",
"=",
"'J_DATE'",
"debut_db",
"=",
"debut",
"fin_db",
"=",
"fin",
"else",
":",
"champ_date",
"=",
"'M_DATE'",
"# Pour les freq='M' et 'A', la table contient toutes les valeurs sur une",
"# année entière. Pour ne pas perturber la récupération si on passait des",
"# dates en milieu d'année, on transforme les dates pour être calées en début",
"# et en fin d'année. Le recadrage se fera plus loin dans le code, lors du reindex",
"debut_db",
"=",
"debut",
".",
"replace",
"(",
"month",
"=",
"1",
",",
"day",
"=",
"1",
",",
"hour",
"=",
"0",
",",
"minute",
"=",
"0",
")",
"fin_db",
"=",
"fin",
".",
"replace",
"(",
"month",
"=",
"12",
",",
"day",
"=",
"31",
",",
"hour",
"=",
"23",
",",
"minute",
"=",
"0",
")",
"debut_db",
"=",
"debut_db",
".",
"strftime",
"(",
"\"%Y-%m-%d\"",
")",
"fin_db",
"=",
"fin_db",
".",
"strftime",
"(",
"\"%Y-%m-%d\"",
")",
"# Récupération des valeurs et codes d'états associés",
"_sql",
"=",
"\"\"\"SELECT\n IDENTIFIANT as \"id\",\n {champ_date} as \"date\",\n {champ_code} as \"etat\",\n {champ_val}\n FROM {table}\n INNER JOIN MESURE USING (NOM_COURT_MES)\n WHERE IDENTIFIANT IN ('{mes}')\n AND {champ_date} BETWEEN TO_DATE('{debut}', 'YYYY-MM-DD') AND TO_DATE('{fin}', 'YYYY-MM-DD')\n ORDER BY IDENTIFIANT, {champ_date} ASC\"\"\"",
".",
"format",
"(",
"champ_date",
"=",
"champ_date",
",",
"table",
"=",
"table",
",",
"champ_code",
"=",
"champ_code",
",",
"mes",
"=",
"mes",
",",
"champ_val",
"=",
"champ_val",
",",
"debut",
"=",
"debut_db",
",",
"fin",
"=",
"fin_db",
")",
"## TODO : A essayer quand la base sera en version 11g",
"# _sql = \"\"\"SELECT *",
"# FROM ({selection})",
"# UNPIVOT (IDENTIFIANT FOR VAL IN ({champ_as}))\"\"\".format(selection=_sql,",
"# champ_date=champ_date,",
"# champ_as=champ_as)",
"# On recupere les valeurs depuis la freq dans une dataframe",
"rep",
"=",
"psql",
".",
"read_sql",
"(",
"_sql",
",",
"self",
".",
"conn",
")",
"# On créait un multiindex pour manipuler plus facilement le dataframe",
"df",
"=",
"rep",
".",
"set_index",
"(",
"[",
"'id'",
",",
"'date'",
"]",
")",
"# Stack le dataframe pour mettre les colonnes en lignes, en supprimant la colonne des états",
"# puis on unstack suivant l'id pour avoir les polluants en colonnes",
"etats",
"=",
"df",
"[",
"'etat'",
"]",
"df",
"=",
"df",
".",
"drop",
"(",
"'etat'",
",",
"axis",
"=",
"1",
")",
"df_stack",
"=",
"df",
".",
"stack",
"(",
"dropna",
"=",
"False",
")",
"df",
"=",
"df_stack",
".",
"unstack",
"(",
"'id'",
")",
"# Calcul d'un nouvel index avec les bonnes dates. L'index du df est",
"# formé du champ date à minuit, et des noms des champs de valeurs",
"# qui sont aliassés de 1 à 24 pour les heures, ... voir champ_val.",
"# On aggrève alors ces 2 valeurs pour avoir des dates alignées qu'on utilise alors comme index final",
"index",
"=",
"create_index",
"(",
"df",
".",
"index",
",",
"freq",
")",
"df",
".",
"reset_index",
"(",
"inplace",
"=",
"True",
",",
"drop",
"=",
"True",
")",
"df",
"[",
"'date'",
"]",
"=",
"index",
"df",
"=",
"df",
".",
"set_index",
"(",
"[",
"'date'",
"]",
")",
"# Traitement des codes d'état",
"# On concatène les codes d'état pour chaque polluant",
"# etats = etats.sum(level=0)",
"# etats = pd.DataFrame(zip(*etats.apply(list)))",
"etats",
"=",
"etats",
".",
"unstack",
"(",
"'id'",
")",
"etats",
".",
"fillna",
"(",
"value",
"=",
"MISSING_CODE",
"*",
"diviseur",
",",
"inplace",
"=",
"True",
")",
"etats",
"=",
"etats",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
"etats",
"=",
"pd",
".",
"DataFrame",
"(",
"list",
"(",
"zip",
"(",
"*",
"etats",
".",
"apply",
"(",
"list",
")",
")",
")",
")",
"etats",
".",
"index",
"=",
"df",
".",
"index",
"etats",
".",
"columns",
"=",
"df",
".",
"columns",
"# Remplacement des valeurs aux dates manquantes par des NaN",
"dates_completes",
"=",
"date_range",
"(",
"debut",
",",
"fin",
",",
"freq",
")",
"df",
"=",
"df",
".",
"reindex",
"(",
"dates_completes",
")",
"etats",
"=",
"etats",
".",
"reindex",
"(",
"dates_completes",
")",
"# Invalidation par codes d'état",
"# Pour chaque code d'état, regarde si oui ou non il est invalidant en le remplacant par un booléen",
"invalid",
"=",
"etats_to_invalid",
"(",
"etats",
")",
"if",
"not",
"brut",
":",
"# dans le dataframe, masque toute valeur invalide par NaN",
"dfn",
"=",
"df",
".",
"mask",
"(",
"invalid",
")",
"# DataFrame net",
"return",
"dfn",
"else",
":",
"return",
"df",
",",
"etats"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
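A note on create_index above: because the freq checks are independent `if` statements, every freq except 'A' falls through to the trailing `else` and `f` is overwritten with Hour, so the Minute/Day/MonthBegin offsets are never actually used. A minimal sketch of the presumably intended cascade, using `elif` (this is a reading of the code in this record, not a patch from the repository):

# Sketch: elif keeps the offset selected for each freq from being
# clobbered by the trailing else (which should only guard unknown freqs).
import pandas as pd

def offset_for(freq):
    decalage = 1
    if freq in ('T', '15T'):
        f = pd.tseries.offsets.Minute
        decalage = 15
    elif freq == 'H':
        f = pd.tseries.offsets.Hour
    elif freq == 'D':
        f = pd.tseries.offsets.Day
    elif freq == 'M':
        f = pd.tseries.offsets.MonthBegin
    elif freq == 'A':
        f = pd.tseries.offsets.YearBegin
    else:
        f = pd.tseries.offsets.Hour
    return f, decalage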
|
valid
|
XAIR.get_manuelles
|
Recupération des mesures manuelles (labo) pour un site
site: numéro du site (voir fonction liste_sites_prelevement)
code_parametre: code ISO du paramètre à rechercher (C6H6=V4)
debut: date de début du premier prélèvement
fin: date de fin du dernier prélèvement
court: Renvoie un tableau au format court ou long (colonnes)
|
pyair/xair.py
|
def get_manuelles(self, site, code_parametre, debut, fin, court=False):
"""
Recupération des mesures manuelles (labo) pour un site
site: numéro du site (voir fonction liste_sites_prelevement)
code_parametre: code ISO du paramètre à rechercher (C6H6=V4)
debut: date de début du premier prélèvement
fin: date de fin du dernier prélèvement
court: Renvoie un tableau au format court ou long (colonnes)
"""
condition = "WHERE MESLA.NOPOL='%s' " % code_parametre
condition += "AND SITMETH.NSIT=%s " % site
condition += "AND PRELEV.DATE_DEB>=TO_DATE('%s', 'YYYY-MM-DD') " % debut
condition += "AND PRELEV.DATE_FIN<=TO_DATE('%s', 'YYYY-MM-DD') " % fin
if court == False:
select = """SELECT
MESLA.LIBELLE AS MESURE,
METH.LIBELLE AS METHODE,
ANA.VALEUR AS VALEUR,
MESLA.UNITE AS UNITE,
ANA.CODE_QUALITE AS CODE_QUALITE,
ANA.DATE_ANA AS DATE_ANALYSE,
ANA.ID_LABO AS LABO,
PRELEV.DATE_DEB AS DEBUT,
PRELEV.DATE_FIN AS FIN,
ANA.COMMENTAIRE AS COMMENTAIRE,
SITE.LIBELLE AS SITE,
SITE.AXE AS ADRESSE,
COM.NOM_COMMUNE AS COMMUNE"""
else:
select = """SELECT
MESLA.LIBELLE AS MESURE,
ANA.VALEUR AS VALEUR,
MESLA.UNITE AS UNITE,
ANA.CODE_QUALITE AS CODE_QUALITE,
PRELEV.DATE_DEB AS DEBUT,
PRELEV.DATE_FIN AS FIN,
SITE.AXE AS ADRESSE,
COM.NOM_COMMUNE AS COMMUNE"""
_sql = """%s
FROM ANALYSE ANA
INNER JOIN PRELEVEMENT PRELEV ON (ANA.CODE_PRELEV=PRELEV.CODE_PRELEV AND ANA.CODE_SMP=PRELEV.CODE_SMP)
INNER JOIN MESURE_LABO MESLA ON (ANA.CODE_MES_LABO=MESLA.CODE_MES_LABO AND ANA.CODE_SMP=MESLA.CODE_SMP)
INNER JOIN SITE_METH_PRELEV SITMETH ON (ANA.CODE_SMP=SITMETH.CODE_SMP)
INNER JOIN METH_PRELEVEMENT METH ON (SITMETH.CODE_METH_P=METH.CODE_METH_P)
INNER JOIN SITE_PRELEVEMENT SITE ON (SITE.NSIT=SITMETH.NSIT)
INNER JOIN COMMUNE COM ON (COM.NINSEE=SITE.NINSEE)
%s
ORDER BY MESLA.NOPOL,MESLA.LIBELLE,PRELEV.DATE_DEB""" % (select, condition)
return psql.read_sql(_sql, self.conn)
|
def get_manuelles(self, site, code_parametre, debut, fin, court=False):
"""
Recupération des mesures manuelles (labo) pour un site
site: numéro du site (voir fonction liste_sites_prelevement)
code_parametre: code ISO du paramètre à rechercher (C6H6=V4)
debut: date de début du premier prélèvement
fin: date de fin du dernier prélèvement
court: Renvoie un tableau au format court ou long (colonnes)
"""
condition = "WHERE MESLA.NOPOL='%s' " % code_parametre
condition += "AND SITMETH.NSIT=%s " % site
condition += "AND PRELEV.DATE_DEB>=TO_DATE('%s', 'YYYY-MM-DD') " % debut
condition += "AND PRELEV.DATE_FIN<=TO_DATE('%s', 'YYYY-MM-DD') " % fin
if court == False:
select = """SELECT
MESLA.LIBELLE AS MESURE,
METH.LIBELLE AS METHODE,
ANA.VALEUR AS VALEUR,
MESLA.UNITE AS UNITE,
ANA.CODE_QUALITE AS CODE_QUALITE,
ANA.DATE_ANA AS DATE_ANALYSE,
ANA.ID_LABO AS LABO,
PRELEV.DATE_DEB AS DEBUT,
PRELEV.DATE_FIN AS FIN,
ANA.COMMENTAIRE AS COMMENTAIRE,
SITE.LIBELLE AS SITE,
SITE.AXE AS ADRESSE,
COM.NOM_COMMUNE AS COMMUNE"""
else:
select = """SELECT
MESLA.LIBELLE AS MESURE,
ANA.VALEUR AS VALEUR,
MESLA.UNITE AS UNITE,
ANA.CODE_QUALITE AS CODE_QUALITE,
PRELEV.DATE_DEB AS DEBUT,
PRELEV.DATE_FIN AS FIN,
SITE.AXE AS ADRESSE,
COM.NOM_COMMUNE AS COMMUNE"""
_sql = """%s
FROM ANALYSE ANA
INNER JOIN PRELEVEMENT PRELEV ON (ANA.CODE_PRELEV=PRELEV.CODE_PRELEV AND ANA.CODE_SMP=PRELEV.CODE_SMP)
INNER JOIN MESURE_LABO MESLA ON (ANA.CODE_MES_LABO=MESLA.CODE_MES_LABO AND ANA.CODE_SMP=MESLA.CODE_SMP)
INNER JOIN SITE_METH_PRELEV SITMETH ON (ANA.CODE_SMP=SITMETH.CODE_SMP)
INNER JOIN METH_PRELEVEMENT METH ON (SITMETH.CODE_METH_P=METH.CODE_METH_P)
INNER JOIN SITE_PRELEVEMENT SITE ON (SITE.NSIT=SITMETH.NSIT)
INNER JOIN COMMUNE COM ON (COM.NINSEE=SITE.NINSEE)
%s
ORDER BY MESLA.NOPOL,MESLA.LIBELLE,PRELEV.DATE_DEB""" % (select, condition)
return psql.read_sql(_sql, self.conn)
|
[
"Recupération",
"des",
"mesures",
"manuelles",
"(",
"labo",
")",
"pour",
"un",
"site"
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/xair.py#L519-L570
|
[
"def",
"get_manuelles",
"(",
"self",
",",
"site",
",",
"code_parametre",
",",
"debut",
",",
"fin",
",",
"court",
"=",
"False",
")",
":",
"condition",
"=",
"\"WHERE MESLA.NOPOL='%s' \"",
"%",
"code_parametre",
"condition",
"+=",
"\"AND SITMETH.NSIT=%s \"",
"%",
"site",
"condition",
"+=",
"\"AND PRELEV.DATE_DEB>=TO_DATE('%s', 'YYYY-MM-DD') \"",
"%",
"debut",
"condition",
"+=",
"\"AND PRELEV.DATE_FIN<=TO_DATE('%s', 'YYYY-MM-DD') \"",
"%",
"fin",
"if",
"court",
"==",
"False",
":",
"select",
"=",
"\"\"\"SELECT\n MESLA.LIBELLE AS MESURE,\n METH.LIBELLE AS METHODE,\n ANA.VALEUR AS VALEUR,\n MESLA.UNITE AS UNITE,\n ANA.CODE_QUALITE AS CODE_QUALITE,\n ANA.DATE_ANA AS DATE_ANALYSE,\n ANA.ID_LABO AS LABO,\n PRELEV.DATE_DEB AS DEBUT,\n PRELEV.DATE_FIN AS FIN,\n ANA.COMMENTAIRE AS COMMENTAIRE,\n SITE.LIBELLE AS SITE,\n SITE.AXE AS ADRESSE,\n COM.NOM_COMMUNE AS COMMUNE\"\"\"",
"else",
":",
"select",
"=",
"\"\"\"SELECT\n MESLA.LIBELLE AS MESURE,\n ANA.VALEUR AS VALEUR,\n MESLA.UNITE AS UNITE,\n ANA.CODE_QUALITE AS CODE_QUALITE,\n PRELEV.DATE_DEB AS DEBUT,\n PRELEV.DATE_FIN AS FIN,\n SITE.AXE AS ADRESSE,\n COM.NOM_COMMUNE AS COMMUNE\"\"\"",
"_sql",
"=",
"\"\"\"%s\n FROM ANALYSE ANA\n INNER JOIN PRELEVEMENT PRELEV ON (ANA.CODE_PRELEV=PRELEV.CODE_PRELEV AND ANA.CODE_SMP=PRELEV.CODE_SMP)\n INNER JOIN MESURE_LABO MESLA ON (ANA.CODE_MES_LABO=MESLA.CODE_MES_LABO AND ANA.CODE_SMP=MESLA.CODE_SMP)\n INNER JOIN SITE_METH_PRELEV SITMETH ON (ANA.CODE_SMP=SITMETH.CODE_SMP)\n INNER JOIN METH_PRELEVEMENT METH ON (SITMETH.CODE_METH_P=METH.CODE_METH_P)\n INNER JOIN SITE_PRELEVEMENT SITE ON (SITE.NSIT=SITMETH.NSIT)\n INNER JOIN COMMUNE COM ON (COM.NINSEE=SITE.NINSEE)\n %s\n ORDER BY MESLA.NOPOL,MESLA.LIBELLE,PRELEV.DATE_DEB\"\"\"",
"%",
"(",
"select",
",",
"condition",
")",
"return",
"psql",
".",
"read_sql",
"(",
"_sql",
",",
"self",
".",
"conn",
")"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
|
valid
|
XAIR.get_indices
|
Récupération des indices ATMO pour un réseau donné.
Paramètres:
res : Nom du ou des réseaux à chercher (str, list, pandas.Series)
debut: date de début, format YYYY-MM-JJ (str)
fin: Date de fin, format YYYY-MM-JJ (str)
|
pyair/xair.py
|
def get_indices(self, res, debut, fin):
"""
Récupération des indices ATMO pour un réseau donné.
Paramètres:
res : Nom du ou des réseaux à chercher (str, list, pandas.Series)
debut: date de début, format YYYY-MM-JJ (str)
fin: Date de fin, format YYYY-MM-JJ (str)
"""
res = _format(res)
_sql = """SELECT
J_DATE AS "date",
NOM_AGGLO AS "reseau",
C_IND_CALCULE AS "indice"
FROM RESULTAT_INDICE
INNER JOIN GROUPE_ATMO USING (NOM_COURT_GRP)
WHERE NOM_AGGLO IN ('%s')
AND J_DATE BETWEEN TO_DATE('%s', 'YYYY-MM-DD') AND TO_DATE('%s', 'YYYY-MM-DD') """ % (res, debut, fin)
rep = psql.read_sql(_sql, self.conn)
df = rep.set_index(['reseau', 'date'])
df = df['indice']
df = df.unstack('reseau')
dates_completes = date_range(to_date(debut), to_date(fin), freq='D')
df = df.reindex(dates_completes)
return df
|
def get_indices(self, res, debut, fin):
"""
Récupération des indices ATMO pour un réseau donné.
Paramètres:
res : Nom du ou des réseaux à chercher (str, list, pandas.Series)
debut: date de début, format YYYY-MM-JJ (str)
fin: Date de fin, format YYYY-MM-JJ (str)
"""
res = _format(res)
_sql = """SELECT
J_DATE AS "date",
NOM_AGGLO AS "reseau",
C_IND_CALCULE AS "indice"
FROM RESULTAT_INDICE
INNER JOIN GROUPE_ATMO USING (NOM_COURT_GRP)
WHERE NOM_AGGLO IN ('%s')
AND J_DATE BETWEEN TO_DATE('%s', 'YYYY-MM-DD') AND TO_DATE('%s', 'YYYY-MM-DD') """ % (res, debut, fin)
rep = psql.read_sql(_sql, self.conn)
df = rep.set_index(['reseau', 'date'])
df = df['indice']
df = df.unstack('reseau')
dates_completes = date_range(to_date(debut), to_date(fin), freq='D')
df = df.reindex(dates_completes)
return df
|
[
"Récupération",
"des",
"indices",
"ATMO",
"pour",
"un",
"réseau",
"donné",
"."
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/xair.py#L572-L598
|
[
"def",
"get_indices",
"(",
"self",
",",
"res",
",",
"debut",
",",
"fin",
")",
":",
"res",
"=",
"_format",
"(",
"res",
")",
"_sql",
"=",
"\"\"\"SELECT\n J_DATE AS \"date\",\n NOM_AGGLO AS \"reseau\",\n C_IND_CALCULE AS \"indice\"\n FROM RESULTAT_INDICE\n INNER JOIN GROUPE_ATMO USING (NOM_COURT_GRP)\n WHERE NOM_AGGLO IN ('%s')\n AND J_DATE BETWEEN TO_DATE('%s', 'YYYY-MM-DD') AND TO_DATE('%s', 'YYYY-MM-DD') \"\"\"",
"%",
"(",
"res",
",",
"debut",
",",
"fin",
")",
"rep",
"=",
"psql",
".",
"read_sql",
"(",
"_sql",
",",
"self",
".",
"conn",
")",
"df",
"=",
"rep",
".",
"set_index",
"(",
"[",
"'reseau'",
",",
"'date'",
"]",
")",
"df",
"=",
"df",
"[",
"'indice'",
"]",
"df",
"=",
"df",
".",
"unstack",
"(",
"'reseau'",
")",
"dates_completes",
"=",
"date_range",
"(",
"to_date",
"(",
"debut",
")",
",",
"to_date",
"(",
"fin",
")",
",",
"freq",
"=",
"'D'",
")",
"df",
"=",
"df",
".",
"reindex",
"(",
"dates_completes",
")",
"return",
"df"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
|
valid
|
XAIR.get_indices_et_ssi
|
Renvoie l'indice et les sous_indices
complet: renvoyer les complets ou les prévus
reseau: nom du réseau à renvoyer
debut: date de début à renvoyer
fin: date de fin à renvoyer
Renvoi : reseau, date, Indice, sous_ind NO2,PM10,O3,SO2
|
pyair/xair.py
|
def get_indices_et_ssi(self, reseau, debut, fin, complet=True):
"""Renvoie l'indice et les sous_indices
complet: renvoyer les complets ou les prévus
reseau: nom du réseau à renvoyer
debut: date de début à renvoyer
fin: date de fin à renvoyer
Renvoi : reseau, date, Indice, sous_ind NO2,PM10,O3,SO2
"""
if complet:
i_str = "c_ind_diffuse"
ssi_str = "c_ss_indice"
else:
i_str = "p_ind_diffuse"
ssi_str = "p_ss_indice"
_sql = """SELECT
g.nom_agglo as "reseau",
i.j_date as "date",
max(case when i.{0}>0 then i.{0} else 0 end) indice,
max(case when n.cchim='NO2' then ssi.{1} else 0 end) no2,
max(case when n.cchim='PM10' then ssi.{1} else 0 end) pm10,
max(case when n.cchim='O3' then ssi.{1} else 0 end) o3,
max(case when n.cchim='SO2' then ssi.{1} else 0 end) so2
FROM resultat_indice i
INNER JOIN resultat_ss_indice ssi ON (i.nom_court_grp=ssi.nom_court_grp AND i.j_date=ssi.j_date)
INNER JOIN groupe_atmo g ON (i.nom_court_grp=g.nom_court_grp)
INNER JOIN nom_mesure n ON (ssi.nopol=n.nopol)
WHERE g.nom_agglo='{2}'
AND i.j_date BETWEEN
TO_DATE('{3}', 'YYYY-MM-DD') AND
TO_DATE('{4}', 'YYYY-MM-DD')
GROUP BY
g.nom_agglo,
i.j_date
ORDER BY i.j_date""".format(i_str, ssi_str, reseau, debut, fin)
df = psql.read_sql(_sql, self.conn)
df = df.set_index(['reseau', 'date'])
return df
|
def get_indices_et_ssi(self, reseau, debut, fin, complet=True):
"""Renvoie l'indice et les sous_indices
complet: renvoyer les complets ou les prévus
reseau: nom du réseau à renvoyer
debut: date de début à renvoyer
fin: date de fin à renvoyer
Renvoi : reseau, date, Indice, sous_ind NO2,PM10,O3,SO2
"""
if complet:
i_str = "c_ind_diffuse"
ssi_str = "c_ss_indice"
else:
i_str = "p_ind_diffuse"
ssi_str = "p_ss_indice"
_sql = """SELECT
g.nom_agglo as "reseau",
i.j_date as "date",
max(case when i.{0}>0 then i.{0} else 0 end) indice,
max(case when n.cchim='NO2' then ssi.{1} else 0 end) no2,
max(case when n.cchim='PM10' then ssi.{1} else 0 end) pm10,
max(case when n.cchim='O3' then ssi.{1} else 0 end) o3,
max(case when n.cchim='SO2' then ssi.{1} else 0 end) so2
FROM resultat_indice i
INNER JOIN resultat_ss_indice ssi ON (i.nom_court_grp=ssi.nom_court_grp AND i.j_date=ssi.j_date)
INNER JOIN groupe_atmo g ON (i.nom_court_grp=g.nom_court_grp)
INNER JOIN nom_mesure n ON (ssi.nopol=n.nopol)
WHERE g.nom_agglo='{2}'
AND i.j_date BETWEEN
TO_DATE('{3}', 'YYYY-MM-DD') AND
TO_DATE('{4}', 'YYYY-MM-DD')
GROUP BY
g.nom_agglo,
i.j_date
ORDER BY i.j_date""".format(i_str, ssi_str, reseau, debut, fin)
df = psql.read_sql(_sql, self.conn)
df = df.set_index(['reseau', 'date'])
return df
|
[
"Renvoie",
"l",
"indice",
"et",
"les",
"sous_indices",
"complet",
":",
"renvoyer",
"les",
"complets",
"ou",
"les",
"prévus",
"reseau",
":",
"nom",
"du",
"réseau",
"à",
"renvoyer",
"debut",
":",
"date",
"de",
"début",
"à",
"renvoyer",
"fin",
":",
"date",
"de",
"fin",
"à",
"renvoyer"
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/xair.py#L600-L638
|
[
"def",
"get_indices_et_ssi",
"(",
"self",
",",
"reseau",
",",
"debut",
",",
"fin",
",",
"complet",
"=",
"True",
")",
":",
"if",
"complet",
":",
"i_str",
"=",
"\"c_ind_diffuse\"",
"ssi_str",
"=",
"\"c_ss_indice\"",
"else",
":",
"i_str",
"=",
"\"p_ind_diffuse\"",
"ssi_str",
"=",
"\"p_ss_indice\"",
"_sql",
"=",
"\"\"\"SELECT\n g.nom_agglo as \"reseau\",\n i.j_date as \"date\",\n max(case when i.{0}>0 then i.{0} else 0 end) indice,\n max(case when n.cchim='NO2' then ssi.{1} else 0 end) no2,\n max(case when n.cchim='PM10' then ssi.{1} else 0 end) pm10,\n max(case when n.cchim='O3' then ssi.{1} else 0 end) o3,\n max(case when n.cchim='SO2' then ssi.{1} else 0 end) so2\n FROM resultat_indice i\n INNER JOIN resultat_ss_indice ssi ON (i.nom_court_grp=ssi.nom_court_grp AND i.j_date=ssi.j_date)\n INNER JOIN groupe_atmo g ON (i.nom_court_grp=g.nom_court_grp)\n INNER JOIN nom_mesure n ON (ssi.nopol=n.nopol)\n WHERE g.nom_agglo='{2}'\n AND i.j_date BETWEEN\n TO_DATE('{3}', 'YYYY-MM-DD') AND\n TO_DATE('{4}', 'YYYY-MM-DD')\n GROUP BY\n g.nom_agglo,\n i.j_date\n ORDER BY i.j_date\"\"\"",
".",
"format",
"(",
"i_str",
",",
"ssi_str",
",",
"reseau",
",",
"debut",
",",
"fin",
")",
"df",
"=",
"psql",
".",
"read_sql",
"(",
"_sql",
",",
"self",
".",
"conn",
")",
"df",
"=",
"df",
".",
"set_index",
"(",
"[",
"'reseau'",
",",
"'date'",
"]",
")",
"return",
"df"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
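The max(case when ...) clauses in this query are a conditional-aggregation pivot: one row per (reseau, date), with each pollutant's sub-index in its own column. The same reshaping expressed in pandas, as an illustrative sketch (the column names here are invented for the example; in the method itself the pivot is done by Oracle):

# Illustrative pivot sketch, equivalent in spirit to max(case when ...).
import pandas as pd

rows = pd.DataFrame({'date': ['2014-01-01', '2014-01-01'],
                     'cchim': ['NO2', 'O3'],
                     'ss_indice': [3, 5]})
pivot = rows.pivot_table(index='date', columns='cchim',
                         values='ss_indice', aggfunc='max')
# pivot now has one row per date and one column per pollutant.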
|
valid
|
XAIR.get_sqltext
|
retourne les requêtes actuellement lancées sur le serveur
|
pyair/xair.py
|
def get_sqltext(self, format_=1):
"""retourne les requêtes actuellement lancées sur le serveur"""
if format_ == 1:
_sql = """SELECT u.sid, substr(u.username,1,12) user_name, s.sql_text
FROM v$sql s,v$session u
WHERE s.hash_value = u.sql_hash_value
AND sql_text NOT LIKE '%from v$sql s, v$session u%'
AND u.username NOT LIKE 'None'
ORDER BY u.sid"""
if format_ == 2:
_sql = """SELECT u.username, s.first_load_time, s.executions, s.sql_text
FROM dba_users u,v$sqlarea s
WHERE u.user_id=s.parsing_user_id
AND u.username LIKE 'LIONEL'
AND sql_text NOT LIKE '%FROM dba_users u,v$sqlarea s%'
ORDER BY s.first_load_time"""
return psql.read_sql(_sql, self.conn)
|
def get_sqltext(self, format_=1):
"""retourne les requêtes actuellement lancées sur le serveur"""
if format_ == 1:
_sql = """SELECT u.sid, substr(u.username,1,12) user_name, s.sql_text
FROM v$sql s,v$session u
WHERE s.hash_value = u.sql_hash_value
AND sql_text NOT LIKE '%from v$sql s, v$session u%'
AND u.username NOT LIKE 'None'
ORDER BY u.sid"""
if format_ == 2:
_sql = """SELECT u.username, s.first_load_time, s.executions, s.sql_text
FROM dba_users u,v$sqlarea s
WHERE u.user_id=s.parsing_user_id
AND u.username LIKE 'LIONEL'
AND sql_text NOT LIKE '%FROM dba_users u,v$sqlarea s%'
ORDER BY s.first_load_time"""
return psql.read_sql(_sql, self.conn)
|
[
"retourne",
"les",
"requêtes",
"actuellement",
"lancées",
"sur",
"le",
"serveur"
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/xair.py#L640-L659
|
[
"def",
"get_sqltext",
"(",
"self",
",",
"format_",
"=",
"1",
")",
":",
"if",
"format_",
"==",
"1",
":",
"_sql",
"=",
"\"\"\"SELECT u.sid, substr(u.username,1,12) user_name, s.sql_text\n FROM v$sql s,v$session u\n WHERE s.hash_value = u.sql_hash_value\n AND sql_text NOT LIKE '%from v$sql s, v$session u%'\n AND u.username NOT LIKE 'None'\n ORDER BY u.sid\"\"\"",
"if",
"format_",
"==",
"2",
":",
"_sql",
"=",
"\"\"\"SELECT u.username, s.first_load_time, s.executions, s.sql_text\n FROM dba_users u,v$sqlarea s\n WHERE u.user_id=s.parsing_user_id\n AND u.username LIKE 'LIONEL'\n AND sql_text NOT LIKE '%FROM dba_users u,v$sqlarea s%'\n ORDER BY s.first_load_time\"\"\"",
"return",
"psql",
".",
"read_sql",
"(",
"_sql",
",",
"self",
".",
"conn",
")"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
|
valid
|
run_global_hook
|
Attempt to run a global hook by name with args
|
cpenv/hooks.py
|
def run_global_hook(hook_name, *args):
'''Attempt to run a global hook by name with args'''
hook_finder = HookFinder(get_global_hook_path())
hook = hook_finder(hook_name)
if hook:
hook.run(*args)
|
def run_global_hook(hook_name, *args):
'''Attempt to run a global hook by name with args'''
hook_finder = HookFinder(get_global_hook_path())
hook = hook_finder(hook_name)
if hook:
hook.run(*args)
|
[
"Attempt",
"to",
"run",
"a",
"global",
"hook",
"by",
"name",
"with",
"args"
] |
cpenv/cpenv
|
python
|
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/hooks.py#L64-L70
|
[
"def",
"run_global_hook",
"(",
"hook_name",
",",
"*",
"args",
")",
":",
"hook_finder",
"=",
"HookFinder",
"(",
"get_global_hook_path",
"(",
")",
")",
"hook",
"=",
"hook_finder",
"(",
"hook_name",
")",
"if",
"hook",
":",
"hook",
".",
"run",
"(",
"*",
"args",
")"
] |
afbb569ae04002743db041d3629a5be8c290bd89
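A hypothetical usage sketch: judging from the HookFinder / get_global_hook_path names and the hook.run(*args) call, a global hook is a module on the global hook path exposing a run() callable. The file location and signature below are assumptions, not documented in this record:

# ~/.cpenv/hooks/post_activate.py (assumed location and file name)
# def run(env):
#     print('activated', env)

# Caller side; silently does nothing if the hook module is absent.
from cpenv.hooks import run_global_hook
run_global_hook('post_activate', 'my_env')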
|
valid
|
status
|
Handler that calls each status job in a worker pool, attempting to timeout.
The resulting durations/errors are written to the response
as JSON.
eg.
`{
"endpoints": [
{ "endpoint": "Jenny's Database", "duration": 1.002556324005127 },
{ "endpoint": "Hotmail", "duration": -1, "error": "Host is down" },
]
}`
|
flask_restpoints/handlers.py
|
def status(jobs):
"""Handler that calls each status job in a worker pool, attempting to timeout.
The resulting durations/errors are written to the response
as JSON.
eg.
`{
"endpoints": [
{ "endpoint": "Jenny's Database", "duration": 1.002556324005127 },
{ "endpoint": "Hotmail", "duration": -1, "error": "Host is down" },
]
}`
"""
def status_handler():
endpoints = []
stats = {"endpoints": None}
executor = concurrent.futures.ThreadPoolExecutor(max_workers=5)
# This is basically calling the below within the executor:
#
# >>> timeit(job[2], number=1)
#
# job is a tuple of (name, timeout, func) so the above is really:
# >>> timeit(func, number=1)
#
#gen = ((job, executor.submit(timeit, job[2], number=1)) for job in jobs)
#for job, future in gen:
for job, future in [(job, executor.submit(timeit, job[2], number=1)) for job in jobs]:
name, timeout, _ = job
endpoint = {"endpoint": name}
try:
data = future.result(timeout=timeout)
endpoint["duration"] = data
except concurrent.futures.TimeoutError:
endpoint["error"] = "timeout exceeded"
except Exception as ex:
endpoint["error"] = str(ex)
endpoints.append(endpoint)
if len(endpoints) > 0:
stats["endpoints"] = endpoints
executor.shutdown(wait=False)
return jsonify(**stats)
# TODO: Look into potentially cleaning up jobs that have timed-out.
#
# This could be done by changing jobs from a func to an object
# that implements `def interrupt(self):` which would be used
# to interrupt/stop/close/cleanup any resources.
return status_handler
|
def status(jobs):
"""Handler that calls each status job in a worker pool, attempting to timeout.
The resulting durations/errors are written to the response
as JSON.
eg.
`{
"endpoints": [
{ "endpoint": "Jenny's Database", "duration": 1.002556324005127 },
{ "endpoint": "Hotmail", "duration": -1, "error": "Host is down" },
]
}`
"""
def status_handler():
endpoints = []
stats = {"endpoints": None}
executor = concurrent.futures.ThreadPoolExecutor(max_workers=5)
# This is basically calling the below within the executor:
#
# >>> timeit(job[2], number=1)
#
# job is a tuple of (name, timeout, func) so the above is really:
# >>> timeit(func, number=1)
#
#gen = ((job, executor.submit(timeit, job[2], number=1)) for job in jobs)
#for job, future in gen:
for job, future in [(job, executor.submit(timeit, job[2], number=1)) for job in jobs]:
name, timeout, _ = job
endpoint = {"endpoint": name}
try:
data = future.result(timeout=timeout)
endpoint["duration"] = data
except concurrent.futures.TimeoutError:
endpoint["error"] = "timeout exceeded"
except Exception as ex:
endpoint["error"] = str(ex)
endpoints.append(endpoint)
if len(endpoints) > 0:
stats["endpoints"] = endpoints
executor.shutdown(wait=False)
return jsonify(**stats)
# TODO: Look into potentially cleaning up jobs that have timed-out.
#
# This could be done by changing jobs from a func to an object
# that implements `def interrupt(self):` which would be used
# to interrupt/stop/close/cleanup any resources.
return status_handler
|
[
"Handler",
"that",
"calls",
"each",
"status",
"job",
"in",
"a",
"worker",
"pool",
"attempting",
"to",
"timeout",
".",
"The",
"resulting",
"durations",
"/",
"errors",
"are",
"written",
"to",
"the",
"response",
"as",
"JSON",
"."
] |
juztin/flask-restpoints
|
python
|
https://github.com/juztin/flask-restpoints/blob/1833e1aeed6139c3b130d4e7497526c78c063a0f/flask_restpoints/handlers.py#L8-L58
|
[
"def",
"status",
"(",
"jobs",
")",
":",
"def",
"status_handler",
"(",
")",
":",
"endpoints",
"=",
"[",
"]",
"stats",
"=",
"{",
"\"endpoints\"",
":",
"None",
"}",
"executor",
"=",
"concurrent",
".",
"futures",
".",
"ThreadPoolExecutor",
"(",
"max_workers",
"=",
"5",
")",
"# This is basically calling the below within the executor:",
"#",
"# >>> timeit(job[2], number=1)",
"#",
"# job is a tuple of (name, timeout, func) so the above is really:",
"# >>> timeit(func, number=1)",
"#",
"#gen = ((job, executor.submit(timeit, job[2], number=1)) for job in jobs)",
"#for job, future in gen:",
"for",
"job",
",",
"future",
"in",
"[",
"(",
"job",
",",
"executor",
".",
"submit",
"(",
"timeit",
",",
"job",
"[",
"2",
"]",
",",
"number",
"=",
"1",
")",
")",
"for",
"job",
"in",
"jobs",
"]",
":",
"name",
",",
"timeout",
",",
"_",
"=",
"job",
"endpoint",
"=",
"{",
"\"endpoint\"",
":",
"name",
"}",
"try",
":",
"data",
"=",
"future",
".",
"result",
"(",
"timeout",
"=",
"timeout",
")",
"endpoint",
"[",
"\"duration\"",
"]",
"=",
"data",
"except",
"concurrent",
".",
"futures",
".",
"TimeoutError",
":",
"endpoint",
"[",
"\"error\"",
"]",
"=",
"\"timeout exceeded\"",
"except",
"Exception",
"as",
"ex",
":",
"endpoint",
"[",
"\"error\"",
"]",
"=",
"str",
"(",
"ex",
")",
"endpoints",
".",
"append",
"(",
"endpoint",
")",
"if",
"len",
"(",
"endpoints",
")",
">",
"0",
":",
"stats",
"[",
"\"endpoints\"",
"]",
"=",
"endpoints",
"executor",
".",
"shutdown",
"(",
"wait",
"=",
"False",
")",
"return",
"jsonify",
"(",
"*",
"*",
"stats",
")",
"# TODO: Look into potentially cleaning up jobs that have timed-out.",
"#",
"# This could be done by changing jobs from a func to an object",
"# that implements `def interrupt(self):` which would be used",
"# to interrupt/stop/close/cleanup any resources.",
"return",
"status_handler"
] |
1833e1aeed6139c3b130d4e7497526c78c063a0f
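A minimal wiring sketch for this factory. The (name, timeout, func) job format comes straight from the code's own comment; the Flask setup and route name are assumptions for illustration:

# Hypothetical registration of the status handler on a Flask app.
import time
from flask import Flask
from flask_restpoints.handlers import status

app = Flask(__name__)

def ping_db():
    time.sleep(0.1)  # stand-in for a real connectivity check

jobs = [("Jenny's Database", 2.0, ping_db)]  # (name, timeout, func)
app.add_url_rule('/status', 'status', status(jobs))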
|
valid
|
moyennes_glissantes
|
Calcule de moyennes glissantes
Paramètres:
df: DataFrame de mesures sur lequel appliqué le calcul
sur: (int, par défaut 8) Nombre d'observations sur lequel s'appuiera le
calcul
rep: (float, défaut 0.75) Taux de réprésentativité en dessous duquel le
calcul renverra NaN
Retourne:
Un DataFrame des moyennes glissantes calculées
|
pyair/reg.py
|
def moyennes_glissantes(df, sur=8, rep=0.75):
"""
Calcule de moyennes glissantes
Paramètres:
df: DataFrame de mesures sur lequel appliqué le calcul
sur: (int, par défaut 8) Nombre d'observations sur lequel s'appuiera le
calcul
rep: (float, défaut 0.75) Taux de réprésentativité en dessous duquel le
calcul renverra NaN
Retourne:
Un DataFrame des moyennes glissantes calculées
"""
return pd.rolling_mean(df, window=sur, min_periods=rep * sur)
|
def moyennes_glissantes(df, sur=8, rep=0.75):
"""
Calcule de moyennes glissantes
Paramètres:
df: DataFrame de mesures sur lequel appliqué le calcul
sur: (int, par défaut 8) Nombre d'observations sur lequel s'appuiera le
calcul
rep: (float, défaut 0.75) Taux de réprésentativité en dessous duquel le
calcul renverra NaN
Retourne:
Un DataFrame des moyennes glissantes calculées
"""
return pd.rolling_mean(df, window=sur, min_periods=rep * sur)
|
[
"Calcule",
"de",
"moyennes",
"glissantes"
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/reg.py#L23-L37
|
[
"def",
"moyennes_glissantes",
"(",
"df",
",",
"sur",
"=",
"8",
",",
"rep",
"=",
"0.75",
")",
":",
"return",
"pd",
".",
"rolling_mean",
"(",
"df",
",",
"window",
"=",
"sur",
",",
"min_periods",
"=",
"rep",
"*",
"sur",
")"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
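pd.rolling_mean was deprecated in pandas 0.18 and removed in later versions, so on current pandas the same computation reads as below. Casting rep * sur to int is an assumption about the intended rounding, since min_periods must be an integer in the newer API:

# Modern-pandas equivalent sketch of moyennes_glissantes().
import pandas as pd

def moyennes_glissantes_moderne(df, sur=8, rep=0.75):
    return df.rolling(window=sur, min_periods=int(rep * sur)).mean()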
|
valid
|
consecutive
|
Calcule si une valeur est dépassée durant une période donnée. Détecte
un dépassement de valeur sur X heures/jours/... consécutifs
Paramètres:
df: DataFrame de mesures sur lequel appliqué le calcul
valeur: (float) valeur à chercher le dépassement (strictement supérieur à)
sur: (int) Nombre d'observations consécutives où la valeur doit être dépassée
Retourne:
Un DataFrame de valeurs, de même taille (shape) que le df d'entrée, dont toutes
les valeurs sont supprimées, sauf celles supérieures à la valeur de référence
et positionnées sur les heures de début de dépassements
|
pyair/reg.py
|
def consecutive(df, valeur, sur=3):
"""Calcule si une valeur est dépassée durant une période donnée. Détecte
un dépassement de valeur sur X heures/jours/... consécutifs
Paramètres:
df: DataFrame de mesures sur lequel appliqué le calcul
valeur: (float) valeur à chercher le dépassement (strictement supérieur à)
sur: (int) Nombre d'observations consécutives où la valeur doit être dépassée
Retourne:
Un DataFrame de valeurs, de même taille (shape) que le df d'entrée, dont toutes
les valeurs sont supprimées, sauf celles supérieures à la valeur de référence
et positionnées sur les heures de début de dépassements
"""
dep = pd.rolling_max(df.where(df > valeur), window=sur, min_periods=sur)
return dep
|
def consecutive(df, valeur, sur=3):
"""Calcule si une valeur est dépassée durant une période donnée. Détecte
un dépassement de valeur sur X heures/jours/... consécutifs
Paramètres:
df: DataFrame de mesures sur lequel appliqué le calcul
valeur: (float) valeur à chercher le dépassement (strictement supérieur à)
sur: (int) Nombre d'observations consécutives où la valeur doit être dépassée
Retourne:
Un DataFrame de valeurs, de même taille (shape) que le df d'entrée, dont toutes
les valeurs sont supprimées, sauf celles supérieures à la valeur de référence
et positionnées sur les heures de début de dépassements
"""
dep = pd.rolling_max(df.where(df > valeur), window=sur, min_periods=sur)
return dep
|
[
"Calcule",
"si",
"une",
"valeur",
"est",
"dépassée",
"durant",
"une",
"période",
"donnée",
".",
"Détecte",
"un",
"dépassement",
"de",
"valeur",
"sur",
"X",
"heures",
"/",
"jours",
"/",
"...",
"consécutifs"
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/reg.py#L40-L57
|
[
"def",
"consecutive",
"(",
"df",
",",
"valeur",
",",
"sur",
"=",
"3",
")",
":",
"dep",
"=",
"pd",
".",
"rolling_max",
"(",
"df",
".",
"where",
"(",
"df",
">",
"valeur",
")",
",",
"window",
"=",
"sur",
",",
"min_periods",
"=",
"sur",
")",
"return",
"dep"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
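The same remark applies to pd.rolling_max: on current pandas the exceedance detection can be written as the sketch below. The where() filter keeps only values strictly above valeur, and min_periods=sur forces NaN unless all sur consecutive observations exceeded the threshold:

# Modern-pandas equivalent sketch of consecutive().
import pandas as pd

def consecutive_moderne(df, valeur, sur=3):
    return df.where(df > valeur).rolling(window=sur, min_periods=sur).max()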
|
valid
|
nombre_depassement
|
Calcule le nombre de dépassement d'une valeur sur l'intégralité du temps,
ou suivant un regroupement temporel.
Paramètres:
df: DataFrame de mesures sur lequel appliqué le calcul
valeur: (float) valeur à chercher le dépassement (strictement supérieur à)
freq: (str ou None): Fréquence de temps sur lequel effectué un regroupement.
freq peut prendre les valeurs 'H' pour heure, 'D' pour jour, 'W' pour semaine,
'M' pour mois et 'A' pour année, ou None pour ne pas faire de regroupement.
Le nombre de dépassement sera alors regroupé suivant cette fréquence de temps.
Retourne:
Une Series du nombre de dépassement, total suivant la fréquence intrinsèque
du DataFrame d'entrée, ou aggloméré suivant la fréquence de temps choisie.
|
pyair/reg.py
|
def nombre_depassement(df, valeur, freq=None):
"""
Calcule le nombre de dépassement d'une valeur sur l'intégralité du temps,
ou suivant un regroupement temporel.
Paramètres:
df: DataFrame de mesures sur lequel appliqué le calcul
valeur: (float) valeur à chercher le dépassement (strictement supérieur à)
freq: (str ou None): Fréquence de temps sur lequel effectué un regroupement.
freq peut prendre les valeurs 'H' pour heure, 'D' pour jour, 'W' pour semaine,
'M' pour mois et 'A' pour année, ou None pour ne pas faire de regroupement.
Le nombre de dépassement sera alors regroupé suivant cette fréquence de temps.
Retourne:
Une Series du nombre de dépassement, total suivant la fréquence intrinsèque
du DataFrame d'entrée, ou aggloméré suivant la fréquence de temps choisie.
"""
dep = depassement(df, valeur)
if freq is not None:
dep = dep.resample(freq, how='sum')
return dep.count()
|
def nombre_depassement(df, valeur, freq=None):
"""
Calcule le nombre de dépassement d'une valeur sur l'intégralité du temps,
ou suivant un regroupement temporel.
Paramètres:
df: DataFrame de mesures sur lequel appliqué le calcul
valeur: (float) valeur à chercher le dépassement (strictement supérieur à)
freq: (str ou None): Fréquence de temps sur lequel effectué un regroupement.
freq peut prendre les valeurs 'H' pour heure, 'D' pour jour, 'W' pour semaine,
'M' pour mois et 'A' pour année, ou None pour ne pas faire de regroupement.
Le nombre de dépassement sera alors regroupé suivant cette fréquence de temps.
Retourne:
Une Series du nombre de dépassement, total suivant la fréquence intrinsèque
du DataFrame d'entrée, ou aggloméré suivant la fréquence de temps choisie.
"""
dep = depassement(df, valeur)
if freq is not None:
dep = dep.resample(freq, how='sum')
return dep.count()
|
[
"Calcule",
"le",
"nombre",
"de",
"dépassement",
"d",
"une",
"valeur",
"sur",
"l",
"intégralité",
"du",
"temps",
"ou",
"suivant",
"un",
"regroupement",
"temporel",
"."
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/reg.py#L77-L98
|
[
"def",
"nombre_depassement",
"(",
"df",
",",
"valeur",
",",
"freq",
"=",
"None",
")",
":",
"dep",
"=",
"depassement",
"(",
"df",
",",
"valeur",
")",
"if",
"freq",
"is",
"not",
"None",
":",
"dep",
"=",
"dep",
".",
"resample",
"(",
"freq",
",",
"how",
"=",
"'sum'",
")",
"return",
"dep",
".",
"count",
"(",
")"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
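depassement() is defined elsewhere in reg.py and is not shown in this record; from the way consecutive() filters exceedances, a plausible reconstruction is df.where(df > valeur), which is the assumption made in this sketch. The resample(freq, how='sum') call is the old API; on current pandas it becomes resample(freq).sum(), with min_count=1 so that empty periods stay NaN the way the old how='sum' left them:

# Sketch on modern pandas; depassement_suppose() is a *hypothetical*
# reconstruction of the helper, not the repository's actual code.
import pandas as pd

def depassement_suppose(df, valeur):
    return df.where(df > valeur)

def nombre_depassement_moderne(df, valeur, freq=None):
    dep = depassement_suppose(df, valeur)
    if freq is not None:
        dep = dep.resample(freq).sum(min_count=1)
    return dep.count()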
|
valid
|
aot40_vegetation
|
Calcul de l'AOT40 du 1er mai au 31 juillet
*AOT40 : AOT 40 ( exprimé en micro g/m³ par heure ) signifie la somme des
différences entre les concentrations horaires supérieures à 40 parties par
milliard ( 40 ppb soit 80 micro g/m³ ), durant une période donnée en
utilisant uniquement les valeurs sur 1 heure mesurées quotidiennement
entre 8 heures (début de la mesure) et 20 heures (pile, fin de la mesure) CET,
ce qui correspond à de 8h à 19h TU (donnant bien 12h de mesures, 8h donnant
la moyenne horaire de 7h01 à 8h00)
Paramètres:
df: DataFrame de mesures sur lequel appliqué le calcul
nb_an: (int) Nombre d'années contenu dans le df, et servant à diviser le
résultat retourné
Retourne:
Un DataFrame de résultat de calcul
|
pyair/reg.py
|
def aot40_vegetation(df, nb_an):
"""
Calcul de l'AOT40 du 1er mai au 31 juillet
*AOT40 : AOT 40 ( exprimé en micro g/m³ par heure ) signifie la somme des
différences entre les concentrations horaires supérieures à 40 parties par
milliard ( 40 ppb soit 80 micro g/m³ ), durant une période donnée en
utilisant uniquement les valeurs sur 1 heure mesurées quotidiennement
entre 8 heures (début de la mesure) et 20 heures (pile, fin de la mesure) CET,
ce qui correspond à de 8h à 19h TU (donnant bien 12h de mesures, 8h donnant
la moyenne horaire de 7h01 à 8h00)
Paramètres:
df: DataFrame de mesures sur lequel appliqué le calcul
nb_an: (int) Nombre d'années contenu dans le df, et servant à diviser le
résultat retourné
Retourne:
Un DataFrame de résultat de calcul
"""
return _aot(df.tshift(1), nb_an=nb_an, limite=80, mois_debut=5, mois_fin=7,
heure_debut=8, heure_fin=19)
|
def aot40_vegetation(df, nb_an):
"""
Calcul de l'AOT40 du 1er mai au 31 juillet
*AOT40 : AOT 40 ( exprimé en micro g/m³ par heure ) signifie la somme des
différences entre les concentrations horaires supérieures à 40 parties par
milliard ( 40 ppb soit 80 micro g/m³ ), durant une période donnée en
utilisant uniquement les valeurs sur 1 heure mesurées quotidiennement
entre 8 heures (début de la mesure) et 20 heures (pile, fin de la mesure) CET,
ce qui correspond à de 8h à 19h TU (donnant bien 12h de mesures, 8h donnant
la moyenne horaire de 7h01 à 8h00)
Paramètres:
df: DataFrame de mesures sur lequel appliqué le calcul
nb_an: (int) Nombre d'années contenu dans le df, et servant à diviser le
résultat retourné
Retourne:
Un DataFrame de résultat de calcul
"""
return _aot(df.tshift(1), nb_an=nb_an, limite=80, mois_debut=5, mois_fin=7,
heure_debut=8, heure_fin=19)
|
[
"Calcul",
"de",
"l",
"AOT40",
"du",
"1er",
"mai",
"au",
"31",
"juillet"
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/reg.py#L101-L123
|
[
"def",
"aot40_vegetation",
"(",
"df",
",",
"nb_an",
")",
":",
"return",
"_aot",
"(",
"df",
".",
"tshift",
"(",
"1",
")",
",",
"nb_an",
"=",
"nb_an",
",",
"limite",
"=",
"80",
",",
"mois_debut",
"=",
"5",
",",
"mois_fin",
"=",
"7",
",",
"heure_debut",
"=",
"8",
",",
"heure_fin",
"=",
"19",
")"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
|
valid
|
_aot
|
Calcul de l'AOT de manière paramètrable. Voir AOT40_vegetation ou
AOT40_foret pour des paramètres préalablement fixés.
Paramètres:
df: DataFrame de mesures sur lequel appliqué le calcul
nb_an: (int) Nombre d'années contenu dans le df, et servant à diviser le
résultat retourné
limite: (float) valeur limite au delà de laquelle les différences seront
additionnées pour calculer l'AOT
mois_debut: (int) mois de début de calcul
mois_fin: (int) mois de fin de calcul
heure_debut: (int) première heure de chaque jour après laquelle les valeurs
sont retenues
heure_fin: (int) dernière heure de chaque jour avant laquelle les valeurs
sont retenues
Retourne:
Un DataFrame de résultat de calcul
|
pyair/reg.py
|
def _aot(df, nb_an=1, limite=80, mois_debut=5, mois_fin=7,
heure_debut=7, heure_fin=19):
"""
Calcul de l'AOT de manière paramètrable. Voir AOT40_vegetation ou
AOT40_foret pour des paramètres préalablement fixés.
Paramètres:
df: DataFrame de mesures sur lequel appliqué le calcul
nb_an: (int) Nombre d'années contenu dans le df, et servant à diviser le
résultat retourné
limite: (float) valeur limite au delà de laquelle les différences seront
additionnées pour calculer l'AOT
mois_debut: (int) mois de début de calcul
mois_fin: (int) mois de fin de calcul
heure_debut: (int) première heure de chaque jour après laquelle les valeurs
sont retenues
heure_fin: (int) dernière heure de chaque jour avant laquelle les valeurs
sont retenues
Retourne:
Un DataFrame de résultat de calcul
"""
res = df[(df.index.month >= mois_debut) & (df.index.month <= mois_fin) &
(df.index.hour >= heure_debut) & (df.index.hour <= heure_fin)]
nb_valid = res.count()
nb_total = res.shape[0]
pcent = nb_valid.astype(pd.np.float) / nb_total * 100
brut = (res[res > limite] - limite) / nb_an
brut = brut.sum()
net = brut / nb_valid * nb_total
print("""{total} mesures au totales
du {m_d} au {m_f}
entre {h_d} et {h_f}""".format(total=nb_total,
m_d=mois_debut, m_f=mois_fin,
h_d=heure_debut, h_f=heure_fin
)
)
aot = pd.DataFrame([brut.round(), nb_valid.round(), pcent.round(), net.round()],
index=['brutes', 'mesures valides', '% de rep.', 'nettes'])
return aot
|
def _aot(df, nb_an=1, limite=80, mois_debut=5, mois_fin=7,
heure_debut=7, heure_fin=19):
"""
Calcul de l'AOT de manière paramètrable. Voir AOT40_vegetation ou
AOT40_foret pour des paramètres préalablement fixés.
Paramètres:
df: DataFrame de mesures sur lequel appliqué le calcul
nb_an: (int) Nombre d'années contenu dans le df, et servant à diviser le
résultat retourné
limite: (float) valeur limite au delà de laquelle les différences seront
additionnées pour calculer l'AOT
mois_debut: (int) mois de début de calcul
mois_fin: (int) mois de fin de calcul
heure_debut: (int) première heure de chaque jour après laquelle les valeurs
sont retenues
heure_fin: (int) dernière heure de chaque jour avant laquelle les valeurs
sont retenues
Retourne:
Un DataFrame de résultat de calcul
"""
res = df[(df.index.month >= mois_debut) & (df.index.month <= mois_fin) &
(df.index.hour >= heure_debut) & (df.index.hour <= heure_fin)]
nb_valid = res.count()
nb_total = res.shape[0]
pcent = nb_valid.astype(pd.np.float) / nb_total * 100
brut = (res[res > limite] - limite) / nb_an
brut = brut.sum()
net = brut / nb_valid * nb_total
print("""{total} mesures au totales
du {m_d} au {m_f}
entre {h_d} et {h_f}""".format(total=nb_total,
m_d=mois_debut, m_f=mois_fin,
h_d=heure_debut, h_f=heure_fin
)
)
aot = pd.DataFrame([brut.round(), nb_valid.round(), pcent.round(), net.round()],
index=['brutes', 'mesures valides', '% de rep.', 'nettes'])
return aot
|
[
"Calcul",
"de",
"l",
"AOT",
"de",
"manière",
"paramètrable",
".",
"Voir",
"AOT40_vegetation",
"ou",
"AOT40_foret",
"pour",
"des",
"paramètres",
"préalablement",
"fixés",
"."
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/reg.py#L148-L188
|
[
"def",
"_aot",
"(",
"df",
",",
"nb_an",
"=",
"1",
",",
"limite",
"=",
"80",
",",
"mois_debut",
"=",
"5",
",",
"mois_fin",
"=",
"7",
",",
"heure_debut",
"=",
"7",
",",
"heure_fin",
"=",
"19",
")",
":",
"res",
"=",
"df",
"[",
"(",
"df",
".",
"index",
".",
"month",
">=",
"mois_debut",
")",
"&",
"(",
"df",
".",
"index",
".",
"month",
"<=",
"mois_fin",
")",
"&",
"(",
"df",
".",
"index",
".",
"hour",
">=",
"heure_debut",
")",
"&",
"(",
"df",
".",
"index",
".",
"hour",
"<=",
"heure_fin",
")",
"]",
"nb_valid",
"=",
"res",
".",
"count",
"(",
")",
"nb_total",
"=",
"res",
".",
"shape",
"[",
"0",
"]",
"pcent",
"=",
"nb_valid",
".",
"astype",
"(",
"pd",
".",
"np",
".",
"float",
")",
"/",
"nb_total",
"*",
"100",
"brut",
"=",
"(",
"res",
"[",
"res",
">",
"limite",
"]",
"-",
"limite",
")",
"/",
"nb_an",
"brut",
"=",
"brut",
".",
"sum",
"(",
")",
"net",
"=",
"brut",
"/",
"nb_valid",
"*",
"nb_total",
"print",
"(",
"\"\"\"{total} mesures au totales\n du {m_d} au {m_f}\n entre {h_d} et {h_f}\"\"\"",
".",
"format",
"(",
"total",
"=",
"nb_total",
",",
"m_d",
"=",
"mois_debut",
",",
"m_f",
"=",
"mois_fin",
",",
"h_d",
"=",
"heure_debut",
",",
"h_f",
"=",
"heure_fin",
")",
")",
"aot",
"=",
"pd",
".",
"DataFrame",
"(",
"[",
"brut",
".",
"round",
"(",
")",
",",
"nb_valid",
".",
"round",
"(",
")",
",",
"pcent",
".",
"round",
"(",
")",
",",
"net",
".",
"round",
"(",
")",
"]",
",",
"index",
"=",
"[",
"'brutes'",
",",
"'mesures valides'",
",",
"'% de rep.'",
",",
"'nettes'",
"]",
")",
"return",
"aot"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
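For intuition, the AOT accumulation only sums the excess above the limite threshold: with limite=80, hourly values of 70, 90 and 120 contribute (90 - 80) + (120 - 80) = 50. A tiny numeric check of that step (a sketch of the `brut` line only, outside the month/hour filtering, with nb_an=1):

# Minimal numeric illustration of brut = (res[res > limite] - limite).sum()
import pandas as pd

s = pd.Series([70.0, 90.0, 120.0])
brut = (s[s > 80] - 80).sum()
assert brut == 50.0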
|
valid
|
no2
|
Calculs réglementaires pour le dioxyde d'azote
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): µg/m3 (microgramme par mètre cube)
Seuil de RI en moyenne H: 200u
Seuil d'Alerte sur 3H consécutives: 400u
Seuil d'Alerte sur 3J consécutifs: 200u
Valeur limite pour la santé humaine 18H/A: 200u
Valeur limite pour la santé humaine en moyenne A: 40u
Objectif de qualité en moyenne A: 40u
Protection de la végétation en moyenne A: 30u
Les résultats sont donnés en terme d'heure de dépassement
|
pyair/reg.py
|
def no2(df):
"""
Calculs réglementaires pour le dioxyde d'azote
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): µg/m3 (microgramme par mètre cube)
Seuil de RI en moyenne H: 200u
Seuil d'Alerte sur 3H consécutives: 400u
Seuil d'Alerte sur 3J consécutifs: 200u
Valeur limite pour la santé humaine 18H/A: 200u
Valeur limite pour la santé humaine en moyenne A: 40u
Objectif de qualité en moyenne A: 40u
Protection de la végétation en moyenne A: 30u
Les résultats sont donnés en terme d'heure de dépassement
"""
polluant = "NO2"
# Le DataFrame doit être en heure
if not isinstance(df.index.freq, pdoffset.Hour):
raise FreqException("df doit être en heure.")
res = {"Seuil de RI en moyenne H: 200u": depassement(df, valeur=200),
"Seuil d'Alerte sur 3H consécutives: 400u": consecutive(df, valeur=400, sur=3),
"Seuil d'Alerte sur 3J consécutifs: 200u": consecutive(df.resample('D', how='max'), valeur=200, sur=3),
"Valeur limite pour la santé humaine 18H/A: 200u": depassement(df, valeur=200),
"Valeur limite pour la santé humaine en moyenne A: 40u": depassement(df.resample('A', how='mean'),
valeur=40),
"Objectif de qualité en moyenne A: 40u": depassement(df.resample('A', how='mean'), valeur=40),
"Protection de la végétation en moyenne A: 30u": depassement(df.resample('A', how='mean'), valeur=30),
}
return polluant, res
|
def no2(df):
"""
Calculs réglementaires pour le dioxyde d'azote
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): µg/m3 (microgramme par mètre cube)
Seuil de RI en moyenne H: 200u
Seuil d'Alerte sur 3H consécutives: 400u
Seuil d'Alerte sur 3J consécutifs: 200u
Valeur limite pour la santé humaine 18H/A: 200u
Valeur limite pour la santé humaine en moyenne A: 40u
Objectif de qualité en moyenne A: 40u
Protection de la végétation en moyenne A: 30u
Les résultats sont donnés en terme d'heure de dépassement
"""
polluant = "NO2"
# Le DataFrame doit être en heure
if not isinstance(df.index.freq, pdoffset.Hour):
raise FreqException("df doit être en heure.")
res = {"Seuil de RI en moyenne H: 200u": depassement(df, valeur=200),
"Seuil d'Alerte sur 3H consécutives: 400u": consecutive(df, valeur=400, sur=3),
"Seuil d'Alerte sur 3J consécutifs: 200u": consecutive(df.resample('D', how='max'), valeur=200, sur=3),
"Valeur limite pour la santé humaine 18H/A: 200u": depassement(df, valeur=200),
"Valeur limite pour la santé humaine en moyenne A: 40u": depassement(df.resample('A', how='mean'),
valeur=40),
"Objectif de qualité en moyenne A: 40u": depassement(df.resample('A', how='mean'), valeur=40),
"Protection de la végétation en moyenne A: 30u": depassement(df.resample('A', how='mean'), valeur=30),
}
return polluant, res
|
[
"Calculs",
"réglementaires",
"pour",
"le",
"dioxyde",
"d",
"azote"
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/reg.py#L206-L247
|
[
"def",
"no2",
"(",
"df",
")",
":",
"polluant",
"=",
"\"NO2\"",
"# Le DataFrame doit être en heure",
"if",
"not",
"isinstance",
"(",
"df",
".",
"index",
".",
"freq",
",",
"pdoffset",
".",
"Hour",
")",
":",
"raise",
"FreqException",
"(",
"\"df doit être en heure.\")",
"",
"res",
"=",
"{",
"\"Seuil de RI en moyenne H: 200u\"",
":",
"depassement",
"(",
"df",
",",
"valeur",
"=",
"200",
")",
",",
"\"Seuil d'Alerte sur 3H consécutives: 400u\":",
" ",
"onsecutive(",
"d",
"f,",
" ",
"aleur=",
"4",
"00,",
" ",
"ur=",
"3",
")",
",",
"",
"\"Seuil d'Alerte sur 3J consécutifs: 200u\":",
" ",
"onsecutive(",
"d",
"f.",
"r",
"esample(",
"'",
"D',",
" ",
"ow=",
"'",
"max')",
",",
" ",
"aleur=",
"2",
"00,",
" ",
"ur=",
"3",
")",
",",
"",
"\"Valeur limite pour la santé humaine 18H/A: 200u\":",
" ",
"epassement(",
"d",
"f,",
" ",
"aleur=",
"2",
"00)",
",",
"",
"\"Valeur limite pour la santé humaine en moyenne A: 40u\":",
" ",
"epassement(",
"d",
"f.",
"r",
"esample(",
"'",
"A',",
" ",
"ow=",
"'",
"mean')",
",",
"",
"valeur",
"=",
"40",
")",
",",
"\"Objectif de qualité en moyenne A: 40u\":",
" ",
"epassement(",
"d",
"f.",
"r",
"esample(",
"'",
"A',",
" ",
"ow=",
"'",
"mean')",
",",
" ",
"aleur=",
"4",
"0)",
",",
"",
"\"Protection de la végétation en moyenne A: 30u\": ",
"d",
"passement(d",
"f",
".r",
"e",
"sample('",
"A",
"', ",
"h",
"w='",
"m",
"ean'),",
" ",
"v",
"leur=3",
"0",
"),",
"",
"",
"}",
"return",
"polluant",
",",
"res"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
|
valid
|
pm10
|
Calculs réglementaires pour les particules PM10
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): µg/m3 (microgramme par mètre cube)
Seuil de RI en moyenne J: 50u
Seuil d'Alerte en moyenne J: 80u
Valeur limite pour la santé humaine 35J/A: 50u
Valeur limite pour la santé humaine en moyenne A: 40u
Objectif de qualité en moyenne A: 30u
Les résultats sont donnés en terme d'heure de dépassement
|
pyair/reg.py
|
def pm10(df):
"""
Calculs réglementaires pour les particules PM10
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): µg/m3 (microgramme par mètre cube)
Seuil de RI en moyenne J: 50u
Seuil d'Alerte en moyenne J: 80u
Valeur limite pour la santé humaine 35J/A: 50u
Valeur limite pour la santé humaine en moyenne A: 40u
Objectif de qualité en moyenne A: 30u
Les résultats sont donnés en terme d'heure de dépassement
"""
polluant = 'PM10'
# Le DataFrame doit être en jour
if not isinstance(df.index.freq, pdoffset.Day):
raise FreqException("df doit être en jour.")
res = {"Seuil de RI en moyenne J: 50u": depassement(df, valeur=50),
"Seuil d'Alerte en moyenne J: 80u": depassement(df, valeur=80),
"Valeur limite pour la santé humaine 35J/A: 50u": depassement(df, valeur=50),
"Valeur limite pour la santé humaine en moyenne A: 40u": depassement(df.resample('A', how='mean'),
valeur=40),
"Objectif de qualité en moyenne A: 30u": depassement(df.resample('A', how='mean'), valeur=30),
}
return polluant, res
|
def pm10(df):
"""
Calculs réglementaires pour les particules PM10
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): µg/m3 (microgramme par mètre cube)
Seuil de RI en moyenne J: 50u
Seuil d'Alerte en moyenne J: 80u
Valeur limite pour la santé humaine 35J/A: 50u
Valeur limite pour la santé humaine en moyenne A: 40u
Objectif de qualité en moyenne A: 30u
Les résultats sont donnés en terme d'heure de dépassement
"""
polluant = 'PM10'
# Le DataFrame doit être en jour
if not isinstance(df.index.freq, pdoffset.Day):
raise FreqException("df doit être en jour.")
res = {"Seuil de RI en moyenne J: 50u": depassement(df, valeur=50),
"Seuil d'Alerte en moyenne J: 80u": depassement(df, valeur=80),
"Valeur limite pour la santé humaine 35J/A: 50u": depassement(df, valeur=50),
"Valeur limite pour la santé humaine en moyenne A: 40u": depassement(df.resample('A', how='mean'),
valeur=40),
"Objectif de qualité en moyenne A: 30u": depassement(df.resample('A', how='mean'), valeur=30),
}
return polluant, res
|
[
"Calculs",
"réglementaires",
"pour",
"les",
"particules",
"PM10"
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/reg.py#L250-L286
|
[
"def",
"pm10",
"(",
"df",
")",
":",
"polluant",
"=",
"'PM10'",
"# Le DataFrame doit être en jour",
"if",
"not",
"isinstance",
"(",
"df",
".",
"index",
".",
"freq",
",",
"pdoffset",
".",
"Day",
")",
":",
"raise",
"FreqException",
"(",
"\"df doit être en jour.\")",
"",
"res",
"=",
"{",
"\"Seuil de RI en moyenne J: 50u\"",
":",
"depassement",
"(",
"df",
",",
"valeur",
"=",
"50",
")",
",",
"\"Seuil d'Alerte en moyenne J: 80u\"",
":",
"depassement",
"(",
"df",
",",
"valeur",
"=",
"80",
")",
",",
"\"Valeur limite pour la santé humaine 35J/A: 50u\":",
" ",
"epassement(",
"d",
"f,",
" ",
"aleur=",
"5",
"0)",
",",
"",
"\"Valeur limite pour la santé humaine en moyenne A: 40u\":",
" ",
"epassement(",
"d",
"f.",
"r",
"esample(",
"'",
"A',",
" ",
"ow=",
"'",
"mean')",
",",
"",
"valeur",
"=",
"40",
")",
",",
"\"Objectif de qualité en moyenne A: 30u\":",
" ",
"epassement(",
"d",
"f.",
"r",
"esample(",
"'",
"A',",
" ",
"ow=",
"'",
"mean')",
",",
" ",
"aleur=",
"3",
"0)",
",",
"",
"}",
"return",
"polluant",
",",
"res"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
|
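A minimal usage sketch for pm10 above; the column name and values are illustrative, and the Day-frequency index is required by the isinstance check in the function:

import pandas as pd
from pyair import reg

# Série PM10 journalière fictive ; pm10() exige un index à fréquence Day
idx = pd.date_range('2015-01-01', periods=365, freq='D')
df = pd.DataFrame({'STATION': [42.0] * 365}, index=idx)

polluant, res = reg.pm10(df)
for critere, valeurs in res.items():
    print(critere, int(valeurs.count().sum()))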
valid
|
so2
|
Calculs réglementaires pour le dioxyde de soufre
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): µg/m3 (microgramme par mètre cube)
Seuil de RI en moyenne H: 300u
Seuil d'Alerte sur 3H consécutives: 500u
Valeur limite pour la santé humaine 24H/A: 350u
Valeur limite pour la santé humaine 3J/A: 125u
Objectif de qualité en moyenne A: 50u
Protection de la végétation en moyenne A: 20u
Protection de la végétation du 01/10 au 31/03: 20u
Les résultats sont donnés en terme d'heure de dépassement
|
pyair/reg.py
|
def so2(df):
"""
Calculs réglementaires pour le dioxyde de soufre
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): µg/m3 (microgramme par mètre cube)
Seuil de RI en moyenne H: 300u
Seuil d'Alerte sur 3H consécutives: 500u
Valeur limite pour la santé humaine 24H/A: 350u
Valeur limite pour la santé humaine 3J/A: 125u
Objectif de qualité en moyenne A: 50u
Protection de la végétation en moyenne A: 20u
Protection de la végétation du 01/10 au 31/03: 20u
Les résultats sont donnés en terme d'heure de dépassement
"""
polluant = 'SO2'
# Le DataFrame doit être en heure
if not isinstance(df.index.freq, pdoffset.Hour):
raise FreqException("df doit être en heure.")
res = {"Seuil de RI en moyenne H: 300u": depassement(df, valeur=300),
"Seuil d'Alerte sur 3H consécutives: 500u": depassement(df, valeur=500),
"Valeur limite pour la santé humaine 24H/A: 350u": depassement(df, valeur=350),
"Valeur limite pour la santé humaine 3J/A: 125u": depassement(df.resample('D', how='mean'), valeur=125),
"Objectif de qualité en moyenne A: 50u": depassement(df.resample('A', how='mean'), valeur=50),
"Protection de la végétation en moyenne A: 20u": depassement(df.resample('A', how='mean'), valeur=20),
"Protection de la végétation du 01/10 au 31/03: 20u": depassement(
df[(df.index.month <= 3) | (df.index.month >= 10)], valeur=20),
}
return polluant, res
|
def so2(df):
"""
Calculs réglementaires pour le dioxyde de soufre
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): µg/m3 (microgramme par mètre cube)
Seuil de RI en moyenne H: 300u
Seuil d'Alerte sur 3H consécutives: 500u
Valeur limite pour la santé humaine 24H/A: 350u
Valeur limite pour la santé humaine 3J/A: 125u
Objectif de qualité en moyenne A: 50u
Protection de la végétation en moyenne A: 20u
Protection de la végétation du 01/10 au 31/03: 20u
Les résultats sont donnés en terme d'heure de dépassement
"""
polluant = 'SO2'
# Le DataFrame doit être en heure
if not isinstance(df.index.freq, pdoffset.Hour):
raise FreqException("df doit être en heure.")
res = {"Seuil de RI en moyenne H: 300u": depassement(df, valeur=300),
"Seuil d'Alerte sur 3H consécutives: 500u": depassement(df, valeur=500),
"Valeur limite pour la santé humaine 24H/A: 350u": depassement(df, valeur=350),
"Valeur limite pour la santé humaine 3J/A: 125u": depassement(df.resample('D', how='mean'), valeur=125),
"Objectif de qualité en moyenne A: 50u": depassement(df.resample('A', how='mean'), valeur=50),
"Protection de la végétation en moyenne A: 20u": depassement(df.resample('A', how='mean'), valeur=20),
"Protection de la végétation du 01/10 au 31/03: 20u": depassement(
df[(df.index.month <= 3) | (df.index.month >= 10)], valeur=20),
}
return polluant, res
|
[
"Calculs",
"réglementaires",
"pour",
"le",
"dioxyde",
"de",
"soufre"
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/reg.py#L289-L330
|
[
"def",
"so2",
"(",
"df",
")",
":",
"polluant",
"=",
"'SO2'",
"# Le DataFrame doit être en heure",
"if",
"not",
"isinstance",
"(",
"df",
".",
"index",
".",
"freq",
",",
"pdoffset",
".",
"Hour",
")",
":",
"raise",
"FreqException",
"(",
"\"df doit être en heure.\")",
"",
"res",
"=",
"{",
"\"Seuil de RI en moyenne H: 300u\"",
":",
"depassement",
"(",
"df",
",",
"valeur",
"=",
"300",
")",
",",
"\"Seuil d'Alerte sur 3H consécutives: 500u\":",
" ",
"epassement(",
"d",
"f,",
" ",
"aleur=",
"5",
"00)",
",",
"",
"\"Valeur limite pour la santé humaine 24H/A: 350u\":",
" ",
"epassement(",
"d",
"f,",
" ",
"aleur=",
"3",
"50)",
",",
"",
"\"Valeur limite pour la santé humaine 3J/A: 125u\":",
" ",
"epassement(",
"d",
"f.",
"r",
"esample(",
"'",
"D',",
" ",
"ow=",
"'",
"mean')",
",",
" ",
"aleur=",
"1",
"25)",
",",
"",
"\"Objectif de qualité en moyenne A: 50u\":",
" ",
"epassement(",
"d",
"f.",
"r",
"esample(",
"'",
"A',",
" ",
"ow=",
"'",
"mean')",
",",
" ",
"aleur=",
"5",
"0)",
",",
"",
"\"Protection de la végétation en moyenne A: 20u\": ",
"d",
"passement(d",
"f",
".r",
"e",
"sample('",
"A",
"', ",
"h",
"w='",
"m",
"ean'),",
" ",
"v",
"leur=2",
"0",
"),",
"",
"",
"\"Protection de la végétation du 01/10 au 31/03: 20u\": ",
"d",
"passement(",
"",
"df",
"[",
"(",
"df",
".",
"index",
".",
"month",
"<=",
"3",
")",
"|",
"(",
"df",
".",
"index",
".",
"month",
">=",
"10",
")",
"]",
",",
"valeur",
"=",
"20",
")",
",",
"}",
"return",
"polluant",
",",
"res"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
|
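The only non-obvious expression in so2 is the seasonal filter behind its last criterion; a small sketch of that boolean mask on hypothetical hourly data:

import pandas as pd

idx = pd.date_range('2015-01-01', periods=8760, freq='H')
df = pd.DataFrame({'SO2': 15.0}, index=idx)
# Fenêtre du 01/10 au 31/03 : on garde janvier-mars et octobre-décembre
hiver = df[(df.index.month <= 3) | (df.index.month >= 10)]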
valid
|
co
|
Calculs réglementaires pour le monoxyde de carbone
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): µg/m3 (microgramme par mètre cube)
Valeur limite pour la santé humaine max J 8H glissantes: 10000u
Les résultats sont donnés en terme d'heure de dépassement
|
pyair/reg.py
|
def co(df):
"""
Calculs réglementaires pour le monoxyde de carbone
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): µg/m3 (microgramme par mètre cube)
Valeur limite pour la santé humaine max J 8H glissantes: 10000u
Les résultats sont donnés en terme d'heure de dépassement
"""
polluant = 'CO'
# Le DataFrame doit être en heure
if not isinstance(df.index.freq, pdoffset.Hour):
raise FreqException("df doit être en heure.")
res = {"Valeur limite pour la santé humaine sur 8H glissantes: 10000u": depassement(moyennes_glissantes(df, sur=8),
valeur=10),
}
return polluant, res
|
def co(df):
"""
Calculs réglementaires pour le monoxyde de carbone
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): µg/m3 (microgramme par mètre cube)
Valeur limite pour la santé humaine max J 8H glissantes: 10000u
Les résultats sont donnés en terme d'heure de dépassement
"""
polluant = 'CO'
# Le DataFrame doit être en heure
if not isinstance(df.index.freq, pdoffset.Hour):
raise FreqException("df doit être en heure.")
res = {"Valeur limite pour la santé humaine sur 8H glissantes: 10000u": depassement(moyennes_glissantes(df, sur=8),
valeur=10),
}
return polluant, res
|
[
"Calculs",
"réglementaires",
"pour",
"le",
"monoxyde",
"de",
"carbone"
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/reg.py#L333-L362
|
[
"def",
"co",
"(",
"df",
")",
":",
"polluant",
"=",
"'CO'",
"# Le DataFrame doit être en heure",
"if",
"not",
"isinstance",
"(",
"df",
".",
"index",
".",
"freq",
",",
"pdoffset",
".",
"Hour",
")",
":",
"raise",
"FreqException",
"(",
"\"df doit être en heure.\")",
"",
"res",
"=",
"{",
"\"Valeur limite pour la santé humaine sur 8H glissantes: 10000u\":",
" ",
"epassement(",
"m",
"oyennes_glissantes(",
"d",
"f,",
" ",
"ur=",
"8",
")",
",",
"",
"valeur",
"=",
"10",
")",
",",
"}",
"return",
"polluant",
",",
"res"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
|
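Note that the criterion label in co announces 10000u while the call passes valeur=10, which would only match data expressed in mg/m3; if the series is in µg/m3 as the docstring states, the intended call is presumably:

depassement(moyennes_glissantes(df, sur=8), valeur=10000)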
valid
|
o3
|
Calculs réglementaires pour l'ozone
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): µg/m3 (microgramme par mètre cube)
Seuil de RI sur 1H: 180u
Seuil d'Alerte sur 1H: 240u
Seuil d'Alerte sur 3H consécutives: 240u
Seuil d'Alerte sur 3H consécutives: 300u
Seuil d'Alerte sur 1H: 360u
Objectif de qualité pour la santé humaine sur 8H glissantes: 120u
Les résultats sont donnés en terme d'heure de dépassement
|
pyair/reg.py
|
def o3(df):
"""
Calculs réglementaires pour l'ozone
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): µg/m3 (microgramme par mètre cube)
Seuil de RI sur 1H: 180u
Seuil d'Alerte sur 1H: 240u
Seuil d'Alerte sur 3H consécutives: 240u
Seuil d'Alerte sur 3H consécutives: 300u
Seuil d'Alerte sur 1H: 360u
Objectif de qualité pour la santé humaine sur 8H glissantes: 120u
Les résultats sont donnés en terme d'heure de dépassement
"""
polluant = 'O3'
# Le DataFrame doit être en heure
if not isinstance(df.index.freq, pdoffset.Hour):
raise FreqException("df doit être en heure.")
res = {"Seuil de RI sur 1H: 180u": depassement(df, valeur=180),
"Seuil d'Alerte sur 1H: 240u": depassement(df, valeur=240),
"Seuil d'Alerte sur 1H: 360u": depassement(df, valeur=360),
"Seuil d'Alerte sur 3H consécutives: 240u": consecutive(df, valeur=240, sur=3),
"Seuil d'Alerte sur 3H consécutives: 300u": consecutive(df, valeur=300, sur=3),
"Objectif de qualité pour la santé humaine sur 8H glissantes: 120u": depassement(
moyennes_glissantes(df, sur=8), valeur=120),
}
return polluant, res
|
def o3(df):
"""
Calculs réglementaires pour l'ozone
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): µg/m3 (microgramme par mètre cube)
Seuil de RI sur 1H: 180u
Seuil d'Alerte sur 1H: 240u
Seuil d'Alerte sur 3H consécutives: 240u
Seuil d'Alerte sur 3H consécutives: 300u
Seuil d'Alerte sur 1H: 360u
Objectif de qualité pour la santé humaine sur 8H glissantes: 120u
Les résultats sont donnés en terme d'heure de dépassement
"""
polluant = 'O3'
# Le DataFrame doit être en heure
if not isinstance(df.index.freq, pdoffset.Hour):
raise FreqException("df doit être en heure.")
res = {"Seuil de RI sur 1H: 180u": depassement(df, valeur=180),
"Seuil d'Alerte sur 1H: 240u": depassement(df, valeur=240),
"Seuil d'Alerte sur 1H: 360u": depassement(df, valeur=360),
"Seuil d'Alerte sur 3H consécutives: 240u": consecutive(df, valeur=240, sur=3),
"Seuil d'Alerte sur 3H consécutives: 300u": consecutive(df, valeur=300, sur=3),
"Objectif de qualité pour la santé humaine sur 8H glissantes: 120u": depassement(
moyennes_glissantes(df, sur=8), valeur=120),
}
return polluant, res
|
[
"Calculs",
"réglementaires",
"pour",
"l",
"ozone"
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/reg.py#L365-L404
|
[
"def",
"o3",
"(",
"df",
")",
":",
"polluant",
"=",
"'O3'",
"# Le DataFrame doit être en heure",
"if",
"not",
"isinstance",
"(",
"df",
".",
"index",
".",
"freq",
",",
"pdoffset",
".",
"Hour",
")",
":",
"raise",
"FreqException",
"(",
"\"df doit être en heure.\")",
"",
"res",
"=",
"{",
"\"Seuil de RI sur 1H: 180u\"",
":",
"depassement",
"(",
"df",
",",
"valeur",
"=",
"180",
")",
",",
"\"Seuil d'Alerte sur 1H: 240u\"",
":",
"depassement",
"(",
"df",
",",
"valeur",
"=",
"240",
")",
",",
"\"Seuil d'Alerte sur 1H: 360u\"",
":",
"depassement",
"(",
"df",
",",
"valeur",
"=",
"360",
")",
",",
"\"Seuil d'Alerte sur 3H consécutives: 240u\":",
" ",
"onsecutive(",
"d",
"f,",
" ",
"aleur=",
"2",
"40,",
" ",
"ur=",
"3",
")",
",",
"",
"\"Seuil d'Alerte sur 3H consécutives: 300u\":",
" ",
"onsecutive(",
"d",
"f,",
" ",
"aleur=",
"3",
"00,",
" ",
"ur=",
"3",
")",
",",
"",
"\"Objectif de qualité pour la santé humaine sur 8H glissantes: 120u\": ",
"d",
"passement(",
"",
"moyennes_glissantes",
"(",
"df",
",",
"sur",
"=",
"8",
")",
",",
"valeur",
"=",
"120",
")",
",",
"}",
"return",
"polluant",
",",
"res"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
|
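consecutive(df, valeur, sur) is not shown in this dump; one plausible reading, flagging an hour only when the threshold has been exceeded for sur consecutive hours, can be sketched with a rolling minimum (an assumption, not the library's actual implementation):

import pandas as pd

def consecutive_sketch(df, valeur, sur=3):
    # Conserve une valeur seulement si les `sur` dernières heures dépassent toutes `valeur`
    return df.where(df.rolling(window=sur).min() > valeur)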
valid
|
c6h6
|
Calculs réglementaires pour le benzène
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): µg/m3 (microgramme par mètre cube)
Objectif de qualité en moyenne A: 2u
Valeur limite pour la santé humaine en moyenne A: 5u
Les résultats sont donnés en terme d'heure de dépassement
|
pyair/reg.py
|
def c6h6(df):
"""
Calculs réglementaires pour le benzène
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): µg/m3 (microgramme par mètre cube)
Objectif de qualité en moyenne A: 2u
Valeur limite pour la santé humaine en moyenne A: 5u
Les résultats sont donnés en terme d'heure de dépassement
"""
polluant = 'C6H6'
# Le DataFrame doit être en heure
if not isinstance(df.index.freq, pdoffset.Hour):
raise FreqException("df doit être en heure.")
res = {"Objectif de qualité en moyenne A: 2u": depassement(df.resample('A', how='mean'), valeur=2),
"Valeur limite pour la santé humaine en moyenne A: 5u": depassement(df.resample('A', how='mean'), valeur=5),
}
return polluant, res
|
def c6h6(df):
"""
Calculs réglementaires pour le benzène
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): µg/m3 (microgramme par mètre cube)
Objectif de qualité en moyenne A: 2u
Valeur limite pour la santé humaine en moyenne A: 5u
Les résultats sont donnés en terme d'heure de dépassement
"""
polluant = 'C6H6'
# Le DataFrame doit être en heure
if not isinstance(df.index.freq, pdoffset.Hour):
raise FreqException("df doit être en heure.")
res = {"Objectif de qualité en moyenne A: 2u": depassement(df.resample('A', how='mean'), valeur=2),
"Valeur limite pour la santé humaine en moyenne A: 5u": depassement(df.resample('A', how='mean'), valeur=5),
}
return polluant, res
|
[
"Calculs",
"réglementaires",
"pour",
"le",
"benzène"
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/reg.py#L407-L437
|
[
"def",
"c6h6",
"(",
"df",
")",
":",
"polluant",
"=",
"'C6H6'",
"# Le DataFrame doit être en heure",
"if",
"not",
"isinstance",
"(",
"df",
".",
"index",
".",
"freq",
",",
"pdoffset",
".",
"Hour",
")",
":",
"raise",
"FreqException",
"(",
"\"df doit être en heure.\")",
"",
"res",
"=",
"{",
"\"Objectif de qualité en moyenne A: 2u\":",
" ",
"epassement(",
"d",
"f.",
"r",
"esample(",
"'",
"A',",
" ",
"ow=",
"'",
"mean')",
",",
" ",
"aleur=",
"2",
")",
",",
"",
"\"Valeur limite pour la santé humaine en moyenne A: 5u\":",
" ",
"epassement(",
"d",
"f.",
"r",
"esample(",
"'",
"A',",
" ",
"ow=",
"'",
"mean')",
",",
" ",
"aleur=",
"5",
")",
",",
"",
"}",
"return",
"polluant",
",",
"res"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
|
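df.resample('A', how='mean'), used throughout these functions, is the old pandas resample API; the how= keyword was deprecated in pandas 0.18 and later removed, so on current pandas the equivalent is:

annuel = df.resample('A').mean()  # équivalent moderne de df.resample('A', how='mean')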
valid
|
arsenic
|
Calculs réglementaires pour l'arsenic
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): ng/m3 (nanogramme par mètre cube)
Valeur cible en moyenne A: 6u
Les résultats sont donnés en terme d'heure de dépassement
|
pyair/reg.py
|
def arsenic(df):
"""
Calculs réglementaires pour l'arsenic
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): ng/m3 (nanogramme par mètre cube)
Valeur cible en moyenne A: 6u
Les résultats sont donnés en terme d'heure de dépassement
"""
polluant = 'As'
# Le DataFrame doit être en heure
if not isinstance(df.index.freq, pdoffset.Hour):
raise FreqException("df doit être en heure.")
res = {"Valeur cible en moyenne A: 6u": depassement(df.resample('A', how='mean'), valeur=6),
}
return polluant, res
|
def arsenic(df):
"""
Calculs réglementaires pour l'arsenic
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): ng/m3 (nanogramme par mètre cube)
Valeur cible en moyenne A: 6u
Les résultats sont donnés en terme d'heure de dépassement
"""
polluant = 'As'
# Le DataFrame doit être en heure
if not isinstance(df.index.freq, pdoffset.Hour):
raise FreqException("df doit être en heure.")
res = {"Valeur cible en moyenne A: 6u": depassement(df.resample('A', how='mean'), valeur=6),
}
return polluant, res
|
[
"Calculs",
"réglementaires",
"pour",
"l",
"arsenic"
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/reg.py#L474-L501
|
[
"def",
"arsenic",
"(",
"df",
")",
":",
"polluant",
"=",
"'As'",
"# Le DataFrame doit être en heure",
"if",
"not",
"isinstance",
"(",
"df",
".",
"index",
".",
"freq",
",",
"pdoffset",
".",
"Hour",
")",
":",
"raise",
"FreqException",
"(",
"\"df doit être en heure.\")",
"",
"res",
"=",
"{",
"\"Valeur cible en moyenne A: 6u\"",
":",
"depassement",
"(",
"df",
".",
"resample",
"(",
"'A'",
",",
"how",
"=",
"'mean'",
")",
",",
"valeur",
"=",
"6",
")",
",",
"}",
"return",
"polluant",
",",
"res"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
|
valid
|
print_synthese
|
Présente une synthèse des calculs réglementaires en fournissant les valeurs
calculées suivant les réglementations définies dans chaque fonction de calcul
et un tableau de nombre de dépassement.
Paramètres:
fct: fonction renvoyant les éléments calculées
df: DataFrame de valeurs d'entrée à fournir à la fonction
Retourne:
Imprime sur l'écran les valeurs synthétisées
|
pyair/reg.py
|
def print_synthese(fct, df):
"""
Présente une synthèse des calculs réglementaires en fournissant les valeurs
calculées suivant les réglementations définies dans chaque fonction de calcul
et un tableau de nombre de dépassement.
Paramètres:
fct: fonction renvoyant les éléments calculées
df: DataFrame de valeurs d'entrée à fournir à la fonction
Retourne:
Imprime sur l'écran les valeurs synthétisées
"""
res_count = dict()
polluant, res = fct(df)
print("\nPour le polluant: %s" % polluant)
print("\nValeurs mesurées suivant critères:")
for k, v in res.items():
comp = compresse(v)
if not comp.empty:
comp.index.name = k
print(comp.to_string(na_rep='', float_format=lambda x: "%.0f" % x))
else:
print("\n%s: aucune valeur en dépassement" % k)
res_count[k] = v.count()
res_count = pd.DataFrame(res_count).T
print("Nombre de dépassements des critères:\n")
print(res_count)
|
def print_synthese(fct, df):
"""
Présente une synthèse des calculs réglementaires en fournissant les valeurs
calculées suivant les réglementations définies dans chaque fonction de calcul
et un tableau de nombre de dépassement.
Paramètres:
fct: fonction renvoyant les éléments calculées
df: DataFrame de valeurs d'entrée à fournir à la fonction
Retourne:
Imprime sur l'écran les valeurs synthétisées
"""
res_count = dict()
polluant, res = fct(df)
print("\nPour le polluant: %s" % polluant)
print("\nValeurs mesurées suivant critères:")
for k, v in res.items():
comp = compresse(v)
if not comp.empty:
comp.index.name = k
print(comp.to_string(na_rep='', float_format=lambda x: "%.0f" % x))
else:
print("\n%s: aucune valeur en dépassement" % k)
res_count[k] = v.count()
res_count = pd.DataFrame(res_count).T
print("Nombre de dépassements des critères:\n")
print(res_count)
|
[
"Présente",
"une",
"synthèse",
"des",
"calculs",
"réglementaires",
"en",
"fournissant",
"les",
"valeurs",
"calculées",
"suivant",
"les",
"réglementations",
"définies",
"dans",
"chaque",
"fonction",
"de",
"calcul",
"et",
"un",
"tableau",
"de",
"nombre",
"de",
"dépassement",
"."
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/reg.py#L597-L629
|
[
"def",
"print_synthese",
"(",
"fct",
",",
"df",
")",
":",
"res_count",
"=",
"dict",
"(",
")",
"polluant",
",",
"res",
"=",
"fct",
"(",
"df",
")",
"print",
"(",
"\"\\nPour le polluant: %s\"",
"%",
"polluant",
")",
"print",
"(",
"\"\\nValeurs mesurées suivant critères:\")",
"",
"for",
"k",
",",
"v",
"in",
"res",
".",
"items",
"(",
")",
":",
"comp",
"=",
"compresse",
"(",
"v",
")",
"if",
"not",
"comp",
".",
"empty",
":",
"comp",
".",
"index",
".",
"name",
"=",
"k",
"print",
"(",
"comp",
".",
"to_string",
"(",
"na_rep",
"=",
"''",
",",
"float_format",
"=",
"lambda",
"x",
":",
"\"%.0f\"",
"%",
"x",
")",
")",
"else",
":",
"print",
"(",
"\"\\n%s: aucune valeur en dépassement\" ",
" ",
")",
"",
"res_count",
"[",
"k",
"]",
"=",
"v",
".",
"count",
"(",
")",
"res_count",
"=",
"pd",
".",
"DataFrame",
"(",
"res_count",
")",
".",
"T",
"print",
"(",
"\"Nombre de dépassements des critères:\\n\")",
"",
"print",
"(",
"res_count",
")"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
|
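Typical use chains one of the pollutant functions above with print_synthese; a sketch on fictitious hourly ozone data:

import pandas as pd
from pyair import reg

idx = pd.date_range('2015-06-01', periods=24 * 31, freq='H')
df = pd.DataFrame({'O3': 150.0}, index=idx)  # mesures horaires fictives
reg.print_synthese(reg.o3, df)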
valid
|
excel_synthese
|
Enregistre dans un fichier Excel une synthèse des calculs réglementaires en
fournissant les valeurs calculées suivant les réglementations définies dans
chaque fonction de calcul et un tableau de nombre de dépassement.
Les résultats sont enregistrés
Paramètres:
fct: fonction renvoyant les éléments calculées
df: DataFrame de valeurs d'entrée à fournir à la fonction
excel_file: Chemin du fichier excel où écrire les valeurs
Retourne:
Rien
|
pyair/reg.py
|
def excel_synthese(fct, df, excel_file):
"""
Enregistre dans un fichier Excel une synthèse des calculs réglementaires en
fournissant les valeurs calculées suivant les réglementations définies dans
chaque fonction de calcul et un tableau de nombre de dépassement.
Les résultats sont enregistrés
Paramètres:
fct: fonction renvoyant les éléments calculées
df: DataFrame de valeurs d'entrée à fournir à la fonction
excel_file: Chemin du fichier excel où écrire les valeurs
Retourne:
Rien
"""
def sheet_name(name):
# formatage du nom des feuilles (suppression des guillements, :, ...)
name = unicodedata.normalize('NFKD', name).encode('ascii', 'ignore')
name = k.replace("'", "").replace(":", "").replace(" ", "_")
name = "%i-%s" % (i, name)
name = name[:31]
return name
res_count = dict()
polluant, res = fct(df)
print("\nTraitement du polluant: %s" % polluant)
writer = pd.ExcelWriter(excel_file)
# Valeurs mesurées suivant critères
for i, (k, v) in enumerate(res.items()):
comp = compresse(v)
comp.index.name = k
comp = comp.apply(pd.np.round)
comp.to_excel(writer, sheet_name=sheet_name(k))
res_count[k] = v.count()
# Nombre de dépassements des critères
name = "Nombre_de_depassements"
res_count = pd.DataFrame(res_count).T
res_count.index.name = name
res_count.to_excel(writer, sheet_name=name)
writer.save()
|
def excel_synthese(fct, df, excel_file):
"""
Enregistre dans un fichier Excel une synthèse des calculs réglementaires en
fournissant les valeurs calculées suivant les réglementations définies dans
chaque fonction de calcul et un tableau de nombre de dépassement.
Les résultats sont enregistrés
Paramètres:
fct: fonction renvoyant les éléments calculées
df: DataFrame de valeurs d'entrée à fournir à la fonction
excel_file: Chemin du fichier excel où écrire les valeurs
Retourne:
Rien
"""
def sheet_name(name):
# formatage du nom des feuilles (suppression des guillements, :, ...)
name = unicodedata.normalize('NFKD', name).encode('ascii', 'ignore')
name = k.replace("'", "").replace(":", "").replace(" ", "_")
name = "%i-%s" % (i, name)
name = name[:31]
return name
res_count = dict()
polluant, res = fct(df)
print("\nTraitement du polluant: %s" % polluant)
writer = pd.ExcelWriter(excel_file)
# Valeurs mesurées suivant critères
for i, (k, v) in enumerate(res.items()):
comp = compresse(v)
comp.index.name = k
comp = comp.apply(pd.np.round)
comp.to_excel(writer, sheet_name=sheet_name(k))
res_count[k] = v.count()
# Nombre de dépassements des critères
name = "Nombre_de_depassements"
res_count = pd.DataFrame(res_count).T
res_count.index.name = name
res_count.to_excel(writer, sheet_name=name)
writer.save()
|
[
"Enregistre",
"dans",
"un",
"fichier",
"Excel",
"une",
"synthèse",
"des",
"calculs",
"réglementaires",
"en",
"fournissant",
"les",
"valeurs",
"calculées",
"suivant",
"les",
"réglementations",
"définies",
"dans",
"chaque",
"fonction",
"de",
"calcul",
"et",
"un",
"tableau",
"de",
"nombre",
"de",
"dépassement",
".",
"Les",
"résultats",
"sont",
"enregistrés"
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/reg.py#L632-L678
|
[
"def",
"excel_synthese",
"(",
"fct",
",",
"df",
",",
"excel_file",
")",
":",
"def",
"sheet_name",
"(",
"name",
")",
":",
"# formatage du nom des feuilles (suppression des guillements, :, ...)",
"name",
"=",
"unicodedata",
".",
"normalize",
"(",
"'NFKD'",
",",
"name",
")",
".",
"encode",
"(",
"'ascii'",
",",
"'ignore'",
")",
"name",
"=",
"k",
".",
"replace",
"(",
"\"'\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\":\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\" \"",
",",
"\"_\"",
")",
"name",
"=",
"\"%i-%s\"",
"%",
"(",
"i",
",",
"name",
")",
"name",
"=",
"name",
"[",
":",
"31",
"]",
"return",
"name",
"res_count",
"=",
"dict",
"(",
")",
"polluant",
",",
"res",
"=",
"fct",
"(",
"df",
")",
"print",
"(",
"\"\\nTraitement du polluant: %s\"",
"%",
"polluant",
")",
"writer",
"=",
"pd",
".",
"ExcelWriter",
"(",
"excel_file",
")",
"# Valeurs mesurées suivant critères",
"for",
"i",
",",
"(",
"k",
",",
"v",
")",
"in",
"enumerate",
"(",
"res",
".",
"items",
"(",
")",
")",
":",
"comp",
"=",
"compresse",
"(",
"v",
")",
"comp",
".",
"index",
".",
"name",
"=",
"k",
"comp",
"=",
"comp",
".",
"apply",
"(",
"pd",
".",
"np",
".",
"round",
")",
"comp",
".",
"to_excel",
"(",
"writer",
",",
"sheet_name",
"=",
"sheet_name",
"(",
"k",
")",
")",
"res_count",
"[",
"k",
"]",
"=",
"v",
".",
"count",
"(",
")",
"# Nombre de dépassements des critères",
"name",
"=",
"\"Nombre_de_depassements\"",
"res_count",
"=",
"pd",
".",
"DataFrame",
"(",
"res_count",
")",
".",
"T",
"res_count",
".",
"index",
".",
"name",
"=",
"name",
"res_count",
".",
"to_excel",
"(",
"writer",
",",
"sheet_name",
"=",
"name",
")",
"writer",
".",
"save",
"(",
")"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
|
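The inner sheet_name helper above normalizes its name argument but then rebinds name from the closed-over loop variable k, so the unicodedata step is discarded; a tightened version that takes its inputs explicitly might read:

import unicodedata

def sheet_name(name, i):
    # Translittère en ASCII, retire "'", ':' et espaces, tronque à 31 caractères (limite Excel)
    name = unicodedata.normalize('NFKD', name).encode('ascii', 'ignore').decode('ascii')
    name = name.replace("'", "").replace(":", "").replace(" ", "_")
    return ("%i-%s" % (i, name))[:31]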
valid
|
html_synthese
|
Retourne au format html une synthèse des calculs réglementaires en
fournissant les valeurs calculées suivant les réglementations définies dans
chaque fonction de calcul et un tableau de nombre de dépassement.
Paramètres:
fct: fonction renvoyant les éléments calculées
df: DataFrame de valeurs d'entrée à fournir à la fonction
Retourne:
Une chaine de caractère prête à être utilisé dans une page html
|
pyair/reg.py
|
def html_synthese(fct, df):
"""
Retourne au format html une synthèse des calculs réglementaires en
fournissant les valeurs calculées suivant les réglementations définies dans
chaque fonction de calcul et un tableau de nombre de dépassement.
Paramètres:
fct: fonction renvoyant les éléments calculées
df: DataFrame de valeurs d'entrée à fournir à la fonction
Retourne:
Une chaine de caractère prête à être utilisé dans une page html
"""
html = str()
res_count = dict()
buf = StringIO()
polluant, res = fct(df)
html += '<p style="text-align:center"><h2>Pour le polluant: {}</h2></p>'.format(polluant)
# On enregistre tous les résultats dans le buffer et on calcule la somme de chaque
for k, v in res.items():
buf.write("<p>")
comp = compresse(v)
if not comp.empty:
comp.index.name = k
comp.to_html(buf=buf,
sparsify=True,
na_rep="")
else:
buf.write(
'<table border="1" class="dataframe"><thead><tr style="text-align: right;"><th>{}</th><th>Aucun dépassement</th></tr></table>'.format(
k))
buf.write("</p>")
res_count[k] = v.count()
res_count = pd.DataFrame(res_count).T
res_count.index.name = "Nombre de dépassements des critères"
html += "<p>"
html += res_count.to_html(sparsify=True)
html += "</p>"
html += buf.getvalue()
return html
|
def html_synthese(fct, df):
"""
Retourne au format html une synthèse des calculs réglementaires en
fournissant les valeurs calculées suivant les réglementations définies dans
chaque fonction de calcul et un tableau de nombre de dépassement.
Paramètres:
fct: fonction renvoyant les éléments calculées
df: DataFrame de valeurs d'entrée à fournir à la fonction
Retourne:
Une chaine de caractère prête à être utilisé dans une page html
"""
html = str()
res_count = dict()
buf = StringIO()
polluant, res = fct(df)
html += '<p style="text-align:center"><h2>Pour le polluant: {}</h2></p>'.format(polluant)
# On enregistre tous les résultats dans le buffer et on calcule la somme de chaque
for k, v in res.items():
buf.write("<p>")
comp = compresse(v)
if not comp.empty:
comp.index.name = k
comp.to_html(buf=buf,
sparsify=True,
na_rep="")
else:
buf.write(
'<table border="1" class="dataframe"><thead><tr style="text-align: right;"><th>{}</th><th>Aucun dépassement</th></tr></table>'.format(
k))
buf.write("</p>")
res_count[k] = v.count()
res_count = pd.DataFrame(res_count).T
res_count.index.name = "Nombre de dépassements des critères"
html += "<p>"
html += res_count.to_html(sparsify=True)
html += "</p>"
html += buf.getvalue()
return html
|
[
"Retourne",
"au",
"format",
"html",
"une",
"synthèse",
"des",
"calculs",
"réglementaires",
"en",
"fournissant",
"les",
"valeurs",
"calculées",
"suivant",
"les",
"réglementations",
"définies",
"dans",
"chaque",
"fonction",
"de",
"calcul",
"et",
"un",
"tableau",
"de",
"nombre",
"de",
"dépassement",
"."
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/reg.py#L681-L726
|
[
"def",
"html_synthese",
"(",
"fct",
",",
"df",
")",
":",
"html",
"=",
"str",
"(",
")",
"res_count",
"=",
"dict",
"(",
")",
"buf",
"=",
"StringIO",
"(",
")",
"polluant",
",",
"res",
"=",
"fct",
"(",
"df",
")",
"html",
"+=",
"'<p style=\"text-align:center\"><h2>Pour le polluant: {}</h2></p>'",
".",
"format",
"(",
"polluant",
")",
"# On enregistre tous les résultats dans le buffer et on calcule la somme de chaque",
"for",
"k",
",",
"v",
"in",
"res",
".",
"items",
"(",
")",
":",
"buf",
".",
"write",
"(",
"\"<p>\"",
")",
"comp",
"=",
"compresse",
"(",
"v",
")",
"if",
"not",
"comp",
".",
"empty",
":",
"comp",
".",
"index",
".",
"name",
"=",
"k",
"comp",
".",
"to_html",
"(",
"buf",
"=",
"buf",
",",
"sparsify",
"=",
"True",
",",
"na_rep",
"=",
"\"\"",
")",
"else",
":",
"buf",
".",
"write",
"(",
"'<table border=\"1\" class=\"dataframe\"><thead><tr style=\"text-align: right;\"><th>{}</th><th>Aucun dépassement</th></tr></table>'.",
"f",
"ormat(",
"",
"k",
")",
")",
"buf",
".",
"write",
"(",
"\"</p>\"",
")",
"res_count",
"[",
"k",
"]",
"=",
"v",
".",
"count",
"(",
")",
"res_count",
"=",
"pd",
".",
"DataFrame",
"(",
"res_count",
")",
".",
"T",
"res_count",
".",
"index",
".",
"name",
"=",
"\"Nombre de dépassements des critères\"",
"html",
"+=",
"\"<p>\"",
"html",
"+=",
"res_count",
".",
"to_html",
"(",
"sparsify",
"=",
"True",
")",
"html",
"+=",
"\"</p>\"",
"html",
"+=",
"buf",
".",
"getvalue",
"(",
")",
"return",
"html"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
|
valid
|
show_max
|
Pour chaque serie (colonne) d'un DataFrame, va rechercher la (les) valeur(s)
et la (les) date(s) du (des) max.
Paramètres:
df: DataFrame de valeurs à calculer
Retourne:
Un DataFrame montrant pour chaque serie (colonne), les valeurs maxs aux dates
d'apparition.
|
pyair/reg.py
|
def show_max(df):
"""Pour chaque serie (colonne) d'un DataFrame, va rechercher la (les) valeur(s)
et la (les) date(s) du (des) max.
Paramètres:
df: DataFrame de valeurs à calculer
Retourne:
Un DataFrame montrant pour chaque serie (colonne), les valeurs maxs aux dates
d'apparition.
"""
df = df.astype(pd.np.float)
res = list()
for c in df.columns:
serie = df[c]
res.append(serie.where(cond=serie == serie.max(), other=pd.np.nan).dropna())
return pd.DataFrame(res).T
|
def show_max(df):
"""Pour chaque serie (colonne) d'un DataFrame, va rechercher la (les) valeur(s)
et la (les) date(s) du (des) max.
Paramètres:
df: DataFrame de valeurs à calculer
Retourne:
Un DataFrame montrant pour chaque serie (colonne), les valeurs maxs aux dates
d'apparition.
"""
df = df.astype(pd.np.float)
res = list()
for c in df.columns:
serie = df[c]
res.append(serie.where(cond=serie == serie.max(), other=pd.np.nan).dropna())
return pd.DataFrame(res).T
|
[
"Pour",
"chaque",
"serie",
"(",
"colonne",
")",
"d",
"un",
"DataFrame",
"va",
"rechercher",
"la",
"(",
"les",
")",
"valeur",
"(",
"s",
")",
"et",
"la",
"(",
"les",
")",
"date",
"(",
"s",
")",
"du",
"(",
"des",
")",
"max",
"."
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/reg.py#L729-L745
|
[
"def",
"show_max",
"(",
"df",
")",
":",
"df",
"=",
"df",
".",
"astype",
"(",
"pd",
".",
"np",
".",
"float",
")",
"res",
"=",
"list",
"(",
")",
"for",
"c",
"in",
"df",
".",
"columns",
":",
"serie",
"=",
"df",
"[",
"c",
"]",
"res",
".",
"append",
"(",
"serie",
".",
"where",
"(",
"cond",
"=",
"serie",
"==",
"serie",
".",
"max",
"(",
")",
",",
"other",
"=",
"pd",
".",
"np",
".",
"nan",
")",
".",
"dropna",
"(",
")",
")",
"return",
"pd",
".",
"DataFrame",
"(",
"res",
")",
".",
"T"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
|
valid
|
taux_de_representativite
|
Calcul le taux de représentativité d'un dataframe
|
pyair/reg.py
|
def taux_de_representativite(df):
"""Calcul le taux de représentativité d'un dataframe"""
return (df.count().astype(pd.np.float) / df.shape[0] * 100).round(1)
|
def taux_de_representativite(df):
"""Calcul le taux de représentativité d'un dataframe"""
return (df.count().astype(pd.np.float) / df.shape[0] * 100).round(1)
|
[
"Calcul",
"le",
"taux",
"de",
"représentativité",
"d",
"un",
"dataframe"
] |
LionelR/pyair
|
python
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/reg.py#L748-L750
|
[
"def",
"taux_de_representativite",
"(",
"df",
")",
":",
"return",
"(",
"df",
".",
"count",
"(",
")",
".",
"astype",
"(",
"pd",
".",
"np",
".",
"float",
")",
"/",
"df",
".",
"shape",
"[",
"0",
"]",
"*",
"100",
")",
".",
"round",
"(",
"1",
")"
] |
467e8a843ca9f882f8bb2958805b7293591996ad
|
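pd.np, used here and in show_max, was an alias removed from recent pandas, and np.float is gone from recent numpy; on current versions the same computation reads:

def taux_de_representativite(df):
    """Taux de représentativité d'un DataFrame, en pourcentage."""
    return (df.count().astype(float) / df.shape[0] * 100).round(1)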
valid
|
EnvironmentCache.validate
|
Validate all the entries in the environment cache.
|
cpenv/cache.py
|
def validate(self):
'''Validate all the entries in the environment cache.'''
for env in list(self):
if not env.exists:
self.remove(env)
|
def validate(self):
'''Validate all the entries in the environment cache.'''
for env in list(self):
if not env.exists:
self.remove(env)
|
[
"Validate",
"all",
"the",
"entries",
"in",
"the",
"environment",
"cache",
"."
] |
cpenv/cpenv
|
python
|
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/cache.py#L36-L41
|
[
"def",
"validate",
"(",
"self",
")",
":",
"for",
"env",
"in",
"list",
"(",
"self",
")",
":",
"if",
"not",
"env",
".",
"exists",
":",
"self",
".",
"remove",
"(",
"env",
")"
] |
afbb569ae04002743db041d3629a5be8c290bd89
|
valid
|
EnvironmentCache.load
|
Load the environment cache from disk.
|
cpenv/cache.py
|
def load(self):
'''Load the environment cache from disk.'''
if not os.path.exists(self.path):
return
with open(self.path, 'r') as f:
env_data = yaml.load(f.read())
if env_data:
for env in env_data:
self.add(VirtualEnvironment(env['root']))
|
def load(self):
'''Load the environment cache from disk.'''
if not os.path.exists(self.path):
return
with open(self.path, 'r') as f:
env_data = yaml.load(f.read())
if env_data:
for env in env_data:
self.add(VirtualEnvironment(env['root']))
|
[
"Load",
"the",
"environment",
"cache",
"from",
"disk",
"."
] |
cpenv/cpenv
|
python
|
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/cache.py#L43-L54
|
[
"def",
"load",
"(",
"self",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"path",
")",
":",
"return",
"with",
"open",
"(",
"self",
".",
"path",
",",
"'r'",
")",
"as",
"f",
":",
"env_data",
"=",
"yaml",
".",
"load",
"(",
"f",
".",
"read",
"(",
")",
")",
"if",
"env_data",
":",
"for",
"env",
"in",
"env_data",
":",
"self",
".",
"add",
"(",
"VirtualEnvironment",
"(",
"env",
"[",
"'root'",
"]",
")",
")"
] |
afbb569ae04002743db041d3629a5be8c290bd89
|
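yaml.load without an explicit Loader is deprecated in recent PyYAML and can construct arbitrary Python objects; for plain cache data like this, a safer equivalent of the read above is:

import yaml

with open(cache_path, 'r') as f:  # cache_path stands in for self.path above
    env_data = yaml.safe_load(f)  # refuses arbitrary object construction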
valid
|
EnvironmentCache.save
|
Save the environment cache to disk.
|
cpenv/cache.py
|
def save(self):
'''Save the environment cache to disk.'''
env_data = [dict(name=env.name, root=env.path) for env in self]
encode = yaml.safe_dump(env_data, default_flow_style=False)
with open(self.path, 'w') as f:
f.write(encode)
|
def save(self):
'''Save the environment cache to disk.'''
env_data = [dict(name=env.name, root=env.path) for env in self]
encode = yaml.safe_dump(env_data, default_flow_style=False)
with open(self.path, 'w') as f:
f.write(encode)
|
[
"Save",
"the",
"environment",
"cache",
"to",
"disk",
"."
] |
cpenv/cpenv
|
python
|
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/cache.py#L61-L68
|
[
"def",
"save",
"(",
"self",
")",
":",
"env_data",
"=",
"[",
"dict",
"(",
"name",
"=",
"env",
".",
"name",
",",
"root",
"=",
"env",
".",
"path",
")",
"for",
"env",
"in",
"self",
"]",
"encode",
"=",
"yaml",
".",
"safe_dump",
"(",
"env_data",
",",
"default_flow_style",
"=",
"False",
")",
"with",
"open",
"(",
"self",
".",
"path",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"encode",
")"
] |
afbb569ae04002743db041d3629a5be8c290bd89
|
valid
|
prompt
|
Prompts a user for input. This is a convenience function that can
be used to prompt a user for input later.
If the user aborts the input by sending a interrupt signal, this
function will catch it and raise a :exc:`Abort` exception.
.. versionadded:: 6.0
Added unicode support for cmd.exe on Windows.
.. versionadded:: 4.0
Added the `err` parameter.
:param text: the text to show for the prompt.
:param default: the default value to use if no input happens. If this
is not given it will prompt until it's aborted.
:param hide_input: if this is set to true then the input value will
be hidden.
:param confirmation_prompt: asks for confirmation for the value.
:param type: the type to use to check the value against.
:param value_proc: if this parameter is provided it's a function that
is invoked instead of the type conversion to
convert a value.
:param prompt_suffix: a suffix that should be added to the prompt.
:param show_default: shows or hides the default value in the prompt.
:param err: if set to true the file defaults to ``stderr`` instead of
``stdout``, the same as with echo.
|
cpenv/packages/click/termui.py
|
def prompt(text, default=None, hide_input=False,
confirmation_prompt=False, type=None,
value_proc=None, prompt_suffix=': ',
show_default=True, err=False):
"""Prompts a user for input. This is a convenience function that can
be used to prompt a user for input later.
If the user aborts the input by sending a interrupt signal, this
function will catch it and raise a :exc:`Abort` exception.
.. versionadded:: 6.0
Added unicode support for cmd.exe on Windows.
.. versionadded:: 4.0
Added the `err` parameter.
:param text: the text to show for the prompt.
:param default: the default value to use if no input happens. If this
is not given it will prompt until it's aborted.
:param hide_input: if this is set to true then the input value will
be hidden.
:param confirmation_prompt: asks for confirmation for the value.
:param type: the type to use to check the value against.
:param value_proc: if this parameter is provided it's a function that
is invoked instead of the type conversion to
convert a value.
:param prompt_suffix: a suffix that should be added to the prompt.
:param show_default: shows or hides the default value in the prompt.
:param err: if set to true the file defaults to ``stderr`` instead of
``stdout``, the same as with echo.
"""
result = None
def prompt_func(text):
f = hide_input and hidden_prompt_func or visible_prompt_func
try:
# Write the prompt separately so that we get nice
# coloring through colorama on Windows
echo(text, nl=False, err=err)
return f('')
except (KeyboardInterrupt, EOFError):
# getpass doesn't print a newline if the user aborts input with ^C.
# Allegedly this behavior is inherited from getpass(3).
# A doc bug has been filed at https://bugs.python.org/issue24711
if hide_input:
echo(None, err=err)
raise Abort()
if value_proc is None:
value_proc = convert_type(type, default)
prompt = _build_prompt(text, prompt_suffix, show_default, default)
while 1:
while 1:
value = prompt_func(prompt)
if value:
break
# If a default is set and used, then the confirmation
# prompt is always skipped because that's the only thing
# that really makes sense.
elif default is not None:
return default
try:
result = value_proc(value)
except UsageError as e:
echo('Error: %s' % e.message, err=err)
continue
if not confirmation_prompt:
return result
while 1:
value2 = prompt_func('Repeat for confirmation: ')
if value2:
break
if value == value2:
return result
echo('Error: the two entered values do not match', err=err)
|
def prompt(text, default=None, hide_input=False,
confirmation_prompt=False, type=None,
value_proc=None, prompt_suffix=': ',
show_default=True, err=False):
"""Prompts a user for input. This is a convenience function that can
be used to prompt a user for input later.
If the user aborts the input by sending a interrupt signal, this
function will catch it and raise a :exc:`Abort` exception.
.. versionadded:: 6.0
Added unicode support for cmd.exe on Windows.
.. versionadded:: 4.0
Added the `err` parameter.
:param text: the text to show for the prompt.
:param default: the default value to use if no input happens. If this
is not given it will prompt until it's aborted.
:param hide_input: if this is set to true then the input value will
be hidden.
:param confirmation_prompt: asks for confirmation for the value.
:param type: the type to use to check the value against.
:param value_proc: if this parameter is provided it's a function that
is invoked instead of the type conversion to
convert a value.
:param prompt_suffix: a suffix that should be added to the prompt.
:param show_default: shows or hides the default value in the prompt.
:param err: if set to true the file defaults to ``stderr`` instead of
``stdout``, the same as with echo.
"""
result = None
def prompt_func(text):
f = hide_input and hidden_prompt_func or visible_prompt_func
try:
# Write the prompt separately so that we get nice
# coloring through colorama on Windows
echo(text, nl=False, err=err)
return f('')
except (KeyboardInterrupt, EOFError):
# getpass doesn't print a newline if the user aborts input with ^C.
# Allegedly this behavior is inherited from getpass(3).
# A doc bug has been filed at https://bugs.python.org/issue24711
if hide_input:
echo(None, err=err)
raise Abort()
if value_proc is None:
value_proc = convert_type(type, default)
prompt = _build_prompt(text, prompt_suffix, show_default, default)
while 1:
while 1:
value = prompt_func(prompt)
if value:
break
# If a default is set and used, then the confirmation
# prompt is always skipped because that's the only thing
# that really makes sense.
elif default is not None:
return default
try:
result = value_proc(value)
except UsageError as e:
echo('Error: %s' % e.message, err=err)
continue
if not confirmation_prompt:
return result
while 1:
value2 = prompt_func('Repeat for confirmation: ')
if value2:
break
if value == value2:
return result
echo('Error: the two entered values do not match', err=err)
|
[
"Prompts",
"a",
"user",
"for",
"input",
".",
"This",
"is",
"a",
"convenience",
"function",
"that",
"can",
"be",
"used",
"to",
"prompt",
"a",
"user",
"for",
"input",
"later",
"."
] |
cpenv/cpenv
|
python
|
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/packages/click/termui.py#L34-L110
|
[
"def",
"prompt",
"(",
"text",
",",
"default",
"=",
"None",
",",
"hide_input",
"=",
"False",
",",
"confirmation_prompt",
"=",
"False",
",",
"type",
"=",
"None",
",",
"value_proc",
"=",
"None",
",",
"prompt_suffix",
"=",
"': '",
",",
"show_default",
"=",
"True",
",",
"err",
"=",
"False",
")",
":",
"result",
"=",
"None",
"def",
"prompt_func",
"(",
"text",
")",
":",
"f",
"=",
"hide_input",
"and",
"hidden_prompt_func",
"or",
"visible_prompt_func",
"try",
":",
"# Write the prompt separately so that we get nice",
"# coloring through colorama on Windows",
"echo",
"(",
"text",
",",
"nl",
"=",
"False",
",",
"err",
"=",
"err",
")",
"return",
"f",
"(",
"''",
")",
"except",
"(",
"KeyboardInterrupt",
",",
"EOFError",
")",
":",
"# getpass doesn't print a newline if the user aborts input with ^C.",
"# Allegedly this behavior is inherited from getpass(3).",
"# A doc bug has been filed at https://bugs.python.org/issue24711",
"if",
"hide_input",
":",
"echo",
"(",
"None",
",",
"err",
"=",
"err",
")",
"raise",
"Abort",
"(",
")",
"if",
"value_proc",
"is",
"None",
":",
"value_proc",
"=",
"convert_type",
"(",
"type",
",",
"default",
")",
"prompt",
"=",
"_build_prompt",
"(",
"text",
",",
"prompt_suffix",
",",
"show_default",
",",
"default",
")",
"while",
"1",
":",
"while",
"1",
":",
"value",
"=",
"prompt_func",
"(",
"prompt",
")",
"if",
"value",
":",
"break",
"# If a default is set and used, then the confirmation",
"# prompt is always skipped because that's the only thing",
"# that really makes sense.",
"elif",
"default",
"is",
"not",
"None",
":",
"return",
"default",
"try",
":",
"result",
"=",
"value_proc",
"(",
"value",
")",
"except",
"UsageError",
"as",
"e",
":",
"echo",
"(",
"'Error: %s'",
"%",
"e",
".",
"message",
",",
"err",
"=",
"err",
")",
"continue",
"if",
"not",
"confirmation_prompt",
":",
"return",
"result",
"while",
"1",
":",
"value2",
"=",
"prompt_func",
"(",
"'Repeat for confirmation: '",
")",
"if",
"value2",
":",
"break",
"if",
"value",
"==",
"value2",
":",
"return",
"result",
"echo",
"(",
"'Error: the two entered values do not match'",
",",
"err",
"=",
"err",
")"
] |
afbb569ae04002743db041d3629a5be8c290bd89
|
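A short usage example of prompt; both calls use only keyword arguments documented above:

import click

# Re-prompts until the input parses as an int; empty input returns the default.
port = click.prompt('Port', default=8080, type=int)

# Hidden input with a confirmation pass, e.g. for passwords.
password = click.prompt('Password', hide_input=True, confirmation_prompt=True)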
valid
|
echo_via_pager
|
This function takes a text and shows it via an environment specific
pager on stdout.
.. versionchanged:: 3.0
Added the `color` flag.
:param text: the text to page.
:param color: controls if the pager supports ANSI colors or not. The
default is autodetection.
|
cpenv/packages/click/termui.py
|
def echo_via_pager(text, color=None):
"""This function takes a text and shows it via an environment specific
pager on stdout.
.. versionchanged:: 3.0
Added the `color` flag.
:param text: the text to page.
:param color: controls if the pager supports ANSI colors or not. The
default is autodetection.
"""
color = resolve_color_default(color)
if not isinstance(text, string_types):
text = text_type(text)
from ._termui_impl import pager
return pager(text + '\n', color)
|
def echo_via_pager(text, color=None):
"""This function takes a text and shows it via an environment specific
pager on stdout.
.. versionchanged:: 3.0
Added the `color` flag.
:param text: the text to page.
:param color: controls if the pager supports ANSI colors or not. The
default is autodetection.
"""
color = resolve_color_default(color)
if not isinstance(text, string_types):
text = text_type(text)
from ._termui_impl import pager
return pager(text + '\n', color)
|
[
"This",
"function",
"takes",
"a",
"text",
"and",
"shows",
"it",
"via",
"an",
"environment",
"specific",
"pager",
"on",
"stdout",
"."
] |
cpenv/cpenv
|
python
|
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/packages/click/termui.py#L198-L213
|
[
"def",
"echo_via_pager",
"(",
"text",
",",
"color",
"=",
"None",
")",
":",
"color",
"=",
"resolve_color_default",
"(",
"color",
")",
"if",
"not",
"isinstance",
"(",
"text",
",",
"string_types",
")",
":",
"text",
"=",
"text_type",
"(",
"text",
")",
"from",
".",
"_termui_impl",
"import",
"pager",
"return",
"pager",
"(",
"text",
"+",
"'\\n'",
",",
"color",
")"
] |
afbb569ae04002743db041d3629a5be8c290bd89
|
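And a one-liner for echo_via_pager:

import click

# Pages 200 lines through the environment's pager instead of flooding stdout.
click.echo_via_pager('\n'.join('line %d' % n for n in range(200)))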
valid
|
_remote_setup_engine
|
(Executed on remote engine) creates an ObjectEngine instance
|
distob/distob.py
|
def _remote_setup_engine(engine_id, nengines):
"""(Executed on remote engine) creates an ObjectEngine instance """
if distob.engine is None:
distob.engine = distob.ObjectEngine(engine_id, nengines)
# TODO these imports should be unnecessary with improved deserialization
import numpy as np
from scipy import stats
# TODO Using @ipyparallel.interactive still did not import to __main__
# so will do it this way for now.
import __main__
__main__.__dict__['np'] = np
__main__.__dict__['stats'] = stats
|
def _remote_setup_engine(engine_id, nengines):
"""(Executed on remote engine) creates an ObjectEngine instance """
if distob.engine is None:
distob.engine = distob.ObjectEngine(engine_id, nengines)
# TODO these imports should be unnecessary with improved deserialization
import numpy as np
from scipy import stats
# TODO Using @ipyparallel.interactive still did not import to __main__
# so will do it this way for now.
import __main__
__main__.__dict__['np'] = np
__main__.__dict__['stats'] = stats
|
[
"(",
"Executed",
"on",
"remote",
"engine",
")",
"creates",
"an",
"ObjectEngine",
"instance"
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/distob.py#L315-L326
|
[
"def",
"_remote_setup_engine",
"(",
"engine_id",
",",
"nengines",
")",
":",
"if",
"distob",
".",
"engine",
"is",
"None",
":",
"distob",
".",
"engine",
"=",
"distob",
".",
"ObjectEngine",
"(",
"engine_id",
",",
"nengines",
")",
"# TODO these imports should be unnecessary with improved deserialization",
"import",
"numpy",
"as",
"np",
"from",
"scipy",
"import",
"stats",
"# TODO Using @ipyparallel.interactive still did not import to __main__",
"# so will do it this way for now.",
"import",
"__main__",
"__main__",
".",
"__dict__",
"[",
"'np'",
"]",
"=",
"np",
"__main__",
".",
"__dict__",
"[",
"'stats'",
"]",
"=",
"stats"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
valid
|
setup_engines
|
Prepare all iPython engines for distributed object processing.
Args:
client (ipyparallel.Client, optional): If None, will create a client
using the default ipyparallel profile.
|
distob/distob.py
|
def setup_engines(client=None):
"""Prepare all iPython engines for distributed object processing.
Args:
client (ipyparallel.Client, optional): If None, will create a client
using the default ipyparallel profile.
"""
if not client:
try:
client = ipyparallel.Client()
except:
raise DistobClusterError(
u"""Could not connect to an ipyparallel cluster. Make
sure a cluster is started (e.g. to use the CPUs of a
single computer, can type 'ipcluster start')""")
eids = client.ids
if not eids:
raise DistobClusterError(
u'No ipyparallel compute engines are available')
nengines = len(eids)
dv = client[eids]
dv.use_dill()
with dv.sync_imports(quiet=True):
import distob
# create global ObjectEngine distob.engine on each engine
ars = []
for i in eids:
dv.targets = i
ars.append(dv.apply_async(_remote_setup_engine, i, nengines))
dv.wait(ars)
for ar in ars:
if not ar.successful():
raise ar.r
# create global ObjectHub distob.engine on the client host
if distob.engine is None:
distob.engine = ObjectHub(-1, client)
|
def setup_engines(client=None):
"""Prepare all iPython engines for distributed object processing.
Args:
client (ipyparallel.Client, optional): If None, will create a client
using the default ipyparallel profile.
"""
if not client:
try:
client = ipyparallel.Client()
except:
raise DistobClusterError(
u"""Could not connect to an ipyparallel cluster. Make
sure a cluster is started (e.g. to use the CPUs of a
single computer, can type 'ipcluster start')""")
eids = client.ids
if not eids:
raise DistobClusterError(
u'No ipyparallel compute engines are available')
nengines = len(eids)
dv = client[eids]
dv.use_dill()
with dv.sync_imports(quiet=True):
import distob
# create global ObjectEngine distob.engine on each engine
ars = []
for i in eids:
dv.targets = i
ars.append(dv.apply_async(_remote_setup_engine, i, nengines))
dv.wait(ars)
for ar in ars:
if not ar.successful():
raise ar.r
# create global ObjectHub distob.engine on the client host
if distob.engine is None:
distob.engine = ObjectHub(-1, client)
|
[
"Prepare",
"all",
"iPython",
"engines",
"for",
"distributed",
"object",
"processing",
"."
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/distob.py#L329-L364
|
[
"def",
"setup_engines",
"(",
"client",
"=",
"None",
")",
":",
"if",
"not",
"client",
":",
"try",
":",
"client",
"=",
"ipyparallel",
".",
"Client",
"(",
")",
"except",
":",
"raise",
"DistobClusterError",
"(",
"u\"\"\"Could not connect to an ipyparallel cluster. Make\n sure a cluster is started (e.g. to use the CPUs of a\n single computer, can type 'ipcluster start')\"\"\"",
")",
"eids",
"=",
"client",
".",
"ids",
"if",
"not",
"eids",
":",
"raise",
"DistobClusterError",
"(",
"u'No ipyparallel compute engines are available'",
")",
"nengines",
"=",
"len",
"(",
"eids",
")",
"dv",
"=",
"client",
"[",
"eids",
"]",
"dv",
".",
"use_dill",
"(",
")",
"with",
"dv",
".",
"sync_imports",
"(",
"quiet",
"=",
"True",
")",
":",
"import",
"distob",
"# create global ObjectEngine distob.engine on each engine",
"ars",
"=",
"[",
"]",
"for",
"i",
"in",
"eids",
":",
"dv",
".",
"targets",
"=",
"i",
"ars",
".",
"append",
"(",
"dv",
".",
"apply_async",
"(",
"_remote_setup_engine",
",",
"i",
",",
"nengines",
")",
")",
"dv",
".",
"wait",
"(",
"ars",
")",
"for",
"ar",
"in",
"ars",
":",
"if",
"not",
"ar",
".",
"successful",
"(",
")",
":",
"raise",
"ar",
".",
"r",
"# create global ObjectHub distob.engine on the client host",
"if",
"distob",
".",
"engine",
"is",
"None",
":",
"distob",
".",
"engine",
"=",
"ObjectHub",
"(",
"-",
"1",
",",
"client",
")"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
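setup_engines is normally called once per session; a sketch, assuming an ipyparallel cluster was started beforehand (e.g. with 'ipcluster start') and that the function is exported at package level:

import ipyparallel
import distob

client = ipyparallel.Client()  # connects to the default-profile cluster
distob.setup_engines(client)   # installs an ObjectEngine on each engine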
valid
|
_process_args
|
Select local or remote execution and prepare arguments accordingly.
Assumes any remote args have already been moved to a common engine.
Local execution will be chosen if:
- all args are ordinary objects or Remote instances on the local engine; or
- the local cache of all remote args is current, and prefer_local is True.
Otherwise, remote execution will be chosen.
For remote execution, replaces any remote arg with its Id.
For local execution, replaces any remote arg with its locally cached object
Any arguments or kwargs that are Sequences will be recursed one level deep.
Args:
args (list)
kwargs (dict)
prefer_local (bool, optional): Whether cached local results are prefered
if available, instead of returning Remote objects. Default is True.
|
distob/distob.py
|
def _process_args(args, kwargs, prefer_local=True, recurse=True):
"""Select local or remote execution and prepare arguments accordingly.
Assumes any remote args have already been moved to a common engine.
Local execution will be chosen if:
- all args are ordinary objects or Remote instances on the local engine; or
- the local cache of all remote args is current, and prefer_local is True.
Otherwise, remote execution will be chosen.
For remote execution, replaces any remote arg with its Id.
For local execution, replaces any remote arg with its locally cached object
Any arguments or kwargs that are Sequences will be recursed one level deep.
Args:
args (list)
kwargs (dict)
prefer_local (bool, optional): Whether cached local results are prefered
if available, instead of returning Remote objects. Default is True.
"""
this_engine = distob.engine.eid
local_args = []
remote_args = []
execloc = this_engine # the chosen engine id for execution of the call
for a in args:
id = None
if isinstance(a, Remote):
id = a._ref.id
elif isinstance(a, Ref):
id = a.id
elif isinstance(a, Id):
id = a
if id is not None:
if id.engine is this_engine:
local_args.append(distob.engine[id])
remote_args.append(distob.engine[id])
else:
if (prefer_local and isinstance(a, Remote) and
a._obcache_current):
local_args.append(a._obcache)
remote_args.append(id)
else:
# will choose remote execution
if execloc is not this_engine and id.engine is not execloc:
raise DistobValueError(
'two remote arguments are from different engines')
else:
execloc = id.engine
local_args.append(None)
remote_args.append(id)
elif (isinstance(a, collections.Sequence) and
not isinstance(a, string_types) and recurse):
eid, ls, _ = _process_args(a, {}, prefer_local, recurse=False)
if eid is not this_engine:
if execloc is not this_engine and eid is not execloc:
raise DistobValueError(
'two remote arguments are from different engines')
execloc = eid
local_args.append(ls)
remote_args.append(ls)
else:
# argument is an ordinary object
local_args.append(a)
remote_args.append(a)
local_kwargs = dict()
remote_kwargs = dict()
for k, a in kwargs.items():
id = None
if isinstance(a, Remote):
id = a._ref.id
elif isinstance(a, Ref):
id = a.id
elif isinstance(a, Id):
id = a
if id is not None:
if id.engine is this_engine:
local_kwargs[k] = distob.engine[id]
remote_kwargs[k] = distob.engine[id]
else:
if (prefer_local and isinstance(a, Remote) and
a._obcache_current):
local_kwargs[k] = a._obcache
remote_kwargs[k] = id
else:
# will choose remote execution
if execloc is not this_engine and id.engine is not execloc:
raise DistobValueError(
'two remote arguments are from different engines')
else:
execloc = id.engine
local_kwargs[k] = None
remote_kwargs[k] = id
elif (isinstance(a, collections.Sequence) and
not isinstance(a, string_types) and recurse):
eid, ls, _ = _process_args(a, {}, prefer_local, recurse=False)
if eid is not this_engine:
if execloc is not this_engine and eid is not execloc:
raise DistobValueError(
'two remote arguments are from different engines')
execloc = eid
local_kwargs[k] = ls
remote_kwargs[k] = ls
else:
# argument is an ordinary object
local_kwargs[k] = a
remote_kwargs[k] = a
if execloc is this_engine:
return execloc, tuple(local_args), local_kwargs
else:
return execloc, tuple(remote_args), remote_kwargs
|
def _process_args(args, kwargs, prefer_local=True, recurse=True):
"""Select local or remote execution and prepare arguments accordingly.
Assumes any remote args have already been moved to a common engine.
Local execution will be chosen if:
- all args are ordinary objects or Remote instances on the local engine; or
- the local cache of all remote args is current, and prefer_local is True.
Otherwise, remote execution will be chosen.
For remote execution, replaces any remote arg with its Id.
For local execution, replaces any remote arg with its locally cached object
Any arguments or kwargs that are Sequences will be recursed one level deep.
Args:
args (list)
kwargs (dict)
      prefer_local (bool, optional): Whether cached local results are preferred
if available, instead of returning Remote objects. Default is True.
"""
this_engine = distob.engine.eid
local_args = []
remote_args = []
execloc = this_engine # the chosen engine id for execution of the call
for a in args:
id = None
if isinstance(a, Remote):
id = a._ref.id
elif isinstance(a, Ref):
id = a.id
elif isinstance(a, Id):
id = a
if id is not None:
if id.engine is this_engine:
local_args.append(distob.engine[id])
remote_args.append(distob.engine[id])
else:
if (prefer_local and isinstance(a, Remote) and
a._obcache_current):
local_args.append(a._obcache)
remote_args.append(id)
else:
# will choose remote execution
if execloc is not this_engine and id.engine is not execloc:
raise DistobValueError(
'two remote arguments are from different engines')
else:
execloc = id.engine
local_args.append(None)
remote_args.append(id)
elif (isinstance(a, collections.Sequence) and
not isinstance(a, string_types) and recurse):
eid, ls, _ = _process_args(a, {}, prefer_local, recurse=False)
if eid is not this_engine:
if execloc is not this_engine and eid is not execloc:
raise DistobValueError(
'two remote arguments are from different engines')
execloc = eid
local_args.append(ls)
remote_args.append(ls)
else:
# argument is an ordinary object
local_args.append(a)
remote_args.append(a)
local_kwargs = dict()
remote_kwargs = dict()
for k, a in kwargs.items():
id = None
if isinstance(a, Remote):
id = a._ref.id
elif isinstance(a, Ref):
id = a.id
elif isinstance(a, Id):
id = a
if id is not None:
if id.engine is this_engine:
local_kwargs[k] = distob.engine[id]
remote_kwargs[k] = distob.engine[id]
else:
if (prefer_local and isinstance(a, Remote) and
a._obcache_current):
local_kwargs[k] = a._obcache
remote_kwargs[k] = id
else:
# will choose remote execution
if execloc is not this_engine and id.engine is not execloc:
raise DistobValueError(
'two remote arguments are from different engines')
else:
execloc = id.engine
local_kwargs[k] = None
remote_kwargs[k] = id
elif (isinstance(a, collections.Sequence) and
not isinstance(a, string_types) and recurse):
eid, ls, _ = _process_args(a, {}, prefer_local, recurse=False)
if eid is not this_engine:
if execloc is not this_engine and eid is not execloc:
raise DistobValueError(
'two remote arguments are from different engines')
execloc = eid
local_kwargs[k] = ls
remote_kwargs[k] = ls
else:
# argument is an ordinary object
local_kwargs[k] = a
remote_kwargs[k] = a
if execloc is this_engine:
return execloc, tuple(local_args), local_kwargs
else:
return execloc, tuple(remote_args), remote_kwargs
|
[
"Select",
"local",
"or",
"remote",
"execution",
"and",
"prepare",
"arguments",
"accordingly",
".",
"Assumes",
"any",
"remote",
"args",
"have",
"already",
"been",
"moved",
"to",
"a",
"common",
"engine",
"."
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/distob.py#L367-L475
|
[
"def",
"_process_args",
"(",
"args",
",",
"kwargs",
",",
"prefer_local",
"=",
"True",
",",
"recurse",
"=",
"True",
")",
":",
"this_engine",
"=",
"distob",
".",
"engine",
".",
"eid",
"local_args",
"=",
"[",
"]",
"remote_args",
"=",
"[",
"]",
"execloc",
"=",
"this_engine",
"# the chosen engine id for execution of the call",
"for",
"a",
"in",
"args",
":",
"id",
"=",
"None",
"if",
"isinstance",
"(",
"a",
",",
"Remote",
")",
":",
"id",
"=",
"a",
".",
"_ref",
".",
"id",
"elif",
"isinstance",
"(",
"a",
",",
"Ref",
")",
":",
"id",
"=",
"a",
".",
"id",
"elif",
"isinstance",
"(",
"a",
",",
"Id",
")",
":",
"id",
"=",
"a",
"if",
"id",
"is",
"not",
"None",
":",
"if",
"id",
".",
"engine",
"is",
"this_engine",
":",
"local_args",
".",
"append",
"(",
"distob",
".",
"engine",
"[",
"id",
"]",
")",
"remote_args",
".",
"append",
"(",
"distob",
".",
"engine",
"[",
"id",
"]",
")",
"else",
":",
"if",
"(",
"prefer_local",
"and",
"isinstance",
"(",
"a",
",",
"Remote",
")",
"and",
"a",
".",
"_obcache_current",
")",
":",
"local_args",
".",
"append",
"(",
"a",
".",
"_obcache",
")",
"remote_args",
".",
"append",
"(",
"id",
")",
"else",
":",
"# will choose remote execution",
"if",
"execloc",
"is",
"not",
"this_engine",
"and",
"id",
".",
"engine",
"is",
"not",
"execloc",
":",
"raise",
"DistobValueError",
"(",
"'two remote arguments are from different engines'",
")",
"else",
":",
"execloc",
"=",
"id",
".",
"engine",
"local_args",
".",
"append",
"(",
"None",
")",
"remote_args",
".",
"append",
"(",
"id",
")",
"elif",
"(",
"isinstance",
"(",
"a",
",",
"collections",
".",
"Sequence",
")",
"and",
"not",
"isinstance",
"(",
"a",
",",
"string_types",
")",
"and",
"recurse",
")",
":",
"eid",
",",
"ls",
",",
"_",
"=",
"_process_args",
"(",
"a",
",",
"{",
"}",
",",
"prefer_local",
",",
"recurse",
"=",
"False",
")",
"if",
"eid",
"is",
"not",
"this_engine",
":",
"if",
"execloc",
"is",
"not",
"this_engine",
"and",
"eid",
"is",
"not",
"execloc",
":",
"raise",
"DistobValueError",
"(",
"'two remote arguments are from different engines'",
")",
"execloc",
"=",
"eid",
"local_args",
".",
"append",
"(",
"ls",
")",
"remote_args",
".",
"append",
"(",
"ls",
")",
"else",
":",
"# argument is an ordinary object",
"local_args",
".",
"append",
"(",
"a",
")",
"remote_args",
".",
"append",
"(",
"a",
")",
"local_kwargs",
"=",
"dict",
"(",
")",
"remote_kwargs",
"=",
"dict",
"(",
")",
"for",
"k",
",",
"a",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"id",
"=",
"None",
"if",
"isinstance",
"(",
"a",
",",
"Remote",
")",
":",
"id",
"=",
"a",
".",
"_ref",
".",
"id",
"elif",
"isinstance",
"(",
"a",
",",
"Ref",
")",
":",
"id",
"=",
"a",
".",
"id",
"elif",
"isinstance",
"(",
"a",
",",
"Id",
")",
":",
"id",
"=",
"a",
"if",
"id",
"is",
"not",
"None",
":",
"if",
"id",
".",
"engine",
"is",
"this_engine",
":",
"local_kwargs",
"[",
"k",
"]",
"=",
"distob",
".",
"engine",
"[",
"id",
"]",
"remote_kwargs",
"[",
"k",
"]",
"=",
"distob",
".",
"engine",
"[",
"id",
"]",
"else",
":",
"if",
"(",
"prefer_local",
"and",
"isinstance",
"(",
"a",
",",
"Remote",
")",
"and",
"a",
".",
"_obcache_current",
")",
":",
"local_kwargs",
"[",
"k",
"]",
"=",
"a",
".",
"_obcache",
"remote_kwargs",
"[",
"k",
"]",
"=",
"id",
"else",
":",
"# will choose remote execution",
"if",
"execloc",
"is",
"not",
"this_engine",
"and",
"id",
".",
"engine",
"is",
"not",
"execloc",
":",
"raise",
"DistobValueError",
"(",
"'two remote arguments are from different engines'",
")",
"else",
":",
"execloc",
"=",
"id",
".",
"engine",
"local_kwargs",
"[",
"k",
"]",
"=",
"None",
"remote_kwargs",
"[",
"k",
"]",
"=",
"id",
"elif",
"(",
"isinstance",
"(",
"a",
",",
"collections",
".",
"Sequence",
")",
"and",
"not",
"isinstance",
"(",
"a",
",",
"string_types",
")",
"and",
"recurse",
")",
":",
"eid",
",",
"ls",
",",
"_",
"=",
"_process_args",
"(",
"a",
",",
"{",
"}",
",",
"prefer_local",
",",
"recurse",
"=",
"False",
")",
"if",
"eid",
"is",
"not",
"this_engine",
":",
"if",
"execloc",
"is",
"not",
"this_engine",
"and",
"eid",
"is",
"not",
"execloc",
":",
"raise",
"DistobValueError",
"(",
"'two remote arguments are from different engines'",
")",
"execloc",
"=",
"eid",
"local_kwargs",
"[",
"k",
"]",
"=",
"ls",
"remote_kwargs",
"[",
"k",
"]",
"=",
"ls",
"else",
":",
"# argument is an ordinary object ",
"local_kwargs",
"[",
"k",
"]",
"=",
"a",
"remote_kwargs",
"[",
"k",
"]",
"=",
"a",
"if",
"execloc",
"is",
"this_engine",
":",
"return",
"execloc",
",",
"tuple",
"(",
"local_args",
")",
",",
"local_kwargs",
"else",
":",
"return",
"execloc",
",",
"tuple",
"(",
"remote_args",
")",
",",
"remote_kwargs"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
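
The engine-selection rule that _process_args implements can be shown in isolation. The sketch below uses hypothetical names (choose_execloc, arg_engines) and plain integer engine ids; it illustrates the rule, not the library's API: each remote argument votes for its engine, local arguments are neutral, and two remote arguments on different engines raise an error.

def choose_execloc(arg_engines, this_engine=-1):
    execloc = this_engine
    for eng in arg_engines:
        if eng == this_engine:
            continue                 # local args don't constrain placement
        if execloc != this_engine and eng != execloc:
            raise ValueError('two remote arguments are from different engines')
        execloc = eng
    return execloc

assert choose_execloc([-1, -1]) == -1     # all local: execute locally
assert choose_execloc([-1, 2, 2]) == 2    # remote args agree: engine 2
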
valid
|
_remote_call
|
(Executed on remote engine) convert Ids to real objects, call f
|
distob/distob.py
|
def _remote_call(f, *args, **kwargs):
"""(Executed on remote engine) convert Ids to real objects, call f """
nargs = []
for a in args:
if isinstance(a, Id):
nargs.append(distob.engine[a])
elif (isinstance(a, collections.Sequence) and
not isinstance(a, string_types)):
nargs.append(
[distob.engine[b] if isinstance(b, Id) else b for b in a])
else: nargs.append(a)
for k, a in kwargs.items():
if isinstance(a, Id):
kwargs[k] = distob.engine[a]
elif (isinstance(a, collections.Sequence) and
not isinstance(a, string_types)):
kwargs[k] = [
distob.engine[b] if isinstance(b, Id) else b for b in a]
result = f(*nargs, **kwargs)
if (isinstance(result, collections.Sequence) and
not isinstance(result, string_types)):
# We will return any sub-sequences by value, not recurse deeper
results = []
for subresult in result:
if type(subresult) in distob.engine.proxy_types:
results.append(Ref(subresult))
else:
results.append(subresult)
return results
elif type(result) in distob.engine.proxy_types:
return Ref(result)
else:
return result
|
def _remote_call(f, *args, **kwargs):
"""(Executed on remote engine) convert Ids to real objects, call f """
nargs = []
for a in args:
if isinstance(a, Id):
nargs.append(distob.engine[a])
elif (isinstance(a, collections.Sequence) and
not isinstance(a, string_types)):
nargs.append(
[distob.engine[b] if isinstance(b, Id) else b for b in a])
else: nargs.append(a)
for k, a in kwargs.items():
if isinstance(a, Id):
kwargs[k] = distob.engine[a]
elif (isinstance(a, collections.Sequence) and
not isinstance(a, string_types)):
kwargs[k] = [
distob.engine[b] if isinstance(b, Id) else b for b in a]
result = f(*nargs, **kwargs)
if (isinstance(result, collections.Sequence) and
not isinstance(result, string_types)):
# We will return any sub-sequences by value, not recurse deeper
results = []
for subresult in result:
if type(subresult) in distob.engine.proxy_types:
results.append(Ref(subresult))
else:
results.append(subresult)
return results
elif type(result) in distob.engine.proxy_types:
return Ref(result)
else:
return result
|
[
"(",
"Executed",
"on",
"remote",
"engine",
")",
"convert",
"Ids",
"to",
"real",
"objects",
"call",
"f"
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/distob.py#L478-L510
|
[
"def",
"_remote_call",
"(",
"f",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"nargs",
"=",
"[",
"]",
"for",
"a",
"in",
"args",
":",
"if",
"isinstance",
"(",
"a",
",",
"Id",
")",
":",
"nargs",
".",
"append",
"(",
"distob",
".",
"engine",
"[",
"a",
"]",
")",
"elif",
"(",
"isinstance",
"(",
"a",
",",
"collections",
".",
"Sequence",
")",
"and",
"not",
"isinstance",
"(",
"a",
",",
"string_types",
")",
")",
":",
"nargs",
".",
"append",
"(",
"[",
"distob",
".",
"engine",
"[",
"b",
"]",
"if",
"isinstance",
"(",
"b",
",",
"Id",
")",
"else",
"b",
"for",
"b",
"in",
"a",
"]",
")",
"else",
":",
"nargs",
".",
"append",
"(",
"a",
")",
"for",
"k",
",",
"a",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"a",
",",
"Id",
")",
":",
"kwargs",
"[",
"k",
"]",
"=",
"distob",
".",
"engine",
"[",
"a",
"]",
"elif",
"(",
"isinstance",
"(",
"a",
",",
"collections",
".",
"Sequence",
")",
"and",
"not",
"isinstance",
"(",
"a",
",",
"string_types",
")",
")",
":",
"kwargs",
"[",
"k",
"]",
"=",
"[",
"distob",
".",
"engine",
"[",
"b",
"]",
"if",
"isinstance",
"(",
"b",
",",
"Id",
")",
"else",
"b",
"for",
"b",
"in",
"a",
"]",
"result",
"=",
"f",
"(",
"*",
"nargs",
",",
"*",
"*",
"kwargs",
")",
"if",
"(",
"isinstance",
"(",
"result",
",",
"collections",
".",
"Sequence",
")",
"and",
"not",
"isinstance",
"(",
"result",
",",
"string_types",
")",
")",
":",
"# We will return any sub-sequences by value, not recurse deeper",
"results",
"=",
"[",
"]",
"for",
"subresult",
"in",
"result",
":",
"if",
"type",
"(",
"subresult",
")",
"in",
"distob",
".",
"engine",
".",
"proxy_types",
":",
"results",
".",
"append",
"(",
"Ref",
"(",
"subresult",
")",
")",
"else",
":",
"results",
".",
"append",
"(",
"subresult",
")",
"return",
"results",
"elif",
"type",
"(",
"result",
")",
"in",
"distob",
".",
"engine",
".",
"proxy_types",
":",
"return",
"Ref",
"(",
"result",
")",
"else",
":",
"return",
"result"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
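
The substitution step at the top of _remote_call, shown standalone. All names here (Id, store, resolve) are hypothetical stand-ins: a plain dict plays the role of distob.engine's object store, and sequences are resolved one level deep, as in the original.

class Id(object):
    def __init__(self, key):
        self.key = key

store = {'a': [1, 2, 3]}   # stands in for distob.engine's object store

def resolve(args):
    out = []
    for a in args:
        if isinstance(a, Id):
            out.append(store[a.key])
        elif isinstance(a, (list, tuple)):   # one level deep, like the original
            out.append([store[b.key] if isinstance(b, Id) else b for b in a])
        else:
            out.append(a)
    return out

print(resolve([Id('a'), 5, [Id('a'), 'x']]))   # [[1, 2, 3], 5, [[1, 2, 3], 'x']]
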
valid
|
call
|
Execute f on the arguments, either locally or remotely as appropriate.
If there are multiple remote arguments, they must be on the same engine.
kwargs:
prefer_local (bool, optional): Whether to return cached local results if
available, in preference to returning Remote objects. Default is True.
block (bool, optional): Whether remote calls should be synchronous.
If False, returned results may be AsyncResults and should be converted
by the caller using convert_result() before use. Default is True.
|
distob/distob.py
|
def call(f, *args, **kwargs):
"""Execute f on the arguments, either locally or remotely as appropriate.
If there are multiple remote arguments, they must be on the same engine.
kwargs:
prefer_local (bool, optional): Whether to return cached local results if
available, in preference to returning Remote objects. Default is True.
block (bool, optional): Whether remote calls should be synchronous.
If False, returned results may be AsyncResults and should be converted
by the caller using convert_result() before use. Default is True.
"""
this_engine = distob.engine.eid
prefer_local = kwargs.pop('prefer_local', True)
block = kwargs.pop('block', True)
execloc, args, kwargs = _process_args(args, kwargs, prefer_local)
if execloc is this_engine:
r = f(*args, **kwargs)
else:
if False and prefer_local:
# result cache disabled until issue mattja/distob#1 is fixed
try:
kwtuple = tuple((k, kwargs[k]) for k in sorted(kwargs.keys()))
key = (f, args, kwtuple)
r = _call_cache[key]
except TypeError as te:
if te.args[0][:10] == 'unhashable':
#print("unhashable. won't be able to cache")
r = _uncached_call(execloc, f, *args, **kwargs)
else:
raise
except KeyError:
r = _uncached_call(execloc, f, *args, **kwargs)
if block:
_call_cache[key] = r.r
else:
r = _uncached_call(execloc, f, *args, **kwargs)
if block:
return convert_result(r)
else:
return r
|
def call(f, *args, **kwargs):
"""Execute f on the arguments, either locally or remotely as appropriate.
If there are multiple remote arguments, they must be on the same engine.
kwargs:
prefer_local (bool, optional): Whether to return cached local results if
available, in preference to returning Remote objects. Default is True.
block (bool, optional): Whether remote calls should be synchronous.
If False, returned results may be AsyncResults and should be converted
by the caller using convert_result() before use. Default is True.
"""
this_engine = distob.engine.eid
prefer_local = kwargs.pop('prefer_local', True)
block = kwargs.pop('block', True)
execloc, args, kwargs = _process_args(args, kwargs, prefer_local)
if execloc is this_engine:
r = f(*args, **kwargs)
else:
if False and prefer_local:
# result cache disabled until issue mattja/distob#1 is fixed
try:
kwtuple = tuple((k, kwargs[k]) for k in sorted(kwargs.keys()))
key = (f, args, kwtuple)
r = _call_cache[key]
except TypeError as te:
if te.args[0][:10] == 'unhashable':
#print("unhashable. won't be able to cache")
r = _uncached_call(execloc, f, *args, **kwargs)
else:
raise
except KeyError:
r = _uncached_call(execloc, f, *args, **kwargs)
if block:
_call_cache[key] = r.r
else:
r = _uncached_call(execloc, f, *args, **kwargs)
if block:
return convert_result(r)
else:
return r
|
[
"Execute",
"f",
"on",
"the",
"arguments",
"either",
"locally",
"or",
"remotely",
"as",
"appropriate",
".",
"If",
"there",
"are",
"multiple",
"remote",
"arguments",
"they",
"must",
"be",
"on",
"the",
"same",
"engine",
".",
"kwargs",
":",
"prefer_local",
"(",
"bool",
"optional",
")",
":",
"Whether",
"to",
"return",
"cached",
"local",
"results",
"if",
"available",
"in",
"preference",
"to",
"returning",
"Remote",
"objects",
".",
"Default",
"is",
"True",
".",
"block",
"(",
"bool",
"optional",
")",
":",
"Whether",
"remote",
"calls",
"should",
"be",
"synchronous",
".",
"If",
"False",
"returned",
"results",
"may",
"be",
"AsyncResults",
"and",
"should",
"be",
"converted",
"by",
"the",
"caller",
"using",
"convert_result",
"()",
"before",
"use",
".",
"Default",
"is",
"True",
"."
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/distob.py#L524-L563
|
[
"def",
"call",
"(",
"f",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"this_engine",
"=",
"distob",
".",
"engine",
".",
"eid",
"prefer_local",
"=",
"kwargs",
".",
"pop",
"(",
"'prefer_local'",
",",
"True",
")",
"block",
"=",
"kwargs",
".",
"pop",
"(",
"'block'",
",",
"True",
")",
"execloc",
",",
"args",
",",
"kwargs",
"=",
"_process_args",
"(",
"args",
",",
"kwargs",
",",
"prefer_local",
")",
"if",
"execloc",
"is",
"this_engine",
":",
"r",
"=",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"if",
"False",
"and",
"prefer_local",
":",
"# result cache disabled until issue mattja/distob#1 is fixed",
"try",
":",
"kwtuple",
"=",
"tuple",
"(",
"(",
"k",
",",
"kwargs",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"sorted",
"(",
"kwargs",
".",
"keys",
"(",
")",
")",
")",
"key",
"=",
"(",
"f",
",",
"args",
",",
"kwtuple",
")",
"r",
"=",
"_call_cache",
"[",
"key",
"]",
"except",
"TypeError",
"as",
"te",
":",
"if",
"te",
".",
"args",
"[",
"0",
"]",
"[",
":",
"10",
"]",
"==",
"'unhashable'",
":",
"#print(\"unhashable. won't be able to cache\")",
"r",
"=",
"_uncached_call",
"(",
"execloc",
",",
"f",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"raise",
"except",
"KeyError",
":",
"r",
"=",
"_uncached_call",
"(",
"execloc",
",",
"f",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"block",
":",
"_call_cache",
"[",
"key",
"]",
"=",
"r",
".",
"r",
"else",
":",
"r",
"=",
"_uncached_call",
"(",
"execloc",
",",
"f",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"block",
":",
"return",
"convert_result",
"(",
"r",
")",
"else",
":",
"return",
"r"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
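
A hedged usage sketch for call. It assumes a running cluster and that the package exposes a scatter function returning a Remote proxy (scatter itself is not shown in this record):

import numpy as np
import distob

x = distob.scatter(np.arange(10))          # assumed: returns a Remote proxy
total = distob.call(np.sum, x)             # runs on x's engine, blocks for result
ar = distob.call(np.sum, x, block=False)   # non-blocking: may be an AsyncResult
total2 = distob.convert_result(ar)         # caller converts it before use
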
valid
|
convert_result
|
Waits for and converts any AsyncResults. Converts any Ref into a Remote.
Args:
r: can be an ordinary object, ipyparallel.AsyncResult, a Ref, or a
Sequence of objects, AsyncResults and Refs.
Returns:
either an ordinary object or a Remote instance
|
distob/distob.py
|
def convert_result(r):
"""Waits for and converts any AsyncResults. Converts any Ref into a Remote.
Args:
r: can be an ordinary object, ipyparallel.AsyncResult, a Ref, or a
Sequence of objects, AsyncResults and Refs.
Returns:
either an ordinary object or a Remote instance"""
if (isinstance(r, collections.Sequence) and
not isinstance(r, string_types)):
rs = []
for subresult in r:
rs.append(convert_result(subresult))
return rs
if isinstance(r, ipyparallel.AsyncResult):
r = r.r
if isinstance(r, Ref):
RemoteClass = distob.engine.proxy_types[r.type]
r = RemoteClass(r)
return r
|
def convert_result(r):
"""Waits for and converts any AsyncResults. Converts any Ref into a Remote.
Args:
r: can be an ordinary object, ipyparallel.AsyncResult, a Ref, or a
Sequence of objects, AsyncResults and Refs.
Returns:
either an ordinary object or a Remote instance"""
if (isinstance(r, collections.Sequence) and
not isinstance(r, string_types)):
rs = []
for subresult in r:
rs.append(convert_result(subresult))
return rs
if isinstance(r, ipyparallel.AsyncResult):
r = r.r
if isinstance(r, Ref):
RemoteClass = distob.engine.proxy_types[r.type]
r = RemoteClass(r)
return r
|
[
"Waits",
"for",
"and",
"converts",
"any",
"AsyncResults",
".",
"Converts",
"any",
"Ref",
"into",
"a",
"Remote",
".",
"Args",
":",
"r",
":",
"can",
"be",
"an",
"ordinary",
"object",
"ipyparallel",
".",
"AsyncResult",
"a",
"Ref",
"or",
"a",
"Sequence",
"of",
"objects",
"AsyncResults",
"and",
"Refs",
".",
"Returns",
":",
"either",
"an",
"ordinary",
"object",
"or",
"a",
"Remote",
"instance"
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/distob.py#L566-L584
|
[
"def",
"convert_result",
"(",
"r",
")",
":",
"if",
"(",
"isinstance",
"(",
"r",
",",
"collections",
".",
"Sequence",
")",
"and",
"not",
"isinstance",
"(",
"r",
",",
"string_types",
")",
")",
":",
"rs",
"=",
"[",
"]",
"for",
"subresult",
"in",
"r",
":",
"rs",
".",
"append",
"(",
"convert_result",
"(",
"subresult",
")",
")",
"return",
"rs",
"if",
"isinstance",
"(",
"r",
",",
"ipyparallel",
".",
"AsyncResult",
")",
":",
"r",
"=",
"r",
".",
"r",
"if",
"isinstance",
"(",
"r",
",",
"Ref",
")",
":",
"RemoteClass",
"=",
"distob",
".",
"engine",
".",
"proxy_types",
"[",
"r",
".",
"type",
"]",
"r",
"=",
"RemoteClass",
"(",
"r",
")",
"return",
"r"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
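
Two quick illustrations of convert_result. The first is local-only (it assumes only that distob and its ipyparallel dependency import cleanly): ordinary objects pass through unchanged and sequences are converted element-wise. The second, left commented out, shows the intended pairing with a non-blocking call and needs a cluster.

import distob

print(distob.convert_result([1, 'two', [3]]))   # -> [1, 'two', [3]]

# With a cluster (f and x assumed defined):
# ar = distob.call(f, x, block=False)            # may be an AsyncResult
# result = distob.convert_result(ar)             # waits, wraps any Ref as Remote*
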
valid
|
_remote_methodcall
|
(Executed on remote engine) convert Ids to real objects, call method
|
distob/distob.py
|
def _remote_methodcall(id, method_name, *args, **kwargs):
"""(Executed on remote engine) convert Ids to real objects, call method """
obj = distob.engine[id]
nargs = []
for a in args:
if isinstance(a, Id):
nargs.append(distob.engine[a])
elif (isinstance(a, collections.Sequence) and
not isinstance(a, string_types)):
nargs.append(
[distob.engine[b] if isinstance(b, Id) else b for b in a])
else: nargs.append(a)
for k, a in kwargs.items():
if isinstance(a, Id):
kwargs[k] = distob.engine[a]
elif (isinstance(a, collections.Sequence) and
not isinstance(a, string_types)):
kwargs[k] = [
distob.engine[b] if isinstance(b, Id) else b for b in a]
result = getattr(obj, method_name)(*nargs, **kwargs)
if (isinstance(result, collections.Sequence) and
not isinstance(result, string_types)):
# We will return any sub-sequences by value, not recurse deeper
results = []
for subresult in result:
if type(subresult) in distob.engine.proxy_types:
results.append(Ref(subresult))
else:
results.append(subresult)
return results
elif type(result) in distob.engine.proxy_types:
return Ref(result)
else:
return result
|
def _remote_methodcall(id, method_name, *args, **kwargs):
"""(Executed on remote engine) convert Ids to real objects, call method """
obj = distob.engine[id]
nargs = []
for a in args:
if isinstance(a, Id):
nargs.append(distob.engine[a])
elif (isinstance(a, collections.Sequence) and
not isinstance(a, string_types)):
nargs.append(
[distob.engine[b] if isinstance(b, Id) else b for b in a])
else: nargs.append(a)
for k, a in kwargs.items():
if isinstance(a, Id):
kwargs[k] = distob.engine[a]
elif (isinstance(a, collections.Sequence) and
not isinstance(a, string_types)):
kwargs[k] = [
distob.engine[b] if isinstance(b, Id) else b for b in a]
result = getattr(obj, method_name)(*nargs, **kwargs)
if (isinstance(result, collections.Sequence) and
not isinstance(result, string_types)):
# We will return any sub-sequences by value, not recurse deeper
results = []
for subresult in result:
if type(subresult) in distob.engine.proxy_types:
results.append(Ref(subresult))
else:
results.append(subresult)
return results
elif type(result) in distob.engine.proxy_types:
return Ref(result)
else:
return result
|
[
"(",
"Executed",
"on",
"remote",
"engine",
")",
"convert",
"Ids",
"to",
"real",
"objects",
"call",
"method"
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/distob.py#L587-L620
|
[
"def",
"_remote_methodcall",
"(",
"id",
",",
"method_name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"obj",
"=",
"distob",
".",
"engine",
"[",
"id",
"]",
"nargs",
"=",
"[",
"]",
"for",
"a",
"in",
"args",
":",
"if",
"isinstance",
"(",
"a",
",",
"Id",
")",
":",
"nargs",
".",
"append",
"(",
"distob",
".",
"engine",
"[",
"a",
"]",
")",
"elif",
"(",
"isinstance",
"(",
"a",
",",
"collections",
".",
"Sequence",
")",
"and",
"not",
"isinstance",
"(",
"a",
",",
"string_types",
")",
")",
":",
"nargs",
".",
"append",
"(",
"[",
"distob",
".",
"engine",
"[",
"b",
"]",
"if",
"isinstance",
"(",
"b",
",",
"Id",
")",
"else",
"b",
"for",
"b",
"in",
"a",
"]",
")",
"else",
":",
"nargs",
".",
"append",
"(",
"a",
")",
"for",
"k",
",",
"a",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"a",
",",
"Id",
")",
":",
"kwargs",
"[",
"k",
"]",
"=",
"distob",
".",
"engine",
"[",
"a",
"]",
"elif",
"(",
"isinstance",
"(",
"a",
",",
"collections",
".",
"Sequence",
")",
"and",
"not",
"isinstance",
"(",
"a",
",",
"string_types",
")",
")",
":",
"kwargs",
"[",
"k",
"]",
"=",
"[",
"distob",
".",
"engine",
"[",
"b",
"]",
"if",
"isinstance",
"(",
"b",
",",
"Id",
")",
"else",
"b",
"for",
"b",
"in",
"a",
"]",
"result",
"=",
"getattr",
"(",
"obj",
",",
"method_name",
")",
"(",
"*",
"nargs",
",",
"*",
"*",
"kwargs",
")",
"if",
"(",
"isinstance",
"(",
"result",
",",
"collections",
".",
"Sequence",
")",
"and",
"not",
"isinstance",
"(",
"result",
",",
"string_types",
")",
")",
":",
"# We will return any sub-sequences by value, not recurse deeper",
"results",
"=",
"[",
"]",
"for",
"subresult",
"in",
"result",
":",
"if",
"type",
"(",
"subresult",
")",
"in",
"distob",
".",
"engine",
".",
"proxy_types",
":",
"results",
".",
"append",
"(",
"Ref",
"(",
"subresult",
")",
")",
"else",
":",
"results",
".",
"append",
"(",
"subresult",
")",
"return",
"results",
"elif",
"type",
"(",
"result",
")",
"in",
"distob",
".",
"engine",
".",
"proxy_types",
":",
"return",
"Ref",
"(",
"result",
")",
"else",
":",
"return",
"result"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
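
_remote_methodcall differs from _remote_call only in how it obtains the callable: it looks the method up on the target object with getattr. That dispatch, shown in isolation:

obj = [3, 1, 2]
method_name = 'index'
result = getattr(obj, method_name)(2)   # equivalent to obj.index(2)
print(result)                           # -> 2
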
valid
|
methodcall
|
Call a method of `obj`, either locally or remotely as appropriate.
obj may be an ordinary object, or a Remote object (or Ref or object Id)
If there are multiple remote arguments, they must be on the same engine.
kwargs:
prefer_local (bool, optional): Whether to return cached local results if
available, in preference to returning Remote objects. Default is True.
block (bool, optional): Whether remote calls should be synchronous.
If False, returned results may be AsyncResults and should be converted
by the caller using convert_result() before use. Default is True.
|
distob/distob.py
|
def methodcall(obj, method_name, *args, **kwargs):
"""Call a method of `obj`, either locally or remotely as appropriate.
obj may be an ordinary object, or a Remote object (or Ref or object Id)
If there are multiple remote arguments, they must be on the same engine.
kwargs:
prefer_local (bool, optional): Whether to return cached local results if
available, in preference to returning Remote objects. Default is True.
block (bool, optional): Whether remote calls should be synchronous.
If False, returned results may be AsyncResults and should be converted
by the caller using convert_result() before use. Default is True.
"""
this_engine = distob.engine.eid
args = [obj] + list(args)
prefer_local = kwargs.pop('prefer_local', None)
if prefer_local is None:
if isinstance(obj, Remote):
prefer_local = obj.prefer_local
else:
prefer_local = True
block = kwargs.pop('block', True)
execloc, args, kwargs = _process_args(args, kwargs, prefer_local)
if execloc is this_engine:
r = getattr(args[0], method_name)(*args[1:], **kwargs)
else:
if False and prefer_local:
# result cache disabled until issue mattja/distob#1 is fixed
try:
kwtuple = tuple((k, kwargs[k]) for k in sorted(kwargs.keys()))
key = (args[0], method_name, args, kwtuple)
r = _call_cache[key]
except TypeError as te:
if te.args[0][:10] == 'unhashable':
#print("unhashable. won't be able to cache")
r = _uncached_methodcall(execloc, args[0], method_name,
*args[1:], **kwargs)
else:
raise
except KeyError:
r = _uncached_methodcall(execloc, args[0], method_name,
*args[1:], **kwargs)
if block:
_call_cache[key] = r.r
else:
r = _uncached_methodcall(execloc, args[0], method_name,
*args[1:], **kwargs)
if block:
return convert_result(r)
else:
return r
|
def methodcall(obj, method_name, *args, **kwargs):
"""Call a method of `obj`, either locally or remotely as appropriate.
obj may be an ordinary object, or a Remote object (or Ref or object Id)
If there are multiple remote arguments, they must be on the same engine.
kwargs:
prefer_local (bool, optional): Whether to return cached local results if
available, in preference to returning Remote objects. Default is True.
block (bool, optional): Whether remote calls should be synchronous.
If False, returned results may be AsyncResults and should be converted
by the caller using convert_result() before use. Default is True.
"""
this_engine = distob.engine.eid
args = [obj] + list(args)
prefer_local = kwargs.pop('prefer_local', None)
if prefer_local is None:
if isinstance(obj, Remote):
prefer_local = obj.prefer_local
else:
prefer_local = True
block = kwargs.pop('block', True)
execloc, args, kwargs = _process_args(args, kwargs, prefer_local)
if execloc is this_engine:
r = getattr(args[0], method_name)(*args[1:], **kwargs)
else:
if False and prefer_local:
# result cache disabled until issue mattja/distob#1 is fixed
try:
kwtuple = tuple((k, kwargs[k]) for k in sorted(kwargs.keys()))
key = (args[0], method_name, args, kwtuple)
r = _call_cache[key]
except TypeError as te:
if te.args[0][:10] == 'unhashable':
#print("unhashable. won't be able to cache")
r = _uncached_methodcall(execloc, args[0], method_name,
*args[1:], **kwargs)
else:
raise
except KeyError:
r = _uncached_methodcall(execloc, args[0], method_name,
*args[1:], **kwargs)
if block:
_call_cache[key] = r.r
else:
r = _uncached_methodcall(execloc, args[0], method_name,
*args[1:], **kwargs)
if block:
return convert_result(r)
else:
return r
|
[
"Call",
"a",
"method",
"of",
"obj",
"either",
"locally",
"or",
"remotely",
"as",
"appropriate",
".",
"obj",
"may",
"be",
"an",
"ordinary",
"object",
"or",
"a",
"Remote",
"object",
"(",
"or",
"Ref",
"or",
"object",
"Id",
")",
"If",
"there",
"are",
"multiple",
"remote",
"arguments",
"they",
"must",
"be",
"on",
"the",
"same",
"engine",
".",
"kwargs",
":",
"prefer_local",
"(",
"bool",
"optional",
")",
":",
"Whether",
"to",
"return",
"cached",
"local",
"results",
"if",
"available",
"in",
"preference",
"to",
"returning",
"Remote",
"objects",
".",
"Default",
"is",
"True",
".",
"block",
"(",
"bool",
"optional",
")",
":",
"Whether",
"remote",
"calls",
"should",
"be",
"synchronous",
".",
"If",
"False",
"returned",
"results",
"may",
"be",
"AsyncResults",
"and",
"should",
"be",
"converted",
"by",
"the",
"caller",
"using",
"convert_result",
"()",
"before",
"use",
".",
"Default",
"is",
"True",
"."
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/distob.py#L631-L680
|
[
"def",
"methodcall",
"(",
"obj",
",",
"method_name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"this_engine",
"=",
"distob",
".",
"engine",
".",
"eid",
"args",
"=",
"[",
"obj",
"]",
"+",
"list",
"(",
"args",
")",
"prefer_local",
"=",
"kwargs",
".",
"pop",
"(",
"'prefer_local'",
",",
"None",
")",
"if",
"prefer_local",
"is",
"None",
":",
"if",
"isinstance",
"(",
"obj",
",",
"Remote",
")",
":",
"prefer_local",
"=",
"obj",
".",
"prefer_local",
"else",
":",
"prefer_local",
"=",
"True",
"block",
"=",
"kwargs",
".",
"pop",
"(",
"'block'",
",",
"True",
")",
"execloc",
",",
"args",
",",
"kwargs",
"=",
"_process_args",
"(",
"args",
",",
"kwargs",
",",
"prefer_local",
")",
"if",
"execloc",
"is",
"this_engine",
":",
"r",
"=",
"getattr",
"(",
"args",
"[",
"0",
"]",
",",
"method_name",
")",
"(",
"*",
"args",
"[",
"1",
":",
"]",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"if",
"False",
"and",
"prefer_local",
":",
"# result cache disabled until issue mattja/distob#1 is fixed",
"try",
":",
"kwtuple",
"=",
"tuple",
"(",
"(",
"k",
",",
"kwargs",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"sorted",
"(",
"kwargs",
".",
"keys",
"(",
")",
")",
")",
"key",
"=",
"(",
"args",
"[",
"0",
"]",
",",
"method_name",
",",
"args",
",",
"kwtuple",
")",
"r",
"=",
"_call_cache",
"[",
"key",
"]",
"except",
"TypeError",
"as",
"te",
":",
"if",
"te",
".",
"args",
"[",
"0",
"]",
"[",
":",
"10",
"]",
"==",
"'unhashable'",
":",
"#print(\"unhashable. won't be able to cache\")",
"r",
"=",
"_uncached_methodcall",
"(",
"execloc",
",",
"args",
"[",
"0",
"]",
",",
"method_name",
",",
"*",
"args",
"[",
"1",
":",
"]",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"raise",
"except",
"KeyError",
":",
"r",
"=",
"_uncached_methodcall",
"(",
"execloc",
",",
"args",
"[",
"0",
"]",
",",
"method_name",
",",
"*",
"args",
"[",
"1",
":",
"]",
",",
"*",
"*",
"kwargs",
")",
"if",
"block",
":",
"_call_cache",
"[",
"key",
"]",
"=",
"r",
".",
"r",
"else",
":",
"r",
"=",
"_uncached_methodcall",
"(",
"execloc",
",",
"args",
"[",
"0",
"]",
",",
"method_name",
",",
"*",
"args",
"[",
"1",
":",
"]",
",",
"*",
"*",
"kwargs",
")",
"if",
"block",
":",
"return",
"convert_result",
"(",
"r",
")",
"else",
":",
"return",
"r"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
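
A hedged usage sketch for methodcall, under the same assumptions as the call example above (running cluster, a package-level scatter returning a Remote proxy):

import numpy as np
import distob

ra = distob.scatter(np.ones((4, 4)))              # assumed: Remote array proxy
col_sums = distob.methodcall(ra, 'sum', axis=0)   # forwards ra.sum(axis=0)
ar = distob.methodcall(ra, 'sum', axis=0, block=False)
col_sums2 = distob.convert_result(ar)             # convert the async result
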
valid
|
_scan_instance
|
(Executed on remote or local engine) Examines an object and returns info
about any instance-specific methods or attributes.
(For example, any attributes that were set by __init__() )
By default, methods or attributes starting with an underscore are ignored.
Args:
obj (object): the object to scan. must be on this local engine.
include_underscore (bool or sequence of str): Should methods or
attributes that start with an underscore be proxied anyway? If a
sequence of names is provided then methods or attributes starting with
an underscore will only be proxied if their names are in the sequence.
exclude (sequence of str): names of any methods or attributes that should
not be reported.
|
distob/distob.py
|
def _scan_instance(obj, include_underscore, exclude):
"""(Executed on remote or local engine) Examines an object and returns info
about any instance-specific methods or attributes.
(For example, any attributes that were set by __init__() )
By default, methods or attributes starting with an underscore are ignored.
Args:
obj (object): the object to scan. must be on this local engine.
include_underscore (bool or sequence of str): Should methods or
attributes that start with an underscore be proxied anyway? If a
sequence of names is provided then methods or attributes starting with
an underscore will only be proxied if their names are in the sequence.
exclude (sequence of str): names of any methods or attributes that should
not be reported.
"""
from sys import getsizeof
always_exclude = ('__new__', '__init__', '__getattribute__', '__class__',
'__reduce__', '__reduce_ex__')
method_info = []
attributes_info = []
if hasattr(obj, '__dict__'):
for name in obj.__dict__:
if (name not in exclude and
name not in always_exclude and
(name[0] != '_' or
include_underscore is True or
name in include_underscore)):
f = obj.__dict__[name]
if hasattr(f, '__doc__'):
doc = f.__doc__
else:
doc = None
if callable(f) and not isinstance(f, type):
method_info.append((name, doc))
else:
attributes_info.append((name, doc))
return (method_info, attributes_info, getsizeof(obj))
|
def _scan_instance(obj, include_underscore, exclude):
"""(Executed on remote or local engine) Examines an object and returns info
about any instance-specific methods or attributes.
(For example, any attributes that were set by __init__() )
By default, methods or attributes starting with an underscore are ignored.
Args:
obj (object): the object to scan. must be on this local engine.
include_underscore (bool or sequence of str): Should methods or
attributes that start with an underscore be proxied anyway? If a
sequence of names is provided then methods or attributes starting with
an underscore will only be proxied if their names are in the sequence.
exclude (sequence of str): names of any methods or attributes that should
not be reported.
"""
from sys import getsizeof
always_exclude = ('__new__', '__init__', '__getattribute__', '__class__',
'__reduce__', '__reduce_ex__')
method_info = []
attributes_info = []
if hasattr(obj, '__dict__'):
for name in obj.__dict__:
if (name not in exclude and
name not in always_exclude and
(name[0] != '_' or
include_underscore is True or
name in include_underscore)):
f = obj.__dict__[name]
if hasattr(f, '__doc__'):
doc = f.__doc__
else:
doc = None
if callable(f) and not isinstance(f, type):
method_info.append((name, doc))
else:
attributes_info.append((name, doc))
return (method_info, attributes_info, getsizeof(obj))
|
[
"(",
"Executed",
"on",
"remote",
"or",
"local",
"engine",
")",
"Examines",
"an",
"object",
"and",
"returns",
"info",
"about",
"any",
"instance",
"-",
"specific",
"methods",
"or",
"attributes",
".",
"(",
"For",
"example",
"any",
"attributes",
"that",
"were",
"set",
"by",
"__init__",
"()",
")"
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/distob.py#L699-L736
|
[
"def",
"_scan_instance",
"(",
"obj",
",",
"include_underscore",
",",
"exclude",
")",
":",
"from",
"sys",
"import",
"getsizeof",
"always_exclude",
"=",
"(",
"'__new__'",
",",
"'__init__'",
",",
"'__getattribute__'",
",",
"'__class__'",
",",
"'__reduce__'",
",",
"'__reduce_ex__'",
")",
"method_info",
"=",
"[",
"]",
"attributes_info",
"=",
"[",
"]",
"if",
"hasattr",
"(",
"obj",
",",
"'__dict__'",
")",
":",
"for",
"name",
"in",
"obj",
".",
"__dict__",
":",
"if",
"(",
"name",
"not",
"in",
"exclude",
"and",
"name",
"not",
"in",
"always_exclude",
"and",
"(",
"name",
"[",
"0",
"]",
"!=",
"'_'",
"or",
"include_underscore",
"is",
"True",
"or",
"name",
"in",
"include_underscore",
")",
")",
":",
"f",
"=",
"obj",
".",
"__dict__",
"[",
"name",
"]",
"if",
"hasattr",
"(",
"f",
",",
"'__doc__'",
")",
":",
"doc",
"=",
"f",
".",
"__doc__",
"else",
":",
"doc",
"=",
"None",
"if",
"callable",
"(",
"f",
")",
"and",
"not",
"isinstance",
"(",
"f",
",",
"type",
")",
":",
"method_info",
".",
"append",
"(",
"(",
"name",
",",
"doc",
")",
")",
"else",
":",
"attributes_info",
".",
"append",
"(",
"(",
"name",
",",
"doc",
")",
")",
"return",
"(",
"method_info",
",",
"attributes_info",
",",
"getsizeof",
"(",
"obj",
")",
")"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
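
What _scan_instance reports can be reproduced in a few lines. This standalone sketch (the toy Point class is hypothetical) applies the same default filter, instance attributes whose names do not start with an underscore, and shows the getsizeof figure returned alongside the method/attribute info:

from sys import getsizeof

class Point(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y
        self._hidden = 42

p = Point(1, 2)
names = [n for n in p.__dict__ if not n.startswith('_')]
print(names)         # ['x', 'y'] -- _hidden is skipped unless included
print(getsizeof(p))  # the size figure returned alongside the scan results
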
valid
|
proxy_methods
|
class decorator. Modifies `Remote` subclasses to add proxy methods and
attributes that mimic those defined in class `base`.
Example:
@proxy_methods(Tree)
class RemoteTree(Remote, Tree)
The decorator registers the new proxy class and specifies which methods
and attributes of class `base` should be proxied via a remote call to
a real object, and which methods/attributes should not be proxied but
instead called directly on the instance of the proxy class.
By default all methods and attributes of the class `base` will be
proxied except those starting with an underscore.
The MRO of the decorated class is respected:
Any methods and attributes defined in the decorated class
(or in other bases of the decorated class that do not come after `base`
in its MRO) will override those added by this decorator,
so that `base` is treated like a base class.
Args:
base (type): The class whose instances should be remotely controlled.
include_underscore (bool or sequence of str): Should methods or
attributes that start with an underscore be proxied anyway? If a
sequence of names is provided then methods or attributes starting with
an underscore will only be proxied if their names are in the sequence.
exclude (sequence of str): Names of any methods or attributes that
should not be proxied.
supers (bool): Proxy methods and attributes defined in superclasses
of ``base``, in addition to those defined directly in class ``base``
|
distob/distob.py
|
def proxy_methods(base, include_underscore=None, exclude=None, supers=True):
"""class decorator. Modifies `Remote` subclasses to add proxy methods and
attributes that mimic those defined in class `base`.
Example:
@proxy_methods(Tree)
class RemoteTree(Remote, Tree)
The decorator registers the new proxy class and specifies which methods
and attributes of class `base` should be proxied via a remote call to
a real object, and which methods/attributes should not be proxied but
instead called directly on the instance of the proxy class.
By default all methods and attributes of the class `base` will be
proxied except those starting with an underscore.
The MRO of the decorated class is respected:
Any methods and attributes defined in the decorated class
(or in other bases of the decorated class that do not come after `base`
in its MRO) will override those added by this decorator,
so that `base` is treated like a base class.
Args:
base (type): The class whose instances should be remotely controlled.
include_underscore (bool or sequence of str): Should methods or
attributes that start with an underscore be proxied anyway? If a
sequence of names is provided then methods or attributes starting with
an underscore will only be proxied if their names are in the sequence.
exclude (sequence of str): Names of any methods or attributes that
should not be proxied.
supers (bool): Proxy methods and attributes defined in superclasses
of ``base``, in addition to those defined directly in class ``base``
"""
always_exclude = ('__new__', '__init__', '__getattribute__', '__class__',
'__reduce__', '__reduce_ex__')
if isinstance(include_underscore, str):
include_underscore = (include_underscore,)
if isinstance(exclude, str):
exclude = (exclude,)
if not include_underscore:
include_underscore = ()
if not exclude:
exclude = ()
def rebuild_class(cls):
# Identify any bases of cls that do not come after `base` in the list:
bases_other = list(cls.__bases__)
if bases_other[-1] is object:
bases_other.pop()
if base in bases_other:
bases_other = bases_other[:bases_other.index(base)]
if not issubclass(cls.__bases__[0], Remote):
raise DistobTypeError('First base class must be subclass of Remote')
if not issubclass(base, object):
raise DistobTypeError('Only new-style classes currently supported')
dct = cls.__dict__.copy()
if cls.__doc__ is None or '\n' not in cls.__doc__:
base_doc = base.__doc__
if base_doc is None:
base_doc = ''
dct['__doc__'] = """Local object representing a remote %s
It can be used just like a %s object, but behind the scenes
all requests are passed to a real %s object on a remote host.
""" % ((base.__name__,)*3) + base_doc
newcls = type(cls.__name__, cls.__bases__, dct)
newcls._include_underscore = include_underscore
newcls._exclude = exclude
if supers:
proxied_classes = base.__mro__[:-1]
else:
proxied_classes = (base,)
for c in proxied_classes:
for name in c.__dict__:
#respect MRO: proxy an attribute only if it is not overridden
if (name not in newcls.__dict__ and
all(name not in b.__dict__
for c in bases_other for b in c.mro()[:-1]) and
name not in newcls._exclude and
name not in always_exclude and
(name[0] != '_' or
newcls._include_underscore is True or
name in newcls._include_underscore)):
f = c.__dict__[name]
if hasattr(f, '__doc__'):
doc = f.__doc__
else:
doc = None
if callable(f) and not isinstance(f, type):
setattr(newcls, name, _make_proxy_method(name, doc))
else:
setattr(newcls, name, _make_proxy_property(name, doc))
newcls.__module__ = '__main__' # cause dill to pickle it whole
import __main__
__main__.__dict__[newcls.__name__] = newcls # for dill..
ObjectHub.register_proxy_type(base, newcls)
return newcls
return rebuild_class
|
def proxy_methods(base, include_underscore=None, exclude=None, supers=True):
"""class decorator. Modifies `Remote` subclasses to add proxy methods and
attributes that mimic those defined in class `base`.
Example:
@proxy_methods(Tree)
class RemoteTree(Remote, Tree)
The decorator registers the new proxy class and specifies which methods
and attributes of class `base` should be proxied via a remote call to
a real object, and which methods/attributes should not be proxied but
instead called directly on the instance of the proxy class.
By default all methods and attributes of the class `base` will be
proxied except those starting with an underscore.
The MRO of the decorated class is respected:
Any methods and attributes defined in the decorated class
(or in other bases of the decorated class that do not come after `base`
in its MRO) will override those added by this decorator,
so that `base` is treated like a base class.
Args:
base (type): The class whose instances should be remotely controlled.
include_underscore (bool or sequence of str): Should methods or
attributes that start with an underscore be proxied anyway? If a
sequence of names is provided then methods or attributes starting with
an underscore will only be proxied if their names are in the sequence.
exclude (sequence of str): Names of any methods or attributes that
should not be proxied.
supers (bool): Proxy methods and attributes defined in superclasses
of ``base``, in addition to those defined directly in class ``base``
"""
always_exclude = ('__new__', '__init__', '__getattribute__', '__class__',
'__reduce__', '__reduce_ex__')
if isinstance(include_underscore, str):
include_underscore = (include_underscore,)
if isinstance(exclude, str):
exclude = (exclude,)
if not include_underscore:
include_underscore = ()
if not exclude:
exclude = ()
def rebuild_class(cls):
# Identify any bases of cls that do not come after `base` in the list:
bases_other = list(cls.__bases__)
if bases_other[-1] is object:
bases_other.pop()
if base in bases_other:
bases_other = bases_other[:bases_other.index(base)]
if not issubclass(cls.__bases__[0], Remote):
raise DistobTypeError('First base class must be subclass of Remote')
if not issubclass(base, object):
raise DistobTypeError('Only new-style classes currently supported')
dct = cls.__dict__.copy()
if cls.__doc__ is None or '\n' not in cls.__doc__:
base_doc = base.__doc__
if base_doc is None:
base_doc = ''
dct['__doc__'] = """Local object representing a remote %s
It can be used just like a %s object, but behind the scenes
all requests are passed to a real %s object on a remote host.
""" % ((base.__name__,)*3) + base_doc
newcls = type(cls.__name__, cls.__bases__, dct)
newcls._include_underscore = include_underscore
newcls._exclude = exclude
if supers:
proxied_classes = base.__mro__[:-1]
else:
proxied_classes = (base,)
for c in proxied_classes:
for name in c.__dict__:
#respect MRO: proxy an attribute only if it is not overridden
if (name not in newcls.__dict__ and
all(name not in b.__dict__
for c in bases_other for b in c.mro()[:-1]) and
name not in newcls._exclude and
name not in always_exclude and
(name[0] != '_' or
newcls._include_underscore is True or
name in newcls._include_underscore)):
f = c.__dict__[name]
if hasattr(f, '__doc__'):
doc = f.__doc__
else:
doc = None
if callable(f) and not isinstance(f, type):
setattr(newcls, name, _make_proxy_method(name, doc))
else:
setattr(newcls, name, _make_proxy_property(name, doc))
newcls.__module__ = '__main__' # cause dill to pickle it whole
import __main__
__main__.__dict__[newcls.__name__] = newcls # for dill..
ObjectHub.register_proxy_type(base, newcls)
return newcls
return rebuild_class
|
[
"class",
"decorator",
".",
"Modifies",
"Remote",
"subclasses",
"to",
"add",
"proxy",
"methods",
"and",
"attributes",
"that",
"mimic",
"those",
"defined",
"in",
"class",
"base",
"."
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/distob.py#L845-L942
|
[
"def",
"proxy_methods",
"(",
"base",
",",
"include_underscore",
"=",
"None",
",",
"exclude",
"=",
"None",
",",
"supers",
"=",
"True",
")",
":",
"always_exclude",
"=",
"(",
"'__new__'",
",",
"'__init__'",
",",
"'__getattribute__'",
",",
"'__class__'",
",",
"'__reduce__'",
",",
"'__reduce_ex__'",
")",
"if",
"isinstance",
"(",
"include_underscore",
",",
"str",
")",
":",
"include_underscore",
"=",
"(",
"include_underscore",
",",
")",
"if",
"isinstance",
"(",
"exclude",
",",
"str",
")",
":",
"exclude",
"=",
"(",
"exclude",
",",
")",
"if",
"not",
"include_underscore",
":",
"include_underscore",
"=",
"(",
")",
"if",
"not",
"exclude",
":",
"exclude",
"=",
"(",
")",
"def",
"rebuild_class",
"(",
"cls",
")",
":",
"# Identify any bases of cls that do not come after `base` in the list:",
"bases_other",
"=",
"list",
"(",
"cls",
".",
"__bases__",
")",
"if",
"bases_other",
"[",
"-",
"1",
"]",
"is",
"object",
":",
"bases_other",
".",
"pop",
"(",
")",
"if",
"base",
"in",
"bases_other",
":",
"bases_other",
"=",
"bases_other",
"[",
":",
"bases_other",
".",
"index",
"(",
"base",
")",
"]",
"if",
"not",
"issubclass",
"(",
"cls",
".",
"__bases__",
"[",
"0",
"]",
",",
"Remote",
")",
":",
"raise",
"DistobTypeError",
"(",
"'First base class must be subclass of Remote'",
")",
"if",
"not",
"issubclass",
"(",
"base",
",",
"object",
")",
":",
"raise",
"DistobTypeError",
"(",
"'Only new-style classes currently supported'",
")",
"dct",
"=",
"cls",
".",
"__dict__",
".",
"copy",
"(",
")",
"if",
"cls",
".",
"__doc__",
"is",
"None",
"or",
"'\\n'",
"not",
"in",
"cls",
".",
"__doc__",
":",
"base_doc",
"=",
"base",
".",
"__doc__",
"if",
"base_doc",
"is",
"None",
":",
"base_doc",
"=",
"''",
"dct",
"[",
"'__doc__'",
"]",
"=",
"\"\"\"Local object representing a remote %s\n It can be used just like a %s object, but behind the scenes \n all requests are passed to a real %s object on a remote host.\n\n \"\"\"",
"%",
"(",
"(",
"base",
".",
"__name__",
",",
")",
"*",
"3",
")",
"+",
"base_doc",
"newcls",
"=",
"type",
"(",
"cls",
".",
"__name__",
",",
"cls",
".",
"__bases__",
",",
"dct",
")",
"newcls",
".",
"_include_underscore",
"=",
"include_underscore",
"newcls",
".",
"_exclude",
"=",
"exclude",
"if",
"supers",
":",
"proxied_classes",
"=",
"base",
".",
"__mro__",
"[",
":",
"-",
"1",
"]",
"else",
":",
"proxied_classes",
"=",
"(",
"base",
",",
")",
"for",
"c",
"in",
"proxied_classes",
":",
"for",
"name",
"in",
"c",
".",
"__dict__",
":",
"#respect MRO: proxy an attribute only if it is not overridden",
"if",
"(",
"name",
"not",
"in",
"newcls",
".",
"__dict__",
"and",
"all",
"(",
"name",
"not",
"in",
"b",
".",
"__dict__",
"for",
"c",
"in",
"bases_other",
"for",
"b",
"in",
"c",
".",
"mro",
"(",
")",
"[",
":",
"-",
"1",
"]",
")",
"and",
"name",
"not",
"in",
"newcls",
".",
"_exclude",
"and",
"name",
"not",
"in",
"always_exclude",
"and",
"(",
"name",
"[",
"0",
"]",
"!=",
"'_'",
"or",
"newcls",
".",
"_include_underscore",
"is",
"True",
"or",
"name",
"in",
"newcls",
".",
"_include_underscore",
")",
")",
":",
"f",
"=",
"c",
".",
"__dict__",
"[",
"name",
"]",
"if",
"hasattr",
"(",
"f",
",",
"'__doc__'",
")",
":",
"doc",
"=",
"f",
".",
"__doc__",
"else",
":",
"doc",
"=",
"None",
"if",
"callable",
"(",
"f",
")",
"and",
"not",
"isinstance",
"(",
"f",
",",
"type",
")",
":",
"setattr",
"(",
"newcls",
",",
"name",
",",
"_make_proxy_method",
"(",
"name",
",",
"doc",
")",
")",
"else",
":",
"setattr",
"(",
"newcls",
",",
"name",
",",
"_make_proxy_property",
"(",
"name",
",",
"doc",
")",
")",
"newcls",
".",
"__module__",
"=",
"'__main__'",
"# cause dill to pickle it whole",
"import",
"__main__",
"__main__",
".",
"__dict__",
"[",
"newcls",
".",
"__name__",
"]",
"=",
"newcls",
"# for dill..",
"ObjectHub",
".",
"register_proxy_type",
"(",
"base",
",",
"newcls",
")",
"return",
"newcls",
"return",
"rebuild_class"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
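
A sketch expanding the decorator example from the docstring above. It assumes Remote and proxy_methods are importable from the package top level; Tree is a toy class invented here:

from distob import Remote, proxy_methods

class Tree(object):
    """A toy class whose instances will be remotely controlled."""
    def __init__(self):
        self.leaves = []
    def grow(self):
        self.leaves.append('leaf')

@proxy_methods(Tree)
class RemoteTree(Remote, Tree):
    pass

# RemoteTree now mimics Tree's public methods; scattering a Tree instance
# should yield a RemoteTree whose grow() is forwarded to the real object.
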
valid
|
_async_scatter
|
Distribute an obj or list to remote engines.
Return an async result or (possibly nested) lists of async results,
each of which is a Ref
|
distob/distob.py
|
def _async_scatter(obj, destination=None):
"""Distribute an obj or list to remote engines.
Return an async result or (possibly nested) lists of async results,
each of which is a Ref
"""
#TODO Instead of special cases for strings and Remote, should have a
# list of types that should not be proxied, inc. strings and Remote.
if (isinstance(obj, Remote) or
isinstance(obj, numbers.Number) or
obj is None):
return obj
if (isinstance(obj, collections.Sequence) and
not isinstance(obj, string_types)):
ars = []
if destination is not None:
assert(len(destination) == len(obj))
for i in range(len(obj)):
ars.append(_async_scatter(obj[i], destination[i]))
else:
for i in range(len(obj)):
ars.append(_async_scatter(obj[i], destination=None))
return ars
else:
if distob.engine is None:
setup_engines()
client = distob.engine._client
dv = distob.engine._dv
def remote_put(obj):
return Ref(obj)
if destination is not None:
assert(isinstance(destination, numbers.Integral))
dv.targets = destination
else:
dv.targets = _async_scatter.next_engine
_async_scatter.next_engine = (
_async_scatter.next_engine + 1) % len(client)
ar_ref = dv.apply_async(remote_put, obj)
dv.targets = client.ids
return ar_ref
|
def _async_scatter(obj, destination=None):
"""Distribute an obj or list to remote engines.
Return an async result or (possibly nested) lists of async results,
each of which is a Ref
"""
#TODO Instead of special cases for strings and Remote, should have a
# list of types that should not be proxied, inc. strings and Remote.
if (isinstance(obj, Remote) or
isinstance(obj, numbers.Number) or
obj is None):
return obj
if (isinstance(obj, collections.Sequence) and
not isinstance(obj, string_types)):
ars = []
if destination is not None:
assert(len(destination) == len(obj))
for i in range(len(obj)):
ars.append(_async_scatter(obj[i], destination[i]))
else:
for i in range(len(obj)):
ars.append(_async_scatter(obj[i], destination=None))
return ars
else:
if distob.engine is None:
setup_engines()
client = distob.engine._client
dv = distob.engine._dv
def remote_put(obj):
return Ref(obj)
if destination is not None:
assert(isinstance(destination, numbers.Integral))
dv.targets = destination
else:
dv.targets = _async_scatter.next_engine
_async_scatter.next_engine = (
_async_scatter.next_engine + 1) % len(client)
ar_ref = dv.apply_async(remote_put, obj)
dv.targets = client.ids
return ar_ref
|
[
"Distribute",
"an",
"obj",
"or",
"list",
"to",
"remote",
"engines",
".",
"Return",
"an",
"async",
"result",
"or",
"(",
"possibly",
"nested",
")",
"lists",
"of",
"async",
"results",
"each",
"of",
"which",
"is",
"a",
"Ref"
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/distob.py#L945-L983
|
[
"def",
"_async_scatter",
"(",
"obj",
",",
"destination",
"=",
"None",
")",
":",
"#TODO Instead of special cases for strings and Remote, should have a",
"# list of types that should not be proxied, inc. strings and Remote.",
"if",
"(",
"isinstance",
"(",
"obj",
",",
"Remote",
")",
"or",
"isinstance",
"(",
"obj",
",",
"numbers",
".",
"Number",
")",
"or",
"obj",
"is",
"None",
")",
":",
"return",
"obj",
"if",
"(",
"isinstance",
"(",
"obj",
",",
"collections",
".",
"Sequence",
")",
"and",
"not",
"isinstance",
"(",
"obj",
",",
"string_types",
")",
")",
":",
"ars",
"=",
"[",
"]",
"if",
"destination",
"is",
"not",
"None",
":",
"assert",
"(",
"len",
"(",
"destination",
")",
"==",
"len",
"(",
"obj",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"obj",
")",
")",
":",
"ars",
".",
"append",
"(",
"_async_scatter",
"(",
"obj",
"[",
"i",
"]",
",",
"destination",
"[",
"i",
"]",
")",
")",
"else",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"obj",
")",
")",
":",
"ars",
".",
"append",
"(",
"_async_scatter",
"(",
"obj",
"[",
"i",
"]",
",",
"destination",
"=",
"None",
")",
")",
"return",
"ars",
"else",
":",
"if",
"distob",
".",
"engine",
"is",
"None",
":",
"setup_engines",
"(",
")",
"client",
"=",
"distob",
".",
"engine",
".",
"_client",
"dv",
"=",
"distob",
".",
"engine",
".",
"_dv",
"def",
"remote_put",
"(",
"obj",
")",
":",
"return",
"Ref",
"(",
"obj",
")",
"if",
"destination",
"is",
"not",
"None",
":",
"assert",
"(",
"isinstance",
"(",
"destination",
",",
"numbers",
".",
"Integral",
")",
")",
"dv",
".",
"targets",
"=",
"destination",
"else",
":",
"dv",
".",
"targets",
"=",
"_async_scatter",
".",
"next_engine",
"_async_scatter",
".",
"next_engine",
"=",
"(",
"_async_scatter",
".",
"next_engine",
"+",
"1",
")",
"%",
"len",
"(",
"client",
")",
"ar_ref",
"=",
"dv",
".",
"apply_async",
"(",
"remote_put",
",",
"obj",
")",
"dv",
".",
"targets",
"=",
"client",
".",
"ids",
"return",
"ar_ref"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
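A minimal standalone sketch of the round-robin placement that _async_scatter keeps on its next_engine function attribute; the engine pool is reduced to a plain integer count here, and round_robin_place is an invented name standing in for the real scatter machinery:

import numbers

def round_robin_place(obj, n_engines):
    # Mirror _async_scatter: recurse into sequences, pass scalars through,
    # and assign each remaining object to the next engine in cyclic order.
    if isinstance(obj, numbers.Number) or obj is None:
        return obj
    if isinstance(obj, (list, tuple)):
        return [round_robin_place(item, n_engines) for item in obj]
    target = round_robin_place.next_engine
    round_robin_place.next_engine = (target + 1) % n_engines
    return (target, obj)  # stands in for the AsyncResult holding a Ref

round_robin_place.next_engine = 0
print(round_robin_place(['a', 'b', 3, ['c', 'd']], n_engines=2))
# [(0, 'a'), (1, 'b'), 3, [(0, 'c'), (1, 'd')]]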
valid
|
_ars_to_proxies
|
wait for async results and return proxy objects
Args:
ars: AsyncResult (or sequence of AsyncResults), each result type ``Ref``.
Returns:
Remote* proxy object (or list of them)
|
distob/distob.py
|
def _ars_to_proxies(ars):
"""wait for async results and return proxy objects
Args:
ars: AsyncResult (or sequence of AsyncResults), each result type ``Ref``.
Returns:
Remote* proxy object (or list of them)
"""
if (isinstance(ars, Remote) or
isinstance(ars, numbers.Number) or
ars is None):
return ars
elif isinstance(ars, collections.Sequence):
res = []
for i in range(len(ars)):
res.append(_ars_to_proxies(ars[i]))
return res
elif isinstance(ars, ipyparallel.AsyncResult):
ref = ars.r
ObClass = ref.type
if ObClass in distob.engine.proxy_types:
RemoteClass = distob.engine.proxy_types[ObClass]
else:
RemoteClass = type(
'Remote' + ObClass.__name__, (Remote, ObClass), dict())
RemoteClass = proxy_methods(ObClass)(RemoteClass)
proxy_obj = RemoteClass(ref)
return proxy_obj
else:
raise DistobTypeError('Unpacking ars: unexpected type %s' % type(ars))
|
def _ars_to_proxies(ars):
"""wait for async results and return proxy objects
Args:
ars: AsyncResult (or sequence of AsyncResults), each result type ``Ref``.
Returns:
Remote* proxy object (or list of them)
"""
if (isinstance(ars, Remote) or
isinstance(ars, numbers.Number) or
ars is None):
return ars
elif isinstance(ars, collections.Sequence):
res = []
for i in range(len(ars)):
res.append(_ars_to_proxies(ars[i]))
return res
elif isinstance(ars, ipyparallel.AsyncResult):
ref = ars.r
ObClass = ref.type
if ObClass in distob.engine.proxy_types:
RemoteClass = distob.engine.proxy_types[ObClass]
else:
RemoteClass = type(
'Remote' + ObClass.__name__, (Remote, ObClass), dict())
RemoteClass = proxy_methods(ObClass)(RemoteClass)
proxy_obj = RemoteClass(ref)
return proxy_obj
else:
raise DistobTypeError('Unpacking ars: unexpected type %s' % type(ars))
|
[
"wait",
"for",
"async",
"results",
"and",
"return",
"proxy",
"objects",
"Args",
":",
"ars",
":",
"AsyncResult",
"(",
"or",
"sequence",
"of",
"AsyncResults",
")",
"each",
"result",
"type",
"Ref",
".",
"Returns",
":",
"Remote",
"*",
"proxy",
"object",
"(",
"or",
"list",
"of",
"them",
")"
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/distob.py#L988-L1016
|
[
"def",
"_ars_to_proxies",
"(",
"ars",
")",
":",
"if",
"(",
"isinstance",
"(",
"ars",
",",
"Remote",
")",
"or",
"isinstance",
"(",
"ars",
",",
"numbers",
".",
"Number",
")",
"or",
"ars",
"is",
"None",
")",
":",
"return",
"ars",
"elif",
"isinstance",
"(",
"ars",
",",
"collections",
".",
"Sequence",
")",
":",
"res",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"ars",
")",
")",
":",
"res",
".",
"append",
"(",
"_ars_to_proxies",
"(",
"ars",
"[",
"i",
"]",
")",
")",
"return",
"res",
"elif",
"isinstance",
"(",
"ars",
",",
"ipyparallel",
".",
"AsyncResult",
")",
":",
"ref",
"=",
"ars",
".",
"r",
"ObClass",
"=",
"ref",
".",
"type",
"if",
"ObClass",
"in",
"distob",
".",
"engine",
".",
"proxy_types",
":",
"RemoteClass",
"=",
"distob",
".",
"engine",
".",
"proxy_types",
"[",
"ObClass",
"]",
"else",
":",
"RemoteClass",
"=",
"type",
"(",
"'Remote'",
"+",
"ObClass",
".",
"__name__",
",",
"(",
"Remote",
",",
"ObClass",
")",
",",
"dict",
"(",
")",
")",
"RemoteClass",
"=",
"proxy_methods",
"(",
"ObClass",
")",
"(",
"RemoteClass",
")",
"proxy_obj",
"=",
"RemoteClass",
"(",
"ref",
")",
"return",
"proxy_obj",
"else",
":",
"raise",
"DistobTypeError",
"(",
"'Unpacking ars: unexpected type %s'",
"%",
"type",
"(",
"ars",
")",
")"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
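The notable move in _ars_to_proxies is building a proxy class on the fly with three-argument type() when no proxy type has been registered. A self-contained sketch of that pattern; the stripped-down Remote base and the registry memo are illustrative, not distob's real implementation:

class Remote(object):
    # Stand-in base; the real distob.Remote forwards attribute access.
    def __init__(self, ref):
        self._ref = ref

_registry = {}

def make_proxy_class(ob_class):
    # Reuse a registered proxy type if one exists, otherwise synthesise
    # 'Remote<Name>' inheriting from both Remote and the real class.
    if ob_class not in _registry:
        _registry[ob_class] = type(
            'Remote' + ob_class.__name__, (Remote, ob_class), dict())
    return _registry[ob_class]

print(make_proxy_class(dict).__name__)  # Remotedict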
valid
|
_scatter_ndarray
|
Turn a numpy ndarray into a DistArray or RemoteArray
Args:
ar (array_like)
axis (int, optional): specifies along which axis to split the array to
distribute it. The default is to split along the last axis. `None` means
do not distribute.
destination (int or list of int, optional): Optionally force the array to
go to a specific engine. If an array is to be scattered along an axis,
this should be a list of engine ids with the same length as that axis.
blocksize (int): Optionally control the size of intervals into which the
distributed axis is split (the default splits the distributed axis
evenly over all computing engines).
|
distob/distob.py
|
def _scatter_ndarray(ar, axis=-1, destination=None, blocksize=None):
"""Turn a numpy ndarray into a DistArray or RemoteArray
Args:
ar (array_like)
axis (int, optional): specifies along which axis to split the array to
distribute it. The default is to split along the last axis. `None` means
do not distribute.
destination (int or list of int, optional): Optionally force the array to
go to a specific engine. If an array is to be scattered along an axis,
this should be a list of engine ids with the same length as that axis.
blocksize (int): Optionally control the size of intervals into which the
distributed axis is split (the default splits the distributed axis
evenly over all computing engines).
"""
from .arrays import DistArray, RemoteArray
shape = ar.shape
ndim = len(shape)
if axis is None:
return _directed_scatter([ar], destination=[destination],
blocksize=blocksize)[0]
if axis < -ndim or axis > ndim - 1:
raise DistobValueError('axis out of range')
if axis < 0:
axis = ndim + axis
n = shape[axis]
if n == 1:
return _directed_scatter([ar], destination=[destination])[0]
if isinstance(destination, collections.Sequence):
ne = len(destination) # number of engines to scatter array to
else:
if distob.engine is None:
setup_engines()
ne = distob.engine.nengines # by default scatter across all engines
if blocksize is None:
blocksize = ((n - 1) // ne) + 1
if blocksize > n:
blocksize = n
if isinstance(ar, DistArray):
if axis == ar._distaxis:
return ar
else:
raise DistobError('Currently can only scatter one axis of array')
# Currently, if requested to scatter an array that is already Remote and
# large, first get whole array locally, then scatter. Not really optimal.
if isinstance(ar, RemoteArray) and n > blocksize:
ar = ar._ob
s = slice(None)
subarrays = []
low = 0
for i in range(0, n // blocksize):
high = low + blocksize
index = (s,)*axis + (slice(low, high),) + (s,)*(ndim - axis - 1)
subarrays.append(ar[index])
low += blocksize
if n % blocksize != 0:
high = low + (n % blocksize)
index = (s,)*axis + (slice(low, high),) + (s,)*(ndim - axis - 1)
subarrays.append(ar[index])
subarrays = _directed_scatter(subarrays, destination=destination)
return DistArray(subarrays, axis)
|
def _scatter_ndarray(ar, axis=-1, destination=None, blocksize=None):
"""Turn a numpy ndarray into a DistArray or RemoteArray
Args:
ar (array_like)
axis (int, optional): specifies along which axis to split the array to
distribute it. The default is to split along the last axis. `None` means
do not distribute.
destination (int or list of int, optional): Optionally force the array to
go to a specific engine. If an array is to be scattered along an axis,
this should be a list of engine ids with the same length as that axis.
blocksize (int): Optionally control the size of intervals into which the
distributed axis is split (the default splits the distributed axis
evenly over all computing engines).
"""
from .arrays import DistArray, RemoteArray
shape = ar.shape
ndim = len(shape)
if axis is None:
return _directed_scatter([ar], destination=[destination],
blocksize=blocksize)[0]
if axis < -ndim or axis > ndim - 1:
raise DistobValueError('axis out of range')
if axis < 0:
axis = ndim + axis
n = shape[axis]
if n == 1:
return _directed_scatter([ar], destination=[destination])[0]
if isinstance(destination, collections.Sequence):
ne = len(destination) # number of engines to scatter array to
else:
if distob.engine is None:
setup_engines()
ne = distob.engine.nengines # by default scatter across all engines
if blocksize is None:
blocksize = ((n - 1) // ne) + 1
if blocksize > n:
blocksize = n
if isinstance(ar, DistArray):
if axis == ar._distaxis:
return ar
else:
raise DistobError('Currently can only scatter one axis of array')
# Currently, if requested to scatter an array that is already Remote and
# large, first get whole array locally, then scatter. Not really optimal.
if isinstance(ar, RemoteArray) and n > blocksize:
ar = ar._ob
s = slice(None)
subarrays = []
low = 0
for i in range(0, n // blocksize):
high = low + blocksize
index = (s,)*axis + (slice(low, high),) + (s,)*(ndim - axis - 1)
subarrays.append(ar[index])
low += blocksize
if n % blocksize != 0:
high = low + (n % blocksize)
index = (s,)*axis + (slice(low, high),) + (s,)*(ndim - axis - 1)
subarrays.append(ar[index])
subarrays = _directed_scatter(subarrays, destination=destination)
return DistArray(subarrays, axis)
|
[
"Turn",
"a",
"numpy",
"ndarray",
"into",
"a",
"DistArray",
"or",
"RemoteArray",
"Args",
":",
"ar",
"(",
"array_like",
")",
"axis",
"(",
"int",
"optional",
")",
":",
"specifies",
"along",
"which",
"axis",
"to",
"split",
"the",
"array",
"to",
"distribute",
"it",
".",
"The",
"default",
"is",
"to",
"split",
"along",
"the",
"last",
"axis",
".",
"None",
"means",
"do",
"not",
"distribute",
".",
"destination",
"(",
"int",
"or",
"list",
"of",
"int",
"optional",
")",
":",
"Optionally",
"force",
"the",
"array",
"to",
"go",
"to",
"a",
"specific",
"engine",
".",
"If",
"an",
"array",
"is",
"to",
"be",
"scattered",
"along",
"an",
"axis",
"this",
"should",
"be",
"a",
"list",
"of",
"engine",
"ids",
"with",
"the",
"same",
"length",
"as",
"that",
"axis",
".",
"blocksize",
"(",
"int",
")",
":",
"Optionally",
"control",
"the",
"size",
"of",
"intervals",
"into",
"which",
"the",
"distributed",
"axis",
"is",
"split",
"(",
"the",
"default",
"splits",
"the",
"distributed",
"axis",
"evenly",
"over",
"all",
"computing",
"engines",
")",
"."
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/distob.py#L1019-L1078
|
[
"def",
"_scatter_ndarray",
"(",
"ar",
",",
"axis",
"=",
"-",
"1",
",",
"destination",
"=",
"None",
",",
"blocksize",
"=",
"None",
")",
":",
"from",
".",
"arrays",
"import",
"DistArray",
",",
"RemoteArray",
"shape",
"=",
"ar",
".",
"shape",
"ndim",
"=",
"len",
"(",
"shape",
")",
"if",
"axis",
"is",
"None",
":",
"return",
"_directed_scatter",
"(",
"[",
"ar",
"]",
",",
"destination",
"=",
"[",
"destination",
"]",
",",
"blocksize",
"=",
"blocksize",
")",
"[",
"0",
"]",
"if",
"axis",
"<",
"-",
"ndim",
"or",
"axis",
">",
"ndim",
"-",
"1",
":",
"raise",
"DistobValueError",
"(",
"'axis out of range'",
")",
"if",
"axis",
"<",
"0",
":",
"axis",
"=",
"ndim",
"+",
"axis",
"n",
"=",
"shape",
"[",
"axis",
"]",
"if",
"n",
"==",
"1",
":",
"return",
"_directed_scatter",
"(",
"[",
"ar",
"]",
",",
"destination",
"=",
"[",
"destination",
"]",
")",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"destination",
",",
"collections",
".",
"Sequence",
")",
":",
"ne",
"=",
"len",
"(",
"destination",
")",
"# number of engines to scatter array to",
"else",
":",
"if",
"distob",
".",
"engine",
"is",
"None",
":",
"setup_engines",
"(",
")",
"ne",
"=",
"distob",
".",
"engine",
".",
"nengines",
"# by default scatter across all engines",
"if",
"blocksize",
"is",
"None",
":",
"blocksize",
"=",
"(",
"(",
"n",
"-",
"1",
")",
"//",
"ne",
")",
"+",
"1",
"if",
"blocksize",
">",
"n",
":",
"blocksize",
"=",
"n",
"if",
"isinstance",
"(",
"ar",
",",
"DistArray",
")",
":",
"if",
"axis",
"==",
"ar",
".",
"_distaxis",
":",
"return",
"ar",
"else",
":",
"raise",
"DistobError",
"(",
"'Currently can only scatter one axis of array'",
")",
"# Currently, if requested to scatter an array that is already Remote and",
"# large, first get whole array locally, then scatter. Not really optimal.",
"if",
"isinstance",
"(",
"ar",
",",
"RemoteArray",
")",
"and",
"n",
">",
"blocksize",
":",
"ar",
"=",
"ar",
".",
"_ob",
"s",
"=",
"slice",
"(",
"None",
")",
"subarrays",
"=",
"[",
"]",
"low",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"n",
"//",
"blocksize",
")",
":",
"high",
"=",
"low",
"+",
"blocksize",
"index",
"=",
"(",
"s",
",",
")",
"*",
"axis",
"+",
"(",
"slice",
"(",
"low",
",",
"high",
")",
",",
")",
"+",
"(",
"s",
",",
")",
"*",
"(",
"ndim",
"-",
"axis",
"-",
"1",
")",
"subarrays",
".",
"append",
"(",
"ar",
"[",
"index",
"]",
")",
"low",
"+=",
"blocksize",
"if",
"n",
"%",
"blocksize",
"!=",
"0",
":",
"high",
"=",
"low",
"+",
"(",
"n",
"%",
"blocksize",
")",
"index",
"=",
"(",
"s",
",",
")",
"*",
"axis",
"+",
"(",
"slice",
"(",
"low",
",",
"high",
")",
",",
")",
"+",
"(",
"s",
",",
")",
"*",
"(",
"ndim",
"-",
"axis",
"-",
"1",
")",
"subarrays",
".",
"append",
"(",
"ar",
"[",
"index",
"]",
")",
"subarrays",
"=",
"_directed_scatter",
"(",
"subarrays",
",",
"destination",
"=",
"destination",
")",
"return",
"DistArray",
"(",
"subarrays",
",",
"axis",
")"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
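The slicing above generalizes to any axis by building an index tuple of full slices in front of one bounded slice; trailing axes can be left implicit because numpy fills them with full slices. A standalone sketch of the same block split (split_axis is our name, not distob's):

import numpy as np

def split_axis(ar, axis, n_engines):
    # Same blocksize rule as _scatter_ndarray: ceil(n / n_engines).
    n = ar.shape[axis]
    blocksize = ((n - 1) // n_engines) + 1
    s = slice(None)
    return [ar[(s,) * axis + (slice(low, low + blocksize),)]
            for low in range(0, n, blocksize)]

a = np.arange(24).reshape(2, 3, 4)
print([b.shape for b in split_axis(a, axis=2, n_engines=3)])
# [(2, 3, 2), (2, 3, 2)]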
valid
|
scatter
|
Distribute obj or list to remote engines, returning proxy objects
Args:
obj: any python object, or list of objects
axis (int, optional): Can be used if scattering a numpy array,
specifying along which axis to split the array to distribute it. The
default is to split along the last axis. `None` means do not distribute
blocksize (int, optional): Can be used if scattering a numpy array.
Optionally control the size of intervals into which the distributed
axis is split (the default splits the distributed axis evenly over all
computing engines).
|
distob/distob.py
|
def scatter(obj, axis=-1, blocksize=None):
"""Distribute obj or list to remote engines, returning proxy objects
Args:
obj: any python object, or list of objects
axis (int, optional): Can be used if scattering a numpy array,
specifying along which axis to split the array to distribute it. The
default is to split along the last axis. `None` means do not distribute
blocksize (int, optional): Can be used if scattering a numpy array.
Optionally control the size of intervals into which the distributed
axis is split (the default splits the distributed axis evenly over all
computing engines).
"""
if hasattr(obj, '__distob_scatter__'):
return obj.__distob_scatter__(axis, None, blocksize)
if distob._have_numpy and (isinstance(obj, np.ndarray) or
hasattr(type(obj), '__array_interface__')):
        return _scatter_ndarray(obj, axis, blocksize=blocksize)
elif isinstance(obj, Remote):
return obj
ars = _async_scatter(obj)
proxy_obj = _ars_to_proxies(ars)
return proxy_obj
|
def scatter(obj, axis=-1, blocksize=None):
"""Distribute obj or list to remote engines, returning proxy objects
Args:
obj: any python object, or list of objects
axis (int, optional): Can be used if scattering a numpy array,
specifying along which axis to split the array to distribute it. The
default is to split along the last axis. `None` means do not distribute
blocksize (int, optional): Can be used if scattering a numpy array.
Optionally control the size of intervals into which the distributed
axis is split (the default splits the distributed axis evenly over all
computing engines).
"""
if hasattr(obj, '__distob_scatter__'):
return obj.__distob_scatter__(axis, None, blocksize)
if distob._have_numpy and (isinstance(obj, np.ndarray) or
hasattr(type(obj), '__array_interface__')):
        return _scatter_ndarray(obj, axis, blocksize=blocksize)
elif isinstance(obj, Remote):
return obj
ars = _async_scatter(obj)
proxy_obj = _ars_to_proxies(ars)
return proxy_obj
|
[
"Distribute",
"obj",
"or",
"list",
"to",
"remote",
"engines",
"returning",
"proxy",
"objects",
"Args",
":",
"obj",
":",
"any",
"python",
"object",
"or",
"list",
"of",
"objects",
"axis",
"(",
"int",
"optional",
")",
":",
"Can",
"be",
"used",
"if",
"scattering",
"a",
"numpy",
"array",
"specifying",
"along",
"which",
"axis",
"to",
"split",
"the",
"array",
"to",
"distribute",
"it",
".",
"The",
"default",
"is",
"to",
"split",
"along",
"the",
"last",
"axis",
".",
"None",
"means",
"do",
"not",
"distribute",
"blocksize",
"(",
"int",
"optional",
")",
":",
"Can",
"be",
"used",
"if",
"scattering",
"a",
"numpy",
"array",
".",
"Optionally",
"control",
"the",
"size",
"of",
"intervals",
"into",
"which",
"the",
"distributed",
"axis",
"is",
"split",
"(",
"the",
"default",
"splits",
"the",
"distributed",
"axis",
"evenly",
"over",
"all",
"computing",
"engines",
")",
"."
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/distob.py#L1107-L1128
|
[
"def",
"scatter",
"(",
"obj",
",",
"axis",
"=",
"-",
"1",
",",
"blocksize",
"=",
"None",
")",
":",
"if",
"hasattr",
"(",
"obj",
",",
"'__distob_scatter__'",
")",
":",
"return",
"obj",
".",
"__distob_scatter__",
"(",
"axis",
",",
"None",
",",
"blocksize",
")",
"if",
"distob",
".",
"_have_numpy",
"and",
"(",
"isinstance",
"(",
"obj",
",",
"np",
".",
"ndarray",
")",
"or",
"hasattr",
"(",
"type",
"(",
"obj",
")",
",",
"'__array_interface__'",
")",
")",
":",
"return",
"_scatter_ndarray",
"(",
"obj",
",",
"axis",
",",
"blocksize",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"Remote",
")",
":",
"return",
"obj",
"ars",
"=",
"_async_scatter",
"(",
"obj",
")",
"proxy_obj",
"=",
"_ars_to_proxies",
"(",
"ars",
")",
"return",
"proxy_obj"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
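A typical round trip, assuming an ipyparallel cluster is already running (e.g. started with `ipcluster start`) and that the top-level distob package re-exports scatter and gather as its README shows:

import numpy as np
from distob import scatter, gather

a = np.random.rand(1000, 50)
da = scatter(a, axis=0)   # DistArray: blocks of rows spread across engines
b = gather(da)            # pull the pieces back into one local ndarray
assert np.allclose(a, b)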
valid
|
gather
|
Retrieve objects that have been distributed, making them local again
|
distob/distob.py
|
def gather(obj):
"""Retrieve objects that have been distributed, making them local again"""
if hasattr(obj, '__distob_gather__'):
return obj.__distob_gather__()
elif (isinstance(obj, collections.Sequence) and
not isinstance(obj, string_types)):
return [gather(subobj) for subobj in obj]
else:
return obj
|
def gather(obj):
"""Retrieve objects that have been distributed, making them local again"""
if hasattr(obj, '__distob_gather__'):
return obj.__distob_gather__()
elif (isinstance(obj, collections.Sequence) and
not isinstance(obj, string_types)):
return [gather(subobj) for subobj in obj]
else:
return obj
|
[
"Retrieve",
"objects",
"that",
"have",
"been",
"distributed",
"making",
"them",
"local",
"again"
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/distob.py#L1131-L1139
|
[
"def",
"gather",
"(",
"obj",
")",
":",
"if",
"hasattr",
"(",
"obj",
",",
"'__distob_gather__'",
")",
":",
"return",
"obj",
".",
"__distob_gather__",
"(",
")",
"elif",
"(",
"isinstance",
"(",
"obj",
",",
"collections",
".",
"Sequence",
")",
"and",
"not",
"isinstance",
"(",
"obj",
",",
"string_types",
")",
")",
":",
"return",
"[",
"gather",
"(",
"subobj",
")",
"for",
"subobj",
"in",
"obj",
"]",
"else",
":",
"return",
"obj"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
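Because gather falls through to the identity case for ordinary objects, it is safe on mixed or purely local input, and nesting is preserved (no running cluster is needed for this particular call):

from distob import gather

local = [1, [2.5, 'text'], None]
print(gather(local))  # [1, [2.5, 'text'], None] -- local objects pass through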
valid
|
vectorize
|
Upgrade normal function f to act in parallel on distributed lists/arrays
Args:
f (callable): an ordinary function which expects as its first argument a
single object, or a numpy array of N dimensions.
Returns:
vf (callable): new function that takes as its first argument a list of
    objects, or an array of N+1 dimensions. ``vf()`` will do the
computation ``f()`` on each part of the input in parallel and will
return a list of results, or a distributed array of results.
|
distob/distob.py
|
def vectorize(f):
"""Upgrade normal function f to act in parallel on distibuted lists/arrays
Args:
f (callable): an ordinary function which expects as its first argument a
single object, or a numpy array of N dimensions.
Returns:
vf (callable): new function that takes as its first argument a list of
        objects, or an array of N+1 dimensions. ``vf()`` will do the
computation ``f()`` on each part of the input in parallel and will
return a list of results, or a distributed array of results.
"""
def vf(obj, *args, **kwargs):
# user classes can customize how to vectorize a function:
if hasattr(obj, '__distob_vectorize__'):
return obj.__distob_vectorize__(f)(obj, *args, **kwargs)
if isinstance(obj, Remote):
return call(f, obj, *args, **kwargs)
elif distob._have_numpy and (isinstance(obj, np.ndarray) or
hasattr(type(obj), '__array_interface__')):
distarray = scatter(obj, axis=-1)
return vf(distarray, *args, **kwargs)
elif isinstance(obj, collections.Sequence):
inputs = scatter(obj)
dv = distob.engine._client[:]
kwargs = kwargs.copy()
kwargs['block'] = False
results = []
for obj in inputs:
results.append(call(f, obj, *args, **kwargs))
for i in range(len(results)):
results[i] = convert_result(results[i])
return results
if hasattr(f, '__name__'):
vf.__name__ = 'v' + f.__name__
f_str = f.__name__ + '()'
else:
f_str = 'callable'
doc = u"""Apply %s in parallel to a list or array\n
Args:
obj (Sequence of objects or an array)
other args are the same as for %s
""" % (f_str, f_str)
if hasattr(f, '__doc__') and f.__doc__ is not None:
doc = doc.rstrip() + (' detailed below:\n----------\n' + f.__doc__)
vf.__doc__ = doc
return vf
|
def vectorize(f):
"""Upgrade normal function f to act in parallel on distibuted lists/arrays
Args:
f (callable): an ordinary function which expects as its first argument a
single object, or a numpy array of N dimensions.
Returns:
vf (callable): new function that takes as its first argument a list of
        objects, or an array of N+1 dimensions. ``vf()`` will do the
computation ``f()`` on each part of the input in parallel and will
return a list of results, or a distributed array of results.
"""
def vf(obj, *args, **kwargs):
# user classes can customize how to vectorize a function:
if hasattr(obj, '__distob_vectorize__'):
return obj.__distob_vectorize__(f)(obj, *args, **kwargs)
if isinstance(obj, Remote):
return call(f, obj, *args, **kwargs)
elif distob._have_numpy and (isinstance(obj, np.ndarray) or
hasattr(type(obj), '__array_interface__')):
distarray = scatter(obj, axis=-1)
return vf(distarray, *args, **kwargs)
elif isinstance(obj, collections.Sequence):
inputs = scatter(obj)
dv = distob.engine._client[:]
kwargs = kwargs.copy()
kwargs['block'] = False
results = []
for obj in inputs:
results.append(call(f, obj, *args, **kwargs))
for i in range(len(results)):
results[i] = convert_result(results[i])
return results
if hasattr(f, '__name__'):
vf.__name__ = 'v' + f.__name__
f_str = f.__name__ + '()'
else:
f_str = 'callable'
doc = u"""Apply %s in parallel to a list or array\n
Args:
obj (Sequence of objects or an array)
other args are the same as for %s
""" % (f_str, f_str)
if hasattr(f, '__doc__') and f.__doc__ is not None:
doc = doc.rstrip() + (' detailed below:\n----------\n' + f.__doc__)
vf.__doc__ = doc
return vf
|
[
"Upgrade",
"normal",
"function",
"f",
"to",
"act",
"in",
"parallel",
"on",
"distibuted",
"lists",
"/",
"arrays"
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/distob.py#L1142-L1189
|
[
"def",
"vectorize",
"(",
"f",
")",
":",
"def",
"vf",
"(",
"obj",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# user classes can customize how to vectorize a function:",
"if",
"hasattr",
"(",
"obj",
",",
"'__distob_vectorize__'",
")",
":",
"return",
"obj",
".",
"__distob_vectorize__",
"(",
"f",
")",
"(",
"obj",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"isinstance",
"(",
"obj",
",",
"Remote",
")",
":",
"return",
"call",
"(",
"f",
",",
"obj",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"elif",
"distob",
".",
"_have_numpy",
"and",
"(",
"isinstance",
"(",
"obj",
",",
"np",
".",
"ndarray",
")",
"or",
"hasattr",
"(",
"type",
"(",
"obj",
")",
",",
"'__array_interface__'",
")",
")",
":",
"distarray",
"=",
"scatter",
"(",
"obj",
",",
"axis",
"=",
"-",
"1",
")",
"return",
"vf",
"(",
"distarray",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"collections",
".",
"Sequence",
")",
":",
"inputs",
"=",
"scatter",
"(",
"obj",
")",
"dv",
"=",
"distob",
".",
"engine",
".",
"_client",
"[",
":",
"]",
"kwargs",
"=",
"kwargs",
".",
"copy",
"(",
")",
"kwargs",
"[",
"'block'",
"]",
"=",
"False",
"results",
"=",
"[",
"]",
"for",
"obj",
"in",
"inputs",
":",
"results",
".",
"append",
"(",
"call",
"(",
"f",
",",
"obj",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"results",
")",
")",
":",
"results",
"[",
"i",
"]",
"=",
"convert_result",
"(",
"results",
"[",
"i",
"]",
")",
"return",
"results",
"if",
"hasattr",
"(",
"f",
",",
"'__name__'",
")",
":",
"vf",
".",
"__name__",
"=",
"'v'",
"+",
"f",
".",
"__name__",
"f_str",
"=",
"f",
".",
"__name__",
"+",
"'()'",
"else",
":",
"f_str",
"=",
"'callable'",
"doc",
"=",
"u\"\"\"Apply %s in parallel to a list or array\\n\n Args:\n obj (Sequence of objects or an array)\n other args are the same as for %s\n \"\"\"",
"%",
"(",
"f_str",
",",
"f_str",
")",
"if",
"hasattr",
"(",
"f",
",",
"'__doc__'",
")",
"and",
"f",
".",
"__doc__",
"is",
"not",
"None",
":",
"doc",
"=",
"doc",
".",
"rstrip",
"(",
")",
"+",
"(",
"' detailed below:\\n----------\\n'",
"+",
"f",
".",
"__doc__",
")",
"vf",
".",
"__doc__",
"=",
"doc",
"return",
"vf"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
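Hedged usage sketch for vectorize, again assuming a running ipyparallel cluster; normalise is an arbitrary toy function, and the exact block shapes it sees depend on how the distributed axis is split:

import numpy as np
from distob import vectorize

def normalise(a):
    # Toy per-block computation; works for any array shape.
    return (a - a.mean()) / a.std()

vnormalise = vectorize(normalise)
series = np.random.rand(10000, 8)   # distributed along the last axis
result = vnormalise(series)         # blocks processed in parallel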
valid
|
apply
|
Apply a function in parallel to each element of the input
|
distob/distob.py
|
def apply(f, obj, *args, **kwargs):
"""Apply a function in parallel to each element of the input"""
return vectorize(f)(obj, *args, **kwargs)
|
def apply(f, obj, *args, **kwargs):
"""Apply a function in parallel to each element of the input"""
return vectorize(f)(obj, *args, **kwargs)
|
[
"Apply",
"a",
"function",
"in",
"parallel",
"to",
"each",
"element",
"of",
"the",
"input"
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/distob.py#L1192-L1194
|
[
"def",
"apply",
"(",
"f",
",",
"obj",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"vectorize",
"(",
"f",
")",
"(",
"obj",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
valid
|
call_all
|
Call a method on each element of a sequence, in parallel.
Returns:
list of results
|
distob/distob.py
|
def call_all(sequence, method_name, *args, **kwargs):
"""Call a method on each element of a sequence, in parallel.
Returns:
list of results
"""
kwargs = kwargs.copy()
kwargs['block'] = False
results = []
for obj in sequence:
results.append(methodcall(obj, method_name, *args, **kwargs))
for i in range(len(results)):
results[i] = convert_result(results[i])
return results
|
def call_all(sequence, method_name, *args, **kwargs):
"""Call a method on each element of a sequence, in parallel.
Returns:
list of results
"""
kwargs = kwargs.copy()
kwargs['block'] = False
results = []
for obj in sequence:
results.append(methodcall(obj, method_name, *args, **kwargs))
for i in range(len(results)):
results[i] = convert_result(results[i])
return results
|
[
"Call",
"a",
"method",
"on",
"each",
"element",
"of",
"a",
"sequence",
"in",
"parallel",
".",
"Returns",
":",
"list",
"of",
"results"
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/distob.py#L1197-L1209
|
[
"def",
"call_all",
"(",
"sequence",
",",
"method_name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"=",
"kwargs",
".",
"copy",
"(",
")",
"kwargs",
"[",
"'block'",
"]",
"=",
"False",
"results",
"=",
"[",
"]",
"for",
"obj",
"in",
"sequence",
":",
"results",
".",
"append",
"(",
"methodcall",
"(",
"obj",
",",
"method_name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"results",
")",
")",
":",
"results",
"[",
"i",
"]",
"=",
"convert_result",
"(",
"results",
"[",
"i",
"]",
")",
"return",
"results"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
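call_all pairs naturally with scatter: distribute a list of objects, then invoke the same method on every proxy concurrently. A hedged sketch (SimBox and its run method are invented, and in practice the class should be importable on the engines rather than defined in __main__):

from distob import scatter, call_all

class SimBox(object):
    def __init__(self, seed):
        self.seed = seed
    def run(self, steps):
        return self.seed * steps   # placeholder for real work

boxes = scatter([SimBox(i) for i in range(4)])  # one proxy per engine
print(call_all(boxes, 'run', 1000))             # [0, 1000, 2000, 3000]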
valid
|
ObjectHub.register_proxy_type
|
Configure engines so that remote methods returning values of type
`real_type` will instead return by proxy, as type `proxy_type`
|
distob/distob.py
|
def register_proxy_type(cls, real_type, proxy_type):
"""Configure engines so that remote methods returning values of type
`real_type` will instead return by proxy, as type `proxy_type`
"""
if distob.engine is None:
cls._initial_proxy_types[real_type] = proxy_type
elif isinstance(distob.engine, ObjectHub):
distob.engine._runtime_reg_proxy_type(real_type, proxy_type)
else:
# TODO: remove next line after issue #58 in dill is fixed.
distob.engine._singleeng_reg_proxy_type(real_type, proxy_type)
pass
|
def register_proxy_type(cls, real_type, proxy_type):
"""Configure engines so that remote methods returning values of type
`real_type` will instead return by proxy, as type `proxy_type`
"""
if distob.engine is None:
cls._initial_proxy_types[real_type] = proxy_type
elif isinstance(distob.engine, ObjectHub):
distob.engine._runtime_reg_proxy_type(real_type, proxy_type)
else:
# TODO: remove next line after issue #58 in dill is fixed.
distob.engine._singleeng_reg_proxy_type(real_type, proxy_type)
pass
|
[
"Configure",
"engines",
"so",
"that",
"remote",
"methods",
"returning",
"values",
"of",
"type",
"real_type",
"will",
"instead",
"return",
"by",
"proxy",
"as",
"type",
"proxy_type"
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/distob.py#L237-L248
|
[
"def",
"register_proxy_type",
"(",
"cls",
",",
"real_type",
",",
"proxy_type",
")",
":",
"if",
"distob",
".",
"engine",
"is",
"None",
":",
"cls",
".",
"_initial_proxy_types",
"[",
"real_type",
"]",
"=",
"proxy_type",
"elif",
"isinstance",
"(",
"distob",
".",
"engine",
",",
"ObjectHub",
")",
":",
"distob",
".",
"engine",
".",
"_runtime_reg_proxy_type",
"(",
"real_type",
",",
"proxy_type",
")",
"else",
":",
"# TODO: remove next line after issue #58 in dill is fixed.",
"distob",
".",
"engine",
".",
"_singleeng_reg_proxy_type",
"(",
"real_type",
",",
"proxy_type",
")",
"pass"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
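Sketch of registering a custom proxy type so that remote methods returning MyThing come back as RemoteMyThing proxies; MyThing is invented, and we assume Remote, ObjectHub and proxy_methods are exported at package level as in distob's README:

from distob import Remote, ObjectHub, proxy_methods

class MyThing(object):
    def value(self):
        return 42

@proxy_methods(MyThing)
class RemoteMyThing(Remote, MyThing):
    pass

ObjectHub.register_proxy_type(MyThing, RemoteMyThing)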
valid
|
Remote._fetch
|
forces update of a local cached copy of the real object
(regardless of the preference setting self.cache)
|
distob/distob.py
|
def _fetch(self):
"""forces update of a local cached copy of the real object
(regardless of the preference setting self.cache)"""
if not self.is_local and not self._obcache_current:
#print('fetching data from %s' % self._ref.id)
def _remote_fetch(id):
return distob.engine[id]
self._obcache = self._dv.apply_sync(_remote_fetch, self._id)
self._obcache_current = True
self.__engine_affinity__ = (distob.engine.eid,
self.__engine_affinity__[1])
|
def _fetch(self):
"""forces update of a local cached copy of the real object
(regardless of the preference setting self.cache)"""
if not self.is_local and not self._obcache_current:
#print('fetching data from %s' % self._ref.id)
def _remote_fetch(id):
return distob.engine[id]
self._obcache = self._dv.apply_sync(_remote_fetch, self._id)
self._obcache_current = True
self.__engine_affinity__ = (distob.engine.eid,
self.__engine_affinity__[1])
|
[
"forces",
"update",
"of",
"a",
"local",
"cached",
"copy",
"of",
"the",
"real",
"object",
"(",
"regardless",
"of",
"the",
"preference",
"setting",
"self",
".",
"cache",
")"
] |
mattja/distob
|
python
|
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/distob.py#L804-L814
|
[
"def",
"_fetch",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_local",
"and",
"not",
"self",
".",
"_obcache_current",
":",
"#print('fetching data from %s' % self._ref.id)",
"def",
"_remote_fetch",
"(",
"id",
")",
":",
"return",
"distob",
".",
"engine",
"[",
"id",
"]",
"self",
".",
"_obcache",
"=",
"self",
".",
"_dv",
".",
"apply_sync",
"(",
"_remote_fetch",
",",
"self",
".",
"_id",
")",
"self",
".",
"_obcache_current",
"=",
"True",
"self",
".",
"__engine_affinity__",
"=",
"(",
"distob",
".",
"engine",
".",
"eid",
",",
"self",
".",
"__engine_affinity__",
"[",
"1",
"]",
")"
] |
b0fc49e157189932c70231077ed35e1aa5717da9
|
valid
|
extract_json
|
Supports: gettext, ngettext. See package README or github ( https://github.com/tigrawap/pybabel-json ) for more usage info.
|
pybabel_json/extractor.py
|
def extract_json(fileobj, keywords, comment_tags, options):
"""
Supports: gettext, ngettext. See package README or github ( https://github.com/tigrawap/pybabel-json ) for more usage info.
"""
data=fileobj.read()
json_extractor=JsonExtractor(data)
strings_data=json_extractor.get_lines_data()
for item in strings_data:
messages = [item['content']]
if item.get('funcname') == 'ngettext':
messages.append(item['alt_content'])
yield item['line_number'],item.get('funcname','gettext'),tuple(messages),[]
|
def extract_json(fileobj, keywords, comment_tags, options):
"""
Supports: gettext, ngettext. See package README or github ( https://github.com/tigrawap/pybabel-json ) for more usage info.
"""
data=fileobj.read()
json_extractor=JsonExtractor(data)
strings_data=json_extractor.get_lines_data()
for item in strings_data:
messages = [item['content']]
if item.get('funcname') == 'ngettext':
messages.append(item['alt_content'])
yield item['line_number'],item.get('funcname','gettext'),tuple(messages),[]
|
[
"Supports",
":",
"gettext",
"ngettext",
".",
"See",
"package",
"README",
"or",
"github",
"(",
"https",
":",
"//",
"github",
".",
"com",
"/",
"tigrawap",
"/",
"pybabel",
"-",
"json",
")",
"for",
"more",
"usage",
"info",
"."
] |
tigrawap/pybabel-json
|
python
|
https://github.com/tigrawap/pybabel-json/blob/432b5726c61afb906bd6892366a6b20e89dc566f/pybabel_json/extractor.py#L109-L121
|
[
"def",
"extract_json",
"(",
"fileobj",
",",
"keywords",
",",
"comment_tags",
",",
"options",
")",
":",
"data",
"=",
"fileobj",
".",
"read",
"(",
")",
"json_extractor",
"=",
"JsonExtractor",
"(",
"data",
")",
"strings_data",
"=",
"json_extractor",
".",
"get_lines_data",
"(",
")",
"for",
"item",
"in",
"strings_data",
":",
"messages",
"=",
"[",
"item",
"[",
"'content'",
"]",
"]",
"if",
"item",
".",
"get",
"(",
"'funcname'",
")",
"==",
"'ngettext'",
":",
"messages",
".",
"append",
"(",
"item",
"[",
"'alt_content'",
"]",
")",
"yield",
"item",
"[",
"'line_number'",
"]",
",",
"item",
".",
"get",
"(",
"'funcname'",
",",
"'gettext'",
")",
",",
"tuple",
"(",
"messages",
")",
",",
"[",
"]"
] |
432b5726c61afb906bd6892366a6b20e89dc566f
|
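A hedged sketch of driving extract_json directly; Babel normally invokes it through a mapping file, and the extractor wants a binary file object because it decodes utf-8 itself. Only plain string values are shown, since the ngettext path depends on the JSON_GETTEXT_* key constants that are not reproduced in these records:

from io import BytesIO
from pybabel_json.extractor import extract_json

src = BytesIO(b'{"title": "Hello", "body": "World"}')
for lineno, funcname, messages, comments in extract_json(src, [], [], {}):
    print(lineno, funcname, messages)
# expected along the lines of:  1 gettext ('Hello',)  /  1 gettext ('World',)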
valid
|
JsonExtractor.get_lines_data
|
Returns a string:line_numbers list
Since all strings are unique, it is OK to get line numbers this way.
Since the same string can occur several times inside a single .json file, the values should be popped (FIFO) from the list
:rtype: list
|
pybabel_json/extractor.py
|
def get_lines_data(self):
"""
    Returns a string:line_numbers list
    Since all strings are unique, it is OK to get line numbers this way.
    Since the same string can occur several times inside a single .json file, the values should be popped (FIFO) from the list
:rtype: list
"""
encoding = 'utf-8'
for token in tokenize(self.data.decode(encoding)):
if token.type == 'operator':
if token.value == '{':
self.start_object()
elif token.value ==':':
self.with_separator(token)
elif token.value == '}':
self.end_object()
elif token.value == ',':
self.end_pair()
elif token.type=='string':
if self.state=='key':
self.current_key=unquote_string(token.value)
if self.current_key==JSON_GETTEXT_KEYWORD:
self.gettext_mode=True
            # The value is not actually used here, but if only a key was met (as in a list) it will still be used. The important part is that a key won't be parsed as a value, and vice versa.
if self.gettext_mode:
if self.current_key==JSON_GETTEXT_KEY_CONTENT:
self.token_to_add=token
elif self.current_key==JSON_GETTEXT_KEY_ALT_CONTENT:
self.token_params['alt_token']=token
elif self.current_key==JSON_GETTEXT_KEY_FUNCNAME:
self.token_params['funcname']=token.value
else:
self.token_to_add=token
return self.results
|
def get_lines_data(self):
"""
    Returns a string:line_numbers list
    Since all strings are unique, it is OK to get line numbers this way.
    Since the same string can occur several times inside a single .json file, the values should be popped (FIFO) from the list
:rtype: list
"""
encoding = 'utf-8'
for token in tokenize(self.data.decode(encoding)):
if token.type == 'operator':
if token.value == '{':
self.start_object()
elif token.value ==':':
self.with_separator(token)
elif token.value == '}':
self.end_object()
elif token.value == ',':
self.end_pair()
elif token.type=='string':
if self.state=='key':
self.current_key=unquote_string(token.value)
if self.current_key==JSON_GETTEXT_KEYWORD:
self.gettext_mode=True
            # The value is not actually used here, but if only a key was met (as in a list) it will still be used. The important part is that a key won't be parsed as a value, and vice versa.
if self.gettext_mode:
if self.current_key==JSON_GETTEXT_KEY_CONTENT:
self.token_to_add=token
elif self.current_key==JSON_GETTEXT_KEY_ALT_CONTENT:
self.token_params['alt_token']=token
elif self.current_key==JSON_GETTEXT_KEY_FUNCNAME:
self.token_params['funcname']=token.value
else:
self.token_to_add=token
return self.results
|
[
"Returns",
"string",
":",
"line_numbers",
"list",
"Since",
"all",
"strings",
"are",
"unique",
"it",
"is",
"OK",
"to",
"get",
"line",
"numbers",
"this",
"way",
".",
"Since",
"same",
"string",
"can",
"occur",
"several",
"times",
"inside",
"single",
".",
"json",
"file",
"the",
"values",
"should",
"be",
"popped",
"(",
"FIFO",
")",
"from",
"the",
"list",
":",
"rtype",
":",
"list"
] |
tigrawap/pybabel-json
|
python
|
https://github.com/tigrawap/pybabel-json/blob/432b5726c61afb906bd6892366a6b20e89dc566f/pybabel_json/extractor.py#L68-L107
|
[
"def",
"get_lines_data",
"(",
"self",
")",
":",
"encoding",
"=",
"'utf-8'",
"for",
"token",
"in",
"tokenize",
"(",
"self",
".",
"data",
".",
"decode",
"(",
"encoding",
")",
")",
":",
"if",
"token",
".",
"type",
"==",
"'operator'",
":",
"if",
"token",
".",
"value",
"==",
"'{'",
":",
"self",
".",
"start_object",
"(",
")",
"elif",
"token",
".",
"value",
"==",
"':'",
":",
"self",
".",
"with_separator",
"(",
"token",
")",
"elif",
"token",
".",
"value",
"==",
"'}'",
":",
"self",
".",
"end_object",
"(",
")",
"elif",
"token",
".",
"value",
"==",
"','",
":",
"self",
".",
"end_pair",
"(",
")",
"elif",
"token",
".",
"type",
"==",
"'string'",
":",
"if",
"self",
".",
"state",
"==",
"'key'",
":",
"self",
".",
"current_key",
"=",
"unquote_string",
"(",
"token",
".",
"value",
")",
"if",
"self",
".",
"current_key",
"==",
"JSON_GETTEXT_KEYWORD",
":",
"self",
".",
"gettext_mode",
"=",
"True",
"#==value not actually used, but if only key was met (like in list) it still will be used. The important part, that key wont be parsed as value, not reversal",
"if",
"self",
".",
"gettext_mode",
":",
"if",
"self",
".",
"current_key",
"==",
"JSON_GETTEXT_KEY_CONTENT",
":",
"self",
".",
"token_to_add",
"=",
"token",
"elif",
"self",
".",
"current_key",
"==",
"JSON_GETTEXT_KEY_ALT_CONTENT",
":",
"self",
".",
"token_params",
"[",
"'alt_token'",
"]",
"=",
"token",
"elif",
"self",
".",
"current_key",
"==",
"JSON_GETTEXT_KEY_FUNCNAME",
":",
"self",
".",
"token_params",
"[",
"'funcname'",
"]",
"=",
"token",
".",
"value",
"else",
":",
"self",
".",
"token_to_add",
"=",
"token",
"return",
"self",
".",
"results"
] |
432b5726c61afb906bd6892366a6b20e89dc566f
|
valid
|
is_git_repo
|
Returns True if path is a git repository.
|
cpenv/utils.py
|
def is_git_repo(path):
'''Returns True if path is a git repository.'''
if path.startswith('git@') or path.startswith('https://'):
return True
if os.path.exists(unipath(path, '.git')):
return True
return False
|
def is_git_repo(path):
'''Returns True if path is a git repository.'''
if path.startswith('git@') or path.startswith('https://'):
return True
if os.path.exists(unipath(path, '.git')):
return True
return False
|
[
"Returns",
"True",
"if",
"path",
"is",
"a",
"git",
"repository",
"."
] |
cpenv/cpenv
|
python
|
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/utils.py#L16-L25
|
[
"def",
"is_git_repo",
"(",
"path",
")",
":",
"if",
"path",
".",
"startswith",
"(",
"'git@'",
")",
"or",
"path",
".",
"startswith",
"(",
"'https://'",
")",
":",
"return",
"True",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"unipath",
"(",
"path",
",",
"'.git'",
")",
")",
":",
"return",
"True",
"return",
"False"
] |
afbb569ae04002743db041d3629a5be8c290bd89
|
valid
|
is_home_environment
|
Returns True if path is in CPENV_HOME
|
cpenv/utils.py
|
def is_home_environment(path):
'''Returns True if path is in CPENV_HOME'''
home = unipath(os.environ.get('CPENV_HOME', '~/.cpenv'))
path = unipath(path)
return path.startswith(home)
|
def is_home_environment(path):
'''Returns True if path is in CPENV_HOME'''
home = unipath(os.environ.get('CPENV_HOME', '~/.cpenv'))
path = unipath(path)
return path.startswith(home)
|
[
"Returns",
"True",
"if",
"path",
"is",
"in",
"CPENV_HOME"
] |
cpenv/cpenv
|
python
|
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/utils.py#L28-L34
|
[
"def",
"is_home_environment",
"(",
"path",
")",
":",
"home",
"=",
"unipath",
"(",
"os",
".",
"environ",
".",
"get",
"(",
"'CPENV_HOME'",
",",
"'~/.cpenv'",
")",
")",
"path",
"=",
"unipath",
"(",
"path",
")",
"return",
"path",
".",
"startswith",
"(",
"home",
")"
] |
afbb569ae04002743db041d3629a5be8c290bd89
|
valid
|
is_redirecting
|
Returns True if path contains a .cpenv file
|
cpenv/utils.py
|
def is_redirecting(path):
'''Returns True if path contains a .cpenv file'''
candidate = unipath(path, '.cpenv')
return os.path.exists(candidate) and os.path.isfile(candidate)
|
def is_redirecting(path):
'''Returns True if path contains a .cpenv file'''
candidate = unipath(path, '.cpenv')
return os.path.exists(candidate) and os.path.isfile(candidate)
|
[
"Returns",
"True",
"if",
"path",
"contains",
"a",
".",
"cpenv",
"file"
] |
cpenv/cpenv
|
python
|
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/utils.py#L55-L59
|
[
"def",
"is_redirecting",
"(",
"path",
")",
":",
"candidate",
"=",
"unipath",
"(",
"path",
",",
"'.cpenv'",
")",
"return",
"os",
".",
"path",
".",
"exists",
"(",
"candidate",
")",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"candidate",
")"
] |
afbb569ae04002743db041d3629a5be8c290bd89
|
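is_git_repo, is_redirecting and redirect_to_env_paths (next record) together cover cpenv's environment-resolution flow; a small runnable demonstration, with a temporary directory standing in for a real environment:

import os, tempfile
from cpenv.utils import is_git_repo, is_redirecting, redirect_to_env_paths

print(is_git_repo('git@github.com:cpenv/cpenv.git'))   # True (url form)

root = tempfile.mkdtemp()
print(is_redirecting(root))                            # False: no .cpenv yet

redirect = os.path.join(root, '.cpenv')
with open(redirect, 'w') as f:
    f.write('"/projects/env one" /projects/env_two')
print(is_redirecting(root))                            # True
print(redirect_to_env_paths(redirect))
# ['/projects/env one', '/projects/env_two'] -- shlex honours the quotes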
valid
|
redirect_to_env_paths
|
Get environment path from redirect file
|
cpenv/utils.py
|
def redirect_to_env_paths(path):
'''Get environment path from redirect file'''
with open(path, 'r') as f:
redirected = f.read()
return shlex.split(redirected)
|
def redirect_to_env_paths(path):
'''Get environment path from redirect file'''
with open(path, 'r') as f:
redirected = f.read()
return shlex.split(redirected)
|
[
"Get",
"environment",
"path",
"from",
"redirect",
"file"
] |
cpenv/cpenv
|
python
|
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/utils.py#L62-L68
|
[
"def",
"redirect_to_env_paths",
"(",
"path",
")",
":",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"f",
":",
"redirected",
"=",
"f",
".",
"read",
"(",
")",
"return",
"shlex",
".",
"split",
"(",
"redirected",
")"
] |
afbb569ae04002743db041d3629a5be8c290bd89
|
valid
|
expandpath
|
Returns an absolute expanded path
|
cpenv/utils.py
|
def expandpath(path):
'''Returns an absolute expanded path'''
return os.path.abspath(os.path.expandvars(os.path.expanduser(path)))
|
def expandpath(path):
'''Returns an absolute expanded path'''
return os.path.abspath(os.path.expandvars(os.path.expanduser(path)))
|
[
"Returns",
"an",
"absolute",
"expanded",
"path"
] |
cpenv/cpenv
|
python
|
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/utils.py#L71-L74
|
[
"def",
"expandpath",
"(",
"path",
")",
":",
"return",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"expandvars",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"path",
")",
")",
")"
] |
afbb569ae04002743db041d3629a5be8c290bd89
|
valid
|
unipath
|
Like os.path.join but also expands and normalizes path parts.
|
cpenv/utils.py
|
def unipath(*paths):
'''Like os.path.join but also expands and normalizes path parts.'''
return os.path.normpath(expandpath(os.path.join(*paths)))
|
def unipath(*paths):
'''Like os.path.join but also expands and normalizes path parts.'''
return os.path.normpath(expandpath(os.path.join(*paths)))
|
[
"Like",
"os",
".",
"path",
".",
"join",
"but",
"also",
"expands",
"and",
"normalizes",
"path",
"parts",
"."
] |
cpenv/cpenv
|
python
|
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/utils.py#L77-L80
|
[
"def",
"unipath",
"(",
"*",
"paths",
")",
":",
"return",
"os",
".",
"path",
".",
"normpath",
"(",
"expandpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"*",
"paths",
")",
")",
")"
] |
afbb569ae04002743db041d3629a5be8c290bd89
|
valid
|
binpath
|
Like os.path.join but acts relative to this package's bin path.
|
cpenv/utils.py
|
def binpath(*paths):
    '''Like os.path.join but acts relative to this package's bin path.'''
package_root = os.path.dirname(__file__)
return os.path.normpath(os.path.join(package_root, 'bin', *paths))
|
def binpath(*paths):
    '''Like os.path.join but acts relative to this package's bin path.'''
package_root = os.path.dirname(__file__)
return os.path.normpath(os.path.join(package_root, 'bin', *paths))
|
[
"Like",
"os",
".",
"path",
".",
"join",
"but",
"acts",
"relative",
"to",
"this",
"packages",
"bin",
"path",
"."
] |
cpenv/cpenv
|
python
|
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/utils.py#L83-L87
|
[
"def",
"binpath",
"(",
"*",
"paths",
")",
":",
"package_root",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
"return",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"package_root",
",",
"'bin'",
",",
"*",
"paths",
")",
")"
] |
afbb569ae04002743db041d3629a5be8c290bd89
|
valid
|
ensure_path_exists
|
Like os.makedirs but keeps quiet if path already exists
|
cpenv/utils.py
|
def ensure_path_exists(path, *args):
'''Like os.makedirs but keeps quiet if path already exists'''
if os.path.exists(path):
return
os.makedirs(path, *args)
|
def ensure_path_exists(path, *args):
'''Like os.makedirs but keeps quiet if path already exists'''
if os.path.exists(path):
return
os.makedirs(path, *args)
|
[
"Like",
"os",
".",
"makedirs",
"but",
"keeps",
"quiet",
"if",
"path",
"already",
"exists"
] |
cpenv/cpenv
|
python
|
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/utils.py#L90-L95
|
[
"def",
"ensure_path_exists",
"(",
"path",
",",
"*",
"args",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"return",
"os",
".",
"makedirs",
"(",
"path",
",",
"*",
"args",
")"
] |
afbb569ae04002743db041d3629a5be8c290bd89
|
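expandpath, unipath and ensure_path_exists compose as below; the printed values assume a POSIX home of /home/me and a $PROJECTS variable set as shown, purely for illustration:

import os
from cpenv.utils import expandpath, unipath, ensure_path_exists

os.environ['PROJECTS'] = '/data/projects'
print(expandpath('~/work'))                # /home/me/work
print(unipath('$PROJECTS', 'app', '..'))   # /data/projects (normalized)
ensure_path_exists(unipath('~', '.cpenv')) # quiet if it already exists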
valid
|
walk_dn
|
Walk down a directory tree. Same as os.walk but allows for a depth limit
via depth argument
|
cpenv/utils.py
|
def walk_dn(start_dir, depth=10):
'''
Walk down a directory tree. Same as os.walk but allows for a depth limit
via depth argument
'''
    start_depth = len(start_dir.split(os.sep))
end_depth = start_depth + depth
for root, subdirs, files in os.walk(start_dir):
yield root, subdirs, files
        if len(root.split(os.sep)) >= end_depth:
break
|
def walk_dn(start_dir, depth=10):
'''
Walk down a directory tree. Same as os.walk but allows for a depth limit
via depth argument
'''
    start_depth = len(start_dir.split(os.sep))
end_depth = start_depth + depth
for root, subdirs, files in os.walk(start_dir):
yield root, subdirs, files
        if len(root.split(os.sep)) >= end_depth:
break
|
[
"Walk",
"down",
"a",
"directory",
"tree",
".",
"Same",
"as",
"os",
".",
"walk",
"but",
"allows",
"for",
"a",
"depth",
"limit",
"via",
"depth",
"argument"
] |
cpenv/cpenv
|
python
|
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/utils.py#L98-L111
|
[
"def",
"walk_dn",
"(",
"start_dir",
",",
"depth",
"=",
"10",
")",
":",
"start_depth",
"=",
"len",
"(",
"os",
".",
"path",
".",
"split",
"(",
"start_dir",
")",
")",
"end_depth",
"=",
"start_depth",
"+",
"depth",
"for",
"root",
",",
"subdirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"start_dir",
")",
":",
"yield",
"root",
",",
"subdirs",
",",
"files",
"if",
"len",
"(",
"os",
".",
"path",
".",
"split",
"(",
"root",
")",
")",
">=",
"end_depth",
":",
"break"
] |
afbb569ae04002743db041d3629a5be8c290bd89
|
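Usage sketch for the depth-limited walk; note that pruning happens after yielding, and the break ends the whole walk at the first path that reaches the limit rather than pruning branch by branch:

from cpenv.utils import walk_dn

# Visit at most two levels below the current directory.
for root, subdirs, files in walk_dn('.', depth=2):
    print(root, '-', len(subdirs), 'dirs,', len(files), 'files')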
valid
|
walk_up
|
Walk up a directory tree
|
cpenv/utils.py
|
def walk_up(start_dir, depth=20):
'''
Walk up a directory tree
'''
root = start_dir
for i in xrange(depth):
contents = os.listdir(root)
subdirs, files = [], []
for f in contents:
if os.path.isdir(os.path.join(root, f)):
subdirs.append(f)
else:
files.append(f)
yield root, subdirs, files
parent = os.path.dirname(root)
if parent and not parent == root:
root = parent
else:
break
|
def walk_up(start_dir, depth=20):
'''
Walk up a directory tree
'''
root = start_dir
for i in xrange(depth):
contents = os.listdir(root)
subdirs, files = [], []
for f in contents:
if os.path.isdir(os.path.join(root, f)):
subdirs.append(f)
else:
files.append(f)
yield root, subdirs, files
parent = os.path.dirname(root)
if parent and not parent == root:
root = parent
else:
break
|
[
"Walk",
"up",
"a",
"directory",
"tree"
] |
cpenv/cpenv
|
python
|
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/utils.py#L122-L143
|
[
"def",
"walk_up",
"(",
"start_dir",
",",
"depth",
"=",
"20",
")",
":",
"root",
"=",
"start_dir",
"for",
"i",
"in",
"xrange",
"(",
"depth",
")",
":",
"contents",
"=",
"os",
".",
"listdir",
"(",
"root",
")",
"subdirs",
",",
"files",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"f",
"in",
"contents",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"f",
")",
")",
":",
"subdirs",
".",
"append",
"(",
"f",
")",
"else",
":",
"files",
".",
"append",
"(",
"f",
")",
"yield",
"root",
",",
"subdirs",
",",
"files",
"parent",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"root",
")",
"if",
"parent",
"and",
"not",
"parent",
"==",
"root",
":",
"root",
"=",
"parent",
"else",
":",
"break"
] |
afbb569ae04002743db041d3629a5be8c290bd89
|
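walk_up is handy for locating a marker file somewhere above a starting directory, much as git finds .git; a sketch (the recorded code targets Python 2 -- note xrange -- so run it there or alias xrange = range first):

import os
from cpenv.utils import walk_up

def find_marker(start, name='.cpenv'):
    # Return the first ancestor of `start` containing `name`, else None.
    for root, subdirs, files in walk_up(start, depth=20):
        if name in files or name in subdirs:
            return root
    return None

print(find_marker(os.getcwd()))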
valid
|
preprocess_dict
|
Preprocess a dict to be used as environment variables.
:param d: dict to be processed
|
cpenv/utils.py
|
def preprocess_dict(d):
'''
Preprocess a dict to be used as environment variables.
:param d: dict to be processed
'''
out_env = {}
for k, v in d.items():
if not type(v) in PREPROCESSORS:
raise KeyError('Invalid type in dict: {}'.format(type(v)))
out_env[k] = PREPROCESSORS[type(v)](v)
return out_env
|
def preprocess_dict(d):
'''
Preprocess a dict to be used as environment variables.
:param d: dict to be processed
'''
out_env = {}
for k, v in d.items():
if not type(v) in PREPROCESSORS:
raise KeyError('Invalid type in dict: {}'.format(type(v)))
out_env[k] = PREPROCESSORS[type(v)](v)
return out_env
|
[
"Preprocess",
"a",
"dict",
"to",
"be",
"used",
"as",
"environment",
"variables",
"."
] |
cpenv/cpenv
|
python
|
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/utils.py#L187-L202
|
[
"def",
"preprocess_dict",
"(",
"d",
")",
":",
"out_env",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
":",
"if",
"not",
"type",
"(",
"v",
")",
"in",
"PREPROCESSORS",
":",
"raise",
"KeyError",
"(",
"'Invalid type in dict: {}'",
".",
"format",
"(",
"type",
"(",
"v",
")",
")",
")",
"out_env",
"[",
"k",
"]",
"=",
"PREPROCESSORS",
"[",
"type",
"(",
"v",
")",
"]",
"(",
"v",
")",
"return",
"out_env"
] |
afbb569ae04002743db041d3629a5be8c290bd89
|
valid
|
_join_seq
|
Add a sequence value to env dict
|
cpenv/utils.py
|
def _join_seq(d, k, v):
'''Add a sequence value to env dict'''
if k not in d:
d[k] = list(v)
elif isinstance(d[k], list):
for item in v:
if item not in d[k]:
d[k].insert(0, item)
elif isinstance(d[k], string_types):
v.append(d[k])
d[k] = v
|
def _join_seq(d, k, v):
'''Add a sequence value to env dict'''
if k not in d:
d[k] = list(v)
elif isinstance(d[k], list):
for item in v:
if item not in d[k]:
d[k].insert(0, item)
elif isinstance(d[k], string_types):
v.append(d[k])
d[k] = v
|
[
"Add",
"a",
"sequence",
"value",
"to",
"env",
"dict"
] |
cpenv/cpenv
|
python
|
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/utils.py#L217-L230
|
[
"def",
"_join_seq",
"(",
"d",
",",
"k",
",",
"v",
")",
":",
"if",
"k",
"not",
"in",
"d",
":",
"d",
"[",
"k",
"]",
"=",
"list",
"(",
"v",
")",
"elif",
"isinstance",
"(",
"d",
"[",
"k",
"]",
",",
"list",
")",
":",
"for",
"item",
"in",
"v",
":",
"if",
"item",
"not",
"in",
"d",
"[",
"k",
"]",
":",
"d",
"[",
"k",
"]",
".",
"insert",
"(",
"0",
",",
"item",
")",
"elif",
"isinstance",
"(",
"d",
"[",
"k",
"]",
",",
"string_types",
")",
":",
"v",
".",
"append",
"(",
"d",
"[",
"k",
"]",
")",
"d",
"[",
"k",
"]",
"=",
"v"
] |
afbb569ae04002743db041d3629a5be8c290bd89
|
valid
|
join_dicts
|
Join a bunch of dicts
|
cpenv/utils.py
|
def join_dicts(*dicts):
'''Join a bunch of dicts'''
out_dict = {}
for d in dicts:
for k, v in d.iteritems():
if not type(v) in JOINERS:
raise KeyError('Invalid type in dict: {}'.format(type(v)))
JOINERS[type(v)](out_dict, k, v)
return out_dict
|
def join_dicts(*dicts):
'''Join a bunch of dicts'''
out_dict = {}
for d in dicts:
for k, v in d.iteritems():
if not type(v) in JOINERS:
raise KeyError('Invalid type in dict: {}'.format(type(v)))
JOINERS[type(v)](out_dict, k, v)
return out_dict
|
[
"Join",
"a",
"bunch",
"of",
"dicts"
] |
cpenv/cpenv
|
python
|
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/utils.py#L245-L258
|
[
"def",
"join_dicts",
"(",
"*",
"dicts",
")",
":",
"out_dict",
"=",
"{",
"}",
"for",
"d",
"in",
"dicts",
":",
"for",
"k",
",",
"v",
"in",
"d",
".",
"iteritems",
"(",
")",
":",
"if",
"not",
"type",
"(",
"v",
")",
"in",
"JOINERS",
":",
"raise",
"KeyError",
"(",
"'Invalid type in dict: {}'",
".",
"format",
"(",
"type",
"(",
"v",
")",
")",
")",
"JOINERS",
"[",
"type",
"(",
"v",
")",
"]",
"(",
"out_dict",
",",
"k",
",",
"v",
")",
"return",
"out_dict"
] |
afbb569ae04002743db041d3629a5be8c290bd89
|
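preprocess_dict, _join_seq and join_dicts all lean on type-dispatch tables (PREPROCESSORS and JOINERS, defined elsewhere in cpenv/utils.py and not reproduced in these records). A self-contained miniature of the same pattern with the tables rebuilt locally, so the handler contents here are assumptions:

def _join_str(d, k, v):
    d[k] = v                 # later strings simply overwrite

def _join_list(d, k, v):
    d.setdefault(k, [])
    d[k] = [x for x in v if x not in d[k]] + d[k]  # prepend new items

JOINERS = {str: _join_str, list: _join_list}

def join_dicts(*dicts):
    out = {}
    for d in dicts:
        for k, v in d.items():   # items(): Python 3 spelling of iteritems()
            JOINERS[type(v)](out, k, v)
    return out

print(join_dicts({'PATH': ['/a']}, {'PATH': ['/b', '/a'], 'SHELL': 'bash'}))
# {'PATH': ['/b', '/a'], 'SHELL': 'bash'}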
valid
|
env_to_dict
|
Convert a dict containing environment variables into a standard dict.
Variables containing multiple values will be split into a list based on
the argument passed to pathsep.
:param env: Environment dict like os.environ.data
:param pathsep: Path separator used to split variables
|
cpenv/utils.py
|
def env_to_dict(env, pathsep=os.pathsep):
'''
Convert a dict containing environment variables into a standard dict.
Variables containing multiple values will be split into a list based on
the argument passed to pathsep.
:param env: Environment dict like os.environ.data
:param pathsep: Path separator used to split variables
'''
out_dict = {}
for k, v in env.iteritems():
if pathsep in v:
out_dict[k] = v.split(pathsep)
else:
out_dict[k] = v
return out_dict
|
def env_to_dict(env, pathsep=os.pathsep):
'''
Convert a dict containing environment variables into a standard dict.
Variables containing multiple values will be split into a list based on
the argument passed to pathsep.
:param env: Environment dict like os.environ.data
:param pathsep: Path separator used to split variables
'''
out_dict = {}
for k, v in env.iteritems():
if pathsep in v:
out_dict[k] = v.split(pathsep)
else:
out_dict[k] = v
return out_dict
|
[
"Convert",
"a",
"dict",
"containing",
"environment",
"variables",
"into",
"a",
"standard",
"dict",
".",
"Variables",
"containing",
"multiple",
"values",
"will",
"be",
"split",
"into",
"a",
"list",
"based",
"on",
"the",
"argument",
"passed",
"to",
"pathsep",
"."
] |
cpenv/cpenv
|
python
|
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/utils.py#L261-L279
|
[
"def",
"env_to_dict",
"(",
"env",
",",
"pathsep",
"=",
"os",
".",
"pathsep",
")",
":",
"out_dict",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"env",
".",
"iteritems",
"(",
")",
":",
"if",
"pathsep",
"in",
"v",
":",
"out_dict",
"[",
"k",
"]",
"=",
"v",
".",
"split",
"(",
"pathsep",
")",
"else",
":",
"out_dict",
"[",
"k",
"]",
"=",
"v",
"return",
"out_dict"
] |
afbb569ae04002743db041d3629a5be8c290bd89
|
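env_to_dict is the inverse half of the dict/env round trip; a Python 3 adaptation (the record uses Python 2's iteritems) makes the splitting behaviour easy to see:

import os

def env_to_dict(env, pathsep=os.pathsep):
    # Python 3 adaptation: items() instead of iteritems().
    out = {}
    for k, v in env.items():
        out[k] = v.split(pathsep) if pathsep in v else v
    return out

print(env_to_dict({'PATH': '/usr/bin:/bin', 'LANG': 'en_US.UTF-8'}, pathsep=':'))
# {'PATH': ['/usr/bin', '/bin'], 'LANG': 'en_US.UTF-8'}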