INSTRUCTION | RESPONSE
|---|---|
Show an image.
|
def imshow(img, win_name='', wait_time=0):
"""Show an image.
Args:
img (str or ndarray): The image to be displayed.
win_name (str): The window name.
wait_time (int): Value of waitKey param.
"""
cv2.imshow(win_name, imread(img))
cv2.waitKey(wait_time)
|
Draw bboxes on an image.
|
def imshow_bboxes(img,
bboxes,
colors='green',
top_k=-1,
thickness=1,
show=True,
win_name='',
wait_time=0,
out_file=None):
"""Draw bboxes on an image.
Args:
img (str or ndarray): The image to be displayed.
bboxes (list or ndarray): A list of ndarray of shape (k, 4).
colors (list[str or tuple or Color]): A list of colors.
top_k (int): Plot the first k bboxes only if set positive.
thickness (int): Thickness of lines.
show (bool): Whether to show the image.
win_name (str): The window name.
wait_time (int): Value of waitKey param.
out_file (str, optional): The filename to write the image.
"""
img = imread(img)
if isinstance(bboxes, np.ndarray):
bboxes = [bboxes]
if not isinstance(colors, list):
colors = [colors for _ in range(len(bboxes))]
colors = [color_val(c) for c in colors]
assert len(bboxes) == len(colors)
for i, _bboxes in enumerate(bboxes):
_bboxes = _bboxes.astype(np.int32)
if top_k <= 0:
_top_k = _bboxes.shape[0]
else:
_top_k = min(top_k, _bboxes.shape[0])
for j in range(_top_k):
left_top = (_bboxes[j, 0], _bboxes[j, 1])
right_bottom = (_bboxes[j, 2], _bboxes[j, 3])
cv2.rectangle(
img, left_top, right_bottom, colors[i], thickness=thickness)
if show:
imshow(img, win_name, wait_time)
if out_file is not None:
imwrite(img, out_file)
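
# Usage sketch (hypothetical image path; assumes numpy and the helpers
# above are in scope): draw two boxes and save instead of showing a window.
import numpy as np

demo_bboxes = np.array([[10, 10, 100, 120], [50, 40, 200, 180]])
imshow_bboxes('test.jpg', demo_bboxes, colors='red', show=False,
              out_file='test_bboxes.jpg')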
|
Draw bboxes and class labels (with scores) on an image.
|
def imshow_det_bboxes(img,
bboxes,
labels,
class_names=None,
score_thr=0,
bbox_color='green',
text_color='green',
thickness=1,
font_scale=0.5,
show=True,
win_name='',
wait_time=0,
out_file=None):
"""Draw bboxes and class labels (with scores) on an image.
Args:
img (str or ndarray): The image to be displayed.
bboxes (ndarray): Bounding boxes (with scores), shaped (n, 4) or
(n, 5).
labels (ndarray): Labels of bboxes.
        class_names (list[str]): Names of each class.
score_thr (float): Minimum score of bboxes to be shown.
bbox_color (str or tuple or :obj:`Color`): Color of bbox lines.
text_color (str or tuple or :obj:`Color`): Color of texts.
thickness (int): Thickness of lines.
font_scale (float): Font scales of texts.
show (bool): Whether to show the image.
win_name (str): The window name.
wait_time (int): Value of waitKey param.
out_file (str or None): The filename to write the image.
"""
assert bboxes.ndim == 2
assert labels.ndim == 1
assert bboxes.shape[0] == labels.shape[0]
assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5
img = imread(img)
if score_thr > 0:
assert bboxes.shape[1] == 5
scores = bboxes[:, -1]
inds = scores > score_thr
bboxes = bboxes[inds, :]
labels = labels[inds]
bbox_color = color_val(bbox_color)
text_color = color_val(text_color)
for bbox, label in zip(bboxes, labels):
bbox_int = bbox.astype(np.int32)
left_top = (bbox_int[0], bbox_int[1])
right_bottom = (bbox_int[2], bbox_int[3])
cv2.rectangle(
img, left_top, right_bottom, bbox_color, thickness=thickness)
label_text = class_names[
label] if class_names is not None else 'cls {}'.format(label)
if len(bbox) > 4:
label_text += '|{:.02f}'.format(bbox[-1])
cv2.putText(img, label_text, (bbox_int[0], bbox_int[1] - 2),
cv2.FONT_HERSHEY_COMPLEX, font_scale, text_color)
if show:
imshow(img, win_name, wait_time)
if out_file is not None:
imwrite(img, out_file)
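
# Usage sketch (hypothetical paths): boxes carry a trailing score column,
# shaped (n, 5); score_thr=0.5 drops the second detection below.
import numpy as np

det_bboxes = np.array([[10, 10, 100, 120, 0.9],
                       [50, 40, 200, 180, 0.3]])
det_labels = np.array([0, 1])
imshow_det_bboxes('test.jpg', det_bboxes, det_labels,
                  class_names=['cat', 'dog'], score_thr=0.5,
                  show=False, out_file='test_det.jpg')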
|
Read an optical flow map.
|
def flowread(flow_or_path, quantize=False, concat_axis=0, *args, **kwargs):
"""Read an optical flow map.
Args:
flow_or_path (ndarray or str): A flow map or filepath.
        quantize (bool): Whether to read a quantized pair. If set to True,
            remaining args will be passed to :func:`dequantize_flow`.
concat_axis (int): The axis that dx and dy are concatenated,
can be either 0 or 1. Ignored if quantize is False.
Returns:
ndarray: Optical flow represented as a (h, w, 2) numpy array
"""
if isinstance(flow_or_path, np.ndarray):
if (flow_or_path.ndim != 3) or (flow_or_path.shape[-1] != 2):
raise ValueError('Invalid flow with shape {}'.format(
flow_or_path.shape))
return flow_or_path
elif not is_str(flow_or_path):
raise TypeError(
'"flow_or_path" must be a filename or numpy array, not {}'.format(
type(flow_or_path)))
if not quantize:
with open(flow_or_path, 'rb') as f:
try:
header = f.read(4).decode('utf-8')
except Exception:
raise IOError('Invalid flow file: {}'.format(flow_or_path))
else:
if header != 'PIEH':
raise IOError(
'Invalid flow file: {}, header does not contain PIEH'.
format(flow_or_path))
w = np.fromfile(f, np.int32, 1).squeeze()
h = np.fromfile(f, np.int32, 1).squeeze()
flow = np.fromfile(f, np.float32, w * h * 2).reshape((h, w, 2))
else:
assert concat_axis in [0, 1]
cat_flow = imread(flow_or_path, flag='unchanged')
if cat_flow.ndim != 2:
raise IOError(
'{} is not a valid quantized flow file, its dimension is {}.'.
format(flow_or_path, cat_flow.ndim))
assert cat_flow.shape[concat_axis] % 2 == 0
dx, dy = np.split(cat_flow, 2, axis=concat_axis)
flow = dequantize_flow(dx, dy, *args, **kwargs)
return flow.astype(np.float32)
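
# Reading sketch (assumption: 'flow.flo' is a hypothetical Middlebury-style
# file starting with the 'PIEH' header, e.g. one written by flowwrite).
flow = flowread('flow.flo')
print(flow.shape)  # (h, w, 2): dx and dy channels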
|
Write optical flow to file.
|
def flowwrite(flow, filename, quantize=False, concat_axis=0, *args, **kwargs):
"""Write optical flow to file.
If the flow is not quantized, it will be saved as a .flo file losslessly,
otherwise a jpeg image which is lossy but of much smaller size. (dx and dy
will be concatenated horizontally into a single image if quantize is True.)
Args:
flow (ndarray): (h, w, 2) array of optical flow.
filename (str): Output filepath.
quantize (bool): Whether to quantize the flow and save it to 2 jpeg
images. If set to True, remaining args will be passed to
:func:`quantize_flow`.
concat_axis (int): The axis that dx and dy are concatenated,
can be either 0 or 1. Ignored if quantize is False.
"""
if not quantize:
with open(filename, 'wb') as f:
f.write('PIEH'.encode('utf-8'))
np.array([flow.shape[1], flow.shape[0]], dtype=np.int32).tofile(f)
flow = flow.astype(np.float32)
flow.tofile(f)
f.flush()
else:
assert concat_axis in [0, 1]
dx, dy = quantize_flow(flow, *args, **kwargs)
dxdy = np.concatenate((dx, dy), axis=concat_axis)
imwrite(dxdy, filename)
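
# Round-trip sketch (assumes flowread from above is in scope): the
# non-quantized .flo path is lossless, so values survive exactly.
import numpy as np

flow = np.random.rand(48, 64, 2).astype(np.float32)
flowwrite(flow, 'tmp.flo')
assert np.allclose(flowread('tmp.flo'), flow)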
|
Quantize flow to [0, 255].
|
def quantize_flow(flow, max_val=0.02, norm=True):
"""Quantize flow to [0, 255].
After this step, the size of flow will be much smaller, and can be
dumped as jpeg images.
Args:
flow (ndarray): (h, w, 2) array of optical flow.
max_val (float): Maximum value of flow, values beyond
[-max_val, max_val] will be truncated.
norm (bool): Whether to divide flow values by image width/height.
Returns:
tuple[ndarray]: Quantized dx and dy.
"""
h, w, _ = flow.shape
dx = flow[..., 0]
dy = flow[..., 1]
if norm:
dx = dx / w # avoid inplace operations
dy = dy / h
# use 255 levels instead of 256 to make sure 0 is 0 after dequantization.
flow_comps = [
quantize(d, -max_val, max_val, 255, np.uint8) for d in [dx, dy]
]
return tuple(flow_comps)
|
Recover from quantized flow.
|
def dequantize_flow(dx, dy, max_val=0.02, denorm=True):
"""Recover from quantized flow.
Args:
dx (ndarray): Quantized dx.
dy (ndarray): Quantized dy.
max_val (float): Maximum value used when quantizing.
denorm (bool): Whether to multiply flow values with width/height.
Returns:
ndarray: Dequantized flow.
"""
assert dx.shape == dy.shape
assert dx.ndim == 2 or (dx.ndim == 3 and dx.shape[-1] == 1)
dx, dy = [dequantize(d, -max_val, max_val, 255) for d in [dx, dy]]
if denorm:
dx *= dx.shape[1]
dy *= dx.shape[0]
flow = np.dstack((dx, dy))
return flow
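
# Round-trip sketch: quantization clips to [-max_val, max_val] and keeps
# only 255 uint8 levels, so recovery is approximate rather than exact.
import numpy as np

flow = np.random.uniform(-0.01, 0.01, (48, 64, 2)).astype(np.float32)
dx, dy = quantize_flow(flow, max_val=0.02, norm=False)
restored = dequantize_flow(dx, dy, max_val=0.02, denorm=False)
assert np.abs(restored - flow).max() < 1e-3  # within quantization error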
|
Load state_dict to a module.
|
def load_state_dict(module, state_dict, strict=False, logger=None):
"""Load state_dict to a module.
This method is modified from :meth:`torch.nn.Module.load_state_dict`.
Default value for ``strict`` is set to ``False`` and the message for
param mismatch will be shown even if strict is False.
Args:
module (Module): Module that receives the state_dict.
state_dict (OrderedDict): Weights.
strict (bool): whether to strictly enforce that the keys
in :attr:`state_dict` match the keys returned by this module's
:meth:`~torch.nn.Module.state_dict` function. Default: ``False``.
logger (:obj:`logging.Logger`, optional): Logger to log the error
message. If not specified, print function will be used.
"""
unexpected_keys = []
own_state = module.state_dict()
for name, param in state_dict.items():
if name not in own_state:
unexpected_keys.append(name)
continue
if isinstance(param, torch.nn.Parameter):
# backwards compatibility for serialized parameters
param = param.data
try:
own_state[name].copy_(param)
except Exception:
raise RuntimeError('While copying the parameter named {}, '
'whose dimensions in the model are {} and '
'whose dimensions in the checkpoint are {}.'
.format(name, own_state[name].size(),
param.size()))
missing_keys = set(own_state.keys()) - set(state_dict.keys())
err_msg = []
if unexpected_keys:
err_msg.append('unexpected key in source state_dict: {}\n'.format(
', '.join(unexpected_keys)))
if missing_keys:
err_msg.append('missing keys in source state_dict: {}\n'.format(
', '.join(missing_keys)))
err_msg = '\n'.join(err_msg)
if err_msg:
if strict:
raise RuntimeError(err_msg)
elif logger is not None:
            logger.warning(err_msg)
else:
print(err_msg)
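
# Usage sketch: copy weights between two identically shaped modules. With
# the default strict=False, mismatched keys are reported but not fatal.
import torch
import torch.nn as nn

src, dst = nn.Linear(4, 2), nn.Linear(4, 2)
load_state_dict(dst, src.state_dict())
assert torch.equal(dst.weight, src.weight)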
|
Load checkpoint from a file or URI.
|
def load_checkpoint(model,
filename,
map_location=None,
strict=False,
logger=None):
"""Load checkpoint from a file or URI.
Args:
model (Module): Module to load checkpoint.
filename (str): Either a filepath or URL or modelzoo://xxxxxxx.
map_location (str): Same as :func:`torch.load`.
strict (bool): Whether to allow different params for the model and
checkpoint.
logger (:mod:`logging.Logger` or None): The logger for error message.
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
# load checkpoint from modelzoo or file or url
if filename.startswith('modelzoo://'):
import torchvision
model_urls = dict()
for _, name, ispkg in pkgutil.walk_packages(
torchvision.models.__path__):
if not ispkg:
_zoo = import_module('torchvision.models.{}'.format(name))
_urls = getattr(_zoo, 'model_urls')
model_urls.update(_urls)
model_name = filename[11:]
checkpoint = model_zoo.load_url(model_urls[model_name])
elif filename.startswith('open-mmlab://'):
model_name = filename[13:]
checkpoint = model_zoo.load_url(open_mmlab_model_urls[model_name])
elif filename.startswith(('http://', 'https://')):
checkpoint = model_zoo.load_url(filename)
else:
if not osp.isfile(filename):
raise IOError('{} is not a checkpoint file'.format(filename))
checkpoint = torch.load(filename, map_location=map_location)
# get state_dict from checkpoint
if isinstance(checkpoint, OrderedDict):
state_dict = checkpoint
elif isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
raise RuntimeError(
'No state_dict found in checkpoint file {}'.format(filename))
# strip prefix of state_dict
if list(state_dict.keys())[0].startswith('module.'):
        state_dict = {k[7:]: v for k, v in state_dict.items()}
# load state_dict
if hasattr(model, 'module'):
load_state_dict(model.module, state_dict, strict, logger)
else:
load_state_dict(model, state_dict, strict, logger)
return checkpoint
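
# Usage sketch (hypothetical path): resume weights saved by save_checkpoint
# below; a modelzoo:// or http(s):// URI would be resolved the same way.
import torchvision

model = torchvision.models.resnet18()
checkpoint = load_checkpoint(model, 'work_dir/latest.pth',
                             map_location='cpu')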
|
Copy a model state_dict to cpu.
|
def weights_to_cpu(state_dict):
"""Copy a model state_dict to cpu.
Args:
state_dict (OrderedDict): Model weights on GPU.
Returns:
        OrderedDict: Model weights on CPU.
"""
state_dict_cpu = OrderedDict()
for key, val in state_dict.items():
state_dict_cpu[key] = val.cpu()
return state_dict_cpu
|
Save checkpoint to file.
|
def save_checkpoint(model, filename, optimizer=None, meta=None):
"""Save checkpoint to file.
The checkpoint will have 3 fields: ``meta``, ``state_dict`` and
``optimizer``. By default ``meta`` will contain version and time info.
Args:
model (Module): Module whose params are to be saved.
filename (str): Checkpoint filename.
optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
meta (dict, optional): Metadata to be saved in checkpoint.
"""
if meta is None:
meta = {}
elif not isinstance(meta, dict):
raise TypeError('meta must be a dict or None, but got {}'.format(
type(meta)))
meta.update(mmcv_version=mmcv.__version__, time=time.asctime())
mmcv.mkdir_or_exist(osp.dirname(filename))
if hasattr(model, 'module'):
model = model.module
checkpoint = {
'meta': meta,
'state_dict': weights_to_cpu(model.state_dict())
}
if optimizer is not None:
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, filename)
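
# Usage sketch (hypothetical path): persist model and optimizer state; the
# resulting file round-trips through load_checkpoint above.
import torch.nn as nn
import torch.optim as optim

model = nn.Linear(4, 2)
optimizer = optim.SGD(model.parameters(), lr=0.01)
save_checkpoint(model, 'work_dir/latest.pth', optimizer=optimizer,
                meta=dict(epoch=1))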
|
Init the optimizer.
|
def init_optimizer(self, optimizer):
"""Init the optimizer.
Args:
optimizer (dict or :obj:`~torch.optim.Optimizer`): Either an
optimizer object or a dict used for constructing the optimizer.
Returns:
:obj:`~torch.optim.Optimizer`: An optimizer object.
Examples:
>>> optimizer = dict(type='SGD', lr=0.01, momentum=0.9)
>>> type(runner.init_optimizer(optimizer))
<class 'torch.optim.sgd.SGD'>
"""
if isinstance(optimizer, dict):
optimizer = obj_from_dict(
optimizer, torch.optim, dict(params=self.model.parameters()))
elif not isinstance(optimizer, torch.optim.Optimizer):
raise TypeError(
'optimizer must be either an Optimizer object or a dict, '
'but got {}'.format(type(optimizer)))
return optimizer
|
Init the logger.
|
def init_logger(self, log_dir=None, level=logging.INFO):
"""Init the logger.
Args:
log_dir(str, optional): Log file directory. If not specified, no
log file will be used.
level (int or str): See the built-in python logging module.
Returns:
:obj:`~logging.Logger`: Python logger.
"""
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(message)s', level=level)
logger = logging.getLogger(__name__)
if log_dir and self.rank == 0:
filename = '{}.log'.format(self.timestamp)
log_file = osp.join(log_dir, filename)
self._add_file_handler(logger, log_file, level=level)
return logger
|
Get current learning rates.
|
def current_lr(self):
"""Get current learning rates.
Returns:
list: Current learning rate of all param groups.
"""
if self.optimizer is None:
raise RuntimeError(
'lr is not applicable because optimizer does not exist.')
return [group['lr'] for group in self.optimizer.param_groups]
|
Register a hook into the hook list.
|
def register_hook(self, hook, priority='NORMAL'):
"""Register a hook into the hook list.
Args:
hook (:obj:`Hook`): The hook to be registered.
priority (int or str or :obj:`Priority`): Hook priority.
Lower value means higher priority.
"""
assert isinstance(hook, Hook)
if hasattr(hook, 'priority'):
raise ValueError('"priority" is a reserved attribute for hooks')
priority = get_priority(priority)
hook.priority = priority
# insert the hook to a sorted list
inserted = False
for i in range(len(self._hooks) - 1, -1, -1):
if priority >= self._hooks[i].priority:
self._hooks.insert(i + 1, hook)
inserted = True
break
if not inserted:
self._hooks.insert(0, hook)
|
Start running.
|
def run(self, data_loaders, workflow, max_epochs, **kwargs):
"""Start running.
Args:
data_loaders (list[:obj:`DataLoader`]): Dataloaders for training
and validation.
workflow (list[tuple]): A list of (phase, epochs) to specify the
running order and epochs. E.g, [('train', 2), ('val', 1)] means
running 2 epochs for training and 1 epoch for validation,
iteratively.
max_epochs (int): Total training epochs.
"""
assert isinstance(data_loaders, list)
assert mmcv.is_list_of(workflow, tuple)
assert len(data_loaders) == len(workflow)
self._max_epochs = max_epochs
work_dir = self.work_dir if self.work_dir is not None else 'NONE'
self.logger.info('Start running, host: %s, work_dir: %s',
get_host_info(), work_dir)
self.logger.info('workflow: %s, max: %d epochs', workflow, max_epochs)
self.call_hook('before_run')
while self.epoch < max_epochs:
for i, flow in enumerate(workflow):
mode, epochs = flow
if isinstance(mode, str): # self.train()
if not hasattr(self, mode):
raise ValueError(
'runner has no method named "{}" to run an epoch'.
format(mode))
epoch_runner = getattr(self, mode)
elif callable(mode): # custom train()
epoch_runner = mode
else:
raise TypeError('mode in workflow must be a str or '
'callable function, not {}'.format(
type(mode)))
for _ in range(epochs):
if mode == 'train' and self.epoch >= max_epochs:
return
epoch_runner(data_loaders[i], **kwargs)
time.sleep(1) # wait for some hooks like loggers to finish
self.call_hook('after_run')
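
# Usage sketch (hypothetical runner and loaders): alternate two training
# epochs with one validation epoch until 12 epochs are reached.
# runner.run([train_loader, val_loader],
#            workflow=[('train', 2), ('val', 1)],
#            max_epochs=12)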
|
Register default hooks for training.
|
def register_training_hooks(self,
lr_config,
optimizer_config=None,
checkpoint_config=None,
log_config=None):
"""Register default hooks for training.
Default hooks include:
- LrUpdaterHook
- OptimizerStepperHook
- CheckpointSaverHook
- IterTimerHook
- LoggerHook(s)
"""
if optimizer_config is None:
optimizer_config = {}
if checkpoint_config is None:
checkpoint_config = {}
self.register_lr_hooks(lr_config)
self.register_hook(self.build_hook(optimizer_config, OptimizerHook))
self.register_hook(self.build_hook(checkpoint_config, CheckpointHook))
self.register_hook(IterTimerHook())
if log_config is not None:
self.register_logger_hooks(log_config)
|
Convert a video with ffmpeg.
|
def convert_video(in_file, out_file, print_cmd=False, pre_options='',
**kwargs):
"""Convert a video with ffmpeg.
This provides a general api to ffmpeg, the executed command is::
`ffmpeg -y <pre_options> -i <in_file> <options> <out_file>`
Options(kwargs) are mapped to ffmpeg commands with the following rules:
- key=val: "-key val"
- key=True: "-key"
- key=False: ""
Args:
in_file (str): Input video filename.
out_file (str): Output video filename.
pre_options (str): Options appears before "-i <in_file>".
print_cmd (bool): Whether to print the final ffmpeg command.
"""
options = []
for k, v in kwargs.items():
if isinstance(v, bool):
if v:
options.append('-{}'.format(k))
elif k == 'log_level':
assert v in [
'quiet', 'panic', 'fatal', 'error', 'warning', 'info',
'verbose', 'debug', 'trace'
]
options.append('-loglevel {}'.format(v))
else:
options.append('-{} {}'.format(k, v))
cmd = 'ffmpeg -y {} -i {} {} {}'.format(pre_options, in_file,
' '.join(options), out_file)
if print_cmd:
print(cmd)
subprocess.call(cmd, shell=True)
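
# Usage sketch: assumes ffmpeg is on PATH and 'in.mp4' exists (hypothetical
# filename). kwargs map to flags: an=True -> '-an', r=25 -> '-r 25'.
convert_video('in.mp4', 'out.mp4', print_cmd=True, an=True, r=25)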
|
Resize a video.
|
def resize_video(in_file,
out_file,
size=None,
ratio=None,
keep_ar=False,
log_level='info',
print_cmd=False,
**kwargs):
"""Resize a video.
Args:
in_file (str): Input video filename.
out_file (str): Output video filename.
        size (tuple): Expected size (w, h), e.g. (320, 240) or (320, -1).
ratio (tuple or float): Expected resize ratio, (2, 0.5) means
(w*2, h*0.5).
keep_ar (bool): Whether to keep original aspect ratio.
log_level (str): Logging level of ffmpeg.
print_cmd (bool): Whether to print the final ffmpeg command.
"""
if size is None and ratio is None:
raise ValueError('expected size or ratio must be specified')
elif size is not None and ratio is not None:
raise ValueError('size and ratio cannot be specified at the same time')
options = {'log_level': log_level}
if size:
if not keep_ar:
options['vf'] = 'scale={}:{}'.format(size[0], size[1])
else:
options['vf'] = ('scale=w={}:h={}:force_original_aspect_ratio'
'=decrease'.format(size[0], size[1]))
else:
if not isinstance(ratio, tuple):
ratio = (ratio, ratio)
options['vf'] = 'scale="trunc(iw*{}):trunc(ih*{})"'.format(
ratio[0], ratio[1])
convert_video(in_file, out_file, print_cmd, **options)
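
# Usage sketch (hypothetical filenames): pass either size or ratio, never
# both; -1 in size lets ffmpeg keep the aspect ratio for that dimension.
resize_video('in.mp4', 'out_320.mp4', size=(320, -1))
resize_video('in.mp4', 'out_half.mp4', ratio=0.5)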
|
Cut a clip from a video.
|
def cut_video(in_file,
out_file,
start=None,
end=None,
vcodec=None,
acodec=None,
log_level='info',
print_cmd=False,
**kwargs):
"""Cut a clip from a video.
Args:
in_file (str): Input video filename.
out_file (str): Output video filename.
start (None or float): Start time (in seconds).
end (None or float): End time (in seconds).
vcodec (None or str): Output video codec, None for unchanged.
acodec (None or str): Output audio codec, None for unchanged.
log_level (str): Logging level of ffmpeg.
print_cmd (bool): Whether to print the final ffmpeg command.
"""
options = {'log_level': log_level}
if vcodec is None:
options['vcodec'] = 'copy'
if acodec is None:
options['acodec'] = 'copy'
if start:
options['ss'] = start
else:
start = 0
if end:
options['t'] = end - start
convert_video(in_file, out_file, print_cmd, **options)
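
# Usage sketch (hypothetical filenames): extract the 10s-20s clip; vcodec
# and acodec default to 'copy', so no re-encoding takes place.
cut_video('in.mp4', 'clip.mp4', start=10, end=20)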
|
Concatenate multiple videos into a single one.
|
def concat_video(video_list,
out_file,
vcodec=None,
acodec=None,
log_level='info',
print_cmd=False,
**kwargs):
"""Concatenate multiple videos into a single one.
Args:
video_list (list): A list of video filenames
out_file (str): Output video filename
vcodec (None or str): Output video codec, None for unchanged
acodec (None or str): Output audio codec, None for unchanged
log_level (str): Logging level of ffmpeg.
print_cmd (bool): Whether to print the final ffmpeg command.
"""
_, tmp_filename = tempfile.mkstemp(suffix='.txt', text=True)
with open(tmp_filename, 'w') as f:
for filename in video_list:
f.write('file {}\n'.format(osp.abspath(filename)))
options = {'log_level': log_level}
if vcodec is None:
options['vcodec'] = 'copy'
if acodec is None:
options['acodec'] = 'copy'
convert_video(
tmp_filename,
out_file,
print_cmd,
pre_options='-f concat -safe 0',
**options)
os.remove(tmp_filename)
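
# Usage sketch (hypothetical filenames): joins clips via ffmpeg's concat
# demuxer, so the inputs should share codecs and stream parameters.
concat_video(['clip1.mp4', 'clip2.mp4'], 'joined.mp4')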
|
Load a text file and parse the content as a list of strings.
|
def list_from_file(filename, prefix='', offset=0, max_num=0):
"""Load a text file and parse the content as a list of strings.
Args:
filename (str): Filename.
        prefix (str): The prefix to be inserted to the beginning of each item.
offset (int): The offset of lines.
max_num (int): The maximum number of lines to be read,
zeros and negatives mean no limitation.
Returns:
list[str]: A list of strings.
"""
cnt = 0
item_list = []
with open(filename, 'r') as f:
for _ in range(offset):
f.readline()
for line in f:
if max_num > 0 and cnt >= max_num:
break
item_list.append(prefix + line.rstrip('\n'))
cnt += 1
return item_list
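
# Usage sketch: with a file holding 'a\nb\nc\nd', skip one line and read at
# most two items, prefixing each one.
with open('items.txt', 'w') as f:
    f.write('a\nb\nc\nd\n')
assert list_from_file('items.txt', prefix='x/', offset=1,
                      max_num=2) == ['x/b', 'x/c']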
|
Load a text file and parse the content as a dict.
|
def dict_from_file(filename, key_type=str):
"""Load a text file and parse the content as a dict.
Each line of the text file will be two or more columns splited by
whitespaces or tabs. The first column will be parsed as dict keys, and
the following columns will be parsed as dict values.
Args:
filename(str): Filename.
        key_type(type): Type of the dict's keys. str is used by default and
            type conversion will be performed if specified.
Returns:
dict: The parsed contents.
"""
mapping = {}
with open(filename, 'r') as f:
for line in f:
items = line.rstrip('\n').split()
assert len(items) >= 2
key = key_type(items[0])
val = items[1:] if len(items) > 2 else items[1]
mapping[key] = val
return mapping
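
# Usage sketch: a single value column maps to a scalar, two or more map to
# a list; key_type converts the first column.
with open('map.txt', 'w') as f:
    f.write('1 cat\n2 dog cow\n')
assert dict_from_file('map.txt', key_type=int) == {1: 'cat',
                                                   2: ['dog', 'cow']}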
|
3x3 convolution with padding
|
def conv3x3(in_planes, out_planes, dilation=1):
"3x3 convolution with padding"
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
padding=dilation,
dilation=dilation)
|
Initialize an object from dict.
|
def obj_from_dict(info, parent=None, default_args=None):
"""Initialize an object from dict.
The dict must contain the key "type", which indicates the object type, it
can be either a string or type, such as "list" or ``list``. Remaining
fields are treated as the arguments for constructing the object.
Args:
info (dict): Object types and arguments.
        parent (:class:`module`): Module which may contain the expected
            object classes.
default_args (dict, optional): Default arguments for initializing the
object.
Returns:
any type: Object built from the dict.
"""
assert isinstance(info, dict) and 'type' in info
assert isinstance(default_args, dict) or default_args is None
args = info.copy()
obj_type = args.pop('type')
if mmcv.is_str(obj_type):
if parent is not None:
obj_type = getattr(parent, obj_type)
else:
obj_type = sys.modules[obj_type]
elif not isinstance(obj_type, type):
raise TypeError('type must be a str or valid type, but got {}'.format(
type(obj_type)))
if default_args is not None:
for name, value in default_args.items():
args.setdefault(name, value)
return obj_type(**args)
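
# Usage sketch (assumes torch): build an SGD optimizer from a config dict,
# with default_args supplying the parameters to optimize.
import torch

model = torch.nn.Linear(4, 2)
optimizer = obj_from_dict(dict(type='SGD', lr=0.01), torch.optim,
                          default_args=dict(params=model.parameters()))
assert isinstance(optimizer, torch.optim.SGD)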
|
Read an image.
|
def imread(img_or_path, flag='color'):
"""Read an image.
Args:
img_or_path (ndarray or str): Either a numpy array or image path.
If it is a numpy array (loaded image), then it will be returned
as is.
flag (str): Flags specifying the color type of a loaded image,
candidates are `color`, `grayscale` and `unchanged`.
Returns:
ndarray: Loaded image array.
"""
if isinstance(img_or_path, np.ndarray):
return img_or_path
elif is_str(img_or_path):
flag = imread_flags[flag] if is_str(flag) else flag
check_file_exist(img_or_path,
'img file does not exist: {}'.format(img_or_path))
return cv2.imread(img_or_path, flag)
else:
raise TypeError('"img" must be a numpy array or a filename')
|
Read an image from bytes.
|
def imfrombytes(content, flag='color'):
"""Read an image from bytes.
Args:
content (bytes): Image bytes got from files or other streams.
flag (str): Same as :func:`imread`.
Returns:
ndarray: Loaded image array.
"""
img_np = np.frombuffer(content, np.uint8)
flag = imread_flags[flag] if is_str(flag) else flag
img = cv2.imdecode(img_np, flag)
return img
|
Write image to file
|
def imwrite(img, file_path, params=None, auto_mkdir=True):
"""Write image to file
Args:
img (ndarray): Image array to be written.
file_path (str): Image file path.
params (None or list): Same as opencv's :func:`imwrite` interface.
auto_mkdir (bool): If the parent folder of `file_path` does not exist,
whether to create it automatically.
Returns:
bool: Successful or not.
"""
if auto_mkdir:
dir_name = osp.abspath(osp.dirname(file_path))
mkdir_or_exist(dir_name)
return cv2.imwrite(file_path, img, params)
|
Convert a BGR image to grayscale image.
|
def bgr2gray(img, keepdim=False):
"""Convert a BGR image to grayscale image.
Args:
img (ndarray): The input image.
keepdim (bool): If False (by default), then return the grayscale image
with 2 dims, otherwise 3 dims.
Returns:
ndarray: The converted grayscale image.
"""
out_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
if keepdim:
out_img = out_img[..., None]
return out_img
|
Convert a grayscale image to BGR image.
|
def gray2bgr(img):
"""Convert a grayscale image to BGR image.
Args:
img (ndarray or str): The input image.
Returns:
ndarray: The converted BGR image.
"""
img = img[..., None] if img.ndim == 2 else img
out_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
return out_img
|
Cast elements of an iterable object into some type.
|
def iter_cast(inputs, dst_type, return_type=None):
"""Cast elements of an iterable object into some type.
Args:
inputs (Iterable): The input object.
dst_type (type): Destination type.
return_type (type, optional): If specified, the output object will be
converted to this type, otherwise an iterator.
Returns:
iterator or specified type: The converted object.
"""
if not isinstance(inputs, collections_abc.Iterable):
raise TypeError('inputs must be an iterable object')
if not isinstance(dst_type, type):
raise TypeError('"dst_type" must be a valid type')
out_iterable = six.moves.map(dst_type, inputs)
if return_type is None:
return out_iterable
else:
return return_type(out_iterable)
|
Check whether it is a sequence of some type.
|
def is_seq_of(seq, expected_type, seq_type=None):
"""Check whether it is a sequence of some type.
Args:
seq (Sequence): The sequence to be checked.
expected_type (type): Expected type of sequence items.
seq_type (type, optional): Expected sequence type.
Returns:
bool: Whether the sequence is valid.
"""
if seq_type is None:
exp_seq_type = collections_abc.Sequence
else:
assert isinstance(seq_type, type)
exp_seq_type = seq_type
if not isinstance(seq, exp_seq_type):
return False
for item in seq:
if not isinstance(item, expected_type):
return False
return True
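
# Usage sketch: a list of str passes; pinning seq_type to list rejects a
# tuple even when the element type matches.
assert is_seq_of(['a', 'b'], str)
assert not is_seq_of(('a', 'b'), str, seq_type=list)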
|
Slice a list into several sub lists by a list of given length.
|
def slice_list(in_list, lens):
"""Slice a list into several sub lists by a list of given length.
Args:
in_list (list): The list to be sliced.
        lens (list[int]): The expected length of each sub-list.
    Returns:
        list: A list of sliced lists.
"""
if not isinstance(lens, list):
raise TypeError('"indices" must be a list of integers')
elif sum(lens) != len(in_list):
raise ValueError(
'sum of lens and list length does not match: {} != {}'.format(
sum(lens), len(in_list)))
out_list = []
idx = 0
for i in range(len(lens)):
out_list.append(in_list[idx:idx + lens[i]])
idx += lens[i]
return out_list
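
# Usage sketch: lens must sum to the input length.
assert slice_list([1, 2, 3, 4, 5], [2, 3]) == [[1, 2], [3, 4, 5]]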
|
A decorator factory to check if prerequisites are satisfied.
|
def check_prerequisites(
prerequisites,
checker,
msg_tmpl='Prerequisites "{}" are required in method "{}" but not '
'found, please install them first.'):
"""A decorator factory to check if prerequisites are satisfied.
Args:
        prerequisites (str or list[str]): Prerequisites to be checked.
        checker (callable): The checker method that returns True if a
            prerequisite is met, False otherwise.
msg_tmpl (str): The message template with two variables.
Returns:
decorator: A specific decorator.
"""
def wrap(func):
@functools.wraps(func)
def wrapped_func(*args, **kwargs):
requirements = [prerequisites] if isinstance(
prerequisites, str) else prerequisites
missing = []
for item in requirements:
if not checker(item):
missing.append(item)
if missing:
print(msg_tmpl.format(', '.join(missing), func.__name__))
                raise RuntimeError('Prerequisites not met.')
else:
return func(*args, **kwargs)
return wrapped_func
return wrap
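
# Usage sketch (hypothetical checker): gate a function on importability.
def _importable(pkg):
    try:
        __import__(pkg)
        return True
    except ImportError:
        return False

@check_prerequisites('numpy', _importable)
def compute():
    return 42

compute()  # raises RuntimeError if numpy cannot be imported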
|
Average latest n values or all values
|
def average(self, n=0):
"""Average latest n values or all values"""
assert n >= 0
for key in self.val_history:
values = np.array(self.val_history[key][-n:])
nums = np.array(self.n_history[key][-n:])
avg = np.sum(values * nums) / np.sum(nums)
self.output[key] = avg
self.ready = True
|
Scatters tensor across multiple GPUs.
|
def scatter(input, devices, streams=None):
"""Scatters tensor across multiple GPUs.
"""
if streams is None:
streams = [None] * len(devices)
if isinstance(input, list):
chunk_size = (len(input) - 1) // len(devices) + 1
outputs = [
scatter(input[i], [devices[i // chunk_size]],
[streams[i // chunk_size]]) for i in range(len(input))
]
return outputs
elif isinstance(input, torch.Tensor):
output = input.contiguous()
# TODO: copy to a pinned buffer first (if copying from CPU)
stream = streams[0] if output.numel() > 0 else None
with torch.cuda.device(devices[0]), torch.cuda.stream(stream):
output = output.cuda(devices[0], non_blocking=True)
return output
else:
raise Exception('Unknown type {}.'.format(type(input)))
|
Convert various input to color tuples.
|
def color_val(color):
"""Convert various input to color tuples.
Args:
color (:obj:`Color`/str/tuple/int/ndarray): Color inputs
Returns:
tuple[int]: A tuple of 3 integers indicating BGR channels.
"""
if is_str(color):
return Color[color].value
elif isinstance(color, Color):
return color.value
elif isinstance(color, tuple):
assert len(color) == 3
for channel in color:
assert channel >= 0 and channel <= 255
return color
elif isinstance(color, int):
assert color >= 0 and color <= 255
return color, color, color
elif isinstance(color, np.ndarray):
assert color.ndim == 1 and color.size == 3
assert np.all((color >= 0) & (color <= 255))
color = color.astype(np.uint8)
return tuple(color)
else:
raise TypeError('Invalid type for color: {}'.format(type(color)))
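
# Usage sketch: every accepted input normalizes to a BGR tuple.
import numpy as np

assert color_val('green') == (0, 255, 0)
assert color_val(128) == (128, 128, 128)
assert color_val(np.array([255, 0, 0])) == (255, 0, 0)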
|
Add check points in a single line.
|
def check_time(timer_id):
"""Add check points in a single line.
This method is suitable for running a task on a list of items. A timer will
be registered when the method is called for the first time.
:Example:
>>> import time
>>> import mmcv
>>> for i in range(1, 6):
>>> # simulate a code block
>>> time.sleep(i)
>>> mmcv.check_time('task1')
2.000
3.000
4.000
5.000
Args:
timer_id (str): Timer identifier.
"""
if timer_id not in _g_timers:
_g_timers[timer_id] = Timer()
return 0
else:
return _g_timers[timer_id].since_last_check()
|
Start the timer.
|
def start(self):
"""Start the timer."""
if not self._is_running:
self._t_start = time()
self._is_running = True
self._t_last = time()
|
Total time since the timer is started.
|
def since_start(self):
"""Total time since the timer is started.
Returns (float): Time in seconds.
"""
if not self._is_running:
raise TimerError('timer is not running')
self._t_last = time()
return self._t_last - self._t_start
|
Time since the last checking.
|
def since_last_check(self):
"""Time since the last checking.
Either :func:`since_start` or :func:`since_last_check` is a checking
operation.
Returns (float): Time in seconds.
"""
if not self._is_running:
raise TimerError('timer is not running')
dur = time() - self._t_last
self._t_last = time()
return dur
|
Show optical flow.
|
def flowshow(flow, win_name='', wait_time=0):
"""Show optical flow.
Args:
flow (ndarray or str): The optical flow to be displayed.
win_name (str): The window name.
wait_time (int): Value of waitKey param.
"""
flow = flowread(flow)
flow_img = flow2rgb(flow)
imshow(rgb2bgr(flow_img), win_name, wait_time)
|
Convert flow map to RGB image.
|
def flow2rgb(flow, color_wheel=None, unknown_thr=1e6):
"""Convert flow map to RGB image.
Args:
flow (ndarray): Array of optical flow.
color_wheel (ndarray or None): Color wheel used to map flow field to
RGB colorspace. Default color wheel will be used if not specified.
        unknown_thr (float): Values above this threshold will be marked as
            unknown and thus ignored.
Returns:
ndarray: RGB image that can be visualized.
"""
assert flow.ndim == 3 and flow.shape[-1] == 2
if color_wheel is None:
color_wheel = make_color_wheel()
assert color_wheel.ndim == 2 and color_wheel.shape[1] == 3
num_bins = color_wheel.shape[0]
dx = flow[:, :, 0].copy()
dy = flow[:, :, 1].copy()
ignore_inds = (np.isnan(dx) | np.isnan(dy) | (np.abs(dx) > unknown_thr) |
(np.abs(dy) > unknown_thr))
dx[ignore_inds] = 0
dy[ignore_inds] = 0
rad = np.sqrt(dx**2 + dy**2)
if np.any(rad > np.finfo(float).eps):
max_rad = np.max(rad)
dx /= max_rad
dy /= max_rad
rad = np.sqrt(dx**2 + dy**2)
angle = np.arctan2(-dy, -dx) / np.pi
bin_real = (angle + 1) / 2 * (num_bins - 1)
bin_left = np.floor(bin_real).astype(int)
bin_right = (bin_left + 1) % num_bins
w = (bin_real - bin_left.astype(np.float32))[..., None]
flow_img = (
1 - w) * color_wheel[bin_left, :] + w * color_wheel[bin_right, :]
small_ind = rad <= 1
flow_img[small_ind] = 1 - rad[small_ind, None] * (1 - flow_img[small_ind])
flow_img[np.logical_not(small_ind)] *= 0.75
flow_img[ignore_inds, :] = 0
return flow_img
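
# Usage sketch: visualize a random flow field; the output is an (h, w, 3)
# RGB image with values in [0, 1].
import numpy as np

rgb = flow2rgb(np.random.randn(48, 64, 2).astype(np.float32))
assert rgb.shape == (48, 64, 3)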
|
Build a color wheel.
|
def make_color_wheel(bins=None):
"""Build a color wheel.
Args:
bins(list or tuple, optional): Specify the number of bins for each
color range, corresponding to six ranges: red -> yellow,
yellow -> green, green -> cyan, cyan -> blue, blue -> magenta,
magenta -> red. [15, 6, 4, 11, 13, 6] is used for default
(see Middlebury).
Returns:
ndarray: Color wheel of shape (total_bins, 3).
"""
if bins is None:
bins = [15, 6, 4, 11, 13, 6]
assert len(bins) == 6
RY, YG, GC, CB, BM, MR = tuple(bins)
ry = [1, np.arange(RY) / RY, 0]
yg = [1 - np.arange(YG) / YG, 1, 0]
gc = [0, 1, np.arange(GC) / GC]
cb = [0, 1 - np.arange(CB) / CB, 1]
bm = [np.arange(BM) / BM, 0, 1]
mr = [1, 0, 1 - np.arange(MR) / MR]
num_bins = RY + YG + GC + CB + BM + MR
color_wheel = np.zeros((3, num_bins), dtype=np.float32)
col = 0
for i, color in enumerate([ry, yg, gc, cb, bm, mr]):
for j in range(3):
color_wheel[j, col:col + bins[i]] = color[j]
col += bins[i]
return color_wheel.T
|
Computes the precision@k.
|
def accuracy(output, target, topk=(1, )):
"""Computes the precision@k for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
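
# Usage sketch: two samples where only the first top-1 prediction is
# correct, so top-1 accuracy is 50% and top-2 is 100%.
import torch

output = torch.tensor([[0.9, 0.1, 0.0], [0.4, 0.5, 0.1]])
target = torch.tensor([0, 0])
top1, top2 = accuracy(output, target, topk=(1, 2))
assert top1.item() == 50.0 and top2.item() == 100.0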
|
Scatter inputs to target gpus.
|
def scatter(inputs, target_gpus, dim=0):
"""Scatter inputs to target gpus.
The only difference from original :func:`scatter` is to add support for
:type:`~mmcv.parallel.DataContainer`.
"""
def scatter_map(obj):
if isinstance(obj, torch.Tensor):
return OrigScatter.apply(target_gpus, None, dim, obj)
if isinstance(obj, DataContainer):
if obj.cpu_only:
return obj.data
else:
return Scatter.forward(target_gpus, obj.data)
if isinstance(obj, tuple) and len(obj) > 0:
return list(zip(*map(scatter_map, obj)))
if isinstance(obj, list) and len(obj) > 0:
out = list(map(list, zip(*map(scatter_map, obj))))
return out
if isinstance(obj, dict) and len(obj) > 0:
out = list(map(type(obj), zip(*map(scatter_map, obj.items()))))
return out
return [obj for targets in target_gpus]
# After scatter_map is called, a scatter_map cell will exist. This cell
# has a reference to the actual function scatter_map, which has references
# to a closure that has a reference to the scatter_map cell (because the
# fn is recursive). To avoid this reference cycle, we set the function to
# None, clearing the cell
try:
return scatter_map(inputs)
finally:
scatter_map = None
|
Scatter with support for kwargs dictionary
|
def scatter_kwargs(inputs, kwargs, target_gpus, dim=0):
"""Scatter with support for kwargs dictionary"""
inputs = scatter(inputs, target_gpus, dim) if inputs else []
kwargs = scatter(kwargs, target_gpus, dim) if kwargs else []
if len(inputs) < len(kwargs):
inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
elif len(kwargs) < len(inputs):
kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
inputs = tuple(inputs)
kwargs = tuple(kwargs)
return inputs, kwargs
|
Fetch all the information by using aiohttp
|
async def fetch(self) -> Response:
"""Fetch all the information by using aiohttp"""
if self.request_config.get('DELAY', 0) > 0:
await asyncio.sleep(self.request_config['DELAY'])
timeout = self.request_config.get('TIMEOUT', 10)
try:
async with async_timeout.timeout(timeout):
resp = await self._make_request()
try:
resp_data = await resp.text(encoding=self.encoding)
except UnicodeDecodeError:
resp_data = await resp.read()
response = Response(
url=self.url,
method=self.method,
encoding=resp.get_encoding(),
html=resp_data,
metadata=self.metadata,
cookies=resp.cookies,
headers=resp.headers,
history=resp.history,
status=resp.status,
aws_json=resp.json,
aws_text=resp.text,
aws_read=resp.read)
# Retry middleware
aws_valid_response = self.request_config.get('VALID')
if aws_valid_response and iscoroutinefunction(aws_valid_response):
response = await aws_valid_response(response)
if response.ok:
return response
else:
return await self._retry(error_msg='request url failed!')
except asyncio.TimeoutError:
return await self._retry(error_msg='timeout')
except Exception as e:
return await self._retry(error_msg=e)
finally:
# Close client session
await self._close_request_session()
|
Define a decorator to be called before a request, e.g. @middleware.request.
|
def request(self, *args, **kwargs):
"""
        Define a decorator to be called before a request.
eg: @middleware.request
"""
middleware = args[0]
@wraps(middleware)
def register_middleware(*args, **kwargs):
self.request_middleware.append(middleware)
return middleware
return register_middleware()
|
Define a decorator to be called after a response, e.g. @middleware.response.
|
def response(self, *args, **kwargs):
"""
        Define a decorator to be called after a response.
eg: @middleware.response
"""
middleware = args[0]
@wraps(middleware)
def register_middleware(*args, **kwargs):
self.response_middleware.appendleft(middleware)
return middleware
return register_middleware()
|
Read and decodes JSON response.
|
async def json(self,
*,
encoding: str = None,
loads: JSONDecoder = DEFAULT_JSON_DECODER,
content_type: Optional[str] = 'application/json') -> Any:
"""Read and decodes JSON response."""
return await self._aws_json(
encoding=encoding, loads=loads, content_type=content_type)
|
Read response payload and decode.
|
async def text(self,
*,
encoding: Optional[str] = None,
errors: str = 'strict') -> str:
"""Read response payload and decode."""
return await self._aws_text(encoding=encoding, errors=errors)
|
Run a hook before/after the spider starts crawling.
|
async def _run_spider_hook(self, hook_func):
"""
        Run a hook before/after the spider starts crawling
:param hook_func: aws function
:return:
"""
if callable(hook_func):
try:
aws_hook_func = hook_func(weakref.proxy(self))
if isawaitable(aws_hook_func):
await aws_hook_func
except Exception as e:
self.logger.error(f'<Hook {hook_func.__name__}: {e}')
|
Run the corresponding processing for an invalid callback result.
|
async def process_callback_result(self, callback_result):
"""
Corresponding processing for the invalid callback result
        :param callback_result:
:return:
"""
callback_result_name = type(callback_result).__name__
process_func_name = self.callback_result_map.get(
callback_result_name, '')
process_func = getattr(self, process_func_name, None)
if process_func is not None:
await process_func(callback_result)
else:
raise InvalidCallbackResult(
f'<Parse invalid callback result type: {callback_result_name}>'
)
|
Start an async spider, with optional middleware and after_start/before_stop hooks.
|
async def async_start(
cls,
middleware: typing.Union[typing.Iterable, Middleware] = None,
loop=None,
after_start=None,
before_stop=None,
**kwargs):
"""
Start an async spider
:param middleware: customize middleware or a list of middleware
:param loop:
:param after_start: hook
:param before_stop: hook
:return:
"""
loop = loop or asyncio.get_event_loop()
spider_ins = cls(middleware=middleware, loop=loop, is_async_start=True)
await spider_ins._start(
after_start=after_start, before_stop=before_stop)
|
Start a spider, with optional middleware, event loop, and after_start/before_stop hooks.
|
def start(cls,
middleware: typing.Union[typing.Iterable, Middleware] = None,
loop=None,
after_start=None,
before_stop=None,
close_event_loop=True,
**kwargs):
"""
Start a spider
:param after_start: hook
:param before_stop: hook
:param middleware: customize middleware or a list of middleware
:param loop: event loop
:param close_event_loop: bool
:return:
"""
loop = loop or asyncio.new_event_loop()
spider_ins = cls(middleware=middleware, loop=loop)
# Actually start crawling
spider_ins.loop.run_until_complete(
spider_ins._start(
after_start=after_start, before_stop=before_stop)
)
spider_ins.loop.run_until_complete(
spider_ins.loop.shutdown_asyncgens())
if close_event_loop:
spider_ins.loop.close()
|
Process coroutine callback function
|
async def handle_callback(self, aws_callback: typing.Coroutine, response):
"""Process coroutine callback function"""
callback_result = None
try:
callback_result = await aws_callback
except NothingMatchedError as e:
self.logger.error(f'<Item: {str(e).lower()}>')
except Exception as e:
self.logger.error(f'<Callback[{aws_callback.__name__}]: {e}')
return callback_result, response
|
Wrap a request with middleware.
|
async def handle_request(self, request: Request
) -> typing.Tuple[AsyncGeneratorType, Response]:
"""
Wrap request with middleware.
:param request:
:return:
"""
callback_result, response = None, None
await self._run_request_middleware(request)
try:
callback_result, response = await request.fetch_callback(self.sem)
except NotImplementedParseError as e:
self.logger.error(e)
except NothingMatchedError as e:
self.logger.error(f'<Item: {str(e).lower()}>')
except Exception as e:
self.logger.error(f'<Callback[{request.callback.__name__}]: {e}')
await self._run_response_middleware(request, response)
await self._process_response(request=request, response=response)
return callback_result, response
|
For crawling multiple urls
|
async def multiple_request(self, urls, is_gather=False, **kwargs):
"""For crawling multiple urls"""
if is_gather:
resp_results = await asyncio.gather(
*[
self.handle_request(self.request(url=url, **kwargs))
for url in urls
],
return_exceptions=True)
for index, task_result in enumerate(resp_results):
if not isinstance(task_result, RuntimeError) and task_result:
_, response = task_result
response.index = index
yield response
else:
for index, url in enumerate(urls):
_, response = await self.handle_request(
self.request(url=url, **kwargs))
response.index = index
yield response
|
Init a Request class for crawling html
|
def request(self,
url: str,
method: str = 'GET',
*,
callback=None,
encoding: typing.Optional[str] = None,
headers: dict = None,
metadata: dict = None,
request_config: dict = None,
request_session=None,
**kwargs):
"""Init a Request class for crawling html"""
headers = headers or {}
metadata = metadata or {}
request_config = request_config or {}
request_session = request_session or self.request_session
headers.update(self.headers.copy())
request_config.update(self.request_config.copy())
kwargs.update(self.kwargs.copy())
return Request(
url=url,
method=method,
callback=callback,
encoding=encoding,
headers=headers,
metadata=metadata,
request_config=request_config,
request_session=request_session,
**kwargs)
|
Actually start crawling.
|
async def start_master(self):
"""Actually start crawling."""
for url in self.start_urls:
request_ins = self.request(
url=url, callback=self.parse, metadata=self.metadata)
self.request_queue.put_nowait(self.handle_request(request_ins))
workers = [
asyncio.ensure_future(self.start_worker())
for i in range(self.worker_numbers)
]
for worker in workers:
self.logger.info(f"Worker started: {id(worker)}")
await self.request_queue.join()
if not self.is_async_start:
await self.stop(SIGINT)
else:
await self._cancel_tasks()
|
Finish all running tasks, cancel remaining tasks, then stop the loop.
|
async def stop(self, _signal):
"""
Finish all running tasks, cancel remaining tasks, then stop loop.
:param _signal:
:return:
"""
self.logger.info(f'Stopping spider: {self.name}')
await self._cancel_tasks()
self.loop.stop()
|
If the match has a group dict, return it (even if it holds a single value); if there are groups, return the single group or a tuple of groups; with no groups, return the whole matched string.
|
def _parse_match(self, match):
"""
If there is a group dict, return the dict;
even if there's only one value in the dict, return a dictionary;
If there is a group in match, return the group;
if there is only one value in the group, return the value;
if there has no group, return the whole matched string;
if there are many groups, return a tuple;
:param match:
:return:
"""
if not match:
if self.default:
return self.default
else:
raise NothingMatchedError(
f"Extract `{self._re_select}` error, "
f"please check selector or set parameter named `default`")
else:
string = match.group()
groups = match.groups()
group_dict = match.groupdict()
if group_dict:
return group_dict
if groups:
return groups[0] if len(groups) == 1 else groups
return string
|
Get a motor db instance by database name.
|
def get_db(self, db='test'):
"""
Get a db instance
:param db: database name
:return: the motor db instance
"""
if db not in self._db:
self._db[db] = self.client(db)[db]
return self._db[db]
|
Ensures tasks have an action key and strings are converted to Python objects.
|
def normalize_task_v2(task):
'''Ensures tasks have an action key and strings are converted to python objects'''
result = dict()
mod_arg_parser = ModuleArgsParser(task)
try:
action, arguments, result['delegate_to'] = mod_arg_parser.parse()
except AnsibleParserError as e:
try:
task_info = "%s:%s" % (task[FILENAME_KEY], task[LINE_NUMBER_KEY])
del task[FILENAME_KEY]
del task[LINE_NUMBER_KEY]
except KeyError:
task_info = "Unknown"
try:
import pprint
pp = pprint.PrettyPrinter(indent=2)
task_pprint = pp.pformat(task)
except ImportError:
task_pprint = task
raise SystemExit("Couldn't parse task at %s (%s)\n%s" % (task_info, e.message, task_pprint))
# denormalize shell -> command conversion
if '_uses_shell' in arguments:
action = 'shell'
del(arguments['_uses_shell'])
for (k, v) in list(task.items()):
if k in ('action', 'local_action', 'args', 'delegate_to') or k == action:
# we don't want to re-assign these values, which were
# determined by the ModuleArgsParser() above
continue
else:
result[k] = v
result['action'] = dict(__ansible_module__=action)
if '_raw_params' in arguments:
result['action']['__ansible_arguments__'] = arguments['_raw_params'].split(' ')
del(arguments['_raw_params'])
else:
result['action']['__ansible_arguments__'] = list()
if 'argv' in arguments and not result['action']['__ansible_arguments__']:
result['action']['__ansible_arguments__'] = arguments['argv']
del(arguments['argv'])
result['action'].update(arguments)
return result
|
Parses YAML like ansible.utils.parse_yaml but with line numbers.
|
def parse_yaml_linenumbers(data, filename):
"""Parses yaml as ansible.utils.parse_yaml but with linenumbers.
The line numbers are stored in each node's LINE_NUMBER_KEY key.
"""
def compose_node(parent, index):
# the line number where the previous token has ended (plus empty lines)
line = loader.line
node = Composer.compose_node(loader, parent, index)
node.__line__ = line + 1
return node
def construct_mapping(node, deep=False):
if ANSIBLE_VERSION < 2:
mapping = Constructor.construct_mapping(loader, node, deep=deep)
else:
mapping = AnsibleConstructor.construct_mapping(loader, node, deep=deep)
if hasattr(node, '__line__'):
mapping[LINE_NUMBER_KEY] = node.__line__
else:
mapping[LINE_NUMBER_KEY] = mapping._line_number
mapping[FILENAME_KEY] = filename
return mapping
try:
if ANSIBLE_VERSION < 2:
loader = yaml.Loader(data)
else:
import inspect
kwargs = {}
if 'vault_password' in inspect.getargspec(AnsibleLoader.__init__).args:
kwargs['vault_password'] = DEFAULT_VAULT_PASSWORD
loader = AnsibleLoader(data, **kwargs)
loader.compose_node = compose_node
loader.construct_mapping = construct_mapping
data = loader.get_single_data()
except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:
raise SystemExit("Failed to parse YAML in %s: %s" % (filename, str(e)))
return data
|
Uses ruamel.yaml to parse comments, then adds a skipped_rules list to the task (or meta YAML block).
|
def append_skipped_rules(pyyaml_data, file_text, file_type):
""" Uses ruamel.yaml to parse comments then adds a
skipped_rules list to the task (or meta yaml block)
"""
yaml = ruamel.yaml.YAML()
ruamel_data = yaml.load(file_text)
if file_type in ('tasks', 'handlers'):
ruamel_tasks = ruamel_data
pyyaml_tasks = pyyaml_data
elif file_type == 'playbook':
try:
ruamel_tasks = []
pyyaml_tasks = []
for ruamel_play, pyyaml_play in zip(ruamel_data, pyyaml_data):
ruamel_tasks.extend(ruamel_play.get('tasks'))
pyyaml_tasks.extend(pyyaml_play.get('tasks'))
except (AttributeError, TypeError):
return pyyaml_data
elif file_type == 'meta':
if not isinstance(pyyaml_data, list):
return pyyaml_data
ruamel_tasks = [ruamel_data]
pyyaml_tasks = pyyaml_data
else:
return pyyaml_data
if len(ruamel_tasks) != len(pyyaml_tasks):
return pyyaml_data
for ruamel_task, pyyaml_task in zip(ruamel_tasks, pyyaml_tasks):
skipped_rules = _get_rule_skips_from_task(ruamel_task)
if skipped_rules:
pyyaml_task['skipped_rules'] = skipped_rules
return pyyaml_data
|
Helper method that compares two StoreItems and their e_tags, returning True if new_value should overwrite old_value and False otherwise.
|
def __should_write_changes(self, old_value: StoreItem, new_value: StoreItem) -> bool:
"""
Helper method that compares two StoreItems and their e_tags and returns True if the new_value should overwrite
the old_value. Otherwise returns False.
:param old_value:
:param new_value:
:return:
"""
# If old_value is none or if the new_value's e_tag is '*', then we return True
if old_value is None or (hasattr(new_value, 'e_tag') and new_value.e_tag == '*'):
return True
# If none of the above cases, we verify that e_tags exist on both arguments
elif hasattr(new_value, 'e_tag') and hasattr(old_value, 'e_tag'):
if new_value.e_tag is not None and old_value.e_tag is None:
return True
        # And then we compare the old and new e_tag values to decide if the new data will be written
if old_value.e_tag == new_value.e_tag or int(old_value.e_tag) <= int(new_value.e_tag):
return True
else:
return False
else:
return False
|
Called by the parent class to run the adapter's middleware set and call the passed-in callback() handler at the end of the chain.
|
async def run_middleware(self, context: TurnContext, callback: Callable=None):
"""
        Called by the parent class to run the adapter's middleware set and calls the passed-in `callback()` handler at
        the end of the chain.
:param context:
:param callback:
:return:
"""
return await self._middleware.receive_activity_with_status(context, callback)
|
Registers middleware plugin(s) with the bot or set.
|
def use(self, *middleware: Middleware):
"""
Registers middleware plugin(s) with the bot or set.
        :param middleware:
:return:
"""
for (idx, m) in enumerate(middleware):
if hasattr(m, 'on_process_request') and callable(m.on_process_request):
self._middleware.append(m)
return self
else:
raise TypeError('MiddlewareSet.use(): invalid middleware at index "%s" being added.' % idx)
|
Send information about the page viewed in the application (a web page, for instance).
|
def track_pageview(self, name: str, url:str, duration: int = 0, properties : Dict[str, object]=None,
measurements: Dict[str, object]=None) -> None:
"""
Send information about the page viewed in the application (a web page for instance).
:param name: the name of the page that was viewed.
:param url: the URL of the page that was viewed.
:param duration: the duration of the page view in milliseconds. (defaults to: 0)
:param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
:param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
"""
self._client.track_pageview(name, url, duration, properties, measurements)
|
Send information about a single exception that occurred in the application.
|
def track_exception(self, type_exception: type = None, value : Exception =None, tb : traceback =None,
properties: Dict[str, object]=None, measurements: Dict[str, object]=None) -> None:
"""
Send information about a single exception that occurred in the application.
:param type_exception: the type of the exception that was thrown.
:param value: the exception that the client wants to send.
:param tb: the traceback information as returned by :func:`sys.exc_info`.
:param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
:param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
"""
self._client.track_exception(type_exception, value, tb, properties, measurements)
|
Send information about a single event that has occurred in the context of the application.
|
def track_event(self, name: str, properties: Dict[str, object] = None,
measurements: Dict[str, object] = None) -> None:
"""
Send information about a single event that has occurred in the context of the application.
:param name: the data to associate to this event.
:param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
:param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
"""
self._client.track_event(name, properties, measurements)
|
Send information about a single metric data point that was captured for the application.
|
def track_metric(self, name: str, value: float, type: TelemetryDataPointType = None,
                 count: int = None, min: float = None, max: float = None, std_dev: float = None,
                 properties: Dict[str, object] = None) -> None:
"""
Send information about a single metric data point that was captured for the application.
:param name: The name of the metric that was captured.
:param value: The value of the metric that was captured.
        :param type: The type of the metric. (defaults to: TelemetryDataPointType.aggregation)
:param count: the number of metrics that were aggregated into this data point. (defaults to: None)
:param min: the minimum of all metrics collected that were aggregated into this data point. (defaults to: None)
:param max: the maximum of all metrics collected that were aggregated into this data point. (defaults to: None)
:param std_dev: the standard deviation of all metrics collected that were aggregated into this data point. (defaults to: None)
:param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
"""
self._client.track_metric(name, value, type, count, min, max, std_dev, properties)
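A sketch reporting an aggregated data point (hypothetical `client`; the aggregate values are illustrative):

client.track_metric('queue_length', value=12.5,
                    count=10, min=3, max=27, std_dev=6.1,
                    properties={'host': 'worker-1'})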
|
Sends a single trace statement. :param name: the trace statement. :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None) :param severity: the severity level of this trace, one of DEBUG, INFO, WARNING, ERROR, CRITICAL
|
def track_trace(self, name: str, properties: Dict[str, object] = None, severity=None):
"""
Sends a single trace statement.
        :param name: the trace statement.
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
        :param severity: the severity level of this trace, one of DEBUG, INFO, WARNING, ERROR, CRITICAL
"""
self._client.track_trace(name, properties, severity)
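A usage sketch (hypothetical `client`):

client.track_trace('Cache refresh completed',
                   properties={'cache': 'sessions'},
                   severity='INFO')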
|
Sends a single request that was captured for the application. :param name: The name for this request. All requests with the same name will be grouped together. :param url: The actual URL for this request (to show in individual request instances). :param success: True if the request ended in success, False otherwise. :param start_time: the start time of the request. The value should look the same as the one returned by :func:`datetime.isoformat()` (defaults to: None) :param duration: the number of milliseconds that this request lasted. (defaults to: None) :param response_code: the response code that this request returned. (defaults to: None) :param http_method: the HTTP method that triggered this request. (defaults to: None) :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None) :param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None) :param request_id: the id for this request. If None, a new uuid will be generated. (defaults to: None)
|
def track_request(self, name: str, url: str, success: bool, start_time: str = None,
                  duration: int = None, response_code: str = None, http_method: str = None,
                  properties: Dict[str, object] = None, measurements: Dict[str, object] = None,
                  request_id: str = None):
"""
Sends a single request that was captured for the application.
:param name: The name for this request. All requests with the same name will be grouped together.
:param url: The actual URL for this request (to show in individual request instances).
:param success: True if the request ended in success, False otherwise.
:param start_time: the start time of the request. The value should look the same as the one returned by :func:`datetime.isoformat()` (defaults to: None)
:param duration: the number of milliseconds that this request lasted. (defaults to: None)
:param response_code: the response code that this request returned. (defaults to: None)
:param http_method: the HTTP method that triggered this request. (defaults to: None)
:param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
:param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
:param request_id: the id for this request. If None, a new uuid will be generated. (defaults to: None)
"""
self._client.track_request(name, url, success, start_time, duration, response_code, http_method, properties,
measurements, request_id)
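A sketch that times a request and reports it (hypothetical `client`; the handler itself is elided):

from datetime import datetime

start = datetime.utcnow()
# ... handle the request ...
client.track_request('GET /api/items', 'https://example.com/api/items?page=2',
                     success=True, start_time=start.isoformat(),
                     duration=42, response_code='200', http_method='GET')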
|
Sends a single dependency telemetry that was captured for the application. :param name: the name of the command initiated with this dependency call. Low cardinality value. Examples are stored procedure name and URL path template. :param data: the command initiated by this dependency call. Examples are SQL statement and HTTP URL with all query parameters. :param type: the dependency type name. Low cardinality value for logical grouping of dependencies and interpretation of other fields like commandName and resultCode. Examples are SQL, Azure table, and HTTP. (defaults to: None) :param target: the target site of a dependency call. Examples are server name, host address. (defaults to: None) :param duration: the number of milliseconds that this dependency call lasted. (defaults to: None) :param success: true if the dependency call ended in success, false otherwise. (defaults to: None) :param result_code: the result code of a dependency call. Examples are SQL error code and HTTP status code. (defaults to: None) :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None) :param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None) :param dependency_id: the id for this dependency call. If None, a new uuid will be generated. (defaults to: None)
|
def track_dependency(self, name: str, data: str, type: str = None, target: str = None, duration: int = None,
                     success: bool = None, result_code: str = None, properties: Dict[str, object] = None,
                     measurements: Dict[str, object] = None, dependency_id: str = None):
"""
Sends a single dependency telemetry that was captured for the application.
:param name: the name of the command initiated with this dependency call. Low cardinality value. Examples are stored procedure name and URL path template.
:param data: the command initiated by this dependency call. Examples are SQL statement and HTTP URL with all query parameters.
        :param type: the dependency type name. Low cardinality value for logical grouping of dependencies and interpretation of other fields like commandName and resultCode. Examples are SQL, Azure table, and HTTP. (defaults to: None)
        :param target: the target site of a dependency call. Examples are server name, host address. (defaults to: None)
:param duration: the number of milliseconds that this dependency call lasted. (defaults to: None)
:param success: true if the dependency call ended in success, false otherwise. (defaults to: None)
:param result_code: the result code of a dependency call. Examples are SQL error code and HTTP status code. (defaults to: None)
:param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
:param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
        :param dependency_id: the id for this dependency call. If None, a new uuid will be generated. (defaults to: None)
"""
self._client.track_dependency(name, data, type, target, duration, success, result_code, properties,
measurements, dependency_id)
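A sketch reporting an outbound SQL call (hypothetical `client`; names and timings are illustrative):

client.track_dependency('get_user', 'SELECT * FROM users WHERE id=?',
                        type='SQL', target='users-db', duration=18,
                        success=True, result_code='0')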
|
Create a property definition and register it with this BotState. :param name: The name of the property. :return: If successful, the state property accessor created.
|
def create_property(self, name: str) -> StatePropertyAccessor:
        """
        Create a property definition and register it with this BotState.
        :param name: The name of the property.
        :return: If successful, the state property accessor created.
        """
        if not name:
            raise TypeError('BotState.create_property(): name cannot be None or empty.')
        return BotStatePropertyAccessor(self, name)
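A usage sketch, assuming `user_state` is an instance of a BotState subclass such as UserState; the accessor's `get` call is shown as a comment because its exact signature is an assumption:

profile_accessor = user_state.create_property('UserProfile')
# later, inside a turn handler (hypothetical usage):
# profile = await profile_accessor.get(turn_context, lambda: {})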
|
Reads in the current state object and caches it in the context object for this turn. :param turn_context: The context object for this turn. :param force: Optional. True to bypass the cache.
|
async def load(self, turn_context: TurnContext, force: bool = False) -> None:
        """
        Reads in the current state object and caches it in the context object for this turn.
        :param turn_context: The context object for this turn.
        :param force: Optional. True to bypass the cache.
        """
        if turn_context is None:
            raise TypeError('BotState.load(): turn_context cannot be None.')
        cached_state = turn_context.turn_state.get(self._context_service_key)
        storage_key = self.get_storage_key(turn_context)
        if force or not cached_state or not cached_state.state:
            items = await self._storage.read([storage_key])
            val = items.get(storage_key)
            turn_context.turn_state[self._context_service_key] = CachedBotState(val)
|
If it has changed, writes to storage the state object that is cached in the current context object for this turn. :param turn_context: The context object for this turn. :param force: Optional. True to save state to storage whether or not there are changes.
|
async def save_changes(self, turn_context: TurnContext, force: bool = False) -> None:
        """
        If it has changed, writes to storage the state object that is cached in the current context object for this turn.
        :param turn_context: The context object for this turn.
        :param force: Optional. True to save state to storage whether or not there are changes.
        """
        if turn_context is None:
            raise TypeError('BotState.save_changes(): turn_context cannot be None.')
        cached_state = turn_context.turn_state.get(self._context_service_key)
        if force or (cached_state is not None and cached_state.is_changed):
            storage_key = self.get_storage_key(turn_context)
            changes: Dict[str, object] = {storage_key: cached_state.state}
            await self._storage.write(changes)
            cached_state.hash = cached_state.compute_hash(cached_state.state)
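A sketch of the usual load/modify/save round trip inside a turn handler (`state` is a BotState instance; `turn_context` is supplied by the adapter):

async def on_turn(turn_context):
    await state.load(turn_context)
    await state.set_property_value(turn_context, 'greeted', True)
    await state.save_changes(turn_context)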
|
Clears any state currently stored in this state scope. Note that save_changes must be called in order for the cleared state to be persisted to the underlying store. :param turn_context: The context object for this turn. :return: None
|
async def clear_state(self, turn_context: TurnContext):
        """
        Clears any state currently stored in this state scope.
        Note that save_changes must be called in order for the cleared state to be persisted to the underlying store.
        :param turn_context: The context object for this turn.
        :return: None
        """
        if turn_context is None:
            raise TypeError('BotState.clear_state(): turn_context cannot be None.')
        # Explicitly setting the hash means is_changed will always be true, which forces a save.
        cache_value = CachedBotState()
        cache_value.hash = ''
        turn_context.turn_state[self._context_service_key] = cache_value
|
Delete any state currently stored in this state scope. :param turn_context: The context object for this turn. :return: None
|
async def delete(self, turn_context: TurnContext) -> None:
        """
        Delete any state currently stored in this state scope.
        :param turn_context: The context object for this turn.
        :return: None
        """
        if turn_context is None:
            raise TypeError('BotState.delete(): turn_context cannot be None.')
        turn_context.turn_state.pop(self._context_service_key)
        storage_key = self.get_storage_key(turn_context)
        await self._storage.delete({storage_key})
|
Sets the value of a property in the state cache in the turn context. :param turn_context: The context object for this turn. :param property_name: The name of the property to set. :param value: The value to set on the property. :return: None
|
async def set_property_value(self, turn_context: TurnContext, property_name: str, value: object) -> None:
        """
        Sets the value of a property in the state cache in the turn context.
        :param turn_context: The context object for this turn.
        :param property_name: The name of the property to set.
        :param value: The value to set on the property.
        :return: None
        """
        if turn_context is None:
            raise TypeError('BotState.set_property_value(): turn_context cannot be None.')
        if not property_name:
            raise TypeError('BotState.set_property_value(): property_name cannot be None.')
        cached_state = turn_context.turn_state.get(self._context_service_key)
        cached_state.state[property_name] = value
|
Continues a conversation with a user. This is often referred to as the bot's "Proactive Messaging" flow, as it lets the bot proactively send messages to a conversation or user it has already communicated with. Scenarios like sending notifications or coupons to a user are enabled by this method. :param reference: a ConversationReference for the conversation to continue. :param logic: the callback to run against the created TurnContext. :return:
|
async def continue_conversation(self, reference: ConversationReference, logic):
        """
        Continues a conversation with a user. This is often referred to as the bot's "Proactive Messaging"
        flow, as it lets the bot proactively send messages to a conversation or user it has already
        communicated with. Scenarios like sending notifications or coupons to a user are enabled by this
        method.
        :param reference: a ConversationReference for the conversation to continue.
        :param logic: the callback to run against the created TurnContext.
        :return:
        """
        request = TurnContext.apply_conversation_reference(Activity(), reference, is_incoming=True)
        context = self.create_context(request)
        return await self.run_middleware(context, logic)
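A proactive-messaging sketch, assuming `adapter` is an instance of this adapter and `saved_reference` is a ConversationReference captured on an earlier turn:

async def send_build_notification():
    async def notify(turn_context):
        await turn_context.send_activity('Your build has finished.')
    await adapter.continue_conversation(saved_reference, notify)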
|
Starts a new conversation with a user. This is typically used to Direct Message (DM) a member of a group. :param reference: a ConversationReference describing the bot, user, and channel server to use. :param logic: the callback to run against the created TurnContext. :return:
|
async def create_conversation(self, reference: ConversationReference, logic):
        """
        Starts a new conversation with a user. This is typically used to Direct Message (DM) a member
        of a group.
        :param reference: a ConversationReference describing the bot, user, and channel server to use.
        :param logic: the callback to run against the created TurnContext.
        :return:
        """
        if reference.service_url is None:
            raise TypeError('BotFrameworkAdapter.create_conversation(): reference.service_url cannot be None.')
        # Create the conversation on the channel server.
        parameters = ConversationParameters(bot=reference.bot)
        client = self.create_connector_client(reference.service_url)
        resource_response = await client.conversations.create_conversation(parameters)
        request = TurnContext.apply_conversation_reference(Activity(), reference, is_incoming=True)
        request.conversation = ConversationAccount(id=resource_response.id)
        if resource_response.service_url:
            request.service_url = resource_response.service_url
        context = self.create_context(request)
        return await self.run_middleware(context, logic)
|
Processes an activity received by the bot's web server. This includes any messages sent from a user and is the method that drives what's often referred to as the bot's "Reactive Messaging" flow. :param req: the incoming HTTP request, or an already-deserialized Activity. :param auth_header: the value of the HTTP `Authorization` header included with the request. :param logic: the callback to run against the created TurnContext. :return:
|
async def process_activity(self, req, auth_header: str, logic: Callable):
        """
        Processes an activity received by the bot's web server. This includes any messages sent from a
        user and is the method that drives what's often referred to as the bot's "Reactive Messaging"
        flow.
        :param req: the incoming HTTP request, or an already-deserialized Activity.
        :param auth_header: the value of the HTTP `Authorization` header included with the request.
        :param logic: the callback to run against the created TurnContext.
        :return:
        """
        activity = await self.parse_request(req)
        auth_header = auth_header or ''
        await self.authenticate_request(activity, auth_header)
        context = self.create_context(activity)
        return await self.run_middleware(context, logic)
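A reactive-messaging sketch wiring this into a web handler; the shape of `request` depends on whichever web framework serves the bot, so the `headers` access is an assumption:

async def messages_handler(request):
    auth_header = request.headers.get('Authorization', '')

    async def bot_logic(turn_context):
        await turn_context.send_activity('Echo: ' + (turn_context.activity.text or ''))

    await adapter.process_activity(request, auth_header, bot_logic)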
|
Allows for the overriding of authentication in unit tests. :param request: the incoming Activity to authenticate. :param auth_header: the value of the HTTP `Authorization` header included with the request. :return:
|
async def authenticate_request(self, request: Activity, auth_header: str):
        """
        Allows for the overriding of authentication in unit tests.
        :param request: the incoming Activity to authenticate.
        :param auth_header: the value of the HTTP `Authorization` header included with the request.
        :return:
        """
        await JwtTokenValidation.authenticate_request(request, auth_header, self._credential_provider)
|
Parses and validates the incoming request, returning it as an Activity. :param req: the incoming HTTP request, or an already-deserialized Activity. :return: the validated Activity.
|
@staticmethod
    async def parse_request(req):
        """
        Parses and validates the incoming request, returning it as an Activity.
        :param req: the incoming HTTP request, or an already-deserialized Activity.
        :return: the validated Activity.
        """
        async def validate_activity(activity: Activity):
            if not isinstance(activity.type, str):
                raise TypeError('BotFrameworkAdapter.parse_request(): invalid or missing activity type.')
            return True
        if not isinstance(req, Activity):
            # If the req is a raw HTTP Request, try to deserialize it into an Activity and return the Activity.
            if hasattr(req, 'body'):
                activity = Activity().deserialize(req.body)
                if await validate_activity(activity):
                    return activity
            elif 'body' in req:
                activity = Activity().deserialize(req['body'])
                if await validate_activity(activity):
                    return activity
            else:
                raise TypeError('BotFrameworkAdapter.parse_request(): received invalid request')
        else:
            # The `req` has already been deserialized to an Activity, so verify the Activity.type and return it.
            if await validate_activity(req):
                return req
|
Replaces an activity that was previously sent to a channel. It should be noted that not all channels support this feature. :param context: the TurnContext for the current turn. :param activity: the updated Activity; its `id` identifies the activity to replace. :return:
|
async def update_activity(self, context: TurnContext, activity: Activity):
        """
        Replaces an activity that was previously sent to a channel. It should be noted that not all
        channels support this feature.
        :param context: the TurnContext for the current turn.
        :param activity: the updated Activity; its `id` identifies the activity to replace.
        :return:
        """
        client = self.create_connector_client(activity.service_url)
        return await client.conversations.update_activity(
            activity.conversation.id,
            activity.id,
            activity)
|
Deletes an activity that was previously sent to a channel. It should be noted that not all channels support this feature. :param context: the TurnContext for the current turn. :param conversation_reference: a ConversationReference identifying the activity to delete. :return:
|
async def delete_activity(self, context: TurnContext, conversation_reference: ConversationReference):
        """
        Deletes an activity that was previously sent to a channel. It should be noted that not all
        channels support this feature.
        :param context: the TurnContext for the current turn.
        :param conversation_reference: a ConversationReference identifying the activity to delete.
        :return:
        """
        client = self.create_connector_client(conversation_reference.service_url)
        await client.conversations.delete_activity(conversation_reference.conversation.id,
                                                   conversation_reference.activity_id)
|
Deletes a member from the current conversation. :param context: the TurnContext for the current turn. :param member_id: the id of the member to remove from the conversation. :return:
|
async def delete_conversation_member(self, context: TurnContext, member_id: str) -> None:
        """
        Deletes a member from the current conversation.
        :param context: the TurnContext for the current turn.
        :param member_id: the id of the member to remove from the conversation.
        :return:
        """
        if not context.activity.service_url:
            raise TypeError('BotFrameworkAdapter.delete_conversation_member(): missing service_url')
        if not context.activity.conversation or not context.activity.conversation.id:
            raise TypeError('BotFrameworkAdapter.delete_conversation_member(): missing conversation or '
                            'conversation.id')
        service_url = context.activity.service_url
        conversation_id = context.activity.conversation.id
        client = self.create_connector_client(service_url)
        return await client.conversations.delete_conversation_member(conversation_id, member_id)
|
Lists the members of a given activity. :param context: the TurnContext for the current turn. :param activity_id: the id of the activity; defaults to `context.activity.id` when not supplied. :return:
|
async def get_activity_members(self, context: TurnContext, activity_id: str):
        """
        Lists the members of a given activity.
        :param context: the TurnContext for the current turn.
        :param activity_id: the id of the activity; defaults to `context.activity.id` when not supplied.
        :return:
        """
        if not activity_id:
            activity_id = context.activity.id
        if not context.activity.service_url:
            raise TypeError('BotFrameworkAdapter.get_activity_members(): missing service_url')
        if not context.activity.conversation or not context.activity.conversation.id:
            raise TypeError('BotFrameworkAdapter.get_activity_members(): missing conversation or conversation.id')
        if not activity_id:
            raise TypeError('BotFrameworkAdapter.get_activity_members(): missing both activity_id and '
                            'context.activity.id')
        service_url = context.activity.service_url
        conversation_id = context.activity.conversation.id
        client = self.create_connector_client(service_url)
        return await client.conversations.get_activity_members(conversation_id, activity_id)
|
Lists the members of the current conversation. :param context: the TurnContext for the current turn. :return:
|
async def get_conversation_members(self, context: TurnContext):
        """
        Lists the members of the current conversation.
        :param context: the TurnContext for the current turn.
        :return:
        """
        if not context.activity.service_url:
            raise TypeError('BotFrameworkAdapter.get_conversation_members(): missing service_url')
        if not context.activity.conversation or not context.activity.conversation.id:
            raise TypeError('BotFrameworkAdapter.get_conversation_members(): missing conversation or '
                            'conversation.id')
        service_url = context.activity.service_url
        conversation_id = context.activity.conversation.id
        client = self.create_connector_client(service_url)
        return await client.conversations.get_conversation_members(conversation_id)
|
Lists the Conversations in which this bot has participated for a given channel server. The channel server returns results in pages, and each page will include a `continuationToken` that can be used to fetch the next page of results from the server. :param service_url: the URL of the channel server to query. :param continuation_token: the continuation token from a previous page of results. (defaults to: None) :return:
|
async def get_conversations(self, service_url: str, continuation_token: str = None):
        """
        Lists the Conversations in which this bot has participated for a given channel server. The channel server
        returns results in pages, and each page will include a `continuationToken` that can be used to fetch the next
        page of results from the server.
        :param service_url: the URL of the channel server to query.
        :param continuation_token: the continuation token from a previous page of results. (defaults to: None)
        :return:
        """
        client = self.create_connector_client(service_url)
        return await client.conversations.get_conversations(continuation_token)
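A paging sketch that follows the continuation token until the server reports no more pages; it assumes `adapter` is an instance of this adapter and that the result exposes `conversations` and `continuation_token` attributes per the connector's ConversationsResult model:

async def list_all_conversations(service_url):
    token = None
    while True:
        result = await adapter.get_conversations(service_url, token)
        for conversation in result.conversations:
            print(conversation.id)
        token = result.continuation_token
        if not token:
            break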
|
Allows for mocking of the connector client in unit tests. :param service_url: the service URL the client should send requests to. :return: a ConnectorClient configured with this adapter's credentials.
|
def create_connector_client(self, service_url: str) -> ConnectorClient:
        """
        Allows for mocking of the connector client in unit tests.
        :param service_url: the service URL the client should send requests to.
        :return: a ConnectorClient configured with this adapter's credentials.
        """
        client = ConnectorClient(self._credentials, base_url=service_url)
        client.config.add_user_agent(USER_AGENT)
        return client
|
Adds a dialog to the component dialog. Adding a new dialog will inherit the BotTelemetryClient of the ComponentDialog. :param dialog: The dialog to add. :return: The updated ComponentDialog
|
def add_dialog(self, dialog: Dialog) -> object:
"""
Adds a dialog to the component dialog.
Adding a new dialog will inherit the BotTelemetryClient of the ComponentDialog.
:param dialog: The dialog to add.
:return: The updated ComponentDialog
"""
self._dialogs.add(dialog)
if not self.initial_dialog_id:
self.initial_dialog_id = dialog.id
return self
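A usage sketch from inside a ComponentDialog subclass's __init__; TextPrompt and WaterfallDialog are standard botbuilder dialog types, and the ids and step methods are placeholders:

self.add_dialog(TextPrompt('text_prompt'))
self.add_dialog(WaterfallDialog('main_flow', [self.first_step, self.second_step]))
# initial_dialog_id was set to 'text_prompt' by the first call above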
|
Authenticates the request and sets the service url in the set of trusted urls. :param activity: The incoming Activity from the Bot Framework or the Emulator :type activity: ~botframework.connector.models.Activity :param auth_header: The Bearer token included as part of the request :type auth_header: str :param credentials: The set of valid credentials, such as the Bot Application ID :type credentials: CredentialProvider
|
@staticmethod
    async def authenticate_request(activity: Activity, auth_header: str,
                                   credentials: CredentialProvider) -> ClaimsIdentity:
"""Authenticates the request and sets the service url in the set of trusted urls.
:param activity: The incoming Activity from the Bot Framework or the Emulator
:type activity: ~botframework.connector.models.Activity
:param auth_header: The Bearer token included as part of the request
:type auth_header: str
:param credentials: The set of valid credentials, such as the Bot Application ID
:type credentials: CredentialProvider
:raises Exception:
"""
if not auth_header:
# No auth header was sent. We might be on the anonymous code path.
is_auth_disabled = await credentials.is_authentication_disabled()
if is_auth_disabled:
# We are on the anonymous code path.
return
# No Auth Header. Auth is required. Request is not authorized.
raise Exception('Unauthorized Access. Request is not authorized')
claims_identity = await JwtTokenValidation.validate_auth_header(auth_header, credentials, activity.channel_id, activity.service_url)
# On the standard Auth path, we need to trust the URL that was incoming.
MicrosoftAppCredentials.trust_service_url(activity.service_url)
return claims_identity
|
Return distribution full name with - replaced with _
|
@property
    def wheel_dist_name(self):
        """Return distribution full name with - replaced with _"""
        return '-'.join((safer_name(self.distribution.get_name()),
                         safer_version(self.distribution.get_version())))
|
Return archive name without extension
|
def get_archive_basename(self):
"""Return archive name without extension"""
impl_tag, abi_tag, plat_tag = self.get_tag()
archive_basename = "%s-%s-%s-%s" % (
self.wheel_dist_name,
impl_tag,
abi_tag,
plat_tag)
return archive_basename
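To make the naming concrete, a small sketch with illustrative values (real tags come from self.get_tag()):

wheel_dist_name = 'my_pkg-1.0'  # safer_name('my-pkg') joined with safer_version('1.0')
impl_tag, abi_tag, plat_tag = 'cp37', 'cp37m', 'linux_x86_64'
print("%s-%s-%s-%s" % (wheel_dist_name, impl_tag, abi_tag, plat_tag))
# -> my_pkg-1.0-cp37-cp37m-linux_x86_64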
|
Generate requirements from setup.cfg as ('Requires-Dist', 'requirement; qualifier') tuples. From a metadata section in setup.cfg:
|
def setupcfg_requirements(self):
"""Generate requirements from setup.cfg as
('Requires-Dist', 'requirement; qualifier') tuples. From a metadata
section in setup.cfg:
[metadata]
provides-extra = extra1
extra2
requires-dist = requirement; qualifier
another; qualifier2
unqualified
Yields
('Provides-Extra', 'extra1'),
('Provides-Extra', 'extra2'),
('Requires-Dist', 'requirement; qualifier'),
('Requires-Dist', 'another; qualifier2'),
('Requires-Dist', 'unqualified')
"""
metadata = self.distribution.get_option_dict('metadata')
# our .ini parser folds - to _ in key names:
for key, title in (('provides_extra', 'Provides-Extra'),
('requires_dist', 'Requires-Dist')):
            if key not in metadata:
continue
field = metadata[key]
for line in field[1].splitlines():
line = line.strip()
if not line:
continue
yield (title, line)
|