| partition (stringclasses, 3 values) | func_name (stringlengths 1-134) | docstring (stringlengths 1-46.9k) | path (stringlengths 4-223) | original_string (stringlengths 75-104k) | code (stringlengths 75-104k; identical to original_string in the rows shown) | docstring_tokens (listlengths 1-1.97k) | repo (stringlengths 7-55) | language (stringclasses, 1 value) | url (stringlengths 87-315) | code_tokens (listlengths 19-28.4k) | sha (stringlengths 40) |
|---|---|---|---|---|---|---|---|---|---|---|---|
test
|
variational_dropout
|
Dropout with the same drop mask for all fixed_mask_dims
Args:
units: a tensor, usually of shape [B x T x F], where
B - batch size
T - tokens dimension
F - feature dimension
keep_prob: keep probability
fixed_mask_dims: dimensions along which the dropout mask is shared
Returns:
dropped units tensor
|
deeppavlov/core/layers/tf_layers.py
|
def variational_dropout(units, keep_prob, fixed_mask_dims=(1,)):
""" Dropout with the same drop mask for all fixed_mask_dims
Args:
units: a tensor, usually of shape [B x T x F], where
B - batch size
T - tokens dimension
F - feature dimension
keep_prob: keep probability
fixed_mask_dims: dimensions along which the dropout mask is shared
Returns:
dropped units tensor
"""
units_shape = tf.shape(units)
noise_shape = [units_shape[n] for n in range(len(units.shape))]
for dim in fixed_mask_dims:
noise_shape[dim] = 1
return tf.nn.dropout(units, keep_prob, noise_shape)
|
[
"Dropout",
"with",
"the",
"same",
"drop",
"mask",
"for",
"all",
"fixed_mask_dims"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/layers/tf_layers.py#L930-L948
|
[
"def",
"variational_dropout",
"(",
"units",
",",
"keep_prob",
",",
"fixed_mask_dims",
"=",
"(",
"1",
",",
")",
")",
":",
"units_shape",
"=",
"tf",
".",
"shape",
"(",
"units",
")",
"noise_shape",
"=",
"[",
"units_shape",
"[",
"n",
"]",
"for",
"n",
"in",
"range",
"(",
"len",
"(",
"units",
".",
"shape",
")",
")",
"]",
"for",
"dim",
"in",
"fixed_mask_dims",
":",
"noise_shape",
"[",
"dim",
"]",
"=",
"1",
"return",
"tf",
".",
"nn",
".",
"dropout",
"(",
"units",
",",
"keep_prob",
",",
"noise_shape",
")"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
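A minimal usage sketch for `variational_dropout` (assumes TensorFlow 1.x, where `tf.nn.dropout` takes a `keep_prob` argument, and that the function is importable from the path above; the tensor sizes are illustrative):

```python
import tensorflow as tf
from deeppavlov.core.layers.tf_layers import variational_dropout

units = tf.random_normal([32, 20, 128])  # [B, T, F], illustrative sizes
# The mask is broadcast over dim 1 (tokens), so a dropped feature stays
# dropped for every token in a sequence.
dropped = variational_dropout(units, keep_prob=0.7, fixed_mask_dims=(1,))
```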
test
|
CharacterTagger.build
|
Builds the network using Keras.
|
deeppavlov/models/morpho_tagger/network.py
|
def build(self):
"""Builds the network using Keras.
"""
word_inputs = kl.Input(shape=(None, MAX_WORD_LENGTH+2), dtype="int32")
inputs = [word_inputs]
word_outputs = self._build_word_cnn(word_inputs)
if len(self.word_vectorizers) > 0:
additional_word_inputs = [kl.Input(shape=(None, input_dim), dtype="float32")
for input_dim, dense_dim in self.word_vectorizers]
inputs.extend(additional_word_inputs)
additional_word_embeddings = [kl.Dense(dense_dim)(additional_word_inputs[i])
for i, (_, dense_dim) in enumerate(self.word_vectorizers)]
word_outputs = kl.Concatenate()([word_outputs] + additional_word_embeddings)
outputs, lstm_outputs = self._build_basic_network(word_outputs)
compile_args = {"optimizer": ko.nadam(lr=0.002, clipnorm=5.0),
"loss": "categorical_crossentropy", "metrics": ["accuracy"]}
self.model_ = Model(inputs, outputs)
self.model_.compile(**compile_args)
if self.verbose > 0:
self.model_.summary(print_fn=log.info)
return self
|
[
"Builds",
"the",
"network",
"using",
"Keras",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/morpho_tagger/network.py#L140-L160
|
[
"def",
"build",
"(",
"self",
")",
":",
"word_inputs",
"=",
"kl",
".",
"Input",
"(",
"shape",
"=",
"(",
"None",
",",
"MAX_WORD_LENGTH",
"+",
"2",
")",
",",
"dtype",
"=",
"\"int32\"",
")",
"inputs",
"=",
"[",
"word_inputs",
"]",
"word_outputs",
"=",
"self",
".",
"_build_word_cnn",
"(",
"word_inputs",
")",
"if",
"len",
"(",
"self",
".",
"word_vectorizers",
")",
">",
"0",
":",
"additional_word_inputs",
"=",
"[",
"kl",
".",
"Input",
"(",
"shape",
"=",
"(",
"None",
",",
"input_dim",
")",
",",
"dtype",
"=",
"\"float32\"",
")",
"for",
"input_dim",
",",
"dense_dim",
"in",
"self",
".",
"word_vectorizers",
"]",
"inputs",
".",
"extend",
"(",
"additional_word_inputs",
")",
"additional_word_embeddings",
"=",
"[",
"kl",
".",
"Dense",
"(",
"dense_dim",
")",
"(",
"additional_word_inputs",
"[",
"i",
"]",
")",
"for",
"i",
",",
"(",
"_",
",",
"dense_dim",
")",
"in",
"enumerate",
"(",
"self",
".",
"word_vectorizers",
")",
"]",
"word_outputs",
"=",
"kl",
".",
"Concatenate",
"(",
")",
"(",
"[",
"word_outputs",
"]",
"+",
"additional_word_embeddings",
")",
"outputs",
",",
"lstm_outputs",
"=",
"self",
".",
"_build_basic_network",
"(",
"word_outputs",
")",
"compile_args",
"=",
"{",
"\"optimizer\"",
":",
"ko",
".",
"nadam",
"(",
"lr",
"=",
"0.002",
",",
"clipnorm",
"=",
"5.0",
")",
",",
"\"loss\"",
":",
"\"categorical_crossentropy\"",
",",
"\"metrics\"",
":",
"[",
"\"accuracy\"",
"]",
"}",
"self",
".",
"model_",
"=",
"Model",
"(",
"inputs",
",",
"outputs",
")",
"self",
".",
"model_",
".",
"compile",
"(",
"*",
"*",
"compile_args",
")",
"if",
"self",
".",
"verbose",
">",
"0",
":",
"self",
".",
"model_",
".",
"summary",
"(",
"print_fn",
"=",
"log",
".",
"info",
")",
"return",
"self"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
test
|
CharacterTagger._build_word_cnn
|
Builds the word-level network.
|
deeppavlov/models/morpho_tagger/network.py
|
def _build_word_cnn(self, inputs):
"""Builds word-level network
"""
inputs = kl.Lambda(kb.one_hot, arguments={"num_classes": self.symbols_number_},
output_shape=lambda x: tuple(x) + (self.symbols_number_,))(inputs)
char_embeddings = kl.Dense(self.char_embeddings_size, use_bias=False)(inputs)
conv_outputs = []
self.char_output_dim_ = 0
for window_size, filters_number in zip(self.char_window_size, self.char_filters):
curr_output = char_embeddings
curr_filters_number = (min(self.char_filter_multiple * window_size, 200)
if filters_number is None else filters_number)
for _ in range(self.char_conv_layers - 1):
curr_output = kl.Conv2D(curr_filters_number, (1, window_size),
padding="same", activation="relu",
data_format="channels_last")(curr_output)
if self.conv_dropout > 0.0:
curr_output = kl.Dropout(self.conv_dropout)(curr_output)
curr_output = kl.Conv2D(curr_filters_number, (1, window_size),
padding="same", activation="relu",
data_format="channels_last")(curr_output)
conv_outputs.append(curr_output)
self.char_output_dim_ += curr_filters_number
if len(conv_outputs) > 1:
conv_output = kl.Concatenate(axis=-1)(conv_outputs)
else:
conv_output = conv_outputs[0]
highway_input = kl.Lambda(kb.max, arguments={"axis": -2})(conv_output)
if self.intermediate_dropout > 0.0:
highway_input = kl.Dropout(self.intermediate_dropout)(highway_input)
for i in range(self.char_highway_layers - 1):
highway_input = Highway(activation="relu")(highway_input)
if self.highway_dropout > 0.0:
highway_input = kl.Dropout(self.highway_dropout)(highway_input)
highway_output = Highway(activation="relu")(highway_input)
return highway_output
|
[
"Builds",
"word",
"-",
"level",
"network"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/morpho_tagger/network.py#L162-L197
|
[
"def",
"_build_word_cnn",
"(",
"self",
",",
"inputs",
")",
":",
"inputs",
"=",
"kl",
".",
"Lambda",
"(",
"kb",
".",
"one_hot",
",",
"arguments",
"=",
"{",
"\"num_classes\"",
":",
"self",
".",
"symbols_number_",
"}",
",",
"output_shape",
"=",
"lambda",
"x",
":",
"tuple",
"(",
"x",
")",
"+",
"(",
"self",
".",
"symbols_number_",
",",
")",
")",
"(",
"inputs",
")",
"char_embeddings",
"=",
"kl",
".",
"Dense",
"(",
"self",
".",
"char_embeddings_size",
",",
"use_bias",
"=",
"False",
")",
"(",
"inputs",
")",
"conv_outputs",
"=",
"[",
"]",
"self",
".",
"char_output_dim_",
"=",
"0",
"for",
"window_size",
",",
"filters_number",
"in",
"zip",
"(",
"self",
".",
"char_window_size",
",",
"self",
".",
"char_filters",
")",
":",
"curr_output",
"=",
"char_embeddings",
"curr_filters_number",
"=",
"(",
"min",
"(",
"self",
".",
"char_filter_multiple",
"*",
"window_size",
",",
"200",
")",
"if",
"filters_number",
"is",
"None",
"else",
"filters_number",
")",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"char_conv_layers",
"-",
"1",
")",
":",
"curr_output",
"=",
"kl",
".",
"Conv2D",
"(",
"curr_filters_number",
",",
"(",
"1",
",",
"window_size",
")",
",",
"padding",
"=",
"\"same\"",
",",
"activation",
"=",
"\"relu\"",
",",
"data_format",
"=",
"\"channels_last\"",
")",
"(",
"curr_output",
")",
"if",
"self",
".",
"conv_dropout",
">",
"0.0",
":",
"curr_output",
"=",
"kl",
".",
"Dropout",
"(",
"self",
".",
"conv_dropout",
")",
"(",
"curr_output",
")",
"curr_output",
"=",
"kl",
".",
"Conv2D",
"(",
"curr_filters_number",
",",
"(",
"1",
",",
"window_size",
")",
",",
"padding",
"=",
"\"same\"",
",",
"activation",
"=",
"\"relu\"",
",",
"data_format",
"=",
"\"channels_last\"",
")",
"(",
"curr_output",
")",
"conv_outputs",
".",
"append",
"(",
"curr_output",
")",
"self",
".",
"char_output_dim_",
"+=",
"curr_filters_number",
"if",
"len",
"(",
"conv_outputs",
")",
">",
"1",
":",
"conv_output",
"=",
"kl",
".",
"Concatenate",
"(",
"axis",
"=",
"-",
"1",
")",
"(",
"conv_outputs",
")",
"else",
":",
"conv_output",
"=",
"conv_outputs",
"[",
"0",
"]",
"highway_input",
"=",
"kl",
".",
"Lambda",
"(",
"kb",
".",
"max",
",",
"arguments",
"=",
"{",
"\"axis\"",
":",
"-",
"2",
"}",
")",
"(",
"conv_output",
")",
"if",
"self",
".",
"intermediate_dropout",
">",
"0.0",
":",
"highway_input",
"=",
"kl",
".",
"Dropout",
"(",
"self",
".",
"intermediate_dropout",
")",
"(",
"highway_input",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"char_highway_layers",
"-",
"1",
")",
":",
"highway_input",
"=",
"Highway",
"(",
"activation",
"=",
"\"relu\"",
")",
"(",
"highway_input",
")",
"if",
"self",
".",
"highway_dropout",
">",
"0.0",
":",
"highway_input",
"=",
"kl",
".",
"Dropout",
"(",
"self",
".",
"highway_dropout",
")",
"(",
"highway_input",
")",
"highway_output",
"=",
"Highway",
"(",
"activation",
"=",
"\"relu\"",
")",
"(",
"highway_input",
")",
"return",
"highway_output"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
test
|
CharacterTagger._build_basic_network
|
Creates the basic network architecture,
transforming word embeddings to intermediate outputs
|
deeppavlov/models/morpho_tagger/network.py
|
def _build_basic_network(self, word_outputs):
"""
Creates the basic network architecture,
transforming word embeddings to intermediate outputs
"""
if self.word_dropout > 0.0:
lstm_outputs = kl.Dropout(self.word_dropout)(word_outputs)
else:
lstm_outputs = word_outputs
for j in range(self.word_lstm_layers-1):
lstm_outputs = kl.Bidirectional(
kl.LSTM(self.word_lstm_units[j], return_sequences=True,
dropout=self.lstm_dropout))(lstm_outputs)
lstm_outputs = kl.Bidirectional(
kl.LSTM(self.word_lstm_units[-1], return_sequences=True,
dropout=self.lstm_dropout))(lstm_outputs)
pre_outputs = kl.TimeDistributed(
kl.Dense(self.tags_number_, activation="softmax",
activity_regularizer=self.regularizer),
name="p")(lstm_outputs)
return pre_outputs, lstm_outputs
|
[
"Creates",
"the",
"basic",
"network",
"architecture",
"transforming",
"word",
"embeddings",
"to",
"intermediate",
"outputs"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/morpho_tagger/network.py#L199-L219
|
[
"def",
"_build_basic_network",
"(",
"self",
",",
"word_outputs",
")",
":",
"if",
"self",
".",
"word_dropout",
">",
"0.0",
":",
"lstm_outputs",
"=",
"kl",
".",
"Dropout",
"(",
"self",
".",
"word_dropout",
")",
"(",
"word_outputs",
")",
"else",
":",
"lstm_outputs",
"=",
"word_outputs",
"for",
"j",
"in",
"range",
"(",
"self",
".",
"word_lstm_layers",
"-",
"1",
")",
":",
"lstm_outputs",
"=",
"kl",
".",
"Bidirectional",
"(",
"kl",
".",
"LSTM",
"(",
"self",
".",
"word_lstm_units",
"[",
"j",
"]",
",",
"return_sequences",
"=",
"True",
",",
"dropout",
"=",
"self",
".",
"lstm_dropout",
")",
")",
"(",
"lstm_outputs",
")",
"lstm_outputs",
"=",
"kl",
".",
"Bidirectional",
"(",
"kl",
".",
"LSTM",
"(",
"self",
".",
"word_lstm_units",
"[",
"-",
"1",
"]",
",",
"return_sequences",
"=",
"True",
",",
"dropout",
"=",
"self",
".",
"lstm_dropout",
")",
")",
"(",
"lstm_outputs",
")",
"pre_outputs",
"=",
"kl",
".",
"TimeDistributed",
"(",
"kl",
".",
"Dense",
"(",
"self",
".",
"tags_number_",
",",
"activation",
"=",
"\"softmax\"",
",",
"activity_regularizer",
"=",
"self",
".",
"regularizer",
")",
",",
"name",
"=",
"\"p\"",
")",
"(",
"lstm_outputs",
")",
"return",
"pre_outputs",
",",
"lstm_outputs"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
test
|
CharacterTagger.train_on_batch
|
Trains the model on a single batch
Args:
data: a batch of word sequences
labels: a batch of correct tag sequences
Returns:
None
|
deeppavlov/models/morpho_tagger/network.py
|
def train_on_batch(self, data: List[Iterable], labels: Iterable[list]) -> None:
"""Trains model on a single batch
Args:
data: a batch of word sequences
labels: a batch of correct tag sequences
Returns:
None
"""
X, Y = self._transform_batch(data, labels)
self.model_.train_on_batch(X, Y)
|
[
"Trains",
"model",
"on",
"a",
"single",
"batch"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/morpho_tagger/network.py#L234-L244
|
[
"def",
"train_on_batch",
"(",
"self",
",",
"data",
":",
"List",
"[",
"Iterable",
"]",
",",
"labels",
":",
"Iterable",
"[",
"list",
"]",
")",
"->",
"None",
":",
"X",
",",
"Y",
"=",
"self",
".",
"_transform_batch",
"(",
"data",
",",
"labels",
")",
"self",
".",
"model_",
".",
"train_on_batch",
"(",
"X",
",",
"Y",
")"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
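A hypothetical call sketch for `train_on_batch` (the `tagger` instance and the batch contents are illustrative; real batches come from a DeepPavlov data iterator):

```python
# One input stream (tokenized sentences) and the gold tag sequences
data = [[["The", "cat", "sleeps"], ["Hello"]]]
labels = [["DET", "NOUN", "VERB"], ["INTJ"]]
tagger.train_on_batch(data, labels)  # updates the weights in place, returns None
```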
test
|
CharacterTagger.predict_on_batch
|
Makes predictions on a single batch
Args:
data: a batch of word sequences together with additional inputs
return_indexes: whether to return tag indexes in vocabulary or tags themselves
Returns:
a batch of label sequences
|
deeppavlov/models/morpho_tagger/network.py
|
def predict_on_batch(self, data: Union[list, tuple],
return_indexes: bool = False) -> List[List[str]]:
"""
Makes predictions on a single batch
Args:
data: a batch of word sequences together with additional inputs
return_indexes: whether to return tag indexes in vocabulary or tags themselves
Returns:
a batch of label sequences
"""
X = self._transform_batch(data)
objects_number, lengths = len(X[0]), [len(elem) for elem in data[0]]
Y = self.model_.predict_on_batch(X)
labels = np.argmax(Y, axis=-1)
answer: List[List[str]] = [None] * objects_number
for i, (elem, length) in enumerate(zip(labels, lengths)):
elem = elem[:length]
answer[i] = elem if return_indexes else self.tags.idxs2toks(elem)
return answer
|
[
"Makes",
"predictions",
"on",
"a",
"single",
"batch"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/morpho_tagger/network.py#L246-L266
|
[
"def",
"predict_on_batch",
"(",
"self",
",",
"data",
":",
"Union",
"[",
"list",
",",
"tuple",
"]",
",",
"return_indexes",
":",
"bool",
"=",
"False",
")",
"->",
"List",
"[",
"List",
"[",
"str",
"]",
"]",
":",
"X",
"=",
"self",
".",
"_transform_batch",
"(",
"data",
")",
"objects_number",
",",
"lengths",
"=",
"len",
"(",
"X",
"[",
"0",
"]",
")",
",",
"[",
"len",
"(",
"elem",
")",
"for",
"elem",
"in",
"data",
"[",
"0",
"]",
"]",
"Y",
"=",
"self",
".",
"model_",
".",
"predict_on_batch",
"(",
"X",
")",
"labels",
"=",
"np",
".",
"argmax",
"(",
"Y",
",",
"axis",
"=",
"-",
"1",
")",
"answer",
":",
"List",
"[",
"List",
"[",
"str",
"]",
"]",
"=",
"[",
"None",
"]",
"*",
"objects_number",
"for",
"i",
",",
"(",
"elem",
",",
"length",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"labels",
",",
"lengths",
")",
")",
":",
"elem",
"=",
"elem",
"[",
":",
"length",
"]",
"answer",
"[",
"i",
"]",
"=",
"elem",
"if",
"return_indexes",
"else",
"self",
".",
"tags",
".",
"idxs2toks",
"(",
"elem",
")",
"return",
"answer"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
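A matching prediction sketch (same illustrative `tagger` and batch layout as above):

```python
data = [[["The", "cat", "sleeps"]]]   # one input stream with a single sentence
tags = tagger.predict_on_batch(data)                       # e.g. [["DET", "NOUN", "VERB"]]
ids = tagger.predict_on_batch(data, return_indexes=True)  # vocabulary indices instead
```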
test
|
CharacterTagger._make_sent_vector
|
Transforms a sentence into a Numpy array, which will be the network input.
Args:
sent: input sentence
bucket_length: the width of the bucket
Returns:
A 2d array of shape (bucket_length, MAX_WORD_LENGTH + 2), where row i
encodes the i-th word as a BEGIN marker, its letter indices, an END marker and PAD markers.
|
deeppavlov/models/morpho_tagger/network.py
|
def _make_sent_vector(self, sent: List, bucket_length: int = None) -> np.ndarray:
"""Transforms a sentence to Numpy array, which will be the network input.
Args:
sent: input sentence
bucket_length: the width of the bucket
Returns:
A 2d array of shape (bucket_length, MAX_WORD_LENGTH + 2), where row i
encodes the i-th word as a BEGIN marker, its letter indices, an END marker and PAD markers.
"""
bucket_length = bucket_length or len(sent)
answer = np.zeros(shape=(bucket_length, MAX_WORD_LENGTH+2), dtype=np.int32)
for i, word in enumerate(sent):
answer[i, 0] = self.tags.tok2idx("BEGIN")
m = min(len(word), MAX_WORD_LENGTH)
for j, x in enumerate(word[-m:]):
answer[i, j+1] = self.symbols.tok2idx(x)
answer[i, m+1] = self.tags.tok2idx("END")
answer[i, m+2:] = self.tags.tok2idx("PAD")
return answer
|
[
"Transforms",
"a",
"sentence",
"to",
"Numpy",
"array",
"which",
"will",
"be",
"the",
"network",
"input",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/morpho_tagger/network.py#L268-L288
|
[
"def",
"_make_sent_vector",
"(",
"self",
",",
"sent",
":",
"List",
",",
"bucket_length",
":",
"int",
"=",
"None",
")",
"->",
"np",
".",
"ndarray",
":",
"bucket_length",
"=",
"bucket_length",
"or",
"len",
"(",
"sent",
")",
"answer",
"=",
"np",
".",
"zeros",
"(",
"shape",
"=",
"(",
"bucket_length",
",",
"MAX_WORD_LENGTH",
"+",
"2",
")",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"for",
"i",
",",
"word",
"in",
"enumerate",
"(",
"sent",
")",
":",
"answer",
"[",
"i",
",",
"0",
"]",
"=",
"self",
".",
"tags",
".",
"tok2idx",
"(",
"\"BEGIN\"",
")",
"m",
"=",
"min",
"(",
"len",
"(",
"word",
")",
",",
"MAX_WORD_LENGTH",
")",
"for",
"j",
",",
"x",
"in",
"enumerate",
"(",
"word",
"[",
"-",
"m",
":",
"]",
")",
":",
"answer",
"[",
"i",
",",
"j",
"+",
"1",
"]",
"=",
"self",
".",
"symbols",
".",
"tok2idx",
"(",
"x",
")",
"answer",
"[",
"i",
",",
"m",
"+",
"1",
"]",
"=",
"self",
".",
"tags",
".",
"tok2idx",
"(",
"\"END\"",
")",
"answer",
"[",
"i",
",",
"m",
"+",
"2",
":",
"]",
"=",
"self",
".",
"tags",
".",
"tok2idx",
"(",
"\"PAD\"",
")",
"return",
"answer"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
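An illustrative sketch of the resulting encoding (assumes MAX_WORD_LENGTH = 5 purely for readability; the vocabulary indices and the `tagger` instance are hypothetical):

```python
# With MAX_WORD_LENGTH = 5, each row has 5 + 2 slots:
# [BEGIN, c, a, t, END, PAD, PAD]  ->  e.g. [1, 17, 12, 30, 2, 0, 0]
vec = tagger._make_sent_vector(["cat", "sleeps"], bucket_length=4)
assert vec.shape == (4, 5 + 2)  # (bucket_length, MAX_WORD_LENGTH + 2)
```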
test
|
CharacterTagger._make_tags_vector
|
Transforms a sentence of tags into a Numpy array, which will be the network target.
Args:
tags: input sentence of tags
bucket_length: the width of the bucket
Returns:
A 1d array, where answer[i] contains the index of the i-th tag of the input sentence.
|
deeppavlov/models/morpho_tagger/network.py
|
def _make_tags_vector(self, tags, bucket_length=None) -> np.ndarray:
"""Transforms a sentence of tags to Numpy array, which will be the network target.
Args:
tags: input sentence of tags
bucket_length: the width of the bucket
Returns:
A 1d array, where answer[i] contains the index of the i-th tag of the input sentence.
"""
bucket_length = bucket_length or len(tags)
answer = np.zeros(shape=(bucket_length,), dtype=np.int32)
for i, tag in enumerate(tags):
answer[i] = self.tags.tok2idx(tag)
return answer
|
[
"Transforms",
"a",
"sentence",
"of",
"tags",
"to",
"Numpy",
"array",
"which",
"will",
"be",
"the",
"network",
"target",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/morpho_tagger/network.py#L290-L304
|
[
"def",
"_make_tags_vector",
"(",
"self",
",",
"tags",
",",
"bucket_length",
"=",
"None",
")",
"->",
"np",
".",
"ndarray",
":",
"bucket_length",
"=",
"bucket_length",
"or",
"len",
"(",
"tags",
")",
"answer",
"=",
"np",
".",
"zeros",
"(",
"shape",
"=",
"(",
"bucket_length",
",",
")",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"for",
"i",
",",
"tag",
"in",
"enumerate",
"(",
"tags",
")",
":",
"answer",
"[",
"i",
"]",
"=",
"self",
".",
"tags",
".",
"tok2idx",
"(",
"tag",
")",
"return",
"answer"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
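The corresponding target encoding, under the same hypothetical `tagger`:

```python
vec = tagger._make_tags_vector(["DET", "NOUN", "VERB"], bucket_length=5)
assert vec.shape == (5,)  # 1d: one tag index per position, zero elsewhere
```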
test
|
bleu_advanced
|
Calculate BLEU score
Parameters:
y_true: list of reference tokens
y_predicted: list of query tokens
weights: n-gram weights
smoothing_function: SmoothingFunction
auto_reweigh: Option to re-normalize the weights uniformly
penalty: whether to apply the brevity penalty
Returns:
BLEU score
|
deeppavlov/metrics/bleu.py
|
def bleu_advanced(y_true: List[Any], y_predicted: List[Any],
weights: Tuple=(1,), smoothing_function=SMOOTH.method1,
auto_reweigh=False, penalty=True) -> float:
"""Calculate BLEU score
Parameters:
y_true: list of reference tokens
y_predicted: list of query tokens
weights: n-gram weights
smoothing_function: SmoothingFunction
auto_reweigh: Option to re-normalize the weights uniformly
penalty: whether to apply the brevity penalty
Returns:
BLEU score
"""
bleu_measure = sentence_bleu([y_true], y_predicted, weights, smoothing_function, auto_reweigh)
hyp_len = len(y_predicted)
hyp_lengths = hyp_len
ref_lengths = closest_ref_length([y_true], hyp_len)
bpenalty = brevity_penalty(ref_lengths, hyp_lengths)
if penalty is True or bpenalty == 0:
return bleu_measure
return bleu_measure/bpenalty
|
[
"Calculate",
"BLEU",
"score"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/metrics/bleu.py#L27-L55
|
[
"def",
"bleu_advanced",
"(",
"y_true",
":",
"List",
"[",
"Any",
"]",
",",
"y_predicted",
":",
"List",
"[",
"Any",
"]",
",",
"weights",
":",
"Tuple",
"=",
"(",
"1",
",",
")",
",",
"smoothing_function",
"=",
"SMOOTH",
".",
"method1",
",",
"auto_reweigh",
"=",
"False",
",",
"penalty",
"=",
"True",
")",
"->",
"float",
":",
"bleu_measure",
"=",
"sentence_bleu",
"(",
"[",
"y_true",
"]",
",",
"y_predicted",
",",
"weights",
",",
"smoothing_function",
",",
"auto_reweigh",
")",
"hyp_len",
"=",
"len",
"(",
"y_predicted",
")",
"hyp_lengths",
"=",
"hyp_len",
"ref_lengths",
"=",
"closest_ref_length",
"(",
"[",
"y_true",
"]",
",",
"hyp_len",
")",
"bpenalty",
"=",
"brevity_penalty",
"(",
"ref_lengths",
",",
"hyp_lengths",
")",
"if",
"penalty",
"is",
"True",
"or",
"bpenalty",
"==",
"0",
":",
"return",
"bleu_measure",
"return",
"bleu_measure",
"/",
"bpenalty"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
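A usage sketch for `bleu_advanced` (assumes the function is importable from the path above; the sentences are illustrative):

```python
from deeppavlov.metrics.bleu import bleu_advanced

reference = "the cat sat on the mat".split()
hypothesis = "the cat is on the mat".split()
unigram_bleu = bleu_advanced(reference, hypothesis, weights=(1,))
# Divide the brevity penalty back out of the score:
no_bp_bleu = bleu_advanced(reference, hypothesis, weights=(1,), penalty=False)
```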
test
|
verify_sc_url
|
Verify signature certificate URL against Amazon Alexa requirements.
Args:
url: Signature certificate URL from SignatureCertChainUrl HTTP header.
Returns:
result: True if verification was successful, False if not.
|
deeppavlov/utils/alexa/ssl_tools.py
|
def verify_sc_url(url: str) -> bool:
"""Verify signature certificate URL against Amazon Alexa requirements.
Args:
url: Signature certificate URL from SignatureCertChainUrl HTTP header.
Returns:
result: True if verification was successful, False if not.
"""
parsed = urlsplit(url)
scheme: str = parsed.scheme
netloc: str = parsed.netloc
path: str = parsed.path
try:
port = parsed.port
except ValueError:
port = None
result = (scheme.lower() == 'https' and
netloc.lower().split(':')[0] == 's3.amazonaws.com' and
path.startswith('/echo.api/') and
(port == 443 or port is None))
return result
|
[
"Verify",
"signature",
"certificate",
"URL",
"against",
"Amazon",
"Alexa",
"requirements",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/utils/alexa/ssl_tools.py#L29-L58
|
[
"def",
"verify_sc_url",
"(",
"url",
":",
"str",
")",
"->",
"bool",
":",
"parsed",
"=",
"urlsplit",
"(",
"url",
")",
"scheme",
":",
"str",
"=",
"parsed",
".",
"scheme",
"netloc",
":",
"str",
"=",
"parsed",
".",
"netloc",
"path",
":",
"str",
"=",
"parsed",
".",
"path",
"try",
":",
"port",
"=",
"parsed",
".",
"port",
"except",
"ValueError",
":",
"port",
"=",
"None",
"result",
"=",
"(",
"scheme",
".",
"lower",
"(",
")",
"==",
"'https'",
"and",
"netloc",
".",
"lower",
"(",
")",
".",
"split",
"(",
"':'",
")",
"[",
"0",
"]",
"==",
"'s3.amazonaws.com'",
"and",
"path",
".",
"startswith",
"(",
"'/echo.api/'",
")",
"and",
"(",
"port",
"==",
"443",
"or",
"port",
"is",
"None",
")",
")",
"return",
"result"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
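Illustrative checks against the rules encoded above (the URLs are made up to exercise each branch):

```python
from deeppavlov.utils.alexa.ssl_tools import verify_sc_url

assert verify_sc_url('https://s3.amazonaws.com/echo.api/echo-api-cert.pem')
assert verify_sc_url('https://s3.amazonaws.com:443/echo.api/echo-api-cert.pem')
assert not verify_sc_url('http://s3.amazonaws.com/echo.api/echo-api-cert.pem')  # wrong scheme
assert not verify_sc_url('https://s3.amazonaws.com/other/echo-api-cert.pem')    # wrong path
```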
test
|
extract_certs
|
Extracts pyOpenSSL X509 objects from an SSL certificate chain string.
Args:
certs_txt: SSL certificate chain string.
Returns:
result: list of pyOpenSSL X509 objects.
|
deeppavlov/utils/alexa/ssl_tools.py
|
def extract_certs(certs_txt: str) -> List[crypto.X509]:
"""Extracts pycrypto X509 objects from SSL certificates chain string.
Args:
certs_txt: SSL certificates chain string.
Returns:
result: List of pycrypto X509 objects.
"""
pattern = r'-----BEGIN CERTIFICATE-----.+?-----END CERTIFICATE-----'
certs_txt = re.findall(pattern, certs_txt, flags=re.DOTALL)
certs = [crypto.load_certificate(crypto.FILETYPE_PEM, cert_txt) for cert_txt in certs_txt]
return certs
|
[
"Extracts",
"pycrypto",
"X509",
"objects",
"from",
"SSL",
"certificates",
"chain",
"string",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/utils/alexa/ssl_tools.py#L61-L73
|
[
"def",
"extract_certs",
"(",
"certs_txt",
":",
"str",
")",
"->",
"List",
"[",
"crypto",
".",
"X509",
"]",
":",
"pattern",
"=",
"r'-----BEGIN CERTIFICATE-----.+?-----END CERTIFICATE-----'",
"certs_txt",
"=",
"re",
".",
"findall",
"(",
"pattern",
",",
"certs_txt",
",",
"flags",
"=",
"re",
".",
"DOTALL",
")",
"certs",
"=",
"[",
"crypto",
".",
"load_certificate",
"(",
"crypto",
".",
"FILETYPE_PEM",
",",
"cert_txt",
")",
"for",
"cert_txt",
"in",
"certs_txt",
"]",
"return",
"certs"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
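A usage sketch for `extract_certs` (the PEM bundle path is hypothetical):

```python
from deeppavlov.utils.alexa.ssl_tools import extract_certs

with open('chain.pem') as f:            # hypothetical PEM bundle on disk
    certs = extract_certs(f.read())     # one X509 object per PEM block
print([cert.get_subject().CN for cert in certs])
```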
test
|
verify_sans
|
Verifies Subject Alternative Names (SANs) for Amazon certificate.
Args:
amazon_cert: pyOpenSSL X509 Amazon certificate.
Returns:
result: True if verification was successful, False if not.
|
deeppavlov/utils/alexa/ssl_tools.py
|
def verify_sans(amazon_cert: crypto.X509) -> bool:
"""Verifies Subject Alternative Names (SANs) for Amazon certificate.
Args:
amazon_cert: pyOpenSSL X509 Amazon certificate.
Returns:
result: True if verification was successful, False if not.
"""
cert_extentions = [amazon_cert.get_extension(i) for i in range(amazon_cert.get_extension_count())]
subject_alt_names = ''
for extention in cert_extentions:
if 'subjectAltName' in str(extention.get_short_name()):
subject_alt_names = extention.__str__()
break
result = 'echo-api.amazon.com' in subject_alt_names
return result
|
[
"Verifies",
"Subject",
"Alternative",
"Names",
"(",
"SANs",
")",
"for",
"Amazon",
"certificate",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/utils/alexa/ssl_tools.py#L76-L95
|
[
"def",
"verify_sans",
"(",
"amazon_cert",
":",
"crypto",
".",
"X509",
")",
"->",
"bool",
":",
"cert_extentions",
"=",
"[",
"amazon_cert",
".",
"get_extension",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"amazon_cert",
".",
"get_extension_count",
"(",
")",
")",
"]",
"subject_alt_names",
"=",
"''",
"for",
"extention",
"in",
"cert_extentions",
":",
"if",
"'subjectAltName'",
"in",
"str",
"(",
"extention",
".",
"get_short_name",
"(",
")",
")",
":",
"subject_alt_names",
"=",
"extention",
".",
"__str__",
"(",
")",
"break",
"result",
"=",
"'echo-api.amazon.com'",
"in",
"subject_alt_names",
"return",
"result"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
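A sketch of how `verify_sans` would be used on a freshly downloaded chain (the URL is illustrative):

```python
import requests
from deeppavlov.utils.alexa.ssl_tools import extract_certs, verify_sans

chain_url = 'https://s3.amazonaws.com/echo.api/echo-api-cert.pem'  # illustrative
amazon_cert = extract_certs(requests.get(chain_url).text)[0]
if not verify_sans(amazon_cert):
    raise ValueError('certificate was not issued for echo-api.amazon.com')
```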
test
|
verify_certs_chain
|
Verifies that the Amazon certificate and the additional certificates create a chain of trust to a root CA.
Args:
certs_chain: list of pyOpenSSL X509 intermediate certificates from the signature chain URL.
amazon_cert: pyOpenSSL X509 Amazon certificate.
Returns:
result: True if verification was successful, False if not.
|
deeppavlov/utils/alexa/ssl_tools.py
|
def verify_certs_chain(certs_chain: List[crypto.X509], amazon_cert: crypto.X509) -> bool:
"""Verifies if Amazon and additional certificates creates chain of trust to a root CA.
Args:
certs_chain: List of pycrypto X509 intermediate certificates from signature chain URL.
amazon_cert: Pycrypto X509 Amazon certificate.
Returns:
result: True if verification was successful, False if not.
"""
store = crypto.X509Store()
# add certificates from Amazon provided certs chain
for cert in certs_chain:
store.add_cert(cert)
# add CA certificates
default_verify_paths = ssl.get_default_verify_paths()
default_verify_file = default_verify_paths.cafile
default_verify_file = Path(default_verify_file).resolve() if default_verify_file else None
default_verify_path = default_verify_paths.capath
default_verify_path = Path(default_verify_path).resolve() if default_verify_path else None
ca_files = [ca_file for ca_file in default_verify_path.iterdir()] if default_verify_path else []
if default_verify_file:
ca_files.append(default_verify_file)
for ca_file in ca_files:
ca_file: Path
if ca_file.is_file():
with ca_file.open('r', encoding='ascii') as crt_f:
ca_certs_txt = crt_f.read()
ca_certs = extract_certs(ca_certs_txt)
for cert in ca_certs:
store.add_cert(cert)
# add CA certificates (Windows)
ssl_context = ssl.create_default_context()
der_certs = ssl_context.get_ca_certs(binary_form=True)
pem_certs = '\n'.join([ssl.DER_cert_to_PEM_cert(der_cert) for der_cert in der_certs])
ca_certs = extract_certs(pem_certs)
for ca_cert in ca_certs:
store.add_cert(ca_cert)
store_context = crypto.X509StoreContext(store, amazon_cert)
try:
store_context.verify_certificate()
result = True
except crypto.X509StoreContextError:
result = False
return result
|
[
"Verifies",
"if",
"Amazon",
"and",
"additional",
"certificates",
"creates",
"chain",
"of",
"trust",
"to",
"a",
"root",
"CA",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/utils/alexa/ssl_tools.py#L98-L152
|
[
"def",
"verify_certs_chain",
"(",
"certs_chain",
":",
"List",
"[",
"crypto",
".",
"X509",
"]",
",",
"amazon_cert",
":",
"crypto",
".",
"X509",
")",
"->",
"bool",
":",
"store",
"=",
"crypto",
".",
"X509Store",
"(",
")",
"# add certificates from Amazon provided certs chain",
"for",
"cert",
"in",
"certs_chain",
":",
"store",
".",
"add_cert",
"(",
"cert",
")",
"# add CA certificates",
"default_verify_paths",
"=",
"ssl",
".",
"get_default_verify_paths",
"(",
")",
"default_verify_file",
"=",
"default_verify_paths",
".",
"cafile",
"default_verify_file",
"=",
"Path",
"(",
"default_verify_file",
")",
".",
"resolve",
"(",
")",
"if",
"default_verify_file",
"else",
"None",
"default_verify_path",
"=",
"default_verify_paths",
".",
"capath",
"default_verify_path",
"=",
"Path",
"(",
"default_verify_path",
")",
".",
"resolve",
"(",
")",
"if",
"default_verify_path",
"else",
"None",
"ca_files",
"=",
"[",
"ca_file",
"for",
"ca_file",
"in",
"default_verify_path",
".",
"iterdir",
"(",
")",
"]",
"if",
"default_verify_path",
"else",
"[",
"]",
"if",
"default_verify_file",
":",
"ca_files",
".",
"append",
"(",
"default_verify_file",
")",
"for",
"ca_file",
"in",
"ca_files",
":",
"ca_file",
":",
"Path",
"if",
"ca_file",
".",
"is_file",
"(",
")",
":",
"with",
"ca_file",
".",
"open",
"(",
"'r'",
",",
"encoding",
"=",
"'ascii'",
")",
"as",
"crt_f",
":",
"ca_certs_txt",
"=",
"crt_f",
".",
"read",
"(",
")",
"ca_certs",
"=",
"extract_certs",
"(",
"ca_certs_txt",
")",
"for",
"cert",
"in",
"ca_certs",
":",
"store",
".",
"add_cert",
"(",
"cert",
")",
"# add CA certificates (Windows)",
"ssl_context",
"=",
"ssl",
".",
"create_default_context",
"(",
")",
"der_certs",
"=",
"ssl_context",
".",
"get_ca_certs",
"(",
"binary_form",
"=",
"True",
")",
"pem_certs",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"ssl",
".",
"DER_cert_to_PEM_cert",
"(",
"der_cert",
")",
"for",
"der_cert",
"in",
"der_certs",
"]",
")",
"ca_certs",
"=",
"extract_certs",
"(",
"pem_certs",
")",
"for",
"ca_cert",
"in",
"ca_certs",
":",
"store",
".",
"add_cert",
"(",
"ca_cert",
")",
"store_context",
"=",
"crypto",
".",
"X509StoreContext",
"(",
"store",
",",
"amazon_cert",
")",
"try",
":",
"store_context",
".",
"verify_certificate",
"(",
")",
"result",
"=",
"True",
"except",
"crypto",
".",
"X509StoreContextError",
":",
"result",
"=",
"False",
"return",
"result"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
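Continuing the sketch from `verify_sans`, the rest of the downloaded chain is passed as the intermediates:

```python
import requests
from deeppavlov.utils.alexa.ssl_tools import extract_certs, verify_certs_chain

chain_url = 'https://s3.amazonaws.com/echo.api/echo-api-cert.pem'  # illustrative
certs = extract_certs(requests.get(chain_url).text)
amazon_cert, intermediates = certs[0], certs[1:]
ok = verify_certs_chain(intermediates, amazon_cert)  # True if it chains to a trusted root CA
```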
test
|
verify_signature
|
Verifies Alexa request signature.
Args:
amazon_cert: pyOpenSSL X509 Amazon certificate.
signature: base64 encoded Alexa request signature from the Signature HTTP header.
request_body: full HTTPS request body
Returns:
result: True if verification was successful, False if not.
|
deeppavlov/utils/alexa/ssl_tools.py
|
def verify_signature(amazon_cert: crypto.X509, signature: str, request_body: bytes) -> bool:
"""Verifies Alexa request signature.
Args:
amazon_cert: pyOpenSSL X509 Amazon certificate.
signature: base64 encoded Alexa request signature from the Signature HTTP header.
request_body: full HTTPS request body
Returns:
result: True if verification was successful, False if not.
"""
signature = base64.b64decode(signature)
try:
crypto.verify(amazon_cert, signature, request_body, 'sha1')
result = True
except crypto.Error:
result = False
return result
|
[
"Verifies",
"Alexa",
"request",
"signature",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/utils/alexa/ssl_tools.py#L155-L173
|
[
"def",
"verify_signature",
"(",
"amazon_cert",
":",
"crypto",
".",
"X509",
",",
"signature",
":",
"str",
",",
"request_body",
":",
"bytes",
")",
"->",
"bool",
":",
"signature",
"=",
"base64",
".",
"b64decode",
"(",
"signature",
")",
"try",
":",
"crypto",
".",
"verify",
"(",
"amazon_cert",
",",
"signature",
",",
"request_body",
",",
"'sha1'",
")",
"result",
"=",
"True",
"except",
"crypto",
".",
"Error",
":",
"result",
"=",
"False",
"return",
"result"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
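A hypothetical verification step inside a request handler (assumes a Flask-style `request` object and an `amazon_cert` obtained as in the sketches above):

```python
signature = request.headers['Signature']  # base64 encoded by Alexa
body = request.get_data()                 # raw bytes of the HTTPS POST body
if not verify_signature(amazon_cert, signature, body):
    raise PermissionError('Alexa request signature check failed')
```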
test
|
verify_cert
|
Conducts a series of Alexa SSL certificate verifications against Amazon Alexa requirements.
Args:
signature_chain_url: Signature certificate URL from SignatureCertChainUrl HTTP header.
Returns:
result: Amazon certificate if verification was successful, None if not.
|
deeppavlov/utils/alexa/ssl_tools.py
|
def verify_cert(signature_chain_url: str) -> Optional[crypto.X509]:
"""Conducts series of Alexa SSL certificate verifications against Amazon Alexa requirements.
Args:
signature_chain_url: Signature certificate URL from SignatureCertChainUrl HTTP header.
Returns:
result: Amazon certificate if verification was successful, None if not.
"""
try:
certs_chain_get = requests.get(signature_chain_url)
except requests.exceptions.ConnectionError as e:
log.error(f'Amazon signature chain get error: {e}')
return None
certs_chain_txt = certs_chain_get.text
certs_chain = extract_certs(certs_chain_txt)
amazon_cert: crypto.X509 = certs_chain.pop(0)
# verify signature chain url
sc_url_verification = verify_sc_url(signature_chain_url)
if not sc_url_verification:
log.error(f'Amazon signature url {signature_chain_url} was not verified')
# verify not expired
expired_verification = not amazon_cert.has_expired()
if not expired_verification:
log.error(f'Amazon certificate ({signature_chain_url}) expired')
# verify subject alternative names
sans_verification = verify_sans(amazon_cert)
if not sans_verification:
log.error(f'Subject alternative names verification for ({signature_chain_url}) certificate failed')
# verify certs chain
chain_verification = verify_certs_chain(certs_chain, amazon_cert)
if not chain_verification:
log.error(f'Certificates chain verification for ({signature_chain_url}) certificate failed')
result = (sc_url_verification and expired_verification and sans_verification and chain_verification)
return amazon_cert if result else None
|
[
"Conducts",
"series",
"of",
"Alexa",
"SSL",
"certificate",
"verifications",
"against",
"Amazon",
"Alexa",
"requirements",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/utils/alexa/ssl_tools.py#L176-L217
|
[
"def",
"verify_cert",
"(",
"signature_chain_url",
":",
"str",
")",
"->",
"Optional",
"[",
"crypto",
".",
"X509",
"]",
":",
"try",
":",
"certs_chain_get",
"=",
"requests",
".",
"get",
"(",
"signature_chain_url",
")",
"except",
"requests",
".",
"exceptions",
".",
"ConnectionError",
"as",
"e",
":",
"log",
".",
"error",
"(",
"f'Amazon signature chain get error: {e}'",
")",
"return",
"None",
"certs_chain_txt",
"=",
"certs_chain_get",
".",
"text",
"certs_chain",
"=",
"extract_certs",
"(",
"certs_chain_txt",
")",
"amazon_cert",
":",
"crypto",
".",
"X509",
"=",
"certs_chain",
".",
"pop",
"(",
"0",
")",
"# verify signature chain url",
"sc_url_verification",
"=",
"verify_sc_url",
"(",
"signature_chain_url",
")",
"if",
"not",
"sc_url_verification",
":",
"log",
".",
"error",
"(",
"f'Amazon signature url {signature_chain_url} was not verified'",
")",
"# verify not expired",
"expired_verification",
"=",
"not",
"amazon_cert",
".",
"has_expired",
"(",
")",
"if",
"not",
"expired_verification",
":",
"log",
".",
"error",
"(",
"f'Amazon certificate ({signature_chain_url}) expired'",
")",
"# verify subject alternative names",
"sans_verification",
"=",
"verify_sans",
"(",
"amazon_cert",
")",
"if",
"not",
"sans_verification",
":",
"log",
".",
"error",
"(",
"f'Subject alternative names verification for ({signature_chain_url}) certificate failed'",
")",
"# verify certs chain",
"chain_verification",
"=",
"verify_certs_chain",
"(",
"certs_chain",
",",
"amazon_cert",
")",
"if",
"not",
"chain_verification",
":",
"log",
".",
"error",
"(",
"f'Certificates chain verification for ({signature_chain_url}) certificate failed'",
")",
"result",
"=",
"(",
"sc_url_verification",
"and",
"expired_verification",
"and",
"sans_verification",
"and",
"chain_verification",
")",
"return",
"amazon_cert",
"if",
"result",
"else",
"None"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
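Putting the pieces together in a hypothetical Flask endpoint (the names and the 400 response are illustrative):

```python
from flask import abort, request
from deeppavlov.utils.alexa.ssl_tools import verify_cert, verify_signature

amazon_cert = verify_cert(request.headers['SignatureCertChainUrl'])
if amazon_cert is None or not verify_signature(
        amazon_cert, request.headers['Signature'], request.get_data()):
    abort(400)  # reject the request as unauthenticated
```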
test
|
RichMessage.json
|
Returns a list of JSON-compatible states of the RichMessage instance's
nested controls.
Returns:
json_controls: JSON representation of the RichMessage instance's
nested controls.
|
deeppavlov/core/agent/rich_content.py
|
def json(self) -> list:
"""Returns list of json compatible states of the RichMessage instance
nested controls.
Returns:
json_controls: Json representation of RichMessage instance
nested controls.
"""
json_controls = [control.json() for control in self.controls]
return json_controls
|
[
"Returns",
"list",
"of",
"json",
"compatible",
"states",
"of",
"the",
"RichMessage",
"instance",
"nested",
"controls",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/agent/rich_content.py#L115-L124
|
[
"def",
"json",
"(",
"self",
")",
"->",
"list",
":",
"json_controls",
"=",
"[",
"control",
".",
"json",
"(",
")",
"for",
"control",
"in",
"self",
".",
"controls",
"]",
"return",
"json_controls"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
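A hypothetical usage sketch covering `json` and the three sibling serializers below (the `PlainText` control, its import path, and the `add_control` call are assumptions; any nested control exposing the matching methods works the same way):

```python
from deeppavlov.core.agent.rich_content import RichMessage
from deeppavlov.agents.rich_content.default_rich_content import PlainText  # assumed path

message = RichMessage()
message.add_control(PlainText('Hello!'))          # assumed API
payload = message.json()  # [control.json() for each nested control]
```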
test
|
RichMessage.ms_bot_framework
|
Returns a list of MS Bot Framework compatible states of the
RichMessage instance's nested controls.
Returns:
ms_bf_controls: MS Bot Framework representation of the RichMessage
instance's nested controls.
|
deeppavlov/core/agent/rich_content.py
|
def ms_bot_framework(self) -> list:
"""Returns list of MS Bot Framework compatible states of the
RichMessage instance nested controls.
Returns:
ms_bf_controls: MS Bot Framework representation of RichMessage instance
nested controls.
"""
ms_bf_controls = [control.ms_bot_framework() for control in self.controls]
return ms_bf_controls
|
[
"Returns",
"list",
"of",
"MS",
"Bot",
"Framework",
"compatible",
"states",
"of",
"the",
"RichMessage",
"instance",
"nested",
"controls",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/agent/rich_content.py#L126-L135
|
[
"def",
"ms_bot_framework",
"(",
"self",
")",
"->",
"list",
":",
"ms_bf_controls",
"=",
"[",
"control",
".",
"ms_bot_framework",
"(",
")",
"for",
"control",
"in",
"self",
".",
"controls",
"]",
"return",
"ms_bf_controls"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
test
|
RichMessage.telegram
|
Returns a list of Telegram compatible states of the RichMessage
instance's nested controls.
Returns:
telegram_controls: Telegram representation of the RichMessage instance's
nested controls.
|
deeppavlov/core/agent/rich_content.py
|
def telegram(self) -> list:
"""Returns list of Telegram compatible states of the RichMessage
instance nested controls.
Returns:
telegram_controls: Telegram representation of RichMessage instance nested
controls.
"""
telegram_controls = [control.telegram() for control in self.controls]
return telegram_controls
|
[
"Returns",
"list",
"of",
"Telegram",
"compatible",
"states",
"of",
"the",
"RichMessage",
"instance",
"nested",
"controls",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/agent/rich_content.py#L137-L146
|
[
"def",
"telegram",
"(",
"self",
")",
"->",
"list",
":",
"telegram_controls",
"=",
"[",
"control",
".",
"telegram",
"(",
")",
"for",
"control",
"in",
"self",
".",
"controls",
"]",
"return",
"telegram_controls"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
test
|
RichMessage.alexa
|
Returns a list of Amazon Alexa compatible states of the RichMessage
instance's nested controls.
Returns:
alexa_controls: Amazon Alexa representation of the RichMessage instance's
nested controls.
|
deeppavlov/core/agent/rich_content.py
|
def alexa(self) -> list:
"""Returns list of Amazon Alexa compatible states of the RichMessage
instance nested controls.
Returns:
alexa_controls: Amazon Alexa representation of RichMessage instance nested
controls.
"""
alexa_controls = [control.alexa() for control in self.controls]
return alexa_controls
|
[
"Returns",
"list",
"of",
"Amazon",
"Alexa",
"compatible",
"states",
"of",
"the",
"RichMessage",
"instance",
"nested",
"controls",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/agent/rich_content.py#L148-L157
|
[
"def",
"alexa",
"(",
"self",
")",
"->",
"list",
":",
"alexa_controls",
"=",
"[",
"control",
".",
"alexa",
"(",
")",
"for",
"control",
"in",
"self",
".",
"controls",
"]",
"return",
"alexa_controls"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
test
|
main
|
DeepPavlov console configuration utility.
|
deeppavlov/settings.py
|
def main():
"""DeepPavlov console configuration utility."""
args = parser.parse_args()
path = get_settings_path()
if args.default:
if populate_settings_dir(force=True):
print(f'Populated {path} with default settings files')
else:
print(f'{path} is already a default settings directory')
else:
print(f'Current DeepPavlov settings path: {path}')
|
def main():
"""DeepPavlov console configuration utility."""
args = parser.parse_args()
path = get_settings_path()
if args.default:
if populate_settings_dir(force=True):
print(f'Populated {path} with default settings files')
else:
print(f'{path} is already a default settings directory')
else:
print(f'Current DeepPavlov settings path: {path}')
|
[
"DeepPavlov",
"console",
"configuration",
"utility",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/settings.py#L24-L35
|
[
"def",
"main",
"(",
")",
":",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"path",
"=",
"get_settings_path",
"(",
")",
"if",
"args",
".",
"default",
":",
"if",
"populate_settings_dir",
"(",
"force",
"=",
"True",
")",
":",
"print",
"(",
"f'Populated {path} with default settings files'",
")",
"else",
":",
"print",
"(",
"f'{path} is already a default settings directory'",
")",
"else",
":",
"print",
"(",
"f'Current DeepPavlov settings path: {path}'",
")"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
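main() above only reveals that the module-level parser exposes an args.default attribute; below is a hedged reconstruction of that wiring. The actual flag spelling in deeppavlov/settings.py is an assumption here:

import argparse

# assumed parser definition; only the args.default attribute is visible in main()
parser = argparse.ArgumentParser(description='DeepPavlov console configuration utility')
parser.add_argument('-d', '--default', action='store_true',
                    help='reset settings files to their defaults')

args = parser.parse_args(['--default'])
assert args.default is True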
test
|
_graph_wrap
|
Constructs function encapsulated in the graph.
|
deeppavlov/core/models/tf_backend.py
|
def _graph_wrap(func, graph):
"""Constructs function encapsulated in the graph."""
@wraps(func)
def _wrapped(*args, **kwargs):
with graph.as_default():
return func(*args, **kwargs)
return _wrapped
|
def _graph_wrap(func, graph):
"""Constructs function encapsulated in the graph."""
@wraps(func)
def _wrapped(*args, **kwargs):
with graph.as_default():
return func(*args, **kwargs)
return _wrapped
|
[
"Constructs",
"function",
"encapsulated",
"in",
"the",
"graph",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/models/tf_backend.py#L22-L28
|
[
"def",
"_graph_wrap",
"(",
"func",
",",
"graph",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"_wrapped",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"graph",
".",
"as_default",
"(",
")",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"_wrapped"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
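A minimal TF1-style usage sketch of _graph_wrap: the wrapped callable always executes with the captured graph as default, no matter what graph is active at the call site.

import tensorflow as tf
from functools import wraps

def _graph_wrap(func, graph):
    @wraps(func)
    def _wrapped(*args, **kwargs):
        with graph.as_default():
            return func(*args, **kwargs)
    return _wrapped

graph = tf.Graph()
current_graph = _graph_wrap(tf.get_default_graph, graph)
# the wrapped call sees the captured graph, not the caller's default one
assert current_graph() is graph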
test
|
_keras_wrap
|
Constructs function encapsulated in the graph and the session.
|
deeppavlov/core/models/tf_backend.py
|
def _keras_wrap(func, graph, session):
"""Constructs function encapsulated in the graph and the session."""
import keras.backend as K
@wraps(func)
def _wrapped(*args, **kwargs):
with graph.as_default():
K.set_session(session)
return func(*args, **kwargs)
return _wrapped
|
def _keras_wrap(func, graph, session):
"""Constructs function encapsulated in the graph and the session."""
import keras.backend as K
@wraps(func)
def _wrapped(*args, **kwargs):
with graph.as_default():
K.set_session(session)
return func(*args, **kwargs)
return _wrapped
|
[
"Constructs",
"function",
"encapsulated",
"in",
"the",
"graph",
"and",
"the",
"session",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/models/tf_backend.py#L31-L40
|
[
"def",
"_keras_wrap",
"(",
"func",
",",
"graph",
",",
"session",
")",
":",
"import",
"keras",
".",
"backend",
"as",
"K",
"@",
"wraps",
"(",
"func",
")",
"def",
"_wrapped",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"graph",
".",
"as_default",
"(",
")",
":",
"K",
".",
"set_session",
"(",
"session",
")",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"_wrapped"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
test
|
roc_auc_score
|
Compute Area Under the Curve (AUC) from prediction scores.
Args:
y_true: true binary labels
y_pred: target scores, can either be probability estimates of the positive class
Returns:
Area Under the Curve (AUC) from prediction scores
|
deeppavlov/metrics/roc_auc_score.py
|
def roc_auc_score(y_true: Union[List[List[float]], List[List[int]], np.ndarray],
y_pred: Union[List[List[float]], List[List[int]], np.ndarray]) -> float:
"""
Compute Area Under the Curve (AUC) from prediction scores.
Args:
y_true: true binary labels
y_pred: target scores, can either be probability estimates of the positive class
Returns:
Area Under the Curve (AUC) from prediction scores
"""
try:
return sklearn.metrics.roc_auc_score(np.squeeze(np.array(y_true)),
np.squeeze(np.array(y_pred)), average="macro")
except ValueError:
return 0.
|
def roc_auc_score(y_true: Union[List[List[float]], List[List[int]], np.ndarray],
y_pred: Union[List[List[float]], List[List[int]], np.ndarray]) -> float:
"""
Compute Area Under the Curve (AUC) from prediction scores.
Args:
y_true: true binary labels
y_pred: target scores, can either be probability estimates of the positive class
Returns:
Area Under the Curve (AUC) from prediction scores
"""
try:
return sklearn.metrics.roc_auc_score(np.squeeze(np.array(y_true)),
np.squeeze(np.array(y_pred)), average="macro")
except ValueError:
return 0.
|
[
"Compute",
"Area",
"Under",
"the",
"Curve",
"(",
"AUC",
")",
"from",
"prediction",
"scores",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/metrics/roc_auc_score.py#L25-L41
|
[
"def",
"roc_auc_score",
"(",
"y_true",
":",
"Union",
"[",
"List",
"[",
"List",
"[",
"float",
"]",
"]",
",",
"List",
"[",
"List",
"[",
"int",
"]",
"]",
",",
"np",
".",
"ndarray",
"]",
",",
"y_pred",
":",
"Union",
"[",
"List",
"[",
"List",
"[",
"float",
"]",
"]",
",",
"List",
"[",
"List",
"[",
"int",
"]",
"]",
",",
"np",
".",
"ndarray",
"]",
")",
"->",
"float",
":",
"try",
":",
"return",
"sklearn",
".",
"metrics",
".",
"roc_auc_score",
"(",
"np",
".",
"squeeze",
"(",
"np",
".",
"array",
"(",
"y_true",
")",
")",
",",
"np",
".",
"squeeze",
"(",
"np",
".",
"array",
"(",
"y_pred",
")",
")",
",",
"average",
"=",
"\"macro\"",
")",
"except",
"ValueError",
":",
"return",
"0."
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
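A toy call (the import path matches the record above): perfectly ranked scores yield an AUC of 1.0, while inputs that make sklearn raise ValueError fall back to 0.0.

from deeppavlov.metrics.roc_auc_score import roc_auc_score

y_true = [[1], [0], [1], [0]]
y_pred = [[0.9], [0.2], [0.7], [0.4]]
# np.squeeze inside collapses the singleton inner dimension before scoring
print(roc_auc_score(y_true, y_pred))  # 1.0: every positive outranks every negative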
test
|
hash_
|
Convert a token to a hash of given size.
Args:
token: a word
hash_size: hash size
Returns:
int, hashed token
|
deeppavlov/models/vectorizers/hashing_tfidf_vectorizer.py
|
def hash_(token: str, hash_size: int) -> int:
"""Convert a token to a hash of given size.
Args:
token: a word
hash_size: hash size
Returns:
int, hashed token
"""
return murmurhash3_32(token, positive=True) % hash_size
|
def hash_(token: str, hash_size: int) -> int:
"""Convert a token to a hash of given size.
Args:
token: a word
hash_size: hash size
Returns:
int, hashed token
"""
return murmurhash3_32(token, positive=True) % hash_size
|
[
"Convert",
"a",
"token",
"to",
"a",
"hash",
"of",
"given",
"size",
".",
"Args",
":",
"token",
":",
"a",
"word",
"hash_size",
":",
"hash",
"size"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/vectorizers/hashing_tfidf_vectorizer.py#L33-L43
|
[
"def",
"hash_",
"(",
"token",
":",
"str",
",",
"hash_size",
":",
"int",
")",
"->",
"int",
":",
"return",
"murmurhash3_32",
"(",
"token",
",",
"positive",
"=",
"True",
")",
"%",
"hash_size"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
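A usage sketch: murmurhash3_32 is deterministic, so the modulo maps every token into a stable bucket in [0, hash_size); the hash size 2 ** 24 below is only illustrative.

from sklearn.utils import murmurhash3_32

def hash_(token: str, hash_size: int) -> int:
    return murmurhash3_32(token, positive=True) % hash_size

bucket = hash_('Moscow', 2 ** 24)
assert 0 <= bucket < 2 ** 24
# equal tokens always land in the same bucket
assert hash_('Moscow', 2 ** 24) == bucket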
test
|
accuracy
|
Calculate accuracy in terms of absolute coincidence
Args:
y_true: array of true values
y_predicted: array of predicted values
Returns:
fraction of samples whose predictions match the true values exactly
|
deeppavlov/metrics/accuracy.py
|
def accuracy(y_true: [list, np.ndarray], y_predicted: [list, np.ndarray]) -> float:
"""
Calculate accuracy in terms of absolute coincidence
Args:
y_true: array of true values
y_predicted: array of predicted values
Returns:
        fraction of samples whose predictions match the true values exactly
"""
examples_len = len(y_true)
correct = sum([y1 == y2 for y1, y2 in zip(y_true, y_predicted)])
return correct / examples_len if examples_len else 0
|
def accuracy(y_true: [list, np.ndarray], y_predicted: [list, np.ndarray]) -> float:
"""
Calculate accuracy in terms of absolute coincidence
Args:
y_true: array of true values
y_predicted: array of predicted values
Returns:
        fraction of samples whose predictions match the true values exactly
"""
examples_len = len(y_true)
correct = sum([y1 == y2 for y1, y2 in zip(y_true, y_predicted)])
return correct / examples_len if examples_len else 0
|
[
"Calculate",
"accuracy",
"in",
"terms",
"of",
"absolute",
"coincidence"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/metrics/accuracy.py#L24-L37
|
[
"def",
"accuracy",
"(",
"y_true",
":",
"[",
"list",
",",
"np",
".",
"ndarray",
"]",
",",
"y_predicted",
":",
"[",
"list",
",",
"np",
".",
"ndarray",
"]",
")",
"->",
"float",
":",
"examples_len",
"=",
"len",
"(",
"y_true",
")",
"correct",
"=",
"sum",
"(",
"[",
"y1",
"==",
"y2",
"for",
"y1",
",",
"y2",
"in",
"zip",
"(",
"y_true",
",",
"y_predicted",
")",
"]",
")",
"return",
"correct",
"/",
"examples_len",
"if",
"examples_len",
"else",
"0"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
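A quick call on toy labels (the import path matches the record above); note the guard that returns 0 instead of dividing by zero on empty input.

from deeppavlov.metrics.accuracy import accuracy

y_true = ['greet', 'bye', 'greet']
y_predicted = ['greet', 'greet', 'greet']
print(accuracy(y_true, y_predicted))  # 0.666...: two of three labels match exactly
print(accuracy([], []))               # 0: empty input short-circuits the division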
test
|
round_accuracy
|
Rounds predictions and calculates accuracy in terms of absolute coincidence.
Args:
y_true: list of true values
y_predicted: list of predicted values
Returns:
fraction of samples whose rounded predictions match the true values exactly
|
deeppavlov/metrics/accuracy.py
|
def round_accuracy(y_true, y_predicted):
"""
Rounds predictions and calculates accuracy in terms of absolute coincidence.
Args:
y_true: list of true values
y_predicted: list of predicted values
Returns:
        fraction of samples whose rounded predictions match the true values exactly
"""
predictions = [round(x) for x in y_predicted]
examples_len = len(y_true)
correct = sum([y1 == y2 for y1, y2 in zip(y_true, predictions)])
return correct / examples_len if examples_len else 0
|
def round_accuracy(y_true, y_predicted):
"""
Rounds predictions and calculates accuracy in terms of absolute coincidence.
Args:
y_true: list of true values
y_predicted: list of predicted values
Returns:
        fraction of samples whose rounded predictions match the true values exactly
"""
predictions = [round(x) for x in y_predicted]
examples_len = len(y_true)
correct = sum([y1 == y2 for y1, y2 in zip(y_true, predictions)])
return correct / examples_len if examples_len else 0
|
[
"Rounds",
"predictions",
"and",
"calculates",
"accuracy",
"in",
"terms",
"of",
"absolute",
"coincidence",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/metrics/accuracy.py#L94-L108
|
[
"def",
"round_accuracy",
"(",
"y_true",
",",
"y_predicted",
")",
":",
"predictions",
"=",
"[",
"round",
"(",
"x",
")",
"for",
"x",
"in",
"y_predicted",
"]",
"examples_len",
"=",
"len",
"(",
"y_true",
")",
"correct",
"=",
"sum",
"(",
"[",
"y1",
"==",
"y2",
"for",
"y1",
",",
"y2",
"in",
"zip",
"(",
"y_true",
",",
"predictions",
")",
"]",
")",
"return",
"correct",
"/",
"examples_len",
"if",
"examples_len",
"else",
"0"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
test
|
_pretrained_initializer
|
We'll stub out all the initializers in the pretrained LM with
a function that loads the weights from the file
|
deeppavlov/models/elmo/elmo_model.py
|
def _pretrained_initializer(varname, weight_file, embedding_weight_file=None):
"""
We'll stub out all the initializers in the pretrained LM with
a function that loads the weights from the file
"""
weight_name_map = {}
for i in range(2):
for j in range(8): # if we decide to add more layers
root = 'RNN_{}/RNN/MultiRNNCell/Cell{}'.format(i, j)
weight_name_map[root + '/rnn/lstm_cell/kernel'] = \
root + '/LSTMCell/W_0'
weight_name_map[root + '/rnn/lstm_cell/bias'] = \
root + '/LSTMCell/B'
weight_name_map[root + '/rnn/lstm_cell/projection/kernel'] = \
root + '/LSTMCell/W_P_0'
# convert the graph name to that in the checkpoint
varname_in_file = varname[5:]
if varname_in_file.startswith('RNN'):
varname_in_file = weight_name_map[varname_in_file]
if varname_in_file == 'embedding':
with h5py.File(embedding_weight_file, 'r') as fin:
# Have added a special 0 index for padding not present
# in the original model.
embed_weights = fin[varname_in_file][...]
weights = np.zeros(
(embed_weights.shape[0] + 1, embed_weights.shape[1]),
dtype=DTYPE
)
weights[1:, :] = embed_weights
else:
with h5py.File(weight_file, 'r') as fin:
if varname_in_file == 'char_embed':
# Have added a special 0 index for padding not present
# in the original model.
char_embed_weights = fin[varname_in_file][...]
weights = np.zeros(
(char_embed_weights.shape[0] + 1,
char_embed_weights.shape[1]),
dtype=DTYPE
)
weights[1:, :] = char_embed_weights
else:
weights = fin[varname_in_file][...]
# Tensorflow initializers are callables that accept a shape parameter
# and some optional kwargs
def ret(shape, **kwargs):
if list(shape) != list(weights.shape):
raise ValueError(
"Invalid shape initializing {0}, got {1}, expected {2}".format(
varname_in_file, shape, weights.shape)
)
return weights
return ret
|
def _pretrained_initializer(varname, weight_file, embedding_weight_file=None):
"""
We'll stub out all the initializers in the pretrained LM with
a function that loads the weights from the file
"""
weight_name_map = {}
for i in range(2):
for j in range(8): # if we decide to add more layers
root = 'RNN_{}/RNN/MultiRNNCell/Cell{}'.format(i, j)
weight_name_map[root + '/rnn/lstm_cell/kernel'] = \
root + '/LSTMCell/W_0'
weight_name_map[root + '/rnn/lstm_cell/bias'] = \
root + '/LSTMCell/B'
weight_name_map[root + '/rnn/lstm_cell/projection/kernel'] = \
root + '/LSTMCell/W_P_0'
# convert the graph name to that in the checkpoint
varname_in_file = varname[5:]
if varname_in_file.startswith('RNN'):
varname_in_file = weight_name_map[varname_in_file]
if varname_in_file == 'embedding':
with h5py.File(embedding_weight_file, 'r') as fin:
# Have added a special 0 index for padding not present
# in the original model.
embed_weights = fin[varname_in_file][...]
weights = np.zeros(
(embed_weights.shape[0] + 1, embed_weights.shape[1]),
dtype=DTYPE
)
weights[1:, :] = embed_weights
else:
with h5py.File(weight_file, 'r') as fin:
if varname_in_file == 'char_embed':
# Have added a special 0 index for padding not present
# in the original model.
char_embed_weights = fin[varname_in_file][...]
weights = np.zeros(
(char_embed_weights.shape[0] + 1,
char_embed_weights.shape[1]),
dtype=DTYPE
)
weights[1:, :] = char_embed_weights
else:
weights = fin[varname_in_file][...]
# Tensorflow initializers are callables that accept a shape parameter
# and some optional kwargs
def ret(shape, **kwargs):
if list(shape) != list(weights.shape):
raise ValueError(
"Invalid shape initializing {0}, got {1}, expected {2}".format(
varname_in_file, shape, weights.shape)
)
return weights
return ret
|
[
"We",
"ll",
"stub",
"out",
"all",
"the",
"initializers",
"in",
"the",
"pretrained",
"LM",
"with",
"a",
"function",
"that",
"loads",
"the",
"weights",
"from",
"the",
"file"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/elmo/elmo_model.py#L192-L248
|
[
"def",
"_pretrained_initializer",
"(",
"varname",
",",
"weight_file",
",",
"embedding_weight_file",
"=",
"None",
")",
":",
"weight_name_map",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"2",
")",
":",
"for",
"j",
"in",
"range",
"(",
"8",
")",
":",
"# if we decide to add more layers",
"root",
"=",
"'RNN_{}/RNN/MultiRNNCell/Cell{}'",
".",
"format",
"(",
"i",
",",
"j",
")",
"weight_name_map",
"[",
"root",
"+",
"'/rnn/lstm_cell/kernel'",
"]",
"=",
"root",
"+",
"'/LSTMCell/W_0'",
"weight_name_map",
"[",
"root",
"+",
"'/rnn/lstm_cell/bias'",
"]",
"=",
"root",
"+",
"'/LSTMCell/B'",
"weight_name_map",
"[",
"root",
"+",
"'/rnn/lstm_cell/projection/kernel'",
"]",
"=",
"root",
"+",
"'/LSTMCell/W_P_0'",
"# convert the graph name to that in the checkpoint",
"varname_in_file",
"=",
"varname",
"[",
"5",
":",
"]",
"if",
"varname_in_file",
".",
"startswith",
"(",
"'RNN'",
")",
":",
"varname_in_file",
"=",
"weight_name_map",
"[",
"varname_in_file",
"]",
"if",
"varname_in_file",
"==",
"'embedding'",
":",
"with",
"h5py",
".",
"File",
"(",
"embedding_weight_file",
",",
"'r'",
")",
"as",
"fin",
":",
"# Have added a special 0 index for padding not present",
"# in the original model.",
"embed_weights",
"=",
"fin",
"[",
"varname_in_file",
"]",
"[",
"...",
"]",
"weights",
"=",
"np",
".",
"zeros",
"(",
"(",
"embed_weights",
".",
"shape",
"[",
"0",
"]",
"+",
"1",
",",
"embed_weights",
".",
"shape",
"[",
"1",
"]",
")",
",",
"dtype",
"=",
"DTYPE",
")",
"weights",
"[",
"1",
":",
",",
":",
"]",
"=",
"embed_weights",
"else",
":",
"with",
"h5py",
".",
"File",
"(",
"weight_file",
",",
"'r'",
")",
"as",
"fin",
":",
"if",
"varname_in_file",
"==",
"'char_embed'",
":",
"# Have added a special 0 index for padding not present",
"# in the original model.",
"char_embed_weights",
"=",
"fin",
"[",
"varname_in_file",
"]",
"[",
"...",
"]",
"weights",
"=",
"np",
".",
"zeros",
"(",
"(",
"char_embed_weights",
".",
"shape",
"[",
"0",
"]",
"+",
"1",
",",
"char_embed_weights",
".",
"shape",
"[",
"1",
"]",
")",
",",
"dtype",
"=",
"DTYPE",
")",
"weights",
"[",
"1",
":",
",",
":",
"]",
"=",
"char_embed_weights",
"else",
":",
"weights",
"=",
"fin",
"[",
"varname_in_file",
"]",
"[",
"...",
"]",
"# Tensorflow initializers are callables that accept a shape parameter",
"# and some optional kwargs",
"def",
"ret",
"(",
"shape",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"list",
"(",
"shape",
")",
"!=",
"list",
"(",
"weights",
".",
"shape",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid shape initializing {0}, got {1}, expected {2}\"",
".",
"format",
"(",
"varname_in_file",
",",
"shape",
",",
"weights",
".",
"shape",
")",
")",
"return",
"weights",
"return",
"ret"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
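The core idea above is returning a closure that TensorFlow can call as an initializer while validating the checkpoint shape. A standalone sketch with an in-memory array standing in for the HDF5 weight files:

import numpy as np

weights = np.ones((3, 5), dtype='float32')  # stands in for a tensor read via h5py

def ret(shape, **kwargs):
    # reject mis-shaped variables instead of silently broadcasting bad weights
    if list(shape) != list(weights.shape):
        raise ValueError('Invalid shape, got {}, expected {}'.format(shape, weights.shape))
    return weights

print(ret((3, 5)).shape)  # (3, 5)
# ret((5, 3)) would raise ValueError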
test
|
weight_layers
|
Weight the layers of a biLM with trainable scalar weights to
compute ELMo representations.
For each output layer, this returns two ops. The first computes
a layer specific weighted average of the biLM layers, and
the second the l2 regularizer loss term.
The regularization terms are also added to tf.GraphKeys.REGULARIZATION_LOSSES
Input:
name = a string prefix used for the trainable variable names
bilm_ops = the tensorflow ops returned to compute internal
representations from a biLM. This is the return value
from BidirectionalLanguageModel(...)(ids_placeholder)
l2_coef: the l2 regularization coefficient $\lambda$.
Pass None or 0.0 for no regularization.
use_top_only: if True, then only use the top layer.
do_layer_norm: if True, then apply layer normalization to each biLM
layer before normalizing
reuse: reuse an aggregation variable scope.
Output:
{
'weighted_op': op to compute weighted average for output,
'regularization_op': op to compute regularization term
}
|
deeppavlov/models/elmo/elmo_model.py
|
def weight_layers(name, bilm_ops, l2_coef=None,
use_top_only=False, do_layer_norm=False, reuse=False):
"""
Weight the layers of a biLM with trainable scalar weights to
compute ELMo representations.
For each output layer, this returns two ops. The first computes
a layer specific weighted average of the biLM layers, and
the second the l2 regularizer loss term.
    The regularization terms are also added to tf.GraphKeys.REGULARIZATION_LOSSES
Input:
name = a string prefix used for the trainable variable names
bilm_ops = the tensorflow ops returned to compute internal
representations from a biLM. This is the return value
from BidirectionalLanguageModel(...)(ids_placeholder)
l2_coef: the l2 regularization coefficient $\lambda$.
Pass None or 0.0 for no regularization.
use_top_only: if True, then only use the top layer.
do_layer_norm: if True, then apply layer normalization to each biLM
layer before normalizing
reuse: reuse an aggregation variable scope.
Output:
{
'weighted_op': op to compute weighted average for output,
'regularization_op': op to compute regularization term
}
"""
def _l2_regularizer(weights):
if l2_coef is not None:
return l2_coef * tf.reduce_sum(tf.square(weights))
else:
return 0.0
# Get ops for computing LM embeddings and mask
lm_embeddings = bilm_ops['lm_embeddings']
mask = bilm_ops['mask']
n_lm_layers = int(lm_embeddings.get_shape()[1])
lm_dim = int(lm_embeddings.get_shape()[3])
# import pdb; pdb.set_trace()
with tf.control_dependencies([lm_embeddings, mask]):
# Cast the mask and broadcast for layer use.
mask_float = tf.cast(mask, 'float32')
broadcast_mask = tf.expand_dims(mask_float, axis=-1)
def _do_ln(x):
# do layer normalization excluding the mask
x_masked = x * broadcast_mask
N = tf.reduce_sum(mask_float) * lm_dim
mean = tf.reduce_sum(x_masked) / N
variance = tf.reduce_sum(((x_masked - mean) * broadcast_mask)**2) / N
return tf.nn.batch_normalization(
x, mean, variance, None, None, 1E-12
)
if use_top_only:
layers = tf.split(lm_embeddings, n_lm_layers, axis=1)
# just the top layer
sum_pieces = tf.squeeze(layers[-1], squeeze_dims=1)
# no regularization
reg = 0.0
else:
with tf.variable_scope("aggregation", reuse=reuse):
W = tf.get_variable(
'{}_ELMo_W'.format(name),
shape=(n_lm_layers, ),
initializer=tf.zeros_initializer,
regularizer=_l2_regularizer,
trainable=True,
)
# normalize the weights
normed_weights = tf.split(
tf.nn.softmax(W + 1.0 / n_lm_layers), n_lm_layers
)
# split LM layers
layers = tf.split(lm_embeddings, n_lm_layers, axis=1)
# compute the weighted, normalized LM activations
pieces = []
for w, t in zip(normed_weights, layers):
if do_layer_norm:
pieces.append(w * _do_ln(tf.squeeze(t, squeeze_dims=1)))
else:
pieces.append(w * tf.squeeze(t, squeeze_dims=1))
sum_pieces = tf.add_n(pieces)
# get the regularizer
reg = [
r for r in tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
if r.name.find('{}_ELMo_W/'.format(name)) >= 0
]
if len(reg) != 1:
raise ValueError
# scale the weighted sum by gamma
with tf.variable_scope("aggregation", reuse=reuse):
gamma = tf.get_variable(
'{}_ELMo_gamma'.format(name),
shape=(1, ),
initializer=tf.ones_initializer,
regularizer=None,
trainable=True,
)
weighted_lm_layers = sum_pieces * gamma
weighted_lm_layers_masked = sum_pieces * broadcast_mask
weighted_lm_layers_sum = tf.reduce_sum(weighted_lm_layers_masked, 1)
mask_sum = tf.reduce_sum(mask_float, 1)
mask_sum = tf.maximum(mask_sum, [1])
weighted_lm_layers_mean = weighted_lm_layers_sum / tf.expand_dims(mask_sum, - 1)
word_emb_2n = tf.squeeze(layers[0], [1])
word_emb_1n = tf.slice(word_emb_2n, [0, 0, 0], [-1, -1, lm_dim // 2]) # to 512
lstm_outputs1 = tf.squeeze(layers[1], [1])
lstm_outputs2 = tf.squeeze(layers[2], [1])
ret = {'weighted_op': weighted_lm_layers,
'mean_op': weighted_lm_layers_mean,
'regularization_op': reg,
'word_emb': word_emb_1n,
'lstm_outputs1': lstm_outputs1,
'lstm_outputs2': lstm_outputs2, }
return ret
|
def weight_layers(name, bilm_ops, l2_coef=None,
use_top_only=False, do_layer_norm=False, reuse=False):
"""
Weight the layers of a biLM with trainable scalar weights to
compute ELMo representations.
For each output layer, this returns two ops. The first computes
a layer specific weighted average of the biLM layers, and
the second the l2 regularizer loss term.
    The regularization terms are also added to tf.GraphKeys.REGULARIZATION_LOSSES
Input:
name = a string prefix used for the trainable variable names
bilm_ops = the tensorflow ops returned to compute internal
representations from a biLM. This is the return value
from BidirectionalLanguageModel(...)(ids_placeholder)
l2_coef: the l2 regularization coefficient $\lambda$.
Pass None or 0.0 for no regularization.
use_top_only: if True, then only use the top layer.
do_layer_norm: if True, then apply layer normalization to each biLM
layer before normalizing
reuse: reuse an aggregation variable scope.
Output:
{
'weighted_op': op to compute weighted average for output,
'regularization_op': op to compute regularization term
}
"""
def _l2_regularizer(weights):
if l2_coef is not None:
return l2_coef * tf.reduce_sum(tf.square(weights))
else:
return 0.0
# Get ops for computing LM embeddings and mask
lm_embeddings = bilm_ops['lm_embeddings']
mask = bilm_ops['mask']
n_lm_layers = int(lm_embeddings.get_shape()[1])
lm_dim = int(lm_embeddings.get_shape()[3])
# import pdb; pdb.set_trace()
with tf.control_dependencies([lm_embeddings, mask]):
# Cast the mask and broadcast for layer use.
mask_float = tf.cast(mask, 'float32')
broadcast_mask = tf.expand_dims(mask_float, axis=-1)
def _do_ln(x):
# do layer normalization excluding the mask
x_masked = x * broadcast_mask
N = tf.reduce_sum(mask_float) * lm_dim
mean = tf.reduce_sum(x_masked) / N
variance = tf.reduce_sum(((x_masked - mean) * broadcast_mask)**2) / N
return tf.nn.batch_normalization(
x, mean, variance, None, None, 1E-12
)
if use_top_only:
layers = tf.split(lm_embeddings, n_lm_layers, axis=1)
# just the top layer
sum_pieces = tf.squeeze(layers[-1], squeeze_dims=1)
# no regularization
reg = 0.0
else:
with tf.variable_scope("aggregation", reuse=reuse):
W = tf.get_variable(
'{}_ELMo_W'.format(name),
shape=(n_lm_layers, ),
initializer=tf.zeros_initializer,
regularizer=_l2_regularizer,
trainable=True,
)
# normalize the weights
normed_weights = tf.split(
tf.nn.softmax(W + 1.0 / n_lm_layers), n_lm_layers
)
# split LM layers
layers = tf.split(lm_embeddings, n_lm_layers, axis=1)
# compute the weighted, normalized LM activations
pieces = []
for w, t in zip(normed_weights, layers):
if do_layer_norm:
pieces.append(w * _do_ln(tf.squeeze(t, squeeze_dims=1)))
else:
pieces.append(w * tf.squeeze(t, squeeze_dims=1))
sum_pieces = tf.add_n(pieces)
# get the regularizer
reg = [
r for r in tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
if r.name.find('{}_ELMo_W/'.format(name)) >= 0
]
if len(reg) != 1:
raise ValueError
# scale the weighted sum by gamma
with tf.variable_scope("aggregation", reuse=reuse):
gamma = tf.get_variable(
'{}_ELMo_gamma'.format(name),
shape=(1, ),
initializer=tf.ones_initializer,
regularizer=None,
trainable=True,
)
weighted_lm_layers = sum_pieces * gamma
weighted_lm_layers_masked = sum_pieces * broadcast_mask
weighted_lm_layers_sum = tf.reduce_sum(weighted_lm_layers_masked, 1)
mask_sum = tf.reduce_sum(mask_float, 1)
mask_sum = tf.maximum(mask_sum, [1])
weighted_lm_layers_mean = weighted_lm_layers_sum / tf.expand_dims(mask_sum, - 1)
word_emb_2n = tf.squeeze(layers[0], [1])
word_emb_1n = tf.slice(word_emb_2n, [0, 0, 0], [-1, -1, lm_dim // 2]) # to 512
lstm_outputs1 = tf.squeeze(layers[1], [1])
lstm_outputs2 = tf.squeeze(layers[2], [1])
ret = {'weighted_op': weighted_lm_layers,
'mean_op': weighted_lm_layers_mean,
'regularization_op': reg,
'word_emb': word_emb_1n,
'lstm_outputs1': lstm_outputs1,
'lstm_outputs2': lstm_outputs2, }
return ret
|
[
"Weight",
"the",
"layers",
"of",
"a",
"biLM",
"with",
"trainable",
"scalar",
"weights",
"to",
"compute",
"ELMo",
"representations",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/elmo/elmo_model.py#L597-L728
|
[
"def",
"weight_layers",
"(",
"name",
",",
"bilm_ops",
",",
"l2_coef",
"=",
"None",
",",
"use_top_only",
"=",
"False",
",",
"do_layer_norm",
"=",
"False",
",",
"reuse",
"=",
"False",
")",
":",
"def",
"_l2_regularizer",
"(",
"weights",
")",
":",
"if",
"l2_coef",
"is",
"not",
"None",
":",
"return",
"l2_coef",
"*",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"square",
"(",
"weights",
")",
")",
"else",
":",
"return",
"0.0",
"# Get ops for computing LM embeddings and mask",
"lm_embeddings",
"=",
"bilm_ops",
"[",
"'lm_embeddings'",
"]",
"mask",
"=",
"bilm_ops",
"[",
"'mask'",
"]",
"n_lm_layers",
"=",
"int",
"(",
"lm_embeddings",
".",
"get_shape",
"(",
")",
"[",
"1",
"]",
")",
"lm_dim",
"=",
"int",
"(",
"lm_embeddings",
".",
"get_shape",
"(",
")",
"[",
"3",
"]",
")",
"# import pdb; pdb.set_trace()",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"lm_embeddings",
",",
"mask",
"]",
")",
":",
"# Cast the mask and broadcast for layer use.",
"mask_float",
"=",
"tf",
".",
"cast",
"(",
"mask",
",",
"'float32'",
")",
"broadcast_mask",
"=",
"tf",
".",
"expand_dims",
"(",
"mask_float",
",",
"axis",
"=",
"-",
"1",
")",
"def",
"_do_ln",
"(",
"x",
")",
":",
"# do layer normalization excluding the mask",
"x_masked",
"=",
"x",
"*",
"broadcast_mask",
"N",
"=",
"tf",
".",
"reduce_sum",
"(",
"mask_float",
")",
"*",
"lm_dim",
"mean",
"=",
"tf",
".",
"reduce_sum",
"(",
"x_masked",
")",
"/",
"N",
"variance",
"=",
"tf",
".",
"reduce_sum",
"(",
"(",
"(",
"x_masked",
"-",
"mean",
")",
"*",
"broadcast_mask",
")",
"**",
"2",
")",
"/",
"N",
"return",
"tf",
".",
"nn",
".",
"batch_normalization",
"(",
"x",
",",
"mean",
",",
"variance",
",",
"None",
",",
"None",
",",
"1E-12",
")",
"if",
"use_top_only",
":",
"layers",
"=",
"tf",
".",
"split",
"(",
"lm_embeddings",
",",
"n_lm_layers",
",",
"axis",
"=",
"1",
")",
"# just the top layer",
"sum_pieces",
"=",
"tf",
".",
"squeeze",
"(",
"layers",
"[",
"-",
"1",
"]",
",",
"squeeze_dims",
"=",
"1",
")",
"# no regularization",
"reg",
"=",
"0.0",
"else",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"\"aggregation\"",
",",
"reuse",
"=",
"reuse",
")",
":",
"W",
"=",
"tf",
".",
"get_variable",
"(",
"'{}_ELMo_W'",
".",
"format",
"(",
"name",
")",
",",
"shape",
"=",
"(",
"n_lm_layers",
",",
")",
",",
"initializer",
"=",
"tf",
".",
"zeros_initializer",
",",
"regularizer",
"=",
"_l2_regularizer",
",",
"trainable",
"=",
"True",
",",
")",
"# normalize the weights",
"normed_weights",
"=",
"tf",
".",
"split",
"(",
"tf",
".",
"nn",
".",
"softmax",
"(",
"W",
"+",
"1.0",
"/",
"n_lm_layers",
")",
",",
"n_lm_layers",
")",
"# split LM layers",
"layers",
"=",
"tf",
".",
"split",
"(",
"lm_embeddings",
",",
"n_lm_layers",
",",
"axis",
"=",
"1",
")",
"# compute the weighted, normalized LM activations",
"pieces",
"=",
"[",
"]",
"for",
"w",
",",
"t",
"in",
"zip",
"(",
"normed_weights",
",",
"layers",
")",
":",
"if",
"do_layer_norm",
":",
"pieces",
".",
"append",
"(",
"w",
"*",
"_do_ln",
"(",
"tf",
".",
"squeeze",
"(",
"t",
",",
"squeeze_dims",
"=",
"1",
")",
")",
")",
"else",
":",
"pieces",
".",
"append",
"(",
"w",
"*",
"tf",
".",
"squeeze",
"(",
"t",
",",
"squeeze_dims",
"=",
"1",
")",
")",
"sum_pieces",
"=",
"tf",
".",
"add_n",
"(",
"pieces",
")",
"# get the regularizer",
"reg",
"=",
"[",
"r",
"for",
"r",
"in",
"tf",
".",
"get_collection",
"(",
"tf",
".",
"GraphKeys",
".",
"REGULARIZATION_LOSSES",
")",
"if",
"r",
".",
"name",
".",
"find",
"(",
"'{}_ELMo_W/'",
".",
"format",
"(",
"name",
")",
")",
">=",
"0",
"]",
"if",
"len",
"(",
"reg",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"# scale the weighted sum by gamma",
"with",
"tf",
".",
"variable_scope",
"(",
"\"aggregation\"",
",",
"reuse",
"=",
"reuse",
")",
":",
"gamma",
"=",
"tf",
".",
"get_variable",
"(",
"'{}_ELMo_gamma'",
".",
"format",
"(",
"name",
")",
",",
"shape",
"=",
"(",
"1",
",",
")",
",",
"initializer",
"=",
"tf",
".",
"ones_initializer",
",",
"regularizer",
"=",
"None",
",",
"trainable",
"=",
"True",
",",
")",
"weighted_lm_layers",
"=",
"sum_pieces",
"*",
"gamma",
"weighted_lm_layers_masked",
"=",
"sum_pieces",
"*",
"broadcast_mask",
"weighted_lm_layers_sum",
"=",
"tf",
".",
"reduce_sum",
"(",
"weighted_lm_layers_masked",
",",
"1",
")",
"mask_sum",
"=",
"tf",
".",
"reduce_sum",
"(",
"mask_float",
",",
"1",
")",
"mask_sum",
"=",
"tf",
".",
"maximum",
"(",
"mask_sum",
",",
"[",
"1",
"]",
")",
"weighted_lm_layers_mean",
"=",
"weighted_lm_layers_sum",
"/",
"tf",
".",
"expand_dims",
"(",
"mask_sum",
",",
"-",
"1",
")",
"word_emb_2n",
"=",
"tf",
".",
"squeeze",
"(",
"layers",
"[",
"0",
"]",
",",
"[",
"1",
"]",
")",
"word_emb_1n",
"=",
"tf",
".",
"slice",
"(",
"word_emb_2n",
",",
"[",
"0",
",",
"0",
",",
"0",
"]",
",",
"[",
"-",
"1",
",",
"-",
"1",
",",
"lm_dim",
"//",
"2",
"]",
")",
"# to 512",
"lstm_outputs1",
"=",
"tf",
".",
"squeeze",
"(",
"layers",
"[",
"1",
"]",
",",
"[",
"1",
"]",
")",
"lstm_outputs2",
"=",
"tf",
".",
"squeeze",
"(",
"layers",
"[",
"2",
"]",
",",
"[",
"1",
"]",
")",
"ret",
"=",
"{",
"'weighted_op'",
":",
"weighted_lm_layers",
",",
"'mean_op'",
":",
"weighted_lm_layers_mean",
",",
"'regularization_op'",
":",
"reg",
",",
"'word_emb'",
":",
"word_emb_1n",
",",
"'lstm_outputs1'",
":",
"lstm_outputs1",
",",
"'lstm_outputs2'",
":",
"lstm_outputs2",
",",
"}",
"return",
"ret"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
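Numerically, the non-top-only branch reduces to softmax-normalized scalars over the biLM layers, a weighted sum, and a gamma scale. A NumPy sketch of that arithmetic on toy shapes (batch 2, 3 layers, 4 tokens, dim 6):

import numpy as np

def softmax(x):
    e = np.exp(x - x.max())
    return e / e.sum()

lm_embeddings = np.random.rand(2, 3, 4, 6)  # (batch, n_lm_layers, tokens, lm_dim)
W = np.zeros(3)                             # zero-initialized trainable scalars
gamma = 1.0                                 # ones-initialized trainable scale
s = softmax(W + 1.0 / 3)                    # uniform layer weights at initialization
elmo = gamma * np.tensordot(s, lm_embeddings, axes=([0], [1]))
print(elmo.shape)  # (2, 4, 6): one weighted representation per token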
test
|
BidirectionalLanguageModelGraph._build_word_char_embeddings
|
options contains key 'char_cnn': {
'n_characters': 262,
# includes the start / end characters
'max_characters_per_token': 50,
'filters': [
[1, 32],
[2, 32],
[3, 64],
[4, 128],
[5, 256],
[6, 512],
[7, 512]
],
'activation': 'tanh',
# for the character embedding
'embedding': {'dim': 16}
# for highway layers
# if omitted, then no highway layers
'n_highway': 2,
}
|
deeppavlov/models/elmo/elmo_model.py
|
def _build_word_char_embeddings(self):
"""
options contains key 'char_cnn': {
'n_characters': 262,
# includes the start / end characters
'max_characters_per_token': 50,
'filters': [
[1, 32],
[2, 32],
[3, 64],
[4, 128],
[5, 256],
[6, 512],
[7, 512]
],
'activation': 'tanh',
# for the character embedding
'embedding': {'dim': 16}
# for highway layers
# if omitted, then no highway layers
'n_highway': 2,
}
"""
projection_dim = self.options['lstm']['projection_dim']
cnn_options = self.options['char_cnn']
filters = cnn_options['filters']
n_filters = sum(f[1] for f in filters)
max_chars = cnn_options['max_characters_per_token']
char_embed_dim = cnn_options['embedding']['dim']
n_chars = cnn_options['n_characters']
if n_chars != 262:
raise Exception("Set n_characters=262 after training see a \
https://github.com/allenai/bilm-tf/blob/master/README.md")
if cnn_options['activation'] == 'tanh':
activation = tf.nn.tanh
elif cnn_options['activation'] == 'relu':
activation = tf.nn.relu
# the character embeddings
with tf.device("/cpu:0"):
self.embedding_weights = tf.get_variable("char_embed", [n_chars, char_embed_dim],
dtype=DTYPE,
initializer=tf.random_uniform_initializer(-1.0, 1.0))
# shape (batch_size, unroll_steps, max_chars, embed_dim)
self.char_embedding = tf.nn.embedding_lookup(self.embedding_weights,
self.ids_placeholder)
# the convolutions
def make_convolutions(inp):
with tf.variable_scope('CNN'):
convolutions = []
for i, (width, num) in enumerate(filters):
if cnn_options['activation'] == 'relu':
# He initialization for ReLU activation
# with char embeddings init between -1 and 1
# w_init = tf.random_normal_initializer(
# mean=0.0,
# stddev=np.sqrt(2.0 / (width * char_embed_dim))
# )
# Kim et al 2015, +/- 0.05
w_init = tf.random_uniform_initializer(
minval=-0.05, maxval=0.05)
elif cnn_options['activation'] == 'tanh':
# glorot init
w_init = tf.random_normal_initializer(
mean=0.0,
stddev=np.sqrt(1.0 / (width * char_embed_dim))
)
w = tf.get_variable(
"W_cnn_%s" % i,
[1, width, char_embed_dim, num],
initializer=w_init,
dtype=DTYPE)
b = tf.get_variable(
"b_cnn_%s" % i, [num], dtype=DTYPE,
initializer=tf.constant_initializer(0.0))
conv = tf.nn.conv2d(inp, w,
strides=[1, 1, 1, 1],
padding="VALID") + b
# now max pool
conv = tf.nn.max_pool(conv, [1, 1, max_chars - width + 1, 1],
[1, 1, 1, 1], 'VALID')
# activation
conv = activation(conv)
conv = tf.squeeze(conv, squeeze_dims=[2])
convolutions.append(conv)
return tf.concat(convolutions, 2)
embedding = make_convolutions(self.char_embedding)
# for highway and projection layers
n_highway = cnn_options.get('n_highway')
use_highway = n_highway is not None and n_highway > 0
use_proj = n_filters != projection_dim
if use_highway or use_proj:
# reshape from (batch_size, n_tokens, dim) to (-1, dim)
batch_size_n_tokens = tf.shape(embedding)[0:2]
embedding = tf.reshape(embedding, [-1, n_filters])
# set up weights for projection
if use_proj:
assert n_filters > projection_dim
with tf.variable_scope('CNN_proj'):
W_proj_cnn = tf.get_variable(
"W_proj", [n_filters, projection_dim],
initializer=tf.random_normal_initializer(
mean=0.0, stddev=np.sqrt(1.0 / n_filters)),
dtype=DTYPE)
b_proj_cnn = tf.get_variable(
"b_proj", [projection_dim],
initializer=tf.constant_initializer(0.0),
dtype=DTYPE)
    # apply highway layers
def high(x, ww_carry, bb_carry, ww_tr, bb_tr):
carry_gate = tf.nn.sigmoid(tf.matmul(x, ww_carry) + bb_carry)
transform_gate = tf.nn.relu(tf.matmul(x, ww_tr) + bb_tr)
return carry_gate * transform_gate + (1.0 - carry_gate) * x
if use_highway:
highway_dim = n_filters
for i in range(n_highway):
with tf.variable_scope('CNN_high_%s' % i):
W_carry = tf.get_variable(
'W_carry', [highway_dim, highway_dim],
                    # glorot init
initializer=tf.random_normal_initializer(
mean=0.0, stddev=np.sqrt(1.0 / highway_dim)),
dtype=DTYPE)
b_carry = tf.get_variable(
'b_carry', [highway_dim],
initializer=tf.constant_initializer(-2.0),
dtype=DTYPE)
W_transform = tf.get_variable(
'W_transform', [highway_dim, highway_dim],
initializer=tf.random_normal_initializer(
mean=0.0, stddev=np.sqrt(1.0 / highway_dim)),
dtype=DTYPE)
b_transform = tf.get_variable(
'b_transform', [highway_dim],
initializer=tf.constant_initializer(0.0),
dtype=DTYPE)
embedding = high(embedding, W_carry, b_carry,
W_transform, b_transform)
# finally project down if needed
if use_proj:
embedding = tf.matmul(embedding, W_proj_cnn) + b_proj_cnn
# reshape back to (batch_size, tokens, dim)
if use_highway or use_proj:
shp = tf.concat([batch_size_n_tokens, [projection_dim]], axis=0)
embedding = tf.reshape(embedding, shp)
# at last assign attributes for remainder of the model
self.embedding = embedding
|
def _build_word_char_embeddings(self):
"""
options contains key 'char_cnn': {
'n_characters': 262,
# includes the start / end characters
'max_characters_per_token': 50,
'filters': [
[1, 32],
[2, 32],
[3, 64],
[4, 128],
[5, 256],
[6, 512],
[7, 512]
],
'activation': 'tanh',
# for the character embedding
'embedding': {'dim': 16}
# for highway layers
# if omitted, then no highway layers
'n_highway': 2,
}
"""
projection_dim = self.options['lstm']['projection_dim']
cnn_options = self.options['char_cnn']
filters = cnn_options['filters']
n_filters = sum(f[1] for f in filters)
max_chars = cnn_options['max_characters_per_token']
char_embed_dim = cnn_options['embedding']['dim']
n_chars = cnn_options['n_characters']
if n_chars != 262:
raise Exception("Set n_characters=262 after training see a \
https://github.com/allenai/bilm-tf/blob/master/README.md")
if cnn_options['activation'] == 'tanh':
activation = tf.nn.tanh
elif cnn_options['activation'] == 'relu':
activation = tf.nn.relu
# the character embeddings
with tf.device("/cpu:0"):
self.embedding_weights = tf.get_variable("char_embed", [n_chars, char_embed_dim],
dtype=DTYPE,
initializer=tf.random_uniform_initializer(-1.0, 1.0))
# shape (batch_size, unroll_steps, max_chars, embed_dim)
self.char_embedding = tf.nn.embedding_lookup(self.embedding_weights,
self.ids_placeholder)
# the convolutions
def make_convolutions(inp):
with tf.variable_scope('CNN'):
convolutions = []
for i, (width, num) in enumerate(filters):
if cnn_options['activation'] == 'relu':
# He initialization for ReLU activation
# with char embeddings init between -1 and 1
# w_init = tf.random_normal_initializer(
# mean=0.0,
# stddev=np.sqrt(2.0 / (width * char_embed_dim))
# )
# Kim et al 2015, +/- 0.05
w_init = tf.random_uniform_initializer(
minval=-0.05, maxval=0.05)
elif cnn_options['activation'] == 'tanh':
# glorot init
w_init = tf.random_normal_initializer(
mean=0.0,
stddev=np.sqrt(1.0 / (width * char_embed_dim))
)
w = tf.get_variable(
"W_cnn_%s" % i,
[1, width, char_embed_dim, num],
initializer=w_init,
dtype=DTYPE)
b = tf.get_variable(
"b_cnn_%s" % i, [num], dtype=DTYPE,
initializer=tf.constant_initializer(0.0))
conv = tf.nn.conv2d(inp, w,
strides=[1, 1, 1, 1],
padding="VALID") + b
# now max pool
conv = tf.nn.max_pool(conv, [1, 1, max_chars - width + 1, 1],
[1, 1, 1, 1], 'VALID')
# activation
conv = activation(conv)
conv = tf.squeeze(conv, squeeze_dims=[2])
convolutions.append(conv)
return tf.concat(convolutions, 2)
embedding = make_convolutions(self.char_embedding)
# for highway and projection layers
n_highway = cnn_options.get('n_highway')
use_highway = n_highway is not None and n_highway > 0
use_proj = n_filters != projection_dim
if use_highway or use_proj:
# reshape from (batch_size, n_tokens, dim) to (-1, dim)
batch_size_n_tokens = tf.shape(embedding)[0:2]
embedding = tf.reshape(embedding, [-1, n_filters])
# set up weights for projection
if use_proj:
assert n_filters > projection_dim
with tf.variable_scope('CNN_proj'):
W_proj_cnn = tf.get_variable(
"W_proj", [n_filters, projection_dim],
initializer=tf.random_normal_initializer(
mean=0.0, stddev=np.sqrt(1.0 / n_filters)),
dtype=DTYPE)
b_proj_cnn = tf.get_variable(
"b_proj", [projection_dim],
initializer=tf.constant_initializer(0.0),
dtype=DTYPE)
    # apply highway layers
def high(x, ww_carry, bb_carry, ww_tr, bb_tr):
carry_gate = tf.nn.sigmoid(tf.matmul(x, ww_carry) + bb_carry)
transform_gate = tf.nn.relu(tf.matmul(x, ww_tr) + bb_tr)
return carry_gate * transform_gate + (1.0 - carry_gate) * x
if use_highway:
highway_dim = n_filters
for i in range(n_highway):
with tf.variable_scope('CNN_high_%s' % i):
W_carry = tf.get_variable(
'W_carry', [highway_dim, highway_dim],
                    # glorot init
initializer=tf.random_normal_initializer(
mean=0.0, stddev=np.sqrt(1.0 / highway_dim)),
dtype=DTYPE)
b_carry = tf.get_variable(
'b_carry', [highway_dim],
initializer=tf.constant_initializer(-2.0),
dtype=DTYPE)
W_transform = tf.get_variable(
'W_transform', [highway_dim, highway_dim],
initializer=tf.random_normal_initializer(
mean=0.0, stddev=np.sqrt(1.0 / highway_dim)),
dtype=DTYPE)
b_transform = tf.get_variable(
'b_transform', [highway_dim],
initializer=tf.constant_initializer(0.0),
dtype=DTYPE)
embedding = high(embedding, W_carry, b_carry,
W_transform, b_transform)
# finally project down if needed
if use_proj:
embedding = tf.matmul(embedding, W_proj_cnn) + b_proj_cnn
# reshape back to (batch_size, tokens, dim)
if use_highway or use_proj:
shp = tf.concat([batch_size_n_tokens, [projection_dim]], axis=0)
embedding = tf.reshape(embedding, shp)
# at last assign attributes for remainder of the model
self.embedding = embedding
|
[
"options",
"contains",
"key",
"char_cnn",
":",
"{"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/elmo/elmo_model.py#L292-L462
|
[
"def",
"_build_word_char_embeddings",
"(",
"self",
")",
":",
"projection_dim",
"=",
"self",
".",
"options",
"[",
"'lstm'",
"]",
"[",
"'projection_dim'",
"]",
"cnn_options",
"=",
"self",
".",
"options",
"[",
"'char_cnn'",
"]",
"filters",
"=",
"cnn_options",
"[",
"'filters'",
"]",
"n_filters",
"=",
"sum",
"(",
"f",
"[",
"1",
"]",
"for",
"f",
"in",
"filters",
")",
"max_chars",
"=",
"cnn_options",
"[",
"'max_characters_per_token'",
"]",
"char_embed_dim",
"=",
"cnn_options",
"[",
"'embedding'",
"]",
"[",
"'dim'",
"]",
"n_chars",
"=",
"cnn_options",
"[",
"'n_characters'",
"]",
"if",
"n_chars",
"!=",
"262",
":",
"raise",
"Exception",
"(",
"\"Set n_characters=262 after training see a \\\n https://github.com/allenai/bilm-tf/blob/master/README.md\"",
")",
"if",
"cnn_options",
"[",
"'activation'",
"]",
"==",
"'tanh'",
":",
"activation",
"=",
"tf",
".",
"nn",
".",
"tanh",
"elif",
"cnn_options",
"[",
"'activation'",
"]",
"==",
"'relu'",
":",
"activation",
"=",
"tf",
".",
"nn",
".",
"relu",
"# the character embeddings",
"with",
"tf",
".",
"device",
"(",
"\"/cpu:0\"",
")",
":",
"self",
".",
"embedding_weights",
"=",
"tf",
".",
"get_variable",
"(",
"\"char_embed\"",
",",
"[",
"n_chars",
",",
"char_embed_dim",
"]",
",",
"dtype",
"=",
"DTYPE",
",",
"initializer",
"=",
"tf",
".",
"random_uniform_initializer",
"(",
"-",
"1.0",
",",
"1.0",
")",
")",
"# shape (batch_size, unroll_steps, max_chars, embed_dim)",
"self",
".",
"char_embedding",
"=",
"tf",
".",
"nn",
".",
"embedding_lookup",
"(",
"self",
".",
"embedding_weights",
",",
"self",
".",
"ids_placeholder",
")",
"# the convolutions",
"def",
"make_convolutions",
"(",
"inp",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"'CNN'",
")",
":",
"convolutions",
"=",
"[",
"]",
"for",
"i",
",",
"(",
"width",
",",
"num",
")",
"in",
"enumerate",
"(",
"filters",
")",
":",
"if",
"cnn_options",
"[",
"'activation'",
"]",
"==",
"'relu'",
":",
"# He initialization for ReLU activation",
"# with char embeddings init between -1 and 1",
"# w_init = tf.random_normal_initializer(",
"# mean=0.0,",
"# stddev=np.sqrt(2.0 / (width * char_embed_dim))",
"# )",
"# Kim et al 2015, +/- 0.05",
"w_init",
"=",
"tf",
".",
"random_uniform_initializer",
"(",
"minval",
"=",
"-",
"0.05",
",",
"maxval",
"=",
"0.05",
")",
"elif",
"cnn_options",
"[",
"'activation'",
"]",
"==",
"'tanh'",
":",
"# glorot init",
"w_init",
"=",
"tf",
".",
"random_normal_initializer",
"(",
"mean",
"=",
"0.0",
",",
"stddev",
"=",
"np",
".",
"sqrt",
"(",
"1.0",
"/",
"(",
"width",
"*",
"char_embed_dim",
")",
")",
")",
"w",
"=",
"tf",
".",
"get_variable",
"(",
"\"W_cnn_%s\"",
"%",
"i",
",",
"[",
"1",
",",
"width",
",",
"char_embed_dim",
",",
"num",
"]",
",",
"initializer",
"=",
"w_init",
",",
"dtype",
"=",
"DTYPE",
")",
"b",
"=",
"tf",
".",
"get_variable",
"(",
"\"b_cnn_%s\"",
"%",
"i",
",",
"[",
"num",
"]",
",",
"dtype",
"=",
"DTYPE",
",",
"initializer",
"=",
"tf",
".",
"constant_initializer",
"(",
"0.0",
")",
")",
"conv",
"=",
"tf",
".",
"nn",
".",
"conv2d",
"(",
"inp",
",",
"w",
",",
"strides",
"=",
"[",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
",",
"padding",
"=",
"\"VALID\"",
")",
"+",
"b",
"# now max pool",
"conv",
"=",
"tf",
".",
"nn",
".",
"max_pool",
"(",
"conv",
",",
"[",
"1",
",",
"1",
",",
"max_chars",
"-",
"width",
"+",
"1",
",",
"1",
"]",
",",
"[",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
",",
"'VALID'",
")",
"# activation",
"conv",
"=",
"activation",
"(",
"conv",
")",
"conv",
"=",
"tf",
".",
"squeeze",
"(",
"conv",
",",
"squeeze_dims",
"=",
"[",
"2",
"]",
")",
"convolutions",
".",
"append",
"(",
"conv",
")",
"return",
"tf",
".",
"concat",
"(",
"convolutions",
",",
"2",
")",
"embedding",
"=",
"make_convolutions",
"(",
"self",
".",
"char_embedding",
")",
"# for highway and projection layers",
"n_highway",
"=",
"cnn_options",
".",
"get",
"(",
"'n_highway'",
")",
"use_highway",
"=",
"n_highway",
"is",
"not",
"None",
"and",
"n_highway",
">",
"0",
"use_proj",
"=",
"n_filters",
"!=",
"projection_dim",
"if",
"use_highway",
"or",
"use_proj",
":",
"# reshape from (batch_size, n_tokens, dim) to (-1, dim)",
"batch_size_n_tokens",
"=",
"tf",
".",
"shape",
"(",
"embedding",
")",
"[",
"0",
":",
"2",
"]",
"embedding",
"=",
"tf",
".",
"reshape",
"(",
"embedding",
",",
"[",
"-",
"1",
",",
"n_filters",
"]",
")",
"# set up weights for projection",
"if",
"use_proj",
":",
"assert",
"n_filters",
">",
"projection_dim",
"with",
"tf",
".",
"variable_scope",
"(",
"'CNN_proj'",
")",
":",
"W_proj_cnn",
"=",
"tf",
".",
"get_variable",
"(",
"\"W_proj\"",
",",
"[",
"n_filters",
",",
"projection_dim",
"]",
",",
"initializer",
"=",
"tf",
".",
"random_normal_initializer",
"(",
"mean",
"=",
"0.0",
",",
"stddev",
"=",
"np",
".",
"sqrt",
"(",
"1.0",
"/",
"n_filters",
")",
")",
",",
"dtype",
"=",
"DTYPE",
")",
"b_proj_cnn",
"=",
"tf",
".",
"get_variable",
"(",
"\"b_proj\"",
",",
"[",
"projection_dim",
"]",
",",
"initializer",
"=",
"tf",
".",
"constant_initializer",
"(",
"0.0",
")",
",",
"dtype",
"=",
"DTYPE",
")",
"# apply highways layers",
"def",
"high",
"(",
"x",
",",
"ww_carry",
",",
"bb_carry",
",",
"ww_tr",
",",
"bb_tr",
")",
":",
"carry_gate",
"=",
"tf",
".",
"nn",
".",
"sigmoid",
"(",
"tf",
".",
"matmul",
"(",
"x",
",",
"ww_carry",
")",
"+",
"bb_carry",
")",
"transform_gate",
"=",
"tf",
".",
"nn",
".",
"relu",
"(",
"tf",
".",
"matmul",
"(",
"x",
",",
"ww_tr",
")",
"+",
"bb_tr",
")",
"return",
"carry_gate",
"*",
"transform_gate",
"+",
"(",
"1.0",
"-",
"carry_gate",
")",
"*",
"x",
"if",
"use_highway",
":",
"highway_dim",
"=",
"n_filters",
"for",
"i",
"in",
"range",
"(",
"n_highway",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"'CNN_high_%s'",
"%",
"i",
")",
":",
"W_carry",
"=",
"tf",
".",
"get_variable",
"(",
"'W_carry'",
",",
"[",
"highway_dim",
",",
"highway_dim",
"]",
",",
"# glorit init",
"initializer",
"=",
"tf",
".",
"random_normal_initializer",
"(",
"mean",
"=",
"0.0",
",",
"stddev",
"=",
"np",
".",
"sqrt",
"(",
"1.0",
"/",
"highway_dim",
")",
")",
",",
"dtype",
"=",
"DTYPE",
")",
"b_carry",
"=",
"tf",
".",
"get_variable",
"(",
"'b_carry'",
",",
"[",
"highway_dim",
"]",
",",
"initializer",
"=",
"tf",
".",
"constant_initializer",
"(",
"-",
"2.0",
")",
",",
"dtype",
"=",
"DTYPE",
")",
"W_transform",
"=",
"tf",
".",
"get_variable",
"(",
"'W_transform'",
",",
"[",
"highway_dim",
",",
"highway_dim",
"]",
",",
"initializer",
"=",
"tf",
".",
"random_normal_initializer",
"(",
"mean",
"=",
"0.0",
",",
"stddev",
"=",
"np",
".",
"sqrt",
"(",
"1.0",
"/",
"highway_dim",
")",
")",
",",
"dtype",
"=",
"DTYPE",
")",
"b_transform",
"=",
"tf",
".",
"get_variable",
"(",
"'b_transform'",
",",
"[",
"highway_dim",
"]",
",",
"initializer",
"=",
"tf",
".",
"constant_initializer",
"(",
"0.0",
")",
",",
"dtype",
"=",
"DTYPE",
")",
"embedding",
"=",
"high",
"(",
"embedding",
",",
"W_carry",
",",
"b_carry",
",",
"W_transform",
",",
"b_transform",
")",
"# finally project down if needed",
"if",
"use_proj",
":",
"embedding",
"=",
"tf",
".",
"matmul",
"(",
"embedding",
",",
"W_proj_cnn",
")",
"+",
"b_proj_cnn",
"# reshape back to (batch_size, tokens, dim)",
"if",
"use_highway",
"or",
"use_proj",
":",
"shp",
"=",
"tf",
".",
"concat",
"(",
"[",
"batch_size_n_tokens",
",",
"[",
"projection_dim",
"]",
"]",
",",
"axis",
"=",
"0",
")",
"embedding",
"=",
"tf",
".",
"reshape",
"(",
"embedding",
",",
"shp",
")",
"# at last assign attributes for remainder of the model",
"self",
".",
"embedding",
"=",
"embedding"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
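The char_cnn options block in the docstring fully determines the pre-projection width: each [width, num] filter contributes num channels after max-pooling over character positions. A quick check on the documented configuration:

char_cnn = {
    'max_characters_per_token': 50,
    'filters': [[1, 32], [2, 32], [3, 64], [4, 128], [5, 256], [6, 512], [7, 512]],
    'embedding': {'dim': 16},
    'n_highway': 2,
}
n_filters = sum(num for width, num in char_cnn['filters'])
print(n_filters)  # 1536 channels, later projected down to the LSTM projection_dim
# a width-w filter max-pools over max_chars - w + 1 positions, e.g. 50 - 7 + 1 = 44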
test
|
DatasetReader.read
|
Reads a file from a path and returns data as a list of tuples of inputs and correct outputs
for every data type in ``train``, ``valid`` and ``test``.
|
deeppavlov/core/data/dataset_reader.py
|
def read(self, data_path: str, *args, **kwargs) -> Dict[str, List[Tuple[Any, Any]]]:
"""Reads a file from a path and returns data as a list of tuples of inputs and correct outputs
for every data type in ``train``, ``valid`` and ``test``.
"""
raise NotImplementedError
|
def read(self, data_path: str, *args, **kwargs) -> Dict[str, List[Tuple[Any, Any]]]:
"""Reads a file from a path and returns data as a list of tuples of inputs and correct outputs
for every data type in ``train``, ``valid`` and ``test``.
"""
raise NotImplementedError
|
[
"Reads",
"a",
"file",
"from",
"a",
"path",
"and",
"returns",
"data",
"as",
"a",
"list",
"of",
"tuples",
"of",
"inputs",
"and",
"correct",
"outputs",
"for",
"every",
"data",
"type",
"in",
"train",
"valid",
"and",
"test",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/data/dataset_reader.py#L21-L25
|
[
"def",
"read",
"(",
"self",
",",
"data_path",
":",
"str",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"->",
"Dict",
"[",
"str",
",",
"List",
"[",
"Tuple",
"[",
"Any",
",",
"Any",
"]",
"]",
"]",
":",
"raise",
"NotImplementedError"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
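A minimal subclass sketch of the contract: the reader returns one list of (input, output) tuples per split. ToyReader and its inline data are illustrative only.

from typing import Any, Dict, List, Tuple

from deeppavlov.core.data.dataset_reader import DatasetReader

class ToyReader(DatasetReader):
    def read(self, data_path: str, *args, **kwargs) -> Dict[str, List[Tuple[Any, Any]]]:
        # a real reader would parse the files found under data_path
        return {'train': [('hi there', 'greet')], 'valid': [], 'test': []}

data = ToyReader().read('/tmp/unused')
assert set(data) == {'train', 'valid', 'test'}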
test
|
make_hello_bot_agent
|
Builds agent based on PatternMatchingSkill and HighestConfidenceSelector.
This is an agent-building tutorial. You can use this .py file to check how the hello-bot agent works.
Returns:
agent: Agent capable of handling several simple greetings.
|
deeppavlov/agents/hello_bot_agent/hello_bot_agent.py
|
def make_hello_bot_agent() -> DefaultAgent:
"""Builds agent based on PatternMatchingSkill and HighestConfidenceSelector.
    This is an agent-building tutorial. You can use this .py file to check how the hello-bot agent works.
Returns:
agent: Agent capable of handling several simple greetings.
"""
skill_hello = PatternMatchingSkill(['Hello world'], patterns=['hi', 'hello', 'good day'])
skill_bye = PatternMatchingSkill(['Goodbye world', 'See you around'], patterns=['bye', 'chao', 'see you'])
skill_fallback = PatternMatchingSkill(['I don\'t understand, sorry', 'I can say "Hello world"'])
agent = DefaultAgent([skill_hello, skill_bye, skill_fallback], skills_processor=HighestConfidenceSelector())
return agent
|
def make_hello_bot_agent() -> DefaultAgent:
"""Builds agent based on PatternMatchingSkill and HighestConfidenceSelector.
    This is an agent-building tutorial. You can use this .py file to check how the hello-bot agent works.
Returns:
agent: Agent capable of handling several simple greetings.
"""
skill_hello = PatternMatchingSkill(['Hello world'], patterns=['hi', 'hello', 'good day'])
skill_bye = PatternMatchingSkill(['Goodbye world', 'See you around'], patterns=['bye', 'chao', 'see you'])
skill_fallback = PatternMatchingSkill(['I don\'t understand, sorry', 'I can say "Hello world"'])
agent = DefaultAgent([skill_hello, skill_bye, skill_fallback], skills_processor=HighestConfidenceSelector())
return agent
|
[
"Builds",
"agent",
"based",
"on",
"PatternMatchingSkill",
"and",
"HighestConfidenceSelector",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/agents/hello_bot_agent/hello_bot_agent.py#L20-L34
|
[
"def",
"make_hello_bot_agent",
"(",
")",
"->",
"DefaultAgent",
":",
"skill_hello",
"=",
"PatternMatchingSkill",
"(",
"[",
"'Hello world'",
"]",
",",
"patterns",
"=",
"[",
"'hi'",
",",
"'hello'",
",",
"'good day'",
"]",
")",
"skill_bye",
"=",
"PatternMatchingSkill",
"(",
"[",
"'Goodbye world'",
",",
"'See you around'",
"]",
",",
"patterns",
"=",
"[",
"'bye'",
",",
"'chao'",
",",
"'see you'",
"]",
")",
"skill_fallback",
"=",
"PatternMatchingSkill",
"(",
"[",
"'I don\\'t understand, sorry'",
",",
"'I can say \"Hello world\"'",
"]",
")",
"agent",
"=",
"DefaultAgent",
"(",
"[",
"skill_hello",
",",
"skill_bye",
",",
"skill_fallback",
"]",
",",
"skills_processor",
"=",
"HighestConfidenceSelector",
"(",
")",
")",
"return",
"agent"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
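A usage sketch, assuming DefaultAgent instances are callable on a batch of utterances as in the DeepPavlov agent tutorials:

from deeppavlov.agents.hello_bot_agent.hello_bot_agent import make_hello_bot_agent

agent = make_hello_bot_agent()
# one response per utterance; inputs matching no pattern hit the fallback skill
print(agent(['Hello!', 'Bye', 'What can you do?']))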
test
|
to_one_hot
|
Takes an array of integers and transforms it
to an array of one-hot encoded vectors
|
deeppavlov/models/morpho_tagger/common_tagger.py
|
def to_one_hot(x, k):
"""
Takes an array of integers and transforms it
to an array of one-hot encoded vectors
"""
unit = np.eye(k, dtype=int)
return unit[x]
|
def to_one_hot(x, k):
"""
Takes an array of integers and transforms it
to an array of one-hot encoded vectors
"""
unit = np.eye(k, dtype=int)
return unit[x]
|
[
"Takes",
"an",
"array",
"of",
"integers",
"and",
"transforms",
"it",
"to",
"an",
"array",
"of",
"one",
"-",
"hot",
"encoded",
"vectors"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/morpho_tagger/common_tagger.py#L14-L20
|
[
"def",
"to_one_hot",
"(",
"x",
",",
"k",
")",
":",
"unit",
"=",
"np",
".",
"eye",
"(",
"k",
",",
"dtype",
"=",
"int",
")",
"return",
"unit",
"[",
"x",
"]"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
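The np.eye trick in one line: row i of a k-by-k identity matrix is the one-hot vector for class i, so fancy indexing encodes a whole array at once.

import numpy as np

def to_one_hot(x, k):
    unit = np.eye(k, dtype=int)
    return unit[x]

print(to_one_hot([0, 2, 1], 3))
# [[1 0 0]
#  [0 0 1]
#  [0 1 0]]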
test
|
prettify_metrics
|
Prettifies the dictionary of metrics.
|
deeppavlov/core/trainers/utils.py
|
def prettify_metrics(metrics: List[Tuple[str, float]], precision: int = 4) -> OrderedDict:
"""Prettifies the dictionary of metrics."""
prettified_metrics = OrderedDict()
for key, value in metrics:
value = round(value, precision)
prettified_metrics[key] = value
return prettified_metrics
|
def prettify_metrics(metrics: List[Tuple[str, float]], precision: int = 4) -> OrderedDict:
"""Prettifies the dictionary of metrics."""
prettified_metrics = OrderedDict()
for key, value in metrics:
value = round(value, precision)
prettified_metrics[key] = value
return prettified_metrics
|
[
"Prettifies",
"the",
"dictionary",
"of",
"metrics",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/trainers/utils.py#L41-L47
|
[
"def",
"prettify_metrics",
"(",
"metrics",
":",
"List",
"[",
"Tuple",
"[",
"str",
",",
"float",
"]",
"]",
",",
"precision",
":",
"int",
"=",
"4",
")",
"->",
"OrderedDict",
":",
"prettified_metrics",
"=",
"OrderedDict",
"(",
")",
"for",
"key",
",",
"value",
"in",
"metrics",
":",
"value",
"=",
"round",
"(",
"value",
",",
"precision",
")",
"prettified_metrics",
"[",
"key",
"]",
"=",
"value",
"return",
"prettified_metrics"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
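A quick call showing both the rounding and the preserved insertion order (the import path matches the record above):

from deeppavlov.core.trainers.utils import prettify_metrics

metrics = [('accuracy', 1 / 3), ('f1', 0.5)]
print(prettify_metrics(metrics))
# OrderedDict([('accuracy', 0.3333), ('f1', 0.5)])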
test
|
populate_settings_dir
|
Populate settings directory with default settings files
Args:
force: if ``True``, replace existing settings files with default ones
Returns:
``True`` if any files were copied and ``False`` otherwise
|
deeppavlov/core/common/paths.py
|
def populate_settings_dir(force: bool = False) -> bool:
"""
Populate settings directory with default settings files
Args:
force: if ``True``, replace existing settings files with default ones
Returns:
``True`` if any files were copied and ``False`` otherwise
"""
res = False
if _default_settings_path == _settings_path:
return res
for src in list(_default_settings_path.glob('**/*.json')):
dest = _settings_path / src.relative_to(_default_settings_path)
if not force and dest.exists():
continue
res = True
dest.parent.mkdir(parents=True, exist_ok=True)
shutil.copy(src, dest)
return res
|
def populate_settings_dir(force: bool = False) -> bool:
"""
Populate settings directory with default settings files
Args:
force: if ``True``, replace existing settings files with default ones
Returns:
``True`` if any files were copied and ``False`` otherwise
"""
res = False
if _default_settings_path == _settings_path:
return res
for src in list(_default_settings_path.glob('**/*.json')):
dest = _settings_path / src.relative_to(_default_settings_path)
if not force and dest.exists():
continue
res = True
dest.parent.mkdir(parents=True, exist_ok=True)
shutil.copy(src, dest)
return res
|
[
"Populate",
"settings",
"directory",
"with",
"default",
"settings",
"files"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/common/paths.py#L36-L57
|
[
"def",
"populate_settings_dir",
"(",
"force",
":",
"bool",
"=",
"False",
")",
"->",
"bool",
":",
"res",
"=",
"False",
"if",
"_default_settings_path",
"==",
"_settings_path",
":",
"return",
"res",
"for",
"src",
"in",
"list",
"(",
"_default_settings_path",
".",
"glob",
"(",
"'**/*.json'",
")",
")",
":",
"dest",
"=",
"_settings_path",
"/",
"src",
".",
"relative_to",
"(",
"_default_settings_path",
")",
"if",
"not",
"force",
"and",
"dest",
".",
"exists",
"(",
")",
":",
"continue",
"res",
"=",
"True",
"dest",
".",
"parent",
".",
"mkdir",
"(",
"parents",
"=",
"True",
",",
"exist_ok",
"=",
"True",
")",
"shutil",
".",
"copy",
"(",
"src",
",",
"dest",
")",
"return",
"res"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
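A usage sketch for populate_settings_dir; the import path follows the file path in this record. The force flag is the only knob: without it, existing files are left untouched.

from deeppavlov.core.common.paths import populate_settings_dir

copied_any = populate_settings_dir()           # copy only the missing settings files
refreshed = populate_settings_dir(force=True)  # overwrite existing files with the defaults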
|
test
|
Tracker.update_state
|
Updates dialogue state with new ``slots``, calculates features.
Returns:
Tracker: the updated tracker.
|
deeppavlov/models/go_bot/tracker.py
|
def update_state(self,
slots: Union[List[Tuple[str, Any]], Dict[str, Any]]) -> 'Tracker':
"""
Updates dialogue state with new ``slots``, calculates features.
Returns:
Tracker: ."""
pass
|
def update_state(self,
slots: Union[List[Tuple[str, Any]], Dict[str, Any]]) -> 'Tracker':
"""
Updates dialogue state with new ``slots``, calculates features.
Returns:
Tracker: ."""
pass
|
[
"Updates",
"dialogue",
"state",
"with",
"new",
"slots",
"calculates",
"features",
".",
"Returns",
":",
"Tracker",
":",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/go_bot/tracker.py#L35-L42
|
[
"def",
"update_state",
"(",
"self",
",",
"slots",
":",
"Union",
"[",
"List",
"[",
"Tuple",
"[",
"str",
",",
"Any",
"]",
"]",
",",
"Dict",
"[",
"str",
",",
"Any",
"]",
"]",
")",
"->",
"'Tracker'",
":",
"pass"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
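Since update_state is only an interface stub here, a minimal illustrative subclass shows the intended contract: ingest new slot values and return the tracker itself. DictTracker and its dict-based state are assumptions for this sketch, not part of the library, and the real interface defines further methods.

from typing import Any, Dict, List, Tuple, Union

from deeppavlov.models.go_bot.tracker import Tracker

class DictTracker(Tracker):
    # illustrative subclass, not from the library
    def __init__(self) -> None:
        self.state: Dict[str, Any] = {}

    def update_state(self, slots: Union[List[Tuple[str, Any]], Dict[str, Any]]) -> 'Tracker':
        self.state.update(dict(slots))  # accept a dict or a list of (slot, value) pairs
        return self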
|
test
|
predict_with_model
|
Returns predictions of morphotagging model given in config :config_path:.
Args:
config_path: a path to config
Returns:
a list of morphological analyses for each sentence. Each analysis is either a list of tags
or a list of full CONLL-U descriptions.
|
deeppavlov/models/morpho_tagger/common.py
|
def predict_with_model(config_path: [Path, str]) -> List[Optional[List[str]]]:
"""Returns predictions of morphotagging model given in config :config_path:.
Args:
config_path: a path to config
Returns:
a list of morphological analyses for each sentence. Each analysis is either a list of tags
or a list of full CONLL-U descriptions.
"""
config = parse_config(config_path)
reader_config = config['dataset_reader']
reader = get_model(reader_config['class_name'])()
data_path = expand_path(reader_config.get('data_path', ''))
read_params = {k: v for k, v in reader_config.items() if k not in ['class_name', 'data_path']}
data: Dict = reader.read(data_path, **read_params)
iterator_config = config['dataset_iterator']
iterator: MorphoTaggerDatasetIterator = from_params(iterator_config, data=data)
model = build_model(config, load_trained=True)
answers = [None] * len(iterator.test)
batch_size = config['predict'].get("batch_size", -1)
for indexes, (x, _) in iterator.gen_batches(
batch_size=batch_size, data_type="test", shuffle=False, return_indexes=True):
y = model(x)
for i, elem in zip(indexes, y):
answers[i] = elem
outfile = config['predict'].get("outfile")
if outfile is not None:
outfile = Path(outfile)
if not outfile.exists():
outfile.parent.mkdir(parents=True, exist_ok=True)
with open(outfile, "w", encoding="utf8") as fout:
for elem in answers:
fout.write(elem + "\n")
return answers
|
def predict_with_model(config_path: [Path, str]) -> List[Optional[List[str]]]:
"""Returns predictions of morphotagging model given in config :config_path:.
Args:
config_path: a path to config
Returns:
a list of morphological analyses for each sentence. Each analysis is either a list of tags
or a list of full CONLL-U descriptions.
"""
config = parse_config(config_path)
reader_config = config['dataset_reader']
reader = get_model(reader_config['class_name'])()
data_path = expand_path(reader_config.get('data_path', ''))
read_params = {k: v for k, v in reader_config.items() if k not in ['class_name', 'data_path']}
data: Dict = reader.read(data_path, **read_params)
iterator_config = config['dataset_iterator']
iterator: MorphoTaggerDatasetIterator = from_params(iterator_config, data=data)
model = build_model(config, load_trained=True)
answers = [None] * len(iterator.test)
batch_size = config['predict'].get("batch_size", -1)
for indexes, (x, _) in iterator.gen_batches(
batch_size=batch_size, data_type="test", shuffle=False, return_indexes=True):
y = model(x)
for i, elem in zip(indexes, y):
answers[i] = elem
outfile = config['predict'].get("outfile")
if outfile is not None:
outfile = Path(outfile)
if not outfile.exists():
outfile.parent.mkdir(parents=True, exist_ok=True)
with open(outfile, "w", encoding="utf8") as fout:
for elem in answers:
fout.write(elem + "\n")
return answers
|
[
"Returns",
"predictions",
"of",
"morphotagging",
"model",
"given",
"in",
"config",
":",
"config_path",
":",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/morpho_tagger/common.py#L14-L52
|
[
"def",
"predict_with_model",
"(",
"config_path",
":",
"[",
"Path",
",",
"str",
"]",
")",
"->",
"List",
"[",
"Optional",
"[",
"List",
"[",
"str",
"]",
"]",
"]",
":",
"config",
"=",
"parse_config",
"(",
"config_path",
")",
"reader_config",
"=",
"config",
"[",
"'dataset_reader'",
"]",
"reader",
"=",
"get_model",
"(",
"reader_config",
"[",
"'class_name'",
"]",
")",
"(",
")",
"data_path",
"=",
"expand_path",
"(",
"reader_config",
".",
"get",
"(",
"'data_path'",
",",
"''",
")",
")",
"read_params",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"reader_config",
".",
"items",
"(",
")",
"if",
"k",
"not",
"in",
"[",
"'class_name'",
",",
"'data_path'",
"]",
"}",
"data",
":",
"Dict",
"=",
"reader",
".",
"read",
"(",
"data_path",
",",
"*",
"*",
"read_params",
")",
"iterator_config",
"=",
"config",
"[",
"'dataset_iterator'",
"]",
"iterator",
":",
"MorphoTaggerDatasetIterator",
"=",
"from_params",
"(",
"iterator_config",
",",
"data",
"=",
"data",
")",
"model",
"=",
"build_model",
"(",
"config",
",",
"load_trained",
"=",
"True",
")",
"answers",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"iterator",
".",
"test",
")",
"batch_size",
"=",
"config",
"[",
"'predict'",
"]",
".",
"get",
"(",
"\"batch_size\"",
",",
"-",
"1",
")",
"for",
"indexes",
",",
"(",
"x",
",",
"_",
")",
"in",
"iterator",
".",
"gen_batches",
"(",
"batch_size",
"=",
"batch_size",
",",
"data_type",
"=",
"\"test\"",
",",
"shuffle",
"=",
"False",
",",
"return_indexes",
"=",
"True",
")",
":",
"y",
"=",
"model",
"(",
"x",
")",
"for",
"i",
",",
"elem",
"in",
"zip",
"(",
"indexes",
",",
"y",
")",
":",
"answers",
"[",
"i",
"]",
"=",
"elem",
"outfile",
"=",
"config",
"[",
"'predict'",
"]",
".",
"get",
"(",
"\"outfile\"",
")",
"if",
"outfile",
"is",
"not",
"None",
":",
"outfile",
"=",
"Path",
"(",
"outfile",
")",
"if",
"not",
"outfile",
".",
"exists",
"(",
")",
":",
"outfile",
".",
"parent",
".",
"mkdir",
"(",
"parents",
"=",
"True",
",",
"exist_ok",
"=",
"True",
")",
"with",
"open",
"(",
"outfile",
",",
"\"w\"",
",",
"encoding",
"=",
"\"utf8\"",
")",
"as",
"fout",
":",
"for",
"elem",
"in",
"answers",
":",
"fout",
".",
"write",
"(",
"elem",
"+",
"\"\\n\"",
")",
"return",
"answers"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
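A usage sketch for predict_with_model; the config path below is hypothetical, but any morpho_tagger config with 'dataset_reader', 'dataset_iterator' and 'predict' sections should fit the code above.

from deeppavlov.models.morpho_tagger.common import predict_with_model

analyses = predict_with_model('path/to/morpho_tagger_config.json')  # hypothetical path
for sentence_analysis in analyses[:2]:
    print(sentence_analysis)  # a list of tags or full CONLL-U descriptions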
|
test
|
run_alexa_server
|
Initiates Flask web service with Alexa skill.
Args:
agent_generator: Callback Alexa agents factory.
multi_instance: Multi instance mode flag.
stateful: Stateful mode flag.
port: Flask web service port.
https: Flag for running Alexa skill service in https mode.
ssl_key: SSL key file path.
ssl_cert: SSL certificate file path.
|
deeppavlov/utils/alexa/server.py
|
def run_alexa_server(agent_generator: callable,
multi_instance: bool = False,
stateful: bool = False,
port: Optional[int] = None,
https: bool = False,
ssl_key: str = None,
ssl_cert: str = None) -> None:
"""Initiates Flask web service with Alexa skill.
Args:
agent_generator: Callback Alexa agents factory.
multi_instance: Multi instance mode flag.
stateful: Stateful mode flag.
port: Flask web service port.
https: Flag for running Alexa skill service in https mode.
ssl_key: SSL key file path.
ssl_cert: SSL certificate file path.
"""
server_config_path = Path(get_settings_path(), SERVER_CONFIG_FILENAME).resolve()
server_params = read_json(server_config_path)
host = server_params['common_defaults']['host']
port = port or server_params['common_defaults']['port']
alexa_server_params = server_params['alexa_defaults']
alexa_server_params['multi_instance'] = multi_instance or server_params['common_defaults']['multi_instance']
alexa_server_params['stateful'] = stateful or server_params['common_defaults']['stateful']
alexa_server_params['amazon_cert_lifetime'] = AMAZON_CERTIFICATE_LIFETIME
if https:
ssh_key_path = Path(ssl_key or server_params['https_key_path']).resolve()
if not ssh_key_path.is_file():
e = FileNotFoundError('Ssh key file not found: please provide correct path in --key param or '
'https_key_path param in server configuration file')
log.error(e)
raise e
ssh_cert_path = Path(ssl_cert or server_params['https_cert_path']).resolve()
if not ssh_cert_path.is_file():
e = FileNotFoundError('Ssh certificate file not found: please provide correct path in --cert param or '
'https_cert_path param in server configuration file')
log.error(e)
raise e
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
ssl_context.load_cert_chain(ssh_cert_path, ssh_key_path)
else:
ssl_context = None
input_q = Queue()
output_q = Queue()
bot = Bot(agent_generator, alexa_server_params, input_q, output_q)
bot.start()
endpoint_description = {
'description': 'Amazon Alexa custom service endpoint',
'parameters': [
{
'name': 'Signature',
'in': 'header',
'required': 'true',
'type': 'string',
'example': 'Z5H5wqd06ExFVPNfJiqhKvAFjkf+cTVodOUirucHGcEVAMO1LfvgqWUkZ/X1ITDZbI0w+SMwVkEQZlkeThbVS/54M22StNDUtfz4Ua20xNDpIPwcWIACAmZ38XxbbTEFJI5WwqrbilNcfzqiGrIPfdO5rl+/xUjHFUdcJdUY/QzBxXsceytVYfEiR9MzOCN2m4C0XnpThUavAu159KrLj8AkuzN0JF87iXv+zOEeZRgEuwmsAnJrRUwkJ4yWokEPnSVdjF0D6f6CscfyvRe9nsWShq7/zRTa41meweh+n006zvf58MbzRdXPB22RI4AN0ksWW7hSC8/QLAKQE+lvaw==',
},
{
'name': 'Signaturecertchainurl',
'in': 'header',
'required': 'true',
'type': 'string',
'example': 'https://s3.amazonaws.com/echo.api/echo-api-cert-6-ats.pem',
},
{
'name': 'data',
'in': 'body',
'required': 'true',
'example': {
'version': '1.0',
'session': {
'new': False,
'sessionId': 'amzn1.echo-api.session.3c6ebffd-55b9-4e1a-bf3c-c921c1801b63',
'application': {
'applicationId': 'amzn1.ask.skill.8b17a5de-3749-4919-aa1f-e0bbaf8a46a6'
},
'attributes': {
'sessionId': 'amzn1.echo-api.session.3c6ebffd-55b9-4e1a-bf3c-c921c1801b63'
},
'user': {
'userId': 'amzn1.ask.account.AGR4R2LOVHMNMNOGROBVNLU7CL4C57X465XJF2T2F55OUXNTLCXDQP3I55UXZIALEKKZJ6Q2MA5MEFSMZVPEL5NVZS6FZLEU444BVOLPB5WVH5CHYTQAKGD7VFLGPRFZVHHH2NIB4HKNHHGX6HM6S6QDWCKXWOIZL7ONNQSBUCVPMZQKMCYXRG5BA2POYEXFDXRXCGEVDWVSMPQ'
}
},
'context': {
'System': {
'application': {
'applicationId': 'amzn1.ask.skill.8b17a5de-3749-4919-aa1f-e0bbaf8a46a6'
},
'user': {
'userId': 'amzn1.ask.account.AGR4R2LOVHMNMNOGROBVNLU7CL4C57X465XJF2T2F55OUXNTLCXDQP3I55UXZIALEKKZJ6Q2MA5MEFSMZVPEL5NVZS6FZLEU444BVOLPB5WVH5CHYTQAKGD7VFLGPRFZVHHH2NIB4HKNHHGX6HM6S6QDWCKXWOIZL7ONNQSBUCVPMZQKMCYXRG5BA2POYEXFDXRXCGEVDWVSMPQ'
},
'device': {
'deviceId': 'amzn1.ask.device.AFQAMLYOYQUUACSE7HFVYS4ZI2KUB35JPHQRUPKTDCAU3A47WESP5L57KSWT5L6RT3FVXWH4OA2DNPJRMZ2VGEIACF3PJEIDCOUWUBC4W5RPJNUB3ZVT22J4UJN5UL3T2UBP36RVHFJ5P4IPT2HUY3P2YOY33IOU4O33HUAG7R2BUNROEH4T2',
'supportedInterfaces': {}
},
'apiEndpoint': 'https://api.amazonalexa.com',
'apiAccessToken': 'eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOiJodHRwczovL2FwaS5hbWF6b25hbGV4YS5jb20iLCJpc3MiOiJBbGV4YVNraWxsS2l0Iiwic3ViIjoiYW16bjEuYXNrLnNraWxsLjhiMTdhNWRlLTM3NDktNDkxOS1hYTFmLWUwYmJhZjhhNDZhNiIsImV4cCI6MTU0NTIyMzY1OCwiaWF0IjoxNTQ1MjIwMDU4LCJuYmYiOjE1NDUyMjAwNTgsInByaXZhdGVDbGFpbXMiOnsiY29uc2VudFRva2VuIjpudWxsLCJkZXZpY2VJZCI6ImFtem4xLmFzay5kZXZpY2UuQUZRQU1MWU9ZUVVVQUNTRTdIRlZZUzRaSTJLVUIzNUpQSFFSVVBLVERDQVUzQTQ3V0VTUDVMNTdLU1dUNUw2UlQzRlZYV0g0T0EyRE5QSlJNWjJWR0VJQUNGM1BKRUlEQ09VV1VCQzRXNVJQSk5VQjNaVlQyMko0VUpONVVMM1QyVUJQMzZSVkhGSjVQNElQVDJIVVkzUDJZT1kzM0lPVTRPMzNIVUFHN1IyQlVOUk9FSDRUMiIsInVzZXJJZCI6ImFtem4xLmFzay5hY2NvdW50LkFHUjRSMkxPVkhNTk1OT0dST0JWTkxVN0NMNEM1N1g0NjVYSkYyVDJGNTVPVVhOVExDWERRUDNJNTVVWFpJQUxFS0taSjZRMk1BNU1FRlNNWlZQRUw1TlZaUzZGWkxFVTQ0NEJWT0xQQjVXVkg1Q0hZVFFBS0dEN1ZGTEdQUkZaVkhISDJOSUI0SEtOSEhHWDZITTZTNlFEV0NLWFdPSVpMN09OTlFTQlVDVlBNWlFLTUNZWFJHNUJBMlBPWUVYRkRYUlhDR0VWRFdWU01QUSJ9fQ.jcomYhBhU485T4uoe2NyhWnL-kZHoPQKpcycFqa-1sy_lSIitfFGup9DKrf2NkN-I9lZ3xwq9llqx9WRN78fVJjN6GLcDhBDH0irPwt3n9_V7_5bfB6KARv5ZG-JKOmZlLBqQbnln0DAJ10D8HNiytMARNEwduMBVDNK0A5z6YxtRcLYYFD2-Ieg_V8Qx90eE2pd2U5xOuIEL0pXfSoiJ8vpxb8BKwaMO47tdE4qhg_k7v8ClwyXg3EMEhZFjixYNqdW1tCrwDGj58IWMXDyzZhIlRMh6uudMOT6scSzcNVD0v42IOTZ3S_X6rG01B7xhUDlZXMqkrCuzOyqctGaPw'
},
'Viewport': {
'experiences': [
{
'arcMinuteWidth': 246,
'arcMinuteHeight': 144,
'canRotate': False,
'canResize': False
}
],
'shape': 'RECTANGLE',
'pixelWidth': 1024,
'pixelHeight': 600,
'dpi': 160,
'currentPixelWidth': 1024,
'currentPixelHeight': 600,
'touch': [
'SINGLE'
]
}
},
'request': {
'type': 'IntentRequest',
'requestId': 'amzn1.echo-api.request.388d0f6e-04b9-4450-a687-b9abaa73ac6a',
'timestamp': '2018-12-19T11:47:38Z',
'locale': 'en-US',
'intent': {
'name': 'AskDeepPavlov',
'confirmationStatus': 'NONE',
'slots': {
'raw_input': {
'name': 'raw_input',
'value': 'my beautiful sandbox skill',
'resolutions': {
'resolutionsPerAuthority': [
{
'authority': 'amzn1.er-authority.echo-sdk.amzn1.ask.skill.8b17a5de-3749-4919-aa1f-e0bbaf8a46a6.GetInput',
'status': {
'code': 'ER_SUCCESS_NO_MATCH'
}
}
]
},
'confirmationStatus': 'NONE',
'source': 'USER'
}
}
}
}
}
}
],
'responses': {
"200": {
"description": "A model response"
}
}
}
@app.route('/')
def index():
return redirect('/apidocs/')
@app.route('/interact', methods=['POST'])
@swag_from(endpoint_description)
def handle_request():
request_body: bytes = request.get_data()
signature_chain_url: str = request.headers.get('Signaturecertchainurl')
signature: str = request.headers.get('Signature')
alexa_request: dict = request.get_json()
request_dict = {
'request_body': request_body,
'signature_chain_url': signature_chain_url,
'signature': signature,
'alexa_request': alexa_request
}
bot.input_queue.put(request_dict)
response: dict = bot.output_queue.get()
response_code = 400 if 'error' in response.keys() else 200
return jsonify(response), response_code
app.run(host=host, port=port, threaded=True, ssl_context=ssl_context)
|
def run_alexa_server(agent_generator: callable,
multi_instance: bool = False,
stateful: bool = False,
port: Optional[int] = None,
https: bool = False,
ssl_key: str = None,
ssl_cert: str = None) -> None:
"""Initiates Flask web service with Alexa skill.
Args:
agent_generator: Callback Alexa agents factory.
multi_instance: Multi instance mode flag.
stateful: Stateful mode flag.
port: Flask web service port.
https: Flag for running Alexa skill service in https mode.
ssl_key: SSL key file path.
ssl_cert: SSL certificate file path.
"""
server_config_path = Path(get_settings_path(), SERVER_CONFIG_FILENAME).resolve()
server_params = read_json(server_config_path)
host = server_params['common_defaults']['host']
port = port or server_params['common_defaults']['port']
alexa_server_params = server_params['alexa_defaults']
alexa_server_params['multi_instance'] = multi_instance or server_params['common_defaults']['multi_instance']
alexa_server_params['stateful'] = stateful or server_params['common_defaults']['stateful']
alexa_server_params['amazon_cert_lifetime'] = AMAZON_CERTIFICATE_LIFETIME
if https:
ssh_key_path = Path(ssl_key or server_params['https_key_path']).resolve()
if not ssh_key_path.is_file():
e = FileNotFoundError('Ssh key file not found: please provide correct path in --key param or '
'https_key_path param in server configuration file')
log.error(e)
raise e
ssh_cert_path = Path(ssl_cert or server_params['https_cert_path']).resolve()
if not ssh_cert_path.is_file():
e = FileNotFoundError('Ssh certificate file not found: please provide correct path in --cert param or '
'https_cert_path param in server configuration file')
log.error(e)
raise e
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
ssl_context.load_cert_chain(ssh_cert_path, ssh_key_path)
else:
ssl_context = None
input_q = Queue()
output_q = Queue()
bot = Bot(agent_generator, alexa_server_params, input_q, output_q)
bot.start()
endpoint_description = {
'description': 'Amazon Alexa custom service endpoint',
'parameters': [
{
'name': 'Signature',
'in': 'header',
'required': 'true',
'type': 'string',
'example': 'Z5H5wqd06ExFVPNfJiqhKvAFjkf+cTVodOUirucHGcEVAMO1LfvgqWUkZ/X1ITDZbI0w+SMwVkEQZlkeThbVS/54M22StNDUtfz4Ua20xNDpIPwcWIACAmZ38XxbbTEFJI5WwqrbilNcfzqiGrIPfdO5rl+/xUjHFUdcJdUY/QzBxXsceytVYfEiR9MzOCN2m4C0XnpThUavAu159KrLj8AkuzN0JF87iXv+zOEeZRgEuwmsAnJrRUwkJ4yWokEPnSVdjF0D6f6CscfyvRe9nsWShq7/zRTa41meweh+n006zvf58MbzRdXPB22RI4AN0ksWW7hSC8/QLAKQE+lvaw==',
},
{
'name': 'Signaturecertchainurl',
'in': 'header',
'required': 'true',
'type': 'string',
'example': 'https://s3.amazonaws.com/echo.api/echo-api-cert-6-ats.pem',
},
{
'name': 'data',
'in': 'body',
'required': 'true',
'example': {
'version': '1.0',
'session': {
'new': False,
'sessionId': 'amzn1.echo-api.session.3c6ebffd-55b9-4e1a-bf3c-c921c1801b63',
'application': {
'applicationId': 'amzn1.ask.skill.8b17a5de-3749-4919-aa1f-e0bbaf8a46a6'
},
'attributes': {
'sessionId': 'amzn1.echo-api.session.3c6ebffd-55b9-4e1a-bf3c-c921c1801b63'
},
'user': {
'userId': 'amzn1.ask.account.AGR4R2LOVHMNMNOGROBVNLU7CL4C57X465XJF2T2F55OUXNTLCXDQP3I55UXZIALEKKZJ6Q2MA5MEFSMZVPEL5NVZS6FZLEU444BVOLPB5WVH5CHYTQAKGD7VFLGPRFZVHHH2NIB4HKNHHGX6HM6S6QDWCKXWOIZL7ONNQSBUCVPMZQKMCYXRG5BA2POYEXFDXRXCGEVDWVSMPQ'
}
},
'context': {
'System': {
'application': {
'applicationId': 'amzn1.ask.skill.8b17a5de-3749-4919-aa1f-e0bbaf8a46a6'
},
'user': {
'userId': 'amzn1.ask.account.AGR4R2LOVHMNMNOGROBVNLU7CL4C57X465XJF2T2F55OUXNTLCXDQP3I55UXZIALEKKZJ6Q2MA5MEFSMZVPEL5NVZS6FZLEU444BVOLPB5WVH5CHYTQAKGD7VFLGPRFZVHHH2NIB4HKNHHGX6HM6S6QDWCKXWOIZL7ONNQSBUCVPMZQKMCYXRG5BA2POYEXFDXRXCGEVDWVSMPQ'
},
'device': {
'deviceId': 'amzn1.ask.device.AFQAMLYOYQUUACSE7HFVYS4ZI2KUB35JPHQRUPKTDCAU3A47WESP5L57KSWT5L6RT3FVXWH4OA2DNPJRMZ2VGEIACF3PJEIDCOUWUBC4W5RPJNUB3ZVT22J4UJN5UL3T2UBP36RVHFJ5P4IPT2HUY3P2YOY33IOU4O33HUAG7R2BUNROEH4T2',
'supportedInterfaces': {}
},
'apiEndpoint': 'https://api.amazonalexa.com',
'apiAccessToken': 'eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOiJodHRwczovL2FwaS5hbWF6b25hbGV4YS5jb20iLCJpc3MiOiJBbGV4YVNraWxsS2l0Iiwic3ViIjoiYW16bjEuYXNrLnNraWxsLjhiMTdhNWRlLTM3NDktNDkxOS1hYTFmLWUwYmJhZjhhNDZhNiIsImV4cCI6MTU0NTIyMzY1OCwiaWF0IjoxNTQ1MjIwMDU4LCJuYmYiOjE1NDUyMjAwNTgsInByaXZhdGVDbGFpbXMiOnsiY29uc2VudFRva2VuIjpudWxsLCJkZXZpY2VJZCI6ImFtem4xLmFzay5kZXZpY2UuQUZRQU1MWU9ZUVVVQUNTRTdIRlZZUzRaSTJLVUIzNUpQSFFSVVBLVERDQVUzQTQ3V0VTUDVMNTdLU1dUNUw2UlQzRlZYV0g0T0EyRE5QSlJNWjJWR0VJQUNGM1BKRUlEQ09VV1VCQzRXNVJQSk5VQjNaVlQyMko0VUpONVVMM1QyVUJQMzZSVkhGSjVQNElQVDJIVVkzUDJZT1kzM0lPVTRPMzNIVUFHN1IyQlVOUk9FSDRUMiIsInVzZXJJZCI6ImFtem4xLmFzay5hY2NvdW50LkFHUjRSMkxPVkhNTk1OT0dST0JWTkxVN0NMNEM1N1g0NjVYSkYyVDJGNTVPVVhOVExDWERRUDNJNTVVWFpJQUxFS0taSjZRMk1BNU1FRlNNWlZQRUw1TlZaUzZGWkxFVTQ0NEJWT0xQQjVXVkg1Q0hZVFFBS0dEN1ZGTEdQUkZaVkhISDJOSUI0SEtOSEhHWDZITTZTNlFEV0NLWFdPSVpMN09OTlFTQlVDVlBNWlFLTUNZWFJHNUJBMlBPWUVYRkRYUlhDR0VWRFdWU01QUSJ9fQ.jcomYhBhU485T4uoe2NyhWnL-kZHoPQKpcycFqa-1sy_lSIitfFGup9DKrf2NkN-I9lZ3xwq9llqx9WRN78fVJjN6GLcDhBDH0irPwt3n9_V7_5bfB6KARv5ZG-JKOmZlLBqQbnln0DAJ10D8HNiytMARNEwduMBVDNK0A5z6YxtRcLYYFD2-Ieg_V8Qx90eE2pd2U5xOuIEL0pXfSoiJ8vpxb8BKwaMO47tdE4qhg_k7v8ClwyXg3EMEhZFjixYNqdW1tCrwDGj58IWMXDyzZhIlRMh6uudMOT6scSzcNVD0v42IOTZ3S_X6rG01B7xhUDlZXMqkrCuzOyqctGaPw'
},
'Viewport': {
'experiences': [
{
'arcMinuteWidth': 246,
'arcMinuteHeight': 144,
'canRotate': False,
'canResize': False
}
],
'shape': 'RECTANGLE',
'pixelWidth': 1024,
'pixelHeight': 600,
'dpi': 160,
'currentPixelWidth': 1024,
'currentPixelHeight': 600,
'touch': [
'SINGLE'
]
}
},
'request': {
'type': 'IntentRequest',
'requestId': 'amzn1.echo-api.request.388d0f6e-04b9-4450-a687-b9abaa73ac6a',
'timestamp': '2018-12-19T11:47:38Z',
'locale': 'en-US',
'intent': {
'name': 'AskDeepPavlov',
'confirmationStatus': 'NONE',
'slots': {
'raw_input': {
'name': 'raw_input',
'value': 'my beautiful sandbox skill',
'resolutions': {
'resolutionsPerAuthority': [
{
'authority': 'amzn1.er-authority.echo-sdk.amzn1.ask.skill.8b17a5de-3749-4919-aa1f-e0bbaf8a46a6.GetInput',
'status': {
'code': 'ER_SUCCESS_NO_MATCH'
}
}
]
},
'confirmationStatus': 'NONE',
'source': 'USER'
}
}
}
}
}
}
],
'responses': {
"200": {
"description": "A model response"
}
}
}
@app.route('/')
def index():
return redirect('/apidocs/')
@app.route('/interact', methods=['POST'])
@swag_from(endpoint_description)
def handle_request():
request_body: bytes = request.get_data()
signature_chain_url: str = request.headers.get('Signaturecertchainurl')
signature: str = request.headers.get('Signature')
alexa_request: dict = request.get_json()
request_dict = {
'request_body': request_body,
'signature_chain_url': signature_chain_url,
'signature': signature,
'alexa_request': alexa_request
}
bot.input_queue.put(request_dict)
response: dict = bot.output_queue.get()
response_code = 400 if 'error' in response.keys() else 200
return jsonify(response), response_code
app.run(host=host, port=port, threaded=True, ssl_context=ssl_context)
|
[
"Initiates",
"Flask",
"web",
"service",
"with",
"Alexa",
"skill",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/utils/alexa/server.py#L84-L274
|
[
"def",
"run_alexa_server",
"(",
"agent_generator",
":",
"callable",
",",
"multi_instance",
":",
"bool",
"=",
"False",
",",
"stateful",
":",
"bool",
"=",
"False",
",",
"port",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"https",
":",
"bool",
"=",
"False",
",",
"ssl_key",
":",
"str",
"=",
"None",
",",
"ssl_cert",
":",
"str",
"=",
"None",
")",
"->",
"None",
":",
"server_config_path",
"=",
"Path",
"(",
"get_settings_path",
"(",
")",
",",
"SERVER_CONFIG_FILENAME",
")",
".",
"resolve",
"(",
")",
"server_params",
"=",
"read_json",
"(",
"server_config_path",
")",
"host",
"=",
"server_params",
"[",
"'common_defaults'",
"]",
"[",
"'host'",
"]",
"port",
"=",
"port",
"or",
"server_params",
"[",
"'common_defaults'",
"]",
"[",
"'port'",
"]",
"alexa_server_params",
"=",
"server_params",
"[",
"'alexa_defaults'",
"]",
"alexa_server_params",
"[",
"'multi_instance'",
"]",
"=",
"multi_instance",
"or",
"server_params",
"[",
"'common_defaults'",
"]",
"[",
"'multi_instance'",
"]",
"alexa_server_params",
"[",
"'stateful'",
"]",
"=",
"stateful",
"or",
"server_params",
"[",
"'common_defaults'",
"]",
"[",
"'stateful'",
"]",
"alexa_server_params",
"[",
"'amazon_cert_lifetime'",
"]",
"=",
"AMAZON_CERTIFICATE_LIFETIME",
"if",
"https",
":",
"ssh_key_path",
"=",
"Path",
"(",
"ssl_key",
"or",
"server_params",
"[",
"'https_key_path'",
"]",
")",
".",
"resolve",
"(",
")",
"if",
"not",
"ssh_key_path",
".",
"is_file",
"(",
")",
":",
"e",
"=",
"FileNotFoundError",
"(",
"'Ssh key file not found: please provide correct path in --key param or '",
"'https_key_path param in server configuration file'",
")",
"log",
".",
"error",
"(",
"e",
")",
"raise",
"e",
"ssh_cert_path",
"=",
"Path",
"(",
"ssl_cert",
"or",
"server_params",
"[",
"'https_cert_path'",
"]",
")",
".",
"resolve",
"(",
")",
"if",
"not",
"ssh_cert_path",
".",
"is_file",
"(",
")",
":",
"e",
"=",
"FileNotFoundError",
"(",
"'Ssh certificate file not found: please provide correct path in --cert param or '",
"'https_cert_path param in server configuration file'",
")",
"log",
".",
"error",
"(",
"e",
")",
"raise",
"e",
"ssl_context",
"=",
"ssl",
".",
"SSLContext",
"(",
"ssl",
".",
"PROTOCOL_TLSv1_2",
")",
"ssl_context",
".",
"load_cert_chain",
"(",
"ssh_cert_path",
",",
"ssh_key_path",
")",
"else",
":",
"ssl_context",
"=",
"None",
"input_q",
"=",
"Queue",
"(",
")",
"output_q",
"=",
"Queue",
"(",
")",
"bot",
"=",
"Bot",
"(",
"agent_generator",
",",
"alexa_server_params",
",",
"input_q",
",",
"output_q",
")",
"bot",
".",
"start",
"(",
")",
"endpoint_description",
"=",
"{",
"'description'",
":",
"'Amazon Alexa custom service endpoint'",
",",
"'parameters'",
":",
"[",
"{",
"'name'",
":",
"'Signature'",
",",
"'in'",
":",
"'header'",
",",
"'required'",
":",
"'true'",
",",
"'type'",
":",
"'string'",
",",
"'example'",
":",
"'Z5H5wqd06ExFVPNfJiqhKvAFjkf+cTVodOUirucHGcEVAMO1LfvgqWUkZ/X1ITDZbI0w+SMwVkEQZlkeThbVS/54M22StNDUtfz4Ua20xNDpIPwcWIACAmZ38XxbbTEFJI5WwqrbilNcfzqiGrIPfdO5rl+/xUjHFUdcJdUY/QzBxXsceytVYfEiR9MzOCN2m4C0XnpThUavAu159KrLj8AkuzN0JF87iXv+zOEeZRgEuwmsAnJrRUwkJ4yWokEPnSVdjF0D6f6CscfyvRe9nsWShq7/zRTa41meweh+n006zvf58MbzRdXPB22RI4AN0ksWW7hSC8/QLAKQE+lvaw=='",
",",
"}",
",",
"{",
"'name'",
":",
"'Signaturecertchainurl'",
",",
"'in'",
":",
"'header'",
",",
"'required'",
":",
"'true'",
",",
"'type'",
":",
"'string'",
",",
"'example'",
":",
"'https://s3.amazonaws.com/echo.api/echo-api-cert-6-ats.pem'",
",",
"}",
",",
"{",
"'name'",
":",
"'data'",
",",
"'in'",
":",
"'body'",
",",
"'required'",
":",
"'true'",
",",
"'example'",
":",
"{",
"'version'",
":",
"'1.0'",
",",
"'session'",
":",
"{",
"'new'",
":",
"False",
",",
"'sessionId'",
":",
"'amzn1.echo-api.session.3c6ebffd-55b9-4e1a-bf3c-c921c1801b63'",
",",
"'application'",
":",
"{",
"'applicationId'",
":",
"'amzn1.ask.skill.8b17a5de-3749-4919-aa1f-e0bbaf8a46a6'",
"}",
",",
"'attributes'",
":",
"{",
"'sessionId'",
":",
"'amzn1.echo-api.session.3c6ebffd-55b9-4e1a-bf3c-c921c1801b63'",
"}",
",",
"'user'",
":",
"{",
"'userId'",
":",
"'amzn1.ask.account.AGR4R2LOVHMNMNOGROBVNLU7CL4C57X465XJF2T2F55OUXNTLCXDQP3I55UXZIALEKKZJ6Q2MA5MEFSMZVPEL5NVZS6FZLEU444BVOLPB5WVH5CHYTQAKGD7VFLGPRFZVHHH2NIB4HKNHHGX6HM6S6QDWCKXWOIZL7ONNQSBUCVPMZQKMCYXRG5BA2POYEXFDXRXCGEVDWVSMPQ'",
"}",
"}",
",",
"'context'",
":",
"{",
"'System'",
":",
"{",
"'application'",
":",
"{",
"'applicationId'",
":",
"'amzn1.ask.skill.8b17a5de-3749-4919-aa1f-e0bbaf8a46a6'",
"}",
",",
"'user'",
":",
"{",
"'userId'",
":",
"'amzn1.ask.account.AGR4R2LOVHMNMNOGROBVNLU7CL4C57X465XJF2T2F55OUXNTLCXDQP3I55UXZIALEKKZJ6Q2MA5MEFSMZVPEL5NVZS6FZLEU444BVOLPB5WVH5CHYTQAKGD7VFLGPRFZVHHH2NIB4HKNHHGX6HM6S6QDWCKXWOIZL7ONNQSBUCVPMZQKMCYXRG5BA2POYEXFDXRXCGEVDWVSMPQ'",
"}",
",",
"'device'",
":",
"{",
"'deviceId'",
":",
"'amzn1.ask.device.AFQAMLYOYQUUACSE7HFVYS4ZI2KUB35JPHQRUPKTDCAU3A47WESP5L57KSWT5L6RT3FVXWH4OA2DNPJRMZ2VGEIACF3PJEIDCOUWUBC4W5RPJNUB3ZVT22J4UJN5UL3T2UBP36RVHFJ5P4IPT2HUY3P2YOY33IOU4O33HUAG7R2BUNROEH4T2'",
",",
"'supportedInterfaces'",
":",
"{",
"}",
"}",
",",
"'apiEndpoint'",
":",
"'https://api.amazonalexa.com'",
",",
"'apiAccessToken'",
":",
"'eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOiJodHRwczovL2FwaS5hbWF6b25hbGV4YS5jb20iLCJpc3MiOiJBbGV4YVNraWxsS2l0Iiwic3ViIjoiYW16bjEuYXNrLnNraWxsLjhiMTdhNWRlLTM3NDktNDkxOS1hYTFmLWUwYmJhZjhhNDZhNiIsImV4cCI6MTU0NTIyMzY1OCwiaWF0IjoxNTQ1MjIwMDU4LCJuYmYiOjE1NDUyMjAwNTgsInByaXZhdGVDbGFpbXMiOnsiY29uc2VudFRva2VuIjpudWxsLCJkZXZpY2VJZCI6ImFtem4xLmFzay5kZXZpY2UuQUZRQU1MWU9ZUVVVQUNTRTdIRlZZUzRaSTJLVUIzNUpQSFFSVVBLVERDQVUzQTQ3V0VTUDVMNTdLU1dUNUw2UlQzRlZYV0g0T0EyRE5QSlJNWjJWR0VJQUNGM1BKRUlEQ09VV1VCQzRXNVJQSk5VQjNaVlQyMko0VUpONVVMM1QyVUJQMzZSVkhGSjVQNElQVDJIVVkzUDJZT1kzM0lPVTRPMzNIVUFHN1IyQlVOUk9FSDRUMiIsInVzZXJJZCI6ImFtem4xLmFzay5hY2NvdW50LkFHUjRSMkxPVkhNTk1OT0dST0JWTkxVN0NMNEM1N1g0NjVYSkYyVDJGNTVPVVhOVExDWERRUDNJNTVVWFpJQUxFS0taSjZRMk1BNU1FRlNNWlZQRUw1TlZaUzZGWkxFVTQ0NEJWT0xQQjVXVkg1Q0hZVFFBS0dEN1ZGTEdQUkZaVkhISDJOSUI0SEtOSEhHWDZITTZTNlFEV0NLWFdPSVpMN09OTlFTQlVDVlBNWlFLTUNZWFJHNUJBMlBPWUVYRkRYUlhDR0VWRFdWU01QUSJ9fQ.jcomYhBhU485T4uoe2NyhWnL-kZHoPQKpcycFqa-1sy_lSIitfFGup9DKrf2NkN-I9lZ3xwq9llqx9WRN78fVJjN6GLcDhBDH0irPwt3n9_V7_5bfB6KARv5ZG-JKOmZlLBqQbnln0DAJ10D8HNiytMARNEwduMBVDNK0A5z6YxtRcLYYFD2-Ieg_V8Qx90eE2pd2U5xOuIEL0pXfSoiJ8vpxb8BKwaMO47tdE4qhg_k7v8ClwyXg3EMEhZFjixYNqdW1tCrwDGj58IWMXDyzZhIlRMh6uudMOT6scSzcNVD0v42IOTZ3S_X6rG01B7xhUDlZXMqkrCuzOyqctGaPw'",
"}",
",",
"'Viewport'",
":",
"{",
"'experiences'",
":",
"[",
"{",
"'arcMinuteWidth'",
":",
"246",
",",
"'arcMinuteHeight'",
":",
"144",
",",
"'canRotate'",
":",
"False",
",",
"'canResize'",
":",
"False",
"}",
"]",
",",
"'shape'",
":",
"'RECTANGLE'",
",",
"'pixelWidth'",
":",
"1024",
",",
"'pixelHeight'",
":",
"600",
",",
"'dpi'",
":",
"160",
",",
"'currentPixelWidth'",
":",
"1024",
",",
"'currentPixelHeight'",
":",
"600",
",",
"'touch'",
":",
"[",
"'SINGLE'",
"]",
"}",
"}",
",",
"'request'",
":",
"{",
"'type'",
":",
"'IntentRequest'",
",",
"'requestId'",
":",
"'amzn1.echo-api.request.388d0f6e-04b9-4450-a687-b9abaa73ac6a'",
",",
"'timestamp'",
":",
"'2018-12-19T11:47:38Z'",
",",
"'locale'",
":",
"'en-US'",
",",
"'intent'",
":",
"{",
"'name'",
":",
"'AskDeepPavlov'",
",",
"'confirmationStatus'",
":",
"'NONE'",
",",
"'slots'",
":",
"{",
"'raw_input'",
":",
"{",
"'name'",
":",
"'raw_input'",
",",
"'value'",
":",
"'my beautiful sandbox skill'",
",",
"'resolutions'",
":",
"{",
"'resolutionsPerAuthority'",
":",
"[",
"{",
"'authority'",
":",
"'amzn1.er-authority.echo-sdk.amzn1.ask.skill.8b17a5de-3749-4919-aa1f-e0bbaf8a46a6.GetInput'",
",",
"'status'",
":",
"{",
"'code'",
":",
"'ER_SUCCESS_NO_MATCH'",
"}",
"}",
"]",
"}",
",",
"'confirmationStatus'",
":",
"'NONE'",
",",
"'source'",
":",
"'USER'",
"}",
"}",
"}",
"}",
"}",
"}",
"]",
",",
"'responses'",
":",
"{",
"\"200\"",
":",
"{",
"\"description\"",
":",
"\"A model response\"",
"}",
"}",
"}",
"@",
"app",
".",
"route",
"(",
"'/'",
")",
"def",
"index",
"(",
")",
":",
"return",
"redirect",
"(",
"'/apidocs/'",
")",
"@",
"app",
".",
"route",
"(",
"'/interact'",
",",
"methods",
"=",
"[",
"'POST'",
"]",
")",
"@",
"swag_from",
"(",
"endpoint_description",
")",
"def",
"handle_request",
"(",
")",
":",
"request_body",
":",
"bytes",
"=",
"request",
".",
"get_data",
"(",
")",
"signature_chain_url",
":",
"str",
"=",
"request",
".",
"headers",
".",
"get",
"(",
"'Signaturecertchainurl'",
")",
"signature",
":",
"str",
"=",
"request",
".",
"headers",
".",
"get",
"(",
"'Signature'",
")",
"alexa_request",
":",
"dict",
"=",
"request",
".",
"get_json",
"(",
")",
"request_dict",
"=",
"{",
"'request_body'",
":",
"request_body",
",",
"'signature_chain_url'",
":",
"signature_chain_url",
",",
"'signature'",
":",
"signature",
",",
"'alexa_request'",
":",
"alexa_request",
"}",
"bot",
".",
"input_queue",
".",
"put",
"(",
"request_dict",
")",
"response",
":",
"dict",
"=",
"bot",
".",
"output_queue",
".",
"get",
"(",
")",
"response_code",
"=",
"400",
"if",
"'error'",
"in",
"response",
".",
"keys",
"(",
")",
"else",
"200",
"return",
"jsonify",
"(",
"response",
")",
",",
"response_code",
"app",
".",
"run",
"(",
"host",
"=",
"host",
",",
"port",
"=",
"port",
",",
"threaded",
"=",
"True",
",",
"ssl_context",
"=",
"ssl_context",
")"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
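A launch sketch for run_alexa_server: it expects a zero-argument agent factory. make_agent and the certificate paths are hypothetical; Amazon requires HTTPS for custom skill endpoints, hence the https arguments.

from deeppavlov.utils.alexa.server import run_alexa_server

run_alexa_server(agent_generator=make_agent,  # hypothetical factory returning a DeepPavlov agent
                 port=7050,
                 https=True,
                 ssl_key='my_cert.key',       # hypothetical file paths
                 ssl_cert='my_cert.pem')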
|
test
|
TFModel.load
|
Load model parameters from self.load_path
|
deeppavlov/core/models/tf_model.py
|
def load(self, exclude_scopes: tuple = ('Optimizer',)) -> None:
"""Load model parameters from self.load_path"""
if not hasattr(self, 'sess'):
raise RuntimeError('Your TensorFlow model {} must'
' have sess attribute!'.format(self.__class__.__name__))
path = str(self.load_path.resolve())
# Check presence of the model files
if tf.train.checkpoint_exists(path):
log.info('[loading model from {}]'.format(path))
# Exclude optimizer variables from saved variables
var_list = self._get_saveable_variables(exclude_scopes)
saver = tf.train.Saver(var_list)
saver.restore(self.sess, path)
|
def load(self, exclude_scopes: tuple = ('Optimizer',)) -> None:
"""Load model parameters from self.load_path"""
if not hasattr(self, 'sess'):
raise RuntimeError('Your TensorFlow model {} must'
' have sess attribute!'.format(self.__class__.__name__))
path = str(self.load_path.resolve())
# Check presence of the model files
if tf.train.checkpoint_exists(path):
log.info('[loading model from {}]'.format(path))
# Exclude optimizer variables from saved variables
var_list = self._get_saveable_variables(exclude_scopes)
saver = tf.train.Saver(var_list)
saver.restore(self.sess, path)
|
[
"Load",
"model",
"parameters",
"from",
"self",
".",
"load_path"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/models/tf_model.py#L42-L54
|
[
"def",
"load",
"(",
"self",
",",
"exclude_scopes",
":",
"tuple",
"=",
"(",
"'Optimizer'",
",",
")",
")",
"->",
"None",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'sess'",
")",
":",
"raise",
"RuntimeError",
"(",
"'Your TensorFlow model {} must'",
"' have sess attribute!'",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
")",
")",
"path",
"=",
"str",
"(",
"self",
".",
"load_path",
".",
"resolve",
"(",
")",
")",
"# Check presence of the model files",
"if",
"tf",
".",
"train",
".",
"checkpoint_exists",
"(",
"path",
")",
":",
"log",
".",
"info",
"(",
"'[loading model from {}]'",
".",
"format",
"(",
"path",
")",
")",
"# Exclude optimizer variables from saved variables",
"var_list",
"=",
"self",
".",
"_get_saveable_variables",
"(",
"exclude_scopes",
")",
"saver",
"=",
"tf",
".",
"train",
".",
"Saver",
"(",
"var_list",
")",
"saver",
".",
"restore",
"(",
"self",
".",
"sess",
",",
"path",
")"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
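A call sketch for load(), assuming model is a TFModel subclass instance that has already created its sess and built its graph, and that load_path points at an existing checkpoint.

model.load()                                     # restore everything except 'Optimizer/*'
model.load(exclude_scopes=('Optimizer', 'EMA'))  # 'EMA' is an illustrative extra scope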
|
test
|
TFModel.save
|
Save model parameters to self.save_path
|
deeppavlov/core/models/tf_model.py
|
def save(self, exclude_scopes: tuple = ('Optimizer',)) -> None:
"""Save model parameters to self.save_path"""
if not hasattr(self, 'sess'):
raise RuntimeError('Your TensorFlow model {} must'
' have sess attribute!'.format(self.__class__.__name__))
path = str(self.save_path.resolve())
log.info('[saving model to {}]'.format(path))
var_list = self._get_saveable_variables(exclude_scopes)
saver = tf.train.Saver(var_list)
saver.save(self.sess, path)
|
def save(self, exclude_scopes: tuple = ('Optimizer',)) -> None:
"""Save model parameters to self.save_path"""
if not hasattr(self, 'sess'):
raise RuntimeError('Your TensorFlow model {} must'
' have sess attribute!'.format(self.__class__.__name__))
path = str(self.save_path.resolve())
log.info('[saving model to {}]'.format(path))
var_list = self._get_saveable_variables(exclude_scopes)
saver = tf.train.Saver(var_list)
saver.save(self.sess, path)
|
[
"Save",
"model",
"parameters",
"to",
"self",
".",
"save_path"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/models/tf_model.py#L68-L77
|
[
"def",
"save",
"(",
"self",
",",
"exclude_scopes",
":",
"tuple",
"=",
"(",
"'Optimizer'",
",",
")",
")",
"->",
"None",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'sess'",
")",
":",
"raise",
"RuntimeError",
"(",
"'Your TensorFlow model {} must'",
"' have sess attribute!'",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
")",
")",
"path",
"=",
"str",
"(",
"self",
".",
"save_path",
".",
"resolve",
"(",
")",
")",
"log",
".",
"info",
"(",
"'[saving model to {}]'",
".",
"format",
"(",
"path",
")",
")",
"var_list",
"=",
"self",
".",
"_get_saveable_variables",
"(",
"exclude_scopes",
")",
"saver",
"=",
"tf",
".",
"train",
".",
"Saver",
"(",
"var_list",
")",
"saver",
".",
"save",
"(",
"self",
".",
"sess",
",",
"path",
")"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
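The mirror call sketch for save(): everything outside the excluded scopes is written to the checkpoint prefix at the resolved save_path (same assumptions about model as for load() above).

model.save()  # writes TensorFlow checkpoint files at save_path, skipping 'Optimizer/*'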
|
test
|
TFModel.get_train_op
|
Get train operation for given loss
Args:
loss: loss, tf tensor or scalar
learning_rate: scalar or placeholder.
clip_norm: clip gradients norm by clip_norm.
learnable_scopes: which scopes are trainable (None for all).
optimizer: instance of tf.train.Optimizer, default Adam.
**kwargs: parameters passed to tf.train.Optimizer object
(scalars or placeholders).
Returns:
train_op
|
deeppavlov/core/models/tf_model.py
|
def get_train_op(self,
loss,
learning_rate,
optimizer=None,
clip_norm=None,
learnable_scopes=None,
optimizer_scope_name=None,
**kwargs):
"""
Get train operation for given loss
Args:
loss: loss, tf tensor or scalar
learning_rate: scalar or placeholder.
clip_norm: clip gradients norm by clip_norm.
learnable_scopes: which scopes are trainable (None for all).
optimizer: instance of tf.train.Optimizer, default Adam.
**kwargs: parameters passed to tf.train.Optimizer object
(scalars or placeholders).
Returns:
train_op
"""
if optimizer_scope_name is None:
opt_scope = tf.variable_scope('Optimizer')
else:
opt_scope = tf.variable_scope(optimizer_scope_name)
with opt_scope:
if learnable_scopes is None:
variables_to_train = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
else:
variables_to_train = []
for scope_name in learnable_scopes:
variables_to_train.extend(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope_name))
if optimizer is None:
optimizer = tf.train.AdamOptimizer
# For batch norm it is necessary to update running averages
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(extra_update_ops):
def clip_if_not_none(grad):
if grad is not None:
return tf.clip_by_norm(grad, clip_norm)
opt = optimizer(learning_rate, **kwargs)
grads_and_vars = opt.compute_gradients(loss, var_list=variables_to_train)
if clip_norm is not None:
grads_and_vars = [(clip_if_not_none(grad), var)
for grad, var in grads_and_vars]
train_op = opt.apply_gradients(grads_and_vars)
return train_op
|
def get_train_op(self,
loss,
learning_rate,
optimizer=None,
clip_norm=None,
learnable_scopes=None,
optimizer_scope_name=None,
**kwargs):
"""
Get train operation for given loss
Args:
loss: loss, tf tensor or scalar
learning_rate: scalar or placeholder.
clip_norm: clip gradients norm by clip_norm.
learnable_scopes: which scopes are trainable (None for all).
optimizer: instance of tf.train.Optimizer, default Adam.
**kwargs: parameters passed to tf.train.Optimizer object
(scalars or placeholders).
Returns:
train_op
"""
if optimizer_scope_name is None:
opt_scope = tf.variable_scope('Optimizer')
else:
opt_scope = tf.variable_scope(optimizer_scope_name)
with opt_scope:
if learnable_scopes is None:
variables_to_train = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
else:
variables_to_train = []
for scope_name in learnable_scopes:
variables_to_train.extend(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope_name))
if optimizer is None:
optimizer = tf.train.AdamOptimizer
# For batch norm it is necessary to update running averages
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(extra_update_ops):
def clip_if_not_none(grad):
if grad is not None:
return tf.clip_by_norm(grad, clip_norm)
opt = optimizer(learning_rate, **kwargs)
grads_and_vars = opt.compute_gradients(loss, var_list=variables_to_train)
if clip_norm is not None:
grads_and_vars = [(clip_if_not_none(grad), var)
for grad, var in grads_and_vars]
train_op = opt.apply_gradients(grads_and_vars)
return train_op
|
[
"Get",
"train",
"operation",
"for",
"given",
"loss"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/models/tf_model.py#L97-L149
|
[
"def",
"get_train_op",
"(",
"self",
",",
"loss",
",",
"learning_rate",
",",
"optimizer",
"=",
"None",
",",
"clip_norm",
"=",
"None",
",",
"learnable_scopes",
"=",
"None",
",",
"optimizer_scope_name",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"optimizer_scope_name",
"is",
"None",
":",
"opt_scope",
"=",
"tf",
".",
"variable_scope",
"(",
"'Optimizer'",
")",
"else",
":",
"opt_scope",
"=",
"tf",
".",
"variable_scope",
"(",
"optimizer_scope_name",
")",
"with",
"opt_scope",
":",
"if",
"learnable_scopes",
"is",
"None",
":",
"variables_to_train",
"=",
"tf",
".",
"get_collection",
"(",
"tf",
".",
"GraphKeys",
".",
"TRAINABLE_VARIABLES",
")",
"else",
":",
"variables_to_train",
"=",
"[",
"]",
"for",
"scope_name",
"in",
"learnable_scopes",
":",
"variables_to_train",
".",
"extend",
"(",
"tf",
".",
"get_collection",
"(",
"tf",
".",
"GraphKeys",
".",
"TRAINABLE_VARIABLES",
",",
"scope",
"=",
"scope_name",
")",
")",
"if",
"optimizer",
"is",
"None",
":",
"optimizer",
"=",
"tf",
".",
"train",
".",
"AdamOptimizer",
"# For batch norm it is necessary to update running averages",
"extra_update_ops",
"=",
"tf",
".",
"get_collection",
"(",
"tf",
".",
"GraphKeys",
".",
"UPDATE_OPS",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"extra_update_ops",
")",
":",
"def",
"clip_if_not_none",
"(",
"grad",
")",
":",
"if",
"grad",
"is",
"not",
"None",
":",
"return",
"tf",
".",
"clip_by_norm",
"(",
"grad",
",",
"clip_norm",
")",
"opt",
"=",
"optimizer",
"(",
"learning_rate",
",",
"*",
"*",
"kwargs",
")",
"grads_and_vars",
"=",
"opt",
".",
"compute_gradients",
"(",
"loss",
",",
"var_list",
"=",
"variables_to_train",
")",
"if",
"clip_norm",
"is",
"not",
"None",
":",
"grads_and_vars",
"=",
"[",
"(",
"clip_if_not_none",
"(",
"grad",
")",
",",
"var",
")",
"for",
"grad",
",",
"var",
"in",
"grads_and_vars",
"]",
"train_op",
"=",
"opt",
".",
"apply_gradients",
"(",
"grads_and_vars",
")",
"return",
"train_op"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
test
|
TFModel.print_number_of_parameters
|
Print number of *trainable* parameters in the network
|
deeppavlov/core/models/tf_model.py
|
def print_number_of_parameters():
"""
Print number of *trainable* parameters in the network
"""
log.info('Number of parameters: ')
variables = tf.trainable_variables()
blocks = defaultdict(int)
for var in variables:
# Get the top level scope name of variable
block_name = var.name.split('/')[0]
number_of_parameters = np.prod(var.get_shape().as_list())
blocks[block_name] += number_of_parameters
for block_name, cnt in blocks.items():
log.info("{} - {}.".format(block_name, cnt))
total_num_parameters = np.sum(list(blocks.values()))
log.info('Total number of parameters equal {}'.format(total_num_parameters))
|
def print_number_of_parameters():
"""
Print number of *trainable* parameters in the network
"""
log.info('Number of parameters: ')
variables = tf.trainable_variables()
blocks = defaultdict(int)
for var in variables:
# Get the top level scope name of variable
block_name = var.name.split('/')[0]
number_of_parameters = np.prod(var.get_shape().as_list())
blocks[block_name] += number_of_parameters
for block_name, cnt in blocks.items():
log.info("{} - {}.".format(block_name, cnt))
total_num_parameters = np.sum(list(blocks.values()))
log.info('Total number of parameters equal {}'.format(total_num_parameters))
|
[
"Print",
"number",
"of",
"*",
"trainable",
"*",
"parameters",
"in",
"the",
"network"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/models/tf_model.py#L152-L167
|
[
"def",
"print_number_of_parameters",
"(",
")",
":",
"log",
".",
"info",
"(",
"'Number of parameters: '",
")",
"variables",
"=",
"tf",
".",
"trainable_variables",
"(",
")",
"blocks",
"=",
"defaultdict",
"(",
"int",
")",
"for",
"var",
"in",
"variables",
":",
"# Get the top level scope name of variable",
"block_name",
"=",
"var",
".",
"name",
".",
"split",
"(",
"'/'",
")",
"[",
"0",
"]",
"number_of_parameters",
"=",
"np",
".",
"prod",
"(",
"var",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
")",
"blocks",
"[",
"block_name",
"]",
"+=",
"number_of_parameters",
"for",
"block_name",
",",
"cnt",
"in",
"blocks",
".",
"items",
"(",
")",
":",
"log",
".",
"info",
"(",
"\"{} - {}.\"",
".",
"format",
"(",
"block_name",
",",
"cnt",
")",
")",
"total_num_parameters",
"=",
"np",
".",
"sum",
"(",
"list",
"(",
"blocks",
".",
"values",
"(",
")",
")",
")",
"log",
".",
"info",
"(",
"'Total number of parameters equal {}'",
".",
"format",
"(",
"total_num_parameters",
")",
")"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
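A small sketch of what print_number_of_parameters reports, assuming it is exposed as a static method: one line per top-level variable scope plus a total.

import tensorflow as tf

from deeppavlov.core.models.tf_model import TFModel

with tf.variable_scope('encoder'):
    tf.get_variable('W', shape=[300, 128])  # 300 * 128 = 38400 parameters

TFModel.print_number_of_parameters()
# expected log output, roughly:
#   Number of parameters:
#   encoder - 38400.
#   Total number of parameters equal 38400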
|
test
|
_precompute_absense_costs
|
Computes the minimal cost of a new symbol appearing at the dictionary nodes
according to the penalties from costs
Arguments:
---------------
dictionary : Trie
the dictionary, stored as an acyclic automaton
removal_costs : dict
penalties for symbol removal
insertion_costs : dict
penalties for symbol insertion
n : int
depth of ``looking ahead'' in the dictionary
Returns
---------------
answer : list of dicts, len(answer)=len(dictionary)
answer[i][a][j] equals the minimal penalty for symbol a appearing
at position j in node number i
|
deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py
|
def _precompute_absense_costs(dictionary, removal_costs, insertion_costs, n,
allow_spaces=False):
"""
    Computes the minimal cost of a new symbol appearing at the dictionary nodes
    according to the penalties from costs
    Arguments:
    ---------------
    dictionary : Trie
        the dictionary, stored as an acyclic automaton
    removal_costs : dict
        penalties for symbol removal
    insertion_costs : dict
        penalties for symbol insertion
    n : int
        depth of ``looking ahead'' in the dictionary
    Returns
    ---------------
    answer : list of dicts, len(answer)=len(dictionary)
        answer[i][a][j] equals the minimal penalty for symbol a appearing
        at position j in node number i
"""
answer = [dict() for node in dictionary.data]
if n == 0:
return answer
curr_alphabet = copy.copy(dictionary.alphabet)
if allow_spaces:
curr_alphabet += [' ']
for l, (costs_in_node, node) in enumerate(zip(answer, dictionary.data)):
        # compute the minimal symbol removal cost
curr_node_removal_costs = np.empty(dtype=np.float64, shape=(n,))
if len(node[0]) > 0:
curr_node_removal_costs[0] = min(removal_costs[symbol] for symbol in node[0])
for j, symbols in enumerate(node[1:], 1):
if len(symbols) == 0:
curr_node_removal_costs[j:] = curr_node_removal_costs[j-1]
break
curr_cost = min(removal_costs[symbol] for symbol in symbols)
curr_node_removal_costs[j] = min(curr_node_removal_costs[j-1], curr_cost)
else:
curr_node_removal_costs[:] = np.inf
        # compute the minimal insertion cost
for a in curr_alphabet:
curr_symbol_costs = np.empty(dtype=np.float64, shape=(n,))
curr_symbol_costs.fill(insertion_costs[a])
for j, symbols in enumerate(node):
if a in symbols:
curr_symbol_costs[j:] = 0.0
break
curr_symbol_costs[j] = min(curr_symbol_costs[j], curr_node_removal_costs[j])
costs_in_node[a] = curr_symbol_costs
return answer
|
def _precompute_absense_costs(dictionary, removal_costs, insertion_costs, n,
allow_spaces=False):
"""
    Computes the minimal cost of a new symbol appearing at the dictionary nodes
    according to the penalties from costs
    Arguments:
    ---------------
    dictionary : Trie
        the dictionary, stored as an acyclic automaton
    removal_costs : dict
        penalties for symbol removal
    insertion_costs : dict
        penalties for symbol insertion
    n : int
        depth of ``looking ahead'' in the dictionary
    Returns
    ---------------
    answer : list of dicts, len(answer)=len(dictionary)
        answer[i][a][j] equals the minimal penalty for symbol a appearing
        at position j in node number i
"""
answer = [dict() for node in dictionary.data]
if n == 0:
return answer
curr_alphabet = copy.copy(dictionary.alphabet)
if allow_spaces:
curr_alphabet += [' ']
for l, (costs_in_node, node) in enumerate(zip(answer, dictionary.data)):
        # compute the minimal symbol removal cost
curr_node_removal_costs = np.empty(dtype=np.float64, shape=(n,))
if len(node[0]) > 0:
curr_node_removal_costs[0] = min(removal_costs[symbol] for symbol in node[0])
for j, symbols in enumerate(node[1:], 1):
if len(symbols) == 0:
curr_node_removal_costs[j:] = curr_node_removal_costs[j-1]
break
curr_cost = min(removal_costs[symbol] for symbol in symbols)
curr_node_removal_costs[j] = min(curr_node_removal_costs[j-1], curr_cost)
else:
curr_node_removal_costs[:] = np.inf
        # compute the minimal insertion cost
for a in curr_alphabet:
curr_symbol_costs = np.empty(dtype=np.float64, shape=(n,))
curr_symbol_costs.fill(insertion_costs[a])
for j, symbols in enumerate(node):
if a in symbols:
curr_symbol_costs[j:] = 0.0
break
curr_symbol_costs[j] = min(curr_symbol_costs[j], curr_node_removal_costs[j])
costs_in_node[a] = curr_symbol_costs
return answer
|
[
"Вычисляет",
"минимальную",
"стоимость",
"появления",
"нового",
"символа",
"в",
"узлах",
"словаря",
"в",
"соответствии",
"со",
"штрафами",
"из",
"costs"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py#L214-L269
|
[
"def",
"_precompute_absense_costs",
"(",
"dictionary",
",",
"removal_costs",
",",
"insertion_costs",
",",
"n",
",",
"allow_spaces",
"=",
"False",
")",
":",
"answer",
"=",
"[",
"dict",
"(",
")",
"for",
"node",
"in",
"dictionary",
".",
"data",
"]",
"if",
"n",
"==",
"0",
":",
"return",
"answer",
"curr_alphabet",
"=",
"copy",
".",
"copy",
"(",
"dictionary",
".",
"alphabet",
")",
"if",
"allow_spaces",
":",
"curr_alphabet",
"+=",
"[",
"' '",
"]",
"for",
"l",
",",
"(",
"costs_in_node",
",",
"node",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"answer",
",",
"dictionary",
".",
"data",
")",
")",
":",
"# определение минимальной стоимости удаления символов",
"curr_node_removal_costs",
"=",
"np",
".",
"empty",
"(",
"dtype",
"=",
"np",
".",
"float64",
",",
"shape",
"=",
"(",
"n",
",",
")",
")",
"if",
"len",
"(",
"node",
"[",
"0",
"]",
")",
">",
"0",
":",
"curr_node_removal_costs",
"[",
"0",
"]",
"=",
"min",
"(",
"removal_costs",
"[",
"symbol",
"]",
"for",
"symbol",
"in",
"node",
"[",
"0",
"]",
")",
"for",
"j",
",",
"symbols",
"in",
"enumerate",
"(",
"node",
"[",
"1",
":",
"]",
",",
"1",
")",
":",
"if",
"len",
"(",
"symbols",
")",
"==",
"0",
":",
"curr_node_removal_costs",
"[",
"j",
":",
"]",
"=",
"curr_node_removal_costs",
"[",
"j",
"-",
"1",
"]",
"break",
"curr_cost",
"=",
"min",
"(",
"removal_costs",
"[",
"symbol",
"]",
"for",
"symbol",
"in",
"symbols",
")",
"curr_node_removal_costs",
"[",
"j",
"]",
"=",
"min",
"(",
"curr_node_removal_costs",
"[",
"j",
"-",
"1",
"]",
",",
"curr_cost",
")",
"else",
":",
"curr_node_removal_costs",
"[",
":",
"]",
"=",
"np",
".",
"inf",
"# определение минимальной стоимости вставки",
"for",
"a",
"in",
"curr_alphabet",
":",
"curr_symbol_costs",
"=",
"np",
".",
"empty",
"(",
"dtype",
"=",
"np",
".",
"float64",
",",
"shape",
"=",
"(",
"n",
",",
")",
")",
"curr_symbol_costs",
".",
"fill",
"(",
"insertion_costs",
"[",
"a",
"]",
")",
"for",
"j",
",",
"symbols",
"in",
"enumerate",
"(",
"node",
")",
":",
"if",
"a",
"in",
"symbols",
":",
"curr_symbol_costs",
"[",
"j",
":",
"]",
"=",
"0.0",
"break",
"curr_symbol_costs",
"[",
"j",
"]",
"=",
"min",
"(",
"curr_symbol_costs",
"[",
"j",
"]",
",",
"curr_node_removal_costs",
"[",
"j",
"]",
")",
"costs_in_node",
"[",
"a",
"]",
"=",
"curr_symbol_costs",
"return",
"answer"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
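A toy check of the n == 0 shortcut above: with zero lookahead the helper returns one empty dict per dictionary node. FakeTrie is a stand-in exposing just the two attributes the function reads; the real Trie class lives in the same module.

from collections import namedtuple

from deeppavlov.models.spelling_correction.levenshtein.levenshtein_searcher import (
    _precompute_absense_costs)

FakeTrie = namedtuple('FakeTrie', ['data', 'alphabet'])
trie = FakeTrie(data=[[set('ab')], [set()]], alphabet=['a', 'b'])
costs = {'a': 1.0, 'b': 1.0}

print(_precompute_absense_costs(trie, costs, costs, n=0))  # [{}, {}]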
|
test
|
LevenshteinSearcher.search
|
Finds all dictionary words in d-window from word
|
deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py
|
def search(self, word, d, allow_spaces=True, return_cost=True):
"""
Finds all dictionary words in d-window from word
"""
if not all((c in self.alphabet
or (c == " " and self.allow_spaces)) for c in word):
return []
# raise ValueError("{0} contains an incorrect symbol".format(word))
return self._trie_search(
word, d, allow_spaces=allow_spaces, return_cost=return_cost)
|
def search(self, word, d, allow_spaces=True, return_cost=True):
"""
Finds all dictionary words in d-window from word
"""
if not all((c in self.alphabet
or (c == " " and self.allow_spaces)) for c in word):
return []
# raise ValueError("{0} contains an incorrect symbol".format(word))
return self._trie_search(
word, d, allow_spaces=allow_spaces, return_cost=return_cost)
|
[
"Finds",
"all",
"dictionary",
"words",
"in",
"d",
"-",
"window",
"from",
"word"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py#L44-L53
|
[
"def",
"search",
"(",
"self",
",",
"word",
",",
"d",
",",
"allow_spaces",
"=",
"True",
",",
"return_cost",
"=",
"True",
")",
":",
"if",
"not",
"all",
"(",
"(",
"c",
"in",
"self",
".",
"alphabet",
"or",
"(",
"c",
"==",
"\" \"",
"and",
"self",
".",
"allow_spaces",
")",
")",
"for",
"c",
"in",
"word",
")",
":",
"return",
"[",
"]",
"# raise ValueError(\"{0} contains an incorrect symbol\".format(word))",
"return",
"self",
".",
"_trie_search",
"(",
"word",
",",
"d",
",",
"allow_spaces",
"=",
"allow_spaces",
",",
"return_cost",
"=",
"return_cost",
")"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
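A usage sketch for search(): find dictionary words within edit distance d of a query. The constructor call is an assumption (an alphabet plus a word list); check the class definition in this module for the exact signature.

from deeppavlov.models.spelling_correction.levenshtein.levenshtein_searcher import (
    LevenshteinSearcher)

searcher = LevenshteinSearcher('abcdefghijklmnopqrstuvwxyz',
                               ['hello', 'hallo', 'world'])
print(searcher.search('helo', 1))                     # (word, cost) pairs sorted by cost
print(searcher.search('helo', 1, return_cost=False))  # just the matching words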
|
test
|
LevenshteinSearcher._trie_search
|
Finds all words in the prefix trie whose distance,
according to the given transducer, does not exceed d
|
deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py
|
def _trie_search(self, word, d, transducer=None,
allow_spaces=True, return_cost=True):
"""
    Finds all words in the prefix trie whose distance,
    according to the given transducer, does not exceed d
"""
if transducer is None:
        # handle spaces
transducer = self.transducer.inverse()
allow_spaces &= self.allow_spaces
trie = self.dictionary
    # initialize variables
used_agenda_keys = set()
agenda = SortedListWithKey(key=(lambda x:x[1]))
h = self.h_func(word, trie.root)
# agenda[self.agenda_key("", 0, trie.root)] = (0.0, 0.0, h)
key, value = ("", 0, trie.root), (0.0, 0.0, h)
agenda.add((key, value))
answer = dict()
k = 0
    # priority queue with intermediate results
while len(agenda) > 0:
key, value = agenda.pop(0)
if key in used_agenda_keys:
continue
used_agenda_keys.add(key)
low, pos, index = key
cost, g, h = value
        # g --- current cost, h --- lower bound on the future cost
        # cost = g + h --- lower bound on the total cost
k += 1
max_upperside_length = min(len(word) - pos, transducer.max_up_length)
for upperside_length in range(max_upperside_length + 1):
new_pos = pos + upperside_length
curr_up = word[pos: new_pos]
if curr_up not in transducer.operation_costs:
continue
for curr_low, curr_cost in transducer.operation_costs[curr_up].items():
new_g = g + curr_cost
                if new_g > d:  # if g > d, there is no need to compute h
continue
if curr_low == " ":
if allow_spaces and trie.is_final(index):
new_index = trie.root
else:
new_index = Trie.NO_NODE
else:
new_index = trie.descend(index, curr_low)
if new_index is Trie.NO_NODE:
continue
new_low = low + curr_low
new_h = self.h_func(word[new_pos: ], new_index)
new_cost = new_g + new_h
if new_cost > d:
continue
new_key = (new_low, new_pos, new_index)
new_value = (new_cost, new_g, new_h)
if new_pos == len(word) and trie.is_final(new_index):
old_g = answer.get(new_low, None)
if old_g is None or new_g < old_g:
answer[new_low] = new_g
agenda.add((new_key, new_value))
answer = sorted(answer.items(), key=(lambda x: x[1]))
if return_cost:
return answer
else:
return [elem[0] for elem in answer]
|
def _trie_search(self, word, d, transducer=None,
allow_spaces=True, return_cost=True):
"""
    Finds all words in the prefix trie whose distance,
    according to the given transducer, does not exceed d
"""
if transducer is None:
        # handle spaces
transducer = self.transducer.inverse()
allow_spaces &= self.allow_spaces
trie = self.dictionary
    # initialize variables
used_agenda_keys = set()
agenda = SortedListWithKey(key=(lambda x:x[1]))
h = self.h_func(word, trie.root)
# agenda[self.agenda_key("", 0, trie.root)] = (0.0, 0.0, h)
key, value = ("", 0, trie.root), (0.0, 0.0, h)
agenda.add((key, value))
answer = dict()
k = 0
    # priority queue with intermediate results
while len(agenda) > 0:
key, value = agenda.pop(0)
if key in used_agenda_keys:
continue
used_agenda_keys.add(key)
low, pos, index = key
cost, g, h = value
        # g --- current cost, h --- lower bound on the future cost
        # cost = g + h --- lower bound on the total cost
k += 1
max_upperside_length = min(len(word) - pos, transducer.max_up_length)
for upperside_length in range(max_upperside_length + 1):
new_pos = pos + upperside_length
curr_up = word[pos: new_pos]
if curr_up not in transducer.operation_costs:
continue
for curr_low, curr_cost in transducer.operation_costs[curr_up].items():
new_g = g + curr_cost
                    if new_g > d:  # if g > d, there is no need to compute h
continue
if curr_low == " ":
if allow_spaces and trie.is_final(index):
new_index = trie.root
else:
new_index = Trie.NO_NODE
else:
new_index = trie.descend(index, curr_low)
if new_index is Trie.NO_NODE:
continue
new_low = low + curr_low
new_h = self.h_func(word[new_pos: ], new_index)
new_cost = new_g + new_h
if new_cost > d:
continue
new_key = (new_low, new_pos, new_index)
new_value = (new_cost, new_g, new_h)
if new_pos == len(word) and trie.is_final(new_index):
old_g = answer.get(new_low, None)
if old_g is None or new_g < old_g:
answer[new_low] = new_g
agenda.add((new_key, new_value))
answer = sorted(answer.items(), key=(lambda x: x[1]))
if return_cost:
return answer
else:
return [elem[0] for elem in answer]
|
[
"Finds",
"all",
"words",
"in",
"the",
"prefix",
"trie",
"whose",
"distance",
"according",
"to",
"the",
"given",
"transducer",
"does",
"not",
"exceed",
"d"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py#L55-L121
|
[
"def",
"_trie_search",
"(",
"self",
",",
"word",
",",
"d",
",",
"transducer",
"=",
"None",
",",
"allow_spaces",
"=",
"True",
",",
"return_cost",
"=",
"True",
")",
":",
"if",
"transducer",
"is",
"None",
":",
"# разобраться с пробелами",
"transducer",
"=",
"self",
".",
"transducer",
".",
"inverse",
"(",
")",
"allow_spaces",
"&=",
"self",
".",
"allow_spaces",
"trie",
"=",
"self",
".",
"dictionary",
"# инициализация переменных",
"used_agenda_keys",
"=",
"set",
"(",
")",
"agenda",
"=",
"SortedListWithKey",
"(",
"key",
"=",
"(",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
")",
")",
"h",
"=",
"self",
".",
"h_func",
"(",
"word",
",",
"trie",
".",
"root",
")",
"# agenda[self.agenda_key(\"\", 0, trie.root)] = (0.0, 0.0, h)",
"key",
",",
"value",
"=",
"(",
"\"\"",
",",
"0",
",",
"trie",
".",
"root",
")",
",",
"(",
"0.0",
",",
"0.0",
",",
"h",
")",
"agenda",
".",
"add",
"(",
"(",
"key",
",",
"value",
")",
")",
"answer",
"=",
"dict",
"(",
")",
"k",
"=",
"0",
"# очередь с приоритетом с промежуточными результатами",
"while",
"len",
"(",
"agenda",
")",
">",
"0",
":",
"key",
",",
"value",
"=",
"agenda",
".",
"pop",
"(",
"0",
")",
"if",
"key",
"in",
"used_agenda_keys",
":",
"continue",
"used_agenda_keys",
".",
"add",
"(",
"key",
")",
"low",
",",
"pos",
",",
"index",
"=",
"key",
"cost",
",",
"g",
",",
"h",
"=",
"value",
"# g --- текущая стоимость, h --- нижняя оценка будущей стоимости",
"# cost = g + h --- нижняя оценка суммарной стоимости",
"k",
"+=",
"1",
"max_upperside_length",
"=",
"min",
"(",
"len",
"(",
"word",
")",
"-",
"pos",
",",
"transducer",
".",
"max_up_length",
")",
"for",
"upperside_length",
"in",
"range",
"(",
"max_upperside_length",
"+",
"1",
")",
":",
"new_pos",
"=",
"pos",
"+",
"upperside_length",
"curr_up",
"=",
"word",
"[",
"pos",
":",
"new_pos",
"]",
"if",
"curr_up",
"not",
"in",
"transducer",
".",
"operation_costs",
":",
"continue",
"for",
"curr_low",
",",
"curr_cost",
"in",
"transducer",
".",
"operation_costs",
"[",
"curr_up",
"]",
".",
"items",
"(",
")",
":",
"new_g",
"=",
"g",
"+",
"curr_cost",
"if",
"new_g",
">",
"d",
":",
"#если g > d, то h можно не вычислять",
"continue",
"if",
"curr_low",
"==",
"\" \"",
":",
"if",
"allow_spaces",
"and",
"trie",
".",
"is_final",
"(",
"index",
")",
":",
"new_index",
"=",
"trie",
".",
"root",
"else",
":",
"new_index",
"=",
"Trie",
".",
"NO_NODE",
"else",
":",
"new_index",
"=",
"trie",
".",
"descend",
"(",
"index",
",",
"curr_low",
")",
"if",
"new_index",
"is",
"Trie",
".",
"NO_NODE",
":",
"continue",
"new_low",
"=",
"low",
"+",
"curr_low",
"new_h",
"=",
"self",
".",
"h_func",
"(",
"word",
"[",
"new_pos",
":",
"]",
",",
"new_index",
")",
"new_cost",
"=",
"new_g",
"+",
"new_h",
"if",
"new_cost",
">",
"d",
":",
"continue",
"new_key",
"=",
"(",
"new_low",
",",
"new_pos",
",",
"new_index",
")",
"new_value",
"=",
"(",
"new_cost",
",",
"new_g",
",",
"new_h",
")",
"if",
"new_pos",
"==",
"len",
"(",
"word",
")",
"and",
"trie",
".",
"is_final",
"(",
"new_index",
")",
":",
"old_g",
"=",
"answer",
".",
"get",
"(",
"new_low",
",",
"None",
")",
"if",
"old_g",
"is",
"None",
"or",
"new_g",
"<",
"old_g",
":",
"answer",
"[",
"new_low",
"]",
"=",
"new_g",
"agenda",
".",
"add",
"(",
"(",
"new_key",
",",
"new_value",
")",
")",
"answer",
"=",
"sorted",
"(",
"answer",
".",
"items",
"(",
")",
",",
"key",
"=",
"(",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
")",
")",
"if",
"return_cost",
":",
"return",
"answer",
"else",
":",
"return",
"[",
"elem",
"[",
"0",
"]",
"for",
"elem",
"in",
"answer",
"]"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
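The agenda loop in _trie_search above is an A*-style best-first search: candidates are popped in order of cost = g + h and the search is cut off as soon as the bound exceeds d. A minimal standalone sketch of that idea, assuming a flat word set and heapq in place of the Trie and SortedListWithKey machinery (search_within_distance and its naive cost are illustrative, not DeepPavlov API):

import heapq

def search_within_distance(word, words, d):
    # naive admissible cost: mismatches at aligned positions plus the length difference
    def cost(w):
        return sum(a != b for a, b in zip(word, w)) + abs(len(word) - len(w))
    agenda = [(cost(w), w) for w in words]
    heapq.heapify(agenda)
    answer = []
    while agenda:
        c, w = heapq.heappop(agenda)  # cheapest candidate first, as in the agenda loop
        if c > d:
            break  # every remaining candidate is at least as costly
        answer.append((w, c))
    return answer

print(search_within_distance("cat", {"cat", "cut", "cart", "dog"}, 1))
# [('cat', 0), ('cut', 1)]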
test
|
LevenshteinSearcher._precompute_euristics
|
Precomputes future symbols and the costs of operations on them
for the h-heuristic
|
deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py
|
def _precompute_euristics(self):
"""
        Precomputes future symbols and the costs of operations on them
        for the h-heuristic
"""
if self.euristics is None:
return
        # compute the minimal cost of an operation
        # that inserts ('+') or deletes ('-') a given symbol
removal_costs = {a : np.inf for a in self.alphabet}
insertion_costs = {a : np.inf for a in self.alphabet}
if self.allow_spaces:
removal_costs[' '] = np.inf
insertion_costs[' '] = np.inf
for up, costs in self.transducer.operation_costs.items():
for low, cost in costs.items():
if up == low:
continue
if up != '':
removal_cost = cost / len(up)
for a in up:
removal_costs[a] = min(removal_costs[a], removal_cost)
if low != '':
insertion_cost = cost / len(low)
for a in low:
insertion_costs[a] = min(insertion_costs[a], insertion_cost)
        # precompute possible future symbols in the trie nodes
        # precompute_future_symbols(self.dictionary, self.euristics, self.allow_spaces)
        # precompute symbol-absence costs in the trie nodes
self._absense_costs_by_node = _precompute_absense_costs(
self.dictionary, removal_costs, insertion_costs,
self.euristics, self.allow_spaces)
        # array for caching heuristics
self._temporary_euristics = [dict() for i in range(len(self.dictionary))]
|
def _precompute_euristics(self):
"""
        Precomputes future symbols and the costs of operations on them
        for the h-heuristic
"""
if self.euristics is None:
return
        # compute the minimal cost of an operation
        # that inserts ('+') or deletes ('-') a given symbol
removal_costs = {a : np.inf for a in self.alphabet}
insertion_costs = {a : np.inf for a in self.alphabet}
if self.allow_spaces:
removal_costs[' '] = np.inf
insertion_costs[' '] = np.inf
for up, costs in self.transducer.operation_costs.items():
for low, cost in costs.items():
if up == low:
continue
if up != '':
removal_cost = cost / len(up)
for a in up:
removal_costs[a] = min(removal_costs[a], removal_cost)
if low != '':
insertion_cost = cost / len(low)
for a in low:
insertion_costs[a] = min(insertion_costs[a], insertion_cost)
        # precompute possible future symbols in the trie nodes
        # precompute_future_symbols(self.dictionary, self.euristics, self.allow_spaces)
        # precompute symbol-absence costs in the trie nodes
self._absense_costs_by_node = _precompute_absense_costs(
self.dictionary, removal_costs, insertion_costs,
self.euristics, self.allow_spaces)
        # array for caching heuristics
self._temporary_euristics = [dict() for i in range(len(self.dictionary))]
|
[
"Precomputes",
"future",
"symbols",
"and",
"the",
"costs",
"of",
"operations",
"on",
"them",
"for",
"the",
"h",
"-",
"heuristic"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py#L123-L156
|
[
"def",
"_precompute_euristics",
"(",
"self",
")",
":",
"if",
"self",
".",
"euristics",
"is",
"None",
":",
"return",
"# вычисление минимальной стоимости операции,",
"# приводящей к появлению ('+') или исчезновению ('-') данного символа",
"removal_costs",
"=",
"{",
"a",
":",
"np",
".",
"inf",
"for",
"a",
"in",
"self",
".",
"alphabet",
"}",
"insertion_costs",
"=",
"{",
"a",
":",
"np",
".",
"inf",
"for",
"a",
"in",
"self",
".",
"alphabet",
"}",
"if",
"self",
".",
"allow_spaces",
":",
"removal_costs",
"[",
"' '",
"]",
"=",
"np",
".",
"inf",
"insertion_costs",
"[",
"' '",
"]",
"=",
"np",
".",
"inf",
"for",
"up",
",",
"costs",
"in",
"self",
".",
"transducer",
".",
"operation_costs",
".",
"items",
"(",
")",
":",
"for",
"low",
",",
"cost",
"in",
"costs",
".",
"items",
"(",
")",
":",
"if",
"up",
"==",
"low",
":",
"continue",
"if",
"up",
"!=",
"''",
":",
"removal_cost",
"=",
"cost",
"/",
"len",
"(",
"up",
")",
"for",
"a",
"in",
"up",
":",
"removal_costs",
"[",
"a",
"]",
"=",
"min",
"(",
"removal_costs",
"[",
"a",
"]",
",",
"removal_cost",
")",
"if",
"low",
"!=",
"''",
":",
"insertion_cost",
"=",
"cost",
"/",
"len",
"(",
"low",
")",
"for",
"a",
"in",
"low",
":",
"insertion_costs",
"[",
"a",
"]",
"=",
"min",
"(",
"insertion_costs",
"[",
"a",
"]",
",",
"insertion_cost",
")",
"# предвычисление возможных будущих символов в узлах дерева",
"# precompute_future_symbols(self.dictionary, self.euristics, self.allow_spaces)",
"# предвычисление стоимостей потери символа в узлах дерева",
"self",
".",
"_absense_costs_by_node",
"=",
"_precompute_absense_costs",
"(",
"self",
".",
"dictionary",
",",
"removal_costs",
",",
"insertion_costs",
",",
"self",
".",
"euristics",
",",
"self",
".",
"allow_spaces",
")",
"# массив для сохранения эвристик",
"self",
".",
"_temporary_euristics",
"=",
"[",
"dict",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"dictionary",
")",
")",
"]"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
test
|
LevenshteinSearcher._euristic_h_function
|
Computes the h-heuristic from Hulden, 2009 for the current dictionary node
Arguments:
----------
suffix : string
    the unread suffix of the input word
index : int
    the index of the current node in the dictionary
Returns:
-----------
cost : float
    a lower bound on the cost of a replacement
    that leads to the input word with the suffix suffix,
    given that the prefix read so far of the typo-free word
    has led to the node with index index
|
deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py
|
def _euristic_h_function(self, suffix, index):
"""
        Computes the h-heuristic from Hulden, 2009 for the current dictionary node
        Arguments:
        ----------
        suffix : string
            the unread suffix of the input word
        index : int
            the index of the current node in the dictionary
        Returns:
        -----------
        cost : float
            a lower bound on the cost of a replacement
            that leads to the input word with the suffix suffix,
            given that the prefix read so far of the typo-free word
            has led to the node with index index
"""
if self.euristics > 0:
suffix = suffix[:self.euristics]
        # result caching
index_temporary_euristics = self._temporary_euristics[index]
cost = index_temporary_euristics.get(suffix, None)
if cost is not None:
return cost
        # extract the required data from the arrays
absense_costs = self._absense_costs_by_node[index]
data = self.dictionary.data[index]
costs = np.zeros(dtype=np.float64, shape=(self.euristics,))
        # costs[j] --- penalty estimate when looking ahead j symbols
for i, a in enumerate(suffix):
costs[i:] += absense_costs[a][i:]
cost = max(costs)
index_temporary_euristics[suffix] = cost
return cost
|
def _euristic_h_function(self, suffix, index):
"""
        Computes the h-heuristic from Hulden, 2009 for the current dictionary node
        Arguments:
        ----------
        suffix : string
            the unread suffix of the input word
        index : int
            the index of the current node in the dictionary
        Returns:
        -----------
        cost : float
            a lower bound on the cost of a replacement
            that leads to the input word with the suffix suffix,
            given that the prefix read so far of the typo-free word
            has led to the node with index index
"""
if self.euristics > 0:
suffix = suffix[:self.euristics]
        # result caching
index_temporary_euristics = self._temporary_euristics[index]
cost = index_temporary_euristics.get(suffix, None)
if cost is not None:
return cost
        # extract the required data from the arrays
absense_costs = self._absense_costs_by_node[index]
data = self.dictionary.data[index]
costs = np.zeros(dtype=np.float64, shape=(self.euristics,))
        # costs[j] --- penalty estimate when looking ahead j symbols
for i, a in enumerate(suffix):
costs[i:] += absense_costs[a][i:]
cost = max(costs)
index_temporary_euristics[suffix] = cost
return cost
|
[
"Computes",
"the",
"h",
"-",
"heuristic",
"from",
"Hulden",
"2009",
"for",
"the",
"current",
"dictionary",
"node"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py#L164-L199
|
[
"def",
"_euristic_h_function",
"(",
"self",
",",
"suffix",
",",
"index",
")",
":",
"if",
"self",
".",
"euristics",
">",
"0",
":",
"suffix",
"=",
"suffix",
"[",
":",
"self",
".",
"euristics",
"]",
"# кэширование результатов",
"index_temporary_euristics",
"=",
"self",
".",
"_temporary_euristics",
"[",
"index",
"]",
"cost",
"=",
"index_temporary_euristics",
".",
"get",
"(",
"suffix",
",",
"None",
")",
"if",
"cost",
"is",
"not",
"None",
":",
"return",
"cost",
"# извлечение нужных данных из массивов",
"absense_costs",
"=",
"self",
".",
"_absense_costs_by_node",
"[",
"index",
"]",
"data",
"=",
"self",
".",
"dictionary",
".",
"data",
"[",
"index",
"]",
"costs",
"=",
"np",
".",
"zeros",
"(",
"dtype",
"=",
"np",
".",
"float64",
",",
"shape",
"=",
"(",
"self",
".",
"euristics",
",",
")",
")",
"# costs[j] --- оценка штрафа при предпросмотре вперёд на j символов",
"for",
"i",
",",
"a",
"in",
"enumerate",
"(",
"suffix",
")",
":",
"costs",
"[",
"i",
":",
"]",
"+=",
"absense_costs",
"[",
"a",
"]",
"[",
"i",
":",
"]",
"cost",
"=",
"max",
"(",
"costs",
")",
"index_temporary_euristics",
"[",
"suffix",
"]",
"=",
"cost",
"return",
"cost"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
test
|
SegmentTransducer.get_operation_cost
|
Returns the cost of the elementary transduction up->low,
or np.inf if there is no such elementary transduction
Arguments:
----------
up, low : string
    the elements of the elementary transduction
Returns:
-----------
cost : float
    the cost of the elementary transduction up->low
    (np.inf if no such transduction exists)
|
deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py
|
def get_operation_cost(self, up, low):
"""
        Returns the cost of the elementary transduction up->low,
        or np.inf if there is no such elementary transduction
        Arguments:
        ----------
        up, low : string
            the elements of the elementary transduction
        Returns:
        -----------
        cost : float
            the cost of the elementary transduction up->low
            (np.inf if no such transduction exists)
"""
up_costs = self.operation_costs.get(up, None)
if up_costs is None:
return np.inf
cost = up_costs.get(low, np.inf)
return cost
|
def get_operation_cost(self, up, low):
"""
        Returns the cost of the elementary transduction up->low,
        or np.inf if there is no such elementary transduction
        Arguments:
        ----------
        up, low : string
            the elements of the elementary transduction
        Returns:
        -----------
        cost : float
            the cost of the elementary transduction up->low
            (np.inf if no such transduction exists)
"""
up_costs = self.operation_costs.get(up, None)
if up_costs is None:
return np.inf
cost = up_costs.get(low, np.inf)
return cost
|
[
"Returns",
"the",
"cost",
"of",
"the",
"elementary",
"transduction",
"up",
"-",
">",
"low",
"or",
"np",
".",
"inf",
"if",
"there",
"is",
"no",
"such",
"elementary",
"transduction"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py#L308-L328
|
[
"def",
"get_operation_cost",
"(",
"self",
",",
"up",
",",
"low",
")",
":",
"up_costs",
"=",
"self",
".",
"operation_costs",
".",
"get",
"(",
"up",
",",
"None",
")",
"if",
"up_costs",
"is",
"None",
":",
"return",
"np",
".",
"inf",
"cost",
"=",
"up_costs",
".",
"get",
"(",
"low",
",",
"np",
".",
"inf",
")",
"return",
"cost"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
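get_operation_cost is a two-level dictionary lookup with np.inf as the "no such operation" sentinel, which lets callers add and compare costs without special-casing missing keys. A self-contained sketch over a toy cost table (the table itself is an assumption, not the one SegmentTransducer builds):

import numpy as np

operation_costs = {"a": {"a": 0.0, "b": 1.0, "": 1.0}}

def get_operation_cost(up, low):
    up_costs = operation_costs.get(up)
    if up_costs is None:                  # unknown upper element
        return np.inf
    return up_costs.get(low, np.inf)      # unknown lower element also costs np.inf

assert get_operation_cost("a", "b") == 1.0
assert get_operation_cost("a", "c") == np.inf
assert get_operation_cost("x", "a") == np.inf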
test
|
SegmentTransducer.inverse
|
Builds a transducer defining the inverse finite transformation
|
deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py
|
def inverse(self):
"""
        Builds a transducer defining the inverse finite transformation
        """
        # SIMPLIFY THE INVERSION!!!
inversed_transducer = SegmentTransducer(self.alphabet, operation_costs=dict())
inversed_transducer.operation_costs = self._reversed_operation_costs
inversed_transducer._reversed_operation_costs = self.operation_costs
inversed_transducer.max_low_length = self.max_up_length
inversed_transducer.max_up_length = self.max_low_length
inversed_transducer.max_low_lengths_by_up = self.max_up_lengths_by_low
inversed_transducer.max_up_lengths_by_low = self.max_low_lengths_by_up
return inversed_transducer
|
def inverse(self):
"""
        Builds a transducer defining the inverse finite transformation
        """
        # SIMPLIFY THE INVERSION!!!
inversed_transducer = SegmentTransducer(self.alphabet, operation_costs=dict())
inversed_transducer.operation_costs = self._reversed_operation_costs
inversed_transducer._reversed_operation_costs = self.operation_costs
inversed_transducer.max_low_length = self.max_up_length
inversed_transducer.max_up_length = self.max_low_length
inversed_transducer.max_low_lengths_by_up = self.max_up_lengths_by_low
inversed_transducer.max_up_lengths_by_low = self.max_low_lengths_by_up
return inversed_transducer
|
[
"Builds",
"a",
"transducer",
"defining",
"the",
"inverse",
"finite",
"transformation"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py#L330-L342
|
[
"def",
"inverse",
"(",
"self",
")",
":",
"# УПРОСТИТЬ ОБРАЩЕНИЕ!!!",
"inversed_transducer",
"=",
"SegmentTransducer",
"(",
"self",
".",
"alphabet",
",",
"operation_costs",
"=",
"dict",
"(",
")",
")",
"inversed_transducer",
".",
"operation_costs",
"=",
"self",
".",
"_reversed_operation_costs",
"inversed_transducer",
".",
"_reversed_operation_costs",
"=",
"self",
".",
"operation_costs",
"inversed_transducer",
".",
"max_low_length",
"=",
"self",
".",
"max_up_length",
"inversed_transducer",
".",
"max_up_length",
"=",
"self",
".",
"max_low_length",
"inversed_transducer",
".",
"max_low_lengths_by_up",
"=",
"self",
".",
"max_up_lengths_by_low",
"inversed_transducer",
".",
"max_up_lengths_by_low",
"=",
"self",
".",
"max_low_lengths_by_up",
"return",
"inversed_transducer"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
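inverse() swaps operation_costs with _reversed_operation_costs, so the upper and lower elements of every elementary transduction trade places; _make_reversed_operation_costs below builds the reversed table the same way. A minimal sketch of that reversal on a toy nested dict (values are illustrative only):

costs = {"a": {"b": 1.0, "": 0.5}, "": {"c": 2.0}}

reversed_costs = {}
for up, low_costs in costs.items():
    for low, cost in low_costs.items():
        reversed_costs.setdefault(low, {})[up] = cost  # (up, low) becomes (low, up)

assert reversed_costs == {"b": {"a": 1.0}, "": {"a": 0.5}, "c": {"": 2.0}}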
test
|
SegmentTransducer.distance
|
Computes the minimal-cost transduction
mapping first to second
Arguments:
-----------
first : string
second : string
    the upper and lower elements of the transduction
return_transduction : bool (optional, default=False)
    whether to return the minimal-weight transductions
    (see the return value)
Returns:
-----------
(final_cost, transductions) : tuple(float, list)
    if return_transduction=True, returns
    the minimal cost of a transduction mapping first to second
    and the list of transductions with that cost
final_cost : float
    if return_transduction=False, returns
    the minimal cost of a transduction mapping first to second
|
deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py
|
def distance(self, first, second, return_transduction = False):
"""
        Computes the minimal-cost transduction
        mapping first to second
        Arguments:
        -----------
        first : string
        second : string
            the upper and lower elements of the transduction
        return_transduction : bool (optional, default=False)
            whether to return the minimal-weight transductions
            (see the return value)
        Returns:
        -----------
        (final_cost, transductions) : tuple(float, list)
            if return_transduction=True, returns
            the minimal cost of a transduction mapping first to second
            and the list of transductions with that cost
        final_cost : float
            if return_transduction=False, returns
            the minimal cost of a transduction mapping first to second
"""
if return_transduction:
add_pred = (lambda x, y: (y == np.inf or x < y))
else:
add_pred = (lambda x, y: (y == np.inf or x <= y))
clear_pred = (lambda x, y: (y < np.inf and x < y))
update_func = lambda x, y: min(x, y)
costs, backtraces = self._fill_levenshtein_table(first, second,
update_func, add_pred, clear_pred)
final_cost = costs[-1][-1]
if final_cost == np.inf:
transductions = [None]
elif return_transduction:
transductions = self._backtraces_to_transductions(first, second, backtraces,
final_cost, return_cost=False)
if return_transduction:
return final_cost, transductions
else:
return final_cost
|
def distance(self, first, second, return_transduction = False):
"""
        Computes the minimal-cost transduction
        mapping first to second
        Arguments:
        -----------
        first : string
        second : string
            the upper and lower elements of the transduction
        return_transduction : bool (optional, default=False)
            whether to return the minimal-weight transductions
            (see the return value)
        Returns:
        -----------
        (final_cost, transductions) : tuple(float, list)
            if return_transduction=True, returns
            the minimal cost of a transduction mapping first to second
            and the list of transductions with that cost
        final_cost : float
            if return_transduction=False, returns
            the minimal cost of a transduction mapping first to second
"""
if return_transduction:
add_pred = (lambda x, y: (y == np.inf or x < y))
else:
add_pred = (lambda x, y: (y == np.inf or x <= y))
clear_pred = (lambda x, y: (y < np.inf and x < y))
update_func = lambda x, y: min(x, y)
costs, backtraces = self._fill_levenshtein_table(first, second,
update_func, add_pred, clear_pred)
final_cost = costs[-1][-1]
if final_cost == np.inf:
transductions = [None]
elif return_transduction:
transductions = self._backtraces_to_transductions(first, second, backtraces,
final_cost, return_cost=False)
if return_transduction:
return final_cost, transductions
else:
return final_cost
|
[
"Computes",
"the",
"minimal",
"cost",
"transduction",
"mapping",
"first",
"to",
"second"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py#L344-L387
|
[
"def",
"distance",
"(",
"self",
",",
"first",
",",
"second",
",",
"return_transduction",
"=",
"False",
")",
":",
"if",
"return_transduction",
":",
"add_pred",
"=",
"(",
"lambda",
"x",
",",
"y",
":",
"(",
"y",
"==",
"np",
".",
"inf",
"or",
"x",
"<",
"y",
")",
")",
"else",
":",
"add_pred",
"=",
"(",
"lambda",
"x",
",",
"y",
":",
"(",
"y",
"==",
"np",
".",
"inf",
"or",
"x",
"<=",
"y",
")",
")",
"clear_pred",
"=",
"(",
"lambda",
"x",
",",
"y",
":",
"(",
"y",
"<",
"np",
".",
"inf",
"and",
"x",
"<",
"y",
")",
")",
"update_func",
"=",
"lambda",
"x",
",",
"y",
":",
"min",
"(",
"x",
",",
"y",
")",
"costs",
",",
"backtraces",
"=",
"self",
".",
"_fill_levenshtein_table",
"(",
"first",
",",
"second",
",",
"update_func",
",",
"add_pred",
",",
"clear_pred",
")",
"final_cost",
"=",
"costs",
"[",
"-",
"1",
"]",
"[",
"-",
"1",
"]",
"if",
"final_cost",
"==",
"np",
".",
"inf",
":",
"transductions",
"=",
"[",
"None",
"]",
"elif",
"return_transduction",
":",
"transductions",
"=",
"self",
".",
"_backtraces_to_transductions",
"(",
"first",
",",
"second",
",",
"backtraces",
",",
"final_cost",
",",
"return_cost",
"=",
"False",
")",
"if",
"return_transduction",
":",
"return",
"final_cost",
",",
"transductions",
"else",
":",
"return",
"final_cost"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
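distance() delegates the actual work to a dynamic programme over prefix pairs: costs[i][j] is relaxed cell by cell until costs[-1][-1] holds the minimal total cost. A hedged reimplementation specialised to unit operation costs (classic Levenshtein distance), not the DeepPavlov method itself:

import numpy as np

def edit_distance(first, second):
    m, n = len(first), len(second)
    costs = np.full((m + 1, n + 1), np.inf)
    costs[0][0] = 0.0
    for i in range(m + 1):
        for j in range(n + 1):
            if i < m:
                costs[i + 1][j] = min(costs[i + 1][j], costs[i][j] + 1.0)      # deletion
            if j < n:
                costs[i][j + 1] = min(costs[i][j + 1], costs[i][j] + 1.0)      # insertion
            if i < m and j < n:
                sub = 0.0 if first[i] == second[j] else 1.0
                costs[i + 1][j + 1] = min(costs[i + 1][j + 1], costs[i][j] + sub)
    return costs[-1][-1]  # the same cell that distance() reads as final_cost

assert edit_distance("cat", "cut") == 1.0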
test
|
SegmentTransducer.transduce
|
Returns all transductions mapping first to second
whose cost does not exceed threshold
Returns:
----------
result : list
    a list of the form [(transduction, cost)]
|
deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py
|
def transduce(self, first, second, threshold):
"""
        Returns all transductions mapping first to second
        whose cost does not exceed threshold
        Returns:
        ----------
        result : list
            a list of the form [(transduction, cost)]
"""
add_pred = (lambda x, y: x <= threshold)
clear_pred =(lambda x, y: False)
update_func = (lambda x, y: min(x, y))
costs, backtraces = self._fill_levenshtein_table(first, second,
update_func, add_pred, clear_pred,
threshold=threshold)
result = self._backtraces_to_transductions(first, second,
backtraces, threshold, return_cost=True)
return result
|
def transduce(self, first, second, threshold):
"""
        Returns all transductions mapping first to second
        whose cost does not exceed threshold
        Returns:
        ----------
        result : list
            a list of the form [(transduction, cost)]
"""
add_pred = (lambda x, y: x <= threshold)
clear_pred =(lambda x, y: False)
update_func = (lambda x, y: min(x, y))
costs, backtraces = self._fill_levenshtein_table(first, second,
update_func, add_pred, clear_pred,
threshold=threshold)
result = self._backtraces_to_transductions(first, second,
backtraces, threshold, return_cost=True)
return result
|
[
"Returns",
"all",
"transductions",
"mapping",
"first",
"to",
"second",
"whose",
"cost",
"does",
"not",
"exceed",
"threshold"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py#L389-L407
|
[
"def",
"transduce",
"(",
"self",
",",
"first",
",",
"second",
",",
"threshold",
")",
":",
"add_pred",
"=",
"(",
"lambda",
"x",
",",
"y",
":",
"x",
"<=",
"threshold",
")",
"clear_pred",
"=",
"(",
"lambda",
"x",
",",
"y",
":",
"False",
")",
"update_func",
"=",
"(",
"lambda",
"x",
",",
"y",
":",
"min",
"(",
"x",
",",
"y",
")",
")",
"costs",
",",
"backtraces",
"=",
"self",
".",
"_fill_levenshtein_table",
"(",
"first",
",",
"second",
",",
"update_func",
",",
"add_pred",
",",
"clear_pred",
",",
"threshold",
"=",
"threshold",
")",
"result",
"=",
"self",
".",
"_backtraces_to_transductions",
"(",
"first",
",",
"second",
",",
"backtraces",
",",
"threshold",
",",
"return_cost",
"=",
"True",
")",
"return",
"result"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
test
|
SegmentTransducer.lower_transductions
|
Returns all transductions with upper element word
whose cost does not exceed max_cost
Returns:
----------
result : list
    a list of the form [(transduction, cost)] if return_cost=True,
    a list of transductions if return_cost=False;
    the list is sorted by increasing transduction cost
|
deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py
|
def lower_transductions(self, word, max_cost, return_cost=True):
"""
        Returns all transductions with upper element word
        whose cost does not exceed max_cost
        Returns:
        ----------
        result : list
            a list of the form [(transduction, cost)] if return_cost=True,
            a list of transductions if return_cost=False;
            the list is sorted by increasing transduction cost
"""
prefixes = [[] for i in range(len(word) + 1)]
prefixes[0].append(((), 0.0))
for pos in range(len(prefixes)):
            # insertions
prefixes[pos] = self._perform_insertions(prefixes[pos], max_cost)
max_upperside_length = min(len(word) - pos, self.max_up_length)
for upperside_length in range(1, max_upperside_length + 1):
up = word[pos: pos + upperside_length]
for low, low_cost in self.operation_costs.get(up, dict()).items():
for transduction, cost in prefixes[pos]:
new_cost = cost + low_cost
if new_cost <= max_cost:
new_transduction = transduction +(up, low)
prefixes[pos + upperside_length].append((new_transduction, new_cost))
answer = sorted(prefixes[-1], key=(lambda x: x[0]))
if return_cost:
return answer
else:
return [elem[0] for elem in answer]
|
def lower_transductions(self, word, max_cost, return_cost=True):
"""
        Returns all transductions with upper element word
        whose cost does not exceed max_cost
        Returns:
        ----------
        result : list
            a list of the form [(transduction, cost)] if return_cost=True,
            a list of transductions if return_cost=False;
            the list is sorted by increasing transduction cost
"""
prefixes = [[] for i in range(len(word) + 1)]
prefixes[0].append(((), 0.0))
for pos in range(len(prefixes)):
            # insertions
prefixes[pos] = self._perform_insertions(prefixes[pos], max_cost)
max_upperside_length = min(len(word) - pos, self.max_up_length)
for upperside_length in range(1, max_upperside_length + 1):
up = word[pos: pos + upperside_length]
for low, low_cost in self.operation_costs.get(up, dict()).items():
for transduction, cost in prefixes[pos]:
new_cost = cost + low_cost
if new_cost <= max_cost:
new_transduction = transduction +(up, low)
prefixes[pos + upperside_length].append((new_transduction, new_cost))
answer = sorted(prefixes[-1], key=(lambda x: x[0]))
if return_cost:
return answer
else:
return [elem[0] for elem in answer]
|
[
"Returns",
"all",
"transductions",
"with",
"upper",
"element",
"word",
"whose",
"cost",
"does",
"not",
"exceed",
"max_cost"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py#L409-L439
|
[
"def",
"lower_transductions",
"(",
"self",
",",
"word",
",",
"max_cost",
",",
"return_cost",
"=",
"True",
")",
":",
"prefixes",
"=",
"[",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"word",
")",
"+",
"1",
")",
"]",
"prefixes",
"[",
"0",
"]",
".",
"append",
"(",
"(",
"(",
")",
",",
"0.0",
")",
")",
"for",
"pos",
"in",
"range",
"(",
"len",
"(",
"prefixes",
")",
")",
":",
"# вставки",
"prefixes",
"[",
"pos",
"]",
"=",
"self",
".",
"_perform_insertions",
"(",
"prefixes",
"[",
"pos",
"]",
",",
"max_cost",
")",
"max_upperside_length",
"=",
"min",
"(",
"len",
"(",
"word",
")",
"-",
"pos",
",",
"self",
".",
"max_up_length",
")",
"for",
"upperside_length",
"in",
"range",
"(",
"1",
",",
"max_upperside_length",
"+",
"1",
")",
":",
"up",
"=",
"word",
"[",
"pos",
":",
"pos",
"+",
"upperside_length",
"]",
"for",
"low",
",",
"low_cost",
"in",
"self",
".",
"operation_costs",
".",
"get",
"(",
"up",
",",
"dict",
"(",
")",
")",
".",
"items",
"(",
")",
":",
"for",
"transduction",
",",
"cost",
"in",
"prefixes",
"[",
"pos",
"]",
":",
"new_cost",
"=",
"cost",
"+",
"low_cost",
"if",
"new_cost",
"<=",
"max_cost",
":",
"new_transduction",
"=",
"transduction",
"+",
"(",
"up",
",",
"low",
")",
"prefixes",
"[",
"pos",
"+",
"upperside_length",
"]",
".",
"append",
"(",
"(",
"new_transduction",
",",
"new_cost",
")",
")",
"answer",
"=",
"sorted",
"(",
"prefixes",
"[",
"-",
"1",
"]",
",",
"key",
"=",
"(",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")",
")",
"if",
"return_cost",
":",
"return",
"answer",
"else",
":",
"return",
"[",
"elem",
"[",
"0",
"]",
"for",
"elem",
"in",
"answer",
"]"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
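lower_transductions extends partial outputs prefix by prefix and prunes as soon as the accumulated cost passes the budget. A toy version restricted to single-character substitutions, with an invented cost table (no insertions, so the _perform_insertions step is skipped):

costs = {"a": {"a": 0.0, "o": 0.5}, "b": {"b": 0.0, "p": 0.5}}

def lower_strings(word, max_cost):
    prefixes = [[] for _ in range(len(word) + 1)]
    prefixes[0].append(("", 0.0))
    for pos in range(len(word)):
        for low, low_cost in costs.get(word[pos], {}).items():
            for prefix, cost in prefixes[pos]:
                if cost + low_cost <= max_cost:   # prune over-budget continuations
                    prefixes[pos + 1].append((prefix + low, cost + low_cost))
    return sorted(prefixes[-1], key=lambda x: x[1])

print(lower_strings("ab", 0.5))
# [('ab', 0.0), ('ob', 0.5), ('ap', 0.5)]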
test
|
SegmentTransducer._fill_levenshtein_table
|
A function that dynamically fills the table costs of transduction costs,
costs[i][j] --- the minimal cost of a transduction
mapping first[:i] to second[:j]
Arguments:
----------
first, second : string
    the upper and lower elements of the transduction
update_func : callable, float*float -> float
    update_func(x, y) returns the new value of a cell of the costs table
    given the old value y and the potential new value x;
    update_func = min everywhere
add_pred : callable : float*float -> bool
    add_pred(x, y) returns whether a new element p of cost x
    is added to the cell backtraces[i][j],
    depending on the value costs[i][j]=y and the current cost x
clear_pred : callable : float*float -> bool
    clear_pred(x, y) returns whether the cell backtraces[i][j]
    is cleared, depending on the value costs[i][j]=y
    and the current cost x of the element p being added to that cell
Returns:
-----------
costs : array, dtype=float, shape=(len(first)+1, len(second)+1)
    an array whose cell with indices i, j stores
    the minimal cost of a transduction mapping first[:i] to second[:j]
backtraces : array, dtype=list, shape=(len(first)+1, len(second)+1)
    an array whose cell with indices i, j stores
    backtraces to the previous cell of an optimal transduction
    leading to the cell backtraces[i][j]
|
deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py
|
def _fill_levenshtein_table(self, first, second, update_func, add_pred, clear_pred,
threshold=None):
"""
        A function that dynamically fills the table costs of transduction costs,
        costs[i][j] --- the minimal cost of a transduction
        mapping first[:i] to second[:j]
        Arguments:
        ----------
        first, second : string
            the upper and lower elements of the transduction
        update_func : callable, float*float -> float
            update_func(x, y) returns the new value of a cell of the costs table
            given the old value y and the potential new value x;
            update_func = min everywhere
        add_pred : callable : float*float -> bool
            add_pred(x, y) returns whether a new element p of cost x
            is added to the cell backtraces[i][j],
            depending on the value costs[i][j]=y and the current cost x
        clear_pred : callable : float*float -> bool
            clear_pred(x, y) returns whether the cell backtraces[i][j]
            is cleared, depending on the value costs[i][j]=y
            and the current cost x of the element p being added to that cell
        Returns:
        -----------
        costs : array, dtype=float, shape=(len(first)+1, len(second)+1)
            an array whose cell with indices i, j stores
            the minimal cost of a transduction mapping first[:i] to second[:j]
        backtraces : array, dtype=list, shape=(len(first)+1, len(second)+1)
            an array whose cell with indices i, j stores
            backtraces to the previous cell of an optimal transduction
            leading to the cell backtraces[i][j]
"""
m, n = len(first), len(second)
        # if threshold=None, the threshold is twice the cost of the transduction
        # that maps symbols at identical positions onto each other
if threshold is None:
threshold = 0.0
for a, b in zip(first, second):
threshold += self.get_operation_cost(a, b)
if m > n:
for a in first[n: ]:
threshold += self.get_operation_cost(a, '')
elif m < n:
for b in second[m: ]:
threshold += self.get_operation_cost('', b)
threshold *= 2
        # initialize the returned arrays
costs = np.zeros(shape=(m + 1, n + 1), dtype=np.float64)
costs[:] = np.inf
backtraces = [None] * (m + 1)
for i in range(m + 1):
backtraces[i] = [[] for j in range(n + 1)]
costs[0][0] = 0.0
for i in range(m + 1):
for i_right in range(i, min(i + self.max_up_length, m) + 1):
up = first[i: i_right]
max_low_length = self.max_low_lengths_by_up.get(up, -1)
if max_low_length == -1: # no up key in transduction
continue
up_costs = self.operation_costs[up]
for j in range(n + 1):
if costs[i][j] > threshold:
continue
if len(backtraces[i][j]) == 0 and i + j > 0:
                        continue  # no backtraces found
for j_right in range((j if i_right > i else j + 1),
min(j + max_low_length, n) + 1):
low = second[j: j_right]
curr_cost = up_costs.get(low, np.inf)
old_cost = costs[i_right][j_right]
new_cost = costs[i][j] + curr_cost
if new_cost > threshold:
continue
if add_pred(new_cost, old_cost):
if clear_pred(new_cost, old_cost):
backtraces[i_right][j_right] = []
costs[i_right][j_right] = update_func(new_cost, old_cost)
backtraces[i_right][j_right].append((i, j))
return costs, backtraces
|
def _fill_levenshtein_table(self, first, second, update_func, add_pred, clear_pred,
threshold=None):
"""
        A function that dynamically fills the table costs of transduction costs,
        costs[i][j] --- the minimal cost of a transduction
        mapping first[:i] to second[:j]
        Arguments:
        ----------
        first, second : string
            the upper and lower elements of the transduction
        update_func : callable, float*float -> float
            update_func(x, y) returns the new value of a cell of the costs table
            given the old value y and the potential new value x;
            update_func = min everywhere
        add_pred : callable : float*float -> bool
            add_pred(x, y) returns whether a new element p of cost x
            is added to the cell backtraces[i][j],
            depending on the value costs[i][j]=y and the current cost x
        clear_pred : callable : float*float -> bool
            clear_pred(x, y) returns whether the cell backtraces[i][j]
            is cleared, depending on the value costs[i][j]=y
            and the current cost x of the element p being added to that cell
        Returns:
        -----------
        costs : array, dtype=float, shape=(len(first)+1, len(second)+1)
            an array whose cell with indices i, j stores
            the minimal cost of a transduction mapping first[:i] to second[:j]
        backtraces : array, dtype=list, shape=(len(first)+1, len(second)+1)
            an array whose cell with indices i, j stores
            backtraces to the previous cell of an optimal transduction
            leading to the cell backtraces[i][j]
"""
m, n = len(first), len(second)
        # if threshold=None, the threshold is twice the cost of the transduction
        # that maps symbols at identical positions onto each other
if threshold is None:
threshold = 0.0
for a, b in zip(first, second):
threshold += self.get_operation_cost(a, b)
if m > n:
for a in first[n: ]:
threshold += self.get_operation_cost(a, '')
elif m < n:
for b in second[m: ]:
threshold += self.get_operation_cost('', b)
threshold *= 2
        # initialize the returned arrays
costs = np.zeros(shape=(m + 1, n + 1), dtype=np.float64)
costs[:] = np.inf
backtraces = [None] * (m + 1)
for i in range(m + 1):
backtraces[i] = [[] for j in range(n + 1)]
costs[0][0] = 0.0
for i in range(m + 1):
for i_right in range(i, min(i + self.max_up_length, m) + 1):
up = first[i: i_right]
max_low_length = self.max_low_lengths_by_up.get(up, -1)
if max_low_length == -1: # no up key in transduction
continue
up_costs = self.operation_costs[up]
for j in range(n + 1):
if costs[i][j] > threshold:
continue
if len(backtraces[i][j]) == 0 and i + j > 0:
                        continue  # no backtraces found
for j_right in range((j if i_right > i else j + 1),
min(j + max_low_length, n) + 1):
low = second[j: j_right]
curr_cost = up_costs.get(low, np.inf)
old_cost = costs[i_right][j_right]
new_cost = costs[i][j] + curr_cost
if new_cost > threshold:
continue
if add_pred(new_cost, old_cost):
if clear_pred(new_cost, old_cost):
backtraces[i_right][j_right] = []
costs[i_right][j_right] = update_func(new_cost, old_cost)
backtraces[i_right][j_right].append((i, j))
return costs, backtraces
|
[
"A",
"function",
"that",
"dynamically",
"fills",
"the",
"table",
"costs",
"of",
"transduction",
"costs",
"costs",
"[",
"i",
"]",
"[",
"j",
"]",
"---",
"the",
"minimal",
"cost",
"of",
"a",
"transduction",
"mapping",
"first",
"[",
":",
"i",
"]",
"to",
"second",
"[",
":",
"j",
"]"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py#L463-L543
|
[
"def",
"_fill_levenshtein_table",
"(",
"self",
",",
"first",
",",
"second",
",",
"update_func",
",",
"add_pred",
",",
"clear_pred",
",",
"threshold",
"=",
"None",
")",
":",
"m",
",",
"n",
"=",
"len",
"(",
"first",
")",
",",
"len",
"(",
"second",
")",
"# если threshold=None, то в качестве порога берётся удвоенная стоимость",
"# трансдукции, отображающей символы на одинаковых позициях друг в друга",
"if",
"threshold",
"is",
"None",
":",
"threshold",
"=",
"0.0",
"for",
"a",
",",
"b",
"in",
"zip",
"(",
"first",
",",
"second",
")",
":",
"threshold",
"+=",
"self",
".",
"get_operation_cost",
"(",
"a",
",",
"b",
")",
"if",
"m",
">",
"n",
":",
"for",
"a",
"in",
"first",
"[",
"n",
":",
"]",
":",
"threshold",
"+=",
"self",
".",
"get_operation_cost",
"(",
"a",
",",
"''",
")",
"elif",
"m",
"<",
"n",
":",
"for",
"b",
"in",
"second",
"[",
"m",
":",
"]",
":",
"threshold",
"+=",
"self",
".",
"get_operation_cost",
"(",
"''",
",",
"b",
")",
"threshold",
"*=",
"2",
"# инициализация возвращаемых массивов",
"costs",
"=",
"np",
".",
"zeros",
"(",
"shape",
"=",
"(",
"m",
"+",
"1",
",",
"n",
"+",
"1",
")",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"costs",
"[",
":",
"]",
"=",
"np",
".",
"inf",
"backtraces",
"=",
"[",
"None",
"]",
"*",
"(",
"m",
"+",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"m",
"+",
"1",
")",
":",
"backtraces",
"[",
"i",
"]",
"=",
"[",
"[",
"]",
"for",
"j",
"in",
"range",
"(",
"n",
"+",
"1",
")",
"]",
"costs",
"[",
"0",
"]",
"[",
"0",
"]",
"=",
"0.0",
"for",
"i",
"in",
"range",
"(",
"m",
"+",
"1",
")",
":",
"for",
"i_right",
"in",
"range",
"(",
"i",
",",
"min",
"(",
"i",
"+",
"self",
".",
"max_up_length",
",",
"m",
")",
"+",
"1",
")",
":",
"up",
"=",
"first",
"[",
"i",
":",
"i_right",
"]",
"max_low_length",
"=",
"self",
".",
"max_low_lengths_by_up",
".",
"get",
"(",
"up",
",",
"-",
"1",
")",
"if",
"max_low_length",
"==",
"-",
"1",
":",
"# no up key in transduction",
"continue",
"up_costs",
"=",
"self",
".",
"operation_costs",
"[",
"up",
"]",
"for",
"j",
"in",
"range",
"(",
"n",
"+",
"1",
")",
":",
"if",
"costs",
"[",
"i",
"]",
"[",
"j",
"]",
">",
"threshold",
":",
"continue",
"if",
"len",
"(",
"backtraces",
"[",
"i",
"]",
"[",
"j",
"]",
")",
"==",
"0",
"and",
"i",
"+",
"j",
">",
"0",
":",
"continue",
"# не нашлось обратных ссылок",
"for",
"j_right",
"in",
"range",
"(",
"(",
"j",
"if",
"i_right",
">",
"i",
"else",
"j",
"+",
"1",
")",
",",
"min",
"(",
"j",
"+",
"max_low_length",
",",
"n",
")",
"+",
"1",
")",
":",
"low",
"=",
"second",
"[",
"j",
":",
"j_right",
"]",
"curr_cost",
"=",
"up_costs",
".",
"get",
"(",
"low",
",",
"np",
".",
"inf",
")",
"old_cost",
"=",
"costs",
"[",
"i_right",
"]",
"[",
"j_right",
"]",
"new_cost",
"=",
"costs",
"[",
"i",
"]",
"[",
"j",
"]",
"+",
"curr_cost",
"if",
"new_cost",
">",
"threshold",
":",
"continue",
"if",
"add_pred",
"(",
"new_cost",
",",
"old_cost",
")",
":",
"if",
"clear_pred",
"(",
"new_cost",
",",
"old_cost",
")",
":",
"backtraces",
"[",
"i_right",
"]",
"[",
"j_right",
"]",
"=",
"[",
"]",
"costs",
"[",
"i_right",
"]",
"[",
"j_right",
"]",
"=",
"update_func",
"(",
"new_cost",
",",
"old_cost",
")",
"backtraces",
"[",
"i_right",
"]",
"[",
"j_right",
"]",
".",
"append",
"(",
"(",
"i",
",",
"j",
")",
")",
"return",
"costs",
",",
"backtraces"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
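When threshold is None, _fill_levenshtein_table bounds the search by twice the cost of the naive transduction that aligns symbols position by position and inserts or deletes the unmatched tail. A small sketch of that default, assuming unit operation costs for the example:

def default_threshold(first, second):
    threshold = sum(1.0 if a != b else 0.0 for a, b in zip(first, second))
    threshold += abs(len(first) - len(second))  # unmatched tail: insertions or deletions
    return 2 * threshold

assert default_threshold("cat", "cut") == 2.0    # one substitution, doubled
assert default_threshold("cat", "carts") == 6.0  # one substitution plus two insertions, doubled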
test
|
SegmentTransducer._make_reversed_operation_costs
|
Fills the _reversed_operation_costs dictionary
based on the existing operation_costs dictionary
|
deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py
|
def _make_reversed_operation_costs(self):
"""
        Fills the _reversed_operation_costs dictionary
        based on the existing operation_costs dictionary
"""
_reversed_operation_costs = dict()
for up, costs in self.operation_costs.items():
for low, cost in costs.items():
if low not in _reversed_operation_costs:
_reversed_operation_costs[low] = dict()
_reversed_operation_costs[low][up] = cost
self._reversed_operation_costs = _reversed_operation_costs
|
def _make_reversed_operation_costs(self):
"""
        Fills the _reversed_operation_costs dictionary
        based on the existing operation_costs dictionary
"""
_reversed_operation_costs = dict()
for up, costs in self.operation_costs.items():
for low, cost in costs.items():
if low not in _reversed_operation_costs:
_reversed_operation_costs[low] = dict()
_reversed_operation_costs[low][up] = cost
self._reversed_operation_costs = _reversed_operation_costs
|
[
"Fills",
"the",
"_reversed_operation_costs",
"dictionary",
"based",
"on",
"the",
"existing",
"operation_costs",
"dictionary"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py#L545-L556
|
[
"def",
"_make_reversed_operation_costs",
"(",
"self",
")",
":",
"_reversed_operation_costs",
"=",
"dict",
"(",
")",
"for",
"up",
",",
"costs",
"in",
"self",
".",
"operation_costs",
".",
"items",
"(",
")",
":",
"for",
"low",
",",
"cost",
"in",
"costs",
".",
"items",
"(",
")",
":",
"if",
"low",
"not",
"in",
"_reversed_operation_costs",
":",
"_reversed_operation_costs",
"[",
"low",
"]",
"=",
"dict",
"(",
")",
"_reversed_operation_costs",
"[",
"low",
"]",
"[",
"up",
"]",
"=",
"cost",
"self",
".",
"_reversed_operation_costs",
"=",
"_reversed_operation_costs"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
test
|
SegmentTransducer._make_maximal_key_lengths
|
Computes the maximal length of the element low
in an elementary transduction (up, low) for every up,
and the maximal length of the element up
in an elementary transduction (up, low) for every low
|
deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py
|
def _make_maximal_key_lengths(self):
"""
        Computes the maximal length of the element low
        in an elementary transduction (up, low) for every up,
        and the maximal length of the element up
        in an elementary transduction (up, low) for every low
"""
self.max_up_length =\
(max(len(up) for up in self.operation_costs)
if len(self.operation_costs) > 0 else -1)
self.max_low_length =\
(max(len(low) for low in self._reversed_operation_costs)
if len(self._reversed_operation_costs) > 0 else -1)
self.max_low_lengths_by_up, self.max_up_lengths_by_low = dict(), dict()
for up, costs in self.operation_costs.items():
self.max_low_lengths_by_up[up] =\
max(len(low) for low in costs) if len(costs) > 0 else -1
for low, costs in self._reversed_operation_costs.items():
self.max_up_lengths_by_low[low] =\
max(len(up) for up in costs) if len(costs) > 0 else -1
|
def _make_maximal_key_lengths(self):
"""
        Computes the maximal length of the element low
        in an elementary transduction (up, low) for every up,
        and the maximal length of the element up
        in an elementary transduction (up, low) for every low
"""
self.max_up_length =\
(max(len(up) for up in self.operation_costs)
if len(self.operation_costs) > 0 else -1)
self.max_low_length =\
(max(len(low) for low in self._reversed_operation_costs)
if len(self._reversed_operation_costs) > 0 else -1)
self.max_low_lengths_by_up, self.max_up_lengths_by_low = dict(), dict()
for up, costs in self.operation_costs.items():
self.max_low_lengths_by_up[up] =\
max(len(low) for low in costs) if len(costs) > 0 else -1
for low, costs in self._reversed_operation_costs.items():
self.max_up_lengths_by_low[low] =\
max(len(up) for up in costs) if len(costs) > 0 else -1
|
[
"Computes",
"the",
"maximal",
"length",
"of",
"the",
"element",
"low",
"in",
"an",
"elementary",
"transduction",
"(",
"up",
"low",
")",
"for",
"every",
"up",
"and",
"the",
"maximal",
"length",
"of",
"the",
"element",
"up",
"in",
"an",
"elementary",
"transduction",
"(",
"up",
"low",
")",
"for",
"every",
"low"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py#L558-L577
|
[
"def",
"_make_maximal_key_lengths",
"(",
"self",
")",
":",
"self",
".",
"max_up_length",
"=",
"(",
"max",
"(",
"len",
"(",
"up",
")",
"for",
"up",
"in",
"self",
".",
"operation_costs",
")",
"if",
"len",
"(",
"self",
".",
"operation_costs",
")",
">",
"0",
"else",
"-",
"1",
")",
"self",
".",
"max_low_length",
"=",
"(",
"max",
"(",
"len",
"(",
"low",
")",
"for",
"low",
"in",
"self",
".",
"_reversed_operation_costs",
")",
"if",
"len",
"(",
"self",
".",
"_reversed_operation_costs",
")",
">",
"0",
"else",
"-",
"1",
")",
"self",
".",
"max_low_lengths_by_up",
",",
"self",
".",
"max_up_lengths_by_low",
"=",
"dict",
"(",
")",
",",
"dict",
"(",
")",
"for",
"up",
",",
"costs",
"in",
"self",
".",
"operation_costs",
".",
"items",
"(",
")",
":",
"self",
".",
"max_low_lengths_by_up",
"[",
"up",
"]",
"=",
"max",
"(",
"len",
"(",
"low",
")",
"for",
"low",
"in",
"costs",
")",
"if",
"len",
"(",
"costs",
")",
">",
"0",
"else",
"-",
"1",
"for",
"low",
",",
"costs",
"in",
"self",
".",
"_reversed_operation_costs",
".",
"items",
"(",
")",
":",
"self",
".",
"max_up_lengths_by_low",
"[",
"low",
"]",
"=",
"max",
"(",
"len",
"(",
"up",
")",
"for",
"up",
"in",
"costs",
")",
"if",
"len",
"(",
"costs",
")",
">",
"0",
"else",
"-",
"1"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
test
|
SegmentTransducer._backtraces_to_transductions
|
Reconstructs transductions from the backtrace table
Arguments:
----------
first, second : string
    the upper and lower elements of the transduction
backtraces : array-like, dtype=list, shape=(len(first)+1, len(second)+1)
    the backtrace table
threshold : float
    the threshold for filtering transductions;
    only transductions with cost <= threshold are returned
return_cost : bool (optional, default=False)
    if True, the costs are returned together with the transductions
Returns:
-----------
result : list
    a list of the form [(transduction, cost)] if return_cost=True
    and of the form [transduction] if return_cost=False,
    containing all transductions mapping first to second
    whose cost does not exceed threshold
|
deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py
|
def _backtraces_to_transductions(self, first, second, backtraces, threshold, return_cost=False):
"""
        Reconstructs transductions from the backtrace table
        Arguments:
        ----------
        first, second : string
            the upper and lower elements of the transduction
        backtraces : array-like, dtype=list, shape=(len(first)+1, len(second)+1)
            the backtrace table
        threshold : float
            the threshold for filtering transductions;
            only transductions with cost <= threshold are returned
        return_cost : bool (optional, default=False)
            if True, the costs are returned together with the transductions
        Returns:
        -----------
        result : list
            a list of the form [(transduction, cost)] if return_cost=True
            and of the form [transduction] if return_cost=False,
            containing all transductions mapping first to second
            whose cost does not exceed threshold
"""
m, n = len(first), len(second)
agenda = [None] * (m + 1)
for i in range(m + 1):
agenda[i] = [[] for j in range(n+1)]
agenda[m][n] = [((), 0.0)]
for i_right in range(m, -1, -1):
for j_right in range(n, -1, -1):
current_agenda = agenda[i_right][j_right]
if len(current_agenda) == 0:
continue
for (i, j) in backtraces[i_right][j_right]:
up, low = first[i:i_right], second[j:j_right]
add_cost = self.operation_costs[up][low]
for elem, cost in current_agenda:
new_cost = cost + add_cost
                        if new_cost <= threshold:  # discard transductions whose cost is too high
agenda[i][j].append((((up, low),) + elem, new_cost))
if return_cost:
return agenda[0][0]
else:
return [elem[0] for elem in agenda[0][0]]
|
def _backtraces_to_transductions(self, first, second, backtraces, threshold, return_cost=False):
"""
        Reconstructs transductions from the backtrace table
        Arguments:
        ----------
        first, second : string
            the upper and lower elements of the transduction
        backtraces : array-like, dtype=list, shape=(len(first)+1, len(second)+1)
            the backtrace table
        threshold : float
            the threshold for filtering transductions;
            only transductions with cost <= threshold are returned
        return_cost : bool (optional, default=False)
            if True, the costs are returned together with the transductions
        Returns:
        -----------
        result : list
            a list of the form [(transduction, cost)] if return_cost=True
            and of the form [transduction] if return_cost=False,
            containing all transductions mapping first to second
            whose cost does not exceed threshold
"""
m, n = len(first), len(second)
agenda = [None] * (m + 1)
for i in range(m + 1):
agenda[i] = [[] for j in range(n+1)]
agenda[m][n] = [((), 0.0)]
for i_right in range(m, -1, -1):
for j_right in range(n, -1, -1):
current_agenda = agenda[i_right][j_right]
if len(current_agenda) == 0:
continue
for (i, j) in backtraces[i_right][j_right]:
up, low = first[i:i_right], second[j:j_right]
add_cost = self.operation_costs[up][low]
for elem, cost in current_agenda:
new_cost = cost + add_cost
                        if new_cost <= threshold:  # discard transductions whose cost is too high
agenda[i][j].append((((up, low),) + elem, new_cost))
if return_cost:
return agenda[0][0]
else:
return [elem[0] for elem in agenda[0][0]]
|
[
"Reconstructs",
"transductions",
"from",
"the",
"backtrace",
"table"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py#L579-L623
|
[
"def",
"_backtraces_to_transductions",
"(",
"self",
",",
"first",
",",
"second",
",",
"backtraces",
",",
"threshold",
",",
"return_cost",
"=",
"False",
")",
":",
"m",
",",
"n",
"=",
"len",
"(",
"first",
")",
",",
"len",
"(",
"second",
")",
"agenda",
"=",
"[",
"None",
"]",
"*",
"(",
"m",
"+",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"m",
"+",
"1",
")",
":",
"agenda",
"[",
"i",
"]",
"=",
"[",
"[",
"]",
"for",
"j",
"in",
"range",
"(",
"n",
"+",
"1",
")",
"]",
"agenda",
"[",
"m",
"]",
"[",
"n",
"]",
"=",
"[",
"(",
"(",
")",
",",
"0.0",
")",
"]",
"for",
"i_right",
"in",
"range",
"(",
"m",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"for",
"j_right",
"in",
"range",
"(",
"n",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"current_agenda",
"=",
"agenda",
"[",
"i_right",
"]",
"[",
"j_right",
"]",
"if",
"len",
"(",
"current_agenda",
")",
"==",
"0",
":",
"continue",
"for",
"(",
"i",
",",
"j",
")",
"in",
"backtraces",
"[",
"i_right",
"]",
"[",
"j_right",
"]",
":",
"up",
",",
"low",
"=",
"first",
"[",
"i",
":",
"i_right",
"]",
",",
"second",
"[",
"j",
":",
"j_right",
"]",
"add_cost",
"=",
"self",
".",
"operation_costs",
"[",
"up",
"]",
"[",
"low",
"]",
"for",
"elem",
",",
"cost",
"in",
"current_agenda",
":",
"new_cost",
"=",
"cost",
"+",
"add_cost",
"if",
"new_cost",
"<=",
"threshold",
":",
"# удаление трансдукций большой стоимости",
"agenda",
"[",
"i",
"]",
"[",
"j",
"]",
".",
"append",
"(",
"(",
"(",
"(",
"up",
",",
"low",
")",
",",
")",
"+",
"elem",
",",
"new_cost",
")",
")",
"if",
"return_cost",
":",
"return",
"agenda",
"[",
"0",
"]",
"[",
"0",
"]",
"else",
":",
"return",
"[",
"elem",
"[",
"0",
"]",
"for",
"elem",
"in",
"agenda",
"[",
"0",
"]",
"[",
"0",
"]",
"]"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
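The reconstruction walks the backtrace table from the final cell (len(first), len(second)) back to (0, 0), emitting one (up, low) pair per step. The real method enumerates every chain of backtraces and tracks costs; the sketch below follows a single chain over a hand-made table for "ab" -> "ob" (both the table and the helper are illustrative):

def reconstruct(backtraces, first, second):
    pairs = []
    i, j = len(first), len(second)
    while (i, j) != (0, 0):
        pi, pj = backtraces[i][j][0]               # follow the (only) backtrace
        pairs.append((first[pi:i], second[pj:j]))  # the operation taken on this step
        i, j = pi, pj
    return list(reversed(pairs))

backtraces = [[[] for _ in range(3)] for _ in range(3)]
backtraces[1][1] = [(0, 0)]   # cell (1, 1) was reached by 'a' -> 'o'
backtraces[2][2] = [(1, 1)]   # cell (2, 2) was reached by 'b' -> 'b'
assert reconstruct(backtraces, "ab", "ob") == [("a", "o"), ("b", "b")]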
test
|
SegmentTransducer._perform_insertions
|
Returns all transductions of cost <= max_cost
that can be obtained from the elements of initial
Arguments:
----------
initial : list of tuples
    the list of initial transductions of the form [(transduction, cost)]
max_cost : float
    the maximal transduction cost
Returns:
-----------
final : list of tuples
    the final list of transductions of the form [(transduction, cost)]
|
deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py
|
def _perform_insertions(self, initial, max_cost):
"""
        Returns all transductions of cost <= max_cost
        that can be obtained from the elements of initial
        Arguments:
        ----------
        initial : list of tuples
            the list of initial transductions of the form [(transduction, cost)]
        max_cost : float
            the maximal transduction cost
        Returns:
        -----------
        final : list of tuples
            the final list of transductions of the form [(transduction, cost)]
"""
queue = list(initial)
final = initial
while len(queue) > 0:
transduction, cost = queue[0]
queue = queue[1:]
for string, string_cost in self.operation_costs[""].items():
new_cost = cost + string_cost
if new_cost <= max_cost:
new_transduction = transduction + ("", string)
final.append((new_transduction, new_cost))
queue.append((new_transduction, new_cost))
return final
|
def _perform_insertions(self, initial, max_cost):
"""
        Returns all transductions of cost <= max_cost
        that can be obtained from the elements of initial
        Arguments:
        ----------
        initial : list of tuples
            the list of initial transductions of the form [(transduction, cost)]
        max_cost : float
            the maximal transduction cost
        Returns:
        -----------
        final : list of tuples
            the final list of transductions of the form [(transduction, cost)]
"""
queue = list(initial)
final = initial
while len(queue) > 0:
transduction, cost = queue[0]
queue = queue[1:]
for string, string_cost in self.operation_costs[""].items():
new_cost = cost + string_cost
if new_cost <= max_cost:
new_transduction = transduction + ("", string)
final.append((new_transduction, new_cost))
queue.append((new_transduction, new_cost))
return final
|
[
"Returns",
"all",
"transductions",
"of",
"cost",
"<",
"=",
"max_cost",
"that",
"can",
"be",
"obtained",
"from",
"the",
"elements",
"of",
"initial"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py#L625-L653
|
[
"def",
"_perform_insertions",
"(",
"self",
",",
"initial",
",",
"max_cost",
")",
":",
"queue",
"=",
"list",
"(",
"initial",
")",
"final",
"=",
"initial",
"while",
"len",
"(",
"queue",
")",
">",
"0",
":",
"transduction",
",",
"cost",
"=",
"queue",
"[",
"0",
"]",
"queue",
"=",
"queue",
"[",
"1",
":",
"]",
"for",
"string",
",",
"string_cost",
"in",
"self",
".",
"operation_costs",
"[",
"\"\"",
"]",
".",
"items",
"(",
")",
":",
"new_cost",
"=",
"cost",
"+",
"string_cost",
"if",
"new_cost",
"<=",
"max_cost",
":",
"new_transduction",
"=",
"transduction",
"+",
"(",
"\"\"",
",",
"string",
")",
"final",
".",
"append",
"(",
"(",
"new_transduction",
",",
"new_cost",
")",
")",
"queue",
".",
"append",
"(",
"(",
"new_transduction",
",",
"new_cost",
")",
")",
"return",
"final"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
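_perform_insertions computes an epsilon-closure: every transduction is repeatedly extended with insertions out of the empty upper string until the budget is exhausted. A standalone sketch using collections.deque for the worklist; the insertion costs are invented, and the sketch stores each operation as an explicit (up, low) pair:

from collections import deque

insertion_costs = {"x": 0.6}   # cost of producing "x" out of the empty string

def perform_insertions(initial, max_cost):
    queue = deque(initial)
    final = list(initial)
    while queue:
        transduction, cost = queue.popleft()
        for string, string_cost in insertion_costs.items():
            new_cost = cost + string_cost
            if new_cost <= max_cost:
                new_transduction = transduction + (("", string),)
                final.append((new_transduction, new_cost))
                queue.append((new_transduction, new_cost))  # may be extended again
    return final

print(perform_insertions([((), 0.0)], 1.2))
# [((), 0.0), ((('', 'x'),), 0.6), ((('', 'x'), ('', 'x')), 1.2)]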
test
|
SegmentTransducer._make_default_operation_costs
|
sets 1.0 cost for every replacement, insertion, deletion and transposition
|
deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py
|
def _make_default_operation_costs(self, allow_spaces=False):
"""
sets 1.0 cost for every replacement, insertion, deletion and transposition
"""
self.operation_costs = dict()
self.operation_costs[""] = {c: 1.0 for c in list(self.alphabet) + [' ']}
for a in self.alphabet:
current_costs = {c: 1.0 for c in self.alphabet}
current_costs[a] = 0.0
current_costs[""] = 1.0
if allow_spaces:
current_costs[" "] = 1.0
self.operation_costs[a] = current_costs
# transpositions
for a, b in itertools.permutations(self.alphabet, 2):
self.operation_costs[a + b] = {b + a: 1.0}
# spaces
if allow_spaces:
self.operation_costs[" "] = {c: 1.0 for c in self.alphabet}
self.operation_costs[" "][""] = 1.0
|
def _make_default_operation_costs(self, allow_spaces=False):
"""
sets 1.0 cost for every replacement, insertion, deletion and transposition
"""
self.operation_costs = dict()
self.operation_costs[""] = {c: 1.0 for c in list(self.alphabet) + [' ']}
for a in self.alphabet:
current_costs = {c: 1.0 for c in self.alphabet}
current_costs[a] = 0.0
current_costs[""] = 1.0
if allow_spaces:
current_costs[" "] = 1.0
self.operation_costs[a] = current_costs
# transpositions
for a, b in itertools.permutations(self.alphabet, 2):
self.operation_costs[a + b] = {b + a: 1.0}
# spaces
if allow_spaces:
self.operation_costs[" "] = {c: 1.0 for c in self.alphabet}
self.operation_costs[" "][""] = 1.0
|
[
"sets",
"1",
".",
"0",
"cost",
"for",
"every",
"replacement",
"insertion",
"deletion",
"and",
"transposition"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/spelling_correction/levenshtein/levenshtein_searcher.py#L655-L674
|
[
"def",
"_make_default_operation_costs",
"(",
"self",
",",
"allow_spaces",
"=",
"False",
")",
":",
"self",
".",
"operation_costs",
"=",
"dict",
"(",
")",
"self",
".",
"operation_costs",
"[",
"\"\"",
"]",
"=",
"{",
"c",
":",
"1.0",
"for",
"c",
"in",
"list",
"(",
"self",
".",
"alphabet",
")",
"+",
"[",
"' '",
"]",
"}",
"for",
"a",
"in",
"self",
".",
"alphabet",
":",
"current_costs",
"=",
"{",
"c",
":",
"1.0",
"for",
"c",
"in",
"self",
".",
"alphabet",
"}",
"current_costs",
"[",
"a",
"]",
"=",
"0.0",
"current_costs",
"[",
"\"\"",
"]",
"=",
"1.0",
"if",
"allow_spaces",
":",
"current_costs",
"[",
"\" \"",
"]",
"=",
"1.0",
"self",
".",
"operation_costs",
"[",
"a",
"]",
"=",
"current_costs",
"# транспозиции",
"for",
"a",
",",
"b",
"in",
"itertools",
".",
"permutations",
"(",
"self",
".",
"alphabet",
",",
"2",
")",
":",
"self",
".",
"operation_costs",
"[",
"a",
"+",
"b",
"]",
"=",
"{",
"b",
"+",
"a",
":",
"1.0",
"}",
"# пробелы",
"if",
"allow_spaces",
":",
"self",
".",
"operation_costs",
"[",
"\" \"",
"]",
"=",
"{",
"c",
":",
"1.0",
"for",
"c",
"in",
"self",
".",
"alphabet",
"}",
"self",
".",
"operation_costs",
"[",
"\" \"",
"]",
"[",
"\"\"",
"]",
"=",
"1.0"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
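What the default table looks like for a tiny alphabet -- a standalone sketch that mirrors the loop above instead of calling the class:

import itertools

alphabet = "ab"
operation_costs = {"": {c: 1.0 for c in list(alphabet) + [" "]}}  # insertions
for a in alphabet:
    costs = {c: 1.0 for c in alphabet}  # substitutions
    costs[a] = 0.0                      # keeping a symbol is free
    costs[""] = 1.0                     # deletion
    operation_costs[a] = costs
for a, b in itertools.permutations(alphabet, 2):
    operation_costs[a + b] = {b + a: 1.0}  # adjacent transposition

print(operation_costs["a"])   # {'a': 0.0, 'b': 1.0, '': 1.0}
print(operation_costs["ab"])  # {'ba': 1.0}
|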
test
|
Conversation._start_timer
|
Initiates self-destruct timer.
|
deeppavlov/utils/alexa/conversation.py
|
def _start_timer(self) -> None:
"""Initiates self-destruct timer."""
self.timer = Timer(self.config['conversation_lifetime'], self.self_destruct_callback)
self.timer.start()
|
def _start_timer(self) -> None:
"""Initiates self-destruct timer."""
self.timer = Timer(self.config['conversation_lifetime'], self.self_destruct_callback)
self.timer.start()
|
[
"Initiates",
"self",
"-",
"destruct",
"timer",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/utils/alexa/conversation.py#L69-L72
|
[
"def",
"_start_timer",
"(",
"self",
")",
"->",
"None",
":",
"self",
".",
"timer",
"=",
"Timer",
"(",
"self",
".",
"config",
"[",
"'conversation_lifetime'",
"]",
",",
"self",
".",
"self_destruct_callback",
")",
"self",
".",
"timer",
".",
"start",
"(",
")"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
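The same rearmable self-destruct pattern in isolation, using only the standard library; the class and attribute names below are made up for the sketch and do not come from DeepPavlov:

from threading import Timer

class Expiring:
    def __init__(self, lifetime: float):
        self.lifetime = lifetime
        self.timer = None
        self._start_timer()

    def _start_timer(self) -> None:
        self.timer = Timer(self.lifetime, self.self_destruct_callback)
        self.timer.start()

    def _rearm_self_destruct(self) -> None:
        self.timer.cancel()  # drop the pending timer before arming a new one
        self._start_timer()

    def self_destruct_callback(self) -> None:
        print("conversation expired")

conv = Expiring(1.0)
conv._rearm_self_destruct()  # each handled request postpones expiration like this
conv.timer.cancel()          # clean shutdown for the example
|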
test
|
Conversation.handle_request
|
Routes Alexa requests to appropriate handlers.
Args:
request: Alexa request.
Returns:
response: Response conforming Alexa response specification.
|
deeppavlov/utils/alexa/conversation.py
|
def handle_request(self, request: dict) -> dict:
"""Routes Alexa requests to appropriate handlers.
Args:
request: Alexa request.
Returns:
response: Response conforming Alexa response specification.
"""
request_type = request['request']['type']
request_id = request['request']['requestId']
log.debug(f'Received request. Type: {request_type}, id: {request_id}')
if request_type in self.handled_requests.keys():
response: dict = self.handled_requests[request_type](request)
else:
response: dict = self.handled_requests['_unsupported'](request)
log.warning(f'Unsupported request type: {request_type}, request id: {request_id}')
self._rearm_self_destruct()
return response
|
def handle_request(self, request: dict) -> dict:
"""Routes Alexa requests to appropriate handlers.
Args:
request: Alexa request.
Returns:
response: Response conforming Alexa response specification.
"""
request_type = request['request']['type']
request_id = request['request']['requestId']
log.debug(f'Received request. Type: {request_type}, id: {request_id}')
if request_type in self.handled_requests.keys():
response: dict = self.handled_requests[request_type](request)
else:
response: dict = self.handled_requests['_unsupported'](request)
log.warning(f'Unsupported request type: {request_type}, request id: {request_id}')
self._rearm_self_destruct()
return response
|
[
"Routes",
"Alexa",
"requests",
"to",
"appropriate",
"handlers",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/utils/alexa/conversation.py#L79-L99
|
[
"def",
"handle_request",
"(",
"self",
",",
"request",
":",
"dict",
")",
"->",
"dict",
":",
"request_type",
"=",
"request",
"[",
"'request'",
"]",
"[",
"'type'",
"]",
"request_id",
"=",
"request",
"[",
"'request'",
"]",
"[",
"'requestId'",
"]",
"log",
".",
"debug",
"(",
"f'Received request. Type: {request_type}, id: {request_id}'",
")",
"if",
"request_type",
"in",
"self",
".",
"handled_requests",
".",
"keys",
"(",
")",
":",
"response",
":",
"dict",
"=",
"self",
".",
"handled_requests",
"[",
"request_type",
"]",
"(",
"request",
")",
"else",
":",
"response",
":",
"dict",
"=",
"self",
".",
"handled_requests",
"[",
"'_unsupported'",
"]",
"(",
"request",
")",
"log",
".",
"warning",
"(",
"f'Unsupported request type: {request_type}, request id: {request_id}'",
")",
"self",
".",
"_rearm_self_destruct",
"(",
")",
"return",
"response"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
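The lookup in `handle_request` is a plain dispatch table with a fallback key. A minimal sketch of the same idea (request shape and handler names are illustrative):

def handle_launch(request):
    return {"handled": "LaunchRequest"}

def handle_unsupported(request):
    return {"handled": "unsupported"}

handled_requests = {
    "LaunchRequest": handle_launch,
    "_unsupported": handle_unsupported,
}

def handle(request):
    request_type = request["request"]["type"]
    handler = handled_requests.get(request_type, handled_requests["_unsupported"])
    return handler(request)

print(handle({"request": {"type": "LaunchRequest"}}))  # {'handled': 'LaunchRequest'}
print(handle({"request": {"type": "SomethingElse"}}))  # {'handled': 'unsupported'}
|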
test
|
Conversation._act
|
Infers DeepPavlov agent with raw user input extracted from Alexa request.
Args:
utterance: Raw user input extracted from Alexa request.
Returns:
response: DeepPavlov agent response.
|
deeppavlov/utils/alexa/conversation.py
|
def _act(self, utterance: str) -> list:
"""Infers DeepPavlov agent with raw user input extracted from Alexa request.
Args:
utterance: Raw user input extracted from Alexa request.
Returns:
response: DeepPavlov agent response.
"""
if self.stateful:
utterance = [[utterance], [self.key]]
else:
utterance = [[utterance]]
agent_response: list = self.agent(*utterance)
return agent_response
|
def _act(self, utterance: str) -> list:
"""Infers DeepPavlov agent with raw user input extracted from Alexa request.
Args:
utterance: Raw user input extracted from Alexa request.
Returns:
response: DeepPavlov agent response.
"""
if self.stateful:
utterance = [[utterance], [self.key]]
else:
utterance = [[utterance]]
agent_response: list = self.agent(*utterance)
return agent_response
|
[
"Infers",
"DeepPavlov",
"agent",
"with",
"raw",
"user",
"input",
"extracted",
"from",
"Alexa",
"request",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/utils/alexa/conversation.py#L101-L116
|
[
"def",
"_act",
"(",
"self",
",",
"utterance",
":",
"str",
")",
"->",
"list",
":",
"if",
"self",
".",
"stateful",
":",
"utterance",
"=",
"[",
"[",
"utterance",
"]",
",",
"[",
"self",
".",
"key",
"]",
"]",
"else",
":",
"utterance",
"=",
"[",
"[",
"utterance",
"]",
"]",
"agent_response",
":",
"list",
"=",
"self",
".",
"agent",
"(",
"*",
"utterance",
")",
"return",
"agent_response"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
test
|
Conversation._generate_response
|
Populates generated response with additional data conforming Alexa response specification.
Args:
response: Raw response dict to be populated with template data.
request: Alexa request.
Returns:
response: Response conforming Alexa response specification.
|
deeppavlov/utils/alexa/conversation.py
|
def _generate_response(self, response: dict, request: dict) -> dict:
"""Populates generated response with additional data conforming Alexa response specification.
Args:
response: Raw response dict to be populated with template data.
request: Alexa request.
Returns:
response: Response conforming Alexa response specification.
"""
response_template = deepcopy(self.response_template)
response_template['sessionAttributes']['sessionId'] = request['session']['sessionId']
for key, value in response_template.items():
if key not in response.keys():
response[key] = value
return response
|
def _generate_response(self, response: dict, request: dict) -> dict:
"""Populates generated response with additional data conforming Alexa response specification.
Args:
response: Raw response dict to be populated with template data.
request: Alexa request.
Returns:
response: Response conforming Alexa response specification.
"""
response_template = deepcopy(self.response_template)
response_template['sessionAttributes']['sessionId'] = request['session']['sessionId']
for key, value in response_template.items():
if key not in response.keys():
response[key] = value
return response
|
[
"Populates",
"generated",
"response",
"with",
"additional",
"data",
"conforming",
"Alexa",
"response",
"specification",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/utils/alexa/conversation.py#L118-L134
|
[
"def",
"_generate_response",
"(",
"self",
",",
"response",
":",
"dict",
",",
"request",
":",
"dict",
")",
"->",
"dict",
":",
"response_template",
"=",
"deepcopy",
"(",
"self",
".",
"response_template",
")",
"response_template",
"[",
"'sessionAttributes'",
"]",
"[",
"'sessionId'",
"]",
"=",
"request",
"[",
"'session'",
"]",
"[",
"'sessionId'",
"]",
"for",
"key",
",",
"value",
"in",
"response_template",
".",
"items",
"(",
")",
":",
"if",
"key",
"not",
"in",
"response",
".",
"keys",
"(",
")",
":",
"response",
"[",
"key",
"]",
"=",
"value",
"return",
"response"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
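The population step reduces to "fill missing keys from a deep-copied template". Demonstrated standalone below; the dict contents are illustrative, not a real Alexa payload:

from copy import deepcopy

response_template = {"version": "1.0", "sessionAttributes": {"sessionId": None}}
response = {"response": {"outputSpeech": {"type": "PlainText", "text": "hi"}}}

template = deepcopy(response_template)  # never mutate the shared template
template["sessionAttributes"]["sessionId"] = "amzn1.echo-api.session.123"
for key, value in template.items():
    if key not in response:             # keys already present win
        response[key] = value

print(sorted(response))  # ['response', 'sessionAttributes', 'version']
|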
test
|
Conversation._handle_intent
|
Handles IntentRequest Alexa request.
Args:
request: Alexa request.
Returns:
response: "response" part of response dict conforming Alexa specification.
|
deeppavlov/utils/alexa/conversation.py
|
def _handle_intent(self, request: dict) -> dict:
"""Handles IntentRequest Alexa request.
Args:
request: Alexa request.
Returns:
response: "response" part of response dict conforming Alexa specification.
"""
intent_name = self.config['intent_name']
slot_name = self.config['slot_name']
request_id = request['request']['requestId']
request_intent: dict = request['request']['intent']
if intent_name != request_intent['name']:
log.error(f"Wrong intent name received: {request_intent['name']} in request {request_id}")
return {'error': 'wrong intent name'}
if slot_name not in request_intent['slots'].keys():
log.error(f'No slot named {slot_name} found in request {request_id}')
return {'error': 'no slot found'}
utterance = request_intent['slots'][slot_name]['value']
agent_response = self._act(utterance)
if not agent_response:
log.error(f'Some error during response generation for request {request_id}')
return {'error': 'error during response generation'}
prediction: RichMessage = agent_response[0]
prediction: list = prediction.alexa()
if not prediction:
log.error(f'Some error during response generation for request {request_id}')
return {'error': 'error during response generation'}
response = self._generate_response(prediction[0], request)
return response
|
def _handle_intent(self, request: dict) -> dict:
"""Handles IntentRequest Alexa request.
Args:
request: Alexa request.
Returns:
response: "response" part of response dict conforming Alexa specification.
"""
intent_name = self.config['intent_name']
slot_name = self.config['slot_name']
request_id = request['request']['requestId']
request_intent: dict = request['request']['intent']
if intent_name != request_intent['name']:
log.error(f"Wrong intent name received: {request_intent['name']} in request {request_id}")
return {'error': 'wrong intent name'}
if slot_name not in request_intent['slots'].keys():
log.error(f'No slot named {slot_name} found in request {request_id}')
return {'error': 'no slot found'}
utterance = request_intent['slots'][slot_name]['value']
agent_response = self._act(utterance)
if not agent_response:
log.error(f'Some error during response generation for request {request_id}')
return {'error': 'error during response generation'}
prediction: RichMessage = agent_response[0]
prediction: list = prediction.alexa()
if not prediction:
log.error(f'Some error during response generation for request {request_id}')
return {'error': 'error during response generation'}
response = self._generate_response(prediction[0], request)
return response
|
[
"Handles",
"IntentRequest",
"Alexa",
"request",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/utils/alexa/conversation.py#L136-L174
|
[
"def",
"_handle_intent",
"(",
"self",
",",
"request",
":",
"dict",
")",
"->",
"dict",
":",
"intent_name",
"=",
"self",
".",
"config",
"[",
"'intent_name'",
"]",
"slot_name",
"=",
"self",
".",
"config",
"[",
"'slot_name'",
"]",
"request_id",
"=",
"request",
"[",
"'request'",
"]",
"[",
"'requestId'",
"]",
"request_intent",
":",
"dict",
"=",
"request",
"[",
"'request'",
"]",
"[",
"'intent'",
"]",
"if",
"intent_name",
"!=",
"request_intent",
"[",
"'name'",
"]",
":",
"log",
".",
"error",
"(",
"f\"Wrong intent name received: {request_intent['name']} in request {request_id}\"",
")",
"return",
"{",
"'error'",
":",
"'wrong intent name'",
"}",
"if",
"slot_name",
"not",
"in",
"request_intent",
"[",
"'slots'",
"]",
".",
"keys",
"(",
")",
":",
"log",
".",
"error",
"(",
"f'No slot named {slot_name} found in request {request_id}'",
")",
"return",
"{",
"'error'",
":",
"'no slot found'",
"}",
"utterance",
"=",
"request_intent",
"[",
"'slots'",
"]",
"[",
"slot_name",
"]",
"[",
"'value'",
"]",
"agent_response",
"=",
"self",
".",
"_act",
"(",
"utterance",
")",
"if",
"not",
"agent_response",
":",
"log",
".",
"error",
"(",
"f'Some error during response generation for request {request_id}'",
")",
"return",
"{",
"'error'",
":",
"'error during response generation'",
"}",
"prediction",
":",
"RichMessage",
"=",
"agent_response",
"[",
"0",
"]",
"prediction",
":",
"list",
"=",
"prediction",
".",
"alexa",
"(",
")",
"if",
"not",
"prediction",
":",
"log",
".",
"error",
"(",
"f'Some error during response generation for request {request_id}'",
")",
"return",
"{",
"'error'",
":",
"'error during response generation'",
"}",
"response",
"=",
"self",
".",
"_generate_response",
"(",
"prediction",
"[",
"0",
"]",
",",
"request",
")",
"return",
"response"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
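For reference, the shape of the IntentRequest fields the handler reads -- a trimmed, hypothetical request (real Alexa requests carry many more fields, and the intent and slot names are configurable):

request = {
    "request": {
        "type": "IntentRequest",
        "requestId": "amzn1.echo-api.request.example",
        "intent": {
            "name": "AskDeepPavlov",
            "slots": {"raw_input": {"name": "raw_input", "value": "hello bot"}},
        },
    }
}

request_intent = request["request"]["intent"]
utterance = request_intent["slots"]["raw_input"]["value"]
print(utterance)  # hello bot
|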
test
|
Conversation._handle_launch
|
Handles LaunchRequest Alexa request.
Args:
request: Alexa request.
Returns:
response: "response" part of response dict conforming Alexa specification.
|
deeppavlov/utils/alexa/conversation.py
|
def _handle_launch(self, request: dict) -> dict:
"""Handles LaunchRequest Alexa request.
Args:
request: Alexa request.
Returns:
response: "response" part of response dict conforming Alexa specification.
"""
response = {
'response': {
'shouldEndSession': False,
'outputSpeech': {
'type': 'PlainText',
'text': self.config['start_message']
},
'card': {
'type': 'Simple',
'content': self.config['start_message']
}
}
}
response = self._generate_response(response, request)
return response
|
def _handle_launch(self, request: dict) -> dict:
"""Handles LaunchRequest Alexa request.
Args:
request: Alexa request.
Returns:
response: "response" part of response dict conforming Alexa specification.
"""
response = {
'response': {
'shouldEndSession': False,
'outputSpeech': {
'type': 'PlainText',
'text': self.config['start_message']
},
'card': {
'type': 'Simple',
'content': self.config['start_message']
}
}
}
response = self._generate_response(response, request)
return response
|
[
"Handles",
"LaunchRequest",
"Alexa",
"request",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/utils/alexa/conversation.py#L176-L200
|
[
"def",
"_handle_launch",
"(",
"self",
",",
"request",
":",
"dict",
")",
"->",
"dict",
":",
"response",
"=",
"{",
"'response'",
":",
"{",
"'shouldEndSession'",
":",
"False",
",",
"'outputSpeech'",
":",
"{",
"'type'",
":",
"'PlainText'",
",",
"'text'",
":",
"self",
".",
"config",
"[",
"'start_message'",
"]",
"}",
",",
"'card'",
":",
"{",
"'type'",
":",
"'Simple'",
",",
"'content'",
":",
"self",
".",
"config",
"[",
"'start_message'",
"]",
"}",
"}",
"}",
"response",
"=",
"self",
".",
"_generate_response",
"(",
"response",
",",
"request",
")",
"return",
"response"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
test
|
Conversation._handle_unsupported
|
Handles all unsupported types of Alexa requests. Returns standard message.
Args:
request: Alexa request.
Returns:
response: "response" part of response dict conforming Alexa specification.
|
deeppavlov/utils/alexa/conversation.py
|
def _handle_unsupported(self, request: dict) -> dict:
"""Handles all unsupported types of Alexa requests. Returns standard message.
Args:
request: Alexa request.
Returns:
response: "response" part of response dict conforming Alexa specification.
"""
response = {
'response': {
'shouldEndSession': False,
'outputSpeech': {
'type': 'PlainText',
'text': self.config['unsupported_message']
},
'card': {
'type': 'Simple',
'content': self.config['unsupported_message']
}
}
}
response = self._generate_response(response, request)
return response
|
def _handle_unsupported(self, request: dict) -> dict:
"""Handles all unsupported types of Alexa requests. Returns standard message.
Args:
request: Alexa request.
Returns:
response: "response" part of response dict conforming Alexa specification.
"""
response = {
'response': {
'shouldEndSession': False,
'outputSpeech': {
'type': 'PlainText',
'text': self.config['unsupported_message']
},
'card': {
'type': 'Simple',
'content': self.config['unsupported_message']
}
}
}
response = self._generate_response(response, request)
return response
|
[
"Handles",
"all",
"unsupported",
"types",
"of",
"Alexa",
"requests",
".",
"Returns",
"standard",
"message",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/utils/alexa/conversation.py#L214-L238
|
[
"def",
"_handle_unsupported",
"(",
"self",
",",
"request",
":",
"dict",
")",
"->",
"dict",
":",
"response",
"=",
"{",
"'response'",
":",
"{",
"'shouldEndSession'",
":",
"False",
",",
"'outputSpeech'",
":",
"{",
"'type'",
":",
"'PlainText'",
",",
"'text'",
":",
"self",
".",
"config",
"[",
"'unsupported_message'",
"]",
"}",
",",
"'card'",
":",
"{",
"'type'",
":",
"'Simple'",
",",
"'content'",
":",
"self",
".",
"config",
"[",
"'unsupported_message'",
"]",
"}",
"}",
"}",
"response",
"=",
"self",
".",
"_generate_response",
"(",
"response",
",",
"request",
")",
"return",
"response"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
test
|
Struct._repr_pretty_
|
method that defines ``Struct``'s pretty printing rules for IPython
Args:
p (IPython.lib.pretty.RepresentationPrinter): pretty printer object
cycle (bool): is ``True`` if pretty detected a cycle
|
deeppavlov/configs/__init__.py
|
def _repr_pretty_(self, p, cycle):
"""method that defines ``Struct``'s pretty printing rules for iPython
Args:
p (IPython.lib.pretty.RepresentationPrinter): pretty printer object
cycle (bool): is ``True`` if pretty detected a cycle
"""
if cycle:
p.text('Struct(...)')
else:
with p.group(7, 'Struct(', ')'):
p.pretty(self._asdict())
|
def _repr_pretty_(self, p, cycle):
"""method that defines ``Struct``'s pretty printing rules for iPython
Args:
p (IPython.lib.pretty.RepresentationPrinter): pretty printer object
cycle (bool): is ``True`` if pretty detected a cycle
"""
if cycle:
p.text('Struct(...)')
else:
with p.group(7, 'Struct(', ')'):
p.pretty(self._asdict())
|
[
"method",
"that",
"defines",
"Struct",
"s",
"pretty",
"printing",
"rules",
"for",
"iPython"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/configs/__init__.py#L56-L67
|
[
"def",
"_repr_pretty_",
"(",
"self",
",",
"p",
",",
"cycle",
")",
":",
"if",
"cycle",
":",
"p",
".",
"text",
"(",
"'Struct(...)'",
")",
"else",
":",
"with",
"p",
".",
"group",
"(",
"7",
",",
"'Struct('",
",",
"')'",
")",
":",
"p",
".",
"pretty",
"(",
"self",
".",
"_asdict",
"(",
")",
")"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
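IPython discovers the hook by duck typing: any object with a `_repr_pretty_(p, cycle)` method is rendered through it. A self-contained sketch (requires the `ipython` package; the `Point` class is invented for the example):

from IPython.lib.pretty import pretty

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

    def _asdict(self):
        return {"x": self.x, "y": self.y}

    def _repr_pretty_(self, p, cycle):
        if cycle:
            p.text("Point(...)")
        else:
            with p.group(6, "Point(", ")"):  # 6 = len("Point(") for alignment
                p.pretty(self._asdict())

print(pretty(Point(1, 2)))  # Point({'x': 1, 'y': 2})
|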
test
|
elmo_loss2ppl
|
Calculates perplexity by loss
Args:
losses: list of numpy arrays of model losses
Returns:
perplexity : float
|
deeppavlov/metrics/elmo_metrics.py
|
def elmo_loss2ppl(losses: List[np.ndarray]) -> float:
""" Calculates perplexity by loss
Args:
losses: list of numpy arrays of model losses
Returns:
perplexity : float
"""
avg_loss = np.mean(losses)
return float(np.exp(avg_loss))
|
def elmo_loss2ppl(losses: List[np.ndarray]) -> float:
""" Calculates perplexity by loss
Args:
losses: list of numpy arrays of model losses
Returns:
perplexity : float
"""
avg_loss = np.mean(losses)
return float(np.exp(avg_loss))
|
[
"Calculates",
"perplexity",
"by",
"loss"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/metrics/elmo_metrics.py#L23-L33
|
[
"def",
"elmo_loss2ppl",
"(",
"losses",
":",
"List",
"[",
"np",
".",
"ndarray",
"]",
")",
"->",
"float",
":",
"avg_loss",
"=",
"np",
".",
"mean",
"(",
"losses",
")",
"return",
"float",
"(",
"np",
".",
"exp",
"(",
"avg_loss",
")",
")"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
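Perplexity is the exponential of the mean per-token cross-entropy, so a quick numeric check of the function above:

import numpy as np

losses = [np.array([2.0, 2.2]), np.array([1.9, 2.1])]  # per-batch loss values
avg_loss = np.mean(losses)       # 2.05
print(float(np.exp(avg_loss)))   # ~7.77
|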
test
|
LanguageModel._build_loss
|
Create:
self.total_loss: total loss op for training
self.softmax_W, softmax_b: the softmax variables
self.next_token_id / _reverse: placeholders for gold input
|
deeppavlov/models/elmo/bilm_model.py
|
def _build_loss(self, lstm_outputs):
"""
Create:
self.total_loss: total loss op for training
self.softmax_W, softmax_b: the softmax variables
self.next_token_id / _reverse: placeholders for gold input
"""
batch_size = self.options['batch_size']
unroll_steps = self.options['unroll_steps']
n_tokens_vocab = self.options['n_tokens_vocab']
# DEFINE next_token_id and *_reverse placeholders for the gold input
def _get_next_token_placeholders(suffix):
name = 'next_token_id' + suffix
id_placeholder = tf.placeholder(DTYPE_INT,
shape=(batch_size, unroll_steps),
name=name)
return id_placeholder
# get the window and weight placeholders
self.next_token_id = _get_next_token_placeholders('')
if self.bidirectional:
self.next_token_id_reverse = _get_next_token_placeholders(
'_reverse')
# DEFINE THE SOFTMAX VARIABLES
# get the dimension of the softmax weights
# softmax dimension is the size of the output projection_dim
softmax_dim = self.options['lstm']['projection_dim']
# the output softmax variables -- they are shared if bidirectional
if self.share_embedding_softmax:
# softmax_W is just the embedding layer
self.softmax_W = self.embedding_weights
with tf.variable_scope('softmax'), tf.device('/cpu:0'):
# Glorot init (std = 1.0 / sqrt(fan_in))
softmax_init = tf.random_normal_initializer(0.0, 1.0 / np.sqrt(softmax_dim))
if not self.share_embedding_softmax:
self.softmax_W = tf.get_variable(
'W', [n_tokens_vocab, softmax_dim],
dtype=DTYPE,
initializer=softmax_init
)
self.softmax_b = tf.get_variable(
'b', [n_tokens_vocab],
dtype=DTYPE,
initializer=tf.constant_initializer(0.0))
# now calculate losses
# loss for each direction of the LSTM
self.individual_train_losses = []
self.individual_eval_losses = []
if self.bidirectional:
next_ids = [self.next_token_id, self.next_token_id_reverse]
else:
next_ids = [self.next_token_id]
for id_placeholder, lstm_output_flat in zip(next_ids, lstm_outputs):
# flatten the LSTM output and next token id gold to shape:
# (batch_size * unroll_steps, softmax_dim)
# Flatten and reshape the token_id placeholders
next_token_id_flat = tf.reshape(id_placeholder, [-1, 1])
with tf.control_dependencies([lstm_output_flat]):
sampled_losses = tf.nn.sampled_softmax_loss(self.softmax_W, self.softmax_b,
next_token_id_flat, lstm_output_flat,
self.options['n_negative_samples_batch'],
self.options['n_tokens_vocab'],
num_true=1)
# get the full softmax loss
output_scores = tf.matmul(
lstm_output_flat,
tf.transpose(self.softmax_W)
) + self.softmax_b
# NOTE: tf.nn.sparse_softmax_cross_entropy_with_logits
# expects unnormalized output since it performs the
# softmax internally
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=output_scores,
labels=tf.squeeze(next_token_id_flat, squeeze_dims=[1])
)
sampled_losses = tf.reshape(sampled_losses, [self.options['batch_size'], -1])
losses = tf.reshape(losses, [self.options['batch_size'], -1])
self.individual_train_losses.append(tf.reduce_mean(sampled_losses, axis=1))
self.individual_eval_losses.append(tf.reduce_mean(losses, axis=1))
# now make the total loss -- it's the mean of the individual losses
if self.bidirectional:
self.total_train_loss = 0.5 * (self.individual_train_losses[0] + self.individual_train_losses[1])
self.total_eval_loss = 0.5 * (self.individual_eval_losses[0] + self.individual_eval_losses[1])
else:
self.total_train_loss = self.individual_train_losses[0]
self.total_eval_loss = self.individual_eval_losses[0]
|
def _build_loss(self, lstm_outputs):
"""
Create:
self.total_loss: total loss op for training
self.softmax_W, softmax_b: the softmax variables
self.next_token_id / _reverse: placeholders for gold input
"""
batch_size = self.options['batch_size']
unroll_steps = self.options['unroll_steps']
n_tokens_vocab = self.options['n_tokens_vocab']
# DEFINE next_token_id and *_reverse placeholders for the gold input
def _get_next_token_placeholders(suffix):
name = 'next_token_id' + suffix
id_placeholder = tf.placeholder(DTYPE_INT,
shape=(batch_size, unroll_steps),
name=name)
return id_placeholder
# get the window and weight placeholders
self.next_token_id = _get_next_token_placeholders('')
if self.bidirectional:
self.next_token_id_reverse = _get_next_token_placeholders(
'_reverse')
# DEFINE THE SOFTMAX VARIABLES
# get the dimension of the softmax weights
# softmax dimension is the size of the output projection_dim
softmax_dim = self.options['lstm']['projection_dim']
# the output softmax variables -- they are shared if bidirectional
if self.share_embedding_softmax:
# softmax_W is just the embedding layer
self.softmax_W = self.embedding_weights
with tf.variable_scope('softmax'), tf.device('/cpu:0'):
# Glorot init (std = 1.0 / sqrt(fan_in))
softmax_init = tf.random_normal_initializer(0.0, 1.0 / np.sqrt(softmax_dim))
if not self.share_embedding_softmax:
self.softmax_W = tf.get_variable(
'W', [n_tokens_vocab, softmax_dim],
dtype=DTYPE,
initializer=softmax_init
)
self.softmax_b = tf.get_variable(
'b', [n_tokens_vocab],
dtype=DTYPE,
initializer=tf.constant_initializer(0.0))
# now calculate losses
# loss for each direction of the LSTM
self.individual_train_losses = []
self.individual_eval_losses = []
if self.bidirectional:
next_ids = [self.next_token_id, self.next_token_id_reverse]
else:
next_ids = [self.next_token_id]
for id_placeholder, lstm_output_flat in zip(next_ids, lstm_outputs):
# flatten the LSTM output and next token id gold to shape:
# (batch_size * unroll_steps, softmax_dim)
# Flatten and reshape the token_id placeholders
next_token_id_flat = tf.reshape(id_placeholder, [-1, 1])
with tf.control_dependencies([lstm_output_flat]):
sampled_losses = tf.nn.sampled_softmax_loss(self.softmax_W, self.softmax_b,
next_token_id_flat, lstm_output_flat,
self.options['n_negative_samples_batch'],
self.options['n_tokens_vocab'],
num_true=1)
# get the full softmax loss
output_scores = tf.matmul(
lstm_output_flat,
tf.transpose(self.softmax_W)
) + self.softmax_b
# NOTE: tf.nn.sparse_softmax_cross_entropy_with_logits
# expects unnormalized output since it performs the
# softmax internally
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=output_scores,
labels=tf.squeeze(next_token_id_flat, squeeze_dims=[1])
)
sampled_losses = tf.reshape(sampled_losses, [self.options['batch_size'], -1])
losses = tf.reshape(losses, [self.options['batch_size'], -1])
self.individual_train_losses.append(tf.reduce_mean(sampled_losses, axis=1))
self.individual_eval_losses.append(tf.reduce_mean(losses, axis=1))
# now make the total loss -- it's the mean of the individual losses
if self.bidirectional:
self.total_train_loss = 0.5 * (self.individual_train_losses[0] + self.individual_train_losses[1])
self.total_eval_loss = 0.5 * (self.individual_eval_losses[0] + self.individual_eval_losses[1])
else:
self.total_train_loss = self.individual_train_losses[0]
self.total_eval_loss = self.individual_eval_losses[0]
|
[
"Create",
":",
"self",
".",
"total_loss",
":",
"total",
"loss",
"op",
"for",
"training",
"self",
".",
"softmax_W",
"softmax_b",
":",
"the",
"softmax",
"variables",
"self",
".",
"next_token_id",
"/",
"_reverse",
":",
"placeholders",
"for",
"gold",
"input"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/elmo/bilm_model.py#L412-L509
|
[
"def",
"_build_loss",
"(",
"self",
",",
"lstm_outputs",
")",
":",
"batch_size",
"=",
"self",
".",
"options",
"[",
"'batch_size'",
"]",
"unroll_steps",
"=",
"self",
".",
"options",
"[",
"'unroll_steps'",
"]",
"n_tokens_vocab",
"=",
"self",
".",
"options",
"[",
"'n_tokens_vocab'",
"]",
"# DEFINE next_token_id and *_reverse placeholders for the gold input",
"def",
"_get_next_token_placeholders",
"(",
"suffix",
")",
":",
"name",
"=",
"'next_token_id'",
"+",
"suffix",
"id_placeholder",
"=",
"tf",
".",
"placeholder",
"(",
"DTYPE_INT",
",",
"shape",
"=",
"(",
"batch_size",
",",
"unroll_steps",
")",
",",
"name",
"=",
"name",
")",
"return",
"id_placeholder",
"# get the window and weight placeholders",
"self",
".",
"next_token_id",
"=",
"_get_next_token_placeholders",
"(",
"''",
")",
"if",
"self",
".",
"bidirectional",
":",
"self",
".",
"next_token_id_reverse",
"=",
"_get_next_token_placeholders",
"(",
"'_reverse'",
")",
"# DEFINE THE SOFTMAX VARIABLES",
"# get the dimension of the softmax weights",
"# softmax dimension is the size of the output projection_dim",
"softmax_dim",
"=",
"self",
".",
"options",
"[",
"'lstm'",
"]",
"[",
"'projection_dim'",
"]",
"# the output softmax variables -- they are shared if bidirectional",
"if",
"self",
".",
"share_embedding_softmax",
":",
"# softmax_W is just the embedding layer",
"self",
".",
"softmax_W",
"=",
"self",
".",
"embedding_weights",
"with",
"tf",
".",
"variable_scope",
"(",
"'softmax'",
")",
",",
"tf",
".",
"device",
"(",
"'/cpu:0'",
")",
":",
"# Glorit init (std=(1.0 / sqrt(fan_in))",
"softmax_init",
"=",
"tf",
".",
"random_normal_initializer",
"(",
"0.0",
",",
"1.0",
"/",
"np",
".",
"sqrt",
"(",
"softmax_dim",
")",
")",
"if",
"not",
"self",
".",
"share_embedding_softmax",
":",
"self",
".",
"softmax_W",
"=",
"tf",
".",
"get_variable",
"(",
"'W'",
",",
"[",
"n_tokens_vocab",
",",
"softmax_dim",
"]",
",",
"dtype",
"=",
"DTYPE",
",",
"initializer",
"=",
"softmax_init",
")",
"self",
".",
"softmax_b",
"=",
"tf",
".",
"get_variable",
"(",
"'b'",
",",
"[",
"n_tokens_vocab",
"]",
",",
"dtype",
"=",
"DTYPE",
",",
"initializer",
"=",
"tf",
".",
"constant_initializer",
"(",
"0.0",
")",
")",
"# now calculate losses",
"# loss for each direction of the LSTM",
"self",
".",
"individual_train_losses",
"=",
"[",
"]",
"self",
".",
"individual_eval_losses",
"=",
"[",
"]",
"if",
"self",
".",
"bidirectional",
":",
"next_ids",
"=",
"[",
"self",
".",
"next_token_id",
",",
"self",
".",
"next_token_id_reverse",
"]",
"else",
":",
"next_ids",
"=",
"[",
"self",
".",
"next_token_id",
"]",
"for",
"id_placeholder",
",",
"lstm_output_flat",
"in",
"zip",
"(",
"next_ids",
",",
"lstm_outputs",
")",
":",
"# flatten the LSTM output and next token id gold to shape:",
"# (batch_size * unroll_steps, softmax_dim)",
"# Flatten and reshape the token_id placeholders",
"next_token_id_flat",
"=",
"tf",
".",
"reshape",
"(",
"id_placeholder",
",",
"[",
"-",
"1",
",",
"1",
"]",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"lstm_output_flat",
"]",
")",
":",
"sampled_losses",
"=",
"tf",
".",
"nn",
".",
"sampled_softmax_loss",
"(",
"self",
".",
"softmax_W",
",",
"self",
".",
"softmax_b",
",",
"next_token_id_flat",
",",
"lstm_output_flat",
",",
"self",
".",
"options",
"[",
"'n_negative_samples_batch'",
"]",
",",
"self",
".",
"options",
"[",
"'n_tokens_vocab'",
"]",
",",
"num_true",
"=",
"1",
")",
"# get the full softmax loss",
"output_scores",
"=",
"tf",
".",
"matmul",
"(",
"lstm_output_flat",
",",
"tf",
".",
"transpose",
"(",
"self",
".",
"softmax_W",
")",
")",
"+",
"self",
".",
"softmax_b",
"# NOTE: tf.nn.sparse_softmax_cross_entropy_with_logits",
"# expects unnormalized output since it performs the",
"# softmax internally",
"losses",
"=",
"tf",
".",
"nn",
".",
"sparse_softmax_cross_entropy_with_logits",
"(",
"logits",
"=",
"output_scores",
",",
"labels",
"=",
"tf",
".",
"squeeze",
"(",
"next_token_id_flat",
",",
"squeeze_dims",
"=",
"[",
"1",
"]",
")",
")",
"sampled_losses",
"=",
"tf",
".",
"reshape",
"(",
"sampled_losses",
",",
"[",
"self",
".",
"options",
"[",
"'batch_size'",
"]",
",",
"-",
"1",
"]",
")",
"losses",
"=",
"tf",
".",
"reshape",
"(",
"losses",
",",
"[",
"self",
".",
"options",
"[",
"'batch_size'",
"]",
",",
"-",
"1",
"]",
")",
"self",
".",
"individual_train_losses",
".",
"append",
"(",
"tf",
".",
"reduce_mean",
"(",
"sampled_losses",
",",
"axis",
"=",
"1",
")",
")",
"self",
".",
"individual_eval_losses",
".",
"append",
"(",
"tf",
".",
"reduce_mean",
"(",
"losses",
",",
"axis",
"=",
"1",
")",
")",
"# now make the total loss -- it's the train of the individual losses",
"if",
"self",
".",
"bidirectional",
":",
"self",
".",
"total_train_loss",
"=",
"0.5",
"*",
"(",
"self",
".",
"individual_train_losses",
"[",
"0",
"]",
"+",
"self",
".",
"individual_train_losses",
"[",
"1",
"]",
")",
"self",
".",
"total_eval_loss",
"=",
"0.5",
"*",
"(",
"self",
".",
"individual_eval_losses",
"[",
"0",
"]",
"+",
"self",
".",
"individual_eval_losses",
"[",
"1",
"]",
")",
"else",
":",
"self",
".",
"total_train_loss",
"=",
"self",
".",
"individual_train_losses",
"[",
"0",
"]",
"self",
".",
"total_eval_loss",
"=",
"self",
".",
"individual_eval_losses",
"[",
"0",
"]"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
test
|
build_model
|
Build and return the model described in corresponding configuration file.
|
deeppavlov/core/commands/infer.py
|
def build_model(config: Union[str, Path, dict], mode: str = 'infer',
load_trained: bool = False, download: bool = False,
serialized: Optional[bytes] = None) -> Chainer:
"""Build and return the model described in corresponding configuration file."""
config = parse_config(config)
if serialized:
serialized: list = pickle.loads(serialized)
if download:
deep_download(config)
import_packages(config.get('metadata', {}).get('imports', []))
model_config = config['chainer']
model = Chainer(model_config['in'], model_config['out'], model_config.get('in_y'))
for component_config in model_config['pipe']:
if load_trained and ('fit_on' in component_config or 'in_y' in component_config):
try:
component_config['load_path'] = component_config['save_path']
except KeyError:
log.warning('No "save_path" parameter for the {} component, so "load_path" will not be renewed'
.format(component_config.get('class_name', component_config.get('ref', 'UNKNOWN'))))
if serialized and 'in' in component_config:
component_serialized = serialized.pop(0)
else:
component_serialized = None
component = from_params(component_config, mode=mode, serialized=component_serialized)
if 'in' in component_config:
c_in = component_config['in']
c_out = component_config['out']
in_y = component_config.get('in_y', None)
main = component_config.get('main', False)
model.append(component, c_in, c_out, in_y, main)
return model
|
def build_model(config: Union[str, Path, dict], mode: str = 'infer',
load_trained: bool = False, download: bool = False,
serialized: Optional[bytes] = None) -> Chainer:
"""Build and return the model described in corresponding configuration file."""
config = parse_config(config)
if serialized:
serialized: list = pickle.loads(serialized)
if download:
deep_download(config)
import_packages(config.get('metadata', {}).get('imports', []))
model_config = config['chainer']
model = Chainer(model_config['in'], model_config['out'], model_config.get('in_y'))
for component_config in model_config['pipe']:
if load_trained and ('fit_on' in component_config or 'in_y' in component_config):
try:
component_config['load_path'] = component_config['save_path']
except KeyError:
log.warning('No "save_path" parameter for the {} component, so "load_path" will not be renewed'
.format(component_config.get('class_name', component_config.get('ref', 'UNKNOWN'))))
if serialized and 'in' in component_config:
component_serialized = serialized.pop(0)
else:
component_serialized = None
component = from_params(component_config, mode=mode, serialized=component_serialized)
if 'in' in component_config:
c_in = component_config['in']
c_out = component_config['out']
in_y = component_config.get('in_y', None)
main = component_config.get('main', False)
model.append(component, c_in, c_out, in_y, main)
return model
|
[
"Build",
"and",
"return",
"the",
"model",
"described",
"in",
"corresponding",
"configuration",
"file",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/commands/infer.py#L30-L70
|
[
"def",
"build_model",
"(",
"config",
":",
"Union",
"[",
"str",
",",
"Path",
",",
"dict",
"]",
",",
"mode",
":",
"str",
"=",
"'infer'",
",",
"load_trained",
":",
"bool",
"=",
"False",
",",
"download",
":",
"bool",
"=",
"False",
",",
"serialized",
":",
"Optional",
"[",
"bytes",
"]",
"=",
"None",
")",
"->",
"Chainer",
":",
"config",
"=",
"parse_config",
"(",
"config",
")",
"if",
"serialized",
":",
"serialized",
":",
"list",
"=",
"pickle",
".",
"loads",
"(",
"serialized",
")",
"if",
"download",
":",
"deep_download",
"(",
"config",
")",
"import_packages",
"(",
"config",
".",
"get",
"(",
"'metadata'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'imports'",
",",
"[",
"]",
")",
")",
"model_config",
"=",
"config",
"[",
"'chainer'",
"]",
"model",
"=",
"Chainer",
"(",
"model_config",
"[",
"'in'",
"]",
",",
"model_config",
"[",
"'out'",
"]",
",",
"model_config",
".",
"get",
"(",
"'in_y'",
")",
")",
"for",
"component_config",
"in",
"model_config",
"[",
"'pipe'",
"]",
":",
"if",
"load_trained",
"and",
"(",
"'fit_on'",
"in",
"component_config",
"or",
"'in_y'",
"in",
"component_config",
")",
":",
"try",
":",
"component_config",
"[",
"'load_path'",
"]",
"=",
"component_config",
"[",
"'save_path'",
"]",
"except",
"KeyError",
":",
"log",
".",
"warning",
"(",
"'No \"save_path\" parameter for the {} component, so \"load_path\" will not be renewed'",
".",
"format",
"(",
"component_config",
".",
"get",
"(",
"'class_name'",
",",
"component_config",
".",
"get",
"(",
"'ref'",
",",
"'UNKNOWN'",
")",
")",
")",
")",
"if",
"serialized",
"and",
"'in'",
"in",
"component_config",
":",
"component_serialized",
"=",
"serialized",
".",
"pop",
"(",
"0",
")",
"else",
":",
"component_serialized",
"=",
"None",
"component",
"=",
"from_params",
"(",
"component_config",
",",
"mode",
"=",
"mode",
",",
"serialized",
"=",
"component_serialized",
")",
"if",
"'in'",
"in",
"component_config",
":",
"c_in",
"=",
"component_config",
"[",
"'in'",
"]",
"c_out",
"=",
"component_config",
"[",
"'out'",
"]",
"in_y",
"=",
"component_config",
".",
"get",
"(",
"'in_y'",
",",
"None",
")",
"main",
"=",
"component_config",
".",
"get",
"(",
"'main'",
",",
"False",
")",
"model",
".",
"append",
"(",
"component",
",",
"c_in",
",",
"c_out",
",",
"in_y",
",",
"main",
")",
"return",
"model"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
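Typical usage, assuming the `deeppavlov` package is installed (the first run downloads the model files; the config name is one of the bundled spelling-correction configs):

from deeppavlov import build_model, configs

model = build_model(configs.spelling_correction.brillmoore_wikitypos_en,
                    download=True)
print(model(["helllo wrold"]))
|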
test
|
interact_model
|
Start interaction with the model described in corresponding configuration file.
|
deeppavlov/core/commands/infer.py
|
def interact_model(config: Union[str, Path, dict]) -> None:
"""Start interaction with the model described in corresponding configuration file."""
model = build_model(config)
while True:
args = []
for in_x in model.in_x:
args.append((input('{}::'.format(in_x)),))
# check for exit command
if args[-1][0] in {'exit', 'stop', 'quit', 'q'}:
return
pred = model(*args)
if len(model.out_params) > 1:
pred = zip(*pred)
print('>>', *pred)
|
def interact_model(config: Union[str, Path, dict]) -> None:
"""Start interaction with the model described in corresponding configuration file."""
model = build_model(config)
while True:
args = []
for in_x in model.in_x:
args.append((input('{}::'.format(in_x)),))
# check for exit command
if args[-1][0] in {'exit', 'stop', 'quit', 'q'}:
return
pred = model(*args)
if len(model.out_params) > 1:
pred = zip(*pred)
print('>>', *pred)
|
[
"Start",
"interaction",
"with",
"the",
"model",
"described",
"in",
"corresponding",
"configuration",
"file",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/commands/infer.py#L73-L89
|
[
"def",
"interact_model",
"(",
"config",
":",
"Union",
"[",
"str",
",",
"Path",
",",
"dict",
"]",
")",
"->",
"None",
":",
"model",
"=",
"build_model",
"(",
"config",
")",
"while",
"True",
":",
"args",
"=",
"[",
"]",
"for",
"in_x",
"in",
"model",
".",
"in_x",
":",
"args",
".",
"append",
"(",
"(",
"input",
"(",
"'{}::'",
".",
"format",
"(",
"in_x",
")",
")",
",",
")",
")",
"# check for exit command",
"if",
"args",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
"in",
"{",
"'exit'",
",",
"'stop'",
",",
"'quit'",
",",
"'q'",
"}",
":",
"return",
"pred",
"=",
"model",
"(",
"*",
"args",
")",
"if",
"len",
"(",
"model",
".",
"out_params",
")",
">",
"1",
":",
"pred",
"=",
"zip",
"(",
"*",
"pred",
")",
"print",
"(",
"'>>'",
",",
"*",
"pred",
")"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
test
|
predict_on_stream
|
Make a prediction with the component described in corresponding configuration file.
|
deeppavlov/core/commands/infer.py
|
def predict_on_stream(config: Union[str, Path, dict], batch_size: int = 1, file_path: Optional[str] = None) -> None:
"""Make a prediction with the component described in corresponding configuration file."""
if file_path is None or file_path == '-':
if sys.stdin.isatty():
raise RuntimeError('To process data from terminal please use interact mode')
f = sys.stdin
else:
f = open(file_path, encoding='utf8')
model: Chainer = build_model(config)
args_count = len(model.in_x)
while True:
batch = list((l.strip() for l in islice(f, batch_size * args_count)))
if not batch:
break
args = []
for i in range(args_count):
args.append(batch[i::args_count])
res = model(*args)
if len(model.out_params) == 1:
res = [res]
for res in zip(*res):
res = json.dumps(res, ensure_ascii=False)
print(res, flush=True)
if f is not sys.stdin:
f.close()
|
def predict_on_stream(config: Union[str, Path, dict], batch_size: int = 1, file_path: Optional[str] = None) -> None:
"""Make a prediction with the component described in corresponding configuration file."""
if file_path is None or file_path == '-':
if sys.stdin.isatty():
raise RuntimeError('To process data from terminal please use interact mode')
f = sys.stdin
else:
f = open(file_path, encoding='utf8')
model: Chainer = build_model(config)
args_count = len(model.in_x)
while True:
batch = list((l.strip() for l in islice(f, batch_size * args_count)))
if not batch:
break
args = []
for i in range(args_count):
args.append(batch[i::args_count])
res = model(*args)
if len(model.out_params) == 1:
res = [res]
for res in zip(*res):
res = json.dumps(res, ensure_ascii=False)
print(res, flush=True)
if f is not sys.stdin:
f.close()
|
[
"Make",
"a",
"prediction",
"with",
"the",
"component",
"described",
"in",
"corresponding",
"configuration",
"file",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/commands/infer.py#L92-L122
|
[
"def",
"predict_on_stream",
"(",
"config",
":",
"Union",
"[",
"str",
",",
"Path",
",",
"dict",
"]",
",",
"batch_size",
":",
"int",
"=",
"1",
",",
"file_path",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"None",
":",
"if",
"file_path",
"is",
"None",
"or",
"file_path",
"==",
"'-'",
":",
"if",
"sys",
".",
"stdin",
".",
"isatty",
"(",
")",
":",
"raise",
"RuntimeError",
"(",
"'To process data from terminal please use interact mode'",
")",
"f",
"=",
"sys",
".",
"stdin",
"else",
":",
"f",
"=",
"open",
"(",
"file_path",
",",
"encoding",
"=",
"'utf8'",
")",
"model",
":",
"Chainer",
"=",
"build_model",
"(",
"config",
")",
"args_count",
"=",
"len",
"(",
"model",
".",
"in_x",
")",
"while",
"True",
":",
"batch",
"=",
"list",
"(",
"(",
"l",
".",
"strip",
"(",
")",
"for",
"l",
"in",
"islice",
"(",
"f",
",",
"batch_size",
"*",
"args_count",
")",
")",
")",
"if",
"not",
"batch",
":",
"break",
"args",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"args_count",
")",
":",
"args",
".",
"append",
"(",
"batch",
"[",
"i",
":",
":",
"args_count",
"]",
")",
"res",
"=",
"model",
"(",
"*",
"args",
")",
"if",
"len",
"(",
"model",
".",
"out_params",
")",
"==",
"1",
":",
"res",
"=",
"[",
"res",
"]",
"for",
"res",
"in",
"zip",
"(",
"*",
"res",
")",
":",
"res",
"=",
"json",
".",
"dumps",
"(",
"res",
",",
"ensure_ascii",
"=",
"False",
")",
"print",
"(",
"res",
",",
"flush",
"=",
"True",
")",
"if",
"f",
"is",
"not",
"sys",
".",
"stdin",
":",
"f",
".",
"close",
"(",
")"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
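The batching idiom above in isolation: `islice` pulls at most `batch_size * args_count` stripped lines per loop, and the interleaved arguments are recovered by striding. A standalone sketch with an in-memory stream:

import io
from itertools import islice

f = io.StringIO("q1\ncontext1\nq2\ncontext2\nq3\ncontext3\n")
batch_size, args_count = 2, 2  # e.g. a model taking (question, context) pairs
while True:
    batch = [l.strip() for l in islice(f, batch_size * args_count)]
    if not batch:
        break
    args = [batch[i::args_count] for i in range(args_count)]
    print(args)
# [['q1', 'q2'], ['context1', 'context2']]
# [['q3'], ['context3']]
|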
test
|
read_infile
|
Reads input file in CONLL-U format
Args:
infile: a path to a file
word_column: column containing words (default=1)
pos_column: column containing part-of-speech labels (default=3)
tag_column: column containing fine-grained tags (default=5)
max_sents: maximal number of sents to read
read_only_words: whether to read only words
Returns:
a list of sentences. Each item contains a word sequence and a tag sequence, which is ``None``
in case ``read_only_words = True``
|
deeppavlov/dataset_readers/morphotagging_dataset_reader.py
|
def read_infile(infile: Union[Path, str], from_words=False,
word_column: int = WORD_COLUMN, pos_column: int = POS_COLUMN,
tag_column: int = TAG_COLUMN, max_sents: int = -1,
read_only_words: bool = False) -> List[Tuple[List, Union[List, None]]]:
"""Reads input file in CONLL-U format
Args:
infile: a path to a file
word_column: column containing words (default=1)
pos_column: column containing part-of-speech labels (default=3)
tag_column: column containing fine-grained tags (default=5)
max_sents: maximal number of sents to read
read_only_words: whether to read only words
Returns:
a list of sentences. Each item contains a word sequence and a tag sequence, which is ``None``
in case ``read_only_words = True``
"""
answer, curr_word_sent, curr_tag_sent = [], [], []
if from_words:
word_column, read_only_words = 0, True
with open(infile, "r", encoding="utf8") as fin:
for line in fin:
line = line.strip()
if line.startswith("#"):
continue
if line == "":
if len(curr_word_sent) > 0:
if read_only_words:
curr_tag_sent = None
answer.append((curr_word_sent, curr_tag_sent))
curr_tag_sent, curr_word_sent = [], []
if len(answer) == max_sents:
break
continue
splitted = line.split("\t")
index = splitted[0]
if not from_words and not index.isdigit():
continue
curr_word_sent.append(splitted[word_column])
if not read_only_words:
pos, tag = splitted[pos_column], splitted[tag_column]
tag = pos if tag == "_" else "{},{}".format(pos, tag)
curr_tag_sent.append(tag)
if len(curr_word_sent) > 0:
if read_only_words:
curr_tag_sent = None
answer.append((curr_word_sent, curr_tag_sent))
return answer
|
def read_infile(infile: Union[Path, str], from_words=False,
word_column: int = WORD_COLUMN, pos_column: int = POS_COLUMN,
tag_column: int = TAG_COLUMN, max_sents: int = -1,
read_only_words: bool = False) -> List[Tuple[List, Union[List, None]]]:
"""Reads input file in CONLL-U format
Args:
infile: a path to a file
word_column: column containing words (default=1)
pos_column: column containing part-of-speech labels (default=3)
tag_column: column containing fine-grained tags (default=5)
max_sents: maximal number of sents to read
read_only_words: whether to read only words
Returns:
a list of sentences. Each item contains a word sequence and a tag sequence, which is ``None``
in case ``read_only_words = True``
"""
answer, curr_word_sent, curr_tag_sent = [], [], []
if from_words:
word_column, read_only_words = 0, True
with open(infile, "r", encoding="utf8") as fin:
for line in fin:
line = line.strip()
if line.startswith("#"):
continue
if line == "":
if len(curr_word_sent) > 0:
if read_only_words:
curr_tag_sent = None
answer.append((curr_word_sent, curr_tag_sent))
curr_tag_sent, curr_word_sent = [], []
if len(answer) == max_sents:
break
continue
splitted = line.split("\t")
index = splitted[0]
if not from_words and not index.isdigit():
continue
curr_word_sent.append(splitted[word_column])
if not read_only_words:
pos, tag = splitted[pos_column], splitted[tag_column]
tag = pos if tag == "_" else "{},{}".format(pos, tag)
curr_tag_sent.append(tag)
if len(curr_word_sent) > 0:
if read_only_words:
curr_tag_sent = None
answer.append((curr_word_sent, curr_tag_sent))
return answer
|
[
"Reads",
"input",
"file",
"in",
"CONLL",
"-",
"U",
"format"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/dataset_readers/morphotagging_dataset_reader.py#L33-L81
|
[
"def",
"read_infile",
"(",
"infile",
":",
"Union",
"[",
"Path",
",",
"str",
"]",
",",
"from_words",
"=",
"False",
",",
"word_column",
":",
"int",
"=",
"WORD_COLUMN",
",",
"pos_column",
":",
"int",
"=",
"POS_COLUMN",
",",
"tag_column",
":",
"int",
"=",
"TAG_COLUMN",
",",
"max_sents",
":",
"int",
"=",
"-",
"1",
",",
"read_only_words",
":",
"bool",
"=",
"False",
")",
"->",
"List",
"[",
"Tuple",
"[",
"List",
",",
"Union",
"[",
"List",
",",
"None",
"]",
"]",
"]",
":",
"answer",
",",
"curr_word_sent",
",",
"curr_tag_sent",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"if",
"from_words",
":",
"word_column",
",",
"read_only_words",
"=",
"0",
",",
"True",
"with",
"open",
"(",
"infile",
",",
"\"r\"",
",",
"encoding",
"=",
"\"utf8\"",
")",
"as",
"fin",
":",
"for",
"line",
"in",
"fin",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line",
".",
"startswith",
"(",
"\"#\"",
")",
":",
"continue",
"if",
"line",
"==",
"\"\"",
":",
"if",
"len",
"(",
"curr_word_sent",
")",
">",
"0",
":",
"if",
"read_only_words",
":",
"curr_tag_sent",
"=",
"None",
"answer",
".",
"append",
"(",
"(",
"curr_word_sent",
",",
"curr_tag_sent",
")",
")",
"curr_tag_sent",
",",
"curr_word_sent",
"=",
"[",
"]",
",",
"[",
"]",
"if",
"len",
"(",
"answer",
")",
"==",
"max_sents",
":",
"break",
"continue",
"splitted",
"=",
"line",
".",
"split",
"(",
"\"\\t\"",
")",
"index",
"=",
"splitted",
"[",
"0",
"]",
"if",
"not",
"from_words",
"and",
"not",
"index",
".",
"isdigit",
"(",
")",
":",
"continue",
"curr_word_sent",
".",
"append",
"(",
"splitted",
"[",
"word_column",
"]",
")",
"if",
"not",
"read_only_words",
":",
"pos",
",",
"tag",
"=",
"splitted",
"[",
"pos_column",
"]",
",",
"splitted",
"[",
"tag_column",
"]",
"tag",
"=",
"pos",
"if",
"tag",
"==",
"\"_\"",
"else",
"\"{},{}\"",
".",
"format",
"(",
"pos",
",",
"tag",
")",
"curr_tag_sent",
".",
"append",
"(",
"tag",
")",
"if",
"len",
"(",
"curr_word_sent",
")",
">",
"0",
":",
"if",
"read_only_words",
":",
"curr_tag_sent",
"=",
"None",
"answer",
".",
"append",
"(",
"(",
"curr_word_sent",
",",
"curr_tag_sent",
")",
")",
"return",
"answer"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
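A two-token CoNLL-U fragment and the column logic re-checked standalone (the sentence is made up; the column indices assume WORD_COLUMN=1, POS_COLUMN=3, TAG_COLUMN=5 as stated above):

conllu = (
    "# sent_id = 1\n"
    "1\tDogs\tdog\tNOUN\t_\tNumber=Plur\t2\tnsubj\t_\t_\n"
    "2\tbark\tbark\tVERB\t_\tNumber=Plur\t0\troot\t_\t_\n"
)
words, tags = [], []
for line in conllu.splitlines():
    line = line.strip()
    if not line or line.startswith("#"):
        continue
    cols = line.split("\t")
    if not cols[0].isdigit():  # skips multiword ranges like "1-2"
        continue
    words.append(cols[1])
    pos, tag = cols[3], cols[5]
    tags.append(pos if tag == "_" else "{},{}".format(pos, tag))
print(words)  # ['Dogs', 'bark']
print(tags)   # ['NOUN,Number=Plur', 'VERB,Number=Plur']
|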
test
|
preprocess_data
|
Processes all words in data using
:func:`~deeppavlov.dataset_iterators.morphotagger_iterator.process_word`.
Args:
data: a list of pairs (words, tags), each pair corresponds to a single sentence
to_lower: whether to lowercase
append_case: whether to add case mark
Returns:
a list of preprocessed sentences
|
deeppavlov/dataset_iterators/morphotagger_iterator.py
|
def preprocess_data(data: List[Tuple[List[str], List[str]]], to_lower: bool = True,
append_case: str = "first") -> List[Tuple[List[Tuple[str]], List[str]]]:
"""Processes all words in data using
:func:`~deeppavlov.dataset_iterators.morphotagger_iterator.process_word`.
Args:
data: a list of pairs (words, tags), each pair corresponds to a single sentence
to_lower: whether to lowercase
append_case: whether to add case mark
Returns:
a list of preprocessed sentences
"""
new_data = []
for words, tags in data:
new_words = [process_word(word, to_lower=to_lower, append_case=append_case)
for word in words]
# tags could also be processed in future
new_tags = tags
new_data.append((new_words, new_tags))
return new_data
|
def preprocess_data(data: List[Tuple[List[str], List[str]]], to_lower: bool = True,
append_case: str = "first") -> List[Tuple[List[Tuple[str]], List[str]]]:
"""Processes all words in data using
:func:`~deeppavlov.dataset_iterators.morphotagger_iterator.process_word`.
Args:
data: a list of pairs (words, tags), each pair corresponds to a single sentence
to_lower: whether to lowercase
append_case: whether to add case mark
Returns:
a list of preprocessed sentences
"""
new_data = []
for words, tags in data:
new_words = [process_word(word, to_lower=to_lower, append_case=append_case)
for word in words]
# tags could also be processed in future
new_tags = tags
new_data.append((new_words, new_tags))
return new_data
|
[
"Processes",
"all",
"words",
"in",
"data",
"using",
":",
"func",
":",
"~deeppavlov",
".",
"dataset_iterators",
".",
"morphotagger_iterator",
".",
"process_word",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/dataset_iterators/morphotagger_iterator.py#L25-L45
|
[
"def",
"preprocess_data",
"(",
"data",
":",
"List",
"[",
"Tuple",
"[",
"List",
"[",
"str",
"]",
",",
"List",
"[",
"str",
"]",
"]",
"]",
",",
"to_lower",
":",
"bool",
"=",
"True",
",",
"append_case",
":",
"str",
"=",
"\"first\"",
")",
"->",
"List",
"[",
"Tuple",
"[",
"List",
"[",
"Tuple",
"[",
"str",
"]",
"]",
",",
"List",
"[",
"str",
"]",
"]",
"]",
":",
"new_data",
"=",
"[",
"]",
"for",
"words",
",",
"tags",
"in",
"data",
":",
"new_words",
"=",
"[",
"process_word",
"(",
"word",
",",
"to_lower",
"=",
"to_lower",
",",
"append_case",
"=",
"append_case",
")",
"for",
"word",
"in",
"words",
"]",
"# tags could also be processed in future",
"new_tags",
"=",
"tags",
"new_data",
".",
"append",
"(",
"(",
"new_words",
",",
"new_tags",
")",
")",
"return",
"new_data"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
test
|
fn_from_str
|
Returns a function object with the name given in string.
|
deeppavlov/core/common/metrics_registry.py
|
def fn_from_str(name: str) -> Callable[..., Any]:
"""Returns a function object with the name given in string."""
try:
module_name, fn_name = name.split(':')
except ValueError:
raise ConfigError('Expected function description in a `module.submodules:function_name` form, but got `{}`'
.format(name))
return getattr(importlib.import_module(module_name), fn_name)
|
def fn_from_str(name: str) -> Callable[..., Any]:
"""Returns a function object with the name given in string."""
try:
module_name, fn_name = name.split(':')
except ValueError:
raise ConfigError('Expected function description in a `module.submodules:function_name` form, but got `{}`'
.format(name))
return getattr(importlib.import_module(module_name), fn_name)
|
[
"Returns",
"a",
"function",
"object",
"with",
"the",
"name",
"given",
"in",
"string",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/common/metrics_registry.py#L19-L27
|
[
"def",
"fn_from_str",
"(",
"name",
":",
"str",
")",
"->",
"Callable",
"[",
"...",
",",
"Any",
"]",
":",
"try",
":",
"module_name",
",",
"fn_name",
"=",
"name",
".",
"split",
"(",
"':'",
")",
"except",
"ValueError",
":",
"raise",
"ConfigError",
"(",
"'Expected function description in a `module.submodules:function_name` form, but got `{}`'",
".",
"format",
"(",
"name",
")",
")",
"return",
"getattr",
"(",
"importlib",
".",
"import_module",
"(",
"module_name",
")",
",",
"fn_name",
")"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
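The `module.submodules:function_name` convention resolved by hand, using only the standard library:

import importlib

name = "os.path:join"
module_name, fn_name = name.split(":")
fn = getattr(importlib.import_module(module_name), fn_name)
print(fn("a", "b"))  # a/b (a\b on Windows)
|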
test
|
register_metric
|
Decorator for metric registration.
|
deeppavlov/core/common/metrics_registry.py
|
def register_metric(metric_name: str) -> Callable[..., Any]:
"""Decorator for metric registration."""
def decorate(fn):
fn_name = fn.__module__ + ':' + fn.__name__
if metric_name in _REGISTRY and _REGISTRY[metric_name] != fn_name:
log.warning('"{}" is already registered as a metric name, the old function will be ignored'
.format(metric_name))
_REGISTRY[metric_name] = fn_name
return fn
return decorate
|
def register_metric(metric_name: str) -> Callable[..., Any]:
"""Decorator for metric registration."""
def decorate(fn):
fn_name = fn.__module__ + ':' + fn.__name__
if metric_name in _REGISTRY and _REGISTRY[metric_name] != fn_name:
log.warning('"{}" is already registered as a metric name, the old function will be ignored'
.format(metric_name))
_REGISTRY[metric_name] = fn_name
return fn
return decorate
|
[
"Decorator",
"for",
"metric",
"registration",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/common/metrics_registry.py#L30-L39
|
[
"def",
"register_metric",
"(",
"metric_name",
":",
"str",
")",
"->",
"Callable",
"[",
"...",
",",
"Any",
"]",
":",
"def",
"decorate",
"(",
"fn",
")",
":",
"fn_name",
"=",
"fn",
".",
"__module__",
"+",
"':'",
"+",
"fn",
".",
"__name__",
"if",
"metric_name",
"in",
"_REGISTRY",
"and",
"_REGISTRY",
"[",
"metric_name",
"]",
"!=",
"fn_name",
":",
"log",
".",
"warning",
"(",
"'\"{}\" is already registered as a metric name, the old function will be ignored'",
".",
"format",
"(",
"metric_name",
")",
")",
"_REGISTRY",
"[",
"metric_name",
"]",
"=",
"fn_name",
"return",
"fn",
"return",
"decorate"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
test
|
get_metric_by_name
|
Returns a metric callable with a corresponding name.
|
deeppavlov/core/common/metrics_registry.py
|
def get_metric_by_name(name: str) -> Callable[..., Any]:
"""Returns a metric callable with a corresponding name."""
if name not in _REGISTRY:
raise ConfigError(f'"{name}" is not registered as a metric')
return fn_from_str(_REGISTRY[name])
|
def get_metric_by_name(name: str) -> Callable[..., Any]:
"""Returns a metric callable with a corresponding name."""
if name not in _REGISTRY:
raise ConfigError(f'"{name}" is not registered as a metric')
return fn_from_str(_REGISTRY[name])
|
[
"Returns",
"a",
"metric",
"callable",
"with",
"a",
"corresponding",
"name",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/common/metrics_registry.py#L42-L46
|
[
"def",
"get_metric_by_name",
"(",
"name",
":",
"str",
")",
"->",
"Callable",
"[",
"...",
",",
"Any",
"]",
":",
"if",
"name",
"not",
"in",
"_REGISTRY",
":",
"raise",
"ConfigError",
"(",
"f'\"{name}\" is not registered as a metric'",
")",
"return",
"fn_from_str",
"(",
"_REGISTRY",
"[",
"name",
"]",
")"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
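A usage sketch tying the two registry functions above together, assuming DeepPavlov is installed; 'toy_accuracy' is a made-up metric name.

from deeppavlov.core.common.metrics_registry import register_metric, get_metric_by_name

@register_metric('toy_accuracy')             # stores '<module>:toy_accuracy' in _REGISTRY
def toy_accuracy(y_true, y_predicted):
    hits = sum(t == p for t, p in zip(y_true, y_predicted))
    return hits / max(len(y_true), 1)

metric = get_metric_by_name('toy_accuracy')  # resolved back through fn_from_str
print(metric([1, 0, 1], [1, 1, 1]))          # 0.666...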
test
|
DecayType.from_str
|
Convert given string label of decay type to special index
Args:
label: name of decay type.
Set of values: `"linear"`, `"cosine"`, `"exponential"`,
`"onecycle"`, `"trapezoid"`, `["polynomial", K]`, where K is a polynomial power
Returns:
index of decay type
|
deeppavlov/core/models/lr_scheduled_model.py
|
def from_str(cls, label: str) -> int:
"""
Convert given string label of decay type to special index
Args:
label: name of decay type.
Set of values: `"linear"`, `"cosine"`, `"exponential"`,
`"onecycle"`, `"trapezoid"`, `["polynomial", K]`, where K is a polynomial power
Returns:
index of decay type
"""
label_norm = label.replace('1', 'one').upper()
if label_norm in cls.__members__:
return DecayType[label_norm]
else:
raise NotImplementedError
|
def from_str(cls, label: str) -> int:
"""
Convert given string label of decay type to special index
Args:
label: name of decay type.
Set of values: `"linear"`, `"cosine"`, `"exponential"`,
`"onecycle"`, `"trapezoid"`, `["polynomial", K]`, where K is a polynomial power
Returns:
index of decay type
"""
label_norm = label.replace('1', 'one').upper()
if label_norm in cls.__members__:
return DecayType[label_norm]
else:
raise NotImplementedError
|
[
"Convert",
"given",
"string",
"label",
"of",
"decay",
"type",
"to",
"special",
"index"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/models/lr_scheduled_model.py#L40-L56
|
[
"def",
"from_str",
"(",
"cls",
",",
"label",
":",
"str",
")",
"->",
"int",
":",
"label_norm",
"=",
"label",
".",
"replace",
"(",
"'1'",
",",
"'one'",
")",
".",
"upper",
"(",
")",
"if",
"label_norm",
"in",
"cls",
".",
"__members__",
":",
"return",
"DecayType",
"[",
"label_norm",
"]",
"else",
":",
"raise",
"NotImplementedError"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
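Usage sketch for DecayType.from_str, assuming DeepPavlov is installed: the label is normalized ('1' -> 'one', then uppercased) before the enum lookup.

from deeppavlov.core.models.lr_scheduled_model import DecayType

print(DecayType.from_str('1cycle'))   # DecayType.ONECYCLE
print(DecayType.from_str('cosine'))   # DecayType.COSINE
# any label that does not normalize to a DecayType member name raises NotImplementedError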
test
|
LRScheduledModel.fit
|
Find the best learning rate schedule, and set obtained values of learning rate
and momentum for further model training. Best learning rate will be divided
by `fit_learning_rate_div` for further training model.
Args:
*args: arguments
Returns:
|
deeppavlov/core/models/lr_scheduled_model.py
|
def fit(self, *args):
"""
Find the best learning rate schedule, and set obtained values of learning rate
and momentum for further model training. Best learning rate will be divided
by `fit_learning_rate_div` for further training model.
Args:
*args: arguments
Returns:
"""
data = list(zip(*args))
self.save()
if self._fit_batch_size is None:
raise ConfigError("in order to use fit() method"
" set `fit_batch_size` parameter")
bs = int(self._fit_batch_size)
data_len = len(data)
num_batches = self._fit_max_batches or ((data_len - 1) // bs + 1)
avg_loss = 0.
best_loss = float('inf')
lrs, losses = [], []
_lr_find_schedule = DecayScheduler(start_val=self._fit_learning_rate[0],
end_val=self._fit_learning_rate[1],
dec_type="exponential",
num_it=num_batches)
self._lr = _lr_find_schedule.start_val
self._mom = 0.
self._update_graph_variables(learning_rate=self._lr, momentum=self._mom)
best_lr = _lr_find_schedule.start_val
for i in range(num_batches):
batch_start = (i * bs) % data_len
batch_end = batch_start + bs
report = self.train_on_batch(*zip(*data[batch_start:batch_end]))
if not isinstance(report, dict):
report = {'loss': report}
# Calculating smoothed loss
avg_loss = self._fit_beta*avg_loss + (1 - self._fit_beta)*report['loss']
smoothed_loss = avg_loss / (1 - self._fit_beta**(i + 1))
lrs.append(self._lr)
losses.append(smoothed_loss)
log.info(f"Batch {i}/{num_batches}: smooth_loss = {smoothed_loss}"
f", lr = {self._lr}, best_lr = {best_lr}")
if math.isnan(smoothed_loss) or (smoothed_loss > 4 * best_loss):
break
if (smoothed_loss < best_loss) and (i >= self._fit_min_batches):
best_loss = smoothed_loss
best_lr = self._lr
self._lr = _lr_find_schedule.next_val()
self._update_graph_variables(learning_rate=self._lr)
if i >= num_batches:
break
# best_lr /= 10
end_val = self._get_best(lrs, losses)
start_val = end_val
if self._lr_schedule.dec_type in (DecayType.ONECYCLE, DecayType.TRAPEZOID):
start_val = end_val / self._fit_learning_rate_div
elif self._lr_schedule.dec_type in (DecayType.POLYNOMIAL, DecayType.EXPONENTIAL,
DecayType.LINEAR, DecayType.COSINE):
start_val = end_val
end_val = end_val / self._fit_learning_rate_div
self._lr_schedule = DecayScheduler(start_val=start_val,
end_val=end_val,
num_it=self._lr_schedule.nb,
dec_type=self._lr_schedule.dec_type,
extra=self._lr_schedule.extra)
log.info(f"Found best learning rate value = {best_lr}"
f", setting new learning rate schedule with {self._lr_schedule}.")
self.load()
self._lr = self._lr_schedule.start_val
self._mom = self._mom_schedule.start_val
self._update_graph_variables(learning_rate=self._lr, momentum=self._mom)
return {'smoothed_loss': losses, 'learning_rate': lrs}
|
def fit(self, *args):
"""
Find the best learning rate schedule, and set obtained values of learning rate
and momentum for further model training. Best learning rate will be divided
by `fit_learning_rate_div` for further training model.
Args:
*args: arguments
Returns:
"""
data = list(zip(*args))
self.save()
if self._fit_batch_size is None:
raise ConfigError("in order to use fit() method"
" set `fit_batch_size` parameter")
bs = int(self._fit_batch_size)
data_len = len(data)
num_batches = self._fit_max_batches or ((data_len - 1) // bs + 1)
avg_loss = 0.
best_loss = float('inf')
lrs, losses = [], []
_lr_find_schedule = DecayScheduler(start_val=self._fit_learning_rate[0],
end_val=self._fit_learning_rate[1],
dec_type="exponential",
num_it=num_batches)
self._lr = _lr_find_schedule.start_val
self._mom = 0.
self._update_graph_variables(learning_rate=self._lr, momentum=self._mom)
best_lr = _lr_find_schedule.start_val
for i in range(num_batches):
batch_start = (i * bs) % data_len
batch_end = batch_start + bs
report = self.train_on_batch(*zip(*data[batch_start:batch_end]))
if not isinstance(report, dict):
report = {'loss': report}
# Calculating smoothed loss
avg_loss = self._fit_beta*avg_loss + (1 - self._fit_beta)*report['loss']
smoothed_loss = avg_loss / (1 - self._fit_beta**(i + 1))
lrs.append(self._lr)
losses.append(smoothed_loss)
log.info(f"Batch {i}/{num_batches}: smooth_loss = {smoothed_loss}"
f", lr = {self._lr}, best_lr = {best_lr}")
if math.isnan(smoothed_loss) or (smoothed_loss > 4 * best_loss):
break
if (smoothed_loss < best_loss) and (i >= self._fit_min_batches):
best_loss = smoothed_loss
best_lr = self._lr
self._lr = _lr_find_schedule.next_val()
self._update_graph_variables(learning_rate=self._lr)
if i >= num_batches:
break
# best_lr /= 10
end_val = self._get_best(lrs, losses)
start_val = end_val
if self._lr_schedule.dec_type in (DecayType.ONECYCLE, DecayType.TRAPEZOID):
start_val = end_val / self._fit_learning_rate_div
elif self._lr_schedule.dec_type in (DecayType.POLYNOMIAL, DecayType.EXPONENTIAL,
DecayType.LINEAR, DecayType.COSINE):
start_val = end_val
end_val = end_val / self._fit_learning_rate_div
self._lr_schedule = DecayScheduler(start_val=start_val,
end_val=end_val,
num_it=self._lr_schedule.nb,
dec_type=self._lr_schedule.dec_type,
extra=self._lr_schedule.extra)
log.info(f"Found best learning rate value = {best_lr}"
f", setting new learning rate schedule with {self._lr_schedule}.")
self.load()
self._lr = self._lr_schedule.start_val
self._mom = self._mom_schedule.start_val
self._update_graph_variables(learning_rate=self._lr, momentum=self._mom)
return {'smoothed_loss': losses, 'learning_rate': lrs}
|
[
"Find",
"the",
"best",
"learning",
"rate",
"schedule",
"and",
"set",
"obtained",
"values",
"of",
"learning",
"rate",
"and",
"momentum",
"for",
"further",
"model",
"training",
".",
"Best",
"learning",
"rate",
"will",
"be",
"divided",
"by",
"fit_learning_rate_div",
"for",
"further",
"training",
"model",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/models/lr_scheduled_model.py#L294-L371
|
[
"def",
"fit",
"(",
"self",
",",
"*",
"args",
")",
":",
"data",
"=",
"list",
"(",
"zip",
"(",
"*",
"args",
")",
")",
"self",
".",
"save",
"(",
")",
"if",
"self",
".",
"_fit_batch_size",
"is",
"None",
":",
"raise",
"ConfigError",
"(",
"\"in order to use fit() method\"",
"\" set `fit_batch_size` parameter\"",
")",
"bs",
"=",
"int",
"(",
"self",
".",
"_fit_batch_size",
")",
"data_len",
"=",
"len",
"(",
"data",
")",
"num_batches",
"=",
"self",
".",
"_fit_max_batches",
"or",
"(",
"(",
"data_len",
"-",
"1",
")",
"//",
"bs",
"+",
"1",
")",
"avg_loss",
"=",
"0.",
"best_loss",
"=",
"float",
"(",
"'inf'",
")",
"lrs",
",",
"losses",
"=",
"[",
"]",
",",
"[",
"]",
"_lr_find_schedule",
"=",
"DecayScheduler",
"(",
"start_val",
"=",
"self",
".",
"_fit_learning_rate",
"[",
"0",
"]",
",",
"end_val",
"=",
"self",
".",
"_fit_learning_rate",
"[",
"1",
"]",
",",
"dec_type",
"=",
"\"exponential\"",
",",
"num_it",
"=",
"num_batches",
")",
"self",
".",
"_lr",
"=",
"_lr_find_schedule",
".",
"start_val",
"self",
".",
"_mom",
"=",
"0.",
"self",
".",
"_update_graph_variables",
"(",
"learning_rate",
"=",
"self",
".",
"_lr",
",",
"momentum",
"=",
"self",
".",
"_mom",
")",
"best_lr",
"=",
"_lr_find_schedule",
".",
"start_val",
"for",
"i",
"in",
"range",
"(",
"num_batches",
")",
":",
"batch_start",
"=",
"(",
"i",
"*",
"bs",
")",
"%",
"data_len",
"batch_end",
"=",
"batch_start",
"+",
"bs",
"report",
"=",
"self",
".",
"train_on_batch",
"(",
"*",
"zip",
"(",
"*",
"data",
"[",
"batch_start",
":",
"batch_end",
"]",
")",
")",
"if",
"not",
"isinstance",
"(",
"report",
",",
"dict",
")",
":",
"report",
"=",
"{",
"'loss'",
":",
"report",
"}",
"# Calculating smoothed loss",
"avg_loss",
"=",
"self",
".",
"_fit_beta",
"*",
"avg_loss",
"+",
"(",
"1",
"-",
"self",
".",
"_fit_beta",
")",
"*",
"report",
"[",
"'loss'",
"]",
"smoothed_loss",
"=",
"avg_loss",
"/",
"(",
"1",
"-",
"self",
".",
"_fit_beta",
"**",
"(",
"i",
"+",
"1",
")",
")",
"lrs",
".",
"append",
"(",
"self",
".",
"_lr",
")",
"losses",
".",
"append",
"(",
"smoothed_loss",
")",
"log",
".",
"info",
"(",
"f\"Batch {i}/{num_batches}: smooth_loss = {smoothed_loss}\"",
"f\", lr = {self._lr}, best_lr = {best_lr}\"",
")",
"if",
"math",
".",
"isnan",
"(",
"smoothed_loss",
")",
"or",
"(",
"smoothed_loss",
">",
"4",
"*",
"best_loss",
")",
":",
"break",
"if",
"(",
"smoothed_loss",
"<",
"best_loss",
")",
"and",
"(",
"i",
">=",
"self",
".",
"_fit_min_batches",
")",
":",
"best_loss",
"=",
"smoothed_loss",
"best_lr",
"=",
"self",
".",
"_lr",
"self",
".",
"_lr",
"=",
"_lr_find_schedule",
".",
"next_val",
"(",
")",
"self",
".",
"_update_graph_variables",
"(",
"learning_rate",
"=",
"self",
".",
"_lr",
")",
"if",
"i",
">=",
"num_batches",
":",
"break",
"# best_lr /= 10",
"end_val",
"=",
"self",
".",
"_get_best",
"(",
"lrs",
",",
"losses",
")",
"start_val",
"=",
"end_val",
"if",
"self",
".",
"_lr_schedule",
".",
"dec_type",
"in",
"(",
"DecayType",
".",
"ONECYCLE",
",",
"DecayType",
".",
"TRAPEZOID",
")",
":",
"start_val",
"=",
"end_val",
"/",
"self",
".",
"_fit_learning_rate_div",
"elif",
"self",
".",
"_lr_schedule",
".",
"dec_type",
"in",
"(",
"DecayType",
".",
"POLYNOMIAL",
",",
"DecayType",
".",
"EXPONENTIAL",
",",
"DecayType",
".",
"LINEAR",
",",
"DecayType",
".",
"COSINE",
")",
":",
"start_val",
"=",
"end_val",
"end_val",
"=",
"end_val",
"/",
"self",
".",
"_fit_learning_rate_div",
"self",
".",
"_lr_schedule",
"=",
"DecayScheduler",
"(",
"start_val",
"=",
"start_val",
",",
"end_val",
"=",
"end_val",
",",
"num_it",
"=",
"self",
".",
"_lr_schedule",
".",
"nb",
",",
"dec_type",
"=",
"self",
".",
"_lr_schedule",
".",
"dec_type",
",",
"extra",
"=",
"self",
".",
"_lr_schedule",
".",
"extra",
")",
"log",
".",
"info",
"(",
"f\"Found best learning rate value = {best_lr}\"",
"f\", setting new learning rate schedule with {self._lr_schedule}.\"",
")",
"self",
".",
"load",
"(",
")",
"self",
".",
"_lr",
"=",
"self",
".",
"_lr_schedule",
".",
"start_val",
"self",
".",
"_mom",
"=",
"self",
".",
"_mom_schedule",
".",
"start_val",
"self",
".",
"_update_graph_variables",
"(",
"learning_rate",
"=",
"self",
".",
"_lr",
",",
"momentum",
"=",
"self",
".",
"_mom",
")",
"return",
"{",
"'smoothed_loss'",
":",
"losses",
",",
"'learning_rate'",
":",
"lrs",
"}"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
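The core of the LR-finder loop in fit() above is an exponentially smoothed loss with bias correction; a standalone sketch with made-up losses (beta plays the role of _fit_beta):

beta = 0.98
avg_loss = 0.0
raw = [2.5, 2.3, 2.6, 2.0, 1.8]                   # made-up per-batch losses
for i, loss in enumerate(raw):
    avg_loss = beta * avg_loss + (1 - beta) * loss
    smoothed = avg_loss / (1 - beta ** (i + 1))   # bias-corrected EMA
    print(i, round(smoothed, 3))
# the sweep stops early once smoothed loss is NaN or exceeds 4 * best_loss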
test
|
LRScheduledModel._get_best
|
Find the best value according to given losses
Args:
values: list of considered values
losses: list of obtained loss values corresponding to `values`
max_loss_div: maximal divergence of loss to be considered significant
min_val_div: minimum divergence of loss to be considered significant
Returns:
best value divided by `min_val_div`
|
deeppavlov/core/models/lr_scheduled_model.py
|
def _get_best(values: List[float], losses: List[float],
max_loss_div: float = 0.9, min_val_div: float = 10.0) -> float:
"""
Find the best value according to given losses
Args:
values: list of considered values
losses: list of obtained loss values corresponding to `values`
max_loss_div: maximal divergence of loss to be considered significant
min_val_div: minimum divergence of loss to be considered significant
Returns:
best value divided by `min_val_div`
"""
assert len(values) == len(losses), "lengths of values and losses should be equal"
min_ind = np.argmin(losses)
for i in range(min_ind - 1, 0, -1):
if (losses[i] * max_loss_div > losses[min_ind]) or\
(values[i] * min_val_div < values[min_ind]):
return values[i + 1]
return values[min_ind] / min_val_div
|
def _get_best(values: List[float], losses: List[float],
max_loss_div: float = 0.9, min_val_div: float = 10.0) -> float:
"""
Find the best value according to given losses
Args:
values: list of considered values
losses: list of obtained loss values corresponding to `values`
max_loss_div: maximal divergence of loss to be considered significant
min_val_div: minimum divergence of loss to be considered significant
Returns:
best value divided by `min_val_div`
"""
assert len(values) == len(losses), "lengths of values and losses should be equal"
min_ind = np.argmin(losses)
for i in range(min_ind - 1, 0, -1):
if (losses[i] * max_loss_div > losses[min_ind]) or\
(values[i] * min_val_div < values[min_ind]):
return values[i + 1]
return values[min_ind] / min_val_div
|
[
"Find",
"the",
"best",
"value",
"according",
"to",
"given",
"losses"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/models/lr_scheduled_model.py#L374-L394
|
[
"def",
"_get_best",
"(",
"values",
":",
"List",
"[",
"float",
"]",
",",
"losses",
":",
"List",
"[",
"float",
"]",
",",
"max_loss_div",
":",
"float",
"=",
"0.9",
",",
"min_val_div",
":",
"float",
"=",
"10.0",
")",
"->",
"float",
":",
"assert",
"len",
"(",
"values",
")",
"==",
"len",
"(",
"losses",
")",
",",
"\"lengths of values and losses should be equal\"",
"min_ind",
"=",
"np",
".",
"argmin",
"(",
"losses",
")",
"for",
"i",
"in",
"range",
"(",
"min_ind",
"-",
"1",
",",
"0",
",",
"-",
"1",
")",
":",
"if",
"(",
"losses",
"[",
"i",
"]",
"*",
"max_loss_div",
">",
"losses",
"[",
"min_ind",
"]",
")",
"or",
"(",
"values",
"[",
"i",
"]",
"*",
"min_val_div",
"<",
"values",
"[",
"min_ind",
"]",
")",
":",
"return",
"values",
"[",
"i",
"+",
"1",
"]",
"return",
"values",
"[",
"min_ind",
"]",
"/",
"min_val_div"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
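A standalone numeric sketch of _get_best (logic copied from above for illustration): starting from the loss minimum, walk left until the loss or the value diverges, then return the value one step to the right.

import numpy as np

def get_best(values, losses, max_loss_div=0.9, min_val_div=10.0):
    min_ind = int(np.argmin(losses))
    for i in range(min_ind - 1, 0, -1):
        if losses[i] * max_loss_div > losses[min_ind] or \
                values[i] * min_val_div < values[min_ind]:
            return values[i + 1]
    return values[min_ind] / min_val_div

lrs    = [1e-5, 1e-4, 1e-3, 1e-2, 1e-1]
losses = [2.0, 1.5, 1.0, 0.9, 3.0]   # made-up LR-finder curve, minimum at 1e-2
print(get_best(lrs, losses))         # 0.001: the loss at 1e-4 diverges first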
test
|
LRScheduledModel.process_event
|
Update learning rate and momentum variables after event (given by `event_name`)
Args:
event_name: name of event after which the method was called.
Set of values: `"after_validation"`, `"after_batch"`, `"after_epoch"`, `"after_train_log"`
data: dictionary with parameters values
Returns:
None
|
deeppavlov/core/models/lr_scheduled_model.py
|
def process_event(self, event_name: str, data: dict) -> None:
"""
Update learning rate and momentum variables after event (given by `event_name`)
Args:
event_name: name of event after which the method was called.
Set of values: `"after_validation"`, `"after_batch"`, `"after_epoch"`, `"after_train_log"`
data: dictionary with parameters values
Returns:
None
"""
if event_name == "after_validation":
if data['impatience'] > self._learning_rate_last_impatience:
self._learning_rate_cur_impatience += 1
else:
self._learning_rate_cur_impatience = 0
self._learning_rate_last_impatience = data['impatience']
if (self._learning_rate_drop_patience is not None) and\
(self._learning_rate_cur_impatience >=
self._learning_rate_drop_patience):
self._learning_rate_cur_impatience = 0
self._learning_rate_cur_div *= self._learning_rate_drop_div
self._lr /= self._learning_rate_drop_div
self._update_graph_variables(learning_rate=self._lr)
log.info(f"New learning rate dividor = {self._learning_rate_cur_div}")
if event_name == 'after_batch':
if (self._lr is not None) and self._lr_update_on_batch:
self._lr = self._lr_schedule.next_val() / self._learning_rate_cur_div
self._update_graph_variables(learning_rate=self._lr)
if (self._mom is not None) and self._mom_update_on_batch:
self._mom = min(1., max(0., self._mom_schedule.next_val()))
self._update_graph_variables(momentum=self._mom)
if event_name == 'after_epoch':
if (self._lr is not None) and not self._lr_update_on_batch:
self._lr = self._lr_schedule.next_val() / self._learning_rate_cur_div
self._update_graph_variables(learning_rate=self._lr)
if (self._mom is not None) and not self._mom_update_on_batch:
self._mom = min(1., max(0., self._mom_schedule.next_val()))
self._update_graph_variables(momentum=self._mom)
if event_name == 'after_train_log':
if (self._lr is not None) and ('learning_rate' not in data):
data['learning_rate'] = self._lr
if (self._mom is not None) and ('momentum' not in data):
data['momentum'] = self._mom
|
def process_event(self, event_name: str, data: dict) -> None:
"""
Update learning rate and momentum variables after event (given by `event_name`)
Args:
event_name: name of event after which the method was called.
Set of values: `"after_validation"`, `"after_batch"`, `"after_epoch"`, `"after_train_log"`
data: dictionary with parameters values
Returns:
None
"""
if event_name == "after_validation":
if data['impatience'] > self._learning_rate_last_impatience:
self._learning_rate_cur_impatience += 1
else:
self._learning_rate_cur_impatience = 0
self._learning_rate_last_impatience = data['impatience']
if (self._learning_rate_drop_patience is not None) and\
(self._learning_rate_cur_impatience >=
self._learning_rate_drop_patience):
self._learning_rate_cur_impatience = 0
self._learning_rate_cur_div *= self._learning_rate_drop_div
self._lr /= self._learning_rate_drop_div
self._update_graph_variables(learning_rate=self._lr)
log.info(f"New learning rate dividor = {self._learning_rate_cur_div}")
if event_name == 'after_batch':
if (self._lr is not None) and self._lr_update_on_batch:
self._lr = self._lr_schedule.next_val() / self._learning_rate_cur_div
self._update_graph_variables(learning_rate=self._lr)
if (self._mom is not None) and self._mom_update_on_batch:
self._mom = min(1., max(0., self._mom_schedule.next_val()))
self._update_graph_variables(momentum=self._mom)
if event_name == 'after_epoch':
if (self._lr is not None) and not self._lr_update_on_batch:
self._lr = self._lr_schedule.next_val() / self._learning_rate_cur_div
self._update_graph_variables(learning_rate=self._lr)
if (self._mom is not None) and not self._mom_update_on_batch:
self._mom = min(1., max(0., self._mom_schedule.next_val()))
self._update_graph_variables(momentum=self._mom)
if event_name == 'after_train_log':
if (self._lr is not None) and ('learning_rate' not in data):
data['learning_rate'] = self._lr
if (self._mom is not None) and ('momentum' not in data):
data['momentum'] = self._mom
|
[
"Update",
"learning",
"rate",
"and",
"momentum",
"variables",
"after",
"event",
"(",
"given",
"by",
"event_name",
")"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/models/lr_scheduled_model.py#L396-L442
|
[
"def",
"process_event",
"(",
"self",
",",
"event_name",
":",
"str",
",",
"data",
":",
"dict",
")",
"->",
"None",
":",
"if",
"event_name",
"==",
"\"after_validation\"",
":",
"if",
"data",
"[",
"'impatience'",
"]",
">",
"self",
".",
"_learning_rate_last_impatience",
":",
"self",
".",
"_learning_rate_cur_impatience",
"+=",
"1",
"else",
":",
"self",
".",
"_learning_rate_cur_impatience",
"=",
"0",
"self",
".",
"_learning_rate_last_impatience",
"=",
"data",
"[",
"'impatience'",
"]",
"if",
"(",
"self",
".",
"_learning_rate_drop_patience",
"is",
"not",
"None",
")",
"and",
"(",
"self",
".",
"_learning_rate_cur_impatience",
">=",
"self",
".",
"_learning_rate_drop_patience",
")",
":",
"self",
".",
"_learning_rate_cur_impatience",
"=",
"0",
"self",
".",
"_learning_rate_cur_div",
"*=",
"self",
".",
"_learning_rate_drop_div",
"self",
".",
"_lr",
"/=",
"self",
".",
"_learning_rate_drop_div",
"self",
".",
"_update_graph_variables",
"(",
"learning_rate",
"=",
"self",
".",
"_lr",
")",
"log",
".",
"info",
"(",
"f\"New learning rate dividor = {self._learning_rate_cur_div}\"",
")",
"if",
"event_name",
"==",
"'after_batch'",
":",
"if",
"(",
"self",
".",
"_lr",
"is",
"not",
"None",
")",
"and",
"self",
".",
"_lr_update_on_batch",
":",
"self",
".",
"_lr",
"=",
"self",
".",
"_lr_schedule",
".",
"next_val",
"(",
")",
"/",
"self",
".",
"_learning_rate_cur_div",
"self",
".",
"_update_graph_variables",
"(",
"learning_rate",
"=",
"self",
".",
"_lr",
")",
"if",
"(",
"self",
".",
"_mom",
"is",
"not",
"None",
")",
"and",
"self",
".",
"_mom_update_on_batch",
":",
"self",
".",
"_mom",
"=",
"min",
"(",
"1.",
",",
"max",
"(",
"0.",
",",
"self",
".",
"_mom_schedule",
".",
"next_val",
"(",
")",
")",
")",
"self",
".",
"_update_graph_variables",
"(",
"momentum",
"=",
"self",
".",
"_mom",
")",
"if",
"event_name",
"==",
"'after_epoch'",
":",
"if",
"(",
"self",
".",
"_lr",
"is",
"not",
"None",
")",
"and",
"not",
"self",
".",
"_lr_update_on_batch",
":",
"self",
".",
"_lr",
"=",
"self",
".",
"_lr_schedule",
".",
"next_val",
"(",
")",
"/",
"self",
".",
"_learning_rate_cur_div",
"self",
".",
"_update_graph_variables",
"(",
"learning_rate",
"=",
"self",
".",
"_lr",
")",
"if",
"(",
"self",
".",
"_mom",
"is",
"not",
"None",
")",
"and",
"not",
"self",
".",
"_mom_update_on_batch",
":",
"self",
".",
"_mom",
"=",
"min",
"(",
"1.",
",",
"max",
"(",
"0.",
",",
"self",
".",
"_mom_schedule",
".",
"next_val",
"(",
")",
")",
")",
"self",
".",
"_update_graph_variables",
"(",
"momentum",
"=",
"self",
".",
"_mom",
")",
"if",
"event_name",
"==",
"'after_train_log'",
":",
"if",
"(",
"self",
".",
"_lr",
"is",
"not",
"None",
")",
"and",
"(",
"'learning_rate'",
"not",
"in",
"data",
")",
":",
"data",
"[",
"'learning_rate'",
"]",
"=",
"self",
".",
"_lr",
"if",
"(",
"self",
".",
"_mom",
"is",
"not",
"None",
")",
"and",
"(",
"'momentum'",
"not",
"in",
"data",
")",
":",
"data",
"[",
"'momentum'",
"]",
"=",
"self",
".",
"_mom"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
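A usage sketch for process_event (hypothetical trainer-side calls; model is assumed to be an instance of an LRScheduledModel subclass):

# after each validation the trainer reports its early-stopping impatience;
# once it has grown for _learning_rate_drop_patience validations in a row,
# the current learning rate is divided by _learning_rate_drop_div
model.process_event('after_validation', {'impatience': 3})

# 'after_train_log' only fills missing fields into the report dict
report = {}
model.process_event('after_train_log', report)
print(report)  # e.g. {'learning_rate': 0.001, 'momentum': 0.9}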
test
|
Embedder._encode
|
Embed one text sample
Args:
tokens: tokenized text sample
mean: whether to return mean embedding of tokens per sample
Returns:
list of embedded tokens or array of mean values
|
deeppavlov/models/embedders/abstract_embedder.py
|
def _encode(self, tokens: List[str], mean: bool) -> Union[List[np.ndarray], np.ndarray]:
"""
Embed one text sample
Args:
tokens: tokenized text sample
mean: whether to return mean embedding of tokens per sample
Returns:
list of embedded tokens or array of mean values
"""
embedded_tokens = []
for t in tokens:
try:
emb = self.tok2emb[t]
except KeyError:
try:
emb = self._get_word_vector(t)
except KeyError:
emb = np.zeros(self.dim, dtype=np.float32)
self.tok2emb[t] = emb
embedded_tokens.append(emb)
if mean is None:
mean = self.mean
if mean:
filtered = [et for et in embedded_tokens if np.any(et)]
if filtered:
return np.mean(filtered, axis=0)
return np.zeros(self.dim, dtype=np.float32)
return embedded_tokens
|
def _encode(self, tokens: List[str], mean: bool) -> Union[List[np.ndarray], np.ndarray]:
"""
Embed one text sample
Args:
tokens: tokenized text sample
mean: whether to return mean embedding of tokens per sample
Returns:
list of embedded tokens or array of mean values
"""
embedded_tokens = []
for t in tokens:
try:
emb = self.tok2emb[t]
except KeyError:
try:
emb = self._get_word_vector(t)
except KeyError:
emb = np.zeros(self.dim, dtype=np.float32)
self.tok2emb[t] = emb
embedded_tokens.append(emb)
if mean is None:
mean = self.mean
if mean:
filtered = [et for et in embedded_tokens if np.any(et)]
if filtered:
return np.mean(filtered, axis=0)
return np.zeros(self.dim, dtype=np.float32)
return embedded_tokens
|
[
"Embed",
"one",
"text",
"sample"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/embedders/abstract_embedder.py#L103-L135
|
[
"def",
"_encode",
"(",
"self",
",",
"tokens",
":",
"List",
"[",
"str",
"]",
",",
"mean",
":",
"bool",
")",
"->",
"Union",
"[",
"List",
"[",
"np",
".",
"ndarray",
"]",
",",
"np",
".",
"ndarray",
"]",
":",
"embedded_tokens",
"=",
"[",
"]",
"for",
"t",
"in",
"tokens",
":",
"try",
":",
"emb",
"=",
"self",
".",
"tok2emb",
"[",
"t",
"]",
"except",
"KeyError",
":",
"try",
":",
"emb",
"=",
"self",
".",
"_get_word_vector",
"(",
"t",
")",
"except",
"KeyError",
":",
"emb",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"dim",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"self",
".",
"tok2emb",
"[",
"t",
"]",
"=",
"emb",
"embedded_tokens",
".",
"append",
"(",
"emb",
")",
"if",
"mean",
"is",
"None",
":",
"mean",
"=",
"self",
".",
"mean",
"if",
"mean",
":",
"filtered",
"=",
"[",
"et",
"for",
"et",
"in",
"embedded_tokens",
"if",
"np",
".",
"any",
"(",
"et",
")",
"]",
"if",
"filtered",
":",
"return",
"np",
".",
"mean",
"(",
"filtered",
",",
"axis",
"=",
"0",
")",
"return",
"np",
".",
"zeros",
"(",
"self",
".",
"dim",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"return",
"embedded_tokens"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
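A standalone sketch of the mean-pooling branch of _encode: zero vectors (unknown tokens) are excluded from the mean, and an all-zero sample falls back to a zero vector of dimensionality dim.

import numpy as np

dim = 4
embedded_tokens = [np.array([1., 2., 3., 4.]),
                   np.zeros(dim),                  # an OOV token embeds to zeros
                   np.array([3., 2., 1., 0.])]
filtered = [et for et in embedded_tokens if np.any(et)]
mean_vec = np.mean(filtered, axis=0) if filtered else np.zeros(dim, dtype=np.float32)
print(mean_vec)  # [2. 2. 2. 2.]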
test
|
read_requirements
|
parses requirements from requirements.txt
|
setup.py
|
def read_requirements():
"""parses requirements from requirements.txt"""
reqs_path = os.path.join(__location__, 'requirements.txt')
with open(reqs_path, encoding='utf8') as f:
reqs = [line.strip() for line in f if not line.strip().startswith('#')]
names = []
links = []
for req in reqs:
if '://' in req:
links.append(req)
else:
names.append(req)
return {'install_requires': names, 'dependency_links': links}
|
def read_requirements():
"""parses requirements from requirements.txt"""
reqs_path = os.path.join(__location__, 'requirements.txt')
with open(reqs_path, encoding='utf8') as f:
reqs = [line.strip() for line in f if not line.strip().startswith('#')]
names = []
links = []
for req in reqs:
if '://' in req:
links.append(req)
else:
names.append(req)
return {'install_requires': names, 'dependency_links': links}
|
[
"parses",
"requirements",
"from",
"requirements",
".",
"txt"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/setup.py#L22-L35
|
[
"def",
"read_requirements",
"(",
")",
":",
"reqs_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"__location__",
",",
"'requirements.txt'",
")",
"with",
"open",
"(",
"reqs_path",
",",
"encoding",
"=",
"'utf8'",
")",
"as",
"f",
":",
"reqs",
"=",
"[",
"line",
".",
"strip",
"(",
")",
"for",
"line",
"in",
"f",
"if",
"not",
"line",
".",
"strip",
"(",
")",
".",
"startswith",
"(",
"'#'",
")",
"]",
"names",
"=",
"[",
"]",
"links",
"=",
"[",
"]",
"for",
"req",
"in",
"reqs",
":",
"if",
"'://'",
"in",
"req",
":",
"links",
".",
"append",
"(",
"req",
")",
"else",
":",
"names",
".",
"append",
"(",
"req",
")",
"return",
"{",
"'install_requires'",
":",
"names",
",",
"'dependency_links'",
":",
"links",
"}"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
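An illustration of the classification rule in read_requirements, applied to a few made-up requirement lines:

lines = ['numpy>=1.16', '# build tools', 'git+https://github.com/example/pkg.git']
reqs = [line.strip() for line in lines if not line.strip().startswith('#')]
names = [r for r in reqs if '://' not in r]   # -> install_requires
links = [r for r in reqs if '://' in r]       # -> dependency_links
print(names, links)
# ['numpy>=1.16'] ['git+https://github.com/example/pkg.git']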
test
|
detokenize
|
Detokenizing a text undoes the tokenizing operation, restores
punctuation and spaces to the places that people expect them to be.
Ideally, `detokenize(tokenize(text))` should be identical to `text`,
except for line breaks.
|
deeppavlov/models/tokenizers/utils.py
|
def detokenize(tokens):
"""
Detokenizing a text undoes the tokenizing operation, restores
punctuation and spaces to the places that people expect them to be.
Ideally, `detokenize(tokenize(text))` should be identical to `text`,
except for line breaks.
"""
text = ' '.join(tokens)
step0 = text.replace('. . .', '...')
step1 = step0.replace("`` ", '"').replace(" ''", '"')
step2 = step1.replace(" ( ", " (").replace(" ) ", ") ")
step3 = re.sub(r' ([.,:;?!%]+)([ \'"`])', r"\1\2", step2)
step4 = re.sub(r' ([.,:;?!%]+)$', r"\1", step3)
step5 = step4.replace(" '", "'").replace(" n't", "n't") \
.replace(" nt", "nt").replace("can not", "cannot")
step6 = step5.replace(" ` ", " '")
return step6.strip()
|
def detokenize(tokens):
"""
Detokenizing a text undoes the tokenizing operation, restores
punctuation and spaces to the places that people expect them to be.
Ideally, `detokenize(tokenize(text))` should be identical to `text`,
except for line breaks.
"""
text = ' '.join(tokens)
step0 = text.replace('. . .', '...')
step1 = step0.replace("`` ", '"').replace(" ''", '"')
step2 = step1.replace(" ( ", " (").replace(" ) ", ") ")
step3 = re.sub(r' ([.,:;?!%]+)([ \'"`])', r"\1\2", step2)
step4 = re.sub(r' ([.,:;?!%]+)$', r"\1", step3)
step5 = step4.replace(" '", "'").replace(" n't", "n't") \
.replace(" nt", "nt").replace("can not", "cannot")
step6 = step5.replace(" ` ", " '")
return step6.strip()
|
[
"Detokenizing",
"a",
"text",
"undoes",
"the",
"tokenizing",
"operation",
"restores",
"punctuation",
"and",
"spaces",
"to",
"the",
"places",
"that",
"people",
"expect",
"them",
"to",
"be",
".",
"Ideally",
"detokenize",
"(",
"tokenize",
"(",
"text",
"))",
"should",
"be",
"identical",
"to",
"text",
"except",
"for",
"line",
"breaks",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/tokenizers/utils.py#L19-L35
|
[
"def",
"detokenize",
"(",
"tokens",
")",
":",
"text",
"=",
"' '",
".",
"join",
"(",
"tokens",
")",
"step0",
"=",
"text",
".",
"replace",
"(",
"'. . .'",
",",
"'...'",
")",
"step1",
"=",
"step0",
".",
"replace",
"(",
"\"`` \"",
",",
"'\"'",
")",
".",
"replace",
"(",
"\" ''\"",
",",
"'\"'",
")",
"step2",
"=",
"step1",
".",
"replace",
"(",
"\" ( \"",
",",
"\" (\"",
")",
".",
"replace",
"(",
"\" ) \"",
",",
"\") \"",
")",
"step3",
"=",
"re",
".",
"sub",
"(",
"r' ([.,:;?!%]+)([ \\'\"`])'",
",",
"r\"\\1\\2\"",
",",
"step2",
")",
"step4",
"=",
"re",
".",
"sub",
"(",
"r' ([.,:;?!%]+)$'",
",",
"r\"\\1\"",
",",
"step3",
")",
"step5",
"=",
"step4",
".",
"replace",
"(",
"\" '\"",
",",
"\"'\"",
")",
".",
"replace",
"(",
"\" n't\"",
",",
"\"n't\"",
")",
".",
"replace",
"(",
"\" nt\"",
",",
"\"nt\"",
")",
".",
"replace",
"(",
"\"can not\"",
",",
"\"cannot\"",
")",
"step6",
"=",
"step5",
".",
"replace",
"(",
"\" ` \"",
",",
"\" '\"",
")",
"return",
"step6",
".",
"strip",
"(",
")"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
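Usage sketch for detokenize, assuming DeepPavlov is installed:

from deeppavlov.models.tokenizers.utils import detokenize

print(detokenize(['Do', "n't", 'stop', ',', 'please', '.']))
# "Don't stop, please." -- the " n't", " ," and " ." artifacts are undone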
test
|
ngramize
|
Make ngrams from a list of tokens/lemmas
:param items: list of tokens, lemmas or other strings to form ngrams
:param ngram_range: range for producing ngrams, ex. for unigrams + bigrams should be set to
(1, 2), for bigrams only should be set to (2, 2)
:return: ngrams (as strings) generator
|
deeppavlov/models/tokenizers/utils.py
|
def ngramize(items: List[str], ngram_range=(1, 1)) -> Generator[List[str], Any, None]:
"""
Make ngrams from a list of tokens/lemmas
:param items: list of tokens, lemmas or other strings to form ngrams
:param ngram_range: range for producing ngrams, ex. for unigrams + bigrams should be set to
(1, 2), for bigrams only should be set to (2, 2)
:return: ngrams (as strings) generator
"""
ngrams = []
ranges = [(0, i) for i in range(ngram_range[0], ngram_range[1] + 1)]
for r in ranges:
ngrams += list(zip(*[items[j:] for j in range(*r)]))
formatted_ngrams = [' '.join(item) for item in ngrams]
yield formatted_ngrams
|
def ngramize(items: List[str], ngram_range=(1, 1)) -> Generator[List[str], Any, None]:
"""
Make ngrams from a list of tokens/lemmas
:param items: list of tokens, lemmas or other strings to form ngrams
:param ngram_range: range for producing ngrams, ex. for unigrams + bigrams should be set to
(1, 2), for bigrams only should be set to (2, 2)
:return: ngrams (as strings) generator
"""
ngrams = []
ranges = [(0, i) for i in range(ngram_range[0], ngram_range[1] + 1)]
for r in ranges:
ngrams += list(zip(*[items[j:] for j in range(*r)]))
formatted_ngrams = [' '.join(item) for item in ngrams]
yield formatted_ngrams
|
[
"Make",
"ngrams",
"from",
"a",
"list",
"of",
"tokens",
"/",
"lemmas",
":",
"param",
"items",
":",
"list",
"of",
"tokens",
"lemmas",
"or",
"other",
"strings",
"to",
"form",
"ngrams",
":",
"param",
"ngram_range",
":",
"range",
"for",
"producing",
"ngrams",
"ex",
".",
"for",
"unigrams",
"+",
"bigrams",
"should",
"be",
"set",
"to",
"(",
"1",
"2",
")",
"for",
"bigrams",
"only",
"should",
"be",
"set",
"to",
"(",
"2",
"2",
")",
":",
"return",
":",
"ngrams",
"(",
"as",
"strings",
")",
"generator"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/tokenizers/utils.py#L38-L54
|
[
"def",
"ngramize",
"(",
"items",
":",
"List",
"[",
"str",
"]",
",",
"ngram_range",
"=",
"(",
"1",
",",
"1",
")",
")",
"->",
"Generator",
"[",
"List",
"[",
"str",
"]",
",",
"Any",
",",
"None",
"]",
":",
"ngrams",
"=",
"[",
"]",
"ranges",
"=",
"[",
"(",
"0",
",",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"ngram_range",
"[",
"0",
"]",
",",
"ngram_range",
"[",
"1",
"]",
"+",
"1",
")",
"]",
"for",
"r",
"in",
"ranges",
":",
"ngrams",
"+=",
"list",
"(",
"zip",
"(",
"*",
"[",
"items",
"[",
"j",
":",
"]",
"for",
"j",
"in",
"range",
"(",
"*",
"r",
")",
"]",
")",
")",
"formatted_ngrams",
"=",
"[",
"' '",
".",
"join",
"(",
"item",
")",
"for",
"item",
"in",
"ngrams",
"]",
"yield",
"formatted_ngrams"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
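Usage sketch for ngramize, assuming DeepPavlov is installed; note that ngramize is a generator that yields a single list of space-joined n-grams:

from deeppavlov.models.tokenizers.utils import ngramize

print(next(ngramize(['new', 'york', 'city'], ngram_range=(1, 2))))
# ['new', 'york', 'city', 'new york', 'york city']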
test
|
sk_log_loss
|
Calculates log loss.
Args:
y_true: list or array of true values
y_predicted: list or array of predicted values
Returns:
Log loss
|
deeppavlov/metrics/log_loss.py
|
def sk_log_loss(y_true: Union[List[List[float]], List[List[int]], np.ndarray],
y_predicted: Union[List[List[float]], List[List[int]], np.ndarray]) -> float:
"""
Calculates log loss.
Args:
y_true: list or array of true values
y_predicted: list or array of predicted values
Returns:
Log loss
"""
return log_loss(y_true, y_predicted)
|
def sk_log_loss(y_true: Union[List[List[float]], List[List[int]], np.ndarray],
y_predicted: Union[List[List[float]], List[List[int]], np.ndarray]) -> float:
"""
Calculates log loss.
Args:
y_true: list or array of true values
y_predicted: list or array of predicted values
Returns:
Log loss
"""
return log_loss(y_true, y_predicted)
|
[
"Calculates",
"log",
"loss",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/metrics/log_loss.py#L25-L37
|
[
"def",
"sk_log_loss",
"(",
"y_true",
":",
"Union",
"[",
"List",
"[",
"List",
"[",
"float",
"]",
"]",
",",
"List",
"[",
"List",
"[",
"int",
"]",
"]",
",",
"np",
".",
"ndarray",
"]",
",",
"y_predicted",
":",
"Union",
"[",
"List",
"[",
"List",
"[",
"float",
"]",
"]",
",",
"List",
"[",
"List",
"[",
"int",
"]",
"]",
",",
"np",
".",
"ndarray",
"]",
")",
"->",
"float",
":",
"return",
"log_loss",
"(",
"y_true",
",",
"y_predicted",
")"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
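A numeric sketch of the delegated computation (sk_log_loss simply forwards to sklearn.metrics.log_loss):

from sklearn.metrics import log_loss

y_true = [1, 0, 1]
y_predicted = [0.9, 0.2, 0.7]        # predicted probabilities of class 1
print(log_loss(y_true, y_predicted))
# ~0.2284 == -(ln 0.9 + ln 0.8 + ln 0.7) / 3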
test
|
make_module_spec
|
Makes a module spec.
Args:
options: LM hyperparameters.
weight_file: location of the hdf5 file with LM weights.
Returns:
A module spec object used for constructing a TF-Hub module.
|
deeppavlov/models/elmo/elmo2tfhub.py
|
def make_module_spec(options, weight_file):
"""Makes a module spec.
Args:
options: LM hyperparameters.
weight_file: location of the hdf5 file with LM weights.
Returns:
A module spec object used for constructing a TF-Hub module.
"""
def module_fn():
"""Spec function for a token embedding module."""
# init
_bos_id = 256
_eos_id = 257
_bow_id = 258
_eow_id = 259
_pad_id = 260
_max_word_length = 50
_parallel_iterations = 10
_max_batch_size = 1024
id_dtype = tf.int32
id_nptype = np.int32
max_word_length = tf.constant(_max_word_length, dtype=id_dtype, name='max_word_length')
version = tf.constant('from_dp_1', dtype=tf.string, name='version')
        # the character representation of the begin/end of sentence characters
def _make_bos_eos(c):
r = np.zeros([_max_word_length], dtype=id_nptype)
r[:] = _pad_id
r[0] = _bow_id
r[1] = c
r[2] = _eow_id
return tf.constant(r, dtype=id_dtype)
bos_ids = _make_bos_eos(_bos_id)
eos_ids = _make_bos_eos(_eos_id)
def token2ids(token):
with tf.name_scope("token2ids_preprocessor"):
char_ids = tf.decode_raw(token, tf.uint8, name='decode_raw2get_char_ids')
char_ids = tf.cast(char_ids, tf.int32, name='cast2int_token')
char_ids = tf.strided_slice(char_ids, [0], [max_word_length - 2],
[1], name='slice2resized_token')
ids_num = tf.shape(char_ids)[0]
fill_ids_num = (_max_word_length - 2) - ids_num
pads = tf.fill([fill_ids_num], _pad_id)
bow_token_eow_pads = tf.concat([[_bow_id], char_ids, [_eow_id], pads],
0, name='concat2bow_token_eow_pads')
return bow_token_eow_pads
def sentence_tagging_and_padding(sen_dim):
with tf.name_scope("sentence_tagging_and_padding_preprocessor"):
sen = sen_dim[0]
dim = sen_dim[1]
extra_dim = tf.shape(sen)[0] - dim
sen = tf.slice(sen, [0, 0], [dim, max_word_length], name='slice2sen')
bos_sen_eos = tf.concat([[bos_ids], sen, [eos_ids]], 0, name='concat2bos_sen_eos')
bos_sen_eos_plus_one = bos_sen_eos + 1
bos_sen_eos_pads = tf.pad(bos_sen_eos_plus_one, [[0, extra_dim], [0, 0]],
"CONSTANT", name='pad2bos_sen_eos_pads')
return bos_sen_eos_pads
# Input placeholders to the biLM.
tokens = tf.placeholder(shape=(None, None), dtype=tf.string, name='ph2tokens')
sequence_len = tf.placeholder(shape=(None, ), dtype=tf.int32, name='ph2sequence_len')
tok_shape = tf.shape(tokens)
line_tokens = tf.reshape(tokens, shape=[-1], name='reshape2line_tokens')
with tf.device('/cpu:0'):
tok_ids = tf.map_fn(
token2ids,
line_tokens,
dtype=tf.int32, back_prop=False, parallel_iterations=_parallel_iterations,
name='map_fn2get_tok_ids')
tok_ids = tf.reshape(tok_ids, [tok_shape[0], tok_shape[1], -1], name='reshape2tok_ids')
with tf.device('/cpu:0'):
sen_ids = tf.map_fn(
sentence_tagging_and_padding,
(tok_ids, sequence_len),
dtype=tf.int32, back_prop=False, parallel_iterations=_parallel_iterations,
name='map_fn2get_sen_ids')
# Build the biLM graph.
bilm = BidirectionalLanguageModel(options, str(weight_file),
max_batch_size=_max_batch_size)
embeddings_op = bilm(sen_ids)
# Get an op to compute ELMo (weighted average of the internal biLM layers)
elmo_output = weight_layers('elmo_output', embeddings_op, l2_coef=0.0)
weighted_op = elmo_output['weighted_op']
mean_op = elmo_output['mean_op']
word_emb = elmo_output['word_emb']
lstm_outputs1 = elmo_output['lstm_outputs1']
lstm_outputs2 = elmo_output['lstm_outputs2']
hub.add_signature("tokens", {"tokens": tokens, "sequence_len": sequence_len},
{"elmo": weighted_op,
"default": mean_op,
"word_emb": word_emb,
"lstm_outputs1": lstm_outputs1,
"lstm_outputs2": lstm_outputs2,
"version": version})
# #########################Next signature############################# #
# Input placeholders to the biLM.
def_strings = tf.placeholder(shape=(None), dtype=tf.string)
def_tokens_sparse = tf.string_split(def_strings)
def_tokens_dense = tf.sparse_to_dense(sparse_indices=def_tokens_sparse.indices,
output_shape=def_tokens_sparse.dense_shape,
sparse_values=def_tokens_sparse.values,
default_value=''
)
def_mask = tf.not_equal(def_tokens_dense, '')
def_int_mask = tf.cast(def_mask, dtype=tf.int32)
def_sequence_len = tf.reduce_sum(def_int_mask, axis=-1)
def_tok_shape = tf.shape(def_tokens_dense)
def_line_tokens = tf.reshape(def_tokens_dense, shape=[-1], name='reshape2line_tokens')
with tf.device('/cpu:0'):
def_tok_ids = tf.map_fn(
token2ids,
def_line_tokens,
dtype=tf.int32, back_prop=False, parallel_iterations=_parallel_iterations,
name='map_fn2get_tok_ids')
def_tok_ids = tf.reshape(def_tok_ids, [def_tok_shape[0], def_tok_shape[1], -1], name='reshape2tok_ids')
with tf.device('/cpu:0'):
def_sen_ids = tf.map_fn(
sentence_tagging_and_padding,
(def_tok_ids, def_sequence_len),
dtype=tf.int32, back_prop=False, parallel_iterations=_parallel_iterations,
name='map_fn2get_sen_ids')
# Get ops to compute the LM embeddings.
def_embeddings_op = bilm(def_sen_ids)
# Get an op to compute ELMo (weighted average of the internal biLM layers)
def_elmo_output = weight_layers('elmo_output', def_embeddings_op, l2_coef=0.0, reuse=True)
def_weighted_op = def_elmo_output['weighted_op']
def_mean_op = def_elmo_output['mean_op']
def_word_emb = def_elmo_output['word_emb']
def_lstm_outputs1 = def_elmo_output['lstm_outputs1']
def_lstm_outputs2 = def_elmo_output['lstm_outputs2']
hub.add_signature("default", {"strings": def_strings},
{"elmo": def_weighted_op,
"default": def_mean_op,
"word_emb": def_word_emb,
"lstm_outputs1": def_lstm_outputs1,
"lstm_outputs2": def_lstm_outputs2,
"version": version})
return hub.create_module_spec(module_fn)
|
def make_module_spec(options, weight_file):
"""Makes a module spec.
Args:
options: LM hyperparameters.
weight_file: location of the hdf5 file with LM weights.
Returns:
A module spec object used for constructing a TF-Hub module.
"""
def module_fn():
"""Spec function for a token embedding module."""
# init
_bos_id = 256
_eos_id = 257
_bow_id = 258
_eow_id = 259
_pad_id = 260
_max_word_length = 50
_parallel_iterations = 10
_max_batch_size = 1024
id_dtype = tf.int32
id_nptype = np.int32
max_word_length = tf.constant(_max_word_length, dtype=id_dtype, name='max_word_length')
version = tf.constant('from_dp_1', dtype=tf.string, name='version')
        # the character representation of the begin/end of sentence characters
def _make_bos_eos(c):
r = np.zeros([_max_word_length], dtype=id_nptype)
r[:] = _pad_id
r[0] = _bow_id
r[1] = c
r[2] = _eow_id
return tf.constant(r, dtype=id_dtype)
bos_ids = _make_bos_eos(_bos_id)
eos_ids = _make_bos_eos(_eos_id)
def token2ids(token):
with tf.name_scope("token2ids_preprocessor"):
char_ids = tf.decode_raw(token, tf.uint8, name='decode_raw2get_char_ids')
char_ids = tf.cast(char_ids, tf.int32, name='cast2int_token')
char_ids = tf.strided_slice(char_ids, [0], [max_word_length - 2],
[1], name='slice2resized_token')
ids_num = tf.shape(char_ids)[0]
fill_ids_num = (_max_word_length - 2) - ids_num
pads = tf.fill([fill_ids_num], _pad_id)
bow_token_eow_pads = tf.concat([[_bow_id], char_ids, [_eow_id], pads],
0, name='concat2bow_token_eow_pads')
return bow_token_eow_pads
def sentence_tagging_and_padding(sen_dim):
with tf.name_scope("sentence_tagging_and_padding_preprocessor"):
sen = sen_dim[0]
dim = sen_dim[1]
extra_dim = tf.shape(sen)[0] - dim
sen = tf.slice(sen, [0, 0], [dim, max_word_length], name='slice2sen')
bos_sen_eos = tf.concat([[bos_ids], sen, [eos_ids]], 0, name='concat2bos_sen_eos')
bos_sen_eos_plus_one = bos_sen_eos + 1
bos_sen_eos_pads = tf.pad(bos_sen_eos_plus_one, [[0, extra_dim], [0, 0]],
"CONSTANT", name='pad2bos_sen_eos_pads')
return bos_sen_eos_pads
# Input placeholders to the biLM.
tokens = tf.placeholder(shape=(None, None), dtype=tf.string, name='ph2tokens')
sequence_len = tf.placeholder(shape=(None, ), dtype=tf.int32, name='ph2sequence_len')
tok_shape = tf.shape(tokens)
line_tokens = tf.reshape(tokens, shape=[-1], name='reshape2line_tokens')
with tf.device('/cpu:0'):
tok_ids = tf.map_fn(
token2ids,
line_tokens,
dtype=tf.int32, back_prop=False, parallel_iterations=_parallel_iterations,
name='map_fn2get_tok_ids')
tok_ids = tf.reshape(tok_ids, [tok_shape[0], tok_shape[1], -1], name='reshape2tok_ids')
with tf.device('/cpu:0'):
sen_ids = tf.map_fn(
sentence_tagging_and_padding,
(tok_ids, sequence_len),
dtype=tf.int32, back_prop=False, parallel_iterations=_parallel_iterations,
name='map_fn2get_sen_ids')
# Build the biLM graph.
bilm = BidirectionalLanguageModel(options, str(weight_file),
max_batch_size=_max_batch_size)
embeddings_op = bilm(sen_ids)
# Get an op to compute ELMo (weighted average of the internal biLM layers)
elmo_output = weight_layers('elmo_output', embeddings_op, l2_coef=0.0)
weighted_op = elmo_output['weighted_op']
mean_op = elmo_output['mean_op']
word_emb = elmo_output['word_emb']
lstm_outputs1 = elmo_output['lstm_outputs1']
lstm_outputs2 = elmo_output['lstm_outputs2']
hub.add_signature("tokens", {"tokens": tokens, "sequence_len": sequence_len},
{"elmo": weighted_op,
"default": mean_op,
"word_emb": word_emb,
"lstm_outputs1": lstm_outputs1,
"lstm_outputs2": lstm_outputs2,
"version": version})
# #########################Next signature############################# #
# Input placeholders to the biLM.
def_strings = tf.placeholder(shape=(None), dtype=tf.string)
def_tokens_sparse = tf.string_split(def_strings)
def_tokens_dense = tf.sparse_to_dense(sparse_indices=def_tokens_sparse.indices,
output_shape=def_tokens_sparse.dense_shape,
sparse_values=def_tokens_sparse.values,
default_value=''
)
def_mask = tf.not_equal(def_tokens_dense, '')
def_int_mask = tf.cast(def_mask, dtype=tf.int32)
def_sequence_len = tf.reduce_sum(def_int_mask, axis=-1)
def_tok_shape = tf.shape(def_tokens_dense)
def_line_tokens = tf.reshape(def_tokens_dense, shape=[-1], name='reshape2line_tokens')
with tf.device('/cpu:0'):
def_tok_ids = tf.map_fn(
token2ids,
def_line_tokens,
dtype=tf.int32, back_prop=False, parallel_iterations=_parallel_iterations,
name='map_fn2get_tok_ids')
def_tok_ids = tf.reshape(def_tok_ids, [def_tok_shape[0], def_tok_shape[1], -1], name='reshape2tok_ids')
with tf.device('/cpu:0'):
def_sen_ids = tf.map_fn(
sentence_tagging_and_padding,
(def_tok_ids, def_sequence_len),
dtype=tf.int32, back_prop=False, parallel_iterations=_parallel_iterations,
name='map_fn2get_sen_ids')
# Get ops to compute the LM embeddings.
def_embeddings_op = bilm(def_sen_ids)
# Get an op to compute ELMo (weighted average of the internal biLM layers)
def_elmo_output = weight_layers('elmo_output', def_embeddings_op, l2_coef=0.0, reuse=True)
def_weighted_op = def_elmo_output['weighted_op']
def_mean_op = def_elmo_output['mean_op']
def_word_emb = def_elmo_output['word_emb']
def_lstm_outputs1 = def_elmo_output['lstm_outputs1']
def_lstm_outputs2 = def_elmo_output['lstm_outputs2']
hub.add_signature("default", {"strings": def_strings},
{"elmo": def_weighted_op,
"default": def_mean_op,
"word_emb": def_word_emb,
"lstm_outputs1": def_lstm_outputs1,
"lstm_outputs2": def_lstm_outputs2,
"version": version})
return hub.create_module_spec(module_fn)
|
[
"Makes",
"a",
"module",
"spec",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/elmo/elmo2tfhub.py#L23-L187
|
[
"def",
"make_module_spec",
"(",
"options",
",",
"weight_file",
")",
":",
"def",
"module_fn",
"(",
")",
":",
"\"\"\"Spec function for a token embedding module.\"\"\"",
"# init",
"_bos_id",
"=",
"256",
"_eos_id",
"=",
"257",
"_bow_id",
"=",
"258",
"_eow_id",
"=",
"259",
"_pad_id",
"=",
"260",
"_max_word_length",
"=",
"50",
"_parallel_iterations",
"=",
"10",
"_max_batch_size",
"=",
"1024",
"id_dtype",
"=",
"tf",
".",
"int32",
"id_nptype",
"=",
"np",
".",
"int32",
"max_word_length",
"=",
"tf",
".",
"constant",
"(",
"_max_word_length",
",",
"dtype",
"=",
"id_dtype",
",",
"name",
"=",
"'max_word_length'",
")",
"version",
"=",
"tf",
".",
"constant",
"(",
"'from_dp_1'",
",",
"dtype",
"=",
"tf",
".",
"string",
",",
"name",
"=",
"'version'",
")",
"# the charcter representation of the begin/end of sentence characters",
"def",
"_make_bos_eos",
"(",
"c",
")",
":",
"r",
"=",
"np",
".",
"zeros",
"(",
"[",
"_max_word_length",
"]",
",",
"dtype",
"=",
"id_nptype",
")",
"r",
"[",
":",
"]",
"=",
"_pad_id",
"r",
"[",
"0",
"]",
"=",
"_bow_id",
"r",
"[",
"1",
"]",
"=",
"c",
"r",
"[",
"2",
"]",
"=",
"_eow_id",
"return",
"tf",
".",
"constant",
"(",
"r",
",",
"dtype",
"=",
"id_dtype",
")",
"bos_ids",
"=",
"_make_bos_eos",
"(",
"_bos_id",
")",
"eos_ids",
"=",
"_make_bos_eos",
"(",
"_eos_id",
")",
"def",
"token2ids",
"(",
"token",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"\"token2ids_preprocessor\"",
")",
":",
"char_ids",
"=",
"tf",
".",
"decode_raw",
"(",
"token",
",",
"tf",
".",
"uint8",
",",
"name",
"=",
"'decode_raw2get_char_ids'",
")",
"char_ids",
"=",
"tf",
".",
"cast",
"(",
"char_ids",
",",
"tf",
".",
"int32",
",",
"name",
"=",
"'cast2int_token'",
")",
"char_ids",
"=",
"tf",
".",
"strided_slice",
"(",
"char_ids",
",",
"[",
"0",
"]",
",",
"[",
"max_word_length",
"-",
"2",
"]",
",",
"[",
"1",
"]",
",",
"name",
"=",
"'slice2resized_token'",
")",
"ids_num",
"=",
"tf",
".",
"shape",
"(",
"char_ids",
")",
"[",
"0",
"]",
"fill_ids_num",
"=",
"(",
"_max_word_length",
"-",
"2",
")",
"-",
"ids_num",
"pads",
"=",
"tf",
".",
"fill",
"(",
"[",
"fill_ids_num",
"]",
",",
"_pad_id",
")",
"bow_token_eow_pads",
"=",
"tf",
".",
"concat",
"(",
"[",
"[",
"_bow_id",
"]",
",",
"char_ids",
",",
"[",
"_eow_id",
"]",
",",
"pads",
"]",
",",
"0",
",",
"name",
"=",
"'concat2bow_token_eow_pads'",
")",
"return",
"bow_token_eow_pads",
"def",
"sentence_tagging_and_padding",
"(",
"sen_dim",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"\"sentence_tagging_and_padding_preprocessor\"",
")",
":",
"sen",
"=",
"sen_dim",
"[",
"0",
"]",
"dim",
"=",
"sen_dim",
"[",
"1",
"]",
"extra_dim",
"=",
"tf",
".",
"shape",
"(",
"sen",
")",
"[",
"0",
"]",
"-",
"dim",
"sen",
"=",
"tf",
".",
"slice",
"(",
"sen",
",",
"[",
"0",
",",
"0",
"]",
",",
"[",
"dim",
",",
"max_word_length",
"]",
",",
"name",
"=",
"'slice2sen'",
")",
"bos_sen_eos",
"=",
"tf",
".",
"concat",
"(",
"[",
"[",
"bos_ids",
"]",
",",
"sen",
",",
"[",
"eos_ids",
"]",
"]",
",",
"0",
",",
"name",
"=",
"'concat2bos_sen_eos'",
")",
"bos_sen_eos_plus_one",
"=",
"bos_sen_eos",
"+",
"1",
"bos_sen_eos_pads",
"=",
"tf",
".",
"pad",
"(",
"bos_sen_eos_plus_one",
",",
"[",
"[",
"0",
",",
"extra_dim",
"]",
",",
"[",
"0",
",",
"0",
"]",
"]",
",",
"\"CONSTANT\"",
",",
"name",
"=",
"'pad2bos_sen_eos_pads'",
")",
"return",
"bos_sen_eos_pads",
"# Input placeholders to the biLM.",
"tokens",
"=",
"tf",
".",
"placeholder",
"(",
"shape",
"=",
"(",
"None",
",",
"None",
")",
",",
"dtype",
"=",
"tf",
".",
"string",
",",
"name",
"=",
"'ph2tokens'",
")",
"sequence_len",
"=",
"tf",
".",
"placeholder",
"(",
"shape",
"=",
"(",
"None",
",",
")",
",",
"dtype",
"=",
"tf",
".",
"int32",
",",
"name",
"=",
"'ph2sequence_len'",
")",
"tok_shape",
"=",
"tf",
".",
"shape",
"(",
"tokens",
")",
"line_tokens",
"=",
"tf",
".",
"reshape",
"(",
"tokens",
",",
"shape",
"=",
"[",
"-",
"1",
"]",
",",
"name",
"=",
"'reshape2line_tokens'",
")",
"with",
"tf",
".",
"device",
"(",
"'/cpu:0'",
")",
":",
"tok_ids",
"=",
"tf",
".",
"map_fn",
"(",
"token2ids",
",",
"line_tokens",
",",
"dtype",
"=",
"tf",
".",
"int32",
",",
"back_prop",
"=",
"False",
",",
"parallel_iterations",
"=",
"_parallel_iterations",
",",
"name",
"=",
"'map_fn2get_tok_ids'",
")",
"tok_ids",
"=",
"tf",
".",
"reshape",
"(",
"tok_ids",
",",
"[",
"tok_shape",
"[",
"0",
"]",
",",
"tok_shape",
"[",
"1",
"]",
",",
"-",
"1",
"]",
",",
"name",
"=",
"'reshape2tok_ids'",
")",
"with",
"tf",
".",
"device",
"(",
"'/cpu:0'",
")",
":",
"sen_ids",
"=",
"tf",
".",
"map_fn",
"(",
"sentence_tagging_and_padding",
",",
"(",
"tok_ids",
",",
"sequence_len",
")",
",",
"dtype",
"=",
"tf",
".",
"int32",
",",
"back_prop",
"=",
"False",
",",
"parallel_iterations",
"=",
"_parallel_iterations",
",",
"name",
"=",
"'map_fn2get_sen_ids'",
")",
"# Build the biLM graph.",
"bilm",
"=",
"BidirectionalLanguageModel",
"(",
"options",
",",
"str",
"(",
"weight_file",
")",
",",
"max_batch_size",
"=",
"_max_batch_size",
")",
"embeddings_op",
"=",
"bilm",
"(",
"sen_ids",
")",
"# Get an op to compute ELMo (weighted average of the internal biLM layers)",
"elmo_output",
"=",
"weight_layers",
"(",
"'elmo_output'",
",",
"embeddings_op",
",",
"l2_coef",
"=",
"0.0",
")",
"weighted_op",
"=",
"elmo_output",
"[",
"'weighted_op'",
"]",
"mean_op",
"=",
"elmo_output",
"[",
"'mean_op'",
"]",
"word_emb",
"=",
"elmo_output",
"[",
"'word_emb'",
"]",
"lstm_outputs1",
"=",
"elmo_output",
"[",
"'lstm_outputs1'",
"]",
"lstm_outputs2",
"=",
"elmo_output",
"[",
"'lstm_outputs2'",
"]",
"hub",
".",
"add_signature",
"(",
"\"tokens\"",
",",
"{",
"\"tokens\"",
":",
"tokens",
",",
"\"sequence_len\"",
":",
"sequence_len",
"}",
",",
"{",
"\"elmo\"",
":",
"weighted_op",
",",
"\"default\"",
":",
"mean_op",
",",
"\"word_emb\"",
":",
"word_emb",
",",
"\"lstm_outputs1\"",
":",
"lstm_outputs1",
",",
"\"lstm_outputs2\"",
":",
"lstm_outputs2",
",",
"\"version\"",
":",
"version",
"}",
")",
"# #########################Next signature############################# #",
"# Input placeholders to the biLM.",
"def_strings",
"=",
"tf",
".",
"placeholder",
"(",
"shape",
"=",
"(",
"None",
")",
",",
"dtype",
"=",
"tf",
".",
"string",
")",
"def_tokens_sparse",
"=",
"tf",
".",
"string_split",
"(",
"def_strings",
")",
"def_tokens_dense",
"=",
"tf",
".",
"sparse_to_dense",
"(",
"sparse_indices",
"=",
"def_tokens_sparse",
".",
"indices",
",",
"output_shape",
"=",
"def_tokens_sparse",
".",
"dense_shape",
",",
"sparse_values",
"=",
"def_tokens_sparse",
".",
"values",
",",
"default_value",
"=",
"''",
")",
"def_mask",
"=",
"tf",
".",
"not_equal",
"(",
"def_tokens_dense",
",",
"''",
")",
"def_int_mask",
"=",
"tf",
".",
"cast",
"(",
"def_mask",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"def_sequence_len",
"=",
"tf",
".",
"reduce_sum",
"(",
"def_int_mask",
",",
"axis",
"=",
"-",
"1",
")",
"def_tok_shape",
"=",
"tf",
".",
"shape",
"(",
"def_tokens_dense",
")",
"def_line_tokens",
"=",
"tf",
".",
"reshape",
"(",
"def_tokens_dense",
",",
"shape",
"=",
"[",
"-",
"1",
"]",
",",
"name",
"=",
"'reshape2line_tokens'",
")",
"with",
"tf",
".",
"device",
"(",
"'/cpu:0'",
")",
":",
"def_tok_ids",
"=",
"tf",
".",
"map_fn",
"(",
"token2ids",
",",
"def_line_tokens",
",",
"dtype",
"=",
"tf",
".",
"int32",
",",
"back_prop",
"=",
"False",
",",
"parallel_iterations",
"=",
"_parallel_iterations",
",",
"name",
"=",
"'map_fn2get_tok_ids'",
")",
"def_tok_ids",
"=",
"tf",
".",
"reshape",
"(",
"def_tok_ids",
",",
"[",
"def_tok_shape",
"[",
"0",
"]",
",",
"def_tok_shape",
"[",
"1",
"]",
",",
"-",
"1",
"]",
",",
"name",
"=",
"'reshape2tok_ids'",
")",
"with",
"tf",
".",
"device",
"(",
"'/cpu:0'",
")",
":",
"def_sen_ids",
"=",
"tf",
".",
"map_fn",
"(",
"sentence_tagging_and_padding",
",",
"(",
"def_tok_ids",
",",
"def_sequence_len",
")",
",",
"dtype",
"=",
"tf",
".",
"int32",
",",
"back_prop",
"=",
"False",
",",
"parallel_iterations",
"=",
"_parallel_iterations",
",",
"name",
"=",
"'map_fn2get_sen_ids'",
")",
"# Get ops to compute the LM embeddings.",
"def_embeddings_op",
"=",
"bilm",
"(",
"def_sen_ids",
")",
"# Get an op to compute ELMo (weighted average of the internal biLM layers)",
"def_elmo_output",
"=",
"weight_layers",
"(",
"'elmo_output'",
",",
"def_embeddings_op",
",",
"l2_coef",
"=",
"0.0",
",",
"reuse",
"=",
"True",
")",
"def_weighted_op",
"=",
"def_elmo_output",
"[",
"'weighted_op'",
"]",
"def_mean_op",
"=",
"def_elmo_output",
"[",
"'mean_op'",
"]",
"def_word_emb",
"=",
"def_elmo_output",
"[",
"'word_emb'",
"]",
"def_lstm_outputs1",
"=",
"def_elmo_output",
"[",
"'lstm_outputs1'",
"]",
"def_lstm_outputs2",
"=",
"def_elmo_output",
"[",
"'lstm_outputs2'",
"]",
"hub",
".",
"add_signature",
"(",
"\"default\"",
",",
"{",
"\"strings\"",
":",
"def_strings",
"}",
",",
"{",
"\"elmo\"",
":",
"def_weighted_op",
",",
"\"default\"",
":",
"def_mean_op",
",",
"\"word_emb\"",
":",
"def_word_emb",
",",
"\"lstm_outputs1\"",
":",
"def_lstm_outputs1",
",",
"\"lstm_outputs2\"",
":",
"def_lstm_outputs2",
",",
"\"version\"",
":",
"version",
"}",
")",
"return",
"hub",
".",
"create_module_spec",
"(",
"module_fn",
")"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
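The module_fn above registers two TF-Hub signatures: "tokens" (pre-tokenized input plus sequence lengths) and "default" (raw whitespace-separated strings). A minimal consumption sketch, assuming a TF 1.x environment with tensorflow_hub and a hypothetical directory elmo_hub_dir exported from this spec:

import tensorflow as tf
import tensorflow_hub as hub  # assumes the TF 1.x hub.Module API

elmo = hub.Module('elmo_hub_dir', trainable=False)  # hypothetical exported module
tokens = [['the', 'cat', 'sat', ''], ['hello', 'world', '', '']]  # ''-padded batch
out = elmo({'tokens': tokens, 'sequence_len': [3, 2]},
           signature='tokens', as_dict=True)
with tf.Session() as sess:
    sess.run([tf.global_variables_initializer(), tf.tables_initializer()])
    elmo_vecs = sess.run(out['elmo'])  # [batch, time, dim] weighted biLM layers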
test
|
export2hub
|
Exports a TF-Hub module
|
deeppavlov/models/elmo/elmo2tfhub.py
|
def export2hub(weight_file, hub_dir, options):
"""Exports a TF-Hub module
"""
spec = make_module_spec(options, str(weight_file))
try:
with tf.Graph().as_default():
module = hub.Module(spec)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
if hub_dir.exists():
shutil.rmtree(hub_dir)
module.export(str(hub_dir), sess)
finally:
pass
|
def export2hub(weight_file, hub_dir, options):
"""Exports a TF-Hub module
"""
spec = make_module_spec(options, str(weight_file))
try:
with tf.Graph().as_default():
module = hub.Module(spec)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
if hub_dir.exists():
shutil.rmtree(hub_dir)
module.export(str(hub_dir), sess)
finally:
pass
|
[
"Exports",
"a",
"TF",
"-",
"Hub",
"module"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/elmo/elmo2tfhub.py#L190-L206
|
[
"def",
"export2hub",
"(",
"weight_file",
",",
"hub_dir",
",",
"options",
")",
":",
"spec",
"=",
"make_module_spec",
"(",
"options",
",",
"str",
"(",
"weight_file",
")",
")",
"try",
":",
"with",
"tf",
".",
"Graph",
"(",
")",
".",
"as_default",
"(",
")",
":",
"module",
"=",
"hub",
".",
"Module",
"(",
"spec",
")",
"with",
"tf",
".",
"Session",
"(",
")",
"as",
"sess",
":",
"sess",
".",
"run",
"(",
"tf",
".",
"global_variables_initializer",
"(",
")",
")",
"if",
"hub_dir",
".",
"exists",
"(",
")",
":",
"shutil",
".",
"rmtree",
"(",
"hub_dir",
")",
"module",
".",
"export",
"(",
"str",
"(",
"hub_dir",
")",
",",
"sess",
")",
"finally",
":",
"pass"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
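A hedged usage sketch for export2hub; the checkpoint, options file, and output directory below are hypothetical, and a TF 1.x environment with deeppavlov installed is assumed:

import json
from pathlib import Path
from deeppavlov.models.elmo.elmo2tfhub import export2hub  # assumes deeppavlov is installed

options = json.load(open('options.json'))  # hypothetical biLM hyperparameters file
export2hub(Path('lm_weights.hdf5'),        # hypothetical trained biLM checkpoint
           Path('elmo_hub_dir'),           # destination; recreated if it already exists
           options)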
test
|
show_details
|
Format catalog item output
Parameters:
item_data: item's attributes values
Returns:
txt: the item's attributes formatted as a markdown string
|
deeppavlov/agents/ecommerce_agent/ecommerce_agent.py
|
def show_details(item_data: Dict[Any, Any]) -> str:
"""Format catalog item output
Parameters:
item_data: item's attributes values
Returns:
txt: the item's attributes formatted as a markdown string
"""
txt = ""
for key, value in item_data.items():
txt += "**" + str(key) + "**" + ': ' + str(value) + " \n"
return txt
|
def show_details(item_data: Dict[Any, Any]) -> str:
"""Format catalog item output
Parameters:
item_data: item's attributes values
Returns:
txt: the item's attributes formatted as a markdown string
"""
txt = ""
for key, value in item_data.items():
txt += "**" + str(key) + "**" + ': ' + str(value) + " \n"
return txt
|
[
"Format",
"catalog",
"item",
"output"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/agents/ecommerce_agent/ecommerce_agent.py#L158-L173
|
[
"def",
"show_details",
"(",
"item_data",
":",
"Dict",
"[",
"Any",
",",
"Any",
"]",
")",
"->",
"str",
":",
"txt",
"=",
"\"\"",
"for",
"key",
",",
"value",
"in",
"item_data",
".",
"items",
"(",
")",
":",
"txt",
"+=",
"\"**\"",
"+",
"str",
"(",
"key",
")",
"+",
"\"**\"",
"+",
"': '",
"+",
"str",
"(",
"value",
")",
"+",
"\" \\n\"",
"return",
"txt"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
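Since show_details only string-formats a dict, it is easy to exercise standalone; a self-contained restatement of the same logic with a sample item:

from typing import Any, Dict

def show_details(item_data: Dict[Any, Any]) -> str:
    # One markdown "**key**: value" line per attribute, as in the original.
    txt = ""
    for key, value in item_data.items():
        txt += "**" + str(key) + "**" + ': ' + str(value) + " \n"
    return txt

print(show_details({'Title': 'Coffee grinder', 'Price': 19.99}))
# **Title**: Coffee grinder
# **Price**: 19.99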
test
|
make_agent
|
Make an agent
Returns:
agent: created Ecommerce agent
|
deeppavlov/agents/ecommerce_agent/ecommerce_agent.py
|
def make_agent() -> EcommerceAgent:
"""Make an agent
Returns:
agent: created Ecommerce agent
"""
config_path = find_config('tfidf_retrieve')
skill = build_model(config_path)
agent = EcommerceAgent(skills=[skill])
return agent
|
def make_agent() -> EcommerceAgent:
"""Make an agent
Returns:
agent: created Ecommerce agent
"""
config_path = find_config('tfidf_retrieve')
skill = build_model(config_path)
agent = EcommerceAgent(skills=[skill])
return agent
|
[
"Make",
"an",
"agent"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/agents/ecommerce_agent/ecommerce_agent.py#L176-L186
|
[
"def",
"make_agent",
"(",
")",
"->",
"EcommerceAgent",
":",
"config_path",
"=",
"find_config",
"(",
"'tfidf_retrieve'",
")",
"skill",
"=",
"build_model",
"(",
"config_path",
")",
"agent",
"=",
"EcommerceAgent",
"(",
"skills",
"=",
"[",
"skill",
"]",
")",
"return",
"agent"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
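A hedged launch sketch for make_agent, assuming deeppavlov is installed and the tfidf_retrieve config plus its data are set up locally; the call shape (utterance batch plus dialog ids) follows the _call signature shown later in this split, but treat it as illustrative:

from deeppavlov.agents.ecommerce_agent.ecommerce_agent import make_agent  # assumes deeppavlov is installed

agent = make_agent()                            # builds the tfidf_retrieve skill
replies = agent(["bluetooth headphones"], [0])  # hypothetical utterance batch and dialog id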
test
|
main
|
Parse parameters and run ms bot framework
|
deeppavlov/agents/ecommerce_agent/ecommerce_agent.py
|
def main():
"""Parse parameters and run ms bot framework"""
args = parser.parse_args()
run_ms_bot_framework_server(agent_generator=make_agent,
app_id=args.ms_id,
app_secret=args.ms_secret,
stateful=True)
|
def main():
"""Parse parameters and run ms bot framework"""
args = parser.parse_args()
run_ms_bot_framework_server(agent_generator=make_agent,
app_id=args.ms_id,
app_secret=args.ms_secret,
stateful=True)
|
[
"Parse",
"parameters",
"and",
"run",
"ms",
"bot",
"framework"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/agents/ecommerce_agent/ecommerce_agent.py#L189-L196
|
[
"def",
"main",
"(",
")",
":",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"run_ms_bot_framework_server",
"(",
"agent_generator",
"=",
"make_agent",
",",
"app_id",
"=",
"args",
".",
"ms_id",
",",
"app_secret",
"=",
"args",
".",
"ms_secret",
",",
"stateful",
"=",
"True",
")"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
test
|
EcommerceAgent._call
|
Processes batch of utterances and returns corresponding responses batch.
Args:
utterances_batch: Batch of incoming utterances.
utterances_ids: Batch of dialog IDs corresponding to incoming utterances.
Returns:
responses: A batch of responses corresponding to the
utterance batch received by agent.
|
deeppavlov/agents/ecommerce_agent/ecommerce_agent.py
|
def _call(self, utterances_batch: List[str], utterances_ids: List[int] = None) -> List[RichMessage]:
"""Processes batch of utterances and returns corresponding responses batch.
Args:
utterances_batch: Batch of incoming utterances.
utterances_ids: Batch of dialog IDs corresponding to incoming utterances.
Returns:
responses: A batch of responses corresponding to the
utterance batch received by agent.
"""
rich_message = RichMessage()
for utt_id, utt in enumerate(utterances_batch):
if utterances_ids:
id_ = utterances_ids[utt_id]
log.debug(f'Utterance: {utt}')
if utt == "/start":
welcome = "I am a new e-commerce bot. I will help you to find products that you are looking for. Please type your request in plain text."
rich_message.add_control(PlainText(welcome))
continue
if utt[0] == "@":
command, *parts = utt.split(":")
log.debug(f'Actions: {parts}')
if command == "@details":
batch_index = int(parts[0]) # batch index in history list
item_index = int(parts[1]) # index in batch
rich_message.add_control(PlainText(show_details(
self.history[id_][batch_index][item_index])))
continue
if command == "@entropy":
state = self.history[id_][int(parts[0])]
state[parts[1]] = parts[2]
state["start"] = 0
state["stop"] = 5
utt = state['query']
self.states[id_] = state
if command == "@next":
state = self.history[id_][int(parts[0])]
state['start'] = state['stop']
state['stop'] = state['stop'] + 5
utt = state['query']
self.states[id_] = state
else:
if id_ not in self.states:
self.states[id_] = {}
self.states[id_]["start"] = 0
self.states[id_]["stop"] = 5
responses_batch, confidences_batch, state_batch = self.skills[0](
[utt], self.history[id_], [self.states[id_]])
# update `self.states` with retrieved results
self.states[id_] = state_batch[0]
self.states[id_]["query"] = utt
items_batch, entropy_batch = responses_batch
for batch_idx, items in enumerate(items_batch):
self.history[id_].append(items)
self.history[id_].append(self.states[id_])
for idx, item in enumerate(items):
rich_message.add_control(_draw_item(item, idx, self.history[id_]))
if len(items) == self.states[id_]['stop'] - self.states[id_]['start']:
buttons_frame = _draw_tail(entropy_batch[batch_idx], self.history[id_])
rich_message.add_control(buttons_frame)
return [rich_message]
|
def _call(self, utterances_batch: List[str], utterances_ids: List[int] = None) -> List[RichMessage]:
"""Processes batch of utterances and returns corresponding responses batch.
Args:
utterances_batch: Batch of incoming utterances.
utterances_ids: Batch of dialog IDs corresponding to incoming utterances.
Returns:
responses: A batch of responses corresponding to the
utterance batch received by agent.
"""
rich_message = RichMessage()
for utt_id, utt in enumerate(utterances_batch):
if utterances_ids:
id_ = utterances_ids[utt_id]
log.debug(f'Utterance: {utt}')
if utt == "/start":
welcome = "I am a new e-commerce bot. I will help you to find products that you are looking for. Please type your request in plain text."
rich_message.add_control(PlainText(welcome))
continue
if utt[0] == "@":
command, *parts = utt.split(":")
log.debug(f'Actions: {parts}')
if command == "@details":
batch_index = int(parts[0]) # batch index in history list
item_index = int(parts[1]) # index in batch
rich_message.add_control(PlainText(show_details(
self.history[id_][batch_index][item_index])))
continue
if command == "@entropy":
state = self.history[id_][int(parts[0])]
state[parts[1]] = parts[2]
state["start"] = 0
state["stop"] = 5
utt = state['query']
self.states[id_] = state
if command == "@next":
state = self.history[id_][int(parts[0])]
state['start'] = state['stop']
state['stop'] = state['stop'] + 5
utt = state['query']
self.states[id_] = state
else:
if id_ not in self.states:
self.states[id_] = {}
self.states[id_]["start"] = 0
self.states[id_]["stop"] = 5
responses_batch, confidences_batch, state_batch = self.skills[0](
[utt], self.history[id_], [self.states[id_]])
# update `self.states` with retrieved results
self.states[id_] = state_batch[0]
self.states[id_]["query"] = utt
items_batch, entropy_batch = responses_batch
for batch_idx, items in enumerate(items_batch):
self.history[id_].append(items)
self.history[id_].append(self.states[id_])
for idx, item in enumerate(items):
rich_message.add_control(_draw_item(item, idx, self.history[id_]))
if len(items) == self.states[id_]['stop'] - self.states[id_]['start']:
buttons_frame = _draw_tail(entropy_batch[batch_idx], self.history[id_])
rich_message.add_control(buttons_frame)
return [rich_message]
|
[
"Processes",
"batch",
"of",
"utterances",
"and",
"returns",
"corresponding",
"responses",
"batch",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/agents/ecommerce_agent/ecommerce_agent.py#L53-L131
|
[
"def",
"_call",
"(",
"self",
",",
"utterances_batch",
":",
"List",
"[",
"str",
"]",
",",
"utterances_ids",
":",
"List",
"[",
"int",
"]",
"=",
"None",
")",
"->",
"List",
"[",
"RichMessage",
"]",
":",
"rich_message",
"=",
"RichMessage",
"(",
")",
"for",
"utt_id",
",",
"utt",
"in",
"enumerate",
"(",
"utterances_batch",
")",
":",
"if",
"utterances_ids",
":",
"id_",
"=",
"utterances_ids",
"[",
"utt_id",
"]",
"log",
".",
"debug",
"(",
"f'Utterance: {utt}'",
")",
"if",
"utt",
"==",
"\"/start\"",
":",
"welcome",
"=",
"\"I am a new e-commerce bot. I will help you to find products that you are looking for. Please type your request in plain text.\"",
"rich_message",
".",
"add_control",
"(",
"PlainText",
"(",
"welcome",
")",
")",
"continue",
"if",
"utt",
"[",
"0",
"]",
"==",
"\"@\"",
":",
"command",
",",
"",
"*",
"parts",
"=",
"utt",
".",
"split",
"(",
"\":\"",
")",
"log",
".",
"debug",
"(",
"f'Actions: {parts}'",
")",
"if",
"command",
"==",
"\"@details\"",
":",
"batch_index",
"=",
"int",
"(",
"parts",
"[",
"0",
"]",
")",
"# batch index in history list",
"item_index",
"=",
"int",
"(",
"parts",
"[",
"1",
"]",
")",
"# index in batch",
"rich_message",
".",
"add_control",
"(",
"PlainText",
"(",
"show_details",
"(",
"self",
".",
"history",
"[",
"id_",
"]",
"[",
"batch_index",
"]",
"[",
"item_index",
"]",
")",
")",
")",
"continue",
"if",
"command",
"==",
"\"@entropy\"",
":",
"state",
"=",
"self",
".",
"history",
"[",
"id_",
"]",
"[",
"int",
"(",
"parts",
"[",
"0",
"]",
")",
"]",
"state",
"[",
"parts",
"[",
"1",
"]",
"]",
"=",
"parts",
"[",
"2",
"]",
"state",
"[",
"\"start\"",
"]",
"=",
"0",
"state",
"[",
"\"stop\"",
"]",
"=",
"5",
"utt",
"=",
"state",
"[",
"'query'",
"]",
"self",
".",
"states",
"[",
"id_",
"]",
"=",
"state",
"if",
"command",
"==",
"\"@next\"",
":",
"state",
"=",
"self",
".",
"history",
"[",
"id_",
"]",
"[",
"int",
"(",
"parts",
"[",
"0",
"]",
")",
"]",
"state",
"[",
"'start'",
"]",
"=",
"state",
"[",
"'stop'",
"]",
"state",
"[",
"'stop'",
"]",
"=",
"state",
"[",
"'stop'",
"]",
"+",
"5",
"utt",
"=",
"state",
"[",
"'query'",
"]",
"self",
".",
"states",
"[",
"id_",
"]",
"=",
"state",
"else",
":",
"if",
"id_",
"not",
"in",
"self",
".",
"states",
":",
"self",
".",
"states",
"[",
"id_",
"]",
"=",
"{",
"}",
"self",
".",
"states",
"[",
"id_",
"]",
"[",
"\"start\"",
"]",
"=",
"0",
"self",
".",
"states",
"[",
"id_",
"]",
"[",
"\"stop\"",
"]",
"=",
"5",
"responses_batch",
",",
"confidences_batch",
",",
"state_batch",
"=",
"self",
".",
"skills",
"[",
"0",
"]",
"(",
"[",
"utt",
"]",
",",
"self",
".",
"history",
"[",
"id_",
"]",
",",
"[",
"self",
".",
"states",
"[",
"id_",
"]",
"]",
")",
"# update `self.states` with retrieved results",
"self",
".",
"states",
"[",
"id_",
"]",
"=",
"state_batch",
"[",
"0",
"]",
"self",
".",
"states",
"[",
"id_",
"]",
"[",
"\"query\"",
"]",
"=",
"utt",
"items_batch",
",",
"entropy_batch",
"=",
"responses_batch",
"for",
"batch_idx",
",",
"items",
"in",
"enumerate",
"(",
"items_batch",
")",
":",
"self",
".",
"history",
"[",
"id_",
"]",
".",
"append",
"(",
"items",
")",
"self",
".",
"history",
"[",
"id_",
"]",
".",
"append",
"(",
"self",
".",
"states",
"[",
"id_",
"]",
")",
"for",
"idx",
",",
"item",
"in",
"enumerate",
"(",
"items",
")",
":",
"rich_message",
".",
"add_control",
"(",
"_draw_item",
"(",
"item",
",",
"idx",
",",
"self",
".",
"history",
"[",
"id_",
"]",
")",
")",
"if",
"len",
"(",
"items",
")",
"==",
"self",
".",
"states",
"[",
"id_",
"]",
"[",
"'stop'",
"]",
"-",
"self",
".",
"states",
"[",
"id_",
"]",
"[",
"'start'",
"]",
":",
"buttons_frame",
"=",
"_draw_tail",
"(",
"entropy_batch",
"[",
"batch_idx",
"]",
",",
"self",
".",
"history",
"[",
"id_",
"]",
")",
"rich_message",
".",
"add_control",
"(",
"buttons_frame",
")",
"return",
"[",
"rich_message",
"]"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
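The "@"-prefixed callback strings handled in _call encode a command plus ":"-separated arguments; a self-contained sketch of just that dispatch step:

def parse_action(utt: str):
    # Mirrors the branching in _call: "@details:3:1" -> command plus string parts.
    command, *parts = utt.split(":")
    if command == "@details":
        return ("details", int(parts[0]), int(parts[1]))   # batch index, item index
    if command == "@entropy":
        return ("entropy", int(parts[0]), parts[1], parts[2])  # state index, key, value
    if command == "@next":
        return ("next", int(parts[0]))
    return ("query", utt)  # plain text falls through to the retrieval skill

print(parse_action("@details:3:1"))  # ('details', 3, 1)
print(parse_action("@next:2"))       # ('next', 2)
print(parse_action("red kettle"))    # ('query', 'red kettle')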
test
|
TemporalDropout
|
Drops with :dropout probability temporal steps of input 3D tensor
|
deeppavlov/models/morpho_tagger/cells.py
|
def TemporalDropout(inputs, dropout=0.0):
"""
Drops with :dropout probability temporal steps of input 3D tensor
"""
# TO DO: adapt for >3D tensors
if dropout == 0.0:
return inputs
inputs_func = lambda x: kb.ones_like(inputs[:, :, 0:1])
inputs_mask = kl.Lambda(inputs_func)(inputs)
inputs_mask = kl.Dropout(dropout)(inputs_mask)
tiling_shape = [1, 1, kb.shape(inputs)[2]] + [1] * (kb.ndim(inputs) - 3)
inputs_mask = kl.Lambda(kb.tile, arguments={"n": tiling_shape},
output_shape=inputs._keras_shape[1:])(inputs_mask)
answer = kl.Multiply()([inputs, inputs_mask])
return answer
|
def TemporalDropout(inputs, dropout=0.0):
"""
Drops with :dropout probability temporal steps of input 3D tensor
"""
# TO DO: adapt for >3D tensors
if dropout == 0.0:
return inputs
inputs_func = lambda x: kb.ones_like(inputs[:, :, 0:1])
inputs_mask = kl.Lambda(inputs_func)(inputs)
inputs_mask = kl.Dropout(dropout)(inputs_mask)
tiling_shape = [1, 1, kb.shape(inputs)[2]] + [1] * (kb.ndim(inputs) - 3)
inputs_mask = kl.Lambda(kb.tile, arguments={"n": tiling_shape},
output_shape=inputs._keras_shape[1:])(inputs_mask)
answer = kl.Multiply()([inputs, inputs_mask])
return answer
|
[
"Drops",
"with",
":",
"dropout",
"probability",
"temporal",
"steps",
"of",
"input",
"3D",
"tensor"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/morpho_tagger/cells.py#L141-L155
|
[
"def",
"TemporalDropout",
"(",
"inputs",
",",
"dropout",
"=",
"0.0",
")",
":",
"# TO DO: adapt for >3D tensors",
"if",
"dropout",
"==",
"0.0",
":",
"return",
"inputs",
"inputs_func",
"=",
"lambda",
"x",
":",
"kb",
".",
"ones_like",
"(",
"inputs",
"[",
":",
",",
":",
",",
"0",
":",
"1",
"]",
")",
"inputs_mask",
"=",
"kl",
".",
"Lambda",
"(",
"inputs_func",
")",
"(",
"inputs",
")",
"inputs_mask",
"=",
"kl",
".",
"Dropout",
"(",
"dropout",
")",
"(",
"inputs_mask",
")",
"tiling_shape",
"=",
"[",
"1",
",",
"1",
",",
"kb",
".",
"shape",
"(",
"inputs",
")",
"[",
"2",
"]",
"]",
"+",
"[",
"1",
"]",
"*",
"(",
"kb",
".",
"ndim",
"(",
"inputs",
")",
"-",
"3",
")",
"inputs_mask",
"=",
"kl",
".",
"Lambda",
"(",
"kb",
".",
"tile",
",",
"arguments",
"=",
"{",
"\"n\"",
":",
"tiling_shape",
"}",
",",
"output_shape",
"=",
"inputs",
".",
"_keras_shape",
"[",
"1",
":",
"]",
")",
"(",
"inputs_mask",
")",
"answer",
"=",
"kl",
".",
"Multiply",
"(",
")",
"(",
"[",
"inputs",
",",
"inputs_mask",
"]",
")",
"return",
"answer"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
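The trick in TemporalDropout is a mask of shape (batch, time, 1) tiled across the feature axis, so a dropped timestep zeroes all of its features at once. A NumPy sketch of the same masking, inference-style, without Keras and without the 1/(1-p) train-time rescaling that kl.Dropout applies:

import numpy as np

def temporal_dropout(inputs: np.ndarray, dropout: float = 0.0) -> np.ndarray:
    if dropout == 0.0:
        return inputs
    # One keep/drop decision per (batch, timestep); broadcasting tiles it over features.
    mask = (np.random.random_sample(inputs.shape[:2] + (1,)) >= dropout)
    return inputs * mask.astype(inputs.dtype)

x = np.ones((2, 5, 4))
print(temporal_dropout(x, 0.4)[0])  # whole timestep rows are zeroed together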
test
|
positions_func
|
A layer filling i-th column of a 2D tensor with
1+ln(1+i) when it contains a meaningful symbol
and with 0 when it contains PAD
|
deeppavlov/models/morpho_tagger/cells.py
|
def positions_func(inputs, pad=0):
"""
A layer filling i-th column of a 2D tensor with
1+ln(1+i) when it contains a meaningful symbol
and with 0 when it contains PAD
"""
position_inputs = kb.cumsum(kb.ones_like(inputs, dtype="float32"), axis=1)
position_inputs *= kb.cast(kb.not_equal(inputs, pad), "float32")
return kb.log(1.0 + position_inputs)
|
def positions_func(inputs, pad=0):
"""
A layer filling i-th column of a 2D tensor with
1+ln(1+i) when it contains a meaningful symbol
and with 0 when it contains PAD
"""
position_inputs = kb.cumsum(kb.ones_like(inputs, dtype="float32"), axis=1)
position_inputs *= kb.cast(kb.not_equal(inputs, pad), "float32")
return kb.log(1.0 + position_inputs)
|
[
"A",
"layer",
"filling",
"i",
"-",
"th",
"column",
"of",
"a",
"2D",
"tensor",
"with",
"1",
"+",
"ln",
"(",
"1",
"+",
"i",
")",
"when",
"it",
"contains",
"a",
"meaningful",
"symbol",
"and",
"with",
"0",
"when",
"it",
"contains",
"PAD"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/morpho_tagger/cells.py#L158-L166
|
[
"def",
"positions_func",
"(",
"inputs",
",",
"pad",
"=",
"0",
")",
":",
"position_inputs",
"=",
"kb",
".",
"cumsum",
"(",
"kb",
".",
"ones_like",
"(",
"inputs",
",",
"dtype",
"=",
"\"float32\"",
")",
",",
"axis",
"=",
"1",
")",
"position_inputs",
"*=",
"kb",
".",
"cast",
"(",
"kb",
".",
"not_equal",
"(",
"inputs",
",",
"pad",
")",
",",
"\"float32\"",
")",
"return",
"kb",
".",
"log",
"(",
"1.0",
"+",
"position_inputs",
")"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
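A NumPy sketch of the same computation: positions count 1, 2, 3, ... along axis 1, PAD cells are zeroed out, and the result is log(1 + position):

import numpy as np

def positions(inputs: np.ndarray, pad: int = 0) -> np.ndarray:
    pos = np.cumsum(np.ones_like(inputs, dtype="float32"), axis=1)
    pos *= (inputs != pad).astype("float32")  # PAD cells become 0, so log(1+0) = 0
    return np.log(1.0 + pos)

print(positions(np.array([[7, 4, 9, 0, 0]])))  # 0 is PAD
# [[0.693 1.099 1.386 0.    0.   ]]   i.e. ln(2), ln(3), ln(4), 0, 0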
test
|
download
|
Download a file from URL to one or several target locations
Args:
dest_file_path: path or list of paths to the file destination files (including file name)
source_url: the source URL
force_download: whether to download the file even if it already exists
|
deeppavlov/core/data/utils.py
|
def download(dest_file_path: [List[Union[str, Path]]], source_url: str, force_download=True):
"""Download a file from URL to one or several target locations
Args:
dest_file_path: path or list of paths to the file destination files (including file name)
source_url: the source URL
force_download: whether to download the file even if it already exists
"""
if isinstance(dest_file_path, list):
dest_file_paths = [Path(path) for path in dest_file_path]
else:
dest_file_paths = [Path(dest_file_path).absolute()]
if not force_download:
to_check = list(dest_file_paths)
dest_file_paths = []
for p in to_check:
if p.exists():
log.info(f'File already exists in {p}')
else:
dest_file_paths.append(p)
if dest_file_paths:
cache_dir = os.getenv('DP_CACHE_DIR')
cached_exists = False
if cache_dir:
first_dest_path = Path(cache_dir) / md5(source_url.encode('utf8')).hexdigest()[:15]
cached_exists = first_dest_path.exists()
else:
first_dest_path = dest_file_paths.pop()
if not cached_exists:
first_dest_path.parent.mkdir(parents=True, exist_ok=True)
simple_download(source_url, first_dest_path)
else:
log.info(f'Found cached {source_url} in {first_dest_path}')
for dest_path in dest_file_paths:
dest_path.parent.mkdir(parents=True, exist_ok=True)
shutil.copy(str(first_dest_path), str(dest_path))
|
def download(dest_file_path: [List[Union[str, Path]]], source_url: str, force_download=True):
"""Download a file from URL to one or several target locations
Args:
dest_file_path: path or list of paths to the file destination files (including file name)
source_url: the source URL
force_download: whether to download the file even if it already exists
"""
if isinstance(dest_file_path, list):
dest_file_paths = [Path(path) for path in dest_file_path]
else:
dest_file_paths = [Path(dest_file_path).absolute()]
if not force_download:
to_check = list(dest_file_paths)
dest_file_paths = []
for p in to_check:
if p.exists():
log.info(f'File already exists in {p}')
else:
dest_file_paths.append(p)
if dest_file_paths:
cache_dir = os.getenv('DP_CACHE_DIR')
cached_exists = False
if cache_dir:
first_dest_path = Path(cache_dir) / md5(source_url.encode('utf8')).hexdigest()[:15]
cached_exists = first_dest_path.exists()
else:
first_dest_path = dest_file_paths.pop()
if not cached_exists:
first_dest_path.parent.mkdir(parents=True, exist_ok=True)
simple_download(source_url, first_dest_path)
else:
log.info(f'Found cached {source_url} in {first_dest_path}')
for dest_path in dest_file_paths:
dest_path.parent.mkdir(parents=True, exist_ok=True)
shutil.copy(str(first_dest_path), str(dest_path))
|
[
"Download",
"a",
"file",
"from",
"URL",
"to",
"one",
"or",
"several",
"target",
"locations"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/data/utils.py#L83-L125
|
[
"def",
"download",
"(",
"dest_file_path",
":",
"[",
"List",
"[",
"Union",
"[",
"str",
",",
"Path",
"]",
"]",
"]",
",",
"source_url",
":",
"str",
",",
"force_download",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"dest_file_path",
",",
"list",
")",
":",
"dest_file_paths",
"=",
"[",
"Path",
"(",
"path",
")",
"for",
"path",
"in",
"dest_file_path",
"]",
"else",
":",
"dest_file_paths",
"=",
"[",
"Path",
"(",
"dest_file_path",
")",
".",
"absolute",
"(",
")",
"]",
"if",
"not",
"force_download",
":",
"to_check",
"=",
"list",
"(",
"dest_file_paths",
")",
"dest_file_paths",
"=",
"[",
"]",
"for",
"p",
"in",
"to_check",
":",
"if",
"p",
".",
"exists",
"(",
")",
":",
"log",
".",
"info",
"(",
"f'File already exists in {p}'",
")",
"else",
":",
"dest_file_paths",
".",
"append",
"(",
"p",
")",
"if",
"dest_file_paths",
":",
"cache_dir",
"=",
"os",
".",
"getenv",
"(",
"'DP_CACHE_DIR'",
")",
"cached_exists",
"=",
"False",
"if",
"cache_dir",
":",
"first_dest_path",
"=",
"Path",
"(",
"cache_dir",
")",
"/",
"md5",
"(",
"source_url",
".",
"encode",
"(",
"'utf8'",
")",
")",
".",
"hexdigest",
"(",
")",
"[",
":",
"15",
"]",
"cached_exists",
"=",
"first_dest_path",
".",
"exists",
"(",
")",
"else",
":",
"first_dest_path",
"=",
"dest_file_paths",
".",
"pop",
"(",
")",
"if",
"not",
"cached_exists",
":",
"first_dest_path",
".",
"parent",
".",
"mkdir",
"(",
"parents",
"=",
"True",
",",
"exist_ok",
"=",
"True",
")",
"simple_download",
"(",
"source_url",
",",
"first_dest_path",
")",
"else",
":",
"log",
".",
"info",
"(",
"f'Found cached {source_url} in {first_dest_path}'",
")",
"for",
"dest_path",
"in",
"dest_file_paths",
":",
"dest_path",
".",
"parent",
".",
"mkdir",
"(",
"parents",
"=",
"True",
",",
"exist_ok",
"=",
"True",
")",
"shutil",
".",
"copy",
"(",
"str",
"(",
"first_dest_path",
")",
",",
"str",
"(",
"dest_path",
")",
")"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
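A hedged usage sketch for download; the URL and paths are hypothetical and the call performs a real network request, so treat it as illustrative only:

from pathlib import Path
from deeppavlov.core.data.utils import download  # assumes deeppavlov is installed

targets = [Path('/tmp/models/a/vocab.txt'), Path('/tmp/models/b/vocab.txt')]
download(targets, 'http://example.com/vocab.txt', force_download=False)
# force_download=False skips copies that already exist; if DP_CACHE_DIR is set,
# the file is fetched once into the cache and then copied to each target.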
test
|
untar
|
Simple tar archive extractor
Args:
file_path: path to the tar file to be extracted
extract_folder: folder to which the files will be extracted
|
deeppavlov/core/data/utils.py
|
def untar(file_path, extract_folder=None):
"""Simple tar archive extractor
Args:
file_path: path to the tar file to be extracted
extract_folder: folder to which the files will be extracted
"""
file_path = Path(file_path)
if extract_folder is None:
extract_folder = file_path.parent
extract_folder = Path(extract_folder)
tar = tarfile.open(file_path)
tar.extractall(extract_folder)
tar.close()
|
def untar(file_path, extract_folder=None):
"""Simple tar archive extractor
Args:
file_path: path to the tar file to be extracted
extract_folder: folder to which the files will be extracted
"""
file_path = Path(file_path)
if extract_folder is None:
extract_folder = file_path.parent
extract_folder = Path(extract_folder)
tar = tarfile.open(file_path)
tar.extractall(extract_folder)
tar.close()
|
[
"Simple",
"tar",
"archive",
"extractor"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/data/utils.py#L128-L142
|
[
"def",
"untar",
"(",
"file_path",
",",
"extract_folder",
"=",
"None",
")",
":",
"file_path",
"=",
"Path",
"(",
"file_path",
")",
"if",
"extract_folder",
"is",
"None",
":",
"extract_folder",
"=",
"file_path",
".",
"parent",
"extract_folder",
"=",
"Path",
"(",
"extract_folder",
")",
"tar",
"=",
"tarfile",
".",
"open",
"(",
"file_path",
")",
"tar",
".",
"extractall",
"(",
"extract_folder",
")",
"tar",
".",
"close",
"(",
")"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
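A self-contained round trip using the same tarfile calls: pack one file, then extract it the way untar does (tarfile.open in the default 'r' mode auto-detects compression):

import tarfile
import tempfile
from pathlib import Path

workdir = Path(tempfile.mkdtemp())
(workdir / 'hello.txt').write_text('hello')
with tarfile.open(workdir / 'arch.tar.gz', 'w:gz') as tar:
    tar.add(workdir / 'hello.txt', arcname='hello.txt')

out_dir = workdir / 'out'
out_dir.mkdir()
tar = tarfile.open(workdir / 'arch.tar.gz')  # compression auto-detected, as in untar
tar.extractall(out_dir)
tar.close()
print((out_dir / 'hello.txt').read_text())   # hello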
test
|
ungzip
|
Simple .gz archive extractor
Args:
file_path: path to the gzip file to be extracted
extract_path: path where the file will be extracted
|
deeppavlov/core/data/utils.py
|
def ungzip(file_path, extract_path: Path = None):
"""Simple .gz archive extractor
Args:
file_path: path to the gzip file to be extracted
extract_path: path where the file will be extracted
"""
CHUNK = 16 * 1024
file_path = Path(file_path)
extract_path = extract_path or file_path.with_suffix('')
with gzip.open(file_path, 'rb') as fin, extract_path.open('wb') as fout:
while True:
block = fin.read(CHUNK)
if not block:
break
fout.write(block)
|
def ungzip(file_path, extract_path: Path = None):
"""Simple .gz archive extractor
Args:
file_path: path to the gzip file to be extracted
extract_path: path where the file will be extracted
"""
CHUNK = 16 * 1024
file_path = Path(file_path)
extract_path = extract_path or file_path.with_suffix('')
with gzip.open(file_path, 'rb') as fin, extract_path.open('wb') as fout:
while True:
block = fin.read(CHUNK)
if not block:
break
fout.write(block)
|
[
"Simple",
".",
"gz",
"archive",
"extractor"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/data/utils.py#L145-L162
|
[
"def",
"ungzip",
"(",
"file_path",
",",
"extract_path",
":",
"Path",
"=",
"None",
")",
":",
"CHUNK",
"=",
"16",
"*",
"1024",
"file_path",
"=",
"Path",
"(",
"file_path",
")",
"extract_path",
"=",
"extract_path",
"or",
"file_path",
".",
"with_suffix",
"(",
"''",
")",
"with",
"gzip",
".",
"open",
"(",
"file_path",
",",
"'rb'",
")",
"as",
"fin",
",",
"extract_path",
".",
"open",
"(",
"'wb'",
")",
"as",
"fout",
":",
"while",
"True",
":",
"block",
"=",
"fin",
".",
"read",
"(",
"CHUNK",
")",
"if",
"not",
"block",
":",
"break",
"fout",
".",
"write",
"(",
"block",
")"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
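A self-contained round trip using the same 16 KiB chunked copy as ungzip, including its default of stripping the final suffix to name the output:

import gzip
import tempfile
from pathlib import Path

workdir = Path(tempfile.mkdtemp())
src = workdir / 'data.txt.gz'
with gzip.open(src, 'wb') as f:
    f.write(b'x' * 100000)

CHUNK = 16 * 1024
dest = src.with_suffix('')  # ungzip's default: 'data.txt.gz' -> 'data.txt'
with gzip.open(src, 'rb') as fin, dest.open('wb') as fout:
    while True:
        block = fin.read(CHUNK)
        if not block:
            break
        fout.write(block)
print(dest.name, dest.stat().st_size)  # data.txt 100000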
test
|
download_decompress
|
Download and extract .tar.gz or .gz file to one or several target locations.
The archive is deleted if extraction was successful.
Args:
url: URL for file downloading
download_path: path to the directory where downloaded file will be stored
until the end of extraction
extract_paths: path or list of paths where contents of archive will be extracted
|
deeppavlov/core/data/utils.py
|
def download_decompress(url: str, download_path: [Path, str], extract_paths=None):
"""Download and extract .tar.gz or .gz file to one or several target locations.
The archive is deleted if extraction was successful.
Args:
url: URL for file downloading
download_path: path to the directory where downloaded file will be stored
until the end of extraction
extract_paths: path or list of paths where contents of archive will be extracted
"""
file_name = Path(urlparse(url).path).name
download_path = Path(download_path)
if extract_paths is None:
extract_paths = [download_path]
elif isinstance(extract_paths, list):
extract_paths = [Path(path) for path in extract_paths]
else:
extract_paths = [Path(extract_paths)]
cache_dir = os.getenv('DP_CACHE_DIR')
extracted = False
if cache_dir:
cache_dir = Path(cache_dir)
url_hash = md5(url.encode('utf8')).hexdigest()[:15]
arch_file_path = cache_dir / url_hash
extracted_path = cache_dir / (url_hash + '_extracted')
extracted = extracted_path.exists()
if not extracted and not arch_file_path.exists():
simple_download(url, arch_file_path)
else:
arch_file_path = download_path / file_name
simple_download(url, arch_file_path)
extracted_path = extract_paths.pop()
if not extracted:
log.info('Extracting {} archive into {}'.format(arch_file_path, extracted_path))
extracted_path.mkdir(parents=True, exist_ok=True)
if file_name.endswith('.tar.gz'):
untar(arch_file_path, extracted_path)
elif file_name.endswith('.gz'):
ungzip(arch_file_path, extracted_path / Path(file_name).with_suffix('').name)
elif file_name.endswith('.zip'):
with zipfile.ZipFile(arch_file_path, 'r') as zip_ref:
zip_ref.extractall(extracted_path)
else:
raise RuntimeError(f'Trying to extract an unknown type of archive {file_name}')
if not cache_dir:
arch_file_path.unlink()
for extract_path in extract_paths:
for src in extracted_path.iterdir():
dest = extract_path / src.name
if src.is_dir():
copytree(src, dest)
else:
extract_path.mkdir(parents=True, exist_ok=True)
shutil.copy(str(src), str(dest))
|
def download_decompress(url: str, download_path: [Path, str], extract_paths=None):
"""Download and extract .tar.gz or .gz file to one or several target locations.
The archive is deleted if extraction was successful.
Args:
url: URL for file downloading
download_path: path to the directory where downloaded file will be stored
until the end of extraction
extract_paths: path or list of paths where contents of archive will be extracted
"""
file_name = Path(urlparse(url).path).name
download_path = Path(download_path)
if extract_paths is None:
extract_paths = [download_path]
elif isinstance(extract_paths, list):
extract_paths = [Path(path) for path in extract_paths]
else:
extract_paths = [Path(extract_paths)]
cache_dir = os.getenv('DP_CACHE_DIR')
extracted = False
if cache_dir:
cache_dir = Path(cache_dir)
url_hash = md5(url.encode('utf8')).hexdigest()[:15]
arch_file_path = cache_dir / url_hash
extracted_path = cache_dir / (url_hash + '_extracted')
extracted = extracted_path.exists()
if not extracted and not arch_file_path.exists():
simple_download(url, arch_file_path)
else:
arch_file_path = download_path / file_name
simple_download(url, arch_file_path)
extracted_path = extract_paths.pop()
if not extracted:
log.info('Extracting {} archive into {}'.format(arch_file_path, extracted_path))
extracted_path.mkdir(parents=True, exist_ok=True)
if file_name.endswith('.tar.gz'):
untar(arch_file_path, extracted_path)
elif file_name.endswith('.gz'):
ungzip(arch_file_path, extracted_path / Path(file_name).with_suffix('').name)
elif file_name.endswith('.zip'):
with zipfile.ZipFile(arch_file_path, 'r') as zip_ref:
zip_ref.extractall(extracted_path)
else:
raise RuntimeError(f'Trying to extract an unknown type of archive {file_name}')
if not cache_dir:
arch_file_path.unlink()
for extract_path in extract_paths:
for src in extracted_path.iterdir():
dest = extract_path / src.name
if src.is_dir():
copytree(src, dest)
else:
extract_path.mkdir(parents=True, exist_ok=True)
shutil.copy(str(src), str(dest))
|
[
"Download",
"and",
"extract",
".",
"tar",
".",
"gz",
"or",
".",
"gz",
"file",
"to",
"one",
"or",
"several",
"target",
"locations",
".",
"The",
"archive",
"is",
"deleted",
"if",
"extraction",
"was",
"successful",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/data/utils.py#L165-L224
|
[
"def",
"download_decompress",
"(",
"url",
":",
"str",
",",
"download_path",
":",
"[",
"Path",
",",
"str",
"]",
",",
"extract_paths",
"=",
"None",
")",
":",
"file_name",
"=",
"Path",
"(",
"urlparse",
"(",
"url",
")",
".",
"path",
")",
".",
"name",
"download_path",
"=",
"Path",
"(",
"download_path",
")",
"if",
"extract_paths",
"is",
"None",
":",
"extract_paths",
"=",
"[",
"download_path",
"]",
"elif",
"isinstance",
"(",
"extract_paths",
",",
"list",
")",
":",
"extract_paths",
"=",
"[",
"Path",
"(",
"path",
")",
"for",
"path",
"in",
"extract_paths",
"]",
"else",
":",
"extract_paths",
"=",
"[",
"Path",
"(",
"extract_paths",
")",
"]",
"cache_dir",
"=",
"os",
".",
"getenv",
"(",
"'DP_CACHE_DIR'",
")",
"extracted",
"=",
"False",
"if",
"cache_dir",
":",
"cache_dir",
"=",
"Path",
"(",
"cache_dir",
")",
"url_hash",
"=",
"md5",
"(",
"url",
".",
"encode",
"(",
"'utf8'",
")",
")",
".",
"hexdigest",
"(",
")",
"[",
":",
"15",
"]",
"arch_file_path",
"=",
"cache_dir",
"/",
"url_hash",
"extracted_path",
"=",
"cache_dir",
"/",
"(",
"url_hash",
"+",
"'_extracted'",
")",
"extracted",
"=",
"extracted_path",
".",
"exists",
"(",
")",
"if",
"not",
"extracted",
"and",
"not",
"arch_file_path",
".",
"exists",
"(",
")",
":",
"simple_download",
"(",
"url",
",",
"arch_file_path",
")",
"else",
":",
"arch_file_path",
"=",
"download_path",
"/",
"file_name",
"simple_download",
"(",
"url",
",",
"arch_file_path",
")",
"extracted_path",
"=",
"extract_paths",
".",
"pop",
"(",
")",
"if",
"not",
"extracted",
":",
"log",
".",
"info",
"(",
"'Extracting {} archive into {}'",
".",
"format",
"(",
"arch_file_path",
",",
"extracted_path",
")",
")",
"extracted_path",
".",
"mkdir",
"(",
"parents",
"=",
"True",
",",
"exist_ok",
"=",
"True",
")",
"if",
"file_name",
".",
"endswith",
"(",
"'.tar.gz'",
")",
":",
"untar",
"(",
"arch_file_path",
",",
"extracted_path",
")",
"elif",
"file_name",
".",
"endswith",
"(",
"'.gz'",
")",
":",
"ungzip",
"(",
"arch_file_path",
",",
"extracted_path",
"/",
"Path",
"(",
"file_name",
")",
".",
"with_suffix",
"(",
"''",
")",
".",
"name",
")",
"elif",
"file_name",
".",
"endswith",
"(",
"'.zip'",
")",
":",
"with",
"zipfile",
".",
"ZipFile",
"(",
"arch_file_path",
",",
"'r'",
")",
"as",
"zip_ref",
":",
"zip_ref",
".",
"extractall",
"(",
"extracted_path",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"f'Trying to extract an unknown type of archive {file_name}'",
")",
"if",
"not",
"cache_dir",
":",
"arch_file_path",
".",
"unlink",
"(",
")",
"for",
"extract_path",
"in",
"extract_paths",
":",
"for",
"src",
"in",
"extracted_path",
".",
"iterdir",
"(",
")",
":",
"dest",
"=",
"extract_path",
"/",
"src",
".",
"name",
"if",
"src",
".",
"is_dir",
"(",
")",
":",
"copytree",
"(",
"src",
",",
"dest",
")",
"else",
":",
"extract_path",
".",
"mkdir",
"(",
"parents",
"=",
"True",
",",
"exist_ok",
"=",
"True",
")",
"shutil",
".",
"copy",
"(",
"str",
"(",
"src",
")",
",",
"str",
"(",
"dest",
")",
")"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
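How the DP_CACHE_DIR layout used above is derived: one 15-hex-char key per URL, with a sibling '<key>_extracted' directory marking a completed extraction. A runnable sketch with a hypothetical URL and fallback cache path:

import os
from hashlib import md5
from pathlib import Path

url = 'http://example.com/model.tar.gz'  # hypothetical archive URL
cache_dir = Path(os.getenv('DP_CACHE_DIR', '/tmp/dp_cache'))
url_hash = md5(url.encode('utf8')).hexdigest()[:15]
print(cache_dir / url_hash)                   # where the archive itself is cached
print(cache_dir / (url_hash + '_extracted'))  # marker/contents of a prior extraction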
test
|
update_dict_recursive
|
Updates dict recursively
You need to use this function to update a dictionary if the depth of editing_dict is more than 1
Args:
editable_dict: dictionary, that will be edited
editing_dict: dictionary, that contains edits
Returns:
None
|
deeppavlov/core/data/utils.py
|
def update_dict_recursive(editable_dict: dict, editing_dict: dict) -> None:
"""Updates dict recursively
You need to use this function to update a dictionary if the depth of editing_dict is more than 1
Args:
editable_dict: dictionary, that will be edited
editing_dict: dictionary, that contains edits
Returns:
None
"""
for k, v in editing_dict.items():
if isinstance(v, collections.Mapping):
update_dict_recursive(editable_dict.get(k, {}), v)
else:
editable_dict[k] = v
|
def update_dict_recursive(editable_dict: dict, editing_dict: dict) -> None:
"""Updates dict recursively
You need to use this function to update a dictionary if the depth of editing_dict is more than 1
Args:
editable_dict: dictionary, that will be edited
editing_dict: dictionary, that contains edits
Returns:
None
"""
for k, v in editing_dict.items():
if isinstance(v, collections.Mapping):
update_dict_recursive(editable_dict.get(k, {}), v)
else:
editable_dict[k] = v
|
[
"Updates",
"dict",
"recursively"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/data/utils.py#L431-L446
|
[
"def",
"update_dict_recursive",
"(",
"editable_dict",
":",
"dict",
",",
"editing_dict",
":",
"dict",
")",
"->",
"None",
":",
"for",
"k",
",",
"v",
"in",
"editing_dict",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"collections",
".",
"Mapping",
")",
":",
"update_dict_recursive",
"(",
"editable_dict",
".",
"get",
"(",
"k",
",",
"{",
"}",
")",
",",
"v",
")",
"else",
":",
"editable_dict",
"[",
"k",
"]",
"=",
"v"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
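A self-contained variant with two small modernizations, each flagged: collections.abc.Mapping replaces the bare collections.Mapping alias (removed in Python 3.10), and setdefault replaces get so that edits under a key missing from editable_dict are attached rather than written into a throwaway dict:

import collections.abc

def update_dict_recursive(editable_dict: dict, editing_dict: dict) -> None:
    for k, v in editing_dict.items():
        if isinstance(v, collections.abc.Mapping):
            # setdefault keeps the nested dict attached to editable_dict
            update_dict_recursive(editable_dict.setdefault(k, {}), v)
        else:
            editable_dict[k] = v

cfg = {'train': {'epochs': 5, 'lr': 0.1}}
update_dict_recursive(cfg, {'train': {'lr': 0.01}, 'seed': 42})
print(cfg)  # {'train': {'epochs': 5, 'lr': 0.01}, 'seed': 42}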
test
|
path_set_md5
|
Given a file URL, return a md5 query of the file
Args:
url: a given URL
Returns:
URL of the md5 file
|
deeppavlov/core/data/utils.py
|
def path_set_md5(url):
"""Given a file URL, return a md5 query of the file
Args:
url: a given URL
Returns:
URL of the md5 file
"""
scheme, netloc, path, query_string, fragment = urlsplit(url)
path += '.md5'
return urlunsplit((scheme, netloc, path, query_string, fragment))
|
def path_set_md5(url):
"""Given a file URL, return a md5 query of the file
Args:
url: a given URL
Returns:
URL of the md5 file
"""
scheme, netloc, path, query_string, fragment = urlsplit(url)
path += '.md5'
return urlunsplit((scheme, netloc, path, query_string, fragment))
|
[
"Given",
"a",
"file",
"URL",
"return",
"a",
"md5",
"query",
"of",
"the",
"file"
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/data/utils.py#L449-L460
|
[
"def",
"path_set_md5",
"(",
"url",
")",
":",
"scheme",
",",
"netloc",
",",
"path",
",",
"query_string",
",",
"fragment",
"=",
"urlsplit",
"(",
"url",
")",
"path",
"+=",
"'.md5'",
"return",
"urlunsplit",
"(",
"(",
"scheme",
",",
"netloc",
",",
"path",
",",
"query_string",
",",
"fragment",
")",
")"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
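A quick check of path_set_md5's behavior with a hypothetical URL; note that '.md5' is appended to the path component while any query string survives untouched:

from urllib.parse import urlsplit, urlunsplit

def path_set_md5(url):
    scheme, netloc, path, query_string, fragment = urlsplit(url)
    path += '.md5'
    return urlunsplit((scheme, netloc, path, query_string, fragment))

print(path_set_md5('http://files.example.com/models/ner.tar.gz?v=1'))
# http://files.example.com/models/ner.tar.gz.md5?v=1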
test
|
set_query_parameter
|
Given a URL, set or replace a query parameter and return the modified URL.
Args:
url: a given URL
param_name: the parameter name to add
param_value: the parameter value
Returns:
URL with the added parameter
|
deeppavlov/core/data/utils.py
|
def set_query_parameter(url, param_name, param_value):
"""Given a URL, set or replace a query parameter and return the modified URL.
Args:
url: a given URL
param_name: the parameter name to add
param_value: the parameter value
Returns:
URL with the added parameter
"""
scheme, netloc, path, query_string, fragment = urlsplit(url)
query_params = parse_qs(query_string)
query_params[param_name] = [param_value]
new_query_string = urlencode(query_params, doseq=True)
return urlunsplit((scheme, netloc, path, new_query_string, fragment))
|
def set_query_parameter(url, param_name, param_value):
"""Given a URL, set or replace a query parameter and return the modified URL.
Args:
url: a given URL
param_name: the parameter name to add
param_value: the parameter value
Returns:
URL with the added parameter
"""
scheme, netloc, path, query_string, fragment = urlsplit(url)
query_params = parse_qs(query_string)
query_params[param_name] = [param_value]
new_query_string = urlencode(query_params, doseq=True)
return urlunsplit((scheme, netloc, path, new_query_string, fragment))
|
[
"Given",
"a",
"URL",
"set",
"or",
"replace",
"a",
"query",
"parameter",
"and",
"return",
"the",
"modified",
"URL",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/data/utils.py#L463-L480
|
[
"def",
"set_query_parameter",
"(",
"url",
",",
"param_name",
",",
"param_value",
")",
":",
"scheme",
",",
"netloc",
",",
"path",
",",
"query_string",
",",
"fragment",
"=",
"urlsplit",
"(",
"url",
")",
"query_params",
"=",
"parse_qs",
"(",
"query_string",
")",
"query_params",
"[",
"param_name",
"]",
"=",
"[",
"param_value",
"]",
"new_query_string",
"=",
"urlencode",
"(",
"query_params",
",",
"doseq",
"=",
"True",
")",
"return",
"urlunsplit",
"(",
"(",
"scheme",
",",
"netloc",
",",
"path",
",",
"new_query_string",
",",
"fragment",
")",
")"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
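The same style of check for set_query_parameter, again with a hypothetical URL; an existing parameter is replaced rather than duplicated:

from urllib.parse import parse_qs, urlencode, urlsplit, urlunsplit

def set_query_parameter(url, param_name, param_value):
    scheme, netloc, path, query_string, fragment = urlsplit(url)
    query_params = parse_qs(query_string)
    query_params[param_name] = [param_value]  # overwrite any prior values
    new_query_string = urlencode(query_params, doseq=True)
    return urlunsplit((scheme, netloc, path, new_query_string, fragment))

print(set_query_parameter('http://example.com/dl?config=ner', 'config', 'squad'))
# http://example.com/dl?config=squad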
test
|
PlainText.alexa
|
Returns Amazon Alexa compatible state of the PlainText instance.
Creating Amazon Alexa response blank with populated "outputSpeech" and
"card sections.
Returns:
response: Amazon Alexa representation of PlainText state.
|
deeppavlov/agents/rich_content/default_rich_content.py
|
def alexa(self) -> dict:
"""Returns Amazon Alexa compatible state of the PlainText instance.
Creating Amazon Alexa response blank with populated "outputSpeech" and
"card sections.
Returns:
response: Amazon Alexa representation of PlainText state.
"""
response = {
'response': {
'shouldEndSession': False,
'outputSpeech': {
'type': 'PlainText',
'text': self.content},
'card': {
'type': 'Simple',
'content': self.content
}
}
}
return response
|
def alexa(self) -> dict:
"""Returns Amazon Alexa compatible state of the PlainText instance.
Creating Amazon Alexa response blank with populated "outputSpeech" and
"card sections.
Returns:
response: Amazon Alexa representation of PlainText state.
"""
response = {
'response': {
'shouldEndSession': False,
'outputSpeech': {
'type': 'PlainText',
'text': self.content},
'card': {
'type': 'Simple',
'content': self.content
}
}
}
return response
|
[
"Returns",
"Amazon",
"Alexa",
"compatible",
"state",
"of",
"the",
"PlainText",
"instance",
"."
] |
deepmipt/DeepPavlov
|
python
|
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/agents/rich_content/default_rich_content.py#L58-L80
|
[
"def",
"alexa",
"(",
"self",
")",
"->",
"dict",
":",
"response",
"=",
"{",
"'response'",
":",
"{",
"'shouldEndSession'",
":",
"False",
",",
"'outputSpeech'",
":",
"{",
"'type'",
":",
"'PlainText'",
",",
"'text'",
":",
"self",
".",
"content",
"}",
",",
"'card'",
":",
"{",
"'type'",
":",
"'Simple'",
",",
"'content'",
":",
"self",
".",
"content",
"}",
"}",
"}",
"return",
"response"
] |
f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c
|
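The response skeleton that PlainText.alexa produces, built standalone with hypothetical content so the shape is easy to inspect:

import json

content = 'Hello from the bot!'  # hypothetical PlainText content
response = {
    'response': {
        'shouldEndSession': False,
        'outputSpeech': {'type': 'PlainText', 'text': content},
        'card': {'type': 'Simple', 'content': content},
    }
}
print(json.dumps(response, indent=2))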