Dropout with the same drop mask for all fixed_mask_dims
def variational_dropout(units, keep_prob, fixed_mask_dims=(1,)):
    """Dropout with the same drop mask for all fixed_mask_dims

    Args:
        units: a tensor, usually with shapes [B x T x F], where
            B - batch size
            T - tokens dimension
            F - feature dimension
        keep_prob: keep probability
        fixed_mask_dims: in these dimensions the mask will be the same

    Returns:
        dropped units tensor
    """
    units_shape = tf.shape(units)
    noise_shape = [units_shape[n] for n in range(len(units.shape))]
    for dim in fixed_mask_dims:
        noise_shape[dim] = 1
    return tf.nn.dropout(units, keep_prob, noise_shape)
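A minimal usage sketch (not part of the original source), assuming TensorFlow 1.x (the keep_prob/noise_shape signature of tf.nn.dropout) and that variational_dropout above is in scope; the [B, T, F] shape and values are illustrative.

import numpy as np
import tensorflow as tf  # assumes TF 1.x

units = tf.placeholder(tf.float32, shape=[None, None, 8])  # [B, T, F]
dropped = variational_dropout(units, keep_prob=0.7, fixed_mask_dims=(1,))

with tf.Session() as sess:
    batch = np.ones((2, 5, 8), dtype=np.float32)
    out = sess.run(dropped, feed_dict={units: batch})
    # Within one sample every timestep shares the same feature mask,
    # because the noise shape is 1 along the token dimension.
    print(out[0, 0], out[0, 1])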
Builds the network using Keras.
def build(self):
    """Builds the network using Keras."""
    word_inputs = kl.Input(shape=(None, MAX_WORD_LENGTH+2), dtype="int32")
    inputs = [word_inputs]
    word_outputs = self._build_word_cnn(word_inputs)
    if len(self.word_vectorizers) > 0:
        additional_word_inputs = [kl.Input(shape=(None, input_dim), dtype="float32")
                                  for input_dim, dense_dim in self.word_vectorizers]
        inputs.extend(additional_word_inputs)
        additional_word_embeddings = [kl.Dense(dense_dim)(additional_word_inputs[i])
                                      for i, (_, dense_dim) in enumerate(self.word_vectorizers)]
        word_outputs = kl.Concatenate()([word_outputs] + additional_word_embeddings)
    outputs, lstm_outputs = self._build_basic_network(word_outputs)
    compile_args = {"optimizer": ko.nadam(lr=0.002, clipnorm=5.0),
                    "loss": "categorical_crossentropy",
                    "metrics": ["accuracy"]}
    self.model_ = Model(inputs, outputs)
    self.model_.compile(**compile_args)
    if self.verbose > 0:
        self.model_.summary(print_fn=log.info)
    return self
Builds word-level network
def _build_word_cnn(self, inputs):
    """Builds word-level network"""
    inputs = kl.Lambda(kb.one_hot, arguments={"num_classes": self.symbols_number_},
                       output_shape=lambda x: tuple(x) + (self.symbols_number_,))(inputs)
    char_embeddings = kl.Dense(self.char_embeddings_size, use_bias=False)(inputs)
    conv_outputs = []
    self.char_output_dim_ = 0
    for window_size, filters_number in zip(self.char_window_size, self.char_filters):
        curr_output = char_embeddings
        curr_filters_number = (min(self.char_filter_multiple * window_size, 200)
                               if filters_number is None else filters_number)
        for _ in range(self.char_conv_layers - 1):
            curr_output = kl.Conv2D(curr_filters_number, (1, window_size),
                                    padding="same", activation="relu",
                                    data_format="channels_last")(curr_output)
            if self.conv_dropout > 0.0:
                curr_output = kl.Dropout(self.conv_dropout)(curr_output)
        curr_output = kl.Conv2D(curr_filters_number, (1, window_size),
                                padding="same", activation="relu",
                                data_format="channels_last")(curr_output)
        conv_outputs.append(curr_output)
        self.char_output_dim_ += curr_filters_number
    if len(conv_outputs) > 1:
        conv_output = kl.Concatenate(axis=-1)(conv_outputs)
    else:
        conv_output = conv_outputs[0]
    highway_input = kl.Lambda(kb.max, arguments={"axis": -2})(conv_output)
    if self.intermediate_dropout > 0.0:
        highway_input = kl.Dropout(self.intermediate_dropout)(highway_input)
    for i in range(self.char_highway_layers - 1):
        highway_input = Highway(activation="relu")(highway_input)
        if self.highway_dropout > 0.0:
            highway_input = kl.Dropout(self.highway_dropout)(highway_input)
    highway_output = Highway(activation="relu")(highway_input)
    return highway_output
Creates the basic network architecture transforming word embeddings to intermediate outputs
def _build_basic_network(self, word_outputs):
    """Creates the basic network architecture,
    transforming word embeddings to intermediate outputs
    """
    if self.word_dropout > 0.0:
        lstm_outputs = kl.Dropout(self.word_dropout)(word_outputs)
    else:
        lstm_outputs = word_outputs
    for j in range(self.word_lstm_layers-1):
        lstm_outputs = kl.Bidirectional(
            kl.LSTM(self.word_lstm_units[j], return_sequences=True,
                    dropout=self.lstm_dropout))(lstm_outputs)
    lstm_outputs = kl.Bidirectional(
        kl.LSTM(self.word_lstm_units[-1], return_sequences=True,
                dropout=self.lstm_dropout))(lstm_outputs)
    pre_outputs = kl.TimeDistributed(
        kl.Dense(self.tags_number_, activation="softmax",
                 activity_regularizer=self.regularizer),
        name="p")(lstm_outputs)
    return pre_outputs, lstm_outputs
Trains model on a single batch
def train_on_batch(self, data: List[Iterable], labels: Iterable[list]) -> None:
    """Trains model on a single batch

    Args:
        data: a batch of word sequences
        labels: a batch of correct tag sequences
    Returns:
        the trained model
    """
    X, Y = self._transform_batch(data, labels)
    self.model_.train_on_batch(X, Y)
Makes predictions on a single batch
def predict_on_batch(self, data: Union[list, tuple],
                     return_indexes: bool = False) -> List[List[str]]:
    """Makes predictions on a single batch

    Args:
        data: a batch of word sequences together with additional inputs
        return_indexes: whether to return tag indexes in vocabulary or tags themselves

    Returns:
        a batch of label sequences
    """
    X = self._transform_batch(data)
    objects_number, lengths = len(X[0]), [len(elem) for elem in data[0]]
    Y = self.model_.predict_on_batch(X)
    labels = np.argmax(Y, axis=-1)
    answer: List[List[str]] = [None] * objects_number
    for i, (elem, length) in enumerate(zip(labels, lengths)):
        elem = elem[:length]
        answer[i] = elem if return_indexes else self.tags.idxs2toks(elem)
    return answer
Transforms a sentence to Numpy array which will be the network input.
def _make_sent_vector(self, sent: List, bucket_length: int = None) -> np.ndarray:
    """Transforms a sentence to Numpy array, which will be the network input.

    Args:
        sent: input sentence
        bucket_length: the width of the bucket

    Returns:
        A 3d array, answer[i][j][k] contains the index of k-th letter
        in j-th word of i-th input sentence.
    """
    bucket_length = bucket_length or len(sent)
    answer = np.zeros(shape=(bucket_length, MAX_WORD_LENGTH+2), dtype=np.int32)
    for i, word in enumerate(sent):
        answer[i, 0] = self.tags.tok2idx("BEGIN")
        m = min(len(word), MAX_WORD_LENGTH)
        for j, x in enumerate(word[-m:]):
            answer[i, j+1] = self.symbols.tok2idx(x)
        answer[i, m+1] = self.tags.tok2idx("END")
        answer[i, m+2:] = self.tags.tok2idx("PAD")
    return answer
Transforms a sentence of tags to Numpy array which will be the network target.
def _make_tags_vector(self, tags, bucket_length=None) -> np.ndarray:
    """Transforms a sentence of tags to Numpy array, which will be the network target.

    Args:
        tags: input sentence of tags
        bucket_length: the width of the bucket

    Returns:
        A 2d array, answer[i][j] contains the index of j-th tag in i-th input sentence.
    """
    bucket_length = bucket_length or len(tags)
    answer = np.zeros(shape=(bucket_length,), dtype=np.int32)
    for i, tag in enumerate(tags):
        answer[i] = self.tags.tok2idx(tag)
    return answer
Calculate BLEU score
def bleu_advanced(y_true: List[Any], y_predicted: List[Any],
                  weights: Tuple = (1,), smoothing_function=SMOOTH.method1,
                  auto_reweigh=False, penalty=True) -> float:
    """Calculate BLEU score

    Parameters:
        y_true: list of reference tokens
        y_predicted: list of query tokens
        weights: n-gram weights
        smoothing_function: SmoothingFunction
        auto_reweigh: Option to re-normalize the weights uniformly
        penalty: either enable brevity penalty or not

    Return:
        BLEU score
    """
    bleu_measure = sentence_bleu([y_true], y_predicted, weights,
                                 smoothing_function, auto_reweigh)

    hyp_len = len(y_predicted)
    hyp_lengths = hyp_len
    ref_lengths = closest_ref_length([y_true], hyp_len)

    bpenalty = brevity_penalty(ref_lengths, hyp_lengths)

    if penalty is True or bpenalty == 0:
        return bleu_measure

    return bleu_measure / bpenalty
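A short usage sketch (not from the original source), assuming nltk is installed and that bleu_advanced above is in scope together with its helpers from nltk.translate.bleu_score (sentence_bleu, SmoothingFunction aliased as SMOOTH, closest_ref_length, brevity_penalty); the token lists are illustrative.

reference = ['the', 'cat', 'is', 'on', 'the', 'mat']
hypothesis = ['the', 'cat', 'on', 'mat']  # shorter, so the brevity penalty kicks in

# Unigram BLEU with and without the brevity penalty.
print(bleu_advanced(reference, hypothesis, weights=(1,), penalty=True))
# ~0.61: unigram precision 1.0 scaled by the brevity penalty exp(1 - 6/4)
print(bleu_advanced(reference, hypothesis, weights=(1,), penalty=False))
# 1.0: the same score with the brevity penalty divided back out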
Verify signature certificate URL against Amazon Alexa requirements.
def verify_sc_url(url: str) -> bool:
    """Verify signature certificate URL against Amazon Alexa requirements.

    Args:
        url: Signature certificate URL from SignatureCertChainUrl HTTP header.

    Returns:
        result: True if verification was successful, False if not.
    """
    parsed = urlsplit(url)

    scheme: str = parsed.scheme
    netloc: str = parsed.netloc
    path: str = parsed.path

    try:
        port = parsed.port
    except ValueError:
        port = None

    result = (scheme.lower() == 'https' and
              netloc.lower().split(':')[0] == 's3.amazonaws.com' and
              path.startswith('/echo.api/') and
              (port == 443 or port is None))

    return result
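A quick usage sketch of the URL check above; the behaviour follows directly from the function body, and urlsplit is assumed to come from urllib.parse as in the snippet.

from urllib.parse import urlsplit  # used by verify_sc_url

print(verify_sc_url('https://s3.amazonaws.com/echo.api/echo-api-cert-6-ats.pem'))  # True
print(verify_sc_url('https://s3.amazonaws.com:443/echo.api/echo-api-cert.pem'))    # True (explicit 443)
print(verify_sc_url('http://s3.amazonaws.com/echo.api/echo-api-cert.pem'))         # False (not https)
print(verify_sc_url('https://example.com/echo.api/echo-api-cert.pem'))             # False (wrong host)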
Extracts pycrypto X509 objects from SSL certificates chain string.
def extract_certs(certs_txt: str) -> List[crypto.X509]:
    """Extracts pycrypto X509 objects from SSL certificates chain string.

    Args:
        certs_txt: SSL certificates chain string.

    Returns:
        result: List of pycrypto X509 objects.
    """
    pattern = r'-----BEGIN CERTIFICATE-----.+?-----END CERTIFICATE-----'
    certs_txt = re.findall(pattern, certs_txt, flags=re.DOTALL)
    certs = [crypto.load_certificate(crypto.FILETYPE_PEM, cert_txt) for cert_txt in certs_txt]
    return certs
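A hedged usage sketch: it builds a throwaway self-signed certificate with pyOpenSSL purely to obtain PEM text, then feeds a two-certificate chain string to extract_certs above (the re and crypto imports are the same ones the snippet relies on).

import re
from OpenSSL import crypto

# build a disposable self-signed certificate just to have PEM text to parse
key = crypto.PKey()
key.generate_key(crypto.TYPE_RSA, 2048)
cert = crypto.X509()
cert.get_subject().CN = 'example.test'
cert.set_serial_number(1)
cert.set_issuer(cert.get_subject())
cert.set_pubkey(key)
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(3600)
cert.sign(key, 'sha256')

pem_chain = crypto.dump_certificate(crypto.FILETYPE_PEM, cert).decode('ascii') * 2
print(len(extract_certs(pem_chain)))  # 2: one X509 object per PEM block found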
Verifies Subject Alternative Names (SANs) for Amazon certificate.
def verify_sans(amazon_cert: crypto.X509) -> bool:
    """Verifies Subject Alternative Names (SANs) for Amazon certificate.

    Args:
        amazon_cert: Pycrypto X509 Amazon certificate.

    Returns:
        result: True if verification was successful, False if not.
    """
    cert_extentions = [amazon_cert.get_extension(i) for i in range(amazon_cert.get_extension_count())]
    subject_alt_names = ''

    for extention in cert_extentions:
        if 'subjectAltName' in str(extention.get_short_name()):
            subject_alt_names = extention.__str__()
            break

    result = 'echo-api.amazon.com' in subject_alt_names

    return result
Verifies if Amazon and additional certificates create a chain of trust to a root CA.
def verify_certs_chain(certs_chain: List[crypto.X509], amazon_cert: crypto.X509) -> bool:
    """Verifies if Amazon and additional certificates create a chain of trust to a root CA.

    Args:
        certs_chain: List of pycrypto X509 intermediate certificates from signature chain URL.
        amazon_cert: Pycrypto X509 Amazon certificate.

    Returns:
        result: True if verification was successful, False if not.
    """
    store = crypto.X509Store()

    # add certificates from Amazon provided certs chain
    for cert in certs_chain:
        store.add_cert(cert)

    # add CA certificates
    default_verify_paths = ssl.get_default_verify_paths()

    default_verify_file = default_verify_paths.cafile
    default_verify_file = Path(default_verify_file).resolve() if default_verify_file else None

    default_verify_path = default_verify_paths.capath
    default_verify_path = Path(default_verify_path).resolve() if default_verify_path else None

    ca_files = [ca_file for ca_file in default_verify_path.iterdir()] if default_verify_path else []
    if default_verify_file:
        ca_files.append(default_verify_file)

    for ca_file in ca_files:
        ca_file: Path
        if ca_file.is_file():
            with ca_file.open('r', encoding='ascii') as crt_f:
                ca_certs_txt = crt_f.read()
                ca_certs = extract_certs(ca_certs_txt)
                for cert in ca_certs:
                    store.add_cert(cert)

    # add CA certificates (Windows)
    ssl_context = ssl.create_default_context()
    der_certs = ssl_context.get_ca_certs(binary_form=True)
    pem_certs = '\n'.join([ssl.DER_cert_to_PEM_cert(der_cert) for der_cert in der_certs])

    ca_certs = extract_certs(pem_certs)
    for ca_cert in ca_certs:
        store.add_cert(ca_cert)

    store_context = crypto.X509StoreContext(store, amazon_cert)

    try:
        store_context.verify_certificate()
        result = True
    except crypto.X509StoreContextError:
        result = False

    return result
Verifies Alexa request signature.
def verify_signature(amazon_cert: crypto.X509, signature: str, request_body: bytes) -> bool:
    """Verifies Alexa request signature.

    Args:
        amazon_cert: Pycrypto X509 Amazon certificate.
        signature: Base64 decoded Alexa request signature from Signature HTTP header.
        request_body: full HTTPS request body

    Returns:
        result: True if verification was successful, False if not.
    """
    signature = base64.b64decode(signature)

    try:
        crypto.verify(amazon_cert, signature, request_body, 'sha1')
        result = True
    except crypto.Error:
        result = False

    return result
Conducts series of Alexa SSL certificate verifications against Amazon Alexa requirements.
def verify_cert(signature_chain_url: str) -> Optional[crypto.X509]:
    """Conducts series of Alexa SSL certificate verifications against Amazon Alexa requirements.

    Args:
        signature_chain_url: Signature certificate URL from SignatureCertChainUrl HTTP header.

    Returns:
        result: Amazon certificate if verification was successful, None if not.
    """
    try:
        certs_chain_get = requests.get(signature_chain_url)
    except requests.exceptions.ConnectionError as e:
        log.error(f'Amazon signature chain get error: {e}')
        return None

    certs_chain_txt = certs_chain_get.text
    certs_chain = extract_certs(certs_chain_txt)

    amazon_cert: crypto.X509 = certs_chain.pop(0)

    # verify signature chain url
    sc_url_verification = verify_sc_url(signature_chain_url)
    if not sc_url_verification:
        log.error(f'Amazon signature url {signature_chain_url} was not verified')

    # verify not expired
    expired_verification = not amazon_cert.has_expired()
    if not expired_verification:
        log.error(f'Amazon certificate ({signature_chain_url}) expired')

    # verify subject alternative names
    sans_verification = verify_sans(amazon_cert)
    if not sans_verification:
        log.error(f'Subject alternative names verification for ({signature_chain_url}) certificate failed')

    # verify certs chain
    chain_verification = verify_certs_chain(certs_chain, amazon_cert)
    if not chain_verification:
        log.error(f'Certificates chain verification for ({signature_chain_url}) certificate failed')

    result = (sc_url_verification and expired_verification and sans_verification and chain_verification)

    return amazon_cert if result else None
Returns list of json compatible states of the RichMessage instance nested controls.
def json(self) -> list:
    """Returns list of json compatible states of the RichMessage instance nested controls.

    Returns:
        json_controls: Json representation of RichMessage instance nested controls.
    """
    json_controls = [control.json() for control in self.controls]
    return json_controls
Returns list of MS Bot Framework compatible states of the RichMessage instance nested controls.
def ms_bot_framework(self) -> list:
    """Returns list of MS Bot Framework compatible states of the RichMessage instance nested controls.

    Returns:
        ms_bf_controls: MS Bot Framework representation of RichMessage instance nested controls.
    """
    ms_bf_controls = [control.ms_bot_framework() for control in self.controls]
    return ms_bf_controls
Returns list of Telegram compatible states of the RichMessage instance nested controls.
def telegram(self) -> list:
    """Returns list of Telegram compatible states of the RichMessage instance nested controls.

    Returns:
        telegram_controls: Telegram representation of RichMessage instance nested controls.
    """
    telegram_controls = [control.telegram() for control in self.controls]
    return telegram_controls
Returns list of Amazon Alexa compatible states of the RichMessage instance nested controls.
def alexa(self) -> list:
    """Returns list of Amazon Alexa compatible states of the RichMessage instance nested controls.

    Returns:
        alexa_controls: Amazon Alexa representation of RichMessage instance nested controls.
    """
    alexa_controls = [control.alexa() for control in self.controls]
    return alexa_controls
DeepPavlov console configuration utility.
def main():
    """DeepPavlov console configuration utility."""
    args = parser.parse_args()
    path = get_settings_path()

    if args.default:
        if populate_settings_dir(force=True):
            print(f'Populated {path} with default settings files')
        else:
            print(f'{path} is already a default settings directory')
    else:
        print(f'Current DeepPavlov settings path: {path}')
Constructs function encapsulated in the graph.
def _graph_wrap(func, graph):
    """Constructs function encapsulated in the graph."""
    @wraps(func)
    def _wrapped(*args, **kwargs):
        with graph.as_default():
            return func(*args, **kwargs)
    return _wrapped
Constructs function encapsulated in the graph and the session.
def _keras_wrap(func, graph, session):
    """Constructs function encapsulated in the graph and the session."""
    import keras.backend as K

    @wraps(func)
    def _wrapped(*args, **kwargs):
        with graph.as_default():
            K.set_session(session)
            return func(*args, **kwargs)
    return _wrapped
Compute Area Under the Curve (AUC) from prediction scores.
def roc_auc_score(y_true: Union[List[List[float]], List[List[int]], np.ndarray],
                  y_pred: Union[List[List[float]], List[List[int]], np.ndarray]) -> float:
    """Compute Area Under the Curve (AUC) from prediction scores.

    Args:
        y_true: true binary labels
        y_pred: target scores, can either be probability estimates of the positive class

    Returns:
        Area Under the Curve (AUC) from prediction scores
    """
    try:
        return sklearn.metrics.roc_auc_score(np.squeeze(np.array(y_true)),
                                             np.squeeze(np.array(y_pred)),
                                             average="macro")
    except ValueError:
        return 0.
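A short usage sketch, assuming numpy and scikit-learn are installed and the wrapper above is in scope; the label/score values are illustrative.

import numpy as np

y_true = [[1], [0], [1], [0]]
y_scores = [[0.9], [0.2], [0.65], [0.4]]

print(roc_auc_score(y_true, y_scores))  # 1.0: all positives are ranked above all negatives

# With a single class present the underlying sklearn call raises ValueError,
# which the wrapper converts to 0.
print(roc_auc_score([[1], [1]], [[0.5], [0.7]]))  # 0.0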
Convert a token to a hash of given size.
def hash_(token: str, hash_size: int) -> int:
    """Convert a token to a hash of given size.

    Args:
        token: a word
        hash_size: hash size

    Returns:
        int, hashed token
    """
    return murmurhash3_32(token, positive=True) % hash_size
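A minimal usage sketch; murmurhash3_32 is assumed to be sklearn.utils.murmurhash3_32, matching the call in the snippet above, and the hash size is illustrative.

from sklearn.utils import murmurhash3_32

hash_size = 2 ** 24
print(hash_('apple', hash_size))                                # deterministic bucket index in [0, hash_size)
print(hash_('apple', hash_size) == hash_('apple', hash_size))   # True: same token always maps to the same bucket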
Calculate accuracy in terms of absolute coincidence
def accuracy(y_true: [list, np.ndarray], y_predicted: [list, np.ndarray]) -> float:
    """Calculate accuracy in terms of absolute coincidence

    Args:
        y_true: array of true values
        y_predicted: array of predicted values

    Returns:
        portion of absolutely coincidental samples
    """
    examples_len = len(y_true)
    correct = sum([y1 == y2 for y1, y2 in zip(y_true, y_predicted)])
    return correct / examples_len if examples_len else 0
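A small usage sketch for the exact-match accuracy above; the tag sequences are illustrative.

y_true = [['B-PER', 'O'], ['O', 'O'], ['B-LOC', 'O']]
y_pred = [['B-PER', 'O'], ['O', 'B-ORG'], ['B-LOC', 'O']]

print(accuracy(y_true, y_pred))  # 2/3: a whole sequence must coincide to count as correct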
Rounds predictions and calculates accuracy in terms of absolute coincidence.
def round_accuracy(y_true, y_predicted):
    """Rounds predictions and calculates accuracy in terms of absolute coincidence.

    Args:
        y_true: list of true values
        y_predicted: list of predicted values

    Returns:
        portion of absolutely coincidental samples
    """
    predictions = [round(x) for x in y_predicted]
    examples_len = len(y_true)
    correct = sum([y1 == y2 for y1, y2 in zip(y_true, predictions)])
    return correct / examples_len if examples_len else 0
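And the rounding variant, on illustrative probabilities:

y_true = [1, 0, 1, 1]
y_prob = [0.8, 0.3, 0.4, 0.9]
print(round_accuracy(y_true, y_prob))  # 0.75: probabilities are rounded to {0, 1} before comparison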
We'll stub out all the initializers in the pretrained LM with a function that loads the weights from the file
def _pretrained_initializer(varname, weight_file, embedding_weight_file=None): """ We'll stub out all the initializers in the pretrained LM with a function that loads the weights from the file """ weight_name_map = {} for i in range(2): for j in range(8): # if we decide to add more layers root = 'RNN_{}/RNN/MultiRNNCell/Cell{}'.format(i, j) weight_name_map[root + '/rnn/lstm_cell/kernel'] = \ root + '/LSTMCell/W_0' weight_name_map[root + '/rnn/lstm_cell/bias'] = \ root + '/LSTMCell/B' weight_name_map[root + '/rnn/lstm_cell/projection/kernel'] = \ root + '/LSTMCell/W_P_0' # convert the graph name to that in the checkpoint varname_in_file = varname[5:] if varname_in_file.startswith('RNN'): varname_in_file = weight_name_map[varname_in_file] if varname_in_file == 'embedding': with h5py.File(embedding_weight_file, 'r') as fin: # Have added a special 0 index for padding not present # in the original model. embed_weights = fin[varname_in_file][...] weights = np.zeros( (embed_weights.shape[0] + 1, embed_weights.shape[1]), dtype=DTYPE ) weights[1:, :] = embed_weights else: with h5py.File(weight_file, 'r') as fin: if varname_in_file == 'char_embed': # Have added a special 0 index for padding not present # in the original model. char_embed_weights = fin[varname_in_file][...] weights = np.zeros( (char_embed_weights.shape[0] + 1, char_embed_weights.shape[1]), dtype=DTYPE ) weights[1:, :] = char_embed_weights else: weights = fin[varname_in_file][...] # Tensorflow initializers are callables that accept a shape parameter # and some optional kwargs def ret(shape, **kwargs): if list(shape) != list(weights.shape): raise ValueError( "Invalid shape initializing {0}, got {1}, expected {2}".format( varname_in_file, shape, weights.shape) ) return weights return ret
Weight the layers of a biLM with trainable scalar weights to compute ELMo representations.
def weight_layers(name, bilm_ops, l2_coef=None, use_top_only=False, do_layer_norm=False, reuse=False): """ Weight the layers of a biLM with trainable scalar weights to compute ELMo representations. For each output layer, this returns two ops. The first computes a layer specific weighted average of the biLM layers, and the second the l2 regularizer loss term. The regularization terms are also add to tf.GraphKeys.REGULARIZATION_LOSSES Input: name = a string prefix used for the trainable variable names bilm_ops = the tensorflow ops returned to compute internal representations from a biLM. This is the return value from BidirectionalLanguageModel(...)(ids_placeholder) l2_coef: the l2 regularization coefficient $\lambda$. Pass None or 0.0 for no regularization. use_top_only: if True, then only use the top layer. do_layer_norm: if True, then apply layer normalization to each biLM layer before normalizing reuse: reuse an aggregation variable scope. Output: { 'weighted_op': op to compute weighted average for output, 'regularization_op': op to compute regularization term } """ def _l2_regularizer(weights): if l2_coef is not None: return l2_coef * tf.reduce_sum(tf.square(weights)) else: return 0.0 # Get ops for computing LM embeddings and mask lm_embeddings = bilm_ops['lm_embeddings'] mask = bilm_ops['mask'] n_lm_layers = int(lm_embeddings.get_shape()[1]) lm_dim = int(lm_embeddings.get_shape()[3]) # import pdb; pdb.set_trace() with tf.control_dependencies([lm_embeddings, mask]): # Cast the mask and broadcast for layer use. mask_float = tf.cast(mask, 'float32') broadcast_mask = tf.expand_dims(mask_float, axis=-1) def _do_ln(x): # do layer normalization excluding the mask x_masked = x * broadcast_mask N = tf.reduce_sum(mask_float) * lm_dim mean = tf.reduce_sum(x_masked) / N variance = tf.reduce_sum(((x_masked - mean) * broadcast_mask)**2) / N return tf.nn.batch_normalization( x, mean, variance, None, None, 1E-12 ) if use_top_only: layers = tf.split(lm_embeddings, n_lm_layers, axis=1) # just the top layer sum_pieces = tf.squeeze(layers[-1], squeeze_dims=1) # no regularization reg = 0.0 else: with tf.variable_scope("aggregation", reuse=reuse): W = tf.get_variable( '{}_ELMo_W'.format(name), shape=(n_lm_layers, ), initializer=tf.zeros_initializer, regularizer=_l2_regularizer, trainable=True, ) # normalize the weights normed_weights = tf.split( tf.nn.softmax(W + 1.0 / n_lm_layers), n_lm_layers ) # split LM layers layers = tf.split(lm_embeddings, n_lm_layers, axis=1) # compute the weighted, normalized LM activations pieces = [] for w, t in zip(normed_weights, layers): if do_layer_norm: pieces.append(w * _do_ln(tf.squeeze(t, squeeze_dims=1))) else: pieces.append(w * tf.squeeze(t, squeeze_dims=1)) sum_pieces = tf.add_n(pieces) # get the regularizer reg = [ r for r in tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) if r.name.find('{}_ELMo_W/'.format(name)) >= 0 ] if len(reg) != 1: raise ValueError # scale the weighted sum by gamma with tf.variable_scope("aggregation", reuse=reuse): gamma = tf.get_variable( '{}_ELMo_gamma'.format(name), shape=(1, ), initializer=tf.ones_initializer, regularizer=None, trainable=True, ) weighted_lm_layers = sum_pieces * gamma weighted_lm_layers_masked = sum_pieces * broadcast_mask weighted_lm_layers_sum = tf.reduce_sum(weighted_lm_layers_masked, 1) mask_sum = tf.reduce_sum(mask_float, 1) mask_sum = tf.maximum(mask_sum, [1]) weighted_lm_layers_mean = weighted_lm_layers_sum / tf.expand_dims(mask_sum, - 1) word_emb_2n = tf.squeeze(layers[0], [1]) word_emb_1n = 
tf.slice(word_emb_2n, [0, 0, 0], [-1, -1, lm_dim // 2]) # to 512 lstm_outputs1 = tf.squeeze(layers[1], [1]) lstm_outputs2 = tf.squeeze(layers[2], [1]) ret = {'weighted_op': weighted_lm_layers, 'mean_op': weighted_lm_layers_mean, 'regularization_op': reg, 'word_emb': word_emb_1n, 'lstm_outputs1': lstm_outputs1, 'lstm_outputs2': lstm_outputs2, } return ret
options contains key 'char_cnn' with the character CNN configuration (filters, activation, character embedding dim, highway layers)
def _build_word_char_embeddings(self): """ options contains key 'char_cnn': { 'n_characters': 262, # includes the start / end characters 'max_characters_per_token': 50, 'filters': [ [1, 32], [2, 32], [3, 64], [4, 128], [5, 256], [6, 512], [7, 512] ], 'activation': 'tanh', # for the character embedding 'embedding': {'dim': 16} # for highway layers # if omitted, then no highway layers 'n_highway': 2, } """ projection_dim = self.options['lstm']['projection_dim'] cnn_options = self.options['char_cnn'] filters = cnn_options['filters'] n_filters = sum(f[1] for f in filters) max_chars = cnn_options['max_characters_per_token'] char_embed_dim = cnn_options['embedding']['dim'] n_chars = cnn_options['n_characters'] if n_chars != 262: raise Exception("Set n_characters=262 after training see a \ https://github.com/allenai/bilm-tf/blob/master/README.md") if cnn_options['activation'] == 'tanh': activation = tf.nn.tanh elif cnn_options['activation'] == 'relu': activation = tf.nn.relu # the character embeddings with tf.device("/cpu:0"): self.embedding_weights = tf.get_variable("char_embed", [n_chars, char_embed_dim], dtype=DTYPE, initializer=tf.random_uniform_initializer(-1.0, 1.0)) # shape (batch_size, unroll_steps, max_chars, embed_dim) self.char_embedding = tf.nn.embedding_lookup(self.embedding_weights, self.ids_placeholder) # the convolutions def make_convolutions(inp): with tf.variable_scope('CNN'): convolutions = [] for i, (width, num) in enumerate(filters): if cnn_options['activation'] == 'relu': # He initialization for ReLU activation # with char embeddings init between -1 and 1 # w_init = tf.random_normal_initializer( # mean=0.0, # stddev=np.sqrt(2.0 / (width * char_embed_dim)) # ) # Kim et al 2015, +/- 0.05 w_init = tf.random_uniform_initializer( minval=-0.05, maxval=0.05) elif cnn_options['activation'] == 'tanh': # glorot init w_init = tf.random_normal_initializer( mean=0.0, stddev=np.sqrt(1.0 / (width * char_embed_dim)) ) w = tf.get_variable( "W_cnn_%s" % i, [1, width, char_embed_dim, num], initializer=w_init, dtype=DTYPE) b = tf.get_variable( "b_cnn_%s" % i, [num], dtype=DTYPE, initializer=tf.constant_initializer(0.0)) conv = tf.nn.conv2d(inp, w, strides=[1, 1, 1, 1], padding="VALID") + b # now max pool conv = tf.nn.max_pool(conv, [1, 1, max_chars - width + 1, 1], [1, 1, 1, 1], 'VALID') # activation conv = activation(conv) conv = tf.squeeze(conv, squeeze_dims=[2]) convolutions.append(conv) return tf.concat(convolutions, 2) embedding = make_convolutions(self.char_embedding) # for highway and projection layers n_highway = cnn_options.get('n_highway') use_highway = n_highway is not None and n_highway > 0 use_proj = n_filters != projection_dim if use_highway or use_proj: # reshape from (batch_size, n_tokens, dim) to (-1, dim) batch_size_n_tokens = tf.shape(embedding)[0:2] embedding = tf.reshape(embedding, [-1, n_filters]) # set up weights for projection if use_proj: assert n_filters > projection_dim with tf.variable_scope('CNN_proj'): W_proj_cnn = tf.get_variable( "W_proj", [n_filters, projection_dim], initializer=tf.random_normal_initializer( mean=0.0, stddev=np.sqrt(1.0 / n_filters)), dtype=DTYPE) b_proj_cnn = tf.get_variable( "b_proj", [projection_dim], initializer=tf.constant_initializer(0.0), dtype=DTYPE) # apply highways layers def high(x, ww_carry, bb_carry, ww_tr, bb_tr): carry_gate = tf.nn.sigmoid(tf.matmul(x, ww_carry) + bb_carry) transform_gate = tf.nn.relu(tf.matmul(x, ww_tr) + bb_tr) return carry_gate * transform_gate + (1.0 - carry_gate) * x if use_highway: highway_dim = n_filters 
for i in range(n_highway): with tf.variable_scope('CNN_high_%s' % i): W_carry = tf.get_variable( 'W_carry', [highway_dim, highway_dim], # glorit init initializer=tf.random_normal_initializer( mean=0.0, stddev=np.sqrt(1.0 / highway_dim)), dtype=DTYPE) b_carry = tf.get_variable( 'b_carry', [highway_dim], initializer=tf.constant_initializer(-2.0), dtype=DTYPE) W_transform = tf.get_variable( 'W_transform', [highway_dim, highway_dim], initializer=tf.random_normal_initializer( mean=0.0, stddev=np.sqrt(1.0 / highway_dim)), dtype=DTYPE) b_transform = tf.get_variable( 'b_transform', [highway_dim], initializer=tf.constant_initializer(0.0), dtype=DTYPE) embedding = high(embedding, W_carry, b_carry, W_transform, b_transform) # finally project down if needed if use_proj: embedding = tf.matmul(embedding, W_proj_cnn) + b_proj_cnn # reshape back to (batch_size, tokens, dim) if use_highway or use_proj: shp = tf.concat([batch_size_n_tokens, [projection_dim]], axis=0) embedding = tf.reshape(embedding, shp) # at last assign attributes for remainder of the model self.embedding = embedding
Reads a file from a path and returns data as a list of tuples of inputs and correct outputs for every data type in ``train``, ``valid`` and ``test``.
def read(self, data_path: str, *args, **kwargs) -> Dict[str, List[Tuple[Any, Any]]]:
    """Reads a file from a path and returns data as a list of tuples of inputs and correct outputs
    for every data type in ``train``, ``valid`` and ``test``.
    """
    raise NotImplementedError
Builds agent based on PatternMatchingSkill and HighestConfidenceSelector.
def make_hello_bot_agent() -> DefaultAgent:
    """Builds agent based on PatternMatchingSkill and HighestConfidenceSelector.

    This is agent building tutorial. You can use this .py file to check how hello-bot agent works.

    Returns:
        agent: Agent capable of handling several simple greetings.
    """
    skill_hello = PatternMatchingSkill(['Hello world'], patterns=['hi', 'hello', 'good day'])
    skill_bye = PatternMatchingSkill(['Goodbye world', 'See you around'], patterns=['bye', 'chao', 'see you'])
    skill_fallback = PatternMatchingSkill(['I don\'t understand, sorry', 'I can say "Hello world"'])

    agent = DefaultAgent([skill_hello, skill_bye, skill_fallback],
                         skills_processor=HighestConfidenceSelector())
    return agent
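A hedged usage sketch, assuming DeepPavlov is installed and that a DefaultAgent instance is callable on a batch of utterances; the exact goodbye reply is chosen among the skill's configured responses.

agent = make_hello_bot_agent()
print(agent(['hello', 'good day', 'bye']))
# e.g. ['Hello world', 'Hello world', 'Goodbye world'] (the last reply may also be 'See you around')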
Takes an array of integers and transforms it to an array of one-hot encoded vectors
def to_one_hot(x, k):
    """
    Takes an array of integers and transforms it
    to an array of one-hot encoded vectors
    """
    unit = np.eye(k, dtype=int)
    return unit[x]
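A tiny usage sketch, assuming the helper above is in scope:

import numpy as np

labels = np.array([0, 2, 1])
print(to_one_hot(labels, 3))
# [[1 0 0]
#  [0 0 1]
#  [0 1 0]]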
Prettifies the dictionary of metrics.
def prettify_metrics(metrics: List[Tuple[str, float]], precision: int = 4) -> OrderedDict:
    """Prettifies the dictionary of metrics."""
    prettified_metrics = OrderedDict()
    for key, value in metrics:
        value = round(value, precision)
        prettified_metrics[key] = value
    return prettified_metrics
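A short usage sketch with illustrative metric values:

from collections import OrderedDict

metrics = [('accuracy', 0.912345678), ('f1', 0.87654321)]
print(prettify_metrics(metrics))
# OrderedDict([('accuracy', 0.9123), ('f1', 0.8765)])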
Populate settings directory with default settings files
def populate_settings_dir(force: bool = False) -> bool:
    """
    Populate settings directory with default settings files

    Args:
        force: if ``True``, replace existing settings files with default ones

    Returns:
        ``True`` if any files were copied and ``False`` otherwise
    """
    res = False
    if _default_settings_path == _settings_path:
        return res

    for src in list(_default_settings_path.glob('**/*.json')):
        dest = _settings_path / src.relative_to(_default_settings_path)
        if not force and dest.exists():
            continue
        res = True
        dest.parent.mkdir(parents=True, exist_ok=True)
        shutil.copy(src, dest)
    return res
Updates dialogue state with new slots and calculates features.
def update_state(self, slots: Union[List[Tuple[str, Any]], Dict[str, Any]]) -> 'Tracker':
    """Updates dialogue state with new ``slots``, calculates features.

    Returns:
        Tracker: the updated tracker.
    """
    pass
Returns predictions of the morphotagging model defined in the config at config_path.
def predict_with_model(config_path: [Path, str]) -> List[Optional[List[str]]]:
    """Returns predictions of morphotagging model given in config :config_path:.

    Args:
        config_path: a path to config

    Returns:
        a list of morphological analyses for each sentence. Each analysis is either a list of tags
        or a list of full CONLL-U descriptions.
    """
    config = parse_config(config_path)

    reader_config = config['dataset_reader']
    reader = get_model(reader_config['class_name'])()
    data_path = expand_path(reader_config.get('data_path', ''))
    read_params = {k: v for k, v in reader_config.items() if k not in ['class_name', 'data_path']}
    data: Dict = reader.read(data_path, **read_params)

    iterator_config = config['dataset_iterator']
    iterator: MorphoTaggerDatasetIterator = from_params(iterator_config, data=data)

    model = build_model(config, load_trained=True)
    answers = [None] * len(iterator.test)
    batch_size = config['predict'].get("batch_size", -1)
    for indexes, (x, _) in iterator.gen_batches(
            batch_size=batch_size, data_type="test", shuffle=False, return_indexes=True):
        y = model(x)
        for i, elem in zip(indexes, y):
            answers[i] = elem

    outfile = config['predict'].get("outfile")
    if outfile is not None:
        outfile = Path(outfile)
        if not outfile.exists():
            outfile.parent.mkdir(parents=True, exist_ok=True)
        with open(outfile, "w", encoding="utf8") as fout:
            for elem in answers:
                fout.write(elem + "\n")
    return answers
Initiates Flask web service with Alexa skill.
def run_alexa_server(agent_generator: callable, multi_instance: bool = False, stateful: bool = False, port: Optional[int] = None, https: bool = False, ssl_key: str = None, ssl_cert: str = None) -> None: """Initiates Flask web service with Alexa skill. Args: agent_generator: Callback Alexa agents factory. multi_instance: Multi instance mode flag. stateful: Stateful mode flag. port: Flask web service port. https: Flag for running Alexa skill service in https mode. ssl_key: SSL key file path. ssl_cert: SSL certificate file path. """ server_config_path = Path(get_settings_path(), SERVER_CONFIG_FILENAME).resolve() server_params = read_json(server_config_path) host = server_params['common_defaults']['host'] port = port or server_params['common_defaults']['port'] alexa_server_params = server_params['alexa_defaults'] alexa_server_params['multi_instance'] = multi_instance or server_params['common_defaults']['multi_instance'] alexa_server_params['stateful'] = stateful or server_params['common_defaults']['stateful'] alexa_server_params['amazon_cert_lifetime'] = AMAZON_CERTIFICATE_LIFETIME if https: ssh_key_path = Path(ssl_key or server_params['https_key_path']).resolve() if not ssh_key_path.is_file(): e = FileNotFoundError('Ssh key file not found: please provide correct path in --key param or ' 'https_key_path param in server configuration file') log.error(e) raise e ssh_cert_path = Path(ssl_cert or server_params['https_cert_path']).resolve() if not ssh_cert_path.is_file(): e = FileNotFoundError('Ssh certificate file not found: please provide correct path in --cert param or ' 'https_cert_path param in server configuration file') log.error(e) raise e ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2) ssl_context.load_cert_chain(ssh_cert_path, ssh_key_path) else: ssl_context = None input_q = Queue() output_q = Queue() bot = Bot(agent_generator, alexa_server_params, input_q, output_q) bot.start() endpoint_description = { 'description': 'Amazon Alexa custom service endpoint', 'parameters': [ { 'name': 'Signature', 'in': 'header', 'required': 'true', 'type': 'string', 'example': 'Z5H5wqd06ExFVPNfJiqhKvAFjkf+cTVodOUirucHGcEVAMO1LfvgqWUkZ/X1ITDZbI0w+SMwVkEQZlkeThbVS/54M22StNDUtfz4Ua20xNDpIPwcWIACAmZ38XxbbTEFJI5WwqrbilNcfzqiGrIPfdO5rl+/xUjHFUdcJdUY/QzBxXsceytVYfEiR9MzOCN2m4C0XnpThUavAu159KrLj8AkuzN0JF87iXv+zOEeZRgEuwmsAnJrRUwkJ4yWokEPnSVdjF0D6f6CscfyvRe9nsWShq7/zRTa41meweh+n006zvf58MbzRdXPB22RI4AN0ksWW7hSC8/QLAKQE+lvaw==', }, { 'name': 'Signaturecertchainurl', 'in': 'header', 'required': 'true', 'type': 'string', 'example': 'https://s3.amazonaws.com/echo.api/echo-api-cert-6-ats.pem', }, { 'name': 'data', 'in': 'body', 'required': 'true', 'example': { 'version': '1.0', 'session': { 'new': False, 'sessionId': 'amzn1.echo-api.session.3c6ebffd-55b9-4e1a-bf3c-c921c1801b63', 'application': { 'applicationId': 'amzn1.ask.skill.8b17a5de-3749-4919-aa1f-e0bbaf8a46a6' }, 'attributes': { 'sessionId': 'amzn1.echo-api.session.3c6ebffd-55b9-4e1a-bf3c-c921c1801b63' }, 'user': { 'userId': 'amzn1.ask.account.AGR4R2LOVHMNMNOGROBVNLU7CL4C57X465XJF2T2F55OUXNTLCXDQP3I55UXZIALEKKZJ6Q2MA5MEFSMZVPEL5NVZS6FZLEU444BVOLPB5WVH5CHYTQAKGD7VFLGPRFZVHHH2NIB4HKNHHGX6HM6S6QDWCKXWOIZL7ONNQSBUCVPMZQKMCYXRG5BA2POYEXFDXRXCGEVDWVSMPQ' } }, 'context': { 'System': { 'application': { 'applicationId': 'amzn1.ask.skill.8b17a5de-3749-4919-aa1f-e0bbaf8a46a6' }, 'user': { 'userId': 
'amzn1.ask.account.AGR4R2LOVHMNMNOGROBVNLU7CL4C57X465XJF2T2F55OUXNTLCXDQP3I55UXZIALEKKZJ6Q2MA5MEFSMZVPEL5NVZS6FZLEU444BVOLPB5WVH5CHYTQAKGD7VFLGPRFZVHHH2NIB4HKNHHGX6HM6S6QDWCKXWOIZL7ONNQSBUCVPMZQKMCYXRG5BA2POYEXFDXRXCGEVDWVSMPQ' }, 'device': { 'deviceId': 'amzn1.ask.device.AFQAMLYOYQUUACSE7HFVYS4ZI2KUB35JPHQRUPKTDCAU3A47WESP5L57KSWT5L6RT3FVXWH4OA2DNPJRMZ2VGEIACF3PJEIDCOUWUBC4W5RPJNUB3ZVT22J4UJN5UL3T2UBP36RVHFJ5P4IPT2HUY3P2YOY33IOU4O33HUAG7R2BUNROEH4T2', 'supportedInterfaces': {} }, 'apiEndpoint': 'https://api.amazonalexa.com', 'apiAccessToken': 'eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOiJodHRwczovL2FwaS5hbWF6b25hbGV4YS5jb20iLCJpc3MiOiJBbGV4YVNraWxsS2l0Iiwic3ViIjoiYW16bjEuYXNrLnNraWxsLjhiMTdhNWRlLTM3NDktNDkxOS1hYTFmLWUwYmJhZjhhNDZhNiIsImV4cCI6MTU0NTIyMzY1OCwiaWF0IjoxNTQ1MjIwMDU4LCJuYmYiOjE1NDUyMjAwNTgsInByaXZhdGVDbGFpbXMiOnsiY29uc2VudFRva2VuIjpudWxsLCJkZXZpY2VJZCI6ImFtem4xLmFzay5kZXZpY2UuQUZRQU1MWU9ZUVVVQUNTRTdIRlZZUzRaSTJLVUIzNUpQSFFSVVBLVERDQVUzQTQ3V0VTUDVMNTdLU1dUNUw2UlQzRlZYV0g0T0EyRE5QSlJNWjJWR0VJQUNGM1BKRUlEQ09VV1VCQzRXNVJQSk5VQjNaVlQyMko0VUpONVVMM1QyVUJQMzZSVkhGSjVQNElQVDJIVVkzUDJZT1kzM0lPVTRPMzNIVUFHN1IyQlVOUk9FSDRUMiIsInVzZXJJZCI6ImFtem4xLmFzay5hY2NvdW50LkFHUjRSMkxPVkhNTk1OT0dST0JWTkxVN0NMNEM1N1g0NjVYSkYyVDJGNTVPVVhOVExDWERRUDNJNTVVWFpJQUxFS0taSjZRMk1BNU1FRlNNWlZQRUw1TlZaUzZGWkxFVTQ0NEJWT0xQQjVXVkg1Q0hZVFFBS0dEN1ZGTEdQUkZaVkhISDJOSUI0SEtOSEhHWDZITTZTNlFEV0NLWFdPSVpMN09OTlFTQlVDVlBNWlFLTUNZWFJHNUJBMlBPWUVYRkRYUlhDR0VWRFdWU01QUSJ9fQ.jcomYhBhU485T4uoe2NyhWnL-kZHoPQKpcycFqa-1sy_lSIitfFGup9DKrf2NkN-I9lZ3xwq9llqx9WRN78fVJjN6GLcDhBDH0irPwt3n9_V7_5bfB6KARv5ZG-JKOmZlLBqQbnln0DAJ10D8HNiytMARNEwduMBVDNK0A5z6YxtRcLYYFD2-Ieg_V8Qx90eE2pd2U5xOuIEL0pXfSoiJ8vpxb8BKwaMO47tdE4qhg_k7v8ClwyXg3EMEhZFjixYNqdW1tCrwDGj58IWMXDyzZhIlRMh6uudMOT6scSzcNVD0v42IOTZ3S_X6rG01B7xhUDlZXMqkrCuzOyqctGaPw' }, 'Viewport': { 'experiences': [ { 'arcMinuteWidth': 246, 'arcMinuteHeight': 144, 'canRotate': False, 'canResize': False } ], 'shape': 'RECTANGLE', 'pixelWidth': 1024, 'pixelHeight': 600, 'dpi': 160, 'currentPixelWidth': 1024, 'currentPixelHeight': 600, 'touch': [ 'SINGLE' ] } }, 'request': { 'type': 'IntentRequest', 'requestId': 'amzn1.echo-api.request.388d0f6e-04b9-4450-a687-b9abaa73ac6a', 'timestamp': '2018-12-19T11:47:38Z', 'locale': 'en-US', 'intent': { 'name': 'AskDeepPavlov', 'confirmationStatus': 'NONE', 'slots': { 'raw_input': { 'name': 'raw_input', 'value': 'my beautiful sandbox skill', 'resolutions': { 'resolutionsPerAuthority': [ { 'authority': 'amzn1.er-authority.echo-sdk.amzn1.ask.skill.8b17a5de-3749-4919-aa1f-e0bbaf8a46a6.GetInput', 'status': { 'code': 'ER_SUCCESS_NO_MATCH' } } ] }, 'confirmationStatus': 'NONE', 'source': 'USER' } } } } } } ], 'responses': { "200": { "description": "A model response" } } } @app.route('/') def index(): return redirect('/apidocs/') @app.route('/interact', methods=['POST']) @swag_from(endpoint_description) def handle_request(): request_body: bytes = request.get_data() signature_chain_url: str = request.headers.get('Signaturecertchainurl') signature: str = request.headers.get('Signature') alexa_request: dict = request.get_json() request_dict = { 'request_body': request_body, 'signature_chain_url': signature_chain_url, 'signature': signature, 'alexa_request': alexa_request } bot.input_queue.put(request_dict) response: dict = bot.output_queue.get() response_code = 400 if 'error' in response.keys() else 200 return jsonify(response), response_code app.run(host=host, port=port, threaded=True, ssl_context=ssl_context)
Load model parameters from self.load_path
def load(self, exclude_scopes: tuple = ('Optimizer',)) -> None:
    """Load model parameters from self.load_path"""
    if not hasattr(self, 'sess'):
        raise RuntimeError('Your TensorFlow model {} must'
                           ' have sess attribute!'.format(self.__class__.__name__))
    path = str(self.load_path.resolve())
    # Check presence of the model files
    if tf.train.checkpoint_exists(path):
        log.info('[loading model from {}]'.format(path))
        # Exclude optimizer variables from saved variables
        var_list = self._get_saveable_variables(exclude_scopes)
        saver = tf.train.Saver(var_list)
        saver.restore(self.sess, path)
Save model parameters to self.save_path
def save(self, exclude_scopes: tuple = ('Optimizer',)) -> None:
    """Save model parameters to self.save_path"""
    if not hasattr(self, 'sess'):
        raise RuntimeError('Your TensorFlow model {} must'
                           ' have sess attribute!'.format(self.__class__.__name__))
    path = str(self.save_path.resolve())
    log.info('[saving model to {}]'.format(path))
    var_list = self._get_saveable_variables(exclude_scopes)
    saver = tf.train.Saver(var_list)
    saver.save(self.sess, path)
Get train operation for given loss
def get_train_op(self, loss, learning_rate, optimizer=None, clip_norm=None, learnable_scopes=None, optimizer_scope_name=None, **kwargs): """ Get train operation for given loss Args: loss: loss, tf tensor or scalar learning_rate: scalar or placeholder. clip_norm: clip gradients norm by clip_norm. learnable_scopes: which scopes are trainable (None for all). optimizer: instance of tf.train.Optimizer, default Adam. **kwargs: parameters passed to tf.train.Optimizer object (scalars or placeholders). Returns: train_op """ if optimizer_scope_name is None: opt_scope = tf.variable_scope('Optimizer') else: opt_scope = tf.variable_scope(optimizer_scope_name) with opt_scope: if learnable_scopes is None: variables_to_train = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) else: variables_to_train = [] for scope_name in learnable_scopes: variables_to_train.extend(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope_name)) if optimizer is None: optimizer = tf.train.AdamOptimizer # For batch norm it is necessary to update running averages extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) with tf.control_dependencies(extra_update_ops): def clip_if_not_none(grad): if grad is not None: return tf.clip_by_norm(grad, clip_norm) opt = optimizer(learning_rate, **kwargs) grads_and_vars = opt.compute_gradients(loss, var_list=variables_to_train) if clip_norm is not None: grads_and_vars = [(clip_if_not_none(grad), var) for grad, var in grads_and_vars] train_op = opt.apply_gradients(grads_and_vars) return train_op
Print number of *trainable* parameters in the network
def print_number_of_parameters():
    """
    Print number of *trainable* parameters in the network
    """
    log.info('Number of parameters: ')
    variables = tf.trainable_variables()
    blocks = defaultdict(int)
    for var in variables:
        # Get the top level scope name of variable
        block_name = var.name.split('/')[0]
        number_of_parameters = np.prod(var.get_shape().as_list())
        blocks[block_name] += number_of_parameters
    for block_name, cnt in blocks.items():
        log.info("{} - {}.".format(block_name, cnt))
    total_num_parameters = np.sum(list(blocks.values()))
    log.info('Total number of parameters equal {}'.format(total_num_parameters))
Computes the minimal cost of a new symbol appearing at dictionary nodes, according to the penalties from costs
def _precompute_absense_costs(dictionary, removal_costs, insertion_costs, n, allow_spaces=False): """ Вычисляет минимальную стоимость появления нового символа в узлах словаря в соответствии со штрафами из costs Аргументы: --------------- dictionary : Trie словарь, хранящийся в виде ациклического автомата removal_costs : dict штрафы за удаление символов insertion_costs : dict штрафы за вставку символов n : int глубина ``заглядывания вперёд'' в словаре Возвращает --------------- answer : list of dicts, len(answer)=len(dictionary) answer[i][a][j] равно минимальному штрафу за появление символа a в j-ой позиции в вершине с номером i """ answer = [dict() for node in dictionary.data] if n == 0: return answer curr_alphabet = copy.copy(dictionary.alphabet) if allow_spaces: curr_alphabet += [' '] for l, (costs_in_node, node) in enumerate(zip(answer, dictionary.data)): # определение минимальной стоимости удаления символов curr_node_removal_costs = np.empty(dtype=np.float64, shape=(n,)) if len(node[0]) > 0: curr_node_removal_costs[0] = min(removal_costs[symbol] for symbol in node[0]) for j, symbols in enumerate(node[1:], 1): if len(symbols) == 0: curr_node_removal_costs[j:] = curr_node_removal_costs[j-1] break curr_cost = min(removal_costs[symbol] for symbol in symbols) curr_node_removal_costs[j] = min(curr_node_removal_costs[j-1], curr_cost) else: curr_node_removal_costs[:] = np.inf # определение минимальной стоимости вставки for a in curr_alphabet: curr_symbol_costs = np.empty(dtype=np.float64, shape=(n,)) curr_symbol_costs.fill(insertion_costs[a]) for j, symbols in enumerate(node): if a in symbols: curr_symbol_costs[j:] = 0.0 break curr_symbol_costs[j] = min(curr_symbol_costs[j], curr_node_removal_costs[j]) costs_in_node[a] = curr_symbol_costs return answer
Finds all dictionary words in a d-window from word
def search(self, word, d, allow_spaces=True, return_cost=True):
    """
    Finds all dictionary words in a d-window from word
    """
    if not all((c in self.alphabet or (c == " " and self.allow_spaces))
               for c in word):
        return []
        # raise ValueError("{0} contains an incorrect symbol".format(word))
    return self._trie_search(word, d, allow_spaces=allow_spaces,
                             return_cost=return_cost)
Finds all words in the prefix trie whose distance from the input, according to the given transducer, does not exceed d
def _trie_search(self, word, d, transducer=None, allow_spaces=True, return_cost=True): """ Находит все слова в префиксном боре, расстояние до которых в соответствии с заданным преобразователем не превышает d """ if transducer is None: # разобраться с пробелами transducer = self.transducer.inverse() allow_spaces &= self.allow_spaces trie = self.dictionary # инициализация переменных used_agenda_keys = set() agenda = SortedListWithKey(key=(lambda x:x[1])) h = self.h_func(word, trie.root) # agenda[self.agenda_key("", 0, trie.root)] = (0.0, 0.0, h) key, value = ("", 0, trie.root), (0.0, 0.0, h) agenda.add((key, value)) answer = dict() k = 0 # очередь с приоритетом с промежуточными результатами while len(agenda) > 0: key, value = agenda.pop(0) if key in used_agenda_keys: continue used_agenda_keys.add(key) low, pos, index = key cost, g, h = value # g --- текущая стоимость, h --- нижняя оценка будущей стоимости # cost = g + h --- нижняя оценка суммарной стоимости k += 1 max_upperside_length = min(len(word) - pos, transducer.max_up_length) for upperside_length in range(max_upperside_length + 1): new_pos = pos + upperside_length curr_up = word[pos: new_pos] if curr_up not in transducer.operation_costs: continue for curr_low, curr_cost in transducer.operation_costs[curr_up].items(): new_g = g + curr_cost if new_g > d: #если g > d, то h можно не вычислять continue if curr_low == " ": if allow_spaces and trie.is_final(index): new_index = trie.root else: new_index = Trie.NO_NODE else: new_index = trie.descend(index, curr_low) if new_index is Trie.NO_NODE: continue new_low = low + curr_low new_h = self.h_func(word[new_pos: ], new_index) new_cost = new_g + new_h if new_cost > d: continue new_key = (new_low, new_pos, new_index) new_value = (new_cost, new_g, new_h) if new_pos == len(word) and trie.is_final(new_index): old_g = answer.get(new_low, None) if old_g is None or new_g < old_g: answer[new_low] = new_g agenda.add((new_key, new_value)) answer = sorted(answer.items(), key=(lambda x: x[1])) if return_cost: return answer else: return [elem[0] for elem in answer]
Precomputes future symbols and the costs of operations on them for the h-heuristic
def _precompute_euristics(self): """ Предвычисляет будущие символы и стоимости операций с ними для h-эвристики """ if self.euristics is None: return # вычисление минимальной стоимости операции, # приводящей к появлению ('+') или исчезновению ('-') данного символа removal_costs = {a : np.inf for a in self.alphabet} insertion_costs = {a : np.inf for a in self.alphabet} if self.allow_spaces: removal_costs[' '] = np.inf insertion_costs[' '] = np.inf for up, costs in self.transducer.operation_costs.items(): for low, cost in costs.items(): if up == low: continue if up != '': removal_cost = cost / len(up) for a in up: removal_costs[a] = min(removal_costs[a], removal_cost) if low != '': insertion_cost = cost / len(low) for a in low: insertion_costs[a] = min(insertion_costs[a], insertion_cost) # предвычисление возможных будущих символов в узлах дерева # precompute_future_symbols(self.dictionary, self.euristics, self.allow_spaces) # предвычисление стоимостей потери символа в узлах дерева self._absense_costs_by_node = _precompute_absense_costs( self.dictionary, removal_costs, insertion_costs, self.euristics, self.allow_spaces) # массив для сохранения эвристик self._temporary_euristics = [dict() for i in range(len(self.dictionary))]
Computes the h-heuristic from Hulden, 2009 for the current dictionary node
def _euristic_h_function(self, suffix, index): """ Вычисление h-эвристики из работы Hulden,2009 для текущей вершины словаря Аргументы: ---------- suffix : string непрочитанный суффикс входного слова index : int индекс текущего узла в словаре Возвращает: ----------- cost : float оценка снизу для стоимости замены, приводящей к входному слову с суффиксом suffix, если прочитанный префикс слова без опечатки привёл в вершину с номером index """ if self.euristics > 0: suffix = suffix[:self.euristics] # кэширование результатов index_temporary_euristics = self._temporary_euristics[index] cost = index_temporary_euristics.get(suffix, None) if cost is not None: return cost # извлечение нужных данных из массивов absense_costs = self._absense_costs_by_node[index] data = self.dictionary.data[index] costs = np.zeros(dtype=np.float64, shape=(self.euristics,)) # costs[j] --- оценка штрафа при предпросмотре вперёд на j символов for i, a in enumerate(suffix): costs[i:] += absense_costs[a][i:] cost = max(costs) index_temporary_euristics[suffix] = cost return cost
Returns the cost of the elementary transduction up->low, or np.inf if there is no such elementary transduction
def get_operation_cost(self, up, low):
    """
    Returns the cost of the elementary transduction up->low,
    or np.inf if there is no such elementary transduction

    Arguments:
    ----------
    up, low : string
        elements of the elementary transduction

    Returns:
    -----------
    cost : float
        the cost of the elementary transduction up->low
        (np.inf if there is no such transduction)
    """
    up_costs = self.operation_costs.get(up, None)
    if up_costs is None:
        return np.inf
    cost = up_costs.get(low, np.inf)
    return cost
Builds the transducer that defines the inverse finite transformation
def inverse(self):
    """
    Builds the transducer that defines the inverse finite transformation
    """
    # TODO: simplify the inversion
    inversed_transducer = SegmentTransducer(self.alphabet, operation_costs=dict())
    inversed_transducer.operation_costs = self._reversed_operation_costs
    inversed_transducer._reversed_operation_costs = self.operation_costs
    inversed_transducer.max_low_length = self.max_up_length
    inversed_transducer.max_up_length = self.max_low_length
    inversed_transducer.max_low_lengths_by_up = self.max_up_lengths_by_low
    inversed_transducer.max_up_lengths_by_low = self.max_low_lengths_by_up
    return inversed_transducer
Computes the minimum-cost transduction that maps first to second
def distance(self, first, second, return_transduction = False): """ Вычисляет трансдукцию минимальной стоимости, отображающую first в second Аргументы: ----------- first : string second : string Верхний и нижний элементы трансдукции return_transduction : bool (optional, default=False) следует ли возвращать трансдукцию минимального веса (см. возвращаемое значение) Возвращает: ----------- (final_cost, transductions) : tuple(float, list) если return_transduction=True, то возвращает минимальную стоимость трансдукции, переводящей first в second и список трансдукций с данной стоимостью final_cost : float если return_transduction=False, то возвращает минимальную стоимость трансдукции, переводящей first в second """ if return_transduction: add_pred = (lambda x, y: (y == np.inf or x < y)) else: add_pred = (lambda x, y: (y == np.inf or x <= y)) clear_pred = (lambda x, y: (y < np.inf and x < y)) update_func = lambda x, y: min(x, y) costs, backtraces = self._fill_levenshtein_table(first, second, update_func, add_pred, clear_pred) final_cost = costs[-1][-1] if final_cost == np.inf: transductions = [None] elif return_transduction: transductions = self._backtraces_to_transductions(first, second, backtraces, final_cost, return_cost=False) if return_transduction: return final_cost, transductions else: return final_cost
Returns all transductions mapping first to second whose cost does not exceed threshold
def transduce(self, first, second, threshold): """ Возвращает все трансдукции, переводящие first в second, чья стоимость не превышает threshold Возвращает: ---------- result : list список вида [(трансдукция, стоимость)] """ add_pred = (lambda x, y: x <= threshold) clear_pred =(lambda x, y: False) update_func = (lambda x, y: min(x, y)) costs, backtraces = self._fill_levenshtein_table(first, second, update_func, add_pred, clear_pred, threshold=threshold) result = self._backtraces_to_transductions(first, second, backtraces, threshold, return_cost=True) return result
Returns all transductions with upper element word whose cost does not exceed max_cost
def lower_transductions(self, word, max_cost, return_cost=True): """ Возвращает все трансдукции с верхним элементом word, чья стоимость не превышает max_cost ` Возвращает: ---------- result : list список вида [(трансдукция, стоимость)], если return_cost=True список трансдукций, если return_cost=False список отсортирован в порядке возрастания стоимости трансдукции """ prefixes = [[] for i in range(len(word) + 1)] prefixes[0].append(((), 0.0)) for pos in range(len(prefixes)): # вставки prefixes[pos] = self._perform_insertions(prefixes[pos], max_cost) max_upperside_length = min(len(word) - pos, self.max_up_length) for upperside_length in range(1, max_upperside_length + 1): up = word[pos: pos + upperside_length] for low, low_cost in self.operation_costs.get(up, dict()).items(): for transduction, cost in prefixes[pos]: new_cost = cost + low_cost if new_cost <= max_cost: new_transduction = transduction +(up, low) prefixes[pos + upperside_length].append((new_transduction, new_cost)) answer = sorted(prefixes[-1], key=(lambda x: x[0])) if return_cost: return answer else: return [elem[0] for elem in answer]
A function that dynamically fills the table costs of transduction costs, where costs[i][j] is the minimum cost of a transduction mapping first[:i] to second[:j]
def _fill_levenshtein_table(self, first, second, update_func, add_pred, clear_pred, threshold=None): """ Функция, динамически заполняющая таблицу costs стоимости трансдукций, costs[i][j] --- минимальная стоимость трансдукции, переводящей first[:i] в second[:j] Аргументы: ---------- first, second : string Верхний и нижний элементы трансдукции update_func : callable, float*float -> bool update_func(x, y) возвращает новое значение в ячейке таблицы costs, если старое значение --- y, а потенциально новое значение --- x везде update_func = min add_pred : callable : float*float -> bool add_pred(x, y) возвращает, производится ли добавление нового элемента p стоимости x в ячейку backtraces[i][j] в зависимости от значения costs[i][j]=y и текущей стоимости x clear_pred : callable : float*float -> bool clear_pred(x, y) возвращает, производится ли очистка ячейки backtraces[i][j] в зависимости от значения costs[i][j]=y и текущей стоимости x элемента p, добавляемого в эту ячейку Возвращает: ----------- costs : array, dtype=float, shape=(len(first)+1, len(second)+1) массив, в ячейке с индексами i, j которого хранится минимальная стоимость трансдукции, переводящей first[:i] в second[:j] backtraces : array, dtype=list, shape=(len(first)+1, len(second)+1) массив, в ячейке с индексами i, j которого хранятся обратные ссылки на предыдущую ячейку в оптимальной трансдукции, приводящей в ячейку backtraces[i][j] """ m, n = len(first), len(second) # если threshold=None, то в качестве порога берётся удвоенная стоимость # трансдукции, отображающей символы на одинаковых позициях друг в друга if threshold is None: threshold = 0.0 for a, b in zip(first, second): threshold += self.get_operation_cost(a, b) if m > n: for a in first[n: ]: threshold += self.get_operation_cost(a, '') elif m < n: for b in second[m: ]: threshold += self.get_operation_cost('', b) threshold *= 2 # инициализация возвращаемых массивов costs = np.zeros(shape=(m + 1, n + 1), dtype=np.float64) costs[:] = np.inf backtraces = [None] * (m + 1) for i in range(m + 1): backtraces[i] = [[] for j in range(n + 1)] costs[0][0] = 0.0 for i in range(m + 1): for i_right in range(i, min(i + self.max_up_length, m) + 1): up = first[i: i_right] max_low_length = self.max_low_lengths_by_up.get(up, -1) if max_low_length == -1: # no up key in transduction continue up_costs = self.operation_costs[up] for j in range(n + 1): if costs[i][j] > threshold: continue if len(backtraces[i][j]) == 0 and i + j > 0: continue # не нашлось обратных ссылок for j_right in range((j if i_right > i else j + 1), min(j + max_low_length, n) + 1): low = second[j: j_right] curr_cost = up_costs.get(low, np.inf) old_cost = costs[i_right][j_right] new_cost = costs[i][j] + curr_cost if new_cost > threshold: continue if add_pred(new_cost, old_cost): if clear_pred(new_cost, old_cost): backtraces[i_right][j_right] = [] costs[i_right][j_right] = update_func(new_cost, old_cost) backtraces[i_right][j_right].append((i, j)) return costs, backtraces
Fills the _reversed_operation_costs dictionary based on the existing operation_costs dictionary
def _make_reversed_operation_costs(self): """ Fills the _reversed_operation_costs dictionary based on the existing operation_costs dictionary """ _reversed_operation_costs = dict() for up, costs in self.operation_costs.items(): for low, cost in costs.items(): if low not in _reversed_operation_costs: _reversed_operation_costs[low] = dict() _reversed_operation_costs[low][up] = cost self._reversed_operation_costs = _reversed_operation_costs
Computes, for each up, the maximal length of the low element in an elementary transduction (up, low), and, for each low, the maximal length of the up element in an elementary transduction (up, low)
def _make_maximal_key_lengths(self): """ Computes, for each up, the maximal length of the low element in an elementary transduction (up, low), and, for each low, the maximal length of the up element in an elementary transduction (up, low) """ self.max_up_length =\ (max(len(up) for up in self.operation_costs) if len(self.operation_costs) > 0 else -1) self.max_low_length =\ (max(len(low) for low in self._reversed_operation_costs) if len(self._reversed_operation_costs) > 0 else -1) self.max_low_lengths_by_up, self.max_up_lengths_by_low = dict(), dict() for up, costs in self.operation_costs.items(): self.max_low_lengths_by_up[up] =\ max(len(low) for low in costs) if len(costs) > 0 else -1 for low, costs in self._reversed_operation_costs.items(): self.max_up_lengths_by_low[low] =\ max(len(up) for up in costs) if len(costs) > 0 else -1
Reconstructs transductions from the backtrace table
def _backtraces_to_transductions(self, first, second, backtraces, threshold, return_cost=False): """ Reconstructs transductions from the backtrace table Arguments: ---------- first, second : string upper and lower elements of the transduction backtraces : array-like, dtype=list, shape=(len(first)+1, len(second)+1) the backtrace table threshold : float threshold for filtering transductions; only transductions whose cost is <= threshold are returned return_cost : bool (optional, default=False) if True, the costs are returned together with the transductions Returns: ----------- result : list a list of the form [(transduction, cost)] if return_cost=True and of the form [transduction] if return_cost=False, containing all transductions that map first to second and whose cost does not exceed threshold """ m, n = len(first), len(second) agenda = [None] * (m + 1) for i in range(m + 1): agenda[i] = [[] for j in range(n+1)] agenda[m][n] = [((), 0.0)] for i_right in range(m, -1, -1): for j_right in range(n, -1, -1): current_agenda = agenda[i_right][j_right] if len(current_agenda) == 0: continue for (i, j) in backtraces[i_right][j_right]: up, low = first[i:i_right], second[j:j_right] add_cost = self.operation_costs[up][low] for elem, cost in current_agenda: new_cost = cost + add_cost if new_cost <= threshold: # discard transductions whose cost is too high agenda[i][j].append((((up, low),) + elem, new_cost)) if return_cost: return agenda[0][0] else: return [elem[0] for elem in agenda[0][0]]
Returns all transductions of cost <= max_cost that can be obtained from the elements of initial
def _perform_insertions(self, initial, max_cost): """ Returns all transductions of cost <= max_cost that can be obtained from the elements of initial Arguments: ---------- initial : list of tuples the initial list of transductions of the form [(transduction, cost)] max_cost : float the maximal transduction cost Returns: ----------- final : list of tuples the final list of transductions of the form [(transduction, cost)] """ queue = list(initial) final = initial while len(queue) > 0: transduction, cost = queue[0] queue = queue[1:] for string, string_cost in self.operation_costs[""].items(): new_cost = cost + string_cost if new_cost <= max_cost: new_transduction = transduction + ("", string) final.append((new_transduction, new_cost)) queue.append((new_transduction, new_cost)) return final
Sets a 1.0 cost for every replacement, insertion, deletion and transposition
def _make_default_operation_costs(self, allow_spaces=False): """ Sets a 1.0 cost for every replacement, insertion, deletion and transposition """ self.operation_costs = dict() self.operation_costs[""] = {c: 1.0 for c in list(self.alphabet) + [' ']} for a in self.alphabet: current_costs = {c: 1.0 for c in self.alphabet} current_costs[a] = 0.0 current_costs[""] = 1.0 if allow_spaces: current_costs[" "] = 1.0 self.operation_costs[a] = current_costs # transpositions for a, b in itertools.permutations(self.alphabet, 2): self.operation_costs[a + b] = {b + a: 1.0} # spaces if allow_spaces: self.operation_costs[" "] = {c: 1.0 for c in self.alphabet} self.operation_costs[" "][""] = 1.0
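For reference, a minimal sketch of the table this method builds, assuming a hypothetical two-letter alphabet {'a', 'b'} and allow_spaces=False:

# Sketch only: the expected contents of self.operation_costs after the call.
expected = {
    "": {'a': 1.0, 'b': 1.0, ' ': 1.0},   # insertions
    'a': {'a': 0.0, 'b': 1.0, "": 1.0},   # match / substitution / deletion
    'b': {'b': 0.0, 'a': 1.0, "": 1.0},
    'ab': {'ba': 1.0},                    # transpositions
    'ba': {'ab': 1.0},
}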
Initiates self-destruct timer.
def _start_timer(self) -> None: """Initiates self-destruct timer.""" self.timer = Timer(self.config['conversation_lifetime'], self.self_destruct_callback) self.timer.start()
Routes Alexa requests to appropriate handlers.
def handle_request(self, request: dict) -> dict: """Routes Alexa requests to appropriate handlers. Args: request: Alexa request. Returns: response: Response conforming Alexa response specification. """ request_type = request['request']['type'] request_id = request['request']['requestId'] log.debug(f'Received request. Type: {request_type}, id: {request_id}') if request_type in self.handled_requests.keys(): response: dict = self.handled_requests[request_type](request) else: response: dict = self.handled_requests['_unsupported'](request) log.warning(f'Unsupported request type: {request_type}, request id: {request_id}') self._rearm_self_destruct() return response
Infers DeepPavlov agent with raw user input extracted from Alexa request.
def _act(self, utterance: str) -> list: """Infers DeepPavlov agent with raw user input extracted from Alexa request. Args: utterance: Raw user input extracted from Alexa request. Returns: response: DeepPavlov agent response. """ if self.stateful: utterance = [[utterance], [self.key]] else: utterance = [[utterance]] agent_response: list = self.agent(*utterance) return agent_response
Populates generated response with additional data conforming Alexa response specification.
def _generate_response(self, response: dict, request: dict) -> dict: """Populates generated response with additional data conforming Alexa response specification. Args: response: Raw response dict to be populated. request: Alexa request. Returns: response: Response conforming Alexa response specification. """ response_template = deepcopy(self.response_template) response_template['sessionAttributes']['sessionId'] = request['session']['sessionId'] for key, value in response_template.items(): if key not in response.keys(): response[key] = value return response
Handles IntentRequest Alexa request.
def _handle_intent(self, request: dict) -> dict: """Handles IntentRequest Alexa request. Args: request: Alexa request. Returns: response: "response" part of response dict conforming Alexa specification. """ intent_name = self.config['intent_name'] slot_name = self.config['slot_name'] request_id = request['request']['requestId'] request_intent: dict = request['request']['intent'] if intent_name != request_intent['name']: log.error(f"Wrong intent name received: {request_intent['name']} in request {request_id}") return {'error': 'wrong intent name'} if slot_name not in request_intent['slots'].keys(): log.error(f'No slot named {slot_name} found in request {request_id}') return {'error': 'no slot found'} utterance = request_intent['slots'][slot_name]['value'] agent_response = self._act(utterance) if not agent_response: log.error(f'Some error during response generation for request {request_id}') return {'error': 'error during response generation'} prediction: RichMessage = agent_response[0] prediction: list = prediction.alexa() if not prediction: log.error(f'Some error during response generation for request {request_id}') return {'error': 'error during response generation'} response = self._generate_response(prediction[0], request) return response
Handles LaunchRequest Alexa request.
def _handle_launch(self, request: dict) -> dict: """Handles LaunchRequest Alexa request. Args: request: Alexa request. Returns: response: "response" part of response dict conforming Alexa specification. """ response = { 'response': { 'shouldEndSession': False, 'outputSpeech': { 'type': 'PlainText', 'text': self.config['start_message'] }, 'card': { 'type': 'Simple', 'content': self.config['start_message'] } } } response = self._generate_response(response, request) return response
Handles all unsupported types of Alexa requests. Returns standard message.
def _handle_unsupported(self, request: dict) -> dict: """Handles all unsupported types of Alexa requests. Returns standard message. Args: request: Alexa request. Returns: response: "response" part of response dict conforming Alexa specification. """ response = { 'response': { 'shouldEndSession': False, 'outputSpeech': { 'type': 'PlainText', 'text': self.config['unsupported_message'] }, 'card': { 'type': 'Simple', 'content': self.config['unsupported_message'] } } } response = self._generate_response(response, request) return response
method that defines ``Struct``'s pretty printing rules for iPython
def _repr_pretty_(self, p, cycle): """method that defines ``Struct``'s pretty printing rules for iPython Args: p (IPython.lib.pretty.RepresentationPrinter): pretty printer object cycle (bool): is ``True`` if pretty detected a cycle """ if cycle: p.text('Struct(...)') else: with p.group(7, 'Struct(', ')'): p.pretty(self._asdict())
Calculates perplexity by loss
def elmo_loss2ppl(losses: List[np.ndarray]) -> float: """ Calculates perplexity by loss Args: losses: list of numpy arrays of model losses Returns: perplexity : float """ avg_loss = np.mean(losses) return float(np.exp(avg_loss))
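A brief usage sketch (the loss values are illustrative only):

import numpy as np

# Two equally sized arrays of per-batch losses; the mean loss is 2.0,
# so the resulting perplexity is exp(2.0) ≈ 7.39.
losses = [np.array([2.1, 1.9]), np.array([2.0, 2.0])]
assert abs(elmo_loss2ppl(losses) - float(np.exp(2.0))) < 1e-6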
Create: self.total_loss: total loss op for training; self.softmax_W, softmax_b: the softmax variables; self.next_token_id / _reverse: placeholders for gold input
def _build_loss(self, lstm_outputs): """ Create: self.total_loss: total loss op for training self.softmax_W, softmax_b: the softmax variables self.next_token_id / _reverse: placeholders for gold input """ batch_size = self.options['batch_size'] unroll_steps = self.options['unroll_steps'] n_tokens_vocab = self.options['n_tokens_vocab'] # DEFINE next_token_id and *_reverse placeholders for the gold input def _get_next_token_placeholders(suffix): name = 'next_token_id' + suffix id_placeholder = tf.placeholder(DTYPE_INT, shape=(batch_size, unroll_steps), name=name) return id_placeholder # get the window and weight placeholders self.next_token_id = _get_next_token_placeholders('') if self.bidirectional: self.next_token_id_reverse = _get_next_token_placeholders( '_reverse') # DEFINE THE SOFTMAX VARIABLES # get the dimension of the softmax weights # softmax dimension is the size of the output projection_dim softmax_dim = self.options['lstm']['projection_dim'] # the output softmax variables -- they are shared if bidirectional if self.share_embedding_softmax: # softmax_W is just the embedding layer self.softmax_W = self.embedding_weights with tf.variable_scope('softmax'), tf.device('/cpu:0'): # Glorit init (std=(1.0 / sqrt(fan_in)) softmax_init = tf.random_normal_initializer(0.0, 1.0 / np.sqrt(softmax_dim)) if not self.share_embedding_softmax: self.softmax_W = tf.get_variable( 'W', [n_tokens_vocab, softmax_dim], dtype=DTYPE, initializer=softmax_init ) self.softmax_b = tf.get_variable( 'b', [n_tokens_vocab], dtype=DTYPE, initializer=tf.constant_initializer(0.0)) # now calculate losses # loss for each direction of the LSTM self.individual_train_losses = [] self.individual_eval_losses = [] if self.bidirectional: next_ids = [self.next_token_id, self.next_token_id_reverse] else: next_ids = [self.next_token_id] for id_placeholder, lstm_output_flat in zip(next_ids, lstm_outputs): # flatten the LSTM output and next token id gold to shape: # (batch_size * unroll_steps, softmax_dim) # Flatten and reshape the token_id placeholders next_token_id_flat = tf.reshape(id_placeholder, [-1, 1]) with tf.control_dependencies([lstm_output_flat]): sampled_losses = tf.nn.sampled_softmax_loss(self.softmax_W, self.softmax_b, next_token_id_flat, lstm_output_flat, self.options['n_negative_samples_batch'], self.options['n_tokens_vocab'], num_true=1) # get the full softmax loss output_scores = tf.matmul( lstm_output_flat, tf.transpose(self.softmax_W) ) + self.softmax_b # NOTE: tf.nn.sparse_softmax_cross_entropy_with_logits # expects unnormalized output since it performs the # softmax internally losses = tf.nn.sparse_softmax_cross_entropy_with_logits( logits=output_scores, labels=tf.squeeze(next_token_id_flat, squeeze_dims=[1]) ) sampled_losses = tf.reshape(sampled_losses, [self.options['batch_size'], -1]) losses = tf.reshape(losses, [self.options['batch_size'], -1]) self.individual_train_losses.append(tf.reduce_mean(sampled_losses, axis=1)) self.individual_eval_losses.append(tf.reduce_mean(losses, axis=1)) # now make the total loss -- it's the train of the individual losses if self.bidirectional: self.total_train_loss = 0.5 * (self.individual_train_losses[0] + self.individual_train_losses[1]) self.total_eval_loss = 0.5 * (self.individual_eval_losses[0] + self.individual_eval_losses[1]) else: self.total_train_loss = self.individual_train_losses[0] self.total_eval_loss = self.individual_eval_losses[0]
Build and return the model described in corresponding configuration file.
def build_model(config: Union[str, Path, dict], mode: str = 'infer', load_trained: bool = False, download: bool = False, serialized: Optional[bytes] = None) -> Chainer: """Build and return the model described in corresponding configuration file.""" config = parse_config(config) if serialized: serialized: list = pickle.loads(serialized) if download: deep_download(config) import_packages(config.get('metadata', {}).get('imports', [])) model_config = config['chainer'] model = Chainer(model_config['in'], model_config['out'], model_config.get('in_y')) for component_config in model_config['pipe']: if load_trained and ('fit_on' in component_config or 'in_y' in component_config): try: component_config['load_path'] = component_config['save_path'] except KeyError: log.warning('No "save_path" parameter for the {} component, so "load_path" will not be renewed' .format(component_config.get('class_name', component_config.get('ref', 'UNKNOWN')))) if serialized and 'in' in component_config: component_serialized = serialized.pop(0) else: component_serialized = None component = from_params(component_config, mode=mode, serialized=component_serialized) if 'in' in component_config: c_in = component_config['in'] c_out = component_config['out'] in_y = component_config.get('in_y', None) main = component_config.get('main', False) model.append(component, c_in, c_out, in_y, main) return model
Start interaction with the model described in corresponding configuration file.
def interact_model(config: Union[str, Path, dict]) -> None: """Start interaction with the model described in corresponding configuration file.""" model = build_model(config) while True: args = [] for in_x in model.in_x: args.append((input('{}::'.format(in_x)),)) # check for exit command if args[-1][0] in {'exit', 'stop', 'quit', 'q'}: return pred = model(*args) if len(model.out_params) > 1: pred = zip(*pred) print('>>', *pred)
Make a prediction with the component described in corresponding configuration file.
def predict_on_stream(config: Union[str, Path, dict], batch_size: int = 1, file_path: Optional[str] = None) -> None: """Make a prediction with the component described in corresponding configuration file.""" if file_path is None or file_path == '-': if sys.stdin.isatty(): raise RuntimeError('To process data from terminal please use interact mode') f = sys.stdin else: f = open(file_path, encoding='utf8') model: Chainer = build_model(config) args_count = len(model.in_x) while True: batch = list((l.strip() for l in islice(f, batch_size * args_count))) if not batch: break args = [] for i in range(args_count): args.append(batch[i::args_count]) res = model(*args) if len(model.out_params) == 1: res = [res] for res in zip(*res): res = json.dumps(res, ensure_ascii=False) print(res, flush=True) if f is not sys.stdin: f.close()
Reads input file in CONLL-U format
def read_infile(infile: Union[Path, str], from_words=False, word_column: int = WORD_COLUMN, pos_column: int = POS_COLUMN, tag_column: int = TAG_COLUMN, max_sents: int = -1, read_only_words: bool = False) -> List[Tuple[List, Union[List, None]]]: """Reads input file in CONLL-U format Args: infile: a path to a file word_column: column containing words (default=1) pos_column: column containing part-of-speech labels (default=3) tag_column: column containing fine-grained tags (default=5) max_sents: maximal number of sents to read read_only_words: whether to read only words Returns: a list of sentences. Each item contains a word sequence and a tag sequence, which is ``None`` in case ``read_only_words = True`` """ answer, curr_word_sent, curr_tag_sent = [], [], [] if from_words: word_column, read_only_words = 0, True with open(infile, "r", encoding="utf8") as fin: for line in fin: line = line.strip() if line.startswith("#"): continue if line == "": if len(curr_word_sent) > 0: if read_only_words: curr_tag_sent = None answer.append((curr_word_sent, curr_tag_sent)) curr_tag_sent, curr_word_sent = [], [] if len(answer) == max_sents: break continue splitted = line.split("\t") index = splitted[0] if not from_words and not index.isdigit(): continue curr_word_sent.append(splitted[word_column]) if not read_only_words: pos, tag = splitted[pos_column], splitted[tag_column] tag = pos if tag == "_" else "{},{}".format(pos, tag) curr_tag_sent.append(tag) if len(curr_word_sent) > 0: if read_only_words: curr_tag_sent = None answer.append((curr_word_sent, curr_tag_sent)) return answer
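A hedged usage sketch with a tiny synthetic CoNLL-U fragment (the tokens and features are made up for illustration):

import tempfile

# Columns: ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC.
conllu = ("1\tdogs\tdog\tNOUN\t_\tNumber=Plur\t_\t_\t_\t_\n"
          "2\tbark\tbark\tVERB\t_\t_\t_\t_\t_\t_\n\n")
with tempfile.NamedTemporaryFile('w', suffix='.conllu', delete=False, encoding='utf8') as f:
    f.write(conllu)
sentences = read_infile(f.name)
# sentences == [(['dogs', 'bark'], ['NOUN,Number=Plur', 'VERB'])]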
Processes all words in data using :func:`~deeppavlov.dataset_iterators.morphotagger_iterator.process_word`.
def preprocess_data(data: List[Tuple[List[str], List[str]]], to_lower: bool = True, append_case: str = "first") -> List[Tuple[List[Tuple[str]], List[str]]]: """Processes all words in data using :func:`~deeppavlov.dataset_iterators.morphotagger_iterator.process_word`. Args: data: a list of pairs (words, tags), each pair corresponds to a single sentence to_lower: whether to lowercase append_case: whether to add case mark Returns: a list of preprocessed sentences """ new_data = [] for words, tags in data: new_words = [process_word(word, to_lower=to_lower, append_case=append_case) for word in words] # tags could also be processed in future new_tags = tags new_data.append((new_words, new_tags)) return new_data
Returns a function object with the name given in string.
def fn_from_str(name: str) -> Callable[..., Any]: """Returns a function object with the name given in string.""" try: module_name, fn_name = name.split(':') except ValueError: raise ConfigError('Expected function description in a `module.submodules:function_name` form, but got `{}`' .format(name)) return getattr(importlib.import_module(module_name), fn_name)
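A quick usage example of the expected `module:function_name` form, resolving a standard-library function:

dumps = fn_from_str('json:dumps')
assert dumps({'a': 1}) == '{"a": 1}'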
Decorator for metric registration.
def register_metric(metric_name: str) -> Callable[..., Any]: """Decorator for metric registration.""" def decorate(fn): fn_name = fn.__module__ + ':' + fn.__name__ if metric_name in _REGISTRY and _REGISTRY[metric_name] != fn_name: log.warning('"{}" is already registered as a metric name, the old function will be ignored' .format(metric_name)) _REGISTRY[metric_name] = fn_name return fn return decorate
Returns a metric callable with a corresponding name.
def get_metric_by_name(name: str) -> Callable[..., Any]: """Returns a metric callable with a corresponding name.""" if name not in _REGISTRY: raise ConfigError(f'"{name}" is not registered as a metric') return fn_from_str(_REGISTRY[name])
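A hedged sketch of registering and then retrieving a metric; the metric name is made up, and retrieval only works if the decorated function lives in an importable module, since it is re-imported through fn_from_str:

@register_metric('toy_accuracy')  # hypothetical metric name
def toy_accuracy(y_true, y_predicted):
    # Fraction of exact matches between true and predicted labels.
    return sum(t == p for t, p in zip(y_true, y_predicted)) / max(len(y_true), 1)

# Later, e.g. while building a pipeline from a config:
# get_metric_by_name('toy_accuracy')([1, 0, 1], [1, 1, 1])  # -> 0.666...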
Convert given string label of decay type to special index
def from_str(cls, label: str) -> int: """ Convert given string label of decay type to special index Args: label: name of decay type. Set of values: `"linear"`, `"cosine"`, `"exponential"`, `"onecycle"`, `"trapezoid"`, `["polynomial", K]`, where K is a polynomial power Returns: index of decay type """ label_norm = label.replace('1', 'one').upper() if label_norm in cls.__members__: return DecayType[label_norm] else: raise NotImplementedError
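A small usage sketch; the member names (e.g. COSINE, ONECYCLE) are assumed from the labels listed in the docstring:

# '1' is normalized to 'one', so both spellings resolve to the same member.
assert DecayType.from_str('1cycle') == DecayType.from_str('onecycle')
assert DecayType.from_str('cosine') == DecayType.COSINE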
Find the best learning rate schedule and set obtained values of learning rate and momentum for further model training. Best learning rate will be divided by fit_learning_rate_div for further training model.
def fit(self, *args): """ Find the best learning rate schedule, and set obtained values of learning rate and momentum for further model training. Best learning rate will be divided by `fit_learning_rate_div` for further training model. Args: *args: arguments Returns: """ data = list(zip(*args)) self.save() if self._fit_batch_size is None: raise ConfigError("in order to use fit() method" " set `fit_batch_size` parameter") bs = int(self._fit_batch_size) data_len = len(data) num_batches = self._fit_max_batches or ((data_len - 1) // bs + 1) avg_loss = 0. best_loss = float('inf') lrs, losses = [], [] _lr_find_schedule = DecayScheduler(start_val=self._fit_learning_rate[0], end_val=self._fit_learning_rate[1], dec_type="exponential", num_it=num_batches) self._lr = _lr_find_schedule.start_val self._mom = 0. self._update_graph_variables(learning_rate=self._lr, momentum=self._mom) best_lr = _lr_find_schedule.start_val for i in range(num_batches): batch_start = (i * bs) % data_len batch_end = batch_start + bs report = self.train_on_batch(*zip(*data[batch_start:batch_end])) if not isinstance(report, dict): report = {'loss': report} # Calculating smoothed loss avg_loss = self._fit_beta*avg_loss + (1 - self._fit_beta)*report['loss'] smoothed_loss = avg_loss / (1 - self._fit_beta**(i + 1)) lrs.append(self._lr) losses.append(smoothed_loss) log.info(f"Batch {i}/{num_batches}: smooth_loss = {smoothed_loss}" f", lr = {self._lr}, best_lr = {best_lr}") if math.isnan(smoothed_loss) or (smoothed_loss > 4 * best_loss): break if (smoothed_loss < best_loss) and (i >= self._fit_min_batches): best_loss = smoothed_loss best_lr = self._lr self._lr = _lr_find_schedule.next_val() self._update_graph_variables(learning_rate=self._lr) if i >= num_batches: break # best_lr /= 10 end_val = self._get_best(lrs, losses) start_val = end_val if self._lr_schedule.dec_type in (DecayType.ONECYCLE, DecayType.TRAPEZOID): start_val = end_val / self._fit_learning_rate_div elif self._lr_schedule.dec_type in (DecayType.POLYNOMIAL, DecayType.EXPONENTIAL, DecayType.LINEAR, DecayType.COSINE): start_val = end_val end_val = end_val / self._fit_learning_rate_div self._lr_schedule = DecayScheduler(start_val=start_val, end_val=end_val, num_it=self._lr_schedule.nb, dec_type=self._lr_schedule.dec_type, extra=self._lr_schedule.extra) log.info(f"Found best learning rate value = {best_lr}" f", setting new learning rate schedule with {self._lr_schedule}.") self.load() self._lr = self._lr_schedule.start_val self._mom = self._mom_schedule.start_val self._update_graph_variables(learning_rate=self._lr, momentum=self._mom) return {'smoothed_loss': losses, 'learning_rate': lrs}
Find the best value according to given losses
def _get_best(values: List[float], losses: List[float], max_loss_div: float = 0.9, min_val_div: float = 10.0) -> float: """ Find the best value according to given losses Args: values: list of considered values losses: list of obtained loss values corresponding to `values` max_loss_div: maximal divergence of loss to be considered significant min_val_div: minimum divergence of loss to be considered significant Returns: best value divided by `min_val_div` """ assert len(values) == len(losses), "lengths of values and losses should be equal" min_ind = np.argmin(losses) for i in range(min_ind - 1, 0, -1): if (losses[i] * max_loss_div > losses[min_ind]) or\ (values[i] * min_val_div < values[min_ind]): return values[i + 1] return values[min_ind] / min_val_div
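A worked example of the selection logic, calling the helper as a standalone function with illustrative numbers:

lrs = [1e-4, 1e-3, 1e-2, 1e-1]
losses = [2.0, 1.0, 0.5, 3.0]
# The loss minimum is at lr=1e-2; walking back, the loss at 1e-3 is still
# well above the minimum (1.0 * 0.9 > 0.5), so the value right after it is chosen.
assert _get_best(lrs, losses) == 1e-2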
Update learning rate and momentum variables after event (given by `event_name`)
def process_event(self, event_name: str, data: dict) -> None: """ Update learning rate and momentum variables after event (given by `event_name`) Args: event_name: name of event after which the method was called. Set of values: `"after_validation"`, `"after_batch"`, `"after_epoch"`, `"after_train_log"` data: dictionary with parameters values Returns: None """ if event_name == "after_validation": if data['impatience'] > self._learning_rate_last_impatience: self._learning_rate_cur_impatience += 1 else: self._learning_rate_cur_impatience = 0 self._learning_rate_last_impatience = data['impatience'] if (self._learning_rate_drop_patience is not None) and\ (self._learning_rate_cur_impatience >= self._learning_rate_drop_patience): self._learning_rate_cur_impatience = 0 self._learning_rate_cur_div *= self._learning_rate_drop_div self._lr /= self._learning_rate_drop_div self._update_graph_variables(learning_rate=self._lr) log.info(f"New learning rate dividor = {self._learning_rate_cur_div}") if event_name == 'after_batch': if (self._lr is not None) and self._lr_update_on_batch: self._lr = self._lr_schedule.next_val() / self._learning_rate_cur_div self._update_graph_variables(learning_rate=self._lr) if (self._mom is not None) and self._mom_update_on_batch: self._mom = min(1., max(0., self._mom_schedule.next_val())) self._update_graph_variables(momentum=self._mom) if event_name == 'after_epoch': if (self._lr is not None) and not self._lr_update_on_batch: self._lr = self._lr_schedule.next_val() / self._learning_rate_cur_div self._update_graph_variables(learning_rate=self._lr) if (self._mom is not None) and not self._mom_update_on_batch: self._mom = min(1., max(0., self._mom_schedule.next_val())) self._update_graph_variables(momentum=self._mom) if event_name == 'after_train_log': if (self._lr is not None) and ('learning_rate' not in data): data['learning_rate'] = self._lr if (self._mom is not None) and ('momentum' not in data): data['momentum'] = self._mom
Embed one text sample
def _encode(self, tokens: List[str], mean: bool) -> Union[List[np.ndarray], np.ndarray]: """ Embed one text sample Args: tokens: tokenized text sample mean: whether to return mean embedding of tokens per sample Returns: list of embedded tokens or array of mean values """ embedded_tokens = [] for t in tokens: try: emb = self.tok2emb[t] except KeyError: try: emb = self._get_word_vector(t) except KeyError: emb = np.zeros(self.dim, dtype=np.float32) self.tok2emb[t] = emb embedded_tokens.append(emb) if mean is None: mean = self.mean if mean: filtered = [et for et in embedded_tokens if np.any(et)] if filtered: return np.mean(filtered, axis=0) return np.zeros(self.dim, dtype=np.float32) return embedded_tokens
parses requirements from requirements.txt
def read_requirements(): """parses requirements from requirements.txt""" reqs_path = os.path.join(__location__, 'requirements.txt') with open(reqs_path, encoding='utf8') as f: reqs = [line.strip() for line in f if not line.strip().startswith('#')] names = [] links = [] for req in reqs: if '://' in req: links.append(req) else: names.append(req) return {'install_requires': names, 'dependency_links': links}
Detokenizing a text undoes the tokenizing operation, restores punctuation and spaces to the places that people expect them to be. Ideally, `detokenize(tokenize(text))` should be identical to `text`, except for line breaks.
def detokenize(tokens): """ Detokenizing a text undoes the tokenizing operation, restores punctuation and spaces to the places that people expect them to be. Ideally, `detokenize(tokenize(text))` should be identical to `text`, except for line breaks. """ text = ' '.join(tokens) step0 = text.replace('. . .', '...') step1 = step0.replace("`` ", '"').replace(" ''", '"') step2 = step1.replace(" ( ", " (").replace(" ) ", ") ") step3 = re.sub(r' ([.,:;?!%]+)([ \'"`])', r"\1\2", step2) step4 = re.sub(r' ([.,:;?!%]+)$', r"\1", step3) step5 = step4.replace(" '", "'").replace(" n't", "n't") \ .replace(" nt", "nt").replace("can not", "cannot") step6 = step5.replace(" ` ", " '") return step6.strip()
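A couple of usage examples, traced by hand against the replacement rules above:

assert detokenize(['Hello', ',', 'world', '!']) == 'Hello, world!'
assert detokenize(['she', 'ca', "n't", 'go']) == "she can't go"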
Make ngrams from a list of tokens/lemmas. :param items: list of tokens, lemmas or other strings to form ngrams. :param ngram_range: range for producing ngrams, ex. for unigrams + bigrams should be set to (1, 2), for bigrams only should be set to (2, 2). :return: ngrams (as strings) generator
def ngramize(items: List[str], ngram_range=(1, 1)) -> Generator[List[str], Any, None]: """ Make ngrams from a list of tokens/lemmas :param items: list of tokens, lemmas or other strings to form ngrams :param ngram_range: range for producing ngrams, ex. for unigrams + bigrams should be set to (1, 2), for bigrams only should be set to (2, 2) :return: ngrams (as strings) generator """ ngrams = [] ranges = [(0, i) for i in range(ngram_range[0], ngram_range[1] + 1)] for r in ranges: ngrams += list(zip(*[items[j:] for j in range(*r)])) formatted_ngrams = [' '.join(item) for item in ngrams] yield formatted_ngrams
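A short usage example; note that the generator yields a single list containing all requested n-gram orders:

grams = next(ngramize(['cats', 'eat', 'fish'], ngram_range=(1, 2)))
assert grams == ['cats', 'eat', 'fish', 'cats eat', 'eat fish']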
Calculates log loss.
def sk_log_loss(y_true: Union[List[List[float]], List[List[int]], np.ndarray], y_predicted: Union[List[List[float]], List[List[int]], np.ndarray]) -> float: """ Calculates log loss. Args: y_true: list or array of true values y_predicted: list or array of predicted values Returns: Log loss """ return log_loss(y_true, y_predicted)
Makes a module spec.
def make_module_spec(options, weight_file): """Makes a module spec. Args: options: LM hyperparameters. weight_file: location of the hdf5 file with LM weights. Returns: A module spec object used for constructing a TF-Hub module. """ def module_fn(): """Spec function for a token embedding module.""" # init _bos_id = 256 _eos_id = 257 _bow_id = 258 _eow_id = 259 _pad_id = 260 _max_word_length = 50 _parallel_iterations = 10 _max_batch_size = 1024 id_dtype = tf.int32 id_nptype = np.int32 max_word_length = tf.constant(_max_word_length, dtype=id_dtype, name='max_word_length') version = tf.constant('from_dp_1', dtype=tf.string, name='version') # the charcter representation of the begin/end of sentence characters def _make_bos_eos(c): r = np.zeros([_max_word_length], dtype=id_nptype) r[:] = _pad_id r[0] = _bow_id r[1] = c r[2] = _eow_id return tf.constant(r, dtype=id_dtype) bos_ids = _make_bos_eos(_bos_id) eos_ids = _make_bos_eos(_eos_id) def token2ids(token): with tf.name_scope("token2ids_preprocessor"): char_ids = tf.decode_raw(token, tf.uint8, name='decode_raw2get_char_ids') char_ids = tf.cast(char_ids, tf.int32, name='cast2int_token') char_ids = tf.strided_slice(char_ids, [0], [max_word_length - 2], [1], name='slice2resized_token') ids_num = tf.shape(char_ids)[0] fill_ids_num = (_max_word_length - 2) - ids_num pads = tf.fill([fill_ids_num], _pad_id) bow_token_eow_pads = tf.concat([[_bow_id], char_ids, [_eow_id], pads], 0, name='concat2bow_token_eow_pads') return bow_token_eow_pads def sentence_tagging_and_padding(sen_dim): with tf.name_scope("sentence_tagging_and_padding_preprocessor"): sen = sen_dim[0] dim = sen_dim[1] extra_dim = tf.shape(sen)[0] - dim sen = tf.slice(sen, [0, 0], [dim, max_word_length], name='slice2sen') bos_sen_eos = tf.concat([[bos_ids], sen, [eos_ids]], 0, name='concat2bos_sen_eos') bos_sen_eos_plus_one = bos_sen_eos + 1 bos_sen_eos_pads = tf.pad(bos_sen_eos_plus_one, [[0, extra_dim], [0, 0]], "CONSTANT", name='pad2bos_sen_eos_pads') return bos_sen_eos_pads # Input placeholders to the biLM. tokens = tf.placeholder(shape=(None, None), dtype=tf.string, name='ph2tokens') sequence_len = tf.placeholder(shape=(None, ), dtype=tf.int32, name='ph2sequence_len') tok_shape = tf.shape(tokens) line_tokens = tf.reshape(tokens, shape=[-1], name='reshape2line_tokens') with tf.device('/cpu:0'): tok_ids = tf.map_fn( token2ids, line_tokens, dtype=tf.int32, back_prop=False, parallel_iterations=_parallel_iterations, name='map_fn2get_tok_ids') tok_ids = tf.reshape(tok_ids, [tok_shape[0], tok_shape[1], -1], name='reshape2tok_ids') with tf.device('/cpu:0'): sen_ids = tf.map_fn( sentence_tagging_and_padding, (tok_ids, sequence_len), dtype=tf.int32, back_prop=False, parallel_iterations=_parallel_iterations, name='map_fn2get_sen_ids') # Build the biLM graph. 
bilm = BidirectionalLanguageModel(options, str(weight_file), max_batch_size=_max_batch_size) embeddings_op = bilm(sen_ids) # Get an op to compute ELMo (weighted average of the internal biLM layers) elmo_output = weight_layers('elmo_output', embeddings_op, l2_coef=0.0) weighted_op = elmo_output['weighted_op'] mean_op = elmo_output['mean_op'] word_emb = elmo_output['word_emb'] lstm_outputs1 = elmo_output['lstm_outputs1'] lstm_outputs2 = elmo_output['lstm_outputs2'] hub.add_signature("tokens", {"tokens": tokens, "sequence_len": sequence_len}, {"elmo": weighted_op, "default": mean_op, "word_emb": word_emb, "lstm_outputs1": lstm_outputs1, "lstm_outputs2": lstm_outputs2, "version": version}) # #########################Next signature############################# # # Input placeholders to the biLM. def_strings = tf.placeholder(shape=(None), dtype=tf.string) def_tokens_sparse = tf.string_split(def_strings) def_tokens_dense = tf.sparse_to_dense(sparse_indices=def_tokens_sparse.indices, output_shape=def_tokens_sparse.dense_shape, sparse_values=def_tokens_sparse.values, default_value='' ) def_mask = tf.not_equal(def_tokens_dense, '') def_int_mask = tf.cast(def_mask, dtype=tf.int32) def_sequence_len = tf.reduce_sum(def_int_mask, axis=-1) def_tok_shape = tf.shape(def_tokens_dense) def_line_tokens = tf.reshape(def_tokens_dense, shape=[-1], name='reshape2line_tokens') with tf.device('/cpu:0'): def_tok_ids = tf.map_fn( token2ids, def_line_tokens, dtype=tf.int32, back_prop=False, parallel_iterations=_parallel_iterations, name='map_fn2get_tok_ids') def_tok_ids = tf.reshape(def_tok_ids, [def_tok_shape[0], def_tok_shape[1], -1], name='reshape2tok_ids') with tf.device('/cpu:0'): def_sen_ids = tf.map_fn( sentence_tagging_and_padding, (def_tok_ids, def_sequence_len), dtype=tf.int32, back_prop=False, parallel_iterations=_parallel_iterations, name='map_fn2get_sen_ids') # Get ops to compute the LM embeddings. def_embeddings_op = bilm(def_sen_ids) # Get an op to compute ELMo (weighted average of the internal biLM layers) def_elmo_output = weight_layers('elmo_output', def_embeddings_op, l2_coef=0.0, reuse=True) def_weighted_op = def_elmo_output['weighted_op'] def_mean_op = def_elmo_output['mean_op'] def_word_emb = def_elmo_output['word_emb'] def_lstm_outputs1 = def_elmo_output['lstm_outputs1'] def_lstm_outputs2 = def_elmo_output['lstm_outputs2'] hub.add_signature("default", {"strings": def_strings}, {"elmo": def_weighted_op, "default": def_mean_op, "word_emb": def_word_emb, "lstm_outputs1": def_lstm_outputs1, "lstm_outputs2": def_lstm_outputs2, "version": version}) return hub.create_module_spec(module_fn)
Exports a TF-Hub module
def export2hub(weight_file, hub_dir, options): """Exports a TF-Hub module """ spec = make_module_spec(options, str(weight_file)) try: with tf.Graph().as_default(): module = hub.Module(spec) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) if hub_dir.exists(): shutil.rmtree(hub_dir) module.export(str(hub_dir), sess) finally: pass
Format catalog item output
def show_details(item_data: Dict[Any, Any]) -> str: """Format catalog item output Parameters: item_data: item's attributes values Returns: txt: formatted markdown-style string with the item's attributes """ txt = "" for key, value in item_data.items(): txt += "**" + str(key) + "**" + ': ' + str(value) + " \n" return txt
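A small usage example showing the markdown-style output (the catalog fields are hypothetical):

item = {'Title': 'Mouse', 'Price': 10}
assert show_details(item) == '**Title**: Mouse \n**Price**: 10 \n'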
Make an agent
def make_agent() -> EcommerceAgent: """Make an agent Returns: agent: created Ecommerce agent """ config_path = find_config('tfidf_retrieve') skill = build_model(config_path) agent = EcommerceAgent(skills=[skill]) return agent
Parse parameters and run ms bot framework
def main(): """Parse parameters and run ms bot framework""" args = parser.parse_args() run_ms_bot_framework_server(agent_generator=make_agent, app_id=args.ms_id, app_secret=args.ms_secret, stateful=True)
Processes batch of utterances and returns corresponding responses batch.
def _call(self, utterances_batch: List[str], utterances_ids: List[int] = None) -> List[RichMessage]: """Processes batch of utterances and returns corresponding responses batch. Args: utterances_batch: Batch of incoming utterances. utterances_ids: Batch of dialog IDs corresponding to incoming utterances. Returns: responses: A batch of responses corresponding to the utterance batch received by agent. """ rich_message = RichMessage() for utt_id, utt in enumerate(utterances_batch): if utterances_ids: id_ = utterances_ids[utt_id] log.debug(f'Utterance: {utt}') if utt == "/start": welcome = "I am a new e-commerce bot. I will help you to find products that you are looking for. Please type your request in plain text." rich_message.add_control(PlainText(welcome)) continue if utt[0] == "@": command, *parts = utt.split(":") log.debug(f'Actions: {parts}') if command == "@details": batch_index = int(parts[0]) # batch index in history list item_index = int(parts[1]) # index in batch rich_message.add_control(PlainText(show_details( self.history[id_][batch_index][item_index]))) continue if command == "@entropy": state = self.history[id_][int(parts[0])] state[parts[1]] = parts[2] state["start"] = 0 state["stop"] = 5 utt = state['query'] self.states[id_] = state if command == "@next": state = self.history[id_][int(parts[0])] state['start'] = state['stop'] state['stop'] = state['stop'] + 5 utt = state['query'] self.states[id_] = state else: if id_ not in self.states: self.states[id_] = {} self.states[id_]["start"] = 0 self.states[id_]["stop"] = 5 responses_batch, confidences_batch, state_batch = self.skills[0]( [utt], self.history[id_], [self.states[id_]]) # update `self.states` with retrieved results self.states[id_] = state_batch[0] self.states[id_]["query"] = utt items_batch, entropy_batch = responses_batch for batch_idx, items in enumerate(items_batch): self.history[id_].append(items) self.history[id_].append(self.states[id_]) for idx, item in enumerate(items): rich_message.add_control(_draw_item(item, idx, self.history[id_])) if len(items) == self.states[id_]['stop'] - self.states[id_]['start']: buttons_frame = _draw_tail(entropy_batch[batch_idx], self.history[id_]) rich_message.add_control(buttons_frame) return [rich_message]
Drops with :dropout probability temporal steps of input 3D tensor
def TemporalDropout(inputs, dropout=0.0): """ Drops with :dropout probability temporal steps of input 3D tensor """ # TO DO: adapt for >3D tensors if dropout == 0.0: return inputs inputs_func = lambda x: kb.ones_like(inputs[:, :, 0:1]) inputs_mask = kl.Lambda(inputs_func)(inputs) inputs_mask = kl.Dropout(dropout)(inputs_mask) tiling_shape = [1, 1, kb.shape(inputs)[2]] + [1] * (kb.ndim(inputs) - 3) inputs_mask = kl.Lambda(kb.tile, arguments={"n": tiling_shape}, output_shape=inputs._keras_shape[1:])(inputs_mask) answer = kl.Multiply()([inputs, inputs_mask]) return answer
A layer filling i-th column of a 2D tensor with 1+ln(1+i) when it contains a meaningful symbol and with 0 when it contains PAD
def positions_func(inputs, pad=0): """ A layer filling i-th column of a 2D tensor with 1+ln(1+i) when it contains a meaningful symbol and with 0 when it contains PAD """ position_inputs = kb.cumsum(kb.ones_like(inputs, dtype="float32"), axis=1) position_inputs *= kb.cast(kb.not_equal(inputs, pad), "float32") return kb.log(1.0 + position_inputs)
Download a file from URL to one or several target locations
def download(dest_file_path: [List[Union[str, Path]]], source_url: str, force_download=True): """Download a file from URL to one or several target locations Args: dest_file_path: path or list of paths to the file destination files (including file name) source_url: the source URL force_download: download file if it already exists, or not """ if isinstance(dest_file_path, list): dest_file_paths = [Path(path) for path in dest_file_path] else: dest_file_paths = [Path(dest_file_path).absolute()] if not force_download: to_check = list(dest_file_paths) dest_file_paths = [] for p in to_check: if p.exists(): log.info(f'File already exists in {p}') else: dest_file_paths.append(p) if dest_file_paths: cache_dir = os.getenv('DP_CACHE_DIR') cached_exists = False if cache_dir: first_dest_path = Path(cache_dir) / md5(source_url.encode('utf8')).hexdigest()[:15] cached_exists = first_dest_path.exists() else: first_dest_path = dest_file_paths.pop() if not cached_exists: first_dest_path.parent.mkdir(parents=True, exist_ok=True) simple_download(source_url, first_dest_path) else: log.info(f'Found cached {source_url} in {first_dest_path}') for dest_path in dest_file_paths: dest_path.parent.mkdir(parents=True, exist_ok=True) shutil.copy(str(first_dest_path), str(dest_path))
Simple tar archive extractor
def untar(file_path, extract_folder=None): """Simple tar archive extractor Args: file_path: path to the tar file to be extracted extract_folder: folder to which the files will be extracted """ file_path = Path(file_path) if extract_folder is None: extract_folder = file_path.parent extract_folder = Path(extract_folder) tar = tarfile.open(file_path) tar.extractall(extract_folder) tar.close()
Simple .gz archive extractor
def ungzip(file_path, extract_path: Path = None): """Simple .gz archive extractor Args: file_path: path to the gzip file to be extracted extract_path: path where the file will be extracted """ CHUNK = 16 * 1024 file_path = Path(file_path) extract_path = extract_path or file_path.with_suffix('') with gzip.open(file_path, 'rb') as fin, extract_path.open('wb') as fout: while True: block = fin.read(CHUNK) if not block: break fout.write(block)
Download and extract .tar.gz or .gz file to one or several target locations. The archive is deleted if extraction was successful.
def download_decompress(url: str, download_path: [Path, str], extract_paths=None): """Download and extract .tar.gz or .gz file to one or several target locations. The archive is deleted if extraction was successful. Args: url: URL for file downloading download_path: path to the directory where downloaded file will be stored until the end of extraction extract_paths: path or list of paths where contents of archive will be extracted """ file_name = Path(urlparse(url).path).name download_path = Path(download_path) if extract_paths is None: extract_paths = [download_path] elif isinstance(extract_paths, list): extract_paths = [Path(path) for path in extract_paths] else: extract_paths = [Path(extract_paths)] cache_dir = os.getenv('DP_CACHE_DIR') extracted = False if cache_dir: cache_dir = Path(cache_dir) url_hash = md5(url.encode('utf8')).hexdigest()[:15] arch_file_path = cache_dir / url_hash extracted_path = cache_dir / (url_hash + '_extracted') extracted = extracted_path.exists() if not extracted and not arch_file_path.exists(): simple_download(url, arch_file_path) else: arch_file_path = download_path / file_name simple_download(url, arch_file_path) extracted_path = extract_paths.pop() if not extracted: log.info('Extracting {} archive into {}'.format(arch_file_path, extracted_path)) extracted_path.mkdir(parents=True, exist_ok=True) if file_name.endswith('.tar.gz'): untar(arch_file_path, extracted_path) elif file_name.endswith('.gz'): ungzip(arch_file_path, extracted_path / Path(file_name).with_suffix('').name) elif file_name.endswith('.zip'): with zipfile.ZipFile(arch_file_path, 'r') as zip_ref: zip_ref.extractall(extracted_path) else: raise RuntimeError(f'Trying to extract an unknown type of archive {file_name}') if not cache_dir: arch_file_path.unlink() for extract_path in extract_paths: for src in extracted_path.iterdir(): dest = extract_path / src.name if src.is_dir(): copytree(src, dest) else: extract_path.mkdir(parents=True, exist_ok=True) shutil.copy(str(src), str(dest))
Updates dict recursively
def update_dict_recursive(editable_dict: dict, editing_dict: dict) -> None: """Updates dict recursively You need to use this function to update dictionary if depth of editing_dict is more then 1 Args: editable_dict: dictionary, that will be edited editing_dict: dictionary, that contains edits Returns: None """ for k, v in editing_dict.items(): if isinstance(v, collections.Mapping): update_dict_recursive(editable_dict.get(k, {}), v) else: editable_dict[k] = v
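A short usage example; nested keys are merged instead of being overwritten wholesale:

config = {'train': {'lr': 0.1, 'epochs': 10}, 'name': 'tagger'}
update_dict_recursive(config, {'train': {'lr': 0.01}})
assert config == {'train': {'lr': 0.01, 'epochs': 10}, 'name': 'tagger'}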
Given a file URL, return a md5 query of the file
def path_set_md5(url): """Given a file URL, return a md5 query of the file Args: url: a given URL Returns: URL of the md5 file """ scheme, netloc, path, query_string, fragment = urlsplit(url) path += '.md5' return urlunsplit((scheme, netloc, path, query_string, fragment))
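A quick example with a hypothetical URL; the '.md5' suffix is appended to the path while the query string is preserved:

assert path_set_md5('http://example.com/model/tagger.tar.gz?rev=2') == \
    'http://example.com/model/tagger.tar.gz.md5?rev=2'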
Given a URL set or replace a query parameter and return the modified URL.
def set_query_parameter(url, param_name, param_value): """Given a URL, set or replace a query parameter and return the modified URL. Args: url: a given URL param_name: the parameter name to add param_value: the parameter value Returns: URL with the added parameter """ scheme, netloc, path, query_string, fragment = urlsplit(url) query_params = parse_qs(query_string) query_params[param_name] = [param_value] new_query_string = urlencode(query_params, doseq=True) return urlunsplit((scheme, netloc, path, new_query_string, fragment))
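Another small example with a hypothetical URL and token value:

assert set_query_parameter('http://example.com/file.md5', 'access_token', 'abc123') == \
    'http://example.com/file.md5?access_token=abc123'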
Returns Amazon Alexa compatible state of the PlainText instance.
def alexa(self) -> dict: """Returns Amazon Alexa compatible state of the PlainText instance. Creating Amazon Alexa response blank with populated "outputSpeech" and "card" sections. Returns: response: Amazon Alexa representation of PlainText state. """ response = { 'response': { 'shouldEndSession': False, 'outputSpeech': { 'type': 'PlainText', 'text': self.content}, 'card': { 'type': 'Simple', 'content': self.content } } } return response