| field | type | range |
|---|---|---|
| partition | stringclasses | 3 values |
| func_name | stringlengths | 1–134 |
| docstring | stringlengths | 1–46.9k |
| path | stringlengths | 4–223 |
| original_string | stringlengths | 75–104k |
| code | stringlengths | 75–104k |
| docstring_tokens | listlengths | 1–1.97k |
| repo | stringlengths | 7–55 |
| language | stringclasses | 1 value |
| url | stringlengths | 87–315 |
| code_tokens | listlengths | 19–28.4k |
| sha | stringlengths | 40–40 |
valid
PyIndenterMode.indent
Performs an indentation
pyqode/python/modes/indenter.py
```python
def indent(self):
    """
    Performs an indentation
    """
    if not self.tab_always_indent:
        super(PyIndenterMode, self).indent()
    else:
        cursor = self.editor.textCursor()
        assert isinstance(cursor, QtGui.QTextCursor)
        if cursor.hasSelection():
            self.indent_selection(cursor)
        else:
            # simply insert indentation at the cursor position
            tab_len = self.editor.tab_length
            cursor.beginEditBlock()
            if self.editor.use_spaces_instead_of_tabs:
                cursor.insertText(tab_len * " ")
            else:
                cursor.insertText('\t')
            cursor.endEditBlock()
            self.editor.setTextCursor(cursor)
```
[ "Performs", "an", "indentation" ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/pyqode/python/modes/indenter.py#L38-L58
[ "def", "indent", "(", "self", ")", ":", "if", "not", "self", ".", "tab_always_indent", ":", "super", "(", "PyIndenterMode", ",", "self", ")", ".", "indent", "(", ")", "else", ":", "cursor", "=", "self", ".", "editor", ".", "textCursor", "(", ")", "assert", "isinstance", "(", "cursor", ",", "QtGui", ".", "QTextCursor", ")", "if", "cursor", ".", "hasSelection", "(", ")", ":", "self", ".", "indent_selection", "(", "cursor", ")", "else", ":", "# simply insert indentation at the cursor position", "tab_len", "=", "self", ".", "editor", ".", "tab_length", "cursor", ".", "beginEditBlock", "(", ")", "if", "self", ".", "editor", ".", "use_spaces_instead_of_tabs", ":", "cursor", ".", "insertText", "(", "tab_len", "*", "\" \"", ")", "else", ":", "cursor", ".", "insertText", "(", "'\\t'", ")", "cursor", ".", "endEditBlock", "(", ")", "self", ".", "editor", ".", "setTextCursor", "(", "cursor", ")" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
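As a minimal illustration of the insertion branch above, here is the same spaces-versus-tab decision in plain Python, with hypothetical defaults standing in for the editor's `tab_length` and `use_spaces_instead_of_tabs` settings (no Qt required):

```python
def indentation_text(tab_length=4, use_spaces_instead_of_tabs=True):
    # Mirrors the insertText() branch above: a run of spaces for soft
    # tabs, a single literal tab character otherwise.
    return tab_length * " " if use_spaces_instead_of_tabs else "\t"

assert indentation_text() == "    "
assert indentation_text(use_spaces_instead_of_tabs=False) == "\t"
```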
valid
PyIndenterMode.unindent
Performs an un-indentation
pyqode/python/modes/indenter.py
```python
def unindent(self):
    """
    Performs an un-indentation
    """
    if self.tab_always_indent:
        cursor = self.editor.textCursor()
        if not cursor.hasSelection():
            cursor.select(cursor.LineUnderCursor)
        self.unindent_selection(cursor)
    else:
        super(PyIndenterMode, self).unindent()
```
[ "Performs", "an", "un", "-", "indentation" ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/pyqode/python/modes/indenter.py#L60-L70
[ "def", "unindent", "(", "self", ")", ":", "if", "self", ".", "tab_always_indent", ":", "cursor", "=", "self", ".", "editor", ".", "textCursor", "(", ")", "if", "not", "cursor", ".", "hasSelection", "(", ")", ":", "cursor", ".", "select", "(", "cursor", ".", "LineUnderCursor", ")", "self", ".", "unindent_selection", "(", "cursor", ")", "else", ":", "super", "(", "PyIndenterMode", ",", "self", ")", ".", "unindent", "(", ")" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
valid
PyAutoIndentMode._handle_indent_between_paren
Handle indent between symbols such as parenthesis, braces,...
pyqode/python/modes/autoindent.py
```python
def _handle_indent_between_paren(self, column, line, parent_impl, tc):
    """
    Handle indent between symbols such as parenthesis, braces,...
    """
    pre, post = parent_impl
    next_char = self._get_next_char(tc)
    prev_char = self._get_prev_char(tc)
    prev_open = prev_char in ['[', '(', '{']
    next_close = next_char in [']', ')', '}']
    (open_line, open_symbol_col), (close_line, close_col) = \
        self._get_paren_pos(tc, column)
    open_line_txt = self._helper.line_text(open_line)
    open_line_indent = len(open_line_txt) - len(open_line_txt.lstrip())
    if prev_open:
        post = (open_line_indent + self.editor.tab_length) * ' '
    elif next_close and prev_char != ',':
        post = open_line_indent * ' '
    elif tc.block().blockNumber() == open_line:
        post = open_symbol_col * ' '
    # adapt indent if cursor on closing line and next line have same
    # indent -> PEP8 compliance
    if close_line and close_col:
        txt = self._helper.line_text(close_line)
        bn = tc.block().blockNumber()
        flg = bn == close_line
        next_indent = self._helper.line_indent(bn + 1) * ' '
        if flg and txt.strip().endswith(':') and next_indent == post:
            # | look at how the previous line ( ``':'):`` ) was
            # over-indented, this is actually what we are trying to
            # achieve here
            post += self.editor.tab_length * ' '
    # breaking string
    if next_char in ['"', "'"]:
        tc.movePosition(tc.Left)
    is_string = self._helper.is_comment_or_string(tc, formats=['string'])
    if next_char in ['"', "'"]:
        tc.movePosition(tc.Right)
    if is_string:
        trav = QTextCursor(tc)
        while self._helper.is_comment_or_string(
                trav, formats=['string']):
            trav.movePosition(trav.Left)
        trav.movePosition(trav.Right)
        symbol = '%s' % self._get_next_char(trav)
        pre += symbol
        post += symbol
    return pre, post
```
[ "Handle", "indent", "between", "symbols", "such", "as", "parenthesis", "braces", "..." ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/pyqode/python/modes/autoindent.py#L249-L298
[ "def", "_handle_indent_between_paren", "(", "self", ",", "column", ",", "line", ",", "parent_impl", ",", "tc", ")", ":", "pre", ",", "post", "=", "parent_impl", "next_char", "=", "self", ".", "_get_next_char", "(", "tc", ")", "prev_char", "=", "self", ".", "_get_prev_char", "(", "tc", ")", "prev_open", "=", "prev_char", "in", "[", "'['", ",", "'('", ",", "'{'", "]", "next_close", "=", "next_char", "in", "[", "']'", ",", "')'", ",", "'}'", "]", "(", "open_line", ",", "open_symbol_col", ")", ",", "(", "close_line", ",", "close_col", ")", "=", "self", ".", "_get_paren_pos", "(", "tc", ",", "column", ")", "open_line_txt", "=", "self", ".", "_helper", ".", "line_text", "(", "open_line", ")", "open_line_indent", "=", "len", "(", "open_line_txt", ")", "-", "len", "(", "open_line_txt", ".", "lstrip", "(", ")", ")", "if", "prev_open", ":", "post", "=", "(", "open_line_indent", "+", "self", ".", "editor", ".", "tab_length", ")", "*", "' '", "elif", "next_close", "and", "prev_char", "!=", "','", ":", "post", "=", "open_line_indent", "*", "' '", "elif", "tc", ".", "block", "(", ")", ".", "blockNumber", "(", ")", "==", "open_line", ":", "post", "=", "open_symbol_col", "*", "' '", "# adapt indent if cursor on closing line and next line have same", "# indent -> PEP8 compliance", "if", "close_line", "and", "close_col", ":", "txt", "=", "self", ".", "_helper", ".", "line_text", "(", "close_line", ")", "bn", "=", "tc", ".", "block", "(", ")", ".", "blockNumber", "(", ")", "flg", "=", "bn", "==", "close_line", "next_indent", "=", "self", ".", "_helper", ".", "line_indent", "(", "bn", "+", "1", ")", "*", "' '", "if", "flg", "and", "txt", ".", "strip", "(", ")", ".", "endswith", "(", "':'", ")", "and", "next_indent", "==", "post", ":", "# | look at how the previous line ( ``':'):`` ) was", "# over-indented, this is actually what we are trying to", "# achieve here", "post", "+=", "self", ".", "editor", ".", "tab_length", "*", "' '", "# breaking string", "if", "next_char", "in", "[", "'\"'", ",", "\"'\"", "]", ":", "tc", ".", "movePosition", "(", "tc", ".", "Left", ")", "is_string", "=", "self", ".", "_helper", ".", "is_comment_or_string", "(", "tc", ",", "formats", "=", "[", "'string'", "]", ")", "if", "next_char", "in", "[", "'\"'", ",", "\"'\"", "]", ":", "tc", ".", "movePosition", "(", "tc", ".", "Right", ")", "if", "is_string", ":", "trav", "=", "QTextCursor", "(", "tc", ")", "while", "self", ".", "_helper", ".", "is_comment_or_string", "(", "trav", ",", "formats", "=", "[", "'string'", "]", ")", ":", "trav", ".", "movePosition", "(", "trav", ".", "Left", ")", "trav", ".", "movePosition", "(", "trav", ".", "Right", ")", "symbol", "=", "'%s'", "%", "self", ".", "_get_next_char", "(", "trav", ")", "pre", "+=", "symbol", "post", "+=", "symbol", "return", "pre", ",", "post" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
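The `prev_open` branch is the common case: pressing Enter right after an opening bracket indents one tab level past the indentation of the line holding that bracket. A small sketch of just that computation, with a hypothetical `tab_length` of 4:

```python
def indent_after_open_bracket(open_line_txt, tab_length=4):
    # Indentation of the line that contains the opening bracket...
    open_line_indent = len(open_line_txt) - len(open_line_txt.lstrip())
    # ...plus one extra tab level, as in the prev_open branch above.
    return (open_line_indent + tab_length) * ' '

assert indent_after_open_bracket("    result = compute(") == " " * 8
```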
valid
PyAutoIndentMode._at_block_start
Improve QTextCursor.atBlockStart to ignore spaces
pyqode/python/modes/autoindent.py
```python
def _at_block_start(tc, line):
    """
    Improve QTextCursor.atBlockStart to ignore spaces
    """
    if tc.atBlockStart():
        return True
    column = tc.columnNumber()
    indentation = len(line) - len(line.lstrip())
    return column <= indentation
```
[ "Improve", "QTextCursor", ".", "atBlockStart", "to", "ignore", "spaces" ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/pyqode/python/modes/autoindent.py#L301-L309
[ "def", "_at_block_start", "(", "tc", ",", "line", ")", ":", "if", "tc", ".", "atBlockStart", "(", ")", ":", "return", "True", "column", "=", "tc", ".", "columnNumber", "(", ")", "indentation", "=", "len", "(", "line", ")", "-", "len", "(", "line", ".", "lstrip", "(", ")", ")", "return", "column", "<=", "indentation" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
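A plain-Python sketch of the same test, with the cursor column passed as an int instead of read from a QTextCursor:

```python
def at_block_start(column, line):
    # The cursor counts as "at block start" anywhere inside the leading
    # whitespace, not only at column 0.
    indentation = len(line) - len(line.lstrip())
    return column <= indentation

assert at_block_start(0, "    x = 1")
assert at_block_start(4, "    x = 1")      # just before the first 'x'
assert not at_block_start(5, "    x = 1")  # past the first character
```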
valid
PyFileManager.detect_encoding
For the implementation of encoding definitions in Python, look at: - http://www.python.org/dev/peps/pep-0263/ .. note:: code taken and adapted from ```jedi.common.source_to_unicode.detect_encoding```
pyqode/python/managers/file.py
```python
def detect_encoding(self, path):
    """
    For the implementation of encoding definitions in Python, look at:
    - http://www.python.org/dev/peps/pep-0263/

    .. note:: code taken and adapted from
        ```jedi.common.source_to_unicode.detect_encoding```
    """
    with open(path, 'rb') as file:
        source = file.read()
    # take care of line encodings (not in jedi)
    source = source.replace(b'\r', b'')
    source_str = str(source).replace('\\n', '\n')
    byte_mark = ast.literal_eval(r"b'\xef\xbb\xbf'")
    if source.startswith(byte_mark):
        # UTF-8 byte-order mark
        return 'utf-8'
    first_two_lines = re.match(r'(?:[^\n]*\n){0,2}', source_str).group(0)
    possible_encoding = re.search(r"coding[=:]\s*([-\w.]+)",
                                  first_two_lines)
    if possible_encoding:
        return possible_encoding.group(1)
    return 'UTF-8'
```
[ "For", "the", "implementation", "of", "encoding", "definitions", "in", "Python", "look", "at", ":", "-", "http", ":", "//", "www", ".", "python", ".", "org", "/", "dev", "/", "peps", "/", "pep", "-", "0263", "/" ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/pyqode/python/managers/file.py#L23-L47
[ "def", "detect_encoding", "(", "self", ",", "path", ")", ":", "with", "open", "(", "path", ",", "'rb'", ")", "as", "file", ":", "source", "=", "file", ".", "read", "(", ")", "# take care of line encodings (not in jedi)", "source", "=", "source", ".", "replace", "(", "b'\\r'", ",", "b''", ")", "source_str", "=", "str", "(", "source", ")", ".", "replace", "(", "'\\\\n'", ",", "'\\n'", ")", "byte_mark", "=", "ast", ".", "literal_eval", "(", "r\"b'\\xef\\xbb\\xbf'\"", ")", "if", "source", ".", "startswith", "(", "byte_mark", ")", ":", "# UTF-8 byte-order mark", "return", "'utf-8'", "first_two_lines", "=", "re", ".", "match", "(", "r'(?:[^\\n]*\\n){0,2}'", ",", "source_str", ")", ".", "group", "(", "0", ")", "possible_encoding", "=", "re", ".", "search", "(", "r\"coding[=:]\\s*([-\\w.]+)\"", ",", "first_two_lines", ")", "if", "possible_encoding", ":", "return", "possible_encoding", ".", "group", "(", "1", ")", "return", "'UTF-8'" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
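The PEP 263 cookie search above can be exercised on its own. For instance, applying the same regular expressions to an in-memory snippet:

```python
import re

source = "#!/usr/bin/env python\n# -*- coding: latin-1 -*-\nx = 1\n"
# Grab at most the first two lines, as the worker does.
first_two_lines = re.match(r'(?:[^\n]*\n){0,2}', source).group(0)
match = re.search(r"coding[=:]\s*([-\w.]+)", first_two_lines)
print(match.group(1))  # latin-1
```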
valid
CommentsMode.on_state_changed
Called when the mode is activated/deactivated
pyqode/python/modes/comments.py
```python
def on_state_changed(self, state):
    """
    Called when the mode is activated/deactivated
    """
    if state:
        self.action.triggered.connect(self.comment)
        self.editor.add_action(self.action, sub_menu='Python')
        if 'pyqt5' in os.environ['QT_API'].lower():
            self.editor.key_pressed.connect(self.on_key_pressed)
    else:
        self.editor.remove_action(self.action, sub_menu='Python')
        self.action.triggered.disconnect(self.comment)
        if 'pyqt5' in os.environ['QT_API'].lower():
            self.editor.key_pressed.disconnect(self.on_key_pressed)
```
[ "Called", "when", "the", "mode", "is", "activated", "/", "deactivated" ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/pyqode/python/modes/comments.py#L19-L32
[ "def", "on_state_changed", "(", "self", ",", "state", ")", ":", "if", "state", ":", "self", ".", "action", ".", "triggered", ".", "connect", "(", "self", ".", "comment", ")", "self", ".", "editor", ".", "add_action", "(", "self", ".", "action", ",", "sub_menu", "=", "'Python'", ")", "if", "'pyqt5'", "in", "os", ".", "environ", "[", "'QT_API'", "]", ".", "lower", "(", ")", ":", "self", ".", "editor", ".", "key_pressed", ".", "connect", "(", "self", ".", "on_key_pressed", ")", "else", ":", "self", ".", "editor", ".", "remove_action", "(", "self", ".", "action", ",", "sub_menu", "=", "'Python'", ")", "self", ".", "action", ".", "triggered", ".", "disconnect", "(", "self", ".", "comment", ")", "if", "'pyqt5'", "in", "os", ".", "environ", "[", "'QT_API'", "]", ".", "lower", "(", ")", ":", "self", ".", "editor", ".", "key_pressed", ".", "disconnect", "(", "self", ".", "on_key_pressed", ")" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
valid
CommentsMode.comment
Comments/Uncomments the selected lines or the current lines if there is no selection.
pyqode/python/modes/comments.py
```python
def comment(self):
    """
    Comments/Uncomments the selected lines or the current lines if there
    is no selection.
    """
    cursor = self.editor.textCursor()
    # get the indent at which comment should be inserted and whether to
    # comment or uncomment the selected text
    indent, comment, nb_lines = self.get_operation()
    has_selection = cursor.hasSelection()
    if nb_lines > 1:
        self._move_cursor_to_selection_start(cursor)
        cursor.beginEditBlock()
        for i in range(nb_lines):
            self.comment_line(indent, cursor, comment)
            cursor.movePosition(cursor.NextBlock)
        cursor.endEditBlock()
    else:
        # comment a single line
        cursor.beginEditBlock()
        self.comment_line(indent, cursor, comment)
        if not has_selection:
            # move to the first non-whitespace character of the next line
            cursor.movePosition(cursor.NextBlock)
            text = cursor.block().text()
            indent = len(text) - len(text.lstrip())
            cursor.movePosition(cursor.Right, cursor.MoveAnchor, indent)
            cursor.endEditBlock()
            self.editor.setTextCursor(cursor)
        else:
            cursor.endEditBlock()
```
[ "Comments", "/", "Uncomments", "the", "selected", "lines", "or", "the", "current", "lines", "if", "there", "is", "no", "selection", "." ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/pyqode/python/modes/comments.py#L78-L108
[ "def", "comment", "(", "self", ")", ":", "cursor", "=", "self", ".", "editor", ".", "textCursor", "(", ")", "# get the indent at which comment should be inserted and whether to", "# comment or uncomment the selected text", "indent", ",", "comment", ",", "nb_lines", "=", "self", ".", "get_operation", "(", ")", "has_selection", "=", "cursor", ".", "hasSelection", "(", ")", "if", "nb_lines", ">", "1", ":", "self", ".", "_move_cursor_to_selection_start", "(", "cursor", ")", "cursor", ".", "beginEditBlock", "(", ")", "for", "i", "in", "range", "(", "nb_lines", ")", ":", "self", ".", "comment_line", "(", "indent", ",", "cursor", ",", "comment", ")", "cursor", ".", "movePosition", "(", "cursor", ".", "NextBlock", ")", "cursor", ".", "endEditBlock", "(", ")", "else", ":", "# comment a single line", "cursor", ".", "beginEditBlock", "(", ")", "self", ".", "comment_line", "(", "indent", ",", "cursor", ",", "comment", ")", "if", "not", "has_selection", ":", "# move to the first non-whitespace character of the next line", "cursor", ".", "movePosition", "(", "cursor", ".", "NextBlock", ")", "text", "=", "cursor", ".", "block", "(", ")", ".", "text", "(", ")", "indent", "=", "len", "(", "text", ")", "-", "len", "(", "text", ".", "lstrip", "(", ")", ")", "cursor", ".", "movePosition", "(", "cursor", ".", "Right", ",", "cursor", ".", "MoveAnchor", ",", "indent", ")", "cursor", ".", "endEditBlock", "(", ")", "self", ".", "editor", ".", "setTextCursor", "(", "cursor", ")", "else", ":", "cursor", ".", "endEditBlock", "(", ")" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
valid
PyCodeEditBase.setPlainText
Extends QCodeEdit.setPlainText to allow the user to setPlainText without a mimetype (since the python syntax highlighter does not use it).
pyqode/python/widgets/code_edit.py
```python
def setPlainText(self, txt, mimetype='text/x-python', encoding='utf-8'):
    """
    Extends QCodeEdit.setPlainText to allow the user to setPlainText
    without a mimetype (since the python syntax highlighter does not
    use it).
    """
    try:
        self.syntax_highlighter.docstrings[:] = []
        self.syntax_highlighter.import_statements[:] = []
    except AttributeError:
        pass
    super(PyCodeEditBase, self).setPlainText(txt, mimetype, encoding)
```
[ "Extends", "QCodeEdit", ".", "setPlainText", "to", "allow", "user", "to", "setPlainText", "without", "mimetype", "(", "since", "the", "python", "syntax", "highlighter", "does", "not", "use", "it", ")", "." ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/pyqode/python/widgets/code_edit.py#L34-L44
[ "def", "setPlainText", "(", "self", ",", "txt", ",", "mimetype", "=", "'text/x-python'", ",", "encoding", "=", "'utf-8'", ")", ":", "try", ":", "self", ".", "syntax_highlighter", ".", "docstrings", "[", ":", "]", "=", "[", "]", "self", ".", "syntax_highlighter", ".", "import_statements", "[", ":", "]", "=", "[", "]", "except", "AttributeError", ":", "pass", "super", "(", "PyCodeEditBase", ",", "self", ")", ".", "setPlainText", "(", "txt", ",", "mimetype", ",", "encoding", ")" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
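The slice assignment used here (`lst[:] = []`) clears the highlighter's lists in place rather than rebinding the attribute, so any other object holding a reference to the same list sees the change. A quick demonstration:

```python
docstrings = ['"""old docstring"""']
alias = docstrings           # a second reference to the same list object
docstrings[:] = []           # cleared in place, as in setPlainText above
print(alias, alias is docstrings)  # [] True
```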
valid
PyConsole.update_terminal_colors
Update terminal color scheme based on the pygments color scheme colors
pyqode/python/widgets/console.py
```python
def update_terminal_colors(self):
    """
    Update terminal color scheme based on the pygments color scheme colors
    """
    self.color_scheme = self.create_color_scheme(
        background=self.syntax_highlighter.color_scheme.background,
        foreground=self.syntax_highlighter.color_scheme.formats[
            'normal'].foreground().color())
```
[ "Update", "terminal", "color", "scheme", "based", "on", "the", "pygments", "color", "scheme", "colors" ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/pyqode/python/widgets/console.py#L71-L77
[ "def", "update_terminal_colors", "(", "self", ")", ":", "self", ".", "color_scheme", "=", "self", ".", "create_color_scheme", "(", "background", "=", "self", ".", "syntax_highlighter", ".", "color_scheme", ".", "background", ",", "foreground", "=", "self", ".", "syntax_highlighter", ".", "color_scheme", ".", "formats", "[", "'normal'", "]", ".", "foreground", "(", ")", ".", "color", "(", ")", ")" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
valid
PyInteractiveConsole.mouseMoveEvent
Extends mouseMoveEvent to display a pointing hand cursor when the mouse cursor is over a file location
pyqode/python/widgets/interactive.py
```python
def mouseMoveEvent(self, e):
    """
    Extends mouseMoveEvent to display a pointing hand cursor when the
    mouse cursor is over a file location
    """
    super(PyInteractiveConsole, self).mouseMoveEvent(e)
    cursor = self.cursorForPosition(e.pos())
    assert isinstance(cursor, QtGui.QTextCursor)
    p = cursor.positionInBlock()
    usd = cursor.block().userData()
    if usd and usd.start_pos_in_block <= p <= usd.end_pos_in_block:
        if QtWidgets.QApplication.overrideCursor() is None:
            QtWidgets.QApplication.setOverrideCursor(
                QtGui.QCursor(QtCore.Qt.PointingHandCursor))
    else:
        if QtWidgets.QApplication.overrideCursor() is not None:
            QtWidgets.QApplication.restoreOverrideCursor()
```
[ "Extends", "mouseMoveEvent", "to", "display", "a", "pointing", "hand", "cursor", "when", "the", "mouse", "cursor", "is", "over", "a", "file", "location" ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/pyqode/python/widgets/interactive.py#L97-L113
[ "def", "mouseMoveEvent", "(", "self", ",", "e", ")", ":", "super", "(", "PyInteractiveConsole", ",", "self", ")", ".", "mouseMoveEvent", "(", "e", ")", "cursor", "=", "self", ".", "cursorForPosition", "(", "e", ".", "pos", "(", ")", ")", "assert", "isinstance", "(", "cursor", ",", "QtGui", ".", "QTextCursor", ")", "p", "=", "cursor", ".", "positionInBlock", "(", ")", "usd", "=", "cursor", ".", "block", "(", ")", ".", "userData", "(", ")", "if", "usd", "and", "usd", ".", "start_pos_in_block", "<=", "p", "<=", "usd", ".", "end_pos_in_block", ":", "if", "QtWidgets", ".", "QApplication", ".", "overrideCursor", "(", ")", "is", "None", ":", "QtWidgets", ".", "QApplication", ".", "setOverrideCursor", "(", "QtGui", ".", "QCursor", "(", "QtCore", ".", "Qt", ".", "PointingHandCursor", ")", ")", "else", ":", "if", "QtWidgets", ".", "QApplication", ".", "overrideCursor", "(", ")", "is", "not", "None", ":", "QtWidgets", ".", "QApplication", ".", "restoreOverrideCursor", "(", ")" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
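The guards above push the pointing-hand cursor at most once and pop it once, because Qt keeps a stack of application-wide override cursors. A hedged sketch of that pattern in isolation, assuming PyQt5 is installed and a `QApplication` is already running; `over_link` is a hypothetical flag meaning "the mouse is over a file location":

```python
from PyQt5 import QtCore, QtGui, QtWidgets

def update_override_cursor(over_link):
    app = QtWidgets.QApplication.instance()  # assumed to exist
    if over_link:
        if app.overrideCursor() is None:     # push only once
            app.setOverrideCursor(
                QtGui.QCursor(QtCore.Qt.PointingHandCursor))
    elif app.overrideCursor() is not None:   # pop only if we pushed
        app.restoreOverrideCursor()
```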
valid
PyInteractiveConsole.mousePressEvent
Emits open_file_requested if the press event occurred over a file location string.
pyqode/python/widgets/interactive.py
```python
def mousePressEvent(self, e):
    """
    Emits open_file_requested if the press event occurred over a file
    location string.
    """
    super(PyInteractiveConsole, self).mousePressEvent(e)
    cursor = self.cursorForPosition(e.pos())
    p = cursor.positionInBlock()
    usd = cursor.block().userData()
    if usd and usd.start_pos_in_block <= p <= usd.end_pos_in_block:
        if e.button() == QtCore.Qt.LeftButton:
            self.open_file_requested.emit(usd.filename, usd.line)
```
[ "Emits", "open_file_requested", "if", "the", "press", "event", "occured", "over", "a", "file", "location", "string", "." ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/pyqode/python/widgets/interactive.py#L115-L126
[ "def", "mousePressEvent", "(", "self", ",", "e", ")", ":", "super", "(", "PyInteractiveConsole", ",", "self", ")", ".", "mousePressEvent", "(", "e", ")", "cursor", "=", "self", ".", "cursorForPosition", "(", "e", ".", "pos", "(", ")", ")", "p", "=", "cursor", ".", "positionInBlock", "(", ")", "usd", "=", "cursor", ".", "block", "(", ")", ".", "userData", "(", ")", "if", "usd", "and", "usd", ".", "start_pos_in_block", "<=", "p", "<=", "usd", ".", "end_pos_in_block", ":", "if", "e", ".", "button", "(", ")", "==", "QtCore", ".", "Qt", ".", "LeftButton", ":", "self", ".", "open_file_requested", ".", "emit", "(", "usd", ".", "filename", ",", "usd", ".", "line", ")" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
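The hit test relies on Python's chained comparison against the span stored in the block's user data. A sketch with a hypothetical stand-in for the user-data object:

```python
class FakeUserData:
    # Hypothetical stand-in for the QTextBlockUserData subclass that
    # stores the span of a file-location string inside the block.
    start_pos_in_block = 10
    end_pos_in_block = 30

usd = FakeUserData()
for p in (5, 10, 20, 30, 31):
    hit = usd.start_pos_in_block <= p <= usd.end_pos_in_block
    print(p, hit)  # True only for 10, 20 and 30 (bounds inclusive)
```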
valid
PythonFoldDetector.detect_fold_level
Performs fold level detection for the current block (takes the previous block into account). :param prev_block: previous block, None if `block` is the first block. :param block: block to analyse. :return: block fold level
pyqode/python/folding.py
```python
def detect_fold_level(self, prev_block, block):
    """
    Performs fold level detection for the current block (takes the
    previous block into account).

    :param prev_block: previous block, None if `block` is the first block.
    :param block: block to analyse.
    :return: block fold level
    """
    # Python is an indent based language, so using indentation for folding
    # makes sense, but we restrict new regions to indentation after a ':',
    # that way only the real logical blocks are displayed.
    lvl = super(PythonFoldDetector, self).detect_fold_level(
        prev_block, block)
    # cancel false indentation, indentation can only happen if there is
    # ':' on the previous line
    prev_lvl = TextBlockHelper.get_fold_lvl(prev_block)
    if prev_block and lvl > prev_lvl and not (
            self._strip_comments(prev_block).endswith(':')):
        lvl = prev_lvl
    lvl = self._handle_docstrings(block, lvl, prev_block)
    lvl = self._handle_imports(block, lvl, prev_block)
    return lvl
```
[ "Perfoms", "fold", "level", "detection", "for", "current", "block", "(", "take", "previous", "block", "into", "account", ")", "." ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/pyqode/python/folding.py#L63-L85
[ "def", "detect_fold_level", "(", "self", ",", "prev_block", ",", "block", ")", ":", "# Python is an indent based language so use indentation for folding", "# makes sense but we restrict new regions to indentation after a ':',", "# that way only the real logical blocks are displayed.", "lvl", "=", "super", "(", "PythonFoldDetector", ",", "self", ")", ".", "detect_fold_level", "(", "prev_block", ",", "block", ")", "# cancel false indentation, indentation can only happen if there is", "# ':' on the previous line", "prev_lvl", "=", "TextBlockHelper", ".", "get_fold_lvl", "(", "prev_block", ")", "if", "prev_block", "and", "lvl", ">", "prev_lvl", "and", "not", "(", "self", ".", "_strip_comments", "(", "prev_block", ")", ".", "endswith", "(", "':'", ")", ")", ":", "lvl", "=", "prev_lvl", "lvl", "=", "self", ".", "_handle_docstrings", "(", "block", ",", "lvl", ",", "prev_block", ")", "lvl", "=", "self", ".", "_handle_imports", "(", "block", ",", "lvl", ",", "prev_block", ")", "return", "lvl" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
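The key rule (an indent increase only opens a fold region when the previous line ends in `:`) can be paraphrased without Qt. A minimal sketch, with naive comment stripping standing in for the syntax-aware `_strip_comments`:

```python
def accept_fold_increase(prev_line, prev_lvl, lvl):
    # Naive comment stripping; the real _strip_comments is syntax-aware.
    code = prev_line.split('#', 1)[0].rstrip()
    return lvl if code.endswith(':') else prev_lvl

assert accept_fold_increase("def f():", 0, 1) == 1   # new logical block
assert accept_fold_increase("x = [1,", 0, 1) == 0    # mere continuation
```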
valid
MainWindow.setup_actions
Connects slots to signals
examples/pynotepad/pynotepad/main_window.py
```python
def setup_actions(self):
    """ Connects slots to signals """
    self.actionOpen.triggered.connect(self.on_open)
    self.actionNew.triggered.connect(self.on_new)
    self.actionSave.triggered.connect(self.on_save)
    self.actionSave_as.triggered.connect(self.on_save_as)
    self.actionQuit.triggered.connect(
        QtWidgets.QApplication.instance().quit)
    self.tabWidget.current_changed.connect(self.on_current_tab_changed)
    self.tabWidget.last_tab_closed.connect(self.on_last_tab_closed)
    self.actionAbout.triggered.connect(self.on_about)
    self.actionRun.triggered.connect(self.on_run)
    self.interactiveConsole.process_finished.connect(
        self.on_process_finished)
    self.actionConfigure_run.triggered.connect(self.on_configure_run)
```
[ "Connects", "slots", "to", "signals" ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/examples/pynotepad/pynotepad/main_window.py#L63-L77
[ "def", "setup_actions", "(", "self", ")", ":", "self", ".", "actionOpen", ".", "triggered", ".", "connect", "(", "self", ".", "on_open", ")", "self", ".", "actionNew", ".", "triggered", ".", "connect", "(", "self", ".", "on_new", ")", "self", ".", "actionSave", ".", "triggered", ".", "connect", "(", "self", ".", "on_save", ")", "self", ".", "actionSave_as", ".", "triggered", ".", "connect", "(", "self", ".", "on_save_as", ")", "self", ".", "actionQuit", ".", "triggered", ".", "connect", "(", "QtWidgets", ".", "QApplication", ".", "instance", "(", ")", ".", "quit", ")", "self", ".", "tabWidget", ".", "current_changed", ".", "connect", "(", "self", ".", "on_current_tab_changed", ")", "self", ".", "tabWidget", ".", "last_tab_closed", ".", "connect", "(", "self", ".", "on_last_tab_closed", ")", "self", ".", "actionAbout", ".", "triggered", ".", "connect", "(", "self", ".", "on_about", ")", "self", ".", "actionRun", ".", "triggered", ".", "connect", "(", "self", ".", "on_run", ")", "self", ".", "interactiveConsole", ".", "process_finished", ".", "connect", "(", "self", ".", "on_process_finished", ")", "self", ".", "actionConfigure_run", ".", "triggered", ".", "connect", "(", "self", ".", "on_configure_run", ")" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
valid
MainWindow.setup_editor
Setup the python editor, run the server and connect a few signals. :param editor: editor to setup.
examples/pynotepad/pynotepad/main_window.py
```python
def setup_editor(self, editor):
    """
    Setup the python editor, run the server and connect a few signals.

    :param editor: editor to setup.
    """
    editor.cursorPositionChanged.connect(self.on_cursor_pos_changed)
    try:
        m = editor.modes.get(modes.GoToAssignmentsMode)
    except KeyError:
        pass
    else:
        assert isinstance(m, modes.GoToAssignmentsMode)
        m.out_of_doc.connect(self.on_goto_out_of_doc)
```
[ "Setup", "the", "python", "editor", "run", "the", "server", "and", "connect", "a", "few", "signals", "." ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/examples/pynotepad/pynotepad/main_window.py#L103-L116
[ "def", "setup_editor", "(", "self", ",", "editor", ")", ":", "editor", ".", "cursorPositionChanged", ".", "connect", "(", "self", ".", "on_cursor_pos_changed", ")", "try", ":", "m", "=", "editor", ".", "modes", ".", "get", "(", "modes", ".", "GoToAssignmentsMode", ")", "except", "KeyError", ":", "pass", "else", ":", "assert", "isinstance", "(", "m", ",", "modes", ".", "GoToAssignmentsMode", ")", "m", ".", "out_of_doc", ".", "connect", "(", "self", ".", "on_goto_out_of_doc", ")" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
valid
MainWindow.open_file
Creates a new GenericCodeEdit, opens the requested file and adds it to the tab widget. :param path: Path of the file to open :return: The opened editor if open succeeded.
examples/pynotepad/pynotepad/main_window.py
```python
def open_file(self, path, line=None):
    """
    Creates a new GenericCodeEdit, opens the requested file and adds it
    to the tab widget.

    :param path: Path of the file to open
    :return: The opened editor if open succeeded.
    """
    editor = None
    if path:
        interpreter, pyserver, args = self._get_backend_parameters()
        editor = self.tabWidget.open_document(
            path, None, interpreter=interpreter, server_script=pyserver,
            args=args)
        if editor:
            self.setup_editor(editor)
        self.recent_files_manager.open_file(path)
        self.menu_recents.update_actions()
    if line is not None:
        TextHelper(self.tabWidget.current_widget()).goto_line(line)
    return editor
```
[ "Creates", "a", "new", "GenericCodeEdit", "opens", "the", "requested", "file", "and", "adds", "it", "to", "the", "tab", "widget", "." ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/examples/pynotepad/pynotepad/main_window.py#L118-L139
[ "def", "open_file", "(", "self", ",", "path", ",", "line", "=", "None", ")", ":", "editor", "=", "None", "if", "path", ":", "interpreter", ",", "pyserver", ",", "args", "=", "self", ".", "_get_backend_parameters", "(", ")", "editor", "=", "self", ".", "tabWidget", ".", "open_document", "(", "path", ",", "None", ",", "interpreter", "=", "interpreter", ",", "server_script", "=", "pyserver", ",", "args", "=", "args", ")", "if", "editor", ":", "self", ".", "setup_editor", "(", "editor", ")", "self", ".", "recent_files_manager", ".", "open_file", "(", "path", ")", "self", ".", "menu_recents", ".", "update_actions", "(", ")", "if", "line", "is", "not", "None", ":", "TextHelper", "(", "self", ".", "tabWidget", ".", "current_widget", "(", ")", ")", ".", "goto_line", "(", "line", ")", "return", "editor" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
valid
MainWindow._get_backend_parameters
Gets the pyqode backend parameters (interpreter and script).
examples/pynotepad/pynotepad/main_window.py
```python
def _get_backend_parameters(self):
    """
    Gets the pyqode backend parameters (interpreter and script).
    """
    frozen = hasattr(sys, 'frozen')
    interpreter = Settings().interpreter
    if frozen:
        interpreter = None
    pyserver = server.__file__ if interpreter is not None else 'server.exe'
    args = []
    return interpreter, pyserver, args
```
[ "Gets", "the", "pyqode", "backend", "parameters", "(", "interpreter", "and", "script", ")", "." ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/examples/pynotepad/pynotepad/main_window.py#L141-L151
[ "def", "_get_backend_parameters", "(", "self", ")", ":", "frozen", "=", "hasattr", "(", "sys", ",", "'frozen'", ")", "interpreter", "=", "Settings", "(", ")", ".", "interpreter", "if", "frozen", ":", "interpreter", "=", "None", "pyserver", "=", "server", ".", "__file__", "if", "interpreter", "is", "not", "None", "else", "'server.exe'", "args", "=", "[", "]", "return", "interpreter", ",", "pyserver", ",", "args" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
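The `frozen` test relies on freezers such as cx_Freeze or PyInstaller setting a `frozen` attribute on `sys`, which a normal interpreter does not have. A sketch of the same selection logic with hypothetical values:

```python
import sys

frozen = hasattr(sys, 'frozen')               # True only in a frozen app
interpreter = None if frozen else sys.executable
# Frozen builds ship a compiled backend; otherwise the .py script is used.
pyserver = 'server.exe' if interpreter is None else 'path/to/server.py'
print(frozen, interpreter, pyserver)
```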
valid
MainWindow.on_new
Add a new empty code editor to the tab widget
examples/pynotepad/pynotepad/main_window.py
```python
def on_new(self):
    """
    Add a new empty code editor to the tab widget
    """
    interpreter, pyserver, args = self._get_backend_parameters()
    self.setup_editor(self.tabWidget.create_new_document(
        extension='.py', interpreter=interpreter, server_script=pyserver,
        args=args))
    self.actionRun.setDisabled(True)
    self.actionConfigure_run.setDisabled(True)
```
[ "Add", "a", "new", "empty", "code", "editor", "to", "the", "tab", "widget" ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/examples/pynotepad/pynotepad/main_window.py#L153-L162
[ "def", "on_new", "(", "self", ")", ":", "interpreter", ",", "pyserver", ",", "args", "=", "self", ".", "_get_backend_parameters", "(", ")", "self", ".", "setup_editor", "(", "self", ".", "tabWidget", ".", "create_new_document", "(", "extension", "=", "'.py'", ",", "interpreter", "=", "interpreter", ",", "server_script", "=", "pyserver", ",", "args", "=", "args", ")", ")", "self", ".", "actionRun", ".", "setDisabled", "(", "True", ")", "self", ".", "actionConfigure_run", ".", "setDisabled", "(", "True", ")" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
valid
MainWindow.on_open
Shows an open file dialog and opens the file if the dialog was accepted.
examples/pynotepad/pynotepad/main_window.py
```python
def on_open(self):
    """
    Shows an open file dialog and opens the file if the dialog was
    accepted.
    """
    filename, filter = QtWidgets.QFileDialog.getOpenFileName(self, 'Open')
    if filename:
        self.open_file(filename)
    self.actionRun.setEnabled(True)
    self.actionConfigure_run.setEnabled(True)
```
[ "Shows", "an", "open", "file", "dialog", "and", "open", "the", "file", "if", "the", "dialog", "was", "accepted", "." ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/examples/pynotepad/pynotepad/main_window.py#L164-L174
[ "def", "on_open", "(", "self", ")", ":", "filename", ",", "filter", "=", "QtWidgets", ".", "QFileDialog", ".", "getOpenFileName", "(", "self", ",", "'Open'", ")", "if", "filename", ":", "self", ".", "open_file", "(", "filename", ")", "self", ".", "actionRun", ".", "setEnabled", "(", "True", ")", "self", ".", "actionConfigure_run", ".", "setEnabled", "(", "True", ")" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
valid
MainWindow.on_save_as
Save the current editor document as.
examples/pynotepad/pynotepad/main_window.py
```python
def on_save_as(self):
    """
    Save the current editor document as.
    """
    path = self.tabWidget.current_widget().file.path
    path = os.path.dirname(path) if path else ''
    filename, filter = QtWidgets.QFileDialog.getSaveFileName(
        self, 'Save', path)
    if filename:
        self.tabWidget.save_current(filename)
        self.recent_files_manager.open_file(filename)
        self.menu_recents.update_actions()
        self.actionRun.setEnabled(True)
        self.actionConfigure_run.setEnabled(True)
        self._update_status_bar(self.tabWidget.current_widget())
```
[ "Save", "the", "current", "editor", "document", "as", "." ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/examples/pynotepad/pynotepad/main_window.py#L181-L195
[ "def", "on_save_as", "(", "self", ")", ":", "path", "=", "self", ".", "tabWidget", ".", "current_widget", "(", ")", ".", "file", ".", "path", "path", "=", "os", ".", "path", ".", "dirname", "(", "path", ")", "if", "path", "else", "''", "filename", ",", "filter", "=", "QtWidgets", ".", "QFileDialog", ".", "getSaveFileName", "(", "self", ",", "'Save'", ",", "path", ")", "if", "filename", ":", "self", ".", "tabWidget", ".", "save_current", "(", "filename", ")", "self", ".", "recent_files_manager", ".", "open_file", "(", "filename", ")", "self", ".", "menu_recents", ".", "update_actions", "(", ")", "self", ".", "actionRun", ".", "setEnabled", "(", "True", ")", "self", ".", "actionConfigure_run", ".", "setEnabled", "(", "True", ")", "self", ".", "_update_status_bar", "(", "self", ".", "tabWidget", ".", "current_widget", "(", ")", ")" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
valid
MainWindow.setup_mnu_style
setup the style menu for an editor tab
examples/pynotepad/pynotepad/main_window.py
```python
def setup_mnu_style(self, editor):
    """ setup the style menu for an editor tab """
    menu = QtWidgets.QMenu('Styles', self.menuEdit)
    group = QtWidgets.QActionGroup(self)
    self.styles_group = group
    current_style = editor.syntax_highlighter.color_scheme.name
    group.triggered.connect(self.on_style_changed)
    for s in sorted(PYGMENTS_STYLES):
        a = QtWidgets.QAction(menu)
        a.setText(s)
        a.setCheckable(True)
        if s == current_style:
            a.setChecked(True)
        group.addAction(a)
        menu.addAction(a)
    self.menuEdit.addMenu(menu)
```
[ "setup", "the", "style", "menu", "for", "an", "editor", "tab" ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/examples/pynotepad/pynotepad/main_window.py#L208-L223
[ "def", "setup_mnu_style", "(", "self", ",", "editor", ")", ":", "menu", "=", "QtWidgets", ".", "QMenu", "(", "'Styles'", ",", "self", ".", "menuEdit", ")", "group", "=", "QtWidgets", ".", "QActionGroup", "(", "self", ")", "self", ".", "styles_group", "=", "group", "current_style", "=", "editor", ".", "syntax_highlighter", ".", "color_scheme", ".", "name", "group", ".", "triggered", ".", "connect", "(", "self", ".", "on_style_changed", ")", "for", "s", "in", "sorted", "(", "PYGMENTS_STYLES", ")", ":", "a", "=", "QtWidgets", ".", "QAction", "(", "menu", ")", "a", ".", "setText", "(", "s", ")", "a", ".", "setCheckable", "(", "True", ")", "if", "s", "==", "current_style", ":", "a", ".", "setChecked", "(", "True", ")", "group", ".", "addAction", "(", "a", ")", "menu", ".", "addAction", "(", "a", ")", "self", ".", "menuEdit", ".", "addMenu", "(", "menu", ")" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
valid
MainWindow.setup_mnu_panels
Setup the panels menu for the current editor. :param editor:
examples/pynotepad/pynotepad/main_window.py
```python
def setup_mnu_panels(self, editor):
    """
    Setup the panels menu for the current editor.

    :param editor:
    """
    for panel in editor.panels:
        if panel.dynamic:
            continue
        a = QtWidgets.QAction(self.menuModes)
        a.setText(panel.name)
        a.setCheckable(True)
        a.setChecked(panel.enabled)
        a.changed.connect(self.on_panel_state_changed)
        a.panel = weakref.proxy(panel)
        self.menuPanels.addAction(a)
```
[ "Setup", "the", "panels", "menu", "for", "the", "current", "editor", ".", ":", "param", "editor", ":" ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/examples/pynotepad/pynotepad/main_window.py#L235-L249
[ "def", "setup_mnu_panels", "(", "self", ",", "editor", ")", ":", "for", "panel", "in", "editor", ".", "panels", ":", "if", "panel", ".", "dynamic", ":", "continue", "a", "=", "QtWidgets", ".", "QAction", "(", "self", ".", "menuModes", ")", "a", ".", "setText", "(", "panel", ".", "name", ")", "a", ".", "setCheckable", "(", "True", ")", "a", ".", "setChecked", "(", "panel", ".", "enabled", ")", "a", ".", "changed", ".", "connect", "(", "self", ".", "on_panel_state_changed", ")", "a", ".", "panel", "=", "weakref", ".", "proxy", "(", "panel", ")", "self", ".", "menuPanels", ".", "addAction", "(", "a", ")" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
valid
MainWindow.on_current_tab_changed
Update action states when the current tab changed.
examples/pynotepad/pynotepad/main_window.py
```python
def on_current_tab_changed(self):
    """
    Update action states when the current tab changed.
    """
    self.menuEdit.clear()
    self.menuModes.clear()
    self.menuPanels.clear()
    editor = self.tabWidget.current_widget()
    self.menuEdit.setEnabled(editor is not None)
    self.menuModes.setEnabled(editor is not None)
    self.menuPanels.setEnabled(editor is not None)
    self.actionSave.setEnabled(editor is not None)
    self.actionSave_as.setEnabled(editor is not None)
    self.actionConfigure_run.setEnabled(editor is not None)
    self.actionRun.setEnabled(editor is not None)
    if editor is not None:
        self.setup_mnu_edit(editor)
        self.setup_mnu_modes(editor)
        self.setup_mnu_panels(editor)
        self.widgetOutline.set_editor(editor)
    self._update_status_bar(editor)
```
[ "Update", "action", "states", "when", "the", "current", "tab", "changed", "." ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/examples/pynotepad/pynotepad/main_window.py#L254-L274
[ "def", "on_current_tab_changed", "(", "self", ")", ":", "self", ".", "menuEdit", ".", "clear", "(", ")", "self", ".", "menuModes", ".", "clear", "(", ")", "self", ".", "menuPanels", ".", "clear", "(", ")", "editor", "=", "self", ".", "tabWidget", ".", "current_widget", "(", ")", "self", ".", "menuEdit", ".", "setEnabled", "(", "editor", "is", "not", "None", ")", "self", ".", "menuModes", ".", "setEnabled", "(", "editor", "is", "not", "None", ")", "self", ".", "menuPanels", ".", "setEnabled", "(", "editor", "is", "not", "None", ")", "self", ".", "actionSave", ".", "setEnabled", "(", "editor", "is", "not", "None", ")", "self", ".", "actionSave_as", ".", "setEnabled", "(", "editor", "is", "not", "None", ")", "self", ".", "actionConfigure_run", ".", "setEnabled", "(", "editor", "is", "not", "None", ")", "self", ".", "actionRun", ".", "setEnabled", "(", "editor", "is", "not", "None", ")", "if", "editor", "is", "not", "None", ":", "self", ".", "setup_mnu_edit", "(", "editor", ")", "self", ".", "setup_mnu_modes", "(", "editor", ")", "self", ".", "setup_mnu_panels", "(", "editor", ")", "self", ".", "widgetOutline", ".", "set_editor", "(", "editor", ")", "self", ".", "_update_status_bar", "(", "editor", ")" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
valid
MainWindow.on_run
Run the current script
examples/pynotepad/pynotepad/main_window.py
```python
def on_run(self):
    """
    Run the current script
    """
    filename = self.tabWidget.current_widget().file.path
    wd = os.path.dirname(filename)
    args = Settings().get_run_config_for_file(filename)
    self.interactiveConsole.start_process(
        Settings().interpreter, args=[filename] + args, cwd=wd)
    self.dockWidget.show()
    self.actionRun.setEnabled(False)
    self.actionConfigure_run.setEnabled(False)
```
[ "Run", "the", "current", "current", "script" ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/examples/pynotepad/pynotepad/main_window.py#L326-L337
[ "def", "on_run", "(", "self", ")", ":", "filename", "=", "self", ".", "tabWidget", ".", "current_widget", "(", ")", ".", "file", ".", "path", "wd", "=", "os", ".", "path", ".", "dirname", "(", "filename", ")", "args", "=", "Settings", "(", ")", ".", "get_run_config_for_file", "(", "filename", ")", "self", ".", "interactiveConsole", ".", "start_process", "(", "Settings", "(", ")", ".", "interpreter", ",", "args", "=", "[", "filename", "]", "+", "args", ",", "cwd", "=", "wd", ")", "self", ".", "dockWidget", ".", "show", "(", ")", "self", ".", "actionRun", ".", "setEnabled", "(", "False", ")", "self", ".", "actionConfigure_run", ".", "setEnabled", "(", "False", ")" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
valid
MainWindow.on_goto_out_of_doc
Open a new tab when goto goes out of the current document. :param assignment: Destination
examples/pynotepad/pynotepad/main_window.py
```python
def on_goto_out_of_doc(self, assignment):
    """
    Open a new tab when goto goes out of the current document.

    :param assignment: Destination
    """
    editor = self.open_file(assignment.module_path)
    if editor:
        TextHelper(editor).goto_line(assignment.line, assignment.column)
```
[ "Open", "the", "a", "new", "tab", "when", "goto", "goes", "out", "of", "the", "current", "document", "." ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/examples/pynotepad/pynotepad/main_window.py#L339-L347
[ "def", "on_goto_out_of_doc", "(", "self", ",", "assignment", ")", ":", "editor", "=", "self", ".", "open_file", "(", "assignment", ".", "module_path", ")", "if", "editor", ":", "TextHelper", "(", "editor", ")", ".", "goto_line", "(", "assignment", ".", "line", ",", "assignment", ".", "column", ")" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
valid
calltips
Worker that returns a list of calltips. A calltip is a tuple made of the following parts: - module_name: name of the module of the function invoked - call_name: name of the function that is being called - params: the list of parameter names. - index: index of the current parameter - bracket_start :returns tuple(module_name, call_name, params)
pyqode/python/backend/workers.py
def calltips(request_data): """ Worker that returns a list of calltips. A calltips is a tuple made of the following parts: - module_name: name of the module of the function invoked - call_name: name of the function that is being called - params: the list of parameter names. - index: index of the current parameter - bracket_start :returns tuple(module_name, call_name, params) """ code = request_data['code'] line = request_data['line'] + 1 column = request_data['column'] path = request_data['path'] # encoding = request_data['encoding'] encoding = 'utf-8' # use jedi to get call signatures script = jedi.Script(code, line, column, path, encoding) signatures = script.call_signatures() for sig in signatures: results = (str(sig.module_name), str(sig.name), [p.description for p in sig.params], sig.index, sig.bracket_start, column) # todo: add support for multiple signatures, for that we need a custom # widget for showing calltips. return results return []
def calltips(request_data): """ Worker that returns a list of calltips. A calltips is a tuple made of the following parts: - module_name: name of the module of the function invoked - call_name: name of the function that is being called - params: the list of parameter names. - index: index of the current parameter - bracket_start :returns tuple(module_name, call_name, params) """ code = request_data['code'] line = request_data['line'] + 1 column = request_data['column'] path = request_data['path'] # encoding = request_data['encoding'] encoding = 'utf-8' # use jedi to get call signatures script = jedi.Script(code, line, column, path, encoding) signatures = script.call_signatures() for sig in signatures: results = (str(sig.module_name), str(sig.name), [p.description for p in sig.params], sig.index, sig.bracket_start, column) # todo: add support for multiple signatures, for that we need a custom # widget for showing calltips. return results return []
[ "Worker", "that", "returns", "a", "list", "of", "calltips", "." ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/pyqode/python/backend/workers.py#L21-L50
[ "def", "calltips", "(", "request_data", ")", ":", "code", "=", "request_data", "[", "'code'", "]", "line", "=", "request_data", "[", "'line'", "]", "+", "1", "column", "=", "request_data", "[", "'column'", "]", "path", "=", "request_data", "[", "'path'", "]", "# encoding = request_data['encoding']", "encoding", "=", "'utf-8'", "# use jedi to get call signatures", "script", "=", "jedi", ".", "Script", "(", "code", ",", "line", ",", "column", ",", "path", ",", "encoding", ")", "signatures", "=", "script", ".", "call_signatures", "(", ")", "for", "sig", "in", "signatures", ":", "results", "=", "(", "str", "(", "sig", ".", "module_name", ")", ",", "str", "(", "sig", ".", "name", ")", ",", "[", "p", ".", "description", "for", "p", "in", "sig", ".", "params", "]", ",", "sig", ".", "index", ",", "sig", ".", "bracket_start", ",", "column", ")", "# todo: add support for multiple signatures, for that we need a custom", "# widget for showing calltips.", "return", "results", "return", "[", "]" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
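A minimal sketch of driving the calltips worker above directly, assuming `jedi` (a version contemporary with this code, which still exposes jedi.NotFoundError) is installed and the module is importable as pyqode.python.backend.workers, matching the path recorded here:

from pyqode.python.backend import workers

# Cursor sits just after the opening parenthesis of open( on the first line.
request_data = {
    'code': 'open(',
    'line': 0,        # zero-based; the worker adds 1 before handing it to jedi
    'column': 5,
    'path': 'demo.py',
}
# Prints the (module_name, call_name, params, index, bracket_start, column)
# tuple for the first signature found, or [] when there is none.
print(workers.calltips(request_data))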
valid
goto_assignments
Go to assignments worker.
pyqode/python/backend/workers.py
def goto_assignments(request_data): """ Go to assignments worker. """ code = request_data['code'] line = request_data['line'] + 1 column = request_data['column'] path = request_data['path'] # encoding = request_data['encoding'] encoding = 'utf-8' script = jedi.Script(code, line, column, path, encoding) try: definitions = script.goto_assignments() except jedi.NotFoundError: pass else: ret_val = [(d.module_path, d.line - 1 if d.line else None, d.column, d.full_name) for d in definitions] return ret_val
def goto_assignments(request_data): """ Go to assignments worker. """ code = request_data['code'] line = request_data['line'] + 1 column = request_data['column'] path = request_data['path'] # encoding = request_data['encoding'] encoding = 'utf-8' script = jedi.Script(code, line, column, path, encoding) try: definitions = script.goto_assignments() except jedi.NotFoundError: pass else: ret_val = [(d.module_path, d.line - 1 if d.line else None, d.column, d.full_name) for d in definitions] return ret_val
[ "Go", "to", "assignements", "worker", "." ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/pyqode/python/backend/workers.py#L53-L72
[ "def", "goto_assignments", "(", "request_data", ")", ":", "code", "=", "request_data", "[", "'code'", "]", "line", "=", "request_data", "[", "'line'", "]", "+", "1", "column", "=", "request_data", "[", "'column'", "]", "path", "=", "request_data", "[", "'path'", "]", "# encoding = request_data['encoding']", "encoding", "=", "'utf-8'", "script", "=", "jedi", ".", "Script", "(", "code", ",", "line", ",", "column", ",", "path", ",", "encoding", ")", "try", ":", "definitions", "=", "script", ".", "goto_assignments", "(", ")", "except", "jedi", ".", "NotFoundError", ":", "pass", "else", ":", "ret_val", "=", "[", "(", "d", ".", "module_path", ",", "d", ".", "line", "-", "1", "if", "d", ".", "line", "else", "None", ",", "d", ".", "column", ",", "d", ".", "full_name", ")", "for", "d", "in", "definitions", "]", "return", "ret_val" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
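A similar sketch for the goto_assignments worker; note the worker shifts jedi's 1-based lines back to 0-based (d.line - 1) on the way out. Assumes jedi is installed and the import path matches the recorded module path:

from pyqode.python.backend import workers

code = 'x = 42\ny = x\n'
request_data = {
    'code': code,
    'line': 1,        # zero-based row of the cursor, on the x in `y = x`
    'column': 4,
    'path': 'demo.py',
}
# Each result is (module_path, zero_based_line_or_None, column, full_name);
# the assignment of x on the first line should come back as line 0.
print(workers.goto_assignments(request_data))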
valid
defined_names
Returns the list of defined names for the document.
pyqode/python/backend/workers.py
def defined_names(request_data): """ Returns the list of defined names for the document. """ global _old_definitions ret_val = [] path = request_data['path'] toplvl_definitions = jedi.names( request_data['code'], path, 'utf-8') for d in toplvl_definitions: definition = _extract_def(d, path) if d.type != 'import': ret_val.append(definition) ret_val = [d.to_dict() for d in ret_val] return ret_val
def defined_names(request_data): """ Returns the list of defined names for the document. """ global _old_definitions ret_val = [] path = request_data['path'] toplvl_definitions = jedi.names( request_data['code'], path, 'utf-8') for d in toplvl_definitions: definition = _extract_def(d, path) if d.type != 'import': ret_val.append(definition) ret_val = [d.to_dict() for d in ret_val] return ret_val
[ "Returns", "the", "list", "of", "defined", "names", "for", "the", "document", "." ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/pyqode/python/backend/workers.py#L105-L119
[ "def", "defined_names", "(", "request_data", ")", ":", "global", "_old_definitions", "ret_val", "=", "[", "]", "path", "=", "request_data", "[", "'path'", "]", "toplvl_definitions", "=", "jedi", ".", "names", "(", "request_data", "[", "'code'", "]", ",", "path", ",", "'utf-8'", ")", "for", "d", "in", "toplvl_definitions", ":", "definition", "=", "_extract_def", "(", "d", ",", "path", ")", "if", "d", ".", "type", "!=", "'import'", ":", "ret_val", ".", "append", "(", "definition", ")", "ret_val", "=", "[", "d", ".", "to_dict", "(", ")", "for", "d", "in", "ret_val", "]", "return", "ret_val" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
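A short sketch of the defined_names worker; the shape of each returned dict comes from the module's _extract_def/to_dict helpers, which are not part of this record, so only the call itself is shown. Assumes jedi is installed:

from pyqode.python.backend import workers

code = 'import os\n\nclass Foo:\n    def bar(self):\n        pass\n'
# The worker filters out definitions whose type is 'import', so only Foo
# (and whatever _extract_def reports beneath it) should be listed.
for d in workers.defined_names({'code': code, 'path': 'demo.py'}):
    print(d)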
valid
quick_doc
Worker that returns the documentation of the symbol under cursor.
pyqode/python/backend/workers.py
def quick_doc(request_data): """ Worker that returns the documentation of the symbol under cursor. """ code = request_data['code'] line = request_data['line'] + 1 column = request_data['column'] path = request_data['path'] # encoding = 'utf-8' encoding = 'utf-8' script = jedi.Script(code, line, column, path, encoding) try: definitions = script.goto_definitions() except jedi.NotFoundError: return [] else: ret_val = [d.docstring() for d in definitions] return ret_val
def quick_doc(request_data): """ Worker that returns the documentation of the symbol under cursor. """ code = request_data['code'] line = request_data['line'] + 1 column = request_data['column'] path = request_data['path'] # encoding = 'utf-8' encoding = 'utf-8' script = jedi.Script(code, line, column, path, encoding) try: definitions = script.goto_definitions() except jedi.NotFoundError: return [] else: ret_val = [d.docstring() for d in definitions] return ret_val
[ "Worker", "that", "returns", "the", "documentation", "of", "the", "symbol", "under", "cursor", "." ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/pyqode/python/backend/workers.py#L122-L139
[ "def", "quick_doc", "(", "request_data", ")", ":", "code", "=", "request_data", "[", "'code'", "]", "line", "=", "request_data", "[", "'line'", "]", "+", "1", "column", "=", "request_data", "[", "'column'", "]", "path", "=", "request_data", "[", "'path'", "]", "# encoding = 'utf-8'", "encoding", "=", "'utf-8'", "script", "=", "jedi", ".", "Script", "(", "code", ",", "line", ",", "column", ",", "path", ",", "encoding", ")", "try", ":", "definitions", "=", "script", ".", "goto_definitions", "(", ")", "except", "jedi", ".", "NotFoundError", ":", "return", "[", "]", "else", ":", "ret_val", "=", "[", "d", ".", "docstring", "(", ")", "for", "d", "in", "definitions", "]", "return", "ret_val" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
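A sketch of asking the quick_doc worker for a docstring. As the commented-out line in the body shows, the worker hard-codes utf-8 and never reads an encoding from the request. Assumes jedi is installed:

from pyqode.python.backend import workers

request_data = {
    'code': 'import json\njson.dumps',
    'line': 1,        # zero-based row; the worker adds 1 for jedi
    'column': 10,     # end of `json.dumps`
    'path': 'demo.py',
}
for doc in workers.quick_doc(request_data):
    print(doc.splitlines()[0])  # first line of each matched docstring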
valid
run_pep8
Worker that runs the pep8 tool on the current editor text. :returns a list of tuples (msg, msg_type, line_number)
pyqode/python/backend/workers.py
def run_pep8(request_data): """ Worker that run the pep8 tool on the current editor text. :returns a list of tuples (msg, msg_type, line_number) """ import pycodestyle from pyqode.python.backend.pep8utils import CustomChecker WARNING = 1 code = request_data['code'] path = request_data['path'] max_line_length = request_data['max_line_length'] ignore_rules = request_data['ignore_rules'] ignore_rules += ['W291', 'W292', 'W293', 'W391'] pycodestyle.MAX_LINE_LENGTH = max_line_length # setup our custom style guide with our custom checker which returns a list # of strings instread of spitting the results at stdout pep8style = pycodestyle.StyleGuide(parse_argv=False, config_file='', checker_class=CustomChecker) try: results = pep8style.input_file(path, lines=code.splitlines(True)) except Exception: _logger().exception('Failed to run PEP8 analysis with data=%r' % request_data) return [] else: messages = [] for line_number, offset, code, text, doc in results: if code in ignore_rules: continue messages.append(('[PEP8] %s: %s' % (code, text), WARNING, line_number - 1)) return messages
def run_pep8(request_data): """ Worker that run the pep8 tool on the current editor text. :returns a list of tuples (msg, msg_type, line_number) """ import pycodestyle from pyqode.python.backend.pep8utils import CustomChecker WARNING = 1 code = request_data['code'] path = request_data['path'] max_line_length = request_data['max_line_length'] ignore_rules = request_data['ignore_rules'] ignore_rules += ['W291', 'W292', 'W293', 'W391'] pycodestyle.MAX_LINE_LENGTH = max_line_length # setup our custom style guide with our custom checker which returns a list # of strings instread of spitting the results at stdout pep8style = pycodestyle.StyleGuide(parse_argv=False, config_file='', checker_class=CustomChecker) try: results = pep8style.input_file(path, lines=code.splitlines(True)) except Exception: _logger().exception('Failed to run PEP8 analysis with data=%r' % request_data) return [] else: messages = [] for line_number, offset, code, text, doc in results: if code in ignore_rules: continue messages.append(('[PEP8] %s: %s' % (code, text), WARNING, line_number - 1)) return messages
[ "Worker", "that", "run", "the", "pep8", "tool", "on", "the", "current", "editor", "text", "." ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/pyqode/python/backend/workers.py#L142-L174
[ "def", "run_pep8", "(", "request_data", ")", ":", "import", "pycodestyle", "from", "pyqode", ".", "python", ".", "backend", ".", "pep8utils", "import", "CustomChecker", "WARNING", "=", "1", "code", "=", "request_data", "[", "'code'", "]", "path", "=", "request_data", "[", "'path'", "]", "max_line_length", "=", "request_data", "[", "'max_line_length'", "]", "ignore_rules", "=", "request_data", "[", "'ignore_rules'", "]", "ignore_rules", "+=", "[", "'W291'", ",", "'W292'", ",", "'W293'", ",", "'W391'", "]", "pycodestyle", ".", "MAX_LINE_LENGTH", "=", "max_line_length", "# setup our custom style guide with our custom checker which returns a list", "# of strings instread of spitting the results at stdout", "pep8style", "=", "pycodestyle", ".", "StyleGuide", "(", "parse_argv", "=", "False", ",", "config_file", "=", "''", ",", "checker_class", "=", "CustomChecker", ")", "try", ":", "results", "=", "pep8style", ".", "input_file", "(", "path", ",", "lines", "=", "code", ".", "splitlines", "(", "True", ")", ")", "except", "Exception", ":", "_logger", "(", ")", ".", "exception", "(", "'Failed to run PEP8 analysis with data=%r'", "%", "request_data", ")", "return", "[", "]", "else", ":", "messages", "=", "[", "]", "for", "line_number", ",", "offset", ",", "code", ",", "text", ",", "doc", "in", "results", ":", "if", "code", "in", "ignore_rules", ":", "continue", "messages", ".", "append", "(", "(", "'[PEP8] %s: %s'", "%", "(", "code", ",", "text", ")", ",", "WARNING", ",", "line_number", "-", "1", ")", ")", "return", "messages" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
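A sketch of the payload run_pep8 expects; pycodestyle must be installed. One caveat worth seeing: the worker extends the caller's ignore_rules list in place with the trailing-whitespace codes, so pass a fresh list each time:

from pyqode.python.backend import workers

request_data = {
    'code': 'x=1\n',            # E225: missing whitespace around operator
    'path': 'demo.py',          # lines= is supplied, so the file need not exist
    'max_line_length': 79,
    'ignore_rules': [],         # mutated in place by the worker (+= W291, ...)
}
for msg, level, line in workers.run_pep8(request_data):
    print(line, level, msg)     # zero-based line, WARNING level, '[PEP8] ...'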
valid
run_pyflakes
Worker that runs a pyflakes code analysis on the current editor text.
pyqode/python/backend/workers.py
def run_pyflakes(request_data): """ Worker that run a frosted (the fork of pyflakes) code analysis on the current editor text. """ global prev_results from pyflakes import checker import _ast WARNING = 1 ERROR = 2 ret_val = [] code = request_data['code'] path = request_data['path'] encoding = request_data['encoding'] if not encoding: encoding = 'utf-8' if not path: path = os.path.join(tempfile.gettempdir(), 'temp.py') if not code: return [] else: # First, compile into an AST and handle syntax errors. try: tree = compile(code.encode(encoding), path, "exec", _ast.PyCF_ONLY_AST) except SyntaxError as value: msg = '[pyFlakes] %s' % value.args[0] (lineno, offset, text) = value.lineno - 1, value.offset, value.text # If there's an encoding problem with the file, the text is None if text is None: # Avoid using msg, since for the only known case, it # contains a bogus message that claims the encoding the # file declared was unknown.s _logger().warning("[SyntaxError] %s: problem decoding source", path) else: ret_val.append((msg, ERROR, lineno)) else: # Okay, it's syntactically valid. Now check it. w = checker.Checker(tree, os.path.split(path)[1]) w.messages.sort(key=lambda m: m.lineno) for message in w.messages: msg = "[pyFlakes] %s" % str(message).split(':')[-1].strip() line = message.lineno - 1 status = WARNING \ if message.__class__ not in PYFLAKES_ERROR_MESSAGES \ else ERROR ret_val.append((msg, status, line)) prev_results = ret_val return ret_val
def run_pyflakes(request_data): """ Worker that run a frosted (the fork of pyflakes) code analysis on the current editor text. """ global prev_results from pyflakes import checker import _ast WARNING = 1 ERROR = 2 ret_val = [] code = request_data['code'] path = request_data['path'] encoding = request_data['encoding'] if not encoding: encoding = 'utf-8' if not path: path = os.path.join(tempfile.gettempdir(), 'temp.py') if not code: return [] else: # First, compile into an AST and handle syntax errors. try: tree = compile(code.encode(encoding), path, "exec", _ast.PyCF_ONLY_AST) except SyntaxError as value: msg = '[pyFlakes] %s' % value.args[0] (lineno, offset, text) = value.lineno - 1, value.offset, value.text # If there's an encoding problem with the file, the text is None if text is None: # Avoid using msg, since for the only known case, it # contains a bogus message that claims the encoding the # file declared was unknown.s _logger().warning("[SyntaxError] %s: problem decoding source", path) else: ret_val.append((msg, ERROR, lineno)) else: # Okay, it's syntactically valid. Now check it. w = checker.Checker(tree, os.path.split(path)[1]) w.messages.sort(key=lambda m: m.lineno) for message in w.messages: msg = "[pyFlakes] %s" % str(message).split(':')[-1].strip() line = message.lineno - 1 status = WARNING \ if message.__class__ not in PYFLAKES_ERROR_MESSAGES \ else ERROR ret_val.append((msg, status, line)) prev_results = ret_val return ret_val
[ "Worker", "that", "run", "a", "frosted", "(", "the", "fork", "of", "pyflakes", ")", "code", "analysis", "on", "the", "current", "editor", "text", "." ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/pyqode/python/backend/workers.py#L186-L235
[ "def", "run_pyflakes", "(", "request_data", ")", ":", "global", "prev_results", "from", "pyflakes", "import", "checker", "import", "_ast", "WARNING", "=", "1", "ERROR", "=", "2", "ret_val", "=", "[", "]", "code", "=", "request_data", "[", "'code'", "]", "path", "=", "request_data", "[", "'path'", "]", "encoding", "=", "request_data", "[", "'encoding'", "]", "if", "not", "encoding", ":", "encoding", "=", "'utf-8'", "if", "not", "path", ":", "path", "=", "os", ".", "path", ".", "join", "(", "tempfile", ".", "gettempdir", "(", ")", ",", "'temp.py'", ")", "if", "not", "code", ":", "return", "[", "]", "else", ":", "# First, compile into an AST and handle syntax errors.", "try", ":", "tree", "=", "compile", "(", "code", ".", "encode", "(", "encoding", ")", ",", "path", ",", "\"exec\"", ",", "_ast", ".", "PyCF_ONLY_AST", ")", "except", "SyntaxError", "as", "value", ":", "msg", "=", "'[pyFlakes] %s'", "%", "value", ".", "args", "[", "0", "]", "(", "lineno", ",", "offset", ",", "text", ")", "=", "value", ".", "lineno", "-", "1", ",", "value", ".", "offset", ",", "value", ".", "text", "# If there's an encoding problem with the file, the text is None", "if", "text", "is", "None", ":", "# Avoid using msg, since for the only known case, it", "# contains a bogus message that claims the encoding the", "# file declared was unknown.s", "_logger", "(", ")", ".", "warning", "(", "\"[SyntaxError] %s: problem decoding source\"", ",", "path", ")", "else", ":", "ret_val", ".", "append", "(", "(", "msg", ",", "ERROR", ",", "lineno", ")", ")", "else", ":", "# Okay, it's syntactically valid. Now check it.", "w", "=", "checker", ".", "Checker", "(", "tree", ",", "os", ".", "path", ".", "split", "(", "path", ")", "[", "1", "]", ")", "w", ".", "messages", ".", "sort", "(", "key", "=", "lambda", "m", ":", "m", ".", "lineno", ")", "for", "message", "in", "w", ".", "messages", ":", "msg", "=", "\"[pyFlakes] %s\"", "%", "str", "(", "message", ")", ".", "split", "(", "':'", ")", "[", "-", "1", "]", ".", "strip", "(", ")", "line", "=", "message", ".", "lineno", "-", "1", "status", "=", "WARNING", "if", "message", ".", "__class__", "not", "in", "PYFLAKES_ERROR_MESSAGES", "else", "ERROR", "ret_val", ".", "append", "(", "(", "msg", ",", "status", ",", "line", ")", ")", "prev_results", "=", "ret_val", "return", "ret_val" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
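A sketch for the pyflakes worker; line numbers in the result are zero-based, and a syntax error comes back as a single ERROR entry instead of checker messages. Assumes pyflakes is installed:

from pyqode.python.backend import workers

request_data = {
    'code': 'import os\n',   # unused import -> one WARNING on line 0
    'path': '',              # empty path falls back to <tempdir>/temp.py
    'encoding': 'utf-8',
}
for msg, status, line in workers.run_pyflakes(request_data):
    print(line, status, msg)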
valid
icon_from_typename
Returns the icon resource filename that corresponds to the given typename. :param name: name of the completion. Used to make the distinction between public and private completions (using the count of leading '_') :param typename: the typename reported by jedi :returns: The associated icon resource filename or None.
pyqode/python/backend/workers.py
def icon_from_typename(name, icon_type): """ Returns the icon resource filename that corresponds to the given typename. :param name: name of the completion. Use to make the distinction between public and private completions (using the count of starting '_') :pram typename: the typename reported by jedi :returns: The associate icon resource filename or None. """ ICONS = { 'CLASS': ICON_CLASS, 'IMPORT': ICON_NAMESPACE, 'STATEMENT': ICON_VAR, 'FORFLOW': ICON_VAR, 'FORSTMT': ICON_VAR, 'WITHSTMT': ICON_VAR, 'GLOBALSTMT': ICON_VAR, 'MODULE': ICON_NAMESPACE, 'KEYWORD': ICON_KEYWORD, 'PARAM': ICON_VAR, 'ARRAY': ICON_VAR, 'INSTANCEELEMENT': ICON_VAR, 'INSTANCE': ICON_VAR, 'PARAM-PRIV': ICON_VAR, 'PARAM-PROT': ICON_VAR, 'FUNCTION': ICON_FUNC, 'DEF': ICON_FUNC, 'FUNCTION-PRIV': ICON_FUNC_PRIVATE, 'FUNCTION-PROT': ICON_FUNC_PROTECTED } ret_val = None icon_type = icon_type.upper() # jedi 0.8 introduced NamedPart class, which have a string instead of being # one if hasattr(name, "string"): name = name.string if icon_type == "FORFLOW" or icon_type == "STATEMENT": icon_type = "PARAM" if icon_type == "PARAM" or icon_type == "FUNCTION": if name.startswith("__"): icon_type += "-PRIV" elif name.startswith("_"): icon_type += "-PROT" if icon_type in ICONS: ret_val = ICONS[icon_type] elif icon_type: _logger().warning("Unimplemented completion icon_type: %s", icon_type) return ret_val
def icon_from_typename(name, icon_type): """ Returns the icon resource filename that corresponds to the given typename. :param name: name of the completion. Use to make the distinction between public and private completions (using the count of starting '_') :pram typename: the typename reported by jedi :returns: The associate icon resource filename or None. """ ICONS = { 'CLASS': ICON_CLASS, 'IMPORT': ICON_NAMESPACE, 'STATEMENT': ICON_VAR, 'FORFLOW': ICON_VAR, 'FORSTMT': ICON_VAR, 'WITHSTMT': ICON_VAR, 'GLOBALSTMT': ICON_VAR, 'MODULE': ICON_NAMESPACE, 'KEYWORD': ICON_KEYWORD, 'PARAM': ICON_VAR, 'ARRAY': ICON_VAR, 'INSTANCEELEMENT': ICON_VAR, 'INSTANCE': ICON_VAR, 'PARAM-PRIV': ICON_VAR, 'PARAM-PROT': ICON_VAR, 'FUNCTION': ICON_FUNC, 'DEF': ICON_FUNC, 'FUNCTION-PRIV': ICON_FUNC_PRIVATE, 'FUNCTION-PROT': ICON_FUNC_PROTECTED } ret_val = None icon_type = icon_type.upper() # jedi 0.8 introduced NamedPart class, which have a string instead of being # one if hasattr(name, "string"): name = name.string if icon_type == "FORFLOW" or icon_type == "STATEMENT": icon_type = "PARAM" if icon_type == "PARAM" or icon_type == "FUNCTION": if name.startswith("__"): icon_type += "-PRIV" elif name.startswith("_"): icon_type += "-PROT" if icon_type in ICONS: ret_val = ICONS[icon_type] elif icon_type: _logger().warning("Unimplemented completion icon_type: %s", icon_type) return ret_val
[ "Returns", "the", "icon", "resource", "filename", "that", "corresponds", "to", "the", "given", "typename", "." ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/pyqode/python/backend/workers.py#L248-L296
[ "def", "icon_from_typename", "(", "name", ",", "icon_type", ")", ":", "ICONS", "=", "{", "'CLASS'", ":", "ICON_CLASS", ",", "'IMPORT'", ":", "ICON_NAMESPACE", ",", "'STATEMENT'", ":", "ICON_VAR", ",", "'FORFLOW'", ":", "ICON_VAR", ",", "'FORSTMT'", ":", "ICON_VAR", ",", "'WITHSTMT'", ":", "ICON_VAR", ",", "'GLOBALSTMT'", ":", "ICON_VAR", ",", "'MODULE'", ":", "ICON_NAMESPACE", ",", "'KEYWORD'", ":", "ICON_KEYWORD", ",", "'PARAM'", ":", "ICON_VAR", ",", "'ARRAY'", ":", "ICON_VAR", ",", "'INSTANCEELEMENT'", ":", "ICON_VAR", ",", "'INSTANCE'", ":", "ICON_VAR", ",", "'PARAM-PRIV'", ":", "ICON_VAR", ",", "'PARAM-PROT'", ":", "ICON_VAR", ",", "'FUNCTION'", ":", "ICON_FUNC", ",", "'DEF'", ":", "ICON_FUNC", ",", "'FUNCTION-PRIV'", ":", "ICON_FUNC_PRIVATE", ",", "'FUNCTION-PROT'", ":", "ICON_FUNC_PROTECTED", "}", "ret_val", "=", "None", "icon_type", "=", "icon_type", ".", "upper", "(", ")", "# jedi 0.8 introduced NamedPart class, which have a string instead of being", "# one", "if", "hasattr", "(", "name", ",", "\"string\"", ")", ":", "name", "=", "name", ".", "string", "if", "icon_type", "==", "\"FORFLOW\"", "or", "icon_type", "==", "\"STATEMENT\"", ":", "icon_type", "=", "\"PARAM\"", "if", "icon_type", "==", "\"PARAM\"", "or", "icon_type", "==", "\"FUNCTION\"", ":", "if", "name", ".", "startswith", "(", "\"__\"", ")", ":", "icon_type", "+=", "\"-PRIV\"", "elif", "name", ".", "startswith", "(", "\"_\"", ")", ":", "icon_type", "+=", "\"-PROT\"", "if", "icon_type", "in", "ICONS", ":", "ret_val", "=", "ICONS", "[", "icon_type", "]", "elif", "icon_type", ":", "_logger", "(", ")", ".", "warning", "(", "\"Unimplemented completion icon_type: %s\"", ",", "icon_type", ")", "return", "ret_val" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
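The public/private/protected suffixing above is worth seeing in isolation; this standalone sketch reproduces just that rule (names carrying a .string attribute, jedi 0.8's NamedPart, are unwrapped before the test):

def visibility_suffix(name):
    # Mirror of the leading-underscore test used by icon_from_typename:
    # two underscores -> private, one -> protected, none -> public.
    if name.startswith('__'):
        return '-PRIV'
    if name.startswith('_'):
        return '-PROT'
    return ''

assert visibility_suffix('__secret') == '-PRIV'
assert visibility_suffix('_cache') == '-PROT'
assert visibility_suffix('run') == ''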
valid
JediCompletionProvider.complete
Completes Python code using `jedi`_. :returns: a list of completions.
pyqode/python/backend/workers.py
def complete(code, line, column, path, encoding, prefix): """ Completes python code using `jedi`_. :returns: a list of completion. """ ret_val = [] try: script = jedi.Script(code, line + 1, column, path, encoding) completions = script.completions() print('completions: %r' % completions) except jedi.NotFoundError: completions = [] for completion in completions: ret_val.append({ 'name': completion.name, 'icon': icon_from_typename( completion.name, completion.type), 'tooltip': completion.description}) return ret_val
def complete(code, line, column, path, encoding, prefix): """ Completes python code using `jedi`_. :returns: a list of completion. """ ret_val = [] try: script = jedi.Script(code, line + 1, column, path, encoding) completions = script.completions() print('completions: %r' % completions) except jedi.NotFoundError: completions = [] for completion in completions: ret_val.append({ 'name': completion.name, 'icon': icon_from_typename( completion.name, completion.type), 'tooltip': completion.description}) return ret_val
[ "Completes", "python", "code", "using", "jedi", "_", "." ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/pyqode/python/backend/workers.py#L307-L326
[ "def", "complete", "(", "code", ",", "line", ",", "column", ",", "path", ",", "encoding", ",", "prefix", ")", ":", "ret_val", "=", "[", "]", "try", ":", "script", "=", "jedi", ".", "Script", "(", "code", ",", "line", "+", "1", ",", "column", ",", "path", ",", "encoding", ")", "completions", "=", "script", ".", "completions", "(", ")", "print", "(", "'completions: %r'", "%", "completions", ")", "except", "jedi", ".", "NotFoundError", ":", "completions", "=", "[", "]", "for", "completion", "in", "completions", ":", "ret_val", ".", "append", "(", "{", "'name'", ":", "completion", ".", "name", ",", "'icon'", ":", "icon_from_typename", "(", "completion", ".", "name", ",", "completion", ".", "type", ")", ",", "'tooltip'", ":", "completion", ".", "description", "}", ")", "return", "ret_val" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
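The signature has no self, so complete is presumably a staticmethod on JediCompletionProvider (an assumption; only the body is recorded here). A direct-call sketch, with jedi installed:

from pyqode.python.backend.workers import JediCompletionProvider

code = 'import os\nos.pa'
completions = JediCompletionProvider.complete(
    code, line=1, column=5, path='demo.py', encoding='utf-8', prefix='')
# `prefix` is accepted but unused by this implementation; note the stray
# debug print() in the body fires on every call.
for c in completions:
    print(c['name'], c['tooltip'])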
valid
make_python_patterns
Strongly inspired by idlelib.ColorDelegator.make_pat
pyqode/python/modes/sh.py
def make_python_patterns(additional_keywords=[], additional_builtins=[]): """Strongly inspired from idlelib.ColorDelegator.make_pat""" kw = r"\b" + any("keyword", kwlist + additional_keywords) + r"\b" kw_namespace = r"\b" + any("namespace", kw_namespace_list) + r"\b" word_operators = r"\b" + any("operator_word", wordop_list) + r"\b" builtinlist = [str(name) for name in dir(builtins) if not name.startswith('_')] + additional_builtins for v in ['None', 'True', 'False']: builtinlist.remove(v) builtin = r"([^.'\"\\#]\b|^)" + any("builtin", builtinlist) + r"\b" builtin_fct = any("builtin_fct", [r'_{2}[a-zA-Z_]*_{2}']) comment = any("comment", [r"#[^\n]*"]) instance = any("instance", [r"\bself\b", r"\bcls\b"]) decorator = any('decorator', [r'@\w*', r'.setter']) number = any("number", [r"\b[+-]?[0-9]+[lLjJ]?\b", r"\b[+-]?0[xX][0-9A-Fa-f]+[lL]?\b", r"\b[+-]?0[oO][0-7]+[lL]?\b", r"\b[+-]?0[bB][01]+[lL]?\b", r"\b[+-]?[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?[jJ]?\b"]) sqstring = r"(\b[rRuU])?'[^'\\\n]*(\\.[^'\\\n]*)*'?" dqstring = r'(\b[rRuU])?"[^"\\\n]*(\\.[^"\\\n]*)*"?' uf_sqstring = r"(\b[rRuU])?'[^'\\\n]*(\\.[^'\\\n]*)*(\\)$(?!')$" uf_dqstring = r'(\b[rRuU])?"[^"\\\n]*(\\.[^"\\\n]*)*(\\)$(?!")$' sq3string = r"(\b[rRuU])?'''[^'\\]*((\\.|'(?!''))[^'\\]*)*(''')?" dq3string = r'(\b[rRuU])?"""[^"\\]*((\\.|"(?!""))[^"\\]*)*(""")?' uf_sq3string = r"(\b[rRuU])?'''[^'\\]*((\\.|'(?!''))[^'\\]*)*(\\)?(?!''')$" uf_dq3string = r'(\b[rRuU])?"""[^"\\]*((\\.|"(?!""))[^"\\]*)*(\\)?(?!""")$' string = any("string", [sq3string, dq3string, sqstring, dqstring]) ufstring1 = any("uf_sqstring", [uf_sqstring]) ufstring2 = any("uf_dqstring", [uf_dqstring]) ufstring3 = any("uf_sq3string", [uf_sq3string]) ufstring4 = any("uf_dq3string", [uf_dq3string]) return "|".join([instance, decorator, kw, kw_namespace, builtin, word_operators, builtin_fct, comment, ufstring1, ufstring2, ufstring3, ufstring4, string, number, any("SYNC", [r"\n"])])
def make_python_patterns(additional_keywords=[], additional_builtins=[]): """Strongly inspired from idlelib.ColorDelegator.make_pat""" kw = r"\b" + any("keyword", kwlist + additional_keywords) + r"\b" kw_namespace = r"\b" + any("namespace", kw_namespace_list) + r"\b" word_operators = r"\b" + any("operator_word", wordop_list) + r"\b" builtinlist = [str(name) for name in dir(builtins) if not name.startswith('_')] + additional_builtins for v in ['None', 'True', 'False']: builtinlist.remove(v) builtin = r"([^.'\"\\#]\b|^)" + any("builtin", builtinlist) + r"\b" builtin_fct = any("builtin_fct", [r'_{2}[a-zA-Z_]*_{2}']) comment = any("comment", [r"#[^\n]*"]) instance = any("instance", [r"\bself\b", r"\bcls\b"]) decorator = any('decorator', [r'@\w*', r'.setter']) number = any("number", [r"\b[+-]?[0-9]+[lLjJ]?\b", r"\b[+-]?0[xX][0-9A-Fa-f]+[lL]?\b", r"\b[+-]?0[oO][0-7]+[lL]?\b", r"\b[+-]?0[bB][01]+[lL]?\b", r"\b[+-]?[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?[jJ]?\b"]) sqstring = r"(\b[rRuU])?'[^'\\\n]*(\\.[^'\\\n]*)*'?" dqstring = r'(\b[rRuU])?"[^"\\\n]*(\\.[^"\\\n]*)*"?' uf_sqstring = r"(\b[rRuU])?'[^'\\\n]*(\\.[^'\\\n]*)*(\\)$(?!')$" uf_dqstring = r'(\b[rRuU])?"[^"\\\n]*(\\.[^"\\\n]*)*(\\)$(?!")$' sq3string = r"(\b[rRuU])?'''[^'\\]*((\\.|'(?!''))[^'\\]*)*(''')?" dq3string = r'(\b[rRuU])?"""[^"\\]*((\\.|"(?!""))[^"\\]*)*(""")?' uf_sq3string = r"(\b[rRuU])?'''[^'\\]*((\\.|'(?!''))[^'\\]*)*(\\)?(?!''')$" uf_dq3string = r'(\b[rRuU])?"""[^"\\]*((\\.|"(?!""))[^"\\]*)*(\\)?(?!""")$' string = any("string", [sq3string, dq3string, sqstring, dqstring]) ufstring1 = any("uf_sqstring", [uf_sqstring]) ufstring2 = any("uf_dqstring", [uf_dqstring]) ufstring3 = any("uf_sq3string", [uf_sq3string]) ufstring4 = any("uf_dq3string", [uf_dq3string]) return "|".join([instance, decorator, kw, kw_namespace, builtin, word_operators, builtin_fct, comment, ufstring1, ufstring2, ufstring3, ufstring4, string, number, any("SYNC", [r"\n"])])
[ "Strongly", "inspired", "from", "idlelib", ".", "ColorDelegator", ".", "make_pat" ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/pyqode/python/modes/sh.py#L61-L97
[ "def", "make_python_patterns", "(", "additional_keywords", "=", "[", "]", ",", "additional_builtins", "=", "[", "]", ")", ":", "kw", "=", "r\"\\b\"", "+", "any", "(", "\"keyword\"", ",", "kwlist", "+", "additional_keywords", ")", "+", "r\"\\b\"", "kw_namespace", "=", "r\"\\b\"", "+", "any", "(", "\"namespace\"", ",", "kw_namespace_list", ")", "+", "r\"\\b\"", "word_operators", "=", "r\"\\b\"", "+", "any", "(", "\"operator_word\"", ",", "wordop_list", ")", "+", "r\"\\b\"", "builtinlist", "=", "[", "str", "(", "name", ")", "for", "name", "in", "dir", "(", "builtins", ")", "if", "not", "name", ".", "startswith", "(", "'_'", ")", "]", "+", "additional_builtins", "for", "v", "in", "[", "'None'", ",", "'True'", ",", "'False'", "]", ":", "builtinlist", ".", "remove", "(", "v", ")", "builtin", "=", "r\"([^.'\\\"\\\\#]\\b|^)\"", "+", "any", "(", "\"builtin\"", ",", "builtinlist", ")", "+", "r\"\\b\"", "builtin_fct", "=", "any", "(", "\"builtin_fct\"", ",", "[", "r'_{2}[a-zA-Z_]*_{2}'", "]", ")", "comment", "=", "any", "(", "\"comment\"", ",", "[", "r\"#[^\\n]*\"", "]", ")", "instance", "=", "any", "(", "\"instance\"", ",", "[", "r\"\\bself\\b\"", ",", "r\"\\bcls\\b\"", "]", ")", "decorator", "=", "any", "(", "'decorator'", ",", "[", "r'@\\w*'", ",", "r'.setter'", "]", ")", "number", "=", "any", "(", "\"number\"", ",", "[", "r\"\\b[+-]?[0-9]+[lLjJ]?\\b\"", ",", "r\"\\b[+-]?0[xX][0-9A-Fa-f]+[lL]?\\b\"", ",", "r\"\\b[+-]?0[oO][0-7]+[lL]?\\b\"", ",", "r\"\\b[+-]?0[bB][01]+[lL]?\\b\"", ",", "r\"\\b[+-]?[0-9]+(?:\\.[0-9]+)?(?:[eE][+-]?[0-9]+)?[jJ]?\\b\"", "]", ")", "sqstring", "=", "r\"(\\b[rRuU])?'[^'\\\\\\n]*(\\\\.[^'\\\\\\n]*)*'?\"", "dqstring", "=", "r'(\\b[rRuU])?\"[^\"\\\\\\n]*(\\\\.[^\"\\\\\\n]*)*\"?'", "uf_sqstring", "=", "r\"(\\b[rRuU])?'[^'\\\\\\n]*(\\\\.[^'\\\\\\n]*)*(\\\\)$(?!')$\"", "uf_dqstring", "=", "r'(\\b[rRuU])?\"[^\"\\\\\\n]*(\\\\.[^\"\\\\\\n]*)*(\\\\)$(?!\")$'", "sq3string", "=", "r\"(\\b[rRuU])?'''[^'\\\\]*((\\\\.|'(?!''))[^'\\\\]*)*(''')?\"", "dq3string", "=", "r'(\\b[rRuU])?\"\"\"[^\"\\\\]*((\\\\.|\"(?!\"\"))[^\"\\\\]*)*(\"\"\")?'", "uf_sq3string", "=", "r\"(\\b[rRuU])?'''[^'\\\\]*((\\\\.|'(?!''))[^'\\\\]*)*(\\\\)?(?!''')$\"", "uf_dq3string", "=", "r'(\\b[rRuU])?\"\"\"[^\"\\\\]*((\\\\.|\"(?!\"\"))[^\"\\\\]*)*(\\\\)?(?!\"\"\")$'", "string", "=", "any", "(", "\"string\"", ",", "[", "sq3string", ",", "dq3string", ",", "sqstring", ",", "dqstring", "]", ")", "ufstring1", "=", "any", "(", "\"uf_sqstring\"", ",", "[", "uf_sqstring", "]", ")", "ufstring2", "=", "any", "(", "\"uf_dqstring\"", ",", "[", "uf_dqstring", "]", ")", "ufstring3", "=", "any", "(", "\"uf_sq3string\"", ",", "[", "uf_sq3string", "]", ")", "ufstring4", "=", "any", "(", "\"uf_dq3string\"", ",", "[", "uf_dq3string", "]", ")", "return", "\"|\"", ".", "join", "(", "[", "instance", ",", "decorator", ",", "kw", ",", "kw_namespace", ",", "builtin", ",", "word_operators", ",", "builtin_fct", ",", "comment", ",", "ufstring1", ",", "ufstring2", ",", "ufstring3", ",", "ufstring4", ",", "string", ",", "number", ",", "any", "(", "\"SYNC\"", ",", "[", "r\"\\n\"", "]", ")", "]", ")" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
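The any() helper this builder leans on is not part of this record; in the idlelib/Spyder lineage it is conventionally a named-group constructor like the sketch below (an assumption, not taken from this source). Shadowing the builtin any is deliberate here, to match how the pattern builder calls it:

import re

def any(name, alternates):
    # Wrap the alternates in a named group so a match can be classified
    # by m.lastgroup (e.g. 'comment', 'string', 'number').
    return "(?P<%s>" % name + "|".join(alternates) + ")"

m = re.compile(any("number", [r"\b\d+\b"])).search("x = 42")
assert m.lastgroup == "number"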
valid
GoToAssignmentsMode._check_word_cursor
Request a go to assignment. :param tc: Text cursor which contains the text whose assignment we must look for. Can be None to use the text under the current text cursor. :type tc: QtGui.QTextCursor
pyqode/python/modes/goto_assignements.py
def _check_word_cursor(self, tc=None): """ Request a go to assignment. :param tc: Text cursor which contains the text that we must look for its assignment. Can be None to go to the text that is under the text cursor. :type tc: QtGui.QTextCursor """ if not tc: tc = TextHelper(self.editor).word_under_cursor() request_data = { 'code': self.editor.toPlainText(), 'line': tc.blockNumber(), 'column': tc.columnNumber(), 'path': self.editor.file.path, 'encoding': self.editor.file.encoding } try: self.editor.backend.send_request( workers.goto_assignments, request_data, on_receive=self._on_results_available) except NotRunning: pass
def _check_word_cursor(self, tc=None): """ Request a go to assignment. :param tc: Text cursor which contains the text that we must look for its assignment. Can be None to go to the text that is under the text cursor. :type tc: QtGui.QTextCursor """ if not tc: tc = TextHelper(self.editor).word_under_cursor() request_data = { 'code': self.editor.toPlainText(), 'line': tc.blockNumber(), 'column': tc.columnNumber(), 'path': self.editor.file.path, 'encoding': self.editor.file.encoding } try: self.editor.backend.send_request( workers.goto_assignments, request_data, on_receive=self._on_results_available) except NotRunning: pass
[ "Request", "a", "go", "to", "assignment", "." ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/pyqode/python/modes/goto_assignements.py#L92-L116
[ "def", "_check_word_cursor", "(", "self", ",", "tc", "=", "None", ")", ":", "if", "not", "tc", ":", "tc", "=", "TextHelper", "(", "self", ".", "editor", ")", ".", "word_under_cursor", "(", ")", "request_data", "=", "{", "'code'", ":", "self", ".", "editor", ".", "toPlainText", "(", ")", ",", "'line'", ":", "tc", ".", "blockNumber", "(", ")", ",", "'column'", ":", "tc", ".", "columnNumber", "(", ")", ",", "'path'", ":", "self", ".", "editor", ".", "file", ".", "path", ",", "'encoding'", ":", "self", ".", "editor", ".", "file", ".", "encoding", "}", "try", ":", "self", ".", "editor", ".", "backend", ".", "send_request", "(", "workers", ".", "goto_assignments", ",", "request_data", ",", "on_receive", "=", "self", ".", "_on_results_available", ")", "except", "NotRunning", ":", "pass" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
valid
GoToAssignmentsMode._unique
Not performant but works.
pyqode/python/modes/goto_assignements.py
def _unique(self, seq): """ Not performant but works. """ # order preserving checked = [] for e in seq: present = False for c in checked: if str(c) == str(e): present = True break if not present: checked.append(e) return checked
def _unique(self, seq): """ Not performant but works. """ # order preserving checked = [] for e in seq: present = False for c in checked: if str(c) == str(e): present = True break if not present: checked.append(e) return checked
[ "Not", "performant", "but", "works", "." ]
pyQode/pyqode.python
python
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/pyqode/python/modes/goto_assignements.py#L133-L147
[ "def", "_unique", "(", "self", ",", "seq", ")", ":", "# order preserving", "checked", "=", "[", "]", "for", "e", "in", "seq", ":", "present", "=", "False", "for", "c", "in", "checked", ":", "if", "str", "(", "c", ")", "==", "str", "(", "e", ")", ":", "present", "=", "True", "break", "if", "not", "present", ":", "checked", ".", "append", "(", "e", ")", "return", "checked" ]
821e000ea2e2638a82ce095a559e69afd9bd4f38
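As the docstring admits, the loop above is quadratic; it preserves order and compares by str() so that definition objects without a usable __eq__ still deduplicate. A linear-time equivalent keyed the same way, for comparison:

def unique(seq):
    # Order-preserving dedup on str(e), using a set for O(1) membership tests.
    seen = set()
    out = []
    for e in seq:
        key = str(e)
        if key not in seen:
            seen.add(key)
            out.append(e)
    return out

assert unique([1, 2, 1, 3]) == [1, 2, 3]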
valid
read_bgen
r""" Read a given BGEN file. Parameters ---------- filepath : str A bgen file path. metafile_filepath : str, optional If ``None``, it will try to read the ``filepath + ".metadata"`` file. If this is not possible, it will create one. It tries to create one at ``filepath + ".metadata"``. If that is also no possible, it tries to create one at a temporary folder. samples_filepath : str, optional A sample file in `gen format <https://goo.gl/bCzo7m>`_. If ``samples_filepath`` is provided, sample ids are read from this file. Otherwise, it reads from the bgen file itself if possible. Defaults to ``None``. verbose : bool, optional ``True`` to show progress; ``False`` otherwise. Defaults to ``True``. Returns ------- variants : :class:`dask.dataFrame.DataFrame` Variant position, chromosomes, rsids, etc. samples : :class:`pandas.Series` Sample identifications. genotype : list List of genotypes. Examples -------- .. doctest:: >>> from bgen_reader import example_files, read_bgen >>> >>> with example_files("haplotypes.bgen") as filepath: ... bgen = read_bgen(filepath, verbose=False) ... variants = bgen["variants"] ... samples = bgen["samples"] ... ... v = variants.loc[0].compute() ... g = bgen["genotype"][0].compute() ... print(v) ... print(samples) ... print(g["probs"][0]) id rsid chrom pos nalleles allele_ids vaddr 0 SNP1 RS1 1 1 2 A,G 102 0 sample_0 1 sample_1 2 sample_2 3 sample_3 Name: id, dtype: object [1. 0. 1. 0.]
bgen_reader/_reader.py
def read_bgen(filepath, metafile_filepath=None, samples_filepath=None, verbose=True): r""" Read a given BGEN file. Parameters ---------- filepath : str A bgen file path. metafile_filepath : str, optional If ``None``, it will try to read the ``filepath + ".metadata"`` file. If this is not possible, it will create one. It tries to create one at ``filepath + ".metadata"``. If that is also no possible, it tries to create one at a temporary folder. samples_filepath : str, optional A sample file in `gen format <https://goo.gl/bCzo7m>`_. If ``samples_filepath`` is provided, sample ids are read from this file. Otherwise, it reads from the bgen file itself if possible. Defaults to ``None``. verbose : bool, optional ``True`` to show progress; ``False`` otherwise. Defaults to ``True``. Returns ------- variants : :class:`dask.dataFrame.DataFrame` Variant position, chromosomes, rsids, etc. samples : :class:`pandas.Series` Sample identifications. genotype : list List of genotypes. Examples -------- .. doctest:: >>> from bgen_reader import example_files, read_bgen >>> >>> with example_files("haplotypes.bgen") as filepath: ... bgen = read_bgen(filepath, verbose=False) ... variants = bgen["variants"] ... samples = bgen["samples"] ... ... v = variants.loc[0].compute() ... g = bgen["genotype"][0].compute() ... print(v) ... print(samples) ... print(g["probs"][0]) id rsid chrom pos nalleles allele_ids vaddr 0 SNP1 RS1 1 1 2 A,G 102 0 sample_0 1 sample_1 2 sample_2 3 sample_3 Name: id, dtype: object [1. 0. 1. 0.] """ assert_file_exist(filepath) assert_file_readable(filepath) metafile_filepath = _get_valid_metafile_filepath(filepath, metafile_filepath) if not os.path.exists(metafile_filepath): if verbose: print( f"We will create the metafile `{metafile_filepath}`. This file will " "speed up further\nreads and only need to be created once. So, please, " "bear with me." ) create_metafile(filepath, metafile_filepath, verbose) samples = get_samples(filepath, samples_filepath, verbose) variants = map_metadata(filepath, metafile_filepath) genotype = map_genotype(filepath, metafile_filepath, verbose) return dict(variants=variants, samples=samples, genotype=genotype)
def read_bgen(filepath, metafile_filepath=None, samples_filepath=None, verbose=True): r""" Read a given BGEN file. Parameters ---------- filepath : str A bgen file path. metafile_filepath : str, optional If ``None``, it will try to read the ``filepath + ".metadata"`` file. If this is not possible, it will create one. It tries to create one at ``filepath + ".metadata"``. If that is also no possible, it tries to create one at a temporary folder. samples_filepath : str, optional A sample file in `gen format <https://goo.gl/bCzo7m>`_. If ``samples_filepath`` is provided, sample ids are read from this file. Otherwise, it reads from the bgen file itself if possible. Defaults to ``None``. verbose : bool, optional ``True`` to show progress; ``False`` otherwise. Defaults to ``True``. Returns ------- variants : :class:`dask.dataFrame.DataFrame` Variant position, chromosomes, rsids, etc. samples : :class:`pandas.Series` Sample identifications. genotype : list List of genotypes. Examples -------- .. doctest:: >>> from bgen_reader import example_files, read_bgen >>> >>> with example_files("haplotypes.bgen") as filepath: ... bgen = read_bgen(filepath, verbose=False) ... variants = bgen["variants"] ... samples = bgen["samples"] ... ... v = variants.loc[0].compute() ... g = bgen["genotype"][0].compute() ... print(v) ... print(samples) ... print(g["probs"][0]) id rsid chrom pos nalleles allele_ids vaddr 0 SNP1 RS1 1 1 2 A,G 102 0 sample_0 1 sample_1 2 sample_2 3 sample_3 Name: id, dtype: object [1. 0. 1. 0.] """ assert_file_exist(filepath) assert_file_readable(filepath) metafile_filepath = _get_valid_metafile_filepath(filepath, metafile_filepath) if not os.path.exists(metafile_filepath): if verbose: print( f"We will create the metafile `{metafile_filepath}`. This file will " "speed up further\nreads and only need to be created once. So, please, " "bear with me." ) create_metafile(filepath, metafile_filepath, verbose) samples = get_samples(filepath, samples_filepath, verbose) variants = map_metadata(filepath, metafile_filepath) genotype = map_genotype(filepath, metafile_filepath, verbose) return dict(variants=variants, samples=samples, genotype=genotype)
[ "r", "Read", "a", "given", "BGEN", "file", "." ]
limix/bgen-reader-py
python
https://github.com/limix/bgen-reader-py/blob/3f66a39e15a71b981e8c5f887a4adc3ad486a45f/bgen_reader/_reader.py#L16-L87
[ "def", "read_bgen", "(", "filepath", ",", "metafile_filepath", "=", "None", ",", "samples_filepath", "=", "None", ",", "verbose", "=", "True", ")", ":", "assert_file_exist", "(", "filepath", ")", "assert_file_readable", "(", "filepath", ")", "metafile_filepath", "=", "_get_valid_metafile_filepath", "(", "filepath", ",", "metafile_filepath", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "metafile_filepath", ")", ":", "if", "verbose", ":", "print", "(", "f\"We will create the metafile `{metafile_filepath}`. This file will \"", "\"speed up further\\nreads and only need to be created once. So, please, \"", "\"bear with me.\"", ")", "create_metafile", "(", "filepath", ",", "metafile_filepath", ",", "verbose", ")", "samples", "=", "get_samples", "(", "filepath", ",", "samples_filepath", ",", "verbose", ")", "variants", "=", "map_metadata", "(", "filepath", ",", "metafile_filepath", ")", "genotype", "=", "map_genotype", "(", "filepath", ",", "metafile_filepath", ",", "verbose", ")", "return", "dict", "(", "variants", "=", "variants", ",", "samples", "=", "samples", ",", "genotype", "=", "genotype", ")" ]
3f66a39e15a71b981e8c5f887a4adc3ad486a45f
valid
CheckFileParser._validateDirectives
We should enforce for every CHECK-NOT and CHECK-NOT-L directive that the next directive (if it exists) is a CHECK or CHECK-L directive
OutputCheck/CheckFileParser.py
def _validateDirectives(self, directiveList, checkFileName): if len(directiveList) == 0: raise ParsingException("'{file}' does not contain any CHECK directives".format(file=checkFileName)) from . import Directives """ We should enforce for every CHECK-NOT and CHECK-NOT-L directive that the next directive (if it exists) is a CHECK or CHECK-L directive """ last = len(directiveList) -1 supportedDirectives = [ Directives.Check, Directives.CheckLiteral ] for (index,directive) in enumerate(directiveList): if isA(directive, [Directives.CheckNot, Directives.CheckNotLiteral]): if index < last: after = directiveList[index +1] if not isA(after, supportedDirectives): requiredTypes = " or ".join( [ "CHECK{suffix}".format(suffix=d.directiveToken()) for d in supportedDirectives]) raise ParsingException("{directive} must have a {requiredTypes} directive after it instead of a {bad}".format( directive=directive, requiredTypes=requiredTypes, check=Directives.Check.directiveToken(), bad=after) )
def _validateDirectives(self, directiveList, checkFileName): if len(directiveList) == 0: raise ParsingException("'{file}' does not contain any CHECK directives".format(file=checkFileName)) from . import Directives """ We should enforce for every CHECK-NOT and CHECK-NOT-L directive that the next directive (if it exists) is a CHECK or CHECK-L directive """ last = len(directiveList) -1 supportedDirectives = [ Directives.Check, Directives.CheckLiteral ] for (index,directive) in enumerate(directiveList): if isA(directive, [Directives.CheckNot, Directives.CheckNotLiteral]): if index < last: after = directiveList[index +1] if not isA(after, supportedDirectives): requiredTypes = " or ".join( [ "CHECK{suffix}".format(suffix=d.directiveToken()) for d in supportedDirectives]) raise ParsingException("{directive} must have a {requiredTypes} directive after it instead of a {bad}".format( directive=directive, requiredTypes=requiredTypes, check=Directives.Check.directiveToken(), bad=after) )
[ "We", "should", "enforce", "for", "every", "CHECK", "-", "NOT", "and", "CHECK", "-", "NOT", "-", "L", "directive", "that", "the", "next", "directive", "(", "if", "it", "exists", ")", "is", "a", "CHECK", "or", "CHECK", "-", "L", "directive" ]
stp/OutputCheck
python
https://github.com/stp/OutputCheck/blob/eab62a5dd5129f6a4ebfbe4bbe41d35611f7c48d/OutputCheck/CheckFileParser.py#L109-L132
[ "def", "_validateDirectives", "(", "self", ",", "directiveList", ",", "checkFileName", ")", ":", "if", "len", "(", "directiveList", ")", "==", "0", ":", "raise", "ParsingException", "(", "\"'{file}' does not contain any CHECK directives\"", ".", "format", "(", "file", "=", "checkFileName", ")", ")", "from", ".", "import", "Directives", "last", "=", "len", "(", "directiveList", ")", "-", "1", "supportedDirectives", "=", "[", "Directives", ".", "Check", ",", "Directives", ".", "CheckLiteral", "]", "for", "(", "index", ",", "directive", ")", "in", "enumerate", "(", "directiveList", ")", ":", "if", "isA", "(", "directive", ",", "[", "Directives", ".", "CheckNot", ",", "Directives", ".", "CheckNotLiteral", "]", ")", ":", "if", "index", "<", "last", ":", "after", "=", "directiveList", "[", "index", "+", "1", "]", "if", "not", "isA", "(", "after", ",", "supportedDirectives", ")", ":", "requiredTypes", "=", "\" or \"", ".", "join", "(", "[", "\"CHECK{suffix}\"", ".", "format", "(", "suffix", "=", "d", ".", "directiveToken", "(", ")", ")", "for", "d", "in", "supportedDirectives", "]", ")", "raise", "ParsingException", "(", "\"{directive} must have a {requiredTypes} directive after it instead of a {bad}\"", ".", "format", "(", "directive", "=", "directive", ",", "requiredTypes", "=", "requiredTypes", ",", "check", "=", "Directives", ".", "Check", ".", "directiveToken", "(", ")", ",", "bad", "=", "after", ")", ")" ]
eab62a5dd5129f6a4ebfbe4bbe41d35611f7c48d
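A self-contained sketch of the rule enforced above, with stub classes standing in for OutputCheck's Directives module: every NOT-style directive must be followed by a plain CHECK-style directive, while a trailing NOT at the end of the list is allowed.

class Check: pass
class CheckNot: pass

def validate(directives):
    # Only directives that have a successor are constrained, hence [:-1].
    for i, d in enumerate(directives[:-1]):
        if isinstance(d, CheckNot) and not isinstance(directives[i + 1], Check):
            raise ValueError('CHECK-NOT must be followed by CHECK')

validate([CheckNot(), Check()])          # fine
try:
    validate([CheckNot(), CheckNot()])   # rejected, as in the parser
except ValueError as err:
    print(err)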
valid
CheckFileParser._substituteCheckPattern
Do various ${} substitutions
OutputCheck/CheckFileParser.py
def _substituteCheckPattern(self, inputString, lineNumber, lastLineNumber, checkFileName, isForRegex): """ Do various ${} substitutions """ assert isinstance(inputString, str) assert isinstance(lineNumber, int) assert isinstance(lastLineNumber, int) assert isinstance(checkFileName, str) """ Do ${LINE}, ${LINE:+N}, and ${LINE:-N} substitutions. To escape prepend with slash """ sPattern = r'\$\{LINE(\:(?P<sign>\+|-)(?P<offset>\d+))?\}' matcher = re.compile(sPattern) result = "" loop = True start = 0 end = len(inputString) # Not inclusive while loop: m = matcher.search(inputString, start, end) if not m: # No match so copy verbatim _logger.debug('Result is currently "{}"'.format(result)) result += inputString[start:end] break # And we're done :) else: prevIndex = max(0, m.start() -1) _logger.debug('Previous character before match is at index {index} "{char}"'.format(index=prevIndex, char=inputString[prevIndex])) if inputString[prevIndex] == "\\": # User asked to escape _logger.debug('Substitution is escaped') _logger.debug('Result is currently "{}"'.format(result)) result += inputString[start:prevIndex] # Copy before escaping character _logger.debug('Result is currently "{}"'.format(result)) result += inputString[(prevIndex+1):m.end()] # Copy the ${LINE..} verbatim start = min(m.end(), end) _logger.debug('Result is currently "{}"'.format(result)) _logger.debug('Next search is {start}:{end} = "{ss}"'.format(start=start, end=end, ss=inputString[start:end])) else: _logger.debug('Result is currently "{}"'.format(result)) _logger.debug('Doing subsitution. Found at {begin}:{end} = {ss}'.format(begin=m.start(),end=m.end(), ss=inputString[m.start():m.end()])) result += inputString[start:m.start()] # Copy before substitution starts if m.groupdict()['sign'] == None: # No offset just substitute line number _logger.debug('No offset') result += str(lineNumber) else: offset = 1 if m.groupdict()['sign'] == '+' else -1 offset *= int(m.groupdict()['offset']) _logger.debug('Offset is {}'.format(offset)) requestedLineNumber = lineNumber + offset _logger.debug('Request line number to print is {}'.format(requestedLineNumber)) if requestedLineNumber <= 0: raise ParsingException('{file}:{line}:{col} offset gives line number < 1'.format(file=checkFileName, line=lineNumber, col=m.start())) elif requestedLineNumber > lastLineNumber: raise ParsingException('{file}:{line}:{col} offset gives line number past the end of file'.format(file=checkFileName, line=lineNumber, col=m.start())) result += str(requestedLineNumber) start = min(m.end(),end) _logger.debug('Next search is {start}:{end} = "{ss}"'.format(start=start, end=end, ss=inputString[start:end])) """ Do simple ${...} substitutions """ # Do ${CHECKFILE_NAME} substitution basenameCheckFileName = os.path.basename(checkFileName) assert basenameCheckFileName.count('\\') == 0 result = self._simpleSubstitution("CHECKFILE_NAME", basenameCheckFileName, result) # Do ${CHECKFILE_ABS_PATH} substitution abspathCheckFileName = os.path.abspath(checkFileName) if isForRegex: # Note slash substitution is for Windows paths (e.g. "c:\mything\foo.txt") which can break regexes if we don't # correctly escape them. abspathCheckFileName = abspathCheckFileName.replace('\\', '\\\\') result = self._simpleSubstitution("CHECKFILE_ABS_PATH", abspathCheckFileName, result) assert len(result) != 0 return result
def _substituteCheckPattern(self, inputString, lineNumber, lastLineNumber, checkFileName, isForRegex): """ Do various ${} substitutions """ assert isinstance(inputString, str) assert isinstance(lineNumber, int) assert isinstance(lastLineNumber, int) assert isinstance(checkFileName, str) """ Do ${LINE}, ${LINE:+N}, and ${LINE:-N} substitutions. To escape prepend with slash """ sPattern = r'\$\{LINE(\:(?P<sign>\+|-)(?P<offset>\d+))?\}' matcher = re.compile(sPattern) result = "" loop = True start = 0 end = len(inputString) # Not inclusive while loop: m = matcher.search(inputString, start, end) if not m: # No match so copy verbatim _logger.debug('Result is currently "{}"'.format(result)) result += inputString[start:end] break # And we're done :) else: prevIndex = max(0, m.start() -1) _logger.debug('Previous character before match is at index {index} "{char}"'.format(index=prevIndex, char=inputString[prevIndex])) if inputString[prevIndex] == "\\": # User asked to escape _logger.debug('Substitution is escaped') _logger.debug('Result is currently "{}"'.format(result)) result += inputString[start:prevIndex] # Copy before escaping character _logger.debug('Result is currently "{}"'.format(result)) result += inputString[(prevIndex+1):m.end()] # Copy the ${LINE..} verbatim start = min(m.end(), end) _logger.debug('Result is currently "{}"'.format(result)) _logger.debug('Next search is {start}:{end} = "{ss}"'.format(start=start, end=end, ss=inputString[start:end])) else: _logger.debug('Result is currently "{}"'.format(result)) _logger.debug('Doing subsitution. Found at {begin}:{end} = {ss}'.format(begin=m.start(),end=m.end(), ss=inputString[m.start():m.end()])) result += inputString[start:m.start()] # Copy before substitution starts if m.groupdict()['sign'] == None: # No offset just substitute line number _logger.debug('No offset') result += str(lineNumber) else: offset = 1 if m.groupdict()['sign'] == '+' else -1 offset *= int(m.groupdict()['offset']) _logger.debug('Offset is {}'.format(offset)) requestedLineNumber = lineNumber + offset _logger.debug('Request line number to print is {}'.format(requestedLineNumber)) if requestedLineNumber <= 0: raise ParsingException('{file}:{line}:{col} offset gives line number < 1'.format(file=checkFileName, line=lineNumber, col=m.start())) elif requestedLineNumber > lastLineNumber: raise ParsingException('{file}:{line}:{col} offset gives line number past the end of file'.format(file=checkFileName, line=lineNumber, col=m.start())) result += str(requestedLineNumber) start = min(m.end(),end) _logger.debug('Next search is {start}:{end} = "{ss}"'.format(start=start, end=end, ss=inputString[start:end])) """ Do simple ${...} substitutions """ # Do ${CHECKFILE_NAME} substitution basenameCheckFileName = os.path.basename(checkFileName) assert basenameCheckFileName.count('\\') == 0 result = self._simpleSubstitution("CHECKFILE_NAME", basenameCheckFileName, result) # Do ${CHECKFILE_ABS_PATH} substitution abspathCheckFileName = os.path.abspath(checkFileName) if isForRegex: # Note slash substitution is for Windows paths (e.g. "c:\mything\foo.txt") which can break regexes if we don't # correctly escape them. abspathCheckFileName = abspathCheckFileName.replace('\\', '\\\\') result = self._simpleSubstitution("CHECKFILE_ABS_PATH", abspathCheckFileName, result) assert len(result) != 0 return result
[ "Do", "various", "$", "{}", "substitutions" ]
stp/OutputCheck
python
https://github.com/stp/OutputCheck/blob/eab62a5dd5129f6a4ebfbe4bbe41d35611f7c48d/OutputCheck/CheckFileParser.py#L134-L220
[ "def", "_substituteCheckPattern", "(", "self", ",", "inputString", ",", "lineNumber", ",", "lastLineNumber", ",", "checkFileName", ",", "isForRegex", ")", ":", "assert", "isinstance", "(", "inputString", ",", "str", ")", "assert", "isinstance", "(", "lineNumber", ",", "int", ")", "assert", "isinstance", "(", "lastLineNumber", ",", "int", ")", "assert", "isinstance", "(", "checkFileName", ",", "str", ")", "\"\"\"\n Do ${LINE}, ${LINE:+N}, and ${LINE:-N} substitutions.\n To escape prepend with slash\n \"\"\"", "sPattern", "=", "r'\\$\\{LINE(\\:(?P<sign>\\+|-)(?P<offset>\\d+))?\\}'", "matcher", "=", "re", ".", "compile", "(", "sPattern", ")", "result", "=", "\"\"", "loop", "=", "True", "start", "=", "0", "end", "=", "len", "(", "inputString", ")", "# Not inclusive", "while", "loop", ":", "m", "=", "matcher", ".", "search", "(", "inputString", ",", "start", ",", "end", ")", "if", "not", "m", ":", "# No match so copy verbatim", "_logger", ".", "debug", "(", "'Result is currently \"{}\"'", ".", "format", "(", "result", ")", ")", "result", "+=", "inputString", "[", "start", ":", "end", "]", "break", "# And we're done :)", "else", ":", "prevIndex", "=", "max", "(", "0", ",", "m", ".", "start", "(", ")", "-", "1", ")", "_logger", ".", "debug", "(", "'Previous character before match is at index {index} \"{char}\"'", ".", "format", "(", "index", "=", "prevIndex", ",", "char", "=", "inputString", "[", "prevIndex", "]", ")", ")", "if", "inputString", "[", "prevIndex", "]", "==", "\"\\\\\"", ":", "# User asked to escape", "_logger", ".", "debug", "(", "'Substitution is escaped'", ")", "_logger", ".", "debug", "(", "'Result is currently \"{}\"'", ".", "format", "(", "result", ")", ")", "result", "+=", "inputString", "[", "start", ":", "prevIndex", "]", "# Copy before escaping character", "_logger", ".", "debug", "(", "'Result is currently \"{}\"'", ".", "format", "(", "result", ")", ")", "result", "+=", "inputString", "[", "(", "prevIndex", "+", "1", ")", ":", "m", ".", "end", "(", ")", "]", "# Copy the ${LINE..} verbatim", "start", "=", "min", "(", "m", ".", "end", "(", ")", ",", "end", ")", "_logger", ".", "debug", "(", "'Result is currently \"{}\"'", ".", "format", "(", "result", ")", ")", "_logger", ".", "debug", "(", "'Next search is {start}:{end} = \"{ss}\"'", ".", "format", "(", "start", "=", "start", ",", "end", "=", "end", ",", "ss", "=", "inputString", "[", "start", ":", "end", "]", ")", ")", "else", ":", "_logger", ".", "debug", "(", "'Result is currently \"{}\"'", ".", "format", "(", "result", ")", ")", "_logger", ".", "debug", "(", "'Doing subsitution. 
Found at {begin}:{end} = {ss}'", ".", "format", "(", "begin", "=", "m", ".", "start", "(", ")", ",", "end", "=", "m", ".", "end", "(", ")", ",", "ss", "=", "inputString", "[", "m", ".", "start", "(", ")", ":", "m", ".", "end", "(", ")", "]", ")", ")", "result", "+=", "inputString", "[", "start", ":", "m", ".", "start", "(", ")", "]", "# Copy before substitution starts", "if", "m", ".", "groupdict", "(", ")", "[", "'sign'", "]", "==", "None", ":", "# No offset just substitute line number", "_logger", ".", "debug", "(", "'No offset'", ")", "result", "+=", "str", "(", "lineNumber", ")", "else", ":", "offset", "=", "1", "if", "m", ".", "groupdict", "(", ")", "[", "'sign'", "]", "==", "'+'", "else", "-", "1", "offset", "*=", "int", "(", "m", ".", "groupdict", "(", ")", "[", "'offset'", "]", ")", "_logger", ".", "debug", "(", "'Offset is {}'", ".", "format", "(", "offset", ")", ")", "requestedLineNumber", "=", "lineNumber", "+", "offset", "_logger", ".", "debug", "(", "'Request line number to print is {}'", ".", "format", "(", "requestedLineNumber", ")", ")", "if", "requestedLineNumber", "<=", "0", ":", "raise", "ParsingException", "(", "'{file}:{line}:{col} offset gives line number < 1'", ".", "format", "(", "file", "=", "checkFileName", ",", "line", "=", "lineNumber", ",", "col", "=", "m", ".", "start", "(", ")", ")", ")", "elif", "requestedLineNumber", ">", "lastLineNumber", ":", "raise", "ParsingException", "(", "'{file}:{line}:{col} offset gives line number past the end of file'", ".", "format", "(", "file", "=", "checkFileName", ",", "line", "=", "lineNumber", ",", "col", "=", "m", ".", "start", "(", ")", ")", ")", "result", "+=", "str", "(", "requestedLineNumber", ")", "start", "=", "min", "(", "m", ".", "end", "(", ")", ",", "end", ")", "_logger", ".", "debug", "(", "'Next search is {start}:{end} = \"{ss}\"'", ".", "format", "(", "start", "=", "start", ",", "end", "=", "end", ",", "ss", "=", "inputString", "[", "start", ":", "end", "]", ")", ")", "\"\"\"\n Do simple ${...} substitutions\n \"\"\"", "# Do ${CHECKFILE_NAME} substitution", "basenameCheckFileName", "=", "os", ".", "path", ".", "basename", "(", "checkFileName", ")", "assert", "basenameCheckFileName", ".", "count", "(", "'\\\\'", ")", "==", "0", "result", "=", "self", ".", "_simpleSubstitution", "(", "\"CHECKFILE_NAME\"", ",", "basenameCheckFileName", ",", "result", ")", "# Do ${CHECKFILE_ABS_PATH} substitution", "abspathCheckFileName", "=", "os", ".", "path", ".", "abspath", "(", "checkFileName", ")", "if", "isForRegex", ":", "# Note slash substitution is for Windows paths (e.g. \"c:\\mything\\foo.txt\") which can break regexes if we don't", "# correctly escape them.", "abspathCheckFileName", "=", "abspathCheckFileName", ".", "replace", "(", "'\\\\'", ",", "'\\\\\\\\'", ")", "result", "=", "self", ".", "_simpleSubstitution", "(", "\"CHECKFILE_ABS_PATH\"", ",", "abspathCheckFileName", ",", "result", ")", "assert", "len", "(", "result", ")", "!=", "0", "return", "result" ]
eab62a5dd5129f6a4ebfbe4bbe41d35611f7c48d
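For orientation between records: the ${LINE}-style substitution above boils down to a regex replace with an optional signed offset. The sketch below is a simplified stand-alone illustration (the helper name is invented, and the escaping and end-of-file bounds checks of the real method are deliberately omitted); it is not part of OutputCheck's API.

import re

def substitute_line_refs(pattern, line_number):
    # Replace ${LINE}, ${LINE:+N} and ${LINE:-N} with concrete line numbers.
    # Escaping (\${LINE}) and bounds checking are omitted in this sketch.
    def repl(m):
        if m.group('sign') is None:
            return str(line_number)
        delta = int(m.group('offset'))
        return str(line_number + delta if m.group('sign') == '+' else line_number - delta)
    return re.sub(r'\$\{LINE(?::(?P<sign>[+-])(?P<offset>\d+))?\}', repl, pattern)

assert substitute_line_refs('error at ${LINE:+2}', 10) == 'error at 12'
assert substitute_line_refs('${LINE}', 7) == '7'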
valid
create_metafile
r"""Create variants metadata file. Variants metadata file helps speed up subsequent reads of the associated bgen file. Parameters ---------- bgen_filepath : str Bgen file path. metafile_file : str Metafile file path. verbose : bool ``True`` to show progress; ``False`` otherwise. Examples -------- .. doctest:: >>> import os >>> from bgen_reader import create_metafile, example_files >>> >>> with example_files("example.32bits.bgen") as filepath: ... folder = os.path.dirname(filepath) ... metafile_filepath = os.path.join(folder, filepath + ".metadata") ... ... try: ... create_metafile(filepath, metafile_filepath, verbose=False) ... finally: ... if os.path.exists(metafile_filepath): ... os.remove(metafile_filepath)
bgen_reader/_metadata.py
def create_metafile(bgen_filepath, metafile_filepath, verbose=True):
    r"""Create variants metadata file.

    Variants metadata file helps speed up subsequent reads of the
    associated bgen file.

    Parameters
    ----------
    bgen_filepath : str
        Bgen file path.
    metafile_filepath : str
        Metafile file path.
    verbose : bool
        ``True`` to show progress; ``False`` otherwise.

    Examples
    --------
    .. doctest::

        >>> import os
        >>> from bgen_reader import create_metafile, example_files
        >>>
        >>> with example_files("example.32bits.bgen") as filepath:
        ...     folder = os.path.dirname(filepath)
        ...     metafile_filepath = os.path.join(folder, filepath + ".metadata")
        ...
        ...     try:
        ...         create_metafile(filepath, metafile_filepath, verbose=False)
        ...     finally:
        ...         if os.path.exists(metafile_filepath):
        ...             os.remove(metafile_filepath)
    """
    if verbose:
        verbose = 1
    else:
        verbose = 0

    bgen_filepath = make_sure_bytes(bgen_filepath)
    metafile_filepath = make_sure_bytes(metafile_filepath)

    assert_file_exist(bgen_filepath)
    assert_file_readable(bgen_filepath)

    if exists(metafile_filepath):
        raise ValueError(f"The file {metafile_filepath} already exists.")

    with bgen_file(bgen_filepath) as bgen:
        nparts = _estimate_best_npartitions(lib.bgen_nvariants(bgen))

        metafile = lib.bgen_create_metafile(bgen, metafile_filepath, nparts, verbose)
        if metafile == ffi.NULL:
            raise RuntimeError(f"Error while creating metafile: {metafile_filepath}.")

        if lib.bgen_close_metafile(metafile) != 0:
            raise RuntimeError(f"Error while closing metafile: {metafile_filepath}.")
def create_metafile(bgen_filepath, metafile_filepath, verbose=True):
    r"""Create variants metadata file.

    Variants metadata file helps speed up subsequent reads of the
    associated bgen file.

    Parameters
    ----------
    bgen_filepath : str
        Bgen file path.
    metafile_filepath : str
        Metafile file path.
    verbose : bool
        ``True`` to show progress; ``False`` otherwise.

    Examples
    --------
    .. doctest::

        >>> import os
        >>> from bgen_reader import create_metafile, example_files
        >>>
        >>> with example_files("example.32bits.bgen") as filepath:
        ...     folder = os.path.dirname(filepath)
        ...     metafile_filepath = os.path.join(folder, filepath + ".metadata")
        ...
        ...     try:
        ...         create_metafile(filepath, metafile_filepath, verbose=False)
        ...     finally:
        ...         if os.path.exists(metafile_filepath):
        ...             os.remove(metafile_filepath)
    """
    if verbose:
        verbose = 1
    else:
        verbose = 0

    bgen_filepath = make_sure_bytes(bgen_filepath)
    metafile_filepath = make_sure_bytes(metafile_filepath)

    assert_file_exist(bgen_filepath)
    assert_file_readable(bgen_filepath)

    if exists(metafile_filepath):
        raise ValueError(f"The file {metafile_filepath} already exists.")

    with bgen_file(bgen_filepath) as bgen:
        nparts = _estimate_best_npartitions(lib.bgen_nvariants(bgen))

        metafile = lib.bgen_create_metafile(bgen, metafile_filepath, nparts, verbose)
        if metafile == ffi.NULL:
            raise RuntimeError(f"Error while creating metafile: {metafile_filepath}.")

        if lib.bgen_close_metafile(metafile) != 0:
            raise RuntimeError(f"Error while closing metafile: {metafile_filepath}.")
[ "r", "Create", "variants", "metadata", "file", "." ]
limix/bgen-reader-py
python
https://github.com/limix/bgen-reader-py/blob/3f66a39e15a71b981e8c5f887a4adc3ad486a45f/bgen_reader/_metadata.py#L10-L63
[ "def", "create_metafile", "(", "bgen_filepath", ",", "metafile_filepath", ",", "verbose", "=", "True", ")", ":", "if", "verbose", ":", "verbose", "=", "1", "else", ":", "verbose", "=", "0", "bgen_filepath", "=", "make_sure_bytes", "(", "bgen_filepath", ")", "metafile_filepath", "=", "make_sure_bytes", "(", "metafile_filepath", ")", "assert_file_exist", "(", "bgen_filepath", ")", "assert_file_readable", "(", "bgen_filepath", ")", "if", "exists", "(", "metafile_filepath", ")", ":", "raise", "ValueError", "(", "f\"The file {metafile_filepath} already exists.\"", ")", "with", "bgen_file", "(", "bgen_filepath", ")", "as", "bgen", ":", "nparts", "=", "_estimate_best_npartitions", "(", "lib", ".", "bgen_nvariants", "(", "bgen", ")", ")", "metafile", "=", "lib", ".", "bgen_create_metafile", "(", "bgen", ",", "metafile_filepath", ",", "nparts", ",", "verbose", ")", "if", "metafile", "==", "ffi", ".", "NULL", ":", "raise", "RuntimeError", "(", "f\"Error while creating metafile: {metafile_filepath}.\"", ")", "if", "lib", ".", "bgen_close_metafile", "(", "metafile", ")", "!=", "0", ":", "raise", "RuntimeError", "(", "f\"Error while closing metafile: {metafile_filepath}.\"", ")" ]
3f66a39e15a71b981e8c5f887a4adc3ad486a45f
valid
CheckLiteral.match
Search through lines for a match. Raise an Exception on failure to match. If the match is successful return the position where the match was found
OutputCheck/Directives.py
def match(self, subsetLines, offsetOfSubset, fileName):
        """
            Search through lines for a match. Raise an Exception on failure to match.
            If the match is successful return the position where the match was found
        """
        for (offset,l) in enumerate(subsetLines):
            column = l.find(self.literal)
            if column != -1:
                truePosition = offset + offsetOfSubset
                _logger.debug('Found match on line {}, col {}'.format(str(truePosition+ 1), column))
                _logger.debug('Line is {}'.format(l))
                self.matchLocation = CheckFileParser.FileLocation(fileName, truePosition +1)
                return truePosition

        # No Match found
        self.failed = True
        raise DirectiveException(self)
def match(self, subsetLines, offsetOfSubset, fileName):
        """
            Search through lines for a match. Raise an Exception on failure to match.
            If the match is successful return the position where the match was found
        """
        for (offset,l) in enumerate(subsetLines):
            column = l.find(self.literal)
            if column != -1:
                truePosition = offset + offsetOfSubset
                _logger.debug('Found match on line {}, col {}'.format(str(truePosition+ 1), column))
                _logger.debug('Line is {}'.format(l))
                self.matchLocation = CheckFileParser.FileLocation(fileName, truePosition +1)
                return truePosition

        # No Match found
        self.failed = True
        raise DirectiveException(self)
[ "Search", "through", "lines", "for", "match", ".", "Raise", "an", "Exception", "if", "fail", "to", "match", "If", "match", "is", "succesful", "return", "the", "position", "the", "match", "was", "found" ]
stp/OutputCheck
python
https://github.com/stp/OutputCheck/blob/eab62a5dd5129f6a4ebfbe4bbe41d35611f7c48d/OutputCheck/Directives.py#L108-L126
[ "def", "match", "(", "self", ",", "subsetLines", ",", "offsetOfSubset", ",", "fileName", ")", ":", "for", "(", "offset", ",", "l", ")", "in", "enumerate", "(", "subsetLines", ")", ":", "column", "=", "l", ".", "find", "(", "self", ".", "literal", ")", "if", "column", "!=", "-", "1", ":", "truePosition", "=", "offset", "+", "offsetOfSubset", "_logger", ".", "debug", "(", "'Found match on line {}, col {}'", ".", "format", "(", "str", "(", "truePosition", "+", "1", ")", ",", "column", ")", ")", "_logger", ".", "debug", "(", "'Line is {}'", ".", "format", "(", "l", ")", ")", "self", ".", "matchLocation", "=", "CheckFileParser", ".", "FileLocation", "(", "fileName", ",", "truePosition", "+", "1", ")", "return", "truePosition", "# No Match found", "self", ".", "failed", "=", "True", "raise", "DirectiveException", "(", "self", ")" ]
eab62a5dd5129f6a4ebfbe4bbe41d35611f7c48d
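The scan-and-report pattern used by CheckLiteral.match above can be tried out with plain lists; the sample data and offset below are made up, and the print/RuntimeError stand in for the FileLocation bookkeeping and DirectiveException.

lines = ['alpha', 'needle here', 'omega']   # hypothetical slice of program output
offset_of_subset = 5                        # absolute offset of lines[0]
for offset, line in enumerate(lines):
    column = line.find('needle')
    if column != -1:
        true_position = offset + offset_of_subset
        print('match on line', true_position + 1, 'col', column)
        break
else:
    raise RuntimeError('no match')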
valid
CheckNot.match
Search through lines for a match. Raise an Exception if a match is found
OutputCheck/Directives.py
def match(self, subsetLines, offsetOfSubset, fileName):
        """
            Search through lines for a match. Raise an Exception if a match is found
        """
        for (offset,l) in enumerate(subsetLines):
            for t in self.regex:
                m = t.Regex.search(l)
                if m != None:
                    truePosition = offset + offsetOfSubset
                    _logger.debug('Found match on line {}'.format(str(truePosition+ 1)))
                    _logger.debug('Line is {}'.format(l))
                    self.failed = True
                    self.matchLocation = CheckFileParser.FileLocation(fileName, truePosition +1)
                    raise DirectiveException(self)
def match(self, subsetLines, offsetOfSubset, fileName):
        """
            Search through lines for a match. Raise an Exception if a match is found
        """
        for (offset,l) in enumerate(subsetLines):
            for t in self.regex:
                m = t.Regex.search(l)
                if m != None:
                    truePosition = offset + offsetOfSubset
                    _logger.debug('Found match on line {}'.format(str(truePosition+ 1)))
                    _logger.debug('Line is {}'.format(l))
                    self.failed = True
                    self.matchLocation = CheckFileParser.FileLocation(fileName, truePosition +1)
                    raise DirectiveException(self)
[ "Search", "through", "lines", "for", "match", ".", "Raise", "an", "Exception", "if", "a", "match" ]
stp/OutputCheck
python
https://github.com/stp/OutputCheck/blob/eab62a5dd5129f6a4ebfbe4bbe41d35611f7c48d/OutputCheck/Directives.py#L195-L209
[ "def", "match", "(", "self", ",", "subsetLines", ",", "offsetOfSubset", ",", "fileName", ")", ":", "for", "(", "offset", ",", "l", ")", "in", "enumerate", "(", "subsetLines", ")", ":", "for", "t", "in", "self", ".", "regex", ":", "m", "=", "t", ".", "Regex", ".", "search", "(", "l", ")", "if", "m", "!=", "None", ":", "truePosition", "=", "offset", "+", "offsetOfSubset", "_logger", ".", "debug", "(", "'Found match on line {}'", ".", "format", "(", "str", "(", "truePosition", "+", "1", ")", ")", ")", "_logger", ".", "debug", "(", "'Line is {}'", ".", "format", "(", "l", ")", ")", "self", ".", "failed", "=", "True", "self", ".", "matchLocation", "=", "CheckFileParser", ".", "FileLocation", "(", "fileName", ",", "truePosition", "+", "1", ")", "raise", "DirectiveException", "(", "self", ")" ]
eab62a5dd5129f6a4ebfbe4bbe41d35611f7c48d
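CheckNot.match above is the inverse check: it fails as soon as any forbidden pattern appears. A minimal stand-alone version of the same loop (the patterns and lines are invented):

import re

forbidden = [re.compile(p) for p in (r'ERROR', r'Traceback')]
for offset, line in enumerate(['step 1 ok', 'step 2 ok']):
    if any(p.search(line) for p in forbidden):
        raise AssertionError('forbidden pattern on line {}'.format(offset + 1))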
valid
isA
Return true if ``instance`` is an instance of any of the Directive types in ``typeList``
OutputCheck/Utils.py
def isA(instance, typeList):
    """
        Return true if ``instance`` is an instance of any of the Directive types
        in ``typeList``
    """
    return any(map(lambda iType: isinstance(instance,iType), typeList))
def isA(instance, typeList):
    """
        Return true if ``instance`` is an instance of any of the Directive types
        in ``typeList``
    """
    return any(map(lambda iType: isinstance(instance,iType), typeList))
[ "Return", "true", "if", "instance", "is", "an", "instance", "of", "any", "the", "Directive", "types", "in", "typeList" ]
stp/OutputCheck
python
https://github.com/stp/OutputCheck/blob/eab62a5dd5129f6a4ebfbe4bbe41d35611f7c48d/OutputCheck/Utils.py#L1-L6
[ "def", "isA", "(", "instance", ",", "typeList", ")", ":", "return", "any", "(", "map", "(", "lambda", "iType", ":", "isinstance", "(", "instance", ",", "iType", ")", ",", "typeList", ")", ")" ]
eab62a5dd5129f6a4ebfbe4bbe41d35611f7c48d
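Usage of isA is straightforward; the snippet below redefines the one-liner so it runs on its own:

def isA(instance, typeList):
    return any(map(lambda iType: isinstance(instance, iType), typeList))

assert isA(3, [str, int])          # int is in the list
assert not isA(3.0, [str, int])    # float matches neither type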
valid
_touch
Touch a file. Credits to <https://stackoverflow.com/a/1160227>.
bgen_reader/_file.py
def _touch(fname, mode=0o666, dir_fd=None, **kwargs): """ Touch a file. Credits to <https://stackoverflow.com/a/1160227>. """ flags = os.O_CREAT | os.O_APPEND with os.fdopen(os.open(fname, flags=flags, mode=mode, dir_fd=dir_fd)) as f: os.utime( f.fileno() if os.utime in os.supports_fd else fname, dir_fd=None if os.supports_fd else dir_fd, **kwargs, )
def _touch(fname, mode=0o666, dir_fd=None, **kwargs): """ Touch a file. Credits to <https://stackoverflow.com/a/1160227>. """ flags = os.O_CREAT | os.O_APPEND with os.fdopen(os.open(fname, flags=flags, mode=mode, dir_fd=dir_fd)) as f: os.utime( f.fileno() if os.utime in os.supports_fd else fname, dir_fd=None if os.supports_fd else dir_fd, **kwargs, )
[ "Touch", "a", "file", "." ]
limix/bgen-reader-py
python
https://github.com/limix/bgen-reader-py/blob/3f66a39e15a71b981e8c5f887a4adc3ad486a45f/bgen_reader/_file.py#L36-L47
[ "def", "_touch", "(", "fname", ",", "mode", "=", "0o666", ",", "dir_fd", "=", "None", ",", "*", "*", "kwargs", ")", ":", "flags", "=", "os", ".", "O_CREAT", "|", "os", ".", "O_APPEND", "with", "os", ".", "fdopen", "(", "os", ".", "open", "(", "fname", ",", "flags", "=", "flags", ",", "mode", "=", "mode", ",", "dir_fd", "=", "dir_fd", ")", ")", "as", "f", ":", "os", ".", "utime", "(", "f", ".", "fileno", "(", ")", "if", "os", ".", "utime", "in", "os", ".", "supports_fd", "else", "fname", ",", "dir_fd", "=", "None", "if", "os", ".", "supports_fd", "else", "dir_fd", ",", "*", "*", "kwargs", ",", ")" ]
3f66a39e15a71b981e8c5f887a4adc3ad486a45f
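Assuming the _touch defined above is in scope, usage mirrors the Unix touch command; the temporary path below is purely illustrative:

import os
import tempfile

fname = os.path.join(tempfile.gettempdir(), 'touched.txt')
_touch(fname)                  # creates the file, or updates its mtime if it exists
assert os.path.exists(fname)
os.remove(fname)               # clean up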
valid
allele_frequency
r""" Compute allele frequency from its expectation. Parameters ---------- expec : array_like Allele expectations encoded as a samples-by-alleles matrix. Returns ------- :class:`numpy.ndarray` Allele frequencies encoded as a variants-by-alleles matrix. Examples -------- .. doctest:: >>> from bgen_reader import read_bgen, example_files >>> from bgen_reader import allele_expectation, allele_frequency >>> >>> # Download an example >>> example = example_files("example.32bits.bgen") >>> filepath = example.filepath >>> >>> bgen = read_bgen(filepath, verbose=False) >>> >>> variants = bgen["variants"] >>> samples = bgen["samples"] >>> genotype = bgen["genotype"] >>> >>> variant = variants[variants["rsid"] == "RSID_6"].compute() >>> variant_idx = variant.index.item() >>> >>> p = genotype[variant_idx].compute()["probs"] >>> # For unphased genotypes only. >>> e = allele_expectation(bgen, variant_idx) >>> f = allele_frequency(e) >>> >>> alleles = variant["allele_ids"].item().split(",") >>> print(alleles[0] + ": {}".format(f[0])) A: 229.23103218810434 >>> print(alleles[1] + ": {}".format(f[1])) G: 270.7689678118956 >>> print(variant) id rsid chrom pos nalleles allele_ids vaddr 4 SNPID_6 RSID_6 01 6000 2 A,G 19377 >>> >>> # Clean-up the example >>> example.close()
bgen_reader/_dosage.py
def allele_frequency(expec): r""" Compute allele frequency from its expectation. Parameters ---------- expec : array_like Allele expectations encoded as a samples-by-alleles matrix. Returns ------- :class:`numpy.ndarray` Allele frequencies encoded as a variants-by-alleles matrix. Examples -------- .. doctest:: >>> from bgen_reader import read_bgen, example_files >>> from bgen_reader import allele_expectation, allele_frequency >>> >>> # Download an example >>> example = example_files("example.32bits.bgen") >>> filepath = example.filepath >>> >>> bgen = read_bgen(filepath, verbose=False) >>> >>> variants = bgen["variants"] >>> samples = bgen["samples"] >>> genotype = bgen["genotype"] >>> >>> variant = variants[variants["rsid"] == "RSID_6"].compute() >>> variant_idx = variant.index.item() >>> >>> p = genotype[variant_idx].compute()["probs"] >>> # For unphased genotypes only. >>> e = allele_expectation(bgen, variant_idx) >>> f = allele_frequency(e) >>> >>> alleles = variant["allele_ids"].item().split(",") >>> print(alleles[0] + ": {}".format(f[0])) A: 229.23103218810434 >>> print(alleles[1] + ": {}".format(f[1])) G: 270.7689678118956 >>> print(variant) id rsid chrom pos nalleles allele_ids vaddr 4 SNPID_6 RSID_6 01 6000 2 A,G 19377 >>> >>> # Clean-up the example >>> example.close() """ expec = asarray(expec, float) if expec.ndim != 2: raise ValueError("Expectation matrix must be bi-dimensional.") ploidy = expec.shape[-1] return expec.sum(-2) / ploidy
def allele_frequency(expec): r""" Compute allele frequency from its expectation. Parameters ---------- expec : array_like Allele expectations encoded as a samples-by-alleles matrix. Returns ------- :class:`numpy.ndarray` Allele frequencies encoded as a variants-by-alleles matrix. Examples -------- .. doctest:: >>> from bgen_reader import read_bgen, example_files >>> from bgen_reader import allele_expectation, allele_frequency >>> >>> # Download an example >>> example = example_files("example.32bits.bgen") >>> filepath = example.filepath >>> >>> bgen = read_bgen(filepath, verbose=False) >>> >>> variants = bgen["variants"] >>> samples = bgen["samples"] >>> genotype = bgen["genotype"] >>> >>> variant = variants[variants["rsid"] == "RSID_6"].compute() >>> variant_idx = variant.index.item() >>> >>> p = genotype[variant_idx].compute()["probs"] >>> # For unphased genotypes only. >>> e = allele_expectation(bgen, variant_idx) >>> f = allele_frequency(e) >>> >>> alleles = variant["allele_ids"].item().split(",") >>> print(alleles[0] + ": {}".format(f[0])) A: 229.23103218810434 >>> print(alleles[1] + ": {}".format(f[1])) G: 270.7689678118956 >>> print(variant) id rsid chrom pos nalleles allele_ids vaddr 4 SNPID_6 RSID_6 01 6000 2 A,G 19377 >>> >>> # Clean-up the example >>> example.close() """ expec = asarray(expec, float) if expec.ndim != 2: raise ValueError("Expectation matrix must be bi-dimensional.") ploidy = expec.shape[-1] return expec.sum(-2) / ploidy
[ "r", "Compute", "allele", "frequency", "from", "its", "expectation", "." ]
limix/bgen-reader-py
python
https://github.com/limix/bgen-reader-py/blob/3f66a39e15a71b981e8c5f887a4adc3ad486a45f/bgen_reader/_dosage.py#L6-L60
[ "def", "allele_frequency", "(", "expec", ")", ":", "expec", "=", "asarray", "(", "expec", ",", "float", ")", "if", "expec", ".", "ndim", "!=", "2", ":", "raise", "ValueError", "(", "\"Expectation matrix must be bi-dimensional.\"", ")", "ploidy", "=", "expec", ".", "shape", "[", "-", "1", "]", "return", "expec", ".", "sum", "(", "-", "2", ")", "/", "ploidy" ]
3f66a39e15a71b981e8c5f887a4adc3ad486a45f
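Stripped of the docstring's bgen plumbing, the computation in allele_frequency is a column sum divided by the ploidy. A tiny NumPy sketch with made-up expectations makes that concrete:

import numpy as np

# Hypothetical allele expectations for three diploid samples at a biallelic site.
expec = np.array([[2.0, 0.0],
                  [1.0, 1.0],
                  [0.5, 1.5]])
ploidy = expec.shape[-1]            # mirrors the function above
freq = expec.sum(-2) / ploidy       # -> array([1.75, 1.25])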
valid
compute_dosage
r""" Compute dosage from allele expectation. Parameters ---------- expec : array_like Allele expectations encoded as a samples-by-alleles matrix. alt : array_like, optional Alternative allele index. If ``None``, the allele having the minor allele frequency for the provided ``expec`` is used as the alternative. Defaults to ``None``. Returns ------- :class:`numpy.ndarray` Dosage encoded as an array of size equal to the number of samples. Examples -------- .. code-block:: python :caption: First a quick-start example. >>> from bgen_reader import allele_expectation, compute_dosage >>> from bgen_reader import example_files, read_bgen >>> >>> # Download an example. >>> example = example_files("example.32bits.bgen") >>> filepath = example.filepath >>> >>> # Read the example. >>> bgen = read_bgen(filepath, verbose=False) >>> >>> # Extract the allele expectations of the fourth variant. >>> variant_idx = 3 >>> e = allele_expectation(bgen, variant_idx) >>> >>> # Compute the dosage when considering the first allele >>> # as the reference/alternative one. >>> alt_allele_idx = 1 >>> d = compute_dosage(e, alt=alt_allele_idx) >>> >>> # Print the dosage of the first five samples only. >>> print(d[:5]) [1.96185308 0.00982666 0.01745552 1.00347899 1.01153563] >>> >>> # Clean-up the example >>> example.close() .. code-block:: python :caption: Genotype probabilities, allele expectations and frequencies. >>> from bgen_reader import ( ... allele_expectation, ... allele_frequency, ... compute_dosage, ... example_files, ... read_bgen, ... ) >>> from pandas import DataFrame >>> from xarray import DataArray >>> >>> # Download an example >>> example = example_files("example.32bits.bgen") >>> filepath = example.filepath >>> >>> # Open the bgen file. >>> bgen = read_bgen(filepath, verbose=False) >>> variants = bgen["variants"] >>> genotype = bgen["genotype"] >>> samples = bgen["samples"] >>> >>> variant_idx = 3 >>> variant = variants.loc[variant_idx].compute() >>> # Print the metadata of the fourth variant. >>> print(variant) id rsid chrom pos nalleles allele_ids vaddr 3 SNPID_5 RSID_5 01 5000 2 A,G 16034 >>> geno = bgen["genotype"][variant_idx].compute() >>> metageno = DataFrame({k: geno[k] for k in ["ploidy", "missing"]}, ... index=samples) >>> metageno.index.name = "sample" >>> print(metageno) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE ploidy missing sample sample_001 2 False sample_002 2 False sample_003 2 False sample_004 2 False ... ... ... sample_497 2 False sample_498 2 False sample_499 2 False sample_500 2 False <BLANKLINE> [500 rows x 2 columns] >>> p = DataArray( ... geno["probs"], ... name="probability", ... coords={"sample": samples}, ... dims=["sample", "genotype"], ... ) >>> # Print the genotype probabilities. >>> print(p.to_series().unstack(level=-1)) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE genotype 0 1 2 sample sample_001 0.00488 0.02838 0.96674 sample_002 0.99045 0.00928 0.00027 sample_003 0.98932 0.00391 0.00677 sample_004 0.00662 0.98328 0.01010 ... ... ... ... sample_497 0.00137 0.01312 0.98550 sample_498 0.00552 0.99423 0.00024 sample_499 0.01266 0.01154 0.97580 sample_500 0.00021 0.98431 0.01547 <BLANKLINE> [500 rows x 3 columns] >>> alleles = variant["allele_ids"].item().split(",") >>> e = DataArray( ... allele_expectation(bgen, variant_idx), ... name="expectation", ... coords={"sample": samples, "allele": alleles}, ... dims=["sample", "allele"], ... ) >>> # Print the allele expectations. 
>>> print(e.to_series().unstack(level=-1)) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE allele A G sample sample_001 0.03815 1.96185 sample_002 1.99017 0.00983 sample_003 1.98254 0.01746 sample_004 0.99652 1.00348 ... ... ... sample_497 0.01587 1.98413 sample_498 1.00528 0.99472 sample_499 0.03687 1.96313 sample_500 0.98474 1.01526 <BLANKLINE> [500 rows x 2 columns] >>> rsid = variant["rsid"].item() >>> chrom = variant["chrom"].item() >>> variant_name = f"{chrom}:{rsid}" >>> f = DataFrame(allele_frequency(e), columns=[variant_name], index=alleles) >>> f.index.name = "allele" >>> # Allele frequencies. >>> print(f) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE 01:RSID_5 allele A 305.97218 G 194.02782 >>> alt = f.idxmin().item() >>> alt_idx = alleles.index(alt) >>> d = compute_dosage(e, alt=alt_idx).to_series() >>> d = DataFrame(d.values, columns=[f"alt={alt}"], index=d.index) >>> # Dosages when considering G as the alternative allele. >>> print(d) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE alt=G sample sample_001 1.96185 sample_002 0.00983 sample_003 0.01746 sample_004 1.00348 ... ... sample_497 1.98413 sample_498 0.99472 sample_499 1.96313 sample_500 1.01526 <BLANKLINE> [500 rows x 1 columns] >>> >>> # Clean-up the example >>> example.close()
bgen_reader/_dosage.py
def compute_dosage(expec, alt=None): r""" Compute dosage from allele expectation. Parameters ---------- expec : array_like Allele expectations encoded as a samples-by-alleles matrix. alt : array_like, optional Alternative allele index. If ``None``, the allele having the minor allele frequency for the provided ``expec`` is used as the alternative. Defaults to ``None``. Returns ------- :class:`numpy.ndarray` Dosage encoded as an array of size equal to the number of samples. Examples -------- .. code-block:: python :caption: First a quick-start example. >>> from bgen_reader import allele_expectation, compute_dosage >>> from bgen_reader import example_files, read_bgen >>> >>> # Download an example. >>> example = example_files("example.32bits.bgen") >>> filepath = example.filepath >>> >>> # Read the example. >>> bgen = read_bgen(filepath, verbose=False) >>> >>> # Extract the allele expectations of the fourth variant. >>> variant_idx = 3 >>> e = allele_expectation(bgen, variant_idx) >>> >>> # Compute the dosage when considering the first allele >>> # as the reference/alternative one. >>> alt_allele_idx = 1 >>> d = compute_dosage(e, alt=alt_allele_idx) >>> >>> # Print the dosage of the first five samples only. >>> print(d[:5]) [1.96185308 0.00982666 0.01745552 1.00347899 1.01153563] >>> >>> # Clean-up the example >>> example.close() .. code-block:: python :caption: Genotype probabilities, allele expectations and frequencies. >>> from bgen_reader import ( ... allele_expectation, ... allele_frequency, ... compute_dosage, ... example_files, ... read_bgen, ... ) >>> from pandas import DataFrame >>> from xarray import DataArray >>> >>> # Download an example >>> example = example_files("example.32bits.bgen") >>> filepath = example.filepath >>> >>> # Open the bgen file. >>> bgen = read_bgen(filepath, verbose=False) >>> variants = bgen["variants"] >>> genotype = bgen["genotype"] >>> samples = bgen["samples"] >>> >>> variant_idx = 3 >>> variant = variants.loc[variant_idx].compute() >>> # Print the metadata of the fourth variant. >>> print(variant) id rsid chrom pos nalleles allele_ids vaddr 3 SNPID_5 RSID_5 01 5000 2 A,G 16034 >>> geno = bgen["genotype"][variant_idx].compute() >>> metageno = DataFrame({k: geno[k] for k in ["ploidy", "missing"]}, ... index=samples) >>> metageno.index.name = "sample" >>> print(metageno) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE ploidy missing sample sample_001 2 False sample_002 2 False sample_003 2 False sample_004 2 False ... ... ... sample_497 2 False sample_498 2 False sample_499 2 False sample_500 2 False <BLANKLINE> [500 rows x 2 columns] >>> p = DataArray( ... geno["probs"], ... name="probability", ... coords={"sample": samples}, ... dims=["sample", "genotype"], ... ) >>> # Print the genotype probabilities. >>> print(p.to_series().unstack(level=-1)) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE genotype 0 1 2 sample sample_001 0.00488 0.02838 0.96674 sample_002 0.99045 0.00928 0.00027 sample_003 0.98932 0.00391 0.00677 sample_004 0.00662 0.98328 0.01010 ... ... ... ... sample_497 0.00137 0.01312 0.98550 sample_498 0.00552 0.99423 0.00024 sample_499 0.01266 0.01154 0.97580 sample_500 0.00021 0.98431 0.01547 <BLANKLINE> [500 rows x 3 columns] >>> alleles = variant["allele_ids"].item().split(",") >>> e = DataArray( ... allele_expectation(bgen, variant_idx), ... name="expectation", ... coords={"sample": samples, "allele": alleles}, ... dims=["sample", "allele"], ... ) >>> # Print the allele expectations. 
>>> print(e.to_series().unstack(level=-1)) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE allele A G sample sample_001 0.03815 1.96185 sample_002 1.99017 0.00983 sample_003 1.98254 0.01746 sample_004 0.99652 1.00348 ... ... ... sample_497 0.01587 1.98413 sample_498 1.00528 0.99472 sample_499 0.03687 1.96313 sample_500 0.98474 1.01526 <BLANKLINE> [500 rows x 2 columns] >>> rsid = variant["rsid"].item() >>> chrom = variant["chrom"].item() >>> variant_name = f"{chrom}:{rsid}" >>> f = DataFrame(allele_frequency(e), columns=[variant_name], index=alleles) >>> f.index.name = "allele" >>> # Allele frequencies. >>> print(f) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE 01:RSID_5 allele A 305.97218 G 194.02782 >>> alt = f.idxmin().item() >>> alt_idx = alleles.index(alt) >>> d = compute_dosage(e, alt=alt_idx).to_series() >>> d = DataFrame(d.values, columns=[f"alt={alt}"], index=d.index) >>> # Dosages when considering G as the alternative allele. >>> print(d) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE alt=G sample sample_001 1.96185 sample_002 0.00983 sample_003 0.01746 sample_004 1.00348 ... ... sample_497 1.98413 sample_498 0.99472 sample_499 1.96313 sample_500 1.01526 <BLANKLINE> [500 rows x 1 columns] >>> >>> # Clean-up the example >>> example.close() """ if alt is None: return expec[..., -1] try: return expec[:, alt] except NotImplementedError: alt = asarray(alt, int) return asarray(expec, float)[:, alt]
def compute_dosage(expec, alt=None): r""" Compute dosage from allele expectation. Parameters ---------- expec : array_like Allele expectations encoded as a samples-by-alleles matrix. alt : array_like, optional Alternative allele index. If ``None``, the allele having the minor allele frequency for the provided ``expec`` is used as the alternative. Defaults to ``None``. Returns ------- :class:`numpy.ndarray` Dosage encoded as an array of size equal to the number of samples. Examples -------- .. code-block:: python :caption: First a quick-start example. >>> from bgen_reader import allele_expectation, compute_dosage >>> from bgen_reader import example_files, read_bgen >>> >>> # Download an example. >>> example = example_files("example.32bits.bgen") >>> filepath = example.filepath >>> >>> # Read the example. >>> bgen = read_bgen(filepath, verbose=False) >>> >>> # Extract the allele expectations of the fourth variant. >>> variant_idx = 3 >>> e = allele_expectation(bgen, variant_idx) >>> >>> # Compute the dosage when considering the first allele >>> # as the reference/alternative one. >>> alt_allele_idx = 1 >>> d = compute_dosage(e, alt=alt_allele_idx) >>> >>> # Print the dosage of the first five samples only. >>> print(d[:5]) [1.96185308 0.00982666 0.01745552 1.00347899 1.01153563] >>> >>> # Clean-up the example >>> example.close() .. code-block:: python :caption: Genotype probabilities, allele expectations and frequencies. >>> from bgen_reader import ( ... allele_expectation, ... allele_frequency, ... compute_dosage, ... example_files, ... read_bgen, ... ) >>> from pandas import DataFrame >>> from xarray import DataArray >>> >>> # Download an example >>> example = example_files("example.32bits.bgen") >>> filepath = example.filepath >>> >>> # Open the bgen file. >>> bgen = read_bgen(filepath, verbose=False) >>> variants = bgen["variants"] >>> genotype = bgen["genotype"] >>> samples = bgen["samples"] >>> >>> variant_idx = 3 >>> variant = variants.loc[variant_idx].compute() >>> # Print the metadata of the fourth variant. >>> print(variant) id rsid chrom pos nalleles allele_ids vaddr 3 SNPID_5 RSID_5 01 5000 2 A,G 16034 >>> geno = bgen["genotype"][variant_idx].compute() >>> metageno = DataFrame({k: geno[k] for k in ["ploidy", "missing"]}, ... index=samples) >>> metageno.index.name = "sample" >>> print(metageno) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE ploidy missing sample sample_001 2 False sample_002 2 False sample_003 2 False sample_004 2 False ... ... ... sample_497 2 False sample_498 2 False sample_499 2 False sample_500 2 False <BLANKLINE> [500 rows x 2 columns] >>> p = DataArray( ... geno["probs"], ... name="probability", ... coords={"sample": samples}, ... dims=["sample", "genotype"], ... ) >>> # Print the genotype probabilities. >>> print(p.to_series().unstack(level=-1)) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE genotype 0 1 2 sample sample_001 0.00488 0.02838 0.96674 sample_002 0.99045 0.00928 0.00027 sample_003 0.98932 0.00391 0.00677 sample_004 0.00662 0.98328 0.01010 ... ... ... ... sample_497 0.00137 0.01312 0.98550 sample_498 0.00552 0.99423 0.00024 sample_499 0.01266 0.01154 0.97580 sample_500 0.00021 0.98431 0.01547 <BLANKLINE> [500 rows x 3 columns] >>> alleles = variant["allele_ids"].item().split(",") >>> e = DataArray( ... allele_expectation(bgen, variant_idx), ... name="expectation", ... coords={"sample": samples, "allele": alleles}, ... dims=["sample", "allele"], ... ) >>> # Print the allele expectations. 
>>> print(e.to_series().unstack(level=-1)) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE allele A G sample sample_001 0.03815 1.96185 sample_002 1.99017 0.00983 sample_003 1.98254 0.01746 sample_004 0.99652 1.00348 ... ... ... sample_497 0.01587 1.98413 sample_498 1.00528 0.99472 sample_499 0.03687 1.96313 sample_500 0.98474 1.01526 <BLANKLINE> [500 rows x 2 columns] >>> rsid = variant["rsid"].item() >>> chrom = variant["chrom"].item() >>> variant_name = f"{chrom}:{rsid}" >>> f = DataFrame(allele_frequency(e), columns=[variant_name], index=alleles) >>> f.index.name = "allele" >>> # Allele frequencies. >>> print(f) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE 01:RSID_5 allele A 305.97218 G 194.02782 >>> alt = f.idxmin().item() >>> alt_idx = alleles.index(alt) >>> d = compute_dosage(e, alt=alt_idx).to_series() >>> d = DataFrame(d.values, columns=[f"alt={alt}"], index=d.index) >>> # Dosages when considering G as the alternative allele. >>> print(d) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE alt=G sample sample_001 1.96185 sample_002 0.00983 sample_003 0.01746 sample_004 1.00348 ... ... sample_497 1.98413 sample_498 0.99472 sample_499 1.96313 sample_500 1.01526 <BLANKLINE> [500 rows x 1 columns] >>> >>> # Clean-up the example >>> example.close() """ if alt is None: return expec[..., -1] try: return expec[:, alt] except NotImplementedError: alt = asarray(alt, int) return asarray(expec, float)[:, alt]
[ "r", "Compute", "dosage", "from", "allele", "expectation", "." ]
limix/bgen-reader-py
python
https://github.com/limix/bgen-reader-py/blob/3f66a39e15a71b981e8c5f887a4adc3ad486a45f/bgen_reader/_dosage.py#L63-L242
[ "def", "compute_dosage", "(", "expec", ",", "alt", "=", "None", ")", ":", "if", "alt", "is", "None", ":", "return", "expec", "[", "...", ",", "-", "1", "]", "try", ":", "return", "expec", "[", ":", ",", "alt", "]", "except", "NotImplementedError", ":", "alt", "=", "asarray", "(", "alt", ",", "int", ")", "return", "asarray", "(", "expec", ",", "float", ")", "[", ":", ",", "alt", "]" ]
3f66a39e15a71b981e8c5f887a4adc3ad486a45f
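Behind the doctests, the dosage is simply the alternative-allele column of the expectation matrix. A minimal NumPy sketch with invented numbers:

import numpy as np

expec = np.array([[2.0, 0.0],
                  [1.0, 1.0],
                  [0.5, 1.5]])      # samples-by-alleles expectations
alt = 1                             # index of the alternative allele
dosage = expec[:, alt]              # -> array([0. , 1. , 1.5])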
valid
allele_expectation
r""" Allele expectation. Compute the expectation of each allele from the genotype probabilities. Parameters ---------- bgen : bgen_file Bgen file handler. variant_idx : int Variant index. Returns ------- :class:`numpy.ndarray` Samples-by-alleles matrix of allele expectations. Note ---- This function supports unphased genotypes only. Examples -------- .. doctest:: >>> from bgen_reader import allele_expectation, example_files, read_bgen >>> >>> from texttable import Texttable >>> >>> # Download an example. >>> example = example_files("example.32bits.bgen") >>> filepath = example.filepath >>> >>> # Read the example. >>> bgen = read_bgen(filepath, verbose=False) >>> >>> variants = bgen["variants"] >>> samples = bgen["samples"] >>> genotype = bgen["genotype"] >>> >>> genotype = bgen["genotype"] >>> # This `compute` call will return a pandas data frame, >>> variant = variants[variants["rsid"] == "RSID_6"].compute() >>> # from which we retrieve the variant index. >>> variant_idx = variant.index.item() >>> print(variant) id rsid chrom pos nalleles allele_ids vaddr 4 SNPID_6 RSID_6 01 6000 2 A,G 19377 >>> genotype = bgen["genotype"] >>> # Samples is a pandas series, and we retrieve the >>> # sample index from the sample name. >>> sample_idx = samples[samples == "sample_005"].index.item() >>> >>> genotype = bgen["genotype"] >>> # This `compute` call will return a dictionary from which >>> # we can get the probability matrix the corresponding >>> # variant. >>> p = genotype[variant_idx].compute()["probs"][sample_idx] >>> >>> genotype = bgen["genotype"] >>> # Allele expectation makes sense for unphased genotypes only, >>> # which is the case here. >>> e = allele_expectation(bgen, variant_idx)[sample_idx] >>> >>> genotype = bgen["genotype"] >>> alleles = variant["allele_ids"].item().split(",") >>> >>> genotype = bgen["genotype"] >>> >>> # Print what we have got in a nice format. >>> table = Texttable() >>> table = table.add_rows( ... [ ... ["", "AA", "AG", "GG", "E[.]"], ... ["p"] + list(p) + ["na"], ... ["#" + alleles[0], 2, 1, 0, e[0]], ... ["#" + alleles[1], 0, 1, 2, e[1]], ... ] ... ) >>> print(table.draw()) +----+-------+-------+-------+-------+ | | AA | AG | GG | E[.] | +====+=======+=======+=======+=======+ | p | 0.012 | 0.987 | 0.001 | na | +----+-------+-------+-------+-------+ | #A | 2 | 1 | 0 | 1.011 | +----+-------+-------+-------+-------+ | #G | 0 | 1 | 2 | 0.989 | +----+-------+-------+-------+-------+ >>> >>> # Clean-up. >>> example.close()
bgen_reader/_dosage.py
def allele_expectation(bgen, variant_idx): r""" Allele expectation. Compute the expectation of each allele from the genotype probabilities. Parameters ---------- bgen : bgen_file Bgen file handler. variant_idx : int Variant index. Returns ------- :class:`numpy.ndarray` Samples-by-alleles matrix of allele expectations. Note ---- This function supports unphased genotypes only. Examples -------- .. doctest:: >>> from bgen_reader import allele_expectation, example_files, read_bgen >>> >>> from texttable import Texttable >>> >>> # Download an example. >>> example = example_files("example.32bits.bgen") >>> filepath = example.filepath >>> >>> # Read the example. >>> bgen = read_bgen(filepath, verbose=False) >>> >>> variants = bgen["variants"] >>> samples = bgen["samples"] >>> genotype = bgen["genotype"] >>> >>> genotype = bgen["genotype"] >>> # This `compute` call will return a pandas data frame, >>> variant = variants[variants["rsid"] == "RSID_6"].compute() >>> # from which we retrieve the variant index. >>> variant_idx = variant.index.item() >>> print(variant) id rsid chrom pos nalleles allele_ids vaddr 4 SNPID_6 RSID_6 01 6000 2 A,G 19377 >>> genotype = bgen["genotype"] >>> # Samples is a pandas series, and we retrieve the >>> # sample index from the sample name. >>> sample_idx = samples[samples == "sample_005"].index.item() >>> >>> genotype = bgen["genotype"] >>> # This `compute` call will return a dictionary from which >>> # we can get the probability matrix the corresponding >>> # variant. >>> p = genotype[variant_idx].compute()["probs"][sample_idx] >>> >>> genotype = bgen["genotype"] >>> # Allele expectation makes sense for unphased genotypes only, >>> # which is the case here. >>> e = allele_expectation(bgen, variant_idx)[sample_idx] >>> >>> genotype = bgen["genotype"] >>> alleles = variant["allele_ids"].item().split(",") >>> >>> genotype = bgen["genotype"] >>> >>> # Print what we have got in a nice format. >>> table = Texttable() >>> table = table.add_rows( ... [ ... ["", "AA", "AG", "GG", "E[.]"], ... ["p"] + list(p) + ["na"], ... ["#" + alleles[0], 2, 1, 0, e[0]], ... ["#" + alleles[1], 0, 1, 2, e[1]], ... ] ... ) >>> print(table.draw()) +----+-------+-------+-------+-------+ | | AA | AG | GG | E[.] | +====+=======+=======+=======+=======+ | p | 0.012 | 0.987 | 0.001 | na | +----+-------+-------+-------+-------+ | #A | 2 | 1 | 0 | 1.011 | +----+-------+-------+-------+-------+ | #G | 0 | 1 | 2 | 0.989 | +----+-------+-------+-------+-------+ >>> >>> # Clean-up. >>> example.close() """ geno = bgen["genotype"][variant_idx].compute() if geno["phased"]: raise ValueError("Allele expectation is define for unphased genotypes only.") nalleles = bgen["variants"].loc[variant_idx, "nalleles"].compute().item() genotypes = get_genotypes(geno["ploidy"], nalleles) expec = [] for i in range(len(genotypes)): count = asarray(genotypes_to_allele_counts(genotypes[i]), float) n = count.shape[0] expec.append((count.T * geno["probs"][i, :n]).sum(1)) return stack(expec, axis=0)
def allele_expectation(bgen, variant_idx): r""" Allele expectation. Compute the expectation of each allele from the genotype probabilities. Parameters ---------- bgen : bgen_file Bgen file handler. variant_idx : int Variant index. Returns ------- :class:`numpy.ndarray` Samples-by-alleles matrix of allele expectations. Note ---- This function supports unphased genotypes only. Examples -------- .. doctest:: >>> from bgen_reader import allele_expectation, example_files, read_bgen >>> >>> from texttable import Texttable >>> >>> # Download an example. >>> example = example_files("example.32bits.bgen") >>> filepath = example.filepath >>> >>> # Read the example. >>> bgen = read_bgen(filepath, verbose=False) >>> >>> variants = bgen["variants"] >>> samples = bgen["samples"] >>> genotype = bgen["genotype"] >>> >>> genotype = bgen["genotype"] >>> # This `compute` call will return a pandas data frame, >>> variant = variants[variants["rsid"] == "RSID_6"].compute() >>> # from which we retrieve the variant index. >>> variant_idx = variant.index.item() >>> print(variant) id rsid chrom pos nalleles allele_ids vaddr 4 SNPID_6 RSID_6 01 6000 2 A,G 19377 >>> genotype = bgen["genotype"] >>> # Samples is a pandas series, and we retrieve the >>> # sample index from the sample name. >>> sample_idx = samples[samples == "sample_005"].index.item() >>> >>> genotype = bgen["genotype"] >>> # This `compute` call will return a dictionary from which >>> # we can get the probability matrix the corresponding >>> # variant. >>> p = genotype[variant_idx].compute()["probs"][sample_idx] >>> >>> genotype = bgen["genotype"] >>> # Allele expectation makes sense for unphased genotypes only, >>> # which is the case here. >>> e = allele_expectation(bgen, variant_idx)[sample_idx] >>> >>> genotype = bgen["genotype"] >>> alleles = variant["allele_ids"].item().split(",") >>> >>> genotype = bgen["genotype"] >>> >>> # Print what we have got in a nice format. >>> table = Texttable() >>> table = table.add_rows( ... [ ... ["", "AA", "AG", "GG", "E[.]"], ... ["p"] + list(p) + ["na"], ... ["#" + alleles[0], 2, 1, 0, e[0]], ... ["#" + alleles[1], 0, 1, 2, e[1]], ... ] ... ) >>> print(table.draw()) +----+-------+-------+-------+-------+ | | AA | AG | GG | E[.] | +====+=======+=======+=======+=======+ | p | 0.012 | 0.987 | 0.001 | na | +----+-------+-------+-------+-------+ | #A | 2 | 1 | 0 | 1.011 | +----+-------+-------+-------+-------+ | #G | 0 | 1 | 2 | 0.989 | +----+-------+-------+-------+-------+ >>> >>> # Clean-up. >>> example.close() """ geno = bgen["genotype"][variant_idx].compute() if geno["phased"]: raise ValueError("Allele expectation is define for unphased genotypes only.") nalleles = bgen["variants"].loc[variant_idx, "nalleles"].compute().item() genotypes = get_genotypes(geno["ploidy"], nalleles) expec = [] for i in range(len(genotypes)): count = asarray(genotypes_to_allele_counts(genotypes[i]), float) n = count.shape[0] expec.append((count.T * geno["probs"][i, :n]).sum(1)) return stack(expec, axis=0)
[ "r", "Allele", "expectation", "." ]
limix/bgen-reader-py
python
https://github.com/limix/bgen-reader-py/blob/3f66a39e15a71b981e8c5f887a4adc3ad486a45f/bgen_reader/_dosage.py#L245-L350
[ "def", "allele_expectation", "(", "bgen", ",", "variant_idx", ")", ":", "geno", "=", "bgen", "[", "\"genotype\"", "]", "[", "variant_idx", "]", ".", "compute", "(", ")", "if", "geno", "[", "\"phased\"", "]", ":", "raise", "ValueError", "(", "\"Allele expectation is define for unphased genotypes only.\"", ")", "nalleles", "=", "bgen", "[", "\"variants\"", "]", ".", "loc", "[", "variant_idx", ",", "\"nalleles\"", "]", ".", "compute", "(", ")", ".", "item", "(", ")", "genotypes", "=", "get_genotypes", "(", "geno", "[", "\"ploidy\"", "]", ",", "nalleles", ")", "expec", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "genotypes", ")", ")", ":", "count", "=", "asarray", "(", "genotypes_to_allele_counts", "(", "genotypes", "[", "i", "]", ")", ",", "float", ")", "n", "=", "count", ".", "shape", "[", "0", "]", "expec", ".", "append", "(", "(", "count", ".", "T", "*", "geno", "[", "\"probs\"", "]", "[", "i", ",", ":", "n", "]", ")", ".", "sum", "(", "1", ")", ")", "return", "stack", "(", "expec", ",", "axis", "=", "0", ")" ]
3f66a39e15a71b981e8c5f887a4adc3ad486a45f
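For a single diploid sample at a biallelic site, the expectation computed above is just the probability-weighted allele count. The numbers below reproduce the table from the docstring:

import numpy as np

probs = np.array([0.012, 0.987, 0.001])   # P(AA), P(AG), P(GG)
count_A = np.array([2, 1, 0])             # copies of A per genotype
count_G = np.array([0, 1, 2])             # copies of G per genotype
e_A = (count_A * probs).sum()             # 1.011, as in the docstring table
e_G = (count_G * probs).sum()             # 0.989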
valid
Windows.find_libname
Try to infer the correct library name.
libpath.py
def find_libname(self, name): """Try to infer the correct library name.""" names = ["{}.lib", "lib{}.lib", "{}lib.lib"] names = [n.format(name) for n in names] dirs = self.get_library_dirs() for d in dirs: for n in names: if exists(join(d, n)): return n[:-4] msg = "Could not find the {} library.".format(name) raise ValueError(msg)
def find_libname(self, name): """Try to infer the correct library name.""" names = ["{}.lib", "lib{}.lib", "{}lib.lib"] names = [n.format(name) for n in names] dirs = self.get_library_dirs() for d in dirs: for n in names: if exists(join(d, n)): return n[:-4] msg = "Could not find the {} library.".format(name) raise ValueError(msg)
[ "Try", "to", "infer", "the", "correct", "library", "name", "." ]
limix/bgen-reader-py
python
https://github.com/limix/bgen-reader-py/blob/3f66a39e15a71b981e8c5f887a4adc3ad486a45f/libpath.py#L109-L119
[ "def", "find_libname", "(", "self", ",", "name", ")", ":", "names", "=", "[", "\"{}.lib\"", ",", "\"lib{}.lib\"", ",", "\"{}lib.lib\"", "]", "names", "=", "[", "n", ".", "format", "(", "name", ")", "for", "n", "in", "names", "]", "dirs", "=", "self", ".", "get_library_dirs", "(", ")", "for", "d", "in", "dirs", ":", "for", "n", "in", "names", ":", "if", "exists", "(", "join", "(", "d", ",", "n", ")", ")", ":", "return", "n", "[", ":", "-", "4", "]", "msg", "=", "\"Could not find the {} library.\"", ".", "format", "(", "name", ")", "raise", "ValueError", "(", "msg", ")" ]
3f66a39e15a71b981e8c5f887a4adc3ad486a45f
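A stand-alone variant of the same probing logic, with the directory list passed in explicitly so it can be tried outside the build-helper class (the free function below mirrors the method but is not the libpath API):

import os

def find_libname(name, dirs):
    names = [t.format(name) for t in ('{}.lib', 'lib{}.lib', '{}lib.lib')]
    for d in dirs:
        for n in names:
            if os.path.exists(os.path.join(d, n)):
                return n[:-4]      # drop the '.lib' suffix
    raise ValueError('Could not find the {} library.'.format(name))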
valid
LeaveOneGroupOut.split
Generate indices to split data into training and test set. Parameters ---------- X : array-like, of length n_samples Training data, includes reaction's containers y : array-like, of length n_samples The target variable for supervised learning problems. groups : array-like, with shape (n_samples,) Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split.
CIMtools/model_selection/group_out.py
def split(self, X, y=None, groups=None): """Generate indices to split data into training and test set. Parameters ---------- X : array-like, of length n_samples Training data, includes reaction's containers y : array-like, of length n_samples The target variable for supervised learning problems. groups : array-like, with shape (n_samples,) Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split. """ X, y, groups = indexable(X, y, groups) cgrs = [~r for r in X] structure_condition = defaultdict(set) for structure, condition in zip(cgrs, groups): structure_condition[structure].add(condition) train_data = defaultdict(list) test_data = [] for n, (structure, condition) in enumerate(zip(cgrs, groups)): train_data[condition].append(n) if len(structure_condition[structure]) > 1: test_data.append(n) for condition, indexes in train_data.items(): test_index = [index for index in indexes if index in test_data] if test_index: train_index = [i for cond, ind in train_data.items() if cond != condition for i in ind] yield array(train_index), array(test_index)
def split(self, X, y=None, groups=None): """Generate indices to split data into training and test set. Parameters ---------- X : array-like, of length n_samples Training data, includes reaction's containers y : array-like, of length n_samples The target variable for supervised learning problems. groups : array-like, with shape (n_samples,) Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split. """ X, y, groups = indexable(X, y, groups) cgrs = [~r for r in X] structure_condition = defaultdict(set) for structure, condition in zip(cgrs, groups): structure_condition[structure].add(condition) train_data = defaultdict(list) test_data = [] for n, (structure, condition) in enumerate(zip(cgrs, groups)): train_data[condition].append(n) if len(structure_condition[structure]) > 1: test_data.append(n) for condition, indexes in train_data.items(): test_index = [index for index in indexes if index in test_data] if test_index: train_index = [i for cond, ind in train_data.items() if cond != condition for i in ind] yield array(train_index), array(test_index)
[ "Generate", "indices", "to", "split", "data", "into", "training", "and", "test", "set", ".", "Parameters", "----------", "X", ":", "array", "-", "like", "of", "length", "n_samples", "Training", "data", "includes", "reaction", "s", "containers", "y", ":", "array", "-", "like", "of", "length", "n_samples", "The", "target", "variable", "for", "supervised", "learning", "problems", ".", "groups", ":", "array", "-", "like", "with", "shape", "(", "n_samples", ")", "Group", "labels", "for", "the", "samples", "used", "while", "splitting", "the", "dataset", "into", "train", "/", "test", "set", ".", "Yields", "------", "train", ":", "ndarray", "The", "training", "set", "indices", "for", "that", "split", ".", "test", ":", "ndarray", "The", "testing", "set", "indices", "for", "that", "split", "." ]
stsouko/CIMtools
python
https://github.com/stsouko/CIMtools/blob/cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3/CIMtools/model_selection/group_out.py#L54-L92
[ "def", "split", "(", "self", ",", "X", ",", "y", "=", "None", ",", "groups", "=", "None", ")", ":", "X", ",", "y", ",", "groups", "=", "indexable", "(", "X", ",", "y", ",", "groups", ")", "cgrs", "=", "[", "~", "r", "for", "r", "in", "X", "]", "structure_condition", "=", "defaultdict", "(", "set", ")", "for", "structure", ",", "condition", "in", "zip", "(", "cgrs", ",", "groups", ")", ":", "structure_condition", "[", "structure", "]", ".", "add", "(", "condition", ")", "train_data", "=", "defaultdict", "(", "list", ")", "test_data", "=", "[", "]", "for", "n", ",", "(", "structure", ",", "condition", ")", "in", "enumerate", "(", "zip", "(", "cgrs", ",", "groups", ")", ")", ":", "train_data", "[", "condition", "]", ".", "append", "(", "n", ")", "if", "len", "(", "structure_condition", "[", "structure", "]", ")", ">", "1", ":", "test_data", ".", "append", "(", "n", ")", "for", "condition", ",", "indexes", "in", "train_data", ".", "items", "(", ")", ":", "test_index", "=", "[", "index", "for", "index", "in", "indexes", "if", "index", "in", "test_data", "]", "if", "test_index", ":", "train_index", "=", "[", "i", "for", "cond", ",", "ind", "in", "train_data", ".", "items", "(", ")", "if", "cond", "!=", "condition", "for", "i", "in", "ind", "]", "yield", "array", "(", "train_index", ")", ",", "array", "(", "test_index", ")" ]
cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3
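Because the real splitter consumes CGRtools reaction containers (the ~r call), the grouping logic is easiest to see on plain strings standing in for structures. Only structures observed under more than one condition may enter a test fold:

from collections import defaultdict
from numpy import array

structures = ['s1', 's1', 's2', 's3']   # stand-ins for condensed reaction graphs
groups     = ['T1', 'T2', 'T1', 'T2']   # hypothetical reaction conditions

structure_condition = defaultdict(set)
for s, g in zip(structures, groups):
    structure_condition[s].add(g)

train_data, test_data = defaultdict(list), []
for n, (s, g) in enumerate(zip(structures, groups)):
    train_data[g].append(n)
    if len(structure_condition[s]) > 1:
        test_data.append(n)

for g, indexes in train_data.items():
    test_index = [i for i in indexes if i in test_data]
    if test_index:
        train_index = [i for gg, ii in train_data.items() if gg != g for i in ii]
        print(array(train_index), array(test_index))   # one split per condition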
valid
molconvert_chemaxon
molconvert wrapper :param data: buffer or string or path to file :return: array of molecules or reactions
CIMtools/datasets/molconvert_chemaxon.py
def molconvert_chemaxon(data): """ molconvert wrapper :param data: buffer or string or path to file :return: array of molecules or reactions """ if isinstance(data, Path): with data.open('rb') as f: data = f.read() elif isinstance(data, StringIO): data = data.read().encode() elif isinstance(data, BytesIO): data = data.read() elif hasattr(data, 'read'): # check if data is open(filename, mode) data = data.read() if isinstance(data, str): data = data.encode() elif isinstance(data, str): data = data.encode() elif not isinstance(data, bytes): raise ValueError('invalid input') try: p = run(['molconvert', '-g', 'mrv'], input=data, stdout=PIPE) except FileNotFoundError as e: raise ConfigurationError from e if p.returncode != 0: raise ConfigurationError(p.stderr.decode()) with BytesIO(p.stdout) as f, MRVread(f) as r: return iter2array(r)
def molconvert_chemaxon(data): """ molconvert wrapper :param data: buffer or string or path to file :return: array of molecules or reactions """ if isinstance(data, Path): with data.open('rb') as f: data = f.read() elif isinstance(data, StringIO): data = data.read().encode() elif isinstance(data, BytesIO): data = data.read() elif hasattr(data, 'read'): # check if data is open(filename, mode) data = data.read() if isinstance(data, str): data = data.encode() elif isinstance(data, str): data = data.encode() elif not isinstance(data, bytes): raise ValueError('invalid input') try: p = run(['molconvert', '-g', 'mrv'], input=data, stdout=PIPE) except FileNotFoundError as e: raise ConfigurationError from e if p.returncode != 0: raise ConfigurationError(p.stderr.decode()) with BytesIO(p.stdout) as f, MRVread(f) as r: return iter2array(r)
[ "molconvert", "wrapper", ":", "param", "data", ":", "buffer", "or", "string", "or", "path", "to", "file", ":", "return", ":", "array", "of", "molecules", "of", "reactions" ]
stsouko/CIMtools
python
https://github.com/stsouko/CIMtools/blob/cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3/CIMtools/datasets/molconvert_chemaxon.py#L27-L58
[ "def", "molconvert_chemaxon", "(", "data", ")", ":", "if", "isinstance", "(", "data", ",", "Path", ")", ":", "with", "data", ".", "open", "(", "'rb'", ")", "as", "f", ":", "data", "=", "f", ".", "read", "(", ")", "elif", "isinstance", "(", "data", ",", "StringIO", ")", ":", "data", "=", "data", ".", "read", "(", ")", ".", "encode", "(", ")", "elif", "isinstance", "(", "data", ",", "BytesIO", ")", ":", "data", "=", "data", ".", "read", "(", ")", "elif", "hasattr", "(", "data", ",", "'read'", ")", ":", "# check if data is open(filename, mode)", "data", "=", "data", ".", "read", "(", ")", "if", "isinstance", "(", "data", ",", "str", ")", ":", "data", "=", "data", ".", "encode", "(", ")", "elif", "isinstance", "(", "data", ",", "str", ")", ":", "data", "=", "data", ".", "encode", "(", ")", "elif", "not", "isinstance", "(", "data", ",", "bytes", ")", ":", "raise", "ValueError", "(", "'invalid input'", ")", "try", ":", "p", "=", "run", "(", "[", "'molconvert'", ",", "'-g'", ",", "'mrv'", "]", ",", "input", "=", "data", ",", "stdout", "=", "PIPE", ")", "except", "FileNotFoundError", "as", "e", ":", "raise", "ConfigurationError", "from", "e", "if", "p", ".", "returncode", "!=", "0", ":", "raise", "ConfigurationError", "(", "p", ".", "stderr", ".", "decode", "(", ")", ")", "with", "BytesIO", "(", "p", ".", "stdout", ")", "as", "f", ",", "MRVread", "(", "f", ")", "as", "r", ":", "return", "iter2array", "(", "r", ")" ]
cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3
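Assuming ChemAxon's molconvert is installed and on PATH, and the wrapper above is importable, a call looks like this (the SMILES string is arbitrary):

from io import StringIO

structures = molconvert_chemaxon(StringIO('CCO\n'))   # parsed back via MRVread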
valid
SimilarityDistance.fit
Fit distance-based AD. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Use ``dtype=np.float32`` for maximum efficiency. Returns ------- self : object Returns self.
CIMtools/applicability_domain/similarity_distance.py
def fit(self, X, y=None): """Fit distance-based AD. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Use ``dtype=np.float32`` for maximum efficiency. Returns ------- self : object Returns self. """ # Check data X = check_array(X) self.tree = BallTree(X, leaf_size=self.leaf_size, metric=self.metric) dist_train = self.tree.query(X, k=2)[0] if self.threshold == 'auto': self.threshold_value = 0.5 * sqrt(var(dist_train[:, 1])) + mean(dist_train[:, 1]) elif self.threshold == 'cv': if y is None: raise ValueError("Y must be specified to find the optimal threshold.") y = check_array(y, accept_sparse='csc', ensure_2d=False, dtype=None) self.threshold_value = 0 score = 0 Y_pred, Y_true, AD = [], [], [] cv = KFold(n_splits=5, random_state=1, shuffle=True) for train_index, test_index in cv.split(X): x_train = safe_indexing(X, train_index) x_test = safe_indexing(X, test_index) y_train = safe_indexing(y, train_index) y_test = safe_indexing(y, test_index) data_test = safe_indexing(dist_train[:, 1], test_index) if self.reg_model is None: reg_model = RandomForestRegressor(n_estimators=500, random_state=1).fit(x_train, y_train) else: reg_model = clone(self.reg_model).fit(x_train, y_train) Y_pred.append(reg_model.predict(x_test)) Y_true.append(y_test) AD.append(data_test) AD_ = unique(hstack(AD)) for z in AD_: AD_new = hstack(AD) <= z if self.score == 'ba_ad': val = balanced_accuracy_score_with_ad(Y_true=hstack(Y_true), Y_pred=hstack(Y_pred), AD=AD_new) elif self.score == 'rmse_ad': val = rmse_score_with_ad(Y_true=hstack(Y_true), Y_pred=hstack(Y_pred), AD=AD_new) if val >= score: score = val self.threshold_value = z else: self.threshold_value = self.threshold return self
def fit(self, X, y=None): """Fit distance-based AD. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Use ``dtype=np.float32`` for maximum efficiency. Returns ------- self : object Returns self. """ # Check data X = check_array(X) self.tree = BallTree(X, leaf_size=self.leaf_size, metric=self.metric) dist_train = self.tree.query(X, k=2)[0] if self.threshold == 'auto': self.threshold_value = 0.5 * sqrt(var(dist_train[:, 1])) + mean(dist_train[:, 1]) elif self.threshold == 'cv': if y is None: raise ValueError("Y must be specified to find the optimal threshold.") y = check_array(y, accept_sparse='csc', ensure_2d=False, dtype=None) self.threshold_value = 0 score = 0 Y_pred, Y_true, AD = [], [], [] cv = KFold(n_splits=5, random_state=1, shuffle=True) for train_index, test_index in cv.split(X): x_train = safe_indexing(X, train_index) x_test = safe_indexing(X, test_index) y_train = safe_indexing(y, train_index) y_test = safe_indexing(y, test_index) data_test = safe_indexing(dist_train[:, 1], test_index) if self.reg_model is None: reg_model = RandomForestRegressor(n_estimators=500, random_state=1).fit(x_train, y_train) else: reg_model = clone(self.reg_model).fit(x_train, y_train) Y_pred.append(reg_model.predict(x_test)) Y_true.append(y_test) AD.append(data_test) AD_ = unique(hstack(AD)) for z in AD_: AD_new = hstack(AD) <= z if self.score == 'ba_ad': val = balanced_accuracy_score_with_ad(Y_true=hstack(Y_true), Y_pred=hstack(Y_pred), AD=AD_new) elif self.score == 'rmse_ad': val = rmse_score_with_ad(Y_true=hstack(Y_true), Y_pred=hstack(Y_pred), AD=AD_new) if val >= score: score = val self.threshold_value = z else: self.threshold_value = self.threshold return self
[ "Fit", "distance", "-", "based", "AD", "." ]
stsouko/CIMtools
python
https://github.com/stsouko/CIMtools/blob/cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3/CIMtools/applicability_domain/similarity_distance.py#L105-L158
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "# Check data", "X", "=", "check_array", "(", "X", ")", "self", ".", "tree", "=", "BallTree", "(", "X", ",", "leaf_size", "=", "self", ".", "leaf_size", ",", "metric", "=", "self", ".", "metric", ")", "dist_train", "=", "self", ".", "tree", ".", "query", "(", "X", ",", "k", "=", "2", ")", "[", "0", "]", "if", "self", ".", "threshold", "==", "'auto'", ":", "self", ".", "threshold_value", "=", "0.5", "*", "sqrt", "(", "var", "(", "dist_train", "[", ":", ",", "1", "]", ")", ")", "+", "mean", "(", "dist_train", "[", ":", ",", "1", "]", ")", "elif", "self", ".", "threshold", "==", "'cv'", ":", "if", "y", "is", "None", ":", "raise", "ValueError", "(", "\"Y must be specified to find the optimal threshold.\"", ")", "y", "=", "check_array", "(", "y", ",", "accept_sparse", "=", "'csc'", ",", "ensure_2d", "=", "False", ",", "dtype", "=", "None", ")", "self", ".", "threshold_value", "=", "0", "score", "=", "0", "Y_pred", ",", "Y_true", ",", "AD", "=", "[", "]", ",", "[", "]", ",", "[", "]", "cv", "=", "KFold", "(", "n_splits", "=", "5", ",", "random_state", "=", "1", ",", "shuffle", "=", "True", ")", "for", "train_index", ",", "test_index", "in", "cv", ".", "split", "(", "X", ")", ":", "x_train", "=", "safe_indexing", "(", "X", ",", "train_index", ")", "x_test", "=", "safe_indexing", "(", "X", ",", "test_index", ")", "y_train", "=", "safe_indexing", "(", "y", ",", "train_index", ")", "y_test", "=", "safe_indexing", "(", "y", ",", "test_index", ")", "data_test", "=", "safe_indexing", "(", "dist_train", "[", ":", ",", "1", "]", ",", "test_index", ")", "if", "self", ".", "reg_model", "is", "None", ":", "reg_model", "=", "RandomForestRegressor", "(", "n_estimators", "=", "500", ",", "random_state", "=", "1", ")", ".", "fit", "(", "x_train", ",", "y_train", ")", "else", ":", "reg_model", "=", "clone", "(", "self", ".", "reg_model", ")", ".", "fit", "(", "x_train", ",", "y_train", ")", "Y_pred", ".", "append", "(", "reg_model", ".", "predict", "(", "x_test", ")", ")", "Y_true", ".", "append", "(", "y_test", ")", "AD", ".", "append", "(", "data_test", ")", "AD_", "=", "unique", "(", "hstack", "(", "AD", ")", ")", "for", "z", "in", "AD_", ":", "AD_new", "=", "hstack", "(", "AD", ")", "<=", "z", "if", "self", ".", "score", "==", "'ba_ad'", ":", "val", "=", "balanced_accuracy_score_with_ad", "(", "Y_true", "=", "hstack", "(", "Y_true", ")", ",", "Y_pred", "=", "hstack", "(", "Y_pred", ")", ",", "AD", "=", "AD_new", ")", "elif", "self", ".", "score", "==", "'rmse_ad'", ":", "val", "=", "rmse_score_with_ad", "(", "Y_true", "=", "hstack", "(", "Y_true", ")", ",", "Y_pred", "=", "hstack", "(", "Y_pred", ")", ",", "AD", "=", "AD_new", ")", "if", "val", ">=", "score", ":", "score", "=", "val", "self", ".", "threshold_value", "=", "z", "else", ":", "self", ".", "threshold_value", "=", "self", ".", "threshold", "return", "self" ]
cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3
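A minimal usage sketch for the record above, assuming SimilarityDistance is importable from the module named in the path field and that its constructor accepts a threshold keyword (the __init__ signature is not part of this record)::

    import numpy as np
    from CIMtools.applicability_domain.similarity_distance import SimilarityDistance

    rng = np.random.RandomState(0)
    X_train = rng.rand(100, 5)                 # descriptor matrix of the training set

    ad = SimilarityDistance(threshold='auto')  # hypothetical constructor call
    ad.fit(X_train)                            # builds the BallTree, sets threshold_value
    print(ad.threshold_value)                  # 0.5 * std + mean of each sample's nearest-neighbour distance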
valid
SimilarityDistance.predict_proba
Returns the distance to the nearest neighbor from the training set. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- y : array, shape (n_samples,) Distances to the nearest training samples.
CIMtools/applicability_domain/similarity_distance.py
def predict_proba(self, X): """Returns the value of the nearest neighbor from the training set. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- y : array, shape (n_samples,) """ # Check is fit had been called check_is_fitted(self, ['tree']) # Check data X = check_array(X) return self.tree.query(X)[0].flatten()
def predict_proba(self, X): """Returns the value of the nearest neighbor from the training set. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- y : array, shape (n_samples,) """ # Check is fit had been called check_is_fitted(self, ['tree']) # Check data X = check_array(X) return self.tree.query(X)[0].flatten()
[ "Returns", "the", "value", "of", "the", "nearest", "neighbor", "from", "the", "training", "set", "." ]
stsouko/CIMtools
python
https://github.com/stsouko/CIMtools/blob/cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3/CIMtools/applicability_domain/similarity_distance.py#L160-L178
[ "def", "predict_proba", "(", "self", ",", "X", ")", ":", "# Check is fit had been called", "check_is_fitted", "(", "self", ",", "[", "'tree'", "]", ")", "# Check data", "X", "=", "check_array", "(", "X", ")", "return", "self", ".", "tree", ".", "query", "(", "X", ")", "[", "0", "]", ".", "flatten", "(", ")" ]
cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3
valid
SimilarityDistance.predict
Predict if a particular sample is an outlier or not. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- y : array, shape (n_samples,) For each observation, tells whether or not (True or False) it should be considered an inlier according to the fitted model.
CIMtools/applicability_domain/similarity_distance.py
def predict(self, X): """Predict if a particular sample is an outlier or not. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- y : array, shape (n_samples,) For each observations, tells whether or not (True or False) it should be considered as an inlier according to the fitted model. """ # Check is fit had been called check_is_fitted(self, ['tree']) # Check data X = check_array(X) return self.tree.query(X)[0].flatten() <= self.threshold_value
def predict(self, X): """Predict if a particular sample is an outlier or not. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- y : array, shape (n_samples,) For each observations, tells whether or not (True or False) it should be considered as an inlier according to the fitted model. """ # Check is fit had been called check_is_fitted(self, ['tree']) # Check data X = check_array(X) return self.tree.query(X)[0].flatten() <= self.threshold_value
[ "Predict", "if", "a", "particular", "sample", "is", "an", "outlier", "or", "not", "." ]
stsouko/CIMtools
python
https://github.com/stsouko/CIMtools/blob/cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3/CIMtools/applicability_domain/similarity_distance.py#L180-L200
[ "def", "predict", "(", "self", ",", "X", ")", ":", "# Check is fit had been called", "check_is_fitted", "(", "self", ",", "[", "'tree'", "]", ")", "# Check data", "X", "=", "check_array", "(", "X", ")", "return", "self", ".", "tree", ".", "query", "(", "X", ")", "[", "0", "]", ".", "flatten", "(", ")", "<=", "self", ".", "threshold_value" ]
cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3
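The two prediction methods above differ only by a threshold comparison: predict(X) equals predict_proba(X) <= threshold_value. A sketch continuing the fitted `ad` estimator from the earlier SimilarityDistance example::

    X_test = rng.rand(10, 5)
    distances = ad.predict_proba(X_test)   # distance to the nearest training sample
    inside = ad.predict(X_test)            # boolean AD membership
    assert (inside == (distances <= ad.threshold_value)).all()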
valid
Leverage.fit
Fitting finds the inverse influence matrix for X and calculates the AD threshold. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Use ``dtype=np.float32`` for maximum efficiency. y : array-like, shape = [n_samples] or [n_samples, n_outputs] The target values (real numbers in regression). Returns ------- self : object
CIMtools/applicability_domain/leverage.py
def fit(self, X, y=None): """Learning is to find the inverse matrix for X and calculate the threshold. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Use ``dtype=np.float32`` for maximum efficiency. y : array-like, shape = [n_samples] or [n_samples, n_outputs] The target values (real numbers in regression). Returns ------- self : object """ # Check that X have correct shape X = check_array(X) self.inverse_influence_matrix = self.__make_inverse_matrix(X) if self.threshold == 'auto': self.threshold_value = 3 * (1 + X.shape[1]) / X.shape[0] elif self.threshold == 'cv': if y is None: raise ValueError("Y must be specified to find the optimal threshold.") y = check_array(y, accept_sparse='csc', ensure_2d=False, dtype=None) self.threshold_value = 0 score = 0 Y_pred, Y_true, AD = [], [], [] cv = KFold(n_splits=5, random_state=1, shuffle=True) for train_index, test_index in cv.split(X): x_train = safe_indexing(X, train_index) x_test = safe_indexing(X, test_index) y_train = safe_indexing(y, train_index) y_test = safe_indexing(y, test_index) if self.reg_model is None: reg_model = RandomForestRegressor(n_estimators=500, random_state=1).fit(x_train, y_train) else: reg_model = clone(self.reg_model).fit(x_train, y_train) Y_pred.append(reg_model.predict(x_test)) Y_true.append(y_test) ad_model = self.__make_inverse_matrix(x_train) AD.append(self.__find_leverages(x_test, ad_model)) AD_ = unique(hstack(AD)) for z in AD_: AD_new = hstack(AD) <= z if self.score == 'ba_ad': val = balanced_accuracy_score_with_ad(Y_true=hstack(Y_true), Y_pred=hstack(Y_pred), AD=AD_new) elif self.score == 'rmse_ad': val = rmse_score_with_ad(Y_true=hstack(Y_true), Y_pred=hstack(Y_pred), AD=AD_new) if val >= score: score = val self.threshold_value = z else: self.threshold_value = self.threshold return self
def fit(self, X, y=None): """Learning is to find the inverse matrix for X and calculate the threshold. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Use ``dtype=np.float32`` for maximum efficiency. y : array-like, shape = [n_samples] or [n_samples, n_outputs] The target values (real numbers in regression). Returns ------- self : object """ # Check that X have correct shape X = check_array(X) self.inverse_influence_matrix = self.__make_inverse_matrix(X) if self.threshold == 'auto': self.threshold_value = 3 * (1 + X.shape[1]) / X.shape[0] elif self.threshold == 'cv': if y is None: raise ValueError("Y must be specified to find the optimal threshold.") y = check_array(y, accept_sparse='csc', ensure_2d=False, dtype=None) self.threshold_value = 0 score = 0 Y_pred, Y_true, AD = [], [], [] cv = KFold(n_splits=5, random_state=1, shuffle=True) for train_index, test_index in cv.split(X): x_train = safe_indexing(X, train_index) x_test = safe_indexing(X, test_index) y_train = safe_indexing(y, train_index) y_test = safe_indexing(y, test_index) if self.reg_model is None: reg_model = RandomForestRegressor(n_estimators=500, random_state=1).fit(x_train, y_train) else: reg_model = clone(self.reg_model).fit(x_train, y_train) Y_pred.append(reg_model.predict(x_test)) Y_true.append(y_test) ad_model = self.__make_inverse_matrix(x_train) AD.append(self.__find_leverages(x_test, ad_model)) AD_ = unique(hstack(AD)) for z in AD_: AD_new = hstack(AD) <= z if self.score == 'ba_ad': val = balanced_accuracy_score_with_ad(Y_true=hstack(Y_true), Y_pred=hstack(Y_pred), AD=AD_new) elif self.score == 'rmse_ad': val = rmse_score_with_ad(Y_true=hstack(Y_true), Y_pred=hstack(Y_pred), AD=AD_new) if val >= score: score = val self.threshold_value = z else: self.threshold_value = self.threshold return self
[ "Learning", "is", "to", "find", "the", "inverse", "matrix", "for", "X", "and", "calculate", "the", "threshold", "." ]
stsouko/CIMtools
python
https://github.com/stsouko/CIMtools/blob/cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3/CIMtools/applicability_domain/leverage.py#L75-L128
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "# Check that X have correct shape", "X", "=", "check_array", "(", "X", ")", "self", ".", "inverse_influence_matrix", "=", "self", ".", "__make_inverse_matrix", "(", "X", ")", "if", "self", ".", "threshold", "==", "'auto'", ":", "self", ".", "threshold_value", "=", "3", "*", "(", "1", "+", "X", ".", "shape", "[", "1", "]", ")", "/", "X", ".", "shape", "[", "0", "]", "elif", "self", ".", "threshold", "==", "'cv'", ":", "if", "y", "is", "None", ":", "raise", "ValueError", "(", "\"Y must be specified to find the optimal threshold.\"", ")", "y", "=", "check_array", "(", "y", ",", "accept_sparse", "=", "'csc'", ",", "ensure_2d", "=", "False", ",", "dtype", "=", "None", ")", "self", ".", "threshold_value", "=", "0", "score", "=", "0", "Y_pred", ",", "Y_true", ",", "AD", "=", "[", "]", ",", "[", "]", ",", "[", "]", "cv", "=", "KFold", "(", "n_splits", "=", "5", ",", "random_state", "=", "1", ",", "shuffle", "=", "True", ")", "for", "train_index", ",", "test_index", "in", "cv", ".", "split", "(", "X", ")", ":", "x_train", "=", "safe_indexing", "(", "X", ",", "train_index", ")", "x_test", "=", "safe_indexing", "(", "X", ",", "test_index", ")", "y_train", "=", "safe_indexing", "(", "y", ",", "train_index", ")", "y_test", "=", "safe_indexing", "(", "y", ",", "test_index", ")", "if", "self", ".", "reg_model", "is", "None", ":", "reg_model", "=", "RandomForestRegressor", "(", "n_estimators", "=", "500", ",", "random_state", "=", "1", ")", ".", "fit", "(", "x_train", ",", "y_train", ")", "else", ":", "reg_model", "=", "clone", "(", "self", ".", "reg_model", ")", ".", "fit", "(", "x_train", ",", "y_train", ")", "Y_pred", ".", "append", "(", "reg_model", ".", "predict", "(", "x_test", ")", ")", "Y_true", ".", "append", "(", "y_test", ")", "ad_model", "=", "self", ".", "__make_inverse_matrix", "(", "x_train", ")", "AD", ".", "append", "(", "self", ".", "__find_leverages", "(", "x_test", ",", "ad_model", ")", ")", "AD_", "=", "unique", "(", "hstack", "(", "AD", ")", ")", "for", "z", "in", "AD_", ":", "AD_new", "=", "hstack", "(", "AD", ")", "<=", "z", "if", "self", ".", "score", "==", "'ba_ad'", ":", "val", "=", "balanced_accuracy_score_with_ad", "(", "Y_true", "=", "hstack", "(", "Y_true", ")", ",", "Y_pred", "=", "hstack", "(", "Y_pred", ")", ",", "AD", "=", "AD_new", ")", "elif", "self", ".", "score", "==", "'rmse_ad'", ":", "val", "=", "rmse_score_with_ad", "(", "Y_true", "=", "hstack", "(", "Y_true", ")", ",", "Y_pred", "=", "hstack", "(", "Y_pred", ")", ",", "AD", "=", "AD_new", ")", "if", "val", ">=", "score", ":", "score", "=", "val", "self", ".", "threshold_value", "=", "z", "else", ":", "self", ".", "threshold_value", "=", "self", ".", "threshold", "return", "self" ]
cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3
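With threshold='auto', the cut-off is the classical leverage warning limit h* = 3(p + 1)/n, where p is the number of descriptors (X.shape[1]) and n the number of training samples (X.shape[0]). A quick arithmetic check::

    n_samples, n_features = 100, 5
    threshold_value = 3 * (1 + n_features) / n_samples
    print(threshold_value)   # 0.18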
valid
Leverage.predict_proba
Predict the distances for X to the center of the training set. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- leverages: array of shape = [n_samples] The objects' distances to the center of the training set.
CIMtools/applicability_domain/leverage.py
def predict_proba(self, X): """Predict the distances for X to center of the training set. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- leverages: array of shape = [n_samples] The objects distances to center of the training set. """ # Check is fit had been called check_is_fitted(self, ['inverse_influence_matrix']) # Check that X have correct shape X = check_array(X) return self.__find_leverages(X, self.inverse_influence_matrix)
def predict_proba(self, X): """Predict the distances for X to center of the training set. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- leverages: array of shape = [n_samples] The objects distances to center of the training set. """ # Check is fit had been called check_is_fitted(self, ['inverse_influence_matrix']) # Check that X have correct shape X = check_array(X) return self.__find_leverages(X, self.inverse_influence_matrix)
[ "Predict", "the", "distances", "for", "X", "to", "center", "of", "the", "training", "set", "." ]
stsouko/CIMtools
python
https://github.com/stsouko/CIMtools/blob/cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3/CIMtools/applicability_domain/leverage.py#L130-L149
[ "def", "predict_proba", "(", "self", ",", "X", ")", ":", "# Check is fit had been called", "check_is_fitted", "(", "self", ",", "[", "'inverse_influence_matrix'", "]", ")", "# Check that X have correct shape", "X", "=", "check_array", "(", "X", ")", "return", "self", ".", "__find_leverages", "(", "X", ",", "self", ".", "inverse_influence_matrix", ")" ]
cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3
valid
Leverage.predict
Predict inside or outside AD for X. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- ad : array of shape = [n_samples] Array containing True (reaction inside AD) and False (reaction outside AD).
CIMtools/applicability_domain/leverage.py
def predict(self, X): """Predict inside or outside AD for X. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- ad : array of shape = [n_samples] Array contains True (reaction in AD) and False (reaction residing outside AD). """ # Check is fit had been called check_is_fitted(self, ['inverse_influence_matrix']) # Check that X have correct shape X = check_array(X) return self.__find_leverages(X, self.inverse_influence_matrix) <= self.threshold_value
def predict(self, X): """Predict inside or outside AD for X. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- ad : array of shape = [n_samples] Array contains True (reaction in AD) and False (reaction residing outside AD). """ # Check is fit had been called check_is_fitted(self, ['inverse_influence_matrix']) # Check that X have correct shape X = check_array(X) return self.__find_leverages(X, self.inverse_influence_matrix) <= self.threshold_value
[ "Predict", "inside", "or", "outside", "AD", "for", "X", "." ]
stsouko/CIMtools
python
https://github.com/stsouko/CIMtools/blob/cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3/CIMtools/applicability_domain/leverage.py#L151-L170
[ "def", "predict", "(", "self", ",", "X", ")", ":", "# Check is fit had been called", "check_is_fitted", "(", "self", ",", "[", "'inverse_influence_matrix'", "]", ")", "# Check that X have correct shape", "X", "=", "check_array", "(", "X", ")", "return", "self", ".", "__find_leverages", "(", "X", ",", "self", ".", "inverse_influence_matrix", ")", "<=", "self", ".", "threshold_value" ]
cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3
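A usage sketch mirroring the SimilarityDistance example, again assuming the module path from the record and a constructor that accepts a threshold keyword (not shown in this record)::

    import numpy as np
    from CIMtools.applicability_domain.leverage import Leverage

    rng = np.random.RandomState(0)
    X_train, X_test = rng.rand(100, 5), rng.rand(10, 5)

    ad = Leverage(threshold='auto')   # hypothetical constructor call
    ad.fit(X_train)
    h = ad.predict_proba(X_test)      # leverages of the test samples
    inside = ad.predict(X_test)       # equivalent to h <= ad.threshold_value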
valid
ConditionsToDataFrame.get_feature_names
Get feature names. Returns ------- feature_names : list of strings Names of the features produced by transform.
CIMtools/conditions_container.py
def get_feature_names(self): """Get feature names. Returns ------- feature_names : list of strings Names of the features produced by transform. """ return ['temperature', 'pressure'] + [f'solvent.{x}' for x in range(1, self.max_solvents + 1)] + \ [f'solvent_amount.{x}' for x in range(1, self.max_solvents + 1)]
def get_feature_names(self): """Get feature names. Returns ------- feature_names : list of strings Names of the features produced by transform. """ return ['temperature', 'pressure'] + [f'solvent.{x}' for x in range(1, self.max_solvents + 1)] + \ [f'solvent_amount.{x}' for x in range(1, self.max_solvents + 1)]
[ "Get", "feature", "names", "." ]
stsouko/CIMtools
python
https://github.com/stsouko/CIMtools/blob/cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3/CIMtools/conditions_container.py#L148-L157
[ "def", "get_feature_names", "(", "self", ")", ":", "return", "[", "'temperature'", ",", "'pressure'", "]", "+", "[", "f'solvent.{x}'", "for", "x", "in", "range", "(", "1", ",", "self", ".", "max_solvents", "+", "1", ")", "]", "+", "[", "f'solvent_amount.{x}'", "for", "x", "in", "range", "(", "1", ",", "self", ".", "max_solvents", "+", "1", ")", "]" ]
cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3
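The ordering matters for downstream column mapping: all solvent names come before all solvent amounts. For an instance with max_solvents = 2 the method returns::

    ['temperature', 'pressure',
     'solvent.1', 'solvent.2',
     'solvent_amount.1', 'solvent_amount.2']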
valid
Box.fit
Find min and max values of every feature. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) The training input samples. y : Ignored. Not used, present for API consistency by convention. Returns ------- self : object
CIMtools/applicability_domain/bounding_box.py
def fit(self, X, y=None): """Find min and max values of every feature. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) The training input samples. y : Ignored. Not used, present for API consistency by convention. Returns ------- self : object """ # Check that X has the correct shape X = check_array(X) self._x_min = X.min(axis=0) # axis=0 finds the minimum value of each column (feature) self._x_max = X.max(axis=0) # axis=0 finds the maximum value of each column (feature) return self
def fit(self, X, y=None): """Find min and max values of every feature. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) The training input samples. y : Ignored. Not used, present for API consistency by convention. Returns ------- self : object """ # Check that X has the correct shape X = check_array(X) self._x_min = X.min(axis=0) # axis=0 finds the minimum value of each column (feature) self._x_max = X.max(axis=0) # axis=0 finds the maximum value of each column (feature) return self
[ "Find", "min", "and", "max", "values", "of", "every", "feature", "." ]
stsouko/CIMtools
python
https://github.com/stsouko/CIMtools/blob/cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3/CIMtools/applicability_domain/bounding_box.py#L37-L56
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "# Check that X have correct shape", "X", "=", "check_array", "(", "X", ")", "self", ".", "_x_min", "=", "X", ".", "min", "(", "axis", "=", "0", ")", "# axis=0 will find the minimum values ​​by columns (for each feature)", "self", ".", "_x_max", "=", "X", ".", "max", "(", "axis", "=", "0", ")", "# axis=0 will find the minimum values ​​by columns (for each feature)", "return", "self" ]
cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3
valid
Box.predict
Predict if a particular sample is an outlier or not. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- is_inlier : array, shape (n_samples,) For each observation, tells whether or not (True or False) it should be considered an inlier according to the fitted model.
CIMtools/applicability_domain/bounding_box.py
def predict(self, X): """ Predict if a particular sample is an outlier or not. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- is_inlier : array, shape (n_samples,) For each observations, tells whether or not (True or False) it should be considered as an inlier according to the fitted model. """ # Check is fit had been called check_is_fitted(self, ['_x_min', '_x_max']) # Input validation X = check_array(X) return ((X - self._x_min).min(axis=1) >= 0) & ((self._x_max - X).min(axis=1) >= 0)
def predict(self, X): """ Predict if a particular sample is an outlier or not. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- is_inlier : array, shape (n_samples,) For each observations, tells whether or not (True or False) it should be considered as an inlier according to the fitted model. """ # Check is fit had been called check_is_fitted(self, ['_x_min', '_x_max']) # Input validation X = check_array(X) return ((X - self._x_min).min(axis=1) >= 0) & ((self._x_max - X).min(axis=1) >= 0)
[ "Predict", "if", "a", "particular", "sample", "is", "an", "outlier", "or", "not", "." ]
stsouko/CIMtools
python
https://github.com/stsouko/CIMtools/blob/cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3/CIMtools/applicability_domain/bounding_box.py#L58-L79
[ "def", "predict", "(", "self", ",", "X", ")", ":", "# Check is fit had been called", "check_is_fitted", "(", "self", ",", "[", "'_x_min'", ",", "'_x_max'", "]", ")", "# Input validation", "X", "=", "check_array", "(", "X", ")", "return", "(", "(", "X", "-", "self", ".", "_x_min", ")", ".", "min", "(", "axis", "=", "1", ")", ">=", "0", ")", "&", "(", "(", "self", ".", "_x_max", "-", "X", ")", ".", "min", "(", "axis", "=", "1", ")", ">=", "0", ")" ]
cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3
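Because Box only stores per-feature minima and maxima, its decision rule is a simple bounding-box test; a sketch assuming the record's module path and a no-argument constructor::

    import numpy as np
    from CIMtools.applicability_domain.bounding_box import Box

    X_train = np.array([[0.0, 0.0], [1.0, 2.0], [0.5, 1.0]])
    X_test = np.array([[0.5, 0.5],    # inside the box
                       [2.0, 1.0]])   # first feature exceeds the training maximum

    ad = Box().fit(X_train)
    print(ad.predict(X_test))         # [ True False]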
valid
TransformationOut.split
Generate indices to split data into training and test set. Parameters ---------- X : array-like, of length n_samples Training data; includes reaction containers. y : array-like, of length n_samples The target variable for supervised learning problems. groups : array-like, with shape (n_samples,) Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split.
CIMtools/model_selection/transformation_out.py
def split(self, X, y=None, groups=None): """Generate indices to split data into training and test set. Parameters ---------- X : array-like, of length n_samples Training data, includes reaction's containers y : array-like, of length n_samples The target variable for supervised learning problems. groups : array-like, with shape (n_samples,) Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split. """ X, y, groups = indexable(X, y, groups) cgrs = [~r for r in X] condition_structure = defaultdict(set) for structure, condition in zip(cgrs, groups): condition_structure[condition].add(structure) train_data = defaultdict(list) test_data = [] for n, (structure, condition) in enumerate(zip(cgrs, groups)): train_data[structure].append(n) if len(condition_structure[condition]) > 1: test_data.append(n) if self.n_splits > len(train_data): raise ValueError("Cannot have number of splits n_splits=%d greater" " than the number of transformations: %d." % (self.n_splits, len(train_data))) structures_weight = sorted(((x, len(y)) for x, y in train_data.items()), key=lambda x: x[1], reverse=True) fold_mean_size = len(cgrs) // self.n_splits if structures_weight[0][1] > fold_mean_size: warning('You have transformation that greater fold size') for idx in range(self.n_repeats): train_folds = [[] for _ in range(self.n_splits)] for structure, structure_length in structures_weight: if self.shuffle: check_random_state(self.random_state).shuffle(train_folds) for fold in train_folds[:-1]: if len(fold) + structure_length <= fold_mean_size: fold.extend(train_data[structure]) break else: roulette_param = (structure_length - fold_mean_size + len(fold)) / structure_length if random() > roulette_param: fold.extend(train_data[structure]) break else: train_folds[-1].extend(train_data[structure]) test_folds = [[] for _ in range(self.n_splits)] for test, train in zip(test_folds, train_folds): for index in train: if index in test_data: test.append(index) for i in range(self.n_splits): train_index = [] for fold in train_folds[:i]: train_index.extend(fold) for fold in train_folds[i+1:]: train_index.extend(fold) test_index = test_folds[i] yield array(train_index), array(test_index)
def split(self, X, y=None, groups=None): """Generate indices to split data into training and test set. Parameters ---------- X : array-like, of length n_samples Training data, includes reaction's containers y : array-like, of length n_samples The target variable for supervised learning problems. groups : array-like, with shape (n_samples,) Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split. """ X, y, groups = indexable(X, y, groups) cgrs = [~r for r in X] condition_structure = defaultdict(set) for structure, condition in zip(cgrs, groups): condition_structure[condition].add(structure) train_data = defaultdict(list) test_data = [] for n, (structure, condition) in enumerate(zip(cgrs, groups)): train_data[structure].append(n) if len(condition_structure[condition]) > 1: test_data.append(n) if self.n_splits > len(train_data): raise ValueError("Cannot have number of splits n_splits=%d greater" " than the number of transformations: %d." % (self.n_splits, len(train_data))) structures_weight = sorted(((x, len(y)) for x, y in train_data.items()), key=lambda x: x[1], reverse=True) fold_mean_size = len(cgrs) // self.n_splits if structures_weight[0][1] > fold_mean_size: warning('You have transformation that greater fold size') for idx in range(self.n_repeats): train_folds = [[] for _ in range(self.n_splits)] for structure, structure_length in structures_weight: if self.shuffle: check_random_state(self.random_state).shuffle(train_folds) for fold in train_folds[:-1]: if len(fold) + structure_length <= fold_mean_size: fold.extend(train_data[structure]) break else: roulette_param = (structure_length - fold_mean_size + len(fold)) / structure_length if random() > roulette_param: fold.extend(train_data[structure]) break else: train_folds[-1].extend(train_data[structure]) test_folds = [[] for _ in range(self.n_splits)] for test, train in zip(test_folds, train_folds): for index in train: if index in test_data: test.append(index) for i in range(self.n_splits): train_index = [] for fold in train_folds[:i]: train_index.extend(fold) for fold in train_folds[i+1:]: train_index.extend(fold) test_index = test_folds[i] yield array(train_index), array(test_index)
[ "Generate", "indices", "to", "split", "data", "into", "training", "and", "test", "set", ".", "Parameters", "----------", "X", ":", "array", "-", "like", "of", "length", "n_samples", "Training", "data", "includes", "reaction", "s", "containers", "y", ":", "array", "-", "like", "of", "length", "n_samples", "The", "target", "variable", "for", "supervised", "learning", "problems", ".", "groups", ":", "array", "-", "like", "with", "shape", "(", "n_samples", ")", "Group", "labels", "for", "the", "samples", "used", "while", "splitting", "the", "dataset", "into", "train", "/", "test", "set", ".", "Yields", "------", "train", ":", "ndarray", "The", "training", "set", "indices", "for", "that", "split", ".", "test", ":", "ndarray", "The", "testing", "set", "indices", "for", "that", "split", "." ]
stsouko/CIMtools
python
https://github.com/stsouko/CIMtools/blob/cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3/CIMtools/model_selection/transformation_out.py#L81-L156
[ "def", "split", "(", "self", ",", "X", ",", "y", "=", "None", ",", "groups", "=", "None", ")", ":", "X", ",", "y", ",", "groups", "=", "indexable", "(", "X", ",", "y", ",", "groups", ")", "cgrs", "=", "[", "~", "r", "for", "r", "in", "X", "]", "condition_structure", "=", "defaultdict", "(", "set", ")", "for", "structure", ",", "condition", "in", "zip", "(", "cgrs", ",", "groups", ")", ":", "condition_structure", "[", "condition", "]", ".", "add", "(", "structure", ")", "train_data", "=", "defaultdict", "(", "list", ")", "test_data", "=", "[", "]", "for", "n", ",", "(", "structure", ",", "condition", ")", "in", "enumerate", "(", "zip", "(", "cgrs", ",", "groups", ")", ")", ":", "train_data", "[", "structure", "]", ".", "append", "(", "n", ")", "if", "len", "(", "condition_structure", "[", "condition", "]", ")", ">", "1", ":", "test_data", ".", "append", "(", "n", ")", "if", "self", ".", "n_splits", ">", "len", "(", "train_data", ")", ":", "raise", "ValueError", "(", "\"Cannot have number of splits n_splits=%d greater\"", "\" than the number of transformations: %d.\"", "%", "(", "self", ".", "n_splits", ",", "len", "(", "train_data", ")", ")", ")", "structures_weight", "=", "sorted", "(", "(", "(", "x", ",", "len", "(", "y", ")", ")", "for", "x", ",", "y", "in", "train_data", ".", "items", "(", ")", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ",", "reverse", "=", "True", ")", "fold_mean_size", "=", "len", "(", "cgrs", ")", "//", "self", ".", "n_splits", "if", "structures_weight", "[", "0", "]", "[", "1", "]", ">", "fold_mean_size", ":", "warning", "(", "'You have transformation that greater fold size'", ")", "for", "idx", "in", "range", "(", "self", ".", "n_repeats", ")", ":", "train_folds", "=", "[", "[", "]", "for", "_", "in", "range", "(", "self", ".", "n_splits", ")", "]", "for", "structure", ",", "structure_length", "in", "structures_weight", ":", "if", "self", ".", "shuffle", ":", "check_random_state", "(", "self", ".", "random_state", ")", ".", "shuffle", "(", "train_folds", ")", "for", "fold", "in", "train_folds", "[", ":", "-", "1", "]", ":", "if", "len", "(", "fold", ")", "+", "structure_length", "<=", "fold_mean_size", ":", "fold", ".", "extend", "(", "train_data", "[", "structure", "]", ")", "break", "else", ":", "roulette_param", "=", "(", "structure_length", "-", "fold_mean_size", "+", "len", "(", "fold", ")", ")", "/", "structure_length", "if", "random", "(", ")", ">", "roulette_param", ":", "fold", ".", "extend", "(", "train_data", "[", "structure", "]", ")", "break", "else", ":", "train_folds", "[", "-", "1", "]", ".", "extend", "(", "train_data", "[", "structure", "]", ")", "test_folds", "=", "[", "[", "]", "for", "_", "in", "range", "(", "self", ".", "n_splits", ")", "]", "for", "test", ",", "train", "in", "zip", "(", "test_folds", ",", "train_folds", ")", ":", "for", "index", "in", "train", ":", "if", "index", "in", "test_data", ":", "test", ".", "append", "(", "index", ")", "for", "i", "in", "range", "(", "self", ".", "n_splits", ")", ":", "train_index", "=", "[", "]", "for", "fold", "in", "train_folds", "[", ":", "i", "]", ":", "train_index", ".", "extend", "(", "fold", ")", "for", "fold", "in", "train_folds", "[", "i", "+", "1", ":", "]", ":", "train_index", ".", "extend", "(", "fold", ")", "test_index", "=", "test_folds", "[", "i", "]", "yield", "array", "(", "train_index", ")", ",", "array", "(", "test_index", ")" ]
cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3
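A usage sketch; `reactions` and `conditions` are hypothetical placeholders for reaction containers (which must support the ~ CGR-composition operator) and their condition labels, and the constructor keywords are inferred from the attributes used above rather than from a documented signature::

    from CIMtools.model_selection.transformation_out import TransformationOut

    cv = TransformationOut(n_splits=5, n_repeats=1,
                           shuffle=True, random_state=1)  # hypothetical signature
    for train_idx, test_idx in cv.split(reactions, groups=conditions):
        train = [reactions[i] for i in train_idx]
        test = [reactions[i] for i in test_idx]   # reactions whose conditions map to >1 transformation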
valid
CIMtoolsTransformerMixin.fit
Do nothing and return the estimator unchanged. This method is just there to implement the usual API and hence work in pipelines.
CIMtools/base.py
def fit(self, x, y=None): """Do nothing and return the estimator unchanged This method is just there to implement the usual API and hence work in pipelines. """ if self._dtype is not None: iter2array(x, dtype=self._dtype) else: iter2array(x) return self
def fit(self, x, y=None): """Do nothing and return the estimator unchanged This method is just there to implement the usual API and hence work in pipelines. """ if self._dtype is not None: iter2array(x, dtype=self._dtype) else: iter2array(x) return self
[ "Do", "nothing", "and", "return", "the", "estimator", "unchanged" ]
stsouko/CIMtools
python
https://github.com/stsouko/CIMtools/blob/cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3/CIMtools/base.py#L26-L35
[ "def", "fit", "(", "self", ",", "x", ",", "y", "=", "None", ")", ":", "if", "self", ".", "_dtype", "is", "not", "None", ":", "iter2array", "(", "x", ",", "dtype", "=", "self", ".", "_dtype", ")", "else", ":", "iter2array", "(", "x", ")", "return", "self" ]
cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3
valid
Fragmentor.finalize
Finalize the partial fitting procedure.
CIMtools/preprocessing/fragmentor.py
def finalize(self): """ finalize partial fitting procedure """ if self.__head_less: warn(f'{self.__class__.__name__} configured to head less mode. finalize unusable') elif not self.__head_generate: warn(f'{self.__class__.__name__} already finalized or fitted') elif not self.__head_dict: raise NotFittedError(f'{self.__class__.__name__} instance is not fitted yet') else: if self.remove_rare_ratio: self.__clean_head(*self.__head_rare) self.__prepare_header() self.__head_rare = None self.__head_generate = False
def finalize(self): """ finalize partial fitting procedure """ if self.__head_less: warn(f'{self.__class__.__name__} configured to head less mode. finalize unusable') elif not self.__head_generate: warn(f'{self.__class__.__name__} already finalized or fitted') elif not self.__head_dict: raise NotFittedError(f'{self.__class__.__name__} instance is not fitted yet') else: if self.remove_rare_ratio: self.__clean_head(*self.__head_rare) self.__prepare_header() self.__head_rare = None self.__head_generate = False
[ "finalize", "partial", "fitting", "procedure" ]
stsouko/CIMtools
python
https://github.com/stsouko/CIMtools/blob/cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3/CIMtools/preprocessing/fragmentor.py#L116-L131
[ "def", "finalize", "(", "self", ")", ":", "if", "self", ".", "__head_less", ":", "warn", "(", "f'{self.__class__.__name__} configured to head less mode. finalize unusable'", ")", "elif", "not", "self", ".", "__head_generate", ":", "warn", "(", "f'{self.__class__.__name__} already finalized or fitted'", ")", "elif", "not", "self", ".", "__head_dict", ":", "raise", "NotFittedError", "(", "f'{self.__class__.__name__} instance is not fitted yet'", ")", "else", ":", "if", "self", ".", "remove_rare_ratio", ":", "self", ".", "__clean_head", "(", "*", "self", ".", "__head_rare", ")", "self", ".", "__prepare_header", "(", ")", "self", ".", "__head_rare", "=", "None", "self", ".", "__head_generate", "=", "False" ]
cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3
valid
Fragmentor._reset
Reset internal data-dependent state. __init__ parameters are not touched.
CIMtools/preprocessing/fragmentor.py
def _reset(self): """Reset internal data-dependent state. __init__ parameters are not touched. """ if not self.__head_less: if not self.__head_generate: self.__head_generate = True if self.__head_dict: self.__head_dump = self.__head_dict = None if self.__head_rare is not None: self.__head_rare = None self.delete_work_path()
def _reset(self): """Reset internal data-dependent state. __init__ parameters are not touched. """ if not self.__head_less: if not self.__head_generate: self.__head_generate = True if self.__head_dict: self.__head_dump = self.__head_dict = None if self.__head_rare is not None: self.__head_rare = None self.delete_work_path()
[ "Reset", "internal", "data", "-", "dependent", "state", ".", "__init__", "parameters", "are", "not", "touched", "." ]
stsouko/CIMtools
python
https://github.com/stsouko/CIMtools/blob/cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3/CIMtools/preprocessing/fragmentor.py#L133-L145
[ "def", "_reset", "(", "self", ")", ":", "if", "not", "self", ".", "__head_less", ":", "if", "not", "self", ".", "__head_generate", ":", "self", ".", "__head_generate", "=", "True", "if", "self", ".", "__head_dict", ":", "self", ".", "__head_dump", "=", "self", ".", "__head_dict", "=", "None", "if", "self", ".", "__head_rare", "is", "not", "None", ":", "self", ".", "__head_rare", "=", "None", "self", ".", "delete_work_path", "(", ")" ]
cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3
valid
Fragmentor.get_feature_names
Get feature names. Returns ------- feature_names : list of strings Names of the features produced by transform.
CIMtools/preprocessing/fragmentor.py
def get_feature_names(self): """Get feature names. Returns ------- feature_names : list of strings Names of the features produced by transform. """ if self.__head_less: raise AttributeError(f'{self.__class__.__name__} instance configured to head less mode') elif not self.__head_dict: raise NotFittedError(f'{self.__class__.__name__} instance is not fitted yet') return list(self.__head_dict.values())
def get_feature_names(self): """Get feature names. Returns ------- feature_names : list of strings Names of the features produced by transform. """ if self.__head_less: raise AttributeError(f'{self.__class__.__name__} instance configured to head less mode') elif not self.__head_dict: raise NotFittedError(f'{self.__class__.__name__} instance is not fitted yet') return list(self.__head_dict.values())
[ "Get", "feature", "names", "." ]
stsouko/CIMtools
python
https://github.com/stsouko/CIMtools/blob/cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3/CIMtools/preprocessing/fragmentor.py#L147-L159
[ "def", "get_feature_names", "(", "self", ")", ":", "if", "self", ".", "__head_less", ":", "raise", "AttributeError", "(", "f'{self.__class__.__name__} instance configured to head less mode'", ")", "elif", "not", "self", ".", "__head_dict", ":", "raise", "NotFittedError", "(", "f'{self.__class__.__name__} instance is not fitted yet'", ")", "return", "list", "(", "self", ".", "__head_dict", ".", "values", "(", ")", ")" ]
cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3
valid
Fragmentor.fit
Compute the header.
CIMtools/preprocessing/fragmentor.py
def fit(self, x, y=None): """Compute the header. """ x = iter2array(x, dtype=(MoleculeContainer, CGRContainer)) if self.__head_less: warn(f'{self.__class__.__name__} configured to head less mode. fit unusable') return self self._reset() self.__prepare(x) return self
def fit(self, x, y=None): """Compute the header. """ x = iter2array(x, dtype=(MoleculeContainer, CGRContainer)) if self.__head_less: warn(f'{self.__class__.__name__} configured to head less mode. fit unusable') return self self._reset() self.__prepare(x) return self
[ "Compute", "the", "header", "." ]
stsouko/CIMtools
python
https://github.com/stsouko/CIMtools/blob/cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3/CIMtools/preprocessing/fragmentor.py#L161-L172
[ "def", "fit", "(", "self", ",", "x", ",", "y", "=", "None", ")", ":", "x", "=", "iter2array", "(", "x", ",", "dtype", "=", "(", "MoleculeContainer", ",", "CGRContainer", ")", ")", "if", "self", ".", "__head_less", ":", "warn", "(", "f'{self.__class__.__name__} configured to head less mode. fit unusable'", ")", "return", "self", "self", ".", "_reset", "(", ")", "self", ".", "__prepare", "(", "x", ")", "return", "self" ]
cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3
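The four Fragmentor records above fit together as a workflow: fit computes the fragment header, get_feature_names exposes it, and finalize closes out an incremental (partial) fit. A sketch, noting that Fragmentor wraps the external ISIDA Fragmentor executable (assumed to be installed) and that `molecules` is a placeholder iterable of MoleculeContainer objects::

    from CIMtools.preprocessing.fragmentor import Fragmentor

    f = Fragmentor()                # hypothetical default construction
    f.fit(molecules)                # computes the fragment header
    names = f.get_feature_names()   # raises NotFittedError if called before fitting
    X = f.transform(molecules)      # descriptor matrix with len(names) columns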
valid
ReactionTypeControl.fit
Fit structure-based AD. The training model memorizes the unique set of reaction signatures. Parameters ---------- X : reactions, e.g. as read from an RDF file Returns ------- self : object
CIMtools/applicability_domain/reaction_type_control.py
def fit(self, X): """Fit structure-based AD. The training model memorizes the unique set of reaction signature. Parameters ---------- X : after read rdf file Returns ------- self : object """ X = iter2array(X, dtype=ReactionContainer) self._train_signatures = {self.__get_signature(x) for x in X} return self
def fit(self, X): """Fit structure-based AD. The training model memorizes the unique set of reaction signature. Parameters ---------- X : after read rdf file Returns ------- self : object """ X = iter2array(X, dtype=ReactionContainer) self._train_signatures = {self.__get_signature(x) for x in X} return self
[ "Fit", "structure", "-", "based", "AD", ".", "The", "training", "model", "memorizes", "the", "unique", "set", "of", "reaction", "signature", "." ]
stsouko/CIMtools
python
https://github.com/stsouko/CIMtools/blob/cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3/CIMtools/applicability_domain/reaction_type_control.py#L51-L64
[ "def", "fit", "(", "self", ",", "X", ")", ":", "X", "=", "iter2array", "(", "X", ",", "dtype", "=", "ReactionContainer", ")", "self", ".", "_train_signatures", "=", "{", "self", ".", "__get_signature", "(", "x", ")", "for", "x", "in", "X", "}", "return", "self" ]
cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3
valid
ReactionTypeControl.predict
A reaction is considered to belong to the model’s AD if its reaction signature coincides with one used in the training set. Parameters ---------- X : reactions, e.g. as read from an RDF file Returns ------- self : array containing True (reaction inside AD) and False (reaction outside AD).
CIMtools/applicability_domain/reaction_type_control.py
def predict(self, X): """Reaction is considered belonging to model’s AD if its reaction signature coincides with ones used in training set. Parameters ---------- X : after read rdf file Returns ------- self : array contains True (reaction in AD) and False (reaction residing outside AD). """ check_is_fitted(self, ['_train_signatures']) X = iter2array(X, dtype=ReactionContainer) return array([self.__get_signature(x) in self._train_signatures for x in X])
def predict(self, X): """Reaction is considered belonging to model’s AD if its reaction signature coincides with ones used in training set. Parameters ---------- X : after read rdf file Returns ------- self : array contains True (reaction in AD) and False (reaction residing outside AD). """ check_is_fitted(self, ['_train_signatures']) X = iter2array(X, dtype=ReactionContainer) return array([self.__get_signature(x) in self._train_signatures for x in X])
[ "Reaction", "is", "considered", "belonging", "to", "model’s", "AD", "if", "its", "reaction", "signature", "coincides", "with", "ones", "used", "in", "training", "set", "." ]
stsouko/CIMtools
python
https://github.com/stsouko/CIMtools/blob/cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3/CIMtools/applicability_domain/reaction_type_control.py#L66-L80
[ "def", "predict", "(", "self", ",", "X", ")", ":", "check_is_fitted", "(", "self", ",", "[", "'_train_signatures'", "]", ")", "X", "=", "iter2array", "(", "X", ",", "dtype", "=", "ReactionContainer", ")", "return", "array", "(", "[", "self", ".", "__get_signature", "(", "x", ")", "in", "self", ".", "_train_signatures", "for", "x", "in", "X", "]", ")" ]
cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3
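A usage sketch for the two ReactionTypeControl records, assuming reactions are read with CGRtools' RDFread (CIMtools builds on CGRtools) and a constructor with no required arguments; the file names are placeholders::

    from CGRtools.files import RDFread
    from CIMtools.applicability_domain.reaction_type_control import ReactionTypeControl

    with RDFread('train.rdf') as f:
        train = list(f)
    with RDFread('test.rdf') as f:
        test = list(f)

    ad = ReactionTypeControl().fit(train)
    mask = ad.predict(test)   # True where the reaction signature was seen during training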
valid
Eval.__parser
adapted from Paul McGuire's fourFn.py example: http://pyparsing.wikispaces.com/file/view/fourFn.py
CIMtools/preprocessing/equation.py
def __parser(expression): """ adopted from Paul McGuire example. http://pyparsing.wikispaces.com/file/view/fourFn.py """ expr_stack = [] def push_first(strg, loc, toks): expr_stack.append(toks[0]) def push_u_minus(strg, loc, toks): if toks and toks[0] == '-': expr_stack.append('unary -') point = Literal('.') _e = CaselessLiteral('E') fnumber = Combine(Word('+-' + nums, nums) + Optional(point + Optional(Word(nums))) + Optional(_e + Word('+-' + nums, nums))) ident = Word(alphas, alphas + nums + '_$') plus = Literal("+") minus = Literal("-") mult = Literal("*") div = Literal("/") lpar = Literal("(").suppress() rpar = Literal(")").suppress() addop = plus | minus multop = mult | div expop = Literal("^") _pi = CaselessLiteral("PI") x = CaselessLiteral("X") expr = Forward() atom = (Optional("-") + (x | _pi | _e | fnumber | ident + lpar + expr + rpar).setParseAction(push_first) | (lpar + expr.suppress() + rpar)).setParseAction(push_u_minus) factor = Forward() factor << atom + ZeroOrMore((expop + factor).setParseAction(push_first)) term = factor + ZeroOrMore((multop + factor).setParseAction(push_first)) expr << term + ZeroOrMore((addop + term).setParseAction(push_first)) expr.parseString(expression) return expr_stack
def __parser(expression): """ adopted from Paul McGuire example. http://pyparsing.wikispaces.com/file/view/fourFn.py """ expr_stack = [] def push_first(strg, loc, toks): expr_stack.append(toks[0]) def push_u_minus(strg, loc, toks): if toks and toks[0] == '-': expr_stack.append('unary -') point = Literal('.') _e = CaselessLiteral('E') fnumber = Combine(Word('+-' + nums, nums) + Optional(point + Optional(Word(nums))) + Optional(_e + Word('+-' + nums, nums))) ident = Word(alphas, alphas + nums + '_$') plus = Literal("+") minus = Literal("-") mult = Literal("*") div = Literal("/") lpar = Literal("(").suppress() rpar = Literal(")").suppress() addop = plus | minus multop = mult | div expop = Literal("^") _pi = CaselessLiteral("PI") x = CaselessLiteral("X") expr = Forward() atom = (Optional("-") + (x | _pi | _e | fnumber | ident + lpar + expr + rpar).setParseAction(push_first) | (lpar + expr.suppress() + rpar)).setParseAction(push_u_minus) factor = Forward() factor << atom + ZeroOrMore((expop + factor).setParseAction(push_first)) term = factor + ZeroOrMore((multop + factor).setParseAction(push_first)) expr << term + ZeroOrMore((addop + term).setParseAction(push_first)) expr.parseString(expression) return expr_stack
[ "adopted", "from", "Paul", "McGuire", "example", ".", "http", ":", "//", "pyparsing", ".", "wikispaces", ".", "com", "/", "file", "/", "view", "/", "fourFn", ".", "py" ]
stsouko/CIMtools
python
https://github.com/stsouko/CIMtools/blob/cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3/CIMtools/preprocessing/equation.py#L58-L100
[ "def", "__parser", "(", "expression", ")", ":", "expr_stack", "=", "[", "]", "def", "push_first", "(", "strg", ",", "loc", ",", "toks", ")", ":", "expr_stack", ".", "append", "(", "toks", "[", "0", "]", ")", "def", "push_u_minus", "(", "strg", ",", "loc", ",", "toks", ")", ":", "if", "toks", "and", "toks", "[", "0", "]", "==", "'-'", ":", "expr_stack", ".", "append", "(", "'unary -'", ")", "point", "=", "Literal", "(", "'.'", ")", "_e", "=", "CaselessLiteral", "(", "'E'", ")", "fnumber", "=", "Combine", "(", "Word", "(", "'+-'", "+", "nums", ",", "nums", ")", "+", "Optional", "(", "point", "+", "Optional", "(", "Word", "(", "nums", ")", ")", ")", "+", "Optional", "(", "_e", "+", "Word", "(", "'+-'", "+", "nums", ",", "nums", ")", ")", ")", "ident", "=", "Word", "(", "alphas", ",", "alphas", "+", "nums", "+", "'_$'", ")", "plus", "=", "Literal", "(", "\"+\"", ")", "minus", "=", "Literal", "(", "\"-\"", ")", "mult", "=", "Literal", "(", "\"*\"", ")", "div", "=", "Literal", "(", "\"/\"", ")", "lpar", "=", "Literal", "(", "\"(\"", ")", ".", "suppress", "(", ")", "rpar", "=", "Literal", "(", "\")\"", ")", ".", "suppress", "(", ")", "addop", "=", "plus", "|", "minus", "multop", "=", "mult", "|", "div", "expop", "=", "Literal", "(", "\"^\"", ")", "_pi", "=", "CaselessLiteral", "(", "\"PI\"", ")", "x", "=", "CaselessLiteral", "(", "\"X\"", ")", "expr", "=", "Forward", "(", ")", "atom", "=", "(", "Optional", "(", "\"-\"", ")", "+", "(", "x", "|", "_pi", "|", "_e", "|", "fnumber", "|", "ident", "+", "lpar", "+", "expr", "+", "rpar", ")", ".", "setParseAction", "(", "push_first", ")", "|", "(", "lpar", "+", "expr", ".", "suppress", "(", ")", "+", "rpar", ")", ")", ".", "setParseAction", "(", "push_u_minus", ")", "factor", "=", "Forward", "(", ")", "factor", "<<", "atom", "+", "ZeroOrMore", "(", "(", "expop", "+", "factor", ")", ".", "setParseAction", "(", "push_first", ")", ")", "term", "=", "factor", "+", "ZeroOrMore", "(", "(", "multop", "+", "factor", ")", ".", "setParseAction", "(", "push_first", ")", ")", "expr", "<<", "term", "+", "ZeroOrMore", "(", "(", "addop", "+", "term", ")", ".", "setParseAction", "(", "push_first", ")", ")", "expr", ".", "parseString", "(", "expression", ")", "return", "expr_stack" ]
cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3
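The parser converts an infix expression into a postfix (RPN) token stack via parse actions; "2*X+1", for instance, yields ['2', 'X', '*', '1', '+']. The evaluator that consumes the stack is not part of this record; a minimal sketch of one (function identifiers such as sin(...) are not handled here)::

    import math
    import operator

    OPS = {'+': operator.add, '-': operator.sub, '*': operator.mul,
           '/': operator.truediv, '^': operator.pow}

    def evaluate(stack, x):
        """Reduce a postfix stack produced by the parser above."""
        token = stack.pop()
        if token == 'unary -':
            return -evaluate(stack, x)
        if token in OPS:
            right = evaluate(stack, x)   # operands come off in reverse order
            left = evaluate(stack, x)
            return OPS[token](left, right)
        if token == 'X':
            return x
        if token == 'PI':
            return math.pi
        if token == 'E':
            return math.e
        return float(token)

    print(evaluate(['2', 'X', '*', '1', '+'], x=3))   # 7.0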
valid
from_int
:param data: integer :returns: proquint made from input data :type data: int :rtype: string
morango/utils/proquint.py
def from_int(data): """ :params data: integer :returns: proquint made from input data :type data: int :rtype: string """ if not isinstance(data, int) and not isinstance(data, long): raise TypeError('Input must be integer') res = [] while data > 0 or not res: for j in range(5): if not j % 2: res += CONSONANTS[(data & 0xf)] data >>= 4 else: res += VOWELS[(data & 0x3)] data >>= 2 if data > 0: res += '-' res.reverse() return ''.join(res)
def from_int(data): """ :params data: integer :returns: proquint made from input data :type data: int :rtype: string """ if not isinstance(data, int) and not isinstance(data, long): raise TypeError('Input must be integer') res = [] while data > 0 or not res: for j in range(5): if not j % 2: res += CONSONANTS[(data & 0xf)] data >>= 4 else: res += VOWELS[(data & 0x3)] data >>= 2 if data > 0: res += '-' res.reverse() return ''.join(res)
[ ":", "params", "data", ":", "integer", ":", "returns", ":", "proquint", "made", "from", "input", "data", ":", "type", "data", ":", "int", ":", "rtype", ":", "string" ]
learningequality/morango
python
https://github.com/learningequality/morango/blob/c3ec2554b026f65ac5f0fc5c9d439277fbac14f9/morango/utils/proquint.py#L57-L79
[ "def", "from_int", "(", "data", ")", ":", "if", "not", "isinstance", "(", "data", ",", "int", ")", "and", "not", "isinstance", "(", "data", ",", "long", ")", ":", "raise", "TypeError", "(", "'Input must be integer'", ")", "res", "=", "[", "]", "while", "data", ">", "0", "or", "not", "res", ":", "for", "j", "in", "range", "(", "5", ")", ":", "if", "not", "j", "%", "2", ":", "res", "+=", "CONSONANTS", "[", "(", "data", "&", "0xf", ")", "]", "data", ">>=", "4", "else", ":", "res", "+=", "VOWELS", "[", "(", "data", "&", "0x3", ")", "]", "data", ">>=", "2", "if", "data", ">", "0", ":", "res", "+=", "'-'", "res", ".", "reverse", "(", ")", "return", "''", ".", "join", "(", "res", ")" ]
c3ec2554b026f65ac5f0fc5c9d439277fbac14f9
valid
to_int
:param data: proquint :returns: proquint decoded into an integer :type data: string :rtype: int
morango/utils/proquint.py
def to_int(data): """ :params data: proquint :returns: proquint decoded into an integer :type data: string :rtype: int """ if not isinstance(data, basestring): raise TypeError('Input must be string') res = 0 for part in data.split('-'): if len(part) != 5: raise ValueError('Malformed proquint') for j in range(5): try: if not j % 2: res <<= 4 res |= CONSONANTS.index(part[j]) else: res <<= 2 res |= VOWELS.index(part[j]) except ValueError: raise ValueError('Unknown character \'{!s}\' in proquint'.format(part[j])) return res
def to_int(data): """ :params data: proquint :returns: proquint decoded into an integer :type data: string :rtype: int """ if not isinstance(data, basestring): raise TypeError('Input must be string') res = 0 for part in data.split('-'): if len(part) != 5: raise ValueError('Malformed proquint') for j in range(5): try: if not j % 2: res <<= 4 res |= CONSONANTS.index(part[j]) else: res <<= 2 res |= VOWELS.index(part[j]) except ValueError: raise ValueError('Unknown character \'{!s}\' in proquint'.format(part[j])) return res
[ ":", "params", "data", ":", "proquint", ":", "returns", ":", "proquint", "decoded", "into", "an", "integer", ":", "type", "data", ":", "string", ":", "rtype", ":", "int" ]
learningequality/morango
python
https://github.com/learningequality/morango/blob/c3ec2554b026f65ac5f0fc5c9d439277fbac14f9/morango/utils/proquint.py#L82-L106
[ "def", "to_int", "(", "data", ")", ":", "if", "not", "isinstance", "(", "data", ",", "basestring", ")", ":", "raise", "TypeError", "(", "'Input must be string'", ")", "res", "=", "0", "for", "part", "in", "data", ".", "split", "(", "'-'", ")", ":", "if", "len", "(", "part", ")", "!=", "5", ":", "raise", "ValueError", "(", "'Malformed proquint'", ")", "for", "j", "in", "range", "(", "5", ")", ":", "try", ":", "if", "not", "j", "%", "2", ":", "res", "<<=", "4", "res", "|=", "CONSONANTS", ".", "index", "(", "part", "[", "j", "]", ")", "else", ":", "res", "<<=", "2", "res", "|=", "VOWELS", ".", "index", "(", "part", "[", "j", "]", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'Unknown character \\'{!s}\\' in proquint'", ".", "format", "(", "part", "[", "j", "]", ")", ")", "return", "res" ]
c3ec2554b026f65ac5f0fc5c9d439277fbac14f9
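to_int is the inverse of from_int, so a round trip should be the identity, and malformed five-character groups raise ValueError. Note the record's code predates Python 3 (basestring, long); the sketch below assumes both functions are pasted into one Python 3 module with these shims at the top:

basestring = str  # Py3 shims for the Py2-era isinstance checks
long = int

assert to_int('lusab-babad') == 0x7F000001
assert to_int(from_int(0xCAFE)) == 0xCAFE  # round trip is the identity

try:
    to_int('lusab-bab')  # second group has only 3 characters
except ValueError as e:
    print(e)  # Malformed proquint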
valid
SharedKey.get_or_create_shared_key
Create a shared public/private key pair for certificate pushing, if the settings allow.
morango/crypto.py
def get_or_create_shared_key(cls, force_new=False): """ Create a shared public/private key pair for certificate pushing, if the settings allow. """ if force_new: with transaction.atomic(): SharedKey.objects.filter(current=True).update(current=False) key = Key() return SharedKey.objects.create(public_key=key, private_key=key, current=True) # create a new shared key if one doesn't exist try: return SharedKey.objects.get(current=True) except SharedKey.DoesNotExist: key = Key() return SharedKey.objects.create(public_key=key, private_key=key, current=True)
def get_or_create_shared_key(cls, force_new=False): """ Create a shared public/private key pair for certificate pushing, if the settings allow. """ if force_new: with transaction.atomic(): SharedKey.objects.filter(current=True).update(current=False) key = Key() return SharedKey.objects.create(public_key=key, private_key=key, current=True) # create a new shared key if one doesn't exist try: return SharedKey.objects.get(current=True) except SharedKey.DoesNotExist: key = Key() return SharedKey.objects.create(public_key=key, private_key=key, current=True)
[ "Create", "a", "shared", "public", "/", "private", "key", "pair", "for", "certificate", "pushing", "if", "the", "settings", "allow", "." ]
learningequality/morango
python
https://github.com/learningequality/morango/blob/c3ec2554b026f65ac5f0fc5c9d439277fbac14f9/morango/crypto.py#L359-L378
[ "def", "get_or_create_shared_key", "(", "cls", ",", "force_new", "=", "False", ")", ":", "if", "force_new", ":", "with", "transaction", ".", "atomic", "(", ")", ":", "SharedKey", ".", "objects", ".", "filter", "(", "current", "=", "True", ")", ".", "update", "(", "current", "=", "False", ")", "key", "=", "Key", "(", ")", "return", "SharedKey", ".", "objects", ".", "create", "(", "public_key", "=", "key", ",", "private_key", "=", "key", ",", "current", "=", "True", ")", "# create a new shared key if one doesn't exist", "try", ":", "return", "SharedKey", ".", "objects", ".", "get", "(", "current", "=", "True", ")", "except", "SharedKey", ".", "DoesNotExist", ":", "key", "=", "Key", "(", ")", "return", "SharedKey", ".", "objects", ".", "create", "(", "public_key", "=", "key", ",", "private_key", "=", "key", ",", "current", "=", "True", ")" ]
c3ec2554b026f65ac5f0fc5c9d439277fbac14f9
valid
_self_referential_fk
Return whether this model has a self ref FK, and the name for the field
morango/controller.py
def _self_referential_fk(klass_model): """ Return whether this model has a self ref FK, and the name for the field """ for f in klass_model._meta.concrete_fields: if f.related_model: if issubclass(klass_model, f.related_model): return f.attname return None
def _self_referential_fk(klass_model): """ Return whether this model has a self ref FK, and the name for the field """ for f in klass_model._meta.concrete_fields: if f.related_model: if issubclass(klass_model, f.related_model): return f.attname return None
[ "Return", "whether", "this", "model", "has", "a", "self", "ref", "FK", "and", "the", "name", "for", "the", "field" ]
learningequality/morango
python
https://github.com/learningequality/morango/blob/c3ec2554b026f65ac5f0fc5c9d439277fbac14f9/morango/controller.py#L5-L13
[ "def", "_self_referential_fk", "(", "klass_model", ")", ":", "for", "f", "in", "klass_model", ".", "_meta", ".", "concrete_fields", ":", "if", "f", ".", "related_model", ":", "if", "issubclass", "(", "klass_model", ",", "f", ".", "related_model", ")", ":", "return", "f", ".", "attname", "return", "None" ]
c3ec2554b026f65ac5f0fc5c9d439277fbac14f9
valid
InstanceIDModel.get_or_create_current_instance
Get the instance model corresponding to the current system, or create a new one if the system is new or its properties have changed (e.g. OS from upgrade).
morango/models.py
def get_or_create_current_instance(cls): """Get the instance model corresponding to the current system, or create a new one if the system is new or its properties have changed (e.g. OS from upgrade).""" # on Android, platform.platform() barfs, so we handle that safely here try: plat = platform.platform() except: plat = "Unknown (Android?)" kwargs = { "platform": plat, "hostname": platform.node(), "sysversion": sys.version, "database": DatabaseIDModel.get_or_create_current_database_id(), "db_path": os.path.abspath(settings.DATABASES['default']['NAME']), "system_id": os.environ.get("MORANGO_SYSTEM_ID", ""), } # try to get the MAC address, but exclude it if it was a fake (random) address mac = uuid.getnode() if (mac >> 40) % 2 == 0: # 8th bit (of 48 bits, from left) is 1 if MAC is fake hashable_identifier = "{}:{}".format(kwargs['database'].id, mac) kwargs["node_id"] = hashlib.sha1(hashable_identifier.encode('utf-8')).hexdigest()[:20] else: kwargs["node_id"] = "" # do within transaction so we only ever have 1 current instance ID with transaction.atomic(): InstanceIDModel.objects.filter(current=True).update(current=False) obj, created = InstanceIDModel.objects.get_or_create(**kwargs) obj.current = True obj.save() return obj, created
def get_or_create_current_instance(cls): """Get the instance model corresponding to the current system, or create a new one if the system is new or its properties have changed (e.g. OS from upgrade).""" # on Android, platform.platform() barfs, so we handle that safely here try: plat = platform.platform() except: plat = "Unknown (Android?)" kwargs = { "platform": plat, "hostname": platform.node(), "sysversion": sys.version, "database": DatabaseIDModel.get_or_create_current_database_id(), "db_path": os.path.abspath(settings.DATABASES['default']['NAME']), "system_id": os.environ.get("MORANGO_SYSTEM_ID", ""), } # try to get the MAC address, but exclude it if it was a fake (random) address mac = uuid.getnode() if (mac >> 40) % 2 == 0: # 8th bit (of 48 bits, from left) is 1 if MAC is fake hashable_identifier = "{}:{}".format(kwargs['database'].id, mac) kwargs["node_id"] = hashlib.sha1(hashable_identifier.encode('utf-8')).hexdigest()[:20] else: kwargs["node_id"] = "" # do within transaction so we only ever have 1 current instance ID with transaction.atomic(): InstanceIDModel.objects.filter(current=True).update(current=False) obj, created = InstanceIDModel.objects.get_or_create(**kwargs) obj.current = True obj.save() return obj, created
[ "Get", "the", "instance", "model", "corresponding", "to", "the", "current", "system", "or", "create", "a", "new", "one", "if", "the", "system", "is", "new", "or", "its", "properties", "have", "changed", "(", "e", ".", "g", ".", "OS", "from", "upgrade", ")", "." ]
learningequality/morango
python
https://github.com/learningequality/morango/blob/c3ec2554b026f65ac5f0fc5c9d439277fbac14f9/morango/models.py#L100-L134
[ "def", "get_or_create_current_instance", "(", "cls", ")", ":", "# on Android, platform.platform() barfs, so we handle that safely here", "try", ":", "plat", "=", "platform", ".", "platform", "(", ")", "except", ":", "plat", "=", "\"Unknown (Android?)\"", "kwargs", "=", "{", "\"platform\"", ":", "plat", ",", "\"hostname\"", ":", "platform", ".", "node", "(", ")", ",", "\"sysversion\"", ":", "sys", ".", "version", ",", "\"database\"", ":", "DatabaseIDModel", ".", "get_or_create_current_database_id", "(", ")", ",", "\"db_path\"", ":", "os", ".", "path", ".", "abspath", "(", "settings", ".", "DATABASES", "[", "'default'", "]", "[", "'NAME'", "]", ")", ",", "\"system_id\"", ":", "os", ".", "environ", ".", "get", "(", "\"MORANGO_SYSTEM_ID\"", ",", "\"\"", ")", ",", "}", "# try to get the MAC address, but exclude it if it was a fake (random) address", "mac", "=", "uuid", ".", "getnode", "(", ")", "if", "(", "mac", ">>", "40", ")", "%", "2", "==", "0", ":", "# 8th bit (of 48 bits, from left) is 1 if MAC is fake", "hashable_identifier", "=", "\"{}:{}\"", ".", "format", "(", "kwargs", "[", "'database'", "]", ".", "id", ",", "mac", ")", "kwargs", "[", "\"node_id\"", "]", "=", "hashlib", ".", "sha1", "(", "hashable_identifier", ".", "encode", "(", "'utf-8'", ")", ")", ".", "hexdigest", "(", ")", "[", ":", "20", "]", "else", ":", "kwargs", "[", "\"node_id\"", "]", "=", "\"\"", "# do within transaction so we only ever have 1 current instance ID", "with", "transaction", ".", "atomic", "(", ")", ":", "InstanceIDModel", ".", "objects", ".", "filter", "(", "current", "=", "True", ")", ".", "update", "(", "current", "=", "False", ")", "obj", ",", "created", "=", "InstanceIDModel", ".", "objects", ".", "get_or_create", "(", "*", "*", "kwargs", ")", "obj", ".", "current", "=", "True", "obj", ".", "save", "(", ")", "return", "obj", ",", "created" ]
c3ec2554b026f65ac5f0fc5c9d439277fbac14f9
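The MAC check above leans on a detail of uuid.getnode(): it returns a 48-bit integer, and when no hardware address can be read it fabricates a random one with the multicast bit set, which is the 8th bit from the left, i.e. bit 40 from the least significant end. The node_id derivation itself is plain stdlib, shown here with a placeholder database id (the real one comes from DatabaseIDModel):

import hashlib
import uuid

mac = uuid.getnode()
is_real_mac = (mac >> 40) % 2 == 0  # multicast bit clear => likely a real address

database_id = 'a' * 32  # placeholder for the current database id
if is_real_mac:
    node_id = hashlib.sha1('{}:{}'.format(database_id, mac).encode('utf-8')).hexdigest()[:20]
else:
    node_id = ''
print(node_id)  # 20 hex characters, or '' when the MAC was randomly generated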
valid
Store._deserialize_store_model
When deserializing a store model, we look at the deleted flags to know if we should delete the app model. Upon loading the app model in memory we validate the app models fields, if any errors occurs we follow foreign key relationships to see if the related model has been deleted to propagate that deletion to the target app model. We return: None => if the model was deleted successfully model => if the model validates successfully
morango/models.py
def _deserialize_store_model(self, fk_cache): """ When deserializing a store model, we look at the deleted flags to know if we should delete the app model. Upon loading the app model in memory we validate the app models fields, if any errors occurs we follow foreign key relationships to see if the related model has been deleted to propagate that deletion to the target app model. We return: None => if the model was deleted successfully model => if the model validates successfully """ klass_model = _profile_models[self.profile][self.model_name] # if store model marked as deleted, attempt to delete in app layer if self.deleted: # if hard deleted, propagate to related models if self.hard_deleted: try: klass_model.objects.get(id=self.id).delete(hard_delete=True) except klass_model.DoesNotExist: pass else: klass_model.objects.filter(id=self.id).delete() return None else: # load model into memory app_model = klass_model.deserialize(json.loads(self.serialized)) app_model._morango_source_id = self.source_id app_model._morango_partition = self.partition app_model._morango_dirty_bit = False try: # validate and return the model app_model.cached_clean_fields(fk_cache) return app_model except exceptions.ValidationError as e: logger.warn("Validation error for {model} with id {id}: {error}".format(model=klass_model.__name__, id=app_model.id, error=e)) # check FKs in store to see if any of those models were deleted or hard_deleted to propagate to this model fk_ids = [getattr(app_model, field.attname) for field in app_model._meta.fields if isinstance(field, ForeignKey)] for fk_id in fk_ids: try: st_model = Store.objects.get(id=fk_id) if st_model.deleted: # if hard deleted, propagate to store model if st_model.hard_deleted: app_model._update_hard_deleted_models() app_model._update_deleted_models() return None except Store.DoesNotExist: pass raise e
def _deserialize_store_model(self, fk_cache): """ When deserializing a store model, we look at the deleted flags to know if we should delete the app model. Upon loading the app model in memory we validate the app models fields, if any errors occurs we follow foreign key relationships to see if the related model has been deleted to propagate that deletion to the target app model. We return: None => if the model was deleted successfully model => if the model validates successfully """ klass_model = _profile_models[self.profile][self.model_name] # if store model marked as deleted, attempt to delete in app layer if self.deleted: # if hard deleted, propagate to related models if self.hard_deleted: try: klass_model.objects.get(id=self.id).delete(hard_delete=True) except klass_model.DoesNotExist: pass else: klass_model.objects.filter(id=self.id).delete() return None else: # load model into memory app_model = klass_model.deserialize(json.loads(self.serialized)) app_model._morango_source_id = self.source_id app_model._morango_partition = self.partition app_model._morango_dirty_bit = False try: # validate and return the model app_model.cached_clean_fields(fk_cache) return app_model except exceptions.ValidationError as e: logger.warn("Validation error for {model} with id {id}: {error}".format(model=klass_model.__name__, id=app_model.id, error=e)) # check FKs in store to see if any of those models were deleted or hard_deleted to propagate to this model fk_ids = [getattr(app_model, field.attname) for field in app_model._meta.fields if isinstance(field, ForeignKey)] for fk_id in fk_ids: try: st_model = Store.objects.get(id=fk_id) if st_model.deleted: # if hard deleted, propagate to store model if st_model.hard_deleted: app_model._update_hard_deleted_models() app_model._update_deleted_models() return None except Store.DoesNotExist: pass raise e
[ "When", "deserializing", "a", "store", "model", "we", "look", "at", "the", "deleted", "flags", "to", "know", "if", "we", "should", "delete", "the", "app", "model", ".", "Upon", "loading", "the", "app", "model", "in", "memory", "we", "validate", "the", "app", "models", "fields", "if", "any", "errors", "occurs", "we", "follow", "foreign", "key", "relationships", "to", "see", "if", "the", "related", "model", "has", "been", "deleted", "to", "propagate", "that", "deletion", "to", "the", "target", "app", "model", ".", "We", "return", ":", "None", "=", ">", "if", "the", "model", "was", "deleted", "successfully", "model", "=", ">", "if", "the", "model", "validates", "successfully" ]
learningequality/morango
python
https://github.com/learningequality/morango/blob/c3ec2554b026f65ac5f0fc5c9d439277fbac14f9/morango/models.py#L288-L335
[ "def", "_deserialize_store_model", "(", "self", ",", "fk_cache", ")", ":", "klass_model", "=", "_profile_models", "[", "self", ".", "profile", "]", "[", "self", ".", "model_name", "]", "# if store model marked as deleted, attempt to delete in app layer", "if", "self", ".", "deleted", ":", "# if hard deleted, propagate to related models", "if", "self", ".", "hard_deleted", ":", "try", ":", "klass_model", ".", "objects", ".", "get", "(", "id", "=", "self", ".", "id", ")", ".", "delete", "(", "hard_delete", "=", "True", ")", "except", "klass_model", ".", "DoesNotExist", ":", "pass", "else", ":", "klass_model", ".", "objects", ".", "filter", "(", "id", "=", "self", ".", "id", ")", ".", "delete", "(", ")", "return", "None", "else", ":", "# load model into memory", "app_model", "=", "klass_model", ".", "deserialize", "(", "json", ".", "loads", "(", "self", ".", "serialized", ")", ")", "app_model", ".", "_morango_source_id", "=", "self", ".", "source_id", "app_model", ".", "_morango_partition", "=", "self", ".", "partition", "app_model", ".", "_morango_dirty_bit", "=", "False", "try", ":", "# validate and return the model", "app_model", ".", "cached_clean_fields", "(", "fk_cache", ")", "return", "app_model", "except", "exceptions", ".", "ValidationError", "as", "e", ":", "logger", ".", "warn", "(", "\"Validation error for {model} with id {id}: {error}\"", ".", "format", "(", "model", "=", "klass_model", ".", "__name__", ",", "id", "=", "app_model", ".", "id", ",", "error", "=", "e", ")", ")", "# check FKs in store to see if any of those models were deleted or hard_deleted to propagate to this model", "fk_ids", "=", "[", "getattr", "(", "app_model", ",", "field", ".", "attname", ")", "for", "field", "in", "app_model", ".", "_meta", ".", "fields", "if", "isinstance", "(", "field", ",", "ForeignKey", ")", "]", "for", "fk_id", "in", "fk_ids", ":", "try", ":", "st_model", "=", "Store", ".", "objects", ".", "get", "(", "id", "=", "fk_id", ")", "if", "st_model", ".", "deleted", ":", "# if hard deleted, propagate to store model", "if", "st_model", ".", "hard_deleted", ":", "app_model", ".", "_update_hard_deleted_models", "(", ")", "app_model", ".", "_update_deleted_models", "(", ")", "return", "None", "except", "Store", ".", "DoesNotExist", ":", "pass", "raise", "e" ]
c3ec2554b026f65ac5f0fc5c9d439277fbac14f9
valid
SyncableModel.serialize
All concrete fields of the ``SyncableModel`` subclass, except for those specifically blacklisted, are returned in a dict.
morango/models.py
def serialize(self): """All concrete fields of the ``SyncableModel`` subclass, except for those specifically blacklisted, are returned in a dict.""" # NOTE: code adapted from https://github.com/django/django/blob/master/django/forms/models.py#L75 opts = self._meta data = {} for f in opts.concrete_fields: if f.attname in self.morango_fields_not_to_serialize: continue if f.attname in self._morango_internal_fields_not_to_serialize: continue # case if model is morango mptt if f.attname in getattr(self, '_internal_mptt_fields_not_to_serialize', '_internal_fields_not_to_serialize'): continue if hasattr(f, 'value_from_object_json_compatible'): data[f.attname] = f.value_from_object_json_compatible(self) else: data[f.attname] = f.value_from_object(self) return data
def serialize(self): """All concrete fields of the ``SyncableModel`` subclass, except for those specifically blacklisted, are returned in a dict.""" # NOTE: code adapted from https://github.com/django/django/blob/master/django/forms/models.py#L75 opts = self._meta data = {} for f in opts.concrete_fields: if f.attname in self.morango_fields_not_to_serialize: continue if f.attname in self._morango_internal_fields_not_to_serialize: continue # case if model is morango mptt if f.attname in getattr(self, '_internal_mptt_fields_not_to_serialize', '_internal_fields_not_to_serialize'): continue if hasattr(f, 'value_from_object_json_compatible'): data[f.attname] = f.value_from_object_json_compatible(self) else: data[f.attname] = f.value_from_object(self) return data
[ "All", "concrete", "fields", "of", "the", "SyncableModel", "subclass", "except", "for", "those", "specifically", "blacklisted", "are", "returned", "in", "a", "dict", "." ]
learningequality/morango
python
https://github.com/learningequality/morango/blob/c3ec2554b026f65ac5f0fc5c9d439277fbac14f9/morango/models.py#L528-L546
[ "def", "serialize", "(", "self", ")", ":", "# NOTE: code adapted from https://github.com/django/django/blob/master/django/forms/models.py#L75", "opts", "=", "self", ".", "_meta", "data", "=", "{", "}", "for", "f", "in", "opts", ".", "concrete_fields", ":", "if", "f", ".", "attname", "in", "self", ".", "morango_fields_not_to_serialize", ":", "continue", "if", "f", ".", "attname", "in", "self", ".", "_morango_internal_fields_not_to_serialize", ":", "continue", "# case if model is morango mptt", "if", "f", ".", "attname", "in", "getattr", "(", "self", ",", "'_internal_mptt_fields_not_to_serialize'", ",", "'_internal_fields_not_to_serialize'", ")", ":", "continue", "if", "hasattr", "(", "f", ",", "'value_from_object_json_compatible'", ")", ":", "data", "[", "f", ".", "attname", "]", "=", "f", ".", "value_from_object_json_compatible", "(", "self", ")", "else", ":", "data", "[", "f", ".", "attname", "]", "=", "f", ".", "value_from_object", "(", "self", ")", "return", "data" ]
c3ec2554b026f65ac5f0fc5c9d439277fbac14f9
valid
SyncableModel.deserialize
Returns an unsaved class object based on the valid properties passed in.
morango/models.py
def deserialize(cls, dict_model): """Returns an unsaved class object based on the valid properties passed in.""" kwargs = {} for f in cls._meta.concrete_fields: if f.attname in dict_model: kwargs[f.attname] = dict_model[f.attname] return cls(**kwargs)
def deserialize(cls, dict_model): """Returns an unsaved class object based on the valid properties passed in.""" kwargs = {} for f in cls._meta.concrete_fields: if f.attname in dict_model: kwargs[f.attname] = dict_model[f.attname] return cls(**kwargs)
[ "Returns", "an", "unsaved", "class", "object", "based", "on", "the", "valid", "properties", "passed", "in", "." ]
learningequality/morango
python
https://github.com/learningequality/morango/blob/c3ec2554b026f65ac5f0fc5c9d439277fbac14f9/morango/models.py#L549-L555
[ "def", "deserialize", "(", "cls", ",", "dict_model", ")", ":", "kwargs", "=", "{", "}", "for", "f", "in", "cls", ".", "_meta", ".", "concrete_fields", ":", "if", "f", ".", "attname", "in", "dict_model", ":", "kwargs", "[", "f", ".", "attname", "]", "=", "dict_model", "[", "f", ".", "attname", "]", "return", "cls", "(", "*", "*", "kwargs", ")" ]
c3ec2554b026f65ac5f0fc5c9d439277fbac14f9
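serialize and deserialize together give a blacklist-filtered dict round trip over a model's concrete fields. A framework-free sketch of the same pattern, using a plain class purely for illustration (the class and field names are hypothetical, not from the repo):

class Note(object):
    fields_not_to_serialize = ('cached_rendering',)

    def __init__(self, id='', text='', cached_rendering=''):
        self.id, self.text, self.cached_rendering = id, text, cached_rendering

    def serialize(self):
        # dump every attribute except the blacklisted ones
        return {k: v for k, v in vars(self).items() if k not in self.fields_not_to_serialize}

    @classmethod
    def deserialize(cls, dict_model):
        # only keep keys the class actually knows about, like the concrete-fields check above
        known = ('id', 'text', 'cached_rendering')
        return cls(**{k: v for k, v in dict_model.items() if k in known})

note = Note(id='abc', text='hi', cached_rendering='<p>hi</p>')
copy = Note.deserialize(note.serialize())
assert (copy.id, copy.text, copy.cached_rendering) == ('abc', 'hi', '')  # blacklisted field reset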
valid
UUIDField.get_default
Returns the default value for this field.
morango/utils/uuids.py
def get_default(self): """ Returns the default value for this field. """ if self.has_default(): if callable(self.default): default = self.default() if isinstance(default, uuid.UUID): return default.hex return default if isinstance(self.default, uuid.UUID): return self.default.hex return self.default return None
def get_default(self): """ Returns the default value for this field. """ if self.has_default(): if callable(self.default): default = self.default() if isinstance(default, uuid.UUID): return default.hex return default if isinstance(self.default, uuid.UUID): return self.default.hex return self.default return None
[ "Returns", "the", "default", "value", "for", "this", "field", "." ]
learningequality/morango
python
https://github.com/learningequality/morango/blob/c3ec2554b026f65ac5f0fc5c9d439277fbac14f9/morango/utils/uuids.py#L51-L64
[ "def", "get_default", "(", "self", ")", ":", "if", "self", ".", "has_default", "(", ")", ":", "if", "callable", "(", "self", ".", "default", ")", ":", "default", "=", "self", ".", "default", "(", ")", "if", "isinstance", "(", "default", ",", "uuid", ".", "UUID", ")", ":", "return", "default", ".", "hex", "return", "default", "if", "isinstance", "(", "self", ".", "default", ",", "uuid", ".", "UUID", ")", ":", "return", "self", ".", "default", ".", "hex", "return", "self", ".", "default", "return", "None" ]
c3ec2554b026f65ac5f0fc5c9d439277fbac14f9
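The .hex conversions above exist because this custom UUIDField appears to store values as 32-character hex strings; uuid.UUID.hex is exactly that dashless form. Stdlib illustration:

import uuid

u = uuid.UUID('12345678-1234-5678-1234-567812345678')
assert u.hex == '12345678123456781234567812345678'
assert len(uuid.uuid4().hex) == 32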
valid
UUIDModelMixin.calculate_uuid
Should return a 32-digit hex string for a UUID that is calculated as a function of a set of fields from the model.
morango/utils/uuids.py
def calculate_uuid(self): """Should return a 32-digit hex string for a UUID that is calculated as a function of a set of fields from the model.""" # raise an error if no inputs to the UUID calculation were specified if self.uuid_input_fields is None: raise NotImplementedError("""You must define either a 'uuid_input_fields' attribute (with a tuple of field names) or override the 'calculate_uuid' method, on models that inherit from UUIDModelMixin. If you want a fully random UUID, you can set 'uuid_input_fields' to the string 'RANDOM'.""") # if the UUID has been set to be random, return a random UUID if self.uuid_input_fields == "RANDOM": return uuid.uuid4().hex # if we got this far, uuid_input_fields should be a tuple assert isinstance(self.uuid_input_fields, tuple), "'uuid_input_fields' must either be a tuple or the string 'RANDOM'" # calculate the input to the UUID function hashable_input_vals = [] for field in self.uuid_input_fields: new_value = getattr(self, field) if new_value: hashable_input_vals.append(str(new_value)) hashable_input = ":".join(hashable_input_vals) # if all the values were falsey, just return a random UUID, to avoid collisions if not hashable_input: return uuid.uuid4().hex # compute the UUID as a function of the input values return sha2_uuid(hashable_input)
def calculate_uuid(self): """Should return a 32-digit hex string for a UUID that is calculated as a function of a set of fields from the model.""" # raise an error if no inputs to the UUID calculation were specified if self.uuid_input_fields is None: raise NotImplementedError("""You must define either a 'uuid_input_fields' attribute (with a tuple of field names) or override the 'calculate_uuid' method, on models that inherit from UUIDModelMixin. If you want a fully random UUID, you can set 'uuid_input_fields' to the string 'RANDOM'.""") # if the UUID has been set to be random, return a random UUID if self.uuid_input_fields == "RANDOM": return uuid.uuid4().hex # if we got this far, uuid_input_fields should be a tuple assert isinstance(self.uuid_input_fields, tuple), "'uuid_input_fields' must either be a tuple or the string 'RANDOM'" # calculate the input to the UUID function hashable_input_vals = [] for field in self.uuid_input_fields: new_value = getattr(self, field) if new_value: hashable_input_vals.append(str(new_value)) hashable_input = ":".join(hashable_input_vals) # if all the values were falsey, just return a random UUID, to avoid collisions if not hashable_input: return uuid.uuid4().hex # compute the UUID as a function of the input values return sha2_uuid(hashable_input)
[ "Should", "return", "a", "32", "-", "digit", "hex", "string", "for", "a", "UUID", "that", "is", "calculated", "as", "a", "function", "of", "a", "set", "of", "fields", "from", "the", "model", "." ]
learningequality/morango
python
https://github.com/learningequality/morango/blob/c3ec2554b026f65ac5f0fc5c9d439277fbac14f9/morango/utils/uuids.py#L82-L112
[ "def", "calculate_uuid", "(", "self", ")", ":", "# raise an error if no inputs to the UUID calculation were specified", "if", "self", ".", "uuid_input_fields", "is", "None", ":", "raise", "NotImplementedError", "(", "\"\"\"You must define either a 'uuid_input_fields' attribute\n (with a tuple of field names) or override the 'calculate_uuid' method, on models\n that inherit from UUIDModelMixin. If you want a fully random UUID, you can set\n 'uuid_input_fields' to the string 'RANDOM'.\"\"\"", ")", "# if the UUID has been set to be random, return a random UUID", "if", "self", ".", "uuid_input_fields", "==", "\"RANDOM\"", ":", "return", "uuid", ".", "uuid4", "(", ")", ".", "hex", "# if we got this far, uuid_input_fields should be a tuple", "assert", "isinstance", "(", "self", ".", "uuid_input_fields", ",", "tuple", ")", ",", "\"'uuid_input_fields' must either be a tuple or the string 'RANDOM'\"", "# calculate the input to the UUID function", "hashable_input_vals", "=", "[", "]", "for", "field", "in", "self", ".", "uuid_input_fields", ":", "new_value", "=", "getattr", "(", "self", ",", "field", ")", "if", "new_value", ":", "hashable_input_vals", ".", "append", "(", "str", "(", "new_value", ")", ")", "hashable_input", "=", "\":\"", ".", "join", "(", "hashable_input_vals", ")", "# if all the values were falsey, just return a random UUID, to avoid collisions", "if", "not", "hashable_input", ":", "return", "uuid", ".", "uuid4", "(", ")", ".", "hex", "# compute the UUID as a function of the input values", "return", "sha2_uuid", "(", "hashable_input", ")" ]
c3ec2554b026f65ac5f0fc5c9d439277fbac14f9
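sha2_uuid is defined elsewhere in morango/utils/uuids.py; any stable hash yielding a 32-digit hex string satisfies the contract here. A sketch with an assumed SHA-256-based implementation, showing the determinism that calculated UUIDs rely on:

import hashlib

def sha2_uuid(hashable_input):
    # assumed implementation: truncate a SHA-256 hex digest to UUID length
    return hashlib.sha256(hashable_input.encode('utf-8')).hexdigest()[:32]

# identical inputs joined in the same order always produce the same id
a = sha2_uuid(':'.join(['facility_1', 'learner_42']))
b = sha2_uuid(':'.join(['facility_1', 'learner_42']))
assert a == b and len(a) == 32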
valid
add_to_deleted_models
Whenever a model is deleted, we record its ID in a separate model for tracking purposes. During serialization, we will mark the model as deleted in the store.
morango/signals.py
def add_to_deleted_models(sender, instance=None, *args, **kwargs): """ Whenever a model is deleted, we record its ID in a separate model for tracking purposes. During serialization, we will mark the model as deleted in the store. """ if issubclass(sender, SyncableModel): instance._update_deleted_models()
def add_to_deleted_models(sender, instance=None, *args, **kwargs): """ Whenever a model is deleted, we record its ID in a separate model for tracking purposes. During serialization, we will mark the model as deleted in the store. """ if issubclass(sender, SyncableModel): instance._update_deleted_models()
[ "Whenever", "a", "model", "is", "deleted", "we", "record", "its", "ID", "in", "a", "separate", "model", "for", "tracking", "purposes", ".", "During", "serialization", "we", "will", "mark", "the", "model", "as", "deleted", "in", "the", "store", "." ]
learningequality/morango
python
https://github.com/learningequality/morango/blob/c3ec2554b026f65ac5f0fc5c9d439277fbac14f9/morango/signals.py#L8-L14
[ "def", "add_to_deleted_models", "(", "sender", ",", "instance", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "issubclass", "(", "sender", ",", "SyncableModel", ")", ":", "instance", ".", "_update_deleted_models", "(", ")" ]
c3ec2554b026f65ac5f0fc5c9d439277fbac14f9
valid
APIWrapper.make_request
Reusable method for performing requests. :param url - URL to request :param method - request method, default is 'get' :param headers - request headers :param data - post data :param callback - callback to be applied to response, default callback will parse response as json object. :param errors - specifies communication errors handling mode, possible values are: * strict (default) - throw an error as soon as one occurred * graceful - ignore certain errors, e.g. EmptyResponse * ignore - ignore all errors and return a result in any case. NOTE that it DOES NOT mean that no exceptions can be raised from this method, it mostly ignores communication related errors. * None or empty string equals to default :param verify - whether or not to verify SSL cert, default to False :param timeout - the timeout of the request in second, default to None :param params - additional query parameters for request
apiwrapper/apiwrapper.py
def make_request(self, url, method='get', headers=None, data=None, callback=None, errors=STRICT, verify=False, timeout=None, **params): """ Reusable method for performing requests. :param url - URL to request :param method - request method, default is 'get' :param headers - request headers :param data - post data :param callback - callback to be applied to response, default callback will parse response as json object. :param errors - specifies communication errors handling mode, possible values are: * strict (default) - throw an error as soon as one occurred * graceful - ignore certain errors, e.g. EmptyResponse * ignore - ignore all errors and return a result in any case. NOTE that it DOES NOT mean that no exceptions can be raised from this method, it mostly ignores communication related errors. * None or empty string equals to default :param verify - whether or not to verify SSL cert, default to False :param timeout - the timeout of the request in second, default to None :param params - additional query parameters for request """ error_modes = (STRICT, GRACEFUL, IGNORE) error_mode = errors or GRACEFUL if error_mode.lower() not in error_modes: raise ValueError( 'Possible values for errors argument are: %s' % ','.join(error_modes)) if callback is None: callback = self._default_resp_callback request = getattr(requests, method.lower()) log.debug('* Request URL: %s' % url) log.debug('* Request method: %s' % method) log.debug('* Request query params: %s' % params) log.debug('* Request headers: %s' % headers) log.debug('* Request timeout: %s' % timeout) r = request( url, headers=headers, data=data, verify=verify, timeout=timeout, params=params) log.debug('* r.url: %s' % r.url) try: r.raise_for_status() return callback(r) except Exception as e: return self._with_error_handling(r, e, error_mode, self.response_format)
def make_request(self, url, method='get', headers=None, data=None, callback=None, errors=STRICT, verify=False, timeout=None, **params): """ Reusable method for performing requests. :param url - URL to request :param method - request method, default is 'get' :param headers - request headers :param data - post data :param callback - callback to be applied to response, default callback will parse response as json object. :param errors - specifies communication errors handling mode, possible values are: * strict (default) - throw an error as soon as one occurred * graceful - ignore certain errors, e.g. EmptyResponse * ignore - ignore all errors and return a result in any case. NOTE that it DOES NOT mean that no exceptions can be raised from this method, it mostly ignores communication related errors. * None or empty string equals to default :param verify - whether or not to verify SSL cert, default to False :param timeout - the timeout of the request in second, default to None :param params - additional query parameters for request """ error_modes = (STRICT, GRACEFUL, IGNORE) error_mode = errors or GRACEFUL if error_mode.lower() not in error_modes: raise ValueError( 'Possible values for errors argument are: %s' % ','.join(error_modes)) if callback is None: callback = self._default_resp_callback request = getattr(requests, method.lower()) log.debug('* Request URL: %s' % url) log.debug('* Request method: %s' % method) log.debug('* Request query params: %s' % params) log.debug('* Request headers: %s' % headers) log.debug('* Request timeout: %s' % timeout) r = request( url, headers=headers, data=data, verify=verify, timeout=timeout, params=params) log.debug('* r.url: %s' % r.url) try: r.raise_for_status() return callback(r) except Exception as e: return self._with_error_handling(r, e, error_mode, self.response_format)
[ "Reusable", "method", "for", "performing", "requests", ".", ":", "param", "url", "-", "URL", "to", "request", ":", "param", "method", "-", "request", "method", "default", "is", "get", ":", "param", "headers", "-", "request", "headers", ":", "param", "data", "-", "post", "data", ":", "param", "callback", "-", "callback", "to", "be", "applied", "to", "response", "default", "callback", "will", "parse", "response", "as", "json", "object", ".", ":", "param", "errors", "-", "specifies", "communication", "errors", "handling", "mode", "possible", "values", "are", ":", "*", "strict", "(", "default", ")", "-", "throw", "an", "error", "as", "soon", "as", "one", "occurred", "*", "graceful", "-", "ignore", "certain", "errors", "e", ".", "g", ".", "EmptyResponse", "*", "ignore", "-", "ignore", "all", "errors", "and", "return", "a", "result", "in", "any", "case", ".", "NOTE", "that", "it", "DOES", "NOT", "mean", "that", "no", "exceptions", "can", "be", "raised", "from", "this", "method", "it", "mostly", "ignores", "communication", "related", "errors", ".", "*", "None", "or", "empty", "string", "equals", "to", "default", ":", "param", "verify", "-", "whether", "or", "not", "to", "verify", "SSL", "cert", "default", "to", "False", ":", "param", "timeout", "-", "the", "timeout", "of", "the", "request", "in", "second", "default", "to", "None", ":", "param", "params", "-", "additional", "query", "parameters", "for", "request" ]
ardydedase/apiwrapper
python
https://github.com/ardydedase/apiwrapper/blob/dd477e9f6fc5706b7a29c61a466cd63427d7c517/apiwrapper/apiwrapper.py#L89-L143
[ "def", "make_request", "(", "self", ",", "url", ",", "method", "=", "'get'", ",", "headers", "=", "None", ",", "data", "=", "None", ",", "callback", "=", "None", ",", "errors", "=", "STRICT", ",", "verify", "=", "False", ",", "timeout", "=", "None", ",", "*", "*", "params", ")", ":", "error_modes", "=", "(", "STRICT", ",", "GRACEFUL", ",", "IGNORE", ")", "error_mode", "=", "errors", "or", "GRACEFUL", "if", "error_mode", ".", "lower", "(", ")", "not", "in", "error_modes", ":", "raise", "ValueError", "(", "'Possible values for errors argument are: %s'", "%", "','", ".", "join", "(", "error_modes", ")", ")", "if", "callback", "is", "None", ":", "callback", "=", "self", ".", "_default_resp_callback", "request", "=", "getattr", "(", "requests", ",", "method", ".", "lower", "(", ")", ")", "log", ".", "debug", "(", "'* Request URL: %s'", "%", "url", ")", "log", ".", "debug", "(", "'* Request method: %s'", "%", "method", ")", "log", ".", "debug", "(", "'* Request query params: %s'", "%", "params", ")", "log", ".", "debug", "(", "'* Request headers: %s'", "%", "headers", ")", "log", ".", "debug", "(", "'* Request timeout: %s'", "%", "timeout", ")", "r", "=", "request", "(", "url", ",", "headers", "=", "headers", ",", "data", "=", "data", ",", "verify", "=", "verify", ",", "timeout", "=", "timeout", ",", "params", "=", "params", ")", "log", ".", "debug", "(", "'* r.url: %s'", "%", "r", ".", "url", ")", "try", ":", "r", ".", "raise_for_status", "(", ")", "return", "callback", "(", "r", ")", "except", "Exception", "as", "e", ":", "return", "self", ".", "_with_error_handling", "(", "r", ",", "e", ",", "error_mode", ",", "self", ".", "response_format", ")" ]
dd477e9f6fc5706b7a29c61a466cd63427d7c517
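A hedged usage sketch of make_request; the endpoint URL, the no-argument construction, and the literal 'graceful' mode are illustrative assumptions rather than facts from the record (the code lowercases the errors value, so the modes are plain strings):

wrapper = APIWrapper()  # assumption: default construction suffices for your subclass
wrapper.response_format = 'json'

resp = wrapper.make_request(
    'https://api.example.com/v1/things',  # hypothetical endpoint
    method='get',
    headers={'Accept': 'application/json'},
    errors='graceful',  # assumption: GRACEFUL is the string 'graceful'
    timeout=5,
    page=1,  # trailing keyword arguments are forwarded as query parameters
)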
valid
APIWrapper._with_error_handling
Static method for error handling. :param resp - API response :param error - Error thrown :param mode - Error mode :param response_format - XML or json
apiwrapper/apiwrapper.py
def _with_error_handling(resp, error, mode, response_format): """ Static method for error handling. :param resp - API response :param error - Error thrown :param mode - Error mode :param response_format - XML or json """ def safe_parse(r): try: return APIWrapper._parse_resp(r, response_format) except (ValueError, SyntaxError) as ex: log.error(ex) r.parsed = None return r if isinstance(error, requests.HTTPError): if resp.status_code == 400: # It means that request parameters were rejected by the server, # so we need to enrich standard error message # with 'ValidationErrors' # from the response resp = safe_parse(resp) if resp.parsed is not None: parsed_resp = resp.parsed messages = [] if response_format == 'xml' and\ parsed_resp.find('./ValidationErrors') is not None: messages = [e.find('./Message').text for e in parsed_resp.findall('./ValidationErrors/ValidationErrorDto')] elif response_format == 'json' and 'ValidationErrors' in parsed_resp: messages = [e['Message'] for e in parsed_resp['ValidationErrors']] error = requests.HTTPError( '%s: %s' % (error, '\n\t'.join(messages)), response=resp) elif resp.status_code == 429: error = requests.HTTPError('%sToo many requests in the last minute.' % error, response=resp) if STRICT == mode: raise error elif GRACEFUL == mode: if isinstance(error, EmptyResponse): # Empty response is returned by the API occasionally, # in this case it makes sense to ignore it and retry. log.warning(error) resp.parsed = None return resp elif isinstance(error, requests.HTTPError): # Ignoring 'Too many requests' error, # since subsequent retries will come after a delay. if resp.status_code == 429: # Too many requests log.warning(error) return safe_parse(resp) else: raise error else: raise error else: # ignore everything, just log it and return whatever response we # have log.error(error) return safe_parse(resp)
def _with_error_handling(resp, error, mode, response_format): """ Static method for error handling. :param resp - API response :param error - Error thrown :param mode - Error mode :param response_format - XML or json """ def safe_parse(r): try: return APIWrapper._parse_resp(r, response_format) except (ValueError, SyntaxError) as ex: log.error(ex) r.parsed = None return r if isinstance(error, requests.HTTPError): if resp.status_code == 400: # It means that request parameters were rejected by the server, # so we need to enrich standard error message # with 'ValidationErrors' # from the response resp = safe_parse(resp) if resp.parsed is not None: parsed_resp = resp.parsed messages = [] if response_format == 'xml' and\ parsed_resp.find('./ValidationErrors') is not None: messages = [e.find('./Message').text for e in parsed_resp.findall('./ValidationErrors/ValidationErrorDto')] elif response_format == 'json' and 'ValidationErrors' in parsed_resp: messages = [e['Message'] for e in parsed_resp['ValidationErrors']] error = requests.HTTPError( '%s: %s' % (error, '\n\t'.join(messages)), response=resp) elif resp.status_code == 429: error = requests.HTTPError('%sToo many requests in the last minute.' % error, response=resp) if STRICT == mode: raise error elif GRACEFUL == mode: if isinstance(error, EmptyResponse): # Empty response is returned by the API occasionally, # in this case it makes sense to ignore it and retry. log.warning(error) resp.parsed = None return resp elif isinstance(error, requests.HTTPError): # Ignoring 'Too many requests' error, # since subsequent retries will come after a delay. if resp.status_code == 429: # Too many requests log.warning(error) return safe_parse(resp) else: raise error else: raise error else: # ignore everything, just log it and return whatever response we # have log.error(error) return safe_parse(resp)
[ "Static", "method", "for", "error", "handling", "." ]
ardydedase/apiwrapper
python
https://github.com/ardydedase/apiwrapper/blob/dd477e9f6fc5706b7a29c61a466cd63427d7c517/apiwrapper/apiwrapper.py#L155-L219
[ "def", "_with_error_handling", "(", "resp", ",", "error", ",", "mode", ",", "response_format", ")", ":", "def", "safe_parse", "(", "r", ")", ":", "try", ":", "return", "APIWrapper", ".", "_parse_resp", "(", "r", ",", "response_format", ")", "except", "(", "ValueError", ",", "SyntaxError", ")", "as", "ex", ":", "log", ".", "error", "(", "ex", ")", "r", ".", "parsed", "=", "None", "return", "r", "if", "isinstance", "(", "error", ",", "requests", ".", "HTTPError", ")", ":", "if", "resp", ".", "status_code", "==", "400", ":", "# It means that request parameters were rejected by the server,", "# so we need to enrich standard error message", "# with 'ValidationErrors'", "# from the response", "resp", "=", "safe_parse", "(", "resp", ")", "if", "resp", ".", "parsed", "is", "not", "None", ":", "parsed_resp", "=", "resp", ".", "parsed", "messages", "=", "[", "]", "if", "response_format", "==", "'xml'", "and", "parsed_resp", ".", "find", "(", "'./ValidationErrors'", ")", "is", "not", "None", ":", "messages", "=", "[", "e", ".", "find", "(", "'./Message'", ")", ".", "text", "for", "e", "in", "parsed_resp", ".", "findall", "(", "'./ValidationErrors/ValidationErrorDto'", ")", "]", "elif", "response_format", "==", "'json'", "and", "'ValidationErrors'", "in", "parsed_resp", ":", "messages", "=", "[", "e", "[", "'Message'", "]", "for", "e", "in", "parsed_resp", "[", "'ValidationErrors'", "]", "]", "error", "=", "requests", ".", "HTTPError", "(", "'%s: %s'", "%", "(", "error", ",", "'\\n\\t'", ".", "join", "(", "messages", ")", ")", ",", "response", "=", "resp", ")", "elif", "resp", ".", "status_code", "==", "429", ":", "error", "=", "requests", ".", "HTTPError", "(", "'%sToo many requests in the last minute.'", "%", "error", ",", "response", "=", "resp", ")", "if", "STRICT", "==", "mode", ":", "raise", "error", "elif", "GRACEFUL", "==", "mode", ":", "if", "isinstance", "(", "error", ",", "EmptyResponse", ")", ":", "# Empty response is returned by the API occasionally,", "# in this case it makes sense to ignore it and retry.", "log", ".", "warning", "(", "error", ")", "resp", ".", "parsed", "=", "None", "return", "resp", "elif", "isinstance", "(", "error", ",", "requests", ".", "HTTPError", ")", ":", "# Ignoring 'Too many requests' error,", "# since subsequent retries will come after a delay.", "if", "resp", ".", "status_code", "==", "429", ":", "# Too many requests", "log", ".", "warning", "(", "error", ")", "return", "safe_parse", "(", "resp", ")", "else", ":", "raise", "error", "else", ":", "raise", "error", "else", ":", "# ignore everything, just log it and return whatever response we", "# have", "log", ".", "error", "(", "error", ")", "return", "safe_parse", "(", "resp", ")" ]
dd477e9f6fc5706b7a29c61a466cd63427d7c517
valid
APIWrapper.poll
Poll the URL :param url - URL to poll, should be returned by 'create_session' call :param initial_delay - specifies how many seconds to wait before the first poll :param delay - specifies how many seconds to wait between the polls :param tries - number of polls to perform :param errors - errors handling mode, see corresponding parameter in 'make_request' method :param params - additional query params for each poll request
apiwrapper/apiwrapper.py
def poll(self, url, initial_delay=2, delay=1, tries=20, errors=STRICT, is_complete_callback=None, **params): """ Poll the URL :param url - URL to poll, should be returned by 'create_session' call :param initial_delay - specifies how many seconds to wait before the first poll :param delay - specifies how many seconds to wait between the polls :param tries - number of polls to perform :param errors - errors handling mode, see corresponding parameter in 'make_request' method :param params - additional query params for each poll request """ time.sleep(initial_delay) poll_response = None if is_complete_callback == None: is_complete_callback = self._default_poll_callback for n in range(tries): poll_response = self.make_request(url, headers=self._headers(), errors=errors, **params) if is_complete_callback(poll_response): return poll_response else: time.sleep(delay) if STRICT == errors: raise ExceededRetries( "Failed to poll within {0} tries.".format(tries)) else: return poll_response
def poll(self, url, initial_delay=2, delay=1, tries=20, errors=STRICT, is_complete_callback=None, **params): """ Poll the URL :param url - URL to poll, should be returned by 'create_session' call :param initial_delay - specifies how many seconds to wait before the first poll :param delay - specifies how many seconds to wait between the polls :param tries - number of polls to perform :param errors - errors handling mode, see corresponding parameter in 'make_request' method :param params - additional query params for each poll request """ time.sleep(initial_delay) poll_response = None if is_complete_callback == None: is_complete_callback = self._default_poll_callback for n in range(tries): poll_response = self.make_request(url, headers=self._headers(), errors=errors, **params) if is_complete_callback(poll_response): return poll_response else: time.sleep(delay) if STRICT == errors: raise ExceededRetries( "Failed to poll within {0} tries.".format(tries)) else: return poll_response
[ "Poll", "the", "URL", ":", "param", "url", "-", "URL", "to", "poll", "should", "be", "returned", "by", "create_session", "call", ":", "param", "initial_delay", "-", "specifies", "how", "many", "seconds", "to", "wait", "before", "the", "first", "poll", ":", "param", "delay", "-", "specifies", "how", "many", "seconds", "to", "wait", "between", "the", "polls", ":", "param", "tries", "-", "number", "of", "polls", "to", "perform", ":", "param", "errors", "-", "errors", "handling", "mode", "see", "corresponding", "parameter", "in", "make_request", "method", ":", "param", "params", "-", "additional", "query", "params", "for", "each", "poll", "request" ]
ardydedase/apiwrapper
python
https://github.com/ardydedase/apiwrapper/blob/dd477e9f6fc5706b7a29c61a466cd63427d7c517/apiwrapper/apiwrapper.py#L221-L250
[ "def", "poll", "(", "self", ",", "url", ",", "initial_delay", "=", "2", ",", "delay", "=", "1", ",", "tries", "=", "20", ",", "errors", "=", "STRICT", ",", "is_complete_callback", "=", "None", ",", "*", "*", "params", ")", ":", "time", ".", "sleep", "(", "initial_delay", ")", "poll_response", "=", "None", "if", "is_complete_callback", "==", "None", ":", "is_complete_callback", "=", "self", ".", "_default_poll_callback", "for", "n", "in", "range", "(", "tries", ")", ":", "poll_response", "=", "self", ".", "make_request", "(", "url", ",", "headers", "=", "self", ".", "_headers", "(", ")", ",", "errors", "=", "errors", ",", "*", "*", "params", ")", "if", "is_complete_callback", "(", "poll_response", ")", ":", "return", "poll_response", "else", ":", "time", ".", "sleep", "(", "delay", ")", "if", "STRICT", "==", "errors", ":", "raise", "ExceededRetries", "(", "\"Failed to poll within {0} tries.\"", ".", "format", "(", "tries", ")", ")", "else", ":", "return", "poll_response" ]
dd477e9f6fc5706b7a29c61a466cd63427d7c517
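The poll method is an initial wait followed by bounded retries with a completion predicate. A framework-free analogue of that control flow (RuntimeError stands in for the wrapper's ExceededRetries, and this version always raises on exhaustion rather than returning the last response):

import time

def poll(fetch, initial_delay=2, delay=1, tries=20, is_complete=lambda r: r is not None):
    # same shape as APIWrapper.poll: one fixed initial wait, then bounded retries
    time.sleep(initial_delay)
    response = None
    for _ in range(tries):
        response = fetch()
        if is_complete(response):
            return response
        time.sleep(delay)
    raise RuntimeError('Failed to poll within {0} tries.'.format(tries))

# toy usage: a "job" that only completes on the third fetch
state = {'n': 0}
def fetch():
    state['n'] += 1
    return 'COMPLETE' if state['n'] >= 3 else None

print(poll(fetch, initial_delay=0, delay=0))  # -> COMPLETE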
valid
APIWrapper._default_poll_callback
Checks the condition in poll response to determine if it is complete and no subsequent poll requests should be done.
apiwrapper/apiwrapper.py
def _default_poll_callback(self, poll_resp): """ Checks the condition in poll response to determine if it is complete and no subsequent poll requests should be done. """ if poll_resp.parsed is None: return False success_list = ['UpdatesComplete', True, 'COMPLETE'] status = None if self.response_format == 'xml': status = poll_resp.parsed.find('./Status').text elif self.response_format == 'json': status = poll_resp.parsed.get( 'Status', poll_resp.parsed.get('status')) if status is None: raise RuntimeError('Unable to get poll response status.') return status in success_list
def _default_poll_callback(self, poll_resp): """ Checks the condition in poll response to determine if it is complete and no subsequent poll requests should be done. """ if poll_resp.parsed is None: return False success_list = ['UpdatesComplete', True, 'COMPLETE'] status = None if self.response_format == 'xml': status = poll_resp.parsed.find('./Status').text elif self.response_format == 'json': status = poll_resp.parsed.get( 'Status', poll_resp.parsed.get('status')) if status is None: raise RuntimeError('Unable to get poll response status.') return status in success_list
[ "Checks", "the", "condition", "in", "poll", "response", "to", "determine", "if", "it", "is", "complete", "and", "no", "subsequent", "poll", "requests", "should", "be", "done", "." ]
ardydedase/apiwrapper
python
https://github.com/ardydedase/apiwrapper/blob/dd477e9f6fc5706b7a29c61a466cd63427d7c517/apiwrapper/apiwrapper.py#L252-L268
[ "def", "_default_poll_callback", "(", "self", ",", "poll_resp", ")", ":", "if", "poll_resp", ".", "parsed", "is", "None", ":", "return", "False", "success_list", "=", "[", "'UpdatesComplete'", ",", "True", ",", "'COMPLETE'", "]", "status", "=", "None", "if", "self", ".", "response_format", "==", "'xml'", ":", "status", "=", "poll_resp", ".", "parsed", ".", "find", "(", "'./Status'", ")", ".", "text", "elif", "self", ".", "response_format", "==", "'json'", ":", "status", "=", "poll_resp", ".", "parsed", ".", "get", "(", "'Status'", ",", "poll_resp", ".", "parsed", ".", "get", "(", "'status'", ")", ")", "if", "status", "is", "None", ":", "raise", "RuntimeError", "(", "'Unable to get poll response status.'", ")", "return", "status", "in", "success_list" ]
dd477e9f6fc5706b7a29c61a466cd63427d7c517
valid
_fsic_queuing_calc
We set the lower counter between two same instance ids. If an instance_id exists in one fsic but not the other we want to give that counter a value of 0. :param fsic1: dictionary containing (instance_id, counter) pairs :param fsic2: dictionary containing (instance_id, counter) pairs :return ``dict`` of fsics to be used in queueing the correct records to the buffer
morango/utils/sync_utils.py
def _fsic_queuing_calc(fsic1, fsic2): """ We set the lower counter between two same instance ids. If an instance_id exists in one fsic but not the other we want to give that counter a value of 0. :param fsic1: dictionary containing (instance_id, counter) pairs :param fsic2: dictionary containing (instance_id, counter) pairs :return ``dict`` of fsics to be used in queueing the correct records to the buffer """ return {instance: fsic2.get(instance, 0) for instance, counter in six.iteritems(fsic1) if fsic2.get(instance, 0) < counter}
def _fsic_queuing_calc(fsic1, fsic2): """ We set the lower counter between two same instance ids. If an instance_id exists in one fsic but not the other we want to give that counter a value of 0. :param fsic1: dictionary containing (instance_id, counter) pairs :param fsic2: dictionary containing (instance_id, counter) pairs :return ``dict`` of fsics to be used in queueing the correct records to the buffer """ return {instance: fsic2.get(instance, 0) for instance, counter in six.iteritems(fsic1) if fsic2.get(instance, 0) < counter}
[ "We", "set", "the", "lower", "counter", "between", "two", "same", "instance", "ids", ".", "If", "an", "instance_id", "exists", "in", "one", "fsic", "but", "not", "the", "other", "we", "want", "to", "give", "that", "counter", "a", "value", "of", "0", "." ]
learningequality/morango
python
https://github.com/learningequality/morango/blob/c3ec2554b026f65ac5f0fc5c9d439277fbac14f9/morango/utils/sync_utils.py#L38-L47
[ "def", "_fsic_queuing_calc", "(", "fsic1", ",", "fsic2", ")", ":", "return", "{", "instance", ":", "fsic2", ".", "get", "(", "instance", ",", "0", ")", "for", "instance", ",", "counter", "in", "six", ".", "iteritems", "(", "fsic1", ")", "if", "fsic2", ".", "get", "(", "instance", ",", "0", ")", "<", "counter", "}" ]
c3ec2554b026f65ac5f0fc5c9d439277fbac14f9
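A worked example of the calculation above with hypothetical instance ids, assuming _fsic_queuing_calc is importable:

fsic_local = {'instance_A': 5, 'instance_B': 3, 'instance_C': 1}  # our (instance_id, counter) pairs
fsic_remote = {'instance_A': 2, 'instance_C': 4}                  # theirs

# instance_A: remote is behind (2 < 5) -> queue records newer than counter 2
# instance_B: unknown remotely, treated as 0 -> queue everything (counter 0)
# instance_C: remote is ahead (4 >= 1) -> nothing to queue
assert _fsic_queuing_calc(fsic_local, fsic_remote) == {'instance_A': 2, 'instance_B': 0}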
valid
_serialize_into_store
Takes data from app layer and serializes the models into the store.
morango/utils/sync_utils.py
def _serialize_into_store(profile, filter=None): """ Takes data from app layer and serializes the models into the store. """ # ensure that we write and retrieve the counter in one go for consistency current_id = InstanceIDModel.get_current_instance_and_increment_counter() with transaction.atomic(): # create Q objects for filtering by prefixes prefix_condition = None if filter: prefix_condition = functools.reduce(lambda x, y: x | y, [Q(_morango_partition__startswith=prefix) for prefix in filter]) # filter through all models with the dirty bit turned on syncable_dict = _profile_models[profile] for (_, klass_model) in six.iteritems(syncable_dict): new_store_records = [] new_rmc_records = [] klass_queryset = klass_model.objects.filter(_morango_dirty_bit=True) if prefix_condition: klass_queryset = klass_queryset.filter(prefix_condition) store_records_dict = Store.objects.in_bulk(id_list=klass_queryset.values_list('id', flat=True)) for app_model in klass_queryset: try: store_model = store_records_dict[app_model.id] # if store record dirty and app record dirty, append store serialized to conflicting data if store_model.dirty_bit: store_model.conflicting_serialized_data = store_model.serialized + "\n" + store_model.conflicting_serialized_data store_model.dirty_bit = False # set new serialized data on this store model ser_dict = json.loads(store_model.serialized) ser_dict.update(app_model.serialize()) store_model.serialized = DjangoJSONEncoder().encode(ser_dict) # create or update instance and counter on the record max counter for this store model RecordMaxCounter.objects.update_or_create(defaults={'counter': current_id.counter}, instance_id=current_id.id, store_model_id=store_model.id) # update last saved bys for this store model store_model.last_saved_instance = current_id.id store_model.last_saved_counter = current_id.counter # update deleted flags in case it was previously deleted store_model.deleted = False store_model.hard_deleted = False # update this model store_model.save() except KeyError: kwargs = { 'id': app_model.id, 'serialized': DjangoJSONEncoder().encode(app_model.serialize()), 'last_saved_instance': current_id.id, 'last_saved_counter': current_id.counter, 'model_name': app_model.morango_model_name, 'profile': app_model.morango_profile, 'partition': app_model._morango_partition, 'source_id': app_model._morango_source_id, } # check if model has FK pointing to it and add the value to a field on the store self_ref_fk = _self_referential_fk(klass_model) if self_ref_fk: self_ref_fk_value = getattr(app_model, self_ref_fk) kwargs.update({'_self_ref_fk': self_ref_fk_value or ''}) # create store model and record max counter for the app model new_store_records.append(Store(**kwargs)) new_rmc_records.append(RecordMaxCounter(store_model_id=app_model.id, instance_id=current_id.id, counter=current_id.counter)) # bulk create store and rmc records for this class Store.objects.bulk_create(new_store_records) RecordMaxCounter.objects.bulk_create(new_rmc_records) # set dirty bit to false for all instances of this model klass_queryset.update(update_dirty_bit_to=False) # get list of ids of deleted models deleted_ids = DeletedModels.objects.filter(profile=profile).values_list('id', flat=True) # update last_saved_bys and deleted flag of all deleted store model instances deleted_store_records = Store.objects.filter(id__in=deleted_ids) deleted_store_records.update(dirty_bit=False, deleted=True, last_saved_instance=current_id.id, last_saved_counter=current_id.counter) # update rmcs counters for deleted models that have our instance id RecordMaxCounter.objects.filter(instance_id=current_id.id, store_model_id__in=deleted_ids).update(counter=current_id.counter) # get a list of deleted model ids that don't have an rmc for our instance id new_rmc_ids = deleted_store_records.exclude(recordmaxcounter__instance_id=current_id.id).values_list("id", flat=True) # bulk create these new rmcs RecordMaxCounter.objects.bulk_create([RecordMaxCounter(store_model_id=r_id, instance_id=current_id.id, counter=current_id.counter) for r_id in new_rmc_ids]) # clear deleted models table for this profile DeletedModels.objects.filter(profile=profile).delete() # handle logic for hard deletion models hard_deleted_ids = HardDeletedModels.objects.filter(profile=profile).values_list('id', flat=True) hard_deleted_store_records = Store.objects.filter(id__in=hard_deleted_ids) hard_deleted_store_records.update(hard_deleted=True, serialized='{}', conflicting_serialized_data='') HardDeletedModels.objects.filter(profile=profile).delete() # update our own database max counters after serialization if not filter: DatabaseMaxCounter.objects.update_or_create(instance_id=current_id.id, partition="", defaults={'counter': current_id.counter}) else: for f in filter: DatabaseMaxCounter.objects.update_or_create(instance_id=current_id.id, partition=f, defaults={'counter': current_id.counter})
def _serialize_into_store(profile, filter=None): """ Takes data from app layer and serializes the models into the store. """ # ensure that we write and retrieve the counter in one go for consistency current_id = InstanceIDModel.get_current_instance_and_increment_counter() with transaction.atomic(): # create Q objects for filtering by prefixes prefix_condition = None if filter: prefix_condition = functools.reduce(lambda x, y: x | y, [Q(_morango_partition__startswith=prefix) for prefix in filter]) # filter through all models with the dirty bit turned on syncable_dict = _profile_models[profile] for (_, klass_model) in six.iteritems(syncable_dict): new_store_records = [] new_rmc_records = [] klass_queryset = klass_model.objects.filter(_morango_dirty_bit=True) if prefix_condition: klass_queryset = klass_queryset.filter(prefix_condition) store_records_dict = Store.objects.in_bulk(id_list=klass_queryset.values_list('id', flat=True)) for app_model in klass_queryset: try: store_model = store_records_dict[app_model.id] # if store record dirty and app record dirty, append store serialized to conflicting data if store_model.dirty_bit: store_model.conflicting_serialized_data = store_model.serialized + "\n" + store_model.conflicting_serialized_data store_model.dirty_bit = False # set new serialized data on this store model ser_dict = json.loads(store_model.serialized) ser_dict.update(app_model.serialize()) store_model.serialized = DjangoJSONEncoder().encode(ser_dict) # create or update instance and counter on the record max counter for this store model RecordMaxCounter.objects.update_or_create(defaults={'counter': current_id.counter}, instance_id=current_id.id, store_model_id=store_model.id) # update last saved bys for this store model store_model.last_saved_instance = current_id.id store_model.last_saved_counter = current_id.counter # update deleted flags in case it was previously deleted store_model.deleted = False store_model.hard_deleted = False # update this model store_model.save() except KeyError: kwargs = { 'id': app_model.id, 'serialized': DjangoJSONEncoder().encode(app_model.serialize()), 'last_saved_instance': current_id.id, 'last_saved_counter': current_id.counter, 'model_name': app_model.morango_model_name, 'profile': app_model.morango_profile, 'partition': app_model._morango_partition, 'source_id': app_model._morango_source_id, } # check if model has FK pointing to it and add the value to a field on the store self_ref_fk = _self_referential_fk(klass_model) if self_ref_fk: self_ref_fk_value = getattr(app_model, self_ref_fk) kwargs.update({'_self_ref_fk': self_ref_fk_value or ''}) # create store model and record max counter for the app model new_store_records.append(Store(**kwargs)) new_rmc_records.append(RecordMaxCounter(store_model_id=app_model.id, instance_id=current_id.id, counter=current_id.counter)) # bulk create store and rmc records for this class Store.objects.bulk_create(new_store_records) RecordMaxCounter.objects.bulk_create(new_rmc_records) # set dirty bit to false for all instances of this model klass_queryset.update(update_dirty_bit_to=False) # get list of ids of deleted models deleted_ids = DeletedModels.objects.filter(profile=profile).values_list('id', flat=True) # update last_saved_bys and deleted flag of all deleted store model instances deleted_store_records = Store.objects.filter(id__in=deleted_ids) deleted_store_records.update(dirty_bit=False, deleted=True, last_saved_instance=current_id.id, last_saved_counter=current_id.counter) # update rmcs counters for deleted models that have our instance id RecordMaxCounter.objects.filter(instance_id=current_id.id, store_model_id__in=deleted_ids).update(counter=current_id.counter) # get a list of deleted model ids that don't have an rmc for our instance id new_rmc_ids = deleted_store_records.exclude(recordmaxcounter__instance_id=current_id.id).values_list("id", flat=True) # bulk create these new rmcs RecordMaxCounter.objects.bulk_create([RecordMaxCounter(store_model_id=r_id, instance_id=current_id.id, counter=current_id.counter) for r_id in new_rmc_ids]) # clear deleted models table for this profile DeletedModels.objects.filter(profile=profile).delete() # handle logic for hard deletion models hard_deleted_ids = HardDeletedModels.objects.filter(profile=profile).values_list('id', flat=True) hard_deleted_store_records = Store.objects.filter(id__in=hard_deleted_ids) hard_deleted_store_records.update(hard_deleted=True, serialized='{}', conflicting_serialized_data='') HardDeletedModels.objects.filter(profile=profile).delete() # update our own database max counters after serialization if not filter: DatabaseMaxCounter.objects.update_or_create(instance_id=current_id.id, partition="", defaults={'counter': current_id.counter}) else: for f in filter: DatabaseMaxCounter.objects.update_or_create(instance_id=current_id.id, partition=f, defaults={'counter': current_id.counter})
[ "Takes", "data", "from", "app", "layer", "and", "serializes", "the", "models", "into", "the", "store", "." ]
learningequality/morango
python
https://github.com/learningequality/morango/blob/c3ec2554b026f65ac5f0fc5c9d439277fbac14f9/morango/utils/sync_utils.py#L49-L152
[ "def", "_serialize_into_store", "(", "profile", ",", "filter", "=", "None", ")", ":", "# ensure that we write and retrieve the counter in one go for consistency", "current_id", "=", "InstanceIDModel", ".", "get_current_instance_and_increment_counter", "(", ")", "with", "transaction", ".", "atomic", "(", ")", ":", "# create Q objects for filtering by prefixes", "prefix_condition", "=", "None", "if", "filter", ":", "prefix_condition", "=", "functools", ".", "reduce", "(", "lambda", "x", ",", "y", ":", "x", "|", "y", ",", "[", "Q", "(", "_morango_partition__startswith", "=", "prefix", ")", "for", "prefix", "in", "filter", "]", ")", "# filter through all models with the dirty bit turned on", "syncable_dict", "=", "_profile_models", "[", "profile", "]", "for", "(", "_", ",", "klass_model", ")", "in", "six", ".", "iteritems", "(", "syncable_dict", ")", ":", "new_store_records", "=", "[", "]", "new_rmc_records", "=", "[", "]", "klass_queryset", "=", "klass_model", ".", "objects", ".", "filter", "(", "_morango_dirty_bit", "=", "True", ")", "if", "prefix_condition", ":", "klass_queryset", "=", "klass_queryset", ".", "filter", "(", "prefix_condition", ")", "store_records_dict", "=", "Store", ".", "objects", ".", "in_bulk", "(", "id_list", "=", "klass_queryset", ".", "values_list", "(", "'id'", ",", "flat", "=", "True", ")", ")", "for", "app_model", "in", "klass_queryset", ":", "try", ":", "store_model", "=", "store_records_dict", "[", "app_model", ".", "id", "]", "# if store record dirty and app record dirty, append store serialized to conflicting data", "if", "store_model", ".", "dirty_bit", ":", "store_model", ".", "conflicting_serialized_data", "=", "store_model", ".", "serialized", "+", "\"\\n\"", "+", "store_model", ".", "conflicting_serialized_data", "store_model", ".", "dirty_bit", "=", "False", "# set new serialized data on this store model", "ser_dict", "=", "json", ".", "loads", "(", "store_model", ".", "serialized", ")", "ser_dict", ".", "update", "(", "app_model", ".", "serialize", "(", ")", ")", "store_model", ".", "serialized", "=", "DjangoJSONEncoder", "(", ")", ".", "encode", "(", "ser_dict", ")", "# create or update instance and counter on the record max counter for this store model", "RecordMaxCounter", ".", "objects", ".", "update_or_create", "(", "defaults", "=", "{", "'counter'", ":", "current_id", ".", "counter", "}", ",", "instance_id", "=", "current_id", ".", "id", ",", "store_model_id", "=", "store_model", ".", "id", ")", "# update last saved bys for this store model", "store_model", ".", "last_saved_instance", "=", "current_id", ".", "id", "store_model", ".", "last_saved_counter", "=", "current_id", ".", "counter", "# update deleted flags in case it was previously deleted", "store_model", ".", "deleted", "=", "False", "store_model", ".", "hard_deleted", "=", "False", "# update this model", "store_model", ".", "save", "(", ")", "except", "KeyError", ":", "kwargs", "=", "{", "'id'", ":", "app_model", ".", "id", ",", "'serialized'", ":", "DjangoJSONEncoder", "(", ")", ".", "encode", "(", "app_model", ".", "serialize", "(", ")", ")", ",", "'last_saved_instance'", ":", "current_id", ".", "id", ",", "'last_saved_counter'", ":", "current_id", ".", "counter", ",", "'model_name'", ":", "app_model", ".", "morango_model_name", ",", "'profile'", ":", "app_model", ".", "morango_profile", ",", "'partition'", ":", "app_model", ".", "_morango_partition", ",", "'source_id'", ":", "app_model", ".", "_morango_source_id", ",", "}", "# check if model has FK pointing to it and add the value to 
a field on the store", "self_ref_fk", "=", "_self_referential_fk", "(", "klass_model", ")", "if", "self_ref_fk", ":", "self_ref_fk_value", "=", "getattr", "(", "app_model", ",", "self_ref_fk", ")", "kwargs", ".", "update", "(", "{", "'_self_ref_fk'", ":", "self_ref_fk_value", "or", "''", "}", ")", "# create store model and record max counter for the app model", "new_store_records", ".", "append", "(", "Store", "(", "*", "*", "kwargs", ")", ")", "new_rmc_records", ".", "append", "(", "RecordMaxCounter", "(", "store_model_id", "=", "app_model", ".", "id", ",", "instance_id", "=", "current_id", ".", "id", ",", "counter", "=", "current_id", ".", "counter", ")", ")", "# bulk create store and rmc records for this class", "Store", ".", "objects", ".", "bulk_create", "(", "new_store_records", ")", "RecordMaxCounter", ".", "objects", ".", "bulk_create", "(", "new_rmc_records", ")", "# set dirty bit to false for all instances of this model", "klass_queryset", ".", "update", "(", "update_dirty_bit_to", "=", "False", ")", "# get list of ids of deleted models", "deleted_ids", "=", "DeletedModels", ".", "objects", ".", "filter", "(", "profile", "=", "profile", ")", ".", "values_list", "(", "'id'", ",", "flat", "=", "True", ")", "# update last_saved_bys and deleted flag of all deleted store model instances", "deleted_store_records", "=", "Store", ".", "objects", ".", "filter", "(", "id__in", "=", "deleted_ids", ")", "deleted_store_records", ".", "update", "(", "dirty_bit", "=", "False", ",", "deleted", "=", "True", ",", "last_saved_instance", "=", "current_id", ".", "id", ",", "last_saved_counter", "=", "current_id", ".", "counter", ")", "# update rmcs counters for deleted models that have our instance id", "RecordMaxCounter", ".", "objects", ".", "filter", "(", "instance_id", "=", "current_id", ".", "id", ",", "store_model_id__in", "=", "deleted_ids", ")", ".", "update", "(", "counter", "=", "current_id", ".", "counter", ")", "# get a list of deleted model ids that don't have an rmc for our instance id", "new_rmc_ids", "=", "deleted_store_records", ".", "exclude", "(", "recordmaxcounter__instance_id", "=", "current_id", ".", "id", ")", ".", "values_list", "(", "\"id\"", ",", "flat", "=", "True", ")", "# bulk create these new rmcs", "RecordMaxCounter", ".", "objects", ".", "bulk_create", "(", "[", "RecordMaxCounter", "(", "store_model_id", "=", "r_id", ",", "instance_id", "=", "current_id", ".", "id", ",", "counter", "=", "current_id", ".", "counter", ")", "for", "r_id", "in", "new_rmc_ids", "]", ")", "# clear deleted models table for this profile", "DeletedModels", ".", "objects", ".", "filter", "(", "profile", "=", "profile", ")", ".", "delete", "(", ")", "# handle logic for hard deletion models", "hard_deleted_ids", "=", "HardDeletedModels", ".", "objects", ".", "filter", "(", "profile", "=", "profile", ")", ".", "values_list", "(", "'id'", ",", "flat", "=", "True", ")", "hard_deleted_store_records", "=", "Store", ".", "objects", ".", "filter", "(", "id__in", "=", "hard_deleted_ids", ")", "hard_deleted_store_records", ".", "update", "(", "hard_deleted", "=", "True", ",", "serialized", "=", "'{}'", ",", "conflicting_serialized_data", "=", "''", ")", "HardDeletedModels", ".", "objects", ".", "filter", "(", "profile", "=", "profile", ")", ".", "delete", "(", ")", "# update our own database max counters after serialization", "if", "not", "filter", ":", "DatabaseMaxCounter", ".", "objects", ".", "update_or_create", "(", "instance_id", "=", "current_id", ".", "id", ",", "partition", "=", "\"\"", ",", 
"defaults", "=", "{", "'counter'", ":", "current_id", ".", "counter", "}", ")", "else", ":", "for", "f", "in", "filter", ":", "DatabaseMaxCounter", ".", "objects", ".", "update_or_create", "(", "instance_id", "=", "current_id", ".", "id", ",", "partition", "=", "f", ",", "defaults", "=", "{", "'counter'", ":", "current_id", ".", "counter", "}", ")" ]
c3ec2554b026f65ac5f0fc5c9d439277fbac14f9
valid
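Editor's note on the record above: the filter argument is a list of partition prefixes that get OR'd into a single Q object before filtering the dirty queryset. A minimal standalone sketch of that reduction; the field name _morango_partition comes from the record, while the prefix values are invented for illustration:

import functools

from django.db.models import Q

# invented example prefixes; morango receives these via the `filter` argument
prefixes = ["facility1:user", "facility1:device"]

# OR together one startswith condition per prefix, as the record does
prefix_condition = functools.reduce(
    lambda x, y: x | y,
    [Q(_morango_partition__startswith=prefix) for prefix in prefixes],
)

# prefix_condition can then be passed to .filter() on any queryset whose
# model defines a _morango_partition field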
_deserialize_from_store
Takes data from the store and integrates into the application.
morango/utils/sync_utils.py
def _deserialize_from_store(profile): """ Takes data from the store and integrates into the application. """ # we first serialize to avoid deserialization merge conflicts _serialize_into_store(profile) fk_cache = {} with transaction.atomic(): syncable_dict = _profile_models[profile] excluded_list = [] # iterate through classes which are in foreign key dependency order for model_name, klass_model in six.iteritems(syncable_dict): # handle cases where a class has a single FK reference to itself self_ref_fk = _self_referential_fk(klass_model) query = Q(model_name=klass_model.morango_model_name) for klass in klass_model.morango_model_dependencies: query |= Q(model_name=klass.morango_model_name) if self_ref_fk: clean_parents = Store.objects.filter(dirty_bit=False, profile=profile).filter(query).char_ids_list() dirty_children = Store.objects.filter(dirty_bit=True, profile=profile) \ .filter(Q(_self_ref_fk__in=clean_parents) | Q(_self_ref_fk='')).filter(query) # keep iterating until size of dirty_children is 0 while len(dirty_children) > 0: for store_model in dirty_children: try: app_model = store_model._deserialize_store_model(fk_cache) if app_model: with mute_signals(signals.pre_save, signals.post_save): app_model.save(update_dirty_bit_to=False) # we update a store model after we have deserialized it to be able to mark it as a clean parent store_model.dirty_bit = False store_model.save(update_fields=['dirty_bit']) except exceptions.ValidationError: # if the app model did not validate, we leave the store dirty bit set excluded_list.append(store_model.id) # update lists with new clean parents and dirty children clean_parents = Store.objects.filter(dirty_bit=False, profile=profile).filter(query).char_ids_list() dirty_children = Store.objects.filter(dirty_bit=True, profile=profile, _self_ref_fk__in=clean_parents).filter(query) else: # array for holding db values from the fields of each model for this class db_values = [] fields = klass_model._meta.fields for store_model in Store.objects.filter(model_name=model_name, profile=profile, dirty_bit=True): try: app_model = store_model._deserialize_store_model(fk_cache) # if the model was not deleted add its field values to the list if app_model: for f in fields: value = getattr(app_model, f.attname) db_value = f.get_db_prep_value(value, connection) db_values.append(db_value) except exceptions.ValidationError: # if the app model did not validate, we leave the store dirty bit set excluded_list.append(store_model.id) if db_values: # number of rows to update num_of_rows = len(db_values) // len(fields) # create '%s' placeholders for a single row placeholder_tuple = tuple(['%s' for _ in range(len(fields))]) # create list of the '%s' tuple placeholders based on number of rows to update placeholder_list = [str(placeholder_tuple) for _ in range(num_of_rows)] with connection.cursor() as cursor: DBBackend._bulk_insert_into_app_models(cursor, klass_model._meta.db_table, fields, db_values, placeholder_list) # clear dirty bit for all store models for this profile except for models that did not validate Store.objects.exclude(id__in=excluded_list).filter(profile=profile, dirty_bit=True).update(dirty_bit=False)
def _deserialize_from_store(profile): """ Takes data from the store and integrates into the application. """ # we first serialize to avoid deserialization merge conflicts _serialize_into_store(profile) fk_cache = {} with transaction.atomic(): syncable_dict = _profile_models[profile] excluded_list = [] # iterate through classes which are in foreign key dependency order for model_name, klass_model in six.iteritems(syncable_dict): # handle cases where a class has a single FK reference to itself self_ref_fk = _self_referential_fk(klass_model) query = Q(model_name=klass_model.morango_model_name) for klass in klass_model.morango_model_dependencies: query |= Q(model_name=klass.morango_model_name) if self_ref_fk: clean_parents = Store.objects.filter(dirty_bit=False, profile=profile).filter(query).char_ids_list() dirty_children = Store.objects.filter(dirty_bit=True, profile=profile) \ .filter(Q(_self_ref_fk__in=clean_parents) | Q(_self_ref_fk='')).filter(query) # keep iterating until size of dirty_children is 0 while len(dirty_children) > 0: for store_model in dirty_children: try: app_model = store_model._deserialize_store_model(fk_cache) if app_model: with mute_signals(signals.pre_save, signals.post_save): app_model.save(update_dirty_bit_to=False) # we update a store model after we have deserialized it to be able to mark it as a clean parent store_model.dirty_bit = False store_model.save(update_fields=['dirty_bit']) except exceptions.ValidationError: # if the app model did not validate, we leave the store dirty bit set excluded_list.append(store_model.id) # update lists with new clean parents and dirty children clean_parents = Store.objects.filter(dirty_bit=False, profile=profile).filter(query).char_ids_list() dirty_children = Store.objects.filter(dirty_bit=True, profile=profile, _self_ref_fk__in=clean_parents).filter(query) else: # array for holding db values from the fields of each model for this class db_values = [] fields = klass_model._meta.fields for store_model in Store.objects.filter(model_name=model_name, profile=profile, dirty_bit=True): try: app_model = store_model._deserialize_store_model(fk_cache) # if the model was not deleted add its field values to the list if app_model: for f in fields: value = getattr(app_model, f.attname) db_value = f.get_db_prep_value(value, connection) db_values.append(db_value) except exceptions.ValidationError: # if the app model did not validate, we leave the store dirty bit set excluded_list.append(store_model.id) if db_values: # number of rows to update num_of_rows = len(db_values) // len(fields) # create '%s' placeholders for a single row placeholder_tuple = tuple(['%s' for _ in range(len(fields))]) # create list of the '%s' tuple placeholders based on number of rows to update placeholder_list = [str(placeholder_tuple) for _ in range(num_of_rows)] with connection.cursor() as cursor: DBBackend._bulk_insert_into_app_models(cursor, klass_model._meta.db_table, fields, db_values, placeholder_list) # clear dirty bit for all store models for this profile except for models that did not validate Store.objects.exclude(id__in=excluded_list).filter(profile=profile, dirty_bit=True).update(dirty_bit=False)
[ "Takes", "data", "from", "the", "store", "and", "integrates", "into", "the", "application", "." ]
learningequality/morango
python
https://github.com/learningequality/morango/blob/c3ec2554b026f65ac5f0fc5c9d439277fbac14f9/morango/utils/sync_utils.py#L154-L223
[ "def", "_deserialize_from_store", "(", "profile", ")", ":", "# we first serialize to avoid deserialization merge conflicts", "_serialize_into_store", "(", "profile", ")", "fk_cache", "=", "{", "}", "with", "transaction", ".", "atomic", "(", ")", ":", "syncable_dict", "=", "_profile_models", "[", "profile", "]", "excluded_list", "=", "[", "]", "# iterate through classes which are in foreign key dependency order", "for", "model_name", ",", "klass_model", "in", "six", ".", "iteritems", "(", "syncable_dict", ")", ":", "# handle cases where a class has a single FK reference to itself", "self_ref_fk", "=", "_self_referential_fk", "(", "klass_model", ")", "query", "=", "Q", "(", "model_name", "=", "klass_model", ".", "morango_model_name", ")", "for", "klass", "in", "klass_model", ".", "morango_model_dependencies", ":", "query", "|=", "Q", "(", "model_name", "=", "klass", ".", "morango_model_name", ")", "if", "self_ref_fk", ":", "clean_parents", "=", "Store", ".", "objects", ".", "filter", "(", "dirty_bit", "=", "False", ",", "profile", "=", "profile", ")", ".", "filter", "(", "query", ")", ".", "char_ids_list", "(", ")", "dirty_children", "=", "Store", ".", "objects", ".", "filter", "(", "dirty_bit", "=", "True", ",", "profile", "=", "profile", ")", ".", "filter", "(", "Q", "(", "_self_ref_fk__in", "=", "clean_parents", ")", "|", "Q", "(", "_self_ref_fk", "=", "''", ")", ")", ".", "filter", "(", "query", ")", "# keep iterating until size of dirty_children is 0", "while", "len", "(", "dirty_children", ")", ">", "0", ":", "for", "store_model", "in", "dirty_children", ":", "try", ":", "app_model", "=", "store_model", ".", "_deserialize_store_model", "(", "fk_cache", ")", "if", "app_model", ":", "with", "mute_signals", "(", "signals", ".", "pre_save", ",", "signals", ".", "post_save", ")", ":", "app_model", ".", "save", "(", "update_dirty_bit_to", "=", "False", ")", "# we update a store model after we have deserialized it to be able to mark it as a clean parent", "store_model", ".", "dirty_bit", "=", "False", "store_model", ".", "save", "(", "update_fields", "=", "[", "'dirty_bit'", "]", ")", "except", "exceptions", ".", "ValidationError", ":", "# if the app model did not validate, we leave the store dirty bit set", "excluded_list", ".", "append", "(", "store_model", ".", "id", ")", "# update lists with new clean parents and dirty children", "clean_parents", "=", "Store", ".", "objects", ".", "filter", "(", "dirty_bit", "=", "False", ",", "profile", "=", "profile", ")", ".", "filter", "(", "query", ")", ".", "char_ids_list", "(", ")", "dirty_children", "=", "Store", ".", "objects", ".", "filter", "(", "dirty_bit", "=", "True", ",", "profile", "=", "profile", ",", "_self_ref_fk__in", "=", "clean_parents", ")", ".", "filter", "(", "query", ")", "else", ":", "# array for holding db values from the fields of each model for this class", "db_values", "=", "[", "]", "fields", "=", "klass_model", ".", "_meta", ".", "fields", "for", "store_model", "in", "Store", ".", "objects", ".", "filter", "(", "model_name", "=", "model_name", ",", "profile", "=", "profile", ",", "dirty_bit", "=", "True", ")", ":", "try", ":", "app_model", "=", "store_model", ".", "_deserialize_store_model", "(", "fk_cache", ")", "# if the model was not deleted add its field values to the list", "if", "app_model", ":", "for", "f", "in", "fields", ":", "value", "=", "getattr", "(", "app_model", ",", "f", ".", "attname", ")", "db_value", "=", "f", ".", "get_db_prep_value", "(", "value", ",", "connection", ")", "db_values", ".", "append", 
"(", "db_value", ")", "except", "exceptions", ".", "ValidationError", ":", "# if the app model did not validate, we leave the store dirty bit set", "excluded_list", ".", "append", "(", "store_model", ".", "id", ")", "if", "db_values", ":", "# number of rows to update", "num_of_rows", "=", "len", "(", "db_values", ")", "//", "len", "(", "fields", ")", "# create '%s' placeholders for a single row", "placeholder_tuple", "=", "tuple", "(", "[", "'%s'", "for", "_", "in", "range", "(", "len", "(", "fields", ")", ")", "]", ")", "# create list of the '%s' tuple placeholders based on number of rows to update", "placeholder_list", "=", "[", "str", "(", "placeholder_tuple", ")", "for", "_", "in", "range", "(", "num_of_rows", ")", "]", "with", "connection", ".", "cursor", "(", ")", "as", "cursor", ":", "DBBackend", ".", "_bulk_insert_into_app_models", "(", "cursor", ",", "klass_model", ".", "_meta", ".", "db_table", ",", "fields", ",", "db_values", ",", "placeholder_list", ")", "# clear dirty bit for all store models for this profile except for models that did not validate", "Store", ".", "objects", ".", "exclude", "(", "id__in", "=", "excluded_list", ")", ".", "filter", "(", "profile", "=", "profile", ",", "dirty_bit", "=", "True", ")", ".", "update", "(", "dirty_bit", "=", "False", ")" ]
c3ec2554b026f65ac5f0fc5c9d439277fbac14f9
valid
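Editor's note on the record above: the self-referential-FK branch deserializes in waves, because a dirty record only becomes eligible once its parent is clean, and the loop repeats until no eligible dirty children remain. A toy, morango-free illustration of that fixpoint; the record shapes are invented for the example:

# each toy record knows its parent id and whether it is still dirty
records = {
    "a": {"parent": None, "dirty": True},
    "b": {"parent": "a", "dirty": True},
    "c": {"parent": "b", "dirty": True},
}

def eligible_dirty_children(records):
    clean = {rid for rid, rec in records.items() if not rec["dirty"]}
    return [
        rid for rid, rec in records.items()
        if rec["dirty"] and (rec["parent"] is None or rec["parent"] in clean)
    ]

# keep sweeping until no dirty record has a clean (or absent) parent
while True:
    batch = eligible_dirty_children(records)
    if not batch:
        break
    for rid in batch:
        records[rid]["dirty"] = False  # stands in for deserializing the record

assert not any(rec["dirty"] for rec in records.values())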
_queue_into_buffer
Takes a chunk of data from the store to be put into the buffer to be sent to another morango instance.
morango/utils/sync_utils.py
def _queue_into_buffer(transfersession): """ Takes a chunk of data from the store to be put into the buffer to be sent to another morango instance. """ last_saved_by_conditions = [] filter_prefixes = Filter(transfersession.filter) server_fsic = json.loads(transfersession.server_fsic) client_fsic = json.loads(transfersession.client_fsic) if transfersession.push: fsics = _fsic_queuing_calc(client_fsic, server_fsic) else: fsics = _fsic_queuing_calc(server_fsic, client_fsic) # if fsics are identical or receiving end has newer data, then there is nothing to queue if not fsics: return # create condition for all push FSICs where instance_ids are equal, but internal counters are higher than FSICs counters for instance, counter in six.iteritems(fsics): last_saved_by_conditions += ["(last_saved_instance = '{0}' AND last_saved_counter > {1})".format(instance, counter)] if fsics: last_saved_by_conditions = [_join_with_logical_operator(last_saved_by_conditions, 'OR')] partition_conditions = [] # create condition for filtering by partitions for prefix in filter_prefixes: partition_conditions += ["partition LIKE '{}%'".format(prefix)] if filter_prefixes: partition_conditions = [_join_with_logical_operator(partition_conditions, 'OR')] # combine conditions fsic_and_partition_conditions = _join_with_logical_operator(last_saved_by_conditions + partition_conditions, 'AND') # filter by profile where_condition = _join_with_logical_operator([fsic_and_partition_conditions, "profile = '{}'".format(transfersession.sync_session.profile)], 'AND') # execute raw sql to take all records that match condition, to be put into buffer for transfer with connection.cursor() as cursor: queue_buffer = """INSERT INTO {outgoing_buffer} (model_uuid, serialized, deleted, last_saved_instance, last_saved_counter, hard_deleted, model_name, profile, partition, source_id, conflicting_serialized_data, transfer_session_id, _self_ref_fk) SELECT id, serialized, deleted, last_saved_instance, last_saved_counter, hard_deleted, model_name, profile, partition, source_id, conflicting_serialized_data, '{transfer_session_id}', _self_ref_fk FROM {store} WHERE {condition}""".format(outgoing_buffer=Buffer._meta.db_table, transfer_session_id=transfersession.id, condition=where_condition, store=Store._meta.db_table) cursor.execute(queue_buffer) # take all record max counters that are foreign keyed onto store models, which were queued into the buffer queue_rmc_buffer = """INSERT INTO {outgoing_rmcb} (instance_id, counter, transfer_session_id, model_uuid) SELECT instance_id, counter, '{transfer_session_id}', store_model_id FROM {record_max_counter} AS rmc INNER JOIN {outgoing_buffer} AS buffer ON rmc.store_model_id = buffer.model_uuid WHERE buffer.transfer_session_id = '{transfer_session_id}' """.format(outgoing_rmcb=RecordMaxCounterBuffer._meta.db_table, transfer_session_id=transfersession.id, record_max_counter=RecordMaxCounter._meta.db_table, outgoing_buffer=Buffer._meta.db_table) cursor.execute(queue_rmc_buffer)
def _queue_into_buffer(transfersession): """ Takes a chunk of data from the store to be put into the buffer to be sent to another morango instance. """ last_saved_by_conditions = [] filter_prefixes = Filter(transfersession.filter) server_fsic = json.loads(transfersession.server_fsic) client_fsic = json.loads(transfersession.client_fsic) if transfersession.push: fsics = _fsic_queuing_calc(client_fsic, server_fsic) else: fsics = _fsic_queuing_calc(server_fsic, client_fsic) # if fsics are identical or receiving end has newer data, then there is nothing to queue if not fsics: return # create condition for all push FSICs where instance_ids are equal, but internal counters are higher than FSICs counters for instance, counter in six.iteritems(fsics): last_saved_by_conditions += ["(last_saved_instance = '{0}' AND last_saved_counter > {1})".format(instance, counter)] if fsics: last_saved_by_conditions = [_join_with_logical_operator(last_saved_by_conditions, 'OR')] partition_conditions = [] # create condition for filtering by partitions for prefix in filter_prefixes: partition_conditions += ["partition LIKE '{}%'".format(prefix)] if filter_prefixes: partition_conditions = [_join_with_logical_operator(partition_conditions, 'OR')] # combine conditions fsic_and_partition_conditions = _join_with_logical_operator(last_saved_by_conditions + partition_conditions, 'AND') # filter by profile where_condition = _join_with_logical_operator([fsic_and_partition_conditions, "profile = '{}'".format(transfersession.sync_session.profile)], 'AND') # execute raw sql to take all records that match condition, to be put into buffer for transfer with connection.cursor() as cursor: queue_buffer = """INSERT INTO {outgoing_buffer} (model_uuid, serialized, deleted, last_saved_instance, last_saved_counter, hard_deleted, model_name, profile, partition, source_id, conflicting_serialized_data, transfer_session_id, _self_ref_fk) SELECT id, serialized, deleted, last_saved_instance, last_saved_counter, hard_deleted, model_name, profile, partition, source_id, conflicting_serialized_data, '{transfer_session_id}', _self_ref_fk FROM {store} WHERE {condition}""".format(outgoing_buffer=Buffer._meta.db_table, transfer_session_id=transfersession.id, condition=where_condition, store=Store._meta.db_table) cursor.execute(queue_buffer) # take all record max counters that are foreign keyed onto store models, which were queued into the buffer queue_rmc_buffer = """INSERT INTO {outgoing_rmcb} (instance_id, counter, transfer_session_id, model_uuid) SELECT instance_id, counter, '{transfer_session_id}', store_model_id FROM {record_max_counter} AS rmc INNER JOIN {outgoing_buffer} AS buffer ON rmc.store_model_id = buffer.model_uuid WHERE buffer.transfer_session_id = '{transfer_session_id}' """.format(outgoing_rmcb=RecordMaxCounterBuffer._meta.db_table, transfer_session_id=transfersession.id, record_max_counter=RecordMaxCounter._meta.db_table, outgoing_buffer=Buffer._meta.db_table) cursor.execute(queue_rmc_buffer)
[ "Takes", "a", "chunk", "of", "data", "from", "the", "store", "to", "be", "put", "into", "the", "buffer", "to", "be", "sent", "to", "another", "morango", "instance", "." ]
learningequality/morango
python
https://github.com/learningequality/morango/blob/c3ec2554b026f65ac5f0fc5c9d439277fbac14f9/morango/utils/sync_utils.py#L227-L286
[ "def", "_queue_into_buffer", "(", "transfersession", ")", ":", "last_saved_by_conditions", "=", "[", "]", "filter_prefixes", "=", "Filter", "(", "transfersession", ".", "filter", ")", "server_fsic", "=", "json", ".", "loads", "(", "transfersession", ".", "server_fsic", ")", "client_fsic", "=", "json", ".", "loads", "(", "transfersession", ".", "client_fsic", ")", "if", "transfersession", ".", "push", ":", "fsics", "=", "_fsic_queuing_calc", "(", "client_fsic", ",", "server_fsic", ")", "else", ":", "fsics", "=", "_fsic_queuing_calc", "(", "server_fsic", ",", "client_fsic", ")", "# if fsics are identical or receiving end has newer data, then there is nothing to queue", "if", "not", "fsics", ":", "return", "# create condition for all push FSICs where instance_ids are equal, but internal counters are higher than FSICs counters", "for", "instance", ",", "counter", "in", "six", ".", "iteritems", "(", "fsics", ")", ":", "last_saved_by_conditions", "+=", "[", "\"(last_saved_instance = '{0}' AND last_saved_counter > {1})\"", ".", "format", "(", "instance", ",", "counter", ")", "]", "if", "fsics", ":", "last_saved_by_conditions", "=", "[", "_join_with_logical_operator", "(", "last_saved_by_conditions", ",", "'OR'", ")", "]", "partition_conditions", "=", "[", "]", "# create condition for filtering by partitions", "for", "prefix", "in", "filter_prefixes", ":", "partition_conditions", "+=", "[", "\"partition LIKE '{}%'\"", ".", "format", "(", "prefix", ")", "]", "if", "filter_prefixes", ":", "partition_conditions", "=", "[", "_join_with_logical_operator", "(", "partition_conditions", ",", "'OR'", ")", "]", "# combine conditions", "fsic_and_partition_conditions", "=", "_join_with_logical_operator", "(", "last_saved_by_conditions", "+", "partition_conditions", ",", "'AND'", ")", "# filter by profile", "where_condition", "=", "_join_with_logical_operator", "(", "[", "fsic_and_partition_conditions", ",", "\"profile = '{}'\"", ".", "format", "(", "transfersession", ".", "sync_session", ".", "profile", ")", "]", ",", "'AND'", ")", "# execute raw sql to take all records that match condition, to be put into buffer for transfer", "with", "connection", ".", "cursor", "(", ")", "as", "cursor", ":", "queue_buffer", "=", "\"\"\"INSERT INTO {outgoing_buffer}\n (model_uuid, serialized, deleted, last_saved_instance, last_saved_counter, hard_deleted,\n model_name, profile, partition, source_id, conflicting_serialized_data, transfer_session_id, _self_ref_fk)\n SELECT id, serialized, deleted, last_saved_instance, last_saved_counter, hard_deleted, model_name, profile, partition, source_id, conflicting_serialized_data, '{transfer_session_id}', _self_ref_fk\n FROM {store} WHERE {condition}\"\"\"", ".", "format", "(", "outgoing_buffer", "=", "Buffer", ".", "_meta", ".", "db_table", ",", "transfer_session_id", "=", "transfersession", ".", "id", ",", "condition", "=", "where_condition", ",", "store", "=", "Store", ".", "_meta", ".", "db_table", ")", "cursor", ".", "execute", "(", "queue_buffer", ")", "# take all record max counters that are foreign keyed onto store models, which were queued into the buffer", "queue_rmc_buffer", "=", "\"\"\"INSERT INTO {outgoing_rmcb}\n (instance_id, counter, transfer_session_id, model_uuid)\n SELECT instance_id, counter, '{transfer_session_id}', store_model_id\n FROM {record_max_counter} AS rmc\n INNER JOIN {outgoing_buffer} AS buffer ON rmc.store_model_id = buffer.model_uuid\n WHERE buffer.transfer_session_id = '{transfer_session_id}'\n \"\"\"", ".", "format", "(", "outgoing_rmcb", "=", 
"RecordMaxCounterBuffer", ".", "_meta", ".", "db_table", ",", "transfer_session_id", "=", "transfersession", ".", "id", ",", "record_max_counter", "=", "RecordMaxCounter", ".", "_meta", ".", "db_table", ",", "outgoing_buffer", "=", "Buffer", ".", "_meta", ".", "db_table", ")", "cursor", ".", "execute", "(", "queue_rmc_buffer", ")" ]
c3ec2554b026f65ac5f0fc5c9d439277fbac14f9
valid
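Editor's note on the record above: _join_with_logical_operator is called but not included in these records. The sketch below assumes a minimal implementation consistent with its call sites (join the conditions with the operator and parenthesize the result) and shows the shape of the WHERE clause it would produce; treat it as a guess, not the morango source:

def _join_with_logical_operator(conditions, operator):
    # assumed behavior: join conditions with AND/OR and wrap in parentheses
    return "(%s)" % ((" %s " % operator).join(conditions))

fsic_conditions = ["(last_saved_instance = 'abc' AND last_saved_counter > 5)"]
partition_conditions = ["partition LIKE 'p1%'", "partition LIKE 'p2%'"]

where_condition = _join_with_logical_operator(
    [
        _join_with_logical_operator(fsic_conditions, "OR"),
        _join_with_logical_operator(partition_conditions, "OR"),
    ],
    "AND",
)

# a nested, parenthesized SQL condition ready to be AND'ed with the profile filter
print(where_condition)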
_dequeue_into_store
Takes data from the buffers and merges into the store and record max counters.
morango/utils/sync_utils.py
def _dequeue_into_store(transfersession): """ Takes data from the buffers and merges into the store and record max counters. """ with connection.cursor() as cursor: DBBackend._dequeuing_delete_rmcb_records(cursor, transfersession.id) DBBackend._dequeuing_delete_buffered_records(cursor, transfersession.id) current_id = InstanceIDModel.get_current_instance_and_increment_counter() DBBackend._dequeuing_merge_conflict_buffer(cursor, current_id, transfersession.id) DBBackend._dequeuing_merge_conflict_rmcb(cursor, transfersession.id) DBBackend._dequeuing_update_rmcs_last_saved_by(cursor, current_id, transfersession.id) DBBackend._dequeuing_delete_mc_rmcb(cursor, transfersession.id) DBBackend._dequeuing_delete_mc_buffer(cursor, transfersession.id) DBBackend._dequeuing_insert_remaining_buffer(cursor, transfersession.id) DBBackend._dequeuing_insert_remaining_rmcb(cursor, transfersession.id) DBBackend._dequeuing_delete_remaining_rmcb(cursor, transfersession.id) DBBackend._dequeuing_delete_remaining_buffer(cursor, transfersession.id) if getattr(settings, 'MORANGO_DESERIALIZE_AFTER_DEQUEUING', True): _deserialize_from_store(transfersession.sync_session.profile)
def _dequeue_into_store(transfersession): """ Takes data from the buffers and merges into the store and record max counters. """ with connection.cursor() as cursor: DBBackend._dequeuing_delete_rmcb_records(cursor, transfersession.id) DBBackend._dequeuing_delete_buffered_records(cursor, transfersession.id) current_id = InstanceIDModel.get_current_instance_and_increment_counter() DBBackend._dequeuing_merge_conflict_buffer(cursor, current_id, transfersession.id) DBBackend._dequeuing_merge_conflict_rmcb(cursor, transfersession.id) DBBackend._dequeuing_update_rmcs_last_saved_by(cursor, current_id, transfersession.id) DBBackend._dequeuing_delete_mc_rmcb(cursor, transfersession.id) DBBackend._dequeuing_delete_mc_buffer(cursor, transfersession.id) DBBackend._dequeuing_insert_remaining_buffer(cursor, transfersession.id) DBBackend._dequeuing_insert_remaining_rmcb(cursor, transfersession.id) DBBackend._dequeuing_delete_remaining_rmcb(cursor, transfersession.id) DBBackend._dequeuing_delete_remaining_buffer(cursor, transfersession.id) if getattr(settings, 'MORANGO_DESERIALIZE_AFTER_DEQUEUING', True): _deserialize_from_store(transfersession.sync_session.profile)
[ "Takes", "data", "from", "the", "buffers", "and", "merges", "into", "the", "store", "and", "record", "max", "counters", "." ]
learningequality/morango
python
https://github.com/learningequality/morango/blob/c3ec2554b026f65ac5f0fc5c9d439277fbac14f9/morango/utils/sync_utils.py#L289-L307
[ "def", "_dequeue_into_store", "(", "transfersession", ")", ":", "with", "connection", ".", "cursor", "(", ")", "as", "cursor", ":", "DBBackend", ".", "_dequeuing_delete_rmcb_records", "(", "cursor", ",", "transfersession", ".", "id", ")", "DBBackend", ".", "_dequeuing_delete_buffered_records", "(", "cursor", ",", "transfersession", ".", "id", ")", "current_id", "=", "InstanceIDModel", ".", "get_current_instance_and_increment_counter", "(", ")", "DBBackend", ".", "_dequeuing_merge_conflict_buffer", "(", "cursor", ",", "current_id", ",", "transfersession", ".", "id", ")", "DBBackend", ".", "_dequeuing_merge_conflict_rmcb", "(", "cursor", ",", "transfersession", ".", "id", ")", "DBBackend", ".", "_dequeuing_update_rmcs_last_saved_by", "(", "cursor", ",", "current_id", ",", "transfersession", ".", "id", ")", "DBBackend", ".", "_dequeuing_delete_mc_rmcb", "(", "cursor", ",", "transfersession", ".", "id", ")", "DBBackend", ".", "_dequeuing_delete_mc_buffer", "(", "cursor", ",", "transfersession", ".", "id", ")", "DBBackend", ".", "_dequeuing_insert_remaining_buffer", "(", "cursor", ",", "transfersession", ".", "id", ")", "DBBackend", ".", "_dequeuing_insert_remaining_rmcb", "(", "cursor", ",", "transfersession", ".", "id", ")", "DBBackend", ".", "_dequeuing_delete_remaining_rmcb", "(", "cursor", ",", "transfersession", ".", "id", ")", "DBBackend", ".", "_dequeuing_delete_remaining_buffer", "(", "cursor", ",", "transfersession", ".", "id", ")", "if", "getattr", "(", "settings", ",", "'MORANGO_DESERIALIZE_AFTER_DEQUEUING'", ",", "True", ")", ":", "_deserialize_from_store", "(", "transfersession", ".", "sync_session", ".", "profile", ")" ]
c3ec2554b026f65ac5f0fc5c9d439277fbac14f9
valid
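Editor's note on the record above: the final deserialization step is gated by a Django setting that defaults to True when a project never defines it. The getattr-with-default pattern in isolation, with a faked settings object so the snippet runs anywhere:

class FakeSettings:
    """Stand-in for django.conf.settings; the flag may simply be unset."""

settings = FakeSettings()

# unset flag: getattr falls back to True, so deserialization runs
if getattr(settings, "MORANGO_DESERIALIZE_AFTER_DEQUEUING", True):
    print("deserialize after dequeuing")

# explicitly disabled: the post-dequeue deserialization is skipped
settings.MORANGO_DESERIALIZE_AFTER_DEQUEUING = False
if not getattr(settings, "MORANGO_DESERIALIZE_AFTER_DEQUEUING", True):
    print("skip deserialization")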
max_parameter_substitution
SQLite has a limit on the max number of variables allowed for parameter substitution. This limit is usually 999, but can be compiled to a different number. This function calculates what the max is for the sqlite version running on the device. We use the calculated value to chunk our SQL bulk insert statements when deserializing from the store to the app layer.
morango/util.py
def max_parameter_substitution(): """ SQLite has a limit on the max number of variables allowed for parameter substitution. This limit is usually 999, but can be compiled to a different number. This function calculates what the max is for the sqlite version running on the device. We use the calculated value to chunk our SQL bulk insert statements when deserializing from the store to the app layer. """ if os.path.isfile(SQLITE_VARIABLE_FILE_CACHE): return conn = sqlite3.connect(':memory:') low = 1 high = 1000 # hard limit for SQLITE_MAX_VARIABLE_NUMBER <http://www.sqlite.org/limits.html> conn.execute('CREATE TABLE T1 (id C1)') while low < high - 1: guess = (low + high) // 2 try: statement = 'select * from T1 where id in (%s)' % ','.join(['?' for _ in range(guess)]) values = [i for i in range(guess)] conn.execute(statement, values) except sqlite3.DatabaseError as ex: if 'too many SQL variables' in str(ex): high = guess else: raise else: low = guess conn.close() with open(SQLITE_VARIABLE_FILE_CACHE, 'w') as file: file.write(str(low))
def max_parameter_substitution(): """ SQLite has a limit on the max number of variables allowed for parameter substitution. This limit is usually 999, but can be compiled to a different number. This function calculates what the max is for the sqlite version running on the device. We use the calculated value to chunk our SQL bulk insert statements when deserializing from the store to the app layer. """ if os.path.isfile(SQLITE_VARIABLE_FILE_CACHE): return conn = sqlite3.connect(':memory:') low = 1 high = 1000 # hard limit for SQLITE_MAX_VARIABLE_NUMBER <http://www.sqlite.org/limits.html> conn.execute('CREATE TABLE T1 (id C1)') while low < high - 1: guess = (low + high) // 2 try: statement = 'select * from T1 where id in (%s)' % ','.join(['?' for _ in range(guess)]) values = [i for i in range(guess)] conn.execute(statement, values) except sqlite3.DatabaseError as ex: if 'too many SQL variables' in str(ex): high = guess else: raise else: low = guess conn.close() with open(SQLITE_VARIABLE_FILE_CACHE, 'w') as file: file.write(str(low))
[ "SQLite", "has", "a", "limit", "on", "the", "max", "number", "of", "variables", "allowed", "for", "parameter", "substitution", ".", "This", "limit", "is", "usually", "999", "but", "can", "be", "compiled", "to", "a", "different", "number", ".", "This", "function", "calculates", "what", "the", "max", "is", "for", "the", "sqlite", "version", "running", "on", "the", "device", ".", "We", "use", "the", "calculated", "value", "to", "chunk", "our", "SQL", "bulk", "insert", "statements", "when", "deserializing", "from", "the", "store", "to", "the", "app", "layer", "." ]
learningequality/morango
python
https://github.com/learningequality/morango/blob/c3ec2554b026f65ac5f0fc5c9d439277fbac14f9/morango/util.py#L67-L94
[ "def", "max_parameter_substitution", "(", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "SQLITE_VARIABLE_FILE_CACHE", ")", ":", "return", "conn", "=", "sqlite3", ".", "connect", "(", "':memory:'", ")", "low", "=", "1", "high", "=", "1000", "# hard limit for SQLITE_MAX_VARIABLE_NUMBER <http://www.sqlite.org/limits.html>", "conn", ".", "execute", "(", "'CREATE TABLE T1 (id C1)'", ")", "while", "low", "<", "high", "-", "1", ":", "guess", "=", "(", "low", "+", "high", ")", "//", "2", "try", ":", "statement", "=", "'select * from T1 where id in (%s)'", "%", "','", ".", "join", "(", "[", "'?'", "for", "_", "in", "range", "(", "guess", ")", "]", ")", "values", "=", "[", "i", "for", "i", "in", "range", "(", "guess", ")", "]", "conn", ".", "execute", "(", "statement", ",", "values", ")", "except", "sqlite3", ".", "DatabaseError", "as", "ex", ":", "if", "'too many SQL variables'", "in", "str", "(", "ex", ")", ":", "high", "=", "guess", "else", ":", "raise", "else", ":", "low", "=", "guess", "conn", ".", "close", "(", ")", "with", "open", "(", "SQLITE_VARIABLE_FILE_CACHE", ",", "'w'", ")", "as", "file", ":", "file", ".", "write", "(", "str", "(", "low", ")", ")" ]
c3ec2554b026f65ac5f0fc5c9d439277fbac14f9
valid
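Editor's note on the record above: the binary search caches its result so later bulk statements can be chunked to fit under the limit. A hedged sketch of that consumer side, runnable against an in-memory database; the hard-coded limit is illustrative, whereas morango reads the cached file instead:

import sqlite3

SQLITE_MAX_VARIABLE_NUMBER = 999  # illustrative; the real value comes from the cache file

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE T1 (id C1)")

params = list(range(2500))
for start in range(0, len(params), SQLITE_MAX_VARIABLE_NUMBER):
    chunk = params[start:start + SQLITE_MAX_VARIABLE_NUMBER]
    placeholders = ",".join("?" for _ in chunk)
    # each IN (...) list stays at or under the variable limit
    conn.execute("SELECT * FROM T1 WHERE id IN (%s)" % placeholders, chunk)
conn.close()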
BasicMultiArgumentAuthentication.authenticate_credentials
Authenticate the userargs and password against Django auth backends. The "userargs" string may be just the username, or a querystring-encoded set of params.
morango/api/permissions.py
def authenticate_credentials(self, userargs, password, request=None): """ Authenticate the userargs and password against Django auth backends. The "userargs" string may be just the username, or a querystring-encoded set of params. """ credentials = { 'password': password } if "=" not in userargs: # if it doesn't seem to be in querystring format, just use it as the username credentials[get_user_model().USERNAME_FIELD] = userargs else: # parse out the user args from querystring format into the credentials dict for arg in userargs.split("&"): key, val = arg.split("=") credentials[key] = val # authenticate the user via Django's auth backends user = authenticate(**credentials) if user is None: raise exceptions.AuthenticationFailed('Invalid credentials.') if not user.is_active: raise exceptions.AuthenticationFailed('User inactive or deleted.') return (user, None)
def authenticate_credentials(self, userargs, password, request=None): """ Authenticate the userargs and password against Django auth backends. The "userargs" string may be just the username, or a querystring-encoded set of params. """ credentials = { 'password': password } if "=" not in userargs: # if it doesn't seem to be in querystring format, just use it as the username credentials[get_user_model().USERNAME_FIELD] = userargs else: # parse out the user args from querystring format into the credentials dict for arg in userargs.split("&"): key, val = arg.split("=") credentials[key] = val # authenticate the user via Django's auth backends user = authenticate(**credentials) if user is None: raise exceptions.AuthenticationFailed('Invalid credentials.') if not user.is_active: raise exceptions.AuthenticationFailed('User inactive or deleted.') return (user, None)
[ "Authenticate", "the", "userargs", "and", "password", "against", "Django", "auth", "backends", ".", "The", "userargs", "string", "may", "be", "just", "the", "username", "or", "a", "querystring", "-", "encoded", "set", "of", "params", "." ]
learningequality/morango
python
https://github.com/learningequality/morango/blob/c3ec2554b026f65ac5f0fc5c9d439277fbac14f9/morango/api/permissions.py#L15-L43
[ "def", "authenticate_credentials", "(", "self", ",", "userargs", ",", "password", ",", "request", "=", "None", ")", ":", "credentials", "=", "{", "'password'", ":", "password", "}", "if", "\"=\"", "not", "in", "userargs", ":", "# if it doesn't seem to be in querystring format, just use it as the username", "credentials", "[", "get_user_model", "(", ")", ".", "USERNAME_FIELD", "]", "=", "userargs", "else", ":", "# parse out the user args from querystring format into the credentials dict", "for", "arg", "in", "userargs", ".", "split", "(", "\"&\"", ")", ":", "key", ",", "val", "=", "arg", ".", "split", "(", "\"=\"", ")", "credentials", "[", "key", "]", "=", "val", "# authenticate the user via Django's auth backends", "user", "=", "authenticate", "(", "*", "*", "credentials", ")", "if", "user", "is", "None", ":", "raise", "exceptions", ".", "AuthenticationFailed", "(", "'Invalid credentials.'", ")", "if", "not", "user", ".", "is_active", ":", "raise", "exceptions", ".", "AuthenticationFailed", "(", "'User inactive or deleted.'", ")", "return", "(", "user", ",", "None", ")" ]
c3ec2554b026f65ac5f0fc5c9d439277fbac14f9
valid
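Editor's note on the record above: to make the querystring convention concrete, here is what a client request might look like end to end; the extra field name (facility) and all values are invented:

import base64

# the "username" half of the basic auth pair is a querystring of credentials
userargs = "username=jamie&facility=abc123"
password = "s3cret"
token = base64.b64encode(("%s:%s" % (userargs, password)).encode()).decode()
headers = {"Authorization": "Basic " + token}

# server side, the parsing mirrors the loop in the record above
credentials = {"password": password}
for arg in userargs.split("&"):
    key, val = arg.split("=")
    credentials[key] = val

assert credentials == {"password": "s3cret", "username": "jamie", "facility": "abc123"}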
_multiple_self_ref_fk_check
We check whether a class has more than 1 FK reference to itself.
morango/utils/register_models.py
def _multiple_self_ref_fk_check(class_model): """ We check whether a class has more than 1 FK reference to itself. """ self_fk = [] for f in class_model._meta.concrete_fields: if f.related_model in self_fk: return True if f.related_model == class_model: self_fk.append(class_model) return False
def _multiple_self_ref_fk_check(class_model): """ We check whether a class has more than 1 FK reference to itself. """ self_fk = [] for f in class_model._meta.concrete_fields: if f.related_model in self_fk: return True if f.related_model == class_model: self_fk.append(class_model) return False
[ "We", "check", "whether", "a", "class", "has", "more", "than", "1", "FK", "reference", "to", "itself", "." ]
learningequality/morango
python
https://github.com/learningequality/morango/blob/c3ec2554b026f65ac5f0fc5c9d439277fbac14f9/morango/utils/register_models.py#L21-L31
[ "def", "_multiple_self_ref_fk_check", "(", "class_model", ")", ":", "self_fk", "=", "[", "]", "for", "f", "in", "class_model", ".", "_meta", ".", "concrete_fields", ":", "if", "f", ".", "related_model", "in", "self_fk", ":", "return", "True", "if", "f", ".", "related_model", "==", "class_model", ":", "self_fk", ".", "append", "(", "class_model", ")", "return", "False" ]
c3ec2554b026f65ac5f0fc5c9d439277fbac14f9
valid
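Editor's note on the record above: a plain-Python stand-in for the check, showing that only a second field whose related model is the class itself flips the result to True; fake field objects replace Django's _meta machinery:

class FakeField:
    def __init__(self, related_model):
        self.related_model = related_model

class Node:
    pass

def multiple_self_ref_fk_check(class_model, concrete_fields):
    # same logic as the record, with the fields passed in explicitly
    self_fk = []
    for f in concrete_fields:
        if f.related_model in self_fk:
            return True
        if f.related_model == class_model:
            self_fk.append(class_model)
    return False

assert multiple_self_ref_fk_check(Node, [FakeField(None), FakeField(Node)]) is False
assert multiple_self_ref_fk_check(Node, [FakeField(Node), FakeField(Node)]) is True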
add_syncable_models
Per profile, adds each model to a dictionary mapping the morango model name to its model class. We sort by ForeignKey dependencies to safely sync data.
morango/utils/register_models.py
def add_syncable_models(): """ Per profile, adds each model to a dictionary mapping the morango model name to its model class. We sort by ForeignKey dependencies to safely sync data. """ import django.apps from morango.models import SyncableModel from morango.manager import SyncableModelManager from morango.query import SyncableModelQuerySet model_list = [] for model_class in django.apps.apps.get_models(): # several validation checks to assert models will be syncing correctly if issubclass(model_class, SyncableModel): name = model_class.__name__ if _multiple_self_ref_fk_check(model_class): raise InvalidMorangoModelConfiguration("Syncing models with more than 1 self referential ForeignKey is not supported.") try: from mptt import models from morango.utils.morango_mptt import MorangoMPTTModel, MorangoMPTTTreeManager, MorangoTreeQuerySet # mptt syncable model checks if issubclass(model_class, models.MPTTModel): if not issubclass(model_class, MorangoMPTTModel): raise InvalidMorangoModelConfiguration("{} that inherits from MPTTModel, should instead inherit from MorangoMPTTModel.".format(name)) if not isinstance(model_class.objects, MorangoMPTTTreeManager): raise InvalidMPTTManager("Manager for {} must inherit from MorangoMPTTTreeManager.".format(name)) if not isinstance(model_class.objects.none(), MorangoTreeQuerySet): raise InvalidMPTTQuerySet("Queryset for {} model must inherit from MorangoTreeQuerySet.".format(name)) except ImportError: pass # syncable model checks if not isinstance(model_class.objects, SyncableModelManager): raise InvalidSyncableManager("Manager for {} must inherit from SyncableModelManager.".format(name)) if not isinstance(model_class.objects.none(), SyncableModelQuerySet): raise InvalidSyncableQueryset("Queryset for {} model must inherit from SyncableModelQuerySet.".format(name)) if model_class._meta.many_to_many: raise UnsupportedFieldType("{} model with a ManyToManyField is not supported in morango.") if not hasattr(model_class, 'morango_model_name'): raise InvalidMorangoModelConfiguration("{} model must define a morango_model_name attribute".format(name)) if not hasattr(model_class, 'morango_profile'): raise InvalidMorangoModelConfiguration("{} model must define a morango_profile attribute".format(name)) # create empty list to hold model classes for profile if not yet created profile = model_class.morango_profile _profile_models[profile] = _profile_models.get(profile, []) # don't sync models where morango_model_name is None if model_class.morango_model_name is not None: _insert_model_into_profile_dict(model_class, profile) # for each profile, create a dict mapping from morango model names to model class for profile, model_list in iteritems(_profile_models): syncable_models_dict = OrderedDict() for model_class in model_list: syncable_models_dict[model_class.morango_model_name] = model_class _profile_models[profile] = syncable_models_dict
def add_syncable_models(): """ Per profile, adds each model to a dictionary mapping the morango model name to its model class. We sort by ForeignKey dependencies to safely sync data. """ import django.apps from morango.models import SyncableModel from morango.manager import SyncableModelManager from morango.query import SyncableModelQuerySet model_list = [] for model_class in django.apps.apps.get_models(): # several validation checks to assert models will be syncing correctly if issubclass(model_class, SyncableModel): name = model_class.__name__ if _multiple_self_ref_fk_check(model_class): raise InvalidMorangoModelConfiguration("Syncing models with more than 1 self referential ForeignKey is not supported.") try: from mptt import models from morango.utils.morango_mptt import MorangoMPTTModel, MorangoMPTTTreeManager, MorangoTreeQuerySet # mptt syncable model checks if issubclass(model_class, models.MPTTModel): if not issubclass(model_class, MorangoMPTTModel): raise InvalidMorangoModelConfiguration("{} that inherits from MPTTModel, should instead inherit from MorangoMPTTModel.".format(name)) if not isinstance(model_class.objects, MorangoMPTTTreeManager): raise InvalidMPTTManager("Manager for {} must inherit from MorangoMPTTTreeManager.".format(name)) if not isinstance(model_class.objects.none(), MorangoTreeQuerySet): raise InvalidMPTTQuerySet("Queryset for {} model must inherit from MorangoTreeQuerySet.".format(name)) except ImportError: pass # syncable model checks if not isinstance(model_class.objects, SyncableModelManager): raise InvalidSyncableManager("Manager for {} must inherit from SyncableModelManager.".format(name)) if not isinstance(model_class.objects.none(), SyncableModelQuerySet): raise InvalidSyncableQueryset("Queryset for {} model must inherit from SyncableModelQuerySet.".format(name)) if model_class._meta.many_to_many: raise UnsupportedFieldType("{} model with a ManyToManyField is not supported in morango.") if not hasattr(model_class, 'morango_model_name'): raise InvalidMorangoModelConfiguration("{} model must define a morango_model_name attribute".format(name)) if not hasattr(model_class, 'morango_profile'): raise InvalidMorangoModelConfiguration("{} model must define a morango_profile attribute".format(name)) # create empty list to hold model classes for profile if not yet created profile = model_class.morango_profile _profile_models[profile] = _profile_models.get(profile, []) # don't sync models where morango_model_name is None if model_class.morango_model_name is not None: _insert_model_into_profile_dict(model_class, profile) # for each profile, create a dict mapping from morango model names to model class for profile, model_list in iteritems(_profile_models): syncable_models_dict = OrderedDict() for model_class in model_list: syncable_models_dict[model_class.morango_model_name] = model_class _profile_models[profile] = syncable_models_dict
[ "Per", "profile", "adds", "each", "model", "to", "a", "dictionary", "mapping", "the", "morango", "model", "name", "to", "its", "model", "class", ".", "We", "sort", "by", "ForeignKey", "dependencies", "to", "safely", "sync", "data", "." ]
learningequality/morango
python
https://github.com/learningequality/morango/blob/c3ec2554b026f65ac5f0fc5c9d439277fbac14f9/morango/utils/register_models.py#L56-L112
[ "def", "add_syncable_models", "(", ")", ":", "import", "django", ".", "apps", "from", "morango", ".", "models", "import", "SyncableModel", "from", "morango", ".", "manager", "import", "SyncableModelManager", "from", "morango", ".", "query", "import", "SyncableModelQuerySet", "model_list", "=", "[", "]", "for", "model_class", "in", "django", ".", "apps", ".", "apps", ".", "get_models", "(", ")", ":", "# several validation checks to assert models will be syncing correctly", "if", "issubclass", "(", "model_class", ",", "SyncableModel", ")", ":", "name", "=", "model_class", ".", "__name__", "if", "_multiple_self_ref_fk_check", "(", "model_class", ")", ":", "raise", "InvalidMorangoModelConfiguration", "(", "\"Syncing models with more than 1 self referential ForeignKey is not supported.\"", ")", "try", ":", "from", "mptt", "import", "models", "from", "morango", ".", "utils", ".", "morango_mptt", "import", "MorangoMPTTModel", ",", "MorangoMPTTTreeManager", ",", "MorangoTreeQuerySet", "# mptt syncable model checks", "if", "issubclass", "(", "model_class", ",", "models", ".", "MPTTModel", ")", ":", "if", "not", "issubclass", "(", "model_class", ",", "MorangoMPTTModel", ")", ":", "raise", "InvalidMorangoModelConfiguration", "(", "\"{} that inherits from MPTTModel, should instead inherit from MorangoMPTTModel.\"", ".", "format", "(", "name", ")", ")", "if", "not", "isinstance", "(", "model_class", ".", "objects", ",", "MorangoMPTTTreeManager", ")", ":", "raise", "InvalidMPTTManager", "(", "\"Manager for {} must inherit from MorangoMPTTTreeManager.\"", ".", "format", "(", "name", ")", ")", "if", "not", "isinstance", "(", "model_class", ".", "objects", ".", "none", "(", ")", ",", "MorangoTreeQuerySet", ")", ":", "raise", "InvalidMPTTQuerySet", "(", "\"Queryset for {} model must inherit from MorangoTreeQuerySet.\"", ".", "format", "(", "name", ")", ")", "except", "ImportError", ":", "pass", "# syncable model checks", "if", "not", "isinstance", "(", "model_class", ".", "objects", ",", "SyncableModelManager", ")", ":", "raise", "InvalidSyncableManager", "(", "\"Manager for {} must inherit from SyncableModelManager.\"", ".", "format", "(", "name", ")", ")", "if", "not", "isinstance", "(", "model_class", ".", "objects", ".", "none", "(", ")", ",", "SyncableModelQuerySet", ")", ":", "raise", "InvalidSyncableQueryset", "(", "\"Queryset for {} model must inherit from SyncableModelQuerySet.\"", ".", "format", "(", "name", ")", ")", "if", "model_class", ".", "_meta", ".", "many_to_many", ":", "raise", "UnsupportedFieldType", "(", "\"{} model with a ManyToManyField is not supported in morango.\"", ")", "if", "not", "hasattr", "(", "model_class", ",", "'morango_model_name'", ")", ":", "raise", "InvalidMorangoModelConfiguration", "(", "\"{} model must define a morango_model_name attribute\"", ".", "format", "(", "name", ")", ")", "if", "not", "hasattr", "(", "model_class", ",", "'morango_profile'", ")", ":", "raise", "InvalidMorangoModelConfiguration", "(", "\"{} model must define a morango_profile attribute\"", ".", "format", "(", "name", ")", ")", "# create empty list to hold model classes for profile if not yet created", "profile", "=", "model_class", ".", "morango_profile", "_profile_models", "[", "profile", "]", "=", "_profile_models", ".", "get", "(", "profile", ",", "[", "]", ")", "# don't sync models where morango_model_name is None", "if", "model_class", ".", "morango_model_name", "is", "not", "None", ":", "_insert_model_into_profile_dict", "(", "model_class", ",", "profile", ")", "# for each profile, 
create a dict mapping from morango model names to model class", "for", "profile", ",", "model_list", "in", "iteritems", "(", "_profile_models", ")", ":", "syncable_models_dict", "=", "OrderedDict", "(", ")", "for", "model_class", "in", "model_list", ":", "syncable_models_dict", "[", "model_class", ".", "morango_model_name", "]", "=", "model_class", "_profile_models", "[", "profile", "]", "=", "syncable_models_dict" ]
c3ec2554b026f65ac5f0fc5c9d439277fbac14f9
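Editor's note: the validation rules tokenized above are easier to read against a concrete model. Below is a minimal sketch of a Django model that would pass add_syncable_models' checks; the class name, field names, and profile string are hypothetical, and real morango models also implement the ID/partition calculation hooks that SyncableModel declares (not shown here).

from django.db import models
from morango.models import SyncableModel

class Facility(SyncableModel):              # hypothetical model name
    morango_model_name = "facility"         # required attribute; None disables syncing
    morango_profile = "facilitydata"        # hypothetical profile name

    name = models.CharField(max_length=100)
    # at most one self-referential ForeignKey is allowed, and no
    # ManyToManyField anywhere on the model
    parent = models.ForeignKey("self", null=True, blank=True,
                               on_delete=models.CASCADE)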
valid
SQLWrapper._bulk_insert_into_app_models
Example query: `REPLACE INTO model (F1,F2,F3) VALUES (%s, %s, %s), (%s, %s, %s), (%s, %s, %s)` where values=[1,2,3,4,5,6,7,8,9]
morango/utils/backends/sqlite.py
def _bulk_insert_into_app_models(self, cursor, app_model, fields, db_values, placeholder_list): """ Example query: `REPLACE INTO model (F1,F2,F3) VALUES (%s, %s, %s), (%s, %s, %s), (%s, %s, %s)` where values=[1,2,3,4,5,6,7,8,9] """ # calculate and create equal sized chunks of data to insert incrementally num_of_rows_able_to_insert = self.SQLITE_MAX_VARIABLE_NUMBER // len(fields) num_of_values_able_to_insert = num_of_rows_able_to_insert * len(fields) value_chunks = [db_values[x:x + num_of_values_able_to_insert] for x in range(0, len(db_values), num_of_values_able_to_insert)] placeholder_chunks = [placeholder_list[x: x + num_of_rows_able_to_insert] for x in range(0, len(placeholder_list), num_of_rows_able_to_insert)] # insert data chunks fields = str(tuple(str(f.attname) for f in fields)).replace("'", '') for values, params in zip(value_chunks, placeholder_chunks): placeholder_str = ', '.join(params).replace("'", '') insert = """REPLACE INTO {app_model} {fields} VALUES {placeholder_str} """.format(app_model=app_model, fields=fields, placeholder_str=placeholder_str) # use DB-APIs parameter substitution (2nd parameter expects a sequence) cursor.execute(insert, values)
def _bulk_insert_into_app_models(self, cursor, app_model, fields, db_values, placeholder_list): """ Example query: `REPLACE INTO model (F1,F2,F3) VALUES (%s, %s, %s), (%s, %s, %s), (%s, %s, %s)` where values=[1,2,3,4,5,6,7,8,9] """ # calculate and create equal sized chunks of data to insert incrementally num_of_rows_able_to_insert = self.SQLITE_MAX_VARIABLE_NUMBER // len(fields) num_of_values_able_to_insert = num_of_rows_able_to_insert * len(fields) value_chunks = [db_values[x:x + num_of_values_able_to_insert] for x in range(0, len(db_values), num_of_values_able_to_insert)] placeholder_chunks = [placeholder_list[x: x + num_of_rows_able_to_insert] for x in range(0, len(placeholder_list), num_of_rows_able_to_insert)] # insert data chunks fields = str(tuple(str(f.attname) for f in fields)).replace("'", '') for values, params in zip(value_chunks, placeholder_chunks): placeholder_str = ', '.join(params).replace("'", '') insert = """REPLACE INTO {app_model} {fields} VALUES {placeholder_str} """.format(app_model=app_model, fields=fields, placeholder_str=placeholder_str) # use DB-APIs parameter substitution (2nd parameter expects a sequence) cursor.execute(insert, values)
[ "Example", "query", ":", "REPLACE", "INTO", "model", "(", "F1", "F2", "F3", ")", "VALUES", "(", "%s", "%s", "%s", ")", "(", "%s", "%s", "%s", ")", "(", "%s", "%s", "%s", ")", "where", "values", "=", "[", "1", "2", "3", "4", "5", "6", "7", "8", "9", "]" ]
learningequality/morango
python
https://github.com/learningequality/morango/blob/c3ec2554b026f65ac5f0fc5c9d439277fbac14f9/morango/utils/backends/sqlite.py#L18-L37
[ "def", "_bulk_insert_into_app_models", "(", "self", ",", "cursor", ",", "app_model", ",", "fields", ",", "db_values", ",", "placeholder_list", ")", ":", "# calculate and create equal sized chunks of data to insert incrementally", "num_of_rows_able_to_insert", "=", "self", ".", "SQLITE_MAX_VARIABLE_NUMBER", "//", "len", "(", "fields", ")", "num_of_values_able_to_insert", "=", "num_of_rows_able_to_insert", "*", "len", "(", "fields", ")", "value_chunks", "=", "[", "db_values", "[", "x", ":", "x", "+", "num_of_values_able_to_insert", "]", "for", "x", "in", "range", "(", "0", ",", "len", "(", "db_values", ")", ",", "num_of_values_able_to_insert", ")", "]", "placeholder_chunks", "=", "[", "placeholder_list", "[", "x", ":", "x", "+", "num_of_rows_able_to_insert", "]", "for", "x", "in", "range", "(", "0", ",", "len", "(", "placeholder_list", ")", ",", "num_of_rows_able_to_insert", ")", "]", "# insert data chunks", "fields", "=", "str", "(", "tuple", "(", "str", "(", "f", ".", "attname", ")", "for", "f", "in", "fields", ")", ")", ".", "replace", "(", "\"'\"", ",", "''", ")", "for", "values", ",", "params", "in", "zip", "(", "value_chunks", ",", "placeholder_chunks", ")", ":", "placeholder_str", "=", "', '", ".", "join", "(", "params", ")", ".", "replace", "(", "\"'\"", ",", "''", ")", "insert", "=", "\"\"\"REPLACE INTO {app_model} {fields}\n VALUES {placeholder_str}\n \"\"\"", ".", "format", "(", "app_model", "=", "app_model", ",", "fields", "=", "fields", ",", "placeholder_str", "=", "placeholder_str", ")", "# use DB-APIs parameter substitution (2nd parameter expects a sequence)", "cursor", ".", "execute", "(", "insert", ",", "values", ")" ]
c3ec2554b026f65ac5f0fc5c9d439277fbac14f9
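Editor's note: the chunk sizing in _bulk_insert_into_app_models is plain integer arithmetic: SQLite caps the number of bound parameters per statement, so rows per REPLACE = cap // fields-per-row. A self-contained sketch of just that chunking (999 is SQLite's historical default cap; the helper name is ours):

SQLITE_MAX_VARIABLE_NUMBER = 999   # SQLite's historical default parameter cap

def chunk_values(db_values, num_fields):
    # rows per statement = parameter cap // parameters needed per row
    rows_per_stmt = SQLITE_MAX_VARIABLE_NUMBER // num_fields
    values_per_stmt = rows_per_stmt * num_fields
    return [db_values[i:i + values_per_stmt]
            for i in range(0, len(db_values), values_per_stmt)]

# 3 fields -> 333 rows (999 bound params) per REPLACE statement
chunks = chunk_values(list(range(6000)), num_fields=3)
assert all(len(chunk) <= SQLITE_MAX_VARIABLE_NUMBER for chunk in chunks)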
valid
NetworkSyncConnection._request
Generic request method designed to handle any morango endpoint. :param endpoint: constant representing which morango endpoint we are querying :param method: HTTP verb/method for request :param lookup: the pk value for the specific object we are querying :param data: dict that will be form-encoded in request :param params: dict to be sent as part of URL's query string :param userargs: Authorization credentials :param password: :return: ``Response`` object from request
morango/syncsession.py
def _request(self, endpoint, method="GET", lookup=None, data={}, params={}, userargs=None, password=None): """ Generic request method designed to handle any morango endpoint. :param endpoint: constant representing which morango endpoint we are querying :param method: HTTP verb/method for request :param lookup: the pk value for the specific object we are querying :param data: dict that will be form-encoded in request :param params: dict to be sent as part of URL's query string :param userargs: Authorization credentials :param password: :return: ``Response`` object from request """ # convert user arguments into query str for passing to auth layer if isinstance(userargs, dict): userargs = "&".join(["{}={}".format(key, val) for (key, val) in iteritems(userargs)]) # build up url and send request if lookup: lookup = lookup + '/' url = urljoin(urljoin(self.base_url, endpoint), lookup) auth = (userargs, password) if userargs else None resp = requests.request(method, url, json=data, params=params, auth=auth) resp.raise_for_status() return resp
def _request(self, endpoint, method="GET", lookup=None, data={}, params={}, userargs=None, password=None): """ Generic request method designed to handle any morango endpoint. :param endpoint: constant representing which morango endpoint we are querying :param method: HTTP verb/method for request :param lookup: the pk value for the specific object we are querying :param data: dict that will be form-encoded in request :param params: dict to be sent as part of URL's query string :param userargs: Authorization credentials :param password: :return: ``Response`` object from request """ # convert user arguments into query str for passing to auth layer if isinstance(userargs, dict): userargs = "&".join(["{}={}".format(key, val) for (key, val) in iteritems(userargs)]) # build up url and send request if lookup: lookup = lookup + '/' url = urljoin(urljoin(self.base_url, endpoint), lookup) auth = (userargs, password) if userargs else None resp = requests.request(method, url, json=data, params=params, auth=auth) resp.raise_for_status() return resp
[ "Generic", "request", "method", "designed", "to", "handle", "any", "morango", "endpoint", "." ]
learningequality/morango
python
https://github.com/learningequality/morango/blob/c3ec2554b026f65ac5f0fc5c9d439277fbac14f9/morango/syncsession.py#L76-L100
[ "def", "_request", "(", "self", ",", "endpoint", ",", "method", "=", "\"GET\"", ",", "lookup", "=", "None", ",", "data", "=", "{", "}", ",", "params", "=", "{", "}", ",", "userargs", "=", "None", ",", "password", "=", "None", ")", ":", "# convert user arguments into query str for passing to auth layer", "if", "isinstance", "(", "userargs", ",", "dict", ")", ":", "userargs", "=", "\"&\"", ".", "join", "(", "[", "\"{}={}\"", ".", "format", "(", "key", ",", "val", ")", "for", "(", "key", ",", "val", ")", "in", "iteritems", "(", "userargs", ")", "]", ")", "# build up url and send request", "if", "lookup", ":", "lookup", "=", "lookup", "+", "'/'", "url", "=", "urljoin", "(", "urljoin", "(", "self", ".", "base_url", ",", "endpoint", ")", ",", "lookup", ")", "auth", "=", "(", "userargs", ",", "password", ")", "if", "userargs", "else", "None", "resp", "=", "requests", ".", "request", "(", "method", ",", "url", ",", "json", "=", "data", ",", "params", "=", "params", ",", "auth", "=", "auth", ")", "resp", ".", "raise_for_status", "(", ")", "return", "resp" ]
c3ec2554b026f65ac5f0fc5c9d439277fbac14f9
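Editor's note: a usage sketch for the request helper; the constructor arguments and endpoint string are hypothetical. Two details from the code above are worth noting: userargs dicts are flattened to a query-style string before being used as the auth username, and urljoin drops the last path segment of base_url unless it ends with a slash, so both URL parts should carry trailing slashes. The mutable default arguments (data={}, params={}) are also a Python pitfall best avoided in new code.

conn = NetworkSyncConnection(base_url="https://example.com/api/")  # assumed ctor

resp = conn._request(
    "syncsessions/",                    # hypothetical endpoint constant
    method="GET",
    lookup="1234abcd",                  # becomes .../syncsessions/1234abcd/
    userargs={"username": "admin"},     # flattened to "username=admin"
    password="secret",
)
data = resp.json()  # raise_for_status() already ran inside _request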
valid
fuzzyfinder
Args: input (str): A partial string which is typically entered by a user. collection (iterable): A collection of strings which will be filtered based on the `input`. accessor (function): If the `collection` is not an iterable of strings, then use the accessor to fetch the string that will be used for fuzzy matching. sort_results(bool): The suggestions are sorted by considering the smallest contiguous match, followed by where the match is found in the full string. If two suggestions have the same rank, they are then sorted alpha-numerically. This parameter controls the *last tie-breaker-alpha-numeric sorting*. The sorting based on match length and position will be intact. Returns: suggestions (generator): A generator object that produces a list of suggestions narrowed down from `collection` using the `input`.
fuzzyfinder/main.py
def fuzzyfinder(input, collection, accessor=lambda x: x, sort_results=True): """ Args: input (str): A partial string which is typically entered by a user. collection (iterable): A collection of strings which will be filtered based on the `input`. accessor (function): If the `collection` is not an iterable of strings, then use the accessor to fetch the string that will be used for fuzzy matching. sort_results(bool): The suggestions are sorted by considering the smallest contiguous match, followed by where the match is found in the full string. If two suggestions have the same rank, they are then sorted alpha-numerically. This parameter controls the *last tie-breaker-alpha-numeric sorting*. The sorting based on match length and position will be intact. Returns: suggestions (generator): A generator object that produces a list of suggestions narrowed down from `collection` using the `input`. """ suggestions = [] input = str(input) if not isinstance(input, str) else input pat = '.*?'.join(map(re.escape, input)) pat = '(?=({0}))'.format(pat) # lookahead regex to manage overlapping matches regex = re.compile(pat, re.IGNORECASE) for item in collection: r = list(regex.finditer(accessor(item))) if r: best = min(r, key=lambda x: len(x.group(1))) # find shortest match suggestions.append((len(best.group(1)), best.start(), accessor(item), item)) if sort_results: return (z[-1] for z in sorted(suggestions)) else: return (z[-1] for z in sorted(suggestions, key=lambda x: x[:2]))
def fuzzyfinder(input, collection, accessor=lambda x: x, sort_results=True): """ Args: input (str): A partial string which is typically entered by a user. collection (iterable): A collection of strings which will be filtered based on the `input`. accessor (function): If the `collection` is not an iterable of strings, then use the accessor to fetch the string that will be used for fuzzy matching. sort_results(bool): The suggestions are sorted by considering the smallest contiguous match, followed by where the match is found in the full string. If two suggestions have the same rank, they are then sorted alpha-numerically. This parameter controls the *last tie-breaker-alpha-numeric sorting*. The sorting based on match length and position will be intact. Returns: suggestions (generator): A generator object that produces a list of suggestions narrowed down from `collection` using the `input`. """ suggestions = [] input = str(input) if not isinstance(input, str) else input pat = '.*?'.join(map(re.escape, input)) pat = '(?=({0}))'.format(pat) # lookahead regex to manage overlapping matches regex = re.compile(pat, re.IGNORECASE) for item in collection: r = list(regex.finditer(accessor(item))) if r: best = min(r, key=lambda x: len(x.group(1))) # find shortest match suggestions.append((len(best.group(1)), best.start(), accessor(item), item)) if sort_results: return (z[-1] for z in sorted(suggestions)) else: return (z[-1] for z in sorted(suggestions, key=lambda x: x[:2]))
[ "Args", ":", "input", "(", "str", ")", ":", "A", "partial", "string", "which", "is", "typically", "entered", "by", "a", "user", ".", "collection", "(", "iterable", ")", ":", "A", "collection", "of", "strings", "which", "will", "be", "filtered", "based", "on", "the", "input", ".", "accessor", "(", "function", ")", ":", "If", "the", "collection", "is", "not", "an", "iterable", "of", "strings", "then", "use", "the", "accessor", "to", "fetch", "the", "string", "that", "will", "be", "used", "for", "fuzzy", "matching", ".", "sort_results", "(", "bool", ")", ":", "The", "suggestions", "are", "sorted", "by", "considering", "the", "smallest", "contiguous", "match", "followed", "by", "where", "the", "match", "is", "found", "in", "the", "full", "string", ".", "If", "two", "suggestions", "have", "the", "same", "rank", "they", "are", "then", "sorted", "alpha", "-", "numerically", ".", "This", "parameter", "controls", "the", "*", "last", "tie", "-", "breaker", "-", "alpha", "-", "numeric", "sorting", "*", ".", "The", "sorting", "based", "on", "match", "length", "and", "position", "will", "be", "intact", "." ]
amjith/fuzzyfinder
python
https://github.com/amjith/fuzzyfinder/blob/43fe7676cad68e269bbace7bb2fd9b77f2e07da9/fuzzyfinder/main.py#L6-L41
[ "def", "fuzzyfinder", "(", "input", ",", "collection", ",", "accessor", "=", "lambda", "x", ":", "x", ",", "sort_results", "=", "True", ")", ":", "suggestions", "=", "[", "]", "input", "=", "str", "(", "input", ")", "if", "not", "isinstance", "(", "input", ",", "str", ")", "else", "input", "pat", "=", "'.*?'", ".", "join", "(", "map", "(", "re", ".", "escape", ",", "input", ")", ")", "pat", "=", "'(?=({0}))'", ".", "format", "(", "pat", ")", "# lookahead regex to manage overlapping matches", "regex", "=", "re", ".", "compile", "(", "pat", ",", "re", ".", "IGNORECASE", ")", "for", "item", "in", "collection", ":", "r", "=", "list", "(", "regex", ".", "finditer", "(", "accessor", "(", "item", ")", ")", ")", "if", "r", ":", "best", "=", "min", "(", "r", ",", "key", "=", "lambda", "x", ":", "len", "(", "x", ".", "group", "(", "1", ")", ")", ")", "# find shortest match", "suggestions", ".", "append", "(", "(", "len", "(", "best", ".", "group", "(", "1", ")", ")", ",", "best", ".", "start", "(", ")", ",", "accessor", "(", "item", ")", ",", "item", ")", ")", "if", "sort_results", ":", "return", "(", "z", "[", "-", "1", "]", "for", "z", "in", "sorted", "(", "suggestions", ")", ")", "else", ":", "return", "(", "z", "[", "-", "1", "]", "for", "z", "in", "sorted", "(", "suggestions", ",", "key", "=", "lambda", "x", ":", "x", "[", ":", "2", "]", ")", ")" ]
43fe7676cad68e269bbace7bb2fd9b77f2e07da9
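Editor's note: a concrete run makes the ranking visible (this mirrors the project's own README example). The pattern for input 'djm' compiles to the lookahead '(?=(d.*?j.*?m))', a match is attempted at every start position, and suggestions are ranked by shortest captured group, then by its start offset.

from fuzzyfinder import fuzzyfinder

collection = ['django_migrations.py', 'django_admin_log.py',
              'api_user.doc', 'user_group.doc', 'users.txt']
print(list(fuzzyfinder('djm', collection)))
# -> ['django_migrations.py', 'django_admin_log.py']
#    'django_m' is an 8-char match; 'django_adm' needs 10 chars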
valid
TokenGenerator.create_access_token
Creates an access token. TODO: check valid in hours TODO: maybe specify how often a token can be used
twitcher/tokengenerator.py
def create_access_token(self, valid_in_hours=1, data=None): """ Creates an access token. TODO: check valid in hours TODO: maybe specify how often a token can be used """ data = data or {} token = AccessToken( token=self.generate(), expires_at=expires_at(hours=valid_in_hours), data=data) return token
def create_access_token(self, valid_in_hours=1, data=None): """ Creates an access token. TODO: check valid in hours TODO: maybe specify how often a token can be used """ data = data or {} token = AccessToken( token=self.generate(), expires_at=expires_at(hours=valid_in_hours), data=data) return token
[ "Creates", "an", "access", "token", "." ]
bird-house/twitcher
python
https://github.com/bird-house/twitcher/blob/e6a36b3aeeacf44eec537434b0fb87c09ab54b5f/twitcher/tokengenerator.py#L22-L34
[ "def", "create_access_token", "(", "self", ",", "valid_in_hours", "=", "1", ",", "data", "=", "None", ")", ":", "data", "=", "data", "or", "{", "}", "token", "=", "AccessToken", "(", "token", "=", "self", ".", "generate", "(", ")", ",", "expires_at", "=", "expires_at", "(", "hours", "=", "valid_in_hours", ")", ",", "data", "=", "data", ")", "return", "token" ]
e6a36b3aeeacf44eec537434b0fb87c09ab54b5f
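Editor's note: a brief usage sketch, assuming a concrete generator subclass whose generate() returns a random token string; the subclass name and data payload key are hypothetical.

gen = UuidTokenGenerator()           # assumed concrete subclass of TokenGenerator
token = gen.create_access_token(
    valid_in_hours=24,
    data={'esgf_token': 'abc123'},   # hypothetical payload, stored as-is
)
# AccessToken bundles the random string, an absolute expiry computed by
# expires_at(hours=...), and the optional data dict.
print(token.token, token.expires_at)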
valid
MongodbServiceStore.save_service
Stores an OWS service in mongodb.
twitcher/store/mongodb.py
def save_service(self, service, overwrite=True): """ Stores an OWS service in mongodb. """ name = namesgenerator.get_sane_name(service.name) if not name: name = namesgenerator.get_random_name() if self.collection.count_documents({'name': name}) > 0: name = namesgenerator.get_random_name(retry=True) # check if service is already registered if self.collection.count_documents({'name': name}) > 0: if overwrite: self.collection.delete_one({'name': name}) else: raise Exception("service name already registered.") self.collection.insert_one(Service( name=name, url=baseurl(service.url), type=service.type, purl=service.purl, public=service.public, auth=service.auth, verify=service.verify)) return self.fetch_by_name(name=name)
def save_service(self, service, overwrite=True): """ Stores an OWS service in mongodb. """ name = namesgenerator.get_sane_name(service.name) if not name: name = namesgenerator.get_random_name() if self.collection.count_documents({'name': name}) > 0: name = namesgenerator.get_random_name(retry=True) # check if service is already registered if self.collection.count_documents({'name': name}) > 0: if overwrite: self.collection.delete_one({'name': name}) else: raise Exception("service name already registered.") self.collection.insert_one(Service( name=name, url=baseurl(service.url), type=service.type, purl=service.purl, public=service.public, auth=service.auth, verify=service.verify)) return self.fetch_by_name(name=name)
[ "Stores", "an", "OWS", "service", "in", "mongodb", "." ]
bird-house/twitcher
python
https://github.com/bird-house/twitcher/blob/e6a36b3aeeacf44eec537434b0fb87c09ab54b5f/twitcher/store/mongodb.py#L52-L75
[ "def", "save_service", "(", "self", ",", "service", ",", "overwrite", "=", "True", ")", ":", "name", "=", "namesgenerator", ".", "get_sane_name", "(", "service", ".", "name", ")", "if", "not", "name", ":", "name", "=", "namesgenerator", ".", "get_random_name", "(", ")", "if", "self", ".", "collection", ".", "count_documents", "(", "{", "'name'", ":", "name", "}", ")", ">", "0", ":", "name", "=", "namesgenerator", ".", "get_random_name", "(", "retry", "=", "True", ")", "# check if service is already registered", "if", "self", ".", "collection", ".", "count_documents", "(", "{", "'name'", ":", "name", "}", ")", ">", "0", ":", "if", "overwrite", ":", "self", ".", "collection", ".", "delete_one", "(", "{", "'name'", ":", "name", "}", ")", "else", ":", "raise", "Exception", "(", "\"service name already registered.\"", ")", "self", ".", "collection", ".", "insert_one", "(", "Service", "(", "name", "=", "name", ",", "url", "=", "baseurl", "(", "service", ".", "url", ")", ",", "type", "=", "service", ".", "type", ",", "purl", "=", "service", ".", "purl", ",", "public", "=", "service", ".", "public", ",", "auth", "=", "service", ".", "auth", ",", "verify", "=", "service", ".", "verify", ")", ")", "return", "self", ".", "fetch_by_name", "(", "name", "=", "name", ")" ]
e6a36b3aeeacf44eec537434b0fb87c09ab54b5f
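Editor's note: the collision handling in save_service is a three-step policy: sanitize the requested name, fall back to a random name if the sanitized one is taken, then apply the overwrite flag only if the final name still collides. A hedged usage sketch (the constructor and Service fields are assumed from the insert call above):

store = MongodbServiceStore(collection=db.services)   # assumed constructor
svc = Service(name='My WPS!', url='http://localhost:8094/wps', type='wps',
              purl='', public=False, auth='token', verify=True)

saved = store.save_service(svc, overwrite=True)
# 'My WPS!' is sanitized first (roughly 'my_wps'); if even that name is in
# use, a random name is generated; a remaining collision is deleted because
# overwrite=True, otherwise an exception is raised.
print(saved.name)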
valid
MongodbServiceStore.list_services
Lists all services in mongodb storage.
twitcher/store/mongodb.py
def list_services(self): """ Lists all services in mongodb storage. """ my_services = [] for service in self.collection.find().sort('name', pymongo.ASCENDING): my_services.append(Service(service)) return my_services
def list_services(self): """ Lists all services in mongodb storage. """ my_services = [] for service in self.collection.find().sort('name', pymongo.ASCENDING): my_services.append(Service(service)) return my_services
[ "Lists", "all", "services", "in", "mongodb", "storage", "." ]
bird-house/twitcher
python
https://github.com/bird-house/twitcher/blob/e6a36b3aeeacf44eec537434b0fb87c09ab54b5f/twitcher/store/mongodb.py#L84-L91
[ "def", "list_services", "(", "self", ")", ":", "my_services", "=", "[", "]", "for", "service", "in", "self", ".", "collection", ".", "find", "(", ")", ".", "sort", "(", "'name'", ",", "pymongo", ".", "ASCENDING", ")", ":", "my_services", ".", "append", "(", "Service", "(", "service", ")", ")", "return", "my_services" ]
e6a36b3aeeacf44eec537434b0fb87c09ab54b5f
valid
MongodbServiceStore.fetch_by_name
Gets service for given ``name`` from mongodb storage.
twitcher/store/mongodb.py
def fetch_by_name(self, name): """ Gets service for given ``name`` from mongodb storage. """ service = self.collection.find_one({'name': name}) if not service: raise ServiceNotFound return Service(service)
def fetch_by_name(self, name): """ Gets service for given ``name`` from mongodb storage. """ service = self.collection.find_one({'name': name}) if not service: raise ServiceNotFound return Service(service)
[ "Gets", "service", "for", "given", "name", "from", "mongodb", "storage", "." ]
bird-house/twitcher
python
https://github.com/bird-house/twitcher/blob/e6a36b3aeeacf44eec537434b0fb87c09ab54b5f/twitcher/store/mongodb.py#L93-L100
[ "def", "fetch_by_name", "(", "self", ",", "name", ")", ":", "service", "=", "self", ".", "collection", ".", "find_one", "(", "{", "'name'", ":", "name", "}", ")", "if", "not", "service", ":", "raise", "ServiceNotFound", "return", "Service", "(", "service", ")" ]
e6a36b3aeeacf44eec537434b0fb87c09ab54b5f
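Editor's note: because the fetch methods raise ServiceNotFound instead of returning None, lookups read EAFP-style; a short sketch (the exception's import path is assumed):

from twitcher.exceptions import ServiceNotFound   # assumed import path

try:
    service = store.fetch_by_name('emu')          # hypothetical service name
except ServiceNotFound:
    service = None   # caller decides: register it, return a 404, etc.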
valid
MongodbServiceStore.fetch_by_url
Gets service for given ``url`` from mongodb storage.
twitcher/store/mongodb.py
def fetch_by_url(self, url): """ Gets service for given ``url`` from mongodb storage. """ service = self.collection.find_one({'url': url}) if not service: raise ServiceNotFound return Service(service)
def fetch_by_url(self, url): """ Gets service for given ``url`` from mongodb storage. """ service = self.collection.find_one({'url': url}) if not service: raise ServiceNotFound return Service(service)
[ "Gets", "service", "for", "given", "url", "from", "mongodb", "storage", "." ]
bird-house/twitcher
python
https://github.com/bird-house/twitcher/blob/e6a36b3aeeacf44eec537434b0fb87c09ab54b5f/twitcher/store/mongodb.py#L102-L109
[ "def", "fetch_by_url", "(", "self", ",", "url", ")", ":", "service", "=", "self", ".", "collection", ".", "find_one", "(", "{", "'url'", ":", "url", "}", ")", "if", "not", "service", ":", "raise", "ServiceNotFound", "return", "Service", "(", "service", ")" ]
e6a36b3aeeacf44eec537434b0fb87c09ab54b5f
valid
owsproxy
TODO: use ows exceptions
twitcher/owsproxy.py
def owsproxy(request): """ TODO: use ows exceptions """ try: service_name = request.matchdict.get('service_name') extra_path = request.matchdict.get('extra_path') store = servicestore_factory(request.registry) service = store.fetch_by_name(service_name) except Exception as err: # TODO: Store impl should raise appropriate exception like not authorized return OWSAccessFailed("Could not find service {0} : {1}.".format(service_name, err.message)) else: return _send_request(request, service, extra_path, request_params=request.query_string)
def owsproxy(request): """ TODO: use ows exceptions """ try: service_name = request.matchdict.get('service_name') extra_path = request.matchdict.get('extra_path') store = servicestore_factory(request.registry) service = store.fetch_by_name(service_name) except Exception as err: # TODO: Store impl should raise appropriate exception like not authorized return OWSAccessFailed("Could not find service {0} : {1}.".format(service_name, err.message)) else: return _send_request(request, service, extra_path, request_params=request.query_string)
[ "TODO", ":", "use", "ows", "exceptions" ]
bird-house/twitcher
python
https://github.com/bird-house/twitcher/blob/e6a36b3aeeacf44eec537434b0fb87c09ab54b5f/twitcher/owsproxy.py#L133-L146
[ "def", "owsproxy", "(", "request", ")", ":", "try", ":", "service_name", "=", "request", ".", "matchdict", ".", "get", "(", "'service_name'", ")", "extra_path", "=", "request", ".", "matchdict", ".", "get", "(", "'extra_path'", ")", "store", "=", "servicestore_factory", "(", "request", ".", "registry", ")", "service", "=", "store", ".", "fetch_by_name", "(", "service_name", ")", "except", "Exception", "as", "err", ":", "# TODO: Store impl should raise appropriate exception like not authorized", "return", "OWSAccessFailed", "(", "\"Could not find service {0} : {1}.\"", ".", "format", "(", "service_name", ",", "err", ".", "message", ")", ")", "else", ":", "return", "_send_request", "(", "request", ",", "service", ",", "extra_path", ",", "request_params", "=", "request", ".", "query_string", ")" ]
e6a36b3aeeacf44eec537434b0fb87c09ab54b5f
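Editor's note: one portability caveat in owsproxy: err.message only exists on Python 2 exceptions, so on Python 3 the except block itself would raise AttributeError. A sketch of the same guard written portably (everything else as in the source):

try:
    service = store.fetch_by_name(service_name)
except Exception as err:
    # str(err) works on Python 2 and 3; err.message does not on 3
    return OWSAccessFailed(
        "Could not find service {0} : {1}.".format(service_name, err))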