Dataset schema (column, dtype, value range):

  partition          stringclasses   3 values
  func_name          stringlengths   1 .. 134
  docstring          stringlengths   1 .. 46.9k
  path               stringlengths   4 .. 223
  original_string    stringlengths   75 .. 104k
  code               stringlengths   75 .. 104k
  docstring_tokens   listlengths     1 .. 1.97k
  repo               stringlengths   7 .. 55
  language           stringclasses   1 value
  url                stringlengths   87 .. 315
  code_tokens        listlengths     19 .. 28.4k
  sha                stringlengths   40 .. 40
valid
Chart.scanner
For each edge expecting a word of this category here, extend the edge.
aima/nlp.py
def scanner(self, j, word):
    "For each edge expecting a word of this category here, extend the edge."
    for (i, j, A, alpha, Bb) in self.chart[j]:
        if Bb and self.grammar.isa(word, Bb[0]):
            self.add_edge([i, j+1, A, alpha + [(Bb[0], word)], Bb[1:]])
[ "For", "each", "edge", "expecting", "a", "word", "of", "this", "category", "here", "extend", "the", "edge", "." ]
hobson/aima
python
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/nlp.py#L160-L164
[ "def", "scanner", "(", "self", ",", "j", ",", "word", ")", ":", "for", "(", "i", ",", "j", ",", "A", ",", "alpha", ",", "Bb", ")", "in", "self", ".", "chart", "[", "j", "]", ":", "if", "Bb", "and", "self", ".", "grammar", ".", "isa", "(", "word", ",", "Bb", "[", "0", "]", ")", ":", "self", ".", "add_edge", "(", "[", "i", ",", "j", "+", "1", ",", "A", ",", "alpha", "+", "[", "(", "Bb", "[", "0", "]", ",", "word", ")", "]", ",", "Bb", "[", "1", ":", "]", "]", ")" ]
3572b2fb92039b4a1abe384be8545560fbd3d470
valid
Chart.predictor
Add to chart any rules for B that could help extend this edge.
aima/nlp.py
def predictor(self, (i, j, A, alpha, Bb)):
    "Add to chart any rules for B that could help extend this edge."
    B = Bb[0]
    if B in self.grammar.rules:
        for rhs in self.grammar.rewrites_for(B):
            self.add_edge([j, j, B, [], rhs])
[ "Add", "to", "chart", "any", "rules", "for", "B", "that", "could", "help", "extend", "this", "edge", "." ]
hobson/aima
python
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/nlp.py#L166-L171
[ "def", "predictor", "(", "self", ",", "(", "i", ",", "j", ",", "A", ",", "alpha", ",", "Bb", ")", ")", ":", "B", "=", "Bb", "[", "0", "]", "if", "B", "in", "self", ".", "grammar", ".", "rules", ":", "for", "rhs", "in", "self", ".", "grammar", ".", "rewrites_for", "(", "B", ")", ":", "self", ".", "add_edge", "(", "[", "j", ",", "j", ",", "B", ",", "[", "]", ",", "rhs", "]", ")" ]
3572b2fb92039b4a1abe384be8545560fbd3d470
valid
Chart.extender
See what edges can be extended by this edge.
aima/nlp.py
def extender(self, edge):
    "See what edges can be extended by this edge."
    (j, k, B, _, _) = edge
    for (i, j, A, alpha, B1b) in self.chart[j]:
        if B1b and B == B1b[0]:
            self.add_edge([i, k, A, alpha + [edge], B1b[1:]])
[ "See", "what", "edges", "can", "be", "extended", "by", "this", "edge", "." ]
hobson/aima
python
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/nlp.py#L173-L178
[ "def", "extender", "(", "self", ",", "edge", ")", ":", "(", "j", ",", "k", ",", "B", ",", "_", ",", "_", ")", "=", "edge", "for", "(", "i", ",", "j", ",", "A", ",", "alpha", ",", "B1b", ")", "in", "self", ".", "chart", "[", "j", "]", ":", "if", "B1b", "and", "B", "==", "B1b", "[", "0", "]", ":", "self", ".", "add_edge", "(", "[", "i", ",", "k", ",", "A", ",", "alpha", "+", "[", "edge", "]", ",", "B1b", "[", "1", ":", "]", "]", ")" ]
3572b2fb92039b4a1abe384be8545560fbd3d470
valid
settings
Adds a ``SettingDict`` object for the ``Setting`` model to the context as ``SETTINGS``. Automatically creates non-existent settings with an empty string as the default value.
model_settings/context_processors.py
def settings(request):
    """
    Adds a ``SettingDict`` object for the ``Setting`` model to the context
    as ``SETTINGS``. Automatically creates non-existent settings with an
    empty string as the default value.
    """
    settings = Setting.objects.all().as_dict(default='')
    context = {
        'SETTINGS': settings,
    }
    return context
[ "Adds", "a", "SettingDict", "object", "for", "the", "Setting", "model", "to", "the", "context", "as", "SETTINGS", ".", "Automatically", "creates", "non", "-", "existent", "settings", "with", "an", "empty", "string", "as", "the", "default", "value", "." ]
ixc/django-model-settings
python
https://github.com/ixc/django-model-settings/blob/654233bf7f13619e4531741f9158e7034eac031b/model_settings/context_processors.py#L3-L13
[ "def", "settings", "(", "request", ")", ":", "settings", "=", "Setting", ".", "objects", ".", "all", "(", ")", ".", "as_dict", "(", "default", "=", "''", ")", "context", "=", "{", "'SETTINGS'", ":", "settings", ",", "}", "return", "context" ]
654233bf7f13619e4531741f9158e7034eac031b
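The row above stores a Django context processor. As a purely illustrative sketch (not part of the dataset), this is roughly how such a processor gets enabled in a project's settings.py; everything except the dotted path model_settings.context_processors.settings is an assumption about the surrounding project:

# Hypothetical excerpt from a Django project's settings.py. Only the dotted
# path to the context processor comes from the row above; the rest of the
# TEMPLATES configuration is standard boilerplate assumed for illustration.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.request',
                # Makes SETTINGS available in every template context:
                'model_settings.context_processors.settings',
            ],
        },
    },
]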
valid
SettingModelAdmin.get_child_models
Returns a list of ``(Model, ModelAdmin)`` tuples for ``base_model`` subclasses.
model_settings/admin.py
def get_child_models(self):
    """
    Returns a list of ``(Model, ModelAdmin)`` tuples for ``base_model``
    subclasses.
    """
    child_models = []
    # Loop through all models with FKs back to `base_model`.
    for related_object in get_all_related_objects(self.base_model._meta):
        # Django 1.8 deprecated `get_all_related_objects()`. We're still
        # using it for now with the documented work-around for
        # compatibility with Django <=1.7.
        model = getattr(
            related_object, 'related_model', related_object.model)
        # Only consider `base_model` subclasses.
        if issubclass(model, self.base_model):
            class SettingValueAdmin(self.base_admin_class):
                pass
            child_models.append((model, SettingValueAdmin))
    return child_models
[ "Returns", "a", "list", "of", "(", "Model", "ModelAdmin", ")", "tuples", "for", "base_model", "subclasses", "." ]
ixc/django-model-settings
python
https://github.com/ixc/django-model-settings/blob/654233bf7f13619e4531741f9158e7034eac031b/model_settings/admin.py#L17-L35
[ "def", "get_child_models", "(", "self", ")", ":", "child_models", "=", "[", "]", "# Loop through all models with FKs back to `base_model`.", "for", "related_object", "in", "get_all_related_objects", "(", "self", ".", "base_model", ".", "_meta", ")", ":", "# Django 1.8 deprecated `get_all_related_objects()`. We're still", "# using it for now with the documented work-around for", "# compatibility with Django <=1.7.", "model", "=", "getattr", "(", "related_object", ",", "'related_model'", ",", "related_object", ".", "model", ")", "# Only consider `base_model` subclasses.", "if", "issubclass", "(", "model", ",", "self", ".", "base_model", ")", ":", "class", "SettingValueAdmin", "(", "self", ".", "base_admin_class", ")", ":", "pass", "child_models", ".", "append", "(", "(", "model", ",", "SettingValueAdmin", ")", ")", "return", "child_models" ]
654233bf7f13619e4531741f9158e7034eac031b
valid
DTAgentProgram
A decision-theoretic agent. [Fig. 13.1]
aima/probability.py
def DTAgentProgram(belief_state):
    "A decision-theoretic agent. [Fig. 13.1]"
    def program(percept):
        belief_state.observe(program.action, percept)
        program.action = argmax(belief_state.actions(),
                                belief_state.expected_outcome_utility)
        return program.action
    program.action = None
    return program
[ "A", "decision", "-", "theoretic", "agent", ".", "[", "Fig", ".", "13", ".", "1", "]" ]
hobson/aima
python
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/probability.py#L10-L18
[ "def", "DTAgentProgram", "(", "belief_state", ")", ":", "def", "program", "(", "percept", ")", ":", "belief_state", ".", "observe", "(", "program", ".", "action", ",", "percept", ")", "program", ".", "action", "=", "argmax", "(", "belief_state", ".", "actions", "(", ")", ",", "belief_state", ".", "expected_outcome_utility", ")", "return", "program", ".", "action", "program", ".", "action", "=", "None", "return", "program" ]
3572b2fb92039b4a1abe384be8545560fbd3d470
valid
event_values
Return a tuple of the values of variables vars in event. >>> event_values ({'A': 10, 'B': 9, 'C': 8}, ['C', 'A']) (8, 10) >>> event_values ((1, 2), ['C', 'A']) (1, 2)
aima/probability.py
def event_values(event, vars):
    """Return a tuple of the values of variables vars in event.
    >>> event_values ({'A': 10, 'B': 9, 'C': 8}, ['C', 'A'])
    (8, 10)
    >>> event_values ((1, 2), ['C', 'A'])
    (1, 2)
    """
    if isinstance(event, tuple) and len(event) == len(vars):
        return event
    else:
        return tuple([event[var] for var in vars])
[ "Return", "a", "tuple", "of", "the", "values", "of", "variables", "vars", "in", "event", ".", ">>>", "event_values", "(", "{", "A", ":", "10", "B", ":", "9", "C", ":", "8", "}", "[", "C", "A", "]", ")", "(", "8", "10", ")", ">>>", "event_values", "((", "1", "2", ")", "[", "C", "A", "]", ")", "(", "1", "2", ")" ]
hobson/aima
python
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/probability.py#L107-L117
[ "def", "event_values", "(", "event", ",", "vars", ")", ":", "if", "isinstance", "(", "event", ",", "tuple", ")", "and", "len", "(", "event", ")", "==", "len", "(", "vars", ")", ":", "return", "event", "else", ":", "return", "tuple", "(", "[", "event", "[", "var", "]", "for", "var", "in", "vars", "]", ")" ]
3572b2fb92039b4a1abe384be8545560fbd3d470
valid
enumerate_joint_ask
Return a probability distribution over the values of the variable X, given the {var:val} observations e, in the JointProbDist P. [Section 13.3] >>> P = JointProbDist(['X', 'Y']) >>> P[0,0] = 0.25; P[0,1] = 0.5; P[1,1] = P[2,1] = 0.125 >>> enumerate_joint_ask('X', dict(Y=1), P).show_approx() '0: 0.667, 1: 0.167, 2: 0.167'
aima/probability.py
def enumerate_joint_ask(X, e, P):
    """Return a probability distribution over the values of the variable X,
    given the {var:val} observations e, in the JointProbDist P. [Section 13.3]
    >>> P = JointProbDist(['X', 'Y'])
    >>> P[0,0] = 0.25; P[0,1] = 0.5; P[1,1] = P[2,1] = 0.125
    >>> enumerate_joint_ask('X', dict(Y=1), P).show_approx()
    '0: 0.667, 1: 0.167, 2: 0.167'
    """
    assert X not in e, "Query variable must be distinct from evidence"
    Q = ProbDist(X)  # probability distribution for X, initially empty
    Y = [v for v in P.variables if v != X and v not in e]  # hidden vars.
    for xi in P.values(X):
        Q[xi] = enumerate_joint(Y, extend(e, X, xi), P)
    return Q.normalize()
[ "Return", "a", "probability", "distribution", "over", "the", "values", "of", "the", "variable", "X", "given", "the", "{", "var", ":", "val", "}", "observations", "e", "in", "the", "JointProbDist", "P", ".", "[", "Section", "13", ".", "3", "]", ">>>", "P", "=", "JointProbDist", "(", "[", "X", "Y", "]", ")", ">>>", "P", "[", "0", "0", "]", "=", "0", ".", "25", ";", "P", "[", "0", "1", "]", "=", "0", ".", "5", ";", "P", "[", "1", "1", "]", "=", "P", "[", "2", "1", "]", "=", "0", ".", "125", ">>>", "enumerate_joint_ask", "(", "X", "dict", "(", "Y", "=", "1", ")", "P", ")", ".", "show_approx", "()", "0", ":", "0", ".", "667", "1", ":", "0", ".", "167", "2", ":", "0", ".", "167" ]
hobson/aima
python
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/probability.py#L121-L134
[ "def", "enumerate_joint_ask", "(", "X", ",", "e", ",", "P", ")", ":", "assert", "X", "not", "in", "e", ",", "\"Query variable must be distinct from evidence\"", "Q", "=", "ProbDist", "(", "X", ")", "# probability distribution for X, initially empty", "Y", "=", "[", "v", "for", "v", "in", "P", ".", "variables", "if", "v", "!=", "X", "and", "v", "not", "in", "e", "]", "# hidden vars.", "for", "xi", "in", "P", ".", "values", "(", "X", ")", ":", "Q", "[", "xi", "]", "=", "enumerate_joint", "(", "Y", ",", "extend", "(", "e", ",", "X", ",", "xi", ")", ",", "P", ")", "return", "Q", ".", "normalize", "(", ")" ]
3572b2fb92039b4a1abe384be8545560fbd3d470
valid
enumerate_joint
Return the sum of those entries in P consistent with e, provided vars is P's remaining variables (the ones not in e).
aima/probability.py
def enumerate_joint(vars, e, P):
    """Return the sum of those entries in P consistent with e,
    provided vars is P's remaining variables (the ones not in e)."""
    if not vars:
        return P[e]
    Y, rest = vars[0], vars[1:]
    return sum([enumerate_joint(rest, extend(e, Y, y), P)
                for y in P.values(Y)])
[ "Return", "the", "sum", "of", "those", "entries", "in", "P", "consistent", "with", "e", "provided", "vars", "is", "P", "s", "remaining", "variables", "(", "the", "ones", "not", "in", "e", ")", "." ]
hobson/aima
python
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/probability.py#L136-L143
[ "def", "enumerate_joint", "(", "vars", ",", "e", ",", "P", ")", ":", "if", "not", "vars", ":", "return", "P", "[", "e", "]", "Y", ",", "rest", "=", "vars", "[", "0", "]", ",", "vars", "[", "1", ":", "]", "return", "sum", "(", "[", "enumerate_joint", "(", "rest", ",", "extend", "(", "e", ",", "Y", ",", "y", ")", ",", "P", ")", "for", "y", "in", "P", ".", "values", "(", "Y", ")", "]", ")" ]
3572b2fb92039b4a1abe384be8545560fbd3d470
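The two rows above (enumerate_joint_ask and enumerate_joint) implement inference by enumeration over a joint distribution. As a standalone illustration that does not depend on the aima classes, the same computation on the doctest's numbers can be done with a plain dict; the helper name below is hypothetical and only reproduces the '0: 0.667, 1: 0.167, 2: 0.167' result shown in the docstring:

# Illustration only: condition a small joint distribution P(X, Y), stored as
# a plain dict, on Y = 1 and renormalize over X. The probabilities mirror
# the enumerate_joint_ask doctest above.
joint = {(0, 0): 0.25, (0, 1): 0.5, (1, 1): 0.125, (2, 1): 0.125}

def ask_x_given_y(joint, y):
    # Keep the entries consistent with the evidence Y = y, then renormalize.
    unnormalized = {x: p for (x, y_val), p in joint.items() if y_val == y}
    total = sum(unnormalized.values())
    return {x: p / total for x, p in unnormalized.items()}

print(ask_x_given_y(joint, 1))
# {0: 0.666..., 1: 0.166..., 2: 0.166...}, i.e. '0: 0.667, 1: 0.167, 2: 0.167'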
valid
enumeration_ask
Return the conditional probability distribution of variable X given evidence e, from BayesNet bn. [Fig. 14.9] >>> enumeration_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary ... ).show_approx() 'False: 0.716, True: 0.284
aima/probability.py
def enumeration_ask(X, e, bn):
    """Return the conditional probability distribution of variable X
    given evidence e, from BayesNet bn. [Fig. 14.9]
    >>> enumeration_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary
    ... ).show_approx()
    'False: 0.716, True: 0.284'"""
    assert X not in e, "Query variable must be distinct from evidence"
    Q = ProbDist(X)
    for xi in bn.variable_values(X):
        Q[xi] = enumerate_all(bn.vars, extend(e, X, xi), bn)
    return Q.normalize()
[ "Return", "the", "conditional", "probability", "distribution", "of", "variable", "X", "given", "evidence", "e", "from", "BayesNet", "bn", ".", "[", "Fig", ".", "14", ".", "9", "]", ">>>", "enumeration_ask", "(", "Burglary", "dict", "(", "JohnCalls", "=", "T", "MaryCalls", "=", "T", ")", "burglary", "...", ")", ".", "show_approx", "()", "False", ":", "0", ".", "716", "True", ":", "0", ".", "284" ]
hobson/aima
python
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/probability.py#L265-L275
[ "def", "enumeration_ask", "(", "X", ",", "e", ",", "bn", ")", ":", "assert", "X", "not", "in", "e", ",", "\"Query variable must be distinct from evidence\"", "Q", "=", "ProbDist", "(", "X", ")", "for", "xi", "in", "bn", ".", "variable_values", "(", "X", ")", ":", "Q", "[", "xi", "]", "=", "enumerate_all", "(", "bn", ".", "vars", ",", "extend", "(", "e", ",", "X", ",", "xi", ")", ",", "bn", ")", "return", "Q", ".", "normalize", "(", ")" ]
3572b2fb92039b4a1abe384be8545560fbd3d470
valid
enumerate_all
Return the sum of those entries in P(vars | e{others}) consistent with e, where P is the joint distribution represented by bn, and e{others} means e restricted to bn's other variables (the ones other than vars). Parents must precede children in vars.
aima/probability.py
def enumerate_all(vars, e, bn):
    """Return the sum of those entries in P(vars | e{others})
    consistent with e, where P is the joint distribution represented
    by bn, and e{others} means e restricted to bn's other variables
    (the ones other than vars). Parents must precede children in vars."""
    if not vars:
        return 1.0
    Y, rest = vars[0], vars[1:]
    Ynode = bn.variable_node(Y)
    if Y in e:
        return Ynode.p(e[Y], e) * enumerate_all(rest, e, bn)
    else:
        return sum(Ynode.p(y, e) * enumerate_all(rest, extend(e, Y, y), bn)
                   for y in bn.variable_values(Y))
[ "Return", "the", "sum", "of", "those", "entries", "in", "P", "(", "vars", "|", "e", "{", "others", "}", ")", "consistent", "with", "e", "where", "P", "is", "the", "joint", "distribution", "represented", "by", "bn", "and", "e", "{", "others", "}", "means", "e", "restricted", "to", "bn", "s", "other", "variables", "(", "the", "ones", "other", "than", "vars", ")", ".", "Parents", "must", "precede", "children", "in", "vars", "." ]
hobson/aima
python
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/probability.py#L277-L290
[ "def", "enumerate_all", "(", "vars", ",", "e", ",", "bn", ")", ":", "if", "not", "vars", ":", "return", "1.0", "Y", ",", "rest", "=", "vars", "[", "0", "]", ",", "vars", "[", "1", ":", "]", "Ynode", "=", "bn", ".", "variable_node", "(", "Y", ")", "if", "Y", "in", "e", ":", "return", "Ynode", ".", "p", "(", "e", "[", "Y", "]", ",", "e", ")", "*", "enumerate_all", "(", "rest", ",", "e", ",", "bn", ")", "else", ":", "return", "sum", "(", "Ynode", ".", "p", "(", "y", ",", "e", ")", "*", "enumerate_all", "(", "rest", ",", "extend", "(", "e", ",", "Y", ",", "y", ")", ",", "bn", ")", "for", "y", "in", "bn", ".", "variable_values", "(", "Y", ")", ")" ]
3572b2fb92039b4a1abe384be8545560fbd3d470
valid
elimination_ask
Compute bn's P(X|e) by variable elimination. [Fig. 14.11] >>> elimination_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary ... ).show_approx() 'False: 0.716, True: 0.284
aima/probability.py
def elimination_ask(X, e, bn):
    """Compute bn's P(X|e) by variable elimination. [Fig. 14.11]
    >>> elimination_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary
    ... ).show_approx()
    'False: 0.716, True: 0.284'"""
    assert X not in e, "Query variable must be distinct from evidence"
    factors = []
    for var in reversed(bn.vars):
        factors.append(make_factor(var, e, bn))
        if is_hidden(var, X, e):
            factors = sum_out(var, factors, bn)
    return pointwise_product(factors, bn).normalize()
[ "Compute", "bn", "s", "P", "(", "X|e", ")", "by", "variable", "elimination", ".", "[", "Fig", ".", "14", ".", "11", "]", ">>>", "elimination_ask", "(", "Burglary", "dict", "(", "JohnCalls", "=", "T", "MaryCalls", "=", "T", ")", "burglary", "...", ")", ".", "show_approx", "()", "False", ":", "0", ".", "716", "True", ":", "0", ".", "284" ]
hobson/aima
python
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/probability.py#L294-L305
[ "def", "elimination_ask", "(", "X", ",", "e", ",", "bn", ")", ":", "assert", "X", "not", "in", "e", ",", "\"Query variable must be distinct from evidence\"", "factors", "=", "[", "]", "for", "var", "in", "reversed", "(", "bn", ".", "vars", ")", ":", "factors", ".", "append", "(", "make_factor", "(", "var", ",", "e", ",", "bn", ")", ")", "if", "is_hidden", "(", "var", ",", "X", ",", "e", ")", ":", "factors", "=", "sum_out", "(", "var", ",", "factors", ",", "bn", ")", "return", "pointwise_product", "(", "factors", ",", "bn", ")", ".", "normalize", "(", ")" ]
3572b2fb92039b4a1abe384be8545560fbd3d470
valid
make_factor
Return the factor for var in bn's joint distribution given e. That is, bn's full joint distribution, projected to accord with e, is the pointwise product of these factors for bn's variables.
aima/probability.py
def make_factor(var, e, bn):
    """Return the factor for var in bn's joint distribution given e.
    That is, bn's full joint distribution, projected to accord with e,
    is the pointwise product of these factors for bn's variables."""
    node = bn.variable_node(var)
    vars = [X for X in [var] + node.parents if X not in e]
    cpt = dict((event_values(e1, vars), node.p(e1[var], e1))
               for e1 in all_events(vars, bn, e))
    return Factor(vars, cpt)
[ "Return", "the", "factor", "for", "var", "in", "bn", "s", "joint", "distribution", "given", "e", ".", "That", "is", "bn", "s", "full", "joint", "distribution", "projected", "to", "accord", "with", "e", "is", "the", "pointwise", "product", "of", "these", "factors", "for", "bn", "s", "variables", "." ]
hobson/aima
python
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/probability.py#L311-L319
[ "def", "make_factor", "(", "var", ",", "e", ",", "bn", ")", ":", "node", "=", "bn", ".", "variable_node", "(", "var", ")", "vars", "=", "[", "X", "for", "X", "in", "[", "var", "]", "+", "node", ".", "parents", "if", "X", "not", "in", "e", "]", "cpt", "=", "dict", "(", "(", "event_values", "(", "e1", ",", "vars", ")", ",", "node", ".", "p", "(", "e1", "[", "var", "]", ",", "e1", ")", ")", "for", "e1", "in", "all_events", "(", "vars", ",", "bn", ",", "e", ")", ")", "return", "Factor", "(", "vars", ",", "cpt", ")" ]
3572b2fb92039b4a1abe384be8545560fbd3d470
valid
sum_out
Eliminate var from all factors by summing over its values.
aima/probability.py
def sum_out(var, factors, bn):
    "Eliminate var from all factors by summing over its values."
    result, var_factors = [], []
    for f in factors:
        (var_factors if var in f.vars else result).append(f)
    result.append(pointwise_product(var_factors, bn).sum_out(var, bn))
    return result
[ "Eliminate", "var", "from", "all", "factors", "by", "summing", "over", "its", "values", "." ]
hobson/aima
python
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/probability.py#L324-L330
[ "def", "sum_out", "(", "var", ",", "factors", ",", "bn", ")", ":", "result", ",", "var_factors", "=", "[", "]", ",", "[", "]", "for", "f", "in", "factors", ":", "(", "var_factors", "if", "var", "in", "f", ".", "vars", "else", "result", ")", ".", "append", "(", "f", ")", "result", ".", "append", "(", "pointwise_product", "(", "var_factors", ",", "bn", ")", ".", "sum_out", "(", "var", ",", "bn", ")", ")", "return", "result" ]
3572b2fb92039b4a1abe384be8545560fbd3d470
valid
all_events
Yield every way of extending e with values for all vars.
aima/probability.py
def all_events(vars, bn, e):
    "Yield every way of extending e with values for all vars."
    if not vars:
        yield e
    else:
        X, rest = vars[0], vars[1:]
        for e1 in all_events(rest, bn, e):
            for x in bn.variable_values(X):
                yield extend(e1, X, x)
[ "Yield", "every", "way", "of", "extending", "e", "with", "values", "for", "all", "vars", "." ]
hobson/aima
python
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/probability.py#L364-L372
[ "def", "all_events", "(", "vars", ",", "bn", ",", "e", ")", ":", "if", "not", "vars", ":", "yield", "e", "else", ":", "X", ",", "rest", "=", "vars", "[", "0", "]", ",", "vars", "[", "1", ":", "]", "for", "e1", "in", "all_events", "(", "rest", ",", "bn", ",", "e", ")", ":", "for", "x", "in", "bn", ".", "variable_values", "(", "X", ")", ":", "yield", "extend", "(", "e1", ",", "X", ",", "x", ")" ]
3572b2fb92039b4a1abe384be8545560fbd3d470
valid
prior_sample
Randomly sample from bn's full joint distribution. The result is a {variable: value} dict. [Fig. 14.13]
aima/probability.py
def prior_sample(bn):
    """Randomly sample from bn's full joint distribution. The result
    is a {variable: value} dict. [Fig. 14.13]"""
    event = {}
    for node in bn.nodes:
        event[node.variable] = node.sample(event)
    return event
[ "Randomly", "sample", "from", "bn", "s", "full", "joint", "distribution", ".", "The", "result", "is", "a", "{", "variable", ":", "value", "}", "dict", ".", "[", "Fig", ".", "14", ".", "13", "]" ]
hobson/aima
python
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/probability.py#L387-L393
[ "def", "prior_sample", "(", "bn", ")", ":", "event", "=", "{", "}", "for", "node", "in", "bn", ".", "nodes", ":", "event", "[", "node", ".", "variable", "]", "=", "node", ".", "sample", "(", "event", ")", "return", "event" ]
3572b2fb92039b4a1abe384be8545560fbd3d470
valid
rejection_sampling
Estimate the probability distribution of variable X given evidence e in BayesNet bn, using N samples. [Fig. 14.14] Raises a ZeroDivisionError if all the N samples are rejected, i.e., inconsistent with e. >>> seed(47) >>> rejection_sampling('Burglary', dict(JohnCalls=T, MaryCalls=T), ... burglary, 10000).show_approx() 'False: 0.7, True: 0.3'
aima/probability.py
def rejection_sampling(X, e, bn, N):
    """Estimate the probability distribution of variable X given
    evidence e in BayesNet bn, using N samples. [Fig. 14.14]
    Raises a ZeroDivisionError if all the N samples are rejected,
    i.e., inconsistent with e.
    >>> seed(47)
    >>> rejection_sampling('Burglary', dict(JohnCalls=T, MaryCalls=T),
    ...   burglary, 10000).show_approx()
    'False: 0.7, True: 0.3'
    """
    counts = dict((x, 0) for x in bn.variable_values(X))  # bold N in Fig. 14.14
    for j in xrange(N):
        sample = prior_sample(bn)  # boldface x in Fig. 14.14
        if consistent_with(sample, e):
            counts[sample[X]] += 1
    return ProbDist(X, counts)
[ "Estimate", "the", "probability", "distribution", "of", "variable", "X", "given", "evidence", "e", "in", "BayesNet", "bn", "using", "N", "samples", ".", "[", "Fig", ".", "14", ".", "14", "]", "Raises", "a", "ZeroDivisionError", "if", "all", "the", "N", "samples", "are", "rejected", "i", ".", "e", ".", "inconsistent", "with", "e", ".", ">>>", "seed", "(", "47", ")", ">>>", "rejection_sampling", "(", "Burglary", "dict", "(", "JohnCalls", "=", "T", "MaryCalls", "=", "T", ")", "...", "burglary", "10000", ")", ".", "show_approx", "()", "False", ":", "0", ".", "7", "True", ":", "0", ".", "3" ]
hobson/aima
python
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/probability.py#L397-L412
[ "def", "rejection_sampling", "(", "X", ",", "e", ",", "bn", ",", "N", ")", ":", "counts", "=", "dict", "(", "(", "x", ",", "0", ")", "for", "x", "in", "bn", ".", "variable_values", "(", "X", ")", ")", "# bold N in Fig. 14.14", "for", "j", "in", "xrange", "(", "N", ")", ":", "sample", "=", "prior_sample", "(", "bn", ")", "# boldface x in Fig. 14.14", "if", "consistent_with", "(", "sample", ",", "e", ")", ":", "counts", "[", "sample", "[", "X", "]", "]", "+=", "1", "return", "ProbDist", "(", "X", ",", "counts", ")" ]
3572b2fb92039b4a1abe384be8545560fbd3d470
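The rejection_sampling row above estimates P(X | e) by throwing away samples that disagree with the evidence. A minimal standalone sketch of the same idea, on a made-up two-variable Rain -> WetGrass model rather than the burglary network referenced in the doctest, using only the standard library:

# Illustration only: rejection sampling on an assumed Rain -> WetGrass model.
import random

def sample_once():
    rain = random.random() < 0.2          # P(Rain) = 0.2 (assumed)
    p_wet = 0.9 if rain else 0.1          # P(WetGrass | Rain) (assumed)
    return rain, random.random() < p_wet

def estimate_p_rain_given_wet(n=100000):
    counts = {True: 0, False: 0}
    for _ in range(n):
        rain, wet = sample_once()
        if wet:                           # keep only samples consistent with the evidence
            counts[rain] += 1
    total = sum(counts.values())          # ZeroDivisionError if every sample was rejected
    return {value: count / total for value, count in counts.items()}

print(estimate_p_rain_given_wet())        # roughly {True: 0.69, False: 0.31}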
valid
consistent_with
Is event consistent with the given evidence?
aima/probability.py
def consistent_with(event, evidence):
    "Is event consistent with the given evidence?"
    return every(lambda (k, v): evidence.get(k, v) == v,
                 event.items())
[ "Is", "event", "consistent", "with", "the", "given", "evidence?" ]
hobson/aima
python
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/probability.py#L414-L417
[ "def", "consistent_with", "(", "event", ",", "evidence", ")", ":", "return", "every", "(", "lambda", "(", "k", ",", "v", ")", ":", "evidence", ".", "get", "(", "k", ",", "v", ")", "==", "v", ",", "event", ".", "items", "(", ")", ")" ]
3572b2fb92039b4a1abe384be8545560fbd3d470
valid
likelihood_weighting
Estimate the probability distribution of variable X given evidence e in BayesNet bn. [Fig. 14.15] >>> seed(1017) >>> likelihood_weighting('Burglary', dict(JohnCalls=T, MaryCalls=T), ... burglary, 10000).show_approx() 'False: 0.702, True: 0.298'
aima/probability.py
def likelihood_weighting(X, e, bn, N):
    """Estimate the probability distribution of variable X given
    evidence e in BayesNet bn. [Fig. 14.15]
    >>> seed(1017)
    >>> likelihood_weighting('Burglary', dict(JohnCalls=T, MaryCalls=T),
    ...   burglary, 10000).show_approx()
    'False: 0.702, True: 0.298'
    """
    W = dict((x, 0) for x in bn.variable_values(X))
    for j in xrange(N):
        sample, weight = weighted_sample(bn, e)  # boldface x, w in Fig. 14.15
        W[sample[X]] += weight
    return ProbDist(X, W)
[ "Estimate", "the", "probability", "distribution", "of", "variable", "X", "given", "evidence", "e", "in", "BayesNet", "bn", ".", "[", "Fig", ".", "14", ".", "15", "]", ">>>", "seed", "(", "1017", ")", ">>>", "likelihood_weighting", "(", "Burglary", "dict", "(", "JohnCalls", "=", "T", "MaryCalls", "=", "T", ")", "...", "burglary", "10000", ")", ".", "show_approx", "()", "False", ":", "0", ".", "702", "True", ":", "0", ".", "298" ]
hobson/aima
python
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/probability.py#L421-L433
[ "def", "likelihood_weighting", "(", "X", ",", "e", ",", "bn", ",", "N", ")", ":", "W", "=", "dict", "(", "(", "x", ",", "0", ")", "for", "x", "in", "bn", ".", "variable_values", "(", "X", ")", ")", "for", "j", "in", "xrange", "(", "N", ")", ":", "sample", ",", "weight", "=", "weighted_sample", "(", "bn", ",", "e", ")", "# boldface x, w in Fig. 14.15", "W", "[", "sample", "[", "X", "]", "]", "+=", "weight", "return", "ProbDist", "(", "X", ",", "W", ")" ]
3572b2fb92039b4a1abe384be8545560fbd3d470
valid
weighted_sample
Sample an event from bn that's consistent with the evidence e; return the event and its weight, the likelihood that the event accords to the evidence.
aima/probability.py
def weighted_sample(bn, e):
    """Sample an event from bn that's consistent with the evidence e;
    return the event and its weight, the likelihood that the event
    accords to the evidence."""
    w = 1
    event = dict(e)  # boldface x in Fig. 14.15
    for node in bn.nodes:
        Xi = node.variable
        if Xi in e:
            w *= node.p(e[Xi], event)
        else:
            event[Xi] = node.sample(event)
    return event, w
[ "Sample", "an", "event", "from", "bn", "that", "s", "consistent", "with", "the", "evidence", "e", ";", "return", "the", "event", "and", "its", "weight", "the", "likelihood", "that", "the", "event", "accords", "to", "the", "evidence", "." ]
hobson/aima
python
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/probability.py#L435-L447
[ "def", "weighted_sample", "(", "bn", ",", "e", ")", ":", "w", "=", "1", "event", "=", "dict", "(", "e", ")", "# boldface x in Fig. 14.15", "for", "node", "in", "bn", ".", "nodes", ":", "Xi", "=", "node", ".", "variable", "if", "Xi", "in", "e", ":", "w", "*=", "node", ".", "p", "(", "e", "[", "Xi", "]", ",", "event", ")", "else", ":", "event", "[", "Xi", "]", "=", "node", ".", "sample", "(", "event", ")", "return", "event", ",", "w" ]
3572b2fb92039b4a1abe384be8545560fbd3d470
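likelihood_weighting and weighted_sample, in the two rows above, avoid discarding samples by fixing the evidence variables and weighting each sample by the likelihood of that evidence. A standalone sketch on the same hypothetical Rain -> WetGrass model as in the earlier rejection-sampling illustration:

# Illustration only: likelihood weighting on the assumed model. The evidence
# (WetGrass = True) is never sampled; each sampled value of Rain is weighted
# by the likelihood of that evidence.
import random

def estimate_p_rain_given_wet_lw(n=100000):
    weights = {True: 0.0, False: 0.0}
    for _ in range(n):
        rain = random.random() < 0.2            # sample the non-evidence variable
        weights[rain] += 0.9 if rain else 0.1   # weight by P(WetGrass=True | Rain)
    total = sum(weights.values())
    return {value: w / total for value, w in weights.items()}

print(estimate_p_rain_given_wet_lw())           # again roughly {True: 0.69, False: 0.31}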
valid
gibbs_ask
[Fig. 14.16] >>> seed(1017) >>> gibbs_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary, 1000 ... ).show_approx() 'False: 0.738, True: 0.262'
aima/probability.py
def gibbs_ask(X, e, bn, N):
    """[Fig. 14.16]
    >>> seed(1017)
    >>> gibbs_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary, 1000
    ... ).show_approx()
    'False: 0.738, True: 0.262'
    """
    assert X not in e, "Query variable must be distinct from evidence"
    counts = dict((x, 0) for x in bn.variable_values(X))  # bold N in Fig. 14.16
    Z = [var for var in bn.vars if var not in e]
    state = dict(e)  # boldface x in Fig. 14.16
    for Zi in Z:
        state[Zi] = choice(bn.variable_values(Zi))
    for j in xrange(N):
        for Zi in Z:
            state[Zi] = markov_blanket_sample(Zi, state, bn)
            counts[state[X]] += 1
    return ProbDist(X, counts)
[ "[", "Fig", ".", "14", ".", "16", "]", ">>>", "seed", "(", "1017", ")", ">>>", "gibbs_ask", "(", "Burglary", "dict", "(", "JohnCalls", "=", "T", "MaryCalls", "=", "T", ")", "burglary", "1000", "...", ")", ".", "show_approx", "()", "False", ":", "0", ".", "738", "True", ":", "0", ".", "262" ]
hobson/aima
python
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/probability.py#L451-L468
[ "def", "gibbs_ask", "(", "X", ",", "e", ",", "bn", ",", "N", ")", ":", "assert", "X", "not", "in", "e", ",", "\"Query variable must be distinct from evidence\"", "counts", "=", "dict", "(", "(", "x", ",", "0", ")", "for", "x", "in", "bn", ".", "variable_values", "(", "X", ")", ")", "# bold N in Fig. 14.16", "Z", "=", "[", "var", "for", "var", "in", "bn", ".", "vars", "if", "var", "not", "in", "e", "]", "state", "=", "dict", "(", "e", ")", "# boldface x in Fig. 14.16", "for", "Zi", "in", "Z", ":", "state", "[", "Zi", "]", "=", "choice", "(", "bn", ".", "variable_values", "(", "Zi", ")", ")", "for", "j", "in", "xrange", "(", "N", ")", ":", "for", "Zi", "in", "Z", ":", "state", "[", "Zi", "]", "=", "markov_blanket_sample", "(", "Zi", ",", "state", ",", "bn", ")", "counts", "[", "state", "[", "X", "]", "]", "+=", "1", "return", "ProbDist", "(", "X", ",", "counts", ")" ]
3572b2fb92039b4a1abe384be8545560fbd3d470
valid
markov_blanket_sample
Return a sample from P(X | mb) where mb denotes that the variables in the Markov blanket of X take their values from event e (which must assign a value to each). The Markov blanket of X is X's parents, children, and children's parents.
aima/probability.py
def markov_blanket_sample(X, e, bn):
    """Return a sample from P(X | mb) where mb denotes that the
    variables in the Markov blanket of X take their values from event
    e (which must assign a value to each). The Markov blanket of X is
    X's parents, children, and children's parents."""
    Xnode = bn.variable_node(X)
    Q = ProbDist(X)
    for xi in bn.variable_values(X):
        ei = extend(e, X, xi)
        # [Equation 14.12:]
        Q[xi] = Xnode.p(xi, e) * product(Yj.p(ei[Yj.variable], ei)
                                         for Yj in Xnode.children)
    return probability(Q.normalize()[True])
[ "Return", "a", "sample", "from", "P", "(", "X", "|", "mb", ")", "where", "mb", "denotes", "that", "the", "variables", "in", "the", "Markov", "blanket", "of", "X", "take", "their", "values", "from", "event", "e", "(", "which", "must", "assign", "a", "value", "to", "each", ")", ".", "The", "Markov", "blanket", "of", "X", "is", "X", "s", "parents", "children", "and", "children", "s", "parents", "." ]
hobson/aima
python
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/probability.py#L470-L482
[ "def", "markov_blanket_sample", "(", "X", ",", "e", ",", "bn", ")", ":", "Xnode", "=", "bn", ".", "variable_node", "(", "X", ")", "Q", "=", "ProbDist", "(", "X", ")", "for", "xi", "in", "bn", ".", "variable_values", "(", "X", ")", ":", "ei", "=", "extend", "(", "e", ",", "X", ",", "xi", ")", "# [Equation 14.12:]", "Q", "[", "xi", "]", "=", "Xnode", ".", "p", "(", "xi", ",", "e", ")", "*", "product", "(", "Yj", ".", "p", "(", "ei", "[", "Yj", ".", "variable", "]", ",", "ei", ")", "for", "Yj", "in", "Xnode", ".", "children", ")", "return", "probability", "(", "Q", ".", "normalize", "(", ")", "[", "True", "]", ")" ]
3572b2fb92039b4a1abe384be8545560fbd3d470
valid
ProbDist.normalize
Make sure the probabilities of all values sum to 1. Returns the normalized distribution. Raises a ZeroDivisionError if the sum of the values is 0. >>> P = ProbDist('Flip'); P['H'], P['T'] = 35, 65 >>> P = P.normalize() >>> print '%5.3f %5.3f' % (P.prob['H'], P.prob['T']) 0.350 0.650
aima/probability.py
def normalize(self):
    """Make sure the probabilities of all values sum to 1.
    Returns the normalized distribution.
    Raises a ZeroDivisionError if the sum of the values is 0.
    >>> P = ProbDist('Flip'); P['H'], P['T'] = 35, 65
    >>> P = P.normalize()
    >>> print '%5.3f %5.3f' % (P.prob['H'], P.prob['T'])
    0.350 0.650
    """
    total = float(sum(self.prob.values()))
    if not (1.0-epsilon < total < 1.0+epsilon):
        for val in self.prob:
            self.prob[val] /= total
    return self
[ "Make", "sure", "the", "probabilities", "of", "all", "values", "sum", "to", "1", ".", "Returns", "the", "normalized", "distribution", ".", "Raises", "a", "ZeroDivisionError", "if", "the", "sum", "of", "the", "values", "is", "0", ".", ">>>", "P", "=", "ProbDist", "(", "Flip", ")", ";", "P", "[", "H", "]", "P", "[", "T", "]", "=", "35", "65", ">>>", "P", "=", "P", ".", "normalize", "()", ">>>", "print", "%5", ".", "3f", "%5", ".", "3f", "%", "(", "P", ".", "prob", "[", "H", "]", "P", ".", "prob", "[", "T", "]", ")", "0", ".", "350", "0", ".", "650" ]
hobson/aima
python
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/probability.py#L51-L64
[ "def", "normalize", "(", "self", ")", ":", "total", "=", "float", "(", "sum", "(", "self", ".", "prob", ".", "values", "(", ")", ")", ")", "if", "not", "(", "1.0", "-", "epsilon", "<", "total", "<", "1.0", "+", "epsilon", ")", ":", "for", "val", "in", "self", ".", "prob", ":", "self", ".", "prob", "[", "val", "]", "/=", "total", "return", "self" ]
3572b2fb92039b4a1abe384be8545560fbd3d470
valid
ProbDist.show_approx
Show the probabilities rounded and sorted by key, for the sake of portable doctests.
aima/probability.py
def show_approx(self, numfmt='%.3g'):
    """Show the probabilities rounded and sorted by key, for the
    sake of portable doctests."""
    return ', '.join([('%s: ' + numfmt) % (v, p)
                      for (v, p) in sorted(self.prob.items())])
[ "Show", "the", "probabilities", "rounded", "and", "sorted", "by", "key", "for", "the", "sake", "of", "portable", "doctests", "." ]
hobson/aima
python
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/probability.py#L66-L70
[ "def", "show_approx", "(", "self", ",", "numfmt", "=", "'%.3g'", ")", ":", "return", "', '", ".", "join", "(", "[", "(", "'%s: '", "+", "numfmt", ")", "%", "(", "v", ",", "p", ")", "for", "(", "v", ",", "p", ")", "in", "sorted", "(", "self", ".", "prob", ".", "items", "(", ")", ")", "]", ")" ]
3572b2fb92039b4a1abe384be8545560fbd3d470
valid
BayesNet.add
Add a node to the net. Its parents must already be in the net, and its variable must not.
aima/probability.py
def add(self, node_spec):
    """Add a node to the net. Its parents must already be in the
    net, and its variable must not."""
    node = BayesNode(*node_spec)
    assert node.variable not in self.vars
    assert every(lambda parent: parent in self.vars, node.parents)
    self.nodes.append(node)
    self.vars.append(node.variable)
    for parent in node.parents:
        self.variable_node(parent).children.append(node)
[ "Add", "a", "node", "to", "the", "net", ".", "Its", "parents", "must", "already", "be", "in", "the", "net", "and", "its", "variable", "must", "not", "." ]
hobson/aima
python
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/probability.py#L156-L165
[ "def", "add", "(", "self", ",", "node_spec", ")", ":", "node", "=", "BayesNode", "(", "*", "node_spec", ")", "assert", "node", ".", "variable", "not", "in", "self", ".", "vars", "assert", "every", "(", "lambda", "parent", ":", "parent", "in", "self", ".", "vars", ",", "node", ".", "parents", ")", "self", ".", "nodes", ".", "append", "(", "node", ")", "self", ".", "vars", ".", "append", "(", "node", ".", "variable", ")", "for", "parent", "in", "node", ".", "parents", ":", "self", ".", "variable_node", "(", "parent", ")", ".", "children", ".", "append", "(", "node", ")" ]
3572b2fb92039b4a1abe384be8545560fbd3d470
valid
BayesNet.variable_node
Return the node for the variable named var. >>> burglary.variable_node('Burglary').variable 'Burglary
aima/probability.py
def variable_node(self, var):
    """Return the node for the variable named var.
    >>> burglary.variable_node('Burglary').variable
    'Burglary'"""
    for n in self.nodes:
        if n.variable == var:
            return n
    raise Exception("No such variable: %s" % var)
[ "Return", "the", "node", "for", "the", "variable", "named", "var", ".", ">>>", "burglary", ".", "variable_node", "(", "Burglary", ")", ".", "variable", "Burglary" ]
hobson/aima
python
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/probability.py#L167-L174
[ "def", "variable_node", "(", "self", ",", "var", ")", ":", "for", "n", "in", "self", ".", "nodes", ":", "if", "n", ".", "variable", "==", "var", ":", "return", "n", "raise", "Exception", "(", "\"No such variable: %s\"", "%", "var", ")" ]
3572b2fb92039b4a1abe384be8545560fbd3d470
valid
BayesNode.p
Return the conditional probability P(X=value | parents=parent_values), where parent_values are the values of parents in event. (event must assign each parent a value.) >>> bn = BayesNode('X', 'Burglary', {T: 0.2, F: 0.625}) >>> bn.p(False, {'Burglary': False, 'Earthquake': True}) 0.375
aima/probability.py
def p(self, value, event):
    """Return the conditional probability
    P(X=value | parents=parent_values), where parent_values
    are the values of parents in event. (event must assign each
    parent a value.)
    >>> bn = BayesNode('X', 'Burglary', {T: 0.2, F: 0.625})
    >>> bn.p(False, {'Burglary': False, 'Earthquake': True})
    0.375"""
    assert isinstance(value, bool)
    ptrue = self.cpt[event_values(event, self.parents)]
    return if_(value, ptrue, 1 - ptrue)
[ "Return", "the", "conditional", "probability", "P", "(", "X", "=", "value", "|", "parents", "=", "parent_values", ")", "where", "parent_values", "are", "the", "values", "of", "parents", "in", "event", ".", "(", "event", "must", "assign", "each", "parent", "a", "value", ".", ")", ">>>", "bn", "=", "BayesNode", "(", "X", "Burglary", "{", "T", ":", "0", ".", "2", "F", ":", "0", ".", "625", "}", ")", ">>>", "bn", ".", "p", "(", "False", "{", "Burglary", ":", "False", "Earthquake", ":", "True", "}", ")", "0", ".", "375" ]
hobson/aima
python
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/probability.py#L228-L238
[ "def", "p", "(", "self", ",", "value", ",", "event", ")", ":", "assert", "isinstance", "(", "value", ",", "bool", ")", "ptrue", "=", "self", ".", "cpt", "[", "event_values", "(", "event", ",", "self", ".", "parents", ")", "]", "return", "if_", "(", "value", ",", "ptrue", ",", "1", "-", "ptrue", ")" ]
3572b2fb92039b4a1abe384be8545560fbd3d470
valid
Factor.pointwise_product
Multiply two factors, combining their variables.
aima/probability.py
def pointwise_product(self, other, bn):
    "Multiply two factors, combining their variables."
    vars = list(set(self.vars) | set(other.vars))
    cpt = dict((event_values(e, vars), self.p(e) * other.p(e))
               for e in all_events(vars, bn, {}))
    return Factor(vars, cpt)
[ "Multiply", "two", "factors", "combining", "their", "variables", "." ]
hobson/aima
python
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/probability.py#L338-L343
[ "def", "pointwise_product", "(", "self", ",", "other", ",", "bn", ")", ":", "vars", "=", "list", "(", "set", "(", "self", ".", "vars", ")", "|", "set", "(", "other", ".", "vars", ")", ")", "cpt", "=", "dict", "(", "(", "event_values", "(", "e", ",", "vars", ")", ",", "self", ".", "p", "(", "e", ")", "*", "other", ".", "p", "(", "e", ")", ")", "for", "e", "in", "all_events", "(", "vars", ",", "bn", ",", "{", "}", ")", ")", "return", "Factor", "(", "vars", ",", "cpt", ")" ]
3572b2fb92039b4a1abe384be8545560fbd3d470
valid
Factor.sum_out
Make a factor eliminating var by summing over its values.
aima/probability.py
def sum_out(self, var, bn):
    "Make a factor eliminating var by summing over its values."
    vars = [X for X in self.vars if X != var]
    cpt = dict((event_values(e, vars),
                sum(self.p(extend(e, var, val))
                    for val in bn.variable_values(var)))
               for e in all_events(vars, bn, {}))
    return Factor(vars, cpt)
[ "Make", "a", "factor", "eliminating", "var", "by", "summing", "over", "its", "values", "." ]
hobson/aima
python
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/probability.py#L345-L352
[ "def", "sum_out", "(", "self", ",", "var", ",", "bn", ")", ":", "vars", "=", "[", "X", "for", "X", "in", "self", ".", "vars", "if", "X", "!=", "var", "]", "cpt", "=", "dict", "(", "(", "event_values", "(", "e", ",", "vars", ")", ",", "sum", "(", "self", ".", "p", "(", "extend", "(", "e", ",", "var", ",", "val", ")", ")", "for", "val", "in", "bn", ".", "variable_values", "(", "var", ")", ")", ")", "for", "e", "in", "all_events", "(", "vars", ",", "bn", ",", "{", "}", ")", ")", "return", "Factor", "(", "vars", ",", "cpt", ")" ]
3572b2fb92039b4a1abe384be8545560fbd3d470
valid
Factor.normalize
Return my probabilities; must be down to one variable.
aima/probability.py
def normalize(self):
    "Return my probabilities; must be down to one variable."
    assert len(self.vars) == 1
    return ProbDist(self.vars[0],
                    dict((k, v) for ((k,), v) in self.cpt.items()))
[ "Return", "my", "probabilities", ";", "must", "be", "down", "to", "one", "variable", "." ]
hobson/aima
python
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/probability.py#L354-L358
[ "def", "normalize", "(", "self", ")", ":", "assert", "len", "(", "self", ".", "vars", ")", "==", "1", "return", "ProbDist", "(", "self", ".", "vars", "[", "0", "]", ",", "dict", "(", "(", "k", ",", "v", ")", "for", "(", "(", "k", ",", ")", ",", "v", ")", "in", "self", ".", "cpt", ".", "items", "(", ")", ")", ")" ]
3572b2fb92039b4a1abe384be8545560fbd3d470
valid
BaseMultiLevelChunker.next_chunk_boundaries
Computes the next chunk boundaries within `buf`. See :meth:`.BaseChunker.next_chunk_boundaries`.
fastchunking/__init__.py
def next_chunk_boundaries(self, buf, prepend_bytes=0):
    """Computes the next chunk boundaries within `buf`.

    See :meth:`.BaseChunker.next_chunk_boundaries`.
    """
    return (boundary for boundary, _ in self.next_chunk_boundaries_levels(buf, prepend_bytes))
[ "Computes", "the", "next", "chunk", "boundaries", "within", "buf", "." ]
netleibi/fastchunking
python
https://github.com/netleibi/fastchunking/blob/069b7689d26bc067120907f01d9453ab3d2efa74/fastchunking/__init__.py#L73-L78
[ "def", "next_chunk_boundaries", "(", "self", ",", "buf", ",", "prepend_bytes", "=", "0", ")", ":", "return", "(", "boundary", "for", "boundary", ",", "_", "in", "self", ".", "next_chunk_boundaries_levels", "(", "buf", ",", "prepend_bytes", ")", ")" ]
069b7689d26bc067120907f01d9453ab3d2efa74
valid
DefaultMultiLevelChunker.next_chunk_boundaries_levels
Computes the next chunk boundaries within `buf`. Similar to :meth:`.next_chunk_boundaries`, but information about which chunker led to a respective boundary is included in the returned value. Args: buf (bytes): The message that is to be chunked. prepend_bytes (Optional[int]): Optional number of zero bytes that should be input to the chunking algorithm before `buf`. Returns: list: List of tuples (boundary, level), where boundary is a boundary position relative to `buf` and level is the index of the chunker (i.e., the index of its chunk size specified during instantiation) that yielded the boundary. If multiple chunkers yield the same boundary, it is returned only once, along with the highest matching chunker index.
fastchunking/__init__.py
def next_chunk_boundaries_levels(self, buf, prepend_bytes=0):
    """Computes the next chunk boundaries within `buf`.

    Similar to :meth:`.next_chunk_boundaries`, but information about which
    chunker led to a respective boundary is included in the returned value.

    Args:
        buf (bytes): The message that is to be chunked.
        prepend_bytes (Optional[int]): Optional number of zero bytes that
            should be input to the chunking algorithm before `buf`.

    Returns:
        list: List of tuples (boundary, level), where boundary is a boundary
            position relative to `buf` and level is the index of the chunker
            (i.e., the index of its chunk size specified during instantiation)
            that yielded the boundary.
            If multiple chunkers yield the same boundary, it is returned only
            once, along with the highest matching chunker index.
    """
    boundaries = {}
    for level_index, chunker in enumerate(self._chunkers):
        boundaries.update(
            dict([(boundary, level_index) for boundary in chunker.next_chunk_boundaries(buf, prepend_bytes)]))
    return sorted(boundaries.items())
[ "Computes", "the", "next", "chunk", "boundaries", "within", "buf", "." ]
netleibi/fastchunking
python
https://github.com/netleibi/fastchunking/blob/069b7689d26bc067120907f01d9453ab3d2efa74/fastchunking/__init__.py#L115-L138
[ "def", "next_chunk_boundaries_levels", "(", "self", ",", "buf", ",", "prepend_bytes", "=", "0", ")", ":", "boundaries", "=", "{", "}", "for", "level_index", ",", "chunker", "in", "enumerate", "(", "self", ".", "_chunkers", ")", ":", "boundaries", ".", "update", "(", "dict", "(", "[", "(", "boundary", ",", "level_index", ")", "for", "boundary", "in", "chunker", ".", "next_chunk_boundaries", "(", "buf", ",", "prepend_bytes", ")", "]", ")", ")", "return", "sorted", "(", "boundaries", ".", "items", "(", ")", ")" ]
069b7689d26bc067120907f01d9453ab3d2efa74
valid
RabinKarpCDC.create_chunker
Create a chunker performing content-defined chunking (CDC) using Rabin Karp's rolling hash scheme with a specific, expected chunk size. Args: chunk_size (int): (Expected) target chunk size. Returns: BaseChunker: A chunker object.
fastchunking/__init__.py
def create_chunker(self, chunk_size): """Create a chunker performing content-defined chunking (CDC) using Rabin Karp's rolling hash scheme with a specific, expected chunk size. Args: chunk_size (int): (Expected) target chunk size. Returns: BaseChunker: A chunker object. """ rolling_hash = _rabinkarprh.RabinKarpHash(self.window_size, self._seed) rolling_hash.set_threshold(1.0 / chunk_size) return RabinKarpCDC._Chunker(rolling_hash)
def create_chunker(self, chunk_size): """Create a chunker performing content-defined chunking (CDC) using Rabin Karp's rolling hash scheme with a specific, expected chunk size. Args: chunk_size (int): (Expected) target chunk size. Returns: BaseChunker: A chunker object. """ rolling_hash = _rabinkarprh.RabinKarpHash(self.window_size, self._seed) rolling_hash.set_threshold(1.0 / chunk_size) return RabinKarpCDC._Chunker(rolling_hash)
[ "Create", "a", "chunker", "performing", "content", "-", "defined", "chunking", "(", "CDC", ")", "using", "Rabin", "Karp", "s", "rolling", "hash", "scheme", "with", "a", "specific", "expected", "chunk", "size", "." ]
netleibi/fastchunking
python
https://github.com/netleibi/fastchunking/blob/069b7689d26bc067120907f01d9453ab3d2efa74/fastchunking/__init__.py#L200-L212
[ "def", "create_chunker", "(", "self", ",", "chunk_size", ")", ":", "rolling_hash", "=", "_rabinkarprh", ".", "RabinKarpHash", "(", "self", ".", "window_size", ",", "self", ".", "_seed", ")", "rolling_hash", ".", "set_threshold", "(", "1.0", "/", "chunk_size", ")", "return", "RabinKarpCDC", ".", "_Chunker", "(", "rolling_hash", ")" ]
069b7689d26bc067120907f01d9453ab3d2efa74
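A minimal usage sketch for create_chunker, assuming RabinKarpCDC is constructed with a window size and seed (the attributes self.window_size and self._seed referenced above); the buffer is arbitrary random data.

import os
import fastchunking

cdc = fastchunking.RabinKarpCDC(window_size=48, seed=0)   # constructor arguments assumed from the attributes used above
chunker = cdc.create_chunker(chunk_size=4096)             # expected chunk size of roughly 4 KiB

message = os.urandom(1 << 20)                             # arbitrary 1 MiB buffer
print(list(chunker.next_chunk_boundaries(message)))       # boundary offsets relative to `message`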
valid
RabinKarpCDC.create_multilevel_chunker
Create a multi-level chunker performing content-defined chunking (CDC) using Rabin Karp's rolling hash scheme with different specific, expected chunk sizes. Args: chunk_sizes (list): List of (expected) target chunk sizes. Warning: For performance reasons, behavior is only defined if chunk sizes are passed in order, i.e., from lowest to highest value. Returns: BaseMultiLevelChunker: A multi-level chunker object.
fastchunking/__init__.py
def create_multilevel_chunker(self, chunk_sizes): """Create a multi-level chunker performing content-defined chunking (CDC) using Rabin Karp's rolling hash scheme with different specific, expected chunk sizes. Args: chunk_sizes (list): List of (expected) target chunk sizes. Warning: For performance reasons, behavior is only defined if chunk sizes are passed in order, i.e., from lowest to highest value. Returns: BaseMultiLevelChunker: A multi-level chunker object. """ rolling_hash = _rabinkarprh.RabinKarpMultiThresholdHash(self.window_size, self._seed, [1.0 / chunk_size for chunk_size in chunk_sizes]) return RabinKarpCDC._MultiLevelChunker(rolling_hash)
def create_multilevel_chunker(self, chunk_sizes): """Create a multi-level chunker performing content-defined chunking (CDC) using Rabin Karp's rolling hash scheme with different specific, expected chunk sizes. Args: chunk_sizes (list): List of (expected) target chunk sizes. Warning: For performance reasons, behavior is only defined if chunk sizes are passed in order, i.e., from lowest to highest value. Returns: BaseMultiLevelChunker: A multi-level chunker object. """ rolling_hash = _rabinkarprh.RabinKarpMultiThresholdHash(self.window_size, self._seed, [1.0 / chunk_size for chunk_size in chunk_sizes]) return RabinKarpCDC._MultiLevelChunker(rolling_hash)
[ "Create", "a", "multi", "-", "level", "chunker", "performing", "content", "-", "defined", "chunking", "(", "CDC", ")", "using", "Rabin", "Karp", "s", "rolling", "hash", "scheme", "with", "different", "specific", "expected", "chunk", "sizes", "." ]
netleibi/fastchunking
python
https://github.com/netleibi/fastchunking/blob/069b7689d26bc067120907f01d9453ab3d2efa74/fastchunking/__init__.py#L214-L230
[ "def", "create_multilevel_chunker", "(", "self", ",", "chunk_sizes", ")", ":", "rolling_hash", "=", "_rabinkarprh", ".", "RabinKarpMultiThresholdHash", "(", "self", ".", "window_size", ",", "self", ".", "_seed", ",", "[", "1.0", "/", "chunk_size", "for", "chunk_size", "in", "chunk_sizes", "]", ")", "return", "RabinKarpCDC", ".", "_MultiLevelChunker", "(", "rolling_hash", ")" ]
069b7689d26bc067120907f01d9453ab3d2efa74
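A similar sketch for the multi-level variant, reusing the next_chunk_boundaries_levels interface shown in the DefaultMultiLevelChunker record above; chunk sizes are passed from lowest to highest as the docstring warning requires, and the constructor arguments are assumed as before.

import os
import fastchunking

cdc = fastchunking.RabinKarpCDC(window_size=48, seed=0)     # constructor arguments assumed as before
ml_chunker = cdc.create_multilevel_chunker([1024, 8192])    # lowest to highest, per the warning

message = os.urandom(1 << 20)
for boundary, level in ml_chunker.next_chunk_boundaries_levels(message):
    # `level` indexes into [1024, 8192]; per the DefaultMultiLevelChunker docstring,
    # a shared boundary is reported once with the highest matching level
    print(boundary, level)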
valid
brightness
Assumes level is out of 100
milight/rgbw.py
def brightness(level=100, group=0): """ Assumes level is out of 100 """ if level not in range(0,101): raise Exception("Brightness must be value between 0 and 100") b = int(floor(level / 4.0) + 2) #lights want values 2-27 return (COMMANDS['ON'][group], Command(0x4E, b))
def brightness(level=100, group=0): """ Assumes level is out of 100 """ if level not in range(0,101): raise Exception("Brightness must be value between 0 and 100") b = int(floor(level / 4.0) + 2) #lights want values 2-27 return (COMMANDS['ON'][group], Command(0x4E, b))
[ "Assumes", "level", "is", "out", "of", "100" ]
McSwindler/python-milight
python
https://github.com/McSwindler/python-milight/blob/4891b1d7d6a720901a27a64f7b0d0c208f0c291f/milight/rgbw.py#L57-L62
[ "def", "brightness", "(", "level", "=", "100", ",", "group", "=", "0", ")", ":", "if", "level", "not", "in", "range", "(", "0", ",", "101", ")", ":", "raise", "Exception", "(", "\"Brightness must be value between 0 and 100\"", ")", "b", "=", "int", "(", "floor", "(", "level", "/", "4.0", ")", "+", "2", ")", "#lights want values 2-27", "return", "(", "COMMANDS", "[", "'ON'", "]", "[", "group", "]", ",", "Command", "(", "0x4E", ",", "b", ")", ")" ]
4891b1d7d6a720901a27a64f7b0d0c208f0c291f
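To make the 0x4E payload mapping concrete, here is the arithmetic from rgbw.brightness in isolation (no milight objects needed):

from math import floor

# brightness level 0..100 maps onto command payloads 2..27 ("lights want values 2-27")
for level in (0, 50, 100):
    print(level, int(floor(level / 4.0) + 2))   # -> 0 2, 50 14, 100 27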
valid
strip_minidom_whitespace
Strips all whitespace from a minidom XML node and its children This operation is made in-place.
structlog_pretty/utils.py
def strip_minidom_whitespace(node): """Strips all whitespace from a minidom XML node and its children This operation is made in-place.""" for child in node.childNodes: if child.nodeType == Node.TEXT_NODE: if child.nodeValue: child.nodeValue = child.nodeValue.strip() elif child.nodeType == Node.ELEMENT_NODE: strip_minidom_whitespace(child)
def strip_minidom_whitespace(node): """Strips all whitespace from a minidom XML node and its children This operation is made in-place.""" for child in node.childNodes: if child.nodeType == Node.TEXT_NODE: if child.nodeValue: child.nodeValue = child.nodeValue.strip() elif child.nodeType == Node.ELEMENT_NODE: strip_minidom_whitespace(child)
[ "Strips", "all", "whitespace", "from", "a", "minidom", "XML", "node", "and", "its", "children" ]
underyx/structlog-pretty
python
https://github.com/underyx/structlog-pretty/blob/e87e1ce582b94b21e1b65b1c326d4edf87f8bef3/structlog_pretty/utils.py#L4-L13
[ "def", "strip_minidom_whitespace", "(", "node", ")", ":", "for", "child", "in", "node", ".", "childNodes", ":", "if", "child", ".", "nodeType", "==", "Node", ".", "TEXT_NODE", ":", "if", "child", ".", "nodeValue", ":", "child", ".", "nodeValue", "=", "child", ".", "nodeValue", ".", "strip", "(", ")", "elif", "child", ".", "nodeType", "==", "Node", ".", "ELEMENT_NODE", ":", "strip_minidom_whitespace", "(", "child", ")" ]
e87e1ce582b94b21e1b65b1c326d4edf87f8bef3
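A short usage sketch with the standard library's minidom; the import path for strip_minidom_whitespace is taken from the record above.

from xml.dom.minidom import parseString
from structlog_pretty.utils import strip_minidom_whitespace

doc = parseString("<root>\n  <item> value </item>\n</root>")
strip_minidom_whitespace(doc.documentElement)
# whitespace-only text nodes are now empty strings and " value " has become "value"
print(doc.documentElement.toxml())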
valid
brightness
Assumes level is out of 100
milight/rgb.py
def brightness(level=100, group=0): """ Assumes level is out of 100 """ if level not in range(0,101): raise Exception("Brightness must be value between 0 and 100") b = int(floor(level / 10.0)) #lights have 10 levels of brightness commands = list(darkest(group)) for i in range(0, b): commands.append(COMMANDS['BRIGHTER']) return tuple(commands)
def brightness(level=100, group=0): """ Assumes level is out of 100 """ if level not in range(0,101): raise Exception("Brightness must be value between 0 and 100") b = int(floor(level / 10.0)) #lights have 10 levels of brightness commands = list(darkest(group)) for i in range(0, b): commands.append(COMMANDS['BRIGHTER']) return tuple(commands)
[ "Assumes", "level", "is", "out", "of", "100" ]
McSwindler/python-milight
python
https://github.com/McSwindler/python-milight/blob/4891b1d7d6a720901a27a64f7b0d0c208f0c291f/milight/rgb.py#L63-L71
[ "def", "brightness", "(", "level", "=", "100", ",", "group", "=", "0", ")", ":", "if", "level", "not", "in", "range", "(", "0", ",", "101", ")", ":", "raise", "Exception", "(", "\"Brightness must be value between 0 and 100\"", ")", "b", "=", "int", "(", "floor", "(", "level", "/", "10.0", ")", ")", "#lights have 10 levels of brightness", "commands", "=", "list", "(", "darkest", "(", "group", ")", ")", "for", "i", "in", "range", "(", "0", ",", "b", ")", ":", "commands", ".", "append", "(", "COMMANDS", "[", "'BRIGHTER'", "]", ")", "return", "tuple", "(", "commands", ")" ]
4891b1d7d6a720901a27a64f7b0d0c208f0c291f
valid
warmness
Assumes level is out of 100
milight/white.py
def warmness(level=100, group=0): """ Assumes level is out of 100 """ if level not in range(0,101): raise Exception("Warmness must be value between 0 and 100") b = int(floor(level / 10.0)) #lights have 10 levels of warmness commands = list(coolest(group)) for i in range(0, b): commands.append(COMMANDS['WARMER']) return tuple(commands)
def warmness(level=100, group=0): """ Assumes level is out of 100 """ if level not in range(0,101): raise Exception("Warmness must be value between 0 and 100") b = int(floor(level / 10.0)) #lights have 10 levels of warmness commands = list(coolest(group)) for i in range(0, b): commands.append(COMMANDS['WARMER']) return tuple(commands)
[ "Assumes", "level", "is", "out", "of", "100" ]
McSwindler/python-milight
python
https://github.com/McSwindler/python-milight/blob/4891b1d7d6a720901a27a64f7b0d0c208f0c291f/milight/white.py#L87-L95
[ "def", "warmness", "(", "level", "=", "100", ",", "group", "=", "0", ")", ":", "if", "level", "not", "in", "range", "(", "0", ",", "101", ")", ":", "raise", "Exception", "(", "\"Warmness must be value between 0 and 100\"", ")", "b", "=", "int", "(", "floor", "(", "level", "/", "10.0", ")", ")", "#lights have 10 levels of warmness", "commands", "=", "list", "(", "coolest", "(", "group", ")", ")", "for", "i", "in", "range", "(", "0", ",", "b", ")", ":", "commands", ".", "append", "(", "COMMANDS", "[", "'WARMER'", "]", ")", "return", "tuple", "(", "commands", ")" ]
4891b1d7d6a720901a27a64f7b0d0c208f0c291f
valid
color_from_hls
Takes an HLS color and converts it to a proper hue. Bulbs use a BGR order instead of RGB
milight/__init__.py
def color_from_hls(hue, light, sat): """ Takes a hls color and converts to proper hue Bulbs use a BGR order instead of RGB """ if light > 0.95: #too bright, let's just switch to white return 256 elif light < 0.05: #too dark, let's shut it off return -1 else: hue = (-hue + 1 + 2.0/3.0) % 1 # invert and translate by 2/3 return int(floor(hue * 256))
def color_from_hls(hue, light, sat): """ Takes a hls color and converts to proper hue Bulbs use a BGR order instead of RGB """ if light > 0.95: #too bright, let's just switch to white return 256 elif light < 0.05: #too dark, let's shut it off return -1 else: hue = (-hue + 1 + 2.0/3.0) % 1 # invert and translate by 2/3 return int(floor(hue * 256))
[ "Takes", "a", "hls", "color", "and", "converts", "to", "proper", "hue", "Bulbs", "use", "a", "BGR", "order", "instead", "of", "RGB" ]
McSwindler/python-milight
python
https://github.com/McSwindler/python-milight/blob/4891b1d7d6a720901a27a64f7b0d0c208f0c291f/milight/__init__.py#L7-L16
[ "def", "color_from_hls", "(", "hue", ",", "light", ",", "sat", ")", ":", "if", "light", ">", "0.95", ":", "#too bright, let's just switch to white", "return", "256", "elif", "light", "<", "0.05", ":", "#too dark, let's shut it off", "return", "-", "1", "else", ":", "hue", "=", "(", "-", "hue", "+", "1", "+", "2.0", "/", "3.0", ")", "%", "1", "# invert and translate by 2/3", "return", "int", "(", "floor", "(", "hue", "*", "256", ")", ")" ]
4891b1d7d6a720901a27a64f7b0d0c208f0c291f
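A few worked values for color_from_hls, following the code above (the hue is inverted and shifted by 2/3, so blue lands at 0 and red at 170); the record's path is milight/__init__.py, i.e. the top-level milight package.

from milight import color_from_hls

print(color_from_hls(0.0, 0.5, 1.0))       # red hue   -> 170
print(color_from_hls(1.0 / 3, 0.5, 1.0))   # green hue -> 85
print(color_from_hls(2.0 / 3, 0.5, 1.0))   # blue hue  -> 0
print(color_from_hls(0.0, 0.99, 1.0))      # too bright -> 256 (switch to white)
print(color_from_hls(0.0, 0.01, 1.0))      # too dark   -> -1 (shut off)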
valid
color_from_rgb
Takes your standard rgb color and converts it to a proper hue value
milight/__init__.py
def color_from_rgb(red, green, blue): """ Takes your standard rgb color and converts it to a proper hue value """ r = min(red, 255) g = min(green, 255) b = min(blue, 255) if r > 1 or g > 1 or b > 1: r = r / 255.0 g = g / 255.0 b = b / 255.0 return color_from_hls(*rgb_to_hls(r,g,b))
def color_from_rgb(red, green, blue): """ Takes your standard rgb color and converts it to a proper hue value """ r = min(red, 255) g = min(green, 255) b = min(blue, 255) if r > 1 or g > 1 or b > 1: r = r / 255.0 g = g / 255.0 b = b / 255.0 return color_from_hls(*rgb_to_hls(r,g,b))
[ "Takes", "your", "standard", "rgb", "color", "and", "converts", "it", "to", "a", "proper", "hue", "value" ]
McSwindler/python-milight
python
https://github.com/McSwindler/python-milight/blob/4891b1d7d6a720901a27a64f7b0d0c208f0c291f/milight/__init__.py#L18-L30
[ "def", "color_from_rgb", "(", "red", ",", "green", ",", "blue", ")", ":", "r", "=", "min", "(", "red", ",", "255", ")", "g", "=", "min", "(", "green", ",", "255", ")", "b", "=", "min", "(", "blue", ",", "255", ")", "if", "r", ">", "1", "or", "g", ">", "1", "or", "b", ">", "1", ":", "r", "=", "r", "/", "255.0", "g", "=", "g", "/", "255.0", "b", "=", "b", "/", "255.0", "return", "color_from_hls", "(", "*", "rgb_to_hls", "(", "r", ",", "g", ",", "b", ")", ")" ]
4891b1d7d6a720901a27a64f7b0d0c208f0c291f
valid
color_from_hex
Takes an HTML hex code and converts it to a proper hue value
milight/__init__.py
def color_from_hex(value): """ Takes an HTML hex code and converts it to a proper hue value """ if "#" in value: value = value[1:] try: unhexed = bytes.fromhex(value) except: unhexed = binascii.unhexlify(value) # Fallback for 2.7 compatibility return color_from_rgb(*struct.unpack('BBB',unhexed))
def color_from_hex(value): """ Takes an HTML hex code and converts it to a proper hue value """ if "#" in value: value = value[1:] try: unhexed = bytes.fromhex(value) except: unhexed = binascii.unhexlify(value) # Fallback for 2.7 compatibility return color_from_rgb(*struct.unpack('BBB',unhexed))
[ "Takes", "an", "HTML", "hex", "code", "and", "converts", "it", "to", "a", "proper", "hue", "value" ]
McSwindler/python-milight
python
https://github.com/McSwindler/python-milight/blob/4891b1d7d6a720901a27a64f7b0d0c208f0c291f/milight/__init__.py#L32-L42
[ "def", "color_from_hex", "(", "value", ")", ":", "if", "\"#\"", "in", "value", ":", "value", "=", "value", "[", "1", ":", "]", "try", ":", "unhexed", "=", "bytes", ".", "fromhex", "(", "value", ")", "except", ":", "unhexed", "=", "binascii", ".", "unhexlify", "(", "value", ")", "# Fallback for 2.7 compatibility", "return", "color_from_rgb", "(", "*", "struct", ".", "unpack", "(", "'BBB'", ",", "unhexed", ")", ")" ]
4891b1d7d6a720901a27a64f7b0d0c208f0c291f
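The hex and RGB helpers reduce to the same mapping; a couple of round-trip checks consistent with the functions above:

from milight import color_from_hex, color_from_rgb

print(color_from_rgb(255, 0, 0))    # 170
print(color_from_hex('#FF0000'))    # 170 (same as above)
print(color_from_hex('0000FF'))     # 0   (blue maps to hue 0)
print(color_from_hex('#FFFFFF'))    # 256 (lightness > 0.95 -> white)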
valid
LightBulb.wait
Wait for x seconds; each wait command is 100ms
milight/__init__.py
def wait(self, sec=0.1): """ Wait for x seconds each wait command is 100ms """ sec = max(sec, 0) reps = int(floor(sec / 0.1)) commands = [] for i in range(0, reps): commands.append(Command(0x00, wait=True)) return tuple(commands)
def wait(self, sec=0.1): """ Wait for x seconds each wait command is 100ms """ sec = max(sec, 0) reps = int(floor(sec / 0.1)) commands = [] for i in range(0, reps): commands.append(Command(0x00, wait=True)) return tuple(commands)
[ "Wait", "for", "x", "seconds", "each", "wait", "command", "is", "100ms" ]
McSwindler/python-milight
python
https://github.com/McSwindler/python-milight/blob/4891b1d7d6a720901a27a64f7b0d0c208f0c291f/milight/__init__.py#L181-L189
[ "def", "wait", "(", "self", ",", "sec", "=", "0.1", ")", ":", "sec", "=", "max", "(", "sec", ",", "0", ")", "reps", "=", "int", "(", "floor", "(", "sec", "/", "0.1", ")", ")", "commands", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "reps", ")", ":", "commands", ".", "append", "(", "Command", "(", "0x00", ",", "wait", "=", "True", ")", ")", "return", "tuple", "(", "commands", ")" ]
4891b1d7d6a720901a27a64f7b0d0c208f0c291f
valid
toc
ex1) tic() # save start time - time1 toc() # print elapsed time from last calling tic() toc() # print elapsed time from last calling tic() ex2) t0 = tic() # simple t1 = tic() toc(t1) # print time from t1 toc(t0) # print time from t0 :param t: time: start time (the return value of tic()) :param name: str: label to include in the output ['tictoc']
snipy/tictoc.py
def toc(t=None, name='tictoc'): """ ex1) tic() # save start time - time1 toc() # print elapsed time from last calling tic() toc() # print elapsed time from last calling tic() ex2) t0 = tic() # simple t1 = tic() toc(t1) # print time from t1 toc(t0) # print time from t0 :param t: time: 시작 시간 (tic()의 리턴 값) :param name: str: 출력시 포함할 문자 ['tictoc'] """ try: t = t or tic.last_tic_time except AttributeError: # tic()부터 콜하세요 logg.warn('calling tic() need to use toc()') return elapsed = time.time() - t logg.info('%s Elapsed: %s secs' % (name, elapsed)) return elapsed
def toc(t=None, name='tictoc'): """ ex1) tic() # save start time - time1 toc() # print elapsed time from last calling tic() toc() # print elapsed time from last calling tic() ex2) t0 = tic() # simple t1 = tic() toc(t1) # print time from t1 toc(t0) # print time from t0 :param t: time: 시작 시간 (tic()의 리턴 값) :param name: str: 출력시 포함할 문자 ['tictoc'] """ try: t = t or tic.last_tic_time except AttributeError: # tic()부터 콜하세요 logg.warn('calling tic() need to use toc()') return elapsed = time.time() - t logg.info('%s Elapsed: %s secs' % (name, elapsed)) return elapsed
[ "ex1", ")", "tic", "()", "#", "save", "start", "time", "-", "time1", "toc", "()", "#", "print", "elapsed", "time", "from", "last", "calling", "tic", "()", "toc", "()", "#", "print", "elapsed", "time", "from", "last", "calling", "tic", "()" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/tictoc.py#L29-L53
[ "def", "toc", "(", "t", "=", "None", ",", "name", "=", "'tictoc'", ")", ":", "try", ":", "t", "=", "t", "or", "tic", ".", "last_tic_time", "except", "AttributeError", ":", "# tic()부터 콜하세요", "logg", ".", "warn", "(", "'calling tic() need to use toc()'", ")", "return", "elapsed", "=", "time", ".", "time", "(", ")", "-", "t", "logg", ".", "info", "(", "'%s Elapsed: %s secs'", "%", "(", "name", ",", "elapsed", ")", ")", "return", "elapsed" ]
408520867179f99b3158b57520e2619f3fecd69b
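A usage sketch of the tic/toc pair described in the docstring; tic itself is not among these records, so its behaviour (remembering a start time and returning it) is assumed from the description above.

import time
from snipy.tictoc import tic, toc

t0 = tic()                 # remember a start time and also return it (assumed per the docstring)
time.sleep(0.2)
toc()                      # logs seconds elapsed since the last tic()
toc(t0, name='my block')   # logs seconds elapsed since t0, labelled 'my block'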
valid
tictoc
with tictoc('any string or not'): print 'cool~~~' cool~~~ 2015-12-30 14:39:28,458 [INFO] tictoc Elapsed: 7.10487365723e-05 secs :param name: str
snipy/tictoc.py
def tictoc(name='tictoc'): """ with tictoc('any string or not'): print 'cool~~~' cool~~~ 2015-12-30 14:39:28,458 [INFO] tictoc Elapsed: 7.10487365723e-05 secs :param name: str """ t = time.time() yield logg.info('%s Elapsed: %s secs' % (name, time.time() - t))
def tictoc(name='tictoc'): """ with tictoc('any string or not'): print 'cool~~~' cool~~~ 2015-12-30 14:39:28,458 [INFO] tictoc Elapsed: 7.10487365723e-05 secs :param name: str """ t = time.time() yield logg.info('%s Elapsed: %s secs' % (name, time.time() - t))
[ "with", "tictoc", "(", "any", "string", "or", "not", ")", ":", "print", "cool~~~", "cool~~~", "2015", "-", "12", "-", "30", "14", ":", "39", ":", "28", "458", "[", "INFO", "]", "tictoc", "Elapsed", ":", "7", ".", "10487365723e", "-", "05", "secs", ":", "param", "name", ":", "str" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/tictoc.py#L57-L67
[ "def", "tictoc", "(", "name", "=", "'tictoc'", ")", ":", "t", "=", "time", ".", "time", "(", ")", "yield", "logg", ".", "info", "(", "'%s Elapsed: %s secs'", "%", "(", "name", ",", "time", ".", "time", "(", ")", "-", "t", ")", ")" ]
408520867179f99b3158b57520e2619f3fecd69b
valid
split_rand
data(1-ratio), data(with ratio) = split_rand(data_or_size, ratio, seed) :param data_or_size: data or count :param ratio: :param seed: :return:
snipy/train/crossvalidation_set.py
def split_rand(data_or_size, ratio, seed): """ data(1-ratio), data(with ratio) = split_rand(data_or_size, ratio, seed) :param data_or_size: data or count :param ratio: :param seed: :return: """ if not isinstance(data_or_size, int): sz = len(data_or_size) data = np.asarray(data_or_size) else: sz = data_or_size data = np.arange(sz) if not ratio: return data, [] i = np.zeros(sz, dtype='bool') lattersz = int(sz * ratio) i[:lattersz] = True with np_seed(seed): np.random.shuffle(i) return data[~i], data[i]
def split_rand(data_or_size, ratio, seed): """ data(1-ratio), data(with ratio) = split_rand(data_or_size, ratio, seed) :param data_or_size: data or count :param ratio: :param seed: :return: """ if not isinstance(data_or_size, int): sz = len(data_or_size) data = np.asarray(data_or_size) else: sz = data_or_size data = np.arange(sz) if not ratio: return data, [] i = np.zeros(sz, dtype='bool') lattersz = int(sz * ratio) i[:lattersz] = True with np_seed(seed): np.random.shuffle(i) return data[~i], data[i]
[ "data", "(", "1", "-", "ratio", ")", "data", "(", "with", "ratio", ")", "=", "split_rand", "(", "data_or_size", "ratio", "seed", ")", ":", "param", "data_or_size", ":", "data", "or", "count", ":", "param", "ratio", ":", ":", "param", "seed", ":", ":", "return", ":" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/train/crossvalidation_set.py#L6-L30
[ "def", "split_rand", "(", "data_or_size", ",", "ratio", ",", "seed", ")", ":", "if", "not", "isinstance", "(", "data_or_size", ",", "int", ")", ":", "sz", "=", "len", "(", "data_or_size", ")", "data", "=", "np", ".", "asarray", "(", "data_or_size", ")", "else", ":", "sz", "=", "data_or_size", "data", "=", "np", ".", "arange", "(", "sz", ")", "if", "not", "ratio", ":", "return", "data", ",", "[", "]", "i", "=", "np", ".", "zeros", "(", "sz", ",", "dtype", "=", "'bool'", ")", "lattersz", "=", "int", "(", "sz", "*", "ratio", ")", "i", "[", ":", "lattersz", "]", "=", "True", "with", "np_seed", "(", "seed", ")", ":", "np", ".", "random", ".", "shuffle", "(", "i", ")", "return", "data", "[", "~", "i", "]", ",", "data", "[", "i", "]" ]
408520867179f99b3158b57520e2619f3fecd69b
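A quick check of the split sizes produced by split_rand; the import path follows the record above and numpy is assumed available.

import numpy as np
from snipy.train.crossvalidation_set import split_rand

train, test = split_rand(100, ratio=0.2, seed=7)
print(len(train), len(test))              # 80 20
a, b = split_rand(np.arange(10), ratio=0.5, seed=7)
print(sorted(np.concatenate([a, b])))     # all ten indices, split disjointly 5/5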
valid
kfolds
return train, valid [,test] testset if p_testset :param n: :param k: :param sz: :param p_testset: :param seed: :return:
snipy/train/crossvalidation_set.py
def kfolds(n, k, sz, p_testset=None, seed=7238): """ return train, valid [,test] testset if p_testset :param n: :param k: :param sz: :param p_testset: :param seed: :return: """ trains, tests = split_rand(sz, p_testset, seed) ntrain = len(trains) # np.random.seed(seed) with np_seed(seed): np.random.shuffle(trains) if n == k: # no split train, valid = trains, trains else: foldsz = ntrain // k itrain = np.arange(ntrain) // foldsz != n train = trains[itrain] valid = trains[~itrain] if not p_testset: return train, valid else: return train, valid, tests
def kfolds(n, k, sz, p_testset=None, seed=7238): """ return train, valid [,test] testset if p_testset :param n: :param k: :param sz: :param p_testset: :param seed: :return: """ trains, tests = split_rand(sz, p_testset, seed) ntrain = len(trains) # np.random.seed(seed) with np_seed(seed): np.random.shuffle(trains) if n == k: # no split train, valid = trains, trains else: foldsz = ntrain // k itrain = np.arange(ntrain) // foldsz != n train = trains[itrain] valid = trains[~itrain] if not p_testset: return train, valid else: return train, valid, tests
[ "return", "train", "valid", "[", "test", "]", "testset", "if", "p_testset", ":", "param", "n", ":", ":", "param", "k", ":", ":", "param", "sz", ":", ":", "param", "p_testset", ":", ":", "param", "seed", ":", ":", "return", ":" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/train/crossvalidation_set.py#L45-L76
[ "def", "kfolds", "(", "n", ",", "k", ",", "sz", ",", "p_testset", "=", "None", ",", "seed", "=", "7238", ")", ":", "trains", ",", "tests", "=", "split_rand", "(", "sz", ",", "p_testset", ",", "seed", ")", "ntrain", "=", "len", "(", "trains", ")", "# np.random.seed(seed)", "with", "np_seed", "(", "seed", ")", ":", "np", ".", "random", ".", "shuffle", "(", "trains", ")", "if", "n", "==", "k", ":", "# no split", "train", ",", "valid", "=", "trains", ",", "trains", "else", ":", "foldsz", "=", "ntrain", "//", "k", "itrain", "=", "np", ".", "arange", "(", "ntrain", ")", "//", "foldsz", "!=", "n", "train", "=", "trains", "[", "itrain", "]", "valid", "=", "trains", "[", "~", "itrain", "]", "if", "not", "p_testset", ":", "return", "train", ",", "valid", "else", ":", "return", "train", ",", "valid", ",", "tests" ]
408520867179f99b3158b57520e2619f3fecd69b
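Fold sizes for a 5-fold split over 100 samples, with and without a held-out test set, as implied by the code above:

from snipy.train.crossvalidation_set import kfolds

train, valid = kfolds(n=0, k=5, sz=100)
print(len(train), len(valid))              # 80 20

train, valid, test = kfolds(n=0, k=5, sz=100, p_testset=0.2)
print(len(train), len(valid), len(test))   # 64 16 20 (folds are taken over the remaining 80)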
valid
date_proc
A decorator checking whether a date parameter is passed in or not. If not, the default date value is all PTT data. Else, return PTT data with the right date. Args: func: function you want to decorate. request: WSGI request parameter obtained from django. Returns: date: a datetime variable; you can give only year, year + month, or year + month + day, three types. The missing part is assigned the default value 1 (for month that is Jan, for day that is 1).
djangoApiDec/djangoApiDec.py
def date_proc(func): """ An decorator checking whether date parameter is passing in or not. If not, default date value is all PTT data. Else, return PTT data with right date. Args: func: function you want to decorate. request: WSGI request parameter getten from django. Returns: date: a datetime variable, you can only give year, year + month or year + month + day, three type. The missing part would be assigned default value 1 (for month is Jan, for day is 1). """ @wraps(func) def wrapped(request, *args, **kwargs): if 'date' in request.GET and request.GET['date'] == '': raise Http404("api does not exist") elif 'date' not in request.GET: date = datetime.today() return func(request, date) else: date = tuple(int(intValue) for intValue in request.GET['date'].split('-')) if len(date) == 3: date = datetime(*date) elif len(date) == 2: date = datetime(*date, day = 1) else: date = datetime(*date, month = 1, day = 1) return func(request, date) return wrapped
def date_proc(func): """ An decorator checking whether date parameter is passing in or not. If not, default date value is all PTT data. Else, return PTT data with right date. Args: func: function you want to decorate. request: WSGI request parameter getten from django. Returns: date: a datetime variable, you can only give year, year + month or year + month + day, three type. The missing part would be assigned default value 1 (for month is Jan, for day is 1). """ @wraps(func) def wrapped(request, *args, **kwargs): if 'date' in request.GET and request.GET['date'] == '': raise Http404("api does not exist") elif 'date' not in request.GET: date = datetime.today() return func(request, date) else: date = tuple(int(intValue) for intValue in request.GET['date'].split('-')) if len(date) == 3: date = datetime(*date) elif len(date) == 2: date = datetime(*date, day = 1) else: date = datetime(*date, month = 1, day = 1) return func(request, date) return wrapped
[ "An", "decorator", "checking", "whether", "date", "parameter", "is", "passing", "in", "or", "not", ".", "If", "not", "default", "date", "value", "is", "all", "PTT", "data", ".", "Else", "return", "PTT", "data", "with", "right", "date", ".", "Args", ":", "func", ":", "function", "you", "want", "to", "decorate", ".", "request", ":", "WSGI", "request", "parameter", "getten", "from", "django", "." ]
Stufinite/djangoApiDec
python
https://github.com/Stufinite/djangoApiDec/blob/8b2d5776b3413b1b850df12a92f30526c05c0a46/djangoApiDec/djangoApiDec.py#L8-L36
[ "def", "date_proc", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapped", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "'date'", "in", "request", ".", "GET", "and", "request", ".", "GET", "[", "'date'", "]", "==", "''", ":", "raise", "Http404", "(", "\"api does not exist\"", ")", "elif", "'date'", "not", "in", "request", ".", "GET", ":", "date", "=", "datetime", ".", "today", "(", ")", "return", "func", "(", "request", ",", "date", ")", "else", ":", "date", "=", "tuple", "(", "int", "(", "intValue", ")", "for", "intValue", "in", "request", ".", "GET", "[", "'date'", "]", ".", "split", "(", "'-'", ")", ")", "if", "len", "(", "date", ")", "==", "3", ":", "date", "=", "datetime", "(", "*", "date", ")", "elif", "len", "(", "date", ")", "==", "2", ":", "date", "=", "datetime", "(", "*", "date", ",", "day", "=", "1", ")", "else", ":", "date", "=", "datetime", "(", "*", "date", ",", "month", "=", "1", ",", "day", "=", "1", ")", "return", "func", "(", "request", ",", "date", ")", "return", "wrapped" ]
8b2d5776b3413b1b850df12a92f30526c05c0a46
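A hedged sketch of a view using date_proc; the view name and response shape are hypothetical, only the decorator behaviour is taken from the code above.

from django.http import JsonResponse
from djangoApiDec.djangoApiDec import date_proc

@date_proc
def article_list(request, date):   # hypothetical view; `date` is injected by the decorator
    # ?date=2017-05 -> datetime(2017, 5, 1); ?date=2017 -> datetime(2017, 1, 1)
    # no date param -> datetime.today(); ?date= (empty value) -> Http404
    return JsonResponse({'year': date.year, 'month': date.month, 'day': date.day})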
valid
queryString_required
A decorator checking whether the queryString keys are valid or not Args: str: allowed queryString key Returns: if the request contains an invalid queryString key, it will raise an exception.
djangoApiDec/djangoApiDec.py
def queryString_required(strList): """ An decorator checking whether queryString key is valid or not Args: str: allowed queryString key Returns: if contains invalid queryString key, it will raise exception. """ def _dec(function): @wraps(function) def _wrap(request, *args, **kwargs): for i in strList: if i not in request.GET: raise Http404("api does not exist") return function(request, *args, **kwargs) return _wrap return _dec
def queryString_required(strList): """ An decorator checking whether queryString key is valid or not Args: str: allowed queryString key Returns: if contains invalid queryString key, it will raise exception. """ def _dec(function): @wraps(function) def _wrap(request, *args, **kwargs): for i in strList: if i not in request.GET: raise Http404("api does not exist") return function(request, *args, **kwargs) return _wrap return _dec
[ "An", "decorator", "checking", "whether", "queryString", "key", "is", "valid", "or", "not", "Args", ":", "str", ":", "allowed", "queryString", "key" ]
Stufinite/djangoApiDec
python
https://github.com/Stufinite/djangoApiDec/blob/8b2d5776b3413b1b850df12a92f30526c05c0a46/djangoApiDec/djangoApiDec.py#L38-L54
[ "def", "queryString_required", "(", "strList", ")", ":", "def", "_dec", "(", "function", ")", ":", "@", "wraps", "(", "function", ")", "def", "_wrap", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "i", "in", "strList", ":", "if", "i", "not", "in", "request", ".", "GET", ":", "raise", "Http404", "(", "\"api does not exist\"", ")", "return", "function", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "_wrap", "return", "_dec" ]
8b2d5776b3413b1b850df12a92f30526c05c0a46
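A matching sketch for queryString_required; the required keys and the view are hypothetical.

from django.http import JsonResponse
from djangoApiDec.djangoApiDec import queryString_required

@queryString_required(['school', 'semester'])   # hypothetical required keys
def course_list(request):
    # reaching the body means both ?school=... and ?semester=... were supplied;
    # otherwise the decorator raises Http404 before the view runs
    return JsonResponse({'school': request.GET['school'], 'semester': request.GET['semester']})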
valid
queryString_required_ClassVersion
A decorator checking whether the queryString keys are valid or not Args: str: allowed queryString key Returns: if the request contains an invalid queryString key, it will raise an exception.
djangoApiDec/djangoApiDec.py
def queryString_required_ClassVersion(strList): """ An decorator checking whether queryString key is valid or not Args: str: allowed queryString key Returns: if contains invalid queryString key, it will raise exception. """ def _dec(function): @wraps(function) def _wrap(classInstance, request, *args, **kwargs): for i in strList: if i not in request.GET: raise Http404("api does not exist") return function(classInstance, request, *args, **kwargs) return _wrap return _dec
def queryString_required_ClassVersion(strList): """ An decorator checking whether queryString key is valid or not Args: str: allowed queryString key Returns: if contains invalid queryString key, it will raise exception. """ def _dec(function): @wraps(function) def _wrap(classInstance, request, *args, **kwargs): for i in strList: if i not in request.GET: raise Http404("api does not exist") return function(classInstance, request, *args, **kwargs) return _wrap return _dec
[ "An", "decorator", "checking", "whether", "queryString", "key", "is", "valid", "or", "not", "Args", ":", "str", ":", "allowed", "queryString", "key" ]
Stufinite/djangoApiDec
python
https://github.com/Stufinite/djangoApiDec/blob/8b2d5776b3413b1b850df12a92f30526c05c0a46/djangoApiDec/djangoApiDec.py#L56-L72
[ "def", "queryString_required_ClassVersion", "(", "strList", ")", ":", "def", "_dec", "(", "function", ")", ":", "@", "wraps", "(", "function", ")", "def", "_wrap", "(", "classInstance", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "i", "in", "strList", ":", "if", "i", "not", "in", "request", ".", "GET", ":", "raise", "Http404", "(", "\"api does not exist\"", ")", "return", "function", "(", "classInstance", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "_wrap", "return", "_dec" ]
8b2d5776b3413b1b850df12a92f30526c05c0a46
valid
getJsonFromApi
Return json from querying a Web Api Args: view: django view function. request: http request object obtained from django. Returns: json format dictionary
djangoApiDec/djangoApiDec.py
def getJsonFromApi(view, request): """Return json from querying Web Api Args: view: django view function. request: http request object got from django. Returns: json format dictionary """ jsonText = view(request) jsonText = json.loads(jsonText.content.decode('utf-8')) return jsonText
def getJsonFromApi(view, request): """Return json from querying Web Api Args: view: django view function. request: http request object got from django. Returns: json format dictionary """ jsonText = view(request) jsonText = json.loads(jsonText.content.decode('utf-8')) return jsonText
[ "Return", "json", "from", "querying", "Web", "Api" ]
Stufinite/djangoApiDec
python
https://github.com/Stufinite/djangoApiDec/blob/8b2d5776b3413b1b850df12a92f30526c05c0a46/djangoApiDec/djangoApiDec.py#L101-L112
[ "def", "getJsonFromApi", "(", "view", ",", "request", ")", ":", "jsonText", "=", "view", "(", "request", ")", "jsonText", "=", "json", ".", "loads", "(", "jsonText", ".", "content", ".", "decode", "(", "'utf-8'", ")", ")", "return", "jsonText" ]
8b2d5776b3413b1b850df12a92f30526c05c0a46
valid
progress
Progress bar. for i in progress(10): print i for i in progress(iter): print i
snipy/progress.py
def progress(iter, **kwargs): """ 프로그래스 bar for i in progress(10): print i for i in progress(iter): print i """ if isinstance(iter, int): iter = xrange(iter) if hasattr(iter, '__len__') or 'target' in kwargs: cls = Progress else: cls = ProgressBase return cls(iter, **kwargs)
def progress(iter, **kwargs): """ 프로그래스 bar for i in progress(10): print i for i in progress(iter): print i """ if isinstance(iter, int): iter = xrange(iter) if hasattr(iter, '__len__') or 'target' in kwargs: cls = Progress else: cls = ProgressBase return cls(iter, **kwargs)
[ "프로그래스", "bar", "for", "i", "in", "progress", "(", "10", ")", ":", "print", "i" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/progress.py#L180-L196
[ "def", "progress", "(", "iter", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "iter", ",", "int", ")", ":", "iter", "=", "xrange", "(", "iter", ")", "if", "hasattr", "(", "iter", ",", "'__len__'", ")", "or", "'target'", "in", "kwargs", ":", "cls", "=", "Progress", "else", ":", "cls", "=", "ProgressBase", "return", "cls", "(", "iter", ",", "*", "*", "kwargs", ")" ]
408520867179f99b3158b57520e2619f3fecd69b
valid
threaded
function decorator
snipy/concurrent.py
def threaded(f, *args, **kwargs): """function decorator """ if args or kwargs: return Threaded(f, *args, **kwargs) @wraps(f) def wrapped(*wargs, **wkwargs): return Threaded(f, *wargs, **wkwargs) return wrapped
def threaded(f, *args, **kwargs): """function decorator """ if args or kwargs: return Threaded(f, *args, **kwargs) @wraps(f) def wrapped(*wargs, **wkwargs): return Threaded(f, *wargs, **wkwargs) return wrapped
[ "function", "decorator" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/concurrent.py#L84-L94
[ "def", "threaded", "(", "f", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "args", "or", "kwargs", ":", "return", "Threaded", "(", "f", ",", "*", "args", ",", "*", "*", "kwargs", ")", "@", "wraps", "(", "f", ")", "def", "wrapped", "(", "*", "wargs", ",", "*", "*", "wkwargs", ")", ":", "return", "Threaded", "(", "f", ",", "*", "wargs", ",", "*", "*", "wkwargs", ")", "return", "wrapped" ]
408520867179f99b3158b57520e2619f3fecd69b
valid
spawn
decorator
snipy/concurrent.py
def spawn(f, *args, **kwargs): """decorator """ if args or kwargs: return Spawn(f, *args, **kwargs) @wraps(f) def wrapped(*args, **kwargs): return Spawn(f, *args, **kwargs) return wrapped
def spawn(f, *args, **kwargs): """decorator """ if args or kwargs: return Spawn(f, *args, **kwargs) @wraps(f) def wrapped(*args, **kwargs): return Spawn(f, *args, **kwargs) return wrapped
[ "decorator" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/concurrent.py#L97-L107
[ "def", "spawn", "(", "f", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "args", "or", "kwargs", ":", "return", "Spawn", "(", "f", ",", "*", "args", ",", "*", "*", "kwargs", ")", "@", "wraps", "(", "f", ")", "def", "wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "Spawn", "(", "f", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapped" ]
408520867179f99b3158b57520e2619f3fecd69b
valid
ODict.intersect
dictobj of the items whose keys appear in both self and other :type other: dict :rtype: dictobj:
snipy/odict.py
def intersect(self, other): """ self와 other 키가 동일한 아이템의 dictobj :type other: dict :rtype: dictobj: """ return ODict((k, self[k]) for k in self if k in other)
def intersect(self, other): """ self와 other 키가 동일한 아이템의 dictobj :type other: dict :rtype: dictobj: """ return ODict((k, self[k]) for k in self if k in other)
[ "self와", "other", "키가", "동일한", "아이템의", "dictobj", ":", "type", "other", ":", "dict", ":", "rtype", ":", "dictobj", ":" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/odict.py#L80-L86
[ "def", "intersect", "(", "self", ",", "other", ")", ":", "return", "ODict", "(", "(", "k", ",", "self", "[", "k", "]", ")", "for", "k", "in", "self", "if", "k", "in", "other", ")" ]
408520867179f99b3158b57520e2619f3fecd69b
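A small sketch of intersect, assuming ODict behaves like an ordered dict whose constructor accepts key/value pairs (as the methods in these records use it); the import path follows the record above.

from snipy.odict import ODict

a = ODict([('x', 1), ('y', 2), ('z', 3)])
print(a.intersect({'y': 0, 'z': 0}))   # keeps only the items of `a` whose keys also appear in the other dict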
valid
ODict.from_dict
Recursively convert a dict to a dictobj :param dic: :return:
snipy/odict.py
def from_dict(dic): """ recursive dict to dictobj 컨버트 :param dic: :return: """ return ODict((k, ODict.convert_ifdic(v)) for k, v in dic.items())
def from_dict(dic): """ recursive dict to dictobj 컨버트 :param dic: :return: """ return ODict((k, ODict.convert_ifdic(v)) for k, v in dic.items())
[ "recursive", "dict", "to", "dictobj", "컨버트", ":", "param", "dic", ":", ":", "return", ":" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/odict.py#L89-L95
[ "def", "from_dict", "(", "dic", ")", ":", "return", "ODict", "(", "(", "k", ",", "ODict", ".", "convert_ifdic", "(", "v", ")", ")", "for", "k", ",", "v", "in", "dic", ".", "items", "(", ")", ")" ]
408520867179f99b3158b57520e2619f3fecd69b
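And from_dict, which per its docstring converts nested dicts recursively (via convert_ifdic, not shown in these records); the nested config is made up for illustration.

from snipy.odict import ODict

nested = {'model': {'lr': 0.01, 'layers': {'hidden': 128}}, 'epochs': 10}
cfg = ODict.from_dict(nested)
print(type(cfg), type(cfg['model']))   # both should be ODict if the recursive conversion applies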
valid
plots
simple wrapper plot with labels and skip x :param yonly_or_xy: :param kwargs: :return:
snipy/plt/ploting.py
def plots(data, **kwargs): """ simple wrapper plot with labels and skip x :param yonly_or_xy: :param kwargs: :return: """ labels = kwargs.pop('labels', '') loc = kwargs.pop('loc', 1) # if len(yonly_or_xy) == 1: # x = range(len(yonly_or_xy)) # y = yonly_or_xy # else: # x = yonly_or_xy[0] # y = yonly_or_xy[1:] lines = plt.plot(np.asarray(data).T, **kwargs) if labels: plt.legend(lines, labels, loc=loc) return lines
def plots(data, **kwargs): """ simple wrapper plot with labels and skip x :param yonly_or_xy: :param kwargs: :return: """ labels = kwargs.pop('labels', '') loc = kwargs.pop('loc', 1) # if len(yonly_or_xy) == 1: # x = range(len(yonly_or_xy)) # y = yonly_or_xy # else: # x = yonly_or_xy[0] # y = yonly_or_xy[1:] lines = plt.plot(np.asarray(data).T, **kwargs) if labels: plt.legend(lines, labels, loc=loc) return lines
[ "simple", "wrapper", "plot", "with", "labels", "and", "skip", "x", ":", "param", "yonly_or_xy", ":", ":", "param", "kwargs", ":", ":", "return", ":" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/plt/ploting.py#L17-L37
[ "def", "plots", "(", "data", ",", "*", "*", "kwargs", ")", ":", "labels", "=", "kwargs", ".", "pop", "(", "'labels'", ",", "''", ")", "loc", "=", "kwargs", ".", "pop", "(", "'loc'", ",", "1", ")", "# if len(yonly_or_xy) == 1:", "# x = range(len(yonly_or_xy))", "# y = yonly_or_xy", "# else:", "# x = yonly_or_xy[0]", "# y = yonly_or_xy[1:]", "lines", "=", "plt", ".", "plot", "(", "np", ".", "asarray", "(", "data", ")", ".", "T", ",", "*", "*", "kwargs", ")", "if", "labels", ":", "plt", ".", "legend", "(", "lines", ",", "labels", ",", "loc", "=", "loc", ")", "return", "lines" ]
408520867179f99b3158b57520e2619f3fecd69b
valid
imshow_grid
:param images: nhwc :return:
snipy/plt/ploting.py
def imshow_grid(images, grid=None, showfun=None, **opt): """ :param images: nhwc :return: """ # assert images.ndim == 4 or list showfun = showfun or plt.imshow count = len(images) grid = grid or grid_recommend(count, sorted(images[0].shape[:2])) res = [] for i, img in enumerate(images): # grid row first index plt.subplot2grid(grid, (i % grid[0], i // grid[0])) res.append(showfun(img.squeeze(), **opt)) return res
def imshow_grid(images, grid=None, showfun=None, **opt): """ :param images: nhwc :return: """ # assert images.ndim == 4 or list showfun = showfun or plt.imshow count = len(images) grid = grid or grid_recommend(count, sorted(images[0].shape[:2])) res = [] for i, img in enumerate(images): # grid row first index plt.subplot2grid(grid, (i % grid[0], i // grid[0])) res.append(showfun(img.squeeze(), **opt)) return res
[ ":", "param", "images", ":", "nhwc", ":", "return", ":" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/plt/ploting.py#L48-L64
[ "def", "imshow_grid", "(", "images", ",", "grid", "=", "None", ",", "showfun", "=", "None", ",", "*", "*", "opt", ")", ":", "# assert images.ndim == 4 or list", "showfun", "=", "showfun", "or", "plt", ".", "imshow", "count", "=", "len", "(", "images", ")", "grid", "=", "grid", "or", "grid_recommend", "(", "count", ",", "sorted", "(", "images", "[", "0", "]", ".", "shape", "[", ":", "2", "]", ")", ")", "res", "=", "[", "]", "for", "i", ",", "img", "in", "enumerate", "(", "images", ")", ":", "# grid row first index", "plt", ".", "subplot2grid", "(", "grid", ",", "(", "i", "%", "grid", "[", "0", "]", ",", "i", "//", "grid", "[", "0", "]", ")", ")", "res", ".", "append", "(", "showfun", "(", "img", ".", "squeeze", "(", ")", ",", "*", "*", "opt", ")", ")", "return", "res" ]
408520867179f99b3158b57520e2619f3fecd69b
valid
plt_range
for i in plot_range(n): plt.imshow(imgs[i]) left arrow yield prev value other key yield next value :param args: :return:
snipy/plt/ploting.py
def plt_range(*args, **kwargs): """ for i in plot_range(n): plt.imshow(imgs[i]) left arrow yield prev value other key yield next value :param args: :return: """ wait = kwargs.pop('wait', True) if not wait: # no interactive just pass range for i in progress(range(*args)): yield i return class _holder(object): pass hold = _holder() hold.i = 0 hold.done = False def press(event): # import sys # sys.stdout.flush() hold.i += -1 if event.key == 'left' else 1 hold.i = 0 if hold.i < 0 else hold.i def onclose(event): hold.done = True fig = kwargs.pop('fig', None) figsize = kwargs.pop('figsize', None) if fig is None: fig = plt.gcf() if figsize: fig.set_size_inches(figsize) elif isinstance(fig, (int, str)): if figsize: fig = plt.figure(fig, figsize=figsize) else: fig = plt.figure(fig) elif isinstance(fig, plt.Figure): if figsize: fig.set_size_inches(figsize) else: raise ValueError onkey_fig(press, fig) onclose_fig(onclose, fig) ranges = range(*args) l = len(ranges) while hold.i < l: print('hold.i', ranges[hold.i]) yield ranges[hold.i] # yield first without keypress before = hold.i while before == hold.i: while not fig.waitforbuttonpress(0.01): if hold.done: return while fig.waitforbuttonpress(0.1): if hold.done: return
def plt_range(*args, **kwargs): """ for i in plot_range(n): plt.imshow(imgs[i]) left arrow yield prev value other key yield next value :param args: :return: """ wait = kwargs.pop('wait', True) if not wait: # no interactive just pass range for i in progress(range(*args)): yield i return class _holder(object): pass hold = _holder() hold.i = 0 hold.done = False def press(event): # import sys # sys.stdout.flush() hold.i += -1 if event.key == 'left' else 1 hold.i = 0 if hold.i < 0 else hold.i def onclose(event): hold.done = True fig = kwargs.pop('fig', None) figsize = kwargs.pop('figsize', None) if fig is None: fig = plt.gcf() if figsize: fig.set_size_inches(figsize) elif isinstance(fig, (int, str)): if figsize: fig = plt.figure(fig, figsize=figsize) else: fig = plt.figure(fig) elif isinstance(fig, plt.Figure): if figsize: fig.set_size_inches(figsize) else: raise ValueError onkey_fig(press, fig) onclose_fig(onclose, fig) ranges = range(*args) l = len(ranges) while hold.i < l: print('hold.i', ranges[hold.i]) yield ranges[hold.i] # yield first without keypress before = hold.i while before == hold.i: while not fig.waitforbuttonpress(0.01): if hold.done: return while fig.waitforbuttonpress(0.1): if hold.done: return
[ "for", "i", "in", "plot_range", "(", "n", ")", ":", "plt", ".", "imshow", "(", "imgs", "[", "i", "]", ")" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/plt/ploting.py#L69-L134
[ "def", "plt_range", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "wait", "=", "kwargs", ".", "pop", "(", "'wait'", ",", "True", ")", "if", "not", "wait", ":", "# no interactive just pass range", "for", "i", "in", "progress", "(", "range", "(", "*", "args", ")", ")", ":", "yield", "i", "return", "class", "_holder", "(", "object", ")", ":", "pass", "hold", "=", "_holder", "(", ")", "hold", ".", "i", "=", "0", "hold", ".", "done", "=", "False", "def", "press", "(", "event", ")", ":", "# import sys", "# sys.stdout.flush()", "hold", ".", "i", "+=", "-", "1", "if", "event", ".", "key", "==", "'left'", "else", "1", "hold", ".", "i", "=", "0", "if", "hold", ".", "i", "<", "0", "else", "hold", ".", "i", "def", "onclose", "(", "event", ")", ":", "hold", ".", "done", "=", "True", "fig", "=", "kwargs", ".", "pop", "(", "'fig'", ",", "None", ")", "figsize", "=", "kwargs", ".", "pop", "(", "'figsize'", ",", "None", ")", "if", "fig", "is", "None", ":", "fig", "=", "plt", ".", "gcf", "(", ")", "if", "figsize", ":", "fig", ".", "set_size_inches", "(", "figsize", ")", "elif", "isinstance", "(", "fig", ",", "(", "int", ",", "str", ")", ")", ":", "if", "figsize", ":", "fig", "=", "plt", ".", "figure", "(", "fig", ",", "figsize", "=", "figsize", ")", "else", ":", "fig", "=", "plt", ".", "figure", "(", "fig", ")", "elif", "isinstance", "(", "fig", ",", "plt", ".", "Figure", ")", ":", "if", "figsize", ":", "fig", ".", "set_size_inches", "(", "figsize", ")", "else", ":", "raise", "ValueError", "onkey_fig", "(", "press", ",", "fig", ")", "onclose_fig", "(", "onclose", ",", "fig", ")", "ranges", "=", "range", "(", "*", "args", ")", "l", "=", "len", "(", "ranges", ")", "while", "hold", ".", "i", "<", "l", ":", "print", "(", "'hold.i'", ",", "ranges", "[", "hold", ".", "i", "]", ")", "yield", "ranges", "[", "hold", ".", "i", "]", "# yield first without keypress", "before", "=", "hold", ".", "i", "while", "before", "==", "hold", ".", "i", ":", "while", "not", "fig", ".", "waitforbuttonpress", "(", "0.01", ")", ":", "if", "hold", ".", "done", ":", "return", "while", "fig", ".", "waitforbuttonpress", "(", "0.1", ")", ":", "if", "hold", ".", "done", ":", "return" ]
408520867179f99b3158b57520e2619f3fecd69b
valid
plot_pause
todo : add some example :param timeout: wait time. if None, blocking :param msg: :return:
snipy/plt/ploting.py
def plot_pause(timeout=None, msg=''): """ todo : add some example :param timeout: wait time. if None, blocking :param msg: :return: """ if timeout is not None: print(msg or 'Press key for continue in time {}'.format(timeout)) plt.waitforbuttonpress(timeout=timeout) return True print(msg or 'Press key for continue') while not plt.waitforbuttonpress(timeout=0.01): if not plt.get_fignums(): return False return len(plt.get_fignums()) != 0
def plot_pause(timeout=None, msg=''): """ todo : add some example :param timeout: wait time. if None, blocking :param msg: :return: """ if timeout is not None: print(msg or 'Press key for continue in time {}'.format(timeout)) plt.waitforbuttonpress(timeout=timeout) return True print(msg or 'Press key for continue') while not plt.waitforbuttonpress(timeout=0.01): if not plt.get_fignums(): return False return len(plt.get_fignums()) != 0
[ "todo", ":", "add", "some", "example", ":", "param", "timeout", ":", "wait", "time", ".", "if", "None", "blocking", ":", "param", "msg", ":", ":", "return", ":" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/plt/ploting.py#L141-L158
[ "def", "plot_pause", "(", "timeout", "=", "None", ",", "msg", "=", "''", ")", ":", "if", "timeout", "is", "not", "None", ":", "print", "(", "msg", "or", "'Press key for continue in time {}'", ".", "format", "(", "timeout", ")", ")", "plt", ".", "waitforbuttonpress", "(", "timeout", "=", "timeout", ")", "return", "True", "print", "(", "msg", "or", "'Press key for continue'", ")", "while", "not", "plt", ".", "waitforbuttonpress", "(", "timeout", "=", "0.01", ")", ":", "if", "not", "plt", ".", "get_fignums", "(", ")", ":", "return", "False", "return", "len", "(", "plt", ".", "get_fignums", "(", ")", ")", "!=", "0" ]
408520867179f99b3158b57520e2619f3fecd69b
valid
flat_images
convert batch image to flat image with margin inserted [B,h,w,c] => [H,W,c] :param images: :param grid: patch grid cell size of (Row, Col) :param bfill: board filling value :param bsz: int or (int, int) board size :return: flatted image
snipy/plt/ploting.py
def flat_images(images, grid=None, bfill=1.0, bsz=(1, 1)): """ convert batch image to flat image with margin inserted [B,h,w,c] => [H,W,c] :param images: :param grid: patch grid cell size of (Row, Col) :param bfill: board filling value :param bsz: int or (int, int) board size :return: flatted image """ if images.ndim == 4 and images.shape[-1] == 1: images = images.squeeze(axis=-1) grid = grid or grid_recommend(len(images), sorted(images[0].shape[:2])) if not isinstance(bsz, (tuple, list)): bsz = (bsz, bsz) # np.empty() imshape = list(images.shape) imshape[0] = grid[0] * grid[1] imshape[1] += bsz[0] imshape[2] += bsz[1] # data = np.empty((grid[0] * grid[1], imshape[1], imshape[2]), dtype=images.dtype) data = np.empty(imshape, dtype=images.dtype) data.fill(bfill) bslice0 = slice(0, -bsz[0]) if bsz[0] else slice(None, None) bslice1 = slice(0, -bsz[1]) if bsz[1] else slice(None, None) data[:len(images), bslice0, bslice1] = images imshape = list(grid) + imshape[1:] # [grid[0], grid[1], H, W, [Channel]] data = data.reshape(imshape) if len(imshape) == 5: data = data.transpose(0, 2, 1, 3, 4) imshape = [imshape[0]*imshape[2], imshape[1]*imshape[3], imshape[4]] else: # len == 4 data = data.transpose(0, 2, 1, 3) imshape = [imshape[0]*imshape[2], imshape[1]*imshape[3]] data = data.reshape(imshape) # remove last margin data = data[bslice0, bslice1] return data
def flat_images(images, grid=None, bfill=1.0, bsz=(1, 1)): """ convert batch image to flat image with margin inserted [B,h,w,c] => [H,W,c] :param images: :param grid: patch grid cell size of (Row, Col) :param bfill: board filling value :param bsz: int or (int, int) board size :return: flatted image """ if images.ndim == 4 and images.shape[-1] == 1: images = images.squeeze(axis=-1) grid = grid or grid_recommend(len(images), sorted(images[0].shape[:2])) if not isinstance(bsz, (tuple, list)): bsz = (bsz, bsz) # np.empty() imshape = list(images.shape) imshape[0] = grid[0] * grid[1] imshape[1] += bsz[0] imshape[2] += bsz[1] # data = np.empty((grid[0] * grid[1], imshape[1], imshape[2]), dtype=images.dtype) data = np.empty(imshape, dtype=images.dtype) data.fill(bfill) bslice0 = slice(0, -bsz[0]) if bsz[0] else slice(None, None) bslice1 = slice(0, -bsz[1]) if bsz[1] else slice(None, None) data[:len(images), bslice0, bslice1] = images imshape = list(grid) + imshape[1:] # [grid[0], grid[1], H, W, [Channel]] data = data.reshape(imshape) if len(imshape) == 5: data = data.transpose(0, 2, 1, 3, 4) imshape = [imshape[0]*imshape[2], imshape[1]*imshape[3], imshape[4]] else: # len == 4 data = data.transpose(0, 2, 1, 3) imshape = [imshape[0]*imshape[2], imshape[1]*imshape[3]] data = data.reshape(imshape) # remove last margin data = data[bslice0, bslice1] return data
[ "convert", "batch", "image", "to", "flat", "image", "with", "margin", "inserted", "[", "B", "h", "w", "c", "]", "=", ">", "[", "H", "W", "c", "]", ":", "param", "images", ":", ":", "param", "grid", ":", "patch", "grid", "cell", "size", "of", "(", "Row", "Col", ")", ":", "param", "bfill", ":", "board", "filling", "value", ":", "param", "bsz", ":", "int", "or", "(", "int", "int", ")", "board", "size", ":", "return", ":", "flatted", "image" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/plt/ploting.py#L169-L214
[ "def", "flat_images", "(", "images", ",", "grid", "=", "None", ",", "bfill", "=", "1.0", ",", "bsz", "=", "(", "1", ",", "1", ")", ")", ":", "if", "images", ".", "ndim", "==", "4", "and", "images", ".", "shape", "[", "-", "1", "]", "==", "1", ":", "images", "=", "images", ".", "squeeze", "(", "axis", "=", "-", "1", ")", "grid", "=", "grid", "or", "grid_recommend", "(", "len", "(", "images", ")", ",", "sorted", "(", "images", "[", "0", "]", ".", "shape", "[", ":", "2", "]", ")", ")", "if", "not", "isinstance", "(", "bsz", ",", "(", "tuple", ",", "list", ")", ")", ":", "bsz", "=", "(", "bsz", ",", "bsz", ")", "# np.empty()", "imshape", "=", "list", "(", "images", ".", "shape", ")", "imshape", "[", "0", "]", "=", "grid", "[", "0", "]", "*", "grid", "[", "1", "]", "imshape", "[", "1", "]", "+=", "bsz", "[", "0", "]", "imshape", "[", "2", "]", "+=", "bsz", "[", "1", "]", "# data = np.empty((grid[0] * grid[1], imshape[1], imshape[2]), dtype=images.dtype)", "data", "=", "np", ".", "empty", "(", "imshape", ",", "dtype", "=", "images", ".", "dtype", ")", "data", ".", "fill", "(", "bfill", ")", "bslice0", "=", "slice", "(", "0", ",", "-", "bsz", "[", "0", "]", ")", "if", "bsz", "[", "0", "]", "else", "slice", "(", "None", ",", "None", ")", "bslice1", "=", "slice", "(", "0", ",", "-", "bsz", "[", "1", "]", ")", "if", "bsz", "[", "1", "]", "else", "slice", "(", "None", ",", "None", ")", "data", "[", ":", "len", "(", "images", ")", ",", "bslice0", ",", "bslice1", "]", "=", "images", "imshape", "=", "list", "(", "grid", ")", "+", "imshape", "[", "1", ":", "]", "# [grid[0], grid[1], H, W, [Channel]]", "data", "=", "data", ".", "reshape", "(", "imshape", ")", "if", "len", "(", "imshape", ")", "==", "5", ":", "data", "=", "data", ".", "transpose", "(", "0", ",", "2", ",", "1", ",", "3", ",", "4", ")", "imshape", "=", "[", "imshape", "[", "0", "]", "*", "imshape", "[", "2", "]", ",", "imshape", "[", "1", "]", "*", "imshape", "[", "3", "]", ",", "imshape", "[", "4", "]", "]", "else", ":", "# len == 4", "data", "=", "data", ".", "transpose", "(", "0", ",", "2", ",", "1", ",", "3", ")", "imshape", "=", "[", "imshape", "[", "0", "]", "*", "imshape", "[", "2", "]", ",", "imshape", "[", "1", "]", "*", "imshape", "[", "3", "]", "]", "data", "=", "data", ".", "reshape", "(", "imshape", ")", "# remove last margin", "data", "=", "data", "[", "bslice0", ",", "bslice1", "]", "return", "data" ]
408520867179f99b3158b57520e2619f3fecd69b
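A minimal usage sketch for the flat_images record above; the import path is inferred from the record's path field (snipy/plt/ploting.py) and the random batch is illustrative only:

import numpy as np
from snipy.plt.ploting import flat_images   # module path assumed from the path field

batch = np.random.rand(12, 32, 32, 3)       # [B, h, w, c] toy batch
tiled = flat_images(batch, grid=(3, 4), bfill=1.0, bsz=(1, 1))

# each cell becomes (h + 1, w + 1); the trailing margin row/column is trimmed off
print(tiled.shape)                          # (3*33 - 1, 4*33 - 1, 3) == (98, 131, 3)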
valid
imshow_flat
imshow after applying flat_images :param images: [bhwc] :param grid: None for auto grid :param showfun: plt.imshow :param bfill: color for board fill :param bsz: size of board :param opt: option for showfun :return:
snipy/plt/ploting.py
def imshow_flat(images, grid=None, showfun=None, bfill=1.0, bsz=(1,1), **opt): """ imshow after applying flat_images :param images: [bhwc] :param grid: None for auto grid :param showfun: plt.imshow :param bfill: color for board fill :param bsz: size of board :param opt: option for showfun :return: """ showfun = showfun or plt.imshow count = len(images) # decide grid shape if need pick one grid = grid or grid_recommend(count, ratio=sorted(images[0].shape[:2])) flatted = flat_images(images, grid, bfill=bfill, bsz=bsz) res = showfun(flatted, **opt) plt.draw()
def imshow_flat(images, grid=None, showfun=None, bfill=1.0, bsz=(1,1), **opt): """ imshow after applying flat_images :param images: [bhwc] :param grid: None for auto grid :param showfun: plt.imshow :param bfill: color for board fill :param bsz: size of board :param opt: option for showfun :return: """ showfun = showfun or plt.imshow count = len(images) # decide grid shape if need pick one grid = grid or grid_recommend(count, ratio=sorted(images[0].shape[:2])) flatted = flat_images(images, grid, bfill=bfill, bsz=bsz) res = showfun(flatted, **opt) plt.draw()
[ "imshow", "after", "applying", "flat_images", ":", "param", "images", ":", "[", "bhwc", "]", ":", "param", "grid", ":", "None", "for", "auto", "grid", ":", "param", "showfun", ":", "plt", ".", "imshow", ":", "param", "bfill", ":", "color", "for", "board", "fill", ":", "param", "bsz", ":", "size", "of", "board", ":", "param", "opt", ":", "option", "for", "showfun", ":", "return", ":" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/plt/ploting.py#L217-L236
[ "def", "imshow_flat", "(", "images", ",", "grid", "=", "None", ",", "showfun", "=", "None", ",", "bfill", "=", "1.0", ",", "bsz", "=", "(", "1", ",", "1", ")", ",", "*", "*", "opt", ")", ":", "showfun", "=", "showfun", "or", "plt", ".", "imshow", "count", "=", "len", "(", "images", ")", "# decide grid shape if need pick one", "grid", "=", "grid", "or", "grid_recommend", "(", "count", ",", "ratio", "=", "sorted", "(", "images", "[", "0", "]", ".", "shape", "[", ":", "2", "]", ")", ")", "flatted", "=", "flat_images", "(", "images", ",", "grid", ",", "bfill", "=", "bfill", ",", "bsz", "=", "bsz", ")", "res", "=", "showfun", "(", "flatted", ",", "*", "*", "opt", ")", "plt", ".", "draw", "(", ")" ]
408520867179f99b3158b57520e2619f3fecd69b
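An illustrative call to imshow_flat, again assuming the snipy.plt.ploting import path; extra keyword arguments such as cmap are forwarded to plt.imshow through **opt:

import numpy as np
import matplotlib.pyplot as plt
from snipy.plt.ploting import imshow_flat

patches = np.random.rand(16, 28, 28)        # grayscale batch, [B, h, w]
imshow_flat(patches, grid=(4, 4), bfill=0.5, bsz=2, cmap='gray')
plt.show()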
valid
matshow
imshow without interpolation like as matshow :param args: :param kwargs: :return:
snipy/plt/ploting.py
def matshow(*args, **kwargs): """ imshow without interpolation like as matshow :param args: :param kwargs: :return: """ kwargs['interpolation'] = kwargs.pop('interpolation', 'none') return plt.imshow(*args, **kwargs)
def matshow(*args, **kwargs): """ imshow without interpolation like as matshow :param args: :param kwargs: :return: """ kwargs['interpolation'] = kwargs.pop('interpolation', 'none') return plt.imshow(*args, **kwargs)
[ "imshow", "without", "interpolation", "like", "as", "matshow", ":", "param", "args", ":", ":", "param", "kwargs", ":", ":", "return", ":" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/plt/ploting.py#L279-L287
[ "def", "matshow", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'interpolation'", "]", "=", "kwargs", ".", "pop", "(", "'interpolation'", ",", "'none'", ")", "return", "plt", ".", "imshow", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
408520867179f99b3158b57520e2619f3fecd69b
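A short sketch of the matshow wrapper; any extra keyword arguments pass straight through to plt.imshow, with interpolation pinned to 'none':

import numpy as np
import matplotlib.pyplot as plt
from snipy.plt.ploting import matshow

matshow(np.arange(16).reshape(4, 4), cmap='viridis')   # no interpolation smoothing
plt.colorbar()
plt.show()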
valid
imbox
draw boundary box :param xy: start index xy (ji) :param w: width :param h: height :param angle: :param kwargs: :return:
snipy/plt/ploting.py
def imbox(xy, w, h, angle=0.0, **kwargs): """ draw boundary box :param xy: start index xy (ji) :param w: width :param h: height :param angle: :param kwargs: :return: """ from matplotlib.patches import Rectangle return imbound(Rectangle, xy, w, h, angle, **kwargs)
def imbox(xy, w, h, angle=0.0, **kwargs): """ draw boundary box :param xy: start index xy (ji) :param w: width :param h: height :param angle: :param kwargs: :return: """ from matplotlib.patches import Rectangle return imbound(Rectangle, xy, w, h, angle, **kwargs)
[ "draw", "boundary", "box", ":", "param", "xy", ":", "start", "index", "xy", "(", "ji", ")", ":", "param", "w", ":", "width", ":", "param", "h", ":", "height", ":", "param", "angle", ":", ":", "param", "kwargs", ":", ":", "return", ":" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/plt/ploting.py#L413-L424
[ "def", "imbox", "(", "xy", ",", "w", ",", "h", ",", "angle", "=", "0.0", ",", "*", "*", "kwargs", ")", ":", "from", "matplotlib", ".", "patches", "import", "Rectangle", "return", "imbound", "(", "Rectangle", ",", "xy", ",", "w", ",", "h", ",", "angle", ",", "*", "*", "kwargs", ")" ]
408520867179f99b3158b57520e2619f3fecd69b
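An illustrative imbox call, assuming that imbound/impatch (defined elsewhere in the same module) attach the Rectangle patch to the current axes:

import numpy as np
import matplotlib.pyplot as plt
from snipy.plt.ploting import imbox

plt.imshow(np.random.rand(100, 100), cmap='gray')
imbox((20, 30), 40, 25, color='red')        # xy, width, height; color becomes the edge color
plt.show()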
valid
imbound
:param clspatch: :param args: :param kwargs: :return:
snipy/plt/ploting.py
def imbound(clspatch, *args, **kwargs): """ :param clspatch: :param args: :param kwargs: :return: """ # todo : add example c = kwargs.pop('color', kwargs.get('edgecolor', None)) kwargs.update(facecolor='none', edgecolor=c) return impatch(clspatch, *args, **kwargs)
def imbound(clspatch, *args, **kwargs): """ :param clspatch: :param args: :param kwargs: :return: """ # todo : add example c = kwargs.pop('color', kwargs.get('edgecolor', None)) kwargs.update(facecolor='none', edgecolor=c) return impatch(clspatch, *args, **kwargs)
[ ":", "param", "clspatch", ":", ":", "param", "args", ":", ":", "param", "kwargs", ":", ":", "return", ":" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/plt/ploting.py#L433-L444
[ "def", "imbound", "(", "clspatch", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# todo : add example", "c", "=", "kwargs", ".", "pop", "(", "'color'", ",", "kwargs", ".", "get", "(", "'edgecolor'", ",", "None", ")", ")", "kwargs", ".", "update", "(", "facecolor", "=", "'none'", ",", "edgecolor", "=", "c", ")", "return", "impatch", "(", "clspatch", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
408520867179f99b3158b57520e2619f3fecd69b
valid
imslic
slic args : n_segments=100, compactness=10., max_iter=10, sigma=0, spacing=None, multichannel=True, convert2lab=None, enforce_connectivity=True, min_size_factor=0.5, max_size_factor=3, slic_zero=False mark_boundaries args: label_img, color=(1, 1, 0), outline_color=None, mode='outer', background_label=0 imshow args: cmap=None, norm=None, aspect=None, interpolation=None, alpha=None, vmin=None, vmax=None, origin=None, extent=None, shape=None, filternorm=1, filterrad=4.0, imlim=None, resample=None, url=None, hold=None, data=None, :param img: :param slicarg: :param slickw: :return:
snipy/plt/ploting.py
def imslic(img, n_segments=100, aspect=None): """ slic args : n_segments=100, compactness=10., max_iter=10, sigma=0, spacing=None, multichannel=True, convert2lab=None, enforce_connectivity=True, min_size_factor=0.5, max_size_factor=3, slic_zero=False mark_boundaries args: label_img, color=(1, 1, 0), outline_color=None, mode='outer', background_label=0 imshow args: cmap=None, norm=None, aspect=None, interpolation=None, alpha=None, vmin=None, vmax=None, origin=None, extent=None, shape=None, filternorm=1, filterrad=4.0, imlim=None, resample=None, url=None, hold=None, data=None, :param img: :param slicarg: :param slickw: :return: """ from skimage.segmentation import (slic, mark_boundaries) from skimage.morphology import (dilation) if img.ndim == 2 or img.ndim == 3 and img.shape[-1] == 1: imz = np.stack([img, img, img], 2) else: imz = img slics = slic(imz, n_segments=n_segments) boundaries = mark_boundaries(imz, slics) return plt.imshow(boundaries, aspect=aspect)
def imslic(img, n_segments=100, aspect=None): """ slic args : n_segments=100, compactness=10., max_iter=10, sigma=0, spacing=None, multichannel=True, convert2lab=None, enforce_connectivity=True, min_size_factor=0.5, max_size_factor=3, slic_zero=False mark_boundaries args: label_img, color=(1, 1, 0), outline_color=None, mode='outer', background_label=0 imshow args: cmap=None, norm=None, aspect=None, interpolation=None, alpha=None, vmin=None, vmax=None, origin=None, extent=None, shape=None, filternorm=1, filterrad=4.0, imlim=None, resample=None, url=None, hold=None, data=None, :param img: :param slicarg: :param slickw: :return: """ from skimage.segmentation import (slic, mark_boundaries) from skimage.morphology import (dilation) if img.ndim == 2 or img.ndim == 3 and img.shape[-1] == 1: imz = np.stack([img, img, img], 2) else: imz = img slics = slic(imz, n_segments=n_segments) boundaries = mark_boundaries(imz, slics) return plt.imshow(boundaries, aspect=aspect)
[ "slic", "args", ":", "n_segments", "=", "100", "compactness", "=", "10", ".", "max_iter", "=", "10", "sigma", "=", "0", "spacing", "=", "None", "multichannel", "=", "True", "convert2lab", "=", "None", "enforce_connectivity", "=", "True", "min_size_factor", "=", "0", ".", "5", "max_size_factor", "=", "3", "slic_zero", "=", "False" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/plt/ploting.py#L476-L509
[ "def", "imslic", "(", "img", ",", "n_segments", "=", "100", ",", "aspect", "=", "None", ")", ":", "from", "skimage", ".", "segmentation", "import", "(", "slic", ",", "mark_boundaries", ")", "from", "skimage", ".", "morphology", "import", "(", "dilation", ")", "if", "img", ".", "ndim", "==", "2", "or", "img", ".", "ndim", "==", "3", "and", "img", ".", "shape", "[", "-", "1", "]", "==", "1", ":", "imz", "=", "np", ".", "stack", "(", "[", "img", ",", "img", ",", "img", "]", ",", "2", ")", "else", ":", "imz", "=", "img", "slics", "=", "slic", "(", "imz", ",", "n_segments", "=", "n_segments", ")", "boundaries", "=", "mark_boundaries", "(", "imz", ",", "slics", ")", "return", "plt", ".", "imshow", "(", "boundaries", ",", "aspect", "=", "aspect", ")" ]
408520867179f99b3158b57520e2619f3fecd69b
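A usage sketch for imslic; it needs scikit-image in addition to matplotlib, and the sample image below is just a stand-in:

import matplotlib.pyplot as plt
from skimage import data
from snipy.plt.ploting import imslic

imslic(data.astronaut(), n_segments=150)    # RGB image -> superpixel boundary overlay
plt.show()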
valid
imslic2
slic args : n_segments=100, compactness=10., max_iter=10, sigma=0, spacing=None, multichannel=True, convert2lab=None, enforce_connectivity=True, min_size_factor=0.5, max_size_factor=3, slic_zero=False mark_boundaries args: label_img, color=(1, 1, 0), outline_color=None, mode='outer', background_label=0 imshow args: cmap=None, norm=None, aspect=None, interpolation=None, alpha=None, vmin=None, vmax=None, origin=None, extent=None, shape=None, filternorm=1, filterrad=4.0, imlim=None, resample=None, url=None, hold=None, data=None, :param img: :param slicarg: :param slickw: :return:
snipy/plt/ploting.py
def imslic2(img, n_segments=100, color=None, outline_color=None, mode='thick', **kwargs): """ slic args : n_segments=100, compactness=10., max_iter=10, sigma=0, spacing=None, multichannel=True, convert2lab=None, enforce_connectivity=True, min_size_factor=0.5, max_size_factor=3, slic_zero=False mark_boundaries args: label_img, color=(1, 1, 0), outline_color=None, mode='outer', background_label=0 imshow args: cmap=None, norm=None, aspect=None, interpolation=None, alpha=None, vmin=None, vmax=None, origin=None, extent=None, shape=None, filternorm=1, filterrad=4.0, imlim=None, resample=None, url=None, hold=None, data=None, :param img: :param slicarg: :param slickw: :return: """ from skimage.segmentation import (slic, find_boundaries) # mark_boundaries from skimage.morphology import (dilation) kwslic = {'compactness', 'max_iter', 'sigma', 'spacing', 'multichannel', 'convert2lab', 'enforce_connectivity', 'min_size_factor', 'max_size_factor', 'slic_zero=False'} imshowkw = {'cmap', 'norm', 'aspect', 'interpolation', 'alpha', 'vmin', 'vmax', 'origin', 'extent', 'shape', 'filternorm', 'filterrad', 'imlim', 'resample', 'url', 'hold', 'data'} slicarg = {k: v for k, v in kwargs.iteritems() if k in kwslic} imshowarg = {k: v for k, v in kwargs.iteritems() if k in imshowkw} if img.ndim == 2 or img.ndim == 3 and img.shape[-1] == 1: imz = np.stack([img, img, img], 2) color = color or 1. else: imgz = img color = color or (1,1,0) slics = slic(imz, n_segments=n_segments, **slicarg) boundaries = find_boundaries(slics, mode=mode) if outline_color is not None: outlines = dilation(boundaries, np.ones((3, 3), np.uint8)) img[outlines] = outline_color img[boundaries] = color return plt.imshow(img, **imshowarg)
def imslic2(img, n_segments=100, color=None, outline_color=None, mode='thick', **kwargs): """ slic args : n_segments=100, compactness=10., max_iter=10, sigma=0, spacing=None, multichannel=True, convert2lab=None, enforce_connectivity=True, min_size_factor=0.5, max_size_factor=3, slic_zero=False mark_boundaries args: label_img, color=(1, 1, 0), outline_color=None, mode='outer', background_label=0 imshow args: cmap=None, norm=None, aspect=None, interpolation=None, alpha=None, vmin=None, vmax=None, origin=None, extent=None, shape=None, filternorm=1, filterrad=4.0, imlim=None, resample=None, url=None, hold=None, data=None, :param img: :param slicarg: :param slickw: :return: """ from skimage.segmentation import (slic, find_boundaries) # mark_boundaries from skimage.morphology import (dilation) kwslic = {'compactness', 'max_iter', 'sigma', 'spacing', 'multichannel', 'convert2lab', 'enforce_connectivity', 'min_size_factor', 'max_size_factor', 'slic_zero=False'} imshowkw = {'cmap', 'norm', 'aspect', 'interpolation', 'alpha', 'vmin', 'vmax', 'origin', 'extent', 'shape', 'filternorm', 'filterrad', 'imlim', 'resample', 'url', 'hold', 'data'} slicarg = {k: v for k, v in kwargs.iteritems() if k in kwslic} imshowarg = {k: v for k, v in kwargs.iteritems() if k in imshowkw} if img.ndim == 2 or img.ndim == 3 and img.shape[-1] == 1: imz = np.stack([img, img, img], 2) color = color or 1. else: imgz = img color = color or (1,1,0) slics = slic(imz, n_segments=n_segments, **slicarg) boundaries = find_boundaries(slics, mode=mode) if outline_color is not None: outlines = dilation(boundaries, np.ones((3, 3), np.uint8)) img[outlines] = outline_color img[boundaries] = color return plt.imshow(img, **imshowarg)
[ "slic", "args", ":", "n_segments", "=", "100", "compactness", "=", "10", ".", "max_iter", "=", "10", "sigma", "=", "0", "spacing", "=", "None", "multichannel", "=", "True", "convert2lab", "=", "None", "enforce_connectivity", "=", "True", "min_size_factor", "=", "0", ".", "5", "max_size_factor", "=", "3", "slic_zero", "=", "False" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/plt/ploting.py#L512-L559
[ "def", "imslic2", "(", "img", ",", "n_segments", "=", "100", ",", "color", "=", "None", ",", "outline_color", "=", "None", ",", "mode", "=", "'thick'", ",", "*", "*", "kwargs", ")", ":", "from", "skimage", ".", "segmentation", "import", "(", "slic", ",", "find_boundaries", ")", "# mark_boundaries", "from", "skimage", ".", "morphology", "import", "(", "dilation", ")", "kwslic", "=", "{", "'compactness'", ",", "'max_iter'", ",", "'sigma'", ",", "'spacing'", ",", "'multichannel'", ",", "'convert2lab'", ",", "'enforce_connectivity'", ",", "'min_size_factor'", ",", "'max_size_factor'", ",", "'slic_zero=False'", "}", "imshowkw", "=", "{", "'cmap'", ",", "'norm'", ",", "'aspect'", ",", "'interpolation'", ",", "'alpha'", ",", "'vmin'", ",", "'vmax'", ",", "'origin'", ",", "'extent'", ",", "'shape'", ",", "'filternorm'", ",", "'filterrad'", ",", "'imlim'", ",", "'resample'", ",", "'url'", ",", "'hold'", ",", "'data'", "}", "slicarg", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "kwargs", ".", "iteritems", "(", ")", "if", "k", "in", "kwslic", "}", "imshowarg", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "kwargs", ".", "iteritems", "(", ")", "if", "k", "in", "imshowkw", "}", "if", "img", ".", "ndim", "==", "2", "or", "img", ".", "ndim", "==", "3", "and", "img", ".", "shape", "[", "-", "1", "]", "==", "1", ":", "imz", "=", "np", ".", "stack", "(", "[", "img", ",", "img", ",", "img", "]", ",", "2", ")", "color", "=", "color", "or", "1.", "else", ":", "imgz", "=", "img", "color", "=", "color", "or", "(", "1", ",", "1", ",", "0", ")", "slics", "=", "slic", "(", "imz", ",", "n_segments", "=", "n_segments", ",", "*", "*", "slicarg", ")", "boundaries", "=", "find_boundaries", "(", "slics", ",", "mode", "=", "mode", ")", "if", "outline_color", "is", "not", "None", ":", "outlines", "=", "dilation", "(", "boundaries", ",", "np", ".", "ones", "(", "(", "3", ",", "3", ")", ",", "np", ".", "uint8", ")", ")", "img", "[", "outlines", "]", "=", "outline_color", "img", "[", "boundaries", "]", "=", "color", "return", "plt", ".", "imshow", "(", "img", ",", "*", "*", "imshowarg", ")" ]
408520867179f99b3158b57520e2619f3fecd69b
valid
movie_saving
contextmanager for PlotMovieWriter Example: with movie_saving('output.mp4', dpi=100) as plot: for i in range(10): plot(data[i]) :param outfile: :param showfun: :param fig: :param tight: :param drawopt: :param dpi: :param movieopt: fps=5, codec=None, bitrate=None, extra_args=None, metadata=None :return:
snipy/plt/ploting.py
def movie_saving(outfile, showfun=imshow, fig=None, tight=True, drawopt=None, dpi=100, **movieopt): """ contextmanager for PlotMovieWriter Example: with movie_saving('output.mp4', dpi=100) as plot: for i in range(10): plot(data[i]) :param outfile: :param showfun: :param fig: :param tight: :param drawopt: :param dpi: :param movieopt: fps=5, codec=None, bitrate=None, extra_args=None, metadata=None :return: """ if tight: plot_writer = ImageMovieWriter(outfile, showfun=showfun, fig=fig, drawopt=drawopt, dpi=dpi, **movieopt) else: plot_writer = PlotMovieWriter(outfile, showfun=showfun, fig=fig, drawopt=drawopt, dpi=dpi, **movieopt) try: yield plot_writer finally: plot_writer.finish()
def movie_saving(outfile, showfun=imshow, fig=None, tight=True, drawopt=None, dpi=100, **movieopt): """ contextmanager for PlotMovieWriter Example: with movie_saving('output.mp4', dpi=100) as plot: for i in range(10): plot(data[i]) :param outfile: :param showfun: :param fig: :param tight: :param drawopt: :param dpi: :param movieopt: fps=5, codec=None, bitrate=None, extra_args=None, metadata=None :return: """ if tight: plot_writer = ImageMovieWriter(outfile, showfun=showfun, fig=fig, drawopt=drawopt, dpi=dpi, **movieopt) else: plot_writer = PlotMovieWriter(outfile, showfun=showfun, fig=fig, drawopt=drawopt, dpi=dpi, **movieopt) try: yield plot_writer finally: plot_writer.finish()
[ "contextmanager", "for", "PlotMovieWriter", "Example", ":" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/plt/ploting.py#L700-L726
[ "def", "movie_saving", "(", "outfile", ",", "showfun", "=", "imshow", ",", "fig", "=", "None", ",", "tight", "=", "True", ",", "drawopt", "=", "None", ",", "dpi", "=", "100", ",", "*", "*", "movieopt", ")", ":", "if", "tight", ":", "plot_writer", "=", "ImageMovieWriter", "(", "outfile", ",", "showfun", "=", "showfun", ",", "fig", "=", "fig", ",", "drawopt", "=", "drawopt", ",", "dpi", "=", "dpi", ",", "*", "*", "movieopt", ")", "else", ":", "plot_writer", "=", "PlotMovieWriter", "(", "outfile", ",", "showfun", "=", "showfun", ",", "fig", "=", "fig", ",", "drawopt", "=", "drawopt", ",", "dpi", "=", "dpi", ",", "*", "*", "movieopt", ")", "try", ":", "yield", "plot_writer", "finally", ":", "plot_writer", ".", "finish", "(", ")" ]
408520867179f99b3158b57520e2619f3fecd69b
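A sketch expanding the docstring's own Example; it assumes movie_saving is wrapped with contextlib.contextmanager (the decorator is not shown in this record) and that a matplotlib movie writer backend such as ffmpeg is available:

import numpy as np
from snipy.plt.ploting import movie_saving

frames = np.random.rand(10, 64, 64)         # toy grayscale frames
with movie_saving('output.mp4', dpi=100, fps=5) as plot:
    for frame in frames:
        plot(frame)                         # one call per written movie frame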
valid
put
put text on on screen a tuple as first argument tells absolute position for the text does not change TermCursor position args = list of optional position, formatting tokens and strings
snipy/term.py
def put(xy, *args): """ put text on on screen a tuple as first argument tells absolute position for the text does not change TermCursor position args = list of optional position, formatting tokens and strings """ cmd = [TermCursor.save, TermCursor.move(*xy), ''.join(args), TermCursor.restore] write(''.join(cmd))
def put(xy, *args): """ put text on on screen a tuple as first argument tells absolute position for the text does not change TermCursor position args = list of optional position, formatting tokens and strings """ cmd = [TermCursor.save, TermCursor.move(*xy), ''.join(args), TermCursor.restore] write(''.join(cmd))
[ "put", "text", "on", "on", "screen", "a", "tuple", "as", "first", "argument", "tells", "absolute", "position", "for", "the", "text", "does", "not", "change", "TermCursor", "position", "args", "=", "list", "of", "optional", "position", "formatting", "tokens", "and", "strings" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/term.py#L135-L143
[ "def", "put", "(", "xy", ",", "*", "args", ")", ":", "cmd", "=", "[", "TermCursor", ".", "save", ",", "TermCursor", ".", "move", "(", "*", "xy", ")", ",", "''", ".", "join", "(", "args", ")", ",", "TermCursor", ".", "restore", "]", "write", "(", "''", ".", "join", "(", "cmd", ")", ")" ]
408520867179f99b3158b57520e2619f3fecd69b
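A tiny sketch of put; the exact row/column ordering of the position tuple follows whatever TermCursor.move expects, which is not shown in this record:

from snipy.term import put

put((1, 1), 'status: OK')                   # prints at a fixed screen position, cursor position unchanged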
valid
getpassword
get user input without echo
snipy/term.py
def getpassword(prompt="Password: "): """ get user input without echo """ fd = sys.stdin.fileno() old = termios.tcgetattr(fd) new = termios.tcgetattr(fd) new[3] &= ~termios.ECHO # lflags try: termios.tcsetattr(fd, termios.TCSADRAIN, new) passwd = raw_input(prompt) finally: termios.tcsetattr(fd, termios.TCSADRAIN, old) return passwd
def getpassword(prompt="Password: "): """ get user input without echo """ fd = sys.stdin.fileno() old = termios.tcgetattr(fd) new = termios.tcgetattr(fd) new[3] &= ~termios.ECHO # lflags try: termios.tcsetattr(fd, termios.TCSADRAIN, new) passwd = raw_input(prompt) finally: termios.tcsetattr(fd, termios.TCSADRAIN, old) return passwd
[ "get", "user", "input", "without", "echo" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/term.py#L146-L160
[ "def", "getpassword", "(", "prompt", "=", "\"Password: \"", ")", ":", "fd", "=", "sys", ".", "stdin", ".", "fileno", "(", ")", "old", "=", "termios", ".", "tcgetattr", "(", "fd", ")", "new", "=", "termios", ".", "tcgetattr", "(", "fd", ")", "new", "[", "3", "]", "&=", "~", "termios", ".", "ECHO", "# lflags", "try", ":", "termios", ".", "tcsetattr", "(", "fd", ",", "termios", ".", "TCSADRAIN", ",", "new", ")", "passwd", "=", "raw_input", "(", "prompt", ")", "finally", ":", "termios", ".", "tcsetattr", "(", "fd", ",", "termios", ".", "TCSADRAIN", ",", "old", ")", "return", "passwd" ]
408520867179f99b3158b57520e2619f3fecd69b
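A usage sketch for getpassword; raw_input and termios make this Python 2, POSIX-terminal code:

from snipy.term import getpassword

secret = getpassword('Enter API token: ')   # typing is not echoed
print('token length: %d' % len(secret))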
valid
getch
get character. waiting for key
snipy/term.py
def getch(): """ get character. waiting for key """ try: termios.tcsetattr(_fd, termios.TCSANOW, _new_settings) ch = sys.stdin.read(1) finally: termios.tcsetattr(_fd, termios.TCSADRAIN, _old_settings) return ch
def getch(): """ get character. waiting for key """ try: termios.tcsetattr(_fd, termios.TCSANOW, _new_settings) ch = sys.stdin.read(1) finally: termios.tcsetattr(_fd, termios.TCSADRAIN, _old_settings) return ch
[ "get", "character", ".", "waiting", "for", "key" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/term.py#L163-L172
[ "def", "getch", "(", ")", ":", "try", ":", "termios", ".", "tcsetattr", "(", "_fd", ",", "termios", ".", "TCSANOW", ",", "_new_settings", ")", "ch", "=", "sys", ".", "stdin", ".", "read", "(", "1", ")", "finally", ":", "termios", ".", "tcsetattr", "(", "_fd", ",", "termios", ".", "TCSADRAIN", ",", "_old_settings", ")", "return", "ch" ]
408520867179f99b3158b57520e2619f3fecd69b
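A sketch of a simple key loop built on getch; it relies on the module-level _fd/_old_settings/_new_settings that snipy/term.py sets up, and on a POSIX terminal:

from snipy.term import getch

while True:
    ch = getch()                            # blocks for a single keypress
    if ch == 'q':
        break
    print('pressed: %r' % ch)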
valid
getlogger
Package or channel logger. logging.getLogger(package_name) or logg.getLogger() :param pkg: str
snipy/ilogging.py
def getlogger(pkg='', handler=None): """ 패키지 혹은 채널 로거 logging.getLogger(package_name) or logg.getLogger() :param pkg: str """ from .caller import caller if not pkg: m = caller.modulename() s = m.split('.', 1) if len(s) > 1: pkg = s[0] if haslogger(pkg): return logging.getLogger(pkg) else: # local logger = logging.getLogger(pkg) logger.addHandler(handler or default_handler) logger.setLevel(logging.DEBUG) return logger
def getlogger(pkg='', handler=None): """ 패키지 혹은 채널 로거 logging.getLogger(package_name) or logg.getLogger() :param pkg: str """ from .caller import caller if not pkg: m = caller.modulename() s = m.split('.', 1) if len(s) > 1: pkg = s[0] if haslogger(pkg): return logging.getLogger(pkg) else: # local logger = logging.getLogger(pkg) logger.addHandler(handler or default_handler) logger.setLevel(logging.DEBUG) return logger
[ "패키지", "혹은", "채널", "로거", "logging", ".", "getLogger", "(", "package_name", ")", "or", "logg", ".", "getLogger", "()", ":", "param", "pkg", ":", "str" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/ilogging.py#L90-L111
[ "def", "getlogger", "(", "pkg", "=", "''", ",", "handler", "=", "None", ")", ":", "from", ".", "caller", "import", "caller", "if", "not", "pkg", ":", "m", "=", "caller", ".", "modulename", "(", ")", "s", "=", "m", ".", "split", "(", "'.'", ",", "1", ")", "if", "len", "(", "s", ")", ">", "1", ":", "pkg", "=", "s", "[", "0", "]", "if", "haslogger", "(", "pkg", ")", ":", "return", "logging", ".", "getLogger", "(", "pkg", ")", "else", ":", "# local", "logger", "=", "logging", ".", "getLogger", "(", "pkg", ")", "logger", ".", "addHandler", "(", "handler", "or", "default_handler", ")", "logger", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "return", "logger" ]
408520867179f99b3158b57520e2619f3fecd69b
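A usage sketch for getlogger; 'myapp' is a made-up channel name for illustration:

from snipy.ilogging import getlogger

log = getlogger('myapp')                    # attaches the default handler at DEBUG on first use
log.debug('visible immediately, no extra configuration needed')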
valid
basicConfig
Once a log call has already been made through logging, basicConfig no longer takes effect, so remove the existing handlers and reconfigure. http://stackoverflow.com/questions/1943747/python-logging-before-you-run-logging-basicconfig ex) basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG) :param filename: Specifies that a FileHandler be created, using the specified filename, rather than a StreamHandler. :param filemode: Specifies the mode to open the file, if filename is specified (if filemode is unspecified, it defaults to ‘a’). :param format: Use the specified format string for the handler. (https://docs.python.org/2.7/library/logging.html#logging.basicConfig :param datefmt: Use the specified date/time format. :param level: Set the root logger level to the specified level. :param stream: Use the specified stream to initialize the StreamHandler. Note that this argument is incompatible with ‘filename’ - if both are present, ‘stream’ is ignored.
snipy/ilogging.py
def basicConfig(**kw): """logging의 로그를 한번 호출하면 basicConfig가 안먹으므로. 기존 핸들러 삭제후 재설정. http://stackoverflow.com/questions/1943747/python-logging-before-you-run-logging-basicconfig ex) basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG) :param filename: Specifies that a FileHandler be created, using the specified filename, rather than a StreamHandler. :param filemode: Specifies the mode to open the file, if filename is specified (if filemode is unspecified, it defaults to ‘a’). :param format: Use the specified format string for the handler. (https://docs.python.org/2.7/library/logging.html#logging.basicConfig :param datefmt: Use the specified date/time format. :param level: Set the root logger level to the specified level. :param stream: Use the specified stream to initialize the StreamHandler. Note that this argument is incompatible with ‘filename’ - if both are present, ‘stream’ is ignored. """ while len(logging.root.handlers) > 0: logging.root.removeHandler(logging.root.handlers[-1]) logging.basicConfig(**kw)
def basicConfig(**kw): """logging의 로그를 한번 호출하면 basicConfig가 안먹으므로. 기존 핸들러 삭제후 재설정. http://stackoverflow.com/questions/1943747/python-logging-before-you-run-logging-basicconfig ex) basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG) :param filename: Specifies that a FileHandler be created, using the specified filename, rather than a StreamHandler. :param filemode: Specifies the mode to open the file, if filename is specified (if filemode is unspecified, it defaults to ‘a’). :param format: Use the specified format string for the handler. (https://docs.python.org/2.7/library/logging.html#logging.basicConfig :param datefmt: Use the specified date/time format. :param level: Set the root logger level to the specified level. :param stream: Use the specified stream to initialize the StreamHandler. Note that this argument is incompatible with ‘filename’ - if both are present, ‘stream’ is ignored. """ while len(logging.root.handlers) > 0: logging.root.removeHandler(logging.root.handlers[-1]) logging.basicConfig(**kw)
[ "logging의", "로그를", "한번", "호출하면", "basicConfig가", "안먹으므로", ".", "기존", "핸들러", "삭제후", "재설정", ".", "http", ":", "//", "stackoverflow", ".", "com", "/", "questions", "/", "1943747", "/", "python", "-", "logging", "-", "before", "-", "you", "-", "run", "-", "logging", "-", "basicconfig", "ex", ")", "basicConfig", "(", "format", "=", "%", "(", "asctime", ")", "s", "%", "(", "message", ")", "s", "level", "=", "logging", ".", "DEBUG", ")", ":", "param", "filename", ":", "Specifies", "that", "a", "FileHandler", "be", "created", "using", "the", "specified", "filename", "rather", "than", "a", "StreamHandler", ".", ":", "param", "filemode", ":", "Specifies", "the", "mode", "to", "open", "the", "file", "if", "filename", "is", "specified", "(", "if", "filemode", "is", "unspecified", "it", "defaults", "to", "‘a’", ")", ".", ":", "param", "format", ":", "Use", "the", "specified", "format", "string", "for", "the", "handler", ".", "(", "https", ":", "//", "docs", ".", "python", ".", "org", "/", "2", ".", "7", "/", "library", "/", "logging", ".", "html#logging", ".", "basicConfig", ":", "param", "datefmt", ":", "Use", "the", "specified", "date", "/", "time", "format", ".", ":", "param", "level", ":", "Set", "the", "root", "logger", "level", "to", "the", "specified", "level", ".", ":", "param", "stream", ":", "Use", "the", "specified", "stream", "to", "initialize", "the", "StreamHandler", ".", "Note", "that", "this", "argument", "is", "incompatible", "with", "‘filename’", "-", "if", "both", "are", "present", "‘stream’", "is", "ignored", "." ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/ilogging.py#L176-L192
[ "def", "basicConfig", "(", "*", "*", "kw", ")", ":", "while", "len", "(", "logging", ".", "root", ".", "handlers", ")", ">", "0", ":", "logging", ".", "root", ".", "removeHandler", "(", "logging", ".", "root", ".", "handlers", "[", "-", "1", "]", ")", "logging", ".", "basicConfig", "(", "*", "*", "kw", ")" ]
408520867179f99b3158b57520e2619f3fecd69b
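A sketch of the reconfiguration behaviour described in the docstring, reusing its own format/level example:

import logging
from snipy.ilogging import basicConfig

logging.warning('early log call')           # under Python 2, this already configures the root logger
basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
logging.debug('now formatted with a timestamp despite the earlier call')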
valid
FormatterX.format
tweaked from source of base
snipy/ilogging.py
def format(self, record): """tweaked from source of base""" try: record.message = record.getMessage() except TypeError: # if error during msg = msg % self.args if record.args: if isinstance(record.args, collections.Mapping): record.message = record.msg.format(**record.args) else: record.message = record.msg.format(record.args) self._fmt = self.getfmt(record.levelname) if self.usesTime(): record.asctime = self.formatTime(record, self.datefmt) s = self._fmt.format(**record.__dict__) if record.exc_info: # Cache the traceback text to avoid converting it multiple times # (it's constant anyway) if not record.exc_text: record.exc_text = self.formatException(record.exc_info) if record.exc_text: if s[-1:] != '\n': s += '\n' try: s = s + record.exc_text except UnicodeError: s = s + record.exc_text.decode(sys.getfilesystemencoding(), 'replace') return s
def format(self, record): """tweaked from source of base""" try: record.message = record.getMessage() except TypeError: # if error during msg = msg % self.args if record.args: if isinstance(record.args, collections.Mapping): record.message = record.msg.format(**record.args) else: record.message = record.msg.format(record.args) self._fmt = self.getfmt(record.levelname) if self.usesTime(): record.asctime = self.formatTime(record, self.datefmt) s = self._fmt.format(**record.__dict__) if record.exc_info: # Cache the traceback text to avoid converting it multiple times # (it's constant anyway) if not record.exc_text: record.exc_text = self.formatException(record.exc_info) if record.exc_text: if s[-1:] != '\n': s += '\n' try: s = s + record.exc_text except UnicodeError: s = s + record.exc_text.decode(sys.getfilesystemencoding(), 'replace') return s
[ "tweaked", "from", "source", "of", "base" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/ilogging.py#L32-L61
[ "def", "format", "(", "self", ",", "record", ")", ":", "try", ":", "record", ".", "message", "=", "record", ".", "getMessage", "(", ")", "except", "TypeError", ":", "# if error during msg = msg % self.args", "if", "record", ".", "args", ":", "if", "isinstance", "(", "record", ".", "args", ",", "collections", ".", "Mapping", ")", ":", "record", ".", "message", "=", "record", ".", "msg", ".", "format", "(", "*", "*", "record", ".", "args", ")", "else", ":", "record", ".", "message", "=", "record", ".", "msg", ".", "format", "(", "record", ".", "args", ")", "self", ".", "_fmt", "=", "self", ".", "getfmt", "(", "record", ".", "levelname", ")", "if", "self", ".", "usesTime", "(", ")", ":", "record", ".", "asctime", "=", "self", ".", "formatTime", "(", "record", ",", "self", ".", "datefmt", ")", "s", "=", "self", ".", "_fmt", ".", "format", "(", "*", "*", "record", ".", "__dict__", ")", "if", "record", ".", "exc_info", ":", "# Cache the traceback text to avoid converting it multiple times", "# (it's constant anyway)", "if", "not", "record", ".", "exc_text", ":", "record", ".", "exc_text", "=", "self", ".", "formatException", "(", "record", ".", "exc_info", ")", "if", "record", ".", "exc_text", ":", "if", "s", "[", "-", "1", ":", "]", "!=", "'\\n'", ":", "s", "+=", "'\\n'", "try", ":", "s", "=", "s", "+", "record", ".", "exc_text", "except", "UnicodeError", ":", "s", "=", "s", "+", "record", ".", "exc_text", ".", "decode", "(", "sys", ".", "getfilesystemencoding", "(", ")", ",", "'replace'", ")", "return", "s" ]
408520867179f99b3158b57520e2619f3fecd69b
valid
getProcessOwner
getProcessOwner - Get the process owner of a pid @param pid <int> - process id @return - None if process not found or can't be determined. Otherwise, a dict: { uid - Owner UID name - Owner name, or None if one cannot be determined }
ProcessMappingScanner/__init__.py
def getProcessOwner(pid): ''' getProcessOwner - Get the process owner of a pid @param pid <int> - process id @return - None if process not found or can't be determined. Otherwise, a dict: { uid - Owner UID name - Owner name, or None if one cannot be determined } ''' try: ownerUid = os.stat('/proc/' + str(pid)).st_uid except: return None try: ownerName = pwd.getpwuid(ownerUid).pw_name except: ownerName = None return { 'uid' : ownerUid, 'name' : ownerName }
def getProcessOwner(pid): ''' getProcessOwner - Get the process owner of a pid @param pid <int> - process id @return - None if process not found or can't be determined. Otherwise, a dict: { uid - Owner UID name - Owner name, or None if one cannot be determined } ''' try: ownerUid = os.stat('/proc/' + str(pid)).st_uid except: return None try: ownerName = pwd.getpwuid(ownerUid).pw_name except: ownerName = None return { 'uid' : ownerUid, 'name' : ownerName }
[ "getProcessOwner", "-", "Get", "the", "process", "owner", "of", "a", "pid" ]
kata198/ProcessMappingScanner
python
https://github.com/kata198/ProcessMappingScanner/blob/d1735fe6746493c51aaae213b982fa96f5c5b621/ProcessMappingScanner/__init__.py#L25-L50
[ "def", "getProcessOwner", "(", "pid", ")", ":", "try", ":", "ownerUid", "=", "os", ".", "stat", "(", "'/proc/'", "+", "str", "(", "pid", ")", ")", ".", "st_uid", "except", ":", "return", "None", "try", ":", "ownerName", "=", "pwd", ".", "getpwuid", "(", "ownerUid", ")", ".", "pw_name", "except", ":", "ownerName", "=", "None", "return", "{", "'uid'", ":", "ownerUid", ",", "'name'", ":", "ownerName", "}" ]
d1735fe6746493c51aaae213b982fa96f5c5b621
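A quick self-check sketch for getProcessOwner; it inspects the current process, so it needs a Linux-style /proc filesystem:

import os
from ProcessMappingScanner import getProcessOwner

info = getProcessOwner(os.getpid())
print(info)                                 # e.g. {'uid': 1000, 'name': 'someuser'}, or None if unreadable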
valid
getProcessOwnerStr
getProcessOwner - Get Process owner of a pid as a string instead of components (#getProcessOwner) @return - Returns username if it can be determined, otherwise uid, otherwise "unknown"
ProcessMappingScanner/__init__.py
def getProcessOwnerStr(pid): ''' getProcessOwner - Get Process owner of a pid as a string instead of components (#getProcessOwner) @return - Returns username if it can be determined, otherwise uid, otherwise "unknown" ''' ownerInfo = getProcessOwner(pid) if ownerInfo: if ownerInfo['name']: owner = ownerInfo['name'] else: owner = str(ownerInfo['uid']) else: owner = 'unknown' return owner
def getProcessOwnerStr(pid): ''' getProcessOwner - Get Process owner of a pid as a string instead of components (#getProcessOwner) @return - Returns username if it can be determined, otherwise uid, otherwise "unknown" ''' ownerInfo = getProcessOwner(pid) if ownerInfo: if ownerInfo['name']: owner = ownerInfo['name'] else: owner = str(ownerInfo['uid']) else: owner = 'unknown' return owner
[ "getProcessOwner", "-", "Get", "Process", "owner", "of", "a", "pid", "as", "a", "string", "instead", "of", "components", "(", "#getProcessOwner", ")" ]
kata198/ProcessMappingScanner
python
https://github.com/kata198/ProcessMappingScanner/blob/d1735fe6746493c51aaae213b982fa96f5c5b621/ProcessMappingScanner/__init__.py#L52-L67
[ "def", "getProcessOwnerStr", "(", "pid", ")", ":", "ownerInfo", "=", "getProcessOwner", "(", "pid", ")", "if", "ownerInfo", ":", "if", "ownerInfo", "[", "'name'", "]", ":", "owner", "=", "ownerInfo", "[", "'name'", "]", "else", ":", "owner", "=", "str", "(", "ownerInfo", "[", "'uid'", "]", ")", "else", ":", "owner", "=", "'unknown'", "return", "owner" ]
d1735fe6746493c51aaae213b982fa96f5c5b621
valid
getProcessCommandLineStr
getProcessCommandLineStr - Gets a the commandline (program + arguments) of a given pid @param pid <int> - Process ID @return - None if process not found or can't be determined. Otherwise a string of commandline. @note Caution, args may have spaces in them, and you cannot surmise from this method. If you care (like trying to replay a command), use getProcessCommandLineList instead
ProcessMappingScanner/__init__.py
def getProcessCommandLineStr(pid): ''' getProcessCommandLineStr - Gets a the commandline (program + arguments) of a given pid @param pid <int> - Process ID @return - None if process not found or can't be determined. Otherwise a string of commandline. @note Caution, args may have spaces in them, and you cannot surmise from this method. If you care (like trying to replay a command), use getProcessCommandLineList instead ''' try: with open('/proc/%d/cmdline' %(int(pid),), 'r') as f: cmdline = f.read() return cmdline.replace('\x00', ' ') except: return None
def getProcessCommandLineStr(pid): ''' getProcessCommandLineStr - Gets a the commandline (program + arguments) of a given pid @param pid <int> - Process ID @return - None if process not found or can't be determined. Otherwise a string of commandline. @note Caution, args may have spaces in them, and you cannot surmise from this method. If you care (like trying to replay a command), use getProcessCommandLineList instead ''' try: with open('/proc/%d/cmdline' %(int(pid),), 'r') as f: cmdline = f.read() return cmdline.replace('\x00', ' ') except: return None
[ "getProcessCommandLineStr", "-", "Gets", "a", "the", "commandline", "(", "program", "+", "arguments", ")", "of", "a", "given", "pid" ]
kata198/ProcessMappingScanner
python
https://github.com/kata198/ProcessMappingScanner/blob/d1735fe6746493c51aaae213b982fa96f5c5b621/ProcessMappingScanner/__init__.py#L69-L84
[ "def", "getProcessCommandLineStr", "(", "pid", ")", ":", "try", ":", "with", "open", "(", "'/proc/%d/cmdline'", "%", "(", "int", "(", "pid", ")", ",", ")", ",", "'r'", ")", "as", "f", ":", "cmdline", "=", "f", ".", "read", "(", ")", "return", "cmdline", ".", "replace", "(", "'\\x00'", ",", "' '", ")", "except", ":", "return", "None" ]
d1735fe6746493c51aaae213b982fa96f5c5b621
valid
getProcessCommandLineList
getProcessCommandLineList - Gets the commandline (program + argumentS) of a given pid as a list. @param pid <int> - Process ID @return - None if process not found or can't be determined. Otherwise a list representing argv. First argument is process name, remainder are arguments. @note - Use this if you care about whether a process had a space in the commands
ProcessMappingScanner/__init__.py
def getProcessCommandLineList(pid): ''' getProcessCommandLineList - Gets the commandline (program + argumentS) of a given pid as a list. @param pid <int> - Process ID @return - None if process not found or can't be determined. Otherwise a list representing argv. First argument is process name, remainder are arguments. @note - Use this if you care about whether a process had a space in the commands ''' try: with open('/proc/%d/cmdline' %(int(pid),), 'r') as f: cmdline = f.read() return cmdline.split('\x00') except: return None
def getProcessCommandLineList(pid): ''' getProcessCommandLineList - Gets the commandline (program + argumentS) of a given pid as a list. @param pid <int> - Process ID @return - None if process not found or can't be determined. Otherwise a list representing argv. First argument is process name, remainder are arguments. @note - Use this if you care about whether a process had a space in the commands ''' try: with open('/proc/%d/cmdline' %(int(pid),), 'r') as f: cmdline = f.read() return cmdline.split('\x00') except: return None
[ "getProcessCommandLineList", "-", "Gets", "the", "commandline", "(", "program", "+", "argumentS", ")", "of", "a", "given", "pid", "as", "a", "list", "." ]
kata198/ProcessMappingScanner
python
https://github.com/kata198/ProcessMappingScanner/blob/d1735fe6746493c51aaae213b982fa96f5c5b621/ProcessMappingScanner/__init__.py#L87-L103
[ "def", "getProcessCommandLineList", "(", "pid", ")", ":", "try", ":", "with", "open", "(", "'/proc/%d/cmdline'", "%", "(", "int", "(", "pid", ")", ",", ")", ",", "'r'", ")", "as", "f", ":", "cmdline", "=", "f", ".", "read", "(", ")", "return", "cmdline", ".", "split", "(", "'\\x00'", ")", "except", ":", "return", "None" ]
d1735fe6746493c51aaae213b982fa96f5c5b621
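A sketch contrasting the two commandline helpers above on the current process:

import os
from ProcessMappingScanner import getProcessCommandLineStr, getProcessCommandLineList

print(getProcessCommandLineStr(os.getpid()))    # one space-joined string
print(getProcessCommandLineList(os.getpid()))   # argv-style list, unambiguous when args contain spaces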
valid
scanProcessForCwd
scanProcessForCwd - Searches a given pid's cwd for a given pattern @param pid <int> - A running process ID on this system @param searchPortion <str> - Any portion of directory to search @param isExactMatch <bool> Default False - If match should be exact, otherwise a partial match is performed. @return <dict> - If result is found, the following dict is returned. If no match found on the given pid, or pid is not found running, None is returned. { 'searchPortion' : The passed search pattern 'pid' : The passed pid (as an integer) 'owner' : String of process owner, or uid if no mapping can be found, or "unknown" if neither could be determined. 'cmdline' : Commandline string 'cwd' : The exact cwd of matched process }
ProcessMappingScanner/__init__.py
def scanProcessForCwd(pid, searchPortion, isExactMatch=False): ''' scanProcessForCwd - Searches a given pid's cwd for a given pattern @param pid <int> - A running process ID on this system @param searchPortion <str> - Any portion of directory to search @param isExactMatch <bool> Default False - If match should be exact, otherwise a partial match is performed. @return <dict> - If result is found, the following dict is returned. If no match found on the given pid, or pid is not found running, None is returned. { 'searchPortion' : The passed search pattern 'pid' : The passed pid (as an integer) 'owner' : String of process owner, or uid if no mapping can be found, or "unknown" if neither could be determined. 'cmdline' : Commandline string 'cwd' : The exact cwd of matched process } ''' try: try: pid = int(pid) except ValueError as e: sys.stderr.write('Expected an integer, got %s for pid.\n' %(str(type(pid)),)) raise e cwd = getProcessCwd(pid) if not cwd: return None isMatch = False if isExactMatch is True: if searchPortion == cwd: isMatch = True else: if searchPortion.endswith('/') and searchPortion[:-1] == cwd: isMatch = True else: if searchPortion in cwd: isMatch = True else: if searchPortion.endswith('/') and searchPortion[:-1] in cwd: isMatch = True if not isMatch: return None cmdline = getProcessCommandLineStr(pid) owner = getProcessOwnerStr(pid) return { 'searchPortion' : searchPortion, 'pid' : pid, 'owner' : owner, 'cmdline' : cmdline, 'cwd' : cwd, } except OSError: return None except IOError: return None except FileNotFoundError: return None except PermissionError: return None
def scanProcessForCwd(pid, searchPortion, isExactMatch=False): ''' scanProcessForCwd - Searches a given pid's cwd for a given pattern @param pid <int> - A running process ID on this system @param searchPortion <str> - Any portion of directory to search @param isExactMatch <bool> Default False - If match should be exact, otherwise a partial match is performed. @return <dict> - If result is found, the following dict is returned. If no match found on the given pid, or pid is not found running, None is returned. { 'searchPortion' : The passed search pattern 'pid' : The passed pid (as an integer) 'owner' : String of process owner, or uid if no mapping can be found, or "unknown" if neither could be determined. 'cmdline' : Commandline string 'cwd' : The exact cwd of matched process } ''' try: try: pid = int(pid) except ValueError as e: sys.stderr.write('Expected an integer, got %s for pid.\n' %(str(type(pid)),)) raise e cwd = getProcessCwd(pid) if not cwd: return None isMatch = False if isExactMatch is True: if searchPortion == cwd: isMatch = True else: if searchPortion.endswith('/') and searchPortion[:-1] == cwd: isMatch = True else: if searchPortion in cwd: isMatch = True else: if searchPortion.endswith('/') and searchPortion[:-1] in cwd: isMatch = True if not isMatch: return None cmdline = getProcessCommandLineStr(pid) owner = getProcessOwnerStr(pid) return { 'searchPortion' : searchPortion, 'pid' : pid, 'owner' : owner, 'cmdline' : cmdline, 'cwd' : cwd, } except OSError: return None except IOError: return None except FileNotFoundError: return None except PermissionError: return None
[ "scanProcessForCwd", "-", "Searches", "a", "given", "pid", "s", "cwd", "for", "a", "given", "pattern" ]
kata198/ProcessMappingScanner
python
https://github.com/kata198/ProcessMappingScanner/blob/d1735fe6746493c51aaae213b982fa96f5c5b621/ProcessMappingScanner/__init__.py#L131-L194
[ "def", "scanProcessForCwd", "(", "pid", ",", "searchPortion", ",", "isExactMatch", "=", "False", ")", ":", "try", ":", "try", ":", "pid", "=", "int", "(", "pid", ")", "except", "ValueError", "as", "e", ":", "sys", ".", "stderr", ".", "write", "(", "'Expected an integer, got %s for pid.\\n'", "%", "(", "str", "(", "type", "(", "pid", ")", ")", ",", ")", ")", "raise", "e", "cwd", "=", "getProcessCwd", "(", "pid", ")", "if", "not", "cwd", ":", "return", "None", "isMatch", "=", "False", "if", "isExactMatch", "is", "True", ":", "if", "searchPortion", "==", "cwd", ":", "isMatch", "=", "True", "else", ":", "if", "searchPortion", ".", "endswith", "(", "'/'", ")", "and", "searchPortion", "[", ":", "-", "1", "]", "==", "cwd", ":", "isMatch", "=", "True", "else", ":", "if", "searchPortion", "in", "cwd", ":", "isMatch", "=", "True", "else", ":", "if", "searchPortion", ".", "endswith", "(", "'/'", ")", "and", "searchPortion", "[", ":", "-", "1", "]", "in", "cwd", ":", "isMatch", "=", "True", "if", "not", "isMatch", ":", "return", "None", "cmdline", "=", "getProcessCommandLineStr", "(", "pid", ")", "owner", "=", "getProcessOwnerStr", "(", "pid", ")", "return", "{", "'searchPortion'", ":", "searchPortion", ",", "'pid'", ":", "pid", ",", "'owner'", ":", "owner", ",", "'cmdline'", ":", "cmdline", ",", "'cwd'", ":", "cwd", ",", "}", "except", "OSError", ":", "return", "None", "except", "IOError", ":", "return", "None", "except", "FileNotFoundError", ":", "return", "None", "except", "PermissionError", ":", "return", "None" ]
d1735fe6746493c51aaae213b982fa96f5c5b621
valid
scanAllProcessesForCwd
scanAllProcessesForCwd - Scans all processes on the system for a given search pattern. @param searchPortion <str> - Any portion of directory to search @param isExactMatch <bool> Default False - If match should be exact, otherwise a partial match is performed. @return - <dict> - A dictionary of pid -> cwdResults for each pid that matched the search pattern. For format of "cwdResults", @see scanProcessForCwd
ProcessMappingScanner/__init__.py
def scanAllProcessesForCwd(searchPortion, isExactMatch=False): ''' scanAllProcessesForCwd - Scans all processes on the system for a given search pattern. @param searchPortion <str> - Any portion of directory to search @param isExactMatch <bool> Default False - If match should be exact, otherwise a partial match is performed. @return - <dict> - A dictionary of pid -> cwdResults for each pid that matched the search pattern. For format of "cwdResults", @see scanProcessForCwd ''' pids = getAllRunningPids() cwdResults = [scanProcessForCwd(pid, searchPortion, isExactMatch) for pid in pids] ret = {} for i in range(len(pids)): if cwdResults[i] is not None: ret[pids[i]] = cwdResults[i] return ret
def scanAllProcessesForCwd(searchPortion, isExactMatch=False): ''' scanAllProcessesForCwd - Scans all processes on the system for a given search pattern. @param searchPortion <str> - Any portion of directory to search @param isExactMatch <bool> Default False - If match should be exact, otherwise a partial match is performed. @return - <dict> - A dictionary of pid -> cwdResults for each pid that matched the search pattern. For format of "cwdResults", @see scanProcessForCwd ''' pids = getAllRunningPids() cwdResults = [scanProcessForCwd(pid, searchPortion, isExactMatch) for pid in pids] ret = {} for i in range(len(pids)): if cwdResults[i] is not None: ret[pids[i]] = cwdResults[i] return ret
[ "scanAllProcessesForCwd", "-", "Scans", "all", "processes", "on", "the", "system", "for", "a", "given", "search", "pattern", "." ]
kata198/ProcessMappingScanner
python
https://github.com/kata198/ProcessMappingScanner/blob/d1735fe6746493c51aaae213b982fa96f5c5b621/ProcessMappingScanner/__init__.py#L196-L214
[ "def", "scanAllProcessesForCwd", "(", "searchPortion", ",", "isExactMatch", "=", "False", ")", ":", "pids", "=", "getAllRunningPids", "(", ")", "cwdResults", "=", "[", "scanProcessForCwd", "(", "pid", ",", "searchPortion", ",", "isExactMatch", ")", "for", "pid", "in", "pids", "]", "ret", "=", "{", "}", "for", "i", "in", "range", "(", "len", "(", "pids", ")", ")", ":", "if", "cwdResults", "[", "i", "]", "is", "not", "None", ":", "ret", "[", "pids", "[", "i", "]", "]", "=", "cwdResults", "[", "i", "]", "return", "ret" ]
d1735fe6746493c51aaae213b982fa96f5c5b621
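An illustrative scan for every process whose working directory contains '/home'; the search string is arbitrary and Linux /proc is required:

from ProcessMappingScanner import scanAllProcessesForCwd

matches = scanAllProcessesForCwd('/home', isExactMatch=False)
for pid, info in matches.items():
    print('%d %s %s' % (pid, info['owner'], info['cwd']))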
valid
scanProcessForMapping
scanProcessForMapping - Searches a given pid's mappings for a certain pattern. @param pid <int> - A running process ID on this system @param searchPortion <str> - A mapping for which to search, example: libc or python or libz.so.1. Give empty string to return all mappings. @param isExactMatch <bool> Default False - If match should be exact, otherwise a partial match is performed. @param ignoreCase <bool> Default False - If True, search will be performed case-insensitively @return <dict> - If result is found, the following dict is returned. If no match found on the given pid, or pid is not found running, None is returned. { 'searchPortion' : The passed search pattern 'pid' : The passed pid (as an integer) 'owner' : String of process owner, or uid if no mapping can be found, or "unknown" if neither could be determined. 'cmdline' : Commandline string 'matchedMappings' : All mappings likes that matched the given search pattern }
ProcessMappingScanner/__init__.py
def scanProcessForMapping(pid, searchPortion, isExactMatch=False, ignoreCase=False): ''' scanProcessForMapping - Searches a given pid's mappings for a certain pattern. @param pid <int> - A running process ID on this system @param searchPortion <str> - A mapping for which to search, example: libc or python or libz.so.1. Give empty string to return all mappings. @param isExactMatch <bool> Default False - If match should be exact, otherwise a partial match is performed. @param ignoreCase <bool> Default False - If True, search will be performed case-insensitively @return <dict> - If result is found, the following dict is returned. If no match found on the given pid, or pid is not found running, None is returned. { 'searchPortion' : The passed search pattern 'pid' : The passed pid (as an integer) 'owner' : String of process owner, or uid if no mapping can be found, or "unknown" if neither could be determined. 'cmdline' : Commandline string 'matchedMappings' : All mappings likes that matched the given search pattern } ''' try: try: pid = int(pid) except ValueError as e: sys.stderr.write('Expected an integer, got %s for pid.\n' %(str(type(pid)),)) raise e with open('/proc/%d/maps' %(pid,), 'r') as f: contents = f.read() lines = contents.split('\n') matchedMappings = [] if isExactMatch is True: if ignoreCase is False: isMatch = lambda searchFor, searchIn : bool(searchFor == searchIn) else: isMatch = lambda searchFor, searchIn : bool(searchFor.lower() == searchIn.lower()) else: if ignoreCase is False: isMatch = lambda searchFor, searchIn : bool(searchFor in searchIn) else: isMatch = lambda searchFor, searchIn : bool(searchFor.lower() in searchIn.lower()) for line in lines: portion = ' '.join(line.split(' ')[5:]).lstrip() if isMatch(searchPortion, portion): matchedMappings.append('\t' + line) if len(matchedMappings) == 0: return None cmdline = getProcessCommandLineStr(pid) owner = getProcessOwnerStr(pid) return { 'searchPortion' : searchPortion, 'pid' : pid, 'owner' : owner, 'cmdline' : cmdline, 'matchedMappings' : matchedMappings, } except OSError: return None except IOError: return None except FileNotFoundError: return None except PermissionError: return None
def scanProcessForMapping(pid, searchPortion, isExactMatch=False, ignoreCase=False): ''' scanProcessForMapping - Searches a given pid's mappings for a certain pattern. @param pid <int> - A running process ID on this system @param searchPortion <str> - A mapping for which to search, example: libc or python or libz.so.1. Give empty string to return all mappings. @param isExactMatch <bool> Default False - If match should be exact, otherwise a partial match is performed. @param ignoreCase <bool> Default False - If True, search will be performed case-insensitively @return <dict> - If result is found, the following dict is returned. If no match found on the given pid, or pid is not found running, None is returned. { 'searchPortion' : The passed search pattern 'pid' : The passed pid (as an integer) 'owner' : String of process owner, or uid if no mapping can be found, or "unknown" if neither could be determined. 'cmdline' : Commandline string 'matchedMappings' : All mappings likes that matched the given search pattern } ''' try: try: pid = int(pid) except ValueError as e: sys.stderr.write('Expected an integer, got %s for pid.\n' %(str(type(pid)),)) raise e with open('/proc/%d/maps' %(pid,), 'r') as f: contents = f.read() lines = contents.split('\n') matchedMappings = [] if isExactMatch is True: if ignoreCase is False: isMatch = lambda searchFor, searchIn : bool(searchFor == searchIn) else: isMatch = lambda searchFor, searchIn : bool(searchFor.lower() == searchIn.lower()) else: if ignoreCase is False: isMatch = lambda searchFor, searchIn : bool(searchFor in searchIn) else: isMatch = lambda searchFor, searchIn : bool(searchFor.lower() in searchIn.lower()) for line in lines: portion = ' '.join(line.split(' ')[5:]).lstrip() if isMatch(searchPortion, portion): matchedMappings.append('\t' + line) if len(matchedMappings) == 0: return None cmdline = getProcessCommandLineStr(pid) owner = getProcessOwnerStr(pid) return { 'searchPortion' : searchPortion, 'pid' : pid, 'owner' : owner, 'cmdline' : cmdline, 'matchedMappings' : matchedMappings, } except OSError: return None except IOError: return None except FileNotFoundError: return None except PermissionError: return None
[ "scanProcessForMapping", "-", "Searches", "a", "given", "pid", "s", "mappings", "for", "a", "certain", "pattern", "." ]
kata198/ProcessMappingScanner
python
https://github.com/kata198/ProcessMappingScanner/blob/d1735fe6746493c51aaae213b982fa96f5c5b621/ProcessMappingScanner/__init__.py#L216-L287
[ "def", "scanProcessForMapping", "(", "pid", ",", "searchPortion", ",", "isExactMatch", "=", "False", ",", "ignoreCase", "=", "False", ")", ":", "try", ":", "try", ":", "pid", "=", "int", "(", "pid", ")", "except", "ValueError", "as", "e", ":", "sys", ".", "stderr", ".", "write", "(", "'Expected an integer, got %s for pid.\\n'", "%", "(", "str", "(", "type", "(", "pid", ")", ")", ",", ")", ")", "raise", "e", "with", "open", "(", "'/proc/%d/maps'", "%", "(", "pid", ",", ")", ",", "'r'", ")", "as", "f", ":", "contents", "=", "f", ".", "read", "(", ")", "lines", "=", "contents", ".", "split", "(", "'\\n'", ")", "matchedMappings", "=", "[", "]", "if", "isExactMatch", "is", "True", ":", "if", "ignoreCase", "is", "False", ":", "isMatch", "=", "lambda", "searchFor", ",", "searchIn", ":", "bool", "(", "searchFor", "==", "searchIn", ")", "else", ":", "isMatch", "=", "lambda", "searchFor", ",", "searchIn", ":", "bool", "(", "searchFor", ".", "lower", "(", ")", "==", "searchIn", ".", "lower", "(", ")", ")", "else", ":", "if", "ignoreCase", "is", "False", ":", "isMatch", "=", "lambda", "searchFor", ",", "searchIn", ":", "bool", "(", "searchFor", "in", "searchIn", ")", "else", ":", "isMatch", "=", "lambda", "searchFor", ",", "searchIn", ":", "bool", "(", "searchFor", ".", "lower", "(", ")", "in", "searchIn", ".", "lower", "(", ")", ")", "for", "line", "in", "lines", ":", "portion", "=", "' '", ".", "join", "(", "line", ".", "split", "(", "' '", ")", "[", "5", ":", "]", ")", ".", "lstrip", "(", ")", "if", "isMatch", "(", "searchPortion", ",", "portion", ")", ":", "matchedMappings", ".", "append", "(", "'\\t'", "+", "line", ")", "if", "len", "(", "matchedMappings", ")", "==", "0", ":", "return", "None", "cmdline", "=", "getProcessCommandLineStr", "(", "pid", ")", "owner", "=", "getProcessOwnerStr", "(", "pid", ")", "return", "{", "'searchPortion'", ":", "searchPortion", ",", "'pid'", ":", "pid", ",", "'owner'", ":", "owner", ",", "'cmdline'", ":", "cmdline", ",", "'matchedMappings'", ":", "matchedMappings", ",", "}", "except", "OSError", ":", "return", "None", "except", "IOError", ":", "return", "None", "except", "FileNotFoundError", ":", "return", "None", "except", "PermissionError", ":", "return", "None" ]
d1735fe6746493c51aaae213b982fa96f5c5b621
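A minimal usage sketch for the scanProcessForMapping record above; the package is assumed to be importable as ProcessMappingScanner (as the ProcessMappingScanner/__init__.py path suggests), and the pid and search pattern are placeholders.

import ProcessMappingScanner as pms  # import path assumed from ProcessMappingScanner/__init__.py

# Scan one (placeholder) pid for mappings whose name contains 'libc'.
result = pms.scanProcessForMapping(1234, 'libc')
if result is None:
    print('pid not running, not readable, or no matching mapping')
else:
    print('owner:   %s' % result['owner'])
    print('cmdline: %s' % result['cmdline'])
    for line in result['matchedMappings']:
        print(line)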
valid
scanAllProcessesForMapping
scanAllProcessesForMapping - Scans all processes on the system for a given search pattern. @param searchPortion <str> - A mapping for which to search, example: libc or python or libz.so.1. Give empty string to return all mappings. @param isExactMatch <bool> Default False - If match should be exact, otherwise a partial match is performed. @param ignoreCase <bool> Default False - If True, search will be performed case-insensitively @return - <dict> - A dictionary of pid -> mappingResults for each pid that matched the search pattern. For format of "mappingResults", @see scanProcessForMapping
ProcessMappingScanner/__init__.py
def scanAllProcessesForMapping(searchPortion, isExactMatch=False, ignoreCase=False): ''' scanAllProcessesForMapping - Scans all processes on the system for a given search pattern. @param searchPortion <str> - A mapping for which to search, example: libc or python or libz.so.1. Give empty string to return all mappings. @param isExactMatch <bool> Default False - If match should be exact, otherwise a partial match is performed. @param ignoreCase <bool> Default False - If True, search will be performed case-insensitively @return - <dict> - A dictionary of pid -> mappingResults for each pid that matched the search pattern. For format of "mappingResults", @see scanProcessForMapping ''' pids = getAllRunningPids() # Since processes could disappear, we run the scan as fast as possible here with a list comprehension, then assemble the return dictionary later. mappingResults = [scanProcessForMapping(pid, searchPortion, isExactMatch, ignoreCase) for pid in pids] ret = {} for i in range(len(pids)): if mappingResults[i] is not None: ret[pids[i]] = mappingResults[i] return ret
def scanAllProcessesForMapping(searchPortion, isExactMatch=False, ignoreCase=False): ''' scanAllProcessesForMapping - Scans all processes on the system for a given search pattern. @param searchPortion <str> - A mapping for which to search, example: libc or python or libz.so.1. Give empty string to return all mappings. @param isExactMatch <bool> Default False - If match should be exact, otherwise a partial match is performed. @param ignoreCase <bool> Default False - If True, search will be performed case-insensitively @return - <dict> - A dictionary of pid -> mappingResults for each pid that matched the search pattern. For format of "mappingResults", @see scanProcessForMapping ''' pids = getAllRunningPids() # Since processes could disappear, we run the scan as fast as possible here with a list comprehension, then assemble the return dictionary later. mappingResults = [scanProcessForMapping(pid, searchPortion, isExactMatch, ignoreCase) for pid in pids] ret = {} for i in range(len(pids)): if mappingResults[i] is not None: ret[pids[i]] = mappingResults[i] return ret
[ "scanAllProcessesForMapping", "-", "Scans", "all", "processes", "on", "the", "system", "for", "a", "given", "search", "pattern", "." ]
kata198/ProcessMappingScanner
python
https://github.com/kata198/ProcessMappingScanner/blob/d1735fe6746493c51aaae213b982fa96f5c5b621/ProcessMappingScanner/__init__.py#L290-L309
[ "def", "scanAllProcessesForMapping", "(", "searchPortion", ",", "isExactMatch", "=", "False", ",", "ignoreCase", "=", "False", ")", ":", "pids", "=", "getAllRunningPids", "(", ")", "# Since processes could disappear, we run the scan as fast as possible here with a list comprehension, then assemble the return dictionary later.", "mappingResults", "=", "[", "scanProcessForMapping", "(", "pid", ",", "searchPortion", ",", "isExactMatch", ",", "ignoreCase", ")", "for", "pid", "in", "pids", "]", "ret", "=", "{", "}", "for", "i", "in", "range", "(", "len", "(", "pids", ")", ")", ":", "if", "mappingResults", "[", "i", "]", "is", "not", "None", ":", "ret", "[", "pids", "[", "i", "]", "]", "=", "mappingResults", "[", "i", "]", "return", "ret" ]
d1735fe6746493c51aaae213b982fa96f5c5b621
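A hedged sketch of walking the pid -> result dictionary that scanAllProcessesForMapping above returns; the library name searched for is arbitrary.

import ProcessMappingScanner as pms  # import path assumed as in the sketch above

# Every process with libz.so.1 mapped (partial, case-sensitive match by default).
results = pms.scanAllProcessesForMapping('libz.so.1')
for pid, info in results.items():
    print('%s (%s): %d matching mappings' % (pid, info['cmdline'], len(info['matchedMappings'])))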
valid
scanProcessForOpenFile
scanProcessForOpenFile - Scans open FDs for a given pid to see if any are the provided searchPortion @param searchPortion <str> - Filename to check @param isExactMatch <bool> Default True - If match should be exact, otherwise a partial match is performed. @param ignoreCase <bool> Default False - If True, search will be performed case-insensitively @return - If result is found, the following dict is returned. If no match found on the given pid, or the pid is not found running, None is returned. { 'searchPortion' : The search portion provided 'pid' : The passed pid (as an integer) 'owner' : String of process owner, or "unknown" if one could not be determined 'cmdline' : Commandline string 'fds' : List of file descriptors assigned to this file (could be mapped several times) 'filenames' : List of the filenames matched }
ProcessMappingScanner/__init__.py
def scanProcessForOpenFile(pid, searchPortion, isExactMatch=True, ignoreCase=False): ''' scanProcessForOpenFile - Scans open FDs for a given pid to see if any are the provided searchPortion @param searchPortion <str> - Filename to check @param isExactMatch <bool> Default True - If match should be exact, otherwise a partial match is performed. @param ignoreCase <bool> Default False - If True, search will be performed case-insensitively @return - If result is found, the following dict is returned. If no match found on the given pid, or the pid is not found running, None is returned. { 'searchPortion' : The search portion provided 'pid' : The passed pid (as an integer) 'owner' : String of process owner, or "unknown" if one could not be determined 'cmdline' : Commandline string 'fds' : List of file descriptors assigned to this file (could be mapped several times) 'filenames' : List of the filenames matched } ''' try: try: pid = int(pid) except ValueError as e: sys.stderr.write('Expected an integer, got %s for pid.\n' %(str(type(pid)),)) raise e prefixDir = "/proc/%d/fd" % (pid,) processFDs = os.listdir(prefixDir) matchedFDs = [] matchedFilenames = [] if isExactMatch is True: if ignoreCase is False: isMatch = lambda searchFor, totalPath : bool(searchFor == totalPath) else: isMatch = lambda searchFor, totalPath : bool(searchFor.lower() == totalPath.lower()) else: if ignoreCase is False: isMatch = lambda searchFor, totalPath : bool(searchFor in totalPath) else: isMatch = lambda searchFor, totalPath : bool(searchFor.lower() in totalPath.lower()) for fd in processFDs: fdPath = os.readlink(prefixDir + '/' + fd) if isMatch(searchPortion, fdPath): matchedFDs.append(fd) matchedFilenames.append(fdPath) if len(matchedFDs) == 0: return None cmdline = getProcessCommandLineStr(pid) owner = getProcessOwnerStr(pid) return { 'searchPortion' : searchPortion, 'pid' : pid, 'owner' : owner, 'cmdline' : cmdline, 'fds' : matchedFDs, 'filenames' : matchedFilenames, } except OSError: return None except IOError: return None except FileNotFoundError: return None except PermissionError: return None
def scanProcessForOpenFile(pid, searchPortion, isExactMatch=True, ignoreCase=False): ''' scanProcessForOpenFile - Scans open FDs for a given pid to see if any are the provided searchPortion @param searchPortion <str> - Filename to check @param isExactMatch <bool> Default True - If match should be exact, otherwise a partial match is performed. @param ignoreCase <bool> Default False - If True, search will be performed case-insensitively @return - If result is found, the following dict is returned. If no match found on the given pid, or the pid is not found running, None is returned. { 'searchPortion' : The search portion provided 'pid' : The passed pid (as an integer) 'owner' : String of process owner, or "unknown" if one could not be determined 'cmdline' : Commandline string 'fds' : List of file descriptors assigned to this file (could be mapped several times) 'filenames' : List of the filenames matched } ''' try: try: pid = int(pid) except ValueError as e: sys.stderr.write('Expected an integer, got %s for pid.\n' %(str(type(pid)),)) raise e prefixDir = "/proc/%d/fd" % (pid,) processFDs = os.listdir(prefixDir) matchedFDs = [] matchedFilenames = [] if isExactMatch is True: if ignoreCase is False: isMatch = lambda searchFor, totalPath : bool(searchFor == totalPath) else: isMatch = lambda searchFor, totalPath : bool(searchFor.lower() == totalPath.lower()) else: if ignoreCase is False: isMatch = lambda searchFor, totalPath : bool(searchFor in totalPath) else: isMatch = lambda searchFor, totalPath : bool(searchFor.lower() in totalPath.lower()) for fd in processFDs: fdPath = os.readlink(prefixDir + '/' + fd) if isMatch(searchPortion, fdPath): matchedFDs.append(fd) matchedFilenames.append(fdPath) if len(matchedFDs) == 0: return None cmdline = getProcessCommandLineStr(pid) owner = getProcessOwnerStr(pid) return { 'searchPortion' : searchPortion, 'pid' : pid, 'owner' : owner, 'cmdline' : cmdline, 'fds' : matchedFDs, 'filenames' : matchedFilenames, } except OSError: return None except IOError: return None except FileNotFoundError: return None except PermissionError: return None
[ "scanProcessForOpenFile", "-", "Scans", "open", "FDs", "for", "a", "given", "pid", "to", "see", "if", "any", "are", "the", "provided", "searchPortion" ]
kata198/ProcessMappingScanner
python
https://github.com/kata198/ProcessMappingScanner/blob/d1735fe6746493c51aaae213b982fa96f5c5b621/ProcessMappingScanner/__init__.py#L315-L392
[ "def", "scanProcessForOpenFile", "(", "pid", ",", "searchPortion", ",", "isExactMatch", "=", "True", ",", "ignoreCase", "=", "False", ")", ":", "try", ":", "try", ":", "pid", "=", "int", "(", "pid", ")", "except", "ValueError", "as", "e", ":", "sys", ".", "stderr", ".", "write", "(", "'Expected an integer, got %s for pid.\\n'", "%", "(", "str", "(", "type", "(", "pid", ")", ")", ",", ")", ")", "raise", "e", "prefixDir", "=", "\"/proc/%d/fd\"", "%", "(", "pid", ",", ")", "processFDs", "=", "os", ".", "listdir", "(", "prefixDir", ")", "matchedFDs", "=", "[", "]", "matchedFilenames", "=", "[", "]", "if", "isExactMatch", "is", "True", ":", "if", "ignoreCase", "is", "False", ":", "isMatch", "=", "lambda", "searchFor", ",", "totalPath", ":", "bool", "(", "searchFor", "==", "totalPath", ")", "else", ":", "isMatch", "=", "lambda", "searchFor", ",", "totalPath", ":", "bool", "(", "searchFor", ".", "lower", "(", ")", "==", "totalPath", ".", "lower", "(", ")", ")", "else", ":", "if", "ignoreCase", "is", "False", ":", "isMatch", "=", "lambda", "searchFor", ",", "totalPath", ":", "bool", "(", "searchFor", "in", "totalPath", ")", "else", ":", "isMatch", "=", "lambda", "searchFor", ",", "totalPath", ":", "bool", "(", "searchFor", ".", "lower", "(", ")", "in", "totalPath", ".", "lower", "(", ")", ")", "for", "fd", "in", "processFDs", ":", "fdPath", "=", "os", ".", "readlink", "(", "prefixDir", "+", "'/'", "+", "fd", ")", "if", "isMatch", "(", "searchPortion", ",", "fdPath", ")", ":", "matchedFDs", ".", "append", "(", "fd", ")", "matchedFilenames", ".", "append", "(", "fdPath", ")", "if", "len", "(", "matchedFDs", ")", "==", "0", ":", "return", "None", "cmdline", "=", "getProcessCommandLineStr", "(", "pid", ")", "owner", "=", "getProcessOwnerStr", "(", "pid", ")", "return", "{", "'searchPortion'", ":", "searchPortion", ",", "'pid'", ":", "pid", ",", "'owner'", ":", "owner", ",", "'cmdline'", ":", "cmdline", ",", "'fds'", ":", "matchedFDs", ",", "'filenames'", ":", "matchedFilenames", ",", "}", "except", "OSError", ":", "return", "None", "except", "IOError", ":", "return", "None", "except", "FileNotFoundError", ":", "return", "None", "except", "PermissionError", ":", "return", "None" ]
d1735fe6746493c51aaae213b982fa96f5c5b621
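A hedged sketch for scanProcessForOpenFile above; the pid and filename are placeholders, and the default isExactMatch=True means the path must match exactly.

import ProcessMappingScanner as pms  # import path assumed as above

# Does the (placeholder) pid hold /var/log/syslog open, and on which descriptors?
match = pms.scanProcessForOpenFile(1234, '/var/log/syslog')
if match is not None:
    for fd, filename in zip(match['fds'], match['filenames']):
        print('fd %s -> %s' % (fd, filename))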
valid
scanAllProcessesForOpenFile
scanAllProcessesForOpenFile - Scans all processes on the system for a given filename @param searchPortion <str> - Filename to check @param isExactMatch <bool> Default True - If match should be exact, otherwise a partial match is performed. @param ignoreCase <bool> Default False - If True, search will be performed case-insensitively @return - <dict> - A dictionary of pid -> mappingResults for each pid that matched the search pattern. For format of "mappingResults", @see scanProcessForOpenFile
ProcessMappingScanner/__init__.py
def scanAllProcessesForOpenFile(searchPortion, isExactMatch=True, ignoreCase=False): ''' scanAllProcessessForOpenFile - Scans all processes on the system for a given filename @param searchPortion <str> - Filename to check @param isExactMatch <bool> Default True - If match should be exact, otherwise a partial match is performed. @param ignoreCase <bool> Default False - If True, search will be performed case-insensitively @return - <dict> - A dictionary of pid -> mappingResults for each pid that matched the search pattern. For format of "mappingResults", @see scanProcessForOpenFile ''' pids = getAllRunningPids() # Since processes could disappear, we run the scan as fast as possible here with a list comprehension, then assemble the return dictionary later. mappingResults = [scanProcessForOpenFile(pid, searchPortion, isExactMatch, ignoreCase) for pid in pids] ret = {} for i in range(len(pids)): if mappingResults[i] is not None: ret[pids[i]] = mappingResults[i] return ret
def scanAllProcessesForOpenFile(searchPortion, isExactMatch=True, ignoreCase=False): ''' scanAllProcessessForOpenFile - Scans all processes on the system for a given filename @param searchPortion <str> - Filename to check @param isExactMatch <bool> Default True - If match should be exact, otherwise a partial match is performed. @param ignoreCase <bool> Default False - If True, search will be performed case-insensitively @return - <dict> - A dictionary of pid -> mappingResults for each pid that matched the search pattern. For format of "mappingResults", @see scanProcessForOpenFile ''' pids = getAllRunningPids() # Since processes could disappear, we run the scan as fast as possible here with a list comprehension, then assemble the return dictionary later. mappingResults = [scanProcessForOpenFile(pid, searchPortion, isExactMatch, ignoreCase) for pid in pids] ret = {} for i in range(len(pids)): if mappingResults[i] is not None: ret[pids[i]] = mappingResults[i] return ret
[ "scanAllProcessessForOpenFile", "-", "Scans", "all", "processes", "on", "the", "system", "for", "a", "given", "filename" ]
kata198/ProcessMappingScanner
python
https://github.com/kata198/ProcessMappingScanner/blob/d1735fe6746493c51aaae213b982fa96f5c5b621/ProcessMappingScanner/__init__.py#L395-L414
[ "def", "scanAllProcessesForOpenFile", "(", "searchPortion", ",", "isExactMatch", "=", "True", ",", "ignoreCase", "=", "False", ")", ":", "pids", "=", "getAllRunningPids", "(", ")", "# Since processes could disappear, we run the scan as fast as possible here with a list comprehension, then assemble the return dictionary later.", "mappingResults", "=", "[", "scanProcessForOpenFile", "(", "pid", ",", "searchPortion", ",", "isExactMatch", ",", "ignoreCase", ")", "for", "pid", "in", "pids", "]", "ret", "=", "{", "}", "for", "i", "in", "range", "(", "len", "(", "pids", ")", ")", ":", "if", "mappingResults", "[", "i", "]", "is", "not", "None", ":", "ret", "[", "pids", "[", "i", "]", "]", "=", "mappingResults", "[", "i", "]", "return", "ret" ]
d1735fe6746493c51aaae213b982fa96f5c5b621
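And the system-wide variant, sketched under the same assumptions; the filename is again a placeholder.

import ProcessMappingScanner as pms  # import path assumed as above

# Which processes currently hold the file open?
holders = pms.scanAllProcessesForOpenFile('/var/log/syslog')
for pid, info in holders.items():
    print('%s\t%s' % (pid, info['cmdline']))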
valid
enum
class builder
snipy/enum.py
def enum(name, *members, **withvalue): """class buider""" if len(members) == 1: if isinstance(members[0], str): members = members[0].split() elif isinstance(members[0], (list, tuple)): members = members[0] dic = {v: v for v in members} dic.update(withvalue) return type(name, (Enum,), dic)
def enum(name, *members, **withvalue): """class buider""" if len(members) == 1: if isinstance(members[0], str): members = members[0].split() elif isinstance(members[0], (list, tuple)): members = members[0] dic = {v: v for v in members} dic.update(withvalue) return type(name, (Enum,), dic)
[ "class", "buider" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/enum.py#L26-L37
[ "def", "enum", "(", "name", ",", "*", "members", ",", "*", "*", "withvalue", ")", ":", "if", "len", "(", "members", ")", "==", "1", ":", "if", "isinstance", "(", "members", "[", "0", "]", ",", "str", ")", ":", "members", "=", "members", "[", "0", "]", ".", "split", "(", ")", "elif", "isinstance", "(", "members", "[", "0", "]", ",", "(", "list", ",", "tuple", ")", ")", ":", "members", "=", "members", "[", "0", "]", "dic", "=", "{", "v", ":", "v", "for", "v", "in", "members", "}", "dic", ".", "update", "(", "withvalue", ")", "return", "type", "(", "name", ",", "(", "Enum", ",", ")", ",", "dic", ")" ]
408520867179f99b3158b57520e2619f3fecd69b
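A small, hedged sketch of the enum builder above; snipy.enum is assumed importable as the path suggests, and how members behave beyond plain class attributes depends on the Enum base class, which is not shown in this record.

from snipy.enum import enum  # module path assumed from snipy/enum.py

# Members listed in the string default to their own names as values;
# keyword members carry the explicit value given (here ALPHA -> 0).
Color = enum('Color', 'RED GREEN BLUE', ALPHA=0)
# The generated class carries RED='RED', GREEN='GREEN', BLUE='BLUE', ALPHA=0
# as class attributes; any further behaviour comes from the Enum base.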
valid
database
usage: with database('my_db') as conn: c = conn.cursor() .... Use the database connection with a with statement so it is closed automatically. :param db: str: db schema :param kwargs: :return:
snipy/database.py
def database(db='', **kwargs): """ usage: with database('my_db') as conn: c = conn.cursor() .... database 커넥션 with 문과 같이 사용하고, 알아서 close하기 :param db: str: db스키마 :param kwargs: :return: """ db = kwargs.pop('db', db) arg = db_config(db) arg.update(kwargs) return closing(MySQLdb.connect(**arg))
def database(db='', **kwargs): """ usage: with database('my_db') as conn: c = conn.cursor() .... database 커넥션 with 문과 같이 사용하고, 알아서 close하기 :param db: str: db스키마 :param kwargs: :return: """ db = kwargs.pop('db', db) arg = db_config(db) arg.update(kwargs) return closing(MySQLdb.connect(**arg))
[ "usage", ":", "with", "database", "(", "my_db", ")", "as", "conn", ":", "c", "=", "conn", ".", "cursor", "()", "....", "database", "커넥션", "with", "문과", "같이", "사용하고", "알아서", "close하기", ":", "param", "db", ":", "str", ":", "db스키마", ":", "param", "kwargs", ":", ":", "return", ":" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/database.py#L23-L38
[ "def", "database", "(", "db", "=", "''", ",", "*", "*", "kwargs", ")", ":", "db", "=", "kwargs", ".", "pop", "(", "'db'", ",", "db", ")", "arg", "=", "db_config", "(", "db", ")", "arg", ".", "update", "(", "kwargs", ")", "return", "closing", "(", "MySQLdb", ".", "connect", "(", "*", "*", "arg", ")", ")" ]
408520867179f99b3158b57520e2619f3fecd69b
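A hedged usage sketch matching the database docstring above; 'my_db' stands for whatever schema name db_config knows about.

import snipy.database as db  # module path assumed from snipy/database.py

# closing() guarantees the MySQLdb connection is closed when the block exits.
with db.database('my_db') as conn:
    cursor = conn.cursor()
    cursor.execute('SELECT 1')
    print(cursor.fetchall())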
valid
connect
Connect with the common connection arguments filled in; only the schema needs to be given. Returns a db connection object, but note that when used in a with statement it yields a cursor (that is how MySQLdb implements it). ex1) import snipy.database as db conn = db.connect('my_db') cursor = conn.cursor() ex2) import snipy.database as db with db.connect('my_db') as cursor: cursor.execute(query) :param db: str: db schema :param kwargs: extra connection options :return: connection or cursor
snipy/database.py
def connect(db='', **kwargs): """ db 접속 공통 인자들 채워서 접속, schema만 넣으면 됩니다. db connection 객체 반환이지만 with 문과 같이 쓰이면 cursor임에 주의 (MySQLdb의 구현이 그렇습니다.) ex1) import snipy.database as db conn = db.connect('my_db') cursor = conn.cursor() ex2) import snipy.database as db with db.connect('my_db') as cursor: cursor.execute(query) :param db: str: db schema :param kwargs: 추가 접속 정보 :return: connection or cursor """ arg = db_config(db) arg.update(kwargs) return MySQLdb.connect(**arg)
def connect(db='', **kwargs): """ db 접속 공통 인자들 채워서 접속, schema만 넣으면 됩니다. db connection 객체 반환이지만 with 문과 같이 쓰이면 cursor임에 주의 (MySQLdb의 구현이 그렇습니다.) ex1) import snipy.database as db conn = db.connect('my_db') cursor = conn.cursor() ex2) import snipy.database as db with db.connect('my_db') as cursor: cursor.execute(query) :param db: str: db schema :param kwargs: 추가 접속 정보 :return: connection or cursor """ arg = db_config(db) arg.update(kwargs) return MySQLdb.connect(**arg)
[ "db", "접속", "공통", "인자들", "채워서", "접속", "schema만", "넣으면", "됩니다", ".", "db", "connection", "객체", "반환이지만", "with", "문과", "같이", "쓰이면", "cursor임에", "주의", "(", "MySQLdb의", "구현이", "그렇습니다", ".", ")", "ex1", ")", "import", "snipy", ".", "database", "as", "db", "conn", "=", "db", ".", "connect", "(", "my_db", ")", "cursor", "=", "conn", ".", "cursor", "()" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/database.py#L41-L62
[ "def", "connect", "(", "db", "=", "''", ",", "*", "*", "kwargs", ")", ":", "arg", "=", "db_config", "(", "db", ")", "arg", ".", "update", "(", "kwargs", ")", "return", "MySQLdb", ".", "connect", "(", "*", "*", "arg", ")" ]
408520867179f99b3158b57520e2619f3fecd69b
valid
_cursor_exit
Bound to __exit__ so a cursor can be used in a with statement. :param cursor: :param exc_type: :param exc_value: :param traceback: :return:
snipy/database.py
def _cursor_exit(cursor, exc_type, exc_value, traceback): """ cursor with문과 쓸수 있게 __exit__에 바인딩 :param cursor: :param exc_type: :param exc_value: :param traceback: :return: """ if exc_type is not None: print(exc_value, traceback) cursor.connection.close()
def _cursor_exit(cursor, exc_type, exc_value, traceback): """ cursor with문과 쓸수 있게 __exit__에 바인딩 :param cursor: :param exc_type: :param exc_value: :param traceback: :return: """ if exc_type is not None: print(exc_value, traceback) cursor.connection.close()
[ "cursor", "with문과", "쓸수", "있게", "__exit__에", "바인딩", ":", "param", "cursor", ":", ":", "param", "exc_type", ":", ":", "param", "exc_value", ":", ":", "param", "traceback", ":", ":", "return", ":" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/database.py#L65-L76
[ "def", "_cursor_exit", "(", "cursor", ",", "exc_type", ",", "exc_value", ",", "traceback", ")", ":", "if", "exc_type", "is", "not", "None", ":", "print", "(", "exc_value", ",", "traceback", ")", "cursor", ".", "connection", ".", "close", "(", ")" ]
408520867179f99b3158b57520e2619f3fecd69b
valid
fetch
for record in fetch(query, args, **configs): print record :param args: :param db: str: db schema :param query: query string :param kwargs: extra db connection arguments, usually omitted :return: iterator
snipy/database.py
def fetch(query, args=None, **kwargs): """ for record in fetch(query, args, **configs): print record :param args: :param db: str: db 스키마 :param query: 쿼리 스트링 :param kwargs: db connection 추가 인자. 보통 생략 :return: iterator """ cur = execute(kwargs.pop('db', ''), query, args, **kwargs) for r in cur: yield r cur.connection.close()
def fetch(query, args=None, **kwargs): """ for record in fetch(query, args, **configs): print record :param args: :param db: str: db 스키마 :param query: 쿼리 스트링 :param kwargs: db connection 추가 인자. 보통 생략 :return: iterator """ cur = execute(kwargs.pop('db', ''), query, args, **kwargs) for r in cur: yield r cur.connection.close()
[ "for", "record", "in", "fetch", "(", "query", "args", "**", "configs", ")", ":", "print", "record", ":", "param", "args", ":", ":", "param", "db", ":", "str", ":", "db", "스키마", ":", "param", "query", ":", "쿼리", "스트링", ":", "param", "kwargs", ":", "db", "connection", "추가", "인자", ".", "보통", "생략", ":", "return", ":", "iterator" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/database.py#L105-L120
[ "def", "fetch", "(", "query", ",", "args", "=", "None", ",", "*", "*", "kwargs", ")", ":", "cur", "=", "execute", "(", "kwargs", ".", "pop", "(", "'db'", ",", "''", ")", ",", "query", ",", "args", ",", "*", "*", "kwargs", ")", "for", "r", "in", "cur", ":", "yield", "r", "cur", ".", "connection", ".", "close", "(", ")" ]
408520867179f99b3158b57520e2619f3fecd69b
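A hedged sketch of fetch above; it relies on an execute helper not shown in these records, and the table and column names below are made up for illustration.

import snipy.database as db  # module path assumed as above

# Rows are yielded lazily; the connection is closed once the loop finishes.
for row in db.fetch('SELECT id, word FROM vocab WHERE freq > %s', (10,), db='my_db'):
    print(row)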
valid
get_insert_query
format insert query :param table: str :param fields: list[str] :param field_count: int :return: str
snipy/database.py
def get_insert_query(table, fields=None, field_count=None): """ format insert query :param table: str :param fields: list[str] :param field_count: int :return: str """ if fields: q = 'insert into %s ({0}) values ({1});' % table l = len(fields) q = q.format(','.join(fields), ','.join(['%s'] * l)) elif field_count: q = 'insert into %s values ({0});' % table q = q.format(','.join(['%s'] * field_count)) else: raise ValueError('fields or field_count need') return q
def get_insert_query(table, fields=None, field_count=None): """ format insert query :param table: str :param fields: list[str] :param field_count: int :return: str """ if fields: q = 'insert into %s ({0}) values ({1});' % table l = len(fields) q = q.format(','.join(fields), ','.join(['%s'] * l)) elif field_count: q = 'insert into %s values ({0});' % table q = q.format(','.join(['%s'] * field_count)) else: raise ValueError('fields or field_count need') return q
[ "format", "insert", "query", ":", "param", "table", ":", "str", ":", "param", "fields", ":", "list", "[", "str", "]", ":", "param", "field_count", ":", "int", ":", "return", ":", "str" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/database.py#L145-L163
[ "def", "get_insert_query", "(", "table", ",", "fields", "=", "None", ",", "field_count", "=", "None", ")", ":", "if", "fields", ":", "q", "=", "'insert into %s ({0}) values ({1});'", "%", "table", "l", "=", "len", "(", "fields", ")", "q", "=", "q", ".", "format", "(", "','", ".", "join", "(", "fields", ")", ",", "','", ".", "join", "(", "[", "'%s'", "]", "*", "l", ")", ")", "elif", "field_count", ":", "q", "=", "'insert into %s values ({0});'", "%", "table", "q", "=", "q", ".", "format", "(", "','", ".", "join", "(", "[", "'%s'", "]", "*", "field_count", ")", ")", "else", ":", "raise", "ValueError", "(", "'fields or field_count need'", ")", "return", "q" ]
408520867179f99b3158b57520e2619f3fecd69b
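Working get_insert_query above through a hypothetical words table shows the two query shapes it produces.

from snipy.database import get_insert_query  # module path assumed from snipy/database.py

# Named fields -> explicit column list:
print(get_insert_query('words', fields=['id', 'word']))
# insert into words (id,word) values (%s,%s);

# Only a count -> positional placeholders for every column:
print(get_insert_query('words', field_count=3))
# insert into words values (%s,%s,%s);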
valid
insert
Insert a record into the db. ex) cursor.insert(table, v1, v2,...) ex) cursor.insert(table, id=v1, word=v2, commit=True) :param commit: :param cursor: :param table: :param args: :param field_values: :return:
snipy/database.py
def insert(cursor, table, *args, **field_values): """ db에 레코드 집어넣기 ex) cursor.insert(table, v1, v2,...) ex) cursor.insert(table, id=v1, word=v2, commit=True) :param commit: :param cursor: :param table: :param args: :param field_values: :return: """ commit = field_values.pop('commit', True) q, a = None, None if args is not None and len(args) > 0: q = get_insert_query(table, field_count=len(args)) a = args elif len(field_values) > 0: q = get_insert_query(table, fields=field_values.keys()) a = field_values.values() else: raise ValueError('need table, record...') cursor.execute(q, args=a) if commit: cursor.connection.commit()
def insert(cursor, table, *args, **field_values): """ db에 레코드 집어넣기 ex) cursor.insert(table, v1, v2,...) ex) cursor.insert(table, id=v1, word=v2, commit=True) :param commit: :param cursor: :param table: :param args: :param field_values: :return: """ commit = field_values.pop('commit', True) q, a = None, None if args is not None and len(args) > 0: q = get_insert_query(table, field_count=len(args)) a = args elif len(field_values) > 0: q = get_insert_query(table, fields=field_values.keys()) a = field_values.values() else: raise ValueError('need table, record...') cursor.execute(q, args=a) if commit: cursor.connection.commit()
[ "db에", "레코드", "집어넣기", "ex", ")", "cursor", ".", "insert", "(", "table", "v1", "v2", "...", ")", "ex", ")", "cursor", ".", "insert", "(", "table", "id", "=", "v1", "word", "=", "v2", "commit", "=", "True", ")", ":", "param", "commit", ":", ":", "param", "cursor", ":", ":", "param", "table", ":", ":", "param", "args", ":", ":", "param", "field_values", ":", ":", "return", ":" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/database.py#L167-L195
[ "def", "insert", "(", "cursor", ",", "table", ",", "*", "args", ",", "*", "*", "field_values", ")", ":", "commit", "=", "field_values", ".", "pop", "(", "'commit'", ",", "True", ")", "q", ",", "a", "=", "None", ",", "None", "if", "args", "is", "not", "None", "and", "len", "(", "args", ")", ">", "0", ":", "q", "=", "get_insert_query", "(", "table", ",", "field_count", "=", "len", "(", "args", ")", ")", "a", "=", "args", "elif", "len", "(", "field_values", ")", ">", "0", ":", "q", "=", "get_insert_query", "(", "table", ",", "fields", "=", "field_values", ".", "keys", "(", ")", ")", "a", "=", "field_values", ".", "values", "(", ")", "else", ":", "raise", "ValueError", "(", "'need table, record...'", ")", "cursor", ".", "execute", "(", "q", ",", "args", "=", "a", ")", "if", "commit", ":", "cursor", ".", "connection", ".", "commit", "(", ")" ]
408520867179f99b3158b57520e2619f3fecd69b
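A hedged usage sketch for insert above; the docstring's cursor.insert(...) form suggests the function may also be bound onto cursors elsewhere, so the plain call below simply follows the signature shown, with a hypothetical words table.

import snipy.database as db  # module path assumed as above

with db.database('my_db') as conn:
    cur = conn.cursor()
    # Positional form: one %s placeholder per value.
    db.insert(cur, 'words', 1, 'hello')
    # Keyword form: explicit columns; commit defaults to True.
    db.insert(cur, 'words', id=2, word='world')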
valid
update
Build and execute a db update query; committing is controlled by the commit flag. :param cursor: cursor :type cursor: Cursor :param table: table name :type table: str :param where_kv: where-condition dictionary for the update, key: field, value: equal condition only :type where_kv: dict :param field_values: fields to update as kwargs :type field_values: dict :param commit: whether to commit :type commit: bool :return:
snipy/database.py
def update(cursor, table, where_kv, commit=True, **field_values): """ db update 쿼리 빌딩 및 실행, 단, commit은 :param cursor: 커서 :type cursor: Cursor :param table: 테이블 이름 :type table: str :param where_kv: 업데이트 where 조건 dictionary, key:field, value:equal condition only :type where_kv: dict :param field_values: kwarg 업데이트용 :type field_values: dict :param commit: 커밋 여부 :type commit: bool :return: """ q = """update %s \nset {0} \nwhere {1} """ % table fields = field_values.keys() kv = ','.join(['{}=%s'.format(f) for f in fields]) where = ' and '.join(['{}=%s'.format(f) for f in where_kv.keys()]) q = q.format(kv, where) args = field_values.values() + where_kv.values() cursor.execute(q, args=args) if commit: cursor.connection.commit()
def update(cursor, table, where_kv, commit=True, **field_values): """ db update 쿼리 빌딩 및 실행, 단, commit은 :param cursor: 커서 :type cursor: Cursor :param table: 테이블 이름 :type table: str :param where_kv: 업데이트 where 조건 dictionary, key:field, value:equal condition only :type where_kv: dict :param field_values: kwarg 업데이트용 :type field_values: dict :param commit: 커밋 여부 :type commit: bool :return: """ q = """update %s \nset {0} \nwhere {1} """ % table fields = field_values.keys() kv = ','.join(['{}=%s'.format(f) for f in fields]) where = ' and '.join(['{}=%s'.format(f) for f in where_kv.keys()]) q = q.format(kv, where) args = field_values.values() + where_kv.values() cursor.execute(q, args=args) if commit: cursor.connection.commit()
[ "db", "update", "쿼리", "빌딩", "및", "실행", "단", "commit은", ":", "param", "cursor", ":", "커서", ":", "type", "cursor", ":", "Cursor", ":", "param", "table", ":", "테이블", "이름", ":", "type", "table", ":", "str", ":", "param", "where_kv", ":", "업데이트", "where", "조건", "dictionary", "key", ":", "field", "value", ":", "equal", "condition", "only", ":", "type", "where_kv", ":", "dict", ":", "param", "field_values", ":", "kwarg", "업데이트용", ":", "type", "field_values", ":", "dict", ":", "param", "commit", ":", "커밋", "여부", ":", "type", "commit", ":", "bool", ":", "return", ":" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/database.py#L199-L227
[ "def", "update", "(", "cursor", ",", "table", ",", "where_kv", ",", "commit", "=", "True", ",", "*", "*", "field_values", ")", ":", "q", "=", "\"\"\"update %s \\nset {0} \\nwhere {1} \"\"\"", "%", "table", "fields", "=", "field_values", ".", "keys", "(", ")", "kv", "=", "','", ".", "join", "(", "[", "'{}=%s'", ".", "format", "(", "f", ")", "for", "f", "in", "fields", "]", ")", "where", "=", "' and '", ".", "join", "(", "[", "'{}=%s'", ".", "format", "(", "f", ")", "for", "f", "in", "where_kv", ".", "keys", "(", ")", "]", ")", "q", "=", "q", ".", "format", "(", "kv", ",", "where", ")", "args", "=", "field_values", ".", "values", "(", ")", "+", "where_kv", ".", "values", "(", ")", "cursor", ".", "execute", "(", "q", ",", "args", "=", "args", ")", "if", "commit", ":", "cursor", ".", "connection", ".", "commit", "(", ")" ]
408520867179f99b3158b57520e2619f3fecd69b
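A hedged sketch for update above with the same hypothetical table; note the implementation concatenates dict .values() lists, i.e. Python 2 semantics, and where_kv supports equality conditions only.

import snipy.database as db  # module path assumed as above

with db.database('my_db') as conn:
    cur = conn.cursor()
    # Builds roughly: update words set word=%s where id=%s (committed by default).
    db.update(cur, 'words', {'id': 2}, word='updated')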
valid
insert_or_update
Build and execute an insert-or-update (upsert) query; committing is controlled by the commit flag. :param cursor: cursor :type cursor: Cursor :param table: table name :type table: str :param commit: whether to commit :type commit: bool :param field_values: field/value dict pairs to insert or update :type field_values: dict :return:
snipy/database.py
def insert_or_update(cursor, table, commit=True, **field_values): """ db update 쿼리 빌딩 및 실행, 단, commit은 :param cursor: 커서 :type cursor: Cursor :param table: 테이블이름 :type table: str :param commit: 커밋 여부 :type commit: bool :param field_values: insert 또는 업데이트 할 필드 및 값 dict pairs :type field_values:dict :return: """ q = """INSERT INTO %s ({0}) \nVALUES ({1}) \nON DUPLICATE KEY UPDATE {2} """ % table l = len(field_values) fields = field_values.keys() field = ','.join(fields) value = ','.join(['%s'] * l) kv = ','.join(['{}=%s'.format(f) for f in fields]) q = q.format(field, value, kv) args = field_values.values() * 2 cursor.execute(q, args=args) if commit: cursor.connection.commit()
def insert_or_update(cursor, table, commit=True, **field_values): """ db update 쿼리 빌딩 및 실행, 단, commit은 :param cursor: 커서 :type cursor: Cursor :param table: 테이블이름 :type table: str :param commit: 커밋 여부 :type commit: bool :param field_values: insert 또는 업데이트 할 필드 및 값 dict pairs :type field_values:dict :return: """ q = """INSERT INTO %s ({0}) \nVALUES ({1}) \nON DUPLICATE KEY UPDATE {2} """ % table l = len(field_values) fields = field_values.keys() field = ','.join(fields) value = ','.join(['%s'] * l) kv = ','.join(['{}=%s'.format(f) for f in fields]) q = q.format(field, value, kv) args = field_values.values() * 2 cursor.execute(q, args=args) if commit: cursor.connection.commit()
[ "db", "update", "쿼리", "빌딩", "및", "실행", "단", "commit은", ":", "param", "cursor", ":", "커서", ":", "type", "cursor", ":", "Cursor", ":", "param", "table", ":", "테이블이름", ":", "type", "table", ":", "str", ":", "param", "commit", ":", "커밋", "여부", ":", "type", "commit", ":", "bool", ":", "param", "field_values", ":", "insert", "또는", "업데이트", "할", "필드", "및", "값", "dict", "pairs", ":", "type", "field_values", ":", "dict", ":", "return", ":" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/database.py#L231-L258
[ "def", "insert_or_update", "(", "cursor", ",", "table", ",", "commit", "=", "True", ",", "*", "*", "field_values", ")", ":", "q", "=", "\"\"\"INSERT INTO %s ({0}) \\nVALUES ({1}) \\nON DUPLICATE KEY UPDATE {2} \"\"\"", "%", "table", "l", "=", "len", "(", "field_values", ")", "fields", "=", "field_values", ".", "keys", "(", ")", "field", "=", "','", ".", "join", "(", "fields", ")", "value", "=", "','", ".", "join", "(", "[", "'%s'", "]", "*", "l", ")", "kv", "=", "','", ".", "join", "(", "[", "'{}=%s'", ".", "format", "(", "f", ")", "for", "f", "in", "fields", "]", ")", "q", "=", "q", ".", "format", "(", "field", ",", "value", ",", "kv", ")", "args", "=", "field_values", ".", "values", "(", ")", "*", "2", "cursor", ".", "execute", "(", "q", ",", "args", "=", "args", ")", "if", "commit", ":", "cursor", ".", "connection", ".", "commit", "(", ")" ]
408520867179f99b3158b57520e2619f3fecd69b
valid
tojson
recursive implementation
snipy/jsonutil.py
def tojson(o): """ recursive implementation """ try: return json.encode(o) except json.EncodeError: pass try: return o.tojson() except AttributeError as e: pass t = type(o) if isinstance(o, list): return '[%s]' % ', '.join([tojson(e) for e in o]) elif isinstance(o, dict): d = ['%s:%s' % (k, tojson(v)) for k, v in o.iteritems()] return '{%s}' % ', '.join(d) elif isinstance(o, set): d = ['%s:%s' % (tojson(e)) for e in o] return '{%s}' % ', '.join(d) elif isinstance(o, np.ndarray): return numpy_to_json(o) else: raise ValueError('error, failed encoding type(%s) to json' % t)
def tojson(o): """ recursive implementation """ try: return json.encode(o) except json.EncodeError: pass try: return o.tojson() except AttributeError as e: pass t = type(o) if isinstance(o, list): return '[%s]' % ', '.join([tojson(e) for e in o]) elif isinstance(o, dict): d = ['%s:%s' % (k, tojson(v)) for k, v in o.iteritems()] return '{%s}' % ', '.join(d) elif isinstance(o, set): d = ['%s:%s' % (tojson(e)) for e in o] return '{%s}' % ', '.join(d) elif isinstance(o, np.ndarray): return numpy_to_json(o) else: raise ValueError('error, failed encoding type(%s) to json' % t)
[ "recursive", "implementation" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/jsonutil.py#L15-L40
[ "def", "tojson", "(", "o", ")", ":", "try", ":", "return", "json", ".", "encode", "(", "o", ")", "except", "json", ".", "EncodeError", ":", "pass", "try", ":", "return", "o", ".", "tojson", "(", ")", "except", "AttributeError", "as", "e", ":", "pass", "t", "=", "type", "(", "o", ")", "if", "isinstance", "(", "o", ",", "list", ")", ":", "return", "'[%s]'", "%", "', '", ".", "join", "(", "[", "tojson", "(", "e", ")", "for", "e", "in", "o", "]", ")", "elif", "isinstance", "(", "o", ",", "dict", ")", ":", "d", "=", "[", "'%s:%s'", "%", "(", "k", ",", "tojson", "(", "v", ")", ")", "for", "k", ",", "v", "in", "o", ".", "iteritems", "(", ")", "]", "return", "'{%s}'", "%", "', '", ".", "join", "(", "d", ")", "elif", "isinstance", "(", "o", ",", "set", ")", ":", "d", "=", "[", "'%s:%s'", "%", "(", "tojson", "(", "e", ")", ")", "for", "e", "in", "o", "]", "return", "'{%s}'", "%", "', '", ".", "join", "(", "d", ")", "elif", "isinstance", "(", "o", ",", "np", ".", "ndarray", ")", ":", "return", "numpy_to_json", "(", "o", ")", "else", ":", "raise", "ValueError", "(", "'error, failed encoding type(%s) to json'", "%", "t", ")" ]
408520867179f99b3158b57520e2619f3fecd69b
valid
named
namedtuple with default values named('typename', fields | *fields, default=x, [**defaults]) :param typename: :param fieldnames: :param defaults: :return:
snipy/named.py
def named(typename, *fieldnames, **defaults): """ namedtuple with default values named('typename', fields | *fields, default=x, [**defaults]) :param typename: :param fieldnames: :param defaults: :return: """ if len(fieldnames) == 1: if isinstance(fieldnames[0], str): fieldnames = tuple(fieldnames[0].replace(',', ' ').split()) elif isinstance(fieldnames[0], (list, tuple)): fieldnames = fieldnames[0] # set default of defaults default_of_defaults = defaults.pop('default', None) dfields = tuple(f for f in defaults if f not in fieldnames) T = collections.namedtuple(typename, fieldnames + dfields) T.__new__.__defaults__ = (default_of_defaults,) * len(T._fields) prototype = T(**defaults) T.__new__.__defaults__ = tuple(prototype) # make picklable globals()[typename] = T return T
def named(typename, *fieldnames, **defaults): """ namedtuple with default values named('typename', fields | *fields, default=x, [**defaults]) :param typename: :param fieldnames: :param defaults: :return: """ if len(fieldnames) == 1: if isinstance(fieldnames[0], str): fieldnames = tuple(fieldnames[0].replace(',', ' ').split()) elif isinstance(fieldnames[0], (list, tuple)): fieldnames = fieldnames[0] # set default of defaults default_of_defaults = defaults.pop('default', None) dfields = tuple(f for f in defaults if f not in fieldnames) T = collections.namedtuple(typename, fieldnames + dfields) T.__new__.__defaults__ = (default_of_defaults,) * len(T._fields) prototype = T(**defaults) T.__new__.__defaults__ = tuple(prototype) # make picklable globals()[typename] = T return T
[ "namedtuple", "with", "default", "values", "named", "(", "typename", "fields", "|", "*", "fields", "default", "=", "x", "[", "**", "defaults", "]", ")", ":", "param", "typename", ":", ":", "param", "fieldnames", ":", ":", "param", "defaults", ":", ":", "return", ":" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/named.py#L5-L33
[ "def", "named", "(", "typename", ",", "*", "fieldnames", ",", "*", "*", "defaults", ")", ":", "if", "len", "(", "fieldnames", ")", "==", "1", ":", "if", "isinstance", "(", "fieldnames", "[", "0", "]", ",", "str", ")", ":", "fieldnames", "=", "tuple", "(", "fieldnames", "[", "0", "]", ".", "replace", "(", "','", ",", "' '", ")", ".", "split", "(", ")", ")", "elif", "isinstance", "(", "fieldnames", "[", "0", "]", ",", "(", "list", ",", "tuple", ")", ")", ":", "fieldnames", "=", "fieldnames", "[", "0", "]", "# set default of defaults", "default_of_defaults", "=", "defaults", ".", "pop", "(", "'default'", ",", "None", ")", "dfields", "=", "tuple", "(", "f", "for", "f", "in", "defaults", "if", "f", "not", "in", "fieldnames", ")", "T", "=", "collections", ".", "namedtuple", "(", "typename", ",", "fieldnames", "+", "dfields", ")", "T", ".", "__new__", ".", "__defaults__", "=", "(", "default_of_defaults", ",", ")", "*", "len", "(", "T", ".", "_fields", ")", "prototype", "=", "T", "(", "*", "*", "defaults", ")", "T", ".", "__new__", ".", "__defaults__", "=", "tuple", "(", "prototype", ")", "# make picklable", "globals", "(", ")", "[", "typename", "]", "=", "T", "return", "T" ]
408520867179f99b3158b57520e2619f3fecd69b
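A worked sketch of named above; the type name, fields and defaults are arbitrary.

from snipy.named import named  # module path assumed from snipy/named.py

# 'default' is the fallback for every field; extra keywords append fields
# with their own defaults (z=0 ends up after x and y).
Point = named('Point', 'x y', default=0, z=0)
p = Point(1, 2)
print(p)  # Point(x=1, y=2, z=0)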
valid
np_seed
numpy random seed context :param seed: :return:
snipy/irandom.py
def np_seed(seed): """ numpy random seed context :param seed: :return: """ if seed is not None: state = np.random.get_state() np.random.seed(seed) yield np.random.set_state(state) else: yield
def np_seed(seed): """ numpy random seed context :param seed: :return: """ if seed is not None: state = np.random.get_state() np.random.seed(seed) yield np.random.set_state(state) else: yield
[ "numpy", "random", "seed", "context", ":", "param", "seed", ":", ":", "return", ":" ]
dade-ai/snipy
python
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/irandom.py#L7-L20
[ "def", "np_seed", "(", "seed", ")", ":", "if", "seed", "is", "not", "None", ":", "state", "=", "np", ".", "random", ".", "get_state", "(", ")", "np", ".", "random", ".", "seed", "(", "seed", ")", "yield", "np", ".", "random", ".", "set_state", "(", "state", ")", "else", ":", "yield" ]
408520867179f99b3158b57520e2619f3fecd69b
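A sketch of np_seed above, assuming it is usable as a context manager (the yield-based body suggests a contextlib.contextmanager wrapper, though no decorator appears in this record).

import numpy as np
from snipy.irandom import np_seed  # module path assumed from snipy/irandom.py

with np_seed(123):           # temporarily fixes the global NumPy RNG state
    a = np.random.rand(3)
with np_seed(123):
    b = np.random.rand(3)
print((a == b).all())        # True: same seed inside each context

with np_seed(None):          # seed=None leaves the RNG state untouched
    c = np.random.rand(3)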
valid
Hub.connect
Create and connect to socket for TCP communication with hub.
yeelightsunflower/main.py
def connect(self): """Create and connect to socket for TCP communication with hub.""" try: self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._socket.settimeout(TIMEOUT_SECONDS) self._socket.connect((self._ip, self._port)) _LOGGER.debug("Successfully created Hub at %s:%s :)", self._ip, self._port) except socket.error as error: _LOGGER.error("Error creating Hub: %s :(", error) self._socket.close()
def connect(self): """Create and connect to socket for TCP communication with hub.""" try: self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._socket.settimeout(TIMEOUT_SECONDS) self._socket.connect((self._ip, self._port)) _LOGGER.debug("Successfully created Hub at %s:%s :)", self._ip, self._port) except socket.error as error: _LOGGER.error("Error creating Hub: %s :(", error) self._socket.close()
[ "Create", "and", "connect", "to", "socket", "for", "TCP", "communication", "with", "hub", "." ]
lindsaymarkward/python-yeelight-sunflower
python
https://github.com/lindsaymarkward/python-yeelight-sunflower/blob/4ec72d005ce307f832429620ba0bcbf6b236eead/yeelightsunflower/main.py#L40-L50
[ "def", "connect", "(", "self", ")", ":", "try", ":", "self", ".", "_socket", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "self", ".", "_socket", ".", "settimeout", "(", "TIMEOUT_SECONDS", ")", "self", ".", "_socket", ".", "connect", "(", "(", "self", ".", "_ip", ",", "self", ".", "_port", ")", ")", "_LOGGER", ".", "debug", "(", "\"Successfully created Hub at %s:%s :)\"", ",", "self", ".", "_ip", ",", "self", ".", "_port", ")", "except", "socket", ".", "error", "as", "error", ":", "_LOGGER", ".", "error", "(", "\"Error creating Hub: %s :(\"", ",", "error", ")", "self", ".", "_socket", ".", "close", "(", ")" ]
4ec72d005ce307f832429620ba0bcbf6b236eead
valid
Hub.send_command
Send TCP command to hub and return response.
yeelightsunflower/main.py
def send_command(self, command): """Send TCP command to hub and return response.""" # use lock to make TCP send/receive thread safe with self._lock: try: self._socket.send(command.encode("utf8")) result = self.receive() # hub may send "status"/"new" messages that should be ignored while result.startswith("S") or result.startswith("NEW"): _LOGGER.debug("!Got response: %s", result) result = self.receive() _LOGGER.debug("Received: %s", result) return result except socket.error as error: _LOGGER.error("Error sending command: %s", error) # try re-connecting socket self.connect() return ""
def send_command(self, command): """Send TCP command to hub and return response.""" # use lock to make TCP send/receive thread safe with self._lock: try: self._socket.send(command.encode("utf8")) result = self.receive() # hub may send "status"/"new" messages that should be ignored while result.startswith("S") or result.startswith("NEW"): _LOGGER.debug("!Got response: %s", result) result = self.receive() _LOGGER.debug("Received: %s", result) return result except socket.error as error: _LOGGER.error("Error sending command: %s", error) # try re-connecting socket self.connect() return ""
[ "Send", "TCP", "command", "to", "hub", "and", "return", "response", "." ]
lindsaymarkward/python-yeelight-sunflower
python
https://github.com/lindsaymarkward/python-yeelight-sunflower/blob/4ec72d005ce307f832429620ba0bcbf6b236eead/yeelightsunflower/main.py#L58-L75
[ "def", "send_command", "(", "self", ",", "command", ")", ":", "# use lock to make TCP send/receive thread safe", "with", "self", ".", "_lock", ":", "try", ":", "self", ".", "_socket", ".", "send", "(", "command", ".", "encode", "(", "\"utf8\"", ")", ")", "result", "=", "self", ".", "receive", "(", ")", "# hub may send \"status\"/\"new\" messages that should be ignored", "while", "result", ".", "startswith", "(", "\"S\"", ")", "or", "result", ".", "startswith", "(", "\"NEW\"", ")", ":", "_LOGGER", ".", "debug", "(", "\"!Got response: %s\"", ",", "result", ")", "result", "=", "self", ".", "receive", "(", ")", "_LOGGER", ".", "debug", "(", "\"Received: %s\"", ",", "result", ")", "return", "result", "except", "socket", ".", "error", "as", "error", ":", "_LOGGER", ".", "error", "(", "\"Error sending command: %s\"", ",", "error", ")", "# try re-connecting socket", "self", ".", "connect", "(", ")", "return", "\"\"" ]
4ec72d005ce307f832429620ba0bcbf6b236eead
valid
Hub.receive
Receive TCP response, looping to get whole thing or timeout.
yeelightsunflower/main.py
def receive(self): """Receive TCP response, looping to get whole thing or timeout.""" try: buffer = self._socket.recv(BUFFER_SIZE) except socket.timeout as error: # Something is wrong, assume it's offline temporarily _LOGGER.error("Error receiving: %s", error) # self._socket.close() return "" # Read until a newline or timeout buffering = True response = '' while buffering: if '\n' in buffer.decode("utf8"): response = buffer.decode("utf8").split('\n')[0] buffering = False else: try: more = self._socket.recv(BUFFER_SIZE) except socket.timeout: more = None if not more: buffering = False response = buffer.decode("utf8") else: buffer += more return response
def receive(self): """Receive TCP response, looping to get whole thing or timeout.""" try: buffer = self._socket.recv(BUFFER_SIZE) except socket.timeout as error: # Something is wrong, assume it's offline temporarily _LOGGER.error("Error receiving: %s", error) # self._socket.close() return "" # Read until a newline or timeout buffering = True response = '' while buffering: if '\n' in buffer.decode("utf8"): response = buffer.decode("utf8").split('\n')[0] buffering = False else: try: more = self._socket.recv(BUFFER_SIZE) except socket.timeout: more = None if not more: buffering = False response = buffer.decode("utf8") else: buffer += more return response
[ "Receive", "TCP", "response", "looping", "to", "get", "whole", "thing", "or", "timeout", "." ]
lindsaymarkward/python-yeelight-sunflower
python
https://github.com/lindsaymarkward/python-yeelight-sunflower/blob/4ec72d005ce307f832429620ba0bcbf6b236eead/yeelightsunflower/main.py#L77-L104
[ "def", "receive", "(", "self", ")", ":", "try", ":", "buffer", "=", "self", ".", "_socket", ".", "recv", "(", "BUFFER_SIZE", ")", "except", "socket", ".", "timeout", "as", "error", ":", "# Something is wrong, assume it's offline temporarily", "_LOGGER", ".", "error", "(", "\"Error receiving: %s\"", ",", "error", ")", "# self._socket.close()", "return", "\"\"", "# Read until a newline or timeout", "buffering", "=", "True", "response", "=", "''", "while", "buffering", ":", "if", "'\\n'", "in", "buffer", ".", "decode", "(", "\"utf8\"", ")", ":", "response", "=", "buffer", ".", "decode", "(", "\"utf8\"", ")", ".", "split", "(", "'\\n'", ")", "[", "0", "]", "buffering", "=", "False", "else", ":", "try", ":", "more", "=", "self", ".", "_socket", ".", "recv", "(", "BUFFER_SIZE", ")", "except", "socket", ".", "timeout", ":", "more", "=", "None", "if", "not", "more", ":", "buffering", "=", "False", "response", "=", "buffer", ".", "decode", "(", "\"utf8\"", ")", "else", ":", "buffer", "+=", "more", "return", "response" ]
4ec72d005ce307f832429620ba0bcbf6b236eead
valid
Hub.get_data
Get current light data as dictionary with light zids as keys.
yeelightsunflower/main.py
def get_data(self): """Get current light data as dictionary with light zids as keys.""" response = self.send_command(GET_LIGHTS_COMMAND) _LOGGER.debug("get_data response: %s", repr(response)) if not response: _LOGGER.debug("Empty response: %s", response) return {} response = response.strip() # Check string before splitting (avoid IndexError if malformed) if not (response.startswith("GLB") and response.endswith(";")): _LOGGER.debug("Invalid response: %s", repr(response)) return {} # deconstruct response string into light data. Example data: # GLB 143E,1,1,25,255,255,255,0,0;287B,1,1,22,255,255,255,0,0;\r\n response = response[4:-3] # strip start (GLB) and end (;\r\n) light_strings = response.split(';') light_data_by_id = {} for light_string in light_strings: values = light_string.split(',') try: light_data_by_id[values[0]] = [int(values[2]), int(values[4]), int(values[5]), int(values[6]), int(values[7])] except ValueError as error: _LOGGER.error("Error %s: %s (%s)", error, values, response) except IndexError as error: _LOGGER.error("Error %s: %s (%s)", error, values, response) return light_data_by_id
def get_data(self): """Get current light data as dictionary with light zids as keys.""" response = self.send_command(GET_LIGHTS_COMMAND) _LOGGER.debug("get_data response: %s", repr(response)) if not response: _LOGGER.debug("Empty response: %s", response) return {} response = response.strip() # Check string before splitting (avoid IndexError if malformed) if not (response.startswith("GLB") and response.endswith(";")): _LOGGER.debug("Invalid response: %s", repr(response)) return {} # deconstruct response string into light data. Example data: # GLB 143E,1,1,25,255,255,255,0,0;287B,1,1,22,255,255,255,0,0;\r\n response = response[4:-3] # strip start (GLB) and end (;\r\n) light_strings = response.split(';') light_data_by_id = {} for light_string in light_strings: values = light_string.split(',') try: light_data_by_id[values[0]] = [int(values[2]), int(values[4]), int(values[5]), int(values[6]), int(values[7])] except ValueError as error: _LOGGER.error("Error %s: %s (%s)", error, values, response) except IndexError as error: _LOGGER.error("Error %s: %s (%s)", error, values, response) return light_data_by_id
[ "Get", "current", "light", "data", "as", "dictionary", "with", "light", "zids", "as", "keys", "." ]
lindsaymarkward/python-yeelight-sunflower
python
https://github.com/lindsaymarkward/python-yeelight-sunflower/blob/4ec72d005ce307f832429620ba0bcbf6b236eead/yeelightsunflower/main.py#L106-L134
[ "def", "get_data", "(", "self", ")", ":", "response", "=", "self", ".", "send_command", "(", "GET_LIGHTS_COMMAND", ")", "_LOGGER", ".", "debug", "(", "\"get_data response: %s\"", ",", "repr", "(", "response", ")", ")", "if", "not", "response", ":", "_LOGGER", ".", "debug", "(", "\"Empty response: %s\"", ",", "response", ")", "return", "{", "}", "response", "=", "response", ".", "strip", "(", ")", "# Check string before splitting (avoid IndexError if malformed)", "if", "not", "(", "response", ".", "startswith", "(", "\"GLB\"", ")", "and", "response", ".", "endswith", "(", "\";\"", ")", ")", ":", "_LOGGER", ".", "debug", "(", "\"Invalid response: %s\"", ",", "repr", "(", "response", ")", ")", "return", "{", "}", "# deconstruct response string into light data. Example data:", "# GLB 143E,1,1,25,255,255,255,0,0;287B,1,1,22,255,255,255,0,0;\\r\\n", "response", "=", "response", "[", "4", ":", "-", "3", "]", "# strip start (GLB) and end (;\\r\\n)", "light_strings", "=", "response", ".", "split", "(", "';'", ")", "light_data_by_id", "=", "{", "}", "for", "light_string", "in", "light_strings", ":", "values", "=", "light_string", ".", "split", "(", "','", ")", "try", ":", "light_data_by_id", "[", "values", "[", "0", "]", "]", "=", "[", "int", "(", "values", "[", "2", "]", ")", ",", "int", "(", "values", "[", "4", "]", ")", ",", "int", "(", "values", "[", "5", "]", ")", ",", "int", "(", "values", "[", "6", "]", ")", ",", "int", "(", "values", "[", "7", "]", ")", "]", "except", "ValueError", "as", "error", ":", "_LOGGER", ".", "error", "(", "\"Error %s: %s (%s)\"", ",", "error", ",", "values", ",", "response", ")", "except", "IndexError", "as", "error", ":", "_LOGGER", ".", "error", "(", "\"Error %s: %s (%s)\"", ",", "error", ",", "values", ",", "response", ")", "return", "light_data_by_id" ]
4ec72d005ce307f832429620ba0bcbf6b236eead
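Tracing get_data above on the example payload from its own comment shows which columns survive into the per-zid list (indexes 1, 3 and 8 are dropped); get_lights then unpacks each list as online, red, green, blue, level.

# Raw hub response:
#   GLB 143E,1,1,25,255,255,255,0,0;287B,1,1,22,255,255,255,0,0;\r\n
# get_data() returns, keyed by zid:
#   {'143E': [1, 255, 255, 255, 0], '287B': [1, 255, 255, 255, 0]}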
valid
Hub.get_lights
Get current light data, set and return as list of Bulb objects.
yeelightsunflower/main.py
def get_lights(self): """Get current light data, set and return as list of Bulb objects.""" # Throttle updates. Use cached data if within UPDATE_INTERVAL_SECONDS now = datetime.datetime.now() if (now - self._last_updated) < datetime.timedelta( seconds=UPDATE_INTERVAL_SECONDS): # _LOGGER.debug("Using cached light data") return self._bulbs else: self._last_updated = now light_data = self.get_data() _LOGGER.debug("got: %s", light_data) if not light_data: return [] if self._bulbs: # Bulbs already created, just update values for bulb in self._bulbs: # use the values for the bulb with the correct ID try: values = light_data[bulb.zid] bulb._online, bulb._red, bulb._green, bulb._blue, \ bulb._level = values except KeyError: pass else: for light_id in light_data: self._bulbs.append(Bulb(self, light_id, *light_data[light_id])) # return a list of Bulb objects return self._bulbs
def get_lights(self): """Get current light data, set and return as list of Bulb objects.""" # Throttle updates. Use cached data if within UPDATE_INTERVAL_SECONDS now = datetime.datetime.now() if (now - self._last_updated) < datetime.timedelta( seconds=UPDATE_INTERVAL_SECONDS): # _LOGGER.debug("Using cached light data") return self._bulbs else: self._last_updated = now light_data = self.get_data() _LOGGER.debug("got: %s", light_data) if not light_data: return [] if self._bulbs: # Bulbs already created, just update values for bulb in self._bulbs: # use the values for the bulb with the correct ID try: values = light_data[bulb.zid] bulb._online, bulb._red, bulb._green, bulb._blue, \ bulb._level = values except KeyError: pass else: for light_id in light_data: self._bulbs.append(Bulb(self, light_id, *light_data[light_id])) # return a list of Bulb objects return self._bulbs
[ "Get", "current", "light", "data", "set", "and", "return", "as", "list", "of", "Bulb", "objects", "." ]
lindsaymarkward/python-yeelight-sunflower
python
https://github.com/lindsaymarkward/python-yeelight-sunflower/blob/4ec72d005ce307f832429620ba0bcbf6b236eead/yeelightsunflower/main.py#L136-L166
[ "def", "get_lights", "(", "self", ")", ":", "# Throttle updates. Use cached data if within UPDATE_INTERVAL_SECONDS", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "if", "(", "now", "-", "self", ".", "_last_updated", ")", "<", "datetime", ".", "timedelta", "(", "seconds", "=", "UPDATE_INTERVAL_SECONDS", ")", ":", "# _LOGGER.debug(\"Using cached light data\")", "return", "self", ".", "_bulbs", "else", ":", "self", ".", "_last_updated", "=", "now", "light_data", "=", "self", ".", "get_data", "(", ")", "_LOGGER", ".", "debug", "(", "\"got: %s\"", ",", "light_data", ")", "if", "not", "light_data", ":", "return", "[", "]", "if", "self", ".", "_bulbs", ":", "# Bulbs already created, just update values", "for", "bulb", "in", "self", ".", "_bulbs", ":", "# use the values for the bulb with the correct ID", "try", ":", "values", "=", "light_data", "[", "bulb", ".", "zid", "]", "bulb", ".", "_online", ",", "bulb", ".", "_red", ",", "bulb", ".", "_green", ",", "bulb", ".", "_blue", ",", "bulb", ".", "_level", "=", "values", "except", "KeyError", ":", "pass", "else", ":", "for", "light_id", "in", "light_data", ":", "self", ".", "_bulbs", ".", "append", "(", "Bulb", "(", "self", ",", "light_id", ",", "*", "light_data", "[", "light_id", "]", ")", ")", "# return a list of Bulb objects", "return", "self", ".", "_bulbs" ]
4ec72d005ce307f832429620ba0bcbf6b236eead
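A hedged end-to-end sketch tying the Hub methods above together; the constructor is assumed to take the hub's IP (and optionally port), which the self._ip / self._port attributes suggest but which is not shown in these records, and the IP below is a placeholder.

from yeelightsunflower.main import Hub  # module path assumed from yeelightsunflower/main.py

hub = Hub('192.168.1.59')    # constructor signature assumed; IP is a placeholder
bulbs = hub.get_lights()     # results are cached for UPDATE_INTERVAL_SECONDS
for bulb in bulbs:
    print(bulb.zid)          # zid attribute is used by get_lights above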