Columns: INSTRUCTION (string, lengths 1 to 8.43k); RESPONSE (string, lengths 75 to 104k)
A boolean matrix, m[i, j] == True if there is a relation term(i) -> term(j). :return: a np.matrix (len(dictionary), len(dictionary)) of boolean
def connexity(self): """ A boolean matrix, m[i, j] == True if there is a relation term(i) -> term(j) :return: a np.matrix (len(dictionary), len(dictionary)) of boolean """ return np.matrix(sum(self.relations.values()).todense(), dtype=bool)
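A minimal, self-contained sketch of the pattern used in connexity, assuming relations is a dict of equally shaped scipy.sparse matrices (the matrices and relation names below are made up):

import numpy as np
from scipy.sparse import csr_matrix

relations = {
    "contains": csr_matrix(np.array([[0, 1, 0], [0, 0, 0], [0, 0, 0]])),
    "opposed": csr_matrix(np.array([[0, 0, 0], [0, 0, 1], [0, 0, 0]])),
}

# Summing the sparse matrices merges every relation kind; the boolean cast marks
# m[i, j] == True whenever at least one relation term(i) -> term(j) exists.
m = np.matrix(sum(relations.values()).todense(), dtype=bool)
print(m)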
path is a multiplication of coordinates, or a single coordinate
def _resolve_path(obj, path): """path is a mul of coord or a coord""" if obj.__class__ not in path.context.accept: result = set() for ctx in path.context.accept: result |= {e for u in obj[ctx] for e in _resolve_path(u, path)} return result if isinstance(obj, Text): if path.index is not None: return {obj.children[path.index]} return set(obj.children) if isinstance(obj, (Fact, Theory)): return _resolve_path_tree_graph(obj.tree_graph, path) if isinstance(obj, Topic): if path.kind == 'r': if path.index is not None: return {obj.root[path.index]} return set(obj.root) else: if path.index is not None: return {obj.flexing[path.index]} return set(obj.flexing)
Resolve the context of the rules (the type of this element) and build the ieml element.
def _resolve_ctx(rules): """ Resolve the context of the rules (the type of this element), and building the ieml element. :param rules: :return: """ if not rules: raise ResolveError("Missing node definition.") # if rules == [(None, e)] --> e if len(rules) == 1 and rules[0][0] is None: return rules[0][1] if any(r[0] is None for r in rules): raise ResolveError("Multiple definition, multiple ieml object provided for the same node.") if any(not isinstance(r[0], Path) for r in rules): raise ResolveError("Must have only path instance.") # resolve all the possible types for this element r0 = rules[0] types = _inferred_types(*r0) for r in rules[1:]: types = types.intersection(_inferred_types(*r)) if not types: raise ResolveError("No definition, no type inferred on rules list.") if len(types) > 1: raise ResolveError("Multiple definition, multiple type inferred on rules list.") type = next(types.__iter__()) if type == Topic: error, deps = _build_deps_topic(rules) if error: return flexing = None if deps['f']: flexing = deps['f'] if not deps['r']: raise ResolveError("No root for the topic node.") return topic(deps['r'], flexing) if type == Text: error, deps = _build_deps_text(rules) if error: return return text(deps) if type in (Theory, Fact): error, deps = _build_deps_tree_graph(rules) if error: return if type == Fact: clauses = [] for s, a, m in deps: clauses.append((s, a, m)) return fact(clauses) else: clauses = [] for s, a, m in deps: clauses.append((s, a, m)) return theory(clauses) raise ResolveError("Invalid type inferred %s"%type.__name__)
usls is an iterable of usl.
def project_usls_on_dictionary(usls, allowed_terms=None): """`usls` is an iterable of usl. return a mapping term -> usl list """ cells_to_usls = defaultdict(set) tables = set() for u in usls: for t in u.objects(Term): for c in t.singular_sequences: # This is the first time we meet the cell c if not cells_to_usls[c]: tables.update(c.relations.contained) cells_to_usls[c].add(u) if allowed_terms: allowed_terms = set(allowed_terms) tables = tables & allowed_terms cells_to_usls = {c: l for c, l in cells_to_usls.items() if c in allowed_terms} tables_to_usls = { table: list(set(u for c in table.singular_sequences for u in cells_to_usls[c])) for table in tables if not isinstance(table, TableSet) } return tables_to_usls
usls_data: a mapping usl => data[]
def project_usl_with_data(usls_data, metric=None): """ usls_data: usl => data[] :param usls_data: :return: """ projection = project_usls_on_dictionary(usls_data) all_terms = set(c for u in usls_data for t in u.objects(Term) for c in t.singular_sequences) if metric is None: metric = lambda e: len(e['posts']) * len(all_terms.intersection(e['table'].singular_sequences)) return sorted(({ 'table': table, 'usls': usls, 'posts': list(set(chain.from_iterable(usls_data[u] for u in usls))) } for table, usls in projection.items()), key=metric, reverse=True)
script_lvl_0: PRIMITIVE LAYER0_MARK | REMARKABLE_ADDITION LAYER0_MARK
def p_script_lvl_0(self, p): """ script_lvl_0 : PRIMITIVE LAYER0_MARK | REMARKABLE_ADDITION LAYER0_MARK""" if p[1] == 'E': p[0] = NullScript(layer=0) elif p[1] in REMARKABLE_ADDITION: p[0] = AdditiveScript(character=p[1]) else: p[0] = MultiplicativeScript(character=p[1])
sum_lvl_0: script_lvl_0 | script_lvl_0 PLUS sum_lvl_0
def p_sum_lvl_0(self, p): """ sum_lvl_0 : script_lvl_0 | script_lvl_0 PLUS sum_lvl_0""" if len(p) == 4: p[3].append(p[1]) p[0] = p[3] else: p[0] = [p[1]]
script_lvl_1: additive_script_lvl_0 LAYER1_MARK | additive_script_lvl_0 additive_script_lvl_0 LAYER1_MARK | additive_script_lvl_0 additive_script_lvl_0 additive_script_lvl_0 LAYER1_MARK | REMARKABLE_MULTIPLICATION LAYER1_MARK
def p_script_lvl_1(self, p): """ script_lvl_1 : additive_script_lvl_0 LAYER1_MARK | additive_script_lvl_0 additive_script_lvl_0 LAYER1_MARK | additive_script_lvl_0 additive_script_lvl_0 additive_script_lvl_0 LAYER1_MARK | REMARKABLE_MULTIPLICATION LAYER1_MARK""" if isinstance(p[1], AdditiveScript): if len(p) == 3: p[0] = MultiplicativeScript(substance=p[1]) elif len(p) == 4: p[0] = MultiplicativeScript(substance=p[1], attribute=p[2]) else: p[0] = MultiplicativeScript(substance=p[1], attribute=p[2], mode=p[3]) else: p[0] = MultiplicativeScript(character=p[1])
sum_lvl_1: script_lvl_1 | script_lvl_1 PLUS sum_lvl_1
def p_sum_lvl_1(self, p): """ sum_lvl_1 : script_lvl_1 | script_lvl_1 PLUS sum_lvl_1""" if len(p) == 4: p[3].append(p[1]) p[0] = p[3] else: p[0] = [p[1]]
Compute the ordering of a list of usls from each usl and return the matrix m such that, for each u in usl_list at index i, [usl_list[j] for j in m[i, :]] is the list sorted by proximity from u. :param usl_list: a list of usls :return: a (len(usl_list), len(usl_list)) np.array
def square_order_matrix(usl_list): """ Compute the ordering of a list of usls from each usl and return the matrix m s.t. for each u in usl_list at index i, [usl_list[j] for j in m[i, :]] is the list sorted by proximity from u. of the result :param usl_list: a list of usls :return: a (len(usl_list), len(usl_list)) np.array """ usl_list = list(usl_list) indexes = { u: i for i, u in enumerate(usl_list) } order_mat = np.zeros(shape=(len(usl_list), len(usl_list)), dtype=int) for u in usl_list: sorted_list = QuerySort(u).sort(collection=usl_list) for i, u_s in enumerate(sorted_list): order_mat[indexes[u], indexes[u_s]] = i return order_mat
True when the term is a subset of this term's tables. If the parent of this term is already a TableSet, always return False (only one main tableset is allowed).
def accept_script(self, script): """ True when the term is a subset of this term tables. If the parent of this term is already a TableSet,return always false (only one main tableset) :param term: :return: """ if isinstance(self.parent, TableSet): return False, False tables = [table for table in self.script.tables_script if table in script] if len(tables) >= 1 and {ss for t in tables for ss in t.singular_sequences} == set(script.singular_sequences): return True, False return False, False
Slow method; retrieves all the terms from the database.
def _build_pools(self): """ Slow method, retrieve all the terms from the database. :return: """ if self.level >= Topic: # words self.topics_pool = set(self.topic() for i in range(self.pool_size)) if self.level >= Fact: # sentences self.facts_pool = set(self.fact() for i in range(self.pool_size)) if self.level >= Theory: self.theories_pool = set(self.theory() for i in range(self.pool_size)) if self.level >= Text: self.propositions_pool = set(chain.from_iterable((self.topics_pool, self.facts_pool, self.theories_pool)))
Returns the mean value.
def mean(self): """Returns the mean value.""" if self.counter.value > 0: return self.sum.value / self.counter.value return 0.0
Returns variance
def variance(self): """Returns variance""" if self.counter.value <= 1: return 0.0 return self.var.value[1] / (self.counter.value - 1)
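Dividing by counter - 1 above is the sample-variance form, which suggests var.value[1] accumulates the running sum of squared deviations (Welford's M2). A self-contained sketch of that update rule, with hypothetical names:

class RunningVariance:
    """Welford's online algorithm: track count, mean and M2 incrementally."""
    def __init__(self):
        self.count = 0
        self.mean = 0.0
        self.m2 = 0.0  # sum of squared deviations from the running mean

    def add(self, x):
        self.count += 1
        delta = x - self.mean
        self.mean += delta / self.count
        self.m2 += delta * (x - self.mean)

    def variance(self):
        # mirrors var.value[1] / (counter.value - 1) in the method above
        return self.m2 / (self.count - 1) if self.count > 1 else 0.0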
Record an event with the meter. By default it will record one event.
def mark(self, value=1): """Record an event with the meter. By default it will record one event. :param value: number of event to record """ self.counter += value self.m1_rate.update(value) self.m5_rate.update(value) self.m15_rate.update(value)
Returns the mean rate of the events since the start of the process.
def mean_rate(self): """ Returns the mean rate of the events since the start of the process. """ if self.counter.value == 0: return 0.0 else: elapsed = time() - self.start_time return self.counter.value / elapsed
Record an event with the derive.
def mark(self, value=1): """Record an event with the derive. :param value: counter value to record """ last = self.last.get_and_set(value) if last <= value: value = value - last super(Derive, self).mark(value)
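To illustrate the derive semantics above with made-up counter readings: each mark records the increase since the previous reading, except after a counter reset, where the raw reading is recorded instead.

readings = [100, 130, 128]   # hypothetical monotonic counter that resets at the end
last = 0
for value in readings:
    previous, last = last, value
    delta = value - previous if previous <= value else value
    print("mark", delta)     # prints 100, then 30, then 128 (reset detected)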
Wrapper to make map() behave the same on Py2 and Py3.
def mmap(func, iterable): """Wrapper to make map() behave the same on Py2 and Py3.""" if sys.version_info[0] > 2: return [i for i in map(func, iterable)] else: return map(func, iterable)
Send metric and its snapshot.
def send_metric(self, name, metric): """Send metric and its snapshot.""" config = SERIALIZER_CONFIG[class_name(metric)] mmap( self._buffered_send_metric, self.serialize_metric( metric, name, config['keys'], config['serialized_type'] ) ) if hasattr(metric, 'snapshot') and config.get('snapshot_keys'): mmap( self._buffered_send_metric, self.serialize_metric( metric.snapshot, name, config['snapshot_keys'], config['serialized_type'] ) )
Serialize and send available measures of a metric.
def serialize_metric(self, metric, m_name, keys, m_type): """Serialize and send available measures of a metric.""" return [ self.format_metric_string(m_name, getattr(metric, key), m_type) for key in keys ]
Compose a statsd compatible string for a metric's measurement.
def format_metric_string(self, name, value, m_type): """Compose a statsd compatible string for a metric's measurement.""" # NOTE(romcheg): This serialized metric template is based on # statsd's documentation. template = '{name}:{value}|{m_type}\n' if self.prefix: name = "{prefix}.{m_name}".format(prefix=self.prefix, m_name=name) return template.format(name=name, value=value, m_type=m_type)
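For illustration, the serialized line follows the plain statsd wire format name:value|type; a hypothetical call on a reporter configured with prefix "myapp" would behave like this:

# format_metric_string("requests", 12, "c") would produce "myapp.requests:12|c\n"
template = '{name}:{value}|{m_type}\n'
line = template.format(name="myapp.requests", value=12, m_type="c")
assert line == "myapp.requests:12|c\n"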
Add a metric to the buffer.
def _buffered_send_metric(self, metric_str): """Add a metric to the buffer.""" self.batch_count += 1 self.batch_buffer += metric_str # NOTE(romcheg): Send metrics if the number of metrics in the buffer # has reached the threshold for sending. if self.batch_count >= self.batch_size: self._send()
Get method that raises MissingSetting if the value was unset.
def get(self, section, option, **kwargs): """ Get method that raises MissingSetting if the value was unset. This differs from the SafeConfigParser which may raise either a NoOptionError or a NoSectionError. We take extra **kwargs because the Python 3.5 configparser extends the get method signature and it calls self with those parameters. def get(self, section, option, *, raw=False, vars=None, fallback=_UNSET): """ try: ret = super(ExactOnlineConfig, self).get(section, option, **kwargs) except (NoOptionError, NoSectionError): raise MissingSetting(option, section) return ret
Set method that (1) auto-saves if possible and (2) auto-creates sections.
def set(self, section, option, value): """ Set method that (1) auto-saves if possible and (2) auto-creates sections. """ try: super(ExactOnlineConfig, self).set(section, option, value) except NoSectionError: self.add_section(section) super(ExactOnlineConfig, self).set(section, option, value) # Save automatically! self.save()
json.loads wants a unicode str in Python 3. Convert it.
def _json_safe(data): """ json.loads wants an unistr in Python3. Convert it. """ if not hasattr(data, 'encode'): try: data = data.decode('utf-8') except UnicodeDecodeError: raise ValueError( 'Expected valid UTF8 for JSON data, got %r' % (data,)) return data
Shortcut for urlopen (POST) + read. We'll probably want to add a nice timeout here later too.
def http_post(url, data=None, opt=opt_default): """ Shortcut for urlopen (POST) + read. We'll probably want to add a nice timeout here later too. """ return _http_request(url, method='POST', data=_marshalled(data), opt=opt)
Shortcut for urlopen (PUT) + read. We'll probably want to add a nice timeout here later too.
def http_put(url, data=None, opt=opt_default): """ Shortcut for urlopen (PUT) + read. We'll probably want to add a nice timeout here later too. """ return _http_request(url, method='PUT', data=_marshalled(data), opt=opt)
Connect to a host on a given (SSL) port.
def connect(self): "Connect to a host on a given (SSL) port." sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address) if self._tunnel_host: self.sock = sock self._tunnel() # Python 2.7.9+ if create_default_context: # Newer python will use the "right" cacert file automatically. So # the default of None can safely be passed along. ctx = create_default_context(cafile=self.cacert_file) sock = ctx.wrap_socket(sock, server_hostname=self.host) else: # Take the supplied file, or FALLBACK_CACERT_FILE if nothing # was supplied. cacert_file = self.cacert_file or FALLBACK_CACERT_FILE sock = ssl.wrap_socket(sock, ca_certs=cacert_file, cert_reqs=ssl.CERT_REQUIRED) self.sock = sock
Base method to fetch values and to set defaults in case they don't exist.
def get_or_set_default(self, section, option, value): """ Base method to fetch values and to set defaults in case they don't exist. """ try: ret = self.get(section, option) except MissingSetting: self.set(section, option, value) ret = value return ret
Convert a set of human codes to a dict of code to exactonline guid mappings.
def get_ledger_code_to_guid_map(self, codes): """ Convert set of human codes and to a dict of code to exactonline guid mappings. Example:: ret = inv.get_ledger_code_to_guid_map(['1234', '5555']) ret == {'1234': '<guid1_from_exactonline_ledgeraccounts>', '5555': '<guid2_from_exactonline_ledgeraccounts>'} """ if codes: codes = set(str(i) for i in codes) ledger_ids = self._api.ledgeraccounts.filter(code__in=codes) ret = dict((str(i['Code']), i['ID']) for i in ledger_ids) found = set(ret.keys()) missing = (codes - found) if missing: raise UnknownLedgerCodes(missing) return ret return {}
Get VATCode (up to three digit number) for the specified ledger line.
def get_vatcode_for_ledger_line(self, ledger_line): """ Get VATCode (up to three digit number) for the specified ledger line. Can be as simple as: return '0 ' # one VAT category only Or more complicated, like: if ledger_line['vat_percentage'] == 21: return '2 ' # high VAT assert ledger_line['vat_percentage'] == 0 customer = self._bosso_invoice.customer assert customer.has_vat_number() if customer.is_in_nl(): return '0 ' # no VAT elif customer.is_in_eu(): return '7 ' # inside EU, no VAT return '6 ' # outside EU, no VAT """ # Exact accepts receiving 'VATPercentage', but only when it is # higher than 0. Possibly because we have more than one match # for 0%? So, we'll have to fetch the right VATCode instead. vat_percentage = ledger_line['vat_percentage'] if vat_percentage == 0: vatcode = '0 ' # FIXME: hardcoded.. fetch from API? elif vat_percentage == 21: vatcode = '2 ' # FIXME: hardcoded.. fetch from API? else: raise NotImplementedError('Unknown VAT: %s' % (vat_percentage,)) return vatcode
Get the current division and return a dictionary of divisions so the user can select the right one.
def get_divisions(self): """ Get the "current" division and return a dictionary of divisions so the user can select the right one. """ ret = self.rest(GET('v1/current/Me?$select=CurrentDivision')) current_division = ret[0]['CurrentDivision'] assert isinstance(current_division, int) urlbase = 'v1/%d/' % (current_division,) resource = urljoin(urlbase, 'hrm/Divisions?$select=Code,Description') ret = self.rest(GET(resource)) choices = dict((i['Code'], i['Description']) for i in ret) return choices, current_division
Select the current division that we'll be working on/with.
def set_division(self, division): """ Select the "current" division that we'll be working on/with. """ try: division = int(division) except (TypeError, ValueError): raise V1DivisionError('Supplied division %r is not a number' % (division,)) urlbase = 'v1/%d/' % (division,) resource = urljoin( urlbase, "crm/Accounts?$select=ID&$filter=Name+eq+'DOES_NOT_EXIST'") try: self.rest(GET(resource)) except AssertionError: raise V1DivisionError('Invalid division %r according to server' % (division,)) self.storage.set_division(division)
Optionally supply a list of ExactOnline invoice numbers.
def map_exact2foreign_invoice_numbers(self, exact_invoice_numbers=None): """ Optionally supply a list of ExactOnline invoice numbers. Returns a dictionary of ExactOnline invoice numbers to foreign (YourRef) invoice numbers. """ # Quick, select all. Not the most nice to the server though. if exact_invoice_numbers is None: ret = self.filter(select='InvoiceNumber,YourRef') return dict((i['InvoiceNumber'], i['YourRef']) for i in ret) # Slower, select what we want to know. More work for us. exact_to_foreign_map = {} # Do it in batches. If we append 300 InvoiceNumbers at once, we # get a 12kB URI. (If the list is empty, we skip the entire # forloop and correctly return the empty dict.) exact_invoice_numbers = list(set(exact_invoice_numbers)) # unique for offset in range(0, len(exact_invoice_numbers), 40): batch = exact_invoice_numbers[offset:(offset + 40)] filter_ = ' or '.join( 'InvoiceNumber eq %s' % (i,) for i in batch) assert filter_ # if filter was empty, we'd get all! ret = self.filter(filter=filter_, select='InvoiceNumber,YourRef') exact_to_foreign_map.update( dict((i['InvoiceNumber'], i['YourRef']) for i in ret)) # Any values we missed? for exact_invoice_number in exact_invoice_numbers: if exact_invoice_number not in exact_to_foreign_map: exact_to_foreign_map[exact_invoice_number] = None return exact_to_foreign_map
Optionally supply a list of foreign ( your ) invoice numbers.
def map_foreign2exact_invoice_numbers(self, foreign_invoice_numbers=None): """ Optionally supply a list of foreign (your) invoice numbers. Returns a dictionary of your invoice numbers (YourRef) to Exact Online invoice numbers. """ # Quick, select all. Not the most nice to the server though. if foreign_invoice_numbers is None: ret = self.filter(select='InvoiceNumber,YourRef') return dict((i['YourRef'], i['InvoiceNumber']) for i in ret) # Slower, select what we want to know. More work for us. foreign_to_exact_map = {} # Do it in batches. If we append 300 InvoiceNumbers at once, we # get a 12kB URI. (If the list is empty, we skip the entire # forloop and correctly return the empty dict.) foreign_invoice_numbers = list(set(foreign_invoice_numbers)) # unique for offset in range(0, len(foreign_invoice_numbers), 40): batch = foreign_invoice_numbers[offset:(offset + 40)] filter_ = ' or '.join( 'YourRef eq %s' % (self._remote_invoice_number(i),) for i in batch) assert filter_ # if filter was empty, we'd get all! ret = self.filter(filter=filter_, select='InvoiceNumber,YourRef') foreign_to_exact_map.update( dict((i['YourRef'], i['InvoiceNumber']) for i in ret)) # Any values we missed? for foreign_invoice_number in foreign_invoice_numbers: if foreign_invoice_number not in foreign_to_exact_map: foreign_to_exact_map[foreign_invoice_number] = None return foreign_to_exact_map
A common query would be duedate__lt=date(2015, 1, 1) to get all Receivables that are due in 2014 and earlier.
def filter(self, relation_id=None, duedate__lt=None, duedate__gte=None, **kwargs): """ A common query would be duedate__lt=date(2015, 1, 1) to get all Receivables that are due in 2014 and earlier. """ if relation_id is not None: # Filter by (relation) account_id. There doesn't seem to be # any reason to prefer # 'read/financial/ReceivablesListByAccount?accountId=X' over # this. relation_id = self._remote_guid(relation_id) self._filter_append(kwargs, u'AccountId eq %s' % (relation_id,)) if duedate__lt is not None: # Not sure what the AgeGroup means in # ReceivablesListByAgeGroup, but we can certainly do # without. duedate__lt = self._remote_datetime(duedate__lt) self._filter_append(kwargs, u'DueDate lt %s' % (duedate__lt,)) if duedate__gte is not None: # Not sure what the AgeGroup means in # ReceivablesListByAgeGroup, but we can certainly do # without. duedate__gte = self._remote_datetime(duedate__gte) self._filter_append(kwargs, u'DueDate ge %s' % (duedate__gte,)) return super(Receivables, self).filter(**kwargs)
Create the (11745) Sudoku clauses, and return them as a list. Note that these clauses are *independent* of the particular Sudoku puzzle at hand.
def sudoku_clauses(): """ Create the (11745) Sudoku clauses, and return them as a list. Note that these clauses are *independent* of the particular Sudoku puzzle at hand. """ res = [] # for all cells, ensure that the each cell: for i in range(1, 10): for j in range(1, 10): # denotes (at least) one of the 9 digits (1 clause) res.append([v(i, j, d) for d in range(1, 10)]) # does not denote two different digits at once (36 clauses) for d in range(1, 10): for dp in range(d + 1, 10): res.append([-v(i, j, d), -v(i, j, dp)]) def valid(cells): # Append 324 clauses, corresponding to 9 cells, to the result. # The 9 cells are represented by a list tuples. The new clauses # ensure that the cells contain distinct values. for i, xi in enumerate(cells): for j, xj in enumerate(cells): if i < j: for d in range(1, 10): res.append([-v(xi[0], xi[1], d), -v(xj[0], xj[1], d)]) # ensure rows and columns have distinct values for i in range(1, 10): valid([(i, j) for j in range(1, 10)]) valid([(j, i) for j in range(1, 10)]) # ensure 3x3 sub-grids "regions" have distinct values for i in 1, 4, 7: for j in 1, 4 ,7: valid([(i + k % 3, j + k // 3) for k in range(9)]) assert len(res) == 81 * (1 + 36) + 27 * 324 return res
Solve a Sudoku grid in place.
def solve(grid): """ solve a Sudoku grid inplace """ clauses = sudoku_clauses() for i in range(1, 10): for j in range(1, 10): d = grid[i - 1][j - 1] # For each digit already known, a clause (with one literal). # Note: # We could also remove all variables for the known cells # altogether (which would be more efficient). However, for # the sake of simplicity, we decided not to do that. if d: clauses.append([v(i, j, d)]) # solve the SAT problem sol = set(pycosat.solve(clauses)) def read_cell(i, j): # return the digit of cell i, j according to the solution for d in range(1, 10): if v(i, j, d) in sol: return d for i in range(1, 10): for j in range(1, 10): grid[i - 1][j - 1] = read_cell(i, j)
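Both Sudoku functions above call a helper v(i, j, d) that is not shown in this excerpt. A minimal sketch, under the conventional assumption that it maps cell (i, j) and digit d to a distinct positive SAT variable:

def v(i, j, d):
    """Map cell (i, j) and digit d (each in 1..9) to a unique variable in 1..729."""
    return 81 * (i - 1) + 9 * (j - 1) + d

With this numbering, solve(grid) fills a 9x9 list of lists in place, where 0 marks an empty cell; the clause-count assertion in sudoku_clauses() is unaffected by the particular encoding.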
Create Django class - based view from injector class.
def view(injector): """Create Django class-based view from injector class.""" handler = create_handler(View, injector) apply_http_methods(handler, injector) return injector.let(as_view=handler.as_view)
Create Django form processing class - based view from injector class.
def form_view(injector): """Create Django form processing class-based view from injector class.""" handler = create_handler(FormView, injector) apply_form_methods(handler, injector) return injector.let(as_view=handler.as_view)
Create Flask method based dispatching view from injector class.
def method_view(injector): """Create Flask method based dispatching view from injector class.""" handler = create_handler(MethodView) apply_http_methods(handler, injector) return injector.let(as_view=handler.as_view)
Create DRF class - based API view from injector class.
def api_view(injector): """Create DRF class-based API view from injector class.""" handler = create_handler(APIView, injector) apply_http_methods(handler, injector) apply_api_view_methods(handler, injector) return injector.let(as_view=handler.as_view)
Create DRF generic class - based API view from injector class.
def generic_api_view(injector): """Create DRF generic class-based API view from injector class.""" handler = create_handler(GenericAPIView, injector) apply_http_methods(handler, injector) apply_api_view_methods(handler, injector) apply_generic_api_view_methods(handler, injector) return injector.let(as_view=handler.as_view)
Create DRF model view set from injector class.
def model_view_set(injector): """Create DRF model view set from injector class.""" handler = create_handler(ModelViewSet, injector) apply_api_view_methods(handler, injector) apply_generic_api_view_methods(handler, injector) apply_model_view_set_methods(handler, injector) return injector.let(as_viewset=lambda: handler)
Receive a streamer for a given file descriptor.
def stream_from_fd(fd, loop): """Recieve a streamer for a given file descriptor.""" reader = asyncio.StreamReader(loop=loop) protocol = asyncio.StreamReaderProtocol(reader, loop=loop) waiter = asyncio.futures.Future(loop=loop) transport = UnixFileDescriptorTransport( loop=loop, fileno=fd, protocol=protocol, waiter=waiter, ) try: yield from waiter except Exception: transport.close() if loop.get_debug(): logger.debug("Read fd %r connected: (%r, %r)", fd, transport, protocol) return reader, transport
Called by the event loop whenever the fd is ready for reading.
def _read_ready(self): """Called by the event loop whenever the fd is ready for reading.""" try: data = os.read(self._fileno, self.max_size) except InterruptedError: # No worries ;) pass except OSError as exc: # Some OS-level problem, crash. self._fatal_error(exc, "Fatal read error on file descriptor read") else: if data: self._protocol.data_received(data) else: # We reached end-of-file. if self._loop.get_debug(): logger.info("%r was closed by the kernel", self) self._closing = False self.pause_reading() self._loop.call_soon(self._protocol.eof_received) self._loop.call_soon(self._call_connection_lost, None)
Public API: pause reading the transport.
def pause_reading(self): """Public API: pause reading the transport.""" self._loop.remove_reader(self._fileno) self._active = False
Public API: resume transport reading.
def resume_reading(self): """Public API: resume transport reading.""" self._loop.add_reader(self._fileno, self._read_ready) self._active = True
Actual closing code, both from manual close and errors.
def _close(self, error=None): """Actual closing code, both from manual close and errors.""" self._closing = True self.pause_reading() self._loop.call_soon(self._call_connection_lost, error)
Finalize closing.
def _call_connection_lost(self, error): """Finalize closing.""" try: self._protocol.connection_lost(error) finally: os.close(self._fileno) self._fileno = None self._protocol = None self._loop = None
Add a new watching rule.
def watch(self, path, flags, *, alias=None): """Add a new watching rule.""" if alias is None: alias = path if alias in self.requests: raise ValueError("A watch request is already scheduled for alias %s" % alias) self.requests[alias] = (path, flags) if self._fd is not None: # We've started, register the watch immediately. self._setup_watch(alias, path, flags)
Stop watching a given rule.
def unwatch(self, alias): """Stop watching a given rule.""" if alias not in self.descriptors: raise ValueError("Unknown watch alias %s; current set is %r" % (alias, list(self.descriptors.keys()))) wd = self.descriptors[alias] errno = LibC.inotify_rm_watch(self._fd, wd) if errno != 0: raise IOError("Failed to close watcher %d: errno=%d" % (wd, errno)) del self.descriptors[alias] del self.requests[alias] del self.aliases[wd]
Actual rule setup.
def _setup_watch(self, alias, path, flags): """Actual rule setup.""" assert alias not in self.descriptors, "Registering alias %s twice!" % alias wd = LibC.inotify_add_watch(self._fd, path, flags) if wd < 0: raise IOError("Error setting up watch on %s with flags %s: wd=%s" % ( path, flags, wd)) self.descriptors[alias] = wd self.aliases[wd] = alias
Start the watcher, registering new watches if any.
def setup(self, loop): """Start the watcher, registering new watches if any.""" self._loop = loop self._fd = LibC.inotify_init() for alias, (path, flags) in self.requests.items(): self._setup_watch(alias, path, flags) # We pass ownership of the fd to the transport; it will close it. self._stream, self._transport = yield from aioutils.stream_from_fd(self._fd, loop)
Fetch an event.
def get_event(self): """Fetch an event. This coroutine will swallow events for removed watches. """ while True: prefix = yield from self._stream.readexactly(PREFIX.size) if prefix == b'': # We got closed, return None. return wd, flags, cookie, length = PREFIX.unpack(prefix) path = yield from self._stream.readexactly(length) # All async performed, time to look at the event's content. if wd not in self.aliases: # Event for a removed watch, skip it. continue decoded_path = struct.unpack('%ds' % length, path)[0].rstrip(b'\x00').decode('utf-8') return Event( flags=flags, cookie=cookie, name=decoded_path, alias=self.aliases[wd], )
Respond to nsqd that you've processed this message successfully (or would like to silently discard it).
def finish(self): """ Respond to ``nsqd`` that you've processed this message successfully (or would like to silently discard it). """ assert not self._has_responded self._has_responded = True self.trigger(event.FINISH, message=self)
Respond to nsqd that you've failed to process this message successfully (and would like it to be requeued).
def requeue(self, **kwargs): """ Respond to ``nsqd`` that you've failed to process this message successfully (and would like it to be requeued). :param backoff: whether or not :class:`nsq.Reader` should apply backoff handling :type backoff: bool :param delay: the amount of time (in seconds) that this message should be delayed if -1 it will be calculated based on # of attempts :type delay: int """ # convert delay to time_ms for fixing # https://github.com/nsqio/pynsq/issues/71 and maintaining # backward compatibility if 'delay' in kwargs and isinstance(kwargs['delay'], int) and kwargs['delay'] >= 0: kwargs['time_ms'] = kwargs['delay'] * 1000 assert not self._has_responded self._has_responded = True self.trigger(event.REQUEUE, message=self, **kwargs)
Respond to nsqd that you need more time to process the message.
def touch(self): """ Respond to ``nsqd`` that you need more time to process the message. """ assert not self._has_responded self.trigger(event.TOUCH, message=self)
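A hedged usage sketch of the three responses above inside a pynsq message handler; handle_order and RetryLater are hypothetical application names:

def message_handler(message):
    try:
        handle_order(message.body)    # hypothetical application function
    except RetryLater:                # hypothetical exception type
        message.requeue(delay=30)     # ask nsqd to redeliver in roughly 30 seconds
        return
    # For long-running work, message.touch() can be called periodically before
    # responding, to reset the server-side message timeout.
    message.finish()                  # acknowledge successful processing

With pynsq, such a handler is typically passed to nsq.Reader(message_handler=..., topic=..., channel=..., lookupd_http_addresses=[...]) before calling nsq.run().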
Starts any instantiated :class:`nsq.Reader` or :class:`nsq.Writer`
def run(): """ Starts any instantiated :class:`nsq.Reader` or :class:`nsq.Writer` """ signal.signal(signal.SIGTERM, _handle_term_signal) signal.signal(signal.SIGINT, _handle_term_signal) tornado.ioloop.IOLoop.instance().start()
Update the timer to reflect a successful call
def success(self): """Update the timer to reflect a successfull call""" if self.interval == 0.0: return self.short_interval -= self.short_unit self.long_interval -= self.long_unit self.short_interval = max(self.short_interval, Decimal(0)) self.long_interval = max(self.long_interval, Decimal(0)) self.update_interval()
Update the timer to reflect a failed call
def failure(self): """Update the timer to reflect a failed call""" self.short_interval += self.short_unit self.long_interval += self.long_unit self.short_interval = min(self.short_interval, self.max_short_timer) self.long_interval = min(self.long_interval, self.max_long_timer) self.update_interval()
encode a dictionary of URL parameters (including iterables) as utf-8
def _utf8_params(params): """encode a dictionary of URL parameters (including iterables) as utf-8""" assert isinstance(params, dict) encoded_params = [] for k, v in params.items(): if v is None: continue if isinstance(v, integer_types + (float,)): v = str(v) if isinstance(v, (list, tuple)): v = [to_bytes(x) for x in v] else: v = to_bytes(v) encoded_params.append((k, v)) return dict(encoded_params)
Closes all connections and stops all periodic callbacks.
def close(self): """ Closes all connections stops all periodic callbacks """ for conn in self.conns.values(): conn.close() self.redist_periodic.stop() if self.query_periodic is not None: self.query_periodic.stop()
Used to identify when buffered messages should be processed and responded to.
def is_starved(self): """ Used to identify when buffered messages should be processed and responded to. When max_in_flight > 1 and you're batching messages together to perform work is isn't possible to just compare the len of your list of buffered messages against your configured max_in_flight (because max_in_flight may not be evenly divisible by the number of producers you're connected to, ie. you might never get that many messages... it's a *max*). Example:: def message_handler(self, nsq_msg, reader): # buffer messages if reader.is_starved(): # perform work reader = nsq.Reader(...) reader.set_message_handler(functools.partial(message_handler, reader=reader)) nsq.run() """ for conn in itervalues(self.conns): if conn.in_flight > 0 and conn.in_flight >= (conn.last_rdy * 0.85): return True return False
Adds a connection to nsqd at the specified address.
def connect_to_nsqd(self, host, port): """ Adds a connection to ``nsqd`` at the specified address. :param host: the address to connect to :param port: the port to connect to """ assert isinstance(host, string_types) assert isinstance(port, int) conn = AsyncConn(host, port, **self.conn_kwargs) conn.on('identify', self._on_connection_identify) conn.on('identify_response', self._on_connection_identify_response) conn.on('auth', self._on_connection_auth) conn.on('auth_response', self._on_connection_auth_response) conn.on('error', self._on_connection_error) conn.on('close', self._on_connection_close) conn.on('ready', self._on_connection_ready) conn.on('message', self._on_message) conn.on('heartbeat', self._on_heartbeat) conn.on('backoff', functools.partial(self._on_backoff_resume, success=False)) conn.on('resume', functools.partial(self._on_backoff_resume, success=True)) conn.on('continue', functools.partial(self._on_backoff_resume, success=None)) if conn.id in self.conns: return # only attempt to re-connect once every 10s per destination # this throttles reconnects to failed endpoints now = time.time() last_connect_attempt = self.connection_attempts.get(conn.id) if last_connect_attempt and last_connect_attempt > now - 10: return self.connection_attempts[conn.id] = now logger.info('[%s:%s] connecting to nsqd', conn.id, self.name) conn.connect() return conn
Trigger a query of the configured nsq_lookupd_http_addresses.
def query_lookupd(self): """ Trigger a query of the configured ``nsq_lookupd_http_addresses``. """ endpoint = self.lookupd_http_addresses[self.lookupd_query_index] self.lookupd_query_index = (self.lookupd_query_index + 1) % len(self.lookupd_http_addresses) # urlsplit() is faulty if scheme not present if '://' not in endpoint: endpoint = 'http://' + endpoint scheme, netloc, path, query, fragment = urlparse.urlsplit(endpoint) if not path or path == "/": path = "/lookup" params = parse_qs(query) params['topic'] = self.topic query = urlencode(_utf8_params(params), doseq=1) lookupd_url = urlparse.urlunsplit((scheme, netloc, path, query, fragment)) req = tornado.httpclient.HTTPRequest( lookupd_url, method='GET', headers={'Accept': 'application/vnd.nsq; version=1.0'}, connect_timeout=self.lookupd_connect_timeout, request_timeout=self.lookupd_request_timeout) callback = functools.partial(self._finish_query_lookupd, lookupd_url=lookupd_url) self.http_client.fetch(req, callback=callback)
Dynamically adjust the reader max_in_flight. Set to 0 to immediately disable a Reader
def set_max_in_flight(self, max_in_flight): """Dynamically adjust the reader max_in_flight. Set to 0 to immediately disable a Reader""" assert isinstance(max_in_flight, int) self.max_in_flight = max_in_flight if max_in_flight == 0: # set RDY 0 to all connections for conn in itervalues(self.conns): if conn.rdy > 0: logger.debug('[%s:%s] rdy: %d -> 0', conn.id, self.name, conn.rdy) self._send_rdy(conn, 0) self.total_rdy = 0 else: self.need_rdy_redistributed = True self._redistribute_rdy_state()
Called when a message has been received where msg.attempts > max_tries
def giving_up(self, message): """ Called when a message has been received where ``msg.attempts > max_tries`` This is useful to subclass and override to perform a task (such as writing to disk, etc.) :param message: the :class:`nsq.Message` received """ logger.warning('[%s] giving up on message %s after %d tries (max:%d) %r', self.name, message.id, message.attempts, self.max_tries, message.body)
Listen for the named event with the specified callback.
def on(self, name, callback): """ Listen for the named event with the specified callback. :param name: the name of the event :type name: string :param callback: the callback to execute when the event is triggered :type callback: callable """ assert callable(callback), 'callback is not callable' if callback in self.__listeners[name]: raise DuplicateListenerError self.__listeners[name].append(callback)
Stop listening for the named event via the specified callback.
def off(self, name, callback): """ Stop listening for the named event via the specified callback. :param name: the name of the event :type name: string :param callback: the callback that was originally used :type callback: callable """ if callback not in self.__listeners[name]: raise InvalidListenerError self.__listeners[name].remove(callback)
Execute the callbacks for the listeners on the specified event with the supplied arguments.
def trigger(self, name, *args, **kwargs): """ Execute the callbacks for the listeners on the specified event with the supplied arguments. All extra arguments are passed through to each callback. :param name: the name of the event :type name: string """ for ev in self.__listeners[name]: ev(*args, **kwargs)
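A small usage sketch of the on/off/trigger trio above; EventedMixin is the assumed name of the mix-in class that defines them, and the 'close' event and callback are illustrative:

class Conn(EventedMixin):            # assumes the mix-in defined by on/off/trigger above
    def shutdown(self):
        self.trigger('close', conn=self)

def on_close(conn):
    print('connection closed:', conn)

c = Conn()
c.on('close', on_close)     # register (a duplicate registration raises DuplicateListenerError)
c.shutdown()                # trigger executes on_close(conn=c)
c.off('close', on_close)    # deregister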
publish a message to nsq
def pub(self, topic, msg, callback=None): """ publish a message to nsq :param topic: nsq topic :param msg: message body (bytes) :param callback: function which takes (conn, data) (data may be nsq.Error) """ self._pub('pub', topic, msg, callback=callback)
publish multiple messages in one command (efficiently)
def mpub(self, topic, msg, callback=None): """ publish multiple messages in one command (efficiently) :param topic: nsq topic :param msg: list of messages bodies (which are bytes) :param callback: function which takes (conn, data) (data may be nsq.Error) """ if isinstance(msg, bytes_types): msg = [msg] assert isinstance(msg, (list, set, tuple)) self._pub('mpub', topic, msg, callback=callback)
publish a single message to nsq with delayed delivery (deferred publish)
def dpub(self, topic, delay_ms, msg, callback=None): """ publish multiple messages in one command (efficiently) :param topic: nsq topic :param delay_ms: tell nsqd to delay delivery for this long (integer milliseconds) :param msg: message body (bytes) :param callback: function which takes (conn, data) (data may be nsq.Error) """ self._pub('dpub', topic, msg, delay_ms, callback=callback)
Score function to calculate score
def score_function(self, x, W): # need refector ''' Score function to calculate score ''' if (self.svm_kernel == 'polynomial_kernel' or self.svm_kernel == 'gaussian_kernel' or self.svm_kernel == 'soft_polynomial_kernel' or self.svm_kernel == 'soft_gaussian_kernel'): x = x[1:] ''' original_X = self.train_X[:, 1:] score = 0 for i in range(len(self.sv_alpha)): if (self.svm_kernel == 'polynomial_kernel' or self.svm_kernel == 'soft_polynomial_kernel'): score += self.sv_alpha[i] * self.sv_Y[i] * utility.Kernel.polynomial_kernel(self, original_X[self.sv_index[i]], x) elif (self.svm_kernel == 'gaussian_kernel' or self.svm_kernel == 'soft_gaussian_kernel'): score += self.sv_alpha[i] * self.sv_Y[i] * utility.Kernel.gaussian_kernel(self, original_X[self.sv_index[i]], x) score = np.sign(score + self.sv_avg_b) ''' score = np.sign(np.sum(self.sv_alpha * self.sv_Y * utility.Kernel.kernel_matrix_xX(self, x, self.sv_X)) + self.sv_avg_b) else: score = np.sign(np.inner(x, W)) return score
Score function to calculate score
def score_function(self, x, W): ''' Score function to calculate score ''' score = super(BinaryClassifier, self).score_function(x, W) if score >= 0.5: score = 1.0 else: score = -1.0 return score
Train Pocket Perceptron Learning Algorithm: from f(x) = WX, find the best h(x) = WX similar to f(x), and output W.
def train(self): ''' Train Pocket Perceptron Learning Algorithm From f(x) = WX Find best h(x) = WX similar to f(x) Output W ''' if (self.status != 'init'): print("Please load train data and init W first.") return self.W self.status = 'train' new_W = self.W self.temp_avg_error = self.calculate_avg_error(self.train_X, self.train_Y, new_W) for _ in range(self.updates): if (self.loop_mode is 'naive_cycle'): data_check_order = range(self.data_num) elif (self.loop_mode is 'random'): data_check_order = range(self.data_num) data_check_order = random.sample(data_check_order, self.data_num) else: data_check_order = range(self.data_num) data_check_order = random.sample(data_check_order, self.data_num) for i in data_check_order: if self.error_function(self.score_function(self.train_X[i], new_W), self.train_Y[i]): self.tune_times += 1 new_W = new_W + self.step_alpha * (self.train_Y[i] * self.train_X[i]) new_avg_error = self.calculate_avg_error(self.train_X, self.train_Y, new_W) if new_avg_error < self.temp_avg_error: self.put_in_pocket_times += 1 self.temp_avg_error = new_avg_error self.W = new_W break return self.W
original_X = self.svm_processor.train_X[:, 1:]
score = 0
for i in range(len(self.svm_processor.sv_alpha)):
    score += self.svm_processor.sv_alpha[i] * self.svm_processor.sv_Y[i] * utility.Kernel.gaussian_kernel(self, original_X[self.svm_processor.sv_index[i]], x)
score = score + self.svm_processor.sv_avg_b
def svm_score(self, x): x = x[1:] ''' original_X = self.svm_processor.train_X[:, 1:] score = 0 for i in range(len(self.svm_processor.sv_alpha)): score += self.svm_processor.sv_alpha[i] * self.svm_processor.sv_Y[i] * utility.Kernel.gaussian_kernel(self, original_X[self.svm_processor.sv_index[i]], x) score = score + self.svm_processor.sv_avg_b ''' score = np.sum(self.svm_processor.sv_alpha * self.svm_processor.sv_Y * utility.Kernel.kernel_matrix_xX(self, x, self.svm_processor.sv_X)) + self.svm_processor.sv_avg_b return score
Train Linear Regression Algorithm: from f(x) = WX, find the best h(x) = WX similar to f(x), and output W.
def train(self): ''' Train Linear Regression Algorithm From f(x) = WX Find best h(x) = WX similar to f(x) Output W ''' if (self.status != 'init'): print("Please load train data and init W first.") return self.W self.status = 'train' self.xpsedo = self.calculate_psedo_X(self.train_X) self.W = np.dot(self.xpsedo, self.train_Y) return self.W
Score function to calculate score
def score_function(self, x, W): # need refector ''' Score function to calculate score ''' score = self.sign * np.sign(x[self.feature_index] - self.theta) return score
Train Perceptron Learning Algorithm: from f(x) = WX, find the best h(x) = WX similar to f(x), and output W.
def train(self): ''' Train Perceptron Learning Algorithm From f(x) = WX Find best h(x) = WX similar to f(x) Output W ''' if (self.status != 'init'): print("Please load train data and init W first.") return self.W self.status = 'train' if (self.loop_mode is 'random'): data_check_order = range(self.data_num) data_check_order = random.sample(data_check_order, self.data_num) elif (self.loop_mode is 'naive_cycle'): data_check_order = range(self.data_num) else: data_check_order = range(self.data_num) self.tune_times = 0 k = 0 flag = True while True: if (self.tune_times > (2 * self.data_num)): print("Dataset not linear separable.") break if k == self.data_num: if flag: break k = 0 flag = True point_wise_i = data_check_order[k] if self.error_function(self.score_function(self.train_X[point_wise_i], self.W), self.train_Y[point_wise_i]): flag = False self.tune_times += 1 self.W = self.W + self.step_alpha * (self.train_Y[point_wise_i] * self.train_X[point_wise_i]) k += 1 return self.W
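One concrete update of the PLA correction used in train (w = w + alpha * y * x), on made-up numbers:

import numpy as np

w = np.array([0.0, 0.0, 0.0])     # current weights (x[0] == 1 is the bias feature)
x = np.array([1.0, 2.0, -1.0])    # a misclassified example
y = 1.0                           # its true label
alpha = 1.0                       # self.step_alpha in the code above

w = w + alpha * y * x             # the correction applied on an error
print(np.sign(np.inner(x, w)))    # 1.0 -- the example is now classified correctly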
load file
def load(input_data_file='', data_type='float'): """load file""" X = [] Y = [] if data_type == 'float': with open(input_data_file) as f: for line in f: data = line.split() x = [1] + [float(v) for v in data[:-1]] X.append(x) Y.append(float(data[-1])) else: with open(input_data_file) as f: for line in f: data = line.split() x = [1] + [v for v in data[:-1]] X.append(x) Y.append(data[-1]) return np.array(X), np.array(Y)
K = np.zeros((svm_model.data_num, svm_model.data_num))
def kernel_matrix(svm_model, original_X): if (svm_model.svm_kernel == 'polynomial_kernel' or svm_model.svm_kernel == 'soft_polynomial_kernel'): K = (svm_model.zeta + svm_model.gamma * np.dot(original_X, original_X.T)) ** svm_model.Q elif (svm_model.svm_kernel == 'gaussian_kernel' or svm_model.svm_kernel == 'soft_gaussian_kernel'): pairwise_dists = squareform(pdist(original_X, 'euclidean')) K = np.exp(-svm_model.gamma * (pairwise_dists ** 2)) ''' K = np.zeros((svm_model.data_num, svm_model.data_num)) for i in range(svm_model.data_num): for j in range(svm_model.data_num): if (svm_model.svm_kernel == 'polynomial_kernel' or svm_model.svm_kernel == 'soft_polynomial_kernel'): K[i, j] = Kernel.polynomial_kernel(svm_model, original_X[i], original_X[j]) elif (svm_model.svm_kernel == 'gaussian_kernel' or svm_model.svm_kernel == 'soft_gaussian_kernel'): K[i, j] = Kernel.gaussian_kernel(svm_model, original_X[i], original_X[j]) ''' return K
K = np.zeros((svm_model.data_num, svm_model.data_num))
def kernel_matrix_xX(svm_model, original_x, original_X): if (svm_model.svm_kernel == 'polynomial_kernel' or svm_model.svm_kernel == 'soft_polynomial_kernel'): K = (svm_model.zeta + svm_model.gamma * np.dot(original_x, original_X.T)) ** svm_model.Q elif (svm_model.svm_kernel == 'gaussian_kernel' or svm_model.svm_kernel == 'soft_gaussian_kernel'): K = np.exp(-svm_model.gamma * (cdist(original_X, np.atleast_2d(original_x), 'euclidean').T ** 2)).ravel() ''' K = np.zeros((svm_model.data_num, svm_model.data_num)) for i in range(svm_model.data_num): for j in range(svm_model.data_num): if (svm_model.svm_kernel == 'polynomial_kernel' or svm_model.svm_kernel == 'soft_polynomial_kernel'): K[i, j] = Kernel.polynomial_kernel(svm_model, original_x, original_X[j]) elif (svm_model.svm_kernel == 'gaussian_kernel' or svm_model.svm_kernel == 'soft_gaussian_kernel'): K[i, j] = Kernel.gaussian_kernel(svm_model, original_x, original_X[j]) ''' return K
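For reference, the vectorized kernels above compute the same quantities as the commented-out loops; a quick self-contained check of the Gaussian case K(x, x') = exp(-gamma * ||x - x'||^2) on random data (the gamma value and shapes are arbitrary):

import numpy as np
from scipy.spatial.distance import cdist, pdist, squareform

gamma = 0.5
X = np.random.rand(5, 3)

# Full kernel matrix, as in kernel_matrix()
K_full = np.exp(-gamma * squareform(pdist(X, 'euclidean')) ** 2)

# Explicit double loop for comparison
K_loop = np.array([[np.exp(-gamma * np.sum((xi - xj) ** 2)) for xj in X] for xi in X])
assert np.allclose(K_full, K_loop)

# Kernel row against a single point, as in kernel_matrix_xX()
x = np.random.rand(3)
k_row = np.exp(-gamma * (cdist(X, np.atleast_2d(x), 'euclidean').T ** 2)).ravel()
assert np.allclose(k_row, [np.exp(-gamma * np.sum((xi - x) ** 2)) for xi in X])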
Transform data features to a higher-level feature space.
def set_feature_transform(self, mode='polynomial', degree=1): ''' Transform data feature to high level ''' if self.status != 'load_train_data': print("Please load train data first.") return self.train_X self.feature_transform_mode = mode self.feature_transform_degree = degree self.train_X = self.train_X[:, 1:] self.train_X = utility.DatasetLoader.feature_transform( self.train_X, self.feature_transform_mode, self.feature_transform_degree ) return self.train_X
Make prediction: input test data, output the prediction.
def prediction(self, input_data='', mode='test_data'): ''' Make prediction input test data output the prediction ''' prediction = {} if (self.status != 'train'): print("Please load train data and init W then train the W first.") return prediction if (input_data == ''): print("Please input test data for prediction.") return prediction if mode == 'future_data': data = input_data.split() input_data_x = [float(v) for v in data] input_data_x = utility.DatasetLoader.feature_transform( np.array(input_data_x).reshape(1, -1), self.feature_transform_mode, self.feature_transform_degree ) input_data_x = np.ravel(input_data_x) prediction = self.score_function(input_data_x, self.W) return {"input_data_x": input_data_x, "input_data_y": None, "prediction": prediction} else: data = input_data.split() input_data_x = [float(v) for v in data[:-1]] input_data_x = utility.DatasetLoader.feature_transform( np.array(input_data_x).reshape(1, -1), self.feature_transform_mode, self.feature_transform_degree ) input_data_x = np.ravel(input_data_x) input_data_y = float(data[-1]) prediction = self.score_function(input_data_x, self.W) return {"input_data_x": input_data_x, "input_data_y": input_data_y, "prediction": prediction}
Theta sigmoid function
def theta(self, s): ''' Theta sigmoid function ''' s = np.where(s < -709, -709, s) return 1 / (1 + np.exp((-1) * s))
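The clamp at -709 in theta guards against overflow: np.exp(709) is about 8.2e307, just under the largest double, while np.exp(710) overflows to inf. A tiny standalone demonstration of the same clamp:

import numpy as np

def theta(s):
    s = np.where(s < -709, -709, s)           # clamp so exp(-s) stays finite
    return 1 / (1 + np.exp(-1 * s))

print(np.exp(709.0))                          # ~8.2e307, still representable
print(theta(np.array([-1000.0, 0.0, 10.0])))  # [~0.0, 0.5, ~0.99995], no overflow warning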
Score function to calculate score
def score_function(self, x, W): # need refector ''' Score function to calculate score ''' score = self.theta(np.inner(x, W)) return score
Error function to calculate error: cross entropy error
def error_function(self, x, y, W): # need refector ''' Error function to calculate error: cross entropy error ''' error = np.log(1 + np.exp((-1) * y * np.inner(x, W))) return error
Retrieves some statistics from a single Trimmomatic log file.
def parse_log(log_file): """Retrieves some statistics from a single Trimmomatic log file. This function parses Trimmomatic's log file and stores some trimming statistics in an :py:class:`OrderedDict` object. This object contains the following keys: - ``clean_len``: Total length after trimming. - ``total_trim``: Total trimmed base pairs. - ``total_trim_perc``: Total trimmed base pairs in percentage. - ``5trim``: Total base pairs trimmed at 5' end. - ``3trim``: Total base pairs trimmed at 3' end. Parameters ---------- log_file : str Path to trimmomatic log file. Returns ------- x : :py:class:`OrderedDict` Object storing the trimming statistics. """ template = OrderedDict([ # Total length after trimming ("clean_len", 0), # Total trimmed base pairs ("total_trim", 0), # Total trimmed base pairs in percentage ("total_trim_perc", 0), # Total trimmed at 5' end ("5trim", 0), # Total trimmed at 3' end ("3trim", 0), # Bad reads (completely trimmed) ("bad_reads", 0) ]) with open(log_file) as fh: for line in fh: # This will split the log fields into: # 0. read length after trimming # 1. amount trimmed from the start # 2. last surviving base # 3. amount trimmed from the end fields = [int(x) for x in line.strip().split()[-4:]] if not fields[0]: template["bad_reads"] += 1 template["5trim"] += fields[1] template["3trim"] += fields[3] template["total_trim"] += fields[1] + fields[3] template["clean_len"] += fields[0] total_len = template["clean_len"] + template["total_trim"] if total_len: template["total_trim_perc"] = round( (template["total_trim"] / total_len) * 100, 2) else: template["total_trim_perc"] = 0 return template
Cleans the working directory of unwanted temporary files
def clean_up(fastq_pairs, clear): """Cleans the working directory of unwanted temporary files""" # Find unpaired fastq files unpaired_fastq = [f for f in os.listdir(".") if f.endswith("_U.fastq.gz")] # Remove unpaired fastq files, if any for fpath in unpaired_fastq: os.remove(fpath) # Expected output to assess whether it is safe to remove temporary input expected_out = [f for f in os.listdir(".") if f.endswith("_trim.fastq.gz")] if clear == "true" and len(expected_out) == 2: for fq in fastq_pairs: # Get real path of fastq files, following symlinks rp = os.path.realpath(fq) logger.debug("Removing temporary fastq file path: {}".format(rp)) if re.match(".*/work/.{2}/.{30}/.*", rp): os.remove(rp)
Merges the default adapters file in the trimmomatic adapters directory
def merge_default_adapters(): """Merges the default adapters file in the trimmomatic adapters directory Returns ------- str Path with the merged adapters file. """ default_adapters = [os.path.join(ADAPTERS_PATH, x) for x in os.listdir(ADAPTERS_PATH)] filepath = os.path.join(os.getcwd(), "default_adapters.fasta") with open(filepath, "w") as fh, \ fileinput.input(default_adapters) as in_fh: for line in in_fh: fh.write("{}{}".format(line, "\\n")) return filepath
Main executor of the trimmomatic template.
def main(sample_id, fastq_pair, trim_range, trim_opts, phred, adapters_file, clear): """ Main executor of the trimmomatic template. Parameters ---------- sample_id : str Sample Identification string. fastq_pair : list Two element list containing the paired FastQ files. trim_range : list Two element list containing the trimming range. trim_opts : list Four element list containing several trimmomatic options: [*SLIDINGWINDOW*; *LEADING*; *TRAILING*; *MINLEN*] phred : int Guessed phred score for the sample. The phred score is a generated output from :py:class:`templates.integrity_coverage`. adapters_file : str Path to adapters file. If not provided, or the path is not available, it will use the default adapters from Trimmomatic will be used clear : str Can be either 'true' or 'false'. If 'true', the input fastq files will be removed at the end of the run, IF they are in the working directory """ logger.info("Starting trimmomatic") # Create base CLI cli = [ "java", "-Xmx{}".format("$task.memory"[:-1].lower().replace(" ", "")), "-jar", TRIM_PATH.strip(), "PE", "-threads", "$task.cpus" ] # If the phred encoding was detected, provide it try: # Check if the provided PHRED can be converted to int phred = int(phred) phred_flag = "-phred{}".format(str(phred)) cli += [phred_flag] # Could not detect phred encoding. Do not add explicit encoding to # trimmomatic and let it guess except ValueError: pass # Add input samples to CLI cli += fastq_pair # Add output file names output_names = [] for i in range(len(fastq_pair)): output_names.append("{}_{}_trim.fastq.gz".format( SAMPLE_ID, str(i + 1))) output_names.append("{}_{}_U.fastq.gz".format( SAMPLE_ID, str(i + 1))) cli += output_names if trim_range != ["None"]: cli += [ "CROP:{}".format(trim_range[1]), "HEADCROP:{}".format(trim_range[0]), ] if os.path.exists(adapters_file): logger.debug("Using the provided adapters file '{}'".format( adapters_file)) else: logger.debug("Adapters file '{}' not provided or does not exist. Using" " default adapters".format(adapters_file)) adapters_file = merge_default_adapters() cli += [ "ILLUMINACLIP:{}:3:30:10:6:true".format(adapters_file) ] #create log file im temporary dir to avoid issues when running on a docker container in macOS logfile = os.path.join(tempfile.mkdtemp(prefix='tmp'), "{}_trimlog.txt".format(sample_id)) # Add trimmomatic options cli += [ "SLIDINGWINDOW:{}".format(trim_opts[0]), "LEADING:{}".format(trim_opts[1]), "TRAILING:{}".format(trim_opts[2]), "MINLEN:{}".format(trim_opts[3]), "TOPHRED33", "-trimlog", logfile ] logger.debug("Running trimmomatic subprocess with command: {}".format(cli)) p = subprocess.Popen(cli, stdout=PIPE, stderr=PIPE) stdout, stderr = p.communicate() # Attempt to decode STDERR output from bytes. If unsuccessful, coerce to # string try: stderr = stderr.decode("utf8") except (UnicodeDecodeError, AttributeError): stderr = str(stderr) logger.info("Finished trimmomatic subprocess with STDOUT:\\n" "======================================\\n{}".format(stdout)) logger.info("Finished trimmomatic subprocesswith STDERR:\\n" "======================================\\n{}".format(stderr)) logger.info("Finished trimmomatic with return code: {}".format( p.returncode)) trimmomatic_log(logfile, sample_id) if p.returncode == 0 and os.path.exists("{}_1_trim.fastq.gz".format( SAMPLE_ID)): clean_up(fastq_pair, clear) # Check if trimmomatic ran successfully. If not, write the error message # to the status channel and exit. 
with open(".status", "w") as status_fh: if p.returncode != 0: status_fh.write("fail") return else: status_fh.write("pass")
Function that parses the samtools depth file and creates 3 dictionaries that will be useful to make the outputs of this script, both the tabular file and the json file that may be imported by pATLAS.
def depth_file_reader(depth_file): """ Function that parse samtools depth file and creates 3 dictionaries that will be useful to make the outputs of this script, both the tabular file and the json file that may be imported by pATLAS Parameters ---------- depth_file: textIO the path to depth file for each sample Returns ------- depth_dic_coverage: dict dictionary with the coverage per position for each plasmid """ # dict to store the mean coverage for each reference depth_dic_coverage = {} for line in depth_file: tab_split = line.split() # split by any white space reference = "_".join(tab_split[0].strip().split("_")[0:3]) # store # only the gi for the reference position = tab_split[1] num_reads_align = float(tab_split[2].rstrip()) if reference not in depth_dic_coverage: depth_dic_coverage[reference] = {} depth_dic_coverage[reference][position] = num_reads_align logger.info("Finished parsing depth file.") depth_file.close() logger.debug("Size of dict_cov: {} kb".format( asizeof(depth_dic_coverage)/1024)) return depth_dic_coverage
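For context, samtools depth emits one tab-separated line per covered position: reference name, 1-based position, depth. A small illustrative input for the parser above (the accession names are made up; note that only the first three underscore-separated fields of the reference are kept):

import io

example = io.StringIO(
    "ACC_1_NC_012345.1\t1\t12\n"
    "ACC_1_NC_012345.1\t2\t15\n"
    "ACC_2_NC_054321.1\t1\t3\n"
)
# depth_file_reader(example) would return roughly:
# {"ACC_1_NC": {"1": 12.0, "2": 15.0},
#  "ACC_2_NC": {"1": 3.0}}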
Function that handles the inputs required to parse depth files from bowtie and dumps a dict to a json file that can be imported into pATLAS.
def main(depth_file, json_dict, cutoff, sample_id): """ Function that handles the inputs required to parse depth files from bowtie and dumps a dict to a json file that can be imported into pATLAS. Parameters ---------- depth_file: str the path to depth file for each sample json_dict: str the file that contains the dictionary with keys and values for accessions and their respective lengths cutoff: str the cutoff used to trim the unwanted matches for the minimum coverage results from mapping. This value may range between 0 and 1. sample_id: str the id of the sample being parsed """ # check for the appropriate value for the cutoff value for coverage results logger.debug("Cutoff value: {}. Type: {}".format(cutoff, type(cutoff))) try: cutoff_val = float(cutoff) if cutoff_val < 0.4: logger.warning("This cutoff value will generate a high volume of " "plot data. Therefore '.report.json' can be too big") except ValueError: logger.error("Cutoff value should be a string such as: '0.6'. " "The outputted value: {}. Make sure to provide an " "appropriate value for --cov_cutoff".format(cutoff)) sys.exit(1) # loads dict from file, this file is provided in docker image plasmid_length = json.load(open(json_dict)) if plasmid_length: logger.info("Loaded dictionary of plasmid lengths") else: logger.error("Something went wrong and plasmid lengths dictionary" "could not be loaded. Check if process received this" "param successfully.") sys.exit(1) # read depth file depth_file_in = open(depth_file) # first reads the depth file and generates dictionaries to handle the input # to a simpler format logger.info("Reading depth file and creating dictionary to dump.") depth_dic_coverage = depth_file_reader(depth_file_in) percentage_bases_covered, dict_cov = generate_jsons(depth_dic_coverage, plasmid_length, cutoff_val) if percentage_bases_covered and dict_cov: logger.info("percentage_bases_covered length: {}".format( str(len(percentage_bases_covered)))) logger.info("dict_cov length: {}".format(str(len(dict_cov)))) else: logger.error("Both dicts that dump to JSON file or .report.json are " "empty.") # then dump do file logger.info("Dumping to {}".format("{}_mapping.json".format(depth_file))) with open("{}_mapping.json".format(depth_file), "w") as output_json: output_json.write(json.dumps(percentage_bases_covered)) json_dic = { "tableRow": [{ "sample": sample_id, "data": [{ "header": "Mapping", "table": "plasmids", "patlas_mapping": percentage_bases_covered, "value": len(percentage_bases_covered) }] }], "sample": sample_id, "patlas_mapping": percentage_bases_covered, "plotData": [{ "sample": sample_id, "data": { "patlasMappingSliding": dict_cov }, }] } logger.debug("Size of dict_cov: {} kb".format(asizeof(json_dic)/1024)) logger.info("Writing to .report.json") with open(".report.json", "w") as json_report: json_report.write(json.dumps(json_dic, separators=(",", ":")))
Sets the path to the appropriate jinja template file
def _set_template(self, template): """Sets the path to the appropriate jinja template file When a Process instance is initialized, this method will fetch the location of the appropriate template file, based on the ``template`` argument. It will raise an exception is the template file is not found. Otherwise, it will set the :py:attr:`Process.template_path` attribute. """ # Set template directory tpl_dir = join(dirname(abspath(__file__)), "templates") # Set template file path tpl_path = join(tpl_dir, template + ".nf") if not os.path.exists(tpl_path): raise eh.ProcessError( "Template {} does not exist".format(tpl_path)) self._template_path = join(tpl_dir, template + ".nf")
Sets the main channel names based on the provided input and output channel suffixes. This is performed when connecting processes.
def set_main_channel_names(self, input_suffix, output_suffix, lane): """Sets the main channel names based on the provide input and output channel suffixes. This is performed when connecting processes. Parameters ---------- input_suffix : str Suffix added to the input channel. Should be based on the lane and an arbitrary unique id output_suffix : str Suffix added to the output channel. Should be based on the lane and an arbitrary unique id lane : int Sets the lane of the process. """ self.input_channel = "{}_in_{}".format(self.template, input_suffix) self.output_channel = "{}_out_{}".format(self.template, output_suffix) self.lane = lane
Returns the main raw channel for the process
def get_user_channel(self, input_channel, input_type=None): """Returns the main raw channel for the process Provided with at least a channel name, this method returns the raw channel name and specification (the nextflow string definition) for the process. By default, it will fork from the raw input of the process' :attr:`~Process.input_type` attribute. However, this behaviour can be overridden by providing the ``input_type`` argument. If the specified or inferred input type exists in the :attr:`~Process.RAW_MAPPING` dictionary, the channel info dictionary will be retrieved along with the specified input channel. Otherwise, it will return None. An example of the returned dictionary is:: {"input_channel": "myChannel", "params": "fastq", "channel": "IN_fastq_raw", "channel_str":"IN_fastq_raw = Channel.fromFilePairs(params.fastq)" } Returns ------- dict or None Dictionary with the complete raw channel info. None if no channel is found. """ res = {"input_channel": input_channel} itype = input_type if input_type else self.input_type if itype in self.RAW_MAPPING: channel_info = self.RAW_MAPPING[itype] return {**res, **channel_info}
Wrapper to the jinja2 render method from a template file
def render(template, context): """Wrapper to the jinja2 render method from a template file Parameters ---------- template : str Path to template file. context : dict Dictionary with kwargs context to populate the template """ path, filename = os.path.split(template) return jinja2.Environment( loader=jinja2.FileSystemLoader(path or './') ).get_template(filename).render(context)
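A quick usage sketch of the wrapper above; the template path and placeholders are hypothetical:

# Suppose templates/example.nf contains the single line:
#   process {{ template }}_{{ pid }} { cpus {{ cpus }} }
out = render("templates/example.nf", {"template": "trimmomatic", "pid": "1_2", "cpus": 4})
# out == "process trimmomatic_1_2 { cpus 4 }"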
Class property that returns a populated template string
def template_str(self): """Class property that returns a populated template string This property allows the template of a particular process to be dynamically generated and returned when doing ``Process.template_str``. Returns ------- x : str String with the complete and populated process template """ if not self._context: raise eh.ProcessError("Channels must be setup first using the " "set_channels method") logger.debug("Setting context for template {}: {}".format( self.template, self._context )) x = self.render(self._template_path, self._context) return x