Logs an entity deletion
|
def log_entity_deletion(entity, params=None):
"""Logs an entity creation
"""
p = {'entity': entity}
if params:
p['params'] = params
_log(TYPE_CODES.DELETE, p)
|
Logs an operation done on an entity, possibly with other arguments
|
def log_operation(entities, operation_name, params=None):
"""Logs an operation done on an entity, possibly with other arguments
"""
if isinstance(entities, (list, tuple)):
entities = list(entities)
else:
entities = [entities]
p = {'name': operation_name, 'on': entities}
if params:
p['params'] = params
_log(TYPE_CODES.OPERATION, p)
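A quick usage sketch (the user and session objects and the params are illustrative):
log_operation(user, 'password_reset', params={'requested_by': 'admin'})
# multiple entities are wrapped into a list automatically
log_operation([user, session], 'logout')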
|
Logs a new state of an entity
|
def log_state(entity, state):
"""Logs a new state of an entity
"""
p = {'on': entity, 'state': state}
_log(TYPE_CODES.STATE, p)
|
Logs an update done on an entity
|
def log_update(entity, update):
"""Logs an update done on an entity
"""
p = {'on': entity, 'update': update}
_log(TYPE_CODES.UPDATE, p)
|
Logs an error
|
def log_error(error, result):
"""Logs an error
"""
p = {'error': error, 'result':result}
_log(TYPE_CODES.ERROR, p)
|
Decorator that provides a dictionary cursor to the calling function
|
def dict_cursor(func):
"""
Decorator that provides a dictionary cursor to the calling function
Adds the cursor as the second argument to the decorated function
Requires that the decorated function is a method on a class or object
that yields a cursor from a get_cursor(cursor_type=CursorType.DICT) coroutine or provides such an object
as the first argument in its signature
Yields:
A client-side dictionary cursor
"""
@wraps(func)
def wrapper(cls, *args, **kwargs):
with (yield from cls.get_cursor(_CursorType.DICT)) as c:
return (yield from func(cls, c, *args, **kwargs))
return wrapper
|
Decorator that provides a cursor to the calling function
|
def cursor(func):
"""
Decorator that provides a cursor to the calling function
Adds the cursor as the second argument to the decorated function
Requires that the decorated function is a method on a class or object
that yields a cursor from a get_cursor() coroutine or provides such an object
as the first argument in its signature
Yields:
A client-side cursor
"""
@wraps(func)
def wrapper(cls, *args, **kwargs):
with (yield from cls.get_cursor()) as c:
return (yield from func(cls, c, *args, **kwargs))
return wrapper
|
Decorator that provides a namedtuple cursor to the calling function
|
def nt_cursor(func):
"""
Decorator that provides a namedtuple cursor to the calling function
Adds the cursor as the second argument to the decorated function
Requires that the decorated function is a method on a class or object
that yields a cursor from a get_cursor(cursor_type=CursorType.NAMEDTUPLE) coroutine or provides such an object
as the first argument in its signature
Yields:
A client-side namedtuple cursor
"""
@wraps(func)
def wrapper(cls, *args, **kwargs):
with (yield from cls.get_cursor(_CursorType.NAMEDTUPLE)) as c:
return (yield from func(cls, c, *args, **kwargs))
return wrapper
|
Provides a transacted cursor which will run in autocommit=false mode
|
def transaction(func):
"""
Provides a transacted cursor which will run in autocommit=false mode
For any exception the transaction will be rolled back.
Requires that the decorated function is a method on a class or object
that yields a cursor from a get_cursor(cursor_type=CursorType.NAMEDTUPLE) coroutine or provides such an object
as the first argument in its signature
Yields:
A client-side transacted named cursor
"""
@wraps(func)
def wrapper(cls, *args, **kwargs):
with (yield from cls.get_cursor(_CursorType.NAMEDTUPLE)) as c:
try:
yield from c.execute('BEGIN')
result = (yield from func(cls, c, *args, **kwargs))
except Exception:
yield from c.execute('ROLLBACK')
else:
yield from c.execute('COMMIT')
return result
return wrapper
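A minimal usage sketch, assuming a store class that exposes the get_cursor() coroutine described above (the class, method, and table names are illustrative):
class AccountStore(SQLStore):
    @classmethod
    @transaction
    def transfer(cls, cur, from_id, to_id, amount):
        # cur is injected by the decorator; both statements share one
        # transaction, and any exception triggers the ROLLBACK branch above
        yield from cur.execute(
            'UPDATE accounts SET balance = balance - %s WHERE id = %s',
            (amount, from_id))
        yield from cur.execute(
            'UPDATE accounts SET balance = balance + %s WHERE id = %s',
            (amount, to_id))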
|
Sets connection parameters. For more information on the parameters that it accepts, see: http://www.postgresql.org/docs/9.2/static/libpq-connect.html
|
def connect(cls, database: str, user: str, password: str, host: str, port: int, *, use_pool: bool=True,
enable_ssl: bool=False, minsize=1, maxsize=50, keepalives_idle=5, keepalives_interval=4, echo=False,
**kwargs):
"""
Sets connection parameters
For more information on the parameters that it accepts,
see: http://www.postgresql.org/docs/9.2/static/libpq-connect.html
"""
cls._connection_params['database'] = database
cls._connection_params['user'] = user
cls._connection_params['password'] = password
cls._connection_params['host'] = host
cls._connection_params['port'] = port
cls._connection_params['sslmode'] = 'prefer' if enable_ssl else 'disable'
cls._connection_params['minsize'] = minsize
cls._connection_params['maxsize'] = maxsize
cls._connection_params['keepalives_idle'] = keepalives_idle
cls._connection_params['keepalives_interval'] = keepalives_interval
cls._connection_params['echo'] = echo
cls._connection_params.update(kwargs)
cls._use_pool = use_pool
|
Yields: existing db connection pool
|
def get_pool(cls) -> Pool:
"""
Yields:
existing db connection pool
"""
if len(cls._connection_params) < 5:
raise ConnectionError('Please call SQLStore.connect before calling this method')
if not cls._pool:
cls._pool = yield from create_pool(**cls._connection_params)
return cls._pool
|
Yields: new client-side cursor from existing db connection pool
|
def get_cursor(cls, cursor_type=_CursorType.PLAIN) -> Cursor:
"""
Yields:
new client-side cursor from existing db connection pool
"""
_cur = None
if cls._use_pool:
_connection_source = yield from cls.get_pool()
else:
_connection_source = yield from aiopg.connect(echo=False, **cls._connection_params)
if cursor_type == _CursorType.PLAIN:
_cur = yield from _connection_source.cursor()
if cursor_type == _CursorType.NAMEDTUPLE:
_cur = yield from _connection_source.cursor(cursor_factory=psycopg2.extras.NamedTupleCursor)
if cursor_type == _CursorType.DICT:
_cur = yield from _connection_source.cursor(cursor_factory=psycopg2.extras.DictCursor)
if not cls._use_pool:
_cur = cursor_context_manager(_connection_source, _cur)
return _cur
|
gives the number of records in the table
|
def count(cls, cur, table:str, where_keys: list=None):
"""
gives the number of records in the table
Args:
table: a string indicating the name of the table
Returns:
an integer indicating the number of records in the table
"""
if where_keys:
where_clause, values = cls._get_where_clause_with_values(where_keys)
query = cls._count_query_where.format(table, where_clause)
q, t = query, values
else:
query = cls._count_query.format(table)
q, t = query, ()
yield from cur.execute(q, t)
result = yield from cur.fetchone()
return int(result[0])
|
Creates an insert statement with only chosen fields
|
def insert(cls, cur, table: str, values: dict):
"""
Creates an insert statement with only chosen fields
Args:
table: a string indicating the name of the table
values: a dict of fields and values to be inserted
Returns:
A 'Record' object with table columns as properties
"""
keys = cls._COMMA.join(values.keys())
value_place_holder = cls._PLACEHOLDER * len(values)
query = cls._insert_string.format(table, keys, value_place_holder[:-1])
yield from cur.execute(query, tuple(values.values()))
return (yield from cur.fetchone())
|
Creates an update query with only chosen fields. Supports multiple where clauses with AND, OR, or both
|
def update(cls, cur, table: str, values: dict, where_keys: list) -> tuple:
"""
Creates an update query with only chosen fields
Supports multiple where clauses with AND, OR, or both
Args:
table: a string indicating the name of the table
values: a dict of fields and values to be updated
where_keys: list of dictionaries
example of where keys: [{'name': ('>', 'cip'), 'url': ('=', 'cip.com')}, {'type': ('<=', 'manufacturer')}]
where_clause will look like ((name > %s AND url = %s) OR (type <= %s))
items within each dictionary get 'AND'-ed and dictionaries themselves get 'OR'-ed
Returns:
a list of 'Record' objects representing the updated rows
"""
keys = cls._COMMA.join(values.keys())
value_place_holder = cls._PLACEHOLDER * len(values)
where_clause, where_values = cls._get_where_clause_with_values(where_keys)
query = cls._update_string.format(table, keys, value_place_holder[:-1], where_clause)
yield from cur.execute(query, (tuple(values.values()) + where_values))
return (yield from cur.fetchall())
|
Creates a delete query with where keys. Supports multiple where clauses with AND, OR, or both
|
def delete(cls, cur, table: str, where_keys: list):
"""
Creates a delete query with where keys
Supports multiple where clauses with AND, OR, or both
Args:
table: a string indicating the name of the table
where_keys: list of dictionaries
example of where keys: [{'name': ('>', 'cip'), 'url': ('=', 'cip.com')}, {'type': ('<=', 'manufacturer')}]
where_clause will look like ((name > %s AND url = %s) OR (type <= %s))
items within each dictionary get 'AND'-ed and dictionaries themselves get 'OR'-ed
Returns:
an integer indicating count of rows deleted
"""
where_clause, values = cls._get_where_clause_with_values(where_keys)
query = cls._delete_query.format(table, where_clause)
yield from cur.execute(query, values)
return cur.rowcount
|
Creates a select query for selective columns with where keys. Supports multiple where clauses with AND, OR, or both
|
def select(cls, cur, table: str, order_by: str, columns: list=None, where_keys: list=None, limit=100,
offset=0):
"""
Creates a select query for selective columns with where keys
Supports multiple where clauses with AND, OR, or both
Args:
table: a string indicating the name of the table
order_by: a string indicating column name to order the results on
columns: list of columns to select from
where_keys: list of dictionaries
limit: the limit on the number of results
offset: offset on the results
example of where keys: [{'name': ('>', 'cip'), 'url': ('=', 'cip.com')}, {'type': ('<=', 'manufacturer')}]
where_clause will look like ((name > %s AND url = %s) OR (type <= %s))
items within each dictionary get 'AND'-ed and across dictionaries get 'OR'-ed
Returns:
A list of 'Record' object with table columns as properties
"""
if columns:
columns_string = cls._COMMA.join(columns)
if where_keys:
where_clause, values = cls._get_where_clause_with_values(where_keys)
query = cls._select_selective_column_with_condition.format(columns_string, table, where_clause,
order_by, limit, offset)
q, t = query, values
else:
query = cls._select_selective_column.format(columns_string, table, order_by, limit, offset)
q, t = query, ()
else:
if where_keys:
where_clause, values = cls._get_where_clause_with_values(where_keys)
query = cls._select_all_string_with_condition.format(table, where_clause, order_by, limit, offset)
q, t = query, values
else:
query = cls._select_all_string.format(table, order_by, limit, offset)
q, t = query, ()
yield from cur.execute(q, t)
return (yield from cur.fetchall())
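For illustration, a hypothetical call from inside another coroutine (table and column names are assumed) and the WHERE clause it produces:
# WHERE clause: ((name > %s AND url = %s) OR (type <= %s))
results = yield from cls.select(
    cur, 'vendors', order_by='name',
    columns=['name', 'url'],
    where_keys=[{'name': ('>', 'cip'), 'url': ('=', 'cip.com')},
                {'type': ('<=', 'manufacturer')}],
    limit=10, offset=0)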
|
Run a raw sql query
|
def raw_sql(cls, cur, query: str, values: tuple):
"""
Run a raw sql query
Args:
query : query string to execute
values : tuple of values to be used with the query
Returns:
result of query as list of named tuple
"""
yield from cur.execute(query, values)
return (yield from cur.fetchall())
|
This method is used to append content of the text argument to the out argument.
|
def serialize_text(out, text):
"""This method is used to append content of the `text`
argument to the `out` argument.
Depending on how many lines are in the text,
padding may be added to all lines except the first
one.
Concatenation result is appended to the `out` argument.
"""
padding = len(out)
# we need to add padding to all lines
# except the first one
add_padding = padding_adder(padding)
text = add_padding(text, ignore_first_line=True)
return out + text
|
This method is used to serialize a list of text pieces like ["some=u'Another'", "blah=124"]
|
def serialize_list(out, lst, delimiter=u'', max_length=20):
"""This method is used to serialize list of text
pieces like ["some=u'Another'", "blah=124"]
Depending on how many lines are in these items,
they are concatenated in row or as a column.
Concatenation result is appended to the `out` argument.
"""
have_multiline_items = any(map(is_multiline, lst))
result_will_be_too_long = sum(map(len, lst)) > max_length
if have_multiline_items or result_will_be_too_long:
padding = len(out)
add_padding = padding_adder(padding)
# we need to add padding to all lines
# except the first one
head, rest = cut_head(lst)
rest = map(add_padding, rest)
# add padding to the head, but not for its first line
head = add_padding(head, ignore_first_line=True)
# now join lines back
lst = chain((head,), rest)
delimiter += u'\n'
else:
delimiter += u' '
return out + delimiter.join(lst)
|
This function should return a unicode representation of the value
|
def format_value(value):
"""This function should return unicode representation of the value
"""
value_id = id(value)
if value_id in recursion_breaker.processed:
return u'<recursion>'
recursion_breaker.processed.add(value_id)
try:
if isinstance(value, six.binary_type):
# assume all byte strings are UTF-8 encoded;
# other encodings will fail to decode here
return u"'{0}'".format(value.decode('utf-8'))
elif isinstance(value, six.text_type):
return u"u'{0}'".format(value)
elif isinstance(value, (list, tuple)):
# long lists or lists with multiline items
# will be shown vertically
values = list(map(format_value, value))
result = serialize_list(u'[', values, delimiter=u',') + u']'
return force_unicode(result)
elif isinstance(value, dict):
items = six.iteritems(value)
# format each key/value pair as a text,
# calling format_value recursively
items = (tuple(map(format_value, item))
for item in items)
items = list(items)
# sort by keys for readability
items.sort()
# for each item value
items = [
serialize_text(
u'{0}: '.format(key),
item_value)
for key, item_value in items]
# and serialize these pieces as a list, enclosing
# them in curly brackets
result = serialize_list(u'{', items, delimiter=u',') + u'}'
return force_unicode(result)
return force_unicode(repr(value))
finally:
recursion_breaker.processed.remove(value_id)
|
Returns a __repr__ method which returns an ASCII representation of the object with the given fields.
|
def make_repr(*args, **kwargs):
"""Returns __repr__ method which returns ASCII
representaion of the object with given fields.
Without arguments, ``make_repr`` generates a method
which outputs all object's non-protected (non-undercored)
arguments which are not callables.
Accepts ``*args``, which should be a names of object's
attributes to be included in the output::
__repr__ = make_repr('foo', 'bar')
If you want to generate attribute's content on the fly,
then you should use keyword arguments and pass a callable
of one argument::
__repr__ = make_repr(foo=lambda obj: obj.blah + 100500)
"""
def method(self):
cls_name = self.__class__.__name__
if args:
field_names = args
else:
def underscored(name): return name.startswith('_')
def is_method(name): return callable(getattr(self, name))
def good_name(name):
return not underscored(name) and not is_method(name)
field_names = filter(good_name, dir(self))
field_names = sorted(field_names)
# at this stage, we turn field_names into
# attribute getters
field_getters = zip(field_names,
map(attrgetter, field_names))
# now process keyword args; they must
# contain callables of one argument
# that return the field's value
field_getters = chain(
field_getters,
kwargs.items())
fields = ((name, format_value(getter(self)))
for name, getter in field_getters)
# prepare key strings
fields = ((u'{0}='.format(name), value)
for name, value in fields)
# join values with their respective keys
fields = list(starmap(serialize_text, fields))
beginning = u'<{cls_name} '.format(
cls_name=cls_name,
)
result = serialize_list(
beginning,
fields)
# append closing bracket
result += u'>'
if ON_PYTHON2:
# on Python 2.x __repr__ must return bytes; on Python 3, unicode strings
result = result.encode('utf-8')
return result
return method
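A small sketch of both calling styles:
class Point(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y
    # explicit field list
    __repr__ = make_repr('x', 'y')

repr(Point(1, 2))  # roughly: <Point x=1 y=2>

class Span(object):
    def __init__(self, start, end):
        self.start = start
        self.end = end
    # computed field: a one-argument callable receives the instance
    __repr__ = make_repr(length=lambda obj: obj.end - obj.start)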
|
Set up a connection pool. :param host: Redis host :param port: Redis port :param loop: Event loop
|
def connect(self, host, port, minsize=5, maxsize=10, loop=None):
"""
Set up a connection pool
:param host: Redis host
:param port: Redis port
:param loop: Event loop
"""
# resolve the loop at call time; a get_event_loop() default argument
# would be evaluated only once, when the module is imported
loop = loop or asyncio.get_event_loop()
self._pool = yield from aioredis.create_pool((host, port), minsize=minsize, maxsize=maxsize, loop=loop)
|
Set a key in a cache. :param key: Key name :param value: Value :param namespace: Namespace to associate the key with :param expire: expiration :return:
|
def set_key(self, key, value, namespace=None, expire=0):
"""
Set a key in a cache.
:param key: Key name
:param value: Value
:param namespace: Namespace to associate the key with
:param expire: expiration
:return:
"""
with (yield from self._pool) as redis:
if namespace is not None:
key = self._get_key(namespace, key)
yield from redis.set(key, value, expire=expire)
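A minimal sketch of driving these coroutines (RedisCache is a hypothetical wrapper class holding _pool and _get_key):
@asyncio.coroutine
def warm_cache():
    cache = RedisCache()
    yield from cache.connect('localhost', 6379)
    # the key is combined with the namespace via _get_key() before storage
    yield from cache.set_key('token', 'abc123', namespace='sessions', expire=300)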
|
Helper function to traverse an element tree rooted at element, yielding nodes matching the query.
|
def traverse(element, query, deep=False):
"""
Helper function to traverse an element tree rooted at element, yielding nodes matching the query.
"""
# Grab the next part of the query (it will be chopped from the front each iteration).
part = query[0]
if not part:
# If the part is blank, we encountered a //, meaning search all sub-nodes.
query = query[1:]
part = query[0]
deep = True
# Parse out any predicate (tag[pred]) from this part of the query.
part, predicate = xpath_re.match(query[0]).groups()
for c in element._children:
if part in ('*', c.tagname) and c._match(predicate):
# A potential matching branch: this child matches the next query part (and predicate).
if len(query) == 1:
# If this is the last part of the query, we found a matching element, yield it.
yield c
else:
# Otherwise, check the children of this child against the next query part.
for e in traverse(c, query[1:]):
yield e
if deep:
# If we're searching all sub-nodes, traverse with the same query, regardless of matching.
# This basically creates a recursion branch to search EVERYWHERE for anything after //.
for e in traverse(c, query, deep=True):
yield e
|
Given a simplified XPath query string returns an array of normalized query parts.
|
def parse_query(query):
"""
Given a simplified XPath query string, returns an array of normalized query parts.
"""
parts = query.split('/')
norm = []
for p in parts:
p = p.strip()
if p:
norm.append(p)
elif '' not in norm:
norm.append('')
return norm
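For example (a // in the query produces an empty part, which traverse() treats as a deep search):
parse_query('catalog/book')    # ['catalog', 'book']
parse_query('catalog//price')  # ['catalog', '', 'price']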
|
:param url_or_path: A file-like object, a filesystem path, a URL, or a string containing XML :rtype: :class:`XmlElement`
|
def parse(url_or_path, encoding=None, handler_class=DrillHandler):
"""
:param url_or_path: A file-like object, a filesystem path, a URL, or a string containing XML
:rtype: :class:`XmlElement`
"""
handler = handler_class()
parser = expat.ParserCreate(encoding)
parser.buffer_text = 1
parser.StartElementHandler = handler.start_element
parser.EndElementHandler = handler.end_element
parser.CharacterDataHandler = handler.characters
if isinstance(url_or_path, basestring):
if '://' in url_or_path[:20]:
with contextlib.closing(url_lib.urlopen(url_or_path)) as f:
parser.ParseFile(f)
elif url_or_path[:100].strip().startswith('<'):
if isinstance(url_or_path, unicode):
if encoding is None:
encoding = 'utf-8'
url_or_path = url_or_path.encode(encoding)
parser.Parse(url_or_path, True)
else:
with open(url_or_path, 'rb') as f:
parser.ParseFile(f)
elif PY3 and isinstance(url_or_path, bytes):
parser.ParseFile(bytes_io(url_or_path))
else:
parser.ParseFile(url_or_path)
return handler.root
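A brief sketch of the three accepted input kinds (the path and URL are illustrative):
doc = parse('<root><item id="1">hello</item></root>')  # XML string
# doc = parse('/tmp/feed.xml')                         # filesystem path
# doc = parse('http://example.com/feed.xml')           # URL
print(doc.tagname)  # 'root'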
|
:param filelike: A file-like object with a read method :returns: An iterator yielding :class:`XmlElement` objects
|
def iterparse(filelike, encoding=None, handler_class=DrillHandler, xpath=None):
"""
:param filelike: A file-like object with a ``read`` method
:returns: An iterator yielding :class:`XmlElement` objects
"""
parser = expat.ParserCreate(encoding)
elem_iter = DrillElementIterator(filelike, parser)
handler = handler_class(elem_iter, xpath)
parser.buffer_text = 1
parser.StartElementHandler = handler.start_element
parser.EndElementHandler = handler.end_element
parser.CharacterDataHandler = handler.characters
return elem_iter
|
Writes an XML representation of this node (including descendants) to the specified file-like object.
|
def write(self, writer):
"""
Writes an XML representation of this node (including descendants) to the specified file-like object.
:param writer: An :class:`XmlWriter` instance to write this node to
"""
multiline = bool(self._children)
newline_start = multiline and not bool(self.data)
writer.start(self.tagname, self.attrs, newline=newline_start)
if self.data:
writer.data(self.data, newline=bool(self._children))
for c in self._children:
c.write(writer)
writer.end(self.tagname, indent=multiline)
|
Returns an XML representation of this node (including descendants). This method automatically creates an :class:`XmlWriter` instance internally to handle the writing.
|
def xml(self, **kwargs):
"""
Returns an XML representation of this node (including descendants). This method automatically creates an
:class:`XmlWriter` instance internally to handle the writing.
:param **kwargs: Any named arguments are passed along to the :class:`XmlWriter` constructor
"""
s = bytes_io()
writer = XmlWriter(s, **kwargs)
self.write(writer)
return s.getvalue()
|
Called when the parser detects a start tag (child element) while in this node. Internally creates an :class:`XmlElement` and adds it to the end of this node's children.
|
def append(self, name, attrs=None, data=None):
"""
Called when the parser detects a start tag (child element) while in this node. Internally creates an
:class:`XmlElement` and adds it to the end of this node's children.
:param name: The tag name to add
:param attrs: Attributes for the new tag
:param data: CDATA for the new tag
:returns: The newly-created element
:rtype: :class:`XmlElement`
"""
elem = self.__class__(name, attrs, data, parent=self, index=len(self._children))
self._children.append(elem)
return elem
|
Inserts a new element as a child of this element, before the specified index or sibling.
|
def insert(self, before, name, attrs=None, data=None):
"""
Inserts a new element as a child of this element, before the specified index or sibling.
:param before: An :class:`XmlElement` or a numeric index to insert the new node before
:param name: The tag name to add
:param attrs: Attributes for the new tag
:param data: CDATA for the new tag
:returns: The newly-created element
:rtype: :class:`XmlElement`
"""
if isinstance(before, self.__class__):
if before.parent != self:
raise ValueError('Cannot insert before an element with a different parent.')
before = before.index
# Make sure 0 <= before <= len(_children).
before = min(max(0, before), len(self._children))
elem = self.__class__(name, attrs, data, parent=self, index=before)
self._children.insert(before, elem)
# Re-index all the children.
for idx, c in enumerate(self._children):
c.index = idx
return elem
|
A generator yielding (key, value) attribute pairs, sorted by key name.
|
def items(self):
"""
A generator yielding ``(key, value)`` attribute pairs, sorted by key name.
"""
for key in sorted(self.attrs):
yield key, self.attrs[key]
|
A generator yielding children of this node.
|
def children(self, name=None, reverse=False):
"""
A generator yielding children of this node.
:param name: If specified, only consider elements with this tag name
:param reverse: If ``True``, children will be yielded in reverse declaration order
"""
elems = self._children
if reverse:
elems = reversed(elems)
for elem in elems:
if name is None or elem.tagname == name:
yield elem
|
Helper function to determine if this node matches the given predicate.
|
def _match(self, pred):
"""
Helper function to determine if this node matches the given predicate.
"""
if not pred:
return True
# Strip off the [ and ]
pred = pred[1:-1]
if pred.startswith('@'):
# An attribute predicate checks the existence (and optionally value) of an attribute on this tag.
pred = pred[1:]
if '=' in pred:
attr, value = pred.split('=', 1)
if value[0] in ('"', "'"):
value = value[1:]
if value[-1] in ('"', "'"):
value = value[:-1]
return self.attrs.get(attr) == value
else:
return pred in self.attrs
elif num_re.match(pred):
# An index predicate checks whether we are the n-th child of our parent (0-based).
index = int(pred)
if index < 0:
if self.parent:
# For negative indexes, count from the end of the list.
return self.index == (len(self.parent._children) + index)
else:
# If we're the root node, the only index we could be is 0.
return index == 0
else:
return index == self.index
else:
if '=' in pred:
tag, value = pred.split('=', 1)
if value[0] in ('"', "'"):
value = value[1:]
if value[-1] in ('"', "'"):
value = value[:-1]
for c in self._children:
if c.tagname == tag and c.data == value:
return True
else:
# A plain [tag] predicate means we match if we have a child with tagname "tag".
for c in self._children:
if c.tagname == pred:
return True
return False
|
Returns a canonical path to this element relative to the root node.
|
def path(self, include_root=False):
"""
Returns a canonical path to this element, relative to the root node.
:param include_root: If ``True``, include the root node in the path. Defaults to ``False``.
"""
path = '%s[%d]' % (self.tagname, self.index or 0)
p = self.parent
while p is not None:
if p.parent or include_root:
path = '%s[%d]/%s' % (p.tagname, p.index or 0, path)
p = p.parent
return path
|
Recursively find any descendants of this node with the given tag name. If a tag name is omitted, this will yield every descendant node.
|
def iter(self, name=None):
"""
Recursively find any descendants of this node with the given tag name. If a tag name is omitted, this will
yield every descendant node.
:param name: If specified, only consider elements with this tag name
:returns: A generator yielding descendants of this node
"""
for c in self._children:
if name is None or c.tagname == name:
yield c
for gc in c.find(name):
yield gc
|
Returns the last child of this node.
|
def last(self, name=None):
"""
Returns the last child of this node.
:param name: If specified, only consider elements with this tag name
:rtype: :class:`XmlElement`
"""
for c in self.children(name, reverse=True):
return c
|
Yields all parents of this element back to the root element.
|
def parents(self, name=None):
"""
Yields all parents of this element, back to the root element.
:param name: If specified, only consider elements with this tag name
"""
p = self.parent
while p is not None:
if name is None or p.tagname == name:
yield p
p = p.parent
|
Yields all siblings of this node (not including the node itself).
|
def siblings(self, name=None):
"""
Yields all siblings of this node (not including the node itself).
:param name: If specified, only consider elements with this tag name
"""
if self.parent is not None and self.index is not None:
for c in self.parent._children:
if c.index != self.index and (name is None or name == c.tagname):
yield c
|
Returns the next sibling of this node.
|
def next(self, name=None):
"""
Returns the next sibling of this node.
:param name: If specified, only consider elements with this tag name
:rtype: :class:`XmlElement`
"""
if self.parent is None or self.index is None:
return None
for idx in xrange(self.index + 1, len(self.parent)):
if name is None or self.parent[idx].tagname == name:
return self.parent[idx]
|
Returns the previous sibling of this node.
|
def prev(self, name=None):
"""
Returns the previous sibling of this node.
:param name: If specified, only consider elements with this tag name
:rtype: :class:`XmlElement`
"""
if self.parent is None or self.index is None:
return None
for idx in xrange(self.index - 1, -1, -1):
if name is None or self.parent[idx].tagname == name:
return self.parent[idx]
|
Parses the HTML table into a list of dictionaries, each of which represents a single observation.
|
def get_observations(self):
"""
Parses the HTML table into a list of dictionaries, each of which
represents a single observation.
"""
if self.empty:
return []
rows = list(self.tbody)
observations = []
for row_observation, row_details in zip(rows[::2], rows[1::2]):
data = {}
cells = OBSERVATION_XPATH(row_observation)
data['name'] = _clean_cell(cells[0])
data['date'] = _clean_cell(cells[1])
data['magnitude'] = _clean_cell(cells[3])
data['obscode'] = _clean_cell(cells[6])
cells = DETAILS_XPATH(row_details)
data['comp1'] = _clean_cell(cells[0])
data['chart'] = _clean_cell(cells[3]).replace('None', '')
data['comment_code'] = _clean_cell(cells[4])
data['notes'] = _clean_cell(cells[5])
observations.append(data)
return observations
|
Calculates cache key based on args and kwargs. args and kwargs must be instances of hashable types.
|
def get_cache_key(prefix, *args, **kwargs):
"""
Calculates cache key based on `args` and `kwargs`.
`args` and `kwargs` must be instances of hashable types.
"""
hash_args_kwargs = hash(tuple(kwargs.iteritems()) + args)
return '{}_{}'.format(prefix, hash_args_kwargs)
|
Cache result of function execution into the self object (mostly useful in models). Calculate cache key based on args and kwargs of the function (except self).
|
def cache_method(func=None, prefix=''):
"""
Cache result of function execution into the `self` object (mostly useful in models).
Calculate cache key based on `args` and `kwargs` of the function (except `self`).
"""
def decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
cache_key_prefix = prefix or '_cache_{}'.format(func.__name__)
cache_key = get_cache_key(cache_key_prefix, *args, **kwargs)
if not hasattr(self, cache_key):
# pass args/kwargs through so the cached call matches the cache key
setattr(self, cache_key, func(self, *args, **kwargs))
return getattr(self, cache_key)
return wrapper
if func is None:
return decorator
else:
return decorator(func)
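A usage sketch on an illustrative Django model:
class Article(models.Model):
    body = models.TextField()

    @cache_method
    def word_count(self):
        # computed once, then stored on the instance under a
        # '_cache_word_count...' attribute
        return len(self.body.split())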
|
Cache result of function execution into the Django cache backend. Calculate cache key based on prefix, args and kwargs of the function. To use it as an object method, set method=True.
|
def cache_func(prefix, method=False):
"""
Cache result of function execution into the django cache backend.
Calculate cache key based on `prefix`, `args` and `kwargs` of the function.
To use it as an object method, set `method=True`.
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
cache_args = args
if method:
cache_args = args[1:]
cache_key = get_cache_key(prefix, *cache_args, **kwargs)
cached_value = cache.get(cache_key)
if cached_value is None:
cached_value = func(*args, **kwargs)
cache.set(cache_key, cached_value)
return cached_value
return wrapper
return decorator
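A usage sketch (the functions and class are illustrative):
@cache_func('yearly_report')
def build_report(year):
    return expensive_query(year)  # expensive_query is hypothetical

class Store(object):
    @cache_func('store_totals', method=True)
    def totals(self):
        # with method=True, self is excluded from the cache key via args[1:]
        return sum(self.values)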
|
Wrapper around Django's ORM get functionality. Wrap anything that raises an ObjectDoesNotExist exception and provide a default value if necessary. default is None by default; it can be any callable, in which case it is called when ObjectDoesNotExist is raised.
|
def get_or_default(func=None, default=None):
"""
Wrapper around Django's ORM `get` functionality.
Wrap anything that raises ObjectDoesNotExist exception
and provide the default value if necessary.
`default` is None by default. `default` can be any callable;
if it is callable, it will be called when an ObjectDoesNotExist
exception is raised.
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except ObjectDoesNotExist:
if callable(default):
return default()
else:
return default
return wrapper
if func is None:
return decorator
else:
return decorator(func)
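A usage sketch with illustrative models:
@get_or_default
def get_profile(user_id):
    return Profile.objects.get(user_id=user_id)

profile = get_profile(42)  # None instead of ObjectDoesNotExist

@get_or_default(default=dict)
def get_settings(user_id):
    # callable default: a fresh {} is built on each miss
    return Settings.objects.get(user_id=user_id).to_dict()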
|
Turn column inputs from user into list of simple numbers.
|
def _get_column_nums_from_args(columns):
"""Turn column inputs from user into list of simple numbers.
Inputs can be:
- individual number: 1
- range: 1-3
- comma separated list: 1,2,3,4-6
"""
nums = []
for c in columns:
for p in c.split(','):
p = p.strip()
try:
c = int(p)
nums.append(c)
except (TypeError, ValueError):
start, ignore, end = p.partition('-')
try:
start = int(start)
end = int(end)
except (TypeError, ValueError):
raise ValueError(
'Did not understand %r, expected digit-digit' % p
)
inc = 1 if start < end else -1
nums.extend(range(start, end + inc, inc))
# The user will pass us 1-based indexes, but we need to use
# 0-based indexing with the row.
return [n - 1 for n in nums]
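For example:
_get_column_nums_from_args(['1,3-5'])  # -> [0, 2, 3, 4]
_get_column_nums_from_args(['3-1'])    # reversed range -> [2, 1, 0]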
|
Return only the part of the row which should be printed.
|
def _get_printable_columns(columns, row):
"""Return only the part of the row which should be printed.
"""
if not columns:
return row
# Extract the column values, in the order specified.
return tuple(row[c] for c in columns)
|
Writes a single observation to the output file.
|
def writerow(self, observation_data):
"""
Writes a single observation to the output file.
If the ``observation_data`` parameter is a dictionary, it is
converted to a list to keep a consistent field order (as described
in format specification). Otherwise it is assumed that the data
is a raw record ready to be written to file.
:param observation_data: a single observation as a dictionary or list
"""
if isinstance(observation_data, (list, tuple)):
row = observation_data
else:
row = self.dict_to_row(observation_data)
self.writer.writerow(row)
|
Takes a dictionary of observation data and converts it to a list of fields according to AAVSO visual format specification.
|
def dict_to_row(cls, observation_data):
"""
Takes a dictionary of observation data and converts it to a list
of fields according to AAVSO visual format specification.
:param cls: current class
:param observation_data: a single observation as a dictionary
"""
row = []
row.append(observation_data['name'])
row.append(observation_data['date'])
row.append(observation_data['magnitude'])
# remaining fields default to 'na' when missing or empty
for field in ('comment_code', 'comp1', 'comp2', 'chart', 'notes'):
value = observation_data.get(field, 'na')
row.append(value if value else 'na')
return row
|
Converts a raw input record to a dictionary of observation data.
|
def row_to_dict(cls, row):
"""
Converts a raw input record to a dictionary of observation data.
:param cls: current class
:param row: a single observation as a list or tuple
"""
comment_code = row[3]
if comment_code.lower() == 'na':
comment_code = ''
comp1 = row[4]
if comp1.lower() == 'na':
comp1 = ''
comp2 = row[5]
if comp2.lower() == 'na':
comp2 = ''
chart = row[6]
if chart.lower() == 'na':
chart = ''
notes = row[7]
if notes.lower() == 'na':
notes = ''
return {
'name': row[0],
'date': row[1],
'magnitude': row[2],
'comment_code': comment_code,
'comp1': comp1,
'comp2': comp2,
'chart': chart,
'notes': notes,
}
|
Get the name of the view function; used to avoid having to set the tag manually for every endpoint
|
def get_default_tag(app):
'''Get the name of the view function; used to avoid having to set the tag
manually for every endpoint'''
view_func = get_view_function(app, request.path, request.method)
if view_func:
return view_func.__name__
|
Match a url and return the view and arguments it will be called with, or None if there is no view. Creds: http://stackoverflow.com/a/38488506
|
def get_view_function(app, url, method):
"""Match a url and return the view and arguments
it will be called with, or None if there is no view.
Creds: http://stackoverflow.com/a/38488506
"""
# pylint: disable=too-many-return-statements
adapter = app.create_url_adapter(request)
try:
match = adapter.match(url, method=method)
except RequestRedirect as ex:
# recursively match redirects
return get_view_function(app, ex.new_url, method)
except (MethodNotAllowed, NotFound):
# no match
return None
try:
return app.view_functions[match[0]]
except KeyError:
# no view is associated with the endpoint
return None
|
Downloads all variable star observations by a given observer.
|
def download_observations(observer_code):
"""
Downloads all variable star observations by a given observer.
Performs a series of HTTP requests to AAVSO's WebObs search and
downloads the results page by page. Each page is then passed to
:py:class:`~pyaavso.parsers.webobs.WebObsResultsParser` and parse results
are added to the final observation list.
"""
page_number = 1
observations = []
while True:
logger.info('Downloading page %d...', page_number)
response = requests.get(WEBOBS_RESULTS_URL, params={
'obscode': observer_code,
'num_results': 200,
'obs_types': 'all',
'page': page_number,
})
logger.debug(response.request.url)
parser = WebObsResultsParser(response.text)
observations.extend(parser.get_observations())
# kinda silly, but there's no need for lxml machinery here
if '>Next</a>' not in response.text:
break
page_number += 1
return observations
|
Generates a random filename for an uploaded file using uuid4 hashes. You need to define UPLOADS_ROOT in your django settings, something like: UPLOADS_ROOT = rel(MEDIA_ROOT, 'uploads')
|
def get_random_filename(instance, filename):
"""
Generates a random filename for an uploaded file using uuid4 hashes
You need to define UPLOADS_ROOT in your django settings,
something like this:
UPLOADS_ROOT = rel(MEDIA_ROOT, 'uploads')
"""
folder = settings.UPLOADS_ROOT
ext = filename.split('.')[-1]
filename = '{}.{}'.format(str(uuid4()), ext)
return os.path.join(folder, filename)
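Hooked up as a Django upload_to callable (the model is illustrative):
class Document(models.Model):
    attachment = models.FileField(upload_to=get_random_filename)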
|
Generates likely unique image path using md5 hashes
|
def image_path(instance, filename):
"""Generates likely unique image path using md5 hashes"""
filename, ext = os.path.splitext(filename.lower())
instance_id_hash = hashlib.md5(str(instance.id).encode('utf-8')).hexdigest()
filename_hash = ''.join(random.sample(hashlib.md5(filename.encode('utf-8')).hexdigest(), 8))
return '{}/{}{}'.format(instance_id_hash, filename_hash, ext)
|
Get URLs for LSST the Docs (LTD) products from the LTD Keeper API.
|
async def get_ltd_product_urls(session):
"""Get URLs for LSST the Docs (LTD) products from the LTD Keeper API.
Parameters
----------
session : `aiohttp.ClientSession`
Your application's aiohttp client session.
See http://aiohttp.readthedocs.io/en/stable/client.html.
Returns
-------
product_urls : `list`
List of product URLs.
"""
product_url = 'https://keeper.lsst.codes/products/'
async with session.get(product_url) as response:
data = await response.json()
return data['products']
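A minimal sketch of driving the coroutine:
import aiohttp

async def main():
    async with aiohttp.ClientSession() as session:
        product_urls = await get_ltd_product_urls(session)
        print(len(product_urls), 'products')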
|
Get the product resource (JSON document) from the LSST the Docs API.
|
async def get_ltd_product(session, slug=None, url=None):
"""Get the product resource (JSON document) from the LSST the Docs API.
Parameters
----------
session : `aiohttp.ClientSession`
Your application's aiohttp client session.
See http://aiohttp.readthedocs.io/en/stable/client.html.
slug : `str`, optional
Slug identifying the product. This is the same as the subdomain.
For example, ``'ldm-151'`` is the slug for ``ldm-151.lsst.io``.
A full product URL can be provided instead, see ``url``.
url : `str`, optional
The full LTD Keeper URL for the product resource. For example,
``'https://keeper.lsst.codes/products/ldm-151'``. The ``slug``
can be provided instead.
Returns
-------
product : `dict`
Product dataset. See
https://ltd-keeper.lsst.io/products.html#get--products-(slug)
for fields.
"""
if url is None:
url = 'https://keeper.lsst.codes/products/{}'.format(slug)
async with session.get(url) as response:
data = await response.json()
return data
|
Extract, transform, and load metadata from Lander-based projects.
|
async def process_lander_page(session, github_api_token, ltd_product_data,
mongo_collection=None):
"""Extract, transform, and load metadata from Lander-based projects.
Parameters
----------
session : `aiohttp.ClientSession`
Your application's aiohttp client session.
See http://aiohttp.readthedocs.io/en/stable/client.html.
github_api_token : `str`
A GitHub personal API token. See the `GitHub personal access token
guide`_.
ltd_product_data : `dict`
Data for this technote from the LTD Keeper API
(``GET /products/<slug>``). Usually obtained via
`lsstprojectmeta.ltd.get_ltd_product`.
mongo_collection : `motor.motor_asyncio.AsyncIOMotorCollection`, optional
MongoDB collection. This should be the common MongoDB collection for
LSST projectmeta JSON-LD records. If provided, this JSON-LD is upserted
into the MongoDB collection.
Returns
-------
metadata : `dict`
JSON-LD-formatted dictionary.
Raises
------
NotLanderPageError
Raised when the LTD product cannot be interpreted as a Lander page
because the ``/metadata.jsonld`` file is absent. This implies that
the LTD product *could* be of a different format.
.. _GitHub personal access token guide: https://ls.st/41d
"""
logger = logging.getLogger(__name__)
# Try to download metadata.jsonld from the Landing page site.
published_url = ltd_product_data['published_url']
jsonld_url = urljoin(published_url, '/metadata.jsonld')
try:
async with session.get(jsonld_url) as response:
logger.debug('%s response status %r', jsonld_url, response.status)
response.raise_for_status()
json_data = await response.text()
except aiohttp.ClientResponseError as err:
logger.debug('Tried to download %s, got status %d',
jsonld_url, err.code)
raise NotLanderPageError()
# Use our own json parser to get datetimes
metadata = decode_jsonld(json_data)
if mongo_collection is not None:
await _upload_to_mongodb(mongo_collection, metadata)
return metadata
|
Upsert the technote resource into the projectmeta MongoDB collection.
|
async def _upload_to_mongodb(collection, jsonld):
"""Upsert the technote resource into the projectmeta MongoDB collection.
Parameters
----------
collection : `motor.motor_asyncio.AsyncIOMotorCollection`
The MongoDB collection.
jsonld : `dict`
The JSON-LD document that represents the document resource.
"""
document = {
'data': jsonld
}
query = {
'data.reportNumber': jsonld['reportNumber']
}
await collection.update(query, document, upsert=True, multi=False)
|
Converts an Open511 JSON document to XML.
|
def json_doc_to_xml(json_obj, lang='en', custom_namespace=None):
"""Converts a Open511 JSON document to XML.
lang: the appropriate language code
Takes a dict deserialized from JSON, returns an lxml Element.
Accepts only the full root-level JSON object from an Open511 response."""
if 'meta' not in json_obj:
raise Exception("This function requires a conforming Open511 JSON document with a 'meta' section.")
json_obj = dict(json_obj)
meta = json_obj.pop('meta')
elem = get_base_open511_element(lang=lang, version=meta.pop('version'))
pagination = json_obj.pop('pagination', None)
json_struct_to_xml(json_obj, elem, custom_namespace=custom_namespace)
if pagination:
elem.append(json_struct_to_xml(pagination, 'pagination', custom_namespace=custom_namespace))
json_struct_to_xml(meta, elem)
return elem
|
Converts an Open511 JSON fragment to XML.
|
def json_struct_to_xml(json_obj, root, custom_namespace=None):
"""Converts a Open511 JSON fragment to XML.
Takes a dict deserialized from JSON, returns an lxml Element.
This won't provide a conforming document if you pass in a full JSON document;
it's for translating little fragments, and is mostly used internally."""
if isinstance(root, (str, unicode)):
if root.startswith('!'):
root = etree.Element('{%s}%s' % (NS_PROTECTED, root[1:]))
elif root.startswith('+'):
if not custom_namespace:
raise Exception("JSON fields starts with +, but no custom namespace provided")
root = etree.Element('{%s}%s' % (custom_namespace, root[1:]))
else:
root = etree.Element(root)
if root.tag in ('attachments', 'grouped_events', 'media_files'):
for link in json_obj:
root.append(json_link_to_xml(link))
elif isinstance(json_obj, (str, unicode)):
root.text = json_obj
elif isinstance(json_obj, (int, float)):
root.text = unicode(json_obj)
elif isinstance(json_obj, dict):
if frozenset(json_obj.keys()) == frozenset(('type', 'coordinates')):
root.append(geojson_to_gml(json_obj))
else:
for key, val in json_obj.items():
if key == 'url' or key.endswith('_url'):
el = json_link_to_xml(val, json_link_key_to_xml_rel(key))
else:
el = json_struct_to_xml(val, key, custom_namespace=custom_namespace)
if el is not None:
root.append(el)
elif isinstance(json_obj, list):
tag_name = root.tag
if tag_name.endswith('ies'):
tag_name = tag_name[:-3] + 'y'
elif tag_name.endswith('s'):
tag_name = tag_name[:-1]
for val in json_obj:
el = json_struct_to_xml(val, tag_name, custom_namespace=custom_namespace)
if el is not None:
root.append(el)
elif json_obj is None:
return None
else:
raise NotImplementedError
return root
|
Given a dict deserialized from a GeoJSON object, returns an lxml Element of the corresponding GML geometry.
|
def geojson_to_gml(gj, set_srs=True):
"""Given a dict deserialized from a GeoJSON object, returns an lxml Element
of the corresponding GML geometry."""
tag = G(gj['type'])
if set_srs:
tag.set('srsName', 'urn:ogc:def:crs:EPSG::4326')
if gj['type'] == 'Point':
tag.append(G.pos(_reverse_geojson_coords(gj['coordinates'])))
elif gj['type'] == 'LineString':
tag.append(G.posList(' '.join(_reverse_geojson_coords(ll) for ll in gj['coordinates'])))
elif gj['type'] == 'Polygon':
rings = [
G.LinearRing(
G.posList(' '.join(_reverse_geojson_coords(ll) for ll in ring))
) for ring in gj['coordinates']
]
tag.append(G.exterior(rings.pop(0)))
for ring in rings:
tag.append(G.interior(ring))
elif gj['type'] in ('MultiPoint', 'MultiLineString', 'MultiPolygon'):
single_type = gj['type'][5:]
member_tag = single_type[0].lower() + single_type[1:] + 'Member'
for coord in gj['coordinates']:
tag.append(
G(member_tag, geojson_to_gml({'type': single_type, 'coordinates': coord}, set_srs=False))
)
else:
raise NotImplementedError
return tag
|
Transform a GEOS or OGR geometry object into an lxml Element for the GML geometry.
|
def geom_to_xml_element(geom):
"""Transform a GEOS or OGR geometry object into an lxml Element
for the GML geometry."""
if geom.srs.srid != 4326:
raise NotImplementedError("Only WGS 84 lat/long geometries (SRID 4326) are supported.")
# GeoJSON output is far more standard than GML, so go through that
return geojson_to_gml(json.loads(geom.geojson))
|
Delete LaTeX comments from TeX source.
|
def remove_comments(tex_source):
"""Delete latex comments from TeX source.
Parameters
----------
tex_source : str
TeX source content.
Returns
-------
tex_source : str
TeX source without comments.
"""
# Expression via http://stackoverflow.com/a/13365453
return re.sub(r'(?<!\\)%.*$', r'', tex_source, flags=re.M)
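For example (escaped \% signs survive because of the negative lookbehind):
remove_comments('value % inline comment\n50\\% kept')
# -> 'value \n50\\% kept'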
|
Read a TeX file, automatically processing and normalizing it (including other input files, removing comments, and deleting trailing whitespace).
|
def read_tex_file(root_filepath, root_dir=None):
r"""Read a TeX file, automatically processing and normalizing it
(including other input files, removing comments, and deleting trailing
whitespace).
Parameters
----------
root_filepath : `str`
Filepath to a TeX file.
root_dir : `str`
Root directory of the TeX project. This only needs to be set when
recursively reading in ``\input`` or ``\include`` files.
Returns
-------
tex_source : `str`
TeX source.
"""
with open(root_filepath, 'r') as f:
tex_source = f.read()
if root_dir is None:
root_dir = os.path.dirname(root_filepath)
# Text processing pipeline
tex_source = remove_comments(tex_source)
tex_source = remove_trailing_whitespace(tex_source)
tex_source = process_inputs(tex_source, root_dir=root_dir)
return tex_source
|
Insert referenced TeX file contents (from \input and \include commands) into the source.
|
def process_inputs(tex_source, root_dir=None):
r"""Insert referenced TeX file contents (from ``\input`` and ``\include``
commands) into the source.
Parameters
----------
tex_source : `str`
TeX source where referenced source files will be found and inserted.
root_dir : `str`, optional
Name of the directory containing the TeX project's root file. Files
referenced by TeX ``\input`` and ``\include`` commands are relative to
this directory. If not set, the current working directory is assumed.
Returns
-------
tex_source : `str`
TeX source.
See also
--------
`read_tex_file`
Recommended API for reading a root TeX source file and inserting
referenced files.
"""
logger = logging.getLogger(__name__)
def _sub_line(match):
"""Function to be used with re.sub to inline files for each match."""
fname = match.group('filename')
if not fname.endswith('.tex'):
full_fname = ".".join((fname, 'tex'))
else:
full_fname = fname
full_path = os.path.abspath(os.path.join(root_dir, full_fname))
try:
included_source = read_tex_file(full_path, root_dir=root_dir)
except IOError:
logger.error("Cannot open {0} for inclusion".format(full_path))
raise
else:
return included_source
tex_source = input_include_pattern.sub(_sub_line, tex_source)
return tex_source
|
Replace macros in the TeX source with their content.
|
def replace_macros(tex_source, macros):
r"""Replace macros in the TeX source with their content.
Parameters
----------
tex_source : `str`
TeX source content.
macros : `dict`
Keys are macro names (including leading ``\``) and values are the
content (as `str`) of the macros. See
`lsstprojectmeta.tex.scraper.get_macros`.
Returns
-------
tex_source : `str`
TeX source with known macros replaced.
Notes
-----
Macros with arguments are not supported.
Examples
--------
>>> macros = {r'\handle': 'LDM-nnn'}
>>> sample = r'This is document \handle.'
>>> replace_macros(sample, macros)
'This is document LDM-nnn.'
Any trailing slash after the macro command is also replaced by this
function.
>>> macros = {r'\product': 'Data Management'}
>>> sample = r'\title [Test Plan] { \product\ Test Plan}'
>>> replace_macros(sample, macros)
'\\title [Test Plan] { Data Management Test Plan}'
"""
for macro_name, macro_content in macros.items():
# '\\?' suffix matches an optional trailing '\' that might be used
# for spacing.
pattern = re.escape(macro_name) + r"\\?"
# Wrap macro_content in lambda to avoid processing escapes
tex_source = re.sub(pattern, lambda _: macro_content, tex_source)
return tex_source
|
Ensures that the provided document is an lxml Element or json dict.
|
def ensure_format(doc, format):
"""
Ensures that the provided document is an lxml Element or json dict.
"""
assert format in ('xml', 'json')
if getattr(doc, 'tag', None) == 'open511':
if format == 'json':
return xml_to_json(doc)
elif isinstance(doc, dict) and 'meta' in doc:
if format == 'xml':
return json_doc_to_xml(doc)
else:
raise ValueError("Unrecognized input document")
return doc
|
Convert an Open511 document between formats. input_doc - either an lxml open511 Element or a deserialized JSON dict; output_format - short string name of a valid output format, as listed above
|
def open511_convert(input_doc, output_format, serialize=True, **kwargs):
"""
Convert an Open511 document between formats.
input_doc - either an lxml open511 Element or a deserialized JSON dict
output_format - short string name of a valid output format, as listed above
"""
try:
output_format_info = FORMATS[output_format]
except KeyError:
raise ValueError("Unrecognized output format %s" % output_format)
input_doc = ensure_format(input_doc, output_format_info.input_format)
result = output_format_info.func(input_doc, **kwargs)
if serialize:
result = output_format_info.serializer(result)
return result
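A usage sketch, assuming FORMATS contains an 'xml' entry and json_doc is a deserialized Open511 document:
xml_string = open511_convert(json_doc, 'xml')
xml_element = open511_convert(json_doc, 'xml', serialize=False)  # lxml Element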
|
Construct an LsstLatexDoc instance by reading and parsing the LaTeX source.
|
def read(cls, root_tex_path):
"""Construct an `LsstLatexDoc` instance by reading and parsing the
LaTeX source.
Parameters
----------
root_tex_path : `str`
Path to the LaTeX source on the filesystem. For multi-file LaTeX
projects this should be the path to the root document.
Notes
-----
This method implements the following pipeline:
1. `lsstprojectmeta.tex.normalizer.read_tex_file`
2. `lsstprojectmeta.tex.scraper.get_macros`
3. `lsstprojectmeta.tex.normalizer.replace_macros`
Thus ``input`` and ``includes`` are resolved along with simple macros.
"""
# Read and normalize the TeX source, replacing macros with content
root_dir = os.path.dirname(root_tex_path)
tex_source = read_tex_file(root_tex_path)
tex_macros = get_macros(tex_source)
tex_source = replace_macros(tex_source, tex_macros)
return cls(tex_source, root_dir=root_dir)
|
HTML5-formatted document title (str).
|
def html_title(self):
"""HTML5-formatted document title (`str`)."""
return self.format_title(format='html5', deparagraph=True,
mathjax=False, smart=True)
|
HTML5-formatted document short title (str).
|
def html_short_title(self):
"""HTML5-formatted document short title (`str`)."""
return self.format_short_title(format='html5', deparagraph=True,
mathjax=False, smart=True)
|
HTML5-formatted authors (list of str).
|
def html_authors(self):
"""HTML5-formatted authors (`list` of `str`)."""
return self.format_authors(format='html5', deparagraph=True,
mathjax=False, smart=True)
|
HTML5-formatted document abstract (str).
|
def html_abstract(self):
"""HTML5-formatted document abstract (`str`)."""
return self.format_abstract(format='html5', deparagraph=False,
mathjax=False, smart=True)
|
Document is a draft if 'lsstdraft' is included in the documentclass options (bool).
|
def is_draft(self):
"""Document is a draft if ``'lsstdoc'`` is included in the
documentclass options (`bool`).
"""
if not hasattr(self, '_document_options'):
self._parse_documentclass()
if 'lsstdraft' in self._document_options:
return True
else:
return False
|
Get the document content in the specified markup format.
|
def format_content(self, format='plain', mathjax=False,
smart=True, extra_args=None):
"""Get the document content in the specified markup format.
Parameters
----------
format : `str`, optional
Output format (such as ``'html5'`` or ``'plain'``).
mathjax : `bool`, optional
Allow pandoc to use MathJax math markup.
smart : `bool`, optional
Allow pandoc to create "smart" unicode punctuation.
extra_args : `list`, optional
Additional command line flags to pass to Pandoc. See
`lsstprojectmeta.pandoc.convert.convert_text`.
Returns
-------
output_text : `str`
Converted content.
"""
output_text = convert_lsstdoc_tex(
self._tex, format,
mathjax=mathjax,
smart=smart,
extra_args=extra_args)
return output_text
|
Get the document title in the specified markup format.
|
def format_title(self, format='html5', deparagraph=True, mathjax=False,
smart=True, extra_args=None):
"""Get the document title in the specified markup format.
Parameters
----------
format : `str`, optional
Output format (such as ``'html5'`` or ``'plain'``).
deparagraph : `bool`, optional
Remove the paragraph tags from single paragraph content.
mathjax : `bool`, optional
Allow pandoc to use MathJax math markup.
smart : `bool`, optional
Allow pandoc to create "smart" unicode punctuation.
extra_args : `list`, optional
Additional command line flags to pass to Pandoc. See
`lsstprojectmeta.pandoc.convert.convert_text`.
Returns
-------
output_text : `str`
Converted content or `None` if the title is not available in
the document.
"""
if self.title is None:
return None
output_text = convert_lsstdoc_tex(
self.title, format,
deparagraph=deparagraph,
mathjax=mathjax,
smart=smart,
extra_args=extra_args)
return output_text
|
Get the document short title in the specified markup format.
|
def format_short_title(self, format='html5', deparagraph=True,
mathjax=False, smart=True, extra_args=None):
"""Get the document short title in the specified markup format.
Parameters
----------
format : `str`, optional
Output format (such as ``'html5'`` or ``'plain'``).
deparagraph : `bool`, optional
Remove the paragraph tags from single paragraph content.
mathjax : `bool`, optional
Allow pandoc to use MathJax math markup.
smart : `bool`, optional
Allow pandoc to create "smart" unicode punctuation.
extra_args : `list`, optional
Additional command line flags to pass to Pandoc. See
`lsstprojectmeta.pandoc.convert.convert_text`.
Returns
-------
output_text : `str`
Converted content or `None` if the short title is not available in
the document.
"""
if self.short_title is None:
return None
output_text = convert_lsstdoc_tex(
self.short_title, format,
deparagraph=deparagraph,
mathjax=mathjax,
smart=smart,
extra_args=extra_args)
return output_text
|
Get the document abstract in the specified markup format.
|
def format_abstract(self, format='html5', deparagraph=False, mathjax=False,
smart=True, extra_args=None):
"""Get the document abstract in the specified markup format.
Parameters
----------
format : `str`, optional
Output format (such as ``'html5'`` or ``'plain'``).
deparagraph : `bool`, optional
Remove the paragraph tags from single paragraph content.
mathjax : `bool`, optional
Allow pandoc to use MathJax math markup.
smart : `bool`, optional
Allow pandoc to create "smart" unicode punctuation.
extra_args : `list`, optional
Additional command line flags to pass to Pandoc. See
`lsstprojectmeta.pandoc.convert.convert_text`.
Returns
-------
output_text : `str`
Converted content or `None` if the title is not available in
the document.
"""
if self.abstract is None:
return None
abstract_latex = self._prep_snippet_for_pandoc(self.abstract)
output_text = convert_lsstdoc_tex(
abstract_latex, format,
deparagraph=deparagraph,
mathjax=mathjax,
smart=smart,
extra_args=extra_args)
return output_text
|
Get the document authors in the specified markup format.
|
def format_authors(self, format='html5', deparagraph=True, mathjax=False,
smart=True, extra_args=None):
"""Get the document authors in the specified markup format.
Parameters
----------
format : `str`, optional
Output format (such as ``'html5'`` or ``'plain'``).
deparagraph : `bool`, optional
Remove the paragraph tags from single paragraph content.
mathjax : `bool`, optional
Allow pandoc to use MathJax math markup.
        smart : `bool`, optional
Allow pandoc to create "smart" unicode punctuation.
extra_args : `list`, optional
Additional command line flags to pass to Pandoc. See
`lsstprojectmeta.pandoc.convert.convert_text`.
Returns
-------
output_text : `list` of `str`
Sequence of author names in the specified output markup format.
"""
formatted_authors = []
for latex_author in self.authors:
formatted_author = convert_lsstdoc_tex(
latex_author, format,
deparagraph=deparagraph,
mathjax=mathjax,
smart=smart,
extra_args=extra_args)
# removes Pandoc's terminal newlines
formatted_author = formatted_author.strip()
formatted_authors.append(formatted_author)
return formatted_authors
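For illustration, given ``\author{A.~Author, B.~Author, and C.~Author}`` in the TeX source, a call like the sketch below should yield one converted string per author (``doc`` is again a stand-in instance):
    # Hypothetical usage: something like ['A. Author', 'B. Author', 'C. Author']
    # when format='plain'; HTML fragments when format='html5'.
    author_names = doc.format_authors(format='plain')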
|
Parse documentclass options.
|
def _parse_documentclass(self):
"""Parse documentclass options.
        Sets the ``_document_options`` attribute.
"""
command = LatexCommand(
'documentclass',
{'name': 'options', 'required': False, 'bracket': '['},
{'name': 'class_name', 'required': True, 'bracket': '{'})
try:
parsed = next(command.parse(self._tex))
        except StopIteration:
            self._logger.warning('lsstdoc has no documentclass')
            self._document_options = []
            return
try:
content = parsed['options']
self._document_options = [opt.strip()
for opt in content.split(',')]
except KeyError:
self._logger.warning('lsstdoc has no documentclass options')
self._document_options = []
|
Parse the title from TeX source.
|
def _parse_title(self):
"""Parse the title from TeX source.
Sets these attributes:
- ``_title``
- ``_short_title``
"""
command = LatexCommand(
'title',
{'name': 'short_title', 'required': False, 'bracket': '['},
{'name': 'long_title', 'required': True, 'bracket': '{'})
try:
parsed = next(command.parse(self._tex))
        except StopIteration:
            self._logger.warning('lsstdoc has no title')
            self._title = None
            self._short_title = None
            return
self._title = parsed['long_title']
try:
self._short_title = parsed['short_title']
except KeyError:
self._logger.warning('lsstdoc has no short title')
self._short_title = None
|
Parse the document handle.
|
def _parse_doc_ref(self):
"""Parse the document handle.
Sets the ``_series``, ``_serial``, and ``_handle`` attributes.
"""
command = LatexCommand(
'setDocRef',
{'name': 'handle', 'required': True, 'bracket': '{'})
try:
parsed = next(command.parse(self._tex))
except StopIteration:
self._logger.warning('lsstdoc has no setDocRef')
self._handle = None
self._series = None
self._serial = None
return
self._handle = parsed['handle']
try:
self._series, self._serial = self._handle.split('-', 1)
except ValueError:
self._logger.warning('lsstdoc handle cannot be parsed into '
'series and serial: %r', self._handle)
self._series = None
self._serial = None
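The series/serial split is a plain ``str.split`` on the first hyphen; a self-contained sketch of the expected behavior:
    # 'LDM-151' splits into series 'LDM' and serial '151'. A handle with no
    # hyphen raises ValueError, which the method above maps to None values.
    series, serial = 'LDM-151'.split('-', 1)
    assert (series, serial) == ('LDM', '151')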
|
Parse the author from TeX source.
|
def _parse_author(self):
r"""Parse the author from TeX source.
Sets the ``_authors`` attribute.
Goal is to parse::
\author{
A.~Author,
B.~Author,
and
C.~Author}
Into::
['A. Author', 'B. Author', 'C. Author']
"""
command = LatexCommand(
'author',
{'name': 'authors', 'required': True, 'bracket': '{'})
try:
parsed = next(command.parse(self._tex))
except StopIteration:
self._logger.warning('lsstdoc has no author')
self._authors = []
return
try:
content = parsed['authors']
except KeyError:
self._logger.warning('lsstdoc has no author')
self._authors = []
return
# Clean content
content = content.replace('\n', ' ')
content = content.replace('~', ' ')
content = content.strip()
# Split content into list of individual authors
authors = []
for part in content.split(','):
part = part.strip()
for split_part in part.split('and '):
split_part = split_part.strip()
if len(split_part) > 0:
authors.append(split_part)
self._authors = authors
|
Parse the abstract from the TeX source.
|
def _parse_abstract(self):
"""Parse the abstract from the TeX source.
Sets the ``_abstract`` attribute.
"""
command = LatexCommand(
'setDocAbstract',
{'name': 'abstract', 'required': True, 'bracket': '{'})
try:
parsed = next(command.parse(self._tex))
except StopIteration:
self._logger.warning('lsstdoc has no abstract')
self._abstract = None
return
try:
content = parsed['abstract']
except KeyError:
self._logger.warning('lsstdoc has no abstract')
self._abstract = None
return
content = content.strip()
self._abstract = content
|
Process a LaTeX snippet of content for better transformation with pandoc.
|
def _prep_snippet_for_pandoc(self, latex_text):
"""Process a LaTeX snippet of content for better transformation
with pandoc.
Currently runs the CitationLinker to convert BibTeX citations to
href links.
"""
replace_cite = CitationLinker(self.bib_db)
latex_text = replace_cite(latex_text)
return latex_text
|
Load the BibTeX bibliography referenced by the document.
|
def _load_bib_db(self):
r"""Load the BibTeX bibliography referenced by the document.
        This method is triggered by the `bib_db` attribute and populates the
`_bib_db` private attribute.
The ``\bibliography`` command is parsed to identify the bibliographies
referenced by the document.
"""
# Get the names of custom bibtex files by parsing the
# \bibliography command and filtering out the default lsstdoc
# bibliographies.
command = LatexCommand(
'bibliography',
{'name': 'bib_names', 'required': True, 'bracket': '{'})
try:
parsed = next(command.parse(self._tex))
bib_names = [n.strip() for n in parsed['bib_names'].split(',')]
except StopIteration:
self._logger.warning('lsstdoc has no bibliography command')
bib_names = []
custom_bib_names = [n for n in bib_names
if n not in KNOWN_LSSTTEXMF_BIB_NAMES]
# Read custom bibliographies.
custom_bibs = []
for custom_bib_name in custom_bib_names:
            custom_bib_path = os.path.join(
                self._root_dir,
                custom_bib_name + '.bib'
            )
if not os.path.exists(custom_bib_path):
self._logger.warning('Could not find bibliography %r',
custom_bib_path)
continue
with open(custom_bib_path, 'r') as file_handle:
custom_bibs.append(file_handle.read())
if len(custom_bibs) > 0:
custom_bibtex = '\n\n'.join(custom_bibs)
else:
custom_bibtex = None
# Get the combined pybtex bibliography
db = get_bibliography(bibtex=custom_bibtex)
self._bib_db = db
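A sketch of consulting the loaded bibliography afterwards, assuming ``get_bibliography`` returns a pybtex ``BibliographyData`` whose ``entries`` mapping is keyed by citation key (the key below is a placeholder):
    # Hypothetical lookup against the combined bibliography.
    if 'some2020key' in doc.bib_db.entries:
        entry = doc.bib_db.entries['some2020key']
        print(entry.fields['title'])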
|
Parse the ``\date`` command, falling back to getting the most recent Git commit date and the current datetime.
|
def _parse_revision_date(self):
r"""Parse the ``\date`` command, falling back to getting the
most recent Git commit date and the current datetime.
Result is available from the `revision_datetime` attribute.
"""
doc_datetime = None
# First try to parse the \date command in the latex.
# \date is ignored for draft documents.
if not self.is_draft:
date_command = LatexCommand(
'date',
{'name': 'content', 'required': True, 'bracket': '{'})
try:
parsed = next(date_command.parse(self._tex))
command_content = parsed['content'].strip()
except StopIteration:
command_content = None
self._logger.warning('lsstdoc has no date command')
# Try to parse a date from the \date command
if command_content is not None and command_content != r'\today':
try:
doc_datetime = datetime.datetime.strptime(command_content,
'%Y-%m-%d')
# Assume LSST project time (Pacific)
project_tz = timezone('US/Pacific')
localized_datetime = project_tz.localize(doc_datetime)
# Normalize to UTC
doc_datetime = localized_datetime.astimezone(pytz.utc)
self._revision_datetime_source = 'tex'
except ValueError:
self._logger.warning('Could not parse a datetime from '
'lsstdoc date command: %r',
command_content)
# Fallback to getting the datetime from Git
if doc_datetime is None:
content_extensions = ('tex', 'bib', 'pdf', 'png', 'jpg')
try:
doc_datetime = get_content_commit_date(
content_extensions,
root_dir=self._root_dir)
self._revision_datetime_source = 'git'
except RuntimeError:
self._logger.warning('Could not get a datetime from the Git '
'repository at %r',
self._root_dir)
# Final fallback to the current datetime
if doc_datetime is None:
doc_datetime = pytz.utc.localize(datetime.datetime.now())
self._revision_datetime_source = 'now'
self._datetime = doc_datetime
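The localize-then-convert pattern used above, shown in isolation with pytz (dates are assumed to follow the ``%Y-%m-%d`` form the parser expects):
    import datetime
    import pytz
    from pytz import timezone

    naive = datetime.datetime.strptime('2017-06-01', '%Y-%m-%d')
    # localize() attaches the zone in a DST-aware way; astimezone() then
    # converts the localized datetime to UTC.
    utc_dt = timezone('US/Pacific').localize(naive).astimezone(pytz.utc)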
|
Create a JSON-LD representation of this LSST LaTeX document.
|
def build_jsonld(self, url=None, code_url=None, ci_url=None,
readme_url=None, license_id=None):
"""Create a JSON-LD representation of this LSST LaTeX document.
Parameters
----------
url : `str`, optional
URL where this document is published to the web. Prefer
the LSST the Docs URL if possible.
Example: ``'https://ldm-151.lsst.io'``.
code_url : `str`, optional
            Path to the document's repository, typically on GitHub.
Example: ``'https://github.com/lsst/LDM-151'``.
ci_url : `str`, optional
Path to the continuous integration service dashboard for this
document's repository.
Example: ``'https://travis-ci.org/lsst/LDM-151'``.
readme_url : `str`, optional
URL to the document repository's README file. Example:
``https://raw.githubusercontent.com/lsst/LDM-151/master/README.rst``.
license_id : `str`, optional
License identifier, if known. The identifier should be from the
listing at https://spdx.org/licenses/. Example: ``CC-BY-4.0``.
Returns
-------
jsonld : `dict`
JSON-LD-formatted dictionary.
"""
jsonld = {
'@context': [
"https://raw.githubusercontent.com/codemeta/codemeta/2.0-rc/"
"codemeta.jsonld",
"http://schema.org"],
'@type': ['Report', 'SoftwareSourceCode'],
'language': 'TeX',
'reportNumber': self.handle,
'name': self.plain_title,
'description': self.plain_abstract,
'author': [{'@type': 'Person', 'name': author_name}
for author_name in self.plain_authors],
            # This is a datetime.datetime, not a string. If writing to a
            # file, convert it to an ISO 8601 string first.
'dateModified': self.revision_datetime
}
try:
jsonld['articleBody'] = self.plain_content
jsonld['fileFormat'] = 'text/plain' # MIME type of articleBody
except RuntimeError:
# raised by pypandoc when it can't convert the tex document
self._logger.exception('Could not convert latex body to plain '
'text for articleBody.')
self._logger.warning('Falling back to tex source for articleBody')
jsonld['articleBody'] = self._tex
jsonld['fileFormat'] = 'text/plain' # no mimetype for LaTeX?
if url is not None:
jsonld['@id'] = url
jsonld['url'] = url
else:
# Fallback to using the document handle as the ID. This isn't
# entirely ideal from a linked data perspective.
jsonld['@id'] = self.handle
if code_url is not None:
jsonld['codeRepository'] = code_url
if ci_url is not None:
jsonld['contIntegration'] = ci_url
if readme_url is not None:
jsonld['readme'] = readme_url
        if license_id is not None:
            jsonld['license_id'] = license_id
return jsonld
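Because ``dateModified`` is a ``datetime.datetime``, writing the result to disk needs the ISO 8601 conversion the comment above mentions; a sketch (the URL and filename are illustrative):
    import json

    jsonld = doc.build_jsonld(url='https://ldm-151.lsst.io')
    jsonld['dateModified'] = jsonld['dateModified'].isoformat()
    with open('metadata.jsonld', 'w') as f:
        json.dump(jsonld, f, indent=2)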
|
Renames an existing database.
|
def rename(self, from_name, to_name):
"""Renames an existing database."""
log.info('renaming database from %s to %s' % (from_name, to_name))
self._run_stmt('alter database %s rename to %s' % (from_name, to_name))
|
Returns a list of existing connections to the named database.
|
def connections(self, name):
"""Returns a list of existing connections to the named database."""
stmt = """
select {fields} from pg_stat_activity
where datname = {datname!r} and pid <> pg_backend_pid()
""".format(fields=', '.join(CONNECTION_FIELDS), datname=name)
return list(Connection(**x) for x in self._iter_results(stmt))
|
Returns True if database server is running False otherwise.
|
def available(self, timeout=5):
"""Returns True if database server is running, False otherwise."""
host = self._connect_args['host']
port = self._connect_args['port']
try:
sock = socket.create_connection((host, port), timeout=timeout)
sock.close()
return True
except socket.error:
pass
return False
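A usage sketch: polling until the server accepts connections (``db`` stands in for an instance of this client class):
    import time

    # Wait up to roughly 30 seconds for the server to come up.
    for _ in range(30):
        if db.available(timeout=1):
            break
        time.sleep(1)
    else:
        raise RuntimeError('database server never became available')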
|
Saves the state of a database to a file.
|
def dump(self, name, filename):
"""
Saves the state of a database to a file.
Parameters
----------
name: str
the database to be backed up.
filename: str
path to a file where database backup will be written.
"""
if not self.exists(name):
            raise DatabaseError('database %s does not exist!' % name)
log.info('dumping %s to %s' % (name, filename))
self._run_cmd('pg_dump', '--verbose', '--blobs', '--format=custom',
'--file=%s' % filename, name)
|
Loads state of a backup file to a database.
|
def restore(self, name, filename):
"""
Loads state of a backup file to a database.
Note
----
        If the named database does not exist, it will be created.
Parameters
----------
name: str
the database to which backup will be restored.
filename: str
            path to a file containing a postgres database backup.
"""
if not self.exists(name):
self.create(name)
else:
            log.warning('overwriting contents of database %s' % name)
log.info('restoring %s from %s' % (name, filename))
self._run_cmd('pg_restore', '--verbose', '--dbname=%s' % name, filename)
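A round-trip sketch combining ``dump`` and ``restore`` (``db`` is a stand-in instance; database names and paths are illustrative):
    # Back up db1, then restore it into a copy; restore() creates the
    # target database if it does not already exist.
    db.dump('db1', '/tmp/db1.backup')
    db.restore('db1_copy', '/tmp/db1.backup')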
|
Provides a connection string for database.
|
def connection_dsn(self, name=None):
"""
Provides a connection string for database.
Parameters
----------
name: str, optional
an override database name for the connection string.
Returns
-------
str: the connection string (e.g. 'dbname=db1 user=user1 host=localhost port=5432')
"""
return ' '.join("%s=%s" % (param, value) for param, value in self._connect_options(name))
|
Provides a connection string for database as a sqlalchemy compatible URL.
|
def connection_url(self, name=None):
"""
Provides a connection string for database as a sqlalchemy compatible URL.
NB - this doesn't include special arguments related to SSL connectivity (which are outside the scope
of the connection URL format).
Parameters
----------
name: str, optional
an override database name for the connection string.
Returns
-------
str: the connection URL (e.g. postgresql://user1@localhost:5432/db1)
"""
return 'postgresql://{user}@{host}:{port}/{dbname}'.format(**{k: v for k, v in self._connect_options(name)})
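The URL is in the standard libpq/SQLAlchemy form, so it can feed an engine directly; a sketch assuming SQLAlchemy is installed (it is not otherwise used by this module):
    from sqlalchemy import create_engine, text

    engine = create_engine(db.connection_url('db1'))
    with engine.connect() as conn:
        print(conn.execute(text('select version()')).scalar())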
|
Connects the database client shell to the database.
|
def shell(self, expect=pexpect):
"""
Connects the database client shell to the database.
Parameters
----------
        expect: module, optional
            a pexpect-compatible module used to spawn the psql shell
            (overridable for testing).
"""
dsn = self.connection_dsn()
log.debug('connection string: %s' % dsn)
child = expect.spawn('psql "%s"' % dsn)
if self._connect_args['password'] is not None:
child.expect('Password: ')
child.sendline(self._connect_args['password'])
child.interact()
|
Returns settings from the server.
|
def settings(self):
"""Returns settings from the server."""
stmt = "select {fields} from pg_settings".format(fields=', '.join(SETTINGS_FIELDS))
settings = []
for row in self._iter_results(stmt):
row['setting'] = self._vartype_map[row['vartype']](row['setting'])
settings.append(Settings(**row))
return settings
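A usage sketch, assuming ``SETTINGS_FIELDS`` (not shown here) includes a ``name`` field on the ``Settings`` namedtuple:
    # Look up a single server setting by name.
    max_conn = next(s for s in db.settings() if s.name == 'max_connections')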
|