INSTRUCTION | RESPONSE
|---|---|
Packs a sub field's array into another array using a mask
|
def pack(array, sub_field_array, mask, inplace=False):
""" Packs a sub field's array into another array using a mask
Parameters
----------
array : numpy.ndarray
The array into which the sub field array will be packed
sub_field_array : numpy.ndarray
The sub field array to pack
mask : int
Mask of the sub field (e.g. 0b00001111)
inplace : bool, optional
If True, ``array`` is modified in place; otherwise a new packed array is returned (the default is False)
Raises
------
OverflowError
If the values contained in the sub field array are greater than its mask's number of bits
allows
"""
lsb = least_significant_bit(mask)
max_value = int(mask >> lsb)
if sub_field_array.max() > max_value:
raise OverflowError(
"value ({}) is greater than allowed (max: {})".format(
sub_field_array.max(), max_value
)
)
if inplace:
array[:] = array & ~mask
array[:] = array | ((sub_field_array << lsb) & mask).astype(array.dtype)
else:
array = array & ~mask
return array | ((sub_field_array << lsb) & mask).astype(array.dtype)
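# Usage sketch (not part of the original module): pack 3-bit values into the low
# bits of a composed uint8 field. The mask value is illustrative.
import numpy as np
composed = np.zeros(3, dtype=np.uint8)
return_numbers = np.array([1, 2, 3], dtype=np.uint8)
packed = pack(composed, return_numbers, np.uint8(0b00000111))       # new array
pack(composed, return_numbers, np.uint8(0b00000111), inplace=True)  # modifies composed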
|
Returns a list of the names of the dimensions that will be lost when converting from point_fmt_in to point_fmt_out
|
def lost_dimensions(point_fmt_in, point_fmt_out):
""" Returns a list of the names of the dimensions that will be lost
when converting from point_fmt_in to point_fmt_out
"""
unpacked_dims_in = PointFormat(point_fmt_in).dtype
unpacked_dims_out = PointFormat(point_fmt_out).dtype
out_dims = unpacked_dims_out.fields
completely_lost = []
for dim_name in unpacked_dims_in.names:
if dim_name not in out_dims:
completely_lost.append(dim_name)
return completely_lost
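# Illustrative call (exact names depend on the surrounding point-format tables):
dropped = lost_dimensions(3, 0)
print(dropped)  # e.g. ['gps_time', 'red', 'green', 'blue'] when going from LAS format 3 to 0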
|
Returns the numpy.dtype used to store the point records in a numpy array
|
def dtype(self):
""" Returns the numpy.dtype used to store the point records in a numpy array
.. note::
The dtype corresponds to the dtype with sub_fields *packed* into their
composed fields
"""
dtype = self._access_dict(dims.ALL_POINT_FORMATS_DTYPE, self.id)
dtype = self._dtype_add_extra_dims(dtype)
return dtype
|
Returns the numpy.dtype used to store the point records in a numpy array
|
def unpacked_dtype(self):
""" Returns the numpy.dtype used to store the point records in a numpy array
.. note::
The dtype corresponds to the dtype with sub_fields *unpacked*
"""
dtype = self._access_dict(dims.UNPACKED_POINT_FORMATS_DTYPES, self.id)
dtype = self._dtype_add_extra_dims(dtype)
return dtype
|
Returns a dict of the sub fields for this point format
|
def sub_fields(self):
""" Returns a dict of the sub fields for this point format
Returns
-------
Dict[str, Tuple[str, SubField]]
maps a sub field name to its composed dimension with additional information
"""
sub_fields_dict = {}
for composed_dim_name, sub_fields in self.composed_fields.items():
for sub_field in sub_fields:
sub_fields_dict[sub_field.name] = (composed_dim_name, sub_field)
return sub_fields_dict
|
Returns the number of extra bytes
|
def num_extra_bytes(self):
""" Returns the number of extra bytes
"""
return sum(np.dtype(extra_dim[1]).itemsize for extra_dim in self.extra_dims)
|
Returns True if the point format has waveform packet dimensions
|
def has_waveform_packet(self):
""" Returns True if the point format has waveform packet dimensions
"""
dimensions = set(self.dimension_names)
return all(name in dimensions for name in dims.WAVEFORM_FIELDS_NAMES)
|
Console script for satel_integra.
|
def main(port, ip, command, loglevel):
"""Console script for satel_integra."""
numeric_level = getattr(logging, loglevel.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % loglevel)
logging.basicConfig(level=numeric_level)
click.echo("Demo of satel_integra library")
if command == "demo":
demo(ip, port)
|
Function to calculate checksum as per Satel manual.
|
def checksum(command):
"""Function to calculate checksum as per Satel manual."""
crc = 0x147A
for b in command:
# rotate (crc 1 bit left)
crc = ((crc << 1) & 0xFFFF) | (crc & 0x8000) >> 15
crc = crc ^ 0xFFFF
crc = (crc + (crc >> 8) + b) & 0xFFFF
return crc
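# Usage sketch: CRC of the "start monitoring" payload used later in this module.
crc = checksum(bytearray(b'\x7F\x01\xDC\x99\x80\x00\x04\x00\x00\x00\x00\x00\x00'))
print(hex(crc))  # 16-bit value; generate_query appends the high byte first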
|
Debugging method to print out frames in hex.
|
def print_hex(data):
"""Debugging method to print out frames in hex."""
hex_msg = ""
for c in data:
hex_msg += "\\x" + format(c, "02x")
_LOGGER.debug(hex_msg)
|
Verify checksum and strip header and footer of received frame.
|
def verify_and_strip(resp):
"""Verify checksum and strip header and footer of received frame."""
if resp[0:2] != b'\xFE\xFE':
_LOGGER.error("Houston, we got problem:")
print_hex(resp)
raise Exception("Wrong header - got %X%X" % (resp[0], resp[1]))
if resp[-2:] != b'\xFE\x0D':
raise Exception("Wrong footer - got %X%X" % (resp[-2], resp[-1]))
output = resp[2:-2].replace(b'\xFE\xF0', b'\xFE')
c = checksum(bytearray(output[0:-2]))
if (256 * output[-2:-1][0] + output[-1:][0]) != c:
raise Exception("Wrong checksum - got %d expected %d" % (
(256 * output[-2:-1][0] + output[-1:][0]), c))
return output[0:-2]
|
Return list of positions of bits set to one in given data.
|
def list_set_bits(r, expected_length):
"""Return list of positions of bits set to one in given data.
This method is used to read e.g. violated zones. They are marked by ones
on respective bit positions - as per Satel manual.
"""
set_bit_numbers = []
bit_index = 0x1
assert (len(r) == expected_length + 1)
for b in r[1:]:
for i in range(8):
if ((b >> i) & 1) == 1:
set_bit_numbers.append(bit_index)
bit_index += 1
return set_bit_numbers
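# Usage sketch: the first byte (command code) is skipped; set bits map to 1-based positions.
assert list_set_bits(bytearray([0x00, 0b00000101]), 1) == [1, 3]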
|
Add header, checksum and footer to command data.
|
def generate_query(command):
"""Add header, checksum and footer to command data."""
data = bytearray(command)
c = checksum(data)
data.append(c >> 8)
data.append(c & 0xFF)
# Escape any 0xFE bytes in the payload; bytearray.replace returns a new object,
# so the result must be assigned back.
data = data.replace(b'\xFE', b'\xFE\xF0')
data = bytearray.fromhex("FEFE") + data + bytearray.fromhex("FE0D")
return data
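# Usage sketch: frame the "read status" payload used by keep_alive below.
frame = generate_query(b'\xEE\x01\x01')
# frame == b'\xFE\xFE' + payload + 2-byte checksum (with any FE bytes escaped) + b'\xFE\x0D'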
|
Basic demo of the monitoring capabilities.
|
def demo(host, port):
"""Basic demo of the monitoring capabilities."""
# logging.basicConfig(level=logging.DEBUG)
loop = asyncio.get_event_loop()
stl = AsyncSatel(host,
port,
loop,
[1, 2, 3, 4, 5, 6, 7, 8, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 25, 26, 27, 28, 29, 30],
[8, 9, 10]
)
loop.run_until_complete(stl.connect())
loop.create_task(stl.arm("3333", 1))
loop.create_task(stl.disarm("3333"))
loop.create_task(stl.keep_alive())
loop.create_task(stl.monitor_status())
loop.run_forever()
loop.close()
|
Make a TCP connection to the alarm system.
|
async def connect(self):
"""Make a TCP connection to the alarm system."""
_LOGGER.debug("Connecting...")
try:
self._reader, self._writer = await asyncio.open_connection(
self._host, self._port, loop=self._loop)
_LOGGER.debug("sucess connecting...")
except Exception as e:
_LOGGER.warning(
"Exception during connecting: %s.", e)
self._writer = None
self._reader = None
return False
return True
|
Start monitoring for interesting events.
|
async def start_monitoring(self):
"""Start monitoring for interesting events."""
data = generate_query(
b'\x7F\x01\xDC\x99\x80\x00\x04\x00\x00\x00\x00\x00\x00')
await self._send_data(data)
resp = await self._read_data()
if resp is None:
_LOGGER.warning("Start monitoring - no data!")
return
if resp[1:2] != b'\xFF':
_LOGGER.warning("Monitoring not accepted.")
|
0x17 outputs state: 0x17 + 16/32 bytes
|
def _output_changed(self, msg):
"""0x17 outputs state 0x17 + 16/32 bytes"""
status = {"outputs": {}}
output_states = list_set_bits(msg, 32)
self.violated_outputs = output_states
_LOGGER.debug("Output states: %s, monitored outputs: %s",
output_states, self._monitored_outputs)
for output in self._monitored_outputs:
status["outputs"][output] = \
1 if output in output_states else 0
_LOGGER.debug("Returning status: %s", status)
if self._output_changed_callback:
self._output_changed_callback(status)
return status
|
Send arming command to the alarm. Allowed modes: 0 to 3.
|
async def arm(self, code, partition_list, mode=0):
"""Send arming command to the alarm. Modes allowed: from 0 till 3."""
_LOGGER.debug("Sending arm command, mode: %s!", mode)
while len(code) < 16:
code += 'F'
code_bytes = bytearray.fromhex(code)
mode_command = 0x80 + mode
data = generate_query(mode_command.to_bytes(1, 'big')
+ code_bytes
+ partition_bytes(partition_list))
await self._send_data(data)
|
Send command to disarm.
|
async def disarm(self, code, partition_list):
"""Send command to disarm."""
_LOGGER.info("Sending disarm command.")
while len(code) < 16:
code += 'F'
code_bytes = bytearray.fromhex(code)
data = generate_query(b'\x84' + code_bytes
+ partition_bytes(partition_list))
await self._send_data(data)
|
Send command to clear the alarm.
|
async def clear_alarm(self, code, partition_list):
"""Send command to clear the alarm."""
_LOGGER.info("Sending clear the alarm command.")
while len(code) < 16:
code += 'F'
code_bytes = bytearray.fromhex(code)
data = generate_query(b'\x85' + code_bytes
+ partition_bytes(partition_list))
await self._send_data(data)
|
Send output turn on/off command to the alarm.
|
async def set_output(self, code, output_id, state):
"""Send output turn on command to the alarm."""
"""0x88 outputs on
+ 8 bytes - user code
+ 16/32 bytes - output list
If function is accepted, function result can be
checked by observe the system state """
_LOGGER.debug("Turn on, output: %s, code: %s", output_id, code)
while len(code) < 16:
code += 'F'
code_bytes = bytearray.fromhex(code)
mode_command = 0x88 if state else 0x89
data = generate_query(mode_command.to_bytes(1, 'big') +
code_bytes +
output_bytes(output_id))
await self._send_data(data)
|
A workaround for Satel Integra disconnecting after 25s.
|
async def keep_alive(self):
"""A workaround for Satel Integra disconnecting after 25s.
Every interval it sends a benign query to the device, ignoring the
answer, just to keep the connection alive.
"""
while True:
await asyncio.sleep(self._keep_alive_timeout)
if self.closed:
return
# Command to read status of the alarm
data = generate_query(b'\xEE\x01\x01')
await self._send_data(data)
|
Start monitoring of the alarm status.
|
async def monitor_status(self, alarm_status_callback=None,
zone_changed_callback=None,
output_changed_callback=None):
"""Start monitoring of the alarm status.
Send a command to the Satel Integra to start sending updates, then read
in a loop and call the respective callbacks as messages are received.
"""
self._alarm_status_callback = alarm_status_callback
self._zone_changed_callback = zone_changed_callback
self._output_changed_callback = output_changed_callback
_LOGGER.info("Starting monitor_status loop")
while not self.closed:
_LOGGER.debug("Iteration... ")
while not self.connected:
_LOGGER.info("Not connected, re-connecting... ")
await self.connect()
if not self.connected:
_LOGGER.warning("Not connected, sleeping for 10s... ")
await asyncio.sleep(self._reconnection_timeout)
continue
await self.start_monitoring()
if not self.connected:
_LOGGER.warning("Start monitoring failed, sleeping for 10s...")
await asyncio.sleep(self._reconnection_timeout)
continue
while True:
await self._update_status()
_LOGGER.debug("Got status!")
if not self.connected:
_LOGGER.info("Got connection broken, reconnecting!")
break
_LOGGER.info("Closed, quit monitoring.")
|
Stop monitoring and close connection.
|
def close(self):
"""Stop monitoring and close connection."""
_LOGGER.debug("Closing...")
self.closed = True
if self.connected:
self._writer.close()
|
Clear all database records matching our user_id.
|
def purge_db(self):
"""
Clear all database records matching our user_id.
"""
with self.engine.begin() as db:
purge_user(db, self.user_id)
|
Guess the type of a file.
|
def guess_type(self, path, allow_directory=True):
"""
Guess the type of a file.
If allow_directory is False, don't consider the possibility that the
file is a directory.
"""
if path.endswith('.ipynb'):
return 'notebook'
elif allow_directory and self.dir_exists(path):
return 'directory'
else:
return 'file'
|
Get the id of a file in the database. This function is specific to this implementation of ContentsManager and is not in the base class.
|
def get_file_id(self, path):
"""
Get the id of a file in the database. This function is specific to
this implementation of ContentsManager and is not in the base class.
"""
with self.engine.begin() as db:
try:
file_id = get_file_id(db, self.user_id, path)
except NoSuchFile:
self.no_such_entity(path)
return file_id
|
Get a notebook from the database.
|
def _get_notebook(self, path, content, format):
"""
Get a notebook from the database.
"""
with self.engine.begin() as db:
try:
record = get_file(
db,
self.user_id,
path,
content,
self.crypto.decrypt,
)
except NoSuchFile:
self.no_such_entity(path)
return self._notebook_model_from_db(record, content)
|
Build a notebook model from database record.
|
def _notebook_model_from_db(self, record, content):
"""
Build a notebook model from database record.
"""
path = to_api_path(record['parent_name'] + record['name'])
model = base_model(path)
model['type'] = 'notebook'
model['last_modified'] = model['created'] = record['created_at']
if content:
content = reads_base64(record['content'])
self.mark_trusted_cells(content, path)
model['content'] = content
model['format'] = 'json'
self.validate_notebook_model(model)
return model
|
Get a directory from the database.
|
def _get_directory(self, path, content, format):
"""
Get a directory from the database.
"""
with self.engine.begin() as db:
try:
record = get_directory(
db, self.user_id, path, content
)
except NoSuchDirectory:
if self.file_exists(path):
# TODO: It's awkward/expensive to have to check this to
# return a 400 instead of 404. Consider just 404ing.
self.do_400("Wrong type: %s" % path)
else:
self.no_such_entity(path)
return self._directory_model_from_db(record, content)
|
Apply _notebook_model_from_db or _file_model_from_db to each entry in file_records, depending on the result of guess_type.
|
def _convert_file_records(self, file_records):
"""
Apply _notebook_model_from_db or _file_model_from_db to each entry
in file_records, depending on the result of `guess_type`.
"""
for record in file_records:
type_ = self.guess_type(record['name'], allow_directory=False)
if type_ == 'notebook':
yield self._notebook_model_from_db(record, False)
elif type_ == 'file':
yield self._file_model_from_db(record, False, None)
else:
self.do_500("Unknown file type %s" % type_)
|
Build a directory model from database directory record.
|
def _directory_model_from_db(self, record, content):
"""
Build a directory model from database directory record.
"""
model = base_directory_model(to_api_path(record['name']))
if content:
model['format'] = 'json'
model['content'] = list(
chain(
self._convert_file_records(record['files']),
(
self._directory_model_from_db(subdir, False)
for subdir in record['subdirs']
),
)
)
return model
|
Build a file model from database record.
|
def _file_model_from_db(self, record, content, format):
"""
Build a file model from database record.
"""
# TODO: Most of this is shared with _notebook_model_from_db.
path = to_api_path(record['parent_name'] + record['name'])
model = base_model(path)
model['type'] = 'file'
model['last_modified'] = model['created'] = record['created_at']
if content:
bcontent = record['content']
model['content'], model['format'], model['mimetype'] = from_b64(
path,
bcontent,
format,
)
return model
|
Save a notebook.
|
def _save_notebook(self, db, model, path):
"""
Save a notebook.
Returns a validation message.
"""
nb_contents = from_dict(model['content'])
self.check_and_sign(nb_contents, path)
save_file(
db,
self.user_id,
path,
writes_base64(nb_contents),
self.crypto.encrypt,
self.max_file_size_bytes,
)
# It's awkward that this writes to the model instead of returning.
self.validate_notebook_model(model)
return model.get('message')
|
Save a non-notebook file.
|
def _save_file(self, db, model, path):
"""
Save a non-notebook file.
"""
save_file(
db,
self.user_id,
path,
to_b64(model['content'], model.get('format', None)),
self.crypto.encrypt,
self.max_file_size_bytes,
)
return None
|
Rename object from old_path to path.
|
def rename_file(self, old_path, path):
"""
Rename object from old_path to path.
NOTE: This method is unfortunately named on the base class. It
actually moves a file or a directory.
"""
with self.engine.begin() as db:
try:
if self.file_exists(old_path):
rename_file(db, self.user_id, old_path, path)
elif self.dir_exists(old_path):
rename_directory(db, self.user_id, old_path, path)
else:
self.no_such_entity(path)
except (FileExists, DirectoryExists):
self.already_exists(path)
except RenameRoot as e:
self.do_409(str(e))
|
Delete object corresponding to path.
|
def delete_file(self, path):
"""
Delete object corresponding to path.
"""
if self.file_exists(path):
self._delete_non_directory(path)
elif self.dir_exists(path):
self._delete_directory(path)
else:
self.no_such_entity(path)
|
Apply preprocessing steps to file/notebook content that we're going to write to the database.
|
def preprocess_incoming_content(content, encrypt_func, max_size_bytes):
"""
Apply preprocessing steps to file/notebook content that we're going to
write to the database.
Applies ``encrypt_func`` to ``content`` and checks that the result is
smaller than ``max_size_bytes``.
"""
encrypted = encrypt_func(content)
if max_size_bytes != UNLIMITED and len(encrypted) > max_size_bytes:
raise FileTooLarge()
return encrypted
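# Usage sketch with a no-op "encryption" function and a 1 KiB limit; UNLIMITED and
# FileTooLarge are defined elsewhere in this package.
encrypted = preprocess_incoming_content(b'cell contents', lambda b: b, 1024)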
|
Add a new user if they don't already exist.
|
def ensure_db_user(db, user_id):
"""
Add a new user if they don't already exist.
"""
with ignore_unique_violation():
db.execute(
users.insert().values(id=user_id),
)
|
Delete a user and all of their resources.
|
def purge_user(db, user_id):
"""
Delete a user and all of their resources.
"""
db.execute(files.delete().where(
files.c.user_id == user_id
))
db.execute(directories.delete().where(
directories.c.user_id == user_id
))
db.execute(users.delete().where(
users.c.id == user_id
))
|
Create a directory.
|
def create_directory(db, user_id, api_path):
"""
Create a directory.
"""
name = from_api_dirname(api_path)
if name == '/':
parent_name = null()
parent_user_id = null()
else:
# Convert '/foo/bar/buzz/' -> '/foo/bar/'
parent_name = name[:name.rindex('/', 0, -1) + 1]
parent_user_id = user_id
db.execute(
directories.insert().values(
name=name,
user_id=user_id,
parent_name=parent_name,
parent_user_id=parent_user_id,
)
)
|
Return a WHERE clause that matches entries in a directory.
|
def _is_in_directory(table, user_id, db_dirname):
"""
Return a WHERE clause that matches entries in a directory.
Parameterized on table because this clause is re-used between files and
directories.
"""
return and_(
table.c.parent_name == db_dirname,
table.c.user_id == user_id,
)
|
Delete a directory.
|
def delete_directory(db, user_id, api_path):
"""
Delete a directory.
"""
db_dirname = from_api_dirname(api_path)
try:
result = db.execute(
directories.delete().where(
and_(
directories.c.user_id == user_id,
directories.c.name == db_dirname,
)
)
)
except IntegrityError as error:
if is_foreign_key_violation(error):
raise DirectoryNotEmpty(api_path)
else:
raise
rowcount = result.rowcount
if not rowcount:
raise NoSuchDirectory(api_path)
return rowcount
|
Internal implementation of dir_exists.
|
def _dir_exists(db, user_id, db_dirname):
"""
Internal implementation of dir_exists.
Expects a db-style path name.
"""
return db.execute(
select(
[func.count(directories.c.name)],
).where(
and_(
directories.c.user_id == user_id,
directories.c.name == db_dirname,
),
)
).scalar() != 0
|
Return files in a directory.
|
def files_in_directory(db, user_id, db_dirname):
"""
Return files in a directory.
"""
fields = _file_default_fields()
rows = db.execute(
select(
fields,
).where(
_is_in_directory(files, user_id, db_dirname),
).order_by(
files.c.user_id,
files.c.parent_name,
files.c.name,
files.c.created_at,
).distinct(
files.c.user_id, files.c.parent_name, files.c.name,
)
)
return [to_dict_no_content(fields, row) for row in rows]
|
Return subdirectories of a directory.
|
def directories_in_directory(db, user_id, db_dirname):
"""
Return subdirectories of a directory.
"""
fields = _directory_default_fields()
rows = db.execute(
select(
fields,
).where(
_is_in_directory(directories, user_id, db_dirname),
)
)
return [to_dict_no_content(fields, row) for row in rows]
|
Return the names of all files/directories that are direct children of api_dirname.
|
def get_directory(db, user_id, api_dirname, content):
"""
Return the names of all files/directories that are direct children of
api_dirname.
If content is False, return a bare model containing just a database-style
name.
"""
db_dirname = from_api_dirname(api_dirname)
if not _dir_exists(db, user_id, db_dirname):
raise NoSuchDirectory(api_dirname)
if content:
files = files_in_directory(
db,
user_id,
db_dirname,
)
subdirectories = directories_in_directory(
db,
user_id,
db_dirname,
)
else:
files, subdirectories = None, None
# TODO: Consider using namedtuples for these return values.
return {
'name': db_dirname,
'files': files,
'subdirs': subdirectories,
}
|
Return a WHERE clause matching the given API path and user_id.
|
def _file_where(user_id, api_path):
"""
Return a WHERE clause matching the given API path and user_id.
"""
directory, name = split_api_filepath(api_path)
return and_(
files.c.name == name,
files.c.user_id == user_id,
files.c.parent_name == directory,
)
|
Return a SELECT statement that returns the latest N versions of a file.
|
def _select_file(user_id, api_path, fields, limit):
"""
Return a SELECT statement that returns the latest N versions of a file.
"""
query = select(fields).where(
_file_where(user_id, api_path),
).order_by(
_file_creation_order(),
)
if limit is not None:
query = query.limit(limit)
return query
|
Default fields returned by a file query.
|
def _file_default_fields():
"""
Default fields returned by a file query.
"""
return [
files.c.name,
files.c.created_at,
files.c.parent_name,
]
|
Get file data for the given user_id, path, and query_fields. The query_fields parameter specifies which database fields should be included in the returned file data.
|
def _get_file(db, user_id, api_path, query_fields, decrypt_func):
"""
Get file data for the given user_id, path, and query_fields. The
query_fields parameter specifies which database fields should be
included in the returned file data.
"""
result = db.execute(
_select_file(user_id, api_path, query_fields, limit=1),
).first()
if result is None:
raise NoSuchFile(api_path)
if files.c.content in query_fields:
return to_dict_with_content(query_fields, result, decrypt_func)
else:
return to_dict_no_content(query_fields, result)
|
Get file data for the given user_id and path.
|
def get_file(db, user_id, api_path, include_content, decrypt_func):
"""
Get file data for the given user_id and path.
Include content only if include_content=True.
"""
query_fields = _file_default_fields()
if include_content:
query_fields.append(files.c.content)
return _get_file(db, user_id, api_path, query_fields, decrypt_func)
|
Get the value in the id column for the file with the given user_id and path.
|
def get_file_id(db, user_id, api_path):
"""
Get the value in the 'id' column for the file with the given
user_id and path.
"""
return _get_file(
db,
user_id,
api_path,
[files.c.id],
unused_decrypt_func,
)['id']
|
Delete a file.
|
def delete_file(db, user_id, api_path):
"""
Delete a file.
TODO: Consider making this a soft delete.
"""
result = db.execute(
files.delete().where(
_file_where(user_id, api_path)
)
)
rowcount = result.rowcount
if not rowcount:
raise NoSuchFile(api_path)
return rowcount
|
Check if a file exists.
|
def file_exists(db, user_id, path):
"""
Check if a file exists.
"""
try:
get_file(
db,
user_id,
path,
include_content=False,
decrypt_func=unused_decrypt_func,
)
return True
except NoSuchFile:
return False
|
Rename a file.
|
def rename_file(db, user_id, old_api_path, new_api_path):
"""
Rename a file.
"""
# Overwriting existing files is disallowed.
if file_exists(db, user_id, new_api_path):
raise FileExists(new_api_path)
old_dir, old_name = split_api_filepath(old_api_path)
new_dir, new_name = split_api_filepath(new_api_path)
if old_dir != new_dir:
raise ValueError(
dedent(
"""
Can't rename object to new directory.
Old Path: {old_api_path}
New Path: {new_api_path}
""".format(
old_api_path=old_api_path,
new_api_path=new_api_path
)
)
)
db.execute(
files.update().where(
_file_where(user_id, old_api_path),
).values(
name=new_name,
created_at=func.now(),
)
)
|
Rename a directory.
|
def rename_directory(db, user_id, old_api_path, new_api_path):
"""
Rename a directory.
"""
old_db_path = from_api_dirname(old_api_path)
new_db_path = from_api_dirname(new_api_path)
if old_db_path == '/':
raise RenameRoot('Renaming the root directory is not permitted.')
# Overwriting existing directories is disallowed.
if _dir_exists(db, user_id, new_db_path):
raise DirectoryExists(new_api_path)
# Set this foreign key constraint to deferred so it's not violated
# when we run the first statement to update the name of the directory.
db.execute('SET CONSTRAINTS '
'pgcontents.directories_parent_user_id_fkey DEFERRED')
# Update name column for the directory that's being renamed
db.execute(
directories.update().where(
and_(
directories.c.user_id == user_id,
directories.c.name == old_db_path,
)
).values(
name=new_db_path,
)
)
# Update the name and parent_name of any descendant directories. Do
# this in a single statement so the non-deferrable check constraint
# is satisfied.
db.execute(
directories.update().where(
and_(
directories.c.user_id == user_id,
directories.c.name.startswith(old_db_path),
directories.c.parent_name.startswith(old_db_path),
)
).values(
name=func.concat(
new_db_path,
func.right(directories.c.name, -func.length(old_db_path))
),
parent_name=func.concat(
new_db_path,
func.right(
directories.c.parent_name,
-func.length(old_db_path)
)
),
)
)
|
Save a file.
|
def save_file(db, user_id, path, content, encrypt_func, max_size_bytes):
"""
Save a file.
TODO: Update-then-insert is probably cheaper than insert-then-update.
"""
content = preprocess_incoming_content(
content,
encrypt_func,
max_size_bytes,
)
directory, name = split_api_filepath(path)
with db.begin_nested() as savepoint:
try:
res = db.execute(
files.insert().values(
name=name,
user_id=user_id,
parent_name=directory,
content=content,
)
)
except IntegrityError as error:
# The file already exists, so overwrite its content with the newer
# version.
if is_unique_violation(error):
savepoint.rollback()
res = db.execute(
files.update().where(
_file_where(user_id, path),
).values(
content=content,
created_at=func.now(),
)
)
else:
# Unknown error. Reraise
raise
return res
|
Create a generator of decrypted files.
|
def generate_files(engine, crypto_factory, min_dt=None, max_dt=None,
logger=None):
"""
Create a generator of decrypted files.
Files are yielded in ascending order of their timestamp.
This function selects all current notebooks (optionally, falling within a
datetime range), decrypts them, and returns a generator yielding dicts,
each containing a decoded notebook and metadata including the user,
filepath, and timestamp.
Parameters
----------
engine : SQLAlchemy.engine
Engine encapsulating database connections.
crypto_factory : function[str -> Any]
A function from user_id to an object providing the interface required
by PostgresContentsManager.crypto. Results of this will be used for
decryption of the selected notebooks.
min_dt : datetime.datetime, optional
Minimum last modified datetime at which a file will be included.
max_dt : datetime.datetime, optional
Last modified datetime at and after which a file will be excluded.
logger : Logger, optional
"""
return _generate_notebooks(files, files.c.created_at,
engine, crypto_factory, min_dt, max_dt, logger)
|
Delete all database records for the given user_id.
|
def purge_remote_checkpoints(db, user_id):
"""
Delete all database records for the given user_id.
"""
db.execute(
remote_checkpoints.delete().where(
remote_checkpoints.c.user_id == user_id,
)
)
|
Create a generator of decrypted remote checkpoints.
|
def generate_checkpoints(engine, crypto_factory, min_dt=None, max_dt=None,
logger=None):
"""
Create a generator of decrypted remote checkpoints.
Checkpoints are yielded in ascending order of their timestamp.
This function selects all notebook checkpoints (optionally, falling within
a datetime range), decrypts them, and returns a generator yielding dicts,
each containing a decoded notebook and metadata including the user,
filepath, and timestamp.
Parameters
----------
engine : SQLAlchemy.engine
Engine encapsulating database connections.
crypto_factory : function[str -> Any]
A function from user_id to an object providing the interface required
by PostgresContentsManager.crypto. Results of this will be used for
decryption of the selected notebooks.
min_dt : datetime.datetime, optional
Minimum last modified datetime at which a file will be included.
max_dt : datetime.datetime, optional
Last modified datetime at and after which a file will be excluded.
logger : Logger, optional
"""
return _generate_notebooks(remote_checkpoints,
remote_checkpoints.c.last_modified,
engine, crypto_factory, min_dt, max_dt, logger)
|
See docstrings for generate_files and generate_checkpoints.
|
def _generate_notebooks(table, timestamp_column,
engine, crypto_factory, min_dt, max_dt, logger):
"""
See docstrings for `generate_files` and `generate_checkpoints`.
Parameters
----------
table : SQLAlchemy.Table
Table to fetch notebooks from, `files` or `remote_checkpoints`.
timestamp_column : SQLAlchemy.Column
`table`'s column storing timestamps, `created_at` or `last_modified`.
engine : SQLAlchemy.engine
Engine encapsulating database connections.
crypto_factory : function[str -> Any]
A function from user_id to an object providing the interface required
by PostgresContentsManager.crypto. Results of this will be used for
decryption of the selected notebooks.
min_dt : datetime.datetime
Minimum last modified datetime at which a file will be included.
max_dt : datetime.datetime
Last modified datetime at and after which a file will be excluded.
logger : Logger
"""
where_conds = []
if min_dt is not None:
where_conds.append(timestamp_column >= min_dt)
if max_dt is not None:
where_conds.append(timestamp_column < max_dt)
if table is files:
# Only select files that are notebooks
where_conds.append(files.c.name.like(u'%.ipynb'))
# Query for notebooks satisfying the conditions.
query = select([table]).order_by(timestamp_column)
for cond in where_conds:
query = query.where(cond)
result = engine.execute(query)
# Decrypt each notebook and yield the result.
for nb_row in result:
try:
# The decrypt function depends on the user
user_id = nb_row['user_id']
decrypt_func = crypto_factory(user_id).decrypt
nb_dict = to_dict_with_content(table.c, nb_row, decrypt_func)
if table is files:
# Correct for files schema differing somewhat from checkpoints.
nb_dict['path'] = nb_dict['parent_name'] + nb_dict['name']
nb_dict['last_modified'] = nb_dict['created_at']
# For 'content', we use `reads_base64` directly. If the db content
# format is changed from base64, the decoding should be changed
# here as well.
yield {
'id': nb_dict['id'],
'user_id': user_id,
'path': to_api_path(nb_dict['path']),
'last_modified': nb_dict['last_modified'],
'content': reads_base64(nb_dict['content']),
}
except CorruptedFile:
if logger is not None:
logger.warning(
'Corrupted file with id %d in table %s.'
% (nb_row['id'], table.name)
)
|
Re-encrypt a row from table with id of row_id.
|
def reencrypt_row_content(db,
table,
row_id,
decrypt_func,
encrypt_func,
logger):
"""
Re-encrypt a row from ``table`` with ``id`` of ``row_id``.
"""
q = (select([table.c.content])
.with_for_update()
.where(table.c.id == row_id))
[(content,)] = db.execute(q)
logger.info("Begin encrypting %s row %s.", table.name, row_id)
db.execute(
table
.update()
.where(table.c.id == row_id)
.values(content=encrypt_func(decrypt_func(content)))
)
logger.info("Done encrypting %s row %s.", table.name, row_id)
|
Get all file ids for a user.
|
def select_file_ids(db, user_id):
"""
Get all file ids for a user.
"""
return list(
db.execute(
select([files.c.id])
.where(files.c.user_id == user_id)
)
)
|
Get all remote checkpoint ids for a user.
|
def select_remote_checkpoint_ids(db, user_id):
"""
Get all remote checkpoint ids for a user.
"""
return list(
db.execute(
select([remote_checkpoints.c.id])
.where(remote_checkpoints.c.user_id == user_id)
)
)
|
Re-encrypt all of the files and checkpoints for a single user.
|
def reencrypt_user_content(engine,
user_id,
old_decrypt_func,
new_encrypt_func,
logger):
"""
Re-encrypt all of the files and checkpoints for a single user.
"""
logger.info("Begin re-encryption for user %s", user_id)
with engine.begin() as db:
# NOTE: Doing both of these operations in one transaction depends for
# correctness on the fact that the creation of new checkpoints always
# involves writing new data into the database from Python, rather than
# simply copying data inside the DB.
# If we change checkpoint creation so that it does an in-database copy,
# then we need to split this transaction to ensure that
# file-reencryption is complete before checkpoint-reencryption starts.
# If that doesn't happen, it will be possible for a user to create a
# new checkpoint in a transaction that hasn't seen the completed
# file-reencryption process, but we might not see that checkpoint here,
# which means that we would never update the content of that checkpoint
# to the new encryption key.
logger.info("Re-encrypting files for %s", user_id)
for (file_id,) in select_file_ids(db, user_id):
reencrypt_row_content(
db,
files,
file_id,
old_decrypt_func,
new_encrypt_func,
logger,
)
logger.info("Re-encrypting checkpoints for %s", user_id)
for (cp_id,) in select_remote_checkpoint_ids(db, user_id):
reencrypt_row_content(
db,
remote_checkpoints,
cp_id,
old_decrypt_func,
new_encrypt_func,
logger,
)
logger.info("Finished re-encryption for user %s", user_id)
|
Convert a secret key and a user ID into an encryption key to use with a cryptography.fernet.Fernet.
|
def derive_single_fernet_key(password, user_id):
"""
Convert a secret key and a user ID into an encryption key to use with a
``cryptography.fernet.Fernet``.
Taken from
https://cryptography.io/en/latest/fernet/#using-passwords-with-fernet
Parameters
----------
password : unicode
ascii-encodable key to derive
user_id : unicode
ascii-encodable user_id to use as salt
"""
password = ascii_unicode_to_bytes(password)
user_id = ascii_unicode_to_bytes(user_id)
kdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
salt=user_id,
iterations=100000,
backend=default_backend(),
)
return base64.urlsafe_b64encode(kdf.derive(password))
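# Usage sketch (assumes the cryptography package, which this module already imports):
from cryptography.fernet import Fernet
key = derive_single_fernet_key(u'master-secret', u'alice')  # illustrative values
token = Fernet(key).encrypt(b'notebook bytes')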
|
Derive a list of per-user Fernet keys from a list of master keys and a username.
|
def derive_fallback_fernet_keys(passwords, user_id):
"""
Derive a list of per-user Fernet keys from a list of master keys and a
username.
If a None is encountered in ``passwords``, it is forwarded.
Parameters
----------
passwords : list[unicode]
List of ascii-encodable keys to derive.
user_id : unicode or None
ascii-encodable user_id to use as salt
"""
# Normally I wouldn't advocate for these kinds of assertions, but we really
# really really don't want to mess up deriving encryption keys.
assert isinstance(passwords, (list, tuple)), \
"Expected list or tuple of keys, got %s." % type(passwords)
def derive_single_allow_none(k):
if k is None:
return None
return derive_single_fernet_key(k, user_id).decode('ascii')
return list(map(derive_single_allow_none, passwords))
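# Usage sketch: None entries are forwarded untouched (values are illustrative).
keys = derive_fallback_fernet_keys([u'old-secret', None], u'alice')
# keys[0] is an ascii Fernet key string, keys[1] is None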
|
Create and return a function suitable for passing as a crypto_factory to pgcontents.utils.sync.reencrypt_all_users
|
def single_password_crypto_factory(password):
"""
Create and return a function suitable for passing as a crypto_factory to
``pgcontents.utils.sync.reencrypt_all_users``
The factory here returns a ``FernetEncryption`` that uses a key derived
from ``password`` and salted with the supplied user_id.
"""
@memoize_single_arg
def factory(user_id):
return FernetEncryption(
Fernet(derive_single_fernet_key(password, user_id))
)
return factory
|
Decorator memoizing a single-argument function.
|
def memoize_single_arg(f):
"""
Decorator memoizing a single-argument function
"""
memo = {}
@wraps(f)
def memoized_f(arg):
try:
return memo[arg]
except KeyError:
result = memo[arg] = f(arg)
return result
return memoized_f
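# Usage sketch: the wrapped function runs once per distinct argument and the
# result is cached (names below are illustrative).
@memoize_single_arg
def crypto_for_user(user_id):
    print("building crypto for", user_id)
    return object()

crypto_for_user('alice')  # prints and caches
crypto_for_user('alice')  # served from the cache, no print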
|
Get the name from a column-like SQLAlchemy expression.
|
def _get_name(column_like):
"""
Get the name from a column-like SQLAlchemy expression.
Works for Columns and Cast expressions.
"""
if isinstance(column_like, Column):
return column_like.name
elif isinstance(column_like, Cast):
return column_like.clause.name
|
Convert a SQLAlchemy row that does not contain a content field to a dict.
|
def to_dict_no_content(fields, row):
"""
Convert a SQLAlchemy row that does not contain a 'content' field to a dict.
If row is None, return None.
Raises AssertionError if there is a field named 'content' in ``fields``.
"""
assert(len(fields) == len(row))
field_names = list(map(_get_name, fields))
assert 'content' not in field_names, "Unexpected content field."
return dict(zip(field_names, row))
|
Convert a SQLAlchemy row that contains a content field to a dict.
|
def to_dict_with_content(fields, row, decrypt_func):
"""
Convert a SQLAlchemy row that contains a 'content' field to a dict.
``decrypt_func`` will be applied to the ``content`` field of the row.
If row is None, return None.
Raises AssertionError if there is no field named 'content' in ``fields``.
"""
assert(len(fields) == len(row))
field_names = list(map(_get_name, fields))
assert 'content' in field_names, "Missing content field."
result = dict(zip(field_names, row))
result['content'] = decrypt_func(result['content'])
return result
|
Create a checkpoint of the current state of a notebook
|
def create_notebook_checkpoint(self, nb, path):
"""Create a checkpoint of the current state of a notebook
Returns a checkpoint_id for the new checkpoint.
"""
b64_content = writes_base64(nb)
with self.engine.begin() as db:
return save_remote_checkpoint(
db,
self.user_id,
path,
b64_content,
self.crypto.encrypt,
self.max_file_size_bytes,
)
|
Create a checkpoint of the current state of a file
|
def create_file_checkpoint(self, content, format, path):
"""Create a checkpoint of the current state of a file
Returns a checkpoint_id for the new checkpoint.
"""
try:
b64_content = to_b64(content, format)
except ValueError as e:
self.do_400(str(e))
with self.engine.begin() as db:
return save_remote_checkpoint(
db,
self.user_id,
path,
b64_content,
self.crypto.encrypt,
self.max_file_size_bytes,
)
|
delete a checkpoint for a file
|
def delete_checkpoint(self, checkpoint_id, path):
"""delete a checkpoint for a file"""
with self.engine.begin() as db:
return delete_single_remote_checkpoint(
db, self.user_id, path, checkpoint_id,
)
|
Get the content of a checkpoint.
|
def get_checkpoint_content(self, checkpoint_id, path):
"""Get the content of a checkpoint."""
with self.engine.begin() as db:
return get_remote_checkpoint(
db,
self.user_id,
path,
checkpoint_id,
self.crypto.decrypt,
)['content']
|
Return a list of checkpoints for a given file
|
def list_checkpoints(self, path):
"""Return a list of checkpoints for a given file"""
with self.engine.begin() as db:
return list_remote_checkpoints(db, self.user_id, path)
|
Rename all checkpoints for old_path to new_path.
|
def rename_all_checkpoints(self, old_path, new_path):
"""Rename all checkpoints for old_path to new_path."""
with self.engine.begin() as db:
return move_remote_checkpoints(
db,
self.user_id,
old_path,
new_path,
)
|
Delete all checkpoints for the given path.
|
def delete_all_checkpoints(self, path):
"""Delete all checkpoints for the given path."""
with self.engine.begin() as db:
delete_remote_checkpoints(db, self.user_id, path)
|
Purge all database records for the current user.
|
def purge_db(self):
"""
Purge all database records for the current user.
"""
with self.engine.begin() as db:
purge_remote_checkpoints(db, self.user_id)
|
Resolve a path based on a dictionary of manager prefixes.
|
def _resolve_path(path, manager_dict):
"""
Resolve a path based on a dictionary of manager prefixes.
Returns a triple of (prefix, manager, manager_relative_path).
"""
path = normalize_api_path(path)
parts = path.split('/')
# Try to find a sub-manager for the first subdirectory.
mgr = manager_dict.get(parts[0])
if mgr is not None:
return parts[0], mgr, '/'.join(parts[1:])
# Try to use the root manager, if one was supplied.
mgr = manager_dict.get('')
if mgr is not None:
return '', mgr, path
raise HTTPError(
404,
"Couldn't resolve path [{path}] and "
"no root manager supplied!".format(path=path)
)
|
Get an argument, either from kwargs or from the first entry in args. Raises a TypeError if argname is not in kwargs and len(args) == 0.
|
def _get_arg(argname, args, kwargs):
"""
Get an argument, either from kwargs or from the first entry in args.
Raises a TypeError if argname not in kwargs and len(args) == 0.
Mutates kwargs in place if the value is found in kwargs.
"""
try:
return kwargs.pop(argname), args
except KeyError:
pass
try:
return args[0], args[1:]
except IndexError:
raise TypeError("No value passed for %s" % argname)
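# Behaviour sketch: kwargs wins (and is mutated); otherwise the first positional is consumed.
assert _get_arg('path', (), {'path': 'a.txt'}) == ('a.txt', ())
assert _get_arg('path', ('b.txt', 1), {}) == ('b.txt', (1,))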
|
Prefix all path entries in model with the given prefix.
|
def _apply_prefix(prefix, model):
"""
Prefix all path entries in model with the given prefix.
"""
if not isinstance(model, dict):
raise TypeError("Expected dict for model, got %s" % type(model))
# We get unwanted leading/trailing slashes if prefix or model['path'] are
# '', both of which are legal values.
model['path'] = '/'.join((prefix, model['path'])).strip('/')
if model['type'] in ('notebook', 'file'):
return model
if model['type'] != 'directory':
raise ValueError("Unknown model type %s." % type(model))
content = model.get('content', None)
if content is not None:
for sub_model in content:
_apply_prefix(prefix, sub_model)
return model
|
Decorator for methods that accept path as a first argument.
|
def path_dispatch1(mname, returns_model):
"""
Decorator for methods that accept path as a first argument.
"""
def _wrapper(self, *args, **kwargs):
path, args = _get_arg('path', args, kwargs)
prefix, mgr, mgr_path = _resolve_path(path, self.managers)
result = getattr(mgr, mname)(mgr_path, *args, **kwargs)
if returns_model and prefix:
return _apply_prefix(prefix, result)
else:
return result
return _wrapper
|
Parameterized decorator for methods that accept path as a second argument.
|
def path_dispatch_kwarg(mname, path_default, returns_model):
"""
Parameterized decorator for methods that accept path as a second
argument.
"""
def _wrapper(self, path=path_default, **kwargs):
prefix, mgr, mgr_path = _resolve_path(path, self.managers)
result = getattr(mgr, mname)(path=mgr_path, **kwargs)
if returns_model and prefix:
return _apply_prefix(prefix, result)
else:
return result
return _wrapper
|
Decorator for methods accepting old_path and new_path.
|
def path_dispatch_old_new(mname, returns_model):
"""
Decorator for methods accepting old_path and new_path.
"""
def _wrapper(self, old_path, new_path, *args, **kwargs):
old_prefix, old_mgr, old_mgr_path = _resolve_path(
old_path, self.managers
)
new_prefix, new_mgr, new_mgr_path = _resolve_path(
new_path, self.managers,
)
if old_mgr is not new_mgr:
# TODO: Consider supporting this via get+delete+save.
raise HTTPError(
400,
"Can't move files between backends ({old} -> {new})".format(
old=old_path,
new=new_path,
)
)
assert new_prefix == old_prefix
result = getattr(new_mgr, mname)(
old_mgr_path,
new_mgr_path,
*args,
**kwargs
)
if returns_model and new_prefix:
return _apply_prefix(new_prefix, result)
else:
return result
return _wrapper
|
Strip slashes from directories before updating.
|
def _managers_changed(self, name, old, new):
"""
Strip slashes from directories before updating.
"""
for key in new:
if '/' in key:
raise ValueError(
"Expected directory names w/o slashes. Got [%s]" % key
)
self.managers = {k.strip('/'): v for k, v in new.items()}
|
Special case handling for listing root dir.
|
def get(self, path, content=True, type=None, format=None):
"""
Special case handling for listing root dir.
"""
path = normalize_api_path(path)
if path:
return self.__get(path, content=content, type=type, format=format)
if not content:
return base_directory_model('')
extra_content = self._extra_root_dirs()
rm = self.root_manager
if rm is None:
root_model = base_directory_model('')
root_model.update(
format='json',
content=extra_content,
)
else:
root_model = rm.get(
path,
content=content,
type=type,
format=format,
)
# Append the extra directories.
root_model['content'].extend(extra_content)
return root_model
|
Ensure that roots of our managers can't be deleted. This should be enforced by https://github.com/ipython/ipython/pull/8168, but rogue implementations might override this behavior.
|
def delete(self, path):
"""
Ensure that roots of our managers can't be deleted. This should be
enforced by https://github.com/ipython/ipython/pull/8168, but rogue
implementations might override this behavior.
"""
path = normalize_api_path(path)
if path in self.managers:
raise HTTPError(
400, "Can't delete root of %s" % self.managers[path]
)
return self.__delete(path)
|
Resolve paths with '..' to normalized paths, raising an error if the final result is outside root.
|
def normalize_api_path(api_path):
"""
Resolve paths with '..' to normalized paths, raising an error if the final
result is outside root.
"""
normalized = posixpath.normpath(api_path.strip('/'))
if normalized == '.':
normalized = ''
elif normalized.startswith('..'):
raise PathOutsideRoot(normalized)
return normalized
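# Behaviour sketch:
assert normalize_api_path('/foo/bar/../baz/') == 'foo/baz'
assert normalize_api_path('/') == ''
# normalize_api_path('../escape') raises PathOutsideRoot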
|
Split an API file path into directory and name.
|
def split_api_filepath(path):
"""
Split an API file path into directory and name.
"""
parts = path.rsplit('/', 1)
if len(parts) == 1:
name = parts[0]
dirname = '/'
else:
name = parts[1]
dirname = parts[0] + '/'
return from_api_dirname(dirname), name
|
Write a notebook as base64.
|
def writes_base64(nb, version=NBFORMAT_VERSION):
"""
Write a notebook as base64.
"""
return b64encode(writes(nb, version=version).encode('utf-8'))
|
Read a notebook from base64.
|
def reads_base64(nb, as_version=NBFORMAT_VERSION):
"""
Read a notebook from base64.
"""
try:
return reads(b64decode(nb).decode('utf-8'), as_version=as_version)
except Exception as e:
raise CorruptedFile(e)
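# Round-trip sketch (nbformat is already a dependency of this module):
from nbformat.v4 import new_notebook
b64 = writes_base64(new_notebook())
nb_again = reads_base64(b64)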
|
Decode base64 data of unknown format.
|
def _decode_unknown_from_base64(path, bcontent):
"""
Decode base64 data of unknown format.
Attempts to interpret data as utf-8, falling back to ascii on failure.
"""
content = b64decode(bcontent)
try:
return (content.decode('utf-8'), 'text')
except UnicodeError:
pass
return bcontent.decode('ascii'), 'base64'
|
Decode base64 content for a file.
|
def from_b64(path, bcontent, format):
"""
Decode base64 content for a file.
format:
If 'text', the contents will be decoded as UTF-8.
If 'base64', do nothing.
If not specified, try to decode as UTF-8, and fall back to base64
Returns a triple of decoded_content, format, and mimetype.
"""
decoders = {
'base64': lambda path, bcontent: (bcontent.decode('ascii'), 'base64'),
'text': _decode_text_from_base64,
None: _decode_unknown_from_base64,
}
try:
content, real_format = decoders[format](path, bcontent)
except HTTPError:
# Pass through HTTPErrors, since we intend for them to bubble all the
# way back to the API layer.
raise
except Exception as e:
# Anything else should be wrapped in a CorruptedFile, since it likely
# indicates misconfiguration of encryption.
raise CorruptedFile(e)
default_mimes = {
'text': 'text/plain',
'base64': 'application/octet-stream',
}
mimetype = mimetypes.guess_type(path)[0] or default_mimes[real_format]
return content, real_format, mimetype
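# Usage sketch: decode a small UTF-8 payload with no explicit format.
from base64 import b64encode
content, fmt, mime = from_b64('notes.txt', b64encode(b'hello'), None)
# -> ('hello', 'text', 'text/plain')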
|
Return an iterable of all prefix directories of path descending from root.
|
def prefix_dirs(path):
"""
Return an iterable of all prefix directories of path, descending from root.
"""
_dirname = posixpath.dirname
path = path.strip('/')
out = []
while path != '':
path = _dirname(path)
out.append(path)
return reversed(out)
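# Behaviour sketch: every ancestor directory of the path, root first.
assert list(prefix_dirs('a/b/c')) == ['', 'a', 'a/b']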
|
Decorator for converting PathOutsideRoot errors to 404s.
|
def outside_root_to_404(fn):
"""
Decorator for converting PathOutsideRoot errors to 404s.
"""
@wraps(fn)
def wrapped(*args, **kwargs):
try:
return fn(*args, **kwargs)
except PathOutsideRoot as e:
raise HTTPError(404, "Path outside root: [%s]" % e.args[0])
return wrapped
|
Create a user.
|
def create_user(db_url, user):
"""
Create a user.
"""
PostgresCheckpoints(
db_url=db_url,
user_id=user,
create_user_on_startup=True,
)
|
Split an iterable of models into a list of file paths and a list of directory paths.
|
def _separate_dirs_files(models):
"""
Split an iterable of models into a list of file paths and a list of
directory paths.
"""
dirs = []
files = []
for model in models:
if model['type'] == 'directory':
dirs.append(model['path'])
else:
files.append(model['path'])
return dirs, files
|