Search for the episode with the requested experience ID.
|
def episode_info(self):
"""
Search for the episode with the requested experience Id
:return:
"""
if self.show_info:
for season in self.show_info["seasons"]:
for episode in season["episodes"]:
for lang in episode["languages"].values():
for alpha in lang["alpha"].values():
if alpha["experienceId"] == self.experience_id:
return episode
|
Get the sources for a given experience_id, which is tied to a specific language. Returns a sources dict.
|
def sources(self):
"""
Get the sources for a given experience_id, which is tied to a specific language
:param experience_id: int; video content id
:return: sources dict
"""
api_url = self.sources_api_url.format(experience_id=self.experience_id)
res = self.get(api_url, params={"pinst_id": self.pinst_id})
return self.session.http.json(res)
|
Get the RSA key for the user and encrypt the user's password. Returns the encrypted password and the key timestamp.
|
def encrypt_password(self, email, password):
"""
        Get the RSA key for the user and encrypt the user's password
        :param email: steam account
        :param password: password for account
        :return: tuple of (encrypted password, rsa timestamp)
"""
res = self.session.http.get(self._get_rsa_key_url, params=dict(username=email, donotcache=self.donotcache))
rsadata = self.session.http.json(res, schema=self._rsa_key_schema)
rsa = RSA.construct((rsadata["publickey_mod"], rsadata["publickey_exp"]))
cipher = PKCS1_v1_5.new(rsa)
return base64.b64encode(cipher.encrypt(password.encode("utf8"))), rsadata["timestamp"]
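
# --- Illustrative sketch (not part of the plugin): the same RSA/PKCS#1 v1.5
# encryption step done standalone with pycryptodome. The key is generated
# locally here as a stand-in, instead of being fetched from Steam's RSA key
# endpoint as in encrypt_password() above.
import base64
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5

key = RSA.generate(2048)                    # stand-in for rsadata mod/exp
cipher = PKCS1_v1_5.new(key.publickey())    # same padding scheme as above
token = base64.b64encode(cipher.encrypt("hunter2".encode("utf8")))
print(token[:16])                           # base64-encoded ciphertext prefix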
|
Logs in to Steam
|
def dologin(self, email, password, emailauth="", emailsteamid="", captchagid="-1", captcha_text="", twofactorcode=""):
"""
Logs in to Steam
"""
epassword, rsatimestamp = self.encrypt_password(email, password)
login_data = {
'username': email,
"password": epassword,
"emailauth": emailauth,
"loginfriendlyname": "Streamlink",
"captchagid": captchagid,
"captcha_text": captcha_text,
"emailsteamid": emailsteamid,
"rsatimestamp": rsatimestamp,
"remember_login": True,
"donotcache": self.donotcache,
"twofactorcode": twofactorcode
}
res = self.session.http.post(self._dologin_url, data=login_data)
resp = self.session.http.json(res, schema=self._dologin_schema)
if not resp[u"success"]:
if resp.get(u"captcha_needed"):
# special case for captcha
captchagid = resp[u"captcha_gid"]
log.error("Captcha result required, open this URL to see the captcha: {}".format(
self._captcha_url.format(captchagid)))
try:
captcha_text = self.input_ask("Captcha text")
except FatalPluginError:
captcha_text = None
if not captcha_text:
return False
else:
# If the user must enter the code that was emailed to them
if resp.get(u"emailauth_needed"):
if not emailauth:
try:
emailauth = self.input_ask("Email auth code required")
except FatalPluginError:
emailauth = None
if not emailauth:
return False
else:
raise SteamLoginFailed("Email auth key error")
# If the user must enter a two factor auth code
if resp.get(u"requires_twofactor"):
try:
twofactorcode = self.input_ask("Two factor auth code required")
except FatalPluginError:
twofactorcode = None
if not twofactorcode:
return False
if resp.get(u"message"):
raise SteamLoginFailed(resp[u"message"])
return self.dologin(email, password,
emailauth=emailauth,
emailsteamid=resp.get(u"emailsteamid", u""),
captcha_text=captcha_text,
captchagid=captchagid,
twofactorcode=twofactorcode)
elif resp.get("login_complete"):
return True
else:
log.error("Something when wrong when logging in to Steam")
return False
|
Returns the stream_id contained in the HTML.
|
def get_stream_id(self, html):
"""Returns the stream_id contained in the HTML."""
stream_id = stream_id_pattern.search(html)
        if not stream_id:
            self.logger.error("Failed to extract stream_id.")
            return None
        return stream_id.group("stream_id")
|
Returns a nested list of different stream options.
|
def get_stream_info(self, html):
"""
Returns a nested list of different stream options.
Each entry in the list will contain a stream_url and stream_quality_name
for each stream occurrence that was found in the JS.
"""
stream_info = stream_info_pattern.findall(html)
if not stream_info:
self.logger.error("Failed to extract stream_info.")
# Rename the "" quality to "source" by transforming the tuples to a
# list and reassigning.
stream_info_list = []
for info in stream_info:
if not info[1]:
stream_info_list.append([info[0], "source"])
else:
stream_info_list.append(list(info))
return stream_info_list
|
login and update cached cookies
|
def _login(self, username, password):
'''login and update cached cookies'''
self.logger.debug('login ...')
res = self.session.http.get(self.login_url)
input_list = self._input_re.findall(res.text)
if not input_list:
raise PluginError('Missing input data on login website.')
data = {}
for _input_data in input_list:
try:
_input_name = self._name_re.search(_input_data).group(1)
except AttributeError:
continue
try:
_input_value = self._value_re.search(_input_data).group(1)
except AttributeError:
_input_value = ''
data[_input_name] = _input_value
login_data = {
'ctl00$Login1$UserName': username,
'ctl00$Login1$Password': password,
'ctl00$Login1$LoginButton.x': '0',
'ctl00$Login1$LoginButton.y': '0'
}
data.update(login_data)
res = self.session.http.post(self.login_url, data=data)
for cookie in self.session.http.cookies:
self._session_attributes.set(cookie.name, cookie.value, expires=3600 * 24)
if self._session_attributes.get('ASP.NET_SessionId') and self._session_attributes.get('.abportail1'):
self.logger.debug('New session data')
self.set_expires_time_cache()
return True
else:
self.logger.error('Failed to login, check your username/password')
return False
|
Creates a key-function mapping.
|
def map(self, key, func, *args, **kwargs):
"""Creates a key-function mapping.
The return value from the function should be either
- A tuple containing a name and stream
- A iterator of tuples containing a name and stream
Any extra arguments will be passed to the function.
"""
self._map.append((key, partial(func, *args, **kwargs)))
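
# A minimal stand-in sketch of the key -> partial(func) registry above; the
# Resolver class and all names here are hypothetical, for illustration only.
from functools import partial

class Resolver:
    def __init__(self):
        self._map = []
    def map(self, key, func, *args, **kwargs):
        self._map.append((key, partial(func, *args, **kwargs)))

r = Resolver()
r.map("hls", lambda url, quality: (quality, url), quality="best")
key, fn = r._map[0]
print(key, fn("http://example.com/stream.m3u8"))  # hls ('best', 'http://...')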
|
Takes an ISO 8601 string and converts it into a UTC datetime (naive).
|
def parse_timestamp(ts):
"""Takes ISO 8601 format(string) and converts into a utc datetime(naive)"""
return (
datetime.datetime.strptime(ts[:-7], "%Y-%m-%dT%H:%M:%S") +
datetime.timedelta(hours=int(ts[-5:-3]), minutes=int(ts[-2:])) *
int(ts[-6:-5] + "1")
)
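
# Quick illustration of the sign trick above, assuming offsets of the form
# "+HH:MM"/"-HH:MM": the sign character concatenated with "1" parses to
# +1 or -1, which scales the offset timedelta.
for tail in ("+05:30", "-08:00"):
    sign = int(tail[-6:-5] + "1")
    print(sign, int(tail[-5:-3]), int(tail[-2:]))   # 1 5 30, then -1 8 0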
|
Makes a call against the API.
|
def _api_call(self, entrypoint, params=None, schema=None):
"""Makes a call against the api.
:param entrypoint: API method to call.
:param params: parameters to include in the request data.
:param schema: schema to use to validate the data
"""
url = self._api_url.format(entrypoint)
# Default params
params = params or {}
if self.session_id:
params.update({
"session_id": self.session_id
})
else:
params.update({
"device_id": self.device_id,
"device_type": self._access_type,
"access_token": self._access_token,
"version": self._version_code
})
        params.update({
            "locale": self.locale.replace('_', ''),
        })
# The certificate used by Crunchyroll cannot be verified in some environments.
res = self.session.http.post(url, data=params, headers=self.headers, verify=False)
json_res = self.session.http.json(res, schema=_api_schema)
if json_res["error"]:
err_msg = json_res.get("message", "Unknown error")
err_code = json_res.get("code", "unknown_error")
raise CrunchyrollAPIError(err_msg, err_code)
data = json_res.get("data")
if schema:
data = schema.validate(data, name="API response")
return data
|
Starts a session against Crunchyroll's server. It is recommended that you call this method before making any other calls to make sure you have a valid session against the server.
|
def start_session(self):
"""
Starts a session against Crunchyroll's server.
        It is recommended that you call this method before making any other calls
to make sure you have a valid session against the server.
"""
params = {}
if self.auth:
params["auth"] = self.auth
self.session_id = self._api_call("start_session", params, schema=_session_schema)
log.debug("Session created with ID: {0}".format(self.session_id))
return self.session_id
|
Authenticates the session to be able to access restricted data from the server (e.g. premium restricted videos).
|
def login(self, username, password):
"""
Authenticates the session to be able to access restricted data from
the server (e.g. premium restricted videos).
"""
params = {
"account": username,
"password": password
}
login = self._api_call("login", params, schema=_login_schema)
self.auth = login["auth"]
self.cache.set("auth", login["auth"], expires_at=login["expires"])
return login
|
Returns the data for a certain media item.
|
def get_info(self, media_id, fields=None, schema=None):
"""
Returns the data for a certain media item.
:param media_id: id that identifies the media item to be accessed.
        :param fields: list of the media's fields to be returned. By default the
API returns some fields, but others are not returned unless they are
        explicitly asked for. I have no real documentation on the fields, but
they all seem to start with the "media." prefix (e.g. media.name,
media.stream_data).
:param schema: validation schema to use
"""
params = {"media_id": media_id}
if fields:
params["fields"] = ",".join(fields)
return self._api_call("info", params, schema=schema)
|
Creates a new CrunchyrollAPI object, initiates its session, and tries to authenticate it either by using saved credentials or the user's username and password.
|
def _create_api(self):
"""Creates a new CrunchyrollAPI object, initiates it's session and
tries to authenticate it either by using saved credentials or the
user's username and password.
"""
if self.options.get("purge_credentials"):
self.cache.set("session_id", None, 0)
self.cache.set("auth", None, 0)
self.cache.set("session_id", None, 0)
# use the crunchyroll locale as an override, for backwards compatibility
locale = self.get_option("locale") or self.session.localization.language_code
api = CrunchyrollAPI(self.cache,
self.session,
session_id=self.get_option("session_id"),
locale=locale)
if not self.get_option("session_id"):
self.logger.debug("Creating session with locale: {0}", locale)
api.start_session()
if api.auth:
self.logger.debug("Using saved credentials")
login = api.authenticate()
self.logger.info("Successfully logged in as '{0}'",
login["user"]["username"] or login["user"]["email"])
elif self.options.get("username"):
try:
self.logger.debug("Attempting to login using username and password")
api.login(self.options.get("username"),
self.options.get("password"))
login = api.authenticate()
self.logger.info("Logged in as '{0}'",
login["user"]["username"] or login["user"]["email"])
except CrunchyrollAPIError as err:
raise PluginError(u"Authentication error: {0}".format(err.msg))
else:
self.logger.warning(
"No authentication provided, you won't be able to access "
"premium restricted content"
)
return api
|
Compress a byte string.
|
def compress(string, mode=MODE_GENERIC, quality=11, lgwin=22, lgblock=0):
"""Compress a byte string.
Args:
string (bytes): The input data.
mode (int, optional): The compression mode can be MODE_GENERIC (default),
MODE_TEXT (for UTF-8 format text input) or MODE_FONT (for WOFF 2.0).
quality (int, optional): Controls the compression-speed vs compression-
density tradeoff. The higher the quality, the slower the compression.
Range is 0 to 11. Defaults to 11.
lgwin (int, optional): Base 2 logarithm of the sliding window size. Range
is 10 to 24. Defaults to 22.
lgblock (int, optional): Base 2 logarithm of the maximum input block size.
Range is 16 to 24. If set to 0, the value will be set based on the
quality. Defaults to 0.
Returns:
The compressed byte string.
Raises:
brotli.error: If arguments are invalid, or compressor fails.
"""
compressor = Compressor(mode=mode, quality=quality, lgwin=lgwin,
lgblock=lgblock)
return compressor.process(string) + compressor.finish()
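
# Usage sketch, assuming the brotli package is installed: round-trip a byte
# string through compress() and the matching decompress().
import brotli
data = b"hello hello hello hello"
blob = brotli.compress(data, quality=11)
assert brotli.decompress(blob) == data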
|
Return the specified standard input, output, or error stream as a 'raw' buffer object suitable for reading/writing binary data from/to it.
|
def get_binary_stdio(stream):
""" Return the specified standard input, output or errors stream as a
'raw' buffer object suitable for reading/writing binary data from/to it.
"""
assert stream in ['stdin', 'stdout', 'stderr'], 'invalid stream name'
stdio = getattr(sys, stream)
if sys.version_info[0] < 3:
if sys.platform == 'win32':
# set I/O stream binary flag on python2.x (Windows)
runtime = platform.python_implementation()
if runtime == 'PyPy':
# the msvcrt trick doesn't work in pypy, so I use fdopen
mode = 'rb' if stream == 'stdin' else 'wb'
stdio = os.fdopen(stdio.fileno(), mode, 0)
else:
# this works with CPython -- untested on other implementations
import msvcrt
msvcrt.setmode(stdio.fileno(), os.O_BINARY)
return stdio
else:
# get 'buffer' attribute to read/write binary data on python3.x
if hasattr(stdio, 'buffer'):
return stdio.buffer
else:
orig_stdio = getattr(sys, '__%s__' % stream)
return orig_stdio.buffer
|
Show character in readable format
|
def outputCharFormatter(c):
"""Show character in readable format
"""
#TODO 2: allow hex only output
if 32<c<127: return chr(c)
elif c==10: return '\\n'
elif c==13: return '\\r'
elif c==32: return '" "'
else: return '\\x{:02x}'.format(c)
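
# A few sample outputs of the formatter above: printable ASCII comes back
# as-is, newline and carriage return get escapes, everything else is hex.
print(outputCharFormatter(65))    # A
print(outputCharFormatter(10))    # \n
print(outputCharFormatter(0))     # \x00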
|
Show string or char.
|
def outputFormatter(s):
"""Show string or char.
"""
    def formatSubString(s):
        for c in s:
            if c==32: yield ' '
            else: yield outputCharFormatter(c)
    if len(s)<200: return ''.join(formatSubString(s))
else:
return ''.join(formatSubString(s[:100]))+'...'+ \
''.join(formatSubString(s[-100:]))
|
Read n bits from the stream and return as an integer. Produces zero bits beyond the stream.
|
def read(self, n):
"""Read n bits from the stream and return as an integer.
Produces zero bits beyond the stream.
>>> olleke.data[0]==27
True
>>> olleke.read(5)
27
>>> olleke
BitStream(pos=0:5)
"""
value = self.peek(n)
self.pos += n
if self.pos>len(self.data)*8:
raise ValueError('Read past end of stream')
return value
|
Peek an n-bit integer from the stream without updating the pointer. It is not an error to read beyond the end of the stream.
|
def peek(self, n):
"""Peek an n bit integer from the stream without updating the pointer.
It is not an error to read beyond the end of the stream.
>>> olleke.data[:2]==b'\x1b\x2e' and 0x2e1b==11803
True
>>> olleke.peek(15)
11803
>>> hex(olleke.peek(32))
'0x2e1b'
"""
#read bytes that contain the data: self.data[self.pos>>3:self.pos+n+7>>3]
#convert to int: int.from_bytes(..., 'little')
#shift out the bits from the first byte: >>(self.pos&7)
#mask unwanted bits: & (1<<n)-1
return int.from_bytes(
self.data[self.pos>>3:self.pos+n+7>>3],
'little')>>(self.pos&7) & (1<<n)-1
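
# Worked instance of the peek() arithmetic above: with data b'\x1b\x2e' and
# pos=0, reading 5 bits masks the little-endian integer down to 27, matching
# the olleke doctest in read().
data, pos, n = b'\x1b\x2e', 0, 5
value = int.from_bytes(data[pos>>3:pos+n+7>>3], 'little')>>(pos&7) & (1<<n)-1
print(value)   # 27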
|
Read n bytes from the stream on a byte boundary.
|
def readBytes(self, n):
"""Read n bytes from the stream on a byte boundary.
"""
if self.pos&7: raise ValueError('readBytes: need byte boundary')
result = self.data[self.pos>>3:(self.pos>>3)+n]
self.pos += 8*n
return result
|
The value used for processing, with optional extra bits. Can be a tuple.
|
def value(self, extra=None):
"""The value used for processing. Can be a tuple.
with optional extra bits
"""
if isinstance(self.code, WithExtra):
if not 0<=extra<1<<self.extraBits():
raise ValueError("value: extra value doesn't fit in extraBits")
return self.code.value(self.index, extra)
if extra is not None:
raise ValueError('value: no extra bits for this code')
return self.code.value(self.index)
|
Long explanation of the value from the numeric value, with optional extra bits. Used by Layout.verboseRead when printing the value.
|
def explanation(self, extra=None):
"""Long explanation of the value from the numeric value
with optional extra bits
Used by Layout.verboseRead when printing the value
"""
if isinstance(self.code, WithExtra):
return self.code.callback(self, extra)
return self.code.callback(self)
|
Find which symbol index matches the given data (from peek, as a number) and return the number of bits decoded. Can also be used to figure out the length of a symbol.
|
def decodePeek(self, data):
"""Find which symbol index matches the given data (from peek, as a number)
and return the number of bits decoded.
Can also be used to figure out length of a symbol.
"""
return self.maxLength, Symbol(self, data&(1<<self.maxLength)-1)
|
Find which symbol index matches the given data (from peek, as a number) and return the number of bits decoded. Can also be used to figure out the length of a symbol.
|
def decodePeek(self, data):
"""Find which symbol index matches the given data (from peek, as a number)
and return the number of bits decoded.
Can also be used to figure out length of a symbol.
"""
#do binary search for word length
#invariant: lo<=length<=hi
lo, hi = self.minLength, self.maxLength
while lo<=hi:
mid = lo+hi>>1
#note lo<=mid<hi at this point
mask = (1<<mid)-1
#lets see what happens if we guess length is mid
try: index = self.decodeTable[data&mask]
except KeyError:
#too many bits specified, reduce estimated length
hi = mid-1
continue
#we found a symbol, but there could be a longer match
symbolLength = self.lengthTable[index]
if symbolLength<=mid:
#all bits match, symbol must be right
return symbolLength, Symbol(self, index)
#there must be more bits to match
lo = mid+1
return lo, Symbol(self, index)
|
Store decodeTable, and compute lengthTable, minLength, and maxLength from encodings.
|
def setDecode(self, decodeTable):
"""Store decodeTable,
and compute lengthTable, minLength, maxLength from encodings.
"""
self.decodeTable = decodeTable
#set of symbols with unknown length
todo = set(decodeTable)
#bit size under investigation
maskLength = 0
lengthTable = {}
while todo:
mask = (1<<maskLength)-1
#split the encodings that we didn't find yet using b bits
splitSymbols = defaultdict(list)
for s in todo: splitSymbols[s&mask].append(s)
#unique encodings have a length of maskLength bits
#set length, and remove from todo list
for s,subset in splitSymbols.items():
if len(subset)==1:
lengthTable[self.decodeTable[s]] = maskLength
todo.remove(s)
#now investigate with longer mask
maskLength +=1
#save result
self.lengthTable = lengthTable
self.minLength = min(lengthTable.values())
self.maxLength = max(lengthTable.values())
self.switchToPrefix()
|
Given the bit pattern lengths for symbols in lengthTable, set decodeTable, minLength, and maxLength.
|
def setLength(self, lengthTable):
"""Given the bit pattern lengths for symbols given in lengthTable,
set decodeTable, minLength, maxLength
"""
self.lengthTable = lengthTable
self.minLength = min(lengthTable.values())
self.maxLength = max(lengthTable.values())
#compute the backwards codes first; then reverse them
#compute (backwards) first code for every separate lengths
nextCodes = []
#build codes for each length, from right to left
code = 0
for bits in range(self.maxLength+1):
code <<= 1
nextCodes.append(code)
code += sum(x==bits for x in lengthTable.values())
self.decodeTable = {}
#count codes for each length, and store reversed in the table
for symbol in sorted(lengthTable):
bits = lengthTable[symbol]
bitpattern = '{:0{}b}'.format(nextCodes[bits], bits)
self.decodeTable[int(bitpattern[::-1], 2)] = symbol
nextCodes[bits] += 1
self.switchToPrefix()
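
# Standalone sketch of the canonical-code construction in setLength above,
# for lengths {'a': 1, 'b': 2, 'c': 2}: codes are assigned left to right and
# stored bit-reversed, matching the LSB-first bit reader.
lengthTable = {'a': 1, 'b': 2, 'c': 2}
nextCodes, code = [], 0
for bits in range(max(lengthTable.values())+1):
    code <<= 1
    nextCodes.append(code)
    code += sum(x==bits for x in lengthTable.values())
decodeTable = {}
for symbol in sorted(lengthTable):
    bits = lengthTable[symbol]
    bitpattern = '{:0{}b}'.format(nextCodes[bits], bits)
    decodeTable[int(bitpattern[::-1], 2)] = symbol
    nextCodes[bits] += 1
print(decodeTable)   # {0: 'a', 1: 'b', 3: 'c'}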
|
Long explanation of the value from the numeric value. This is a default routine. You can customize it in three ways: set description to add some text, override to get more control, or set callback to make it dependent on your local variables.
|
def explanation(self, index):
"""Long explanation of the value from the numeric value
This is a default routine.
You can customize in three ways:
- set description to add some text
- override to get more control
        - set callback to make it dependent on your local variables
"""
value = self.value(index)
return '{0}{1}: {2}'.format(
self.description and self.description+': ',
self.bitPattern(index),
value,
)
|
Show all words of the code in a nice format.
|
def showCode(self, width=80):
"""Show all words of the code in a nice format.
"""
#make table of all symbols with binary strings
symbolStrings = [
(self.bitPattern(s.index), self.mnemonic(s.index))
for s in self
]
#determine column widths the way Lisp programmers do it
leftColWidth, rightColWidth = map(max, map(
map,
repeat(len),
zip(*symbolStrings)
))
colwidth = leftColWidth+rightColWidth
columns = 81//(colwidth+2)
rows = -(-len(symbolStrings)//columns)
def justify(bs):
b,s = bs
return b.rjust(leftColWidth)+':'+s.ljust(rightColWidth)
for i in range(rows):
print(' '.join(map(justify, symbolStrings[i::rows])).rstrip())
|
Read symbol from stream. Returns (length, symbol).
|
def readTuple(self, stream):
"""Read symbol from stream. Returns symbol, length.
"""
length, symbol = self.decodePeek(stream.peek(self.maxLength))
stream.pos += length
return length, symbol
|
Read symbol and extra bits from stream. Returns (length, symbol, extraBits, extra).
|
def readTupleAndExtra(self, stream):
"""Read symbol and extrabits from stream.
Returns symbol length, symbol, extraBits, extra
>>> olleke.pos = 6
>>> MetablockLengthAlphabet().readTupleAndExtra(olleke)
(2, Symbol(MLEN, 4), 16, 46)
"""
length, symbol = self.decodePeek(stream.peek(self.maxLength))
stream.pos += length
extraBits = self.extraBits(symbol.index)
return length, symbol, extraBits, stream.read(extraBits)
|
Expanded version of Code.explanation supporting extra bits. If you don't supply extra, it is not mentioned.
|
def explanation(self, index, extra=None):
"""Expanded version of Code.explanation supporting extra bits.
If you don't supply extra, it is not mentioned.
"""
extraBits = 0 if extra is None else self.extraBits(index)
if not hasattr(self, 'extraTable'):
formatString = '{0}{3}'
lo = hi = value = self.value(index, extra)
elif extraBits==0:
formatString = '{0}{2}: {3}'
lo, hi = self.span(index)
value = lo
else:
formatString = '{0}{1} {2}: {3}-{4}; {3}+{5}={6}'
lo, hi = self.span(index)
value = lo+extra
return formatString.format(
self.description and self.description+': ',
'x'*extraBits,
self.bitPattern(index),
lo, hi,
extra,
value,
)
|
Override if you don't define value0 and extraTable.
|
def value(self, index, extra):
"""Override if you don't define value0 and extraTable
"""
lower, upper = self.span(index)
value = lower+(extra or 0)
if value>upper:
raise ValueError('value: extra out of range')
return value
|
Give the range of possible values as a tuple. Useful for mnemonic and explanation.
|
def span(self, index):
"""Give the range of possible values in a tuple
Useful for mnemonic and explanation
"""
lower = self.value0+sum(1<<x for x in self.extraTable[:index])
upper = lower+(1<<self.extraTable[index])
return lower, upper-1
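
# Worked instance of span(): with (assumed) value0=1 and extraTable=[0,0,1,2],
# index 2 starts at 1+1+1=3 and covers 2**1 values, i.e. the span (3, 4).
value0, extraTable, index = 1, [0, 0, 1, 2], 2
lower = value0 + sum(1<<x for x in extraTable[:index])
upper = lower + (1<<extraTable[index])
print(lower, upper-1)   # 3 4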
|
Returns ('Simple', #codewords) or ('Complex', HSKIP).
|
def value(self, index, extra):
"""Returns ('Simple', #codewords) or ('Complex', HSKIP)
"""
if index==1:
if extra>3:
raise ValueError('value: extra out of range')
return 'Simple', extra+1
if extra:
raise ValueError('value: extra out of range')
return 'Complex', index
|
Give count and value.
|
def value(self, index, extra):
"""Give count and value."""
index = index
if index==0: return 1, 0
if index<=self.RLEMAX: return (1<<index)+extra, 0
return 1, index-self.RLEMAX
|
Give relevant values for computations: (insertSymbol, copySymbol, dist0flag).
|
def splitSymbol(self, index):
"""Give relevant values for computations:
(insertSymbol, copySymbol, dist0flag)
"""
#determine insert and copy upper bits from table
row = [0,0,1,1,2,2,1,3,2,3,3][index>>6]
col = [0,1,0,1,0,1,2,0,2,1,2][index>>6]
#determine inserts and copy sub codes
insertLengthCode = row<<3 | index>>3&7
if row: insertLengthCode -= 8
copyLengthCode = col<<3 | index&7
return (
Symbol(self.insertLengthAlphabet, insertLengthCode),
Symbol(self.copyLengthAlphabet, copyLengthCode),
row==0
)
|
Make a nice mnemonic
|
def mnemonic(self, index):
"""Make a nice mnemonic
"""
i,c,d0 = self.splitSymbol(index)
iLower, _ = i.code.span(i.index)
iExtra = i.extraBits()
cLower, _ = c.code.span(c.index)
cExtra = c.extraBits()
return 'I{}{}{}C{}{}{}{}'.format(
iLower,
'+' if iExtra else '',
'x'*iExtra if iExtra<6 else '[{}*x]'.format(iExtra),
cLower,
'+' if cExtra else '',
'x'*cExtra if cExtra<6 else '[{}*x]'.format(cExtra),
'&D=0' if d0 else '')
|
Indicate how many extra bits are needed to interpret the symbol.
|
def extraBits(self, index):
"""Indicate how many extra bits are needed to interpret symbol
>>> d = DistanceAlphabet('D', NPOSTFIX=2, NDIRECT=10)
>>> [d[i].extraBits() for i in range(26)]
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
>>> [d[i].extraBits() for i in range(26,36)]
[1, 1, 1, 1, 1, 1, 1, 1, 2, 2]
"""
if index<16+self.NDIRECT: return 0
return 1 + ((index - self.NDIRECT - 16) >> (self.NPOSTFIX + 1))
|
Decode value of symbol together with the extra bits.
|
def value(self, dcode, dextra):
"""Decode value of symbol together with the extra bits.
>>> d = DistanceAlphabet('D', NPOSTFIX=2, NDIRECT=10)
>>> d[34].value(2)
(0, 35)
"""
if dcode<16:
return [(1,0),(2,0),(3,0),(4,0),
(1,-1),(1,+1),(1,-2),(1,+2),(1,-3),(1,+3),
(2,-1),(2,+1),(2,-2),(2,+2),(2,-3),(2,+3)
][dcode]
if dcode<16+self.NDIRECT:
return (0,dcode-16)
#we use the original formulas, instead of my clear explanation
POSTFIX_MASK = (1 << self.NPOSTFIX) - 1
ndistbits = 1 + ((dcode - self.NDIRECT - 16) >> (self.NPOSTFIX + 1))
hcode = (dcode - self.NDIRECT - 16) >> self.NPOSTFIX
lcode = (dcode - self.NDIRECT - 16) & POSTFIX_MASK
offset = ((2 + (hcode & 1)) << ndistbits) - 4
distance = ((offset + dextra) << self.NPOSTFIX) + lcode + self.NDIRECT + 1
return (0,distance)
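
# The same arithmetic as above, unrolled for NPOSTFIX=2, NDIRECT=10,
# dcode=34, dextra=2; it reproduces the doctest result (0, 35).
NPOSTFIX, NDIRECT, dcode, dextra = 2, 10, 34, 2
POSTFIX_MASK = (1 << NPOSTFIX) - 1
ndistbits = 1 + ((dcode - NDIRECT - 16) >> (NPOSTFIX + 1))   # 2
hcode = (dcode - NDIRECT - 16) >> NPOSTFIX                   # 2
lcode = (dcode - NDIRECT - 16) & POSTFIX_MASK                # 0
offset = ((2 + (hcode & 1)) << ndistbits) - 4                # 4
distance = ((offset + dextra) << NPOSTFIX) + lcode + NDIRECT + 1
print(distance)   # 35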
|
Give mnemonic representation of meaning; verbose expands runs of x's.
|
def mnemonic(self, index, verbose=False):
"""Give mnemonic representation of meaning.
        verbose expands runs of x's (no [n*x] shorthand)
"""
if index<16:
return ['last', '2last', '3last', '4last',
'last-1', 'last+1', 'last-2', 'last+2', 'last-3', 'last+3',
'2last-1', '2last+1', '2last-2', '2last+2', '2last-3', '2last+3'
][index]
if index<16+self.NDIRECT:
return str(index-16)
#construct strings like "1xx01-15"
index -= self.NDIRECT+16
hcode = index >> self.NPOSTFIX
lcode = index & (1<<self.NPOSTFIX)-1
if self.NPOSTFIX: formatString = '1{0}{1}{2:0{3}b}{4:+d}'
else: formatString = '1{0}{1}{4:+d}'
return formatString.format(
hcode&1,
'x'*(2+hcode>>1) if hcode<13 or verbose else '[{}*x]'.format(2+hcode>>1),
lcode, self.NPOSTFIX,
self.NDIRECT+1-(4<<self.NPOSTFIX))
|
Long explanation of a distance symbol with its extra bits.
|
def explanation(self, index, extra):
"""
>>> d = DistanceAlphabet('D', NPOSTFIX=2, NDIRECT=10)
>>> d[55].explanation(13)
'11[1101]01-5: [0]+240'
"""
extraBits = self.extraBits(index)
extraString = '[{:0{}b}]'.format(extra, extraBits)
return '{0}: [{1[0]}]{1[1]:+d}'.format(
self.mnemonic(index, True).replace('x'*(extraBits or 1), extraString),
self.value(index, extra))
|
Get word
|
def word(self, size, dist):
"""Get word
"""
#split dist in index and action
ndbits = self.NDBITS[size]
index = dist&(1<<ndbits)-1
action = dist>>ndbits
#compute position in file
position = sum(n<<self.NDBITS[n] for n in range(4,size))+size*index
self.file.seek(position)
return self.doAction(self.file.read(size), action)
|
Build the action table from the text above
|
def compileActions(self):
"""Build the action table from the text above
"""
import re
self.actionList = actions = [None]*121
#Action 73, which is too long, looks like this when expanded:
actions[73] = "b' the '+w+b' of the '"
#find out what the columns are
actionLines = self.actionTable.splitlines()
colonPositions = [m.start()
for m in re.finditer(':',actionLines[1])
]+[100]
columns = [(colonPositions[i]-3,colonPositions[i+1]-3)
for i in range(len(colonPositions)-1)]
for line in self.actionTable.splitlines(keepends=False):
for start,end in columns:
action = line[start:end]
#skip empty actions
if not action or action.isspace(): continue
#chop it up, and check if the colon is properly placed
index, colon, action = action[:3], action[3], action[4:]
assert colon==':'
#remove filler spaces at right
action = action.rstrip()
#replace space symbols
action = action.replace('_', ' ')
wPos = action.index('w')
#add quotes around left string when present
#translation: any pattern from beginning, up to
#(but not including) a + following by a w later on
action = re.sub(r"^(.*)(?=\+[U(]*w)", r"b'\1'", action)
#add quotes around right string when present
#translation: anything with a w in it, followed by a +
#and a pattern up to the end
#(there is no variable lookbehind assertion,
#so we have to copy the pattern)
action = re.sub(r"(w[[:\-1\]).U]*)\+(.*)$", r"\1+b'\2'", action)
#expand shortcut for uppercaseAll
action = action.replace(".U", ".upper()")
#store action
actions[int(index)] = action
|
Perform the proper action
|
def doAction(self, w, action):
"""Perform the proper action
"""
#set environment for the UpperCaseFirst
U = self.upperCase1
return eval(self.actionList[action], locals())
|
Produce hex dump of all data containing the bits from pos to stream.pos.
|
def makeHexData(self, pos):
"""Produce hex dump of all data containing the bits
from pos to stream.pos
"""
firstAddress = pos+7>>3
lastAddress = self.stream.pos+7>>3
return ''.join(map('{:02x} '.format,
self.stream.data[firstAddress:lastAddress]))
|
Show formatted bit data: bytes are separated by commas, whole bytes are displayed in hex.
|
def formatBitData(self, pos, width1, width2=0):
"""Show formatted bit data:
Bytes are separated by commas
whole bytes are displayed in hex
>>> Layout(olleke).formatBitData(6, 2, 16)
'|00h|2Eh,|00'
>>> Layout(olleke).formatBitData(4, 1, 0)
'1'
"""
result = []
#make empty prefix code explicit
if width1==0: result = ['()', ',']
for width in width1, width2:
#skip empty width2
if width==0: continue
#build result backwards in a list
while width>0:
availableBits = 8-(pos&7)
if width<availableBits:
#read partial byte, beginning nor ending at boundary
data = self.stream.data[pos>>3] >> (pos&7) & (1<<width)-1
result.append('{:0{}b}'.format(data, width))
elif availableBits<8:
#read rest of byte, ending at boundary
data = self.stream.data[pos>>3] >> (pos&7)
result.append('|{:0{}b}'.format(data, availableBits))
else:
#read whole byte (in hex), beginning and ending at boundary
data = self.stream.data[pos>>3]
result.append('|{:02X}h'.format(data))
width -= availableBits
pos += availableBits
#if width overshot from the availableBits subtraction, fix it
pos += width
#add comma to separate fields
result.append(',')
#concatenate pieces, reversed, skipping the last space
return ''.join(result[-2::-1])
|
Give alphabet the prefix code that is read from the stream. The alphabet in question must have a "logical" order, otherwise the assignment of symbols doesn't work.
|
def readPrefixCode(self, alphabet):
"""give alphabet the prefix code that is read from the stream
Called for the following alphabets, in this order:
The alphabet in question must have a "logical" order,
otherwise the assignment of symbols doesn't work.
"""
mode, numberOfSymbols = self.verboseRead(PrefixCodeHeader(alphabet.name))
if mode=='Complex':
#for a complex code, numberOfSymbols means hskip
self.readComplexCode(numberOfSymbols, alphabet)
return alphabet
else:
table = []
#Set table of lengths for mnemonic function
lengths = [[0], [1,1], [1,2,2], '????'][numberOfSymbols-1]
#adjust mnemonic function of alphabet class
def myMnemonic(index):
return '{} bit{}: {}'.format(
lengths[i],
'' if lengths[i]==1 else 's',
alphabet.__class__.mnemonic(alphabet, index)
)
alphabet.mnemonic = myMnemonic
for i in range(numberOfSymbols):
table.append(self.verboseRead(alphabet, skipExtra=True).index)
#restore mnemonic
del alphabet.mnemonic
if numberOfSymbols==4:
#read tree shape to redefine lengths
lengths = self.verboseRead(TreeShapeAlhabet())
#construct the alphabet prefix code
alphabet.setLength(dict(zip(table, lengths)))
return alphabet
|
Read complex code
|
def readComplexCode(self, hskip, alphabet):
"""Read complex code"""
stream = self.stream
#read the lengths for the length code
lengths = [1,2,3,4,0,5,17,6,16,7,8,9,10,11,12,13,14,15][hskip:]
codeLengths = {}
total = 0
lol = LengthOfLengthAlphabet('##'+alphabet.name)
#lengthCode will be used for coding the lengths of the new code
#we use it for display until now; definition comes below
lengthCode = LengthAlphabet('#'+alphabet.name)
lengthIter = iter(lengths)
lengthsLeft = len(lengths)
while total<32 and lengthsLeft>0:
lengthsLeft -= 1
newSymbol = next(lengthIter)
lol.description = str(lengthCode[newSymbol])
length = self.verboseRead(lol)
if length:
codeLengths[newSymbol] = length
total += 32>>length
if total>32: raise ValueError("Stream format")
if len(codeLengths)==1: codeLengths[list(codeLengths.keys())[0]] = 0
#Now set the encoding of the lengthCode
lengthCode.setLength(codeLengths)
print("***** Lengths for {} will be coded as:".format(alphabet.name))
lengthCode.showCode()
#Now determine the symbol lengths with the lengthCode
symbolLengths = {}
total = 0
lastLength = 8
alphabetIter = iter(alphabet)
while total<32768:
#look ahead to see what is going to happen
length = lengthCode.decodePeek(
self.stream.peek(lengthCode.maxLength))[1].index
#in every branch, set lengthCode.description to explanatory text
#lengthCode calls format(symbol, extra) with this string
if length==0:
symbol = next(alphabetIter)
lengthCode.description = 'symbol {} unused'.format(symbol)
self.verboseRead(lengthCode)
#unused symbol
continue
if length==16:
lengthCode.description = \
'{1}+3 symbols of length '+str(lastLength)
extra = self.verboseRead(lengthCode)
#scan series of 16s (repeat counts)
#start with repeat count 2
repeat = 2
startSymbol = next(alphabetIter)
endSymbol = next(alphabetIter)
symbolLengths[startSymbol.index] = \
symbolLengths[endSymbol.index] = lastLength
#count the two just defined symbols
total += 2*32768>>lastLength
#note: loop may end because we're there
#even if a 16 _appears_ to follow
while True:
#determine last symbol
oldRepeat = repeat
repeat = (repeat-2<<2)+extra+3
#read as many symbols as repeat increased
for i in range(oldRepeat, repeat):
endSymbol = next(alphabetIter)
symbolLengths[endSymbol.index] = lastLength
#compute new total; it may be end of loop
total += (repeat-oldRepeat)*32768>>lastLength
if total>=32768: break
#see if there is more to do
length = lengthCode.decodePeek(
self.stream.peek(lengthCode.maxLength))[1].index
if length!=16: break
lengthCode.description = 'total {}+{{1}} symbols'.format(
(repeat-2<<2)+3)
extra = self.verboseRead(lengthCode)
elif length==17:
#read, and show explanation
lengthCode.description = '{1}+3 unused'
extra = self.verboseRead(lengthCode)
#scan series of 17s (groups of zero counts)
#start with repeat count 2
repeat = 2
startSymbol = next(alphabetIter)
endSymbol = next(alphabetIter)
#note: loop will not end with total==32768,
#since total doesn't change here
while True:
#determine last symbol
oldRepeat = repeat
repeat = (repeat-2<<3)+extra+3
#read as many symbols as repeat increases
for i in range(repeat-oldRepeat):
endSymbol = next(alphabetIter)
#see if there is more to do
length = lengthCode.decodePeek(
self.stream.peek(lengthCode.maxLength))[1].index
if length!=17: break
lengthCode.description = 'total {}+{{1}} unused'.format(
(repeat-2<<3)+3)
extra = self.verboseRead(lengthCode)
else:
symbol = next(alphabetIter)
#double braces for format
char = str(symbol)
if char in '{}': char *= 2
lengthCode.description = \
'Length for {} is {{0.index}} bits'.format(char)
#output is not needed (will be 0)
self.verboseRead(lengthCode)
symbolLengths[symbol.index] = length
total += 32768>>length
lastLength = length
assert total==32768
alphabet.setLength(symbolLengths)
print('End of table. Prefix code '+alphabet.name+':')
alphabet.showCode()
|
Process a brotli stream.
|
def processStream(self):
"""Process a brotli stream.
"""
print('addr hex{:{}s}binary context explanation'.format(
'', self.width-10))
print('Stream header'.center(60, '-'))
self.windowSize = self.verboseRead(WindowSizeAlphabet())
print('Metablock header'.center(60, '='))
self.ISLAST = False
self.output = bytearray()
while not self.ISLAST:
self.ISLAST = self.verboseRead(
BoolCode('LAST', description="Last block"))
if self.ISLAST:
if self.verboseRead(
BoolCode('EMPTY', description="Empty block")): break
if self.metablockLength(): continue
if not self.ISLAST and self.uncompressed(): continue
print('Block type descriptors'.center(60, '-'))
self.numberOfBlockTypes = {}
self.currentBlockCounts = {}
self.blockTypeCodes = {}
self.blockCountCodes = {}
for blockType in (L,I,D): self.blockType(blockType)
print('Distance code parameters'.center(60, '-'))
self.NPOSTFIX, self.NDIRECT = self.verboseRead(DistanceParamAlphabet())
self.readLiteralContextModes()
print('Context maps'.center(60, '-'))
self.cmaps = {}
#keep the number of each kind of prefix tree for the last loop
numberOfTrees = {I: self.numberOfBlockTypes[I]}
for blockType in (L,D):
numberOfTrees[blockType] = self.contextMap(blockType)
print('Prefix code lists'.center(60, '-'))
self.prefixCodes = {}
for blockType in (L,I,D):
self.readPrefixArray(blockType, numberOfTrees[blockType])
self.metablock()
|
Read symbol and extra from stream and explain what happens. Returns the value of the symbol.
|
def verboseRead(self, alphabet, context='', skipExtra=False):
"""Read symbol and extra from stream and explain what happens.
Returns the value of the symbol
>>> olleke.pos = 0
>>> l = Layout(olleke)
>>> l.verboseRead(WindowSizeAlphabet())
0000 1b 1011 WSIZE windowsize=(1<<22)-16=4194288
4194288
"""
#TODO 2: verbosity level, e.g. show only codes and maps in header
stream = self.stream
pos = stream.pos
if skipExtra:
length, symbol = alphabet.readTuple(stream)
extraBits, extra = 0, None
else:
length, symbol, extraBits, extra = alphabet.readTupleAndExtra(
stream)
#fields: address, hex data, binary data, name of alphabet, explanation
hexdata = self.makeHexData(pos)
addressField = '{:04x}'.format(pos+7>>3) if hexdata else ''
bitdata = self.formatBitData(pos, length, extraBits)
#bitPtr moves bitdata so that the bytes are easier to read
#jump back to right if a new byte starts
if '|' in bitdata[1:]:
#start over on the right side
self.bitPtr = self.width
fillWidth = self.bitPtr-(len(hexdata)+len(bitdata))
if fillWidth<0: fillWidth = 0
print('{:<5s} {:<{}s} {:7s} {}'.format(
addressField,
hexdata+' '*fillWidth+bitdata, self.width,
context+alphabet.name,
symbol if skipExtra else symbol.explanation(extra),
))
#jump to the right if we started with a '|'
#because we didn't jump before printing
if bitdata.startswith('|'): self.bitPtr = self.width
else: self.bitPtr -= len(bitdata)
return symbol if skipExtra else symbol.value(extra)
|
Read MNIBBLES and metablock length; if empty block, skip block and return True.
|
def metablockLength(self):
"""Read MNIBBLES and meta block length;
if empty block, skip block and return true.
"""
self.MLEN = self.verboseRead(MetablockLengthAlphabet())
if self.MLEN:
return False
        #empty block; skip and return True
self.verboseRead(ReservedAlphabet())
MSKIP = self.verboseRead(SkipLengthAlphabet())
self.verboseRead(FillerAlphabet(streamPos=self.stream.pos))
self.stream.pos += 8*MSKIP
print("Skipping to {:x}".format(self.stream.pos>>3))
return True
|
If true, handle uncompressed data.
|
def uncompressed(self):
"""If true, handle uncompressed data
"""
ISUNCOMPRESSED = self.verboseRead(
BoolCode('UNCMPR', description='Is uncompressed?'))
if ISUNCOMPRESSED:
self.verboseRead(FillerAlphabet(streamPos=self.stream.pos))
print('Uncompressed data:')
self.output += self.stream.readBytes(self.MLEN)
print(outputFormatter(self.output[-self.MLEN:]))
return ISUNCOMPRESSED
|
Read block type switch descriptor for given kind of blockType.
|
def blockType(self, kind):
"""Read block type switch descriptor for given kind of blockType."""
NBLTYPES = self.verboseRead(TypeCountAlphabet(
'BT#'+kind[0].upper(),
description='{} block types'.format(kind),
))
self.numberOfBlockTypes[kind] = NBLTYPES
if NBLTYPES>=2:
self.blockTypeCodes[kind] = self.readPrefixCode(
BlockTypeAlphabet('BT'+kind[0].upper(), NBLTYPES))
self.blockCountCodes[kind] = self.readPrefixCode(
BlockCountAlphabet('BC'+kind[0].upper()))
blockCount = self.verboseRead(self.blockCountCodes[kind])
else:
blockCount = 1<<24
self.currentBlockCounts[kind] = blockCount
|
Read literal context modes.
|
def readLiteralContextModes(self):
"""Read literal context modes.
LSB6: lower 6 bits of last char
MSB6: upper 6 bits of last char
        UTF8: roughly dependent on categories:
upper 4 bits depend on category of last char:
control/whitespace/space/ punctuation/quote/%/open/close/
comma/period/=/digits/ VOWEL/CONSONANT/vowel/consonant
lower 2 bits depend on category of 2nd last char:
space/punctuation/digit or upper/lowercase
signed: hamming weight of last 2 chars
"""
print('Context modes'.center(60, '-'))
self.literalContextModes = []
for i in range(self.numberOfBlockTypes[L]):
self.literalContextModes.append(
self.verboseRead(LiteralContextMode(number=i)))
|
Read context maps. Returns the number of different values in the context map (in other words, the number of prefix trees).
|
def contextMap(self, kind):
"""Read context maps
        Returns the number of different values on the context map
(In other words, the number of prefix trees)
"""
NTREES = self.verboseRead(TypeCountAlphabet(
kind[0].upper()+'T#',
description='{} prefix trees'.format(kind)))
mapSize = {L:64, D:4}[kind]
if NTREES<2:
self.cmaps[kind] = [0]*mapSize
else:
#read CMAPkind
RLEMAX = self.verboseRead(RLEmaxAlphabet(
'RLE#'+kind[0].upper(),
description=kind+' context map'))
alphabet = TreeAlphabet('CM'+kind[0].upper(), NTREES=NTREES, RLEMAX=RLEMAX)
cmapCode = self.readPrefixCode(alphabet)
tableSize = mapSize*self.numberOfBlockTypes[kind]
cmap = []
while len(cmap)<tableSize:
cmapCode.description = 'map {}, entry {}'.format(
*divmod(len(cmap), mapSize))
count, value = self.verboseRead(cmapCode)
cmap.extend([value]*count)
assert len(cmap)==tableSize
IMTF = self.verboseRead(BoolCode('IMTF', description='Apply inverse MTF'))
if IMTF:
self.IMTF(cmap)
if kind==L:
print('Context maps for literal data:')
for i in range(0, len(cmap), 64):
print(*(
''.join(map(str, cmap[j:j+8]))
for j in range(i, i+64, 8)
))
else:
print('Context map for distances:')
print(*(
''.join(map('{:x}'.format, cmap[i:i+4]))
for i in range(0, len(cmap), 4)
))
self.cmaps[kind] = cmap
return NTREES
|
In place inverse move to front transform.
|
def IMTF(v):
"""In place inverse move to front transform.
"""
#mtf is initialized virtually with range(infinity)
mtf = []
for i, vi in enumerate(v):
#get old value from mtf. If never seen, take virtual value
try: value = mtf.pop(vi)
except IndexError: value = vi
#put value at front
mtf.insert(0, value)
#replace transformed value
v[i] = value
|
Read prefix code array
|
def readPrefixArray(self, kind, numberOfTrees):
"""Read prefix code array"""
prefixes = []
for i in range(numberOfTrees):
if kind==L: alphabet = LiteralAlphabet(i)
elif kind==I: alphabet = InsertAndCopyAlphabet(i)
elif kind==D: alphabet = DistanceAlphabet(
i, NPOSTFIX=self.NPOSTFIX, NDIRECT=self.NDIRECT)
self.readPrefixCode(alphabet)
prefixes.append(alphabet)
self.prefixCodes[kind] = prefixes
|
Process the metablock data.
|
def metablock(self):
"""Process the data.
Relevant variables of self:
numberOfBlockTypes[kind]: number of block types
currentBlockTypes[kind]: current block types (=0)
literalContextModes: the context modes for the literal block types
currentBlockCounts[kind]: counters for block types
blockTypeCodes[kind]: code for block type
blockCountCodes[kind]: code for block count
cmaps[kind]: the context maps (not for I)
prefixCodes[kind][#]: the prefix codes
lastDistances: the last four distances
lastChars: the last two chars
output: the result
"""
print('Meta block contents'.center(60, '='))
self.currentBlockTypes = {L:0, I:0, D:0, pL:1, pI:1, pD:1}
self.lastDistances = deque([17,16,11,4], maxlen=4)
#the current context mode is for block type 0
self.contextMode = ContextModeKeeper(self.literalContextModes[0])
wordList = WordList()
#setup distance callback function
def distanceCallback(symbol, extra):
"callback function for displaying decoded distance"
index, offset = symbol.value(extra)
if index:
#recent distance
distance = self.lastDistances[-index]+offset
return 'Distance: {}last{:+d}={}'.format(index, offset, distance)
#absolute value
if offset<=maxDistance:
return 'Absolute value: {} (pos {})'.format(offset, maxDistance-offset)
#word list value
action, word = divmod(offset-maxDistance, 1<<wordList.NDBITS[copyLen])
return '{}-{} gives word {},{} action {}'.format(
offset, maxDistance, copyLen, word, action)
for dpc in self.prefixCodes[D]: dpc.callback = distanceCallback
blockLen = 0
#there we go
while blockLen<self.MLEN:
            #get insert&copy command
litLen, copyLen, dist0Flag = self.verboseRead(
self.prefixCodes[I][
self.figureBlockType(I)])
#literal data
for i in range(litLen):
bt = self.figureBlockType(L)
cm = self.contextMode.getIndex()
ct = self.cmaps[L][bt<<6|cm]
char = self.verboseRead(
self.prefixCodes[L][ct],
context='{},{}='.format(bt,cm))
self.contextMode.add(char)
self.output.append(char)
blockLen += litLen
#check if we're done
if blockLen>=self.MLEN: return
#distance
#distances are computed relative to output length, at most window size
maxDistance = min(len(self.output), self.windowSize)
if dist0Flag:
distance = self.lastDistances[-1]
else:
bt = self.figureBlockType(D)
cm = {2:0, 3:1, 4:2}.get(copyLen, 3)
ct = self.cmaps[D][bt<<2|cm]
index, offset = self.verboseRead(
self.prefixCodes[D][ct],
context='{},{}='.format(bt,cm))
distance = self.lastDistances[-index]+offset if index else offset
if index==1 and offset==0:
#to make sure distance is not put in last distance list
dist0Flag = True
if distance<=maxDistance:
#copy from output
for i in range(
maxDistance-distance,
maxDistance-distance+copyLen):
self.output.append(self.output[i])
if not dist0Flag: self.lastDistances.append(distance)
comment = 'Seen before'
else:
#fetch from wordlist
newWord = wordList.word(copyLen, distance-maxDistance-1)
self.output.extend(newWord)
#adjust copyLen to reflect actual new data
copyLen = len(newWord)
comment = 'From wordlist'
blockLen += copyLen
print(' '*40,
comment,
': "',
outputFormatter(self.output[-copyLen:]),
'"',
sep='')
self.contextMode.add(self.output[-2])
self.contextMode.add(self.output[-1])
|
Return BROTLI_VERSION string as defined in the 'common/version.h' file.
|
def get_version():
""" Return BROTLI_VERSION string as defined in 'common/version.h' file. """
version_file_path = os.path.join(CURR_DIR, 'c', 'common', 'version.h')
version = 0
with open(version_file_path, 'r') as f:
for line in f:
m = re.match(r'#define\sBROTLI_VERSION\s+0x([0-9a-fA-F]+)', line)
if m:
version = int(m.group(1), 16)
if version == 0:
return ''
# Semantic version is calculated as (MAJOR << 24) | (MINOR << 12) | PATCH.
major = version >> 24
minor = (version >> 12) & 0xFFF
patch = version & 0xFFF
return '{0}.{1}.{2}'.format(major, minor, patch)
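
# Worked example of the decoding above, using a plausible hex value:
# 0x1000009 -> major 1, minor 0, patch 9 -> "1.0.9".
version = 0x1000009
print('{0}.{1}.{2}'.format(version >> 24, (version >> 12) & 0xFFF,
                           version & 0xFFF))   # 1.0.9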
|
Turns an intensity array into a monochrome 'image' by replacing each intensity with a scaled 'color'.
|
def monochrome(I, color, vmin=None, vmax=None):
"""Turns a intensity array to a monochrome 'image' by replacing each intensity by a scaled 'color'
Values in I between vmin and vmax get scaled between 0 and 1, and values outside this range are clipped to this.
Example
>>> I = np.arange(16.).reshape(4,4)
    >>> color = (0, 0, 1) # blue
>>> rgb = vx.image.monochrome(I, color) # shape is (4,4,3)
:param I: ndarray of any shape (2d for image)
:param color: sequence of a (r, g and b) value
:param vmin: normalization minimum for I, or np.nanmin(I) when None
:param vmax: normalization maximum for I, or np.nanmax(I) when None
:return:
"""
if vmin is None:
vmin = np.nanmin(I)
if vmax is None:
vmax = np.nanmax(I)
normalized = (I - vmin) / (vmax - vmin)
return np.clip(normalized[..., np.newaxis], 0, 1) * np.array(color)
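
# Quick numeric check of monochrome() under the normalization described
# above (a sketch; assumes numpy is available): a 0..2 ramp scaled onto
# pure blue.
import numpy as np
I = np.array([[0.0, 2.0]])
rgb = monochrome(I, (0, 0, 1))   # vmin=0.0, vmax=2.0 inferred from I
print(rgb.shape)                 # (1, 2, 3)
print(rgb[0, 1])                 # [0. 0. 1.]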
|
Similar to monochrome, but for multiple colors.
|
def polychrome(I, colors, vmin=None, vmax=None, axis=-1):
"""Similar to monochrome, but now do it for multiple colors
Example
>>> I = np.arange(32.).reshape(4,4,2)
    >>> colors = [(0, 0, 1), (0, 1, 0)] # blue and green
>>> rgb = vx.image.polychrome(I, colors) # shape is (4,4,3)
:param I: ndarray of any shape (3d will result in a 2d image)
:param colors: sequence of [(r,g,b), ...] values
:param vmin: normalization minimum for I, or np.nanmin(I) when None
:param vmax: normalization maximum for I, or np.nanmax(I) when None
:param axis: axis which to sum over, by default the last
:return:
"""
axes_length = len(I.shape)
allaxes = list(range(axes_length))
otheraxes = list(allaxes)
otheraxes.remove((axis + axes_length) % axes_length)
otheraxes = tuple(otheraxes)
if vmin is None:
vmin = np.nanmin(I, axis=otheraxes)
if vmax is None:
vmax = np.nanmax(I, axis=otheraxes)
normalized = (I - vmin) / (vmax - vmin)
return np.clip(normalized, 0, 1).dot(colors)
|
Function decorator that executes the function in parallel.
|
def parallelize(cores=None, fork=True, flatten=False, info=False, infoclass=InfoThreadProgressBar, init=None, *args, **kwargs):
"""Function decorater that executes the function in parallel
Usage::
@parallelize(cores=10, info=True)
def f(x):
return x**2
x = numpy.arange(0, 100, 0.1)
y = f(x) # this gets executed parallel
:param cores: number of cpus/cores to use (if None, it counts the cores using /proc/cpuinfo)
    :param fork: fork a process (should always be true because of the GIL, but can be False with C modules that release the GIL)
:param flatten: if False and each return value is a list, final result will be a list of lists, if True, all lists are combined to one big list
:param info: show progress bar (see infoclass)
:param infoclass: class to instantiate that shows the progress (default shows progressbar)
    :param init: function to be called in each forked process before executing, can be used to set the seed, takes an integer as parameter (number that identifies the process)
:param args: extra arguments passed to function
:param kwargs: extra keyword arguments passed to function
Example::
@parallelize(cores=10, info=True, n=2)
def f(x, n):
return x**n
x = numpy.arange(0, 100, 0.1)
y = f(x) # this gets executed parallel
"""
    if cores is None:
cores = multiprocessing.cpu_count()
def wrapper(f):
def execute(*multiargs):
results = []
len(list(zip(*multiargs)))
N = len(multiargs[0])
if info:
print("running %i jobs on %i cores" % (N, cores))
taskQueue = queue.Queue(len(multiargs[0]))
#for timenr in range(times):
# taskQueue.put(timenr)
for tasknr, _args in enumerate(zip(*multiargs)):
taskQueue.put((tasknr, list(_args)))
#for timenr in range(times):
# result = f(*args, **kwargs)
# results.append(result)
executions = [Execution(taskQueue, fork, f, init, corenr, args, kwargs) for corenr in range(cores)]
if info:
infoobj = infoclass(len(multiargs[0]), executions)
infoobj.start()
for i, execution in enumerate(executions):
execution.setName("T-%d" % i)
execution.start()
#if 1:
# watchdog = Watchdog(executions)
# watchdog.start()
error = False
for execution in executions:
log("joining:",execution.getName())
try:
execution.join()
except BaseException:
error = True
results.extend(execution.results)
if execution.error:
error = True
if info:
infoobj.join()
if error:
print("error", file=sys.stderr)
results = None
raise Exception("error in one or more of the executors")
else:
                results.sort(key=lambda r: r[0])
results = [k[1] for k in results]
#print "bla", results
if flatten:
flatresults = []
for result in results:
flatresults.extend(result)
results = flatresults
return results
return execute
return wrapper
|
Export a dataset to an HDF5 file.
|
def export_hdf5(dataset, path, column_names=None, byteorder="=", shuffle=False, selection=False, progress=None, virtual=True, sort=None, ascending=True):
"""
:param DatasetLocal dataset: dataset to export
:param str path: path for file
    :param list[str] column_names: list of column names to export or None for all columns
:param str byteorder: = for native, < for little endian and > for big endian
:param bool shuffle: export rows in random order
:param bool selection: export selection or not
:param progress: progress callback that gets a progress fraction as argument and should return True to continue,
or a default progress bar when progress=True
    :param bool virtual: When True, export virtual columns
:return:
"""
if selection:
if selection == True: # easier to work with the name
selection = "default"
# first open file using h5py api
with h5py.File(path, "w") as h5file_output:
h5table_output = h5file_output.require_group("/table")
h5table_output.attrs["type"] = "table"
h5columns_output = h5file_output.require_group("/table/columns")
# i1, i2 = dataset.current_slice
N = len(dataset) if not selection else dataset.selected_length(selection)
if N == 0:
raise ValueError("Cannot export empty table")
logger.debug("virtual=%r", virtual)
logger.debug("exporting %d rows to file %s" % (N, path))
# column_names = column_names or (dataset.get_column_names() + (list(dataset.virtual_columns.keys()) if virtual else []))
column_names = column_names or dataset.get_column_names(virtual=virtual, strings=True)
logger.debug("exporting columns(hdf5): %r" % column_names)
sparse_groups = collections.defaultdict(list)
sparse_matrices = {} # alternative to a set of matrices, since they are not hashable
for column_name in list(column_names):
sparse_matrix = dataset._sparse_matrix(column_name)
if sparse_matrix is not None:
# sparse columns are stored differently
sparse_groups[id(sparse_matrix)].append(column_name)
sparse_matrices[id(sparse_matrix)] = sparse_matrix
continue
dtype = dataset.dtype(column_name)
if column_name in dataset.get_column_names(virtual=False):
column = dataset.columns[column_name]
shape = (N,) + column.shape[1:]
else:
shape = (N,)
h5column_output = h5columns_output.require_group(column_name)
if dtype == str_type:
# TODO: if no selection or filter, we could do this
# if isinstance(column, ColumnStringArrow):
# data_shape = column.bytes.shape
# indices_shape = column.indices.shape
# else:
byte_length = dataset[column_name].str.byte_length().sum(selection=selection)
if byte_length > max_int32:
dtype_indices = 'i8'
else:
dtype_indices = 'i4'
data_shape = (byte_length, )
indices_shape = (N+1, )
array = h5column_output.require_dataset('data', shape=data_shape, dtype='S1')
array[0] = array[0] # make sure the array really exists
index_array = h5column_output.require_dataset('indices', shape=indices_shape, dtype=dtype_indices)
index_array[0] = index_array[0] # make sure the array really exists
null_value_count = N - dataset.count(column_name, selection=selection)
if null_value_count > 0:
null_shape = ((N + 7) // 8, ) # TODO: arrow requires padding right?
null_bitmap_array = h5column_output.require_dataset('null_bitmap', shape=null_shape, dtype='u1')
null_bitmap_array[0] = null_bitmap_array[0] # make sure the array really exists
array.attrs["dtype"] = 'str'
# TODO: masked support ala arrow?
else:
if dtype.kind in 'mM':
array = h5column_output.require_dataset('data', shape=shape, dtype=np.int64)
array.attrs["dtype"] = dtype.name
elif dtype.kind == 'U':
# numpy uses utf32 for unicode
char_length = dtype.itemsize // 4
shape = (N, char_length)
array = h5column_output.require_dataset('data', shape=shape, dtype=np.uint8)
array.attrs["dtype"] = 'utf32'
array.attrs["dlength"] = char_length
else:
try:
array = h5column_output.require_dataset('data', shape=shape, dtype=dtype.newbyteorder(byteorder))
except Exception:
logger.exception("error creating dataset for %r, with type %r" % (column_name, dtype))
del h5columns_output[column_name]
column_names.remove(column_name)
array[0] = array[0] # make sure the array really exists
data = dataset.evaluate(column_name, 0, 1)
if np.ma.isMaskedArray(data):
mask = h5column_output.require_dataset('mask', shape=shape, dtype=np.bool_)
mask[0] = mask[0] # make sure the array really exists
random_index_name = None
column_order = list(column_names) # copy
if shuffle:
random_index_name = "random_index"
while random_index_name in dataset.get_column_names():
random_index_name += "_new"
shuffle_array = h5columns_output.require_dataset(random_index_name + "/data", shape=(N,), dtype=byteorder + "i8")
shuffle_array[0] = shuffle_array[0]
column_order.append(random_index_name) # last item
h5columns_output.attrs["column_order"] = ",".join(column_order) # keep track of the ordering of columns
sparse_index = 0
for sparse_matrix in sparse_matrices.values():
columns = sorted(sparse_groups[id(sparse_matrix)], key=lambda col: dataset.columns[col].column_index)
name = "sparse" + str(sparse_index)
sparse_index += 1
# TODO: slice columns
# sparse_matrix = sparse_matrix[:,]
sparse_group = h5columns_output.require_group(name)
sparse_group.attrs['type'] = 'csr_matrix'
ar = sparse_group.require_dataset('data', shape=(len(sparse_matrix.data), ), dtype=sparse_matrix.dtype)
ar[0] = ar[0]
ar = sparse_group.require_dataset('indptr', shape=(len(sparse_matrix.indptr), ), dtype=sparse_matrix.indptr.dtype)
ar[0] = ar[0]
ar = sparse_group.require_dataset('indices', shape=(len(sparse_matrix.indices), ), dtype=sparse_matrix.indices.dtype)
ar[0] = ar[0]
for i, column_name in enumerate(columns):
h5column = sparse_group.require_group(column_name)
h5column.attrs['column_index'] = i
# after this the file is closed, and we reopen it using our own class
dataset_output = vaex.hdf5.dataset.Hdf5MemoryMapped(path, write=True)
column_names = vaex.export._export(dataset_input=dataset, dataset_output=dataset_output, path=path, random_index_column=random_index_name,
column_names=column_names, selection=selection, shuffle=shuffle, byteorder=byteorder,
progress=progress, sort=sort, ascending=ascending)
import getpass
import datetime
user = getpass.getuser()
date = str(datetime.datetime.now())
source = dataset.path
description = "file exported by vaex, by user %s, on date %s, from source %s" % (user, date, source)
if dataset.description:
description += "\nprevious description:\n" + dataset.description
dataset_output.copy_metadata(dataset)
dataset_output.description = description
logger.debug("writing meta information")
dataset_output.write_meta()
dataset_output.close_files()
return
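A minimal usage sketch (hypothetical file name; assumes the DataFrame method export_hdf5 forwards to the function above):
import numpy as np
import vaex

# build a small in-memory DataFrame and export it, shuffled, to disk
df = vaex.from_arrays(x=np.arange(10), y=np.arange(10) ** 2)
df.export_hdf5('example.hdf5', shuffle=True)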
|
Implementation of Dataset.to_arrow_table
|
def arrow_table_from_vaex_df(ds, column_names=None, selection=None, strings=True, virtual=False):
"""Implementation of Dataset.to_arrow_table"""
names = []
arrays = []
for name, array in ds.to_items(column_names=column_names, selection=selection, strings=strings, virtual=virtual):
names.append(name)
arrays.append(arrow_array_from_numpy_array(array))
return pyarrow.Table.from_arrays(arrays, names)
|
Adds method f to the Dataset class
|
def patch(f):
'''Adds method f to the Dataset class'''
name = f.__name__
Dataset.__hidden__[name] = f
return f
|
Add ecliptic coordinates (long_out, lat_out) from equatorial coordinates.
|
def add_virtual_columns_eq2ecl(self, long_in="ra", lat_in="dec", long_out="lambda_", lat_out="beta", name_prefix="__celestial_eq2ecl", radians=False):
"""Add ecliptic coordates (long_out, lat_out) from equatorial coordinates.
:param long_in: Name/expression for right ascension
:param lat_in: Name/expression for declination
:param long_out: Output name for lambda coordinate
:param lat_out: Output name for beta coordinate
:param name_prefix:
:param radians: input and output in radians (True), or degrees (False)
:return:
"""
self.add_virtual_columns_celestial(long_in, lat_in, long_out, lat_out, name_prefix=name_prefix, radians=radians, _matrix='eq2ecl')
|
Convert parallax to distance (i.e. 1/parallax)
|
def add_virtual_columns_distance_from_parallax(self, parallax="parallax", distance_name="distance", parallax_uncertainty=None, uncertainty_postfix="_uncertainty"):
"""Convert parallax to distance (i.e. 1/parallax)
:param parallax: expression for the parallax, e.g. "parallax"
:param distance_name: name for the virtual column of the distance, e.g. "distance"
:param parallax_uncertainty: expression for the uncertainty on the parallax, e.g. "parallax_error"
:param uncertainty_postfix: distance_name + uncertainty_postfix is the name for the virtual column, e.g. "distance_uncertainty" by default
:return:
"""
"""
"""
import astropy.units
unit = self.unit(parallax)
# if unit:
# convert = unit.to(astropy.units.mas)
# distance_expression = "%f/(%s)" % (convert, parallax)
# else:
distance_expression = "1/%s" % (parallax)
self.ucds[distance_name] = "pos.distance"
self.descriptions[distance_name] = "Derived from parallax (%s)" % parallax
if unit:
if unit == astropy.units.milliarcsecond:
self.units[distance_name] = astropy.units.kpc
if unit == astropy.units.arcsecond:
self.units[distance_name] = astropy.units.parsec
self.add_virtual_column(distance_name, distance_expression)
if parallax_uncertainty:
"""
y = 1/x
sigma_y**2 = (1/x**2)**2 sigma_x**2
sigma_y = (1/x**2) sigma_x
sigma_y = y**2 sigma_x
sigma_y/y = (1/x) sigma_x
"""
name = distance_name + uncertainty_postfix
distance_uncertainty_expression = "{parallax_uncertainty}/({parallax})**2".format(**locals())
self.add_virtual_column(name, distance_uncertainty_expression)
self.descriptions[name] = "Uncertainty on the distance derived from parallax (%s)" % parallax
self.ucds[name] = "stat.error;pos.distance"
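A worked sketch of the propagation above (hypothetical values, parallax in mas): a parallax of 2 +/- 0.1 mas gives a distance of 1/2 = 0.5 kpc with uncertainty 0.1/2**2 = 0.025 kpc.
import numpy as np
import vaex

# hypothetical catalogue with parallaxes in milliarcseconds
df = vaex.from_arrays(parallax=np.array([2.0, 4.0]), parallax_error=np.array([0.1, 0.2]))
df.add_virtual_columns_distance_from_parallax(parallax='parallax', parallax_uncertainty='parallax_error')
# distance = 1/parallax -> [0.5, 0.25] kpc
# distance_uncertainty = parallax_error/parallax**2 -> [0.025, 0.0125] kpc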
|
Convert velocities from a cartesian system to proper motions and radial velocities
|
def add_virtual_columns_cartesian_velocities_to_pmvr(self, x="x", y="y", z="z", vx="vx", vy="vy", vz="vz", vr="vr", pm_long="pm_long", pm_lat="pm_lat", distance=None):
"""Concert velocities from a cartesian system to proper motions and radial velocities
TODO: errors
:param x: name of x column (input)
:param y: y
:param z: z
:param vx: vx
:param vy: vy
:param vz: vz
:param vr: name of the column for the radial velocity in the r direction (output)
:param pm_long: name of the column for the proper motion component in the longitude direction (output)
:param pm_lat: name of the column for the proper motion component in the latitude direction, positive points to the north pole (output)
:param distance: Expression for distance, if not given defaults to sqrt(x**2+y**2+z**2), but if this column already exists, passing this expression may lead to a better performance
:return:
"""
if distance is None:
distance = "sqrt({x}**2+{y}**2+{z}**2)".format(**locals())
k = 4.74057
self.add_variable("k", k, overwrite=False)
self.add_virtual_column(vr, "({x}*{vx}+{y}*{vy}+{z}*{vz})/{distance}".format(**locals()))
self.add_virtual_column(pm_long, "-({vx}*{y}-{x}*{vy})/sqrt({x}**2+{y}**2)/{distance}/k".format(**locals()))
self.add_virtual_column(pm_lat, "-({z}*({x}*{vx}+{y}*{vy}) - ({x}**2+{y}**2)*{vz})/( ({x}**2+{y}**2+{z}**2) * sqrt({x}**2+{y}**2) )/k".format(**locals()))
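A quick plain-numpy sanity check of the formulas above (hypothetical values): a purely radial velocity should produce zero proper motion.
import numpy as np

x, y, z = 1.0, 2.0, 2.0
r = np.sqrt(x**2 + y**2 + z**2)  # distance = 3
vx, vy, vz = 10 * x / r, 10 * y / r, 10 * z / r  # 10 km/s, radially outward
k = 4.74057
vr = (x * vx + y * vy + z * vz) / r  # -> 10.0
pm_long = -(vx * y - x * vy) / np.sqrt(x**2 + y**2) / r / k  # -> 0.0
pm_lat = -(z * (x * vx + y * vy) - (x**2 + y**2) * vz) / ((x**2 + y**2 + z**2) * np.sqrt(x**2 + y**2)) / k  # -> 0.0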
|
Transform/rotate proper motions from equatorial to galactic coordinates
|
def add_virtual_columns_proper_motion_eq2gal(self, long_in="ra", lat_in="dec", pm_long="pm_ra", pm_lat="pm_dec", pm_long_out="pm_l", pm_lat_out="pm_b",
name_prefix="__proper_motion_eq2gal",
right_ascension_galactic_pole=192.85,
declination_galactic_pole=27.12,
propagate_uncertainties=False,
radians=False, inverse=False):
"""Transform/rotate proper motions from equatorial to galactic coordinates
Taken from http://arxiv.org/abs/1306.2945
:param long_in: Name/expression for right ascension
:param lat_in: Name/expression for declination
:param pm_long: Proper motion for ra
:param pm_lat: Proper motion for dec
:param pm_long_out: Output name for output proper motion on l direction
:param pm_lat_out: Output name for output proper motion on b direction
:param name_prefix:
:param radians: input and output in radians (True), or degrees (False)
:param inverse: (For internal use) convert from galactic to equatorial instead
:return:
"""
"""mu_gb = mu_dec*(cdec*sdp-sdec*cdp*COS(ras))/cgb $
- mu_ra*cdp*SIN(ras)/cgb"""
long_in_original = long_in = self._expr(long_in)
lat_in_original = lat_in = self._expr(lat_in)
pm_long = self._expr(pm_long)
pm_lat = self._expr(pm_lat)
if not radians:
long_in = long_in * np.pi/180
lat_in = lat_in * np.pi/180
c1_name = name_prefix + "_C1"
c2_name = name_prefix + "_C2"
right_ascension_galactic_pole = math.radians(right_ascension_galactic_pole)
declination_galactic_pole = math.radians(declination_galactic_pole)
self[c1_name] = c1 = np.sin(declination_galactic_pole) * np.cos(lat_in) - np.cos(declination_galactic_pole)*np.sin(lat_in)*np.cos(long_in-right_ascension_galactic_pole)
self[c2_name] = c2 = np.cos(declination_galactic_pole) * np.sin(long_in - right_ascension_galactic_pole)
c1 = self[c1_name]
c2 = self[c2_name]
if inverse:
self[pm_long_out] = ( c1 * pm_long + -c2 * pm_lat)/np.sqrt(c1**2+c2**2)
self[pm_lat_out] = ( c2 * pm_long + c1 * pm_lat)/np.sqrt(c1**2+c2**2)
else:
self[pm_long_out] = ( c1 * pm_long + c2 * pm_lat)/np.sqrt(c1**2+c2**2)
self[pm_lat_out] = (-c2 * pm_long + c1 * pm_lat)/np.sqrt(c1**2+c2**2)
if propagate_uncertainties:
self.propagate_uncertainties([self[pm_long_out], self[pm_lat_out]])
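Note that (c1, c2)/sqrt(c1**2 + c2**2) is a unit vector, so the transform above is a pure rotation and preserves the total proper motion. A quick check with plain numpy (hypothetical values):
import numpy as np

c1, c2 = 0.3, -0.8
n = np.sqrt(c1**2 + c2**2)
pm_ra, pm_dec = 5.0, -2.0
pm_l = (c1 * pm_ra + c2 * pm_dec) / n
pm_b = (-c2 * pm_ra + c1 * pm_dec) / n
# the magnitude of the proper motion is unchanged by the rotation
assert np.isclose(np.hypot(pm_l, pm_b), np.hypot(pm_ra, pm_dec))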
|
Transform/rotate proper motions from galactic to equatorial coordinates.
|
def add_virtual_columns_proper_motion_gal2eq(self, long_in="ra", lat_in="dec", pm_long="pm_l", pm_lat="pm_b", pm_long_out="pm_ra", pm_lat_out="pm_dec",
name_prefix="__proper_motion_gal2eq",
right_ascension_galactic_pole=192.85,
declination_galactic_pole=27.12,
propagate_uncertainties=False,
radians=False):
"""Transform/rotate proper motions from galactic to equatorial coordinates.
Inverse of :py:meth:`add_virtual_columns_proper_motion_eq2gal`
"""
kwargs = dict(**locals())
kwargs.pop('self')
kwargs['inverse'] = True
self.add_virtual_columns_proper_motion_eq2gal(**kwargs)
|
Convert radial velocity and galactic proper motions (and positions) to cartesian velocities wrt the center_v
|
def add_virtual_columns_lbrvr_proper_motion2vcartesian(self, long_in="l", lat_in="b", distance="distance", pm_long="pm_l", pm_lat="pm_b",
vr="vr", vx="vx", vy="vy", vz="vz",
center_v=(0, 0, 0),
propagate_uncertainties=False, radians=False):
"""Convert radial velocity and galactic proper motions (and positions) to cartesian velocities wrt the center_v
Based on http://adsabs.harvard.edu/abs/1987AJ.....93..864J
:param long_in: Name/expression for galactic longitude
:param lat_in: Name/expression for galactic latitude
:param distance: Name/expression for heliocentric distance
:param pm_long: Name/expression for the galactic proper motion in the longitude direction (pm_l*, so the cosine(b) term should be included)
:param pm_lat: Name/expression for the galactic proper motion in the latitude direction
:param vr: Name/expression for the radial velocity
:param vx: Output name for the cartesian velocity x-component
:param vy: Output name for the cartesian velocity y-component
:param vz: Output name for the cartesian velocity z-component
:param center_v: Extra motion that should be added, for instance lsr + motion of the sun wrt the galactic restframe
:param radians: input and output in radians (True), or degrees (False)
:return:
"""
k = 4.74057
a, d, distance = self._expr(long_in, lat_in, distance)
pm_long, pm_lat, vr = self._expr(pm_long, pm_lat, vr)
if not radians:
a = a * np.pi/180
d = d * np.pi/180
A = [[np.cos(a)*np.cos(d), -np.sin(a), -np.cos(a)*np.sin(d)],
[np.sin(a)*np.cos(d), np.cos(a), -np.sin(a)*np.sin(d)],
[np.sin(d), d*0, np.cos(d)]]
self.add_virtual_columns_matrix3d(vr, k * pm_long * distance, k * pm_lat * distance, vx, vy, vz, A, translation=center_v)
if propagate_uncertainties:
self.propagate_uncertainties([self[vx], self[vy], self[vz]])
|
From http://arxiv.org/pdf/1306.2945v2.pdf
|
def add_virtual_columns_equatorial_to_galactic_cartesian(self, alpha, delta, distance, xname, yname, zname, radians=True, alpha_gp=np.radians(192.85948), delta_gp=np.radians(27.12825), l_omega=np.radians(32.93192)):
"""From http://arxiv.org/pdf/1306.2945v2.pdf"""
if not radians:
alpha = "pi/180.*%s" % alpha
delta = "pi/180.*%s" % delta
self.virtual_columns[zname] = "{distance} * (cos({delta}) * cos({delta_gp}) * cos({alpha} - {alpha_gp}) + sin({delta}) * sin({delta_gp}))".format(**locals())
self.virtual_columns[xname] = "{distance} * (cos({delta}) * sin({alpha} - {alpha_gp}))".format(**locals())
self.virtual_columns[yname] = "{distance} * (sin({delta}) * cos({delta_gp}) - cos({delta}) * sin({delta_gp}) * cos({alpha} - {alpha_gp}))".format(**locals())
|
Convert proper motion to perpendicular velocities.
|
def add_virtual_columns_proper_motion2vperpendicular(self, distance="distance", pm_long="pm_l", pm_lat="pm_b",
vl="vl", vb="vb",
propagate_uncertainties=False,
radians=False):
"""Convert proper motion to perpendicular velocities.
:param distance:
:param pm_long:
:param pm_lat:
:param vl:
:param vb:
:param cov_matrix_distance_pm_long_pm_lat:
:param uncertainty_postfix:
:param covariance_postfix:
:param radians:
:return:
"""
k = 4.74057
self.add_variable("k", k, overwrite=False)
self.add_virtual_column(vl, "k*{pm_long}*{distance}".format(**locals()))
self.add_virtual_column(vb, "k* {pm_lat}*{distance}".format(**locals()))
if propagate_uncertainties:
self.propagate_uncertainties([self[vl], self[vb]])
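A worked sketch of the scaling above (hypothetical values): with pm in mas/yr and distance in kpc, a proper motion of 10 mas/yr at 1 kpc gives vl = 4.74057 * 10 * 1, about 47.4 km/s.
import numpy as np
import vaex

df = vaex.from_arrays(distance=np.array([1.0]), pm_l=np.array([10.0]), pm_b=np.array([-5.0]))
df.add_virtual_columns_proper_motion2vperpendicular()
# vl ~ 47.4 km/s, vb ~ -23.7 km/s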
|
Calculate the angular momentum components from Cartesian positions and velocities. Be mindful of the point of origin: e.g. for Galactic dynamics, positions and velocities should be as seen from the Galactic centre.
|
def add_virtual_columns_cartesian_angular_momenta(self, x='x', y='y', z='z',
vx='vx', vy='vy', vz='vz',
Lx='Lx', Ly='Ly', Lz='Lz',
propagate_uncertainties=False):
"""
Calculate the angular momentum components from Cartesian positions and velocities.
Be mindful of the point of origin: e.g. for Galactic dynamics, positions and
velocities should be as seen from the Galactic centre.
:param x: x-position Cartesian component
:param y: y-position Cartesian component
:param z: z-position Cartesian component
:param vx: x-velocity Cartesian component
:param vy: y-velocity Cartesian component
:param vz: z-velocity Cartesian component
:param Lx: name of virtual column
:param Ly: name of virtual column
:param Lz: name of virtual column
:param propagate_uncertainties: (bool) whether to propagate the uncertainties of
the positions and velocities to the angular momentum components
"""
x, y, z, vx, vy, vz = self._expr(x, y, z, vx, vy, vz)
self.add_virtual_column(Lx, y * vz - z * vy)
self.add_virtual_column(Ly, z * vx - x * vz)
self.add_virtual_column(Lz, x * vy - y * vx)
if propagate_uncertainties:
self.propagate_uncertainties([self[Lx], self[Ly], self[Lz]])
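The three virtual columns above are the components of L = r x v; a quick plain-numpy check (hypothetical values):
import numpy as np

r = np.array([1.0, 2.0, 3.0])
v = np.array([-1.0, 0.5, 2.0])
Lx = r[1] * v[2] - r[2] * v[1]  # y*vz - z*vy
Ly = r[2] * v[0] - r[0] * v[2]  # z*vx - x*vz
Lz = r[0] * v[1] - r[1] * v[0]  # x*vy - y*vx
assert np.allclose([Lx, Ly, Lz], np.cross(r, v))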
|
NOTE: This cannot be called until after this has been added to an Axes, otherwise unit conversion will fail. This makes it very important to call the accessor method and not directly access the transformation member variable.
|
def _recompute_transform(self):
"""NOTE: This cannot be called until after this has been added
to an Axes, otherwise unit conversion will fail. This
makes it very important to call the accessor method and
not directly access the transformation member variable.
"""
center = (self.convert_xunits(self.center[0]),
self.convert_yunits(self.center[1]))
width = self.width #self.convert_xunits(self.width)
height = self.height #self.convert_yunits(self.height)
trans = artist.Artist.get_transform(self)
self._patch_transform = transforms.Affine2D() \
.scale(width * 0.5 * self.scale, height * 0.5 * self.scale) \
.rotate_deg(self.angle) \
.translate(*trans.transform(center))
|
Return a graph containing the dependencies of this expression. Structure is: [<string expression>, <function name if callable>, <function object if callable>, [subgraph/dependencies, ...]]
|
def _graph(self):
""""Return a graph containing the dependencies of this expression
Structure is:
[<string expression>, <function name if callable>, <function object if callable>, [subgraph/dependencies, ....]]
"""
expression = self.expression
def walk(node):
if isinstance(node, six.string_types):
if node in self.ds.virtual_columns:
ex = Expression(self.ds, self.ds.virtual_columns[node])
return [node, None, None, [ex._graph()]]
else:
return node
else:
fname, node_repr, deps = node
if len(node_repr) > 30: # clip too long expressions
node_repr = node_repr[:26] + ' ....'
deps = [walk(dep) for dep in deps]
obj = self.ds.functions.get(fname)
# we don't want the wrapper, we want the underlying object
if isinstance(obj, Function):
obj = obj.f
if isinstance(obj, FunctionSerializablePickle):
obj = obj.f
return [node_repr, fname, obj, deps]
return walk(expresso._graph(expression))
|
Return a graphviz.Digraph object with a graph of the expression
|
def _graphviz(self, dot=None):
"""Return a graphviz.Digraph object with a graph of the expression"""
from graphviz import Digraph
node = self._graph()
dot = dot or Digraph(comment=self.expression)
def walk(node):
if isinstance(node, six.string_types):
dot.node(node, node)
return node, node
else:
node_repr, fname, fobj, deps = node
node_id = node_repr
dot.node(node_id, node_repr)
for dep in deps:
dep_id, dep = walk(dep)
dot.edge(node_id, dep_id)
return node_id, node
walk(node)
return dot
|
Shortcut for ds.min(expression, ...), see Dataset.min
|
def min(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
'''Shortcut for ds.min(expression, ...), see `Dataset.min`'''
kwargs = dict(locals())
del kwargs['self']
kwargs['expression'] = self.expression
return self.ds.min(**kwargs)
|
Computes counts of unique values.
|
def value_counts(self, dropna=False, dropnull=True, ascending=False, progress=False):
"""Computes counts of unique values.
WARNING:
* If the expression/column is not categorical, it will be converted on the fly
* dropna is False by default, whereas it is True by default in pandas
:param dropna: when True, it will not report the missing values
:param ascending: when False (default) it will report the most frequently occurring item first
:returns: Pandas series containing the counts
"""
from pandas import Series
dtype = self.dtype
transient = self.transient or self.ds.filtered or self.ds.is_masked(self.expression)
if self.dtype == str_type and not transient:
# string is a special case, only ColumnString are not transient
ar = self.ds.columns[self.expression]
if not isinstance(ar, ColumnString):
transient = True
counter_type = counter_type_from_dtype(self.dtype, transient)
counters = [None] * self.ds.executor.thread_pool.nthreads
def map(thread_index, i1, i2, ar):
if counters[thread_index] is None:
counters[thread_index] = counter_type()
if dtype == str_type:
previous_ar = ar
ar = _to_string_sequence(ar)
if not transient:
assert ar is previous_ar.string_sequence
if np.ma.isMaskedArray(ar):
mask = np.ma.getmaskarray(ar)
counters[thread_index].update(ar, mask)
else:
counters[thread_index].update(ar)
return 0
def reduce(a, b):
return a+b
self.ds.map_reduce(map, reduce, [self.expression], delay=False, progress=progress, name='value_counts', info=True, to_numpy=False)
counters = [k for k in counters if k is not None]
counter0 = counters[0]
for other in counters[1:]:
counter0.merge(other)
value_counts = counter0.extract()
index = np.array(list(value_counts.keys()))
counts = np.array(list(value_counts.values()))
order = np.argsort(counts)
if not ascending:
order = order[::-1]
counts = counts[order]
index = index[order]
if not dropna or not dropnull:
index = index.tolist()
counts = counts.tolist()
if not dropna and counter0.nan_count:
index = [np.nan] + index
counts = [counter0.nan_count] + counts
if not dropnull and counter0.null_count:
index = ['null'] + index
counts = [counter0.null_count] + counts
return Series(counts, index=index)
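A minimal usage sketch (hypothetical data; counts come back as a pandas Series, most frequent value first):
import vaex

df = vaex.from_arrays(color=['red', 'red', 'blue', 'green', 'red'])
counts = df.color.value_counts()
# red with count 3 first, then blue and green with count 1 each (tie order unspecified)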
|
Map values of an expression or in memory column according to an input dictionary or a custom callable function.
|
def map(self, mapper, nan_mapping=None, null_mapping=None):
"""Map values of an expression or in memory column accoring to an input
dictionary or a custom callable function.
Example:
>>> import vaex
>>> df = vaex.from_arrays(color=['red', 'red', 'blue', 'red', 'green'])
>>> mapper = {'red': 1, 'blue': 2, 'green': 3}
>>> df['color_mapped'] = df.color.map(mapper)
>>> df
# color color_mapped
0 red 1
1 red 1
2 blue 2
3 red 1
4 green 3
>>> import numpy as np
>>> df = vaex.from_arrays(type=[0, 1, 2, 2, 2, np.nan])
>>> df['role'] = df['type'].map({0: 'admin', 1: 'maintainer', 2: 'user', np.nan: 'unknown'})
>>> df
# type role
0 0 admin
1 1 maintainer
2 2 user
3 2 user
4 2 user
5 nan unknown
:param mapper: dict like object used to map the values from keys to values
:param nan_mapping: value to be used when a nan is present (and not in the mapper)
:param null_mapping: value to be used when there is a missing value
:return: A vaex expression
:rtype: vaex.expression.Expression
"""
assert isinstance(mapper, collectionsAbc.Mapping), "mapper should be a dict like object"
df = self.ds
mapper_keys = np.array(list(mapper.keys()))
# we map the keys to ordinal values [0, N-1] using the set
key_set = df._set(self.expression)
found_keys = key_set.keys()
mapper_has_nan = any([key != key for key in mapper_keys])
# we want all possible values to be converted
# so mapper's key should be a superset of the keys found
if not set(mapper_keys).issuperset(found_keys):
missing = set(found_keys).difference(mapper_keys)
missing0 = list(missing)[0]
if missing0 == missing0: # safe nan check
raise ValueError('Missing values in mapper: %s' % missing)
# and these are the corresponding choices
choices = [mapper[key] for key in found_keys]
if key_set.has_nan:
if mapper_has_nan:
choices = [mapper[np.nan]] + choices
else:
choices = [nan_mapping] + choices
if key_set.has_null:
choices = [null_mapping] + choices
choices = np.array(choices)
key_set_name = df.add_variable('map_key_set', key_set, unique=True)
choices_name = df.add_variable('map_choices', choices, unique=True)
expr = '_choose(_ordinal_values({}, {}), {})'.format(self, key_set_name, choices_name)
return Expression(df, expr)
|
Create a vaex app; the QApplication mainloop must be started.
|
def app(*args, **kwargs):
"""Create a vaex app, the QApplication mainloop must be started.
In ipython notebook/jupyter do the following:
>>> import vaex.ui.main # this causes the qt api level to be set properly
>>> import vaex
Next cell:
>>> %gui qt
Next cell:
>>> app = vaex.app()
From now on, you can run the app along with jupyter
"""
import vaex.ui.main
return vaex.ui.main.VaexApp()
|
Convert a filename (or list of filenames) to a filename with .hdf5 and optionally a -shuffle suffix
|
def _convert_name(filenames, shuffle=False):
'''Convert a filename (or list of) to a filename with .hdf5 and optionally a -shuffle suffix'''
if not isinstance(filenames, (list, tuple)):
filenames = [filenames]
base = filenames[0]
if shuffle:
base += '-shuffle'
if len(filenames) > 1:
return base + "_and_{}_more.hdf5".format(len(filenames)-1)
else:
return base + ".hdf5"
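A few illustrative inputs and the names this helper produces (hypothetical file names, calls evaluated in this module's namespace):
_convert_name('data.csv')                   # -> 'data.csv.hdf5'
_convert_name('data.csv', shuffle=True)     # -> 'data.csv-shuffle.hdf5'
_convert_name(['a.csv', 'b.csv', 'c.csv'])  # -> 'a.csv_and_2_more.hdf5'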
|
Open a DataFrame from file given by path.
|
def open(path, convert=False, shuffle=False, copy_index=True, *args, **kwargs):
"""Open a DataFrame from file given by path.
Example:
>>> ds = vaex.open('sometable.hdf5')
>>> ds = vaex.open('somedata*.csv', convert='bigdata.hdf5')
:param str path: local or absolute path to file, or glob string
:param convert: convert files to an hdf5 file for optimization, can also be a path
:param bool shuffle: shuffle converted DataFrame or not
:param args: extra arguments for file readers that need it
:param kwargs: extra keyword arguments
:param bool copy_index: copy index when source is read via pandas
:return: return a DataFrame on success, otherwise None
:rtype: DataFrame
"""
import vaex
try:
if path in aliases:
path = aliases[path]
if path.startswith("http://") or path.startswith("ws://"): # TODO: think about https and wss
server, DataFrame = path.rsplit("/", 1)
server = vaex.server(server, **kwargs)
DataFrames = server.DataFrames(as_dict=True)
if DataFrame not in DataFrames:
raise KeyError("no such DataFrame '%s' at server, possible DataFrame names: %s" % (DataFrame, " ".join(DataFrames.keys())))
return DataFrames[DataFrame]
if path.startswith("cluster"):
import vaex.distributed
return vaex.distributed.open(path, *args, **kwargs)
else:
import vaex.file
import glob
# sort to get predictable behaviour (useful for testing)
filenames = list(sorted(glob.glob(path)))
ds = None
if len(filenames) == 0:
raise IOError('Could not open file: {}, it does not exist'.format(path))
filename_hdf5 = _convert_name(filenames, shuffle=shuffle)
filename_hdf5_noshuffle = _convert_name(filenames, shuffle=False)
if len(filenames) == 1:
path = filenames[0]
ext = os.path.splitext(path)[1]
if os.path.exists(filename_hdf5) and convert: # also check mtime?
ds = vaex.file.open(filename_hdf5)
else:
if ext == '.csv': # special support for csv.. should probably approach it a different way
ds = from_csv(path, copy_index=copy_index, **kwargs)
else:
ds = vaex.file.open(path, *args, **kwargs)
if convert:
ds.export_hdf5(filename_hdf5, shuffle=shuffle)
ds = vaex.file.open(filename_hdf5) # arguments were meant for pandas?
if ds is None:
if os.path.exists(path):
raise IOError('Could not open file: {}, did you install vaex-hdf5?'.format(path))
if not os.path.exists(path):
raise IOError('Could not open file: {}, it does not exist'.format(path))
elif len(filenames) > 1:
if convert not in [True, False]:
filename_hdf5 = convert
else:
filename_hdf5 = _convert_name(filenames, shuffle=shuffle)
if os.path.exists(filename_hdf5) and convert: # also check mtime
ds = open(filename_hdf5)
else:
# with ProcessPoolExecutor() as executor:
# executor.submit(read_csv_and_convert, filenames, shuffle=shuffle, **kwargs)
DataFrames = []
for filename in filenames:
DataFrames.append(open(filename, convert=bool(convert), shuffle=shuffle, **kwargs))
ds = vaex.dataframe.DataFrameConcatenated(DataFrames)
if convert:
ds.export_hdf5(filename_hdf5, shuffle=shuffle)
ds = vaex.file.open(filename_hdf5, *args, **kwargs)
if ds is None:
raise IOError('Unknown error opening: {}'.format(path))
return ds
except:
logging.getLogger("vaex").error("error opening %r" % path)
raise
|
Open a list of filenames and return a DataFrame with all DataFrames concatenated.
|
def open_many(filenames):
"""Open a list of filenames, and return a DataFrame with all DataFrames cocatenated.
:param list[str] filenames: list of filenames/paths
:rtype: DataFrame
"""
dfs = []
for filename in filenames:
filename = filename.strip()
if filename and filename[0] != "#":
dfs.append(open(filename))
return vaex.dataframe.DataFrameConcatenated(dfs=dfs)
|
Connect to a SAMP Hub and wait for a single table load event disconnect download the table and return the DataFrame.
|
def from_samp(username=None, password=None):
"""Connect to a SAMP Hub and wait for a single table load event, disconnect, download the table and return the DataFrame.
Useful if you want to send a single table from say TOPCAT to vaex in a python console or notebook.
"""
print("Waiting for SAMP message...")
import vaex.samp
t = vaex.samp.single_table(username=username, password=password)
return from_astropy_table(t.to_table())
|
Create a vaex DataFrame from an Astropy Table.
|
def from_astropy_table(table):
"""Create a vaex DataFrame from an Astropy Table."""
import vaex.file.other
return vaex.file.other.DatasetAstropyTable(table=table)
|
Create an in memory DataFrame from numpy arrays; in contrast to from_arrays this keeps the order of columns intact (for Python < 3.6).
|
def from_items(*items):
"""Create an in memory DataFrame from numpy arrays, in contrast to from_arrays this keeps the order of columns intact (for Python < 3.6).
Example
>>> import vaex, numpy as np
>>> x = np.arange(5)
>>> y = x ** 2
>>> vaex.from_items(('x', x), ('y', y))
# x y
0 0 0
1 1 1
2 2 4
3 3 9
4 4 16
:param items: list of [(name, numpy array), ...]
:rtype: DataFrame
"""
import numpy as np
df = vaex.dataframe.DataFrameArrays("array")
for name, array in items:
df.add_column(name, np.asanyarray(array))
return df
|
Create an in memory DataFrame from numpy arrays.
|
def from_arrays(**arrays):
"""Create an in memory DataFrame from numpy arrays.
Example
>>> import vaex, numpy as np
>>> x = np.arange(5)
>>> y = x ** 2
>>> vaex.from_arrays(x=x, y=y)
# x y
0 0 0
1 1 1
2 2 4
3 3 9
4 4 16
>>> some_dict = {'x': x, 'y': y}
>>> vaex.from_arrays(**some_dict) # in case you have your columns in a dict
# x y
0 0 0
1 1 1
2 2 4
3 3 9
4 4 16
:param arrays: keyword arguments with arrays
:rtype: DataFrame
"""
import numpy as np
import six
from .column import Column
df = vaex.dataframe.DataFrameArrays("array")
for name, array in arrays.items():
if isinstance(array, Column):
df.add_column(name, array)
else:
array = np.asanyarray(array)
df.add_column(name, array)
return df
|
Similar to from_arrays but convenient for a DataFrame of length 1.
|
def from_scalars(**kwargs):
"""Similar to from_arrays, but convenient for a DataFrame of length 1.
Example:
>>> import vaex
>>> df = vaex.from_scalars(x=1, y=2)
:rtype: DataFrame
"""
import numpy as np
return from_arrays(**{k: np.array([v]) for k, v in kwargs.items()})
|
Create an in memory DataFrame from a pandas DataFrame.
|
def from_pandas(df, name="pandas", copy_index=True, index_name="index"):
"""Create an in memory DataFrame from a pandas DataFrame.
:param pandas.DataFrame df: Pandas DataFrame
:param name: unique name for the DataFrame
>>> import vaex, pandas as pd
>>> df_pandas = pd.read_csv('test.csv')
>>> df = vaex.from_pandas(df_pandas)
:rtype: DataFrame
"""
import six
vaex_df = vaex.dataframe.DataFrameArrays(name)
def add(name, column):
values = column.values
try:
vaex_df.add_column(name, values)
except Exception as e:
print("could not convert column %s, error: %r, will try to convert it to string" % (name, e))
try:
values = values.astype("S")
vaex_df.add_column(name, values)
except Exception as e:
print("Giving up column %s, error: %r" % (name, e))
for name in df.columns:
add(name, df[name])
if copy_index:
add(index_name, df.index)
return vaex_df
|
Create an in memory DataFrame from an ascii file (whitespace separated by default).
|
def from_ascii(path, seperator=None, names=True, skip_lines=0, skip_after=0, **kwargs):
"""
Create an in memory DataFrame from an ascii file (whitespace separated by default).
>>> ds = vx.from_ascii("table.asc")
>>> ds = vx.from_ascii("table.csv", seperator=",", names=["x", "y", "z"])
:param path: file path
:param seperator: value separator, by default whitespace, use "," for comma separated values.
:param names: If True, the first line is used for the column names, otherwise provide a list of strings with names
:param skip_lines: skip lines at the start of the file
:param skip_after: skip lines at the end of the file
:param kwargs:
:rtype: DataFrame
"""
import vaex.ext.readcol as rc
ds = vaex.dataframe.DataFrameArrays(path)
if names not in [True, False]:
namelist = names
names = False
else:
namelist = None
data = rc.readcol(path, fsep=seperator, asdict=namelist is None, names=names, skipline=skip_lines, skipafter=skip_after, **kwargs)
if namelist:
for name, array in zip(namelist, data.T):
ds.add_column(name, array)
else:
for name, array in data.items():
ds.add_column(name, array)
return ds
|
Shortcut to read a csv file using pandas and convert to a DataFrame directly.
|
def from_csv(filename_or_buffer, copy_index=True, **kwargs):
"""Shortcut to read a csv file using pandas and convert to a DataFrame directly.
:rtype: DataFrame
"""
import pandas as pd
return from_pandas(pd.read_csv(filename_or_buffer, **kwargs), copy_index=copy_index)
|
Convert a path (or glob pattern) to a single hdf5 file; will open the hdf5 file if it exists.
|
def read_csv_and_convert(path, shuffle=False, copy_index=True, **kwargs):
'''Convert a path (or glob pattern) to a single hdf5 file, will open the hdf5 file if it exists.
Example:
>>> vaex.read_csv_and_convert('test-*.csv', shuffle=True) # this may take a while
>>> vaex.read_csv_and_convert('test-*.csv', shuffle=True) # 2nd time it is instant
:param str path: path of file or glob pattern for multiple files
:param bool shuffle: shuffle DataFrame when converting to hdf5
:param bool copy_index: by default pandas will create an index (row number), set to false if you want to drop that
:param kwargs: parameters passed to pandas' read_csv
'''
from concurrent.futures import ProcessPoolExecutor
import pandas as pd
filenames = glob.glob(path)
if len(filenames) > 1:
filename_hdf5 = _convert_name(filenames, shuffle=shuffle)
filename_hdf5_noshuffle = _convert_name(filenames, shuffle=False)
if not os.path.exists(filename_hdf5):
if not os.path.exists(filename_hdf5_noshuffle):
# with ProcessPoolExecutor() as executor:
# executor.submit(read_csv_and_convert, filenames, shuffle=shuffle, **kwargs)
for filename in filenames:
read_csv_and_convert(filename, shuffle=shuffle, copy_index=copy_index, **kwargs)
ds = open_many([_convert_name(k, shuffle=shuffle) for k in filenames])
else:
ds = open(filename_hdf5_noshuffle)
ds.export_hdf5(filename_hdf5, shuffle=shuffle)
return open(filename_hdf5)
else:
filename = filenames[0]
filename_hdf5 = _convert_name(filename, shuffle=shuffle)
filename_hdf5_noshuffle = _convert_name(filename, shuffle=False)
if not os.path.exists(filename_hdf5):
if not os.path.exists(filename_hdf5_noshuffle):
df = pd.read_csv(filename, **kwargs)
ds = from_pandas(df, copy_index=copy_index)
else:
ds = open(filename_hdf5_noshuffle)
ds.export_hdf5(filename_hdf5, shuffle=shuffle)
return open(filename_hdf5)
|
Connect to hostname supporting the vaex web api.
|
def server(url, **kwargs):
"""Connect to hostname supporting the vaex web api.
:param str url: url of the server (http or ws scheme), i.e. hostname or ip address plus optional port and path
:return vaex.dataframe.ServerRest: returns a server object, note that it does not connect to the server yet, so this will always succeed
:rtype: ServerRest
"""
from vaex.remote import ServerRest
url = urlparse(url)
if url.scheme == "ws":
websocket = True
else:
websocket = False
assert url.scheme in ["ws", "http"]
port = url.port
base_path = url.path
hostname = url.hostname
return vaex.remote.ServerRest(hostname, base_path=base_path, port=port, websocket=websocket, **kwargs)
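A minimal usage sketch (hypothetical host and port; the connection is made lazily, so this succeeds even while the server is unreachable):
import vaex

remote = vaex.server('ws://localhost:9000')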
|
Returns an example DataFrame which comes with vaex for testing/learning purposes.
|
def example(download=True):
"""Returns an example DataFrame which comes with vaex for testing/learning purposes.
:rtype: DataFrame
"""
from . import utils
path = utils.get_data_file("helmi-dezeeuw-2000-10p.hdf5")
if path is None and download:
return vaex.datasets.helmi_de_zeeuw_10percent.fetch()
return open(path) if path else None
|
Creates a zeldovich DataFrame.
|
def zeldovich(dim=2, N=256, n=-2.5, t=None, scale=1, seed=None):
"""Creates a zeldovich DataFrame.
"""
import vaex.file.other
return vaex.file.other.Zeldovich(dim=dim, N=N, n=n, t=t, scale=scale)
|
Concatenate a list of DataFrames.
|
def concat(dfs):
'''Concatenate a list of DataFrames.
:rtype: DataFrame
'''
ds = reduce((lambda x, y: x.concat(y)), dfs)
return ds
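A minimal usage sketch (hypothetical data): concatenating two small DataFrames row-wise.
import numpy as np
import vaex

df1 = vaex.from_arrays(x=np.arange(3))
df2 = vaex.from_arrays(x=np.arange(3, 6))
df = vaex.concat([df1, df2])  # len(df) == 6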
|
Creates a virtual column which is the equivalent of numpy.arange, but uses 0 memory
|
def vrange(start, stop, step=1, dtype='f8'):
"""Creates a virtual column which is the equivalent of numpy.arange, but uses 0 memory"""
from .column import ColumnVirtualRange
return ColumnVirtualRange(start, stop, step, dtype)
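A minimal sketch (assuming add_column accepts the Column object returned here): add a zero-memory row index to a DataFrame; the values are computed on the fly instead of being stored in an array.
import vaex

df = vaex.example()
df.add_column('row_index', vaex.vrange(0, len(df)))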
|
Start the vaex Qt application: create the QApplication, install the exception hook, and run the main event loop.
|
def main(argv=sys.argv[1:]):
global main_thread
global vaex
global app
global kernel
global ipython_console
global current
vaex.set_log_level_warning()
if app is None:
app = QtGui.QApplication(argv)
if not (frozen and darwin): # osx app has its own icon file
import vaex.ui.icons
icon = QtGui.QIcon(vaex.ui.icons.iconfile('vaex128'))
app.setWindowIcon(icon)
# import vaex.ipkernel_qtapp
# ipython_window = vaex.ipkernel_qtapp.SimpleWindow(app)
main_thread = QtCore.QThread.currentThread()
# print select_many(None, "lala", ["aap", "noot"] + ["item-%d-%s" % (k, "-" * k) for k in range(30)])
# sys.exit(0)
# sys._excepthook = sys.excepthook
def qt_exception_hook(exctype, value, traceback):
print("qt hook in thread: %r" % threading.currentThread())
sys.__excepthook__(exctype, value, traceback)
qt_exception(None, exctype, value, traceback)
# sys._excepthook(exctype, value, traceback)
# sys.exit(1)
sys.excepthook = qt_exception_hook
vaex.promise.Promise.unhandled = staticmethod(qt_exception_hook)
# raise RuntimeError, "blaat"
vaex_app = VaexApp(argv, open_default=True)
def plot(*args, **kwargs):
vaex_app.plot(*args, **kwargs)
def select(*args, **kwargs):
vaex_app.select(*args, **kwargs)
"""if 1:
# app = guisupport.get_app_qt4()
print_process_id()
# Create an in-process kernel
# >>> print_process_id( )
# will print the same process ID as the main process
kernel_manager = QtInProcessKernelManager()
kernel_manager.start_kernel()
kernel = kernel_manager.kernel
kernel.gui = 'qt4'
kernel.shell.push({'foo': 43, 'print_process_id': print_process_id, "vaex_app":vaex_app, "plot": plot, "current": current, "select": select})
kernel_client = kernel_manager.client()
kernel_client.start_channels()
def stop():
kernel_client.stop_channels()
kernel_manager.shutdown_kernel()
app.exit()
ipython_console = RichJupyterWidget()
ipython_console.kernel_manager = kernel_manager
ipython_console.kernel_client = kernel_client
ipython_console.exit_requested.connect(stop)
#ipython_console.show()
sys.exit(guisupport.start_event_loop_qt4(app))
"""
# w = QtGui.QWidget()
# w.resize(250, 150)
# w.move(300, 300)
# w.setWindowTitle('Simple')
# w.show()
# ipython_window.show()
# ipython_window.ipkernel.start()
sys.exit(app.exec_())
|