Dataset columns:

| column | dtype | range |
|---|---|---|
| partition | string | 3 classes |
| func_name | string | lengths 1 to 134 |
| docstring | string | lengths 1 to 46.9k |
| path | string | lengths 4 to 223 |
| original_string | string | lengths 75 to 104k |
| code | string | lengths 75 to 104k |
| docstring_tokens | list | lengths 1 to 1.97k |
| repo | string | lengths 7 to 55 |
| language | string | 1 class |
| url | string | lengths 87 to 315 |
| code_tokens | list | lengths 19 to 28.4k |
| sha | string | lengths 40 to 40 |
partition: valid
func_name: BasePeonyClient.run_tasks
docstring: Run the tasks attached to the instance
path: peony/client.py
code:

    async def run_tasks(self):
        """ Run the tasks attached to the instance """
        tasks = self.get_tasks()
        self._gathered_tasks = asyncio.gather(*tasks, loop=self.loop)
        try:
            await self._gathered_tasks
        except CancelledError:
            pass
repo: odrling/peony-twitter
language: python
url: https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/client.py#L402-L409
sha: 967f98e16e1889389540f2e6acbf7cc7a1a80203

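The shape of run_tasks — gather every task into a single future, then treat cancellation as a normal shutdown — can be reproduced with plain asyncio. A minimal sketch (the loop= argument used above was removed from asyncio.gather in Python 3.10, so it is omitted here; worker is an illustrative stand-in for a client task):

    import asyncio
    from asyncio import CancelledError

    async def worker(delay):
        # stand-in for one of the client's attached tasks
        await asyncio.sleep(delay)
        return delay

    async def main():
        gathered = asyncio.gather(worker(1), worker(2))
        # cancel shortly after starting, as a client shutdown would
        asyncio.get_running_loop().call_later(0.1, gathered.cancel)
        try:
            await gathered
        except CancelledError:
            pass  # cancellation is the expected way out

    asyncio.run(main())
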
partition: valid
func_name: BasePeonyClient.close
docstring: properly close the client
path: peony/client.py
code:

    async def close(self):
        """ properly close the client """
        tasks = self._get_close_tasks()
        if tasks:
            await asyncio.wait(tasks)
        self._session = None
repo: odrling/peony-twitter
language: python
url: https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/client.py#L458-L465
sha: 967f98e16e1889389540f2e6acbf7cc7a1a80203

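Here asyncio.wait runs all of the close tasks concurrently and returns once every one has completed. A self-contained sketch of the same shutdown shape; release and the task names are illustrative, not peony's:

    import asyncio

    async def release(name):
        # stand-in for one cleanup coroutine (session, stream, ...)
        await asyncio.sleep(0)
        print("closed", name)

    async def shutdown():
        tasks = [asyncio.ensure_future(release(n)) for n in ("session", "stream")]
        if tasks:
            await asyncio.wait(tasks)  # wait for all of them, in any order

    asyncio.run(shutdown())
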
partition: valid
func_name: PeonyClient._get_twitter_configuration
docstring:

    create a ``twitter_configuration`` attribute with the response
    of the endpoint
    https://api.twitter.com/1.1/help/configuration.json

path: peony/client.py
code:

    async def _get_twitter_configuration(self):
        """
        create a ``twitter_configuration`` attribute with the response
        of the endpoint
        https://api.twitter.com/1.1/help/configuration.json
        """
        api = self['api', general.twitter_api_version,
                   ".json", general.twitter_base_api_url]

        return await api.help.configuration.get()
repo: odrling/peony-twitter
language: python
url: https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/client.py#L487-L496
sha: 967f98e16e1889389540f2e6acbf7cc7a1a80203

partition: valid
func_name: PeonyClient._get_user
docstring:

    create a ``user`` attribute with the response of the endpoint
    https://api.twitter.com/1.1/account/verify_credentials.json

path: peony/client.py
code:

    async def _get_user(self):
        """
        create a ``user`` attribute with the response of the endpoint
        https://api.twitter.com/1.1/account/verify_credentials.json
        """
        api = self['api', general.twitter_api_version,
                   ".json", general.twitter_base_api_url]

        return await api.account.verify_credentials.get()
repo: odrling/peony-twitter
language: python
url: https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/client.py#L506-L514
sha: 967f98e16e1889389540f2e6acbf7cc7a1a80203

partition: valid
func_name: PeonyClient._chunked_upload
docstring:

    upload media in chunks

    Parameters
    ----------
    media : file object
        a file object of the media
    media_size : int
        size of the media
    path : str, optional
        filename of the media
    media_type : str, optional
        mime type of the media
    media_category : str, optional
        twitter media category, must be used with ``media_type``
    chunk_size : int, optional
        size of a chunk in bytes
    params : dict, optional
        additional parameters of the request

    Returns
    -------
    .data_processing.PeonyResponse
        Response of the request

path: peony/client.py
code:

    async def _chunked_upload(self, media, media_size,
                              path=None,
                              media_type=None,
                              media_category=None,
                              chunk_size=2**20,
                              **params):
        """
        upload media in chunks

        Parameters
        ----------
        media : file object
            a file object of the media
        media_size : int
            size of the media
        path : str, optional
            filename of the media
        media_type : str, optional
            mime type of the media
        media_category : str, optional
            twitter media category, must be used with ``media_type``
        chunk_size : int, optional
            size of a chunk in bytes
        params : dict, optional
            additional parameters of the request

        Returns
        -------
        .data_processing.PeonyResponse
            Response of the request
        """
        if isinstance(media, bytes):
            media = io.BytesIO(media)

        chunk = media.read(chunk_size)
        is_coro = asyncio.iscoroutine(chunk)

        if is_coro:
            chunk = await chunk

        if media_type is None:
            media_metadata = await utils.get_media_metadata(chunk, path)
            media_type, media_category = media_metadata
        elif media_category is None:
            media_category = utils.get_category(media_type)

        response = await self.upload.media.upload.post(
            command="INIT",
            total_bytes=media_size,
            media_type=media_type,
            media_category=media_category,
            **params
        )

        media_id = response['media_id']
        i = 0

        while chunk:
            if is_coro:
                req = self.upload.media.upload.post(command="APPEND",
                                                    media_id=media_id,
                                                    media=chunk,
                                                    segment_index=i)
                chunk, _ = await asyncio.gather(media.read(chunk_size), req)
            else:
                await self.upload.media.upload.post(command="APPEND",
                                                    media_id=media_id,
                                                    media=chunk,
                                                    segment_index=i)
                chunk = media.read(chunk_size)

            i += 1

        status = await self.upload.media.upload.post(command="FINALIZE",
                                                     media_id=media_id)

        if 'processing_info' in status:
            while status['processing_info'].get('state') != "succeeded":
                processing_info = status['processing_info']
                if processing_info.get('state') == "failed":
                    error = processing_info.get('error', {})
                    message = error.get('message', str(status))
                    raise exceptions.MediaProcessingError(data=status,
                                                          message=message,
                                                          **params)

                delay = processing_info['check_after_secs']
                await asyncio.sleep(delay)

                status = await self.upload.media.upload.get(
                    command="STATUS",
                    media_id=media_id,
                    **params
                )

        return response
repo: odrling/peony-twitter
language: python
url: https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/client.py#L542-L640
sha: 967f98e16e1889389540f2e6acbf7cc7a1a80203

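This is Twitter's chunked media upload protocol: INIT reserves a media_id, APPEND sends numbered segments, FINALIZE ends the upload, and STATUS is polled while the server processes the media. A stripped-down, synchronous sketch of the chunking loop; post here is a hypothetical stub standing in for the media/upload endpoint, not peony's request API:

    import io

    def post(command, **kwargs):
        # hypothetical stand-in for POST media/upload
        print(command, sorted(k for k in kwargs if k != "media"))
        return {"media_id": 1} if command == "INIT" else {}

    def chunked_upload(data, chunk_size=2**20):
        media = io.BytesIO(data)
        media_id = post("INIT", total_bytes=len(data))["media_id"]
        chunk, index = media.read(chunk_size), 0
        while chunk:
            # every segment carries its index so the server can reassemble
            post("APPEND", media_id=media_id, media=chunk, segment_index=index)
            chunk, index = media.read(chunk_size), index + 1
        post("FINALIZE", media_id=media_id)

    chunked_upload(b"x" * (3 * 2**20 + 5))  # INIT, 4 APPENDs, FINALIZE
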
partition: valid
func_name: PeonyClient.upload_media
docstring:

    upload a media on twitter

    Parameters
    ----------
    file_ : str or pathlib.Path or file
        Path to the file or file object
    media_type : str, optional
        mime type of the media
    media_category : str, optional
        Twitter's media category of the media, must be used with
        ``media_type``
    chunked : bool, optional
        If True, force the use of the chunked upload for the media
    size_limit : int, optional
        If set, the media will be sent using a multipart upload if
        its size is over ``size_limit`` bytes
    params : dict
        parameters used when making the request

    Returns
    -------
    .data_processing.PeonyResponse
        Response of the request

path: peony/client.py
code:

    async def upload_media(self, file_,
                           media_type=None,
                           media_category=None,
                           chunked=None,
                           size_limit=None,
                           **params):
        """
        upload a media on twitter

        Parameters
        ----------
        file_ : str or pathlib.Path or file
            Path to the file or file object
        media_type : str, optional
            mime type of the media
        media_category : str, optional
            Twitter's media category of the media, must be used with
            ``media_type``
        chunked : bool, optional
            If True, force the use of the chunked upload for the media
        size_limit : int, optional
            If set, the media will be sent using a multipart upload if
            its size is over ``size_limit`` bytes
        params : dict
            parameters used when making the request

        Returns
        -------
        .data_processing.PeonyResponse
            Response of the request
        """
        if isinstance(file_, str):
            url = urlparse(file_)

            if url.scheme.startswith('http'):
                media = await self._session.get(file_)
            else:
                path = urlparse(file_).path.strip(" \"'")
                media = await utils.execute(open(path, 'rb'))

        elif hasattr(file_, 'read') or isinstance(file_, bytes):
            media = file_
        else:
            raise TypeError("upload_media input must be a file object or a "
                            "filename or binary data or an aiohttp request")

        media_size = await utils.get_size(media)

        if chunked is not None:
            size_test = False
        else:
            size_test = await self._size_test(media_size, size_limit)

        if isinstance(media, aiohttp.ClientResponse):
            # send the content of the response
            media = media.content

        if chunked or (size_test and chunked is None):
            args = media, media_size, file_, media_type, media_category
            response = await self._chunked_upload(*args, **params)
        else:
            response = await self.upload.media.upload.post(media=media,
                                                           **params)

        if not hasattr(file_, 'read') and not getattr(media, 'closed', True):
            media.close()

        return response
repo: odrling/peony-twitter
language: python
url: https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/client.py#L652-L716
sha: 967f98e16e1889389540f2e6acbf7cc7a1a80203

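A usage sketch in the spirit of peony's documentation; the credentials are placeholders and the response attribute (media.media_id) should be treated as an assumption about PeonyResponse:

    import asyncio
    from peony import PeonyClient

    client = PeonyClient(consumer_key="...", consumer_secret="...",
                         access_token="...", access_token_secret="...")

    async def send_picture():
        # a path, bytes, or an open file all work; large files go through
        # the chunked upload automatically once the size test triggers
        media = await client.upload_media('picture.jpg')
        await client.api.statuses.update.post(status="Wow!",
                                              media_ids=[media.media_id])

    # asyncio.run(send_picture())  # requires real credentials
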
partition: valid
func_name: split_stdout_lines
docstring:

    Given the standard output from NetMHC/NetMHCpan/NetMHCcons tools,
    drop all {comments, lines of hyphens, empty lines} and split the
    remaining lines by whitespace.

path: mhctools/parsing.py
code:

    def split_stdout_lines(stdout):
        """
        Given the standard output from NetMHC/NetMHCpan/NetMHCcons tools,
        drop all {comments, lines of hyphens, empty lines} and split the
        remaining lines by whitespace.
        """
        # all the NetMHC formats use lines full of dashes before any actual
        # binding results
        seen_dash = False
        for l in stdout.split("\n"):
            l = l.strip()

            # wait for a line like '----------' before trying to parse entries
            # have to include multiple dashes here since NetMHC 4.0 sometimes
            # gives negative positions in its "peptide" input mode
            if l.startswith("---"):
                seen_dash = True
                continue

            if not seen_dash:
                continue

            # ignore empty lines and comments
            if not l or l.startswith("#"):
                continue

            # beginning of headers in NetMHC
            if any(l.startswith(word) for word in NETMHC_TOKENS):
                continue

            yield l.split()
repo: openvax/mhctools
language: python
url: https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/parsing.py#L41-L66
sha: b329b4dccd60fae41296816b8cbfe15d6ca07e67

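Because everything before the first dashed line is dropped, the generator is easy to exercise on a toy transcript. A sketch assuming split_stdout_lines from this record is in scope; NETMHC_TOKENS is stubbed here, the real tuple of header words lives in mhctools:

    NETMHC_TOKENS = ("pos", "Protein")  # stub for illustration only

    sample = """# anything before the dashes is ignored
    ----------
    pos peptide score
    0 SIINFEKL 0.5

    1 IINFEKLX 0.1
    """

    print(list(split_stdout_lines(sample)))
    # [['0', 'SIINFEKL', '0.5'], ['1', 'IINFEKLX', '0.1']]
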
partition: valid
func_name: clean_fields
docstring:

    Sometimes, NetMHC* has fields that are only populated sometimes, which results
    in different count/indexing of the fields when that happens.

    We handle this by looking for particular strings at particular indices, and
    deleting them.

    Warning: this may result in unexpected behavior sometimes. For example, we
    ignore "SB" and "WB" for NetMHC 3.x output; which also means that any line
    with a key called SB or WB will be ignored.

    Also, sometimes NetMHC* will have fields that we want to modify in some
    consistent way, e.g. NetMHCpan3 has 1-based offsets and all other predictors
    have 0-based offsets (and we rely on 0-based offsets). We handle this using
    a map from field index to transform function.

path: mhctools/parsing.py
code:

    def clean_fields(fields, ignored_value_indices, transforms):
        """
        Sometimes, NetMHC* has fields that are only populated sometimes, which results
        in different count/indexing of the fields when that happens.

        We handle this by looking for particular strings at particular indices, and
        deleting them.

        Warning: this may result in unexpected behavior sometimes. For example, we
        ignore "SB" and "WB" for NetMHC 3.x output; which also means that any line
        with a key called SB or WB will be ignored.

        Also, sometimes NetMHC* will have fields that we want to modify in some
        consistent way, e.g. NetMHCpan3 has 1-based offsets and all other predictors
        have 0-based offsets (and we rely on 0-based offsets). We handle this using
        a map from field index to transform function.
        """
        cleaned_fields = []
        for i, field in enumerate(fields):
            if field in ignored_value_indices:
                ignored_index = ignored_value_indices[field]

                # Is the value we want to ignore at the index where we'd ignore it?
                if ignored_index == i:
                    continue

            # transform this field if the index is in transforms, otherwise leave alone
            cleaned_field = transforms[i](field) if i in transforms else field
            cleaned_fields.append(cleaned_field)
        return cleaned_fields
repo: openvax/mhctools
language: python
url: https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/parsing.py#L69-L98
sha: b329b4dccd60fae41296816b8cbfe15d6ca07e67

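The two mechanisms compose: ignored_value_indices deletes a value only when it sits at the expected index, and transforms rewrites a field by index. A small demo assuming clean_fields from this record is in scope, using the NetMHC 3.x "WB"/"SB" rule together with the 1-based-to-0-based offset fix used by the NetMHCpan 3 parser below:

    fields = ["1", "SIINFEKL", "0.437", "441", "WB", "A1", "HLA-A02:01"]

    cleaned = clean_fields(
        fields,
        ignored_value_indices={"WB": 4, "SB": 4},  # drop bind level at index 4
        transforms={0: lambda x: int(x) - 1},      # make "pos" zero-based
    )
    print(cleaned)
    # [0, 'SIINFEKL', '0.437', '441', 'A1', 'HLA-A02:01']
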
partition: valid
func_name: parse_stdout
docstring:

    Generic function for parsing any NetMHC* output, given expected indices
    of values of interest.

    Parameters
    ----------
    ignored_value_indices : dict
        Map from values to the positions we'll ignore them at. See clean_fields.
    transforms : dict
        Map from field index to a transform function to be applied to values in
        that field. See clean_fields.

    Returns BindingPredictionCollection

path: mhctools/parsing.py
code:

    def parse_stdout(
            stdout,
            prediction_method_name,
            sequence_key_mapping,
            key_index,
            offset_index,
            peptide_index,
            allele_index,
            ic50_index,
            rank_index,
            log_ic50_index,
            ignored_value_indices={},
            transforms={}):
        """
        Generic function for parsing any NetMHC* output, given expected indices
        of values of interest.

        Parameters
        ----------
        ignored_value_indices : dict
            Map from values to the positions we'll ignore them at. See clean_fields.
        transforms : dict
            Map from field index to a transform function to be applied to values in
            that field. See clean_fields.

        Returns BindingPredictionCollection
        """
        binding_predictions = []
        for fields in split_stdout_lines(stdout):
            fields = clean_fields(fields, ignored_value_indices, transforms)

            offset = int(fields[offset_index])
            peptide = str(fields[peptide_index])
            allele = str(fields[allele_index])
            ic50 = float(fields[ic50_index])
            rank = float(fields[rank_index]) if rank_index else 0.0
            log_ic50 = float(fields[log_ic50_index])

            key = str(fields[key_index])
            if sequence_key_mapping:
                original_key = sequence_key_mapping[key]
            else:
                # if sequence_key_mapping isn't provided then let's assume it's the
                # identity function
                original_key = key

            binding_predictions.append(BindingPrediction(
                source_sequence_name=original_key,
                offset=offset,
                peptide=peptide,
                allele=normalize_allele_name(allele),
                affinity=ic50,
                percentile_rank=rank,
                log_affinity=log_ic50,
                prediction_method_name=prediction_method_name))
        return binding_predictions
repo: openvax/mhctools
language: python
url: https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/parsing.py#L100-L157
sha: b329b4dccd60fae41296816b8cbfe15d6ca07e67

partition: valid
func_name: parse_netmhc3_stdout
docstring:

    Parse the output format for NetMHC 3.x, which looks like:

    ----------------------------------------------------------------------------------------------------
    pos  peptide     logscore  affinity(nM)  Bind Level  Protein Name  Allele
    ----------------------------------------------------------------------------------------------------
    0    SIINKFELL   0.437     441           WB          A1            HLA-A02:01
    --------------------------------------------------------------------------------------------------
    0    SIINKFFFQ   0.206     5411                      A2            HLA-A02:01
    1    IINKFFFQQ   0.128     12544                     A2            HLA-A02:01
    2    INKFFFQQQ   0.046     30406                     A2            HLA-A02:01
    3    NKFFFQQQQ   0.050     29197                     A2            HLA-A02:01
    --------------------------------------------------------------------------------------------------

path: mhctools/parsing.py
code:

    def parse_netmhc3_stdout(
            stdout,
            prediction_method_name="netmhc3",
            sequence_key_mapping=None):
        """
        Parse the output format for NetMHC 3.x (see the docstring sample above).
        """
        return parse_stdout(
            stdout=stdout,
            prediction_method_name=prediction_method_name,
            sequence_key_mapping=sequence_key_mapping,
            key_index=4,
            offset_index=0,
            peptide_index=1,
            allele_index=5,
            ic50_index=3,
            rank_index=None,
            log_ic50_index=2,
            ignored_value_indices={"WB": 4, "SB": 4})
repo: openvax/mhctools
language: python
url: https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/parsing.py#L159-L188
sha: b329b4dccd60fae41296816b8cbfe15d6ca07e67

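End to end, NetMHC 3.x output fed through this parser yields BindingPrediction objects. A usage sketch assuming mhctools and its dependencies are installed; the attribute names mirror the BindingPrediction constructor call in parse_stdout above:

    from mhctools.parsing import parse_netmhc3_stdout

    stdout = """
    ----------------------------------------------------------------------------
    0  SIINKFELL  0.437  441  WB  A1  HLA-A02:01
    """

    for prediction in parse_netmhc3_stdout(stdout):
        # "WB" at index 4 is dropped by clean_fields before indexing
        print(prediction.peptide, prediction.affinity,
              prediction.source_sequence_name)
    # SIINKFELL 441.0 A1
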
partition: valid
func_name: parse_netmhc4_stdout
docstring:

    # Peptide length 9
    # Rank Threshold for Strong binding peptides   0.500
    # Rank Threshold for Weak binding peptides   2.000
    -----------------------------------------------------------------------------------
    pos  HLA  peptide  Core  Offset  I_pos  I_len  D_pos  D_len  iCore  Identity  1-log50k(aff)  Affinity(nM)  %Rank  BindLevel
    -----------------------------------------------------------------------------------
    0    HLA-A0201  TMDKSELVQ  TMDKSELVQ  0  0  0  0  0  TMDKSELVQ  143B_BOVIN_P293  0.051  28676.59  43.00
    1    HLA-A0201  MDKSELVQK  MDKSELVQK  0  0  0  0  0  MDKSELVQK  143B_BOVIN_P293  0.030  36155.15  70.00
    2    HLA-A0201  DKSELVQKA  DKSELVQKA  0  0  0  0  0  DKSELVQKA  143B_BOVIN_P293  0.030  36188.42  70.00
    3    HLA-A0201  KSELVQKAK  KSELVQKAK  0  0  0  0  0  KSELVQKAK  143B_BOVIN_P293  0.032  35203.22  65.00
    4    HLA-A0201  SELVQKAKL  SELVQKAKL  0  0  0  0  0  SELVQKAKL  143B_BOVIN_P293  0.031  35670.99  65.00
    5    HLA-A0201  ELVQKAKLA  ELVQKAKLA  0  0  0  0  0  ELVQKAKLA  143B_BOVIN_P293  0.080  21113.07  29.00
    6    HLA-A0201  LVQKAKLAE  LVQKAKLAE  0  0  0  0  0  LVQKAKLAE  143B_BOVIN_P293  0.027  37257.56  75.00
    7    HLA-A0201  VQKAKLAEQ  VQKAKLAEQ  0  0  0  0  0  VQKAKLAEQ  143B_BOVIN_P293  0.040  32404.62  55.00
    219  HLA-A0201  QLLRDNLTL  QLLRDNLTL  0  0  0  0  0  QLLRDNLTL  143B_BOVIN_P293  0.527  167.10  1.50  <= WB
    -----------------------------------------------------------------------------------

path: mhctools/parsing.py
code:

    def parse_netmhc4_stdout(
            stdout,
            prediction_method_name="netmhc4",
            sequence_key_mapping=None):
        """
        Parse the output format for NetMHC 4.x (see the docstring sample above).
        """
        return parse_stdout(
            stdout=stdout,
            prediction_method_name=prediction_method_name,
            sequence_key_mapping=sequence_key_mapping,
            key_index=10,
            offset_index=0,
            peptide_index=2,
            allele_index=1,
            ic50_index=12,
            rank_index=13,
            log_ic50_index=11)
repo: openvax/mhctools
language: python
url: https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/parsing.py#L190-L222
sha: b329b4dccd60fae41296816b8cbfe15d6ca07e67

partition: valid
func_name: parse_netmhcpan28_stdout
docstring:

    # Affinity Threshold for Strong binding peptides  50.000',
    # Affinity Threshold for Weak binding peptides 500.000',
    # Rank Threshold for Strong binding peptides   0.500',
    # Rank Threshold for Weak binding peptides   2.000',
    ----------------------------------------------------------------------------
    pos  HLA          peptide    Identity  1-log50k(aff)  Affinity(nM)  %Rank  BindLevel
    ----------------------------------------------------------------------------
    0    HLA-A*02:03  QQQQQYFPE  id0       0.024          38534.25      50.00
    1    HLA-A*02:03  QQQQYFPEI  id0       0.278          2461.53       15.00
    2    HLA-A*02:03  QQQYFPEIT  id0       0.078          21511.53      50.00
    3    HLA-A*02:03  QQYFPEITH  id0       0.041          32176.84      50.00
    4    HLA-A*02:03  QYFPEITHI  id0       0.085          19847.09      32.00
    5    HLA-A*02:03  YFPEITHII  id0       0.231          4123.85       15.00
    6    HLA-A*02:03  FPEITHIII  id0       0.060          26134.28      50.00
    7    HLA-A*02:03  PEITHIIIA  id0       0.034          34524.63      50.00
    8    HLA-A*02:03  EITHIIIAS  id0       0.076          21974.48      50.00
    9    HLA-A*02:03  ITHIIIASS  id0       0.170          7934.26       32.00
    10   HLA-A*02:03  THIIIASSS  id0       0.040          32361.18      50.00
    11   HLA-A*02:03  HIIIASSSL  id0       0.515          189.74        4.00   <= WB

path: mhctools/parsing.py
code:

    def parse_netmhcpan28_stdout(
            stdout,
            prediction_method_name="netmhcpan",
            sequence_key_mapping=None):
        """
        Parse the output format for NetMHCpan 2.8 (see the docstring sample above).
        """
        check_stdout_error(stdout, "NetMHCpan-2.8")
        return parse_stdout(
            stdout=stdout,
            prediction_method_name=prediction_method_name,
            sequence_key_mapping=sequence_key_mapping,
            key_index=3,
            offset_index=0,
            peptide_index=2,
            allele_index=1,
            ic50_index=5,
            rank_index=6,
            log_ic50_index=4)
repo: openvax/mhctools
language: python
url: https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/parsing.py#L224-L260
sha: b329b4dccd60fae41296816b8cbfe15d6ca07e67

partition: valid
func_name: parse_netmhcpan3_stdout
docstring:

    # Rank Threshold for Strong binding peptides   0.500
    # Rank Threshold for Weak binding peptides   2.000
    -----------------------------------------------------------------------------------
    Pos  HLA          Peptide   Core       Of  Gp  Gl  Ip  Il  Icore     Identity     Score    Aff(nM)  %Rank  BindLevel
    -----------------------------------------------------------------------------------
    1    HLA-B*18:01  MFCQLAKT  MFCQLAKT-  0   0   0   8   1   MFCQLAKT  sequence0_0  0.02864  36676.0  45.00
    2    HLA-B*18:01  FCQLAKTY  F-CQLAKTY  0   0   0   1   1   FCQLAKTY  sequence0_0  0.07993  21056.5  13.00

path: mhctools/parsing.py
code:

    def parse_netmhcpan3_stdout(
            stdout,
            prediction_method_name="netmhcpan",
            sequence_key_mapping=None):
        """
        Parse the output format for NetMHCpan 3 (see the docstring sample above).
        """
        # the offset specified in "pos" (at index 0) is 1-based instead of 0-based. we adjust it to be
        # 0-based, as in all the other netmhc predictors supported by this library.
        transforms = {
            0: lambda x: int(x) - 1,
        }
        return parse_stdout(
            stdout=stdout,
            prediction_method_name=prediction_method_name,
            sequence_key_mapping=sequence_key_mapping,
            key_index=10,
            offset_index=0,
            peptide_index=2,
            allele_index=1,
            ic50_index=12,
            rank_index=13,
            log_ic50_index=11,
            transforms=transforms)
repo: openvax/mhctools
language: python
url: https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/parsing.py#L262-L292
sha: b329b4dccd60fae41296816b8cbfe15d6ca07e67

partition: valid
func_name: parse_netmhcpan4_stdout
docstring:

    # NetMHCpan version 4.0
    # Tmpdir made /var/folders/jc/fyrvcrcs3sb8g4mkdg6nl_t80000gp/T//netMHCpanuH3SvY
    # Input is in PEPTIDE format
    # Make binding affinity predictions
    HLA-A02:01 : Distance to training data 0.000 (using nearest neighbor HLA-A02:01)
    # Rank Threshold for Strong binding peptides   0.500
    # Rank Threshold for Weak binding peptides   2.000
    -----------------------------------------------------------------------------------
    Pos  HLA          Peptide   Core       Of  Gp  Gl  Ip  Il  Icore     Identity  Score      Aff(nM)  %Rank    BindLevel
    -----------------------------------------------------------------------------------
    1    HLA-A*02:01  SIINFEKL  SIINF-EKL  0   0   0   5   1   SIINFEKL  PEPLIST   0.1141340  14543.1  18.9860
    -----------------------------------------------------------------------------------
    Protein PEPLIST. Allele HLA-A*02:01. Number of high binders 0. Number of weak binders 0. Number of peptides 1

path: mhctools/parsing.py
code:

    def parse_netmhcpan4_stdout(
            stdout,
            prediction_method_name="netmhcpan",
            sequence_key_mapping=None):
        """
        Parse the output format for NetMHCpan 4.0 (see the docstring sample above).
        """
        # Output format is compatible with netmhcpan3, but netmhcpan 4.0 must be
        # called with the -BA flag, so it gives affinity predictions, not mass-spec
        # elution likelihoods.
        return parse_netmhcpan3_stdout(
            stdout=stdout,
            prediction_method_name=prediction_method_name,
            sequence_key_mapping=sequence_key_mapping)
|
def parse_netmhcpan4_stdout(
stdout,
prediction_method_name="netmhcpan",
sequence_key_mapping=None):
"""
# NetMHCpan version 4.0
# Tmpdir made /var/folders/jc/fyrvcrcs3sb8g4mkdg6nl_t80000gp/T//netMHCpanuH3SvY
# Input is in PEPTIDE format
# Make binding affinity predictions
HLA-A02:01 : Distance to training data 0.000 (using nearest neighbor HLA-A02:01)
# Rank Threshold for Strong binding peptides 0.500
# Rank Threshold for Weak binding peptides 2.000
-----------------------------------------------------------------------------------
Pos HLA Peptide Core Of Gp Gl Ip Il Icore Identity Score Aff(nM) %Rank BindLevel
-----------------------------------------------------------------------------------
1 HLA-A*02:01 SIINFEKL SIINF-EKL 0 0 0 5 1 SIINFEKL PEPLIST 0.1141340 14543.1 18.9860
-----------------------------------------------------------------------------------
Protein PEPLIST. Allele HLA-A*02:01. Number of high binders 0. Number of weak binders 0. Number of peptides 1
"""
# Output format is compatible with netmhcpan3, but netmhcpan 4.0 must be
# called with the -BA flag, so it gives affinity predictions, not mass-spec
# elution likelihoods.
return parse_netmhcpan3_stdout(
stdout=stdout,
prediction_method_name=prediction_method_name,
sequence_key_mapping=sequence_key_mapping)
|
[
"#",
"NetMHCpan",
"version",
"4",
".",
"0"
] |
openvax/mhctools
|
python
|
https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/parsing.py#L295-L326
|
[
"def",
"parse_netmhcpan4_stdout",
"(",
"stdout",
",",
"prediction_method_name",
"=",
"\"netmhcpan\"",
",",
"sequence_key_mapping",
"=",
"None",
")",
":",
"# Output format is compatible with netmhcpan3, but netmhcpan 4.0 must be",
"# called with the -BA flag, so it gives affinity predictions, not mass-spec",
"# elution likelihoods.",
"return",
"parse_netmhcpan3_stdout",
"(",
"stdout",
"=",
"stdout",
",",
"prediction_method_name",
"=",
"prediction_method_name",
",",
"sequence_key_mapping",
"=",
"sequence_key_mapping",
")"
] |
b329b4dccd60fae41296816b8cbfe15d6ca07e67
|
valid
|
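A minimal usage sketch for the row above. The output file name is hypothetical, and it assumes parse_stdout yields BindingPrediction objects carrying the peptide, affinity and percentile_rank fields that its callers use elsewhere in this document.

# Hypothetical capture of NetMHCpan 4.0 stdout (run with the -BA flag).
with open("netmhcpan4_output.txt") as f:
    raw_stdout = f.read()

predictions = parse_netmhcpan4_stdout(raw_stdout)
for p in predictions:
    # Field names assumed from the BindingPrediction constructor shown below.
    print(p.peptide, p.affinity, p.percentile_rank)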
_parse_iedb_response
|
Take the binding predictions returned by IEDB's web API
and parse them into a DataFrame
Expect response to look like:
allele seq_num start end length peptide ic50 percentile_rank
HLA-A*01:01 1 2 10 9 LYNTVATLY 2145.70 3.7
HLA-A*01:01 1 5 13 9 TVATLYCVH 2216.49 3.9
HLA-A*01:01 1 7 15 9 ATLYCVHQR 2635.42 5.1
HLA-A*01:01 1 4 12 9 NTVATLYCV 6829.04 20
HLA-A*01:01 1 1 9 9 SLYNTVATL 8032.38 24
HLA-A*01:01 1 8 16 9 TLYCVHQRI 8853.90 26
HLA-A*01:01 1 3 11 9 YNTVATLYC 9865.62 29
HLA-A*01:01 1 6 14 9 VATLYCVHQ 27575.71 58
HLA-A*01:01 1 10 18 9 YCVHQRIDV 48929.64 74
HLA-A*01:01 1 9 17 9 LYCVHQRID 50000.00 75
|
mhctools/iedb.py
|
def _parse_iedb_response(response):
"""Take the binding predictions returned by IEDB's web API
and parse them into a DataFrame
Expect response to look like:
allele seq_num start end length peptide ic50 percentile_rank
HLA-A*01:01 1 2 10 9 LYNTVATLY 2145.70 3.7
HLA-A*01:01 1 5 13 9 TVATLYCVH 2216.49 3.9
HLA-A*01:01 1 7 15 9 ATLYCVHQR 2635.42 5.1
HLA-A*01:01 1 4 12 9 NTVATLYCV 6829.04 20
HLA-A*01:01 1 1 9 9 SLYNTVATL 8032.38 24
HLA-A*01:01 1 8 16 9 TLYCVHQRI 8853.90 26
HLA-A*01:01 1 3 11 9 YNTVATLYC 9865.62 29
HLA-A*01:01 1 6 14 9 VATLYCVHQ 27575.71 58
HLA-A*01:01 1 10 18 9 YCVHQRIDV 48929.64 74
HLA-A*01:01 1 9 17 9 LYCVHQRID 50000.00 75
"""
if len(response) == 0:
raise ValueError("Empty response from IEDB!")
df = pd.read_csv(io.BytesIO(response), delim_whitespace=True, header=0)
    # pylint doesn't realize that df is a DataFrame, so tell it
assert type(df) == pd.DataFrame
df = pd.DataFrame(df)
if len(df) == 0:
raise ValueError(
"No binding predictions in response from IEDB: %s" % (response,))
required_columns = [
"allele",
"peptide",
"ic50",
"start",
"end",
]
for column in required_columns:
if column not in df.columns:
raise ValueError(
"Response from IEDB is missing '%s' column: %s. Full "
"response:\n%s" % (
column,
df.ix[0],
response))
# since IEDB has allowed multiple column names for percentile rank,
# we're defensively normalizing all of them to just 'rank'
df = df.rename(columns={
"percentile_rank": "rank",
"percentile rank": "rank"})
return df
|
def _parse_iedb_response(response):
"""Take the binding predictions returned by IEDB's web API
and parse them into a DataFrame
Expect response to look like:
allele seq_num start end length peptide ic50 percentile_rank
HLA-A*01:01 1 2 10 9 LYNTVATLY 2145.70 3.7
HLA-A*01:01 1 5 13 9 TVATLYCVH 2216.49 3.9
HLA-A*01:01 1 7 15 9 ATLYCVHQR 2635.42 5.1
HLA-A*01:01 1 4 12 9 NTVATLYCV 6829.04 20
HLA-A*01:01 1 1 9 9 SLYNTVATL 8032.38 24
HLA-A*01:01 1 8 16 9 TLYCVHQRI 8853.90 26
HLA-A*01:01 1 3 11 9 YNTVATLYC 9865.62 29
HLA-A*01:01 1 6 14 9 VATLYCVHQ 27575.71 58
HLA-A*01:01 1 10 18 9 YCVHQRIDV 48929.64 74
HLA-A*01:01 1 9 17 9 LYCVHQRID 50000.00 75
"""
if len(response) == 0:
raise ValueError("Empty response from IEDB!")
df = pd.read_csv(io.BytesIO(response), delim_whitespace=True, header=0)
    # pylint doesn't realize that df is a DataFrame, so tell it
assert type(df) == pd.DataFrame
df = pd.DataFrame(df)
if len(df) == 0:
raise ValueError(
"No binding predictions in response from IEDB: %s" % (response,))
required_columns = [
"allele",
"peptide",
"ic50",
"start",
"end",
]
for column in required_columns:
if column not in df.columns:
raise ValueError(
"Response from IEDB is missing '%s' column: %s. Full "
"response:\n%s" % (
column,
df.ix[0],
response))
# since IEDB has allowed multiple column names for percentile rank,
# we're defensively normalizing all of them to just 'rank'
df = df.rename(columns={
"percentile_rank": "rank",
"percentile rank": "rank"})
return df
|
[
"Take",
"the",
"binding",
"predictions",
"returned",
"by",
"IEDB",
"s",
"web",
"API",
"and",
"parse",
"them",
"into",
"a",
"DataFrame"
] |
openvax/mhctools
|
python
|
https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/iedb.py#L70-L118
|
[
"def",
"_parse_iedb_response",
"(",
"response",
")",
":",
"if",
"len",
"(",
"response",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"Empty response from IEDB!\"",
")",
"df",
"=",
"pd",
".",
"read_csv",
"(",
"io",
".",
"BytesIO",
"(",
"response",
")",
",",
"delim_whitespace",
"=",
"True",
",",
"header",
"=",
"0",
")",
"# pylint doesn't realize that df is a DataFrame, so tell is",
"assert",
"type",
"(",
"df",
")",
"==",
"pd",
".",
"DataFrame",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"df",
")",
"if",
"len",
"(",
"df",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"No binding predictions in response from IEDB: %s\"",
"%",
"(",
"response",
",",
")",
")",
"required_columns",
"=",
"[",
"\"allele\"",
",",
"\"peptide\"",
",",
"\"ic50\"",
",",
"\"start\"",
",",
"\"end\"",
",",
"]",
"for",
"column",
"in",
"required_columns",
":",
"if",
"column",
"not",
"in",
"df",
".",
"columns",
":",
"raise",
"ValueError",
"(",
"\"Response from IEDB is missing '%s' column: %s. Full \"",
"\"response:\\n%s\"",
"%",
"(",
"column",
",",
"df",
".",
"ix",
"[",
"0",
"]",
",",
"response",
")",
")",
"# since IEDB has allowed multiple column names for percentile rank,",
"# we're defensively normalizing all of them to just 'rank'",
"df",
"=",
"df",
".",
"rename",
"(",
"columns",
"=",
"{",
"\"percentile_rank\"",
":",
"\"rank\"",
",",
"\"percentile rank\"",
":",
"\"rank\"",
"}",
")",
"return",
"df"
] |
b329b4dccd60fae41296816b8cbfe15d6ca07e67
|
valid
|
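A self-contained sketch of the same whitespace-delimited parse and defensive column rename that _parse_iedb_response performs, using hypothetical bytes shaped like the response in the docstring above.

import io

import pandas as pd

# Hypothetical two-line response in the IEDB format shown above.
raw = (b"allele seq_num start end length peptide ic50 percentile_rank\n"
       b"HLA-A*01:01 1 2 10 9 LYNTVATLY 2145.70 3.7\n")

df = pd.read_csv(io.BytesIO(raw), delim_whitespace=True, header=0)
# Normalize either percentile-rank spelling to plain 'rank'.
df = df.rename(columns={"percentile_rank": "rank", "percentile rank": "rank"})
print(df[["allele", "peptide", "ic50", "rank"]])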
_query_iedb
|
Call into IEDB's web API for MHC binding prediction using request dictionary
with fields:
- "method"
- "length"
- "sequence_text"
- "allele"
Parse the response into a DataFrame.
|
mhctools/iedb.py
|
def _query_iedb(request_values, url):
"""
Call into IEDB's web API for MHC binding prediction using request dictionary
with fields:
- "method"
- "length"
- "sequence_text"
- "allele"
Parse the response into a DataFrame.
"""
data = urlencode(request_values)
req = Request(url, data.encode("ascii"))
response = urlopen(req).read()
return _parse_iedb_response(response)
|
def _query_iedb(request_values, url):
"""
Call into IEDB's web API for MHC binding prediction using request dictionary
with fields:
- "method"
- "length"
- "sequence_text"
- "allele"
Parse the response into a DataFrame.
"""
data = urlencode(request_values)
req = Request(url, data.encode("ascii"))
response = urlopen(req).read()
return _parse_iedb_response(response)
|
[
"Call",
"into",
"IEDB",
"s",
"web",
"API",
"for",
"MHC",
"binding",
"prediction",
"using",
"request",
"dictionary",
"with",
"fields",
":",
"-",
"method",
"-",
"length",
"-",
"sequence_text",
"-",
"allele"
] |
openvax/mhctools
|
python
|
https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/iedb.py#L120-L134
|
[
"def",
"_query_iedb",
"(",
"request_values",
",",
"url",
")",
":",
"data",
"=",
"urlencode",
"(",
"request_values",
")",
"req",
"=",
"Request",
"(",
"url",
",",
"data",
".",
"encode",
"(",
"\"ascii\"",
")",
")",
"response",
"=",
"urlopen",
"(",
"req",
")",
".",
"read",
"(",
")",
"return",
"_parse_iedb_response",
"(",
"response",
")"
] |
b329b4dccd60fae41296816b8cbfe15d6ca07e67
|
valid
|
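A hedged sketch of the request dictionary _query_iedb expects. The method value and endpoint URL are illustrative assumptions, not taken from this document, so the network call is left commented out.

request_values = {
    "method": "recommended",             # prediction method (assumed value)
    "length": 9,                         # peptide length
    "sequence_text": "SLYNTVATLYCVHQRIDV",
    "allele": "HLA-A*01:01",
}
# Illustrative endpoint only -- substitute the URL your predictor is configured with:
# df = _query_iedb(request_values, "http://tools-cluster-interface.iedb.org/tools_api/mhci/")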
IedbBasePredictor.predict_subsequences
|
Given a dictionary mapping unique keys to amino acid sequences,
run MHC binding predictions on all candidate epitopes extracted from
sequences and return an EpitopeCollection.
Parameters
----------
sequence_dict : dict or string
Mapping of protein identifiers to protein amino acid sequences.
If string then converted to dictionary.
|
mhctools/iedb.py
|
def predict_subsequences(self, sequence_dict, peptide_lengths=None):
"""Given a dictionary mapping unique keys to amino acid sequences,
run MHC binding predictions on all candidate epitopes extracted from
    sequences and return an EpitopeCollection.
    Parameters
    ----------
    sequence_dict : dict or string
Mapping of protein identifiers to protein amino acid sequences.
If string then converted to dictionary.
"""
sequence_dict = check_sequence_dictionary(sequence_dict)
peptide_lengths = self._check_peptide_lengths(peptide_lengths)
# take each mutated sequence in the dataframe
    # and generate MHC binding scores for all k-mer substrings
binding_predictions = []
expected_peptides = set([])
normalized_alleles = []
for key, amino_acid_sequence in sequence_dict.items():
for l in peptide_lengths:
for i in range(len(amino_acid_sequence) - l + 1):
expected_peptides.add(amino_acid_sequence[i:i + l])
self._check_peptide_inputs(expected_peptides)
for allele in self.alleles:
# IEDB MHCII predictor expects DRA1 to be omitted.
allele = normalize_allele_name(allele, omit_dra1=True)
normalized_alleles.append(allele)
request = self._get_iedb_request_params(
amino_acid_sequence, allele)
logger.info(
"Calling IEDB (%s) with request %s",
self.url,
request)
response_df = _query_iedb(request, self.url)
for _, row in response_df.iterrows():
binding_predictions.append(
BindingPrediction(
source_sequence_name=key,
offset=row['start'] - 1,
allele=row['allele'],
peptide=row['peptide'],
affinity=row['ic50'],
percentile_rank=row['rank'],
prediction_method_name="iedb-" + self.prediction_method))
self._check_results(
binding_predictions,
alleles=normalized_alleles,
peptides=expected_peptides)
return BindingPredictionCollection(binding_predictions)
|
def predict_subsequences(self, sequence_dict, peptide_lengths=None):
"""Given a dictionary mapping unique keys to amino acid sequences,
run MHC binding predictions on all candidate epitopes extracted from
    sequences and return an EpitopeCollection.
    Parameters
    ----------
    sequence_dict : dict or string
Mapping of protein identifiers to protein amino acid sequences.
If string then converted to dictionary.
"""
sequence_dict = check_sequence_dictionary(sequence_dict)
peptide_lengths = self._check_peptide_lengths(peptide_lengths)
# take each mutated sequence in the dataframe
# and general MHC binding scores for all k-mer substrings
binding_predictions = []
expected_peptides = set([])
normalized_alleles = []
for key, amino_acid_sequence in sequence_dict.items():
for l in peptide_lengths:
for i in range(len(amino_acid_sequence) - l + 1):
expected_peptides.add(amino_acid_sequence[i:i + l])
self._check_peptide_inputs(expected_peptides)
for allele in self.alleles:
# IEDB MHCII predictor expects DRA1 to be omitted.
allele = normalize_allele_name(allele, omit_dra1=True)
normalized_alleles.append(allele)
request = self._get_iedb_request_params(
amino_acid_sequence, allele)
logger.info(
"Calling IEDB (%s) with request %s",
self.url,
request)
response_df = _query_iedb(request, self.url)
for _, row in response_df.iterrows():
binding_predictions.append(
BindingPrediction(
source_sequence_name=key,
offset=row['start'] - 1,
allele=row['allele'],
peptide=row['peptide'],
affinity=row['ic50'],
percentile_rank=row['rank'],
prediction_method_name="iedb-" + self.prediction_method))
self._check_results(
binding_predictions,
alleles=normalized_alleles,
peptides=expected_peptides)
return BindingPredictionCollection(binding_predictions)
|
[
"Given",
"a",
"dictionary",
"mapping",
"unique",
"keys",
"to",
"amino",
"acid",
"sequences",
"run",
"MHC",
"binding",
"predictions",
"on",
"all",
"candidate",
"epitopes",
"extracted",
"from",
"sequences",
"and",
"return",
"a",
"EpitopeCollection",
"."
] |
openvax/mhctools
|
python
|
https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/iedb.py#L191-L241
|
[
"def",
"predict_subsequences",
"(",
"self",
",",
"sequence_dict",
",",
"peptide_lengths",
"=",
"None",
")",
":",
"sequence_dict",
"=",
"check_sequence_dictionary",
"(",
"sequence_dict",
")",
"peptide_lengths",
"=",
"self",
".",
"_check_peptide_lengths",
"(",
"peptide_lengths",
")",
"# take each mutated sequence in the dataframe",
"# and general MHC binding scores for all k-mer substrings",
"binding_predictions",
"=",
"[",
"]",
"expected_peptides",
"=",
"set",
"(",
"[",
"]",
")",
"normalized_alleles",
"=",
"[",
"]",
"for",
"key",
",",
"amino_acid_sequence",
"in",
"sequence_dict",
".",
"items",
"(",
")",
":",
"for",
"l",
"in",
"peptide_lengths",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"amino_acid_sequence",
")",
"-",
"l",
"+",
"1",
")",
":",
"expected_peptides",
".",
"add",
"(",
"amino_acid_sequence",
"[",
"i",
":",
"i",
"+",
"l",
"]",
")",
"self",
".",
"_check_peptide_inputs",
"(",
"expected_peptides",
")",
"for",
"allele",
"in",
"self",
".",
"alleles",
":",
"# IEDB MHCII predictor expects DRA1 to be omitted.",
"allele",
"=",
"normalize_allele_name",
"(",
"allele",
",",
"omit_dra1",
"=",
"True",
")",
"normalized_alleles",
".",
"append",
"(",
"allele",
")",
"request",
"=",
"self",
".",
"_get_iedb_request_params",
"(",
"amino_acid_sequence",
",",
"allele",
")",
"logger",
".",
"info",
"(",
"\"Calling IEDB (%s) with request %s\"",
",",
"self",
".",
"url",
",",
"request",
")",
"response_df",
"=",
"_query_iedb",
"(",
"request",
",",
"self",
".",
"url",
")",
"for",
"_",
",",
"row",
"in",
"response_df",
".",
"iterrows",
"(",
")",
":",
"binding_predictions",
".",
"append",
"(",
"BindingPrediction",
"(",
"source_sequence_name",
"=",
"key",
",",
"offset",
"=",
"row",
"[",
"'start'",
"]",
"-",
"1",
",",
"allele",
"=",
"row",
"[",
"'allele'",
"]",
",",
"peptide",
"=",
"row",
"[",
"'peptide'",
"]",
",",
"affinity",
"=",
"row",
"[",
"'ic50'",
"]",
",",
"percentile_rank",
"=",
"row",
"[",
"'rank'",
"]",
",",
"prediction_method_name",
"=",
"\"iedb-\"",
"+",
"self",
".",
"prediction_method",
")",
")",
"self",
".",
"_check_results",
"(",
"binding_predictions",
",",
"alleles",
"=",
"normalized_alleles",
",",
"peptides",
"=",
"expected_peptides",
")",
"return",
"BindingPredictionCollection",
"(",
"binding_predictions",
")"
] |
b329b4dccd60fae41296816b8cbfe15d6ca07e67
|
valid
|
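A usage sketch under stated assumptions: IedbMhcI below stands in for whichever IedbBasePredictor subclass your mhctools install exposes, and the call issues real HTTP requests to IEDB.

predictor = IedbMhcI(alleles=["HLA-A*02:01"])  # assumed subclass name
binding_predictions = predictor.predict_subsequences(
    {"seq0": "SLYNTVATLYCVHQRIDV"},
    peptide_lengths=[9])
for bp in binding_predictions:
    print(bp.source_sequence_name, bp.offset, bp.peptide, bp.affinity)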
get_args
|
Hackish way to get the arguments of a function
Parameters
----------
func : callable
Function to get the arguments from
skip : int, optional
Arguments to skip, defaults to 0; set it to 1 to skip the
``self`` argument of a method.
Returns
-------
tuple
Function's arguments
|
peony/utils.py
|
def get_args(func, skip=0):
"""
Hackish way to get the arguments of a function
Parameters
----------
func : callable
Function to get the arguments from
skip : int, optional
        Arguments to skip, defaults to 0; set it to 1 to skip the
``self`` argument of a method.
Returns
-------
tuple
Function's arguments
"""
code = getattr(func, '__code__', None)
if code is None:
code = func.__call__.__code__
return code.co_varnames[skip:code.co_argcount]
|
def get_args(func, skip=0):
"""
Hackish way to get the arguments of a function
Parameters
----------
func : callable
Function to get the arguments from
skip : int, optional
        Arguments to skip, defaults to 0; set it to 1 to skip the
``self`` argument of a method.
Returns
-------
tuple
Function's arguments
"""
code = getattr(func, '__code__', None)
if code is None:
code = func.__call__.__code__
return code.co_varnames[skip:code.co_argcount]
|
[
"Hackish",
"way",
"to",
"get",
"the",
"arguments",
"of",
"a",
"function"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/utils.py#L149-L171
|
[
"def",
"get_args",
"(",
"func",
",",
"skip",
"=",
"0",
")",
":",
"code",
"=",
"getattr",
"(",
"func",
",",
"'__code__'",
",",
"None",
")",
"if",
"code",
"is",
"None",
":",
"code",
"=",
"func",
".",
"__call__",
".",
"__code__",
"return",
"code",
".",
"co_varnames",
"[",
"skip",
":",
"code",
".",
"co_argcount",
"]"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
|
valid
|
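A quick, runnable illustration of get_args. Note that co_argcount excludes *args and **kwargs, so only the plain positional names come back.

def example(a, b, c=1, *args, **kwargs):
    pass

print(get_args(example))          # ('a', 'b', 'c')
print(get_args(example, skip=1))  # ('b', 'c') -- e.g. dropping ``self``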
log_error
|
log an exception and its traceback on the logger defined
Parameters
----------
msg : str, optional
A message to add to the error
exc_info : tuple
Information about the current exception
logger : logging.Logger
logger to use
|
peony/utils.py
|
def log_error(msg=None, exc_info=None, logger=None, **kwargs):
"""
log an exception and its traceback on the logger defined
Parameters
----------
msg : str, optional
A message to add to the error
exc_info : tuple
Information about the current exception
logger : logging.Logger
logger to use
"""
if logger is None:
logger = _logger
if not exc_info:
exc_info = sys.exc_info()
if msg is None:
msg = ""
exc_class, exc_msg, _ = exc_info
if all(info is not None for info in exc_info):
logger.error(msg, exc_info=exc_info)
|
def log_error(msg=None, exc_info=None, logger=None, **kwargs):
"""
log an exception and its traceback on the logger defined
Parameters
----------
msg : str, optional
A message to add to the error
exc_info : tuple
Information about the current exception
logger : logging.Logger
logger to use
"""
if logger is None:
logger = _logger
if not exc_info:
exc_info = sys.exc_info()
if msg is None:
msg = ""
exc_class, exc_msg, _ = exc_info
if all(info is not None for info in exc_info):
logger.error(msg, exc_info=exc_info)
|
[
"log",
"an",
"exception",
"and",
"its",
"traceback",
"on",
"the",
"logger",
"defined"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/utils.py#L174-L199
|
[
"def",
"log_error",
"(",
"msg",
"=",
"None",
",",
"exc_info",
"=",
"None",
",",
"logger",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"logger",
"is",
"None",
":",
"logger",
"=",
"_logger",
"if",
"not",
"exc_info",
":",
"exc_info",
"=",
"sys",
".",
"exc_info",
"(",
")",
"if",
"msg",
"is",
"None",
":",
"msg",
"=",
"\"\"",
"exc_class",
",",
"exc_msg",
",",
"_",
"=",
"exc_info",
"if",
"all",
"(",
"info",
"is",
"not",
"None",
"for",
"info",
"in",
"exc_info",
")",
":",
"logger",
".",
"error",
"(",
"msg",
",",
"exc_info",
"=",
"exc_info",
")"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
|
valid
|
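A small sketch of log_error in its intended habitat, an except block, where sys.exc_info() is populated.

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("example")

try:
    1 / 0
except ZeroDivisionError:
    # exc_info defaults to sys.exc_info(), so the traceback is picked up here.
    log_error(msg="division failed", logger=logger)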
get_media_metadata
|
Get all the file's metadata and read any kind of file object
Parameters
----------
data : bytes
first bytes of the file (the mimetype should be guessed from the
file headers)
path : str, optional
path to the file
Returns
-------
str
The mimetype of the media
str
The category of the media on Twitter
|
peony/utils.py
|
async def get_media_metadata(data, path=None):
"""
Get all the file's metadata and read any kind of file object
Parameters
----------
data : bytes
        first bytes of the file (the mimetype should be guessed from the
        file headers)
path : str, optional
path to the file
Returns
-------
str
The mimetype of the media
str
The category of the media on Twitter
"""
if isinstance(data, bytes):
media_type = await get_type(data, path)
else:
raise TypeError("get_metadata input must be a bytes")
media_category = get_category(media_type)
_logger.info("media_type: %s, media_category: %s" % (media_type,
media_category))
return media_type, media_category
|
async def get_media_metadata(data, path=None):
"""
Get all the file's metadata and read any kind of file object
Parameters
----------
data : bytes
        first bytes of the file (the mimetype should be guessed from the
        file headers)
path : str, optional
path to the file
Returns
-------
str
The mimetype of the media
str
The category of the media on Twitter
"""
if isinstance(data, bytes):
media_type = await get_type(data, path)
else:
raise TypeError("get_metadata input must be a bytes")
media_category = get_category(media_type)
_logger.info("media_type: %s, media_category: %s" % (media_type,
media_category))
return media_type, media_category
|
[
"Get",
"all",
"the",
"file",
"s",
"metadata",
"and",
"read",
"any",
"kind",
"of",
"file",
"object"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/utils.py#L202-L232
|
[
"async",
"def",
"get_media_metadata",
"(",
"data",
",",
"path",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"bytes",
")",
":",
"media_type",
"=",
"await",
"get_type",
"(",
"data",
",",
"path",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"get_metadata input must be a bytes\"",
")",
"media_category",
"=",
"get_category",
"(",
"media_type",
")",
"_logger",
".",
"info",
"(",
"\"media_type: %s, media_category: %s\"",
"%",
"(",
"media_type",
",",
"media_category",
")",
")",
"return",
"media_type",
",",
"media_category"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
|
valid
|
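A hedged async usage sketch; the file path is hypothetical, and only the leading bytes are read since get_type sniffs at most the first 1024 bytes.

import asyncio

async def main():
    with open("picture.jpg", "rb") as f:  # hypothetical file
        header = f.read(1024)
    media_type, media_category = await get_media_metadata(header, path="picture.jpg")
    print(media_type, media_category)

asyncio.get_event_loop().run_until_complete(main())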
get_size
|
Get the size of a file
Parameters
----------
media : file object
The file object of the media
Returns
-------
int
The size of the file
|
peony/utils.py
|
async def get_size(media):
"""
Get the size of a file
Parameters
----------
media : file object
The file object of the media
Returns
-------
int
The size of the file
"""
if hasattr(media, 'seek'):
await execute(media.seek(0, os.SEEK_END))
size = await execute(media.tell())
await execute(media.seek(0))
elif hasattr(media, 'headers'):
size = int(media.headers['Content-Length'])
elif isinstance(media, bytes):
size = len(media)
else:
raise TypeError("Can't get size of media of type:",
type(media).__name__)
_logger.info("media size: %dB" % size)
return size
|
async def get_size(media):
"""
Get the size of a file
Parameters
----------
media : file object
The file object of the media
Returns
-------
int
The size of the file
"""
if hasattr(media, 'seek'):
await execute(media.seek(0, os.SEEK_END))
size = await execute(media.tell())
await execute(media.seek(0))
elif hasattr(media, 'headers'):
size = int(media.headers['Content-Length'])
elif isinstance(media, bytes):
size = len(media)
else:
raise TypeError("Can't get size of media of type:",
type(media).__name__)
_logger.info("media size: %dB" % size)
return size
|
[
"Get",
"the",
"size",
"of",
"a",
"file"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/utils.py#L235-L262
|
[
"async",
"def",
"get_size",
"(",
"media",
")",
":",
"if",
"hasattr",
"(",
"media",
",",
"'seek'",
")",
":",
"await",
"execute",
"(",
"media",
".",
"seek",
"(",
"0",
",",
"os",
".",
"SEEK_END",
")",
")",
"size",
"=",
"await",
"execute",
"(",
"media",
".",
"tell",
"(",
")",
")",
"await",
"execute",
"(",
"media",
".",
"seek",
"(",
"0",
")",
")",
"elif",
"hasattr",
"(",
"media",
",",
"'headers'",
")",
":",
"size",
"=",
"int",
"(",
"media",
".",
"headers",
"[",
"'Content-Length'",
"]",
")",
"elif",
"isinstance",
"(",
"media",
",",
"bytes",
")",
":",
"size",
"=",
"len",
"(",
"media",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Can't get size of media of type:\"",
",",
"type",
"(",
"media",
")",
".",
"__name__",
")",
"_logger",
".",
"info",
"(",
"\"media size: %dB\"",
"%",
"size",
")",
"return",
"size"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
|
valid
|
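A runnable illustration of two of get_size's branches, using an in-memory file object and plain bytes.

import asyncio
import io

async def main():
    media = io.BytesIO(b"some media payload")
    print(await get_size(media))   # seek/tell branch -> 18
    print(await get_size(b"raw"))  # bytes branch -> 3

asyncio.get_event_loop().run_until_complete(main())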
get_type
|
Parameters
----------
media : file object
A file object of the image
path : str, optional
The path to the file
Returns
-------
str
The mimetype of the media
|
peony/utils.py
|
async def get_type(media, path=None):
"""
Parameters
----------
media : file object
A file object of the image
path : str, optional
The path to the file
Returns
-------
str
The mimetype of the media
"""
if magic:
if not media:
raise TypeError("Media data is empty")
_logger.debug("guessing mimetype using magic")
media_type = mime.from_buffer(media[:1024])
else:
media_type = None
if path:
_logger.debug("guessing mimetype using built-in module")
media_type = mime.guess_type(path)[0]
if media_type is None:
msg = ("Could not guess the mimetype of the media.\n"
"Please consider installing python-magic\n"
"(pip3 install peony-twitter[magic])")
raise RuntimeError(msg)
return media_type
|
async def get_type(media, path=None):
"""
Parameters
----------
media : file object
A file object of the image
path : str, optional
The path to the file
Returns
-------
str
The mimetype of the media
"""
if magic:
if not media:
raise TypeError("Media data is empty")
_logger.debug("guessing mimetype using magic")
media_type = mime.from_buffer(media[:1024])
else:
media_type = None
if path:
_logger.debug("guessing mimetype using built-in module")
media_type = mime.guess_type(path)[0]
if media_type is None:
msg = ("Could not guess the mimetype of the media.\n"
"Please consider installing python-magic\n"
"(pip3 install peony-twitter[magic])")
raise RuntimeError(msg)
return media_type
|
[
"Parameters",
"----------",
"media",
":",
"file",
"object",
"A",
"file",
"object",
"of",
"the",
"image",
"path",
":",
"str",
"optional",
"The",
"path",
"to",
"the",
"file"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/utils.py#L265-L299
|
[
"async",
"def",
"get_type",
"(",
"media",
",",
"path",
"=",
"None",
")",
":",
"if",
"magic",
":",
"if",
"not",
"media",
":",
"raise",
"TypeError",
"(",
"\"Media data is empty\"",
")",
"_logger",
".",
"debug",
"(",
"\"guessing mimetype using magic\"",
")",
"media_type",
"=",
"mime",
".",
"from_buffer",
"(",
"media",
"[",
":",
"1024",
"]",
")",
"else",
":",
"media_type",
"=",
"None",
"if",
"path",
":",
"_logger",
".",
"debug",
"(",
"\"guessing mimetype using built-in module\"",
")",
"media_type",
"=",
"mime",
".",
"guess_type",
"(",
"path",
")",
"[",
"0",
"]",
"if",
"media_type",
"is",
"None",
":",
"msg",
"=",
"(",
"\"Could not guess the mimetype of the media.\\n\"",
"\"Please consider installing python-magic\\n\"",
"\"(pip3 install peony-twitter[magic])\"",
")",
"raise",
"RuntimeError",
"(",
"msg",
")",
"return",
"media_type"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
|
valid
|
set_debug
|
activates error messages, useful during development
|
peony/utils.py
|
def set_debug():
""" activates error messages, useful during development """
logging.basicConfig(level=logging.WARNING)
peony.logger.setLevel(logging.DEBUG)
|
def set_debug():
""" activates error messages, useful during development """
logging.basicConfig(level=logging.WARNING)
peony.logger.setLevel(logging.DEBUG)
|
[
"activates",
"error",
"messages",
"useful",
"during",
"development"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/utils.py#L328-L331
|
[
"def",
"set_debug",
"(",
")",
":",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"WARNING",
")",
"peony",
".",
"logger",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
|
valid
|
BindingPrediction.clone_with_updates
|
Returns new BindingPrediction with updated fields
|
mhctools/binding_prediction.py
|
def clone_with_updates(self, **kwargs):
"""Returns new BindingPrediction with updated fields"""
fields_dict = self.to_dict()
fields_dict.update(kwargs)
return BindingPrediction(**fields_dict)
|
def clone_with_updates(self, **kwargs):
"""Returns new BindingPrediction with updated fields"""
fields_dict = self.to_dict()
fields_dict.update(kwargs)
return BindingPrediction(**fields_dict)
|
[
"Returns",
"new",
"BindingPrediction",
"with",
"updated",
"fields"
] |
openvax/mhctools
|
python
|
https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/binding_prediction.py#L109-L113
|
[
"def",
"clone_with_updates",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"fields_dict",
"=",
"self",
".",
"to_dict",
"(",
")",
"fields_dict",
".",
"update",
"(",
"kwargs",
")",
"return",
"BindingPrediction",
"(",
"*",
"*",
"fields_dict",
")"
] |
b329b4dccd60fae41296816b8cbfe15d6ca07e67
|
valid
|
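A hedged usage sketch, assuming BindingPrediction accepts the keyword fields its callers pass elsewhere in this document and that to_dict() round-trips them.

original = BindingPrediction(
    source_sequence_name="seq0",
    offset=0,
    allele="HLA-A*02:01",
    peptide="SIINFEKL",
    affinity=14543.1,
    percentile_rank=18.986,
    prediction_method_name="netmhcpan")

# Same prediction, with only the affinity overridden.
weaker = original.clone_with_updates(affinity=20000.0)
print(weaker.peptide, weaker.affinity)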
NetMHCpan
|
This function wraps NetMHCpan28, NetMHCpan3, and NetMHCpan4 to automatically detect
which class to use, with the help of the miraculous and strange '--version' netmhcpan argument.
|
mhctools/netmhc_pan.py
|
def NetMHCpan(
alleles,
program_name="netMHCpan",
process_limit=-1,
default_peptide_lengths=[9],
extra_flags=[]):
"""
    This function wraps NetMHCpan28, NetMHCpan3, and NetMHCpan4 to automatically detect
    which class to use, with the help of the miraculous and strange '--version' netmhcpan argument.
"""
# convert to str since Python3 returns a `bytes` object.
# The '_MHCTOOLS_VERSION_SNIFFING' here is meaningless, but it is necessary
# to call `netmhcpan --version` with some argument, otherwise it hangs.
with open(os.devnull, 'w') as devnull:
output = check_output([
program_name, "--version", "_MHCTOOLS_VERSION_SNIFFING"],
stderr=devnull)
output_str = output.decode("ascii", "ignore")
common_kwargs = {
"alleles": alleles,
"default_peptide_lengths": default_peptide_lengths,
"program_name": program_name,
"process_limit": process_limit,
"extra_flags": extra_flags,
}
if "NetMHCpan version 2.8" in output_str:
return NetMHCpan28(**common_kwargs)
elif "NetMHCpan version 3.0" in output_str:
return NetMHCpan3(**common_kwargs)
elif "NetMHCpan version 4.0" in output_str:
return NetMHCpan4(**common_kwargs)
else:
raise RuntimeError(
"This software expects NetMHCpan version 2.8, 3.0, or 4.0")
|
def NetMHCpan(
alleles,
program_name="netMHCpan",
process_limit=-1,
default_peptide_lengths=[9],
extra_flags=[]):
"""
    This function wraps NetMHCpan28, NetMHCpan3, and NetMHCpan4 to automatically detect
    which class to use, with the help of the miraculous and strange '--version' netmhcpan argument.
"""
# convert to str since Python3 returns a `bytes` object.
# The '_MHCTOOLS_VERSION_SNIFFING' here is meaningless, but it is necessary
# to call `netmhcpan --version` with some argument, otherwise it hangs.
with open(os.devnull, 'w') as devnull:
output = check_output([
program_name, "--version", "_MHCTOOLS_VERSION_SNIFFING"],
stderr=devnull)
output_str = output.decode("ascii", "ignore")
common_kwargs = {
"alleles": alleles,
"default_peptide_lengths": default_peptide_lengths,
"program_name": program_name,
"process_limit": process_limit,
"extra_flags": extra_flags,
}
if "NetMHCpan version 2.8" in output_str:
return NetMHCpan28(**common_kwargs)
elif "NetMHCpan version 3.0" in output_str:
return NetMHCpan3(**common_kwargs)
elif "NetMHCpan version 4.0" in output_str:
return NetMHCpan4(**common_kwargs)
else:
raise RuntimeError(
"This software expects NetMHCpan version 2.8, 3.0, or 4.0")
|
[
"This",
"function",
"wraps",
"NetMHCpan28",
"and",
"NetMHCpan3",
"to",
"automatically",
"detect",
"which",
"class",
"to",
"use",
"with",
"the",
"help",
"of",
"the",
"miraculous",
"and",
"strange",
"--",
"version",
"netmhcpan",
"argument",
"."
] |
openvax/mhctools
|
python
|
https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/netmhc_pan.py#L28-L64
|
[
"def",
"NetMHCpan",
"(",
"alleles",
",",
"program_name",
"=",
"\"netMHCpan\"",
",",
"process_limit",
"=",
"-",
"1",
",",
"default_peptide_lengths",
"=",
"[",
"9",
"]",
",",
"extra_flags",
"=",
"[",
"]",
")",
":",
"# convert to str since Python3 returns a `bytes` object.",
"# The '_MHCTOOLS_VERSION_SNIFFING' here is meaningless, but it is necessary",
"# to call `netmhcpan --version` with some argument, otherwise it hangs.",
"with",
"open",
"(",
"os",
".",
"devnull",
",",
"'w'",
")",
"as",
"devnull",
":",
"output",
"=",
"check_output",
"(",
"[",
"program_name",
",",
"\"--version\"",
",",
"\"_MHCTOOLS_VERSION_SNIFFING\"",
"]",
",",
"stderr",
"=",
"devnull",
")",
"output_str",
"=",
"output",
".",
"decode",
"(",
"\"ascii\"",
",",
"\"ignore\"",
")",
"common_kwargs",
"=",
"{",
"\"alleles\"",
":",
"alleles",
",",
"\"default_peptide_lengths\"",
":",
"default_peptide_lengths",
",",
"\"program_name\"",
":",
"program_name",
",",
"\"process_limit\"",
":",
"process_limit",
",",
"\"extra_flags\"",
":",
"extra_flags",
",",
"}",
"if",
"\"NetMHCpan version 2.8\"",
"in",
"output_str",
":",
"return",
"NetMHCpan28",
"(",
"*",
"*",
"common_kwargs",
")",
"elif",
"\"NetMHCpan version 3.0\"",
"in",
"output_str",
":",
"return",
"NetMHCpan3",
"(",
"*",
"*",
"common_kwargs",
")",
"elif",
"\"NetMHCpan version 4.0\"",
"in",
"output_str",
":",
"return",
"NetMHCpan4",
"(",
"*",
"*",
"common_kwargs",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"This software expects NetMHCpan version 2.8, 3.0, or 4.0\"",
")"
] |
b329b4dccd60fae41296816b8cbfe15d6ca07e67
|
valid
|
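A usage sketch that only works with a local netMHCpan binary on PATH, since the wrapper shells out to `netMHCpan --version` at construction time.

predictor = NetMHCpan(alleles=["HLA-A*02:01"])
# One of NetMHCpan28, NetMHCpan3 or NetMHCpan4, depending on the binary found.
print(type(predictor).__name__)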
IdIterator.get_data
|
Get the data from the response
|
peony/iterators.py
|
def get_data(self, response):
""" Get the data from the response """
if self._response_list:
return response
elif self._response_key is None:
if hasattr(response, "items"):
for key, data in response.items():
if (hasattr(data, "__getitem__")
and not hasattr(data, "items")
and len(data) > 0
and 'id' in data[0]):
self._response_key = key
return data
else:
self._response_list = True
return response
else:
return response[self._response_key]
raise NoDataFound(response=response, url=self.request.get_url())
|
def get_data(self, response):
""" Get the data from the response """
if self._response_list:
return response
elif self._response_key is None:
if hasattr(response, "items"):
for key, data in response.items():
if (hasattr(data, "__getitem__")
and not hasattr(data, "items")
and len(data) > 0
and 'id' in data[0]):
self._response_key = key
return data
else:
self._response_list = True
return response
else:
return response[self._response_key]
raise NoDataFound(response=response, url=self.request.get_url())
|
[
"Get",
"the",
"data",
"from",
"the",
"response"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/iterators.py#L72-L91
|
[
"def",
"get_data",
"(",
"self",
",",
"response",
")",
":",
"if",
"self",
".",
"_response_list",
":",
"return",
"response",
"elif",
"self",
".",
"_response_key",
"is",
"None",
":",
"if",
"hasattr",
"(",
"response",
",",
"\"items\"",
")",
":",
"for",
"key",
",",
"data",
"in",
"response",
".",
"items",
"(",
")",
":",
"if",
"(",
"hasattr",
"(",
"data",
",",
"\"__getitem__\"",
")",
"and",
"not",
"hasattr",
"(",
"data",
",",
"\"items\"",
")",
"and",
"len",
"(",
"data",
")",
">",
"0",
"and",
"'id'",
"in",
"data",
"[",
"0",
"]",
")",
":",
"self",
".",
"_response_key",
"=",
"key",
"return",
"data",
"else",
":",
"self",
".",
"_response_list",
"=",
"True",
"return",
"response",
"else",
":",
"return",
"response",
"[",
"self",
".",
"_response_key",
"]",
"raise",
"NoDataFound",
"(",
"response",
"=",
"response",
",",
"url",
"=",
"self",
".",
"request",
".",
"get_url",
"(",
")",
")"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
|
valid
|
SinceIdIterator.call_on_response
|
Try to fill the gaps and strip last tweet from the response
if its id is that of the first tweet of the last response
Parameters
----------
data : list
The response data
|
peony/iterators.py
|
async def call_on_response(self, data):
"""
Try to fill the gaps and strip last tweet from the response
if its id is that of the first tweet of the last response
Parameters
----------
data : list
The response data
"""
since_id = self.kwargs.get(self.param, 0) + 1
if self.fill_gaps:
if data[-1]['id'] != since_id:
max_id = data[-1]['id'] - 1
responses = with_max_id(self.request(**self.kwargs,
max_id=max_id))
async for tweets in responses:
data.extend(tweets)
if data[-1]['id'] == self.last_id:
data = data[:-1]
if not data and not self.force:
raise StopAsyncIteration
await self.set_param(data)
|
async def call_on_response(self, data):
"""
Try to fill the gaps and strip last tweet from the response
if its id is that of the first tweet of the last response
Parameters
----------
data : list
The response data
"""
since_id = self.kwargs.get(self.param, 0) + 1
if self.fill_gaps:
if data[-1]['id'] != since_id:
max_id = data[-1]['id'] - 1
responses = with_max_id(self.request(**self.kwargs,
max_id=max_id))
async for tweets in responses:
data.extend(tweets)
if data[-1]['id'] == self.last_id:
data = data[:-1]
if not data and not self.force:
raise StopAsyncIteration
await self.set_param(data)
|
[
"Try",
"to",
"fill",
"the",
"gaps",
"and",
"strip",
"last",
"tweet",
"from",
"the",
"response",
"if",
"its",
"id",
"is",
"that",
"of",
"the",
"first",
"tweet",
"of",
"the",
"last",
"response"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/iterators.py#L150-L177
|
[
"async",
"def",
"call_on_response",
"(",
"self",
",",
"data",
")",
":",
"since_id",
"=",
"self",
".",
"kwargs",
".",
"get",
"(",
"self",
".",
"param",
",",
"0",
")",
"+",
"1",
"if",
"self",
".",
"fill_gaps",
":",
"if",
"data",
"[",
"-",
"1",
"]",
"[",
"'id'",
"]",
"!=",
"since_id",
":",
"max_id",
"=",
"data",
"[",
"-",
"1",
"]",
"[",
"'id'",
"]",
"-",
"1",
"responses",
"=",
"with_max_id",
"(",
"self",
".",
"request",
"(",
"*",
"*",
"self",
".",
"kwargs",
",",
"max_id",
"=",
"max_id",
")",
")",
"async",
"for",
"tweets",
"in",
"responses",
":",
"data",
".",
"extend",
"(",
"tweets",
")",
"if",
"data",
"[",
"-",
"1",
"]",
"[",
"'id'",
"]",
"==",
"self",
".",
"last_id",
":",
"data",
"=",
"data",
"[",
":",
"-",
"1",
"]",
"if",
"not",
"data",
"and",
"not",
"self",
".",
"force",
":",
"raise",
"StopAsyncIteration",
"await",
"self",
".",
"set_param",
"(",
"data",
")"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
|
valid
|
get_oauth_token
|
Get a temporary oauth token
Parameters
----------
consumer_key : str
Your consumer key
consumer_secret : str
Your consumer secret
callback_uri : str, optional
Callback uri, defaults to 'oob'
Returns
-------
dict
Temporary tokens
|
peony/oauth_dance.py
|
async def get_oauth_token(consumer_key, consumer_secret, callback_uri="oob"):
"""
Get a temporary oauth token
Parameters
----------
consumer_key : str
Your consumer key
consumer_secret : str
Your consumer secret
callback_uri : str, optional
Callback uri, defaults to 'oob'
Returns
-------
dict
Temporary tokens
"""
client = BasePeonyClient(consumer_key=consumer_key,
consumer_secret=consumer_secret,
api_version="",
suffix="")
response = await client.api.oauth.request_token.post(
_suffix="",
oauth_callback=callback_uri
)
return parse_token(response)
|
async def get_oauth_token(consumer_key, consumer_secret, callback_uri="oob"):
"""
Get a temporary oauth token
Parameters
----------
consumer_key : str
Your consumer key
consumer_secret : str
Your consumer secret
callback_uri : str, optional
Callback uri, defaults to 'oob'
Returns
-------
dict
Temporary tokens
"""
client = BasePeonyClient(consumer_key=consumer_key,
consumer_secret=consumer_secret,
api_version="",
suffix="")
response = await client.api.oauth.request_token.post(
_suffix="",
oauth_callback=callback_uri
)
return parse_token(response)
|
[
"Get",
"a",
"temporary",
"oauth",
"token"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/oauth_dance.py#L10-L39
|
[
"async",
"def",
"get_oauth_token",
"(",
"consumer_key",
",",
"consumer_secret",
",",
"callback_uri",
"=",
"\"oob\"",
")",
":",
"client",
"=",
"BasePeonyClient",
"(",
"consumer_key",
"=",
"consumer_key",
",",
"consumer_secret",
"=",
"consumer_secret",
",",
"api_version",
"=",
"\"\"",
",",
"suffix",
"=",
"\"\"",
")",
"response",
"=",
"await",
"client",
".",
"api",
".",
"oauth",
".",
"request_token",
".",
"post",
"(",
"_suffix",
"=",
"\"\"",
",",
"oauth_callback",
"=",
"callback_uri",
")",
"return",
"parse_token",
"(",
"response",
")"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
|
valid
|
get_oauth_verifier
|
Open authorize page in a browser,
print the url if it didn't work
Arguments
---------
oauth_token : str
The oauth token received in :func:`get_oauth_token`
Returns
-------
str
The PIN entered by the user
|
peony/oauth_dance.py
|
async def get_oauth_verifier(oauth_token):
"""
Open authorize page in a browser,
print the url if it didn't work
Arguments
---------
oauth_token : str
The oauth token received in :func:`get_oauth_token`
Returns
-------
str
The PIN entered by the user
"""
url = "https://api.twitter.com/oauth/authorize?oauth_token=" + oauth_token
try:
browser = webbrowser.open(url)
await asyncio.sleep(2)
if not browser:
raise RuntimeError
except RuntimeError:
print("could not open a browser\ngo here to enter your PIN: " + url)
verifier = input("\nEnter your PIN: ")
return verifier
|
async def get_oauth_verifier(oauth_token):
"""
Open authorize page in a browser,
print the url if it didn't work
Arguments
---------
oauth_token : str
The oauth token received in :func:`get_oauth_token`
Returns
-------
str
The PIN entered by the user
"""
url = "https://api.twitter.com/oauth/authorize?oauth_token=" + oauth_token
try:
browser = webbrowser.open(url)
await asyncio.sleep(2)
if not browser:
raise RuntimeError
except RuntimeError:
print("could not open a browser\ngo here to enter your PIN: " + url)
verifier = input("\nEnter your PIN: ")
return verifier
|
[
"Open",
"authorize",
"page",
"in",
"a",
"browser",
"print",
"the",
"url",
"if",
"it",
"didn",
"t",
"work"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/oauth_dance.py#L42-L69
|
[
"async",
"def",
"get_oauth_verifier",
"(",
"oauth_token",
")",
":",
"url",
"=",
"\"https://api.twitter.com/oauth/authorize?oauth_token=\"",
"+",
"oauth_token",
"try",
":",
"browser",
"=",
"webbrowser",
".",
"open",
"(",
"url",
")",
"await",
"asyncio",
".",
"sleep",
"(",
"2",
")",
"if",
"not",
"browser",
":",
"raise",
"RuntimeError",
"except",
"RuntimeError",
":",
"print",
"(",
"\"could not open a browser\\ngo here to enter your PIN: \"",
"+",
"url",
")",
"verifier",
"=",
"input",
"(",
"\"\\nEnter your PIN: \"",
")",
"return",
"verifier"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
|
valid
|
get_access_token
|
get the access token of the user
Parameters
----------
consumer_key : str
Your consumer key
consumer_secret : str
Your consumer secret
oauth_token : str
OAuth token from :func:`get_oauth_token`
oauth_token_secret : str
OAuth token secret from :func:`get_oauth_token`
oauth_verifier : str
OAuth verifier from :func:`get_oauth_verifier`
Returns
-------
dict
Access tokens
|
peony/oauth_dance.py
|
async def get_access_token(consumer_key, consumer_secret,
oauth_token, oauth_token_secret,
oauth_verifier, **kwargs):
"""
get the access token of the user
Parameters
----------
consumer_key : str
Your consumer key
consumer_secret : str
Your consumer secret
oauth_token : str
OAuth token from :func:`get_oauth_token`
oauth_token_secret : str
OAuth token secret from :func:`get_oauth_token`
oauth_verifier : str
OAuth verifier from :func:`get_oauth_verifier`
Returns
-------
dict
Access tokens
"""
client = BasePeonyClient(consumer_key=consumer_key,
consumer_secret=consumer_secret,
access_token=oauth_token,
access_token_secret=oauth_token_secret,
api_version="",
suffix="")
response = await client.api.oauth.access_token.get(
_suffix="",
oauth_verifier=oauth_verifier
)
return parse_token(response)
|
async def get_access_token(consumer_key, consumer_secret,
oauth_token, oauth_token_secret,
oauth_verifier, **kwargs):
"""
get the access token of the user
Parameters
----------
consumer_key : str
Your consumer key
consumer_secret : str
Your consumer secret
oauth_token : str
OAuth token from :func:`get_oauth_token`
oauth_token_secret : str
OAuth token secret from :func:`get_oauth_token`
oauth_verifier : str
OAuth verifier from :func:`get_oauth_verifier`
Returns
-------
dict
Access tokens
"""
client = BasePeonyClient(consumer_key=consumer_key,
consumer_secret=consumer_secret,
access_token=oauth_token,
access_token_secret=oauth_token_secret,
api_version="",
suffix="")
response = await client.api.oauth.access_token.get(
_suffix="",
oauth_verifier=oauth_verifier
)
return parse_token(response)
|
[
"get",
"the",
"access",
"token",
"of",
"the",
"user"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/oauth_dance.py#L72-L109
|
[
"async",
"def",
"get_access_token",
"(",
"consumer_key",
",",
"consumer_secret",
",",
"oauth_token",
",",
"oauth_token_secret",
",",
"oauth_verifier",
",",
"*",
"*",
"kwargs",
")",
":",
"client",
"=",
"BasePeonyClient",
"(",
"consumer_key",
"=",
"consumer_key",
",",
"consumer_secret",
"=",
"consumer_secret",
",",
"access_token",
"=",
"oauth_token",
",",
"access_token_secret",
"=",
"oauth_token_secret",
",",
"api_version",
"=",
"\"\"",
",",
"suffix",
"=",
"\"\"",
")",
"response",
"=",
"await",
"client",
".",
"api",
".",
"oauth",
".",
"access_token",
".",
"get",
"(",
"_suffix",
"=",
"\"\"",
",",
"oauth_verifier",
"=",
"oauth_verifier",
")",
"return",
"parse_token",
"(",
"response",
")"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
|
valid
|
async_oauth_dance
|
OAuth dance to get the user's access token
Parameters
----------
consumer_key : str
Your consumer key
consumer_secret : str
Your consumer secret
callback_uri : str
Callback uri, defaults to 'oob'
Returns
-------
dict
Access tokens
|
peony/oauth_dance.py
|
async def async_oauth_dance(consumer_key, consumer_secret, callback_uri="oob"):
"""
OAuth dance to get the user's access token
Parameters
----------
consumer_key : str
Your consumer key
consumer_secret : str
Your consumer secret
callback_uri : str
Callback uri, defaults to 'oob'
Returns
-------
dict
Access tokens
"""
token = await get_oauth_token(consumer_key, consumer_secret, callback_uri)
oauth_verifier = await get_oauth_verifier(token['oauth_token'])
token = await get_access_token(
consumer_key,
consumer_secret,
oauth_verifier=oauth_verifier,
**token
)
token = dict(
consumer_key=consumer_key,
consumer_secret=consumer_secret,
access_token=token['oauth_token'],
access_token_secret=token['oauth_token_secret']
)
return token
|
async def async_oauth_dance(consumer_key, consumer_secret, callback_uri="oob"):
"""
OAuth dance to get the user's access token
Parameters
----------
consumer_key : str
Your consumer key
consumer_secret : str
Your consumer secret
callback_uri : str
Callback uri, defaults to 'oob'
Returns
-------
dict
Access tokens
"""
token = await get_oauth_token(consumer_key, consumer_secret, callback_uri)
oauth_verifier = await get_oauth_verifier(token['oauth_token'])
token = await get_access_token(
consumer_key,
consumer_secret,
oauth_verifier=oauth_verifier,
**token
)
token = dict(
consumer_key=consumer_key,
consumer_secret=consumer_secret,
access_token=token['oauth_token'],
access_token_secret=token['oauth_token_secret']
)
return token
|
[
"OAuth",
"dance",
"to",
"get",
"the",
"user",
"s",
"access",
"token"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/oauth_dance.py#L112-L149
|
[
"async",
"def",
"async_oauth_dance",
"(",
"consumer_key",
",",
"consumer_secret",
",",
"callback_uri",
"=",
"\"oob\"",
")",
":",
"token",
"=",
"await",
"get_oauth_token",
"(",
"consumer_key",
",",
"consumer_secret",
",",
"callback_uri",
")",
"oauth_verifier",
"=",
"await",
"get_oauth_verifier",
"(",
"token",
"[",
"'oauth_token'",
"]",
")",
"token",
"=",
"await",
"get_access_token",
"(",
"consumer_key",
",",
"consumer_secret",
",",
"oauth_verifier",
"=",
"oauth_verifier",
",",
"*",
"*",
"token",
")",
"token",
"=",
"dict",
"(",
"consumer_key",
"=",
"consumer_key",
",",
"consumer_secret",
"=",
"consumer_secret",
",",
"access_token",
"=",
"token",
"[",
"'oauth_token'",
"]",
",",
"access_token_secret",
"=",
"token",
"[",
"'oauth_token_secret'",
"]",
")",
"return",
"token"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
|
valid
|
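A minimal driver for the full dance; the credentials are placeholders, and the call blocks on the PIN prompt shown in get_oauth_verifier above.

import asyncio

tokens = asyncio.get_event_loop().run_until_complete(
    async_oauth_dance("YOUR_CONSUMER_KEY", "YOUR_CONSUMER_SECRET"))
print(sorted(tokens))  # consumer and access keys/secrets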
parse_token
|
parse the responses containing the tokens
Parameters
----------
response : str
The response containing the tokens
Returns
-------
dict
The parsed tokens
|
peony/oauth_dance.py
|
def parse_token(response):
"""
parse the responses containing the tokens
Parameters
----------
response : str
The response containing the tokens
Returns
-------
dict
The parsed tokens
"""
items = response.split("&")
items = [item.split("=") for item in items]
return {key: value for key, value in items}
|
def parse_token(response):
"""
parse the responses containing the tokens
Parameters
----------
response : str
The response containing the tokens
Returns
-------
dict
The parsed tokens
"""
items = response.split("&")
items = [item.split("=") for item in items]
return {key: value for key, value in items}
|
[
"parse",
"the",
"responses",
"containing",
"the",
"tokens"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/oauth_dance.py#L152-L169
|
[
"def",
"parse_token",
"(",
"response",
")",
":",
"items",
"=",
"response",
".",
"split",
"(",
"\"&\"",
")",
"items",
"=",
"[",
"item",
".",
"split",
"(",
"\"=\"",
")",
"for",
"item",
"in",
"items",
"]",
"return",
"{",
"key",
":",
"value",
"for",
"key",
",",
"value",
"in",
"items",
"}"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
|
valid
|
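A runnable one-liner showing the split-on-& / split-on-= behaviour of parse_token.

response = "oauth_token=abc&oauth_token_secret=def&oauth_callback_confirmed=true"
print(parse_token(response))
# {'oauth_token': 'abc', 'oauth_token_secret': 'def', 'oauth_callback_confirmed': 'true'}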
oauth_dance
|
OAuth dance to get the user's access token
It calls async_oauth_dance and creates an event loop if not given
Parameters
----------
consumer_key : str
Your consumer key
consumer_secret : str
Your consumer secret
oauth_callback : str
Callback uri, defaults to 'oob'
loop : event loop
asyncio event loop
Returns
-------
dict
Access tokens
|
peony/oauth_dance.py
|
def oauth_dance(consumer_key, consumer_secret,
oauth_callback="oob", loop=None):
"""
OAuth dance to get the user's access token
    It calls async_oauth_dance and creates an event loop if not given
Parameters
----------
consumer_key : str
Your consumer key
consumer_secret : str
Your consumer secret
oauth_callback : str
Callback uri, defaults to 'oob'
loop : event loop
asyncio event loop
Returns
-------
dict
Access tokens
"""
loop = asyncio.get_event_loop() if loop is None else loop
coro = async_oauth_dance(consumer_key, consumer_secret, oauth_callback)
return loop.run_until_complete(coro)
|
def oauth_dance(consumer_key, consumer_secret,
oauth_callback="oob", loop=None):
"""
OAuth dance to get the user's access token
    It calls async_oauth_dance and creates an event loop if not given
Parameters
----------
consumer_key : str
Your consumer key
consumer_secret : str
Your consumer secret
oauth_callback : str
Callback uri, defaults to 'oob'
loop : event loop
asyncio event loop
Returns
-------
dict
Access tokens
"""
loop = asyncio.get_event_loop() if loop is None else loop
coro = async_oauth_dance(consumer_key, consumer_secret, oauth_callback)
return loop.run_until_complete(coro)
|
[
"OAuth",
"dance",
"to",
"get",
"the",
"user",
"s",
"access",
"token"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/oauth_dance.py#L172-L198
|
[
"def",
"oauth_dance",
"(",
"consumer_key",
",",
"consumer_secret",
",",
"oauth_callback",
"=",
"\"oob\"",
",",
"loop",
"=",
"None",
")",
":",
"loop",
"=",
"asyncio",
".",
"get_event_loop",
"(",
")",
"if",
"loop",
"is",
"None",
"else",
"loop",
"coro",
"=",
"async_oauth_dance",
"(",
"consumer_key",
",",
"consumer_secret",
",",
"oauth_callback",
")",
"return",
"loop",
".",
"run_until_complete",
"(",
"coro",
")"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
|
valid
|
oauth2_dance
|
oauth2 dance
Parameters
----------
consumer_key : str
Your consumer key
consumer_secret : str
Your consumer secret
loop : event loop, optional
event loop to use
Returns
-------
str
Bearer token
|
peony/oauth_dance.py
|
def oauth2_dance(consumer_key, consumer_secret, loop=None):
"""
oauth2 dance
Parameters
----------
consumer_key : str
Your consumer key
consumer_secret : str
Your consumer secret
loop : event loop, optional
event loop to use
Returns
-------
str
Bearer token
"""
loop = asyncio.get_event_loop() if loop is None else loop
client = BasePeonyClient(consumer_key=consumer_key,
consumer_secret=consumer_secret,
auth=oauth.OAuth2Headers)
loop.run_until_complete(client.headers.sign())
return client.headers.token
|
def oauth2_dance(consumer_key, consumer_secret, loop=None):
"""
oauth2 dance
Parameters
----------
consumer_key : str
Your consumer key
consumer_secret : str
Your consumer secret
loop : event loop, optional
event loop to use
Returns
-------
str
Bearer token
"""
loop = asyncio.get_event_loop() if loop is None else loop
client = BasePeonyClient(consumer_key=consumer_key,
consumer_secret=consumer_secret,
auth=oauth.OAuth2Headers)
loop.run_until_complete(client.headers.sign())
return client.headers.token
|
[
"oauth2",
"dance"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/oauth_dance.py#L201-L225
|
[
"def",
"oauth2_dance",
"(",
"consumer_key",
",",
"consumer_secret",
",",
"loop",
"=",
"None",
")",
":",
"loop",
"=",
"asyncio",
".",
"get_event_loop",
"(",
")",
"if",
"loop",
"is",
"None",
"else",
"loop",
"client",
"=",
"BasePeonyClient",
"(",
"consumer_key",
"=",
"consumer_key",
",",
"consumer_secret",
"=",
"consumer_secret",
",",
"auth",
"=",
"oauth",
".",
"OAuth2Headers",
")",
"loop",
".",
"run_until_complete",
"(",
"client",
".",
"headers",
".",
"sign",
"(",
")",
")",
"return",
"client",
".",
"headers",
".",
"token"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
|
valid
|
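A usage sketch with placeholder credentials; the returned value is the app-only bearer token string.

bearer_token = oauth2_dance("YOUR_CONSUMER_KEY", "YOUR_CONSUMER_SECRET")
print(bearer_token[:8] + "...")  # avoid printing the whole secret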
NetChop.predict
|
Return netChop predictions for each position in each sequence.
Parameters
-----------
sequences : list of string
Amino acid sequences to predict cleavage for
Returns
-----------
list of list of float
The i'th list corresponds to the i'th sequence. Each list gives
the cleavage probability for each position in the sequence.
|
mhctools/netchop.py
|
def predict(self, sequences):
"""
Return netChop predictions for each position in each sequence.
Parameters
-----------
sequences : list of string
Amino acid sequences to predict cleavage for
Returns
-----------
list of list of float
The i'th list corresponds to the i'th sequence. Each list gives
the cleavage probability for each position in the sequence.
"""
with tempfile.NamedTemporaryFile(suffix=".fsa", mode="w") as input_fd:
for (i, sequence) in enumerate(sequences):
input_fd.write("> %d\n" % i)
input_fd.write(sequence)
input_fd.write("\n")
input_fd.flush()
try:
output = subprocess.check_output(["netChop", input_fd.name])
except subprocess.CalledProcessError as e:
logging.error("Error calling netChop: %s:\n%s" % (e, e.output))
raise
parsed = self.parse_netchop(output)
assert len(parsed) == len(sequences), \
"Expected %d results but got %d" % (
len(sequences), len(parsed))
assert [len(x) for x in parsed] == [len(x) for x in sequences]
return parsed
|
def predict(self, sequences):
"""
Return netChop predictions for each position in each sequence.
Parameters
-----------
sequences : list of string
Amino acid sequences to predict cleavage for
Returns
-----------
list of list of float
The i'th list corresponds to the i'th sequence. Each list gives
the cleavage probability for each position in the sequence.
"""
with tempfile.NamedTemporaryFile(suffix=".fsa", mode="w") as input_fd:
for (i, sequence) in enumerate(sequences):
input_fd.write("> %d\n" % i)
input_fd.write(sequence)
input_fd.write("\n")
input_fd.flush()
try:
output = subprocess.check_output(["netChop", input_fd.name])
except subprocess.CalledProcessError as e:
logging.error("Error calling netChop: %s:\n%s" % (e, e.output))
raise
parsed = self.parse_netchop(output)
assert len(parsed) == len(sequences), \
"Expected %d results but got %d" % (
len(sequences), len(parsed))
assert [len(x) for x in parsed] == [len(x) for x in sequences]
return parsed
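A hedged usage sketch: it assumes the netChop binary is on PATH and that NetChop() can be constructed without arguments (an assumption, not shown in this snippet):
    predictor = NetChop()
    scores = predictor.predict(["SIINFEKLTEWTSSNVMEER"])
    # one cleavage probability per residue of the input sequence
    assert len(scores[0]) == len("SIINFEKLTEWTSSNVMEER")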
|
[
"Return",
"netChop",
"predictions",
"for",
"each",
"position",
"in",
"each",
"sequence",
"."
] |
openvax/mhctools
|
python
|
https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/netchop.py#L26-L59
|
[
"def",
"predict",
"(",
"self",
",",
"sequences",
")",
":",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"suffix",
"=",
"\".fsa\"",
",",
"mode",
"=",
"\"w\"",
")",
"as",
"input_fd",
":",
"for",
"(",
"i",
",",
"sequence",
")",
"in",
"enumerate",
"(",
"sequences",
")",
":",
"input_fd",
".",
"write",
"(",
"\"> %d\\n\"",
"%",
"i",
")",
"input_fd",
".",
"write",
"(",
"sequence",
")",
"input_fd",
".",
"write",
"(",
"\"\\n\"",
")",
"input_fd",
".",
"flush",
"(",
")",
"try",
":",
"output",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"\"netChop\"",
",",
"input_fd",
".",
"name",
"]",
")",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"e",
":",
"logging",
".",
"error",
"(",
"\"Error calling netChop: %s:\\n%s\"",
"%",
"(",
"e",
",",
"e",
".",
"output",
")",
")",
"raise",
"parsed",
"=",
"self",
".",
"parse_netchop",
"(",
"output",
")",
"assert",
"len",
"(",
"parsed",
")",
"==",
"len",
"(",
"sequences",
")",
",",
"\"Expected %d results but got %d\"",
"%",
"(",
"len",
"(",
"sequences",
")",
",",
"len",
"(",
"parsed",
")",
")",
"assert",
"[",
"len",
"(",
"x",
")",
"for",
"x",
"in",
"parsed",
"]",
"==",
"[",
"len",
"(",
"x",
")",
"for",
"x",
"in",
"sequences",
"]",
"return",
"parsed"
] |
b329b4dccd60fae41296816b8cbfe15d6ca07e67
|
valid
|
NetChop.parse_netchop
|
Parse netChop stdout.
|
mhctools/netchop.py
|
def parse_netchop(netchop_output):
"""
Parse netChop stdout.
"""
line_iterator = iter(netchop_output.decode().split("\n"))
scores = []
for line in line_iterator:
if "pos" in line and 'AA' in line and 'score' in line:
scores.append([])
if "----" not in next(line_iterator):
raise ValueError("Dashes expected")
line = next(line_iterator)
while '-------' not in line:
score = float(line.split()[3])
scores[-1].append(score)
line = next(line_iterator)
return scores
|
def parse_netchop(netchop_output):
"""
Parse netChop stdout.
"""
line_iterator = iter(netchop_output.decode().split("\n"))
scores = []
for line in line_iterator:
if "pos" in line and 'AA' in line and 'score' in line:
scores.append([])
if "----" not in next(line_iterator):
raise ValueError("Dashes expected")
line = next(line_iterator)
while '-------' not in line:
score = float(line.split()[3])
scores[-1].append(score)
line = next(line_iterator)
return scores
|
[
"Parse",
"netChop",
"stdout",
"."
] |
openvax/mhctools
|
python
|
https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/netchop.py#L62-L78
|
[
"def",
"parse_netchop",
"(",
"netchop_output",
")",
":",
"line_iterator",
"=",
"iter",
"(",
"netchop_output",
".",
"decode",
"(",
")",
".",
"split",
"(",
"\"\\n\"",
")",
")",
"scores",
"=",
"[",
"]",
"for",
"line",
"in",
"line_iterator",
":",
"if",
"\"pos\"",
"in",
"line",
"and",
"'AA'",
"in",
"line",
"and",
"'score'",
"in",
"line",
":",
"scores",
".",
"append",
"(",
"[",
"]",
")",
"if",
"\"----\"",
"not",
"in",
"next",
"(",
"line_iterator",
")",
":",
"raise",
"ValueError",
"(",
"\"Dashes expected\"",
")",
"line",
"=",
"next",
"(",
"line_iterator",
")",
"while",
"'-------'",
"not",
"in",
"line",
":",
"score",
"=",
"float",
"(",
"line",
".",
"split",
"(",
")",
"[",
"3",
"]",
")",
"scores",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"score",
")",
"line",
"=",
"next",
"(",
"line_iterator",
")",
"return",
"scores"
] |
b329b4dccd60fae41296816b8cbfe15d6ca07e67
|
valid
|
BindingPredictionCollection.to_dataframe
|
Converts collection of BindingPrediction objects to DataFrame
|
mhctools/binding_prediction_collection.py
|
def to_dataframe(
self,
columns=BindingPrediction.fields + ("length",)):
"""
Converts collection of BindingPrediction objects to DataFrame
"""
return pd.DataFrame.from_records(
[tuple([getattr(x, name) for name in columns]) for x in self],
columns=columns)
|
def to_dataframe(
self,
columns=BindingPrediction.fields + ("length",)):
"""
Converts collection of BindingPrediction objects to DataFrame
"""
return pd.DataFrame.from_records(
[tuple([getattr(x, name) for name in columns]) for x in self],
columns=columns)
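A usage sketch; `collection` stands for any BindingPredictionCollection returned by a predictor:
    df = collection.to_dataframe()
    print(df.head())  # one row per BindingPrediction, plus the extra "length" column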
|
[
"Converts",
"collection",
"of",
"BindingPrediction",
"objects",
"to",
"DataFrame"
] |
openvax/mhctools
|
python
|
https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/binding_prediction_collection.py#L23-L31
|
[
"def",
"to_dataframe",
"(",
"self",
",",
"columns",
"=",
"BindingPrediction",
".",
"fields",
"+",
"(",
"\"length\"",
",",
")",
")",
":",
"return",
"pd",
".",
"DataFrame",
".",
"from_records",
"(",
"[",
"tuple",
"(",
"[",
"getattr",
"(",
"x",
",",
"name",
")",
"for",
"name",
"in",
"columns",
"]",
")",
"for",
"x",
"in",
"self",
"]",
",",
"columns",
"=",
"columns",
")"
] |
b329b4dccd60fae41296816b8cbfe15d6ca07e67
|
valid
|
NetMHC
|
This function wraps NetMHC3 and NetMHC4 to automatically detect which class
to use. Currently based on running the '-h' command and looking for
discriminating substrings between the versions.
|
mhctools/netmhc.py
|
def NetMHC(alleles,
default_peptide_lengths=[9],
program_name="netMHC"):
"""
This function wraps NetMHC3 and NetMHC4 to automatically detect which class
to use. Currently based on running the '-h' command and looking for
discriminating substrings between the versions.
"""
# run NetMHC's help command and parse discriminating substrings out of
# the resulting str output
with open(os.devnull, 'w') as devnull:
help_output = check_output([program_name, "-h"], stderr=devnull)
help_output_str = help_output.decode("ascii", "ignore")
substring_to_netmhc_class = {
"-listMHC": NetMHC4,
"--Alleles": NetMHC3,
}
successes = []
for substring, netmhc_class in substring_to_netmhc_class.items():
if substring in help_output_str:
successes.append(netmhc_class)
if len(successes) > 1:
raise SystemError("Command %s is valid for multiple NetMHC versions. "
"This is likely an mhctools bug." % program_name)
if len(successes) == 0:
raise SystemError("Command %s is not a valid way of calling any NetMHC software."
% program_name)
netmhc_class = successes[0]
return netmhc_class(
alleles=alleles,
default_peptide_lengths=default_peptide_lengths,
program_name=program_name)
|
def NetMHC(alleles,
default_peptide_lengths=[9],
program_name="netMHC"):
"""
This function wraps NetMHC3 and NetMHC4 to automatically detect which class
to use. Currently based on running the '-h' command and looking for
discriminating substrings between the versions.
"""
# run NetMHC's help command and parse discriminating substrings out of
# the resulting str output
with open(os.devnull, 'w') as devnull:
help_output = check_output([program_name, "-h"], stderr=devnull)
help_output_str = help_output.decode("ascii", "ignore")
substring_to_netmhc_class = {
"-listMHC": NetMHC4,
"--Alleles": NetMHC3,
}
successes = []
for substring, netmhc_class in substring_to_netmhc_class.items():
if substring in help_output_str:
successes.append(netmhc_class)
if len(successes) > 1:
raise SystemError("Command %s is valid for multiple NetMHC versions. "
"This is likely an mhctools bug." % program_name)
if len(successes) == 0:
raise SystemError("Command %s is not a valid way of calling any NetMHC software."
% program_name)
netmhc_class = successes[0]
return netmhc_class(
alleles=alleles,
default_peptide_lengths=default_peptide_lengths,
program_name=program_name)
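A usage sketch, assuming a netMHC binary (version 3 or 4) is installed and on PATH:
    predictor = NetMHC(alleles=["HLA-A*02:01"], default_peptide_lengths=[9])
    # `predictor` is a NetMHC3 or NetMHC4 instance, selected from the `-h` output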
|
[
"This",
"function",
"wraps",
"NetMHC3",
"and",
"NetMHC4",
"to",
"automatically",
"detect",
"which",
"class",
"to",
"use",
".",
"Currently",
"based",
"on",
"running",
"the",
"-",
"h",
"command",
"and",
"looking",
"for",
"discriminating",
"substrings",
"between",
"the",
"versions",
"."
] |
openvax/mhctools
|
python
|
https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/netmhc.py#L23-L59
|
[
"def",
"NetMHC",
"(",
"alleles",
",",
"default_peptide_lengths",
"=",
"[",
"9",
"]",
",",
"program_name",
"=",
"\"netMHC\"",
")",
":",
"# run NetMHC's help command and parse discriminating substrings out of",
"# the resulting str output",
"with",
"open",
"(",
"os",
".",
"devnull",
",",
"'w'",
")",
"as",
"devnull",
":",
"help_output",
"=",
"check_output",
"(",
"[",
"program_name",
",",
"\"-h\"",
"]",
",",
"stderr",
"=",
"devnull",
")",
"help_output_str",
"=",
"help_output",
".",
"decode",
"(",
"\"ascii\"",
",",
"\"ignore\"",
")",
"substring_to_netmhc_class",
"=",
"{",
"\"-listMHC\"",
":",
"NetMHC4",
",",
"\"--Alleles\"",
":",
"NetMHC3",
",",
"}",
"successes",
"=",
"[",
"]",
"for",
"substring",
",",
"netmhc_class",
"in",
"substring_to_netmhc_class",
".",
"items",
"(",
")",
":",
"if",
"substring",
"in",
"help_output_str",
":",
"successes",
".",
"append",
"(",
"netmhc_class",
")",
"if",
"len",
"(",
"successes",
")",
">",
"1",
":",
"raise",
"SystemError",
"(",
"\"Command %s is valid for multiple NetMHC versions. \"",
"\"This is likely an mhctools bug.\"",
"%",
"program_name",
")",
"if",
"len",
"(",
"successes",
")",
"==",
"0",
":",
"raise",
"SystemError",
"(",
"\"Command %s is not a valid way of calling any NetMHC software.\"",
"%",
"program_name",
")",
"netmhc_class",
"=",
"successes",
"[",
"0",
"]",
"return",
"netmhc_class",
"(",
"alleles",
"=",
"alleles",
",",
"default_peptide_lengths",
"=",
"default_peptide_lengths",
",",
"program_name",
"=",
"program_name",
")"
] |
b329b4dccd60fae41296816b8cbfe15d6ca07e67
|
valid
|
MHCflurry.predict_peptides
|
Predict MHC affinity for peptides.
|
mhctools/mhcflurry.py
|
def predict_peptides(self, peptides):
"""
Predict MHC affinity for peptides.
"""
# importing locally to avoid slowing down CLI applications which
# don't use MHCflurry
from mhcflurry.encodable_sequences import EncodableSequences
binding_predictions = []
encodable_sequences = EncodableSequences.create(peptides)
for allele in self.alleles:
predictions_df = self.predictor.predict_to_dataframe(
encodable_sequences, allele=allele)
for (_, row) in predictions_df.iterrows():
binding_prediction = BindingPrediction(
allele=allele,
peptide=row.peptide,
affinity=row.prediction,
percentile_rank=(
row.prediction_percentile
if 'prediction_percentile' in row else nan),
prediction_method_name="mhcflurry"
)
binding_predictions.append(binding_prediction)
return BindingPredictionCollection(binding_predictions)
|
def predict_peptides(self, peptides):
"""
Predict MHC affinity for peptides.
"""
# importing locally to avoid slowing down CLI applications which
# don't use MHCflurry
from mhcflurry.encodable_sequences import EncodableSequences
binding_predictions = []
encodable_sequences = EncodableSequences.create(peptides)
for allele in self.alleles:
predictions_df = self.predictor.predict_to_dataframe(
encodable_sequences, allele=allele)
for (_, row) in predictions_df.iterrows():
binding_prediction = BindingPrediction(
allele=allele,
peptide=row.peptide,
affinity=row.prediction,
percentile_rank=(
row.prediction_percentile
if 'prediction_percentile' in row else nan),
prediction_method_name="mhcflurry"
)
binding_predictions.append(binding_prediction)
return BindingPredictionCollection(binding_predictions)
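A usage sketch, assuming the wrapper was built with alleles=["HLA-A*02:01"] and that the mhcflurry models have been downloaded:
    predictions = predictor.predict_peptides(["SIINFEKL", "SIINFEKD"])
    for p in predictions:
        print(p.allele, p.peptide, p.affinity)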
|
[
"Predict",
"MHC",
"affinity",
"for",
"peptides",
"."
] |
openvax/mhctools
|
python
|
https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/mhcflurry.py#L75-L100
|
[
"def",
"predict_peptides",
"(",
"self",
",",
"peptides",
")",
":",
"# importing locally to avoid slowing down CLI applications which",
"# don't use MHCflurry",
"from",
"mhcflurry",
".",
"encodable_sequences",
"import",
"EncodableSequences",
"binding_predictions",
"=",
"[",
"]",
"encodable_sequences",
"=",
"EncodableSequences",
".",
"create",
"(",
"peptides",
")",
"for",
"allele",
"in",
"self",
".",
"alleles",
":",
"predictions_df",
"=",
"self",
".",
"predictor",
".",
"predict_to_dataframe",
"(",
"encodable_sequences",
",",
"allele",
"=",
"allele",
")",
"for",
"(",
"_",
",",
"row",
")",
"in",
"predictions_df",
".",
"iterrows",
"(",
")",
":",
"binding_prediction",
"=",
"BindingPrediction",
"(",
"allele",
"=",
"allele",
",",
"peptide",
"=",
"row",
".",
"peptide",
",",
"affinity",
"=",
"row",
".",
"prediction",
",",
"percentile_rank",
"=",
"(",
"row",
".",
"prediction_percentile",
"if",
"'prediction_percentile'",
"in",
"row",
"else",
"nan",
")",
",",
"prediction_method_name",
"=",
"\"mhcflurry\"",
")",
"binding_predictions",
".",
"append",
"(",
"binding_prediction",
")",
"return",
"BindingPredictionCollection",
"(",
"binding_predictions",
")"
] |
b329b4dccd60fae41296816b8cbfe15d6ca07e67
|
valid
|
seq_to_str
|
Given a sequence convert it to a comma separated string.
If, however, the argument is a single object, return its string
representation.
|
mhctools/common.py
|
def seq_to_str(obj, sep=","):
"""
Given a sequence convert it to a comma separated string.
If, however, the argument is a single object, return its string
representation.
"""
if isinstance(obj, string_classes):
return obj
elif isinstance(obj, (list, tuple)):
return sep.join([str(x) for x in obj])
else:
return str(obj)
|
def seq_to_str(obj, sep=","):
"""
Given a sequence convert it to a comma separated string.
If, however, the argument is a single object, return its string
representation.
"""
if isinstance(obj, string_classes):
return obj
elif isinstance(obj, (list, tuple)):
return sep.join([str(x) for x in obj])
else:
return str(obj)
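Illustrative calls; each result follows directly from the branches above:
    seq_to_str("A*02:01")        # -> "A*02:01" (strings pass through unchanged)
    seq_to_str([9, 10, 11])      # -> "9,10,11"
    seq_to_str((1, 2), sep="-")  # -> "1-2"
    seq_to_str(9)                # -> "9"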
|
[
"Given",
"a",
"sequence",
"convert",
"it",
"to",
"a",
"comma",
"separated",
"string",
".",
"If",
"however",
"the",
"argument",
"is",
"a",
"single",
"object",
"return",
"its",
"string",
"representation",
"."
] |
openvax/mhctools
|
python
|
https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/common.py#L24-L35
|
[
"def",
"seq_to_str",
"(",
"obj",
",",
"sep",
"=",
"\",\"",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"string_classes",
")",
":",
"return",
"obj",
"elif",
"isinstance",
"(",
"obj",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"return",
"sep",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"obj",
"]",
")",
"else",
":",
"return",
"str",
"(",
"obj",
")"
] |
b329b4dccd60fae41296816b8cbfe15d6ca07e67
|
valid
|
convert
|
Convert the image to all the formats specified
Parameters
----------
img : PIL.Image.Image
The image to convert
formats : list
List of all the formats to use
Returns
-------
io.BytesIO
A file object containing the converted image
|
examples/upload.py
|
def convert(img, formats):
"""
Convert the image to all the formats specified
Parameters
----------
img : PIL.Image.Image
The image to convert
formats : list
List of all the formats to use
Returns
-------
io.BytesIO
A file object containing the converted image
"""
media = None
min_size = 0
for kwargs in formats:
f = io.BytesIO()
if img.mode == "RGBA" and kwargs['format'] != "PNG":
# convert to RGB if picture is too large as a png
# this implies that the png format is the first in `formats`
if min_size < 5 * 1024**2:
continue
else:
                img = img.convert('RGB')
img.save(f, **kwargs)
size = f.tell()
if media is None or size < min_size:
if media is not None:
media.close()
media = f
min_size = size
else:
f.close()
return media
|
def convert(img, formats):
"""
Convert the image to all the formats specified
Parameters
----------
img : PIL.Image.Image
The image to convert
formats : list
List of all the formats to use
Returns
-------
io.BytesIO
A file object containing the converted image
"""
media = None
min_size = 0
for kwargs in formats:
f = io.BytesIO()
if img.mode == "RGBA" and kwargs['format'] != "PNG":
# convert to RGB if picture is too large as a png
# this implies that the png format is the first in `formats`
if min_size < 5 * 1024**2:
continue
else:
                img = img.convert('RGB')
img.save(f, **kwargs)
size = f.tell()
if media is None or size < min_size:
if media is not None:
media.close()
media = f
min_size = size
else:
f.close()
return media
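A usage sketch; the PNG-first ordering matters because of the RGBA branch above, and the keyword sets shown are illustrative, not prescribed:
    import PIL.Image
    img = PIL.Image.open("picture.png")  # hypothetical input file
    formats = [{'format': "PNG"}, {'format': "JPEG", 'quality': 85}]
    media = convert(img, formats)  # io.BytesIO holding the smallest encoding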
|
[
"Convert",
"the",
"image",
"to",
"all",
"the",
"formats",
"specified",
"Parameters",
"----------",
"img",
":",
"PIL",
".",
"Image",
".",
"Image",
"The",
"image",
"to",
"convert",
"formats",
":",
"list",
"List",
"of",
"all",
"the",
"formats",
"to",
"use",
"Returns",
"-------",
"io",
".",
"BytesIO",
"A",
"file",
"object",
"containing",
"the",
"converted",
"image"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/examples/upload.py#L25-L64
|
[
"def",
"convert",
"(",
"img",
",",
"formats",
")",
":",
"media",
"=",
"None",
"min_size",
"=",
"0",
"for",
"kwargs",
"in",
"formats",
":",
"f",
"=",
"io",
".",
"BytesIO",
"(",
")",
"if",
"img",
".",
"mode",
"==",
"\"RGBA\"",
"and",
"kwargs",
"[",
"'format'",
"]",
"!=",
"\"PNG\"",
":",
"# convert to RGB if picture is too large as a png",
"# this implies that the png format is the first in `formats`",
"if",
"min_size",
"<",
"5",
"*",
"1024",
"**",
"2",
":",
"continue",
"else",
":",
"img",
".",
"convert",
"(",
"'RGB'",
")",
"img",
".",
"save",
"(",
"f",
",",
"*",
"*",
"kwargs",
")",
"size",
"=",
"f",
".",
"tell",
"(",
")",
"if",
"media",
"is",
"None",
"or",
"size",
"<",
"min_size",
":",
"if",
"media",
"is",
"not",
"None",
":",
"media",
".",
"close",
"(",
")",
"media",
"=",
"f",
"min_size",
"=",
"size",
"else",
":",
"f",
".",
"close",
"(",
")",
"return",
"media"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
|
valid
|
optimize_media
|
Optimize an image
Resize the picture to the ``max_size``, defaulting to the large
photo size of Twitter in :meth:`PeonyClient.upload_media` when
used with the ``optimize_media`` argument.
Parameters
----------
file_ : file object
the file object of an image
max_size : :obj:`tuple` or :obj:`list` of :obj:`int`
    a tuple in the format (width, height) which is the maximum size of
    the picture returned by this function
formats : :obj:`list` or :obj:`tuple` of :obj:`dict`
a list of all the formats to convert the picture to
Returns
-------
file
The smallest file created in this function
|
examples/upload.py
|
def optimize_media(file_, max_size, formats):
"""
Optimize an image
Resize the picture to the ``max_size``, defaulting to the large
photo size of Twitter in :meth:`PeonyClient.upload_media` when
used with the ``optimize_media`` argument.
Parameters
----------
file_ : file object
the file object of an image
max_size : :obj:`tuple` or :obj:`list` of :obj:`int`
        a tuple in the format (width, height) which is the maximum size of
        the picture returned by this function
    formats : :obj:`list` or :obj:`tuple` of :obj:`dict`
a list of all the formats to convert the picture to
Returns
-------
file
The smallest file created in this function
"""
if not PIL:
msg = ("Pillow must be installed to optimize a media\n"
"$ pip3 install Pillow")
raise RuntimeError(msg)
img = PIL.Image.open(file_)
# resize the picture (defaults to the 'large' photo size of Twitter
# in peony.PeonyClient.upload_media)
ratio = max(hw / max_hw for hw, max_hw in zip(img.size, max_size))
if ratio > 1:
size = tuple(int(hw // ratio) for hw in img.size)
img = img.resize(size, PIL.Image.ANTIALIAS)
media = convert(img, formats)
# do not close a file opened by the user
# only close if a filename was given
if not hasattr(file_, 'read'):
img.close()
return media
|
def optimize_media(file_, max_size, formats):
"""
Optimize an image
Resize the picture to the ``max_size``, defaulting to the large
photo size of Twitter in :meth:`PeonyClient.upload_media` when
used with the ``optimize_media`` argument.
Parameters
----------
file_ : file object
the file object of an image
max_size : :obj:`tuple` or :obj:`list` of :obj:`int`
        a tuple in the format (width, height) which is the maximum size of
        the picture returned by this function
    formats : :obj:`list` or :obj:`tuple` of :obj:`dict`
a list of all the formats to convert the picture to
Returns
-------
file
The smallest file created in this function
"""
if not PIL:
msg = ("Pillow must be installed to optimize a media\n"
"$ pip3 install Pillow")
raise RuntimeError(msg)
img = PIL.Image.open(file_)
# resize the picture (defaults to the 'large' photo size of Twitter
# in peony.PeonyClient.upload_media)
ratio = max(hw / max_hw for hw, max_hw in zip(img.size, max_size))
if ratio > 1:
size = tuple(int(hw // ratio) for hw in img.size)
img = img.resize(size, PIL.Image.ANTIALIAS)
media = convert(img, formats)
# do not close a file opened by the user
# only close if a filename was given
if not hasattr(file_, 'read'):
img.close()
return media
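A usage sketch; (2048, 2048) is an illustrative bound, not Twitter's documented limit:
    formats = [{'format': "PNG"}, {'format': "JPEG", 'quality': 85}]
    media = optimize_media("picture.png", max_size=(2048, 2048), formats=formats)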
|
[
"Optimize",
"an",
"image",
"Resize",
"the",
"picture",
"to",
"the",
"max_size",
"defaulting",
"to",
"the",
"large",
"photo",
"size",
"of",
"Twitter",
"in",
":",
"meth",
":",
"PeonyClient",
".",
"upload_media",
"when",
"used",
"with",
"the",
"optimize_media",
"argument",
".",
"Parameters",
"----------",
"file_",
":",
"file",
"object",
"the",
"file",
"object",
"of",
"an",
"image",
"max_size",
":",
":",
"obj",
":",
"tuple",
"or",
":",
"obj",
":",
"list",
"of",
":",
"obj",
":",
"int",
"a",
"tuple",
"in",
"the",
"format",
"(",
"width",
"height",
")",
"which",
"is",
"maximum",
"size",
"of",
"the",
"picture",
"returned",
"by",
"this",
"function",
"formats",
":",
":",
"obj",
"list",
"or",
":",
"obj",
":",
"tuple",
"of",
":",
"obj",
":",
"dict",
"a",
"list",
"of",
"all",
"the",
"formats",
"to",
"convert",
"the",
"picture",
"to",
"Returns",
"-------",
"file",
"The",
"smallest",
"file",
"created",
"in",
"this",
"function"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/examples/upload.py#L67-L109
|
[
"def",
"optimize_media",
"(",
"file_",
",",
"max_size",
",",
"formats",
")",
":",
"if",
"not",
"PIL",
":",
"msg",
"=",
"(",
"\"Pillow must be installed to optimize a media\\n\"",
"\"$ pip3 install Pillow\"",
")",
"raise",
"RuntimeError",
"(",
"msg",
")",
"img",
"=",
"PIL",
".",
"Image",
".",
"open",
"(",
"file_",
")",
"# resize the picture (defaults to the 'large' photo size of Twitter",
"# in peony.PeonyClient.upload_media)",
"ratio",
"=",
"max",
"(",
"hw",
"/",
"max_hw",
"for",
"hw",
",",
"max_hw",
"in",
"zip",
"(",
"img",
".",
"size",
",",
"max_size",
")",
")",
"if",
"ratio",
">",
"1",
":",
"size",
"=",
"tuple",
"(",
"int",
"(",
"hw",
"//",
"ratio",
")",
"for",
"hw",
"in",
"img",
".",
"size",
")",
"img",
"=",
"img",
".",
"resize",
"(",
"size",
",",
"PIL",
".",
"Image",
".",
"ANTIALIAS",
")",
"media",
"=",
"convert",
"(",
"img",
",",
"formats",
")",
"# do not close a file opened by the user",
"# only close if a filename was given",
"if",
"not",
"hasattr",
"(",
"file_",
",",
"'read'",
")",
":",
"img",
".",
"close",
"(",
")",
"return",
"media"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
|
valid
|
create_input_peptides_files
|
Creates one or more files containing one peptide per line,
returns names of files.
|
mhctools/input_file_formats.py
|
def create_input_peptides_files(
peptides,
max_peptides_per_file=None,
group_by_length=False):
"""
Creates one or more files containing one peptide per line,
returns names of files.
"""
if group_by_length:
peptide_lengths = {len(p) for p in peptides}
peptide_groups = {l: [] for l in peptide_lengths}
for p in peptides:
peptide_groups[len(p)].append(p)
else:
peptide_groups = {"": peptides}
file_names = []
for key, group in peptide_groups.items():
n_peptides = len(group)
if not max_peptides_per_file:
max_peptides_per_file = n_peptides
input_file = None
for i, p in enumerate(group):
if i % max_peptides_per_file == 0:
if input_file is not None:
file_names.append(input_file.name)
input_file.close()
input_file = make_writable_tempfile(
prefix_number=i // max_peptides_per_file,
prefix_name=key,
suffix=".txt")
input_file.write("%s\n" % p)
if input_file is not None:
file_names.append(input_file.name)
input_file.close()
return file_names
|
def create_input_peptides_files(
peptides,
max_peptides_per_file=None,
group_by_length=False):
"""
Creates one or more files containing one peptide per line,
returns names of files.
"""
if group_by_length:
peptide_lengths = {len(p) for p in peptides}
peptide_groups = {l: [] for l in peptide_lengths}
for p in peptides:
peptide_groups[len(p)].append(p)
else:
peptide_groups = {"": peptides}
file_names = []
for key, group in peptide_groups.items():
n_peptides = len(group)
if not max_peptides_per_file:
max_peptides_per_file = n_peptides
input_file = None
for i, p in enumerate(group):
if i % max_peptides_per_file == 0:
if input_file is not None:
file_names.append(input_file.name)
input_file.close()
input_file = make_writable_tempfile(
prefix_number=i // max_peptides_per_file,
prefix_name=key,
suffix=".txt")
input_file.write("%s\n" % p)
if input_file is not None:
file_names.append(input_file.name)
input_file.close()
return file_names
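A usage sketch; the exact temp-file paths come from make_writable_tempfile and vary per run:
    names = create_input_peptides_files(
        ["SIINFEKL", "SIINFEKD", "QLLNSWWCL"],
        max_peptides_per_file=2)
    # two files: the first holds 2 peptides, the second holds the remaining 1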
|
[
"Creates",
"one",
"or",
"more",
"files",
"containing",
"one",
"peptide",
"per",
"line",
"returns",
"names",
"of",
"files",
"."
] |
openvax/mhctools
|
python
|
https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/input_file_formats.py#L26-L61
|
[
"def",
"create_input_peptides_files",
"(",
"peptides",
",",
"max_peptides_per_file",
"=",
"None",
",",
"group_by_length",
"=",
"False",
")",
":",
"if",
"group_by_length",
":",
"peptide_lengths",
"=",
"{",
"len",
"(",
"p",
")",
"for",
"p",
"in",
"peptides",
"}",
"peptide_groups",
"=",
"{",
"l",
":",
"[",
"]",
"for",
"l",
"in",
"peptide_lengths",
"}",
"for",
"p",
"in",
"peptides",
":",
"peptide_groups",
"[",
"len",
"(",
"p",
")",
"]",
".",
"append",
"(",
"p",
")",
"else",
":",
"peptide_groups",
"=",
"{",
"\"\"",
":",
"peptides",
"}",
"file_names",
"=",
"[",
"]",
"for",
"key",
",",
"group",
"in",
"peptide_groups",
".",
"items",
"(",
")",
":",
"n_peptides",
"=",
"len",
"(",
"group",
")",
"if",
"not",
"max_peptides_per_file",
":",
"max_peptides_per_file",
"=",
"n_peptides",
"input_file",
"=",
"None",
"for",
"i",
",",
"p",
"in",
"enumerate",
"(",
"group",
")",
":",
"if",
"i",
"%",
"max_peptides_per_file",
"==",
"0",
":",
"if",
"input_file",
"is",
"not",
"None",
":",
"file_names",
".",
"append",
"(",
"input_file",
".",
"name",
")",
"input_file",
".",
"close",
"(",
")",
"input_file",
"=",
"make_writable_tempfile",
"(",
"prefix_number",
"=",
"i",
"//",
"max_peptides_per_file",
",",
"prefix_name",
"=",
"key",
",",
"suffix",
"=",
"\".txt\"",
")",
"input_file",
".",
"write",
"(",
"\"%s\\n\"",
"%",
"p",
")",
"if",
"input_file",
"is",
"not",
"None",
":",
"file_names",
".",
"append",
"(",
"input_file",
".",
"name",
")",
"input_file",
".",
"close",
"(",
")",
"return",
"file_names"
] |
b329b4dccd60fae41296816b8cbfe15d6ca07e67
|
valid
|
BasePredictor._check_peptide_lengths
|
If peptide lengths not specified, then try using the default
lengths associated with this predictor object. If those aren't
a valid non-empty sequence of integers, then raise an exception.
Otherwise return the peptide lengths.
|
mhctools/base_predictor.py
|
def _check_peptide_lengths(self, peptide_lengths=None):
"""
If peptide lengths not specified, then try using the default
lengths associated with this predictor object. If those aren't
a valid non-empty sequence of integers, then raise an exception.
Otherwise return the peptide lengths.
"""
if not peptide_lengths:
peptide_lengths = self.default_peptide_lengths
if not peptide_lengths:
raise ValueError(
("Must either provide 'peptide_lengths' argument "
"or set 'default_peptide_lengths"))
if isinstance(peptide_lengths, int):
peptide_lengths = [peptide_lengths]
require_iterable_of(peptide_lengths, int)
for peptide_length in peptide_lengths:
if (self.min_peptide_length is not None and
peptide_length < self.min_peptide_length):
raise ValueError(
"Invalid peptide length %d, shorter than min %d" % (
peptide_length,
self.min_peptide_length))
elif (self.max_peptide_length is not None and
peptide_length > self.max_peptide_length):
raise ValueError(
"Invalid peptide length %d, longer than max %d" % (
peptide_length,
self.max_peptide_length))
return peptide_lengths
|
def _check_peptide_lengths(self, peptide_lengths=None):
"""
If peptide lengths not specified, then try using the default
lengths associated with this predictor object. If those aren't
a valid non-empty sequence of integers, then raise an exception.
Otherwise return the peptide lengths.
"""
if not peptide_lengths:
peptide_lengths = self.default_peptide_lengths
if not peptide_lengths:
raise ValueError(
("Must either provide 'peptide_lengths' argument "
"or set 'default_peptide_lengths"))
if isinstance(peptide_lengths, int):
peptide_lengths = [peptide_lengths]
require_iterable_of(peptide_lengths, int)
for peptide_length in peptide_lengths:
if (self.min_peptide_length is not None and
peptide_length < self.min_peptide_length):
raise ValueError(
"Invalid peptide length %d, shorter than min %d" % (
peptide_length,
self.min_peptide_length))
elif (self.max_peptide_length is not None and
peptide_length > self.max_peptide_length):
raise ValueError(
"Invalid peptide length %d, longer than max %d" % (
peptide_length,
self.max_peptide_length))
return peptide_lengths
|
[
"If",
"peptide",
"lengths",
"not",
"specified",
"then",
"try",
"using",
"the",
"default",
"lengths",
"associated",
"with",
"this",
"predictor",
"object",
".",
"If",
"those",
"aren",
"t",
"a",
"valid",
"non",
"-",
"empty",
"sequence",
"of",
"integers",
"then",
"raise",
"an",
"exception",
".",
"Otherwise",
"return",
"the",
"peptide",
"lengths",
"."
] |
openvax/mhctools
|
python
|
https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/base_predictor.py#L103-L133
|
[
"def",
"_check_peptide_lengths",
"(",
"self",
",",
"peptide_lengths",
"=",
"None",
")",
":",
"if",
"not",
"peptide_lengths",
":",
"peptide_lengths",
"=",
"self",
".",
"default_peptide_lengths",
"if",
"not",
"peptide_lengths",
":",
"raise",
"ValueError",
"(",
"(",
"\"Must either provide 'peptide_lengths' argument \"",
"\"or set 'default_peptide_lengths\"",
")",
")",
"if",
"isinstance",
"(",
"peptide_lengths",
",",
"int",
")",
":",
"peptide_lengths",
"=",
"[",
"peptide_lengths",
"]",
"require_iterable_of",
"(",
"peptide_lengths",
",",
"int",
")",
"for",
"peptide_length",
"in",
"peptide_lengths",
":",
"if",
"(",
"self",
".",
"min_peptide_length",
"is",
"not",
"None",
"and",
"peptide_length",
"<",
"self",
".",
"min_peptide_length",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid peptide length %d, shorter than min %d\"",
"%",
"(",
"peptide_length",
",",
"self",
".",
"min_peptide_length",
")",
")",
"elif",
"(",
"self",
".",
"max_peptide_length",
"is",
"not",
"None",
"and",
"peptide_length",
">",
"self",
".",
"max_peptide_length",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid peptide length %d, longer than max %d\"",
"%",
"(",
"peptide_length",
",",
"self",
".",
"max_peptide_length",
")",
")",
"return",
"peptide_lengths"
] |
b329b4dccd60fae41296816b8cbfe15d6ca07e67
|
valid
|
BasePredictor._check_peptide_inputs
|
Check peptide sequences to make sure they are valid for this predictor.
|
mhctools/base_predictor.py
|
def _check_peptide_inputs(self, peptides):
"""
Check peptide sequences to make sure they are valid for this predictor.
"""
require_iterable_of(peptides, string_types)
check_X = not self.allow_X_in_peptides
check_lower = not self.allow_lowercase_in_peptides
check_min_length = self.min_peptide_length is not None
min_length = self.min_peptide_length
check_max_length = self.max_peptide_length is not None
max_length = self.max_peptide_length
for p in peptides:
if not p.isalpha():
raise ValueError("Invalid characters in peptide '%s'" % p)
elif check_X and "X" in p:
raise ValueError("Invalid character 'X' in peptide '%s'" % p)
elif check_lower and not p.isupper():
raise ValueError("Invalid lowercase letters in peptide '%s'" % p)
elif check_min_length and len(p) < min_length:
raise ValueError(
"Peptide '%s' too short (%d chars), must be at least %d" % (
p, len(p), min_length))
elif check_max_length and len(p) > max_length:
raise ValueError(
"Peptide '%s' too long (%d chars), must be at least %d" % (
p, len(p), max_length))
|
def _check_peptide_inputs(self, peptides):
"""
Check peptide sequences to make sure they are valid for this predictor.
"""
require_iterable_of(peptides, string_types)
check_X = not self.allow_X_in_peptides
check_lower = not self.allow_lowercase_in_peptides
check_min_length = self.min_peptide_length is not None
min_length = self.min_peptide_length
check_max_length = self.max_peptide_length is not None
max_length = self.max_peptide_length
for p in peptides:
if not p.isalpha():
raise ValueError("Invalid characters in peptide '%s'" % p)
elif check_X and "X" in p:
raise ValueError("Invalid character 'X' in peptide '%s'" % p)
elif check_lower and not p.isupper():
raise ValueError("Invalid lowercase letters in peptide '%s'" % p)
elif check_min_length and len(p) < min_length:
raise ValueError(
"Peptide '%s' too short (%d chars), must be at least %d" % (
p, len(p), min_length))
elif check_max_length and len(p) > max_length:
raise ValueError(
"Peptide '%s' too long (%d chars), must be at least %d" % (
p, len(p), max_length))
|
[
"Check",
"peptide",
"sequences",
"to",
"make",
"sure",
"they",
"are",
"valid",
"for",
"this",
"predictor",
"."
] |
openvax/mhctools
|
python
|
https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/base_predictor.py#L151-L176
|
[
"def",
"_check_peptide_inputs",
"(",
"self",
",",
"peptides",
")",
":",
"require_iterable_of",
"(",
"peptides",
",",
"string_types",
")",
"check_X",
"=",
"not",
"self",
".",
"allow_X_in_peptides",
"check_lower",
"=",
"not",
"self",
".",
"allow_lowercase_in_peptides",
"check_min_length",
"=",
"self",
".",
"min_peptide_length",
"is",
"not",
"None",
"min_length",
"=",
"self",
".",
"min_peptide_length",
"check_max_length",
"=",
"self",
".",
"max_peptide_length",
"is",
"not",
"None",
"max_length",
"=",
"self",
".",
"max_peptide_length",
"for",
"p",
"in",
"peptides",
":",
"if",
"not",
"p",
".",
"isalpha",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid characters in peptide '%s'\"",
"%",
"p",
")",
"elif",
"check_X",
"and",
"\"X\"",
"in",
"p",
":",
"raise",
"ValueError",
"(",
"\"Invalid character 'X' in peptide '%s'\"",
"%",
"p",
")",
"elif",
"check_lower",
"and",
"not",
"p",
".",
"isupper",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid lowercase letters in peptide '%s'\"",
"%",
"p",
")",
"elif",
"check_min_length",
"and",
"len",
"(",
"p",
")",
"<",
"min_length",
":",
"raise",
"ValueError",
"(",
"\"Peptide '%s' too short (%d chars), must be at least %d\"",
"%",
"(",
"p",
",",
"len",
"(",
"p",
")",
",",
"min_length",
")",
")",
"elif",
"check_max_length",
"and",
"len",
"(",
"p",
")",
">",
"max_length",
":",
"raise",
"ValueError",
"(",
"\"Peptide '%s' too long (%d chars), must be at least %d\"",
"%",
"(",
"p",
",",
"len",
"(",
"p",
")",
",",
"max_length",
")",
")"
] |
b329b4dccd60fae41296816b8cbfe15d6ca07e67
|
valid
|
BasePredictor.predict_subsequences
|
Given a dictionary mapping sequence names to amino acid strings,
and an optional list of peptide lengths, returns a
BindingPredictionCollection.
|
mhctools/base_predictor.py
|
def predict_subsequences(
self,
sequence_dict,
peptide_lengths=None):
"""
Given a dictionary mapping sequence names to amino acid strings,
and an optional list of peptide lengths, returns a
BindingPredictionCollection.
"""
if isinstance(sequence_dict, string_types):
sequence_dict = {"seq": sequence_dict}
elif isinstance(sequence_dict, (list, tuple)):
sequence_dict = {seq: seq for seq in sequence_dict}
peptide_lengths = self._check_peptide_lengths(peptide_lengths)
# convert long protein sequences to set of peptides and
# associated sequence name / offsets that each peptide may have come
# from
peptide_set = set([])
peptide_to_name_offset_pairs = defaultdict(list)
for name, sequence in sequence_dict.items():
for peptide_length in peptide_lengths:
for i in range(len(sequence) - peptide_length + 1):
peptide = sequence[i:i + peptide_length]
peptide_set.add(peptide)
peptide_to_name_offset_pairs[peptide].append((name, i))
peptide_list = sorted(peptide_set)
binding_predictions = self.predict_peptides(peptide_list)
# create BindingPrediction objects with sequence name and offset
results = []
for binding_prediction in binding_predictions:
for name, offset in peptide_to_name_offset_pairs[
binding_prediction.peptide]:
results.append(binding_prediction.clone_with_updates(
source_sequence_name=name,
offset=offset))
self._check_results(
results,
peptides=peptide_set,
alleles=self.alleles)
return BindingPredictionCollection(results)
|
def predict_subsequences(
self,
sequence_dict,
peptide_lengths=None):
"""
Given a dictionary mapping sequence names to amino acid strings,
and an optional list of peptide lengths, returns a
BindingPredictionCollection.
"""
if isinstance(sequence_dict, string_types):
sequence_dict = {"seq": sequence_dict}
elif isinstance(sequence_dict, (list, tuple)):
sequence_dict = {seq: seq for seq in sequence_dict}
peptide_lengths = self._check_peptide_lengths(peptide_lengths)
# convert long protein sequences to set of peptides and
# associated sequence name / offsets that each peptide may have come
# from
peptide_set = set([])
peptide_to_name_offset_pairs = defaultdict(list)
for name, sequence in sequence_dict.items():
for peptide_length in peptide_lengths:
for i in range(len(sequence) - peptide_length + 1):
peptide = sequence[i:i + peptide_length]
peptide_set.add(peptide)
peptide_to_name_offset_pairs[peptide].append((name, i))
peptide_list = sorted(peptide_set)
binding_predictions = self.predict_peptides(peptide_list)
# create BindingPrediction objects with sequence name and offset
results = []
for binding_prediction in binding_predictions:
for name, offset in peptide_to_name_offset_pairs[
binding_prediction.peptide]:
results.append(binding_prediction.clone_with_updates(
source_sequence_name=name,
offset=offset))
self._check_results(
results,
peptides=peptide_set,
alleles=self.alleles)
return BindingPredictionCollection(results)
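A usage sketch; `predictor` is any concrete BasePredictor subclass (e.g. the NetMHC wrapper above), and the dict key is an arbitrary sequence name:
    collection = predictor.predict_subsequences(
        {"antigen": "SIINFEKLLLSVPLLL"},
        peptide_lengths=[8, 9])
    df = collection.to_dataframe()  # offsets refer back into "antigen"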
|
[
"Given",
"a",
"dictionary",
"mapping",
"sequence",
"names",
"to",
"amino",
"acid",
"strings",
"and",
"an",
"optional",
"list",
"of",
"peptide",
"lengths",
"returns",
"a",
"BindingPredictionCollection",
"."
] |
openvax/mhctools
|
python
|
https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/base_predictor.py#L178-L222
|
[
"def",
"predict_subsequences",
"(",
"self",
",",
"sequence_dict",
",",
"peptide_lengths",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"sequence_dict",
",",
"string_types",
")",
":",
"sequence_dict",
"=",
"{",
"\"seq\"",
":",
"sequence_dict",
"}",
"elif",
"isinstance",
"(",
"sequence_dict",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"sequence_dict",
"=",
"{",
"seq",
":",
"seq",
"for",
"seq",
"in",
"sequence_dict",
"}",
"peptide_lengths",
"=",
"self",
".",
"_check_peptide_lengths",
"(",
"peptide_lengths",
")",
"# convert long protein sequences to set of peptides and",
"# associated sequence name / offsets that each peptide may have come",
"# from",
"peptide_set",
"=",
"set",
"(",
"[",
"]",
")",
"peptide_to_name_offset_pairs",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"name",
",",
"sequence",
"in",
"sequence_dict",
".",
"items",
"(",
")",
":",
"for",
"peptide_length",
"in",
"peptide_lengths",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"sequence",
")",
"-",
"peptide_length",
"+",
"1",
")",
":",
"peptide",
"=",
"sequence",
"[",
"i",
":",
"i",
"+",
"peptide_length",
"]",
"peptide_set",
".",
"add",
"(",
"peptide",
")",
"peptide_to_name_offset_pairs",
"[",
"peptide",
"]",
".",
"append",
"(",
"(",
"name",
",",
"i",
")",
")",
"peptide_list",
"=",
"sorted",
"(",
"peptide_set",
")",
"binding_predictions",
"=",
"self",
".",
"predict_peptides",
"(",
"peptide_list",
")",
"# create BindingPrediction objects with sequence name and offset",
"results",
"=",
"[",
"]",
"for",
"binding_prediction",
"in",
"binding_predictions",
":",
"for",
"name",
",",
"offset",
"in",
"peptide_to_name_offset_pairs",
"[",
"binding_prediction",
".",
"peptide",
"]",
":",
"results",
".",
"append",
"(",
"binding_prediction",
".",
"clone_with_updates",
"(",
"source_sequence_name",
"=",
"name",
",",
"offset",
"=",
"offset",
")",
")",
"self",
".",
"_check_results",
"(",
"results",
",",
"peptides",
"=",
"peptide_set",
",",
"alleles",
"=",
"self",
".",
"alleles",
")",
"return",
"BindingPredictionCollection",
"(",
"results",
")"
] |
b329b4dccd60fae41296816b8cbfe15d6ca07e67
|
valid
|
BasePredictor._check_hla_alleles
|
Given a list of HLA alleles and an optional list of valid
HLA alleles, return a list of unique alleles that we will pass into
the MHC binding predictor.
|
mhctools/base_predictor.py
|
def _check_hla_alleles(
alleles,
valid_alleles=None):
"""
Given a list of HLA alleles and an optional list of valid
    HLA alleles, return a list of unique alleles that we will pass into
the MHC binding predictor.
"""
require_iterable_of(alleles, string_types, "HLA alleles")
# Don't run the MHC predictor twice for homozygous alleles,
# only run it for unique alleles
alleles = {
normalize_allele_name(allele.strip().upper())
for allele in alleles
}
if valid_alleles:
# For some reason netMHCpan drops the '*' in names, so
# 'HLA-A*03:01' becomes 'HLA-A03:01'
missing_alleles = [
allele
for allele in alleles
if allele not in valid_alleles
]
if len(missing_alleles) > 0:
raise UnsupportedAllele(
"Unsupported HLA alleles: %s" % missing_alleles)
return list(alleles)
|
def _check_hla_alleles(
alleles,
valid_alleles=None):
"""
Given a list of HLA alleles and an optional list of valid
    HLA alleles, return a list of unique alleles that we will pass into
the MHC binding predictor.
"""
require_iterable_of(alleles, string_types, "HLA alleles")
# Don't run the MHC predictor twice for homozygous alleles,
# only run it for unique alleles
alleles = {
normalize_allele_name(allele.strip().upper())
for allele in alleles
}
if valid_alleles:
# For some reason netMHCpan drops the '*' in names, so
# 'HLA-A*03:01' becomes 'HLA-A03:01'
missing_alleles = [
allele
for allele in alleles
if allele not in valid_alleles
]
if len(missing_alleles) > 0:
raise UnsupportedAllele(
"Unsupported HLA alleles: %s" % missing_alleles)
return list(alleles)
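An illustrative call, assuming the method is a staticmethod (as its signature suggests) and mhcnames-style normalization:
    BasePredictor._check_hla_alleles(["hla-a*02:01", "HLA-A*02:01"])
    # duplicates collapse after normalization -> ["HLA-A*02:01"]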
|
[
"Given",
"a",
"list",
"of",
"HLA",
"alleles",
"and",
"an",
"optional",
"list",
"of",
"valid",
"HLA",
"alleles",
"return",
"a",
"set",
"of",
"alleles",
"that",
"we",
"will",
"pass",
"into",
"the",
"MHC",
"binding",
"predictor",
"."
] |
openvax/mhctools
|
python
|
https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/base_predictor.py#L237-L265
|
[
"def",
"_check_hla_alleles",
"(",
"alleles",
",",
"valid_alleles",
"=",
"None",
")",
":",
"require_iterable_of",
"(",
"alleles",
",",
"string_types",
",",
"\"HLA alleles\"",
")",
"# Don't run the MHC predictor twice for homozygous alleles,",
"# only run it for unique alleles",
"alleles",
"=",
"{",
"normalize_allele_name",
"(",
"allele",
".",
"strip",
"(",
")",
".",
"upper",
"(",
")",
")",
"for",
"allele",
"in",
"alleles",
"}",
"if",
"valid_alleles",
":",
"# For some reason netMHCpan drops the '*' in names, so",
"# 'HLA-A*03:01' becomes 'HLA-A03:01'",
"missing_alleles",
"=",
"[",
"allele",
"for",
"allele",
"in",
"alleles",
"if",
"allele",
"not",
"in",
"valid_alleles",
"]",
"if",
"len",
"(",
"missing_alleles",
")",
">",
"0",
":",
"raise",
"UnsupportedAllele",
"(",
"\"Unsupported HLA alleles: %s\"",
"%",
"missing_alleles",
")",
"return",
"list",
"(",
"alleles",
")"
] |
b329b4dccd60fae41296816b8cbfe15d6ca07e67
|
valid
|
StreamResponse._connect
|
Connect to the stream
Returns
-------
asyncio.coroutine
The streaming response
|
peony/stream.py
|
async def _connect(self):
"""
Connect to the stream
Returns
-------
asyncio.coroutine
The streaming response
"""
logger.debug("connecting to the stream")
await self.client.setup
if self.session is None:
self.session = self.client._session
kwargs = await self.client.headers.prepare_request(**self.kwargs)
request = self.client.error_handler(self.session.request)
return await request(timeout=0, **kwargs)
|
async def _connect(self):
"""
Connect to the stream
Returns
-------
asyncio.coroutine
The streaming response
"""
logger.debug("connecting to the stream")
await self.client.setup
if self.session is None:
self.session = self.client._session
kwargs = await self.client.headers.prepare_request(**self.kwargs)
request = self.client.error_handler(self.session.request)
return await request(timeout=0, **kwargs)
|
[
"Connect",
"to",
"the",
"stream"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/stream.py#L75-L91
|
[
"async",
"def",
"_connect",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"\"connecting to the stream\"",
")",
"await",
"self",
".",
"client",
".",
"setup",
"if",
"self",
".",
"session",
"is",
"None",
":",
"self",
".",
"session",
"=",
"self",
".",
"client",
".",
"_session",
"kwargs",
"=",
"await",
"self",
".",
"client",
".",
"headers",
".",
"prepare_request",
"(",
"*",
"*",
"self",
".",
"kwargs",
")",
"request",
"=",
"self",
".",
"client",
".",
"error_handler",
"(",
"self",
".",
"session",
".",
"request",
")",
"return",
"await",
"request",
"(",
"timeout",
"=",
"0",
",",
"*",
"*",
"kwargs",
")"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
|
valid
|
StreamResponse.connect
|
Create the connection
Returns
-------
self
Raises
------
exception.PeonyException
    On a response status in 4xx that is not 420 or 429.
    Also on statuses in 1xx or 3xx, since those should not be
    received here.
|
peony/stream.py
|
async def connect(self):
"""
Create the connection
Returns
-------
self
Raises
------
exception.PeonyException
        On a response status in 4xx that is not 420 or 429.
        Also on statuses in 1xx or 3xx, since those should not be
        received here.
"""
with async_timeout.timeout(self.timeout):
self.response = await self._connect()
if self.response.status in range(200, 300):
self._error_timeout = 0
self.state = NORMAL
elif self.response.status == 500:
self.state = DISCONNECTION
elif self.response.status in range(501, 600):
self.state = RECONNECTION
elif self.response.status in (420, 429):
self.state = ENHANCE_YOUR_CALM
else:
logger.debug("raising error during stream connection")
raise await exceptions.throw(self.response,
loads=self.client._loads,
url=self.kwargs['url'])
logger.debug("stream state: %d" % self.state)
|
async def connect(self):
"""
Create the connection
Returns
-------
self
Raises
------
exception.PeonyException
        On a response status in 4xx that is not 420 or 429.
        Also on statuses in 1xx or 3xx, since those should not be
        received here.
"""
with async_timeout.timeout(self.timeout):
self.response = await self._connect()
if self.response.status in range(200, 300):
self._error_timeout = 0
self.state = NORMAL
elif self.response.status == 500:
self.state = DISCONNECTION
elif self.response.status in range(501, 600):
self.state = RECONNECTION
elif self.response.status in (420, 429):
self.state = ENHANCE_YOUR_CALM
else:
logger.debug("raising error during stream connection")
raise await exceptions.throw(self.response,
loads=self.client._loads,
url=self.kwargs['url'])
logger.debug("stream state: %d" % self.state)
|
[
"Create",
"the",
"connection"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/stream.py#L93-L126
|
[
"async",
"def",
"connect",
"(",
"self",
")",
":",
"with",
"async_timeout",
".",
"timeout",
"(",
"self",
".",
"timeout",
")",
":",
"self",
".",
"response",
"=",
"await",
"self",
".",
"_connect",
"(",
")",
"if",
"self",
".",
"response",
".",
"status",
"in",
"range",
"(",
"200",
",",
"300",
")",
":",
"self",
".",
"_error_timeout",
"=",
"0",
"self",
".",
"state",
"=",
"NORMAL",
"elif",
"self",
".",
"response",
".",
"status",
"==",
"500",
":",
"self",
".",
"state",
"=",
"DISCONNECTION",
"elif",
"self",
".",
"response",
".",
"status",
"in",
"range",
"(",
"501",
",",
"600",
")",
":",
"self",
".",
"state",
"=",
"RECONNECTION",
"elif",
"self",
".",
"response",
".",
"status",
"in",
"(",
"420",
",",
"429",
")",
":",
"self",
".",
"state",
"=",
"ENHANCE_YOUR_CALM",
"else",
":",
"logger",
".",
"debug",
"(",
"\"raising error during stream connection\"",
")",
"raise",
"await",
"exceptions",
".",
"throw",
"(",
"self",
".",
"response",
",",
"loads",
"=",
"self",
".",
"client",
".",
"_loads",
",",
"url",
"=",
"self",
".",
"kwargs",
"[",
"'url'",
"]",
")",
"logger",
".",
"debug",
"(",
"\"stream state: %d\"",
"%",
"self",
".",
"state",
")"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
|
valid
|
StreamResponse.init_restart
|
Restart the stream on error
Parameters
----------
error : bool, optional
Whether to print the error or not
|
peony/stream.py
|
async def init_restart(self, error=None):
"""
Restart the stream on error
Parameters
----------
error : bool, optional
Whether to print the error or not
"""
if error:
utils.log_error(logger=logger)
if self.state == DISCONNECTION:
if self._error_timeout < MAX_DISCONNECTION_TIMEOUT:
self._error_timeout += DISCONNECTION_TIMEOUT
logger.info("The stream was disconnected, will reconnect in %ss"
% self._error_timeout)
elif self.state == RECONNECTION:
if self._error_timeout < RECONNECTION_TIMEOUT:
self._error_timeout = RECONNECTION_TIMEOUT
elif self._error_timeout < MAX_RECONNECTION_TIMEOUT:
self._error_timeout *= 2
logger.info("Could not connect to the stream, reconnection in %ss"
% self._error_timeout)
elif self.state == ENHANCE_YOUR_CALM:
if self._error_timeout < ENHANCE_YOUR_CALM_TIMEOUT:
self._error_timeout = ENHANCE_YOUR_CALM_TIMEOUT
else:
self._error_timeout *= 2
logger.warning("Enhance Your Calm response received from Twitter. "
"If you didn't restart your program frenetically "
"then there is probably something wrong with it. "
"Make sure you are not opening too many connections"
" to the endpoint you are currently using by "
"checking out Twitter's Streaming API "
"documentation: "
"https://dev.twitter.com/streaming/overview\n"
"The stream will restart in %ss."
% self._error_timeout)
elif self.state == EOF:
pass # no timeout
else:
raise RuntimeError("Incorrect state: %d" % self.state)
self._reconnecting = True
return {'reconnecting_in': self._error_timeout, 'error': error}
|
async def init_restart(self, error=None):
"""
Restart the stream on error
Parameters
----------
error : bool, optional
Whether to print the error or not
"""
if error:
utils.log_error(logger=logger)
if self.state == DISCONNECTION:
if self._error_timeout < MAX_DISCONNECTION_TIMEOUT:
self._error_timeout += DISCONNECTION_TIMEOUT
logger.info("The stream was disconnected, will reconnect in %ss"
% self._error_timeout)
elif self.state == RECONNECTION:
if self._error_timeout < RECONNECTION_TIMEOUT:
self._error_timeout = RECONNECTION_TIMEOUT
elif self._error_timeout < MAX_RECONNECTION_TIMEOUT:
self._error_timeout *= 2
logger.info("Could not connect to the stream, reconnection in %ss"
% self._error_timeout)
elif self.state == ENHANCE_YOUR_CALM:
if self._error_timeout < ENHANCE_YOUR_CALM_TIMEOUT:
self._error_timeout = ENHANCE_YOUR_CALM_TIMEOUT
else:
self._error_timeout *= 2
logger.warning("Enhance Your Calm response received from Twitter. "
"If you didn't restart your program frenetically "
"then there is probably something wrong with it. "
"Make sure you are not opening too many connections"
" to the endpoint you are currently using by "
"checking out Twitter's Streaming API "
"documentation: "
"https://dev.twitter.com/streaming/overview\n"
"The stream will restart in %ss."
% self._error_timeout)
elif self.state == EOF:
pass # no timeout
else:
raise RuntimeError("Incorrect state: %d" % self.state)
self._reconnecting = True
return {'reconnecting_in': self._error_timeout, 'error': error}
|
[
"Restart",
"the",
"stream",
"on",
"error"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/stream.py#L201-L251
|
[
"async",
"def",
"init_restart",
"(",
"self",
",",
"error",
"=",
"None",
")",
":",
"if",
"error",
":",
"utils",
".",
"log_error",
"(",
"logger",
"=",
"logger",
")",
"if",
"self",
".",
"state",
"==",
"DISCONNECTION",
":",
"if",
"self",
".",
"_error_timeout",
"<",
"MAX_DISCONNECTION_TIMEOUT",
":",
"self",
".",
"_error_timeout",
"+=",
"DISCONNECTION_TIMEOUT",
"logger",
".",
"info",
"(",
"\"The stream was disconnected, will reconnect in %ss\"",
"%",
"self",
".",
"_error_timeout",
")",
"elif",
"self",
".",
"state",
"==",
"RECONNECTION",
":",
"if",
"self",
".",
"_error_timeout",
"<",
"RECONNECTION_TIMEOUT",
":",
"self",
".",
"_error_timeout",
"=",
"RECONNECTION_TIMEOUT",
"elif",
"self",
".",
"_error_timeout",
"<",
"MAX_RECONNECTION_TIMEOUT",
":",
"self",
".",
"_error_timeout",
"*=",
"2",
"logger",
".",
"info",
"(",
"\"Could not connect to the stream, reconnection in %ss\"",
"%",
"self",
".",
"_error_timeout",
")",
"elif",
"self",
".",
"state",
"==",
"ENHANCE_YOUR_CALM",
":",
"if",
"self",
".",
"_error_timeout",
"<",
"ENHANCE_YOUR_CALM_TIMEOUT",
":",
"self",
".",
"_error_timeout",
"=",
"ENHANCE_YOUR_CALM_TIMEOUT",
"else",
":",
"self",
".",
"_error_timeout",
"*=",
"2",
"logger",
".",
"warning",
"(",
"\"Enhance Your Calm response received from Twitter. \"",
"\"If you didn't restart your program frenetically \"",
"\"then there is probably something wrong with it. \"",
"\"Make sure you are not opening too many connections\"",
"\" to the endpoint you are currently using by \"",
"\"checking out Twitter's Streaming API \"",
"\"documentation: \"",
"\"https://dev.twitter.com/streaming/overview\\n\"",
"\"The stream will restart in %ss.\"",
"%",
"self",
".",
"_error_timeout",
")",
"elif",
"self",
".",
"state",
"==",
"EOF",
":",
"pass",
"# no timeout",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Incorrect state: %d\"",
"%",
"self",
".",
"state",
")",
"self",
".",
"_reconnecting",
"=",
"True",
"return",
"{",
"'reconnecting_in'",
":",
"self",
".",
"_error_timeout",
",",
"'error'",
":",
"error",
"}"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
|
valid
|
StreamResponse.restart_stream
|
Restart the stream on error
|
peony/stream.py
|
async def restart_stream(self):
"""
Restart the stream on error
"""
await self.response.release()
await asyncio.sleep(self._error_timeout)
await self.connect()
logger.info("Reconnected to the stream")
self._reconnecting = False
return {'stream_restart': True}
|
async def restart_stream(self):
"""
Restart the stream on error
"""
await self.response.release()
await asyncio.sleep(self._error_timeout)
await self.connect()
logger.info("Reconnected to the stream")
self._reconnecting = False
return {'stream_restart': True}
|
[
"Restart",
"the",
"stream",
"on",
"error"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/stream.py#L253-L263
|
[
"async",
"def",
"restart_stream",
"(",
"self",
")",
":",
"await",
"self",
".",
"response",
".",
"release",
"(",
")",
"await",
"asyncio",
".",
"sleep",
"(",
"self",
".",
"_error_timeout",
")",
"await",
"self",
".",
"connect",
"(",
")",
"logger",
".",
"info",
"(",
"\"Reconnected to the stream\"",
")",
"self",
".",
"_reconnecting",
"=",
"False",
"return",
"{",
"'stream_restart'",
":",
"True",
"}"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
|
valid
|
Handler.with_prefix
|
decorator to handle commands with prefixes
Parameters
----------
prefix : str
the prefix of the command
strict : bool, optional
If set to True the command must be at the beginning
of the message. Defaults to False.
Returns
-------
function
a decorator that returns an :class:`EventHandler` instance
|
peony/commands/event_types.py
|
def with_prefix(self, prefix, strict=False):
"""
decorator to handle commands with prefixes
Parameters
----------
prefix : str
the prefix of the command
strict : bool, optional
If set to True the command must be at the beginning
of the message. Defaults to False.
Returns
-------
function
a decorator that returns an :class:`EventHandler` instance
"""
def decorated(func):
return EventHandler(func=func, event=self.event,
prefix=prefix, strict=strict)
return decorated
|
def with_prefix(self, prefix, strict=False):
"""
decorator to handle commands with prefixes
Parameters
----------
prefix : str
the prefix of the command
strict : bool, optional
If set to True the command must be at the beginning
of the message. Defaults to False.
Returns
-------
function
a decorator that returns an :class:`EventHandler` instance
"""
def decorated(func):
return EventHandler(func=func, event=self.event,
prefix=prefix, strict=strict)
return decorated
|
[
"decorator",
"to",
"handle",
"commands",
"with",
"prefixes"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/commands/event_types.py#L38-L60
|
[
"def",
"with_prefix",
"(",
"self",
",",
"prefix",
",",
"strict",
"=",
"False",
")",
":",
"def",
"decorated",
"(",
"func",
")",
":",
"return",
"EventHandler",
"(",
"func",
"=",
"func",
",",
"event",
"=",
"self",
".",
"event",
",",
"prefix",
"=",
"prefix",
",",
"strict",
"=",
"strict",
")",
"return",
"decorated"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
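`with_prefix` is a decorator factory: calling it captures the prefix configuration, and the returned decorator wraps the command function in an `EventHandler`. A self-contained sketch of the pattern, using a stripped-down stand-in for the real `EventHandler`:

class EventHandler:
    # minimal stand-in with the same constructor shape as the real class
    def __init__(self, func, event, prefix=None, strict=False):
        self.func, self.event = func, event
        self.prefix, self.strict = prefix, strict

class Handler:
    def __init__(self, event):
        self.event = event

    def with_prefix(self, prefix, strict=False):
        def decorated(func):
            return EventHandler(func=func, event=self.event,
                                prefix=prefix, strict=strict)
        return decorated

on_message = Handler('message')

@on_message.with_prefix('/', strict=True)
def ping(data):
    return 'pong'

assert ping.prefix == '/' and ping.strict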
|
valid
|
Event.envelope
|
returns an :class:`Event` that can be used for site streams
|
peony/commands/event_types.py
|
def envelope(self):
""" returns an :class:`Event` that can be used for site streams """
def enveloped_event(data):
return 'for_user' in data and self._func(data.get('message'))
return self.__class__(enveloped_event, self.__name__)
|
def envelope(self):
""" returns an :class:`Event` that can be used for site streams """
def enveloped_event(data):
return 'for_user' in data and self._func(data.get('message'))
return self.__class__(enveloped_event, self.__name__)
|
[
"returns",
"an",
":",
"class",
":",
"Event",
"that",
"can",
"be",
"used",
"for",
"site",
"streams"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/commands/event_types.py#L83-L89
|
[
"def",
"envelope",
"(",
"self",
")",
":",
"def",
"enveloped_event",
"(",
"data",
")",
":",
"return",
"'for_user'",
"in",
"data",
"and",
"self",
".",
"_func",
"(",
"data",
".",
"get",
"(",
"'message'",
")",
")",
"return",
"self",
".",
"__class__",
"(",
"enveloped_event",
",",
"self",
".",
"__name__",
")"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
|
valid
|
BDClient.set_tz
|
set the environment timezone to the timezone
set in your twitter settings
|
examples/birthday.py
|
async def set_tz(self):
"""
set the environment timezone to the timezone
set in your twitter settings
"""
settings = await self.api.account.settings.get()
tz = settings.time_zone.tzinfo_name
os.environ['TZ'] = tz
time.tzset()
|
async def set_tz(self):
"""
set the environment timezone to the timezone
set in your twitter settings
"""
settings = await self.api.account.settings.get()
tz = settings.time_zone.tzinfo_name
os.environ['TZ'] = tz
time.tzset()
|
[
"set",
"the",
"environment",
"timezone",
"to",
"the",
"timezone",
"set",
"in",
"your",
"twitter",
"settings"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/examples/birthday.py#L23-L33
|
[
"async",
"def",
"set_tz",
"(",
"self",
")",
":",
"settings",
"=",
"await",
"self",
".",
"api",
".",
"account",
".",
"settings",
".",
"get",
"(",
")",
"tz",
"=",
"settings",
".",
"time_zone",
".",
"tzinfo_name",
"os",
".",
"environ",
"[",
"'TZ'",
"]",
"=",
"tz",
"time",
".",
"tzset",
"(",
")"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
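The `TZ`/`tzset` trick above works only on POSIX systems (`time.tzset` is unavailable on Windows). A minimal sketch of the same idea outside the client, with an illustrative zone name in place of the value fetched from Twitter settings:

import os
import time

os.environ['TZ'] = 'Europe/Paris'  # illustrative zone; set_tz reads it from Twitter settings
time.tzset()  # re-reads TZ so localtime() uses the new zone (POSIX only)
print(time.strftime('%Z'))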
|
valid
|
run_command
|
Given a list whose first element is a command name, followed by arguments,
execute it and show timing info.
|
mhctools/process_helpers.py
|
def run_command(args, **kwargs):
"""
Given a list whose first element is a command name, followed by arguments,
execute it and show timing info.
"""
assert len(args) > 0
start_time = time.time()
process = AsyncProcess(args, **kwargs)
process.wait()
elapsed_time = time.time() - start_time
logger.info("%s took %0.4f seconds", args[0], elapsed_time)
|
def run_command(args, **kwargs):
"""
Given a list whose first element is a command name, followed by arguments,
execute it and show timing info.
"""
assert len(args) > 0
start_time = time.time()
process = AsyncProcess(args, **kwargs)
process.wait()
elapsed_time = time.time() - start_time
logger.info("%s took %0.4f seconds", args[0], elapsed_time)
|
[
"Given",
"a",
"list",
"whose",
"first",
"element",
"is",
"a",
"command",
"name",
"followed",
"by",
"arguments",
"execute",
"it",
"and",
"show",
"timing",
"info",
"."
] |
openvax/mhctools
|
python
|
https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/process_helpers.py#L74-L84
|
[
"def",
"run_command",
"(",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"len",
"(",
"args",
")",
">",
"0",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"process",
"=",
"AsyncProcess",
"(",
"args",
",",
"*",
"*",
"kwargs",
")",
"process",
".",
"wait",
"(",
")",
"elapsed_time",
"=",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
"logger",
".",
"info",
"(",
"\"%s took %0.4f seconds\"",
",",
"args",
"[",
"0",
"]",
",",
"elapsed_time",
")"
] |
b329b4dccd60fae41296816b8cbfe15d6ca07e67
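A sketch of the same run-and-time helper built on the standard library's `subprocess.run` instead of the repo's `AsyncProcess`; the `echo` invocation is illustrative:

import logging
import subprocess
import time

logger = logging.getLogger(__name__)

def run_command(args, **kwargs):
    # run a command synchronously and log how long it took
    assert len(args) > 0
    start_time = time.time()
    subprocess.run(args, check=True, **kwargs)
    logger.info("%s took %0.4f seconds", args[0], time.time() - start_time)

run_command(["echo", "hello"])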
|
valid
|
run_multiple_commands_redirect_stdout
|
Run multiple shell commands in parallel, write each of their
stdout output to files associated with each command.
Parameters
----------
multiple_args_dict : dict
    A dictionary whose keys are files and values are args lists.
Run each args list as a subprocess and write stdout to the
corresponding file.
print_commands : bool
Print shell commands before running them.
process_limit : int
    Limit the number of concurrent processes to this number; 0
    means no limit, -1 means use the number of available processors
polling_freq : float
    Number of seconds between checks for finished processes, when
    a process limit is in effect
|
mhctools/process_helpers.py
|
def run_multiple_commands_redirect_stdout(
multiple_args_dict,
print_commands=True,
process_limit=-1,
polling_freq=0.5,
**kwargs):
"""
Run multiple shell commands in parallel, write each of their
stdout output to files associated with each command.
Parameters
----------
multiple_args_dict : dict
    A dictionary whose keys are files and values are args lists.
Run each args list as a subprocess and write stdout to the
corresponding file.
print_commands : bool
Print shell commands before running them.
process_limit : int
    Limit the number of concurrent processes to this number; 0
    means no limit, -1 means use the number of available processors
polling_freq : float
    Number of seconds between checks for finished processes, when
    a process limit is in effect
"""
assert len(multiple_args_dict) > 0
assert all(len(args) > 0 for args in multiple_args_dict.values())
assert all(hasattr(f, 'name') for f in multiple_args_dict.keys())
if process_limit < 0:
logger.debug("Using %d processes" % cpu_count())
process_limit = cpu_count()
start_time = time.time()
processes = Queue(maxsize=process_limit)
def add_to_queue(process):
process.start()
if print_commands:
handler = logging.FileHandler(process.redirect_stdout_file.name)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
logger.debug(" ".join(process.args))
logger.removeHandler(handler)
processes.put(process)
for f, args in multiple_args_dict.items():
p = AsyncProcess(
args,
redirect_stdout_file=f,
**kwargs)
if not processes.full():
add_to_queue(p)
else:
while processes.full():
# Are there any done processes?
to_remove = []
for possibly_done in processes.queue:
if possibly_done.poll() is not None:
possibly_done.wait()
to_remove.append(possibly_done)
# Remove them from the queue and stop checking
if to_remove:
for process_to_remove in to_remove:
processes.queue.remove(process_to_remove)
break
# Check again in a second if there weren't
time.sleep(polling_freq)
add_to_queue(p)
# Wait for all the rest of the processes
while not processes.empty():
processes.get().wait()
elapsed_time = time.time() - start_time
logger.info(
"Ran %d commands in %0.4f seconds",
len(multiple_args_dict),
elapsed_time)
|
def run_multiple_commands_redirect_stdout(
multiple_args_dict,
print_commands=True,
process_limit=-1,
polling_freq=0.5,
**kwargs):
"""
Run multiple shell commands in parallel, write each of their
stdout output to files associated with each command.
Parameters
----------
multiple_args_dict : dict
    A dictionary whose keys are files and values are args lists.
Run each args list as a subprocess and write stdout to the
corresponding file.
print_commands : bool
Print shell commands before running them.
process_limit : int
    Limit the number of concurrent processes to this number; 0
    means no limit, -1 means use the number of available processors
polling_freq : float
    Number of seconds between checks for finished processes, when
    a process limit is in effect
"""
assert len(multiple_args_dict) > 0
assert all(len(args) > 0 for args in multiple_args_dict.values())
assert all(hasattr(f, 'name') for f in multiple_args_dict.keys())
if process_limit < 0:
logger.debug("Using %d processes" % cpu_count())
process_limit = cpu_count()
start_time = time.time()
processes = Queue(maxsize=process_limit)
def add_to_queue(process):
process.start()
if print_commands:
handler = logging.FileHandler(process.redirect_stdout_file.name)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
logger.debug(" ".join(process.args))
logger.removeHandler(handler)
processes.put(process)
for f, args in multiple_args_dict.items():
p = AsyncProcess(
args,
redirect_stdout_file=f,
**kwargs)
if not processes.full():
add_to_queue(p)
else:
while processes.full():
# Are there any done processes?
to_remove = []
for possibly_done in processes.queue:
if possibly_done.poll() is not None:
possibly_done.wait()
to_remove.append(possibly_done)
# Remove them from the queue and stop checking
if to_remove:
for process_to_remove in to_remove:
processes.queue.remove(process_to_remove)
break
# Check again in a second if there weren't
time.sleep(polling_freq)
add_to_queue(p)
# Wait for all the rest of the processes
while not processes.empty():
processes.get().wait()
elapsed_time = time.time() - start_time
logger.info(
"Ran %d commands in %0.4f seconds",
len(multiple_args_dict),
elapsed_time)
|
[
"Run",
"multiple",
"shell",
"commands",
"in",
"parallel",
"write",
"each",
"of",
"their",
"stdout",
"output",
"to",
"files",
"associated",
"with",
"each",
"command",
"."
] |
openvax/mhctools
|
python
|
https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/process_helpers.py#L86-L166
|
[
"def",
"run_multiple_commands_redirect_stdout",
"(",
"multiple_args_dict",
",",
"print_commands",
"=",
"True",
",",
"process_limit",
"=",
"-",
"1",
",",
"polling_freq",
"=",
"0.5",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"len",
"(",
"multiple_args_dict",
")",
">",
"0",
"assert",
"all",
"(",
"len",
"(",
"args",
")",
">",
"0",
"for",
"args",
"in",
"multiple_args_dict",
".",
"values",
"(",
")",
")",
"assert",
"all",
"(",
"hasattr",
"(",
"f",
",",
"'name'",
")",
"for",
"f",
"in",
"multiple_args_dict",
".",
"keys",
"(",
")",
")",
"if",
"process_limit",
"<",
"0",
":",
"logger",
".",
"debug",
"(",
"\"Using %d processes\"",
"%",
"cpu_count",
"(",
")",
")",
"process_limit",
"=",
"cpu_count",
"(",
")",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"processes",
"=",
"Queue",
"(",
"maxsize",
"=",
"process_limit",
")",
"def",
"add_to_queue",
"(",
"process",
")",
":",
"process",
".",
"start",
"(",
")",
"if",
"print_commands",
":",
"handler",
"=",
"logging",
".",
"FileHandler",
"(",
"process",
".",
"redirect_stdout_file",
".",
"name",
")",
"handler",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")",
"logger",
".",
"addHandler",
"(",
"handler",
")",
"logger",
".",
"debug",
"(",
"\" \"",
".",
"join",
"(",
"process",
".",
"args",
")",
")",
"logger",
".",
"removeHandler",
"(",
"handler",
")",
"processes",
".",
"put",
"(",
"process",
")",
"for",
"f",
",",
"args",
"in",
"multiple_args_dict",
".",
"items",
"(",
")",
":",
"p",
"=",
"AsyncProcess",
"(",
"args",
",",
"redirect_stdout_file",
"=",
"f",
",",
"*",
"*",
"kwargs",
")",
"if",
"not",
"processes",
".",
"full",
"(",
")",
":",
"add_to_queue",
"(",
"p",
")",
"else",
":",
"while",
"processes",
".",
"full",
"(",
")",
":",
"# Are there any done processes?",
"to_remove",
"=",
"[",
"]",
"for",
"possibly_done",
"in",
"processes",
".",
"queue",
":",
"if",
"possibly_done",
".",
"poll",
"(",
")",
"is",
"not",
"None",
":",
"possibly_done",
".",
"wait",
"(",
")",
"to_remove",
".",
"append",
"(",
"possibly_done",
")",
"# Remove them from the queue and stop checking",
"if",
"to_remove",
":",
"for",
"process_to_remove",
"in",
"to_remove",
":",
"processes",
".",
"queue",
".",
"remove",
"(",
"process_to_remove",
")",
"break",
"# Check again in a second if there weren't",
"time",
".",
"sleep",
"(",
"polling_freq",
")",
"add_to_queue",
"(",
"p",
")",
"# Wait for all the rest of the processes",
"while",
"not",
"processes",
".",
"empty",
"(",
")",
":",
"processes",
".",
"get",
"(",
")",
".",
"wait",
"(",
")",
"elapsed_time",
"=",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
"logger",
".",
"info",
"(",
"\"Ran %d commands in %0.4f seconds\"",
",",
"len",
"(",
"multiple_args_dict",
")",
",",
"elapsed_time",
")"
] |
b329b4dccd60fae41296816b8cbfe15d6ca07e67
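The helper above caps concurrency by hand with a polling `Queue`. A shorter sketch of the same bounded-parallelism idea using `concurrent.futures`, which handles the waiting for you; this is an alternative formulation, not the repo's implementation:

import subprocess
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import cpu_count

def run_all(multiple_args_dict, process_limit=None):
    # run each args list as a subprocess, redirecting stdout to its file
    def run_one(f, args):
        subprocess.run(args, stdout=f, check=True)
    with ThreadPoolExecutor(max_workers=process_limit or cpu_count()) as pool:
        futures = [pool.submit(run_one, f, args)
                   for f, args in multiple_args_dict.items()]
        for future in futures:
            future.result()  # re-raises the first failure, if any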
|
valid
|
BaseCommandlinePredictor._determine_supported_alleles
|
Try asking the commandline predictor (e.g. netMHCpan)
which alleles it supports.
|
mhctools/base_commandline_predictor.py
|
def _determine_supported_alleles(command, supported_allele_flag):
"""
Try asking the commandline predictor (e.g. netMHCpan)
which alleles it supports.
"""
try:
# convert to str since Python3 returns a `bytes` object
supported_alleles_output = check_output([
command, supported_allele_flag
])
supported_alleles_str = supported_alleles_output.decode("ascii", "ignore")
assert len(supported_alleles_str) > 0, \
'%s returned empty allele list' % command
supported_alleles = set([])
for line in supported_alleles_str.split("\n"):
line = line.strip()
if not line.startswith('#') and len(line) > 0:
try:
# We need to normalize these alleles (the output of the predictor
# when it lists its supported alleles) so that they are comparable with
# our own alleles.
supported_alleles.add(normalize_allele_name(line))
except AlleleParseError as error:
logger.info("Skipping allele %s: %s", line, error)
continue
if len(supported_alleles) == 0:
raise ValueError("Unable to determine supported alleles")
return supported_alleles
except Exception as e:
logger.exception(e)
raise SystemError("Failed to run %s %s. Possibly an incorrect executable version?" % (
command,
supported_allele_flag))
|
def _determine_supported_alleles(command, supported_allele_flag):
"""
Try asking the commandline predictor (e.g. netMHCpan)
which alleles it supports.
"""
try:
# convert to str since Python3 returns a `bytes` object
supported_alleles_output = check_output([
command, supported_allele_flag
])
supported_alleles_str = supported_alleles_output.decode("ascii", "ignore")
assert len(supported_alleles_str) > 0, \
'%s returned empty allele list' % command
supported_alleles = set([])
for line in supported_alleles_str.split("\n"):
line = line.strip()
if not line.startswith('#') and len(line) > 0:
try:
# We need to normalize these alleles (the output of the predictor
# when it lists its supported alleles) so that they are comparable with
# our own alleles.
supported_alleles.add(normalize_allele_name(line))
except AlleleParseError as error:
logger.info("Skipping allele %s: %s", line, error)
continue
if len(supported_alleles) == 0:
raise ValueError("Unable to determine supported alleles")
return supported_alleles
except Exception as e:
logger.exception(e)
raise SystemError("Failed to run %s %s. Possibly an incorrect executable version?" % (
command,
supported_allele_flag))
|
[
"Try",
"asking",
"the",
"commandline",
"predictor",
"(",
"e",
".",
"g",
".",
"netMHCpan",
")",
"which",
"alleles",
"it",
"supports",
"."
] |
openvax/mhctools
|
python
|
https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/base_commandline_predictor.py#L189-L221
|
[
"def",
"_determine_supported_alleles",
"(",
"command",
",",
"supported_allele_flag",
")",
":",
"try",
":",
"# convert to str since Python3 returns a `bytes` object",
"supported_alleles_output",
"=",
"check_output",
"(",
"[",
"command",
",",
"supported_allele_flag",
"]",
")",
"supported_alleles_str",
"=",
"supported_alleles_output",
".",
"decode",
"(",
"\"ascii\"",
",",
"\"ignore\"",
")",
"assert",
"len",
"(",
"supported_alleles_str",
")",
">",
"0",
",",
"'%s returned empty allele list'",
"%",
"command",
"supported_alleles",
"=",
"set",
"(",
"[",
"]",
")",
"for",
"line",
"in",
"supported_alleles_str",
".",
"split",
"(",
"\"\\n\"",
")",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"not",
"line",
".",
"startswith",
"(",
"'#'",
")",
"and",
"len",
"(",
"line",
")",
">",
"0",
":",
"try",
":",
"# We need to normalize these alleles (the output of the predictor",
"# when it lists its supported alleles) so that they are comparable with",
"# our own alleles.",
"supported_alleles",
".",
"add",
"(",
"normalize_allele_name",
"(",
"line",
")",
")",
"except",
"AlleleParseError",
"as",
"error",
":",
"logger",
".",
"info",
"(",
"\"Skipping allele %s: %s\"",
",",
"line",
",",
"error",
")",
"continue",
"if",
"len",
"(",
"supported_alleles",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"Unable to determine supported alleles\"",
")",
"return",
"supported_alleles",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"exception",
"(",
"e",
")",
"raise",
"SystemError",
"(",
"\"Failed to run %s %s. Possibly an incorrect executable version?\"",
"%",
"(",
"command",
",",
"supported_allele_flag",
")",
")"
] |
b329b4dccd60fae41296816b8cbfe15d6ca07e67
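The pattern here, asking a command-line tool what it supports and parsing the text it prints, generalizes beyond netMHCpan. A generic sketch, where `--list` is an illustrative flag and the comment/blank-line filtering mirrors the loop above:

from subprocess import check_output

def list_supported(command, flag="--list"):
    # capture the tool's output and keep non-empty, non-comment lines
    output = check_output([command, flag]).decode("ascii", "ignore")
    items = set()
    for raw in output.split("\n"):
        line = raw.strip()
        if line and not line.startswith("#"):
            items.add(line)
    return items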
|
valid
|
loads
|
Custom loads function with an object_hook and automatic decoding
Parameters
----------
json_data : str
The JSON data to decode
encoding : :obj:`str`, optional
The encoding of the bytestring
**kwargs
Keyword arguments passed to :func:`json.loads`
Returns
-------
:obj:`dict` or :obj:`list`
Decoded json data
|
peony/data_processing.py
|
def loads(json_data, encoding="utf-8", **kwargs):
"""
Custom loads function with an object_hook and automatic decoding
Parameters
----------
json_data : str
The JSON data to decode
encoding : :obj:`str`, optional
The encoding of the bytestring
**kwargs
Keyword arguments passed to :func:`json.loads`
Returns
-------
:obj:`dict` or :obj:`list`
Decoded json data
"""
if isinstance(json_data, bytes):
json_data = json_data.decode(encoding)
return json.loads(json_data, object_hook=JSONData, **kwargs)
|
def loads(json_data, encoding="utf-8", **kwargs):
"""
Custom loads function with an object_hook and automatic decoding
Parameters
----------
json_data : str
The JSON data to decode
encoding : :obj:`str`, optional
The encoding of the bytestring
**kwargs
Keyword arguments passed to :func:`json.loads`
Returns
-------
:obj:`dict` or :obj:`list`
Decoded json data
"""
if isinstance(json_data, bytes):
json_data = json_data.decode(encoding)
return json.loads(json_data, object_hook=JSONData, **kwargs)
|
[
"Custom",
"loads",
"function",
"with",
"an",
"object_hook",
"and",
"automatic",
"decoding"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/data_processing.py#L149-L172
|
[
"def",
"loads",
"(",
"json_data",
",",
"encoding",
"=",
"\"utf-8\"",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"json_data",
",",
"bytes",
")",
":",
"json_data",
"=",
"json_data",
".",
"decode",
"(",
"encoding",
")",
"return",
"json",
".",
"loads",
"(",
"json_data",
",",
"object_hook",
"=",
"JSONData",
",",
"*",
"*",
"kwargs",
")"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
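`object_hook` is applied by `json.loads` to every decoded JSON object, including nested ones, so the whole tree comes back as `JSONData`. A self-contained sketch with a minimal attribute-access dict standing in for the library's `JSONData`:

import json

class JSONData(dict):
    # minimal stand-in: a dict that also allows attribute access
    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key)

def loads(json_data, encoding="utf-8", **kwargs):
    if isinstance(json_data, bytes):
        json_data = json_data.decode(encoding)
    return json.loads(json_data, object_hook=JSONData, **kwargs)

data = loads(b'{"user": {"name": "peony"}}')
assert data.user.name == "peony"  # nested objects are converted too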
|
valid
|
read
|
read the data of the response
Parameters
----------
response : aiohttp.ClientResponse
response
loads : callable
json loads function
encoding : :obj:`str`, optional
    character encoding of the response; if set to None,
    aiohttp should guess the right encoding
Returns
-------
:obj:`bytes`, :obj:`str`, :obj:`dict` or :obj:`list`
the data returned depends on the response
|
peony/data_processing.py
|
async def read(response, loads=loads, encoding=None):
"""
read the data of the response
Parameters
----------
response : aiohttp.ClientResponse
response
loads : callable
json loads function
encoding : :obj:`str`, optional
    character encoding of the response; if set to None,
    aiohttp should guess the right encoding
Returns
-------
:obj:`bytes`, :obj:`str`, :obj:`dict` or :obj:`list`
the data returned depends on the response
"""
ctype = response.headers.get('Content-Type', "").lower()
try:
if "application/json" in ctype:
logger.info("decoding data as json")
return await response.json(encoding=encoding, loads=loads)
if "text" in ctype:
logger.info("decoding data as text")
return await response.text(encoding=encoding)
except (UnicodeDecodeError, json.JSONDecodeError) as exc:
data = await response.read()
raise exceptions.PeonyDecodeError(response=response,
data=data,
exception=exc)
return await response.read()
|
async def read(response, loads=loads, encoding=None):
"""
read the data of the response
Parameters
----------
response : aiohttp.ClientResponse
response
loads : callable
json loads function
encoding : :obj:`str`, optional
    character encoding of the response; if set to None,
    aiohttp should guess the right encoding
Returns
-------
:obj:`bytes`, :obj:`str`, :obj:`dict` or :obj:`list`
the data returned depends on the response
"""
ctype = response.headers.get('Content-Type', "").lower()
try:
if "application/json" in ctype:
logger.info("decoding data as json")
return await response.json(encoding=encoding, loads=loads)
if "text" in ctype:
logger.info("decoding data as text")
return await response.text(encoding=encoding)
except (UnicodeDecodeError, json.JSONDecodeError) as exc:
data = await response.read()
raise exceptions.PeonyDecodeError(response=response,
data=data,
exception=exc)
return await response.read()
|
[
"read",
"the",
"data",
"of",
"the",
"response"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/data_processing.py#L175-L211
|
[
"async",
"def",
"read",
"(",
"response",
",",
"loads",
"=",
"loads",
",",
"encoding",
"=",
"None",
")",
":",
"ctype",
"=",
"response",
".",
"headers",
".",
"get",
"(",
"'Content-Type'",
",",
"\"\"",
")",
".",
"lower",
"(",
")",
"try",
":",
"if",
"\"application/json\"",
"in",
"ctype",
":",
"logger",
".",
"info",
"(",
"\"decoding data as json\"",
")",
"return",
"await",
"response",
".",
"json",
"(",
"encoding",
"=",
"encoding",
",",
"loads",
"=",
"loads",
")",
"if",
"\"text\"",
"in",
"ctype",
":",
"logger",
".",
"info",
"(",
"\"decoding data as text\"",
")",
"return",
"await",
"response",
".",
"text",
"(",
"encoding",
"=",
"encoding",
")",
"except",
"(",
"UnicodeDecodeError",
",",
"json",
".",
"JSONDecodeError",
")",
"as",
"exc",
":",
"data",
"=",
"await",
"response",
".",
"read",
"(",
")",
"raise",
"exceptions",
".",
"PeonyDecodeError",
"(",
"response",
"=",
"response",
",",
"data",
"=",
"data",
",",
"exception",
"=",
"exc",
")",
"return",
"await",
"response",
".",
"read",
"(",
")"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
|
valid
|
doc
|
Find the message shown when someone calls the help command
Parameters
----------
func : function
the function
Returns
-------
str
The help message for this command
|
peony/commands/utils.py
|
def doc(func):
"""
Find the message shown when someone calls the help command
Parameters
----------
func : function
the function
Returns
-------
str
The help message for this command
"""
stripped_chars = " \t"
    if func.__doc__:
docstring = func.__doc__.lstrip(" \n\t")
if "\n" in docstring:
i = docstring.index("\n")
return docstring[:i].rstrip(stripped_chars)
elif docstring:
return docstring.rstrip(stripped_chars)
return ""
|
def doc(func):
"""
Find the message shown when someone calls the help command
Parameters
----------
func : function
the function
Returns
-------
str
The help message for this command
"""
stripped_chars = " \t"
    if func.__doc__:
docstring = func.__doc__.lstrip(" \n\t")
if "\n" in docstring:
i = docstring.index("\n")
return docstring[:i].rstrip(stripped_chars)
elif docstring:
return docstring.rstrip(stripped_chars)
return ""
|
[
"Find",
"the",
"message",
"shown",
"when",
"someone",
"calls",
"the",
"help",
"command"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/commands/utils.py#L4-L28
|
[
"def",
"doc",
"(",
"func",
")",
":",
"stripped_chars",
"=",
"\" \\t\"",
"if",
"hasattr",
"(",
"func",
",",
"'__doc__'",
")",
":",
"docstring",
"=",
"func",
".",
"__doc__",
".",
"lstrip",
"(",
"\" \\n\\t\"",
")",
"if",
"\"\\n\"",
"in",
"docstring",
":",
"i",
"=",
"docstring",
".",
"index",
"(",
"\"\\n\"",
")",
"return",
"docstring",
"[",
":",
"i",
"]",
".",
"rstrip",
"(",
"stripped_chars",
")",
"elif",
"docstring",
":",
"return",
"docstring",
".",
"rstrip",
"(",
"stripped_chars",
")",
"return",
"\"\""
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
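A sketch of the same first-line extraction as a standalone helper; it returns "" for functions without a docstring. The standard library's `inspect.getdoc` offers similar normalization:

def first_doc_line(func):
    # first line of the docstring, or "" when there is no docstring
    docstring = (func.__doc__ or "").lstrip(" \n\t")
    return docstring.split("\n", 1)[0].rstrip(" \t")

def example():
    """Say hello.

    Longer description.
    """

assert first_doc_line(example) == "Say hello."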
|
valid
|
permission_check
|
Check the permissions of the user requesting a command
Parameters
----------
data : dict
message data
command_permissions : dict
    permissions of the command, contains all the roles as keys and users
with these permissions as values
command : function
the command that is run
permissions : tuple or list
a list of permissions for the command
Returns
-------
bool
True if the user has the right permissions, False otherwise
|
peony/commands/utils.py
|
def permission_check(data, command_permissions,
command=None, permissions=None):
"""
Check the permissions of the user requesting a command
Parameters
----------
data : dict
message data
command_permissions : dict
    permissions of the command, contains all the roles as keys and users
with these permissions as values
command : function
the command that is run
permissions : tuple or list
a list of permissions for the command
Returns
-------
bool
True if the user has the right permissions, False otherwise
"""
if permissions:
pass
elif command:
if hasattr(command, 'permissions'):
permissions = command.permissions
else:
return True # true if no permission is required
else:
msg = "{name} must be called with command or permissions argument"
raise RuntimeError(msg.format(name="_permission_check"))
return any(data['sender']['id'] in command_permissions[permission]
for permission in permissions
if permission in command_permissions)
|
def permission_check(data, command_permissions,
command=None, permissions=None):
"""
Check the permissions of the user requesting a command
Parameters
----------
data : dict
message data
command_permissions : dict
    permissions of the command, contains all the roles as keys and users
with these permissions as values
command : function
the command that is run
permissions : tuple or list
a list of permissions for the command
Returns
-------
bool
True if the user has the right permissions, False otherwise
"""
if permissions:
pass
elif command:
if hasattr(command, 'permissions'):
permissions = command.permissions
else:
return True # true if no permission is required
else:
msg = "{name} must be called with command or permissions argument"
raise RuntimeError(msg.format(name="_permission_check"))
return any(data['sender']['id'] in command_permissions[permission]
for permission in permissions
if permission in command_permissions)
|
[
"Check",
"the",
"permissions",
"of",
"the",
"user",
"requesting",
"a",
"command"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/commands/utils.py#L31-L66
|
[
"def",
"permission_check",
"(",
"data",
",",
"command_permissions",
",",
"command",
"=",
"None",
",",
"permissions",
"=",
"None",
")",
":",
"if",
"permissions",
":",
"pass",
"elif",
"command",
":",
"if",
"hasattr",
"(",
"command",
",",
"'permissions'",
")",
":",
"permissions",
"=",
"command",
".",
"permissions",
"else",
":",
"return",
"True",
"# true if no permission is required",
"else",
":",
"msg",
"=",
"\"{name} must be called with command or permissions argument\"",
"raise",
"RuntimeError",
"(",
"msg",
".",
"format",
"(",
"name",
"=",
"\"_permission_check\"",
")",
")",
"return",
"any",
"(",
"data",
"[",
"'sender'",
"]",
"[",
"'id'",
"]",
"in",
"command_permissions",
"[",
"permission",
"]",
"for",
"permission",
"in",
"permissions",
"if",
"permission",
"in",
"command_permissions",
")"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
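The core of the check is a membership test: the sender's id must appear under at least one of the required roles. A self-contained sketch with illustrative permission data:

command_permissions = {'admin': [1], 'user': [1, 2]}  # illustrative role -> user ids
data = {'sender': {'id': 2}}

def allowed(data, command_permissions, permissions):
    # core of the check above: is the sender listed under any required role?
    return any(data['sender']['id'] in command_permissions[p]
               for p in permissions if p in command_permissions)

assert allowed(data, command_permissions, ['user'])
assert not allowed(data, command_permissions, ['admin'])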
|
valid
|
main
|
Script to make pMHC binding predictions from amino acid sequences.
Usage example:
mhctools
--sequence SFFPIQQQQQAAALLLI \
--sequence SILQQQAQAQQAQAASSSC \
--extract-subsequences \
--mhc-predictor netmhc \
--mhc-alleles HLA-A0201 H2-Db \
--output-csv epitope.csv
|
mhctools/cli/script.py
|
def main(args_list=None):
"""
Script to make pMHC binding predictions from amino acid sequences.
Usage example:
mhctools
--sequence SFFPIQQQQQAAALLLI \
--sequence SILQQQAQAQQAQAASSSC \
--extract-subsequences \
--mhc-predictor netmhc \
--mhc-alleles HLA-A0201 H2-Db \
--output-csv epitope.csv
"""
args = parse_args(args_list)
binding_predictions = run_predictor(args)
df = binding_predictions.to_dataframe()
logger.info('\n%s', df)
if args.output_csv:
df.to_csv(args.output_csv, index=False)
print("Wrote: %s" % args.output_csv)
|
def main(args_list=None):
"""
Script to make pMHC binding predictions from amino acid sequences.
Usage example:
mhctools
--sequence SFFPIQQQQQAAALLLI \
--sequence SILQQQAQAQQAQAASSSC \
--extract-subsequences \
--mhc-predictor netmhc \
--mhc-alleles HLA-A0201 H2-Db \
--output-csv epitope.csv
"""
args = parse_args(args_list)
binding_predictions = run_predictor(args)
df = binding_predictions.to_dataframe()
logger.info('\n%s', df)
if args.output_csv:
df.to_csv(args.output_csv, index=False)
print("Wrote: %s" % args.output_csv)
|
[
"Script",
"to",
"make",
"pMHC",
"binding",
"predictions",
"from",
"amino",
"acid",
"sequences",
"."
] |
openvax/mhctools
|
python
|
https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/cli/script.py#L100-L120
|
[
"def",
"main",
"(",
"args_list",
"=",
"None",
")",
":",
"args",
"=",
"parse_args",
"(",
"args_list",
")",
"binding_predictions",
"=",
"run_predictor",
"(",
"args",
")",
"df",
"=",
"binding_predictions",
".",
"to_dataframe",
"(",
")",
"logger",
".",
"info",
"(",
"'\\n%s'",
",",
"df",
")",
"if",
"args",
".",
"output_csv",
":",
"df",
".",
"to_csv",
"(",
"args",
".",
"output_csv",
",",
"index",
"=",
"False",
")",
"print",
"(",
"\"Wrote: %s\"",
"%",
"args",
".",
"output_csv",
")"
] |
b329b4dccd60fae41296816b8cbfe15d6ca07e67
|
valid
|
NetMHCIIpan._prepare_drb_allele_name
|
Assume that we're dealing with a human DRB allele
which NetMHCIIpan treats differently because there is
little population diversity in the DR-alpha gene
|
mhctools/netmhcii_pan.py
|
def _prepare_drb_allele_name(self, parsed_beta_allele):
"""
Assume that we're dealing with a human DRB allele
which NetMHCIIpan treats differently because there is
little population diversity in the DR-alpha gene
"""
if "DRB" not in parsed_beta_allele.gene:
raise ValueError("Unexpected allele %s" % parsed_beta_allele)
return "%s_%s%s" % (
parsed_beta_allele.gene,
parsed_beta_allele.allele_family,
parsed_beta_allele.allele_code)
|
def _prepare_drb_allele_name(self, parsed_beta_allele):
"""
Assume that we're dealing with a human DRB allele
which NetMHCIIpan treats differently because there is
little population diversity in the DR-alpha gene
"""
if "DRB" not in parsed_beta_allele.gene:
raise ValueError("Unexpected allele %s" % parsed_beta_allele)
return "%s_%s%s" % (
parsed_beta_allele.gene,
parsed_beta_allele.allele_family,
parsed_beta_allele.allele_code)
|
[
"Assume",
"that",
"we",
"re",
"dealing",
"with",
"a",
"human",
"DRB",
"allele",
"which",
"NetMHCIIpan",
"treats",
"differently",
"because",
"there",
"is",
"little",
"population",
"diversity",
"in",
"the",
"DR",
"-",
"alpha",
"gene"
] |
openvax/mhctools
|
python
|
https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/netmhcii_pan.py#L48-L59
|
[
"def",
"_prepare_drb_allele_name",
"(",
"self",
",",
"parsed_beta_allele",
")",
":",
"if",
"\"DRB\"",
"not",
"in",
"parsed_beta_allele",
".",
"gene",
":",
"raise",
"ValueError",
"(",
"\"Unexpected allele %s\"",
"%",
"parsed_beta_allele",
")",
"return",
"\"%s_%s%s\"",
"%",
"(",
"parsed_beta_allele",
".",
"gene",
",",
"parsed_beta_allele",
".",
"allele_family",
",",
"parsed_beta_allele",
".",
"allele_code",
")"
] |
b329b4dccd60fae41296816b8cbfe15d6ca07e67
|
valid
|
NetMHCIIpan.prepare_allele_name
|
netMHCIIpan has some unique requirements for allele formats,
expecting the following forms:
- DRB1_0101 (for non-alpha/beta pairs)
- HLA-DQA10501-DQB10636 (for alpha and beta pairs)
Other than human class II alleles, the only other alleles that
netMHCIIpan accepts are the following mouse alleles:
- H-2-IAb
- H-2-IAd
|
mhctools/netmhcii_pan.py
|
def prepare_allele_name(self, allele_name):
"""
netMHCIIpan has some unique requirements for allele formats,
expecting the following forms:
- DRB1_0101 (for non-alpha/beta pairs)
- HLA-DQA10501-DQB10636 (for alpha and beta pairs)
Other than human class II alleles, the only other alleles that
netMHCIIpan accepts are the following mouse alleles:
- H-2-IAb
- H-2-IAd
"""
parsed_alleles = parse_classi_or_classii_allele_name(allele_name)
if len(parsed_alleles) == 1:
allele = parsed_alleles[0]
if allele.species == "H-2":
return "%s-%s%s" % (
allele.species,
allele.gene,
allele.allele_code)
return self._prepare_drb_allele_name(allele)
else:
alpha, beta = parsed_alleles
if "DRA" in alpha.gene:
return self._prepare_drb_allele_name(beta)
return "HLA-%s%s%s-%s%s%s" % (
alpha.gene,
alpha.allele_family,
alpha.allele_code,
beta.gene,
beta.allele_family,
beta.allele_code)
|
def prepare_allele_name(self, allele_name):
"""
netMHCIIpan has some unique requirements for allele formats,
expecting the following forms:
- DRB1_0101 (for non-alpha/beta pairs)
- HLA-DQA10501-DQB10636 (for alpha and beta pairs)
Other than human class II alleles, the only other alleles that
netMHCIIpan accepts are the following mouse alleles:
- H-2-IAb
- H-2-IAd
"""
parsed_alleles = parse_classi_or_classii_allele_name(allele_name)
if len(parsed_alleles) == 1:
allele = parsed_alleles[0]
if allele.species == "H-2":
return "%s-%s%s" % (
allele.species,
allele.gene,
allele.allele_code)
return self._prepare_drb_allele_name(allele)
else:
alpha, beta = parsed_alleles
if "DRA" in alpha.gene:
return self._prepare_drb_allele_name(beta)
return "HLA-%s%s%s-%s%s%s" % (
alpha.gene,
alpha.allele_family,
alpha.allele_code,
beta.gene,
beta.allele_family,
beta.allele_code)
|
[
"netMHCIIpan",
"has",
"some",
"unique",
"requirements",
"for",
"allele",
"formats",
"expecting",
"the",
"following",
"forms",
":",
"-",
"DRB1_0101",
"(",
"for",
"non",
"-",
"alpha",
"/",
"beta",
"pairs",
")",
"-",
"HLA",
"-",
"DQA10501",
"-",
"DQB10636",
"(",
"for",
"alpha",
"and",
"beta",
"pairs",
")"
] |
openvax/mhctools
|
python
|
https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/netmhcii_pan.py#L61-L93
|
[
"def",
"prepare_allele_name",
"(",
"self",
",",
"allele_name",
")",
":",
"parsed_alleles",
"=",
"parse_classi_or_classii_allele_name",
"(",
"allele_name",
")",
"if",
"len",
"(",
"parsed_alleles",
")",
"==",
"1",
":",
"allele",
"=",
"parsed_alleles",
"[",
"0",
"]",
"if",
"allele",
".",
"species",
"==",
"\"H-2\"",
":",
"return",
"\"%s-%s%s\"",
"%",
"(",
"allele",
".",
"species",
",",
"allele",
".",
"gene",
",",
"allele",
".",
"allele_code",
")",
"return",
"self",
".",
"_prepare_drb_allele_name",
"(",
"allele",
")",
"else",
":",
"alpha",
",",
"beta",
"=",
"parsed_alleles",
"if",
"\"DRA\"",
"in",
"alpha",
".",
"gene",
":",
"return",
"self",
".",
"_prepare_drb_allele_name",
"(",
"beta",
")",
"return",
"\"HLA-%s%s%s-%s%s%s\"",
"%",
"(",
"alpha",
".",
"gene",
",",
"alpha",
".",
"allele_family",
",",
"alpha",
".",
"allele_code",
",",
"beta",
".",
"gene",
",",
"beta",
".",
"allele_family",
",",
"beta",
".",
"allele_code",
")"
] |
b329b4dccd60fae41296816b8cbfe15d6ca07e67
|
valid
|
get_error
|
return the error if there is a corresponding exception
|
peony/exceptions.py
|
def get_error(data):
""" return the error if there is a corresponding exception """
if isinstance(data, dict):
if 'errors' in data:
error = data['errors'][0]
else:
error = data.get('error', None)
if isinstance(error, dict):
if error.get('code') in errors:
return error
|
def get_error(data):
""" return the error if there is a corresponding exception """
if isinstance(data, dict):
if 'errors' in data:
error = data['errors'][0]
else:
error = data.get('error', None)
if isinstance(error, dict):
if error.get('code') in errors:
return error
|
[
"return",
"the",
"error",
"if",
"there",
"is",
"a",
"corresponding",
"exception"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/exceptions.py#L8-L18
|
[
"def",
"get_error",
"(",
"data",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"if",
"'errors'",
"in",
"data",
":",
"error",
"=",
"data",
"[",
"'errors'",
"]",
"[",
"0",
"]",
"else",
":",
"error",
"=",
"data",
".",
"get",
"(",
"'error'",
",",
"None",
")",
"if",
"isinstance",
"(",
"error",
",",
"dict",
")",
":",
"if",
"error",
".",
"get",
"(",
"'code'",
")",
"in",
"errors",
":",
"return",
"error"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
|
valid
|
throw
|
Get the response data if possible and raise an exception
|
peony/exceptions.py
|
async def throw(response, loads=None, encoding=None, **kwargs):
""" Get the response data if possible and raise an exception """
if loads is None:
loads = data_processing.loads
data = await data_processing.read(response, loads=loads,
encoding=encoding)
error = get_error(data)
if error is not None:
exception = errors[error['code']]
raise exception(response=response, error=error, data=data, **kwargs)
if response.status in statuses:
exception = statuses[response.status]
raise exception(response=response, data=data, **kwargs)
# raise PeonyException if no specific exception was found
raise PeonyException(response=response, data=data, **kwargs)
|
async def throw(response, loads=None, encoding=None, **kwargs):
""" Get the response data if possible and raise an exception """
if loads is None:
loads = data_processing.loads
data = await data_processing.read(response, loads=loads,
encoding=encoding)
error = get_error(data)
if error is not None:
exception = errors[error['code']]
raise exception(response=response, error=error, data=data, **kwargs)
if response.status in statuses:
exception = statuses[response.status]
raise exception(response=response, data=data, **kwargs)
# raise PeonyException if no specific exception was found
raise PeonyException(response=response, data=data, **kwargs)
|
[
"Get",
"the",
"response",
"data",
"if",
"possible",
"and",
"raise",
"an",
"exception"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/exceptions.py#L21-L39
|
[
"async",
"def",
"throw",
"(",
"response",
",",
"loads",
"=",
"None",
",",
"encoding",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"loads",
"is",
"None",
":",
"loads",
"=",
"data_processing",
".",
"loads",
"data",
"=",
"await",
"data_processing",
".",
"read",
"(",
"response",
",",
"loads",
"=",
"loads",
",",
"encoding",
"=",
"encoding",
")",
"error",
"=",
"get_error",
"(",
"data",
")",
"if",
"error",
"is",
"not",
"None",
":",
"exception",
"=",
"errors",
"[",
"error",
"[",
"'code'",
"]",
"]",
"raise",
"exception",
"(",
"response",
"=",
"response",
",",
"error",
"=",
"error",
",",
"data",
"=",
"data",
",",
"*",
"*",
"kwargs",
")",
"if",
"response",
".",
"status",
"in",
"statuses",
":",
"exception",
"=",
"statuses",
"[",
"response",
".",
"status",
"]",
"raise",
"exception",
"(",
"response",
"=",
"response",
",",
"data",
"=",
"data",
",",
"*",
"*",
"kwargs",
")",
"# raise PeonyException if no specific exception was found",
"raise",
"PeonyException",
"(",
"response",
"=",
"response",
",",
"data",
"=",
"data",
",",
"*",
"*",
"kwargs",
")"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
|
valid
|
ErrorDict.code
|
Decorator to associate a code to an exception
|
peony/exceptions.py
|
def code(self, code):
""" Decorator to associate a code to an exception """
def decorator(exception):
self[code] = exception
return exception
return decorator
|
def code(self, code):
""" Decorator to associate a code to an exception """
def decorator(exception):
self[code] = exception
return exception
return decorator
|
[
"Decorator",
"to",
"associate",
"a",
"code",
"to",
"an",
"exception"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/exceptions.py#L101-L107
|
[
"def",
"code",
"(",
"self",
",",
"code",
")",
":",
"def",
"decorator",
"(",
"exception",
")",
":",
"self",
"[",
"code",
"]",
"=",
"exception",
"return",
"exception",
"return",
"decorator"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
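This is the registry-decorator pattern: the dict maps error codes to exception classes, and the decorator does the registration at class-definition time. A self-contained sketch; the code 88 is used here only for illustration:

class ErrorDict(dict):
    # registry mirroring the decorator above
    def code(self, code):
        def decorator(exception):
            self[code] = exception
            return exception
        return decorator

errors = ErrorDict()

@errors.code(88)
class RateLimitExceeded(Exception):
    pass

assert errors[88] is RateLimitExceeded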
|
valid
|
PeonyHeaders.prepare_request
|
prepare all the arguments for the request
Parameters
----------
method : str
HTTP method used by the request
url : str
The url to request
headers : dict, optional
    Additional headers
proxy : str
proxy of the request
skip_params : bool
Don't use the parameters to sign the request
Returns
-------
dict
Parameters of the request correctly formatted
|
peony/oauth.py
|
async def prepare_request(self, method, url,
headers=None,
skip_params=False,
proxy=None,
**kwargs):
"""
prepare all the arguments for the request
Parameters
----------
method : str
HTTP method used by the request
url : str
The url to request
headers : dict, optional
    Additional headers
proxy : str
proxy of the request
skip_params : bool
Don't use the parameters to sign the request
Returns
-------
dict
Parameters of the request correctly formatted
"""
if method.lower() == "post":
key = 'data'
else:
key = 'params'
if key in kwargs and not skip_params:
request_params = {key: kwargs.pop(key)}
else:
request_params = {}
request_params.update(dict(method=method.upper(), url=url))
coro = self.sign(**request_params, skip_params=skip_params,
headers=headers)
request_params['headers'] = await utils.execute(coro)
request_params['proxy'] = proxy
kwargs.update(request_params)
return kwargs
|
async def prepare_request(self, method, url,
headers=None,
skip_params=False,
proxy=None,
**kwargs):
"""
prepare all the arguments for the request
Parameters
----------
method : str
HTTP method used by the request
url : str
The url to request
headers : dict, optional
    Additional headers
proxy : str
proxy of the request
skip_params : bool
Don't use the parameters to sign the request
Returns
-------
dict
Parameters of the request correctly formatted
"""
if method.lower() == "post":
key = 'data'
else:
key = 'params'
if key in kwargs and not skip_params:
request_params = {key: kwargs.pop(key)}
else:
request_params = {}
request_params.update(dict(method=method.upper(), url=url))
coro = self.sign(**request_params, skip_params=skip_params,
headers=headers)
request_params['headers'] = await utils.execute(coro)
request_params['proxy'] = proxy
kwargs.update(request_params)
return kwargs
|
[
"prepare",
"all",
"the",
"arguments",
"for",
"the",
"request"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/oauth.py#L61-L107
|
[
"async",
"def",
"prepare_request",
"(",
"self",
",",
"method",
",",
"url",
",",
"headers",
"=",
"None",
",",
"skip_params",
"=",
"False",
",",
"proxy",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"method",
".",
"lower",
"(",
")",
"==",
"\"post\"",
":",
"key",
"=",
"'data'",
"else",
":",
"key",
"=",
"'params'",
"if",
"key",
"in",
"kwargs",
"and",
"not",
"skip_params",
":",
"request_params",
"=",
"{",
"key",
":",
"kwargs",
".",
"pop",
"(",
"key",
")",
"}",
"else",
":",
"request_params",
"=",
"{",
"}",
"request_params",
".",
"update",
"(",
"dict",
"(",
"method",
"=",
"method",
".",
"upper",
"(",
")",
",",
"url",
"=",
"url",
")",
")",
"coro",
"=",
"self",
".",
"sign",
"(",
"*",
"*",
"request_params",
",",
"skip_params",
"=",
"skip_params",
",",
"headers",
"=",
"headers",
")",
"request_params",
"[",
"'headers'",
"]",
"=",
"await",
"utils",
".",
"execute",
"(",
"coro",
")",
"request_params",
"[",
"'proxy'",
"]",
"=",
"proxy",
"kwargs",
".",
"update",
"(",
"request_params",
")",
"return",
"kwargs"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
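The method-based dispatch above puts the payload under `data` for POST requests (request body) and under `params` otherwise (query string). A tiny sketch of just that dispatch, with hypothetical helper and argument names:

def request_payload(method, **kwargs):
    # POST bodies go under 'data', everything else under 'params'
    key = 'data' if method.lower() == 'post' else 'params'
    return {key: kwargs} if kwargs else {}

assert request_payload('POST', status='hello') == {'data': {'status': 'hello'}}
assert request_payload('GET', q='peony') == {'params': {'q': 'peony'}}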
|
valid
|
PeonyHeaders._user_headers
|
Make sure the user doesn't override the Authorization header
|
peony/oauth.py
|
def _user_headers(self, headers=None):
""" Make sure the user doesn't override the Authorization header """
h = self.copy()
if headers is not None:
keys = set(headers.keys())
if h.get('Authorization', False):
keys -= {'Authorization'}
for key in keys:
h[key] = headers[key]
return h
|
def _user_headers(self, headers=None):
""" Make sure the user doesn't override the Authorization header """
h = self.copy()
if headers is not None:
keys = set(headers.keys())
if h.get('Authorization', False):
keys -= {'Authorization'}
for key in keys:
h[key] = headers[key]
return h
|
[
"Make",
"sure",
"the",
"user",
"doesn",
"t",
"override",
"the",
"Authorization",
"header"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/oauth.py#L109-L121
|
[
"def",
"_user_headers",
"(",
"self",
",",
"headers",
"=",
"None",
")",
":",
"h",
"=",
"self",
".",
"copy",
"(",
")",
"if",
"headers",
"is",
"not",
"None",
":",
"keys",
"=",
"set",
"(",
"headers",
".",
"keys",
"(",
")",
")",
"if",
"h",
".",
"get",
"(",
"'Authorization'",
",",
"False",
")",
":",
"keys",
"-=",
"{",
"'Authorization'",
"}",
"for",
"key",
"in",
"keys",
":",
"h",
"[",
"key",
"]",
"=",
"headers",
"[",
"key",
"]",
"return",
"h"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
|
valid
|
process_keys
|
Raise error for keys that are not strings
and add the prefix if it is missing
|
peony/commands/commands.py
|
def process_keys(func):
"""
Raise error for keys that are not strings
and add the prefix if it is missing
"""
@wraps(func)
def decorated(self, k, *args):
if not isinstance(k, str):
msg = "%s: key must be a string" % self.__class__.__name__
raise ValueError(msg)
if not k.startswith(self.prefix):
k = self.prefix + k
return func(self, k, *args)
return decorated
|
def process_keys(func):
"""
Raise error for keys that are not strings
and add the prefix if it is missing
"""
@wraps(func)
def decorated(self, k, *args):
if not isinstance(k, str):
msg = "%s: key must be a string" % self.__class__.__name__
raise ValueError(msg)
if not k.startswith(self.prefix):
k = self.prefix + k
return func(self, k, *args)
return decorated
|
[
"Raise",
"error",
"for",
"keys",
"that",
"are",
"not",
"strings",
"and",
"add",
"the",
"prefix",
"if",
"it",
"is",
"missing"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/commands/commands.py#L11-L28
|
[
"def",
"process_keys",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"decorated",
"(",
"self",
",",
"k",
",",
"*",
"args",
")",
":",
"if",
"not",
"isinstance",
"(",
"k",
",",
"str",
")",
":",
"msg",
"=",
"\"%s: key must be a string\"",
"%",
"self",
".",
"__class__",
".",
"__name__",
"raise",
"ValueError",
"(",
"msg",
")",
"if",
"not",
"k",
".",
"startswith",
"(",
"self",
".",
"prefix",
")",
":",
"k",
"=",
"self",
".",
"prefix",
"+",
"k",
"return",
"func",
"(",
"self",
",",
"k",
",",
"*",
"args",
")",
"return",
"decorated"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
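A self-contained sketch of `process_keys` in use: decorating dict special methods so that lookups and assignments accept keys with or without the prefix:

from functools import wraps

def process_keys(func):
    @wraps(func)
    def decorated(self, k, *args):
        if not isinstance(k, str):
            raise ValueError("%s: key must be a string" % self.__class__.__name__)
        if not k.startswith(self.prefix):
            k = self.prefix + k
        return func(self, k, *args)
    return decorated

class Commands(dict):
    prefix = '/'

    @process_keys
    def __contains__(self, k):
        return dict.__contains__(self, k)

    @process_keys
    def __setitem__(self, k, v):
        dict.__setitem__(self, k, v)

cmds = Commands()
cmds['help'] = lambda data: "help text"
assert '/help' in cmds and 'help' in cmds  # both spellings resolve to '/help'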
|
valid
|
Functions._get
|
Analyze the text to get the right function
Parameters
----------
text : str
The text that could call a function
|
peony/commands/commands.py
|
def _get(self, text):
"""
Analyze the text to get the right function
Parameters
----------
text : str
The text that could call a function
"""
if self.strict:
match = self.prog.match(text)
if match:
cmd = match.group()
if cmd in self:
return cmd
else:
words = self.prog.findall(text)
for word in words:
if word in self:
return word
|
def _get(self, text):
"""
Analyze the text to get the right function
Parameters
----------
text : str
The text that could call a function
"""
if self.strict:
match = self.prog.match(text)
if match:
cmd = match.group()
if cmd in self:
return cmd
else:
words = self.prog.findall(text)
for word in words:
if word in self:
return word
|
[
"Analyze",
"the",
"text",
"to",
"get",
"the",
"right",
"function"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/commands/commands.py#L77-L96
|
[
"def",
"_get",
"(",
"self",
",",
"text",
")",
":",
"if",
"self",
".",
"strict",
":",
"match",
"=",
"self",
".",
"prog",
".",
"match",
"(",
"text",
")",
"if",
"match",
":",
"cmd",
"=",
"match",
".",
"group",
"(",
")",
"if",
"cmd",
"in",
"self",
":",
"return",
"cmd",
"else",
":",
"words",
"=",
"self",
".",
"prog",
".",
"findall",
"(",
"text",
")",
"for",
"word",
"in",
"words",
":",
"if",
"word",
"in",
"self",
":",
"return",
"word"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
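The strict/non-strict split above is `re.match` (the command must start the text) versus `re.findall` (the command may appear anywhere). A self-contained sketch with an illustrative '/'-prefixed command pattern:

import re

prog = re.compile(r'/\w+')  # illustrative command pattern

def find_command(text, strict=False):
    if strict:
        match = prog.match(text)  # only at the start of the text
        return match.group() if match else None
    words = prog.findall(text)  # anywhere in the text
    return words[0] if words else None

assert find_command('/help me', strict=True) == '/help'
assert find_command('please /help', strict=True) is None
assert find_command('please /help') == '/help'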
|
valid
|
Functions.run
|
run the function you want
|
peony/commands/commands.py
|
async def run(self, *args, data):
""" run the function you want """
cmd = self._get(data.text)
try:
if cmd is not None:
command = self[cmd](*args, data=data)
return await peony.utils.execute(command)
    except Exception:
fmt = "Error occurred while running function {cmd}:"
peony.utils.log_error(fmt.format(cmd=cmd))
|
async def run(self, *args, data):
""" run the function you want """
cmd = self._get(data.text)
try:
if cmd is not None:
command = self[cmd](*args, data=data)
return await peony.utils.execute(command)
    except Exception:
fmt = "Error occurred while running function {cmd}:"
peony.utils.log_error(fmt.format(cmd=cmd))
|
[
"run",
"the",
"function",
"you",
"want"
] |
odrling/peony-twitter
|
python
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/commands/commands.py#L98-L109
|
[
"async",
"def",
"run",
"(",
"self",
",",
"*",
"args",
",",
"data",
")",
":",
"cmd",
"=",
"self",
".",
"_get",
"(",
"data",
".",
"text",
")",
"try",
":",
"if",
"cmd",
"is",
"not",
"None",
":",
"command",
"=",
"self",
"[",
"cmd",
"]",
"(",
"*",
"args",
",",
"data",
"=",
"data",
")",
"return",
"await",
"peony",
".",
"utils",
".",
"execute",
"(",
"command",
")",
"except",
":",
"fmt",
"=",
"\"Error occurred while running function {cmd}:\"",
"peony",
".",
"utils",
".",
"log_error",
"(",
"fmt",
".",
"format",
"(",
"cmd",
"=",
"cmd",
")",
")"
] |
967f98e16e1889389540f2e6acbf7cc7a1a80203
|
valid
|
get_cartesian
|
Given a radius and theta, return the cartesian (x, y) coordinates.
|
hiveplot/hiveplot.py
|
def get_cartesian(r, theta):
"""
Given a radius and theta, return the cartesian (x, y) coordinates.
"""
x = r*np.sin(theta)
y = r*np.cos(theta)
return x, y
|
def get_cartesian(r, theta):
"""
Given a radius and theta, return the cartesian (x, y) coordinates.
"""
x = r*np.sin(theta)
y = r*np.cos(theta)
return x, y
|
[
"Given",
"a",
"radius",
"and",
"theta",
"return",
"the",
"cartesian",
"(",
"x",
"y",
")",
"coordinates",
"."
] |
ericmjl/hiveplot
|
python
|
https://github.com/ericmjl/hiveplot/blob/f465a7118b7f005c83ab054d400deb02bd9f7410/hiveplot/hiveplot.py#L361-L368
|
[
"def",
"get_cartesian",
"(",
"r",
",",
"theta",
")",
":",
"x",
"=",
"r",
"*",
"np",
".",
"sin",
"(",
"theta",
")",
"y",
"=",
"r",
"*",
"np",
".",
"cos",
"(",
"theta",
")",
"return",
"x",
",",
"y"
] |
f465a7118b7f005c83ab054d400deb02bd9f7410
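Note the convention: with x = r*sin(theta) and y = r*cos(theta), theta is measured from the positive y-axis (clockwise), not from the x-axis as in the usual polar convention. A quick check:

import numpy as np

def get_cartesian(r, theta):
    return r * np.sin(theta), r * np.cos(theta)

assert np.allclose(get_cartesian(1, 0), (0, 1))          # theta = 0 -> +y axis
assert np.allclose(get_cartesian(1, np.pi / 2), (1, 0))  # theta = pi/2 -> +x axis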
|
valid
|
HivePlot.simplified_edges
|
A generator for getting all of the edges without consuming extra
memory.
|
hiveplot/hiveplot.py
|
def simplified_edges(self):
"""
A generator for getting all of the edges without consuming extra
memory.
"""
for group, edgelist in self.edges.items():
for u, v, d in edgelist:
yield (u, v)
|
def simplified_edges(self):
"""
A generator for getting all of the edges without consuming extra
memory.
"""
for group, edgelist in self.edges.items():
for u, v, d in edgelist:
yield (u, v)
|
[
"A",
"generator",
"for",
"getting",
"all",
"of",
"the",
"edges",
"without",
"consuming",
"extra",
"memory",
"."
] |
ericmjl/hiveplot
|
python
|
https://github.com/ericmjl/hiveplot/blob/f465a7118b7f005c83ab054d400deb02bd9f7410/hiveplot/hiveplot.py#L96-L103
|
[
"def",
"simplified_edges",
"(",
"self",
")",
":",
"for",
"group",
",",
"edgelist",
"in",
"self",
".",
"edges",
".",
"items",
"(",
")",
":",
"for",
"u",
",",
"v",
",",
"d",
"in",
"edgelist",
":",
"yield",
"(",
"u",
",",
"v",
")"
] |
f465a7118b7f005c83ab054d400deb02bd9f7410
|
valid
|
HivePlot.initialize_major_angle
|
Computes the major angle: 2pi radians / number of groups.
|
hiveplot/hiveplot.py
|
def initialize_major_angle(self):
"""
Computes the major angle: 2pi radians / number of groups.
"""
num_groups = len(self.nodes.keys())
self.major_angle = 2 * np.pi / num_groups
|
def initialize_major_angle(self):
"""
Computes the major angle: 2pi radians / number of groups.
"""
num_groups = len(self.nodes.keys())
self.major_angle = 2 * np.pi / num_groups
|
[
"Computes",
"the",
"major",
"angle",
":",
"2pi",
"radians",
"/",
"number",
"of",
"groups",
"."
] |
ericmjl/hiveplot
|
python
|
https://github.com/ericmjl/hiveplot/blob/f465a7118b7f005c83ab054d400deb02bd9f7410/hiveplot/hiveplot.py#L105-L110
|
[
"def",
"initialize_major_angle",
"(",
"self",
")",
":",
"num_groups",
"=",
"len",
"(",
"self",
".",
"nodes",
".",
"keys",
"(",
")",
")",
"self",
".",
"major_angle",
"=",
"2",
"*",
"np",
".",
"pi",
"/",
"num_groups"
] |
f465a7118b7f005c83ab054d400deb02bd9f7410
|
valid
|
HivePlot.initialize_minor_angle
|
Computes the minor angle: 2pi radians / (6 * number of groups).
|
hiveplot/hiveplot.py
|
def initialize_minor_angle(self):
"""
    Computes the minor angle: 2pi radians / (6 * number of groups).
"""
num_groups = len(self.nodes.keys())
self.minor_angle = 2 * np.pi / (6 * num_groups)
|
def initialize_minor_angle(self):
"""
    Computes the minor angle: 2pi radians / (6 * number of groups).
"""
num_groups = len(self.nodes.keys())
self.minor_angle = 2 * np.pi / (6 * num_groups)
|
[
"Computes",
"the",
"minor",
"angle",
":",
"2pi",
"radians",
"/",
"3",
"*",
"number",
"of",
"groups",
"."
] |
ericmjl/hiveplot
|
python
|
https://github.com/ericmjl/hiveplot/blob/f465a7118b7f005c83ab054d400deb02bd9f7410/hiveplot/hiveplot.py#L112-L118
|
[
"def",
"initialize_minor_angle",
"(",
"self",
")",
":",
"num_groups",
"=",
"len",
"(",
"self",
".",
"nodes",
".",
"keys",
"(",
")",
")",
"self",
".",
"minor_angle",
"=",
"2",
"*",
"np",
".",
"pi",
"/",
"(",
"6",
"*",
"num_groups",
")"
] |
f465a7118b7f005c83ab054d400deb02bd9f7410
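Worked numbers for the two angles, assuming an illustrative three-group plot: the major angle is 2*pi/3 (120 degrees) and the minor angle 2*pi/18 (20 degrees):

import numpy as np

num_groups = 3  # illustrative
major_angle = 2 * np.pi / num_groups        # angle between group axes
minor_angle = 2 * np.pi / (6 * num_groups)  # angle between split sub-axes
assert np.isclose(np.degrees(major_angle), 120)
assert np.isclose(np.degrees(minor_angle), 20)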
|
valid
|
HivePlot.plot_radius
|
Computes the plot radius: maximum of length of each list of nodes.
|
hiveplot/hiveplot.py
|
def plot_radius(self):
"""
Computes the plot radius: maximum of length of each list of nodes.
"""
plot_rad = 0
for group, nodelist in self.nodes.items():
proposed_radius = len(nodelist) * self.scale
if proposed_radius > plot_rad:
plot_rad = proposed_radius
return plot_rad + self.internal_radius
|
def plot_radius(self):
"""
Computes the plot radius: maximum of length of each list of nodes.
"""
plot_rad = 0
for group, nodelist in self.nodes.items():
proposed_radius = len(nodelist) * self.scale
if proposed_radius > plot_rad:
plot_rad = proposed_radius
return plot_rad + self.internal_radius
|
[
"Computes",
"the",
"plot",
"radius",
":",
"maximum",
"of",
"length",
"of",
"each",
"list",
"of",
"nodes",
"."
] |
ericmjl/hiveplot
|
python
|
https://github.com/ericmjl/hiveplot/blob/f465a7118b7f005c83ab054d400deb02bd9f7410/hiveplot/hiveplot.py#L130-L139
|
[
"def",
"plot_radius",
"(",
"self",
")",
":",
"plot_rad",
"=",
"0",
"for",
"group",
",",
"nodelist",
"in",
"self",
".",
"nodes",
".",
"items",
"(",
")",
":",
"proposed_radius",
"=",
"len",
"(",
"nodelist",
")",
"*",
"self",
".",
"scale",
"if",
"proposed_radius",
">",
"plot_rad",
":",
"plot_rad",
"=",
"proposed_radius",
"return",
"plot_rad",
"+",
"self",
".",
"internal_radius"
] |
f465a7118b7f005c83ab054d400deb02bd9f7410
|
valid
|
HivePlot.has_edge_within_group
|
Checks whether there are within-group edges or not.
|
hiveplot/hiveplot.py
|
def has_edge_within_group(self, group):
"""
Checks whether there are within-group edges or not.
"""
assert group in self.nodes.keys(),\
"{0} not one of the group of nodes".format(group)
nodelist = self.nodes[group]
for n1, n2 in self.simplified_edges():
if n1 in nodelist and n2 in nodelist:
return True
|
def has_edge_within_group(self, group):
"""
Checks whether there are within-group edges or not.
"""
assert group in self.nodes.keys(),\
"{0} not one of the group of nodes".format(group)
nodelist = self.nodes[group]
for n1, n2 in self.simplified_edges():
if n1 in nodelist and n2 in nodelist:
return True
|
[
"Checks",
"whether",
"there",
"are",
"within",
"-",
"group",
"edges",
"or",
"not",
"."
] |
ericmjl/hiveplot
|
python
|
https://github.com/ericmjl/hiveplot/blob/f465a7118b7f005c83ab054d400deb02bd9f7410/hiveplot/hiveplot.py#L147-L156
|
[
"def",
"has_edge_within_group",
"(",
"self",
",",
"group",
")",
":",
"assert",
"group",
"in",
"self",
".",
"nodes",
".",
"keys",
"(",
")",
",",
"\"{0} not one of the group of nodes\"",
".",
"format",
"(",
"group",
")",
"nodelist",
"=",
"self",
".",
"nodes",
"[",
"group",
"]",
"for",
"n1",
",",
"n2",
"in",
"self",
".",
"simplified_edges",
"(",
")",
":",
"if",
"n1",
"in",
"nodelist",
"and",
"n2",
"in",
"nodelist",
":",
"return",
"True"
] |
f465a7118b7f005c83ab054d400deb02bd9f7410
|
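
A self-contained version of the membership test; note that the record's function returns True on the first within-group edge and otherwise falls off the end, returning an implicit (falsy) None. The toy simplified_edges data is hypothetical.

nodes = {"a": [1, 2, 3], "b": [4, 5]}
edges = [(1, 2), (2, 4)]  # hypothetical output of simplified_edges()

def has_edge_within_group(group):
    # True on the first edge whose two endpoints share the group,
    # implicit None (falsy) otherwise.
    nodelist = nodes[group]
    for n1, n2 in edges:
        if n1 in nodelist and n2 in nodelist:
            return True

print(has_edge_within_group("a"))  # True  (edge 1-2 stays inside group a)
print(has_edge_within_group("b"))  # None  (no edge lies entirely inside b)
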
valid
|
HivePlot.plot_axis
|
Renders the axis.
|
hiveplot/hiveplot.py
|
def plot_axis(self, rs, theta):
"""
Renders the axis.
"""
xs, ys = get_cartesian(rs, theta)
self.ax.plot(xs, ys, 'black', alpha=0.3)
|
def plot_axis(self, rs, theta):
"""
Renders the axis.
"""
xs, ys = get_cartesian(rs, theta)
self.ax.plot(xs, ys, 'black', alpha=0.3)
|
[
"Renders",
"the",
"axis",
"."
] |
ericmjl/hiveplot
|
python
|
https://github.com/ericmjl/hiveplot/blob/f465a7118b7f005c83ab054d400deb02bd9f7410/hiveplot/hiveplot.py#L158-L163
|
[
"def",
"plot_axis",
"(",
"self",
",",
"rs",
",",
"theta",
")",
":",
"xs",
",",
"ys",
"=",
"get_cartesian",
"(",
"rs",
",",
"theta",
")",
"self",
".",
"ax",
".",
"plot",
"(",
"xs",
",",
"ys",
",",
"'black'",
",",
"alpha",
"=",
"0.3",
")"
] |
f465a7118b7f005c83ab054d400deb02bd9f7410
|
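
get_cartesian is imported from elsewhere in the package and never defined in these rows; from its call sites it is presumably the usual polar-to-Cartesian conversion. A sketch under that assumption:

import numpy as np

def get_cartesian(r, theta):
    # Assumed behaviour: convert polar (r, theta) to Cartesian (x, y).
    # Works for scalars and for arrays of radii, as plot_axis requires.
    return r * np.cos(theta), r * np.sin(theta)

rs = np.linspace(1.0, 5.0, 3)      # a few radii along one axis
xs, ys = get_cartesian(rs, np.pi / 2)
print(np.round(xs, 6), ys)         # [0. 0. 0.] [1. 3. 5.], a vertical axis
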
valid
|
HivePlot.plot_nodes
|
Plots nodes to screen.
|
hiveplot/hiveplot.py
|
def plot_nodes(self, nodelist, theta, group):
"""
Plots nodes to screen.
"""
for i, node in enumerate(nodelist):
r = self.internal_radius + i * self.scale
x, y = get_cartesian(r, theta)
circle = plt.Circle(xy=(x, y), radius=self.dot_radius,
color=self.node_colormap[group], linewidth=0)
self.ax.add_patch(circle)
|
def plot_nodes(self, nodelist, theta, group):
"""
Plots nodes to screen.
"""
for i, node in enumerate(nodelist):
r = self.internal_radius + i * self.scale
x, y = get_cartesian(r, theta)
circle = plt.Circle(xy=(x, y), radius=self.dot_radius,
color=self.node_colormap[group], linewidth=0)
self.ax.add_patch(circle)
|
[
"Plots",
"nodes",
"to",
"screen",
"."
] |
ericmjl/hiveplot
|
python
|
https://github.com/ericmjl/hiveplot/blob/f465a7118b7f005c83ab054d400deb02bd9f7410/hiveplot/hiveplot.py#L165-L174
|
[
"def",
"plot_nodes",
"(",
"self",
",",
"nodelist",
",",
"theta",
",",
"group",
")",
":",
"for",
"i",
",",
"node",
"in",
"enumerate",
"(",
"nodelist",
")",
":",
"r",
"=",
"self",
".",
"internal_radius",
"+",
"i",
"*",
"self",
".",
"scale",
"x",
",",
"y",
"=",
"get_cartesian",
"(",
"r",
",",
"theta",
")",
"circle",
"=",
"plt",
".",
"Circle",
"(",
"xy",
"=",
"(",
"x",
",",
"y",
")",
",",
"radius",
"=",
"self",
".",
"dot_radius",
",",
"color",
"=",
"self",
".",
"node_colormap",
"[",
"group",
"]",
",",
"linewidth",
"=",
"0",
")",
"self",
".",
"ax",
".",
"add_patch",
"(",
"circle",
")"
] |
f465a7118b7f005c83ab054d400deb02bd9f7410
|
valid
|
HivePlot.group_theta
|
Computes the theta along which a group's nodes are aligned.
|
hiveplot/hiveplot.py
|
def group_theta(self, group):
"""
Computes the theta along which a group's nodes are aligned.
"""
for i, g in enumerate(self.nodes.keys()):
if g == group:
break
return i * self.major_angle
|
def group_theta(self, group):
"""
Computes the theta along which a group's nodes are aligned.
"""
for i, g in enumerate(self.nodes.keys()):
if g == group:
break
return i * self.major_angle
|
[
"Computes",
"the",
"theta",
"along",
"which",
"a",
"group",
"s",
"nodes",
"are",
"aligned",
"."
] |
ericmjl/hiveplot
|
python
|
https://github.com/ericmjl/hiveplot/blob/f465a7118b7f005c83ab054d400deb02bd9f7410/hiveplot/hiveplot.py#L176-L184
|
[
"def",
"group_theta",
"(",
"self",
",",
"group",
")",
":",
"for",
"i",
",",
"g",
"in",
"enumerate",
"(",
"self",
".",
"nodes",
".",
"keys",
"(",
")",
")",
":",
"if",
"g",
"==",
"group",
":",
"break",
"return",
"i",
"*",
"self",
".",
"major_angle"
] |
f465a7118b7f005c83ab054d400deb02bd9f7410
|
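
The lookup above depends on self.nodes.keys() iterating in a stable order between calls, which holds for dicts on Python 3.7+ (or an OrderedDict earlier). A standalone sketch:

import numpy as np

nodes = {"a": [1], "b": [2], "c": [3]}   # hypothetical; insertion order matters
major_angle = 2 * np.pi / len(nodes)

def group_theta(group):
    # Index of the group in dict insertion order, times the axis spacing.
    for i, g in enumerate(nodes.keys()):
        if g == group:
            break
    return i * major_angle

print(group_theta("c"))  # ~4.189 rad (240 degrees): third of three axes

An equivalent one-liner is list(nodes).index(group) * major_angle; note the loop variant silently returns the last index when the group is missing, where .index() would raise ValueError.
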
valid
|
HivePlot.add_axes_and_nodes
|
Adds the axes (i.e. 2 or 3 axes, not to be confused with matplotlib
axes) and the nodes that belong to each axis.
|
hiveplot/hiveplot.py
|
def add_axes_and_nodes(self):
"""
Adds the axes (i.e. 2 or 3 axes, not to be confused with matplotlib
axes) and the nodes that belong to each axis.
"""
for i, (group, nodelist) in enumerate(self.nodes.items()):
theta = self.group_theta(group)
if self.has_edge_within_group(group):
theta = theta - self.minor_angle
self.plot_nodes(nodelist, theta, group)
theta = theta + 2 * self.minor_angle
self.plot_nodes(nodelist, theta, group)
else:
self.plot_nodes(nodelist, theta, group)
|
def add_axes_and_nodes(self):
"""
Adds the axes (i.e. 2 or 3 axes, not to be confused with matplotlib
axes) and the nodes that belong to each axis.
"""
for i, (group, nodelist) in enumerate(self.nodes.items()):
theta = self.group_theta(group)
if self.has_edge_within_group(group):
theta = theta - self.minor_angle
self.plot_nodes(nodelist, theta, group)
theta = theta + 2 * self.minor_angle
self.plot_nodes(nodelist, theta, group)
else:
self.plot_nodes(nodelist, theta, group)
|
[
"Adds",
"the",
"axes",
"(",
"i",
".",
"e",
".",
"2",
"or",
"3",
"axes",
"not",
"to",
"be",
"confused",
"with",
"matplotlib",
"axes",
")",
"and",
"the",
"nodes",
"that",
"belong",
"to",
"each",
"axis",
"."
] |
ericmjl/hiveplot
|
python
|
https://github.com/ericmjl/hiveplot/blob/f465a7118b7f005c83ab054d400deb02bd9f7410/hiveplot/hiveplot.py#L186-L202
|
[
"def",
"add_axes_and_nodes",
"(",
"self",
")",
":",
"for",
"i",
",",
"(",
"group",
",",
"nodelist",
")",
"in",
"enumerate",
"(",
"self",
".",
"nodes",
".",
"items",
"(",
")",
")",
":",
"theta",
"=",
"self",
".",
"group_theta",
"(",
"group",
")",
"if",
"self",
".",
"has_edge_within_group",
"(",
"group",
")",
":",
"theta",
"=",
"theta",
"-",
"self",
".",
"minor_angle",
"self",
".",
"plot_nodes",
"(",
"nodelist",
",",
"theta",
",",
"group",
")",
"theta",
"=",
"theta",
"+",
"2",
"*",
"self",
".",
"minor_angle",
"self",
".",
"plot_nodes",
"(",
"nodelist",
",",
"theta",
",",
"group",
")",
"else",
":",
"self",
".",
"plot_nodes",
"(",
"nodelist",
",",
"theta",
",",
"group",
")"
] |
f465a7118b7f005c83ab054d400deb02bd9f7410
|
valid
|
HivePlot.find_node_group_membership
|
Identifies the group for which a node belongs to.
|
hiveplot/hiveplot.py
|
def find_node_group_membership(self, node):
"""
Identifies the group for which a node belongs to.
"""
for group, nodelist in self.nodes.items():
if node in nodelist:
return group
|
def find_node_group_membership(self, node):
"""
Identifies the group for which a node belongs to.
"""
for group, nodelist in self.nodes.items():
if node in nodelist:
return group
|
[
"Identifies",
"the",
"group",
"for",
"which",
"a",
"node",
"belongs",
"to",
"."
] |
ericmjl/hiveplot
|
python
|
https://github.com/ericmjl/hiveplot/blob/f465a7118b7f005c83ab054d400deb02bd9f7410/hiveplot/hiveplot.py#L204-L210
|
[
"def",
"find_node_group_membership",
"(",
"self",
",",
"node",
")",
":",
"for",
"group",
",",
"nodelist",
"in",
"self",
".",
"nodes",
".",
"items",
"(",
")",
":",
"if",
"node",
"in",
"nodelist",
":",
"return",
"group"
] |
f465a7118b7f005c83ab054d400deb02bd9f7410
|
valid
|
HivePlot.get_idx
|
Finds the index of the node in the sorted list.
|
hiveplot/hiveplot.py
|
def get_idx(self, node):
"""
Finds the index of the node in the sorted list.
"""
group = self.find_node_group_membership(node)
return self.nodes[group].index(node)
|
def get_idx(self, node):
"""
Finds the index of the node in the sorted list.
"""
group = self.find_node_group_membership(node)
return self.nodes[group].index(node)
|
[
"Finds",
"the",
"index",
"of",
"the",
"node",
"in",
"the",
"sorted",
"list",
"."
] |
ericmjl/hiveplot
|
python
|
https://github.com/ericmjl/hiveplot/blob/f465a7118b7f005c83ab054d400deb02bd9f7410/hiveplot/hiveplot.py#L212-L217
|
[
"def",
"get_idx",
"(",
"self",
",",
"node",
")",
":",
"group",
"=",
"self",
".",
"find_node_group_membership",
"(",
"node",
")",
"return",
"self",
".",
"nodes",
"[",
"group",
"]",
".",
"index",
"(",
"node",
")"
] |
f465a7118b7f005c83ab054d400deb02bd9f7410
|
valid
|
HivePlot.node_radius
|
Computes the radial position of the node.
|
hiveplot/hiveplot.py
|
def node_radius(self, node):
"""
Computes the radial position of the node.
"""
return self.get_idx(node) * self.scale + self.internal_radius
|
def node_radius(self, node):
"""
Computes the radial position of the node.
"""
return self.get_idx(node) * self.scale + self.internal_radius
|
[
"Computes",
"the",
"radial",
"position",
"of",
"the",
"node",
"."
] |
ericmjl/hiveplot
|
python
|
https://github.com/ericmjl/hiveplot/blob/f465a7118b7f005c83ab054d400deb02bd9f7410/hiveplot/hiveplot.py#L219-L223
|
[
"def",
"node_radius",
"(",
"self",
",",
"node",
")",
":",
"return",
"self",
".",
"get_idx",
"(",
"node",
")",
"*",
"self",
".",
"scale",
"+",
"self",
".",
"internal_radius"
] |
f465a7118b7f005c83ab054d400deb02bd9f7410
|
valid
|
HivePlot.node_theta
|
Convenience function to find the node's theta angle.
|
hiveplot/hiveplot.py
|
def node_theta(self, node):
"""
Convenience function to find the node's theta angle.
"""
group = self.find_node_group_membership(node)
return self.group_theta(group)
|
def node_theta(self, node):
"""
Convenience function to find the node's theta angle.
"""
group = self.find_node_group_membership(node)
return self.group_theta(group)
|
[
"Convenience",
"function",
"to",
"find",
"the",
"node",
"s",
"theta",
"angle",
"."
] |
ericmjl/hiveplot
|
python
|
https://github.com/ericmjl/hiveplot/blob/f465a7118b7f005c83ab054d400deb02bd9f7410/hiveplot/hiveplot.py#L225-L230
|
[
"def",
"node_theta",
"(",
"self",
",",
"node",
")",
":",
"group",
"=",
"self",
".",
"find_node_group_membership",
"(",
"node",
")",
"return",
"self",
".",
"group_theta",
"(",
"group",
")"
] |
f465a7118b7f005c83ab054d400deb02bd9f7410
|
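
The four small lookups in the preceding records (find_node_group_membership, get_idx, node_radius, node_theta) chain together to position a node. A consolidated standalone sketch with hypothetical attribute values:

import numpy as np

nodes = {"a": ["n1", "n2"], "b": ["n3"]}   # hypothetical groups
scale, internal_radius = 2.0, 1.0
major_angle = 2 * np.pi / len(nodes)

def find_node_group_membership(node):
    for group, nodelist in nodes.items():
        if node in nodelist:
            return group

def node_radius(node):
    group = find_node_group_membership(node)
    idx = nodes[group].index(node)                   # get_idx
    return idx * scale + internal_radius

def node_theta(node):
    group = find_node_group_membership(node)
    return list(nodes).index(group) * major_angle    # group_theta

print(node_radius("n2"), node_theta("n3"))  # 3.0 3.141592653589793
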
valid
|
HivePlot.draw_edge
|
Renders the given edge (n1, n2) to the plot.
|
hiveplot/hiveplot.py
|
def draw_edge(self, n1, n2, d, group):
"""
Renders the given edge (n1, n2) to the plot.
"""
start_radius = self.node_radius(n1)
start_theta = self.node_theta(n1)
end_radius = self.node_radius(n2)
end_theta = self.node_theta(n2)
start_theta, end_theta = self.correct_angles(start_theta, end_theta)
start_theta, end_theta = self.adjust_angles(n1, start_theta, n2,
end_theta)
middle1_radius = np.min([start_radius, end_radius])
middle2_radius = np.max([start_radius, end_radius])
if start_radius > end_radius:
middle1_radius, middle2_radius = middle2_radius, middle1_radius
middle1_theta = np.mean([start_theta, end_theta])
middle2_theta = np.mean([start_theta, end_theta])
startx, starty = get_cartesian(start_radius, start_theta)
middle1x, middle1y = get_cartesian(middle1_radius, middle1_theta)
middle2x, middle2y = get_cartesian(middle2_radius, middle2_theta)
# middlex, middley = get_cartesian(middle_radius, middle_theta)
endx, endy = get_cartesian(end_radius, end_theta)
verts = [(startx, starty),
(middle1x, middle1y),
(middle2x, middle2y),
(endx, endy)]
codes = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4]
path = Path(verts, codes)
if self.edge_colormap is None:
edgecolor = 'black'
else:
edgecolor = self.edge_colormap[group]
patch = patches.PathPatch(path, lw=self.linewidth, facecolor='none',
edgecolor=edgecolor, alpha=0.3)
self.ax.add_patch(patch)
|
def draw_edge(self, n1, n2, d, group):
"""
Renders the given edge (n1, n2) to the plot.
"""
start_radius = self.node_radius(n1)
start_theta = self.node_theta(n1)
end_radius = self.node_radius(n2)
end_theta = self.node_theta(n2)
start_theta, end_theta = self.correct_angles(start_theta, end_theta)
start_theta, end_theta = self.adjust_angles(n1, start_theta, n2,
end_theta)
middle1_radius = np.min([start_radius, end_radius])
middle2_radius = np.max([start_radius, end_radius])
if start_radius > end_radius:
middle1_radius, middle2_radius = middle2_radius, middle1_radius
middle1_theta = np.mean([start_theta, end_theta])
middle2_theta = np.mean([start_theta, end_theta])
startx, starty = get_cartesian(start_radius, start_theta)
middle1x, middle1y = get_cartesian(middle1_radius, middle1_theta)
middle2x, middle2y = get_cartesian(middle2_radius, middle2_theta)
# middlex, middley = get_cartesian(middle_radius, middle_theta)
endx, endy = get_cartesian(end_radius, end_theta)
verts = [(startx, starty),
(middle1x, middle1y),
(middle2x, middle2y),
(endx, endy)]
codes = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4]
path = Path(verts, codes)
if self.edge_colormap is None:
edgecolor = 'black'
else:
edgecolor = self.edge_colormap[group]
patch = patches.PathPatch(path, lw=self.linewidth, facecolor='none',
edgecolor=edgecolor, alpha=0.3)
self.ax.add_patch(patch)
|
[
"Renders",
"the",
"given",
"edge",
"(",
"n1",
"n2",
")",
"to",
"the",
"plot",
"."
] |
ericmjl/hiveplot
|
python
|
https://github.com/ericmjl/hiveplot/blob/f465a7118b7f005c83ab054d400deb02bd9f7410/hiveplot/hiveplot.py#L232-L274
|
[
"def",
"draw_edge",
"(",
"self",
",",
"n1",
",",
"n2",
",",
"d",
",",
"group",
")",
":",
"start_radius",
"=",
"self",
".",
"node_radius",
"(",
"n1",
")",
"start_theta",
"=",
"self",
".",
"node_theta",
"(",
"n1",
")",
"end_radius",
"=",
"self",
".",
"node_radius",
"(",
"n2",
")",
"end_theta",
"=",
"self",
".",
"node_theta",
"(",
"n2",
")",
"start_theta",
",",
"end_theta",
"=",
"self",
".",
"correct_angles",
"(",
"start_theta",
",",
"end_theta",
")",
"start_theta",
",",
"end_theta",
"=",
"self",
".",
"adjust_angles",
"(",
"n1",
",",
"start_theta",
",",
"n2",
",",
"end_theta",
")",
"middle1_radius",
"=",
"np",
".",
"min",
"(",
"[",
"start_radius",
",",
"end_radius",
"]",
")",
"middle2_radius",
"=",
"np",
".",
"max",
"(",
"[",
"start_radius",
",",
"end_radius",
"]",
")",
"if",
"start_radius",
">",
"end_radius",
":",
"middle1_radius",
",",
"middle2_radius",
"=",
"middle2_radius",
",",
"middle1_radius",
"middle1_theta",
"=",
"np",
".",
"mean",
"(",
"[",
"start_theta",
",",
"end_theta",
"]",
")",
"middle2_theta",
"=",
"np",
".",
"mean",
"(",
"[",
"start_theta",
",",
"end_theta",
"]",
")",
"startx",
",",
"starty",
"=",
"get_cartesian",
"(",
"start_radius",
",",
"start_theta",
")",
"middle1x",
",",
"middle1y",
"=",
"get_cartesian",
"(",
"middle1_radius",
",",
"middle1_theta",
")",
"middle2x",
",",
"middle2y",
"=",
"get_cartesian",
"(",
"middle2_radius",
",",
"middle2_theta",
")",
"# middlex, middley = get_cartesian(middle_radius, middle_theta)",
"endx",
",",
"endy",
"=",
"get_cartesian",
"(",
"end_radius",
",",
"end_theta",
")",
"verts",
"=",
"[",
"(",
"startx",
",",
"starty",
")",
",",
"(",
"middle1x",
",",
"middle1y",
")",
",",
"(",
"middle2x",
",",
"middle2y",
")",
",",
"(",
"endx",
",",
"endy",
")",
"]",
"codes",
"=",
"[",
"Path",
".",
"MOVETO",
",",
"Path",
".",
"CURVE4",
",",
"Path",
".",
"CURVE4",
",",
"Path",
".",
"CURVE4",
"]",
"path",
"=",
"Path",
"(",
"verts",
",",
"codes",
")",
"if",
"self",
".",
"edge_colormap",
"is",
"None",
":",
"edgecolor",
"=",
"'black'",
"else",
":",
"edgecolor",
"=",
"self",
".",
"edge_colormap",
"[",
"group",
"]",
"patch",
"=",
"patches",
".",
"PathPatch",
"(",
"path",
",",
"lw",
"=",
"self",
".",
"linewidth",
",",
"facecolor",
"=",
"'none'",
",",
"edgecolor",
"=",
"edgecolor",
",",
"alpha",
"=",
"0.3",
")",
"self",
".",
"ax",
".",
"add_patch",
"(",
"patch",
")"
] |
f465a7118b7f005c83ab054d400deb02bd9f7410
|
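
Each edge above is a single cubic Bezier whose two control points sit at the endpoint radii but at the mean of the two angles, which pulls the curve toward the centre. A self-contained matplotlib sketch of that construction, with made-up endpoint coordinates (for a start radius below the end radius, min/max reproduces the record's ordering; the record swaps the two control radii when the start radius is larger):

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.path import Path

def get_cartesian(r, theta):               # assumed polar-to-Cartesian helper
    return r * np.cos(theta), r * np.sin(theta)

# Hypothetical endpoints: radius 3 on the axis at 0 rad, radius 5 at 2pi/3.
start_r, start_t, end_r, end_t = 3.0, 0.0, 5.0, 2 * np.pi / 3
mid_t = np.mean([start_t, end_t])

verts = [get_cartesian(start_r, start_t),
         get_cartesian(min(start_r, end_r), mid_t),    # control point 1
         get_cartesian(max(start_r, end_r), mid_t),    # control point 2
         get_cartesian(end_r, end_t)]
codes = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4]

fig, ax = plt.subplots()
ax.add_patch(patches.PathPatch(Path(verts, codes), facecolor='none',
                               edgecolor='black', alpha=0.3))
ax.set_xlim(-6, 6)
ax.set_ylim(-6, 6)
plt.show()
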
valid
|
HivePlot.add_edges
|
Draws all of the edges in the graph.
|
hiveplot/hiveplot.py
|
def add_edges(self):
"""
Draws all of the edges in the graph.
"""
for group, edgelist in self.edges.items():
for (u, v, d) in edgelist:
self.draw_edge(u, v, d, group)
|
def add_edges(self):
"""
Draws all of the edges in the graph.
"""
for group, edgelist in self.edges.items():
for (u, v, d) in edgelist:
self.draw_edge(u, v, d, group)
|
[
"Draws",
"all",
"of",
"the",
"edges",
"in",
"the",
"graph",
"."
] |
ericmjl/hiveplot
|
python
|
https://github.com/ericmjl/hiveplot/blob/f465a7118b7f005c83ab054d400deb02bd9f7410/hiveplot/hiveplot.py#L276-L282
|
[
"def",
"add_edges",
"(",
"self",
")",
":",
"for",
"group",
",",
"edgelist",
"in",
"self",
".",
"edges",
".",
"items",
"(",
")",
":",
"for",
"(",
"u",
",",
"v",
",",
"d",
")",
"in",
"edgelist",
":",
"self",
".",
"draw_edge",
"(",
"u",
",",
"v",
",",
"d",
",",
"group",
")"
] |
f465a7118b7f005c83ab054d400deb02bd9f7410
|
valid
|
HivePlot.draw
|
The master function that is called that draws everything.
|
hiveplot/hiveplot.py
|
def draw(self):
"""
The master function that is called that draws everything.
"""
self.ax.set_xlim(-self.plot_radius(), self.plot_radius())
self.ax.set_ylim(-self.plot_radius(), self.plot_radius())
self.add_axes_and_nodes()
self.add_edges()
self.ax.axis('off')
|
def draw(self):
"""
The master function that is called that draws everything.
"""
self.ax.set_xlim(-self.plot_radius(), self.plot_radius())
self.ax.set_ylim(-self.plot_radius(), self.plot_radius())
self.add_axes_and_nodes()
self.add_edges()
self.ax.axis('off')
|
[
"The",
"master",
"function",
"that",
"is",
"called",
"that",
"draws",
"everything",
"."
] |
ericmjl/hiveplot
|
python
|
https://github.com/ericmjl/hiveplot/blob/f465a7118b7f005c83ab054d400deb02bd9f7410/hiveplot/hiveplot.py#L284-L294
|
[
"def",
"draw",
"(",
"self",
")",
":",
"self",
".",
"ax",
".",
"set_xlim",
"(",
"-",
"self",
".",
"plot_radius",
"(",
")",
",",
"self",
".",
"plot_radius",
"(",
")",
")",
"self",
".",
"ax",
".",
"set_ylim",
"(",
"-",
"self",
".",
"plot_radius",
"(",
")",
",",
"self",
".",
"plot_radius",
"(",
")",
")",
"self",
".",
"add_axes_and_nodes",
"(",
")",
"self",
".",
"add_edges",
"(",
")",
"self",
".",
"ax",
".",
"axis",
"(",
"'off'",
")"
] |
f465a7118b7f005c83ab054d400deb02bd9f7410
|
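
Putting the preceding records together outside the class: a minimal self-contained sketch that mirrors draw's order of operations (set symmetric limits, place axes and nodes, then hide the frame; edge drawing is omitted). All data and attribute values are made up, and set_aspect is an added convenience so the circular geometry is not distorted.

import numpy as np
import matplotlib.pyplot as plt

nodes = {"a": range(5), "b": range(8), "c": range(3)}   # hypothetical
scale, internal_radius, dot_radius = 1.0, 1.0, 0.2
major_angle = 2 * np.pi / len(nodes)

fig, ax = plt.subplots()
plot_radius = max(len(v) for v in nodes.values()) * scale + internal_radius
ax.set_xlim(-plot_radius, plot_radius)
ax.set_ylim(-plot_radius, plot_radius)

for g, (group, nodelist) in enumerate(nodes.items()):
    theta = g * major_angle
    for i, _ in enumerate(nodelist):
        r = internal_radius + i * scale
        ax.add_patch(plt.Circle((r * np.cos(theta), r * np.sin(theta)),
                                radius=dot_radius, linewidth=0))
ax.set_aspect('equal')
ax.axis('off')
plt.show()
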
valid
|
HivePlot.adjust_angles
|
This function adjusts the start and end angles to correct for
duplicated axes.
|
hiveplot/hiveplot.py
|
def adjust_angles(self, start_node, start_angle, end_node, end_angle):
"""
This function adjusts the start and end angles to correct for
duplicated axes.
"""
start_group = self.find_node_group_membership(start_node)
end_group = self.find_node_group_membership(end_node)
if start_group == 0 and end_group == len(self.nodes.keys())-1:
if self.has_edge_within_group(start_group):
start_angle = correct_negative_angle(start_angle -
self.minor_angle)
if self.has_edge_within_group(end_group):
end_angle = correct_negative_angle(end_angle +
self.minor_angle)
elif start_group == len(self.nodes.keys())-1 and end_group == 0:
if self.has_edge_within_group(start_group):
start_angle = correct_negative_angle(start_angle +
self.minor_angle)
if self.has_edge_within_group(end_group):
end_angle = correct_negative_angle(end_angle -
self.minor_angle)
elif start_group < end_group:
if self.has_edge_within_group(end_group):
end_angle = correct_negative_angle(end_angle -
self.minor_angle)
if self.has_edge_within_group(start_group):
start_angle = correct_negative_angle(start_angle +
self.minor_angle)
elif end_group < start_group:
if self.has_edge_within_group(start_group):
start_angle = correct_negative_angle(start_angle -
self.minor_angle)
if self.has_edge_within_group(end_group):
end_angle = correct_negative_angle(end_angle +
self.minor_angle)
return start_angle, end_angle
|
def adjust_angles(self, start_node, start_angle, end_node, end_angle):
"""
This function adjusts the start and end angles to correct for
duplicated axes.
"""
start_group = self.find_node_group_membership(start_node)
end_group = self.find_node_group_membership(end_node)
if start_group == 0 and end_group == len(self.nodes.keys())-1:
if self.has_edge_within_group(start_group):
start_angle = correct_negative_angle(start_angle -
self.minor_angle)
if self.has_edge_within_group(end_group):
end_angle = correct_negative_angle(end_angle +
self.minor_angle)
elif start_group == len(self.nodes.keys())-1 and end_group == 0:
if self.has_edge_within_group(start_group):
start_angle = correct_negative_angle(start_angle +
self.minor_angle)
if self.has_edge_within_group(end_group):
end_angle = correct_negative_angle(end_angle -
self.minor_angle)
elif start_group < end_group:
if self.has_edge_within_group(end_group):
end_angle = correct_negative_angle(end_angle -
self.minor_angle)
if self.has_edge_within_group(start_group):
start_angle = correct_negative_angle(start_angle +
self.minor_angle)
elif end_group < start_group:
if self.has_edge_within_group(start_group):
start_angle = correct_negative_angle(start_angle -
self.minor_angle)
if self.has_edge_within_group(end_group):
end_angle = correct_negative_angle(end_angle +
self.minor_angle)
return start_angle, end_angle
|
[
"This",
"function",
"adjusts",
"the",
"start",
"and",
"end",
"angles",
"to",
"correct",
"for",
"duplicated",
"axes",
"."
] |
ericmjl/hiveplot
|
python
|
https://github.com/ericmjl/hiveplot/blob/f465a7118b7f005c83ab054d400deb02bd9f7410/hiveplot/hiveplot.py#L296-L336
|
[
"def",
"adjust_angles",
"(",
"self",
",",
"start_node",
",",
"start_angle",
",",
"end_node",
",",
"end_angle",
")",
":",
"start_group",
"=",
"self",
".",
"find_node_group_membership",
"(",
"start_node",
")",
"end_group",
"=",
"self",
".",
"find_node_group_membership",
"(",
"end_node",
")",
"if",
"start_group",
"==",
"0",
"and",
"end_group",
"==",
"len",
"(",
"self",
".",
"nodes",
".",
"keys",
"(",
")",
")",
"-",
"1",
":",
"if",
"self",
".",
"has_edge_within_group",
"(",
"start_group",
")",
":",
"start_angle",
"=",
"correct_negative_angle",
"(",
"start_angle",
"-",
"self",
".",
"minor_angle",
")",
"if",
"self",
".",
"has_edge_within_group",
"(",
"end_group",
")",
":",
"end_angle",
"=",
"correct_negative_angle",
"(",
"end_angle",
"+",
"self",
".",
"minor_angle",
")",
"elif",
"start_group",
"==",
"len",
"(",
"self",
".",
"nodes",
".",
"keys",
"(",
")",
")",
"-",
"1",
"and",
"end_group",
"==",
"0",
":",
"if",
"self",
".",
"has_edge_within_group",
"(",
"start_group",
")",
":",
"start_angle",
"=",
"correct_negative_angle",
"(",
"start_angle",
"+",
"self",
".",
"minor_angle",
")",
"if",
"self",
".",
"has_edge_within_group",
"(",
"end_group",
")",
":",
"end_angle",
"=",
"correct_negative_angle",
"(",
"end_angle",
"-",
"self",
".",
"minor_angle",
")",
"elif",
"start_group",
"<",
"end_group",
":",
"if",
"self",
".",
"has_edge_within_group",
"(",
"end_group",
")",
":",
"end_angle",
"=",
"correct_negative_angle",
"(",
"end_angle",
"-",
"self",
".",
"minor_angle",
")",
"if",
"self",
".",
"has_edge_within_group",
"(",
"start_group",
")",
":",
"start_angle",
"=",
"correct_negative_angle",
"(",
"start_angle",
"+",
"self",
".",
"minor_angle",
")",
"elif",
"end_group",
"<",
"start_group",
":",
"if",
"self",
".",
"has_edge_within_group",
"(",
"start_group",
")",
":",
"start_angle",
"=",
"correct_negative_angle",
"(",
"start_angle",
"-",
"self",
".",
"minor_angle",
")",
"if",
"self",
".",
"has_edge_within_group",
"(",
"end_group",
")",
":",
"end_angle",
"=",
"correct_negative_angle",
"(",
"end_angle",
"+",
"self",
".",
"minor_angle",
")",
"return",
"start_angle",
",",
"end_angle"
] |
f465a7118b7f005c83ab054d400deb02bd9f7410
|
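
correct_negative_angle is another helper that never appears in these rows; given how it is used above, it presumably wraps an angle that has gone negative back into [0, 2*pi). A sketch under that assumption:

import numpy as np

def correct_negative_angle(angle):
    # Assumed behaviour: map an angle below zero back into [0, 2*pi).
    if angle < 0:
        angle += 2 * np.pi
    return angle

minor_angle = 2 * np.pi / 18           # three groups, as in the earlier sketch
print(correct_negative_angle(0.0 - minor_angle))  # ~5.934 rad instead of -0.349
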
valid
|
HivePlot.correct_angles
|
This function corrects for the following problems in the edges:
|
hiveplot/hiveplot.py
|
def correct_angles(self, start_angle, end_angle):
"""
This function corrects for the following problems in the edges:
"""
# Edges going the anti-clockwise direction involves angle = 0.
if start_angle == 0 and (end_angle - start_angle > np.pi):
start_angle = np.pi * 2
if end_angle == 0 and (end_angle - start_angle < -np.pi):
end_angle = np.pi * 2
# Case when start_angle == end_angle:
if start_angle == end_angle:
start_angle = start_angle - self.minor_angle
end_angle = end_angle + self.minor_angle
return start_angle, end_angle
|
def correct_angles(self, start_angle, end_angle):
"""
This function corrects for the following problems in the edges:
"""
# Edges going the anti-clockwise direction involves angle = 0.
if start_angle == 0 and (end_angle - start_angle > np.pi):
start_angle = np.pi * 2
if end_angle == 0 and (end_angle - start_angle < -np.pi):
end_angle = np.pi * 2
# Case when start_angle == end_angle:
if start_angle == end_angle:
start_angle = start_angle - self.minor_angle
end_angle = end_angle + self.minor_angle
return start_angle, end_angle
|
[
"This",
"function",
"corrects",
"for",
"the",
"following",
"problems",
"in",
"the",
"edges",
":"
] |
ericmjl/hiveplot
|
python
|
https://github.com/ericmjl/hiveplot/blob/f465a7118b7f005c83ab054d400deb02bd9f7410/hiveplot/hiveplot.py#L338-L353
|
[
"def",
"correct_angles",
"(",
"self",
",",
"start_angle",
",",
"end_angle",
")",
":",
"# Edges going the anti-clockwise direction involves angle = 0.",
"if",
"start_angle",
"==",
"0",
"and",
"(",
"end_angle",
"-",
"start_angle",
">",
"np",
".",
"pi",
")",
":",
"start_angle",
"=",
"np",
".",
"pi",
"*",
"2",
"if",
"end_angle",
"==",
"0",
"and",
"(",
"end_angle",
"-",
"start_angle",
"<",
"-",
"np",
".",
"pi",
")",
":",
"end_angle",
"=",
"np",
".",
"pi",
"*",
"2",
"# Case when start_angle == end_angle:",
"if",
"start_angle",
"==",
"end_angle",
":",
"start_angle",
"=",
"start_angle",
"-",
"self",
".",
"minor_angle",
"end_angle",
"=",
"end_angle",
"+",
"self",
".",
"minor_angle",
"return",
"start_angle",
",",
"end_angle"
] |
f465a7118b7f005c83ab054d400deb02bd9f7410
|
valid
|
Type.mods_genre
|
Guesses an appropriate MODS XML genre type.
|
publications/models/type.py
|
def mods_genre(self):
"""
Guesses an appropriate MODS XML genre type.
"""
type2genre = {
'conference': 'conference publication',
'book chapter': 'bibliography',
'unpublished': 'article'
}
tp = str(self.type).lower()
return type2genre.get(tp, tp)
|
def mods_genre(self):
"""
Guesses an appropriate MODS XML genre type.
"""
type2genre = {
'conference': 'conference publication',
'book chapter': 'bibliography',
'unpublished': 'article'
}
tp = str(self.type).lower()
return type2genre.get(tp, tp)
|
[
"Guesses",
"an",
"appropriate",
"MODS",
"XML",
"genre",
"type",
"."
] |
lucastheis/django-publications
|
python
|
https://github.com/lucastheis/django-publications/blob/5a75cf88cf794937711b6850ff2acb07fe005f08/publications/models/type.py#L66-L77
|
[
"def",
"mods_genre",
"(",
"self",
")",
":",
"type2genre",
"=",
"{",
"'conference'",
":",
"'conference publication'",
",",
"'book chapter'",
":",
"'bibliography'",
",",
"'unpublished'",
":",
"'article'",
"}",
"tp",
"=",
"str",
"(",
"self",
".",
"type",
")",
".",
"lower",
"(",
")",
"return",
"type2genre",
".",
"get",
"(",
"tp",
",",
"tp",
")"
] |
5a75cf88cf794937711b6850ff2acb07fe005f08
|
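
The record above is the mapping-with-fallback idiom: dict.get(tp, tp) returns the MODS genre when the type is known and passes the lowercased type string through unchanged otherwise.

type2genre = {
    'conference': 'conference publication',
    'book chapter': 'bibliography',
    'unpublished': 'article',
}

for raw in ('Conference', 'Journal Article'):
    tp = raw.lower()
    print(tp, '->', type2genre.get(tp, tp))
# conference -> conference publication
# journal article -> journal article   (unknown types pass through)
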
valid
|
Publication._produce_author_lists
|
Parse authors string to create lists of authors.
|
publications/models/publication.py
|
def _produce_author_lists(self):
"""
Parse authors string to create lists of authors.
"""
# post-process author names
self.authors = self.authors.replace(', and ', ', ')
self.authors = self.authors.replace(',and ', ', ')
self.authors = self.authors.replace(' and ', ', ')
self.authors = self.authors.replace(';', ',')
# list of authors
self.authors_list = [author.strip() for author in self.authors.split(',')]
# simplified representation of author names
self.authors_list_simple = []
# author names represented as a tuple of given and family name
self.authors_list_split = []
# tests if title already ends with a punctuation mark
self.title_ends_with_punct = self.title[-1] in ['.', '!', '?'] \
if len(self.title) > 0 else False
suffixes = ['I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', "Jr.", "Sr."]
prefixes = ['Dr.']
prepositions = ['van', 'von', 'der', 'de', 'den']
# further post-process author names
for i, author in enumerate(self.authors_list):
if author == '':
continue
names = author.split(' ')
# check if last string contains initials
if (len(names[-1]) <= 3) \
and names[-1] not in suffixes \
and all(c in ascii_uppercase for c in names[-1]):
# turn "Gauss CF" into "C. F. Gauss"
names = [c + '.' for c in names[-1]] + names[:-1]
# number of suffixes
num_suffixes = 0
for name in names[::-1]:
if name in suffixes:
num_suffixes += 1
else:
break
# abbreviate names
for j, name in enumerate(names[:-1 - num_suffixes]):
# don't try to abbreviate these
if j == 0 and name in prefixes:
continue
if j > 0 and name in prepositions:
continue
if (len(name) > 2) or (len(name) and (name[-1] != '.')):
k = name.find('-')
if 0 < k + 1 < len(name):
# take care of dash
names[j] = name[0] + '.-' + name[k + 1] + '.'
else:
names[j] = name[0] + '.'
if len(names):
self.authors_list[i] = ' '.join(names)
# create simplified/normalized representation of author name
if len(names) > 1:
for name in names[0].split('-'):
name_simple = self.simplify_name(' '.join([name, names[-1]]))
self.authors_list_simple.append(name_simple)
else:
self.authors_list_simple.append(self.simplify_name(names[0]))
# number of prepositions
num_prepositions = 0
for name in names:
if name in prepositions:
num_prepositions += 1
# splitting point
sp = 1 + num_suffixes + num_prepositions
self.authors_list_split.append(
(' '.join(names[:-sp]), ' '.join(names[-sp:])))
# list of authors in BibTex format
self.authors_bibtex = ' and '.join(self.authors_list)
# overwrite authors string
if len(self.authors_list) > 2:
self.authors = ', and '.join([
', '.join(self.authors_list[:-1]),
self.authors_list[-1]])
elif len(self.authors_list) > 1:
self.authors = ' and '.join(self.authors_list)
else:
self.authors = self.authors_list[0]
|
def _produce_author_lists(self):
"""
Parse authors string to create lists of authors.
"""
# post-process author names
self.authors = self.authors.replace(', and ', ', ')
self.authors = self.authors.replace(',and ', ', ')
self.authors = self.authors.replace(' and ', ', ')
self.authors = self.authors.replace(';', ',')
# list of authors
self.authors_list = [author.strip() for author in self.authors.split(',')]
# simplified representation of author names
self.authors_list_simple = []
# author names represented as a tuple of given and family name
self.authors_list_split = []
# tests if title already ends with a punctuation mark
self.title_ends_with_punct = self.title[-1] in ['.', '!', '?'] \
if len(self.title) > 0 else False
suffixes = ['I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', "Jr.", "Sr."]
prefixes = ['Dr.']
prepositions = ['van', 'von', 'der', 'de', 'den']
# further post-process author names
for i, author in enumerate(self.authors_list):
if author == '':
continue
names = author.split(' ')
# check if last string contains initials
if (len(names[-1]) <= 3) \
and names[-1] not in suffixes \
and all(c in ascii_uppercase for c in names[-1]):
# turn "Gauss CF" into "C. F. Gauss"
names = [c + '.' for c in names[-1]] + names[:-1]
# number of suffixes
num_suffixes = 0
for name in names[::-1]:
if name in suffixes:
num_suffixes += 1
else:
break
# abbreviate names
for j, name in enumerate(names[:-1 - num_suffixes]):
# don't try to abbreviate these
if j == 0 and name in prefixes:
continue
if j > 0 and name in prepositions:
continue
if (len(name) > 2) or (len(name) and (name[-1] != '.')):
k = name.find('-')
if 0 < k + 1 < len(name):
# take care of dash
names[j] = name[0] + '.-' + name[k + 1] + '.'
else:
names[j] = name[0] + '.'
if len(names):
self.authors_list[i] = ' '.join(names)
# create simplified/normalized representation of author name
if len(names) > 1:
for name in names[0].split('-'):
name_simple = self.simplify_name(' '.join([name, names[-1]]))
self.authors_list_simple.append(name_simple)
else:
self.authors_list_simple.append(self.simplify_name(names[0]))
# number of prepositions
num_prepositions = 0
for name in names:
if name in prepositions:
num_prepositions += 1
# splitting point
sp = 1 + num_suffixes + num_prepositions
self.authors_list_split.append(
(' '.join(names[:-sp]), ' '.join(names[-sp:])))
# list of authors in BibTex format
self.authors_bibtex = ' and '.join(self.authors_list)
# overwrite authors string
if len(self.authors_list) > 2:
self.authors = ', and '.join([
', '.join(self.authors_list[:-1]),
self.authors_list[-1]])
elif len(self.authors_list) > 1:
self.authors = ' and '.join(self.authors_list)
else:
self.authors = self.authors_list[0]
|
[
"Parse",
"authors",
"string",
"to",
"create",
"lists",
"of",
"authors",
"."
] |
lucastheis/django-publications
|
python
|
https://github.com/lucastheis/django-publications/blob/5a75cf88cf794937711b6850ff2acb07fe005f08/publications/models/publication.py#L108-L207
|
[
"def",
"_produce_author_lists",
"(",
"self",
")",
":",
"# post-process author names",
"self",
".",
"authors",
"=",
"self",
".",
"authors",
".",
"replace",
"(",
"', and '",
",",
"', '",
")",
"self",
".",
"authors",
"=",
"self",
".",
"authors",
".",
"replace",
"(",
"',and '",
",",
"', '",
")",
"self",
".",
"authors",
"=",
"self",
".",
"authors",
".",
"replace",
"(",
"' and '",
",",
"', '",
")",
"self",
".",
"authors",
"=",
"self",
".",
"authors",
".",
"replace",
"(",
"';'",
",",
"','",
")",
"# list of authors",
"self",
".",
"authors_list",
"=",
"[",
"author",
".",
"strip",
"(",
")",
"for",
"author",
"in",
"self",
".",
"authors",
".",
"split",
"(",
"','",
")",
"]",
"# simplified representation of author names",
"self",
".",
"authors_list_simple",
"=",
"[",
"]",
"# author names represented as a tuple of given and family name",
"self",
".",
"authors_list_split",
"=",
"[",
"]",
"# tests if title already ends with a punctuation mark",
"self",
".",
"title_ends_with_punct",
"=",
"self",
".",
"title",
"[",
"-",
"1",
"]",
"in",
"[",
"'.'",
",",
"'!'",
",",
"'?'",
"]",
"if",
"len",
"(",
"self",
".",
"title",
")",
">",
"0",
"else",
"False",
"suffixes",
"=",
"[",
"'I'",
",",
"'II'",
",",
"'III'",
",",
"'IV'",
",",
"'V'",
",",
"'VI'",
",",
"'VII'",
",",
"'VIII'",
",",
"\"Jr.\"",
",",
"\"Sr.\"",
"]",
"prefixes",
"=",
"[",
"'Dr.'",
"]",
"prepositions",
"=",
"[",
"'van'",
",",
"'von'",
",",
"'der'",
",",
"'de'",
",",
"'den'",
"]",
"# further post-process author names",
"for",
"i",
",",
"author",
"in",
"enumerate",
"(",
"self",
".",
"authors_list",
")",
":",
"if",
"author",
"==",
"''",
":",
"continue",
"names",
"=",
"author",
".",
"split",
"(",
"' '",
")",
"# check if last string contains initials",
"if",
"(",
"len",
"(",
"names",
"[",
"-",
"1",
"]",
")",
"<=",
"3",
")",
"and",
"names",
"[",
"-",
"1",
"]",
"not",
"in",
"suffixes",
"and",
"all",
"(",
"c",
"in",
"ascii_uppercase",
"for",
"c",
"in",
"names",
"[",
"-",
"1",
"]",
")",
":",
"# turn \"Gauss CF\" into \"C. F. Gauss\"",
"names",
"=",
"[",
"c",
"+",
"'.'",
"for",
"c",
"in",
"names",
"[",
"-",
"1",
"]",
"]",
"+",
"names",
"[",
":",
"-",
"1",
"]",
"# number of suffixes",
"num_suffixes",
"=",
"0",
"for",
"name",
"in",
"names",
"[",
":",
":",
"-",
"1",
"]",
":",
"if",
"name",
"in",
"suffixes",
":",
"num_suffixes",
"+=",
"1",
"else",
":",
"break",
"# abbreviate names",
"for",
"j",
",",
"name",
"in",
"enumerate",
"(",
"names",
"[",
":",
"-",
"1",
"-",
"num_suffixes",
"]",
")",
":",
"# don't try to abbreviate these",
"if",
"j",
"==",
"0",
"and",
"name",
"in",
"prefixes",
":",
"continue",
"if",
"j",
">",
"0",
"and",
"name",
"in",
"prepositions",
":",
"continue",
"if",
"(",
"len",
"(",
"name",
")",
">",
"2",
")",
"or",
"(",
"len",
"(",
"name",
")",
"and",
"(",
"name",
"[",
"-",
"1",
"]",
"!=",
"'.'",
")",
")",
":",
"k",
"=",
"name",
".",
"find",
"(",
"'-'",
")",
"if",
"0",
"<",
"k",
"+",
"1",
"<",
"len",
"(",
"name",
")",
":",
"# take care of dash",
"names",
"[",
"j",
"]",
"=",
"name",
"[",
"0",
"]",
"+",
"'.-'",
"+",
"name",
"[",
"k",
"+",
"1",
"]",
"+",
"'.'",
"else",
":",
"names",
"[",
"j",
"]",
"=",
"name",
"[",
"0",
"]",
"+",
"'.'",
"if",
"len",
"(",
"names",
")",
":",
"self",
".",
"authors_list",
"[",
"i",
"]",
"=",
"' '",
".",
"join",
"(",
"names",
")",
"# create simplified/normalized representation of author name",
"if",
"len",
"(",
"names",
")",
">",
"1",
":",
"for",
"name",
"in",
"names",
"[",
"0",
"]",
".",
"split",
"(",
"'-'",
")",
":",
"name_simple",
"=",
"self",
".",
"simplify_name",
"(",
"' '",
".",
"join",
"(",
"[",
"name",
",",
"names",
"[",
"-",
"1",
"]",
"]",
")",
")",
"self",
".",
"authors_list_simple",
".",
"append",
"(",
"name_simple",
")",
"else",
":",
"self",
".",
"authors_list_simple",
".",
"append",
"(",
"self",
".",
"simplify_name",
"(",
"names",
"[",
"0",
"]",
")",
")",
"# number of prepositions",
"num_prepositions",
"=",
"0",
"for",
"name",
"in",
"names",
":",
"if",
"name",
"in",
"prepositions",
":",
"num_prepositions",
"+=",
"1",
"# splitting point",
"sp",
"=",
"1",
"+",
"num_suffixes",
"+",
"num_prepositions",
"self",
".",
"authors_list_split",
".",
"append",
"(",
"(",
"' '",
".",
"join",
"(",
"names",
"[",
":",
"-",
"sp",
"]",
")",
",",
"' '",
".",
"join",
"(",
"names",
"[",
"-",
"sp",
":",
"]",
")",
")",
")",
"# list of authors in BibTex format",
"self",
".",
"authors_bibtex",
"=",
"' and '",
".",
"join",
"(",
"self",
".",
"authors_list",
")",
"# overwrite authors string",
"if",
"len",
"(",
"self",
".",
"authors_list",
")",
">",
"2",
":",
"self",
".",
"authors",
"=",
"', and '",
".",
"join",
"(",
"[",
"', '",
".",
"join",
"(",
"self",
".",
"authors_list",
"[",
":",
"-",
"1",
"]",
")",
",",
"self",
".",
"authors_list",
"[",
"-",
"1",
"]",
"]",
")",
"elif",
"len",
"(",
"self",
".",
"authors_list",
")",
">",
"1",
":",
"self",
".",
"authors",
"=",
"' and '",
".",
"join",
"(",
"self",
".",
"authors_list",
")",
"else",
":",
"self",
".",
"authors",
"=",
"self",
".",
"authors_list",
"[",
"0",
"]"
] |
5a75cf88cf794937711b6850ff2acb07fe005f08
|
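
The least obvious step in the record above is the initials check: a trailing token of at most three characters, all uppercase and not a Roman-numeral or Jr./Sr. suffix, is treated as initials and moved to the front. A standalone reproduction of just that step:

from string import ascii_uppercase

suffixes = ['I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', "Jr.", "Sr."]

def expand_initials(author):
    names = author.split(' ')
    if (len(names[-1]) <= 3) \
            and names[-1] not in suffixes \
            and all(c in ascii_uppercase for c in names[-1]):
        # turn "Gauss CF" into ["C.", "F.", "Gauss"]
        names = [c + '.' for c in names[-1]] + names[:-1]
    return ' '.join(names)

print(expand_initials('Gauss CF'))   # C. F. Gauss
print(expand_initials('Gauss III'))  # Gauss III  (suffix left untouched)
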
valid
|
get_publications
|
Get all publications.
|
publications/templatetags/publication_extras.py
|
def get_publications(context, template='publications/publications.html'):
"""
Get all publications.
"""
types = Type.objects.filter(hidden=False)
publications = Publication.objects.select_related()
publications = publications.filter(external=False, type__in=types)
publications = publications.order_by('-year', '-month', '-id')
if not publications:
return ''
# load custom links and files
populate(publications)
return render_template(template, context['request'], {'publications': publications})
|
def get_publications(context, template='publications/publications.html'):
"""
Get all publications.
"""
types = Type.objects.filter(hidden=False)
publications = Publication.objects.select_related()
publications = publications.filter(external=False, type__in=types)
publications = publications.order_by('-year', '-month', '-id')
if not publications:
return ''
# load custom links and files
populate(publications)
return render_template(template, context['request'], {'publications': publications})
|
[
"Get",
"all",
"publications",
"."
] |
lucastheis/django-publications
|
python
|
https://github.com/lucastheis/django-publications/blob/5a75cf88cf794937711b6850ff2acb07fe005f08/publications/templatetags/publication_extras.py#L31-L47
|
[
"def",
"get_publications",
"(",
"context",
",",
"template",
"=",
"'publications/publications.html'",
")",
":",
"types",
"=",
"Type",
".",
"objects",
".",
"filter",
"(",
"hidden",
"=",
"False",
")",
"publications",
"=",
"Publication",
".",
"objects",
".",
"select_related",
"(",
")",
"publications",
"=",
"publications",
".",
"filter",
"(",
"external",
"=",
"False",
",",
"type__in",
"=",
"types",
")",
"publications",
"=",
"publications",
".",
"order_by",
"(",
"'-year'",
",",
"'-month'",
",",
"'-id'",
")",
"if",
"not",
"publications",
":",
"return",
"''",
"# load custom links and files",
"populate",
"(",
"publications",
")",
"return",
"render_template",
"(",
"template",
",",
"context",
"[",
"'request'",
"]",
",",
"{",
"'publications'",
":",
"publications",
"}",
")"
] |
5a75cf88cf794937711b6850ff2acb07fe005f08
|
valid
|
get_publication
|
Get a single publication.
|
publications/templatetags/publication_extras.py
|
def get_publication(context, id):
"""
Get a single publication.
"""
pbl = Publication.objects.filter(pk=int(id))
if len(pbl) < 1:
return ''
pbl[0].links = pbl[0].customlink_set.all()
pbl[0].files = pbl[0].customfile_set.all()
return render_template(
'publications/publication.html', context['request'], {'publication': pbl[0]})
|
def get_publication(context, id):
"""
Get a single publication.
"""
pbl = Publication.objects.filter(pk=int(id))
if len(pbl) < 1:
return ''
pbl[0].links = pbl[0].customlink_set.all()
pbl[0].files = pbl[0].customfile_set.all()
return render_template(
'publications/publication.html', context['request'], {'publication': pbl[0]})
|
[
"Get",
"a",
"single",
"publication",
"."
] |
lucastheis/django-publications
|
python
|
https://github.com/lucastheis/django-publications/blob/5a75cf88cf794937711b6850ff2acb07fe005f08/publications/templatetags/publication_extras.py#L50-L64
|
[
"def",
"get_publication",
"(",
"context",
",",
"id",
")",
":",
"pbl",
"=",
"Publication",
".",
"objects",
".",
"filter",
"(",
"pk",
"=",
"int",
"(",
"id",
")",
")",
"if",
"len",
"(",
"pbl",
")",
"<",
"1",
":",
"return",
"''",
"pbl",
"[",
"0",
"]",
".",
"links",
"=",
"pbl",
"[",
"0",
"]",
".",
"customlink_set",
".",
"all",
"(",
")",
"pbl",
"[",
"0",
"]",
".",
"files",
"=",
"pbl",
"[",
"0",
"]",
".",
"customfile_set",
".",
"all",
"(",
")",
"return",
"render_template",
"(",
"'publications/publication.html'",
",",
"context",
"[",
"'request'",
"]",
",",
"{",
"'publication'",
":",
"pbl",
"[",
"0",
"]",
"}",
")"
] |
5a75cf88cf794937711b6850ff2acb07fe005f08
|
valid
|
get_publication_list
|
Get a publication list.
|
publications/templatetags/publication_extras.py
|
def get_publication_list(context, list, template='publications/publications.html'):
"""
Get a publication list.
"""
list = List.objects.filter(list__iexact=list)
if not list:
return ''
list = list[0]
publications = list.publication_set.all()
publications = publications.order_by('-year', '-month', '-id')
if not publications:
return ''
# load custom links and files
populate(publications)
return render_template(
template, context['request'], {'list': list, 'publications': publications})
|
def get_publication_list(context, list, template='publications/publications.html'):
"""
Get a publication list.
"""
list = List.objects.filter(list__iexact=list)
if not list:
return ''
list = list[0]
publications = list.publication_set.all()
publications = publications.order_by('-year', '-month', '-id')
if not publications:
return ''
# load custom links and files
populate(publications)
return render_template(
template, context['request'], {'list': list, 'publications': publications})
|
[
"Get",
"a",
"publication",
"list",
"."
] |
lucastheis/django-publications
|
python
|
https://github.com/lucastheis/django-publications/blob/5a75cf88cf794937711b6850ff2acb07fe005f08/publications/templatetags/publication_extras.py#L67-L88
|
[
"def",
"get_publication_list",
"(",
"context",
",",
"list",
",",
"template",
"=",
"'publications/publications.html'",
")",
":",
"list",
"=",
"List",
".",
"objects",
".",
"filter",
"(",
"list__iexact",
"=",
"list",
")",
"if",
"not",
"list",
":",
"return",
"''",
"list",
"=",
"list",
"[",
"0",
"]",
"publications",
"=",
"list",
".",
"publication_set",
".",
"all",
"(",
")",
"publications",
"=",
"publications",
".",
"order_by",
"(",
"'-year'",
",",
"'-month'",
",",
"'-id'",
")",
"if",
"not",
"publications",
":",
"return",
"''",
"# load custom links and files",
"populate",
"(",
"publications",
")",
"return",
"render_template",
"(",
"template",
",",
"context",
"[",
"'request'",
"]",
",",
"{",
"'list'",
":",
"list",
",",
"'publications'",
":",
"publications",
"}",
")"
] |
5a75cf88cf794937711b6850ff2acb07fe005f08
|
valid
|
tex_parse
|
Renders some basic TeX math to HTML.
|
publications/templatetags/publication_extras.py
|
def tex_parse(string):
"""
Renders some basic TeX math to HTML.
"""
string = string.replace('{', '').replace('}', '')
def tex_replace(match):
return \
sub(r'\^(\w)', r'<sup>\1</sup>',
sub(r'\^\{(.*?)\}', r'<sup>\1</sup>',
sub(r'\_(\w)', r'<sub>\1</sub>',
sub(r'\_\{(.*?)\}', r'<sub>\1</sub>',
sub(r'\\(' + GREEK_LETTERS + ')', r'&\1;', match.group(1))))))
return mark_safe(sub(r'\$([^\$]*)\$', tex_replace, escape(string)))
|
def tex_parse(string):
"""
Renders some basic TeX math to HTML.
"""
string = string.replace('{', '').replace('}', '')
def tex_replace(match):
return \
sub(r'\^(\w)', r'<sup>\1</sup>',
sub(r'\^\{(.*?)\}', r'<sup>\1</sup>',
sub(r'\_(\w)', r'<sub>\1</sub>',
sub(r'\_\{(.*?)\}', r'<sub>\1</sub>',
sub(r'\\(' + GREEK_LETTERS + ')', r'&\1;', match.group(1))))))
return mark_safe(sub(r'\$([^\$]*)\$', tex_replace, escape(string)))
|
[
"Renders",
"some",
"basic",
"TeX",
"math",
"to",
"HTML",
"."
] |
lucastheis/django-publications
|
python
|
https://github.com/lucastheis/django-publications/blob/5a75cf88cf794937711b6850ff2acb07fe005f08/publications/templatetags/publication_extras.py#L91-L104
|
[
"def",
"tex_parse",
"(",
"string",
")",
":",
"string",
"=",
"string",
".",
"replace",
"(",
"'{'",
",",
"''",
")",
".",
"replace",
"(",
"'}'",
",",
"''",
")",
"def",
"tex_replace",
"(",
"match",
")",
":",
"return",
"sub",
"(",
"r'\\^(\\w)'",
",",
"r'<sup>\\1</sup>'",
",",
"sub",
"(",
"r'\\^\\{(.*?)\\}'",
",",
"r'<sup>\\1</sup>'",
",",
"sub",
"(",
"r'\\_(\\w)'",
",",
"r'<sub>\\1</sub>'",
",",
"sub",
"(",
"r'\\_\\{(.*?)\\}'",
",",
"r'<sub>\\1</sub>'",
",",
"sub",
"(",
"r'\\\\('",
"+",
"GREEK_LETTERS",
"+",
"')'",
",",
"r'&\\1;'",
",",
"match",
".",
"group",
"(",
"1",
")",
")",
")",
")",
")",
")",
"return",
"mark_safe",
"(",
"sub",
"(",
"r'\\$([^\\$]*)\\$'",
",",
"tex_replace",
",",
"escape",
"(",
"string",
")",
")",
")"
] |
5a75cf88cf794937711b6850ff2acb07fe005f08
|
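
A self-contained variant of the same transformation: Django's escape/mark_safe are replaced by html.escape and a plain str, and GREEK_LETTERS (defined elsewhere in the module) is stubbed with a short alternation, so treat both as assumptions. Incidentally, because the first line strips every brace from the input, the braced ^{...} and _{...} branches can never match; only the single-character forms fire.

from re import sub
from html import escape

GREEK_LETTERS = 'alpha|beta|gamma'   # stub; the real module defines the full list

def tex_parse(string):
    string = string.replace('{', '').replace('}', '')
    def tex_replace(match):
        return \
            sub(r'\^(\w)', r'<sup>\1</sup>',
            sub(r'\^\{(.*?)\}', r'<sup>\1</sup>',
            sub(r'\_(\w)', r'<sub>\1</sub>',
            sub(r'\_\{(.*?)\}', r'<sub>\1</sub>',
            sub(r'\\(' + GREEK_LETTERS + ')', r'&\1;', match.group(1))))))
    return sub(r'\$([^\$]*)\$', tex_replace, escape(string))

print(tex_parse(r'Decay of $\alpha_1^2$'))
# Decay of &alpha;<sub>1</sub><sup>2</sup>
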
valid
|
parse
|
Takes a string in BibTex format and returns a list of BibTex entries, where
each entry is a dictionary containing the entries' key-value pairs.
@type string: string
@param string: bibliography in BibTex format
@rtype: list
@return: a list of dictionaries representing a bibliography
|
publications/bibtex.py
|
def parse(string):
"""
Takes a string in BibTex format and returns a list of BibTex entries, where
each entry is a dictionary containing the entries' key-value pairs.
@type string: string
@param string: bibliography in BibTex format
@rtype: list
@return: a list of dictionaries representing a bibliography
"""
# bibliography
bib = []
# make sure we are dealing with unicode strings
if not isinstance(string, six.text_type):
string = string.decode('utf-8')
# replace special characters
for key, value in special_chars:
string = string.replace(key, value)
string = re.sub(r'\\[cuHvs]{?([a-zA-Z])}?', r'\1', string)
# split into BibTex entries
entries = re.findall(
r'(?u)@(\w+)[ \t]?{[ \t]*([^,\s]*)[ \t]*,?\s*((?:[^=,\s]+\s*\=\s*(?:"[^"]*"|{(?:[^{}]*|{[^{}]*})*}|[^,}]*),?\s*?)+)\s*}',
string)
for entry in entries:
# parse entry
pairs = re.findall(r'(?u)([^=,\s]+)\s*\=\s*("[^"]*"|{(?:[^{}]*|{[^{}]*})*}|[^,]*)', entry[2])
# add to bibliography
bib.append({'type': entry[0].lower(), 'key': entry[1]})
for key, value in pairs:
# post-process key and value
key = key.lower()
if value and value[0] == '"' and value[-1] == '"':
value = value[1:-1]
if value and value[0] == '{' and value[-1] == '}':
value = value[1:-1]
if key not in ['booktitle', 'title']:
value = value.replace('}', '').replace('{', '')
else:
if value.startswith('{') and value.endswith('}'):
value = value[1:]
value = value[:-1]
value = value.strip()
value = re.sub(r'\s+', ' ', value)
# store pair in bibliography
bib[-1][key] = value
return bib
|
def parse(string):
"""
Takes a string in BibTex format and returns a list of BibTex entries, where
each entry is a dictionary containing the entries' key-value pairs.
@type string: string
@param string: bibliography in BibTex format
@rtype: list
@return: a list of dictionaries representing a bibliography
"""
# bibliography
bib = []
# make sure we are dealing with unicode strings
if not isinstance(string, six.text_type):
string = string.decode('utf-8')
# replace special characters
for key, value in special_chars:
string = string.replace(key, value)
string = re.sub(r'\\[cuHvs]{?([a-zA-Z])}?', r'\1', string)
# split into BibTex entries
entries = re.findall(
r'(?u)@(\w+)[ \t]?{[ \t]*([^,\s]*)[ \t]*,?\s*((?:[^=,\s]+\s*\=\s*(?:"[^"]*"|{(?:[^{}]*|{[^{}]*})*}|[^,}]*),?\s*?)+)\s*}',
string)
for entry in entries:
# parse entry
pairs = re.findall(r'(?u)([^=,\s]+)\s*\=\s*("[^"]*"|{(?:[^{}]*|{[^{}]*})*}|[^,]*)', entry[2])
# add to bibliography
bib.append({'type': entry[0].lower(), 'key': entry[1]})
for key, value in pairs:
# post-process key and value
key = key.lower()
if value and value[0] == '"' and value[-1] == '"':
value = value[1:-1]
if value and value[0] == '{' and value[-1] == '}':
value = value[1:-1]
if key not in ['booktitle', 'title']:
value = value.replace('}', '').replace('{', '')
else:
if value.startswith('{') and value.endswith('}'):
value = value[1:]
value = value[:-1]
value = value.strip()
value = re.sub(r'\s+', ' ', value)
# store pair in bibliography
bib[-1][key] = value
return bib
|
[
"Takes",
"a",
"string",
"in",
"BibTex",
"format",
"and",
"returns",
"a",
"list",
"of",
"BibTex",
"entries",
"where",
"each",
"entry",
"is",
"a",
"dictionary",
"containing",
"the",
"entries",
"key",
"-",
"value",
"pairs",
"."
] |
lucastheis/django-publications
|
python
|
https://github.com/lucastheis/django-publications/blob/5a75cf88cf794937711b6850ff2acb07fe005f08/publications/bibtex.py#L46-L101
|
[
"def",
"parse",
"(",
"string",
")",
":",
"# bibliography",
"bib",
"=",
"[",
"]",
"# make sure we are dealing with unicode strings",
"if",
"not",
"isinstance",
"(",
"string",
",",
"six",
".",
"text_type",
")",
":",
"string",
"=",
"string",
".",
"decode",
"(",
"'utf-8'",
")",
"# replace special characters",
"for",
"key",
",",
"value",
"in",
"special_chars",
":",
"string",
"=",
"string",
".",
"replace",
"(",
"key",
",",
"value",
")",
"string",
"=",
"re",
".",
"sub",
"(",
"r'\\\\[cuHvs]{?([a-zA-Z])}?'",
",",
"r'\\1'",
",",
"string",
")",
"# split into BibTex entries",
"entries",
"=",
"re",
".",
"findall",
"(",
"r'(?u)@(\\w+)[ \\t]?{[ \\t]*([^,\\s]*)[ \\t]*,?\\s*((?:[^=,\\s]+\\s*\\=\\s*(?:\"[^\"]*\"|{(?:[^{}]*|{[^{}]*})*}|[^,}]*),?\\s*?)+)\\s*}'",
",",
"string",
")",
"for",
"entry",
"in",
"entries",
":",
"# parse entry",
"pairs",
"=",
"re",
".",
"findall",
"(",
"r'(?u)([^=,\\s]+)\\s*\\=\\s*(\"[^\"]*\"|{(?:[^{}]*|{[^{}]*})*}|[^,]*)'",
",",
"entry",
"[",
"2",
"]",
")",
"# add to bibliography",
"bib",
".",
"append",
"(",
"{",
"'type'",
":",
"entry",
"[",
"0",
"]",
".",
"lower",
"(",
")",
",",
"'key'",
":",
"entry",
"[",
"1",
"]",
"}",
")",
"for",
"key",
",",
"value",
"in",
"pairs",
":",
"# post-process key and value",
"key",
"=",
"key",
".",
"lower",
"(",
")",
"if",
"value",
"and",
"value",
"[",
"0",
"]",
"==",
"'\"'",
"and",
"value",
"[",
"-",
"1",
"]",
"==",
"'\"'",
":",
"value",
"=",
"value",
"[",
"1",
":",
"-",
"1",
"]",
"if",
"value",
"and",
"value",
"[",
"0",
"]",
"==",
"'{'",
"and",
"value",
"[",
"-",
"1",
"]",
"==",
"'}'",
":",
"value",
"=",
"value",
"[",
"1",
":",
"-",
"1",
"]",
"if",
"key",
"not",
"in",
"[",
"'booktitle'",
",",
"'title'",
"]",
":",
"value",
"=",
"value",
".",
"replace",
"(",
"'}'",
",",
"''",
")",
".",
"replace",
"(",
"'{'",
",",
"''",
")",
"else",
":",
"if",
"value",
".",
"startswith",
"(",
"'{'",
")",
"and",
"value",
".",
"endswith",
"(",
"'}'",
")",
":",
"value",
"=",
"value",
"[",
"1",
":",
"]",
"value",
"=",
"value",
"[",
":",
"-",
"1",
"]",
"value",
"=",
"value",
".",
"strip",
"(",
")",
"value",
"=",
"re",
".",
"sub",
"(",
"r'\\s+'",
",",
"' '",
",",
"value",
")",
"# store pair in bibliography",
"bib",
"[",
"-",
"1",
"]",
"[",
"key",
"]",
"=",
"value",
"return",
"bib"
] |
5a75cf88cf794937711b6850ff2acb07fe005f08
|
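
The two regular expressions above can be exercised directly. Below they are copied verbatim from the record and run on a one-entry bibliography; the special-character table and the later quote/brace stripping are omitted, so the title value still carries its braces.

import re

ENTRY_RE = r'(?u)@(\w+)[ \t]?{[ \t]*([^,\s]*)[ \t]*,?\s*((?:[^=,\s]+\s*\=\s*(?:"[^"]*"|{(?:[^{}]*|{[^{}]*})*}|[^,}]*),?\s*?)+)\s*}'
PAIR_RE = r'(?u)([^=,\s]+)\s*\=\s*("[^"]*"|{(?:[^{}]*|{[^{}]*})*}|[^,]*)'

bibtex = '@article{gauss1809, title={Theoria motus}, year=1809}'

for entry in re.findall(ENTRY_RE, bibtex):
    print(entry[0].lower(), entry[1])         # article gauss1809
    for key, value in re.findall(PAIR_RE, entry[2]):
        print(' ', key.lower(), '=', value)   # title = {Theoria motus}; year = 1809
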
valid
|
OrderedModel.swap
|
Swap the positions of this object with a reference object.
|
publications/models/orderedmodel.py
|
def swap(self, qs):
"""
Swap the positions of this object with a reference object.
"""
try:
replacement = qs[0]
except IndexError:
# already first/last
return
if not self._valid_ordering_reference(replacement):
raise ValueError(
"%r can only be swapped with instances of %r which %s equals %r." % (
self, self.__class__, self.order_with_respect_to,
self._get_order_with_respect_to()
)
)
self.order, replacement.order = replacement.order, self.order
self.save()
replacement.save()
|
def swap(self, qs):
"""
Swap the positions of this object with a reference object.
"""
try:
replacement = qs[0]
except IndexError:
# already first/last
return
if not self._valid_ordering_reference(replacement):
raise ValueError(
"%r can only be swapped with instances of %r which %s equals %r." % (
self, self.__class__, self.order_with_respect_to,
self._get_order_with_respect_to()
)
)
self.order, replacement.order = replacement.order, self.order
self.save()
replacement.save()
|
[
"Swap",
"the",
"positions",
"of",
"this",
"object",
"with",
"a",
"reference",
"object",
"."
] |
lucastheis/django-publications
|
python
|
https://github.com/lucastheis/django-publications/blob/5a75cf88cf794937711b6850ff2acb07fe005f08/publications/models/orderedmodel.py#L116-L134
|
[
"def",
"swap",
"(",
"self",
",",
"qs",
")",
":",
"try",
":",
"replacement",
"=",
"qs",
"[",
"0",
"]",
"except",
"IndexError",
":",
"# already first/last",
"return",
"if",
"not",
"self",
".",
"_valid_ordering_reference",
"(",
"replacement",
")",
":",
"raise",
"ValueError",
"(",
"\"%r can only be swapped with instances of %r which %s equals %r.\"",
"%",
"(",
"self",
",",
"self",
".",
"__class__",
",",
"self",
".",
"order_with_respect_to",
",",
"self",
".",
"_get_order_with_respect_to",
"(",
")",
")",
")",
"self",
".",
"order",
",",
"replacement",
".",
"order",
"=",
"replacement",
".",
"order",
",",
"self",
".",
"order",
"self",
".",
"save",
"(",
")",
"replacement",
".",
"save",
"(",
")"
] |
5a75cf88cf794937711b6850ff2acb07fe005f08
|
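
The core of swap above is plain tuple assignment, which exchanges the two order values before each object is saved; a Django-free sketch:

class Item:
    def __init__(self, name, order):
        self.name, self.order = name, order

a, b = Item('a', 1), Item('b', 2)

# Exchange positions in one statement, as swap() does before saving both rows.
a.order, b.order = b.order, a.order
print(a.order, b.order)  # 2 1
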
valid
|
OrderedModel.up
|
Move this object up one position.
|
publications/models/orderedmodel.py
|
def up(self):
"""
Move this object up one position.
"""
self.swap(self.get_ordering_queryset().filter(order__lt=self.order).order_by('-order'))
|
def up(self):
"""
Move this object up one position.
"""
self.swap(self.get_ordering_queryset().filter(order__lt=self.order).order_by('-order'))
|
[
"Move",
"this",
"object",
"up",
"one",
"position",
"."
] |
lucastheis/django-publications
|
python
|
https://github.com/lucastheis/django-publications/blob/5a75cf88cf794937711b6850ff2acb07fe005f08/publications/models/orderedmodel.py#L136-L140
|
[
"def",
"up",
"(",
"self",
")",
":",
"self",
".",
"swap",
"(",
"self",
".",
"get_ordering_queryset",
"(",
")",
".",
"filter",
"(",
"order__lt",
"=",
"self",
".",
"order",
")",
".",
"order_by",
"(",
"'-order'",
")",
")"
] |
5a75cf88cf794937711b6850ff2acb07fe005f08
|