| partition (stringclasses, 3 values) | func_name (stringlengths, 1-134) | docstring (stringlengths, 1-46.9k) | path (stringlengths, 4-223) | original_string (stringlengths, 75-104k) | code (stringlengths, 75-104k) | docstring_tokens (listlengths, 1-1.97k) | repo (stringlengths, 7-55) | language (stringclasses, 1 value) | url (stringlengths, 87-315) | code_tokens (listlengths, 19-28.4k) | sha (stringlengths, 40) |
|---|---|---|---|---|---|---|---|---|---|---|---|
valid
|
SupervisorSatchel.deploy_services
|
Collects the configurations for all registered services and writes
the appropriate supervisord.conf file.
|
burlap/supervisor.py
|
def deploy_services(self, site=None):
"""
Collects the configurations for all registered services and writes
the appropriate supervisord.conf file.
"""
verbose = self.verbose
r = self.local_renderer
if not r.env.manage_configs:
return
#
# target_sites = self.genv.available_sites_by_host.get(hostname, None)
self.render_paths()
supervisor_services = []
if r.env.purge_all_confs:
r.sudo('rm -Rf /etc/supervisor/conf.d/*')
#TODO:check available_sites_by_host and remove dead?
self.write_configs(site=site)
for _site, site_data in self.iter_sites(site=site, renderer=self.render_paths):
if verbose:
print('deploy_services.site:', _site)
# Only load site configurations that are allowed for this host.
# if target_sites is not None:
# assert isinstance(target_sites, (tuple, list))
# if site not in target_sites:
# continue
for cb in self.genv._supervisor_create_service_callbacks:
if self.verbose:
print('cb:', cb)
ret = cb(site=_site)
if self.verbose:
print('ret:', ret)
if isinstance(ret, six.string_types):
supervisor_services.append(ret)
elif isinstance(ret, tuple):
assert len(ret) == 2
conf_name, conf_content = ret
if self.dryrun:
print('supervisor conf filename:', conf_name)
print(conf_content)
self.write_to_file(conf_content)
self.env.services_rendered = '\n'.join(supervisor_services)
fn = self.render_to_file(self.env.config_template)
r.put(local_path=fn, remote_path=self.env.config_path, use_sudo=True)
# We use supervisorctl to configure supervisor, but this will throw a uselessly vague
# error message if supervisor isn't running.
if not self.is_running():
self.start()
# Reload config and then add and remove as necessary (restarts programs)
r.sudo('supervisorctl update')
|
def deploy_services(self, site=None):
"""
Collects the configurations for all registered services and writes
the appropriate supervisord.conf file.
"""
verbose = self.verbose
r = self.local_renderer
if not r.env.manage_configs:
return
#
# target_sites = self.genv.available_sites_by_host.get(hostname, None)
self.render_paths()
supervisor_services = []
if r.env.purge_all_confs:
r.sudo('rm -Rf /etc/supervisor/conf.d/*')
#TODO:check available_sites_by_host and remove dead?
self.write_configs(site=site)
for _site, site_data in self.iter_sites(site=site, renderer=self.render_paths):
if verbose:
print('deploy_services.site:', _site)
# Only load site configurations that are allowed for this host.
# if target_sites is not None:
# assert isinstance(target_sites, (tuple, list))
# if site not in target_sites:
# continue
for cb in self.genv._supervisor_create_service_callbacks:
if self.verbose:
print('cb:', cb)
ret = cb(site=_site)
if self.verbose:
print('ret:', ret)
if isinstance(ret, six.string_types):
supervisor_services.append(ret)
elif isinstance(ret, tuple):
assert len(ret) == 2
conf_name, conf_content = ret
if self.dryrun:
print('supervisor conf filename:', conf_name)
print(conf_content)
self.write_to_file(conf_content)
self.env.services_rendered = '\n'.join(supervisor_services)
fn = self.render_to_file(self.env.config_template)
r.put(local_path=fn, remote_path=self.env.config_path, use_sudo=True)
# We use supervisorctl to configure supervisor, but this will throw a uselessly vague
# error message if supervisor isn't running.
if not self.is_running():
self.start()
# Reload config and then add and remove as necessary (restarts programs)
r.sudo('supervisorctl update')
|
[
"Collects",
"the",
"configurations",
"for",
"all",
"registered",
"services",
"and",
"writes",
"the",
"appropriate",
"supervisord",
".",
"conf",
"file",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/supervisor.py#L237-L297
|
[
"def",
"deploy_services",
"(",
"self",
",",
"site",
"=",
"None",
")",
":",
"verbose",
"=",
"self",
".",
"verbose",
"r",
"=",
"self",
".",
"local_renderer",
"if",
"not",
"r",
".",
"env",
".",
"manage_configs",
":",
"return",
"#",
"# target_sites = self.genv.available_sites_by_host.get(hostname, None)",
"self",
".",
"render_paths",
"(",
")",
"supervisor_services",
"=",
"[",
"]",
"if",
"r",
".",
"env",
".",
"purge_all_confs",
":",
"r",
".",
"sudo",
"(",
"'rm -Rf /etc/supervisor/conf.d/*'",
")",
"#TODO:check available_sites_by_host and remove dead?",
"self",
".",
"write_configs",
"(",
"site",
"=",
"site",
")",
"for",
"_site",
",",
"site_data",
"in",
"self",
".",
"iter_sites",
"(",
"site",
"=",
"site",
",",
"renderer",
"=",
"self",
".",
"render_paths",
")",
":",
"if",
"verbose",
":",
"print",
"(",
"'deploy_services.site:'",
",",
"_site",
")",
"# Only load site configurations that are allowed for this host.",
"# if target_sites is not None:",
"# assert isinstance(target_sites, (tuple, list))",
"# if site not in target_sites:",
"# continue",
"for",
"cb",
"in",
"self",
".",
"genv",
".",
"_supervisor_create_service_callbacks",
":",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"'cb:'",
",",
"cb",
")",
"ret",
"=",
"cb",
"(",
"site",
"=",
"_site",
")",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"'ret:'",
",",
"ret",
")",
"if",
"isinstance",
"(",
"ret",
",",
"six",
".",
"string_types",
")",
":",
"supervisor_services",
".",
"append",
"(",
"ret",
")",
"elif",
"isinstance",
"(",
"ret",
",",
"tuple",
")",
":",
"assert",
"len",
"(",
"ret",
")",
"==",
"2",
"conf_name",
",",
"conf_content",
"=",
"ret",
"if",
"self",
".",
"dryrun",
":",
"print",
"(",
"'supervisor conf filename:'",
",",
"conf_name",
")",
"print",
"(",
"conf_content",
")",
"self",
".",
"write_to_file",
"(",
"conf_content",
")",
"self",
".",
"env",
".",
"services_rendered",
"=",
"'\\n'",
".",
"join",
"(",
"supervisor_services",
")",
"fn",
"=",
"self",
".",
"render_to_file",
"(",
"self",
".",
"env",
".",
"config_template",
")",
"r",
".",
"put",
"(",
"local_path",
"=",
"fn",
",",
"remote_path",
"=",
"self",
".",
"env",
".",
"config_path",
",",
"use_sudo",
"=",
"True",
")",
"# We use supervisorctl to configure supervisor, but this will throw a uselessly vague",
"# error message is supervisor isn't running.",
"if",
"not",
"self",
".",
"is_running",
"(",
")",
":",
"self",
".",
"start",
"(",
")",
"# Reload config and then add and remove as necessary (restarts programs)",
"r",
".",
"sudo",
"(",
"'supervisorctl update'",
")"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
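Each callback in genv._supervisor_create_service_callbacks may return either a supervisor program name (str) or a (conf_name, conf_content) tuple, as the branching above shows. A minimal sketch of a compatible callback follows; the program name and command are placeholders, and the registration step is not shown in this row:

```python
# Minimal sketch of a callback usable with deploy_services() above.
# The program name and command are placeholder assumptions; the only
# contract visible in the code is: accept `site`, and return a str or
# a (conf_name, conf_content) tuple.
def create_myapp_service(site=None):
    conf_name = 'myapp_%s.conf' % site
    conf_content = (
        '[program:myapp_%s]\n'
        'command=/usr/local/bin/myapp --site=%s\n'
        'autorestart=true\n'
    ) % (site, site)
    return conf_name, conf_content
```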
valid
|
GitSatchel.clone
|
Clone a remote Git repository into a new directory.
:param remote_url: URL of the remote repository to clone.
:type remote_url: str
:param path: Path of the working copy directory. Must not exist yet.
:type path: str
:param use_sudo: If ``True`` execute ``git`` with
:func:`fabric.operations.sudo`, else with
:func:`fabric.operations.run`.
:type use_sudo: bool
:param user: If ``use_sudo is True``, run :func:`fabric.operations.sudo`
with the given user. If ``use_sudo is False`` this parameter
has no effect.
:type user: str
|
burlap/git.py
|
def clone(self, remote_url, path=None, use_sudo=False, user=None):
"""
Clone a remote Git repository into a new directory.
:param remote_url: URL of the remote repository to clone.
:type remote_url: str
:param path: Path of the working copy directory. Must not exist yet.
:type path: str
:param use_sudo: If ``True`` execute ``git`` with
:func:`fabric.operations.sudo`, else with
:func:`fabric.operations.run`.
:type use_sudo: bool
:param user: If ``use_sudo is True``, run :func:`fabric.operations.sudo`
with the given user. If ``use_sudo is False`` this parameter
has no effect.
:type user: str
"""
cmd = 'git clone --quiet %s' % remote_url
if path is not None:
cmd = cmd + ' %s' % path
if use_sudo and user is None:
run_as_root(cmd)
elif use_sudo:
sudo(cmd, user=user)
else:
run(cmd)
|
def clone(self, remote_url, path=None, use_sudo=False, user=None):
"""
Clone a remote Git repository into a new directory.
:param remote_url: URL of the remote repository to clone.
:type remote_url: str
:param path: Path of the working copy directory. Must not exist yet.
:type path: str
:param use_sudo: If ``True`` execute ``git`` with
:func:`fabric.operations.sudo`, else with
:func:`fabric.operations.run`.
:type use_sudo: bool
:param user: If ``use_sudo is True``, run :func:`fabric.operations.sudo`
with the given user. If ``use_sudo is False`` this parameter
has no effect.
:type user: str
"""
cmd = 'git clone --quiet %s' % remote_url
if path is not None:
cmd = cmd + ' %s' % path
if use_sudo and user is None:
run_as_root(cmd)
elif use_sudo:
sudo(cmd, user=user)
else:
run(cmd)
|
[
"Clone",
"a",
"remote",
"Git",
"repository",
"into",
"a",
"new",
"directory",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/git.py#L60-L90
|
[
"def",
"clone",
"(",
"self",
",",
"remote_url",
",",
"path",
"=",
"None",
",",
"use_sudo",
"=",
"False",
",",
"user",
"=",
"None",
")",
":",
"cmd",
"=",
"'git clone --quiet %s'",
"%",
"remote_url",
"if",
"path",
"is",
"not",
"None",
":",
"cmd",
"=",
"cmd",
"+",
"' %s'",
"%",
"path",
"if",
"use_sudo",
"and",
"user",
"is",
"None",
":",
"run_as_root",
"(",
"cmd",
")",
"elif",
"use_sudo",
":",
"sudo",
"(",
"cmd",
",",
"user",
"=",
"user",
")",
"else",
":",
"run",
"(",
"cmd",
")"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
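A hedged usage sketch of clone(); `git` stands for an existing GitSatchel instance (its construction is outside this row), and the URL and path are placeholders:

```python
# Usage sketch; `git` is an assumed GitSatchel instance, and the URL
# and path are placeholders.
git.clone('https://github.com/example/app.git', path='/srv/app')  # run()
git.clone('https://github.com/example/app.git', path='/srv/app',
          use_sudo=True)                 # run_as_root(), since user is None
git.clone('https://github.com/example/app.git', path='/srv/app',
          use_sudo=True, user='deploy')  # sudo(cmd, user='deploy')
```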
valid
|
GitSatchel.add_remote
|
Add a remote Git repository into a directory.
:param path: Path of the working copy directory. This directory must exist
and be a Git working copy with a default remote to fetch from.
:type path: str
:param use_sudo: If ``True`` execute ``git`` with
:func:`fabric.operations.sudo`, else with
:func:`fabric.operations.run`.
:type use_sudo: bool
:param user: If ``use_sudo is True``, run :func:`fabric.operations.sudo`
with the given user. If ``use_sudo is False`` this parameter
has no effect.
:type user: str
:param name: name for the remote repository
:type name: str
:param remote_url: URL of the remote repository
:type remote_url: str
:param fetch: If ``True`` execute ``git remote add -f``
:type fetch: bool
|
burlap/git.py
|
def add_remote(self, path, name, remote_url, use_sudo=False, user=None, fetch=True):
"""
Add a remote Git repository into a directory.
:param path: Path of the working copy directory. This directory must exist
and be a Git working copy with a default remote to fetch from.
:type path: str
:param use_sudo: If ``True`` execute ``git`` with
:func:`fabric.operations.sudo`, else with
:func:`fabric.operations.run`.
:type use_sudo: bool
:param user: If ``use_sudo is True``, run :func:`fabric.operations.sudo`
with the given user. If ``use_sudo is False`` this parameter
has no effect.
:type user: str
:param name: name for the remote repository
:type name: str
:param remote_url: URL of the remote repository
:type remote_url: str
:param fetch: If ``True`` execute ``git remote add -f``
:type fetch: bool
"""
if path is None:
raise ValueError("Path to the working copy is needed to add a remote")
if fetch:
cmd = 'git remote add -f %s %s' % (name, remote_url)
else:
cmd = 'git remote add %s %s' % (name, remote_url)
with cd(path):
if use_sudo and user is None:
run_as_root(cmd)
elif use_sudo:
sudo(cmd, user=user)
else:
run(cmd)
|
def add_remote(self, path, name, remote_url, use_sudo=False, user=None, fetch=True):
"""
Add a remote Git repository into a directory.
:param path: Path of the working copy directory. This directory must exist
and be a Git working copy with a default remote to fetch from.
:type path: str
:param use_sudo: If ``True`` execute ``git`` with
:func:`fabric.operations.sudo`, else with
:func:`fabric.operations.run`.
:type use_sudo: bool
:param user: If ``use_sudo is True``, run :func:`fabric.operations.sudo`
with the given user. If ``use_sudo is False`` this parameter
has no effect.
:type user: str
:param name: name for the remote repository
:type name: str
:param remote_url: URL of the remote repository
:type remote_url: str
:param fetch: If ``True`` execute ``git remote add -f``
:type fetch: bool
"""
if path is None:
raise ValueError("Path to the working copy is needed to add a remote")
if fetch:
cmd = 'git remote add -f %s %s' % (name, remote_url)
else:
cmd = 'git remote add %s %s' % (name, remote_url)
with cd(path):
if use_sudo and user is None:
run_as_root(cmd)
elif use_sudo:
sudo(cmd, user=user)
else:
run(cmd)
|
[
"Add",
"a",
"remote",
"Git",
"repository",
"into",
"a",
"directory",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/git.py#L93-L134
|
[
"def",
"add_remote",
"(",
"self",
",",
"path",
",",
"name",
",",
"remote_url",
",",
"use_sudo",
"=",
"False",
",",
"user",
"=",
"None",
",",
"fetch",
"=",
"True",
")",
":",
"if",
"path",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Path to the working copy is needed to add a remote\"",
")",
"if",
"fetch",
":",
"cmd",
"=",
"'git remote add -f %s %s'",
"%",
"(",
"name",
",",
"remote_url",
")",
"else",
":",
"cmd",
"=",
"'git remote add %s %s'",
"%",
"(",
"name",
",",
"remote_url",
")",
"with",
"cd",
"(",
"path",
")",
":",
"if",
"use_sudo",
"and",
"user",
"is",
"None",
":",
"run_as_root",
"(",
"cmd",
")",
"elif",
"use_sudo",
":",
"sudo",
"(",
"cmd",
",",
"user",
"=",
"user",
")",
"else",
":",
"run",
"(",
"cmd",
")"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
GitSatchel.fetch
|
Fetch changes from the default remote repository.
This will fetch new changesets, but will not update the contents of
the working tree unless you do a merge or rebase.
:param path: Path of the working copy directory. This directory must exist
and be a Git working copy with a default remote to fetch from.
:type path: str
:param use_sudo: If ``True`` execute ``git`` with
:func:`fabric.operations.sudo`, else with
:func:`fabric.operations.run`.
:type use_sudo: bool
:param user: If ``use_sudo is True``, run :func:`fabric.operations.sudo`
with the given user. If ``use_sudo is False`` this parameter
has no effect.
:type user: str
:param remote: Fetch this remote, or the default remote if ``None``.
:type remote: str
|
burlap/git.py
|
def fetch(self, path, use_sudo=False, user=None, remote=None):
"""
Fetch changes from the default remote repository.
This will fetch new changesets, but will not update the contents of
the working tree unless you do a merge or rebase.
:param path: Path of the working copy directory. This directory must exist
and be a Git working copy with a default remote to fetch from.
:type path: str
:param use_sudo: If ``True`` execute ``git`` with
:func:`fabric.operations.sudo`, else with
:func:`fabric.operations.run`.
:type use_sudo: bool
:param user: If ``use_sudo is True``, run :func:`fabric.operations.sudo`
with the given user. If ``use_sudo is False`` this parameter
has no effect.
:type user: str
:param remote: Fetch this remote, or the default remote if ``None``.
:type remote: str
"""
if path is None:
raise ValueError("Path to the working copy is needed to fetch from a remote repository.")
if remote is not None:
cmd = 'git fetch %s' % remote
else:
cmd = 'git fetch'
with cd(path):
if use_sudo and user is None:
run_as_root(cmd)
elif use_sudo:
sudo(cmd, user=user)
else:
run(cmd)
|
def fetch(self, path, use_sudo=False, user=None, remote=None):
"""
Fetch changes from the default remote repository.
This will fetch new changesets, but will not update the contents of
the working tree unless you do a merge or rebase.
:param path: Path of the working copy directory. This directory must exist
and be a Git working copy with a default remote to fetch from.
:type path: str
:param use_sudo: If ``True`` execute ``git`` with
:func:`fabric.operations.sudo`, else with
:func:`fabric.operations.run`.
:type use_sudo: bool
:param user: If ``use_sudo is True``, run :func:`fabric.operations.sudo`
with the given user. If ``use_sudo is False`` this parameter
has no effect.
:type user: str
:param remote: Fetch this remote, or the default remote if ``None``.
:type remote: str
"""
if path is None:
raise ValueError("Path to the working copy is needed to fetch from a remote repository.")
if remote is not None:
cmd = 'git fetch %s' % remote
else:
cmd = 'git fetch'
with cd(path):
if use_sudo and user is None:
run_as_root(cmd)
elif use_sudo:
sudo(cmd, user=user)
else:
run(cmd)
|
[
"Fetch",
"changes",
"from",
"the",
"default",
"remote",
"repository",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/git.py#L137-L176
|
[
"def",
"fetch",
"(",
"self",
",",
"path",
",",
"use_sudo",
"=",
"False",
",",
"user",
"=",
"None",
",",
"remote",
"=",
"None",
")",
":",
"if",
"path",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Path to the working copy is needed to fetch from a remote repository.\"",
")",
"if",
"remote",
"is",
"not",
"None",
":",
"cmd",
"=",
"'git fetch %s'",
"%",
"remote",
"else",
":",
"cmd",
"=",
"'git fetch'",
"with",
"cd",
"(",
"path",
")",
":",
"if",
"use_sudo",
"and",
"user",
"is",
"None",
":",
"run_as_root",
"(",
"cmd",
")",
"elif",
"use_sudo",
":",
"sudo",
"(",
"cmd",
",",
"user",
"=",
"user",
")",
"else",
":",
"run",
"(",
"cmd",
")"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
GitSatchel.pull
|
Fetch changes from the default remote repository and merge them.
:param path: Path of the working copy directory. This directory must exist
and be a Git working copy with a default remote to pull from.
:type path: str
:param use_sudo: If ``True`` execute ``git`` with
:func:`fabric.operations.sudo`, else with
:func:`fabric.operations.run`.
:type use_sudo: bool
:param user: If ``use_sudo is True``, run :func:`fabric.operations.sudo`
with the given user. If ``use_sudo is False`` this parameter
has no effect.
:type user: str
:param force: If ``True``, append the ``--force`` option to the command.
:type force: bool
|
burlap/git.py
|
def pull(self, path, use_sudo=False, user=None, force=False):
"""
Fetch changes from the default remote repository and merge them.
:param path: Path of the working copy directory. This directory must exist
and be a Git working copy with a default remote to pull from.
:type path: str
:param use_sudo: If ``True`` execute ``git`` with
:func:`fabric.operations.sudo`, else with
:func:`fabric.operations.run`.
:type use_sudo: bool
:param user: If ``use_sudo is True``, run :func:`fabric.operations.sudo`
with the given user. If ``use_sudo is False`` this parameter
has no effect.
:type user: str
:param force: If ``True``, append the ``--force`` option to the command.
:type force: bool
"""
if path is None:
raise ValueError("Path to the working copy is needed to pull from a remote repository.")
options = []
if force:
options.append('--force')
options = ' '.join(options)
cmd = 'git pull %s' % options
with cd(path):
if use_sudo and user is None:
run_as_root(cmd)
elif use_sudo:
sudo(cmd, user=user)
else:
run(cmd)
|
def pull(self, path, use_sudo=False, user=None, force=False):
"""
Fetch changes from the default remote repository and merge them.
:param path: Path of the working copy directory. This directory must exist
and be a Git working copy with a default remote to pull from.
:type path: str
:param use_sudo: If ``True`` execute ``git`` with
:func:`fabric.operations.sudo`, else with
:func:`fabric.operations.run`.
:type use_sudo: bool
:param user: If ``use_sudo is True``, run :func:`fabric.operations.sudo`
with the given user. If ``use_sudo is False`` this parameter
has no effect.
:type user: str
:param force: If ``True``, append the ``--force`` option to the command.
:type force: bool
"""
if path is None:
raise ValueError("Path to the working copy is needed to pull from a remote repository.")
options = []
if force:
options.append('--force')
options = ' '.join(options)
cmd = 'git pull %s' % options
with cd(path):
if use_sudo and user is None:
run_as_root(cmd)
elif use_sudo:
sudo(cmd, user=user)
else:
run(cmd)
|
[
"Fetch",
"changes",
"from",
"the",
"default",
"remote",
"repository",
"and",
"merge",
"them",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/git.py#L179-L216
|
[
"def",
"pull",
"(",
"self",
",",
"path",
",",
"use_sudo",
"=",
"False",
",",
"user",
"=",
"None",
",",
"force",
"=",
"False",
")",
":",
"if",
"path",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Path to the working copy is needed to pull from a remote repository.\"",
")",
"options",
"=",
"[",
"]",
"if",
"force",
":",
"options",
".",
"append",
"(",
"'--force'",
")",
"options",
"=",
"' '",
".",
"join",
"(",
"options",
")",
"cmd",
"=",
"'git pull %s'",
"%",
"options",
"with",
"cd",
"(",
"path",
")",
":",
"if",
"use_sudo",
"and",
"user",
"is",
"None",
":",
"run_as_root",
"(",
"cmd",
")",
"elif",
"use_sudo",
":",
"sudo",
"(",
"cmd",
",",
"user",
"=",
"user",
")",
"else",
":",
"run",
"(",
"cmd",
")"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
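clone(), add_remote(), fetch(), and pull() all dispatch execution the same way: run_as_root() when use_sudo is true and user is None, sudo(cmd, user=user) when both are given, and plain run() otherwise. A usage sketch of pull(); `git` is an assumed GitSatchel instance and the path is a placeholder:

```python
# Usage sketch; `git` and '/srv/app' are assumptions.
git.pull('/srv/app')                        # git pull, via run()
git.pull('/srv/app', force=True,
         use_sudo=True, user='deploy')      # git pull --force, via sudo -u deploy
```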
valid
|
GitTrackerSatchel.get_logs_between_commits
|
Retrieves all commit messages for all commits between the given commit numbers
on the current branch.
|
burlap/git.py
|
def get_logs_between_commits(self, a, b):
"""
Retrieves all commit messages for all commits between the given commit numbers
on the current branch.
"""
ret = self.local('git --no-pager log --pretty=oneline %s...%s' % (a, b), capture=True)
if self.verbose:
print(ret)
return str(ret)
|
def get_logs_between_commits(self, a, b):
"""
Retrieves all commit messages for all commits between the given commit numbers
on the current branch.
"""
ret = self.local('git --no-pager log --pretty=oneline %s...%s' % (a, b), capture=True)
if self.verbose:
print(ret)
return str(ret)
|
[
"Retrieves",
"all",
"commit",
"messages",
"for",
"all",
"commits",
"between",
"the",
"given",
"commit",
"numbers",
"on",
"the",
"current",
"branch",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/git.py#L351-L360
|
[
"def",
"get_logs_between_commits",
"(",
"self",
",",
"a",
",",
"b",
")",
":",
"print",
"(",
"'REAL'",
")",
"ret",
"=",
"self",
".",
"local",
"(",
"'git --no-pager log --pretty=oneline %s...%s'",
"%",
"(",
"a",
",",
"b",
")",
",",
"capture",
"=",
"True",
")",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"ret",
")",
"return",
"str",
"(",
"ret",
")"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
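An illustrative consumer of the oneline output returned above; `git_tracker` stands for a GitTrackerSatchel instance and the refs are placeholders:

```python
# Sketch: split the "<sha> <subject>" lines that
# `git --no-pager log --pretty=oneline a...b` produces.
logs = git_tracker.get_logs_between_commits('v1.0', 'HEAD')
for line in logs.splitlines():
    sha, _, subject = line.partition(' ')
    print(sha[:8], subject)
```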
valid
|
GitTrackerSatchel.get_current_commit
|
Retrieves the git commit number of the current head branch.
|
burlap/git.py
|
def get_current_commit(self):
"""
Retrieves the git commit number of the current head branch.
"""
with hide('running', 'stdout', 'stderr', 'warnings'):
s = str(self.local('git rev-parse HEAD', capture=True))
self.vprint('current commit:', s)
return s
|
def get_current_commit(self):
"""
Retrieves the git commit number of the current head branch.
"""
with hide('running', 'stdout', 'stderr', 'warnings'):
s = str(self.local('git rev-parse HEAD', capture=True))
self.vprint('current commit:', s)
return s
|
[
"Retrieves",
"the",
"git",
"commit",
"number",
"of",
"the",
"current",
"head",
"branch",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/git.py#L363-L370
|
[
"def",
"get_current_commit",
"(",
"self",
")",
":",
"with",
"hide",
"(",
"'running'",
",",
"'stdout'",
",",
"'stderr'",
",",
"'warnings'",
")",
":",
"s",
"=",
"str",
"(",
"self",
".",
"local",
"(",
"'git rev-parse HEAD'",
",",
"capture",
"=",
"True",
")",
")",
"self",
".",
"vprint",
"(",
"'current commit:'",
",",
"s",
")",
"return",
"s"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
GitTrackerSatchel.record_manifest
|
Called after a deployment to record any data necessary to detect changes
for a future deployment.
|
burlap/git.py
|
def record_manifest(self):
"""
Called after a deployment to record any data necessary to detect changes
for a future deployment.
"""
manifest = super(GitTrackerSatchel, self).record_manifest()
manifest[CURRENT_COMMIT] = self.get_current_commit()
return manifest
|
def record_manifest(self):
"""
Called after a deployment to record any data necessary to detect changes
for a future deployment.
"""
manifest = super(GitTrackerSatchel, self).record_manifest()
manifest[CURRENT_COMMIT] = self.get_current_commit()
return manifest
|
[
"Called",
"after",
"a",
"deployment",
"to",
"record",
"any",
"data",
"necessary",
"to",
"detect",
"changes",
"for",
"a",
"future",
"deployment",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/git.py#L372-L379
|
[
"def",
"record_manifest",
"(",
"self",
")",
":",
"manifest",
"=",
"super",
"(",
"GitTrackerSatchel",
",",
"self",
")",
".",
"record_manifest",
"(",
")",
"manifest",
"[",
"CURRENT_COMMIT",
"]",
"=",
"self",
".",
"get_current_commit",
"(",
")",
"return",
"manifest"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
VagrantSatchel.ssh_config
|
Get the SSH parameters for connecting to a vagrant VM.
|
burlap/vagrant.py
|
def ssh_config(self, name=''):
"""
Get the SSH parameters for connecting to a vagrant VM.
"""
r = self.local_renderer
with self.settings(hide('running')):
output = r.local('vagrant ssh-config %s' % name, capture=True)
config = {}
for line in output.splitlines()[1:]:
key, value = line.strip().split(' ', 1)
config[key] = value
return config
|
def ssh_config(self, name=''):
"""
Get the SSH parameters for connecting to a vagrant VM.
"""
r = self.local_renderer
with self.settings(hide('running')):
output = r.local('vagrant ssh-config %s' % name, capture=True)
config = {}
for line in output.splitlines()[1:]:
key, value = line.strip().split(' ', 1)
config[key] = value
return config
|
[
"Get",
"the",
"SSH",
"parameters",
"for",
"connecting",
"to",
"a",
"vagrant",
"VM",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/vagrant.py#L31-L43
|
[
"def",
"ssh_config",
"(",
"self",
",",
"name",
"=",
"''",
")",
":",
"r",
"=",
"self",
".",
"local_renderer",
"with",
"self",
".",
"settings",
"(",
"hide",
"(",
"'running'",
")",
")",
":",
"output",
"=",
"r",
".",
"local",
"(",
"'vagrant ssh-config %s'",
"%",
"name",
",",
"capture",
"=",
"True",
")",
"config",
"=",
"{",
"}",
"for",
"line",
"in",
"output",
".",
"splitlines",
"(",
")",
"[",
"1",
":",
"]",
":",
"key",
",",
"value",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"' '",
",",
"2",
")",
"config",
"[",
"key",
"]",
"=",
"value",
"return",
"config"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
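The loop above skips the leading `Host` line and splits each remaining line on the first space. A stand-alone illustration on typical `vagrant ssh-config` output (the sample itself is an assumption):

```python
# Stand-alone illustration of the parsing in ssh_config(); the sample
# output is typical but assumed.
sample = """Host default
  HostName 127.0.0.1
  User vagrant
  Port 2222
  IdentityFile /home/me/.vagrant.d/insecure_private_key"""
config = {}
for line in sample.splitlines()[1:]:
    key, value = line.strip().split(' ', 1)
    config[key] = value
print(config['HostName'], config['Port'])  # 127.0.0.1 2222
```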
valid
|
VagrantSatchel.version
|
Get the Vagrant version.
|
burlap/vagrant.py
|
def version(self):
"""
Get the Vagrant version.
"""
r = self.local_renderer
with self.settings(hide('running', 'warnings'), warn_only=True):
res = r.local('vagrant --version', capture=True)
if res.failed:
return None
line = res.splitlines()[-1]
version = re.match(r'Vagrant (?:v(?:ersion )?)?(.*)', line).group(1)
return tuple(_to_int(part) for part in version.split('.'))
|
def version(self):
"""
Get the Vagrant version.
"""
r = self.local_renderer
with self.settings(hide('running', 'warnings'), warn_only=True):
res = r.local('vagrant --version', capture=True)
if res.failed:
return None
line = res.splitlines()[-1]
version = re.match(r'Vagrant (?:v(?:ersion )?)?(.*)', line).group(1)
return tuple(_to_int(part) for part in version.split('.'))
|
[
"Get",
"the",
"Vagrant",
"version",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/vagrant.py#L69-L80
|
[
"def",
"version",
"(",
"self",
")",
":",
"r",
"=",
"self",
".",
"local_renderer",
"with",
"self",
".",
"settings",
"(",
"hide",
"(",
"'running'",
",",
"'warnings'",
")",
",",
"warn_only",
"=",
"True",
")",
":",
"res",
"=",
"r",
".",
"local",
"(",
"'vagrant --version'",
",",
"capture",
"=",
"True",
")",
"if",
"res",
".",
"failed",
":",
"return",
"None",
"line",
"=",
"res",
".",
"splitlines",
"(",
")",
"[",
"-",
"1",
"]",
"version",
"=",
"re",
".",
"match",
"(",
"r'Vagrant (?:v(?:ersion )?)?(.*)'",
",",
"line",
")",
".",
"group",
"(",
"1",
")",
"return",
"tuple",
"(",
"_to_int",
"(",
"part",
")",
"for",
"part",
"in",
"version",
".",
"split",
"(",
"'.'",
")",
")"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
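A stand-alone illustration of the parsing in version(); the sample line is an assumption, and `_to_int` (defined elsewhere in burlap/vagrant.py) is approximated here as int() with a string fallback:

```python
import re

def _to_int(val):
    # Approximation of burlap's _to_int helper (an assumption).
    try:
        return int(val)
    except ValueError:
        return val

line = 'Vagrant 2.2.19'  # sample output, an assumption
version = re.match(r'Vagrant (?:v(?:ersion )?)?(.*)', line).group(1)
print(tuple(_to_int(part) for part in version.split('.')))  # (2, 2, 19)
```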
valid
|
VagrantSatchel.vagrant
|
Run the following tasks on a vagrant box.
First, you need to import this task in your ``fabfile.py``::
from fabric.api import *
from burlap.vagrant import vagrant
@task
def some_task():
run('echo hello')
Then you can easily run tasks on your current Vagrant box::
$ fab vagrant some_task
|
burlap/vagrant.py
|
def vagrant(self, name=''):
"""
Run the following tasks on a vagrant box.
First, you need to import this task in your ``fabfile.py``::
from fabric.api import *
from burlap.vagrant import vagrant
@task
def some_task():
run('echo hello')
Then you can easily run tasks on your current Vagrant box::
$ fab vagrant some_task
"""
r = self.local_renderer
config = self.ssh_config(name)
extra_args = self._settings_dict(config)
r.genv.update(extra_args)
|
def vagrant(self, name=''):
"""
Run the following tasks on a vagrant box.
First, you need to import this task in your ``fabfile.py``::
from fabric.api import *
from burlap.vagrant import vagrant
@task
def some_task():
run('echo hello')
Then you can easily run tasks on your current Vagrant box::
$ fab vagrant some_task
"""
r = self.local_renderer
config = self.ssh_config(name)
extra_args = self._settings_dict(config)
r.genv.update(extra_args)
|
[
"Run",
"the",
"following",
"tasks",
"on",
"a",
"vagrant",
"box",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/vagrant.py#L158-L180
|
[
"def",
"vagrant",
"(",
"self",
",",
"name",
"=",
"''",
")",
":",
"r",
"=",
"self",
".",
"local_renderer",
"config",
"=",
"self",
".",
"ssh_config",
"(",
"name",
")",
"extra_args",
"=",
"self",
".",
"_settings_dict",
"(",
"config",
")",
"r",
".",
"genv",
".",
"update",
"(",
"extra_args",
")"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
VagrantSatchel.vagrant_settings
|
Context manager that sets a vagrant VM
as the remote host.
Use this context manager inside a task to run commands
on your current Vagrant box::
from burlap.vagrant import vagrant_settings
with vagrant_settings():
run('hostname')
|
burlap/vagrant.py
|
def vagrant_settings(self, name='', *args, **kwargs):
"""
Context manager that sets a vagrant VM
as the remote host.
Use this context manager inside a task to run commands
on your current Vagrant box::
from burlap.vagrant import vagrant_settings
with vagrant_settings():
run('hostname')
"""
config = self.ssh_config(name)
extra_args = self._settings_dict(config)
kwargs.update(extra_args)
return self.settings(*args, **kwargs)
|
def vagrant_settings(self, name='', *args, **kwargs):
"""
Context manager that sets a vagrant VM
as the remote host.
Use this context manager inside a task to run commands
on your current Vagrant box::
from burlap.vagrant import vagrant_settings
with vagrant_settings():
run('hostname')
"""
config = self.ssh_config(name)
extra_args = self._settings_dict(config)
kwargs.update(extra_args)
return self.settings(*args, **kwargs)
|
[
"Context",
"manager",
"that",
"sets",
"a",
"vagrant",
"VM",
"as",
"the",
"remote",
"host",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/vagrant.py#L182-L200
|
[
"def",
"vagrant_settings",
"(",
"self",
",",
"name",
"=",
"''",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"self",
".",
"ssh_config",
"(",
"name",
")",
"extra_args",
"=",
"self",
".",
"_settings_dict",
"(",
"config",
")",
"kwargs",
".",
"update",
"(",
"extra_args",
")",
"return",
"self",
".",
"settings",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
VagrantSatchel.base_boxes
|
Get the list of vagrant base boxes
|
burlap/vagrant.py
|
def base_boxes(self):
"""
Get the list of vagrant base boxes
"""
return sorted(list(set([name for name, provider in self._box_list()])))
|
def base_boxes(self):
"""
Get the list of vagrant base boxes
"""
return sorted(list(set([name for name, provider in self._box_list()])))
|
[
"Get",
"the",
"list",
"of",
"vagrant",
"base",
"boxes"
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/vagrant.py#L239-L243
|
[
"def",
"base_boxes",
"(",
"self",
")",
":",
"return",
"sorted",
"(",
"list",
"(",
"set",
"(",
"[",
"name",
"for",
"name",
",",
"provider",
"in",
"self",
".",
"_box_list",
"(",
")",
"]",
")",
")",
")"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
VagrantSatchel.install_from_upstream
|
Installs Vagrant from the most recent package available from their homepage.
|
burlap/vagrant.py
|
def install_from_upstream(self):
"""
Installs Vagrant from the most recent package available from their homepage.
"""
from burlap.system import get_arch, distrib_family
r = self.local_renderer
content = urlopen(r.env.download_url).read()
print(len(content))
matches = DOWNLOAD_LINK_PATTERN.findall(content)
print(matches)
arch = get_arch() # e.g. 'x86_64'
family = distrib_family()
if family == DEBIAN:
ext = '.deb'
matches = [match for match in matches if match.endswith(ext) and arch in match]
print('matches:', matches)
assert matches, "No matches found."
assert len(matches) == 1, "Too many matches found: %s" % (', '.join(matches))
r.env.final_download_url = matches[0]
r.env.local_filename = '/tmp/vagrant%s' % ext
r.run('wget -O {local_filename} {final_download_url}')
r.sudo('dpkg -i {local_filename}')
else:
raise NotImplementedError('Unsupported family: %s' % family)
|
def install_from_upstream(self):
"""
Installs Vagrant from the most recent package available from their homepage.
"""
from burlap.system import get_arch, distrib_family
r = self.local_renderer
content = urlopen(r.env.download_url).read()
print(len(content))
matches = DOWNLOAD_LINK_PATTERN.findall(content)
print(matches)
arch = get_arch() # e.g. 'x86_64'
family = distrib_family()
if family == DEBIAN:
ext = '.deb'
matches = [match for match in matches if match.endswith(ext) and arch in match]
print('matches:', matches)
assert matches, "No matches found."
assert len(matches) == 1, "Too many matches found: %s" % (', '.join(matches))
r.env.final_download_url = matches[0]
r.env.local_filename = '/tmp/vagrant%s' % ext
r.run('wget -O {local_filename} {final_download_url}')
r.sudo('dpkg -i {local_filename}')
else:
raise NotImplementedError('Unsupported family: %s' % family)
|
[
"Installs",
"Vagrant",
"from",
"the",
"most",
"recent",
"package",
"available",
"from",
"their",
"homepage",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/vagrant.py#L280-L303
|
[
"def",
"install_from_upstream",
"(",
"self",
")",
":",
"from",
"burlap",
".",
"system",
"import",
"get_arch",
",",
"distrib_family",
"r",
"=",
"self",
".",
"local_renderer",
"content",
"=",
"urlopen",
"(",
"r",
".",
"env",
".",
"download_url",
")",
".",
"read",
"(",
")",
"print",
"(",
"len",
"(",
"content",
")",
")",
"matches",
"=",
"DOWNLOAD_LINK_PATTERN",
".",
"findall",
"(",
"content",
")",
"print",
"(",
"matches",
")",
"arch",
"=",
"get_arch",
"(",
")",
"# e.g. 'x86_64'",
"family",
"=",
"distrib_family",
"(",
")",
"if",
"family",
"==",
"DEBIAN",
":",
"ext",
"=",
"'.deb'",
"matches",
"=",
"[",
"match",
"for",
"match",
"in",
"matches",
"if",
"match",
".",
"endswith",
"(",
"ext",
")",
"and",
"arch",
"in",
"match",
"]",
"print",
"(",
"'matches:'",
",",
"matches",
")",
"assert",
"matches",
",",
"\"No matches found.\"",
"assert",
"len",
"(",
"matches",
")",
"==",
"1",
",",
"\"Too many matches found: %s\"",
"%",
"(",
"', '",
".",
"join",
"(",
"matches",
")",
")",
"r",
".",
"env",
".",
"final_download_url",
"=",
"matches",
"[",
"0",
"]",
"r",
".",
"env",
".",
"local_filename",
"=",
"'/tmp/vagrant%s'",
"%",
"ext",
"r",
".",
"run",
"(",
"'wget -O {local_filename} {final_download_url}'",
")",
"r",
".",
"sudo",
"(",
"'dpkg -i {local_filename}'",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'Unsupported family: %s'",
"%",
"family",
")"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
distrib_id
|
Get the OS distribution ID.
Example::
from burlap.system import distrib_id
if distrib_id() != 'Debian':
abort(u"Distribution is not supported")
|
burlap/system.py
|
def distrib_id():
"""
Get the OS distribution ID.
Example::
from burlap.system import distrib_id
if distrib_id() != 'Debian':
abort(u"Distribution is not supported")
"""
with settings(hide('running', 'stdout')):
kernel = (run('uname -s') or '').strip().lower()
if kernel == LINUX:
# lsb_release works on Ubuntu and Debian >= 6.0
# but is not always included in other distros
if is_file('/usr/bin/lsb_release'):
id_ = run('lsb_release --id --short').strip().lower()
if id_ in ['arch', 'archlinux']: # old IDs used before lsb-release 1.4-14
id_ = ARCH
return id_
else:
if is_file('/etc/debian_version'):
return DEBIAN
elif is_file('/etc/fedora-release'):
return FEDORA
elif is_file('/etc/arch-release'):
return ARCH
elif is_file('/etc/redhat-release'):
release = run('cat /etc/redhat-release')
if release.startswith('Red Hat Enterprise Linux'):
return REDHAT
elif release.startswith('CentOS'):
return CENTOS
elif release.startswith('Scientific Linux'):
return SLES
elif is_file('/etc/gentoo-release'):
return GENTOO
elif kernel == SUNOS:
return SUNOS
|
def distrib_id():
"""
Get the OS distribution ID.
Example::
from burlap.system import distrib_id
if distrib_id() != 'Debian':
abort(u"Distribution is not supported")
"""
with settings(hide('running', 'stdout')):
kernel = (run('uname -s') or '').strip().lower()
if kernel == LINUX:
# lsb_release works on Ubuntu and Debian >= 6.0
# but is not always included in other distros
if is_file('/usr/bin/lsb_release'):
id_ = run('lsb_release --id --short').strip().lower()
if id_ in ['arch', 'archlinux']: # old IDs used before lsb-release 1.4-14
id_ = ARCH
return id_
else:
if is_file('/etc/debian_version'):
return DEBIAN
elif is_file('/etc/fedora-release'):
return FEDORA
elif is_file('/etc/arch-release'):
return ARCH
elif is_file('/etc/redhat-release'):
release = run('cat /etc/redhat-release')
if release.startswith('Red Hat Enterprise Linux'):
return REDHAT
elif release.startswith('CentOS'):
return CENTOS
elif release.startswith('Scientific Linux'):
return SLES
elif is_file('/etc/gentoo-release'):
return GENTOO
elif kernel == SUNOS:
return SUNOS
|
[
"Get",
"the",
"OS",
"distribution",
"ID",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/system.py#L41-L82
|
[
"def",
"distrib_id",
"(",
")",
":",
"with",
"settings",
"(",
"hide",
"(",
"'running'",
",",
"'stdout'",
")",
")",
":",
"kernel",
"=",
"(",
"run",
"(",
"'uname -s'",
")",
"or",
"''",
")",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"if",
"kernel",
"==",
"LINUX",
":",
"# lsb_release works on Ubuntu and Debian >= 6.0",
"# but is not always included in other distros",
"if",
"is_file",
"(",
"'/usr/bin/lsb_release'",
")",
":",
"id_",
"=",
"run",
"(",
"'lsb_release --id --short'",
")",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"if",
"id",
"in",
"[",
"'arch'",
",",
"'archlinux'",
"]",
":",
"# old IDs used before lsb-release 1.4-14",
"id_",
"=",
"ARCH",
"return",
"id_",
"else",
":",
"if",
"is_file",
"(",
"'/etc/debian_version'",
")",
":",
"return",
"DEBIAN",
"elif",
"is_file",
"(",
"'/etc/fedora-release'",
")",
":",
"return",
"FEDORA",
"elif",
"is_file",
"(",
"'/etc/arch-release'",
")",
":",
"return",
"ARCH",
"elif",
"is_file",
"(",
"'/etc/redhat-release'",
")",
":",
"release",
"=",
"run",
"(",
"'cat /etc/redhat-release'",
")",
"if",
"release",
".",
"startswith",
"(",
"'Red Hat Enterprise Linux'",
")",
":",
"return",
"REDHAT",
"elif",
"release",
".",
"startswith",
"(",
"'CentOS'",
")",
":",
"return",
"CENTOS",
"elif",
"release",
".",
"startswith",
"(",
"'Scientific Linux'",
")",
":",
"return",
"SLES",
"elif",
"is_file",
"(",
"'/etc/gentoo-release'",
")",
":",
"return",
"GENTOO",
"elif",
"kernel",
"==",
"SUNOS",
":",
"return",
"SUNOS"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
distrib_release
|
Get the release number of the distribution.
Example::
from burlap.system import distrib_id, distrib_release
if distrib_id() == 'CentOS' and distrib_release() == '6.1':
print(u"CentOS 6.2 has been released. Please upgrade.")
|
burlap/system.py
|
def distrib_release():
"""
Get the release number of the distribution.
Example::
from burlap.system import distrib_id, distrib_release
if distrib_id() == 'CentOS' and distrib_release() == '6.1':
print(u"CentOS 6.2 has been released. Please upgrade.")
"""
with settings(hide('running', 'stdout')):
kernel = (run('uname -s') or '').strip().lower()
if kernel == LINUX:
return run('lsb_release -r --short')
elif kernel == SUNOS:
return run('uname -v')
|
def distrib_release():
"""
Get the release number of the distribution.
Example::
from burlap.system import distrib_id, distrib_release
if distrib_id() == 'CentOS' and distrib_release() == '6.1':
print(u"CentOS 6.2 has been released. Please upgrade.")
"""
with settings(hide('running', 'stdout')):
kernel = (run('uname -s') or '').strip().lower()
if kernel == LINUX:
return run('lsb_release -r --short')
elif kernel == SUNOS:
return run('uname -v')
|
[
"Get",
"the",
"release",
"number",
"of",
"the",
"distribution",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/system.py#L85-L103
|
[
"def",
"distrib_release",
"(",
")",
":",
"with",
"settings",
"(",
"hide",
"(",
"'running'",
",",
"'stdout'",
")",
")",
":",
"kernel",
"=",
"(",
"run",
"(",
"'uname -s'",
")",
"or",
"''",
")",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"if",
"kernel",
"==",
"LINUX",
":",
"return",
"run",
"(",
"'lsb_release -r --short'",
")",
"elif",
"kernel",
"==",
"SUNOS",
":",
"return",
"run",
"(",
"'uname -v'",
")"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
distrib_family
|
Get the distribution family.
Returns one of ``debian``, ``redhat``, ``arch``, ``gentoo``,
``sun``, ``other``.
|
burlap/system.py
|
def distrib_family():
"""
Get the distribution family.
Returns one of ``debian``, ``redhat``, ``arch``, ``gentoo``,
``sun``, ``other``.
"""
distrib = (distrib_id() or '').lower()
if distrib in ['debian', 'ubuntu', 'linuxmint', 'elementary os']:
return DEBIAN
elif distrib in ['redhat', 'rhel', 'centos', 'sles', 'fedora']:
return REDHAT
elif distrib in ['sunos']:
return SUN
elif distrib in ['gentoo']:
return GENTOO
elif distrib in ['arch', 'manjarolinux']:
return ARCH
return 'other'
|
def distrib_family():
"""
Get the distribution family.
Returns one of ``debian``, ``redhat``, ``arch``, ``gentoo``,
``sun``, ``other``.
"""
distrib = (distrib_id() or '').lower()
if distrib in ['debian', 'ubuntu', 'linuxmint', 'elementary os']:
return DEBIAN
elif distrib in ['redhat', 'rhel', 'centos', 'sles', 'fedora']:
return REDHAT
elif distrib in ['sunos']:
return SUN
elif distrib in ['gentoo']:
return GENTOO
elif distrib in ['arch', 'manjarolinux']:
return ARCH
return 'other'
|
[
"Get",
"the",
"distribution",
"family",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/system.py#L134-L152
|
[
"def",
"distrib_family",
"(",
")",
":",
"distrib",
"=",
"(",
"distrib_id",
"(",
")",
"or",
"''",
")",
".",
"lower",
"(",
")",
"if",
"distrib",
"in",
"[",
"'debian'",
",",
"'ubuntu'",
",",
"'linuxmint'",
",",
"'elementary os'",
"]",
":",
"return",
"DEBIAN",
"elif",
"distrib",
"in",
"[",
"'redhat'",
",",
"'rhel'",
",",
"'centos'",
",",
"'sles'",
",",
"'fedora'",
"]",
":",
"return",
"REDHAT",
"elif",
"distrib",
"in",
"[",
"'sunos'",
"]",
":",
"return",
"SUN",
"elif",
"distrib",
"in",
"[",
"'gentoo'",
"]",
":",
"return",
"GENTOO",
"elif",
"distrib",
"in",
"[",
"'arch'",
",",
"'manjarolinux'",
"]",
":",
"return",
"ARCH",
"return",
"'other'"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
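A hedged usage sketch of distrib_family(); the install commands are placeholders. The returned constants compare equal to the lowercase strings checked in supported_locales() below:

```python
# Usage sketch (the install commands are placeholder assumptions).
from burlap.system import distrib_family

family = distrib_family()
if family == 'debian':
    install_cmd = 'apt-get install -y %s'
elif family == 'redhat':
    install_cmd = 'yum install -y %s'
else:
    raise NotImplementedError('Unsupported family: %s' % family)
```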
valid
|
supported_locales
|
Gets the list of supported locales.
Each locale is returned as a ``(locale, charset)`` tuple.
|
burlap/system.py
|
def supported_locales():
"""
Gets the list of supported locales.
Each locale is returned as a ``(locale, charset)`` tuple.
"""
family = distrib_family()
if family == 'debian':
return _parse_locales('/usr/share/i18n/SUPPORTED')
elif family == 'arch':
return _parse_locales('/etc/locale.gen')
elif family == 'redhat':
return _supported_locales_redhat()
else:
raise UnsupportedFamily(supported=['debian', 'arch', 'redhat'])
|
def supported_locales():
"""
Gets the list of supported locales.
Each locale is returned as a ``(locale, charset)`` tuple.
"""
family = distrib_family()
if family == 'debian':
return _parse_locales('/usr/share/i18n/SUPPORTED')
elif family == 'arch':
return _parse_locales('/etc/locale.gen')
elif family == 'redhat':
return _supported_locales_redhat()
else:
raise UnsupportedFamily(supported=['debian', 'arch', 'redhat'])
|
[
"Gets",
"the",
"list",
"of",
"supported",
"locales",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/system.py#L202-L216
|
[
"def",
"supported_locales",
"(",
")",
":",
"family",
"=",
"distrib_family",
"(",
")",
"if",
"family",
"==",
"'debian'",
":",
"return",
"_parse_locales",
"(",
"'/usr/share/i18n/SUPPORTED'",
")",
"elif",
"family",
"==",
"'arch'",
":",
"return",
"_parse_locales",
"(",
"'/etc/locale.gen'",
")",
"elif",
"family",
"==",
"'redhat'",
":",
"return",
"_supported_locales_redhat",
"(",
")",
"else",
":",
"raise",
"UnsupportedFamily",
"(",
"supported",
"=",
"[",
"'debian'",
",",
"'arch'",
",",
"'redhat'",
"]",
")"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
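A brief usage sketch of supported_locales(), which yields (locale, charset) tuples:

```python
# Usage sketch: print the en_US locales the host can generate.
from burlap.system import supported_locales

for locale, charset in supported_locales():
    if locale.startswith('en_US'):
        print(locale, charset)  # e.g. en_US.UTF-8 UTF-8
```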
valid
|
CelerySatchel.force_stop
|
Forcibly terminates all Celery processes.
|
burlap/celery.py
|
def force_stop(self):
"""
Forcibly terminates all Celery processes.
"""
r = self.local_renderer
with self.settings(warn_only=True):
r.sudo('pkill -9 -f celery')
r.sudo('rm -f /tmp/celery*.pid')
|
def force_stop(self):
"""
Forcibly terminates all Celery processes.
"""
r = self.local_renderer
with self.settings(warn_only=True):
r.sudo('pkill -9 -f celery')
r.sudo('rm -f /tmp/celery*.pid')
|
[
"Forcibly",
"terminates",
"all",
"Celery",
"processes",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/celery.py#L94-L101
|
[
"def",
"force_stop",
"(",
"self",
")",
":",
"r",
"=",
"self",
".",
"local_renderer",
"with",
"self",
".",
"settings",
"(",
"warn_only",
"=",
"True",
")",
":",
"r",
".",
"sudo",
"(",
"'pkill -9 -f celery'",
")",
"r",
".",
"sudo",
"(",
"'rm -f /tmp/celery*.pid'",
")"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
CelerySatchel.set_permissions
|
Sets ownership and permissions for Celery-related files.
|
burlap/celery.py
|
def set_permissions(self):
"""
Sets ownership and permissions for Celery-related files.
"""
r = self.local_renderer
for path in r.env.paths_owned:
r.env.path_owned = path
r.sudo('chown {celery_daemon_user}:{celery_daemon_user} {celery_path_owned}')
|
def set_permissions(self):
"""
Sets ownership and permissions for Celery-related files.
"""
r = self.local_renderer
for path in r.env.paths_owned:
r.env.path_owned = path
r.sudo('chown {celery_daemon_user}:{celery_daemon_user} {celery_path_owned}')
|
[
"Sets",
"ownership",
"and",
"permissions",
"for",
"Celery",
"-",
"related",
"files",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/celery.py#L104-L111
|
[
"def",
"set_permissions",
"(",
"self",
")",
":",
"r",
"=",
"self",
".",
"local_renderer",
"for",
"path",
"in",
"r",
".",
"env",
".",
"paths_owned",
":",
"r",
".",
"env",
".",
"path_owned",
"=",
"path",
"r",
".",
"sudo",
"(",
"'chown {celery_daemon_user}:{celery_daemon_user} {celery_path_owned}'",
")"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
CelerySatchel.create_supervisor_services
|
This is called for each site to render a Celery config file.
|
burlap/celery.py
|
def create_supervisor_services(self, site):
"""
This is called for each site to render a Celery config file.
"""
self.vprint('create_supervisor_services:', site)
self.set_site_specifics(site=site)
r = self.local_renderer
if self.verbose:
print('r.env:')
pprint(r.env, indent=4)
self.vprint('r.env.has_worker:', r.env.has_worker)
if not r.env.has_worker:
self.vprint('skipping: no celery worker')
return
if self.name.lower() not in self.genv.services:
self.vprint('skipping: celery not enabled')
return
hostname = self.current_hostname
target_sites = self.genv.available_sites_by_host.get(hostname, None)
if target_sites and site not in target_sites:
self.vprint('skipping: site not supported on this server')
return
self.render_paths()
conf_name = 'celery_%s.conf' % site
ret = r.render_to_string('celery/celery_supervisor.template.conf')
return conf_name, ret
|
def create_supervisor_services(self, site):
"""
This is called for each site to render a Celery config file.
"""
self.vprint('create_supervisor_services:', site)
self.set_site_specifics(site=site)
r = self.local_renderer
if self.verbose:
print('r.env:')
pprint(r.env, indent=4)
self.vprint('r.env.has_worker:', r.env.has_worker)
if not r.env.has_worker:
self.vprint('skipping: no celery worker')
return
if self.name.lower() not in self.genv.services:
self.vprint('skipping: celery not enabled')
return
hostname = self.current_hostname
target_sites = self.genv.available_sites_by_host.get(hostname, None)
if target_sites and site not in target_sites:
self.vprint('skipping: site not supported on this server')
return
self.render_paths()
conf_name = 'celery_%s.conf' % site
ret = r.render_to_string('celery/celery_supervisor.template.conf')
return conf_name, ret
|
[
"This",
"is",
"called",
"for",
"each",
"site",
"to",
"render",
"a",
"Celery",
"config",
"file",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/celery.py#L121-L154
|
[
"def",
"create_supervisor_services",
"(",
"self",
",",
"site",
")",
":",
"self",
".",
"vprint",
"(",
"'create_supervisor_services:'",
",",
"site",
")",
"self",
".",
"set_site_specifics",
"(",
"site",
"=",
"site",
")",
"r",
"=",
"self",
".",
"local_renderer",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"'r.env:'",
")",
"pprint",
"(",
"r",
".",
"env",
",",
"indent",
"=",
"4",
")",
"self",
".",
"vprint",
"(",
"'r.env.has_worker:'",
",",
"r",
".",
"env",
".",
"has_worker",
")",
"if",
"not",
"r",
".",
"env",
".",
"has_worker",
":",
"self",
".",
"vprint",
"(",
"'skipping: no celery worker'",
")",
"return",
"if",
"self",
".",
"name",
".",
"lower",
"(",
")",
"not",
"in",
"self",
".",
"genv",
".",
"services",
":",
"self",
".",
"vprint",
"(",
"'skipping: celery not enabled'",
")",
"return",
"hostname",
"=",
"self",
".",
"current_hostname",
"target_sites",
"=",
"self",
".",
"genv",
".",
"available_sites_by_host",
".",
"get",
"(",
"hostname",
",",
"None",
")",
"if",
"target_sites",
"and",
"site",
"not",
"in",
"target_sites",
":",
"self",
".",
"vprint",
"(",
"'skipping: site not supported on this server'",
")",
"return",
"self",
".",
"render_paths",
"(",
")",
"conf_name",
"=",
"'celery_%s.conf'",
"%",
"site",
"ret",
"=",
"r",
".",
"render_to_string",
"(",
"'celery/celery_supervisor.template.conf'",
")",
"return",
"conf_name",
",",
"ret"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
BuildBotSatchel.check_ok
|
Ensures all tests have passed for this branch.
This should be called before deployment, to prevent accidental deployment of code
that hasn't passed automated testing.
|
burlap/buildbot.py
|
def check_ok(self):
"""
Ensures all tests have passed for this branch.
This should be called before deployment, to prevent accidental deployment of code
that hasn't passed automated testing.
"""
import requests
if not self.env.check_ok:
return
# Find current git branch.
branch_name = self._local('git rev-parse --abbrev-ref HEAD', capture=True).strip()
check_ok_paths = self.env.check_ok_paths or {}
if branch_name in check_ok_paths:
check = check_ok_paths[branch_name]
if 'username' in check:
auth = (check['username'], check['password'])
else:
auth = None
ret = requests.get(check['url'], auth=auth)
passed = check['text'] in ret.content
assert passed, 'Check failed: %s' % check['url']
|
def check_ok(self):
"""
Ensures all tests have passed for this branch.
This should be called before deployment, to prevent accidental deployment of code
that hasn't passed automated testing.
"""
import requests
if not self.env.check_ok:
return
# Find current git branch.
branch_name = self._local('git rev-parse --abbrev-ref HEAD', capture=True).strip()
check_ok_paths = self.env.check_ok_paths or {}
if branch_name in check_ok_paths:
check = check_ok_paths[branch_name]
if 'username' in check:
auth = (check['username'], check['password'])
else:
auth = None
ret = requests.get(check['url'], auth=auth)
passed = check['text'] in ret.content
assert passed, 'Check failed: %s' % check['url']
|
[
"Ensures",
"all",
"tests",
"have",
"passed",
"for",
"this",
"branch",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/buildbot.py#L361-L386
|
[
"def",
"check_ok",
"(",
"self",
")",
":",
"import",
"requests",
"if",
"not",
"self",
".",
"env",
".",
"check_ok",
":",
"return",
"# Find current git branch.",
"branch_name",
"=",
"self",
".",
"_local",
"(",
"'git rev-parse --abbrev-ref HEAD'",
",",
"capture",
"=",
"True",
")",
".",
"strip",
"(",
")",
"check_ok_paths",
"=",
"self",
".",
"env",
".",
"check_ok_paths",
"or",
"{",
"}",
"if",
"branch_name",
"in",
"check_ok_paths",
":",
"check",
"=",
"check_ok_paths",
"[",
"branch_name",
"]",
"if",
"'username'",
"in",
"check",
":",
"auth",
"=",
"(",
"check",
"[",
"'username'",
"]",
",",
"check",
"[",
"'password'",
"]",
")",
"else",
":",
"auth",
"=",
"None",
"ret",
"=",
"requests",
".",
"get",
"(",
"check",
"[",
"'url'",
"]",
",",
"auth",
"=",
"auth",
")",
"passed",
"=",
"check",
"[",
"'text'",
"]",
"in",
"ret",
".",
"content",
"assert",
"passed",
",",
"'Check failed: %s'",
"%",
"check",
"[",
"'url'",
"]"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
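A sketch of the check_ok_paths mapping that check_ok() reads, keyed by git branch name; all values are placeholders inferred from the keys accessed above (url, optional username/password, text):

```python
# Sketch of env.check_ok_paths as consumed by check_ok(); the URL,
# credentials, and marker text are placeholder assumptions.
check_ok_paths = {
    'master': {
        'url': 'https://buildbot.example.com/builders/runtests',
        'username': 'guest',           # optional; enables HTTP basic auth
        'password': 'secret',
        'text': 'Build succeeded',     # substring that must appear in the page
    },
}
```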
valid
|
HostSatchel.is_present
|
Returns true if the given host exists on the network.
Returns false otherwise.
|
burlap/host.py
|
def is_present(self, host=None):
"""
Returns true if the given host exists on the network.
Returns false otherwise.
"""
r = self.local_renderer
r.env.host = host or self.genv.host_string
ret = r._local("getent hosts {host} | awk '{{ print $1 }}'", capture=True) or ''
if self.verbose:
print('ret:', ret)
ret = ret.strip()
if self.verbose:
print('Host %s %s present.' % (r.env.host, 'IS' if bool(ret) else 'IS NOT'))
ip = ret
ret = bool(ret)
if not ret:
return False
r.env.ip = ip
with settings(warn_only=True):
ret = r._local('ping -c 1 {ip}', capture=True) or ''
packet_loss = re.findall(r'([0-9]+)% packet loss', ret)
# print('packet_loss:',packet_loss)
ip_accessible = packet_loss and int(packet_loss[0]) < 100
if self.verbose:
print('IP %s accessible: %s' % (ip, ip_accessible))
return bool(ip_accessible)
|
def is_present(self, host=None):
"""
Returns true if the given host exists on the network.
Returns false otherwise.
"""
r = self.local_renderer
r.env.host = host or self.genv.host_string
ret = r._local("getent hosts {host} | awk '{{ print $1 }}'", capture=True) or ''
if self.verbose:
print('ret:', ret)
ret = ret.strip()
if self.verbose:
print('Host %s %s present.' % (r.env.host, 'IS' if bool(ret) else 'IS NOT'))
ip = ret
ret = bool(ret)
if not ret:
return False
r.env.ip = ip
with settings(warn_only=True):
ret = r._local('ping -c 1 {ip}', capture=True) or ''
packet_loss = re.findall(r'([0-9]+)% packet loss', ret)
# print('packet_loss:',packet_loss)
ip_accessible = packet_loss and int(packet_loss[0]) < 100
if self.verbose:
print('IP %s accessible: %s' % (ip, ip_accessible))
return bool(ip_accessible)
|
[
"Returns",
"true",
"if",
"the",
"given",
"host",
"exists",
"on",
"the",
"network",
".",
"Returns",
"false",
"otherwise",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/host.py#L78-L104
|
[
"def",
"is_present",
"(",
"self",
",",
"host",
"=",
"None",
")",
":",
"r",
"=",
"self",
".",
"local_renderer",
"r",
".",
"env",
".",
"host",
"=",
"host",
"or",
"self",
".",
"genv",
".",
"host_string",
"ret",
"=",
"r",
".",
"_local",
"(",
"\"getent hosts {host} | awk '{{ print $1 }}'\"",
",",
"capture",
"=",
"True",
")",
"or",
"''",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"'ret:'",
",",
"ret",
")",
"ret",
"=",
"ret",
".",
"strip",
"(",
")",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"'Host %s %s present.'",
"%",
"(",
"r",
".",
"env",
".",
"host",
",",
"'IS'",
"if",
"bool",
"(",
"ret",
")",
"else",
"'IS NOT'",
")",
")",
"ip",
"=",
"ret",
"ret",
"=",
"bool",
"(",
"ret",
")",
"if",
"not",
"ret",
":",
"return",
"False",
"r",
".",
"env",
".",
"ip",
"=",
"ip",
"with",
"settings",
"(",
"warn_only",
"=",
"True",
")",
":",
"ret",
"=",
"r",
".",
"_local",
"(",
"'ping -c 1 {ip}'",
",",
"capture",
"=",
"True",
")",
"or",
"''",
"packet_loss",
"=",
"re",
".",
"findall",
"(",
"r'([0-9]+)% packet loss'",
",",
"ret",
")",
"# print('packet_loss:',packet_loss)",
"ip_accessible",
"=",
"packet_loss",
"and",
"int",
"(",
"packet_loss",
"[",
"0",
"]",
")",
"<",
"100",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"'IP %s accessible: %s'",
"%",
"(",
"ip",
",",
"ip_accessible",
")",
")",
"return",
"bool",
"(",
"ip_accessible",
")"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
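The two-step presence test in is_present (resolve the name with getent, then confirm reachability with a single ping) can be reproduced without burlap or fabric. A minimal sketch using only the standard library; the hostname argument is whatever you want to probe:

import re
import subprocess

def host_is_present(hostname):
    # Resolve the hostname to an IP, as `getent hosts` does.
    out = subprocess.run(['getent', 'hosts', hostname],
                         capture_output=True, text=True).stdout.strip()
    if not out:
        return False
    ip = out.split()[0]
    # One ping; 100% packet loss means the name resolved but is unreachable.
    ping = subprocess.run(['ping', '-c', '1', ip],
                          capture_output=True, text=True).stdout
    loss = re.findall(r'([0-9]+)% packet loss', ping)
    return bool(loss) and int(loss[0]) < 100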
HostSatchel.purge_keys
|
Deletes all SSH keys on the localhost associated with the current remote host.
|
burlap/host.py
|
def purge_keys(self):
"""
Deletes all SSH keys on the localhost associated with the current remote host.
"""
r = self.local_renderer
r.env.default_ip = self.hostname_to_ip(self.env.default_hostname)
r.env.home_dir = '/home/%s' % getpass.getuser()
r.local('ssh-keygen -f "{home_dir}/.ssh/known_hosts" -R {host_string}')
if self.env.default_hostname:
r.local('ssh-keygen -f "{home_dir}/.ssh/known_hosts" -R {default_hostname}')
if r.env.default_ip:
r.local('ssh-keygen -f "{home_dir}/.ssh/known_hosts" -R {default_ip}')
|
def purge_keys(self):
"""
Deletes all SSH keys on the localhost associated with the current remote host.
"""
r = self.local_renderer
r.env.default_ip = self.hostname_to_ip(self.env.default_hostname)
r.env.home_dir = '/home/%s' % getpass.getuser()
r.local('ssh-keygen -f "{home_dir}/.ssh/known_hosts" -R {host_string}')
if self.env.default_hostname:
r.local('ssh-keygen -f "{home_dir}/.ssh/known_hosts" -R {default_hostname}')
if r.env.default_ip:
r.local('ssh-keygen -f "{home_dir}/.ssh/known_hosts" -R {default_ip}')
|
[
"Deletes",
"all",
"SSH",
"keys",
"on",
"the",
"localhost",
"associated",
"with",
"the",
"current",
"remote",
"host",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/host.py#L107-L118
|
[
"def",
"purge_keys",
"(",
"self",
")",
":",
"r",
"=",
"self",
".",
"local_renderer",
"r",
".",
"env",
".",
"default_ip",
"=",
"self",
".",
"hostname_to_ip",
"(",
"self",
".",
"env",
".",
"default_hostname",
")",
"r",
".",
"env",
".",
"home_dir",
"=",
"'/home/%s'",
"%",
"getpass",
".",
"getuser",
"(",
")",
"r",
".",
"local",
"(",
"'ssh-keygen -f \"{home_dir}/.ssh/known_hosts\" -R {host_string}'",
")",
"if",
"self",
".",
"env",
".",
"default_hostname",
":",
"r",
".",
"local",
"(",
"'ssh-keygen -f \"{home_dir}/.ssh/known_hosts\" -R {default_hostname}'",
")",
"if",
"r",
".",
"env",
".",
"default_ip",
":",
"r",
".",
"local",
"(",
"'ssh-keygen -f \"{home_dir}/.ssh/known_hosts\" -R {default_ip}'",
")"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
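purge_keys relies on `ssh-keygen -R` removing known_hosts entries by hostname or IP. A minimal standalone equivalent; the example names are illustrative only:

import os
import subprocess

def purge_known_host_keys(*names):
    """Remove known_hosts entries for each hostname or IP given."""
    known_hosts = os.path.expanduser('~/.ssh/known_hosts')
    for name in names:
        if name:
            subprocess.run(['ssh-keygen', '-f', known_hosts, '-R', name])

# Hypothetical usage: purge both the DNS name and the raw IP.
purge_known_host_keys('myhost.example.com', '192.0.2.10')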
HostSatchel.find_working_password
|
Returns the first working combination of username and password for the current host.
|
burlap/host.py
|
def find_working_password(self, usernames=None, host_strings=None):
"""
Returns the first working combination of username and password for the current host.
"""
r = self.local_renderer
if host_strings is None:
host_strings = []
if not host_strings:
host_strings.append(self.genv.host_string)
if usernames is None:
usernames = []
if not usernames:
usernames.append(self.genv.user)
for host_string in host_strings:
for username in usernames:
passwords = []
passwords.append(self.genv.user_default_passwords[username])
passwords.append(self.genv.user_passwords[username])
passwords.append(self.env.default_password)
for password in passwords:
with settings(warn_only=True):
r.env.host_string = host_string
r.env.password = password
r.env.user = username
ret = r._local("sshpass -p '{password}' ssh -o StrictHostKeyChecking=no {user}@{host_string} echo hello", capture=True)
#print('ret.return_code:', ret.return_code)
# print('ret000:[%s]' % ret)
#code 1 = good password, but prompts needed
#code 5 = bad password
#code 6 = good password, but host public key is unknown
if ret.return_code in (1, 6) or 'hello' in ret:
# Login succeeded, so we haven't yet changed the password, so use the default password.
return host_string, username, password
raise Exception('No working login found.')
|
def find_working_password(self, usernames=None, host_strings=None):
"""
Returns the first working combination of username and password for the current host.
"""
r = self.local_renderer
if host_strings is None:
host_strings = []
if not host_strings:
host_strings.append(self.genv.host_string)
if usernames is None:
usernames = []
if not usernames:
usernames.append(self.genv.user)
for host_string in host_strings:
for username in usernames:
passwords = []
passwords.append(self.genv.user_default_passwords[username])
passwords.append(self.genv.user_passwords[username])
passwords.append(self.env.default_password)
for password in passwords:
with settings(warn_only=True):
r.env.host_string = host_string
r.env.password = password
r.env.user = username
ret = r._local("sshpass -p '{password}' ssh -o StrictHostKeyChecking=no {user}@{host_string} echo hello", capture=True)
#print('ret.return_code:', ret.return_code)
# print('ret000:[%s]' % ret)
#code 1 = good password, but prompts needed
#code 5 = bad password
#code 6 = good password, but host public key is unknown
if ret.return_code in (1, 6) or 'hello' in ret:
# Login succeeded, so we haven't yet changed the password, so use the default password.
return host_string, username, password
raise Exception('No working login found.')
|
[
"Returns",
"the",
"first",
"working",
"combination",
"of",
"username",
"and",
"password",
"for",
"the",
"current",
"host",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/host.py#L121-L165
|
[
"def",
"find_working_password",
"(",
"self",
",",
"usernames",
"=",
"None",
",",
"host_strings",
"=",
"None",
")",
":",
"r",
"=",
"self",
".",
"local_renderer",
"if",
"host_strings",
"is",
"None",
":",
"host_strings",
"=",
"[",
"]",
"if",
"not",
"host_strings",
":",
"host_strings",
".",
"append",
"(",
"self",
".",
"genv",
".",
"host_string",
")",
"if",
"usernames",
"is",
"None",
":",
"usernames",
"=",
"[",
"]",
"if",
"not",
"usernames",
":",
"usernames",
".",
"append",
"(",
"self",
".",
"genv",
".",
"user",
")",
"for",
"host_string",
"in",
"host_strings",
":",
"for",
"username",
"in",
"usernames",
":",
"passwords",
"=",
"[",
"]",
"passwords",
".",
"append",
"(",
"self",
".",
"genv",
".",
"user_default_passwords",
"[",
"username",
"]",
")",
"passwords",
".",
"append",
"(",
"self",
".",
"genv",
".",
"user_passwords",
"[",
"username",
"]",
")",
"passwords",
".",
"append",
"(",
"self",
".",
"env",
".",
"default_password",
")",
"for",
"password",
"in",
"passwords",
":",
"with",
"settings",
"(",
"warn_only",
"=",
"True",
")",
":",
"r",
".",
"env",
".",
"host_string",
"=",
"host_string",
"r",
".",
"env",
".",
"password",
"=",
"password",
"r",
".",
"env",
".",
"user",
"=",
"username",
"ret",
"=",
"r",
".",
"_local",
"(",
"\"sshpass -p '{password}' ssh -o StrictHostKeyChecking=no {user}@{host_string} echo hello\"",
",",
"capture",
"=",
"True",
")",
"#print('ret.return_code:', ret.return_code)",
"# print('ret000:[%s]' % ret)",
"#code 1 = good password, but prompts needed",
"#code 5 = bad password",
"#code 6 = good password, but host public key is unknown",
"if",
"ret",
".",
"return_code",
"in",
"(",
"1",
",",
"6",
")",
"or",
"'hello'",
"in",
"ret",
":",
"# Login succeeded, so we haven't yet changed the password, so use the default password.",
"return",
"host_string",
",",
"username",
",",
"password",
"raise",
"Exception",
"(",
"'No working login found.'",
")"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
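The interesting detail in find_working_password is its interpretation of sshpass exit codes: 5 means a bad password, while 1 and 6 mean the password was accepted but further prompts or an unknown host key intervened. A minimal sketch of that inner test; the credentials passed in are placeholders:

import subprocess

def password_works(host, user, password):
    # sshpass exit codes: 5 = bad password; 1 and 6 = password accepted but
    # prompts were needed or the host public key was unknown.
    proc = subprocess.run(
        ['sshpass', '-p', password, 'ssh',
         '-o', 'StrictHostKeyChecking=no',
         '%s@%s' % (user, host), 'echo', 'hello'],
        capture_output=True, text=True)
    return proc.returncode in (1, 6) or 'hello' in proc.stdout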
HostSatchel.needs_initrole
|
Returns true if the host does not exist at the expected location and may need
to have its initial configuration set.
Returns false if the host exists at the expected location.
|
burlap/host.py
|
def needs_initrole(self, stop_on_error=False):
"""
Returns true if the host does not exist at the expected location and may need
to have its initial configuration set.
Returns false if the host exists at the expected location.
"""
ret = False
target_host_present = self.is_present()
if not target_host_present:
default_host_present = self.is_present(self.env.default_hostname)
if default_host_present:
if self.verbose:
print('Target host missing and default host present so host init required.')
ret = True
else:
if self.verbose:
print('Target host missing but default host also missing, '
'so no host init required.')
# if stop_on_error:
# raise Exception(
# 'Both target and default hosts missing! '
# 'Is the machine turned on and plugged into the network?')
else:
if self.verbose:
print('Target host is present so no host init required.')
return ret
|
def needs_initrole(self, stop_on_error=False):
"""
Returns true if the host does not exist at the expected location and may need
to have its initial configuration set.
Returns false if the host exists at the expected location.
"""
ret = False
target_host_present = self.is_present()
if not target_host_present:
default_host_present = self.is_present(self.env.default_hostname)
if default_host_present:
if self.verbose:
print('Target host missing and default host present so host init required.')
ret = True
else:
if self.verbose:
print('Target host missing but default host also missing, '
'so no host init required.')
# if stop_on_error:
# raise Exception(
# 'Both target and default hosts missing! '
# 'Is the machine turned on and plugged into the network?')
else:
if self.verbose:
print('Target host is present so no host init required.')
return ret
|
[
"Returns",
"true",
"if",
"the",
"host",
"does",
"not",
"exist",
"at",
"the",
"expected",
"location",
"and",
"may",
"need",
"to",
"have",
"its",
"initial",
"configuration",
"set",
".",
"Returns",
"false",
"if",
"the",
"host",
"exists",
"at",
"the",
"expected",
"location",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/host.py#L168-L197
|
[
"def",
"needs_initrole",
"(",
"self",
",",
"stop_on_error",
"=",
"False",
")",
":",
"ret",
"=",
"False",
"target_host_present",
"=",
"self",
".",
"is_present",
"(",
")",
"if",
"not",
"target_host_present",
":",
"default_host_present",
"=",
"self",
".",
"is_present",
"(",
"self",
".",
"env",
".",
"default_hostname",
")",
"if",
"default_host_present",
":",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"'Target host missing and default host present so host init required.'",
")",
"ret",
"=",
"True",
"else",
":",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"'Target host missing but default host also missing, '",
"'so no host init required.'",
")",
"# if stop_on_error:",
"# raise Exception(",
"# 'Both target and default hosts missing! '",
"# 'Is the machine turned on and plugged into the network?')",
"else",
":",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"'Target host is present so no host init required.'",
")",
"return",
"ret"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
HostSatchel.initrole
|
Called to set default password login for systems that do not yet have passwordless
login setup.
|
burlap/host.py
|
def initrole(self, check=True):
"""
Called to set default password login for systems that do not yet have passwordless
login setup.
"""
if self.env.original_user is None:
self.env.original_user = self.genv.user
if self.env.original_key_filename is None:
self.env.original_key_filename = self.genv.key_filename
host_string = None
user = None
password = None
if self.env.login_check:
host_string, user, password = self.find_working_password(
usernames=[self.genv.user, self.env.default_user],
host_strings=[self.genv.host_string, self.env.default_hostname],
)
if self.verbose:
print('host.initrole.host_string:', host_string)
print('host.initrole.user:', user)
print('host.initrole.password:', password)
# needs = True
# if check:
# needs = self.needs_initrole(stop_on_error=True)
needs = False
if host_string is not None:
self.genv.host_string = host_string
if user is not None:
self.genv.user = user
if password is not None:
self.genv.password = password
if not needs:
return
assert self.env.default_hostname, 'No default hostname set.'
assert self.env.default_user, 'No default user set.'
self.genv.host_string = self.env.default_hostname
if self.env.default_hosts:
self.genv.hosts = self.env.default_hosts
else:
self.genv.hosts = [self.env.default_hostname]
self.genv.user = self.env.default_user
self.genv.password = self.env.default_password
self.genv.key_filename = self.env.default_key_filename
# If the host has been reformatted, the SSH keys will mismatch, throwing an error, so clear them.
self.purge_keys()
# Do a test login with the default password to determine which password we should use.
# r.env.password = self.env.default_password
# with settings(warn_only=True):
# ret = r._local("sshpass -p '{password}' ssh -o StrictHostKeyChecking=no {user}@{host_string} echo hello", capture=True)
# print('ret.return_code:', ret.return_code)
# # print('ret000:[%s]' % ret)
# #code 1 = good password, but prompts needed
# #code 5 = bad password
# #code 6 = good password, but host public key is unknown
# if ret.return_code in (1, 6) or 'hello' in ret:
# # Login succeeded, so we haven't yet changed the password, so use the default password.
# self.genv.password = self.env.default_password
# elif self.genv.user in self.genv.user_passwords:
# # Otherwise, use the password or key set in the config.
# self.genv.password = self.genv.user_passwords[self.genv.user]
# else:
# # Default password fails and there's no current password, so clear.
# self.genv.password = None
# self.genv.password = self.find_working_password()
# print('host.initrole,using password:', self.genv.password)
# Execute post-init callbacks.
for task_name in self.env.post_initrole_tasks:
if self.verbose:
print('Calling post initrole task %s' % task_name)
satchel_name, method_name = task_name.split('.')
satchel = self.get_satchel(name=satchel_name)
getattr(satchel, method_name)()
print('^'*80)
print('host.initrole.host_string:', self.genv.host_string)
print('host.initrole.user:', self.genv.user)
print('host.initrole.password:', self.genv.password)
|
def initrole(self, check=True):
"""
Called to set default password login for systems that do not yet have passwordless
login setup.
"""
if self.env.original_user is None:
self.env.original_user = self.genv.user
if self.env.original_key_filename is None:
self.env.original_key_filename = self.genv.key_filename
host_string = None
user = None
password = None
if self.env.login_check:
host_string, user, password = self.find_working_password(
usernames=[self.genv.user, self.env.default_user],
host_strings=[self.genv.host_string, self.env.default_hostname],
)
if self.verbose:
print('host.initrole.host_string:', host_string)
print('host.initrole.user:', user)
print('host.initrole.password:', password)
# needs = True
# if check:
# needs = self.needs_initrole(stop_on_error=True)
needs = False
if host_string is not None:
self.genv.host_string = host_string
if user is not None:
self.genv.user = user
if password is not None:
self.genv.password = password
if not needs:
return
assert self.env.default_hostname, 'No default hostname set.'
assert self.env.default_user, 'No default user set.'
self.genv.host_string = self.env.default_hostname
if self.env.default_hosts:
self.genv.hosts = self.env.default_hosts
else:
self.genv.hosts = [self.env.default_hostname]
self.genv.user = self.env.default_user
self.genv.password = self.env.default_password
self.genv.key_filename = self.env.default_key_filename
# If the host has been reformatted, the SSH keys will mismatch, throwing an error, so clear them.
self.purge_keys()
# Do a test login with the default password to determine which password we should use.
# r.env.password = self.env.default_password
# with settings(warn_only=True):
# ret = r._local("sshpass -p '{password}' ssh -o StrictHostKeyChecking=no {user}@{host_string} echo hello", capture=True)
# print('ret.return_code:', ret.return_code)
# # print('ret000:[%s]' % ret)
# #code 1 = good password, but prompts needed
# #code 5 = bad password
# #code 6 = good password, but host public key is unknown
# if ret.return_code in (1, 6) or 'hello' in ret:
# # Login succeeded, so we haven't yet changed the password, so use the default password.
# self.genv.password = self.env.default_password
# elif self.genv.user in self.genv.user_passwords:
# # Otherwise, use the password or key set in the config.
# self.genv.password = self.genv.user_passwords[self.genv.user]
# else:
# # Default password fails and there's no current password, so clear.
# self.genv.password = None
# self.genv.password = self.find_working_password()
# print('host.initrole,using password:', self.genv.password)
# Execute post-init callbacks.
for task_name in self.env.post_initrole_tasks:
if self.verbose:
print('Calling post initrole task %s' % task_name)
satchel_name, method_name = task_name.split('.')
satchel = self.get_satchel(name=satchel_name)
getattr(satchel, method_name)()
print('^'*80)
print('host.initrole.host_string:', self.genv.host_string)
print('host.initrole.user:', self.genv.user)
print('host.initrole.password:', self.genv.password)
|
[
"Called",
"to",
"set",
"default",
"password",
"login",
"for",
"systems",
"that",
"do",
"not",
"yet",
"have",
"passwordless",
"login",
"setup",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/host.py#L200-L288
|
[
"def",
"initrole",
"(",
"self",
",",
"check",
"=",
"True",
")",
":",
"if",
"self",
".",
"env",
".",
"original_user",
"is",
"None",
":",
"self",
".",
"env",
".",
"original_user",
"=",
"self",
".",
"genv",
".",
"user",
"if",
"self",
".",
"env",
".",
"original_key_filename",
"is",
"None",
":",
"self",
".",
"env",
".",
"original_key_filename",
"=",
"self",
".",
"genv",
".",
"key_filename",
"host_string",
"=",
"None",
"user",
"=",
"None",
"password",
"=",
"None",
"if",
"self",
".",
"env",
".",
"login_check",
":",
"host_string",
",",
"user",
",",
"password",
"=",
"self",
".",
"find_working_password",
"(",
"usernames",
"=",
"[",
"self",
".",
"genv",
".",
"user",
",",
"self",
".",
"env",
".",
"default_user",
"]",
",",
"host_strings",
"=",
"[",
"self",
".",
"genv",
".",
"host_string",
",",
"self",
".",
"env",
".",
"default_hostname",
"]",
",",
")",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"'host.initrole.host_string:'",
",",
"host_string",
")",
"print",
"(",
"'host.initrole.user:'",
",",
"user",
")",
"print",
"(",
"'host.initrole.password:'",
",",
"password",
")",
"# needs = True",
"# if check:",
"# needs = self.needs_initrole(stop_on_error=True)",
"needs",
"=",
"False",
"if",
"host_string",
"is",
"not",
"None",
":",
"self",
".",
"genv",
".",
"host_string",
"=",
"host_string",
"if",
"user",
"is",
"not",
"None",
":",
"self",
".",
"genv",
".",
"user",
"=",
"user",
"if",
"password",
"is",
"not",
"None",
":",
"self",
".",
"genv",
".",
"password",
"=",
"password",
"if",
"not",
"needs",
":",
"return",
"assert",
"self",
".",
"env",
".",
"default_hostname",
",",
"'No default hostname set.'",
"assert",
"self",
".",
"env",
".",
"default_user",
",",
"'No default user set.'",
"self",
".",
"genv",
".",
"host_string",
"=",
"self",
".",
"env",
".",
"default_hostname",
"if",
"self",
".",
"env",
".",
"default_hosts",
":",
"self",
".",
"genv",
".",
"hosts",
"=",
"self",
".",
"env",
".",
"default_hosts",
"else",
":",
"self",
".",
"genv",
".",
"hosts",
"=",
"[",
"self",
".",
"env",
".",
"default_hostname",
"]",
"self",
".",
"genv",
".",
"user",
"=",
"self",
".",
"env",
".",
"default_user",
"self",
".",
"genv",
".",
"password",
"=",
"self",
".",
"env",
".",
"default_password",
"self",
".",
"genv",
".",
"key_filename",
"=",
"self",
".",
"env",
".",
"default_key_filename",
"# If the host has been reformatted, the SSH keys will mismatch, throwing an error, so clear them.",
"self",
".",
"purge_keys",
"(",
")",
"# Do a test login with the default password to determine which password we should use.",
"# r.env.password = self.env.default_password",
"# with settings(warn_only=True):",
"# ret = r._local(\"sshpass -p '{password}' ssh -o StrictHostKeyChecking=no {user}@{host_string} echo hello\", capture=True)",
"# print('ret.return_code:', ret.return_code)",
"# # print('ret000:[%s]' % ret)",
"# #code 1 = good password, but prompts needed",
"# #code 5 = bad password",
"# #code 6 = good password, but host public key is unknown",
"# if ret.return_code in (1, 6) or 'hello' in ret:",
"# # Login succeeded, so we haven't yet changed the password, so use the default password.",
"# self.genv.password = self.env.default_password",
"# elif self.genv.user in self.genv.user_passwords:",
"# # Otherwise, use the password or key set in the config.",
"# self.genv.password = self.genv.user_passwords[self.genv.user]",
"# else:",
"# # Default password fails and there's no current password, so clear.",
"# self.genv.password = None",
"# self.genv.password = self.find_working_password()",
"# print('host.initrole,using password:', self.genv.password)",
"# Execute post-init callbacks.",
"for",
"task_name",
"in",
"self",
".",
"env",
".",
"post_initrole_tasks",
":",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"'Calling post initrole task %s'",
"%",
"task_name",
")",
"satchel_name",
",",
"method_name",
"=",
"task_name",
".",
"split",
"(",
"'.'",
")",
"satchel",
"=",
"self",
".",
"get_satchel",
"(",
"name",
"=",
"satchel_name",
")",
"getattr",
"(",
"satchel",
",",
"method_name",
")",
"(",
")",
"print",
"(",
"'^'",
"*",
"80",
")",
"print",
"(",
"'host.initrole.host_string:'",
",",
"self",
".",
"genv",
".",
"host_string",
")",
"print",
"(",
"'host.initrole.user:'",
",",
"self",
".",
"genv",
".",
"user",
")",
"print",
"(",
"'host.initrole.password:'",
",",
"self",
".",
"genv",
".",
"password",
")"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
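initrole finishes by dispatching post-init callbacks named as 'satchel.method' strings. The dispatch idiom is a single split plus getattr; a minimal sketch with a stand-in registry in place of get_satchel (all names here are hypothetical):

class FakeSatchel:
    def configure(self):
        print('configuring...')

satchels = {'hostname': FakeSatchel()}  # stand-in for self.get_satchel()

def run_post_initrole_tasks(task_names):
    for task_name in task_names:
        satchel_name, method_name = task_name.split('.')
        satchel = satchels[satchel_name]
        # e.g. satchels['hostname'].configure()
        getattr(satchel, method_name)()

run_post_initrole_tasks(['hostname.configure'])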
HostnameSatchel.iter_hostnames
|
Yields a list of tuples of the form (ip, hostname).
|
burlap/host.py
|
def iter_hostnames(self):
"""
Yields a list of tuples of the form (ip, hostname).
"""
from burlap.common import get_hosts_retriever
if self.env.use_retriever:
self.vprint('using retriever')
self.vprint('hosts:', self.genv.hosts)
retriever = get_hosts_retriever()
hosts = list(retriever(extended=1))
for _hostname, _data in hosts:
# Skip hosts that aren't selected for this run.
if self.genv.hosts \
and _data.ip not in self.genv.hosts \
and _data.public_dns_name not in self.genv.hosts \
and _hostname not in self.genv.hosts:
continue
assert _data.ip, 'Missing IP.'
yield _data.ip, _hostname#_data.public_dns_name
else:
self.vprint('using default')
for ip, hostname in self.env.hostnames.items():
self.vprint('ip lookup:', ip, hostname)
if ip == UNKNOWN:
ip = self.hostname_to_ip(hostname)
if not ip and hostname in self.env.default_hostnames:
ip = self.hostname_to_ip(self.env.default_hostnames[hostname])
elif not ip[0].isdigit():
ip = self.hostname_to_ip(ip)
assert ip, 'Invalid IP.'
yield ip, hostname
|
def iter_hostnames(self):
"""
Yields a list of tuples of the form (ip, hostname).
"""
from burlap.common import get_hosts_retriever
if self.env.use_retriever:
self.vprint('using retriever')
self.vprint('hosts:', self.genv.hosts)
retriever = get_hosts_retriever()
hosts = list(retriever(extended=1))
for _hostname, _data in hosts:
# Skip hosts that aren't selected for this run.
if self.genv.hosts \
and _data.ip not in self.genv.hosts \
and _data.public_dns_name not in self.genv.hosts \
and _hostname not in self.genv.hosts:
continue
assert _data.ip, 'Missing IP.'
yield _data.ip, _hostname#_data.public_dns_name
else:
self.vprint('using default')
for ip, hostname in self.env.hostnames.items():
self.vprint('ip lookup:', ip, hostname)
if ip == UNKNOWN:
ip = self.hostname_to_ip(hostname)
if not ip and hostname in self.env.default_hostnames:
ip = self.hostname_to_ip(self.env.default_hostnames[hostname])
elif not ip[0].isdigit():
ip = self.hostname_to_ip(ip)
assert ip, 'Invalid IP.'
yield ip, hostname
|
[
"Yields",
"a",
"list",
"of",
"tuples",
"of",
"the",
"form",
"(",
"ip",
"hostname",
")",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/host.py#L351-L383
|
[
"def",
"iter_hostnames",
"(",
"self",
")",
":",
"from",
"burlap",
".",
"common",
"import",
"get_hosts_retriever",
"if",
"self",
".",
"env",
".",
"use_retriever",
":",
"self",
".",
"vprint",
"(",
"'using retriever'",
")",
"self",
".",
"vprint",
"(",
"'hosts:'",
",",
"self",
".",
"genv",
".",
"hosts",
")",
"retriever",
"=",
"get_hosts_retriever",
"(",
")",
"hosts",
"=",
"list",
"(",
"retriever",
"(",
"extended",
"=",
"1",
")",
")",
"for",
"_hostname",
",",
"_data",
"in",
"hosts",
":",
"# Skip hosts that aren't selected for this run.",
"if",
"self",
".",
"genv",
".",
"hosts",
"and",
"_data",
".",
"ip",
"not",
"in",
"self",
".",
"genv",
".",
"hosts",
"and",
"_data",
".",
"public_dns_name",
"not",
"in",
"self",
".",
"genv",
".",
"hosts",
"and",
"_hostname",
"not",
"in",
"self",
".",
"genv",
".",
"hosts",
":",
"continue",
"assert",
"_data",
".",
"ip",
",",
"'Missing IP.'",
"yield",
"_data",
".",
"ip",
",",
"_hostname",
"#_data.public_dns_name",
"else",
":",
"self",
".",
"vprint",
"(",
"'using default'",
")",
"for",
"ip",
",",
"hostname",
"in",
"self",
".",
"env",
".",
"hostnames",
".",
"items",
"(",
")",
":",
"self",
".",
"vprint",
"(",
"'ip lookup:'",
",",
"ip",
",",
"hostname",
")",
"if",
"ip",
"==",
"UNKNOWN",
":",
"ip",
"=",
"self",
".",
"hostname_to_ip",
"(",
"hostname",
")",
"if",
"not",
"ip",
"and",
"hostname",
"in",
"self",
".",
"env",
".",
"default_hostnames",
":",
"ip",
"=",
"self",
".",
"hostname_to_ip",
"(",
"self",
".",
"env",
".",
"default_hostnames",
"[",
"hostname",
"]",
")",
"elif",
"not",
"ip",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
":",
"ip",
"=",
"self",
".",
"hostname_to_ip",
"(",
"ip",
")",
"assert",
"ip",
",",
"'Invalid IP.'",
"yield",
"ip",
",",
"hostname"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
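iter_hostnames resolves any map entry whose key is not already a numeric IP, falling back from the hostname itself to a default-hostname map. The fallback chain can be sketched standalone; here hostname_to_ip is a stand-in built on socket, and UNKNOWN is a placeholder for whatever sentinel the satchel actually uses:

import socket

UNKNOWN = '?'  # placeholder sentinel

def hostname_to_ip(name):
    try:
        return socket.gethostbyname(name)
    except socket.error:
        return None

def iter_hostnames(hostnames, default_hostnames):
    for ip, hostname in hostnames.items():
        if ip == UNKNOWN:
            ip = hostname_to_ip(hostname)
            if not ip and hostname in default_hostnames:
                ip = hostname_to_ip(default_hostnames[hostname])
        elif not ip[0].isdigit():
            ip = hostname_to_ip(ip)  # key was itself a hostname
        assert ip, 'Invalid IP.'
        yield ip, hostname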
HostnameSatchel.get_public_ip
|
Gets the public IP for a host.
|
burlap/host.py
|
def get_public_ip(self):
"""
Gets the public IP for a host.
"""
r = self.local_renderer
ret = r.run(r.env.get_public_ip_command) or ''
ret = ret.strip()
print('ip:', ret)
return ret
|
def get_public_ip(self):
"""
Gets the public IP for a host.
"""
r = self.local_renderer
ret = r.run(r.env.get_public_ip_command) or ''
ret = ret.strip()
print('ip:', ret)
return ret
|
[
"Gets",
"the",
"public",
"IP",
"for",
"a",
"host",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/host.py#L386-L394
|
[
"def",
"get_public_ip",
"(",
"self",
")",
":",
"r",
"=",
"self",
".",
"local_renderer",
"ret",
"=",
"r",
".",
"run",
"(",
"r",
".",
"env",
".",
"get_public_ip_command",
")",
"or",
"''",
"ret",
"=",
"ret",
".",
"strip",
"(",
")",
"print",
"(",
"'ip:'",
",",
"ret",
")",
"return",
"ret"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
HostnameSatchel.configure
|
Assigns a name to the server accessible from user space.
Note that we add the name to /etc/hosts, since not all programs use
/etc/hostname to reliably identify the server hostname.
|
burlap/host.py
|
def configure(self, reboot=1):
"""
Assigns a name to the server accessible from user space.
    Note that we add the name to /etc/hosts, since not all programs use
    /etc/hostname to reliably identify the server hostname.
"""
r = self.local_renderer
for ip, hostname in self.iter_hostnames():
self.vprint('ip/hostname:', ip, hostname)
r.genv.host_string = ip
r.env.hostname = hostname
with settings(warn_only=True):
r.sudo('echo "{hostname}" > /etc/hostname')
r.sudo('echo "127.0.0.1 {hostname}" | cat - /etc/hosts > /tmp/out && mv /tmp/out /etc/hosts')
r.sudo(r.env.set_hostname_command)
if r.env.auto_reboot and int(reboot):
r.reboot()
|
def configure(self, reboot=1):
"""
Assigns a name to the server accessible from user space.
    Note that we add the name to /etc/hosts, since not all programs use
    /etc/hostname to reliably identify the server hostname.
"""
r = self.local_renderer
for ip, hostname in self.iter_hostnames():
self.vprint('ip/hostname:', ip, hostname)
r.genv.host_string = ip
r.env.hostname = hostname
with settings(warn_only=True):
r.sudo('echo "{hostname}" > /etc/hostname')
r.sudo('echo "127.0.0.1 {hostname}" | cat - /etc/hosts > /tmp/out && mv /tmp/out /etc/hosts')
r.sudo(r.env.set_hostname_command)
if r.env.auto_reboot and int(reboot):
r.reboot()
|
[
"Assigns",
"a",
"name",
"to",
"the",
"server",
"accessible",
"from",
"user",
"space",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/host.py#L398-L415
|
[
"def",
"configure",
"(",
"self",
",",
"reboot",
"=",
"1",
")",
":",
"r",
"=",
"self",
".",
"local_renderer",
"for",
"ip",
",",
"hostname",
"in",
"self",
".",
"iter_hostnames",
"(",
")",
":",
"self",
".",
"vprint",
"(",
"'ip/hostname:'",
",",
"ip",
",",
"hostname",
")",
"r",
".",
"genv",
".",
"host_string",
"=",
"ip",
"r",
".",
"env",
".",
"hostname",
"=",
"hostname",
"with",
"settings",
"(",
"warn_only",
"=",
"True",
")",
":",
"r",
".",
"sudo",
"(",
"'echo \"{hostname}\" > /etc/hostname'",
")",
"r",
".",
"sudo",
"(",
"'echo \"127.0.0.1 {hostname}\" | cat - /etc/hosts > /tmp/out && mv /tmp/out /etc/hosts'",
")",
"r",
".",
"sudo",
"(",
"r",
".",
"env",
".",
"set_hostname_command",
")",
"if",
"r",
".",
"env",
".",
"auto_reboot",
"and",
"int",
"(",
"reboot",
")",
":",
"r",
".",
"reboot",
"(",
")"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
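The hostname assignment above boils down to three steps: write /etc/hostname, prepend a loopback mapping to /etc/hosts, and run the distribution's set-hostname command. A minimal local sketch (must run as root); hostnamectl is one common choice and is an assumption here, not necessarily what r.env.set_hostname_command expands to:

import subprocess

def set_hostname(hostname):
    # Write the name to /etc/hostname.
    with open('/etc/hostname', 'w') as f:
        f.write(hostname + '\n')
    # Prepend a loopback mapping so programs that consult /etc/hosts agree.
    with open('/etc/hosts') as f:
        hosts = f.read()
    with open('/etc/hosts', 'w') as f:
        f.write('127.0.0.1 %s\n%s' % (hostname, hosts))
    # Apply immediately; systemd-based systems commonly use hostnamectl.
    subprocess.run(['hostnamectl', 'set-hostname', hostname])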
JiraHelperSatchel.update_tickets_from_git
|
Run during a deployment.
Looks at all commits between now and the last deployment.
Finds all ticket numbers and updates their status in Jira.
|
burlap/jirahelper.py
|
def update_tickets_from_git(self, from_commit=None, to_commit=None):
"""
Run during a deployment.
Looks at all commits between now and the last deployment.
Finds all ticket numbers and updates their status in Jira.
"""
from jira import JIRA, JIRAError
#from burlap.deploy import get_last_current_diffs
from burlap.git import gittracker, CURRENT_COMMIT
r = self.local_renderer
# get_current_commit = gittracker.get_current_commit
# GITTRACKER = gittracker.name.upper()
        # Ensure this is only run once per role, by acting only on the last host.
        if self.genv.host_string != self.genv.hosts[-1]:
            self.vprint('Not the last host. Aborting.')
return
print('self.env.update_from_git:', self.env.update_from_git)
print('self.genv.jirahelper_update_from_git:', self.genv.jirahelper_update_from_git)
if not self.env.update_from_git:
self.vprint('Update from git disabled. Aborting.')
return
if not self.env.ticket_pattern:
self.vprint('No ticket pattern defined. Aborting.')
return
if not self.env.basic_auth_username or not self.env.basic_auth_password:
self.vprint('Username or password not given. Aborting.')
return
        # During a deployment, we should be given these, but for testing,
        # look up the diffs dynamically.
last = gittracker.last_manifest
current = gittracker.current_manifest
last_commit = from_commit or last.current_commit#[CURRENT_COMMIT]
print('last_commit:', last_commit)
current_commit = to_commit or current[CURRENT_COMMIT]
print('current_commit:', current_commit)
if not last_commit or not current_commit:
print('Missing commit ID. Aborting.')
return
if self.verbose:
print('-'*80)
print('last.keys:', last.keys())
print('-'*80)
print('current.keys:', current.keys())
# try:
# last_commit = last['GITTRACKER']['current_commit']
# except KeyError:
# return
# current_commit = current['GITTRACKER']['current_commit']
# Find all tickets deployed between last deployment and now.
tickets = self.get_tickets_between_commits(current_commit, last_commit)
if self.verbose:
print('tickets:', tickets)
# Update all tickets in Jira.
jira = JIRA({
'server': self.env.server
}, basic_auth=(self.env.basic_auth_username, self.env.basic_auth_password))
for ticket in tickets:
            # Comment on the ticket, mentioning that this deployment updated it.
r.env.role = r.genv.ROLE.lower()
comment = r.format(self.env.ticket_update_message_template)
print('Commenting on ticket %s: %s' % (ticket, comment))
if not self.dryrun:
jira.add_comment(ticket, comment)
# Update ticket status.
recheck = False
while 1:
print('Looking up jira ticket %s...' % ticket)
issue = jira.issue(ticket)
self.vprint('Ticket %s retrieved.' % ticket)
transition_to_id = dict((t['name'], t['id']) for t in jira.transitions(issue))
self.vprint('%i allowable transitions found:' % len(transition_to_id))
pprint(transition_to_id)
self.vprint('issue.fields.status.id:', issue.fields.status.id)
self.vprint('issue.fields.status.name:', issue.fields.status.name)
jira_status_id = issue.fields.status.name.title()
self.vprint('jira_status_id:', jira_status_id)
next_transition_name = self.env.deploy_workflow.get(jira_status_id)
self.vprint('next_transition_name:', next_transition_name)
next_transition_id = transition_to_id.get(next_transition_name)
self.vprint('next_transition_id:', next_transition_id)
if next_transition_name:
if issue.fields.assignee:
if issue.fields.assignee.raw:
assignee_name = issue.fields.assignee.name
else:
# Get assignee name directly
# https://community.atlassian.com/t5/Jira-questions/Jira-in-Python-issue-fields-reporter-name-
# errors-with-TypeError/qaq-p/937924
assignee_name = issue.fields.assignee._session['name']
else:
assignee_name = None
# Get new assignee by status
new_assignee = self.env.assignee_by_status.get(
#issue.fields.status.name.title(),
next_transition_name,
assignee_name,
)
# If assigning to reporter, get reporter name.
if new_assignee == 'reporter':
if issue.fields.reporter.raw:
new_assignee = issue.fields.reporter.name
else:
# Get reporter name directly
# https://community.atlassian.com/t5/Jira-questions/Jira-in-Python-issue-fields-reporter-name-
# errors-with-TypeError/qaq-p/937924
new_assignee = issue.fields.reporter._session['name']
print('Updating ticket %s to status %s (%s) and assigning it to %s.' % (ticket, next_transition_name, next_transition_id, new_assignee))
if not self.dryrun:
if next_transition_id:
try:
jira.transition_issue(issue, next_transition_id)
recheck = True
except AttributeError as e:
print('Unable to transition ticket %s to %s: %s' % (ticket, next_transition_name, e), file=sys.stderr)
traceback.print_exc()
                        # Note: assignment should happen after the transition, since the assignment
                        # may in effect remove transitions that we need.
try:
if new_assignee:
print('Assigning ticket %s to %s.' % (ticket, new_assignee))
jira.assign_issue(issue, new_assignee)
else:
print('No new assignee found.')
except JIRAError as e:
print('Unable to reassign ticket %s to %s: %s' % (ticket, new_assignee, e), file=sys.stderr)
else:
recheck = False
print('No transitions found for ticket %s currently in status "%s".' % (ticket, issue.fields.status.name))
if not recheck:
break
|
def update_tickets_from_git(self, from_commit=None, to_commit=None):
"""
Run during a deployment.
Looks at all commits between now and the last deployment.
Finds all ticket numbers and updates their status in Jira.
"""
from jira import JIRA, JIRAError
#from burlap.deploy import get_last_current_diffs
from burlap.git import gittracker, CURRENT_COMMIT
r = self.local_renderer
# get_current_commit = gittracker.get_current_commit
# GITTRACKER = gittracker.name.upper()
        # Ensure this is only run once per role, by acting only on the last host.
        if self.genv.host_string != self.genv.hosts[-1]:
            self.vprint('Not the last host. Aborting.')
return
print('self.env.update_from_git:', self.env.update_from_git)
print('self.genv.jirahelper_update_from_git:', self.genv.jirahelper_update_from_git)
if not self.env.update_from_git:
self.vprint('Update from git disabled. Aborting.')
return
if not self.env.ticket_pattern:
self.vprint('No ticket pattern defined. Aborting.')
return
if not self.env.basic_auth_username or not self.env.basic_auth_password:
self.vprint('Username or password not given. Aborting.')
return
        # During a deployment, we should be given these, but for testing,
        # look up the diffs dynamically.
last = gittracker.last_manifest
current = gittracker.current_manifest
last_commit = from_commit or last.current_commit#[CURRENT_COMMIT]
print('last_commit:', last_commit)
current_commit = to_commit or current[CURRENT_COMMIT]
print('current_commit:', current_commit)
if not last_commit or not current_commit:
print('Missing commit ID. Aborting.')
return
if self.verbose:
print('-'*80)
print('last.keys:', last.keys())
print('-'*80)
print('current.keys:', current.keys())
# try:
# last_commit = last['GITTRACKER']['current_commit']
# except KeyError:
# return
# current_commit = current['GITTRACKER']['current_commit']
# Find all tickets deployed between last deployment and now.
tickets = self.get_tickets_between_commits(current_commit, last_commit)
if self.verbose:
print('tickets:', tickets)
# Update all tickets in Jira.
jira = JIRA({
'server': self.env.server
}, basic_auth=(self.env.basic_auth_username, self.env.basic_auth_password))
for ticket in tickets:
            # Comment on the ticket, mentioning that this deployment updated it.
r.env.role = r.genv.ROLE.lower()
comment = r.format(self.env.ticket_update_message_template)
print('Commenting on ticket %s: %s' % (ticket, comment))
if not self.dryrun:
jira.add_comment(ticket, comment)
# Update ticket status.
recheck = False
while 1:
print('Looking up jira ticket %s...' % ticket)
issue = jira.issue(ticket)
self.vprint('Ticket %s retrieved.' % ticket)
transition_to_id = dict((t['name'], t['id']) for t in jira.transitions(issue))
self.vprint('%i allowable transitions found:' % len(transition_to_id))
pprint(transition_to_id)
self.vprint('issue.fields.status.id:', issue.fields.status.id)
self.vprint('issue.fields.status.name:', issue.fields.status.name)
jira_status_id = issue.fields.status.name.title()
self.vprint('jira_status_id:', jira_status_id)
next_transition_name = self.env.deploy_workflow.get(jira_status_id)
self.vprint('next_transition_name:', next_transition_name)
next_transition_id = transition_to_id.get(next_transition_name)
self.vprint('next_transition_id:', next_transition_id)
if next_transition_name:
if issue.fields.assignee:
if issue.fields.assignee.raw:
assignee_name = issue.fields.assignee.name
else:
# Get assignee name directly
# https://community.atlassian.com/t5/Jira-questions/Jira-in-Python-issue-fields-reporter-name-
# errors-with-TypeError/qaq-p/937924
assignee_name = issue.fields.assignee._session['name']
else:
assignee_name = None
# Get new assignee by status
new_assignee = self.env.assignee_by_status.get(
#issue.fields.status.name.title(),
next_transition_name,
assignee_name,
)
# If assigning to reporter, get reporter name.
if new_assignee == 'reporter':
if issue.fields.reporter.raw:
new_assignee = issue.fields.reporter.name
else:
# Get reporter name directly
# https://community.atlassian.com/t5/Jira-questions/Jira-in-Python-issue-fields-reporter-name-
# errors-with-TypeError/qaq-p/937924
new_assignee = issue.fields.reporter._session['name']
print('Updating ticket %s to status %s (%s) and assigning it to %s.' % (ticket, next_transition_name, next_transition_id, new_assignee))
if not self.dryrun:
if next_transition_id:
try:
jira.transition_issue(issue, next_transition_id)
recheck = True
except AttributeError as e:
print('Unable to transition ticket %s to %s: %s' % (ticket, next_transition_name, e), file=sys.stderr)
traceback.print_exc()
                        # Note: assignment should happen after the transition, since the assignment
                        # may in effect remove transitions that we need.
try:
if new_assignee:
print('Assigning ticket %s to %s.' % (ticket, new_assignee))
jira.assign_issue(issue, new_assignee)
else:
print('No new assignee found.')
except JIRAError as e:
print('Unable to reassign ticket %s to %s: %s' % (ticket, new_assignee, e), file=sys.stderr)
else:
recheck = False
print('No transitions found for ticket %s currently in status "%s".' % (ticket, issue.fields.status.name))
if not recheck:
break
|
[
"Run",
"during",
"a",
"deployment",
".",
"Looks",
"at",
"all",
"commits",
"between",
"now",
"and",
"the",
"last",
"deployment",
".",
"Finds",
"all",
"ticket",
"numbers",
"and",
"updates",
"their",
"status",
"in",
"Jira",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/jirahelper.py#L67-L217
|
[
"def",
"update_tickets_from_git",
"(",
"self",
",",
"from_commit",
"=",
"None",
",",
"to_commit",
"=",
"None",
")",
":",
"from",
"jira",
"import",
"JIRA",
",",
"JIRAError",
"#from burlap.deploy import get_last_current_diffs",
"from",
"burlap",
".",
"git",
"import",
"gittracker",
",",
"CURRENT_COMMIT",
"r",
"=",
"self",
".",
"local_renderer",
"# get_current_commit = gittracker.get_current_commit",
"# GITTRACKER = gittracker.name.upper()",
"# Ensure this is only run once per role.",
"if",
"self",
".",
"genv",
".",
"host_string",
"!=",
"self",
".",
"genv",
".",
"hosts",
"[",
"-",
"1",
"]",
":",
"self",
".",
"vprint",
"(",
"'Not first server. Aborting.'",
")",
"return",
"print",
"(",
"'self.env.update_from_git:'",
",",
"self",
".",
"env",
".",
"update_from_git",
")",
"print",
"(",
"'self.genv.jirahelper_update_from_git:'",
",",
"self",
".",
"genv",
".",
"jirahelper_update_from_git",
")",
"if",
"not",
"self",
".",
"env",
".",
"update_from_git",
":",
"self",
".",
"vprint",
"(",
"'Update from git disabled. Aborting.'",
")",
"return",
"if",
"not",
"self",
".",
"env",
".",
"ticket_pattern",
":",
"self",
".",
"vprint",
"(",
"'No ticket pattern defined. Aborting.'",
")",
"return",
"if",
"not",
"self",
".",
"env",
".",
"basic_auth_username",
"or",
"not",
"self",
".",
"env",
".",
"basic_auth_password",
":",
"self",
".",
"vprint",
"(",
"'Username or password not given. Aborting.'",
")",
"return",
"# During a deployment, we should be given these, but for testing,",
"# lookup the diffs dynamically.",
"last",
"=",
"gittracker",
".",
"last_manifest",
"current",
"=",
"gittracker",
".",
"current_manifest",
"last_commit",
"=",
"from_commit",
"or",
"last",
".",
"current_commit",
"#[CURRENT_COMMIT]",
"print",
"(",
"'last_commit:'",
",",
"last_commit",
")",
"current_commit",
"=",
"to_commit",
"or",
"current",
"[",
"CURRENT_COMMIT",
"]",
"print",
"(",
"'current_commit:'",
",",
"current_commit",
")",
"if",
"not",
"last_commit",
"or",
"not",
"current_commit",
":",
"print",
"(",
"'Missing commit ID. Aborting.'",
")",
"return",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"'-'",
"*",
"80",
")",
"print",
"(",
"'last.keys:'",
",",
"last",
".",
"keys",
"(",
")",
")",
"print",
"(",
"'-'",
"*",
"80",
")",
"print",
"(",
"'current.keys:'",
",",
"current",
".",
"keys",
"(",
")",
")",
"# try:",
"# last_commit = last['GITTRACKER']['current_commit']",
"# except KeyError:",
"# return",
"# current_commit = current['GITTRACKER']['current_commit']",
"# Find all tickets deployed between last deployment and now.",
"tickets",
"=",
"self",
".",
"get_tickets_between_commits",
"(",
"current_commit",
",",
"last_commit",
")",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"'tickets:'",
",",
"tickets",
")",
"# Update all tickets in Jira.",
"jira",
"=",
"JIRA",
"(",
"{",
"'server'",
":",
"self",
".",
"env",
".",
"server",
"}",
",",
"basic_auth",
"=",
"(",
"self",
".",
"env",
".",
"basic_auth_username",
",",
"self",
".",
"env",
".",
"basic_auth_password",
")",
")",
"for",
"ticket",
"in",
"tickets",
":",
"# Mention this Jira updated.",
"r",
".",
"env",
".",
"role",
"=",
"r",
".",
"genv",
".",
"ROLE",
".",
"lower",
"(",
")",
"comment",
"=",
"r",
".",
"format",
"(",
"self",
".",
"env",
".",
"ticket_update_message_template",
")",
"print",
"(",
"'Commenting on ticket %s: %s'",
"%",
"(",
"ticket",
",",
"comment",
")",
")",
"if",
"not",
"self",
".",
"dryrun",
":",
"jira",
".",
"add_comment",
"(",
"ticket",
",",
"comment",
")",
"# Update ticket status.",
"recheck",
"=",
"False",
"while",
"1",
":",
"print",
"(",
"'Looking up jira ticket %s...'",
"%",
"ticket",
")",
"issue",
"=",
"jira",
".",
"issue",
"(",
"ticket",
")",
"self",
".",
"vprint",
"(",
"'Ticket %s retrieved.'",
"%",
"ticket",
")",
"transition_to_id",
"=",
"dict",
"(",
"(",
"t",
"[",
"'name'",
"]",
",",
"t",
"[",
"'id'",
"]",
")",
"for",
"t",
"in",
"jira",
".",
"transitions",
"(",
"issue",
")",
")",
"self",
".",
"vprint",
"(",
"'%i allowable transitions found:'",
"%",
"len",
"(",
"transition_to_id",
")",
")",
"pprint",
"(",
"transition_to_id",
")",
"self",
".",
"vprint",
"(",
"'issue.fields.status.id:'",
",",
"issue",
".",
"fields",
".",
"status",
".",
"id",
")",
"self",
".",
"vprint",
"(",
"'issue.fields.status.name:'",
",",
"issue",
".",
"fields",
".",
"status",
".",
"name",
")",
"jira_status_id",
"=",
"issue",
".",
"fields",
".",
"status",
".",
"name",
".",
"title",
"(",
")",
"self",
".",
"vprint",
"(",
"'jira_status_id:'",
",",
"jira_status_id",
")",
"next_transition_name",
"=",
"self",
".",
"env",
".",
"deploy_workflow",
".",
"get",
"(",
"jira_status_id",
")",
"self",
".",
"vprint",
"(",
"'next_transition_name:'",
",",
"next_transition_name",
")",
"next_transition_id",
"=",
"transition_to_id",
".",
"get",
"(",
"next_transition_name",
")",
"self",
".",
"vprint",
"(",
"'next_transition_id:'",
",",
"next_transition_id",
")",
"if",
"next_transition_name",
":",
"if",
"issue",
".",
"fields",
".",
"assignee",
":",
"if",
"issue",
".",
"fields",
".",
"assignee",
".",
"raw",
":",
"assignee_name",
"=",
"issue",
".",
"fields",
".",
"assignee",
".",
"name",
"else",
":",
"# Get assignee name directly",
"# https://community.atlassian.com/t5/Jira-questions/Jira-in-Python-issue-fields-reporter-name-",
"# errors-with-TypeError/qaq-p/937924",
"assignee_name",
"=",
"issue",
".",
"fields",
".",
"assignee",
".",
"_session",
"[",
"'name'",
"]",
"else",
":",
"assignee_name",
"=",
"None",
"# Get new assignee by status",
"new_assignee",
"=",
"self",
".",
"env",
".",
"assignee_by_status",
".",
"get",
"(",
"#issue.fields.status.name.title(),",
"next_transition_name",
",",
"assignee_name",
",",
")",
"# If assigning to reporter, get reporter name.",
"if",
"new_assignee",
"==",
"'reporter'",
":",
"if",
"issue",
".",
"fields",
".",
"reporter",
".",
"raw",
":",
"new_assignee",
"=",
"issue",
".",
"fields",
".",
"reporter",
".",
"name",
"else",
":",
"# Get reporter name directly",
"# https://community.atlassian.com/t5/Jira-questions/Jira-in-Python-issue-fields-reporter-name-",
"# errors-with-TypeError/qaq-p/937924",
"new_assignee",
"=",
"issue",
".",
"fields",
".",
"reporter",
".",
"_session",
"[",
"'name'",
"]",
"print",
"(",
"'Updating ticket %s to status %s (%s) and assigning it to %s.'",
"%",
"(",
"ticket",
",",
"next_transition_name",
",",
"next_transition_id",
",",
"new_assignee",
")",
")",
"if",
"not",
"self",
".",
"dryrun",
":",
"if",
"next_transition_id",
":",
"try",
":",
"jira",
".",
"transition_issue",
"(",
"issue",
",",
"next_transition_id",
")",
"recheck",
"=",
"True",
"except",
"AttributeError",
"as",
"e",
":",
"print",
"(",
"'Unable to transition ticket %s to %s: %s'",
"%",
"(",
"ticket",
",",
"next_transition_name",
",",
"e",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"traceback",
".",
"print_exc",
"(",
")",
"# Note assignment should happen after transition, since the assignment may",
"# effect remove transitions that we need.",
"try",
":",
"if",
"new_assignee",
":",
"print",
"(",
"'Assigning ticket %s to %s.'",
"%",
"(",
"ticket",
",",
"new_assignee",
")",
")",
"jira",
".",
"assign_issue",
"(",
"issue",
",",
"new_assignee",
")",
"else",
":",
"print",
"(",
"'No new assignee found.'",
")",
"except",
"JIRAError",
"as",
"e",
":",
"print",
"(",
"'Unable to reassign ticket %s to %s: %s'",
"%",
"(",
"ticket",
",",
"new_assignee",
",",
"e",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"else",
":",
"recheck",
"=",
"False",
"print",
"(",
"'No transitions found for ticket %s currently in status \"%s\".'",
"%",
"(",
"ticket",
",",
"issue",
".",
"fields",
".",
"status",
".",
"name",
")",
")",
"if",
"not",
"recheck",
":",
"break"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
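The core Jira step in update_tickets_from_git is: read the issue's current status, map it through a deploy-workflow dict to a transition name, and resolve that name to a transition id the API will accept. A minimal sketch against the jira library, whose issue/transitions/transition_issue calls are the same ones used above; the server URL, credentials, ticket key, and workflow mapping are all placeholders:

from jira import JIRA

deploy_workflow = {'In Review': 'Deploy'}  # hypothetical status -> transition name

jira = JIRA({'server': 'https://jira.example.com'},
            basic_auth=('user', 'password'))
issue = jira.issue('PROJ-123')
# Map the transition names currently allowed for this issue to their ids.
transition_to_id = {t['name']: t['id'] for t in jira.transitions(issue)}
next_name = deploy_workflow.get(issue.fields.status.name.title())
next_id = transition_to_id.get(next_name)
if next_id:
    jira.transition_issue(issue, next_id)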
partitions
|
Get a partition list for all disks, or for a selected device only
Example::
from burlap.disk import partitions
spart = {'Linux': 0x83, 'Swap': 0x82}
parts = partitions()
# parts = {'/dev/sda1': 131, '/dev/sda2': 130, '/dev/sda3': 131}
r = parts['/dev/sda1'] == spart['Linux']
r = r and parts['/dev/sda2'] == spart['Swap']
if r:
print("You can format these partitions")
|
burlap/disk.py
|
def partitions(device=""):
"""
    Get a partition list for all disks, or for a selected device only
Example::
from burlap.disk import partitions
spart = {'Linux': 0x83, 'Swap': 0x82}
parts = partitions()
# parts = {'/dev/sda1': 131, '/dev/sda2': 130, '/dev/sda3': 131}
r = parts['/dev/sda1'] == spart['Linux']
r = r and parts['/dev/sda2'] == spart['Swap']
if r:
print("You can format these partitions")
"""
partitions_list = {}
with settings(hide('running', 'stdout')):
res = run_as_root('sfdisk -d %(device)s' % locals())
spart = re.compile(r'(?P<pname>^/.*) : .* Id=(?P<ptypeid>[0-9a-z]+)')
for line in res.splitlines():
m = spart.search(line)
if m:
partitions_list[m.group('pname')] = int(m.group('ptypeid'), 16)
return partitions_list
|
def partitions(device=""):
"""
    Get a partition list for all disks, or for a selected device only
Example::
from burlap.disk import partitions
spart = {'Linux': 0x83, 'Swap': 0x82}
parts = partitions()
# parts = {'/dev/sda1': 131, '/dev/sda2': 130, '/dev/sda3': 131}
r = parts['/dev/sda1'] == spart['Linux']
r = r and parts['/dev/sda2'] == spart['Swap']
if r:
print("You can format these partitions")
"""
partitions_list = {}
with settings(hide('running', 'stdout')):
res = run_as_root('sfdisk -d %(device)s' % locals())
spart = re.compile(r'(?P<pname>^/.*) : .* Id=(?P<ptypeid>[0-9a-z]+)')
for line in res.splitlines():
m = spart.search(line)
if m:
partitions_list[m.group('pname')] = int(m.group('ptypeid'), 16)
return partitions_list
|
[
"Get",
"a",
"partition",
"list",
"for",
"all",
"disk",
"or",
"for",
"selected",
"device",
"only"
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/disk.py#L13-L39
|
[
"def",
"partitions",
"(",
"device",
"=",
"\"\"",
")",
":",
"partitions_list",
"=",
"{",
"}",
"with",
"settings",
"(",
"hide",
"(",
"'running'",
",",
"'stdout'",
")",
")",
":",
"res",
"=",
"run_as_root",
"(",
"'sfdisk -d %(device)s'",
"%",
"locals",
"(",
")",
")",
"spart",
"=",
"re",
".",
"compile",
"(",
"r'(?P<pname>^/.*) : .* Id=(?P<ptypeid>[0-9a-z]+)'",
")",
"for",
"line",
"in",
"res",
".",
"splitlines",
"(",
")",
":",
"m",
"=",
"spart",
".",
"search",
"(",
"line",
")",
"if",
"m",
":",
"partitions_list",
"[",
"m",
".",
"group",
"(",
"'pname'",
")",
"]",
"=",
"int",
"(",
"m",
".",
"group",
"(",
"'ptypeid'",
")",
",",
"16",
")",
"return",
"partitions_list"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
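The parsing in partitions() is a regex over `sfdisk -d` output, converting the hex Id field to an int. A standalone sketch that runs sfdisk locally (requires root); note that recent util-linux releases print `type=` rather than `Id=`, so the regex may need adjusting on modern systems:

import re
import subprocess

def local_partitions(device=''):
    out = subprocess.run('sfdisk -d %s' % device, shell=True,
                         capture_output=True, text=True).stdout
    spart = re.compile(r'(?P<pname>^/.*) : .* Id=(?P<ptypeid>[0-9a-z]+)')
    parts = {}
    for line in out.splitlines():
        m = spart.search(line)
        if m:
            # Partition type is hex: 0x83 is Linux, 0x82 is swap.
            parts[m.group('pname')] = int(m.group('ptypeid'), 16)
    return parts

print(local_partitions())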
getdevice_by_uuid
|
Get an HDD device by UUID
Example::
from burlap.disk import getdevice_by_uuid
device = getdevice_by_uuid("356fafdc-21d5-408e-a3e9-2b3f32cb2a8c")
if device:
mount(device,'/mountpoint')
|
burlap/disk.py
|
def getdevice_by_uuid(uuid):
"""
    Get an HDD device by UUID
Example::
from burlap.disk import getdevice_by_uuid
device = getdevice_by_uuid("356fafdc-21d5-408e-a3e9-2b3f32cb2a8c")
if device:
mount(device,'/mountpoint')
"""
with settings(hide('running', 'warnings', 'stdout'), warn_only=True):
res = run_as_root('blkid -U %s' % uuid)
if not res.succeeded:
return None
return res
|
def getdevice_by_uuid(uuid):
"""
    Get an HDD device by UUID
Example::
from burlap.disk import getdevice_by_uuid
device = getdevice_by_uuid("356fafdc-21d5-408e-a3e9-2b3f32cb2a8c")
if device:
mount(device,'/mountpoint')
"""
with settings(hide('running', 'warnings', 'stdout'), warn_only=True):
res = run_as_root('blkid -U %s' % uuid)
if not res.succeeded:
return None
return res
|
[
"Get",
"a",
"HDD",
"device",
"by",
"uuid"
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/disk.py#L42-L60
|
[
"def",
"getdevice_by_uuid",
"(",
"uuid",
")",
":",
"with",
"settings",
"(",
"hide",
"(",
"'running'",
",",
"'warnings'",
",",
"'stdout'",
")",
",",
"warn_only",
"=",
"True",
")",
":",
"res",
"=",
"run_as_root",
"(",
"'blkid -U %s'",
"%",
"uuid",
")",
"if",
"not",
"res",
".",
"succeeded",
":",
"return",
"None",
"return",
"res"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
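`blkid -U` prints the device path for a filesystem UUID and exits non-zero when the UUID is unknown, which is exactly what getdevice_by_uuid keys off. A local sketch (blkid typically needs root); the UUID is the same example value as in the docstring:

import subprocess

def device_by_uuid(uuid):
    proc = subprocess.run(['blkid', '-U', uuid],
                          capture_output=True, text=True)
    if proc.returncode != 0:
        return None  # UUID not found on any device.
    return proc.stdout.strip()

print(device_by_uuid('356fafdc-21d5-408e-a3e9-2b3f32cb2a8c'))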
ismounted
|
Check if a partition is mounted
Example::
from burlap.disk import ismounted
if ismounted('/dev/sda1'):
print ("disk sda1 is mounted")
|
burlap/disk.py
|
def ismounted(device):
"""
    Check if a partition is mounted
Example::
from burlap.disk import ismounted
if ismounted('/dev/sda1'):
print ("disk sda1 is mounted")
"""
# Check filesystem
with settings(hide('running', 'stdout')):
res = run_as_root('mount')
for line in res.splitlines():
fields = line.split()
if fields[0] == device:
return True
# Check swap
with settings(hide('running', 'stdout')):
res = run_as_root('swapon -s')
for line in res.splitlines():
fields = line.split()
if fields[0] == device:
return True
return False
|
def ismounted(device):
"""
    Check if a partition is mounted
Example::
from burlap.disk import ismounted
if ismounted('/dev/sda1'):
print ("disk sda1 is mounted")
"""
# Check filesystem
with settings(hide('running', 'stdout')):
res = run_as_root('mount')
for line in res.splitlines():
fields = line.split()
if fields[0] == device:
return True
# Check swap
with settings(hide('running', 'stdout')):
res = run_as_root('swapon -s')
for line in res.splitlines():
fields = line.split()
if fields[0] == device:
return True
return False
|
[
"Check",
"if",
"partition",
"is",
"mounted"
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/disk.py#L91-L118
|
[
"def",
"ismounted",
"(",
"device",
")",
":",
"# Check filesystem",
"with",
"settings",
"(",
"hide",
"(",
"'running'",
",",
"'stdout'",
")",
")",
":",
"res",
"=",
"run_as_root",
"(",
"'mount'",
")",
"for",
"line",
"in",
"res",
".",
"splitlines",
"(",
")",
":",
"fields",
"=",
"line",
".",
"split",
"(",
")",
"if",
"fields",
"[",
"0",
"]",
"==",
"device",
":",
"return",
"True",
"# Check swap",
"with",
"settings",
"(",
"hide",
"(",
"'running'",
",",
"'stdout'",
")",
")",
":",
"res",
"=",
"run_as_root",
"(",
"'swapon -s'",
")",
"for",
"line",
"in",
"res",
".",
"splitlines",
"(",
")",
":",
"fields",
"=",
"line",
".",
"split",
"(",
")",
"if",
"fields",
"[",
"0",
"]",
"==",
"device",
":",
"return",
"True",
"return",
"False"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
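ismounted scans two listings because swap devices do not appear in mount output. The same first-field comparison works locally; `swapon -s` is deprecated in favor of `swapon --show` on newer systems, but either can be slotted in here:

import subprocess

def is_mounted(device):
    for cmd in (['mount'], ['swapon', '-s']):
        out = subprocess.run(cmd, capture_output=True, text=True).stdout
        for line in out.splitlines():
            fields = line.split()
            # Guard against blank or header lines before comparing.
            if fields and fields[0] == device:
                return True
    return False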
query
|
Run a MySQL query.
|
burlap/mysql.py
|
def query(query, use_sudo=True, **kwargs):
"""
Run a MySQL query.
"""
func = use_sudo and run_as_root or run
user = kwargs.get('mysql_user') or env.get('mysql_user')
password = kwargs.get('mysql_password') or env.get('mysql_password')
options = [
'--batch',
'--raw',
'--skip-column-names',
]
if user:
options.append('--user=%s' % quote(user))
if password:
options.append('--password=%s' % quote(password))
options = ' '.join(options)
return func('mysql %(options)s --execute=%(query)s' % {
'options': options,
'query': quote(query),
})
|
def query(query, use_sudo=True, **kwargs):
"""
Run a MySQL query.
"""
func = use_sudo and run_as_root or run
user = kwargs.get('mysql_user') or env.get('mysql_user')
password = kwargs.get('mysql_password') or env.get('mysql_password')
options = [
'--batch',
'--raw',
'--skip-column-names',
]
if user:
options.append('--user=%s' % quote(user))
if password:
options.append('--password=%s' % quote(password))
options = ' '.join(options)
return func('mysql %(options)s --execute=%(query)s' % {
'options': options,
'query': quote(query),
})
|
[
"Run",
"a",
"MySQL",
"query",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/mysql.py#L518-L541
|
[
"def",
"query",
"(",
"query",
",",
"use_sudo",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"func",
"=",
"use_sudo",
"and",
"run_as_root",
"or",
"run",
"user",
"=",
"kwargs",
".",
"get",
"(",
"'mysql_user'",
")",
"or",
"env",
".",
"get",
"(",
"'mysql_user'",
")",
"password",
"=",
"kwargs",
".",
"get",
"(",
"'mysql_password'",
")",
"or",
"env",
".",
"get",
"(",
"'mysql_password'",
")",
"options",
"=",
"[",
"'--batch'",
",",
"'--raw'",
",",
"'--skip-column-names'",
",",
"]",
"if",
"user",
":",
"options",
".",
"append",
"(",
"'--user=%s'",
"%",
"quote",
"(",
"user",
")",
")",
"if",
"password",
":",
"options",
".",
"append",
"(",
"'--password=%s'",
"%",
"quote",
"(",
"password",
")",
")",
"options",
"=",
"' '",
".",
"join",
"(",
"options",
")",
"return",
"func",
"(",
"'mysql %(options)s --execute=%(query)s'",
"%",
"{",
"'options'",
":",
"options",
",",
"'query'",
":",
"quote",
"(",
"query",
")",
",",
"}",
")"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
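A minimal usage sketch for query() above; when the mysql_user/mysql_password kwargs are omitted it falls back to env, and with use_sudo=True the command runs through run_as_root (database name illustrative):

from burlap.mysql import query

tables = query("SHOW TABLES IN myapp;",
               mysql_user='dbuser',
               mysql_password='somerandomstring')
print(tables.splitlines())  # --batch --skip-column-names yields one value per line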
user_exists
|
Check if a MySQL user exists.
|
burlap/mysql.py
|
def user_exists(name, host='localhost', **kwargs):
"""
Check if a MySQL user exists.
"""
with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):
res = query("""
use mysql;
SELECT COUNT(*) FROM user
WHERE User = '%(name)s' AND Host = '%(host)s';
""" % {
'name': name,
'host': host,
}, **kwargs)
return res.succeeded and (int(res) == 1)
|
def user_exists(name, host='localhost', **kwargs):
"""
Check if a MySQL user exists.
"""
with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):
res = query("""
use mysql;
SELECT COUNT(*) FROM user
WHERE User = '%(name)s' AND Host = '%(host)s';
""" % {
'name': name,
'host': host,
}, **kwargs)
return res.succeeded and (int(res) == 1)
|
[
"Check",
"if",
"a",
"MySQL",
"user",
"exists",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/mysql.py#L544-L557
|
[
"def",
"user_exists",
"(",
"name",
",",
"host",
"=",
"'localhost'",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"settings",
"(",
"hide",
"(",
"'running'",
",",
"'stdout'",
",",
"'stderr'",
",",
"'warnings'",
")",
",",
"warn_only",
"=",
"True",
")",
":",
"res",
"=",
"query",
"(",
"\"\"\"\n use mysql;\n SELECT COUNT(*) FROM user\n WHERE User = '%(name)s' AND Host = '%(host)s';\n \"\"\"",
"%",
"{",
"'name'",
":",
"name",
",",
"'host'",
":",
"host",
",",
"}",
",",
"*",
"*",
"kwargs",
")",
"return",
"res",
".",
"succeeded",
"and",
"(",
"int",
"(",
"res",
")",
"==",
"1",
")"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
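user_exists() matches on both the User and Host columns of mysql.user; a sketch checking an any-host account (the '%' host value is illustrative):

from burlap.mysql import user_exists

if user_exists('dbuser', host='%'):
    print("dbuser may connect from any host")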
create_user
|
Create a MySQL user.
Example::
import burlap
# Create DB user if it does not exist
if not burlap.mysql.user_exists('dbuser'):
burlap.mysql.create_user('dbuser', password='somerandomstring')
|
burlap/mysql.py
|
def create_user(name, password, host='localhost', **kwargs):
"""
Create a MySQL user.
Example::
import burlap
# Create DB user if it does not exist
if not burlap.mysql.user_exists('dbuser'):
burlap.mysql.create_user('dbuser', password='somerandomstring')
"""
with settings(hide('running')):
query("CREATE USER '%(name)s'@'%(host)s' IDENTIFIED BY '%(password)s';" % {
'name': name,
'password': password,
'host': host
}, **kwargs)
puts("Created MySQL user '%s'." % name)
|
def create_user(name, password, host='localhost', **kwargs):
"""
Create a MySQL user.
Example::
import burlap
# Create DB user if it does not exist
if not burlap.mysql.user_exists('dbuser'):
burlap.mysql.create_user('dbuser', password='somerandomstring')
"""
with settings(hide('running')):
query("CREATE USER '%(name)s'@'%(host)s' IDENTIFIED BY '%(password)s';" % {
'name': name,
'password': password,
'host': host
}, **kwargs)
puts("Created MySQL user '%s'." % name)
|
[
"Create",
"a",
"MySQL",
"user",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/mysql.py#L560-L579
|
[
"def",
"create_user",
"(",
"name",
",",
"password",
",",
"host",
"=",
"'localhost'",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"settings",
"(",
"hide",
"(",
"'running'",
")",
")",
":",
"query",
"(",
"\"CREATE USER '%(name)s'@'%(host)s' IDENTIFIED BY '%(password)s';\"",
"%",
"{",
"'name'",
":",
"name",
",",
"'password'",
":",
"password",
",",
"'host'",
":",
"host",
"}",
",",
"*",
"*",
"kwargs",
")",
"puts",
"(",
"\"Created MySQL user '%s'.\"",
"%",
"name",
")"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
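One caveat worth a sketch: create_user() interpolates name and password straight into the SQL statement with no escaping, so the caller must only pass trusted values (the wrapper below is hypothetical):

from burlap.mysql import create_user, user_exists

def ensure_user(name, password):  # hypothetical convenience wrapper
    # create_user() does no SQL quoting of its own, so validate inputs first.
    assert "'" not in name and "'" not in password
    if not user_exists(name):
        create_user(name, password)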
database_exists
|
Check if a MySQL database exists.
|
burlap/mysql.py
|
def database_exists(name, **kwargs):
"""
Check if a MySQL database exists.
"""
with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):
res = query("SHOW DATABASES LIKE '%(name)s';" % {
'name': name
}, **kwargs)
return res.succeeded and (res == name)
|
def database_exists(name, **kwargs):
"""
Check if a MySQL database exists.
"""
with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):
res = query("SHOW DATABASES LIKE '%(name)s';" % {
'name': name
}, **kwargs)
return res.succeeded and (res == name)
|
[
"Check",
"if",
"a",
"MySQL",
"database",
"exists",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/mysql.py#L582-L591
|
[
"def",
"database_exists",
"(",
"name",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"settings",
"(",
"hide",
"(",
"'running'",
",",
"'stdout'",
",",
"'stderr'",
",",
"'warnings'",
")",
",",
"warn_only",
"=",
"True",
")",
":",
"res",
"=",
"query",
"(",
"\"SHOW DATABASES LIKE '%(name)s';\"",
"%",
"{",
"'name'",
":",
"name",
"}",
",",
"*",
"*",
"kwargs",
")",
"return",
"res",
".",
"succeeded",
"and",
"(",
"res",
"==",
"name",
")"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
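Note that database_exists() feeds the name into a SHOW DATABASES LIKE pattern, so '%' or '_' in the name act as wildcards, while the final res == name comparison demands an exact echo. A minimal sketch:

from burlap.mysql import database_exists

if not database_exists('myapp'):
    print("database 'myapp' is missing")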
create_database
|
Create a MySQL database.
Example::
import burlap
# Create DB if it does not exist
if not burlap.mysql.database_exists('myapp'):
burlap.mysql.create_database('myapp', owner='dbuser')
|
burlap/mysql.py
|
def create_database(name, owner=None, owner_host='localhost', charset='utf8',
collate='utf8_general_ci', **kwargs):
"""
Create a MySQL database.
Example::
import burlap
# Create DB if it does not exist
if not burlap.mysql.database_exists('myapp'):
burlap.mysql.create_database('myapp', owner='dbuser')
"""
with settings(hide('running')):
query("CREATE DATABASE %(name)s CHARACTER SET %(charset)s COLLATE %(collate)s;" % {
'name': name,
'charset': charset,
'collate': collate
}, **kwargs)
if owner:
query("GRANT ALL PRIVILEGES ON %(name)s.* TO '%(owner)s'@'%(owner_host)s' WITH GRANT OPTION;" % {
'name': name,
'owner': owner,
'owner_host': owner_host
}, **kwargs)
puts("Created MySQL database '%s'." % name)
|
def create_database(name, owner=None, owner_host='localhost', charset='utf8',
collate='utf8_general_ci', **kwargs):
"""
Create a MySQL database.
Example::
import burlap
# Create DB if it does not exist
if not burlap.mysql.database_exists('myapp'):
burlap.mysql.create_database('myapp', owner='dbuser')
"""
with settings(hide('running')):
query("CREATE DATABASE %(name)s CHARACTER SET %(charset)s COLLATE %(collate)s;" % {
'name': name,
'charset': charset,
'collate': collate
}, **kwargs)
if owner:
query("GRANT ALL PRIVILEGES ON %(name)s.* TO '%(owner)s'@'%(owner_host)s' WITH GRANT OPTION;" % {
'name': name,
'owner': owner,
'owner_host': owner_host
}, **kwargs)
puts("Created MySQL database '%s'." % name)
|
[
"Create",
"a",
"MySQL",
"database",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/mysql.py#L594-L623
|
[
"def",
"create_database",
"(",
"name",
",",
"owner",
"=",
"None",
",",
"owner_host",
"=",
"'localhost'",
",",
"charset",
"=",
"'utf8'",
",",
"collate",
"=",
"'utf8_general_ci'",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"settings",
"(",
"hide",
"(",
"'running'",
")",
")",
":",
"query",
"(",
"\"CREATE DATABASE %(name)s CHARACTER SET %(charset)s COLLATE %(collate)s;\"",
"%",
"{",
"'name'",
":",
"name",
",",
"'charset'",
":",
"charset",
",",
"'collate'",
":",
"collate",
"}",
",",
"*",
"*",
"kwargs",
")",
"if",
"owner",
":",
"query",
"(",
"\"GRANT ALL PRIVILEGES ON %(name)s.* TO '%(owner)s'@'%(owner_host)s' WITH GRANT OPTION;\"",
"%",
"{",
"'name'",
":",
"name",
",",
"'owner'",
":",
"owner",
",",
"'owner_host'",
":",
"owner_host",
"}",
",",
"*",
"*",
"kwargs",
")",
"puts",
"(",
"\"Created MySQL database '%s'.\"",
"%",
"name",
")"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
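Combining the four helpers into an idempotent provisioning step, assuming the connecting account holds CREATE and GRANT rights (all names illustrative):

from burlap.mysql import (create_database, create_user,
                          database_exists, user_exists)

if not user_exists('dbuser'):
    create_user('dbuser', password='somerandomstring')
if not database_exists('myapp'):
    # owner= also grants ALL PRIVILEGES on myapp.* to 'dbuser'@'localhost'.
    create_database('myapp', owner='dbuser')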
MySQLSatchel.conf_path
|
Retrieves the path to the MySQL configuration file.
|
burlap/mysql.py
|
def conf_path(self):
"""
Retrieves the path to the MySQL configuration file.
"""
from burlap.system import distrib_id, distrib_release
hostname = self.current_hostname
if hostname not in self._conf_cache:
self.env.conf_specifics[hostname] = self.env.conf_default
d_id = distrib_id()
d_release = distrib_release()
for key in ((d_id, d_release), (d_id,)):
if key in self.env.conf_specifics:
self._conf_cache[hostname] = self.env.conf_specifics[key]
return self._conf_cache[hostname]
|
def conf_path(self):
"""
Retrieves the path to the MySQL configuration file.
"""
from burlap.system import distrib_id, distrib_release
hostname = self.current_hostname
if hostname not in self._conf_cache:
self.env.conf_specifics[hostname] = self.env.conf_default
d_id = distrib_id()
d_release = distrib_release()
for key in ((d_id, d_release), (d_id,)):
if key in self.env.conf_specifics:
self._conf_cache[hostname] = self.env.conf_specifics[key]
return self._conf_cache[hostname]
|
[
"Retrieves",
"the",
"path",
"to",
"the",
"MySQL",
"configuration",
"file",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/mysql.py#L101-L114
|
[
"def",
"conf_path",
"(",
"self",
")",
":",
"from",
"burlap",
".",
"system",
"import",
"distrib_id",
",",
"distrib_release",
"hostname",
"=",
"self",
".",
"current_hostname",
"if",
"hostname",
"not",
"in",
"self",
".",
"_conf_cache",
":",
"self",
".",
"env",
".",
"conf_specifics",
"[",
"hostname",
"]",
"=",
"self",
".",
"env",
".",
"conf_default",
"d_id",
"=",
"distrib_id",
"(",
")",
"d_release",
"=",
"distrib_release",
"(",
")",
"for",
"key",
"in",
"(",
"(",
"d_id",
",",
"d_release",
")",
",",
"(",
"d_id",
",",
")",
")",
":",
"if",
"key",
"in",
"self",
".",
"env",
".",
"conf_specifics",
":",
"self",
".",
"_conf_cache",
"[",
"hostname",
"]",
"=",
"self",
".",
"env",
".",
"conf_specifics",
"[",
"key",
"]",
"return",
"self",
".",
"_conf_cache",
"[",
"hostname",
"]"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
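conf_path() resolves the config path per hostname, preferring a (distro, release) key over a (distro,) key; note the first assignment stores the default under conf_specifics[hostname], which reads as though it were meant to seed _conf_cache[hostname]. A standalone sketch of the apparent intended resolution order (names and paths hypothetical):

def resolve_conf(conf_specifics, conf_default, d_id, d_release):
    # Mirrors conf_path()'s precedence: the most specific key wins.
    for key in ((d_id, d_release), (d_id,)):
        if key in conf_specifics:
            return conf_specifics[key]
    return conf_default

print(resolve_conf({('Ubuntu',): '/etc/mysql/my.cnf'}, '/etc/my.cnf',
                   'Ubuntu', '16.04'))  # -> /etc/mysql/my.cnf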
MySQLSatchel.prep_root_password
|
Enters the root password prompt entries into the debconf cache
so we can set them without user interaction.
We keep this process separate from set_root_password() because we also need to do
this before installing the base MySQL package, because that will also prompt the user
for a root login.
|
burlap/mysql.py
|
def prep_root_password(self, password=None, **kwargs):
"""
Enters the root password prompt entries into the debconf cache
so we can set them without user interaction.
We keep this process separate from set_root_password() because we also need to do
this before installing the base MySQL package, because that will also prompt the user
for a root login.
"""
r = self.database_renderer(**kwargs)
r.env.root_password = password or r.genv.get('db_root_password')
r.sudo("DEBIAN_FRONTEND=noninteractive dpkg --configure -a")
r.sudo("debconf-set-selections <<< 'mysql-server mysql-server/root_password password {root_password}'")
r.sudo("debconf-set-selections <<< 'mysql-server mysql-server/root_password_again password {root_password}'")
|
def prep_root_password(self, password=None, **kwargs):
"""
Enters the root password prompt entries into the debconf cache
so we can set them without user interaction.
We keep this process separate from set_root_password() because we also need to do
this before installing the base MySQL package, because that will also prompt the user
for a root login.
"""
r = self.database_renderer(**kwargs)
r.env.root_password = password or r.genv.get('db_root_password')
r.sudo("DEBIAN_FRONTEND=noninteractive dpkg --configure -a")
r.sudo("debconf-set-selections <<< 'mysql-server mysql-server/root_password password {root_password}'")
r.sudo("debconf-set-selections <<< 'mysql-server mysql-server/root_password_again password {root_password}'")
|
[
"Enters",
"the",
"root",
"password",
"prompt",
"entries",
"into",
"the",
"debconf",
"cache",
"so",
"we",
"can",
"set",
"them",
"without",
"user",
"interaction",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/mysql.py#L165-L178
|
[
"def",
"prep_root_password",
"(",
"self",
",",
"password",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"r",
"=",
"self",
".",
"database_renderer",
"(",
"*",
"*",
"kwargs",
")",
"r",
".",
"env",
".",
"root_password",
"=",
"password",
"or",
"r",
".",
"genv",
".",
"get",
"(",
"'db_root_password'",
")",
"r",
".",
"sudo",
"(",
"\"DEBIAN_FRONTEND=noninteractive dpkg --configure -a\"",
")",
"r",
".",
"sudo",
"(",
"\"debconf-set-selections <<< 'mysql-server mysql-server/root_password password {root_password}'\"",
")",
"r",
".",
"sudo",
"(",
"\"debconf-set-selections <<< 'mysql-server mysql-server/root_password_again password {root_password}'\"",
")"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
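prep_root_password() preseeds debconf so the mysql-server package can install without prompting; as the docstring notes, the ordering matters. A sketch assuming a configured MySQLSatchel instance named mysql:

# Seed the debconf answers *before* installing the mysql-server package,
# so the install never stops at the root-password prompt.
mysql.prep_root_password(password='somerandomstring')
# ...now run the normal package-install tasks; debconf supplies both answers.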
MySQLSatchel.drop_views
|
Drops all views.
|
burlap/mysql.py
|
def drop_views(self, name=None, site=None):
"""
Drops all views.
"""
r = self.database_renderer
result = r.sudo("mysql --batch -v -h {db_host} "
#"-u {db_root_username} -p'{db_root_password}' "
"-u {db_user} -p'{db_password}' "
"--execute=\"SELECT GROUP_CONCAT(CONCAT(TABLE_SCHEMA,'.',table_name) SEPARATOR ', ') AS views "
"FROM INFORMATION_SCHEMA.views WHERE TABLE_SCHEMA = '{db_name}' ORDER BY table_name DESC;\"")
result = re.findall(
r'^views[\s\t\r\n]+(.*)',
result,
flags=re.IGNORECASE|re.DOTALL|re.MULTILINE)
if not result:
return
r.env.db_view_list = result[0]
#cmd = ("mysql -v -h {db_host} -u {db_root_username} -p'{db_root_password}' " \
r.sudo("mysql -v -h {db_host} -u {db_user} -p'{db_password}' " \
"--execute=\"DROP VIEW {db_view_list} CASCADE;\"")
|
def drop_views(self, name=None, site=None):
"""
Drops all views.
"""
r = self.database_renderer
result = r.sudo("mysql --batch -v -h {db_host} "
#"-u {db_root_username} -p'{db_root_password}' "
"-u {db_user} -p'{db_password}' "
"--execute=\"SELECT GROUP_CONCAT(CONCAT(TABLE_SCHEMA,'.',table_name) SEPARATOR ', ') AS views "
"FROM INFORMATION_SCHEMA.views WHERE TABLE_SCHEMA = '{db_name}' ORDER BY table_name DESC;\"")
result = re.findall(
r'^views[\s\t\r\n]+(.*)',
result,
flags=re.IGNORECASE|re.DOTALL|re.MULTILINE)
if not result:
return
r.env.db_view_list = result[0]
#cmd = ("mysql -v -h {db_host} -u {db_root_username} -p'{db_root_password}' " \
r.sudo("mysql -v -h {db_host} -u {db_user} -p'{db_password}' " \
"--execute=\"DROP VIEW {db_view_list} CASCADE;\"")
|
[
"Drops",
"all",
"views",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/mysql.py#L280-L299
|
[
"def",
"drop_views",
"(",
"self",
",",
"name",
"=",
"None",
",",
"site",
"=",
"None",
")",
":",
"r",
"=",
"self",
".",
"database_renderer",
"result",
"=",
"r",
".",
"sudo",
"(",
"\"mysql --batch -v -h {db_host} \"",
"#\"-u {db_root_username} -p'{db_root_password}' \"",
"\"-u {db_user} -p'{db_password}' \"",
"\"--execute=\\\"SELECT GROUP_CONCAT(CONCAT(TABLE_SCHEMA,'.',table_name) SEPARATOR ', ') AS views \"",
"\"FROM INFORMATION_SCHEMA.views WHERE TABLE_SCHEMA = '{db_name}' ORDER BY table_name DESC;\\\"\"",
")",
"result",
"=",
"re",
".",
"findall",
"(",
"r'^views[\\s\\t\\r\\n]+(.*)'",
",",
"result",
",",
"flags",
"=",
"re",
".",
"IGNORECASE",
"|",
"re",
".",
"DOTALL",
"|",
"re",
".",
"MULTILINE",
")",
"if",
"not",
"result",
":",
"return",
"r",
".",
"env",
".",
"db_view_list",
"=",
"result",
"[",
"0",
"]",
"#cmd = (\"mysql -v -h {db_host} -u {db_root_username} -p'{db_root_password}' \" \\",
"r",
".",
"sudo",
"(",
"\"mysql -v -h {db_host} -u {db_user} -p'{db_password}' \"",
"\"--execute=\\\"DROP VIEW {db_view_list} CASCADE;\\\"\"",
")"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
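drop_views() collects a comma-separated list of views from INFORMATION_SCHEMA and drops them in a single statement, returning early when the schema has none. A minimal invocation sketch, assuming a configured MySQLSatchel instance named mysql:

mysql.drop_views(site='mysite')  # site name illustrative; no-op without views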
MySQLSatchel.exists
|
Returns true if a database with the given name exists. False otherwise.
|
burlap/mysql.py
|
def exists(self, **kwargs):
"""
Returns true if a database with the given name exists. False otherwise.
"""
name = kwargs.pop('name', 'default')
site = kwargs.pop('site', None)
r = self.database_renderer(name=name, site=site)
ret = r.run('mysql -h {db_host} -u {db_root_username} '\
'-p"{db_root_password}" -N -B -e "SELECT IF(\'{db_name}\''\
' IN(SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA), '\
'\'exists\', \'notexists\') AS found;"')
if ret is not None:
ret = 'notexists' not in (ret or 'notexists')
if ret is not None:
msg = '%s database on site %s %s exist.' \
% (name.title(), env.SITE, 'DOES' if ret else 'DOES NOT')
if ret:
print(green(msg))
else:
print(red(msg))
return ret
|
def exists(self, **kwargs):
"""
Returns true if a database with the given name exists. False otherwise.
"""
name = kwargs.pop('name', 'default')
site = kwargs.pop('site', None)
r = self.database_renderer(name=name, site=site)
ret = r.run('mysql -h {db_host} -u {db_root_username} '\
'-p"{db_root_password}" -N -B -e "SELECT IF(\'{db_name}\''\
' IN(SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA), '\
'\'exists\', \'notexists\') AS found;"')
if ret is not None:
ret = 'notexists' not in (ret or 'notexists')
if ret is not None:
msg = '%s database on site %s %s exist.' \
% (name.title(), env.SITE, 'DOES' if ret else 'DOES NOT')
if ret:
print(green(msg))
else:
print(red(msg))
return ret
|
[
"Returns",
"true",
"if",
"a",
"database",
"with",
"the",
"given",
"name",
"exists",
".",
"False",
"otherwise",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/mysql.py#L303-L323
|
[
"def",
"exists",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"name",
"=",
"kwargs",
".",
"pop",
"(",
"'name'",
",",
"'default'",
")",
"site",
"=",
"kwargs",
".",
"pop",
"(",
"'site'",
",",
"None",
")",
"r",
"=",
"self",
".",
"database_renderer",
"(",
"name",
"=",
"name",
",",
"site",
"=",
"site",
")",
"ret",
"=",
"r",
".",
"run",
"(",
"'mysql -h {db_host} -u {db_root_username} '",
"'-p\"{db_root_password}\" -N -B -e \"SELECT IF(\\'{db_name}\\''",
"' IN(SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA), '",
"'\\'exists\\', \\'notexists\\') AS found;\"'",
")",
"if",
"ret",
"is",
"not",
"None",
":",
"ret",
"=",
"'notexists'",
"not",
"in",
"(",
"ret",
"or",
"'notexists'",
")",
"if",
"ret",
"is",
"not",
"None",
":",
"msg",
"=",
"'%s database on site %s %s exist.'",
"%",
"(",
"name",
".",
"title",
"(",
")",
",",
"env",
".",
"SITE",
",",
"'DOES'",
"if",
"ret",
"else",
"'DOES NOT'",
")",
"if",
"ret",
":",
"print",
"(",
"green",
"(",
"msg",
")",
")",
"else",
":",
"print",
"(",
"red",
"(",
"msg",
")",
")",
"return",
"ret"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
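exists() prints a colored summary and returns True/False, or None on a dry run (when r.run() yields nothing). A guard sketch, assuming a configured MySQLSatchel instance named mysql:

if mysql.exists(name='default', site='mysite'):  # site name illustrative
    print('database already present; skipping creation')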
MySQLSatchel.load
|
Restores a database snapshot onto the target database server.
If prep_only=1, commands for preparing the load will be generated,
but not the command to finally load the snapshot.
|
burlap/mysql.py
|
def load(self, dump_fn='', prep_only=0, force_upload=0, from_local=0, name=None, site=None, dest_dir=None, force_host=None):
"""
Restores a database snapshot onto the target database server.
If prep_only=1, commands for preparing the load will be generated,
but not the command to finally load the snapshot.
"""
r = self.database_renderer(name=name, site=site)
r.pc('Loading database snapshot.')
# Render the snapshot filename.
r.env.dump_fn = self.get_default_db_fn(fn_template=dump_fn, dest_dir=dest_dir).strip()
from_local = int(from_local)
prep_only = int(prep_only)
missing_local_dump_error = r.format("Database dump file {dump_fn} does not exist.")
# Copy snapshot file to target.
if self.is_local:
r.env.remote_dump_fn = dump_fn
else:
r.env.remote_dump_fn = '/tmp/' + os.path.split(r.env.dump_fn)[-1]
if not prep_only and not self.is_local:
#if int(force_upload) or (not self.is_local and not r.file_exists(r.env.remote_dump_fn)):
if not self.dryrun:
assert os.path.isfile(r.env.dump_fn), missing_local_dump_error
#if self.verbose:
#print('Uploading MySQL database snapshot...')
#r.put(
#local_path=r.env.dump_fn,
#remote_path=r.env.remote_dump_fn)
self.upload_snapshot(name=name, site=site)
if self.is_local and not prep_only and not self.dryrun:
assert os.path.isfile(r.env.dump_fn), missing_local_dump_error
if force_host:
r.env.db_host = force_host
# Drop the database if it's there.
r.run("mysql -v -h {db_host} -u {db_root_username} -p'{db_root_password}' --execute='DROP DATABASE IF EXISTS {db_name}'")
# Now, create the database.
r.run("mysqladmin -h {db_host} -u {db_root_username} -p'{db_root_password}' create {db_name}")
# Create user
with settings(warn_only=True):
r.run("mysql -v -h {db_host} -u {db_root_username} -p'{db_root_password}' --execute=\"DROP USER '{db_user}'@'%%';"
"FLUSH PRIVILEGES;\"")
with settings(warn_only=True):
r.run("mysql -v -h {db_host} -u {db_root_username} -p'{db_root_password}' --execute=\"CREATE USER '{db_user}'@'%%' IDENTIFIED BY '{db_password}'; "
"GRANT ALL PRIVILEGES ON *.* TO '{db_user}'@'%%' WITH GRANT OPTION; "
"FLUSH PRIVILEGES;\"")
self.set_collation(name=name, site=site)
self.set_max_packet_size(name=name, site=site)
# Run any server-specific commands (e.g. to setup permissions) before
# we load the data.
for command in r.env.preload_commands:
r.run(command)
# Restore the database content from the dump file.
if not prep_only:
r.run(r.env.load_command)
self.set_collation(name=name, site=site)
|
def load(self, dump_fn='', prep_only=0, force_upload=0, from_local=0, name=None, site=None, dest_dir=None, force_host=None):
"""
Restores a database snapshot onto the target database server.
If prep_only=1, commands for preparing the load will be generated,
but not the command to finally load the snapshot.
"""
r = self.database_renderer(name=name, site=site)
r.pc('Loading database snapshot.')
# Render the snapshot filename.
r.env.dump_fn = self.get_default_db_fn(fn_template=dump_fn, dest_dir=dest_dir).strip()
from_local = int(from_local)
prep_only = int(prep_only)
missing_local_dump_error = r.format("Database dump file {dump_fn} does not exist.")
# Copy snapshot file to target.
if self.is_local:
r.env.remote_dump_fn = dump_fn
else:
r.env.remote_dump_fn = '/tmp/' + os.path.split(r.env.dump_fn)[-1]
if not prep_only and not self.is_local:
#if int(force_upload) or (not self.is_local and not r.file_exists(r.env.remote_dump_fn)):
if not self.dryrun:
assert os.path.isfile(r.env.dump_fn), missing_local_dump_error
#if self.verbose:
#print('Uploading MySQL database snapshot...')
#r.put(
#local_path=r.env.dump_fn,
#remote_path=r.env.remote_dump_fn)
self.upload_snapshot(name=name, site=site)
if self.is_local and not prep_only and not self.dryrun:
assert os.path.isfile(r.env.dump_fn), missing_local_dump_error
if force_host:
r.env.db_host = force_host
# Drop the database if it's there.
r.run("mysql -v -h {db_host} -u {db_root_username} -p'{db_root_password}' --execute='DROP DATABASE IF EXISTS {db_name}'")
# Now, create the database.
r.run("mysqladmin -h {db_host} -u {db_root_username} -p'{db_root_password}' create {db_name}")
# Create user
with settings(warn_only=True):
r.run("mysql -v -h {db_host} -u {db_root_username} -p'{db_root_password}' --execute=\"DROP USER '{db_user}'@'%%';"
"FLUSH PRIVILEGES;\"")
with settings(warn_only=True):
r.run("mysql -v -h {db_host} -u {db_root_username} -p'{db_root_password}' --execute=\"CREATE USER '{db_user}'@'%%' IDENTIFIED BY '{db_password}'; "
"GRANT ALL PRIVILEGES ON *.* TO '{db_user}'@'%%' WITH GRANT OPTION; "
"FLUSH PRIVILEGES;\"")
self.set_collation(name=name, site=site)
self.set_max_packet_size(name=name, site=site)
# Run any server-specific commands (e.g. to setup permissions) before
# we load the data.
for command in r.env.preload_commands:
r.run(command)
# Restore the database content from the dump file.
if not prep_only:
r.run(r.env.load_command)
self.set_collation(name=name, site=site)
|
[
"Restores",
"a",
"database",
"snapshot",
"onto",
"the",
"target",
"database",
"server",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/mysql.py#L371-L441
|
[
"def",
"load",
"(",
"self",
",",
"dump_fn",
"=",
"''",
",",
"prep_only",
"=",
"0",
",",
"force_upload",
"=",
"0",
",",
"from_local",
"=",
"0",
",",
"name",
"=",
"None",
",",
"site",
"=",
"None",
",",
"dest_dir",
"=",
"None",
",",
"force_host",
"=",
"None",
")",
":",
"r",
"=",
"self",
".",
"database_renderer",
"(",
"name",
"=",
"name",
",",
"site",
"=",
"site",
")",
"r",
".",
"pc",
"(",
"'Loading database snapshot.'",
")",
"# Render the snapshot filename.",
"r",
".",
"env",
".",
"dump_fn",
"=",
"self",
".",
"get_default_db_fn",
"(",
"fn_template",
"=",
"dump_fn",
",",
"dest_dir",
"=",
"dest_dir",
")",
".",
"strip",
"(",
")",
"from_local",
"=",
"int",
"(",
"from_local",
")",
"prep_only",
"=",
"int",
"(",
"prep_only",
")",
"missing_local_dump_error",
"=",
"r",
".",
"format",
"(",
"\"Database dump file {dump_fn} does not exist.\"",
")",
"# Copy snapshot file to target.",
"if",
"self",
".",
"is_local",
":",
"r",
".",
"env",
".",
"remote_dump_fn",
"=",
"dump_fn",
"else",
":",
"r",
".",
"env",
".",
"remote_dump_fn",
"=",
"'/tmp/'",
"+",
"os",
".",
"path",
".",
"split",
"(",
"r",
".",
"env",
".",
"dump_fn",
")",
"[",
"-",
"1",
"]",
"if",
"not",
"prep_only",
"and",
"not",
"self",
".",
"is_local",
":",
"#if int(force_upload) or (not self.is_local and not r.file_exists(r.env.remote_dump_fn)):",
"if",
"not",
"self",
".",
"dryrun",
":",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"r",
".",
"env",
".",
"dump_fn",
")",
",",
"missing_local_dump_error",
"#if self.verbose:",
"#print('Uploading MySQL database snapshot...')",
"#r.put(",
"#local_path=r.env.dump_fn,",
"#remote_path=r.env.remote_dump_fn)",
"self",
".",
"upload_snapshot",
"(",
"name",
"=",
"name",
",",
"site",
"=",
"site",
")",
"if",
"self",
".",
"is_local",
"and",
"not",
"prep_only",
"and",
"not",
"self",
".",
"dryrun",
":",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"r",
".",
"env",
".",
"dump_fn",
")",
",",
"missing_local_dump_error",
"if",
"force_host",
":",
"r",
".",
"env",
".",
"db_host",
"=",
"force_host",
"# Drop the database if it's there.",
"r",
".",
"run",
"(",
"\"mysql -v -h {db_host} -u {db_root_username} -p'{db_root_password}' --execute='DROP DATABASE IF EXISTS {db_name}'\"",
")",
"# Now, create the database.",
"r",
".",
"run",
"(",
"\"mysqladmin -h {db_host} -u {db_root_username} -p'{db_root_password}' create {db_name}\"",
")",
"# Create user",
"with",
"settings",
"(",
"warn_only",
"=",
"True",
")",
":",
"r",
".",
"run",
"(",
"\"mysql -v -h {db_host} -u {db_root_username} -p'{db_root_password}' --execute=\\\"DROP USER '{db_user}'@'%%';\"",
"\"FLUSH PRIVILEGES;\\\"\"",
")",
"with",
"settings",
"(",
"warn_only",
"=",
"True",
")",
":",
"r",
".",
"run",
"(",
"\"mysql -v -h {db_host} -u {db_root_username} -p'{db_root_password}' --execute=\\\"CREATE USER '{db_user}'@'%%' IDENTIFIED BY '{db_password}'; \"",
"\"GRANT ALL PRIVILEGES ON *.* TO '{db_user}'@'%%' WITH GRANT OPTION; \"",
"\"FLUSH PRIVILEGES;\\\"\"",
")",
"self",
".",
"set_collation",
"(",
"name",
"=",
"name",
",",
"site",
"=",
"site",
")",
"self",
".",
"set_max_packet_size",
"(",
"name",
"=",
"name",
",",
"site",
"=",
"site",
")",
"# Run any server-specific commands (e.g. to setup permissions) before",
"# we load the data.",
"for",
"command",
"in",
"r",
".",
"env",
".",
"preload_commands",
":",
"r",
".",
"run",
"(",
"command",
")",
"# Restore the database content from the dump file.",
"if",
"not",
"prep_only",
":",
"r",
".",
"run",
"(",
"r",
".",
"env",
".",
"load_command",
")",
"self",
".",
"set_collation",
"(",
"name",
"=",
"name",
",",
"site",
"=",
"site",
")"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
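Because load() drops and recreates the target database before restoring, it is destructive; prep_only=1 runs the preparation commands but stops short of the final restore. A two-step sketch, assuming a configured MySQLSatchel instance named mysql (path illustrative):

# Prepare only: renders and runs the setup commands, skipping the restore.
mysql.load(dump_fn='/tmp/myapp.sql.gz', prep_only=1, site='mysite')
# Full restore: upload the snapshot, recreate DB and user, then load the dump.
mysql.load(dump_fn='/tmp/myapp.sql.gz', site='mysite')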
MySQLSatchel.shell
|
Opens a SQL shell to the given database, assuming the configured database
and user support this feature.
|
burlap/mysql.py
|
def shell(self, name='default', site=None, use_root=0, **kwargs):
"""
Opens a SQL shell to the given database, assuming the configured database
and user support this feature.
"""
r = self.database_renderer(name=name, site=site)
if int(use_root):
kwargs = dict(
db_user=r.env.db_root_username,
db_password=r.env.db_root_password,
db_host=r.env.db_host,
db_name=r.env.db_name,
)
r.env.update(kwargs)
if not name:
r.env.db_name = ''
r.run('/bin/bash -i -c "mysql -u {db_user} -p\'{db_password}\' -h {db_host} {db_name}"')
|
def shell(self, name='default', site=None, use_root=0, **kwargs):
"""
Opens a SQL shell to the given database, assuming the configured database
and user support this feature.
"""
r = self.database_renderer(name=name, site=site)
if int(use_root):
kwargs = dict(
db_user=r.env.db_root_username,
db_password=r.env.db_root_password,
db_host=r.env.db_host,
db_name=r.env.db_name,
)
r.env.update(kwargs)
if not name:
r.env.db_name = ''
r.run('/bin/bash -i -c "mysql -u {db_user} -p\'{db_password}\' -h {db_host} {db_name}"')
|
[
"Opens",
"a",
"SQL",
"shell",
"to",
"the",
"given",
"database",
"assuming",
"the",
"configured",
"database",
"and",
"user",
"supports",
"this",
"feature",
"."
] |
chrisspen/burlap
|
python
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/mysql.py#L444-L463
|
[
"def",
"shell",
"(",
"self",
",",
"name",
"=",
"'default'",
",",
"site",
"=",
"None",
",",
"use_root",
"=",
"0",
",",
"*",
"*",
"kwargs",
")",
":",
"r",
"=",
"self",
".",
"database_renderer",
"(",
"name",
"=",
"name",
",",
"site",
"=",
"site",
")",
"if",
"int",
"(",
"use_root",
")",
":",
"kwargs",
"=",
"dict",
"(",
"db_user",
"=",
"r",
".",
"env",
".",
"db_root_username",
",",
"db_password",
"=",
"r",
".",
"env",
".",
"db_root_password",
",",
"db_host",
"=",
"r",
".",
"env",
".",
"db_host",
",",
"db_name",
"=",
"r",
".",
"env",
".",
"db_name",
",",
")",
"r",
".",
"env",
".",
"update",
"(",
"kwargs",
")",
"if",
"not",
"name",
":",
"r",
".",
"env",
".",
"db_name",
"=",
"''",
"r",
".",
"run",
"(",
"'/bin/bash -i -c \"mysql -u {db_user} -p\\'{db_password}\\' -h {db_host} {db_name}\"'",
")"
] |
a92b0a8e5206850bb777c74af8421ea8b33779bd
|
valid
|
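shell() execs an interactive mysql client under bash -i; use_root=1 swaps in the root credentials from the renderer env. A sketch, assuming a configured MySQLSatchel instance named mysql:

mysql.shell(name='default', site='mysite')  # connect as the app user
mysql.shell(use_root=1)                     # connect with root credentials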
tic_single_object_crossmatch
|
This does a cross-match against the TIC catalog on MAST.
Speed tests: about 10 crossmatches per second. (-> 3 hours for 10^5 objects
to crossmatch).
Parameters
----------
ra,dec : float
The coordinates to cross match against, all in decimal degrees.
radius : float
The cross-match radius to use, in decimal degrees.
Returns
-------
dict
Returns the match results JSON from MAST loaded into a dict.
|
astrobase/services/tic.py
|
def tic_single_object_crossmatch(ra, dec, radius):
'''This does a cross-match against the TIC catalog on MAST.
Speed tests: about 10 crossmatches per second. (-> 3 hours for 10^5 objects
to crossmatch).
Parameters
----------
ra,dec : float
The coordinates to cross match against, all in decimal degrees.
radius : float
The cross-match radius to use, in decimal degrees.
Returns
-------
dict
Returns the match results JSON from MAST loaded into a dict.
'''
for val in ra,dec,radius:
if not isinstance(val, float):
raise AssertionError('please input ra, dec, radius in decimal degrees')
# This is a json object
crossmatchInput = {"fields":[{"name":"ra","type":"float"},
{"name":"dec","type":"float"}],
"data":[{"ra":ra,"dec":dec}]}
request = {"service":"Mast.Tic.Crossmatch",
"data":crossmatchInput,
"params":{
"raColumn":"ra",
"decColumn":"dec",
"radius":radius
},
"format":"json",
'removecache':True}
headers,out_string = _mast_query(request)
out_data = json.loads(out_string)
return out_data
|
def tic_single_object_crossmatch(ra, dec, radius):
'''This does a cross-match against the TIC catalog on MAST.
Speed tests: about 10 crossmatches per second. (-> 3 hours for 10^5 objects
to crossmatch).
Parameters
----------
ra,dec : float
The coordinates to cross match against, all in decimal degrees.
radius : float
The cross-match radius to use, in decimal degrees.
Returns
-------
dict
Returns the match results JSON from MAST loaded into a dict.
'''
for val in ra,dec,radius:
if not isinstance(val, float):
raise AssertionError('please input ra, dec, radius in decimal degrees')
# This is a json object
crossmatchInput = {"fields":[{"name":"ra","type":"float"},
{"name":"dec","type":"float"}],
"data":[{"ra":ra,"dec":dec}]}
request = {"service":"Mast.Tic.Crossmatch",
"data":crossmatchInput,
"params":{
"raColumn":"ra",
"decColumn":"dec",
"radius":radius
},
"format":"json",
'removecache':True}
headers,out_string = _mast_query(request)
out_data = json.loads(out_string)
return out_data
|
[
"This",
"does",
"a",
"cross",
"-",
"match",
"against",
"the",
"TIC",
"catalog",
"on",
"MAST",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/services/tic.py#L127-L172
|
[
"def",
"tic_single_object_crossmatch",
"(",
"ra",
",",
"dec",
",",
"radius",
")",
":",
"for",
"val",
"in",
"ra",
",",
"dec",
",",
"radius",
":",
"if",
"not",
"isinstance",
"(",
"val",
",",
"float",
")",
":",
"raise",
"AssertionError",
"(",
"'please input ra, dec, radius in decimal degrees'",
")",
"# This is a json object",
"crossmatchInput",
"=",
"{",
"\"fields\"",
":",
"[",
"{",
"\"name\"",
":",
"\"ra\"",
",",
"\"type\"",
":",
"\"float\"",
"}",
",",
"{",
"\"name\"",
":",
"\"dec\"",
",",
"\"type\"",
":",
"\"float\"",
"}",
"]",
",",
"\"data\"",
":",
"[",
"{",
"\"ra\"",
":",
"ra",
",",
"\"dec\"",
":",
"dec",
"}",
"]",
"}",
"request",
"=",
"{",
"\"service\"",
":",
"\"Mast.Tic.Crossmatch\"",
",",
"\"data\"",
":",
"crossmatchInput",
",",
"\"params\"",
":",
"{",
"\"raColumn\"",
":",
"\"ra\"",
",",
"\"decColumn\"",
":",
"\"dec\"",
",",
"\"radius\"",
":",
"radius",
"}",
",",
"\"format\"",
":",
"\"json\"",
",",
"'removecache'",
":",
"True",
"}",
"headers",
",",
"out_string",
"=",
"_mast_query",
"(",
"request",
")",
"out_data",
"=",
"json",
".",
"loads",
"(",
"out_string",
")",
"return",
"out_data"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
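A minimal call sketch for tic_single_object_crossmatch(): all three arguments must be plain floats in decimal degrees, and the return value is the parsed MAST JSON (the 'data' field layout follows MAST's crossmatch convention; coordinates illustrative):

from astrobase.services.tic import tic_single_object_crossmatch

# 2 arcsec matching radius, expressed in decimal degrees.
result = tic_single_object_crossmatch(315.01, -27.53, 2.0/3600.0)
for row in result.get('data', []):  # 'data' holds the match rows, if any
    print(row)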
normalized_flux_to_mag
|
This converts the normalized fluxes in the TESS lcdicts to TESS mags.
Uses the object's TESS mag stored in lcdict['objectinfo']['tessmag']::
mag - object_tess_mag = -2.5 log (flux/median_flux)
Parameters
----------
lcdict : lcdict
An `lcdict` produced by `read_tess_fitslc` or
`consolidate_tess_fitslc`. This must have normalized fluxes in its
measurement columns (use the `normalize` kwarg for these functions).
columns : sequence of str
The column keys of the normalized flux and background measurements in
the `lcdict` to operate on and convert to magnitudes in TESS band (T).
Returns
-------
lcdict
The returned `lcdict` will contain extra columns corresponding to
magnitudes for each input normalized flux/background column.
|
astrobase/astrotess.py
|
def normalized_flux_to_mag(lcdict,
columns=('sap.sap_flux',
'sap.sap_flux_err',
'sap.sap_bkg',
'sap.sap_bkg_err',
'pdc.pdcsap_flux',
'pdc.pdcsap_flux_err')):
'''This converts the normalized fluxes in the TESS lcdicts to TESS mags.
Uses the object's TESS mag stored in lcdict['objectinfo']['tessmag']::
mag - object_tess_mag = -2.5 log (flux/median_flux)
Parameters
----------
lcdict : lcdict
An `lcdict` produced by `read_tess_fitslc` or
`consolidate_tess_fitslc`. This must have normalized fluxes in its
measurement columns (use the `normalize` kwarg for these functions).
columns : sequence of str
The column keys of the normalized flux and background measurements in
the `lcdict` to operate on and convert to magnitudes in TESS band (T).
Returns
-------
lcdict
The returned `lcdict` will contain extra columns corresponding to
magnitudes for each input normalized flux/background column.
'''
tess_mag = lcdict['objectinfo']['tessmag']
for key in columns:
k1, k2 = key.split('.')
if 'err' not in k2:
lcdict[k1][k2.replace('flux','mag')] = (
tess_mag - 2.5*np.log10(lcdict[k1][k2])
)
else:
lcdict[k1][k2.replace('flux','mag')] = (
- 2.5*np.log10(1.0 - lcdict[k1][k2])
)
return lcdict
|
def normalized_flux_to_mag(lcdict,
columns=('sap.sap_flux',
'sap.sap_flux_err',
'sap.sap_bkg',
'sap.sap_bkg_err',
'pdc.pdcsap_flux',
'pdc.pdcsap_flux_err')):
'''This converts the normalized fluxes in the TESS lcdicts to TESS mags.
Uses the object's TESS mag stored in lcdict['objectinfo']['tessmag']::
mag - object_tess_mag = -2.5 log (flux/median_flux)
Parameters
----------
lcdict : lcdict
An `lcdict` produced by `read_tess_fitslc` or
`consolidate_tess_fitslc`. This must have normalized fluxes in its
measurement columns (use the `normalize` kwarg for these functions).
columns : sequence of str
The column keys of the normalized flux and background measurements in
the `lcdict` to operate on and convert to magnitudes in TESS band (T).
Returns
-------
lcdict
The returned `lcdict` will contain extra columns corresponding to
magnitudes for each input normalized flux/background column.
'''
tess_mag = lcdict['objectinfo']['tessmag']
for key in columns:
k1, k2 = key.split('.')
if 'err' not in k2:
lcdict[k1][k2.replace('flux','mag')] = (
tess_mag - 2.5*np.log10(lcdict[k1][k2])
)
else:
lcdict[k1][k2.replace('flux','mag')] = (
- 2.5*np.log10(1.0 - lcdict[k1][k2])
)
return lcdict
|
[
"This",
"converts",
"the",
"normalized",
"fluxes",
"in",
"the",
"TESS",
"lcdicts",
"to",
"TESS",
"mags",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/astrotess.py#L56-L108
|
[
"def",
"normalized_flux_to_mag",
"(",
"lcdict",
",",
"columns",
"=",
"(",
"'sap.sap_flux'",
",",
"'sap.sap_flux_err'",
",",
"'sap.sap_bkg'",
",",
"'sap.sap_bkg_err'",
",",
"'pdc.pdcsap_flux'",
",",
"'pdc.pdcsap_flux_err'",
")",
")",
":",
"tess_mag",
"=",
"lcdict",
"[",
"'objectinfo'",
"]",
"[",
"'tessmag'",
"]",
"for",
"key",
"in",
"columns",
":",
"k1",
",",
"k2",
"=",
"key",
".",
"split",
"(",
"'.'",
")",
"if",
"'err'",
"not",
"in",
"k2",
":",
"lcdict",
"[",
"k1",
"]",
"[",
"k2",
".",
"replace",
"(",
"'flux'",
",",
"'mag'",
")",
"]",
"=",
"(",
"tess_mag",
"-",
"2.5",
"*",
"np",
".",
"log10",
"(",
"lcdict",
"[",
"k1",
"]",
"[",
"k2",
"]",
")",
")",
"else",
":",
"lcdict",
"[",
"k1",
"]",
"[",
"k2",
".",
"replace",
"(",
"'flux'",
",",
"'mag'",
")",
"]",
"=",
"(",
"-",
"2.5",
"*",
"np",
".",
"log10",
"(",
"1.0",
"-",
"lcdict",
"[",
"k1",
"]",
"[",
"k2",
"]",
")",
")",
"return",
"lcdict"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
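In normalized_flux_to_mag() the flux columns map through mag = tessmag - 2.5*log10(normalized flux), while the *_err columns use -2.5*log10(1 - err), the usual small-error magnitude approximation. A usage sketch (filename illustrative):

from astrobase.astrotess import read_tess_fitslc, normalized_flux_to_mag

lcdict = read_tess_fitslc('tess_lc.fits', normalize=True)  # normalized fluxes required
lcdict = normalized_flux_to_mag(lcdict)
print(lcdict['pdc']['pdcsap_mag'][:5])  # e.g. pdcsap_flux -> pdcsap_mag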
get_time_flux_errs_from_Ames_lightcurve
|
Reads TESS Ames-format FITS light curve files.
MIT TOI alerts include Ames lightcurve files. This function gets the finite,
nonzero times, fluxes, and errors with QUALITY == 0.
NOTE: the PDCSAP lightcurve typically still needs "pre-whitening" after this
step.
.. deprecated:: 0.3.20
This function will be removed in astrobase v0.4.2. Use the
`read_tess_fitslc` and `consolidate_tess_fitslc` functions instead.
Parameters
----------
infile : str
The path to `*.fits.gz` TOI alert file, from Ames pipeline.
lctype : {'PDCSAP','SAP'}
The type of light curve to extract from the FITS LC file.
cadence_min : int
The expected frame cadence in units of minutes. Raises ValueError if you
use the wrong cadence.
Returns
-------
tuple
The tuple returned is of the form:
(times, normalized (to median) fluxes, flux errors)
|
astrobase/astrotess.py
|
def get_time_flux_errs_from_Ames_lightcurve(infile,
lctype,
cadence_min=2):
'''Reads TESS Ames-format FITS light curve files.
MIT TOI alerts include Ames lightcurve files. This function gets the finite,
nonzero times, fluxes, and errors with QUALITY == 0.
NOTE: the PDCSAP lightcurve typically still needs "pre-whitening" after this
step.
.. deprecated:: 0.3.20
This function will be removed in astrobase v0.4.2. Use the
`read_tess_fitslc` and `consolidate_tess_fitslc` functions instead.
Parameters
----------
infile : str
The path to `*.fits.gz` TOI alert file, from Ames pipeline.
lctype : {'PDCSAP','SAP'}
The type of light curve to extract from the FITS LC file.
cadence_min : int
The expected frame cadence in units of minutes. Raises ValueError if you
use the wrong cadence.
Returns
-------
tuple
The tuple returned is of the form:
(times, normalized (to median) fluxes, flux errors)
'''
warnings.warn(
"Use the astrotess.read_tess_fitslc and "
"astrotess.consolidate_tess_fitslc functions instead of this function. "
"This function will be removed in astrobase v0.4.2.",
FutureWarning
)
if lctype not in ('PDCSAP','SAP'):
raise ValueError('unknown light curve type requested: %s' % lctype)
hdulist = pyfits.open(infile)
main_hdr = hdulist[0].header
lc_hdr = hdulist[1].header
lc = hdulist[1].data
if (('Ames' not in main_hdr['ORIGIN']) or
('LIGHTCURVE' not in lc_hdr['EXTNAME'])):
raise ValueError(
'could not understand input LC format. '
'Is it a TESS TOI LC file?'
)
time = lc['TIME']
flux = lc['{:s}_FLUX'.format(lctype)]
err_flux = lc['{:s}_FLUX_ERR'.format(lctype)]
# REMOVE POINTS FLAGGED WITH:
# attitude tweaks, safe mode, coarse/earth pointing, argabrithening events,
# reaction wheel desaturation events, cosmic rays in optimal aperture
# pixels, manual excludes, discontinuities, stray light from Earth or Moon
# in camera FoV.
# (Note: it's not clear to me what a lot of these mean. Also most of these
# columns are probably not correctly propagated right now.)
sel = (lc['QUALITY'] == 0)
sel &= np.isfinite(time)
sel &= np.isfinite(flux)
sel &= np.isfinite(err_flux)
sel &= ~np.isnan(time)
sel &= ~np.isnan(flux)
sel &= ~np.isnan(err_flux)
sel &= (time != 0)
sel &= (flux != 0)
sel &= (err_flux != 0)
time = time[sel]
flux = flux[sel]
err_flux = err_flux[sel]
# ensure desired cadence
lc_cadence_diff = np.abs(np.nanmedian(np.diff(time))*24*60 - cadence_min)
if lc_cadence_diff > 1.0e-2:
raise ValueError(
'the light curve is not at the required cadence specified: %.2f' %
cadence_min
)
fluxmedian = np.nanmedian(flux)
flux /= fluxmedian
err_flux /= fluxmedian
return time, flux, err_flux
|
def get_time_flux_errs_from_Ames_lightcurve(infile,
lctype,
cadence_min=2):
'''Reads TESS Ames-format FITS light curve files.
MIT TOI alerts include Ames lightcurve files. This function gets the finite,
nonzero times, fluxes, and errors with QUALITY == 0.
NOTE: the PDCSAP lightcurve typically still needs "pre-whitening" after this
step.
.. deprecated:: 0.3.20
This function will be removed in astrobase v0.4.2. Use the
`read_tess_fitslc` and `consolidate_tess_fitslc` functions instead.
Parameters
----------
infile : str
The path to `*.fits.gz` TOI alert file, from Ames pipeline.
lctype : {'PDCSAP','SAP'}
The type of light curve to extract from the FITS LC file.
cadence_min : int
The expected frame cadence in units of minutes. Raises ValueError if you
use the wrong cadence.
Returns
-------
tuple
The tuple returned is of the form:
(times, normalized (to median) fluxes, flux errors)
'''
warnings.warn(
"Use the astrotess.read_tess_fitslc and "
"astrotess.consolidate_tess_fitslc functions instead of this function. "
"This function will be removed in astrobase v0.4.2.",
FutureWarning
)
if lctype not in ('PDCSAP','SAP'):
raise ValueError('unknown light curve type requested: %s' % lctype)
hdulist = pyfits.open(infile)
main_hdr = hdulist[0].header
lc_hdr = hdulist[1].header
lc = hdulist[1].data
if (('Ames' not in main_hdr['ORIGIN']) or
('LIGHTCURVE' not in lc_hdr['EXTNAME'])):
raise ValueError(
'could not understand input LC format. '
'Is it a TESS TOI LC file?'
)
time = lc['TIME']
flux = lc['{:s}_FLUX'.format(lctype)]
err_flux = lc['{:s}_FLUX_ERR'.format(lctype)]
# REMOVE POINTS FLAGGED WITH:
# attitude tweaks, safe mode, coarse/earth pointing, argabrithening events,
# reaction wheel desaturation events, cosmic rays in optimal aperture
# pixels, manual excludes, discontinuities, stray light from Earth or Moon
# in camera FoV.
# (Note: it's not clear to me what a lot of these mean. Also most of these
# columns are probably not correctly propagated right now.)
sel = (lc['QUALITY'] == 0)
sel &= np.isfinite(time)
sel &= np.isfinite(flux)
sel &= np.isfinite(err_flux)
sel &= ~np.isnan(time)
sel &= ~np.isnan(flux)
sel &= ~np.isnan(err_flux)
sel &= (time != 0)
sel &= (flux != 0)
sel &= (err_flux != 0)
time = time[sel]
flux = flux[sel]
err_flux = err_flux[sel]
# ensure desired cadence
lc_cadence_diff = np.abs(np.nanmedian(np.diff(time))*24*60 - cadence_min)
if lc_cadence_diff > 1.0e-2:
raise ValueError(
'the light curve is not at the required cadence specified: %.2f' %
cadence_min
)
fluxmedian = np.nanmedian(flux)
flux /= fluxmedian
err_flux /= fluxmedian
return time, flux, err_flux
|
[
"Reads",
"TESS",
"Ames",
"-",
"format",
"FITS",
"light",
"curve",
"files",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/astrotess.py#L116-L216
|
[
"def",
"get_time_flux_errs_from_Ames_lightcurve",
"(",
"infile",
",",
"lctype",
",",
"cadence_min",
"=",
"2",
")",
":",
"warnings",
".",
"warn",
"(",
"\"Use the astrotess.read_tess_fitslc and \"",
"\"astrotess.consolidate_tess_fitslc functions instead of this function. \"",
"\"This function will be removed in astrobase v0.4.2.\"",
",",
"FutureWarning",
")",
"if",
"lctype",
"not",
"in",
"(",
"'PDCSAP'",
",",
"'SAP'",
")",
":",
"raise",
"ValueError",
"(",
"'unknown light curve type requested: %s'",
"%",
"lctype",
")",
"hdulist",
"=",
"pyfits",
".",
"open",
"(",
"infile",
")",
"main_hdr",
"=",
"hdulist",
"[",
"0",
"]",
".",
"header",
"lc_hdr",
"=",
"hdulist",
"[",
"1",
"]",
".",
"header",
"lc",
"=",
"hdulist",
"[",
"1",
"]",
".",
"data",
"if",
"(",
"(",
"'Ames'",
"not",
"in",
"main_hdr",
"[",
"'ORIGIN'",
"]",
")",
"or",
"(",
"'LIGHTCURVE'",
"not",
"in",
"lc_hdr",
"[",
"'EXTNAME'",
"]",
")",
")",
":",
"raise",
"ValueError",
"(",
"'could not understand input LC format. '",
"'Is it a TESS TOI LC file?'",
")",
"time",
"=",
"lc",
"[",
"'TIME'",
"]",
"flux",
"=",
"lc",
"[",
"'{:s}_FLUX'",
".",
"format",
"(",
"lctype",
")",
"]",
"err_flux",
"=",
"lc",
"[",
"'{:s}_FLUX_ERR'",
".",
"format",
"(",
"lctype",
")",
"]",
"# REMOVE POINTS FLAGGED WITH:",
"# attitude tweaks, safe mode, coarse/earth pointing, argabrithening events,",
"# reaction wheel desaturation events, cosmic rays in optimal aperture",
"# pixels, manual excludes, discontinuities, stray light from Earth or Moon",
"# in camera FoV.",
"# (Note: it's not clear to me what a lot of these mean. Also most of these",
"# columns are probably not correctly propagated right now.)",
"sel",
"=",
"(",
"lc",
"[",
"'QUALITY'",
"]",
"==",
"0",
")",
"sel",
"&=",
"np",
".",
"isfinite",
"(",
"time",
")",
"sel",
"&=",
"np",
".",
"isfinite",
"(",
"flux",
")",
"sel",
"&=",
"np",
".",
"isfinite",
"(",
"err_flux",
")",
"sel",
"&=",
"~",
"np",
".",
"isnan",
"(",
"time",
")",
"sel",
"&=",
"~",
"np",
".",
"isnan",
"(",
"flux",
")",
"sel",
"&=",
"~",
"np",
".",
"isnan",
"(",
"err_flux",
")",
"sel",
"&=",
"(",
"time",
"!=",
"0",
")",
"sel",
"&=",
"(",
"flux",
"!=",
"0",
")",
"sel",
"&=",
"(",
"err_flux",
"!=",
"0",
")",
"time",
"=",
"time",
"[",
"sel",
"]",
"flux",
"=",
"flux",
"[",
"sel",
"]",
"err_flux",
"=",
"err_flux",
"[",
"sel",
"]",
"# ensure desired cadence",
"lc_cadence_diff",
"=",
"np",
".",
"abs",
"(",
"np",
".",
"nanmedian",
"(",
"np",
".",
"diff",
"(",
"time",
")",
")",
"*",
"24",
"*",
"60",
"-",
"cadence_min",
")",
"if",
"lc_cadence_diff",
">",
"1.0e-2",
":",
"raise",
"ValueError",
"(",
"'the light curve is not at the required cadence specified: %.2f'",
"%",
"cadence_min",
")",
"fluxmedian",
"=",
"np",
".",
"nanmedian",
"(",
"flux",
")",
"flux",
"/=",
"fluxmedian",
"err_flux",
"/=",
"fluxmedian",
"return",
"time",
",",
"flux",
",",
"err_flux"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
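The function above is deprecated in favor of read_tess_fitslc/consolidate_tess_fitslc, but the old interface is a one-liner; it raises ValueError if the median cadence differs from cadence_min (filename illustrative):

from astrobase.astrotess import get_time_flux_errs_from_Ames_lightcurve

time, flux, err_flux = get_time_flux_errs_from_Ames_lightcurve(
    'toi-alert-lc.fits.gz',  # illustrative Ames TOI alert filename
    'PDCSAP'                 # or 'SAP'
)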
read_tess_fitslc
|
This extracts the light curve from a single TESS .lc.fits file.
This works on the light curves available at MAST.
TODO: look at:
https://archive.stsci.edu/missions/tess/doc/EXP-TESS-ARC-ICD-TM-0014.pdf
for details on the column descriptions and to fill in any other info we
need.
Parameters
----------
lcfits : str
The filename of a MAST TESS light curve FITS file.
headerkeys : list
A list of FITS header keys that will be extracted from the FITS light
curve file. These describe the observations. The default value for this
is given in `LCHEADERKEYS` above.
datakeys : list
A list of FITS column names that correspond to the auxiliary
measurements in the light curve. The default is `LCDATAKEYS` above.
sapkeys : list
A list of FITS column names that correspond to the SAP flux
measurements in the light curve. The default is `LCSAPKEYS` above.
pdckeys : list
A list of FITS column names that correspond to the PDC flux
measurements in the light curve. The default is `LCPDCKEYS` above.
topkeys : list
A list of FITS header keys that describe the object in the light
curve. The default is `LCTOPKEYS` above.
apkeys : list
A list of FITS header keys that describe the flux measurement apertures
used by the TESS pipeline. The default is `LCAPERTUREKEYS` above.
normalize : bool
If True, then the light curve's SAP_FLUX and PDCSAP_FLUX measurements
will be normalized to 1.0 by dividing out the median flux for the
component light curve.
appendto : lcdict or None
If appendto is an `lcdict`, will append measurements of this `lcdict` to
that `lcdict`. This is used for consolidating light curves for the same
object across different files (sectors/cameras/CCDs?). The appending
does not care about the time order. To consolidate light curves in time
order, use `consolidate_tess_fitslc` below.
filterqualityflags : bool
If True, will remove any measurements that have non-zero quality flags
present. This usually indicates an issue with the instrument or
spacecraft.
nanfilter : {'sap','pdc','sap,pdc'} or None
Indicates the flux measurement type(s) to apply the filtering to.
timestoignore : list of tuples or None
This is of the form::
[(time1_start, time1_end), (time2_start, time2_end), ...]
and indicates the start and end times to mask out of the final
lcdict. Use this to remove anything that wasn't caught by the quality
flags.
Returns
-------
lcdict
Returns an `lcdict` (this is useable by most astrobase functions for LC
processing).
|
astrobase/astrotess.py
|
def read_tess_fitslc(lcfits,
headerkeys=LCHEADERKEYS,
datakeys=LCDATAKEYS,
sapkeys=LCSAPKEYS,
pdckeys=LCPDCKEYS,
topkeys=LCTOPKEYS,
apkeys=LCAPERTUREKEYS,
normalize=False,
appendto=None,
filterqualityflags=False,
nanfilter=None,
timestoignore=None):
'''This extracts the light curve from a single TESS .lc.fits file.
This works on the light curves available at MAST.
TODO: look at:
https://archive.stsci.edu/missions/tess/doc/EXP-TESS-ARC-ICD-TM-0014.pdf
for details on the column descriptions and to fill in any other info we
need.
Parameters
----------
lcfits : str
The filename of a MAST TESS light curve FITS file.
headerkeys : list
A list of FITS header keys that will be extracted from the FITS light
curve file. These describe the observations. The default value for this
is given in `LCHEADERKEYS` above.
datakeys : list
A list of FITS column names that correspond to the auxiliary
measurements in the light curve. The default is `LCDATAKEYS` above.
sapkeys : list
A list of FITS column names that correspond to the SAP flux
measurements in the light curve. The default is `LCSAPKEYS` above.
pdckeys : list
A list of FITS column names that correspond to the PDC flux
measurements in the light curve. The default is `LCPDCKEYS` above.
topkeys : list
A list of FITS header keys that describe the object in the light
curve. The default is `LCTOPKEYS` above.
apkeys : list
A list of FITS header keys that describe the flux measurement apertures
used by the TESS pipeline. The default is `LCAPERTUREKEYS` above.
normalize : bool
If True, then the light curve's SAP_FLUX and PDCSAP_FLUX measurements
will be normalized to 1.0 by dividing out the median flux for the
component light curve.
appendto : lcdict or None
If appendto is an `lcdict`, will append measurements of this `lcdict` to
that `lcdict`. This is used for consolidating light curves for the same
object across different files (sectors/cameras/CCDs?). The appending
does not care about the time order. To consolidate light curves in time
order, use `consolidate_tess_fitslc` below.
filterqualityflags : bool
If True, will remove any measurements that have non-zero quality flags
present. This usually indicates an issue with the instrument or
spacecraft.
nanfilter : {'sap','pdc','sap,pdc'} or None
Indicates the flux measurement type(s) to apply the filtering to.
timestoignore : list of tuples or None
This is of the form::
[(time1_start, time1_end), (time2_start, time2_end), ...]
and indicates the start and end times to mask out of the final
lcdict. Use this to remove anything that wasn't caught by the quality
flags.
Returns
-------
lcdict
Returns an `lcdict` (this is useable by most astrobase functions for LC
processing).
'''
# read the fits file
hdulist = pyfits.open(lcfits)
lchdr, lcdata = hdulist[1].header, hdulist[1].data
lctophdr, lcaperturehdr, lcaperturedata = (hdulist[0].header,
hdulist[2].header,
hdulist[2].data)
hdulist.close()
hdrinfo = {}
# now get the values we want from the header
for key in headerkeys:
if key in lchdr and lchdr[key] is not None:
hdrinfo[key.lower()] = lchdr[key]
else:
hdrinfo[key.lower()] = None
# get the number of detections
ndet = lchdr['NAXIS2']
# get the info from the topheader
for key in topkeys:
if key in lctophdr and lctophdr[key] is not None:
hdrinfo[key.lower()] = lctophdr[key]
else:
hdrinfo[key.lower()] = None
# get the info from the lcaperturehdr
for key in lcaperturehdr:
if key in lcaperturehdr and lcaperturehdr[key] is not None:
hdrinfo[key.lower()] = lcaperturehdr[key]
else:
hdrinfo[key.lower()] = None
# if we're appending to another lcdict
if appendto and isinstance(appendto, dict):
lcdict = appendto
# update lcinfo
lcdict['lcinfo']['timesys'].append(hdrinfo['timesys'])
lcdict['lcinfo']['bjdoffset'].append(
hdrinfo['bjdrefi'] + hdrinfo['bjdreff']
)
lcdict['lcinfo']['lcaperture'].append(lcaperturedata)
lcdict['lcinfo']['aperpix_used'].append(hdrinfo['npixsap'])
lcdict['lcinfo']['aperpix_unused'].append(hdrinfo['npixmiss'])
lcdict['lcinfo']['pixarcsec'].append(
(np.abs(hdrinfo['cdelt1']) +
np.abs(hdrinfo['cdelt2']))*3600.0/2.0
)
lcdict['lcinfo']['ndet'].append(ndet)
lcdict['lcinfo']['exptime'].append(hdrinfo['exposure'])
lcdict['lcinfo']['sector'].append(hdrinfo['sector'])
lcdict['lcinfo']['camera'].append(hdrinfo['camera'])
lcdict['lcinfo']['ccd'].append(hdrinfo['ccd'])
lcdict['lcinfo']['date_obs_start'].append(hdrinfo['date-obs'])
lcdict['lcinfo']['date_obs_end'].append(hdrinfo['date-end'])
lcdict['lcinfo']['pixel_table_id'].append(hdrinfo['pxtable'])
lcdict['lcinfo']['origin'].append(hdrinfo['origin'])
lcdict['lcinfo']['datarelease'].append(hdrinfo['data_rel'])
lcdict['lcinfo']['procversion'].append(hdrinfo['procver'])
lcdict['lcinfo']['tic_version'].append(hdrinfo['ticver'])
lcdict['lcinfo']['cr_mitigation'].append(hdrinfo['crmiten'])
lcdict['lcinfo']['cr_blocksize'].append(hdrinfo['crblksz'])
lcdict['lcinfo']['cr_spocclean'].append(hdrinfo['crspoc'])
# update the varinfo for this light curve
lcdict['varinfo']['cdpp0_5'].append(hdrinfo['cdpp0_5'])
lcdict['varinfo']['cdpp1_0'].append(hdrinfo['cdpp1_0'])
lcdict['varinfo']['cdpp2_0'].append(hdrinfo['cdpp2_0'])
lcdict['varinfo']['pdcvar'].append(hdrinfo['pdcvar'])
lcdict['varinfo']['pdcmethod'].append(hdrinfo['pdcmethd'])
lcdict['varinfo']['target_flux_total_flux_ratio_in_aper'].append(
hdrinfo['crowdsap']
)
lcdict['varinfo']['target_flux_fraction_in_aper'].append(
hdrinfo['flfrcsap']
)
# update the light curve columns now
for key in datakeys:
if key.lower() in lcdict:
lcdict[key.lower()] = (
np.concatenate((lcdict[key.lower()], lcdata[key]))
)
for key in sapkeys:
if key.lower() in lcdict['sap']:
sapflux_median = np.nanmedian(lcdata['SAP_FLUX'])
# normalize the current flux measurements if needed
if normalize and key == 'SAP_FLUX':
thislcdata = lcdata[key] / sapflux_median
elif normalize and key == 'SAP_FLUX_ERR':
thislcdata = lcdata[key] / sapflux_median
elif normalize and key == 'SAP_BKG':
thislcdata = lcdata[key] / sapflux_median
elif normalize and key == 'SAP_BKG_ERR':
thislcdata = lcdata[key] / sapflux_median
else:
thislcdata = lcdata[key]
lcdict['sap'][key.lower()] = (
np.concatenate((lcdict['sap'][key.lower()], thislcdata))
)
for key in pdckeys:
if key.lower() in lcdict['pdc']:
pdcsap_flux_median = np.nanmedian(lcdata['PDCSAP_FLUX'])
# normalize the current flux measurements if needed
if normalize and key == 'PDCSAP_FLUX':
thislcdata = lcdata[key] / pdcsap_flux_median
elif normalize and key == 'PDCSAP_FLUX_ERR':
thislcdata = lcdata[key] / pdcsap_flux_median
else:
thislcdata = lcdata[key]
lcdict['pdc'][key.lower()] = (
np.concatenate((lcdict['pdc'][key.lower()], thislcdata))
)
# append some of the light curve information into existing numpy arrays
# so we can sort on them later
lcdict['exptime'] = np.concatenate(
(lcdict['exptime'],
np.full_like(lcdata['TIME'],
hdrinfo['exposure'],
dtype=np.float64))
)
lcdict['sector'] = np.concatenate(
(lcdict['sector'],
np.full_like(lcdata['TIME'],
hdrinfo['sector'],
dtype=np.int64))
)
lcdict['camera'] = np.concatenate(
(lcdict['camera'],
np.full_like(lcdata['TIME'],
hdrinfo['camera'],
dtype=np.int64))
)
lcdict['ccd'] = np.concatenate(
(lcdict['ccd'],
np.full_like(lcdata['TIME'],
hdrinfo['ccd'],
dtype=np.int64))
)
lcdict['pixel_table_id'] = np.concatenate(
(lcdict['pixel_table_id'],
np.full_like(lcdata['TIME'],
hdrinfo['pxtable'],
dtype=np.int64))
)
lcdict['origin'] = np.concatenate(
(lcdict['origin'],
np.full_like(lcdata['TIME'],
hdrinfo['origin'],
dtype='U100'))
)
lcdict['date_obs_start'] = np.concatenate(
(lcdict['date_obs_start'],
np.full_like(lcdata['TIME'],
hdrinfo['date-obs'],
dtype='U100'))
)
lcdict['date_obs_end'] = np.concatenate(
(lcdict['date_obs_end'],
np.full_like(lcdata['TIME'],
hdrinfo['date-end'],
dtype='U100'))
)
lcdict['procversion'] = np.concatenate(
(lcdict['procversion'],
np.full_like(lcdata['TIME'],
hdrinfo['procver'],
dtype='U255'))
)
lcdict['datarelease'] = np.concatenate(
(lcdict['datarelease'],
np.full_like(lcdata['TIME'],
hdrinfo['data_rel'],
dtype=np.int64))
)
# otherwise, this is a new lcdict
else:
# form the lcdict
# the metadata is one-elem arrays because we might add on to them later
lcdict = {
'objectid':hdrinfo['object'],
'lcinfo':{
'timesys':[hdrinfo['timesys']],
'bjdoffset':[hdrinfo['bjdrefi'] + hdrinfo['bjdreff']],
'exptime':[hdrinfo['exposure']],
'lcaperture':[lcaperturedata],
'aperpix_used':[hdrinfo['npixsap']],
'aperpix_unused':[hdrinfo['npixmiss']],
'pixarcsec':[(np.abs(hdrinfo['cdelt1']) +
np.abs(hdrinfo['cdelt2']))*3600.0/2.0],
'ndet':[ndet],
'origin':[hdrinfo['origin']],
'procversion':[hdrinfo['procver']],
'datarelease':[hdrinfo['data_rel']],
'sector':[hdrinfo['sector']],
'camera':[hdrinfo['camera']],
'ccd':[hdrinfo['ccd']],
'pixel_table_id':[hdrinfo['pxtable']],
'date_obs_start':[hdrinfo['date-obs']],
'date_obs_end':[hdrinfo['date-end']],
'tic_version':[hdrinfo['ticver']],
'cr_mitigation':[hdrinfo['crmiten']],
'cr_blocksize':[hdrinfo['crblksz']],
'cr_spocclean':[hdrinfo['crspoc']],
},
'objectinfo':{
'objectid':hdrinfo['object'],
'ticid':hdrinfo['ticid'],
'tessmag':hdrinfo['tessmag'],
'ra':hdrinfo['ra_obj'],
'decl':hdrinfo['dec_obj'],
'pmra':hdrinfo['pmra'],
'pmdecl':hdrinfo['pmdec'],
'pmtotal':hdrinfo['pmtotal'],
'star_teff':hdrinfo['teff'],
'star_logg':hdrinfo['logg'],
'star_mh':hdrinfo['mh'],
'star_radius':hdrinfo['radius'],
'observatory':'TESS',
'telescope':'TESS photometer',
},
'varinfo':{
'cdpp0_5':[hdrinfo['cdpp0_5']],
'cdpp1_0':[hdrinfo['cdpp1_0']],
'cdpp2_0':[hdrinfo['cdpp2_0']],
'pdcvar':[hdrinfo['pdcvar']],
'pdcmethod':[hdrinfo['pdcmethd']],
'target_flux_total_flux_ratio_in_aper':[hdrinfo['crowdsap']],
'target_flux_fraction_in_aper':[hdrinfo['flfrcsap']],
},
'sap':{},
'pdc':{},
}
# get the LC columns
for key in datakeys:
lcdict[key.lower()] = lcdata[key]
for key in sapkeys:
lcdict['sap'][key.lower()] = lcdata[key]
for key in pdckeys:
lcdict['pdc'][key.lower()] = lcdata[key]
# turn some of the light curve information into numpy arrays so we can
# sort on them later
lcdict['exptime'] = np.full_like(lcdict['time'],
lcdict['lcinfo']['exptime'][0],
dtype=np.float64)
lcdict['sector'] = np.full_like(lcdict['time'],
lcdict['lcinfo']['sector'][0],
dtype=np.int64)
lcdict['camera'] = np.full_like(lcdict['time'],
lcdict['lcinfo']['camera'][0],
dtype=np.int64)
lcdict['ccd'] = np.full_like(lcdict['time'],
lcdict['lcinfo']['ccd'][0],
dtype=np.int64)
lcdict['pixel_table_id'] = np.full_like(
lcdict['time'],
lcdict['lcinfo']['pixel_table_id'][0],
dtype=np.int64,
)
lcdict['origin'] = np.full_like(
lcdict['time'],
lcdict['lcinfo']['origin'][0],
dtype='U100',
)
lcdict['date_obs_start'] = np.full_like(
lcdict['time'],
lcdict['lcinfo']['date_obs_start'][0],
dtype='U100',
)
lcdict['date_obs_end'] = np.full_like(
lcdict['time'],
lcdict['lcinfo']['date_obs_end'][0],
dtype='U100',
)
lcdict['procversion'] = np.full_like(
lcdict['time'],
lcdict['lcinfo']['procversion'][0],
dtype='U255',
)
lcdict['datarelease'] = np.full_like(
lcdict['time'],
lcdict['lcinfo']['datarelease'][0],
dtype=np.int64,
)
# normalize the SAP and PDCSAP fluxes, errs, and backgrounds if needed
if normalize:
sapflux_median = np.nanmedian(lcdict['sap']['sap_flux'])
pdcsap_flux_median = np.nanmedian(lcdict['pdc']['pdcsap_flux'])
lcdict['sap']['sap_flux'] = (
lcdict['sap']['sap_flux'] /
sapflux_median
)
lcdict['sap']['sap_flux_err'] = (
lcdict['sap']['sap_flux_err'] /
sapflux_median
)
lcdict['sap']['sap_bkg'] = (
lcdict['sap']['sap_bkg'] /
sapflux_median
)
lcdict['sap']['sap_bkg_err'] = (
lcdict['sap']['sap_bkg_err'] /
sapflux_median
)
lcdict['pdc']['pdcsap_flux'] = (
lcdict['pdc']['pdcsap_flux'] /
pdcsap_flux_median
)
lcdict['pdc']['pdcsap_flux_err'] = (
lcdict['pdc']['pdcsap_flux_err'] /
pdcsap_flux_median
)
## END OF LIGHT CURVE CONSTRUCTION ##
# update the lcdict columns with the actual columns
lcdict['columns'] = (
[x.lower() for x in datakeys] +
['sap.%s' % x.lower() for x in sapkeys] +
['pdc.%s' % x.lower() for x in pdckeys] +
['exptime','sector','camera','ccd', 'pixel_table_id',
'origin', 'date_obs_start', 'date_obs_end',
'procversion', 'datarelease']
)
# update the ndet key in the objectinfo with the sum of all observations
lcdict['objectinfo']['ndet'] = sum(lcdict['lcinfo']['ndet'])
# filter the LC dict if requested
if (filterqualityflags is not False or
nanfilter is not None or
timestoignore is not None):
lcdict = filter_tess_lcdict(lcdict,
filterqualityflags,
nanfilter=nanfilter,
timestoignore=timestoignore)
# return the lcdict at the end
return lcdict
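
For orientation, a minimal usage sketch of this reader follows; the filename is hypothetical, and we assume astrobase is installed so the function is importable from astrobase.astrotess:

# Hedged usage sketch: hypothetical filename, assumed import path.
from astrobase.astrotess import read_tess_fitslc

lcd = read_tess_fitslc(
    'tess-s0001-0000000123456789_lc.fits',  # hypothetical filename
    normalize=True,               # divide SAP/PDCSAP fluxes by their medians
    filterqualityflags=True,      # drop cadences with non-zero quality flags
    nanfilter='pdc,time',         # drop rows with nan PDCSAP flux or nan time
)
print(lcd['objectid'], lcd['objectinfo']['ndet'])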
|
def read_tess_fitslc(lcfits,
headerkeys=LCHEADERKEYS,
datakeys=LCDATAKEYS,
sapkeys=LCSAPKEYS,
pdckeys=LCPDCKEYS,
topkeys=LCTOPKEYS,
apkeys=LCAPERTUREKEYS,
normalize=False,
appendto=None,
filterqualityflags=False,
nanfilter=None,
timestoignore=None):
'''This extracts the light curve from a single TESS .lc.fits file.
This works on the light curves available at MAST.
TODO: look at:
https://archive.stsci.edu/missions/tess/doc/EXP-TESS-ARC-ICD-TM-0014.pdf
for details on the column descriptions and to fill in any other info we
need.
Parameters
----------
lcfits : str
        The filename of a MAST TESS light curve FITS file.
headerkeys : list
A list of FITS header keys that will be extracted from the FITS light
curve file. These describe the observations. The default value for this
is given in `LCHEADERKEYS` above.
datakeys : list
A list of FITS column names that correspond to the auxiliary
measurements in the light curve. The default is `LCDATAKEYS` above.
sapkeys : list
A list of FITS column names that correspond to the SAP flux
measurements in the light curve. The default is `LCSAPKEYS` above.
pdckeys : list
A list of FITS column names that correspond to the PDC flux
measurements in the light curve. The default is `LCPDCKEYS` above.
topkeys : list
A list of FITS header keys that describe the object in the light
curve. The default is `LCTOPKEYS` above.
apkeys : list
A list of FITS header keys that describe the flux measurement apertures
used by the TESS pipeline. The default is `LCAPERTUREKEYS` above.
normalize : bool
If True, then the light curve's SAP_FLUX and PDCSAP_FLUX measurements
will be normalized to 1.0 by dividing out the median flux for the
component light curve.
appendto : lcdict or None
If appendto is an `lcdict`, will append measurements of this `lcdict` to
that `lcdict`. This is used for consolidating light curves for the same
        object across different files (e.g., sectors, cameras, CCDs). The appending
does not care about the time order. To consolidate light curves in time
order, use `consolidate_tess_fitslc` below.
filterqualityflags : bool
If True, will remove any measurements that have non-zero quality flags
present. This usually indicates an issue with the instrument or
spacecraft.
    nanfilter : {'sap,time','pdc,time','sap,pdc,time'} or None
Indicates the flux measurement type(s) to apply the filtering to.
timestoignore : list of tuples or None
This is of the form::
[(time1_start, time1_end), (time2_start, time2_end), ...]
and indicates the start and end times to mask out of the final
lcdict. Use this to remove anything that wasn't caught by the quality
flags.
Returns
-------
lcdict
Returns an `lcdict` (this is useable by most astrobase functions for LC
processing).
'''
# read the fits file
hdulist = pyfits.open(lcfits)
lchdr, lcdata = hdulist[1].header, hdulist[1].data
lctophdr, lcaperturehdr, lcaperturedata = (hdulist[0].header,
hdulist[2].header,
hdulist[2].data)
hdulist.close()
hdrinfo = {}
# now get the values we want from the header
for key in headerkeys:
if key in lchdr and lchdr[key] is not None:
hdrinfo[key.lower()] = lchdr[key]
else:
hdrinfo[key.lower()] = None
# get the number of detections
ndet = lchdr['NAXIS2']
# get the info from the topheader
for key in topkeys:
if key in lctophdr and lctophdr[key] is not None:
hdrinfo[key.lower()] = lctophdr[key]
else:
hdrinfo[key.lower()] = None
# get the info from the lcaperturehdr
for key in lcaperturehdr:
        if lcaperturehdr[key] is not None:
hdrinfo[key.lower()] = lcaperturehdr[key]
else:
hdrinfo[key.lower()] = None
# if we're appending to another lcdict
if appendto and isinstance(appendto, dict):
lcdict = appendto
# update lcinfo
lcdict['lcinfo']['timesys'].append(hdrinfo['timesys'])
lcdict['lcinfo']['bjdoffset'].append(
hdrinfo['bjdrefi'] + hdrinfo['bjdreff']
)
lcdict['lcinfo']['lcaperture'].append(lcaperturedata)
lcdict['lcinfo']['aperpix_used'].append(hdrinfo['npixsap'])
lcdict['lcinfo']['aperpix_unused'].append(hdrinfo['npixmiss'])
lcdict['lcinfo']['pixarcsec'].append(
(np.abs(hdrinfo['cdelt1']) +
np.abs(hdrinfo['cdelt2']))*3600.0/2.0
)
lcdict['lcinfo']['ndet'].append(ndet)
lcdict['lcinfo']['exptime'].append(hdrinfo['exposure'])
lcdict['lcinfo']['sector'].append(hdrinfo['sector'])
lcdict['lcinfo']['camera'].append(hdrinfo['camera'])
lcdict['lcinfo']['ccd'].append(hdrinfo['ccd'])
lcdict['lcinfo']['date_obs_start'].append(hdrinfo['date-obs'])
lcdict['lcinfo']['date_obs_end'].append(hdrinfo['date-end'])
lcdict['lcinfo']['pixel_table_id'].append(hdrinfo['pxtable'])
lcdict['lcinfo']['origin'].append(hdrinfo['origin'])
lcdict['lcinfo']['datarelease'].append(hdrinfo['data_rel'])
lcdict['lcinfo']['procversion'].append(hdrinfo['procver'])
lcdict['lcinfo']['tic_version'].append(hdrinfo['ticver'])
lcdict['lcinfo']['cr_mitigation'].append(hdrinfo['crmiten'])
lcdict['lcinfo']['cr_blocksize'].append(hdrinfo['crblksz'])
lcdict['lcinfo']['cr_spocclean'].append(hdrinfo['crspoc'])
# update the varinfo for this light curve
lcdict['varinfo']['cdpp0_5'].append(hdrinfo['cdpp0_5'])
lcdict['varinfo']['cdpp1_0'].append(hdrinfo['cdpp1_0'])
lcdict['varinfo']['cdpp2_0'].append(hdrinfo['cdpp2_0'])
lcdict['varinfo']['pdcvar'].append(hdrinfo['pdcvar'])
lcdict['varinfo']['pdcmethod'].append(hdrinfo['pdcmethd'])
lcdict['varinfo']['target_flux_total_flux_ratio_in_aper'].append(
hdrinfo['crowdsap']
)
lcdict['varinfo']['target_flux_fraction_in_aper'].append(
hdrinfo['flfrcsap']
)
# update the light curve columns now
for key in datakeys:
if key.lower() in lcdict:
lcdict[key.lower()] = (
np.concatenate((lcdict[key.lower()], lcdata[key]))
)
for key in sapkeys:
if key.lower() in lcdict['sap']:
sapflux_median = np.nanmedian(lcdata['SAP_FLUX'])
# normalize the current flux measurements if needed
if normalize and key == 'SAP_FLUX':
thislcdata = lcdata[key] / sapflux_median
elif normalize and key == 'SAP_FLUX_ERR':
thislcdata = lcdata[key] / sapflux_median
elif normalize and key == 'SAP_BKG':
thislcdata = lcdata[key] / sapflux_median
elif normalize and key == 'SAP_BKG_ERR':
thislcdata = lcdata[key] / sapflux_median
else:
thislcdata = lcdata[key]
lcdict['sap'][key.lower()] = (
np.concatenate((lcdict['sap'][key.lower()], thislcdata))
)
for key in pdckeys:
if key.lower() in lcdict['pdc']:
pdcsap_flux_median = np.nanmedian(lcdata['PDCSAP_FLUX'])
# normalize the current flux measurements if needed
if normalize and key == 'PDCSAP_FLUX':
thislcdata = lcdata[key] / pdcsap_flux_median
elif normalize and key == 'PDCSAP_FLUX_ERR':
thislcdata = lcdata[key] / pdcsap_flux_median
else:
thislcdata = lcdata[key]
lcdict['pdc'][key.lower()] = (
np.concatenate((lcdict['pdc'][key.lower()], thislcdata))
)
# append some of the light curve information into existing numpy arrays
# so we can sort on them later
lcdict['exptime'] = np.concatenate(
(lcdict['exptime'],
np.full_like(lcdata['TIME'],
hdrinfo['exposure'],
dtype=np.float64))
)
lcdict['sector'] = np.concatenate(
(lcdict['sector'],
np.full_like(lcdata['TIME'],
hdrinfo['sector'],
dtype=np.int64))
)
lcdict['camera'] = np.concatenate(
(lcdict['camera'],
np.full_like(lcdata['TIME'],
hdrinfo['camera'],
dtype=np.int64))
)
lcdict['ccd'] = np.concatenate(
(lcdict['ccd'],
np.full_like(lcdata['TIME'],
hdrinfo['ccd'],
dtype=np.int64))
)
lcdict['pixel_table_id'] = np.concatenate(
(lcdict['pixel_table_id'],
np.full_like(lcdata['TIME'],
hdrinfo['pxtable'],
dtype=np.int64))
)
lcdict['origin'] = np.concatenate(
(lcdict['origin'],
np.full_like(lcdata['TIME'],
hdrinfo['origin'],
dtype='U100'))
)
lcdict['date_obs_start'] = np.concatenate(
(lcdict['date_obs_start'],
np.full_like(lcdata['TIME'],
hdrinfo['date-obs'],
dtype='U100'))
)
lcdict['date_obs_end'] = np.concatenate(
(lcdict['date_obs_end'],
np.full_like(lcdata['TIME'],
hdrinfo['date-end'],
dtype='U100'))
)
lcdict['procversion'] = np.concatenate(
(lcdict['procversion'],
np.full_like(lcdata['TIME'],
hdrinfo['procver'],
dtype='U255'))
)
lcdict['datarelease'] = np.concatenate(
(lcdict['datarelease'],
np.full_like(lcdata['TIME'],
hdrinfo['data_rel'],
dtype=np.int64))
)
# otherwise, this is a new lcdict
else:
# form the lcdict
# the metadata is one-elem arrays because we might add on to them later
lcdict = {
'objectid':hdrinfo['object'],
'lcinfo':{
'timesys':[hdrinfo['timesys']],
'bjdoffset':[hdrinfo['bjdrefi'] + hdrinfo['bjdreff']],
'exptime':[hdrinfo['exposure']],
'lcaperture':[lcaperturedata],
'aperpix_used':[hdrinfo['npixsap']],
'aperpix_unused':[hdrinfo['npixmiss']],
'pixarcsec':[(np.abs(hdrinfo['cdelt1']) +
np.abs(hdrinfo['cdelt2']))*3600.0/2.0],
'ndet':[ndet],
'origin':[hdrinfo['origin']],
'procversion':[hdrinfo['procver']],
'datarelease':[hdrinfo['data_rel']],
'sector':[hdrinfo['sector']],
'camera':[hdrinfo['camera']],
'ccd':[hdrinfo['ccd']],
'pixel_table_id':[hdrinfo['pxtable']],
'date_obs_start':[hdrinfo['date-obs']],
'date_obs_end':[hdrinfo['date-end']],
'tic_version':[hdrinfo['ticver']],
'cr_mitigation':[hdrinfo['crmiten']],
'cr_blocksize':[hdrinfo['crblksz']],
'cr_spocclean':[hdrinfo['crspoc']],
},
'objectinfo':{
'objectid':hdrinfo['object'],
'ticid':hdrinfo['ticid'],
'tessmag':hdrinfo['tessmag'],
'ra':hdrinfo['ra_obj'],
'decl':hdrinfo['dec_obj'],
'pmra':hdrinfo['pmra'],
'pmdecl':hdrinfo['pmdec'],
'pmtotal':hdrinfo['pmtotal'],
'star_teff':hdrinfo['teff'],
'star_logg':hdrinfo['logg'],
'star_mh':hdrinfo['mh'],
'star_radius':hdrinfo['radius'],
'observatory':'TESS',
'telescope':'TESS photometer',
},
'varinfo':{
'cdpp0_5':[hdrinfo['cdpp0_5']],
'cdpp1_0':[hdrinfo['cdpp1_0']],
'cdpp2_0':[hdrinfo['cdpp2_0']],
'pdcvar':[hdrinfo['pdcvar']],
'pdcmethod':[hdrinfo['pdcmethd']],
'target_flux_total_flux_ratio_in_aper':[hdrinfo['crowdsap']],
'target_flux_fraction_in_aper':[hdrinfo['flfrcsap']],
},
'sap':{},
'pdc':{},
}
# get the LC columns
for key in datakeys:
lcdict[key.lower()] = lcdata[key]
for key in sapkeys:
lcdict['sap'][key.lower()] = lcdata[key]
for key in pdckeys:
lcdict['pdc'][key.lower()] = lcdata[key]
# turn some of the light curve information into numpy arrays so we can
# sort on them later
lcdict['exptime'] = np.full_like(lcdict['time'],
lcdict['lcinfo']['exptime'][0],
dtype=np.float64)
lcdict['sector'] = np.full_like(lcdict['time'],
lcdict['lcinfo']['sector'][0],
dtype=np.int64)
lcdict['camera'] = np.full_like(lcdict['time'],
lcdict['lcinfo']['camera'][0],
dtype=np.int64)
lcdict['ccd'] = np.full_like(lcdict['time'],
lcdict['lcinfo']['ccd'][0],
dtype=np.int64)
lcdict['pixel_table_id'] = np.full_like(
lcdict['time'],
lcdict['lcinfo']['pixel_table_id'][0],
dtype=np.int64,
)
lcdict['origin'] = np.full_like(
lcdict['time'],
lcdict['lcinfo']['origin'][0],
dtype='U100',
)
lcdict['date_obs_start'] = np.full_like(
lcdict['time'],
lcdict['lcinfo']['date_obs_start'][0],
dtype='U100',
)
lcdict['date_obs_end'] = np.full_like(
lcdict['time'],
lcdict['lcinfo']['date_obs_end'][0],
dtype='U100',
)
lcdict['procversion'] = np.full_like(
lcdict['time'],
lcdict['lcinfo']['procversion'][0],
dtype='U255',
)
lcdict['datarelease'] = np.full_like(
lcdict['time'],
lcdict['lcinfo']['datarelease'][0],
dtype=np.int64,
)
# normalize the SAP and PDCSAP fluxes, errs, and backgrounds if needed
if normalize:
sapflux_median = np.nanmedian(lcdict['sap']['sap_flux'])
pdcsap_flux_median = np.nanmedian(lcdict['pdc']['pdcsap_flux'])
lcdict['sap']['sap_flux'] = (
lcdict['sap']['sap_flux'] /
sapflux_median
)
lcdict['sap']['sap_flux_err'] = (
lcdict['sap']['sap_flux_err'] /
sapflux_median
)
lcdict['sap']['sap_bkg'] = (
lcdict['sap']['sap_bkg'] /
sapflux_median
)
lcdict['sap']['sap_bkg_err'] = (
lcdict['sap']['sap_bkg_err'] /
sapflux_median
)
lcdict['pdc']['pdcsap_flux'] = (
lcdict['pdc']['pdcsap_flux'] /
pdcsap_flux_median
)
lcdict['pdc']['pdcsap_flux_err'] = (
lcdict['pdc']['pdcsap_flux_err'] /
pdcsap_flux_median
)
## END OF LIGHT CURVE CONSTRUCTION ##
# update the lcdict columns with the actual columns
lcdict['columns'] = (
[x.lower() for x in datakeys] +
['sap.%s' % x.lower() for x in sapkeys] +
['pdc.%s' % x.lower() for x in pdckeys] +
['exptime','sector','camera','ccd', 'pixel_table_id',
'origin', 'date_obs_start', 'date_obs_end',
'procversion', 'datarelease']
)
# update the ndet key in the objectinfo with the sum of all observations
lcdict['objectinfo']['ndet'] = sum(lcdict['lcinfo']['ndet'])
# filter the LC dict if requested
if (filterqualityflags is not False or
nanfilter is not None or
timestoignore is not None):
lcdict = filter_tess_lcdict(lcdict,
filterqualityflags,
nanfilter=nanfilter,
timestoignore=timestoignore)
# return the lcdict at the end
return lcdict
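
The appendto branch above can be exercised as in this sketch (filenames hypothetical): each later call concatenates its column arrays onto the growing lcdict and appends one entry per file to the lcinfo and varinfo lists.

# Hedged sketch of the appendto pattern (hypothetical filenames).
lcd = read_tess_fitslc('tic123_sector01_lc.fits', normalize=True)
lcd = read_tess_fitslc('tic123_sector02_lc.fits', appendto=lcd, normalize=True)

print(lcd['lcinfo']['sector'])  # one entry per file read, e.g. [1, 2]
print(lcd['sector'].size)       # per-cadence array spanning both files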
|
[
"This",
"extracts",
"the",
"light",
"curve",
"from",
"a",
"single",
"TESS",
".",
"lc",
".",
"fits",
"file",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/astrotess.py#L331-L794
|
[
"def",
"read_tess_fitslc",
"(",
"lcfits",
",",
"headerkeys",
"=",
"LCHEADERKEYS",
",",
"datakeys",
"=",
"LCDATAKEYS",
",",
"sapkeys",
"=",
"LCSAPKEYS",
",",
"pdckeys",
"=",
"LCPDCKEYS",
",",
"topkeys",
"=",
"LCTOPKEYS",
",",
"apkeys",
"=",
"LCAPERTUREKEYS",
",",
"normalize",
"=",
"False",
",",
"appendto",
"=",
"None",
",",
"filterqualityflags",
"=",
"False",
",",
"nanfilter",
"=",
"None",
",",
"timestoignore",
"=",
"None",
")",
":",
"# read the fits file",
"hdulist",
"=",
"pyfits",
".",
"open",
"(",
"lcfits",
")",
"lchdr",
",",
"lcdata",
"=",
"hdulist",
"[",
"1",
"]",
".",
"header",
",",
"hdulist",
"[",
"1",
"]",
".",
"data",
"lctophdr",
",",
"lcaperturehdr",
",",
"lcaperturedata",
"=",
"(",
"hdulist",
"[",
"0",
"]",
".",
"header",
",",
"hdulist",
"[",
"2",
"]",
".",
"header",
",",
"hdulist",
"[",
"2",
"]",
".",
"data",
")",
"hdulist",
".",
"close",
"(",
")",
"hdrinfo",
"=",
"{",
"}",
"# now get the values we want from the header",
"for",
"key",
"in",
"headerkeys",
":",
"if",
"key",
"in",
"lchdr",
"and",
"lchdr",
"[",
"key",
"]",
"is",
"not",
"None",
":",
"hdrinfo",
"[",
"key",
".",
"lower",
"(",
")",
"]",
"=",
"lchdr",
"[",
"key",
"]",
"else",
":",
"hdrinfo",
"[",
"key",
".",
"lower",
"(",
")",
"]",
"=",
"None",
"# get the number of detections",
"ndet",
"=",
"lchdr",
"[",
"'NAXIS2'",
"]",
"# get the info from the topheader",
"for",
"key",
"in",
"topkeys",
":",
"if",
"key",
"in",
"lctophdr",
"and",
"lctophdr",
"[",
"key",
"]",
"is",
"not",
"None",
":",
"hdrinfo",
"[",
"key",
".",
"lower",
"(",
")",
"]",
"=",
"lctophdr",
"[",
"key",
"]",
"else",
":",
"hdrinfo",
"[",
"key",
".",
"lower",
"(",
")",
"]",
"=",
"None",
"# get the info from the lcaperturehdr",
"for",
"key",
"in",
"lcaperturehdr",
":",
"if",
"key",
"in",
"lcaperturehdr",
"and",
"lcaperturehdr",
"[",
"key",
"]",
"is",
"not",
"None",
":",
"hdrinfo",
"[",
"key",
".",
"lower",
"(",
")",
"]",
"=",
"lcaperturehdr",
"[",
"key",
"]",
"else",
":",
"hdrinfo",
"[",
"key",
".",
"lower",
"(",
")",
"]",
"=",
"None",
"# if we're appending to another lcdict",
"if",
"appendto",
"and",
"isinstance",
"(",
"appendto",
",",
"dict",
")",
":",
"lcdict",
"=",
"appendto",
"# update lcinfo",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'timesys'",
"]",
".",
"append",
"(",
"hdrinfo",
"[",
"'timesys'",
"]",
")",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'bjdoffset'",
"]",
".",
"append",
"(",
"hdrinfo",
"[",
"'bjdrefi'",
"]",
"+",
"hdrinfo",
"[",
"'bjdreff'",
"]",
")",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'lcaperture'",
"]",
".",
"append",
"(",
"lcaperturedata",
")",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'aperpix_used'",
"]",
".",
"append",
"(",
"hdrinfo",
"[",
"'npixsap'",
"]",
")",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'aperpix_unused'",
"]",
".",
"append",
"(",
"hdrinfo",
"[",
"'npixmiss'",
"]",
")",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'pixarcsec'",
"]",
".",
"append",
"(",
"(",
"np",
".",
"abs",
"(",
"hdrinfo",
"[",
"'cdelt1'",
"]",
")",
"+",
"np",
".",
"abs",
"(",
"hdrinfo",
"[",
"'cdelt2'",
"]",
")",
")",
"*",
"3600.0",
"/",
"2.0",
")",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'ndet'",
"]",
".",
"append",
"(",
"ndet",
")",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'exptime'",
"]",
".",
"append",
"(",
"hdrinfo",
"[",
"'exposure'",
"]",
")",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'sector'",
"]",
".",
"append",
"(",
"hdrinfo",
"[",
"'sector'",
"]",
")",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'camera'",
"]",
".",
"append",
"(",
"hdrinfo",
"[",
"'camera'",
"]",
")",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'ccd'",
"]",
".",
"append",
"(",
"hdrinfo",
"[",
"'ccd'",
"]",
")",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'date_obs_start'",
"]",
".",
"append",
"(",
"hdrinfo",
"[",
"'date-obs'",
"]",
")",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'date_obs_end'",
"]",
".",
"append",
"(",
"hdrinfo",
"[",
"'date-end'",
"]",
")",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'pixel_table_id'",
"]",
".",
"append",
"(",
"hdrinfo",
"[",
"'pxtable'",
"]",
")",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'origin'",
"]",
".",
"append",
"(",
"hdrinfo",
"[",
"'origin'",
"]",
")",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'datarelease'",
"]",
".",
"append",
"(",
"hdrinfo",
"[",
"'data_rel'",
"]",
")",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'procversion'",
"]",
".",
"append",
"(",
"hdrinfo",
"[",
"'procver'",
"]",
")",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'tic_version'",
"]",
".",
"append",
"(",
"hdrinfo",
"[",
"'ticver'",
"]",
")",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'cr_mitigation'",
"]",
".",
"append",
"(",
"hdrinfo",
"[",
"'crmiten'",
"]",
")",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'cr_blocksize'",
"]",
".",
"append",
"(",
"hdrinfo",
"[",
"'crblksz'",
"]",
")",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'cr_spocclean'",
"]",
".",
"append",
"(",
"hdrinfo",
"[",
"'crspoc'",
"]",
")",
"# update the varinfo for this light curve",
"lcdict",
"[",
"'varinfo'",
"]",
"[",
"'cdpp0_5'",
"]",
".",
"append",
"(",
"hdrinfo",
"[",
"'cdpp0_5'",
"]",
")",
"lcdict",
"[",
"'varinfo'",
"]",
"[",
"'cdpp1_0'",
"]",
".",
"append",
"(",
"hdrinfo",
"[",
"'cdpp1_0'",
"]",
")",
"lcdict",
"[",
"'varinfo'",
"]",
"[",
"'cdpp2_0'",
"]",
".",
"append",
"(",
"hdrinfo",
"[",
"'cdpp2_0'",
"]",
")",
"lcdict",
"[",
"'varinfo'",
"]",
"[",
"'pdcvar'",
"]",
".",
"append",
"(",
"hdrinfo",
"[",
"'pdcvar'",
"]",
")",
"lcdict",
"[",
"'varinfo'",
"]",
"[",
"'pdcmethod'",
"]",
".",
"append",
"(",
"hdrinfo",
"[",
"'pdcmethd'",
"]",
")",
"lcdict",
"[",
"'varinfo'",
"]",
"[",
"'target_flux_total_flux_ratio_in_aper'",
"]",
".",
"append",
"(",
"hdrinfo",
"[",
"'crowdsap'",
"]",
")",
"lcdict",
"[",
"'varinfo'",
"]",
"[",
"'target_flux_fraction_in_aper'",
"]",
".",
"append",
"(",
"hdrinfo",
"[",
"'flfrcsap'",
"]",
")",
"# update the light curve columns now",
"for",
"key",
"in",
"datakeys",
":",
"if",
"key",
".",
"lower",
"(",
")",
"in",
"lcdict",
":",
"lcdict",
"[",
"key",
".",
"lower",
"(",
")",
"]",
"=",
"(",
"np",
".",
"concatenate",
"(",
"(",
"lcdict",
"[",
"key",
".",
"lower",
"(",
")",
"]",
",",
"lcdata",
"[",
"key",
"]",
")",
")",
")",
"for",
"key",
"in",
"sapkeys",
":",
"if",
"key",
".",
"lower",
"(",
")",
"in",
"lcdict",
"[",
"'sap'",
"]",
":",
"sapflux_median",
"=",
"np",
".",
"nanmedian",
"(",
"lcdata",
"[",
"'SAP_FLUX'",
"]",
")",
"# normalize the current flux measurements if needed",
"if",
"normalize",
"and",
"key",
"==",
"'SAP_FLUX'",
":",
"thislcdata",
"=",
"lcdata",
"[",
"key",
"]",
"/",
"sapflux_median",
"elif",
"normalize",
"and",
"key",
"==",
"'SAP_FLUX_ERR'",
":",
"thislcdata",
"=",
"lcdata",
"[",
"key",
"]",
"/",
"sapflux_median",
"elif",
"normalize",
"and",
"key",
"==",
"'SAP_BKG'",
":",
"thislcdata",
"=",
"lcdata",
"[",
"key",
"]",
"/",
"sapflux_median",
"elif",
"normalize",
"and",
"key",
"==",
"'SAP_BKG_ERR'",
":",
"thislcdata",
"=",
"lcdata",
"[",
"key",
"]",
"/",
"sapflux_median",
"else",
":",
"thislcdata",
"=",
"lcdata",
"[",
"key",
"]",
"lcdict",
"[",
"'sap'",
"]",
"[",
"key",
".",
"lower",
"(",
")",
"]",
"=",
"(",
"np",
".",
"concatenate",
"(",
"(",
"lcdict",
"[",
"'sap'",
"]",
"[",
"key",
".",
"lower",
"(",
")",
"]",
",",
"thislcdata",
")",
")",
")",
"for",
"key",
"in",
"pdckeys",
":",
"if",
"key",
".",
"lower",
"(",
")",
"in",
"lcdict",
"[",
"'pdc'",
"]",
":",
"pdcsap_flux_median",
"=",
"np",
".",
"nanmedian",
"(",
"lcdata",
"[",
"'PDCSAP_FLUX'",
"]",
")",
"# normalize the current flux measurements if needed",
"if",
"normalize",
"and",
"key",
"==",
"'PDCSAP_FLUX'",
":",
"thislcdata",
"=",
"lcdata",
"[",
"key",
"]",
"/",
"pdcsap_flux_median",
"elif",
"normalize",
"and",
"key",
"==",
"'PDCSAP_FLUX_ERR'",
":",
"thislcdata",
"=",
"lcdata",
"[",
"key",
"]",
"/",
"pdcsap_flux_median",
"else",
":",
"thislcdata",
"=",
"lcdata",
"[",
"key",
"]",
"lcdict",
"[",
"'pdc'",
"]",
"[",
"key",
".",
"lower",
"(",
")",
"]",
"=",
"(",
"np",
".",
"concatenate",
"(",
"(",
"lcdict",
"[",
"'pdc'",
"]",
"[",
"key",
".",
"lower",
"(",
")",
"]",
",",
"thislcdata",
")",
")",
")",
"# append some of the light curve information into existing numpy arrays",
"# so we can sort on them later",
"lcdict",
"[",
"'exptime'",
"]",
"=",
"np",
".",
"concatenate",
"(",
"(",
"lcdict",
"[",
"'exptime'",
"]",
",",
"np",
".",
"full_like",
"(",
"lcdata",
"[",
"'TIME'",
"]",
",",
"hdrinfo",
"[",
"'exposure'",
"]",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
")",
")",
"lcdict",
"[",
"'sector'",
"]",
"=",
"np",
".",
"concatenate",
"(",
"(",
"lcdict",
"[",
"'sector'",
"]",
",",
"np",
".",
"full_like",
"(",
"lcdata",
"[",
"'TIME'",
"]",
",",
"hdrinfo",
"[",
"'sector'",
"]",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
")",
")",
"lcdict",
"[",
"'camera'",
"]",
"=",
"np",
".",
"concatenate",
"(",
"(",
"lcdict",
"[",
"'camera'",
"]",
",",
"np",
".",
"full_like",
"(",
"lcdata",
"[",
"'TIME'",
"]",
",",
"hdrinfo",
"[",
"'camera'",
"]",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
")",
")",
"lcdict",
"[",
"'ccd'",
"]",
"=",
"np",
".",
"concatenate",
"(",
"(",
"lcdict",
"[",
"'ccd'",
"]",
",",
"np",
".",
"full_like",
"(",
"lcdata",
"[",
"'TIME'",
"]",
",",
"hdrinfo",
"[",
"'ccd'",
"]",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
")",
")",
"lcdict",
"[",
"'pixel_table_id'",
"]",
"=",
"np",
".",
"concatenate",
"(",
"(",
"lcdict",
"[",
"'pixel_table_id'",
"]",
",",
"np",
".",
"full_like",
"(",
"lcdata",
"[",
"'TIME'",
"]",
",",
"hdrinfo",
"[",
"'pxtable'",
"]",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
")",
")",
"lcdict",
"[",
"'origin'",
"]",
"=",
"np",
".",
"concatenate",
"(",
"(",
"lcdict",
"[",
"'origin'",
"]",
",",
"np",
".",
"full_like",
"(",
"lcdata",
"[",
"'TIME'",
"]",
",",
"hdrinfo",
"[",
"'origin'",
"]",
",",
"dtype",
"=",
"'U100'",
")",
")",
")",
"lcdict",
"[",
"'date_obs_start'",
"]",
"=",
"np",
".",
"concatenate",
"(",
"(",
"lcdict",
"[",
"'date_obs_start'",
"]",
",",
"np",
".",
"full_like",
"(",
"lcdata",
"[",
"'TIME'",
"]",
",",
"hdrinfo",
"[",
"'date-obs'",
"]",
",",
"dtype",
"=",
"'U100'",
")",
")",
")",
"lcdict",
"[",
"'date_obs_end'",
"]",
"=",
"np",
".",
"concatenate",
"(",
"(",
"lcdict",
"[",
"'date_obs_end'",
"]",
",",
"np",
".",
"full_like",
"(",
"lcdata",
"[",
"'TIME'",
"]",
",",
"hdrinfo",
"[",
"'date-end'",
"]",
",",
"dtype",
"=",
"'U100'",
")",
")",
")",
"lcdict",
"[",
"'procversion'",
"]",
"=",
"np",
".",
"concatenate",
"(",
"(",
"lcdict",
"[",
"'procversion'",
"]",
",",
"np",
".",
"full_like",
"(",
"lcdata",
"[",
"'TIME'",
"]",
",",
"hdrinfo",
"[",
"'procver'",
"]",
",",
"dtype",
"=",
"'U255'",
")",
")",
")",
"lcdict",
"[",
"'datarelease'",
"]",
"=",
"np",
".",
"concatenate",
"(",
"(",
"lcdict",
"[",
"'datarelease'",
"]",
",",
"np",
".",
"full_like",
"(",
"lcdata",
"[",
"'TIME'",
"]",
",",
"hdrinfo",
"[",
"'data_rel'",
"]",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
")",
")",
"# otherwise, this is a new lcdict",
"else",
":",
"# form the lcdict",
"# the metadata is one-elem arrays because we might add on to them later",
"lcdict",
"=",
"{",
"'objectid'",
":",
"hdrinfo",
"[",
"'object'",
"]",
",",
"'lcinfo'",
":",
"{",
"'timesys'",
":",
"[",
"hdrinfo",
"[",
"'timesys'",
"]",
"]",
",",
"'bjdoffset'",
":",
"[",
"hdrinfo",
"[",
"'bjdrefi'",
"]",
"+",
"hdrinfo",
"[",
"'bjdreff'",
"]",
"]",
",",
"'exptime'",
":",
"[",
"hdrinfo",
"[",
"'exposure'",
"]",
"]",
",",
"'lcaperture'",
":",
"[",
"lcaperturedata",
"]",
",",
"'aperpix_used'",
":",
"[",
"hdrinfo",
"[",
"'npixsap'",
"]",
"]",
",",
"'aperpix_unused'",
":",
"[",
"hdrinfo",
"[",
"'npixmiss'",
"]",
"]",
",",
"'pixarcsec'",
":",
"[",
"(",
"np",
".",
"abs",
"(",
"hdrinfo",
"[",
"'cdelt1'",
"]",
")",
"+",
"np",
".",
"abs",
"(",
"hdrinfo",
"[",
"'cdelt2'",
"]",
")",
")",
"*",
"3600.0",
"/",
"2.0",
"]",
",",
"'ndet'",
":",
"[",
"ndet",
"]",
",",
"'origin'",
":",
"[",
"hdrinfo",
"[",
"'origin'",
"]",
"]",
",",
"'procversion'",
":",
"[",
"hdrinfo",
"[",
"'procver'",
"]",
"]",
",",
"'datarelease'",
":",
"[",
"hdrinfo",
"[",
"'data_rel'",
"]",
"]",
",",
"'sector'",
":",
"[",
"hdrinfo",
"[",
"'sector'",
"]",
"]",
",",
"'camera'",
":",
"[",
"hdrinfo",
"[",
"'camera'",
"]",
"]",
",",
"'ccd'",
":",
"[",
"hdrinfo",
"[",
"'ccd'",
"]",
"]",
",",
"'pixel_table_id'",
":",
"[",
"hdrinfo",
"[",
"'pxtable'",
"]",
"]",
",",
"'date_obs_start'",
":",
"[",
"hdrinfo",
"[",
"'date-obs'",
"]",
"]",
",",
"'date_obs_end'",
":",
"[",
"hdrinfo",
"[",
"'date-end'",
"]",
"]",
",",
"'tic_version'",
":",
"[",
"hdrinfo",
"[",
"'ticver'",
"]",
"]",
",",
"'cr_mitigation'",
":",
"[",
"hdrinfo",
"[",
"'crmiten'",
"]",
"]",
",",
"'cr_blocksize'",
":",
"[",
"hdrinfo",
"[",
"'crblksz'",
"]",
"]",
",",
"'cr_spocclean'",
":",
"[",
"hdrinfo",
"[",
"'crspoc'",
"]",
"]",
",",
"}",
",",
"'objectinfo'",
":",
"{",
"'objectid'",
":",
"hdrinfo",
"[",
"'object'",
"]",
",",
"'ticid'",
":",
"hdrinfo",
"[",
"'ticid'",
"]",
",",
"'tessmag'",
":",
"hdrinfo",
"[",
"'tessmag'",
"]",
",",
"'ra'",
":",
"hdrinfo",
"[",
"'ra_obj'",
"]",
",",
"'decl'",
":",
"hdrinfo",
"[",
"'dec_obj'",
"]",
",",
"'pmra'",
":",
"hdrinfo",
"[",
"'pmra'",
"]",
",",
"'pmdecl'",
":",
"hdrinfo",
"[",
"'pmdec'",
"]",
",",
"'pmtotal'",
":",
"hdrinfo",
"[",
"'pmtotal'",
"]",
",",
"'star_teff'",
":",
"hdrinfo",
"[",
"'teff'",
"]",
",",
"'star_logg'",
":",
"hdrinfo",
"[",
"'logg'",
"]",
",",
"'star_mh'",
":",
"hdrinfo",
"[",
"'mh'",
"]",
",",
"'star_radius'",
":",
"hdrinfo",
"[",
"'radius'",
"]",
",",
"'observatory'",
":",
"'TESS'",
",",
"'telescope'",
":",
"'TESS photometer'",
",",
"}",
",",
"'varinfo'",
":",
"{",
"'cdpp0_5'",
":",
"[",
"hdrinfo",
"[",
"'cdpp0_5'",
"]",
"]",
",",
"'cdpp1_0'",
":",
"[",
"hdrinfo",
"[",
"'cdpp1_0'",
"]",
"]",
",",
"'cdpp2_0'",
":",
"[",
"hdrinfo",
"[",
"'cdpp2_0'",
"]",
"]",
",",
"'pdcvar'",
":",
"[",
"hdrinfo",
"[",
"'pdcvar'",
"]",
"]",
",",
"'pdcmethod'",
":",
"[",
"hdrinfo",
"[",
"'pdcmethd'",
"]",
"]",
",",
"'target_flux_total_flux_ratio_in_aper'",
":",
"[",
"hdrinfo",
"[",
"'crowdsap'",
"]",
"]",
",",
"'target_flux_fraction_in_aper'",
":",
"[",
"hdrinfo",
"[",
"'flfrcsap'",
"]",
"]",
",",
"}",
",",
"'sap'",
":",
"{",
"}",
",",
"'pdc'",
":",
"{",
"}",
",",
"}",
"# get the LC columns",
"for",
"key",
"in",
"datakeys",
":",
"lcdict",
"[",
"key",
".",
"lower",
"(",
")",
"]",
"=",
"lcdata",
"[",
"key",
"]",
"for",
"key",
"in",
"sapkeys",
":",
"lcdict",
"[",
"'sap'",
"]",
"[",
"key",
".",
"lower",
"(",
")",
"]",
"=",
"lcdata",
"[",
"key",
"]",
"for",
"key",
"in",
"pdckeys",
":",
"lcdict",
"[",
"'pdc'",
"]",
"[",
"key",
".",
"lower",
"(",
")",
"]",
"=",
"lcdata",
"[",
"key",
"]",
"# turn some of the light curve information into numpy arrays so we can",
"# sort on them later",
"lcdict",
"[",
"'exptime'",
"]",
"=",
"np",
".",
"full_like",
"(",
"lcdict",
"[",
"'time'",
"]",
",",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'exptime'",
"]",
"[",
"0",
"]",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"lcdict",
"[",
"'sector'",
"]",
"=",
"np",
".",
"full_like",
"(",
"lcdict",
"[",
"'time'",
"]",
",",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'sector'",
"]",
"[",
"0",
"]",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
"lcdict",
"[",
"'camera'",
"]",
"=",
"np",
".",
"full_like",
"(",
"lcdict",
"[",
"'time'",
"]",
",",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'camera'",
"]",
"[",
"0",
"]",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
"lcdict",
"[",
"'ccd'",
"]",
"=",
"np",
".",
"full_like",
"(",
"lcdict",
"[",
"'time'",
"]",
",",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'ccd'",
"]",
"[",
"0",
"]",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
"lcdict",
"[",
"'pixel_table_id'",
"]",
"=",
"np",
".",
"full_like",
"(",
"lcdict",
"[",
"'time'",
"]",
",",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'pixel_table_id'",
"]",
"[",
"0",
"]",
",",
"dtype",
"=",
"np",
".",
"int64",
",",
")",
"lcdict",
"[",
"'origin'",
"]",
"=",
"np",
".",
"full_like",
"(",
"lcdict",
"[",
"'time'",
"]",
",",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'origin'",
"]",
"[",
"0",
"]",
",",
"dtype",
"=",
"'U100'",
",",
")",
"lcdict",
"[",
"'date_obs_start'",
"]",
"=",
"np",
".",
"full_like",
"(",
"lcdict",
"[",
"'time'",
"]",
",",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'date_obs_start'",
"]",
"[",
"0",
"]",
",",
"dtype",
"=",
"'U100'",
",",
")",
"lcdict",
"[",
"'date_obs_end'",
"]",
"=",
"np",
".",
"full_like",
"(",
"lcdict",
"[",
"'time'",
"]",
",",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'date_obs_end'",
"]",
"[",
"0",
"]",
",",
"dtype",
"=",
"'U100'",
",",
")",
"lcdict",
"[",
"'procversion'",
"]",
"=",
"np",
".",
"full_like",
"(",
"lcdict",
"[",
"'time'",
"]",
",",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'procversion'",
"]",
"[",
"0",
"]",
",",
"dtype",
"=",
"'U255'",
",",
")",
"lcdict",
"[",
"'datarelease'",
"]",
"=",
"np",
".",
"full_like",
"(",
"lcdict",
"[",
"'time'",
"]",
",",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'datarelease'",
"]",
"[",
"0",
"]",
",",
"dtype",
"=",
"np",
".",
"int64",
",",
")",
"# normalize the SAP and PDCSAP fluxes, errs, and backgrounds if needed",
"if",
"normalize",
":",
"sapflux_median",
"=",
"np",
".",
"nanmedian",
"(",
"lcdict",
"[",
"'sap'",
"]",
"[",
"'sap_flux'",
"]",
")",
"pdcsap_flux_median",
"=",
"np",
".",
"nanmedian",
"(",
"lcdict",
"[",
"'pdc'",
"]",
"[",
"'pdcsap_flux'",
"]",
")",
"lcdict",
"[",
"'sap'",
"]",
"[",
"'sap_flux'",
"]",
"=",
"(",
"lcdict",
"[",
"'sap'",
"]",
"[",
"'sap_flux'",
"]",
"/",
"sapflux_median",
")",
"lcdict",
"[",
"'sap'",
"]",
"[",
"'sap_flux_err'",
"]",
"=",
"(",
"lcdict",
"[",
"'sap'",
"]",
"[",
"'sap_flux_err'",
"]",
"/",
"sapflux_median",
")",
"lcdict",
"[",
"'sap'",
"]",
"[",
"'sap_bkg'",
"]",
"=",
"(",
"lcdict",
"[",
"'sap'",
"]",
"[",
"'sap_bkg'",
"]",
"/",
"sapflux_median",
")",
"lcdict",
"[",
"'sap'",
"]",
"[",
"'sap_bkg_err'",
"]",
"=",
"(",
"lcdict",
"[",
"'sap'",
"]",
"[",
"'sap_bkg_err'",
"]",
"/",
"sapflux_median",
")",
"lcdict",
"[",
"'pdc'",
"]",
"[",
"'pdcsap_flux'",
"]",
"=",
"(",
"lcdict",
"[",
"'pdc'",
"]",
"[",
"'pdcsap_flux'",
"]",
"/",
"pdcsap_flux_median",
")",
"lcdict",
"[",
"'pdc'",
"]",
"[",
"'pdcsap_flux_err'",
"]",
"=",
"(",
"lcdict",
"[",
"'pdc'",
"]",
"[",
"'pdcsap_flux_err'",
"]",
"/",
"pdcsap_flux_median",
")",
"## END OF LIGHT CURVE CONSTRUCTION ##",
"# update the lcdict columns with the actual columns",
"lcdict",
"[",
"'columns'",
"]",
"=",
"(",
"[",
"x",
".",
"lower",
"(",
")",
"for",
"x",
"in",
"datakeys",
"]",
"+",
"[",
"'sap.%s'",
"%",
"x",
".",
"lower",
"(",
")",
"for",
"x",
"in",
"sapkeys",
"]",
"+",
"[",
"'pdc.%s'",
"%",
"x",
".",
"lower",
"(",
")",
"for",
"x",
"in",
"pdckeys",
"]",
"+",
"[",
"'exptime'",
",",
"'sector'",
",",
"'camera'",
",",
"'ccd'",
",",
"'pixel_table_id'",
",",
"'origin'",
",",
"'date_obs_start'",
",",
"'date_obs_end'",
",",
"'procversion'",
",",
"'datarelease'",
"]",
")",
"# update the ndet key in the objectinfo with the sum of all observations",
"lcdict",
"[",
"'objectinfo'",
"]",
"[",
"'ndet'",
"]",
"=",
"sum",
"(",
"lcdict",
"[",
"'lcinfo'",
"]",
"[",
"'ndet'",
"]",
")",
"# filter the LC dict if requested",
"if",
"(",
"filterqualityflags",
"is",
"not",
"False",
"or",
"nanfilter",
"is",
"not",
"None",
"or",
"timestoignore",
"is",
"not",
"None",
")",
":",
"lcdict",
"=",
"filter_tess_lcdict",
"(",
"lcdict",
",",
"filterqualityflags",
",",
"nanfilter",
"=",
"nanfilter",
",",
"timestoignore",
"=",
"timestoignore",
")",
"# return the lcdict at the end",
"return",
"lcdict"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
consolidate_tess_fitslc
|
This consolidates a list of LCs for a single TIC object.
NOTE: if light curve time arrays contain nans, these and their associated
measurements will be sorted to the end of the final combined arrays.
Parameters
----------
lclist : list of str, or str
`lclist` is either a list of actual light curve files or a string that
is valid for glob.glob to search for and generate a light curve list
based on the file glob. This is useful for consolidating LC FITS files
across different TESS sectors for a single TIC ID using a glob like
`*<TICID>*_lc.fits`.
normalize : bool
If True, then the light curve's SAP_FLUX and PDCSAP_FLUX measurements
will be normalized to 1.0 by dividing out the median flux for the
component light curve.
filterqualityflags : bool
If True, will remove any measurements that have non-zero quality flags
present. This usually indicates an issue with the instrument or
spacecraft.
nanfilter : {'sap,time','pdc,time','sap,pdc,time'} or None
Indicates the flux measurement type(s) to apply the filtering to.
timestoignore : list of tuples or None
This is of the form::
[(time1_start, time1_end), (time2_start, time2_end), ...]
and indicates the start and end times to mask out of the final
lcdict. Use this to remove anything that wasn't caught by the quality
flags.
headerkeys : list
A list of FITS header keys that will be extracted from the FITS light
curve file. These describe the observations. The default value for this
is given in `LCHEADERKEYS` above.
datakeys : list
A list of FITS column names that correspond to the auxiliary
measurements in the light curve. The default is `LCDATAKEYS` above.
sapkeys : list
A list of FITS column names that correspond to the SAP flux
measurements in the light curve. The default is `LCSAPKEYS` above.
pdckeys : list
A list of FITS column names that correspond to the PDC flux
measurements in the light curve. The default is `LCPDCKEYS` above.
topkeys : list
A list of FITS header keys that describe the object in the light
curve. The default is `LCTOPKEYS` above.
apkeys : list
A list of FITS header keys that describe the flux measurement apertures
used by the TESS pipeline. The default is `LCAPERTUREKEYS` above.
Returns
-------
lcdict
Returns an `lcdict` (this is useable by most astrobase functions for LC
processing).
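
A hedged usage sketch of this consolidator (the directory layout and TIC ID are hypothetical; the recursive ** pattern uses the Python >= 3.5 glob branch in the code below):

from astrobase.astrotess import consolidate_tess_fitslc

# Consolidate all sectors for one hypothetical TIC ID into a single lcdict.
lcd = consolidate_tess_fitslc('/data/tess/**/*0000000123456789*_lc.fits',
                              normalize=True,
                              nanfilter='sap,pdc,time')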
|
astrobase/astrotess.py
|
def consolidate_tess_fitslc(lclist,
normalize=True,
filterqualityflags=False,
nanfilter=None,
timestoignore=None,
headerkeys=LCHEADERKEYS,
datakeys=LCDATAKEYS,
sapkeys=LCSAPKEYS,
pdckeys=LCPDCKEYS,
topkeys=LCTOPKEYS,
apkeys=LCAPERTUREKEYS):
'''This consolidates a list of LCs for a single TIC object.
NOTE: if light curve time arrays contain nans, these and their associated
measurements will be sorted to the end of the final combined arrays.
Parameters
----------
lclist : list of str, or str
`lclist` is either a list of actual light curve files or a string that
is valid for glob.glob to search for and generate a light curve list
based on the file glob. This is useful for consolidating LC FITS files
across different TESS sectors for a single TIC ID using a glob like
`*<TICID>*_lc.fits`.
normalize : bool
If True, then the light curve's SAP_FLUX and PDCSAP_FLUX measurements
will be normalized to 1.0 by dividing out the median flux for the
component light curve.
filterqualityflags : bool
If True, will remove any measurements that have non-zero quality flags
present. This usually indicates an issue with the instrument or
spacecraft.
    nanfilter : {'sap,time','pdc,time','sap,pdc,time'} or None
Indicates the flux measurement type(s) to apply the filtering to.
timestoignore : list of tuples or None
This is of the form::
[(time1_start, time1_end), (time2_start, time2_end), ...]
and indicates the start and end times to mask out of the final
lcdict. Use this to remove anything that wasn't caught by the quality
flags.
headerkeys : list
A list of FITS header keys that will be extracted from the FITS light
curve file. These describe the observations. The default value for this
is given in `LCHEADERKEYS` above.
datakeys : list
A list of FITS column names that correspond to the auxiliary
measurements in the light curve. The default is `LCDATAKEYS` above.
sapkeys : list
A list of FITS column names that correspond to the SAP flux
measurements in the light curve. The default is `LCSAPKEYS` above.
pdckeys : list
A list of FITS column names that correspond to the PDC flux
measurements in the light curve. The default is `LCPDCKEYS` above.
topkeys : list
A list of FITS header keys that describe the object in the light
curve. The default is `LCTOPKEYS` above.
apkeys : list
A list of FITS header keys that describe the flux measurement apertures
used by the TESS pipeline. The default is `LCAPERTUREKEYS` above.
Returns
-------
lcdict
Returns an `lcdict` (this is useable by most astrobase functions for LC
processing).
'''
# if the lclist is a string, assume that we're passing in a fileglob
if isinstance(lclist, str):
if sys.version_info[:2] > (3,4):
matching = glob.glob(lclist,
recursive=True)
LOGINFO('found %s LCs: %r' % (len(matching), matching))
else:
lcfitsdir = os.path.dirname(lclist)
lcfitsfile = os.path.basename(lclist)
walker = os.walk(lcfitsdir)
matching = []
for root, dirs, _files in walker:
for sdir in dirs:
searchpath = os.path.join(root,
sdir,
lcfitsfile)
foundfiles = glob.glob(searchpath)
if foundfiles:
matching.extend(foundfiles)
LOGINFO(
'found %s in dir: %s' % (repr(foundfiles),
os.path.join(root,sdir))
)
if len(matching) == 0:
LOGERROR('could not find any TESS LC files matching glob: %s' %
lclist)
return None
# if the lclist is an actual list of LCs, then use it directly
else:
matching = lclist
# get the first file
consolidated = read_tess_fitslc(matching[0],
normalize=normalize,
                                    headerkeys=headerkeys,
                                    datakeys=datakeys,
                                    sapkeys=sapkeys,
                                    pdckeys=pdckeys,
                                    topkeys=topkeys,
                                    apkeys=apkeys)
# get the rest of the files
if len(matching) > 1:
for lcf in matching[1:]:
consolidated = read_tess_fitslc(lcf,
appendto=consolidated,
normalize=normalize,
                                            headerkeys=headerkeys,
                                            datakeys=datakeys,
                                            sapkeys=sapkeys,
                                            pdckeys=pdckeys,
                                            topkeys=topkeys,
                                            apkeys=apkeys)
# get the sort indices. we use time for the columns and sectors for the
# bits in lcinfo and varinfo
LOGINFO('sorting by time...')
# NOTE: nans in time will be sorted to the end of the array
finiteind = np.isfinite(consolidated['time'])
if np.sum(finiteind) < consolidated['time'].size:
LOGWARNING('some time values are nan! '
'measurements at these times will be '
'sorted to the end of the column arrays.')
# get the time sort index
column_sort_ind = np.argsort(consolidated['time'])
# sort the columns by time
for col in consolidated['columns']:
if '.' in col:
key, subkey = col.split('.')
consolidated[key][subkey] = (
consolidated[key][subkey][column_sort_ind]
)
else:
consolidated[col] = consolidated[col][column_sort_ind]
info_sort_ind = np.argsort(consolidated['lcinfo']['sector'])
# sort the keys in lcinfo
for key in consolidated['lcinfo']:
consolidated['lcinfo'][key] = (
np.array(consolidated['lcinfo'][key])[info_sort_ind].tolist()
)
# sort the keys in varinfo
for key in consolidated['varinfo']:
consolidated['varinfo'][key] = (
np.array(consolidated['varinfo'][key])[info_sort_ind].tolist()
)
# filter the LC dict if requested
# we do this at the end
if (filterqualityflags is not False or
nanfilter is not None or
timestoignore is not None):
consolidated = filter_tess_lcdict(consolidated,
filterqualityflags,
nanfilter=nanfilter,
timestoignore=timestoignore)
return consolidated
|
def consolidate_tess_fitslc(lclist,
normalize=True,
filterqualityflags=False,
nanfilter=None,
timestoignore=None,
headerkeys=LCHEADERKEYS,
datakeys=LCDATAKEYS,
sapkeys=LCSAPKEYS,
pdckeys=LCPDCKEYS,
topkeys=LCTOPKEYS,
apkeys=LCAPERTUREKEYS):
'''This consolidates a list of LCs for a single TIC object.
NOTE: if light curve time arrays contain nans, these and their associated
measurements will be sorted to the end of the final combined arrays.
Parameters
----------
lclist : list of str, or str
`lclist` is either a list of actual light curve files or a string that
is valid for glob.glob to search for and generate a light curve list
based on the file glob. This is useful for consolidating LC FITS files
across different TESS sectors for a single TIC ID using a glob like
`*<TICID>*_lc.fits`.
normalize : bool
If True, then the light curve's SAP_FLUX and PDCSAP_FLUX measurements
will be normalized to 1.0 by dividing out the median flux for the
component light curve.
filterqualityflags : bool
If True, will remove any measurements that have non-zero quality flags
present. This usually indicates an issue with the instrument or
spacecraft.
    nanfilter : {'sap,time','pdc,time','sap,pdc,time'} or None
Indicates the flux measurement type(s) to apply the filtering to.
timestoignore : list of tuples or None
This is of the form::
[(time1_start, time1_end), (time2_start, time2_end), ...]
and indicates the start and end times to mask out of the final
lcdict. Use this to remove anything that wasn't caught by the quality
flags.
headerkeys : list
A list of FITS header keys that will be extracted from the FITS light
curve file. These describe the observations. The default value for this
is given in `LCHEADERKEYS` above.
datakeys : list
A list of FITS column names that correspond to the auxiliary
measurements in the light curve. The default is `LCDATAKEYS` above.
sapkeys : list
A list of FITS column names that correspond to the SAP flux
measurements in the light curve. The default is `LCSAPKEYS` above.
pdckeys : list
A list of FITS column names that correspond to the PDC flux
measurements in the light curve. The default is `LCPDCKEYS` above.
topkeys : list
A list of FITS header keys that describe the object in the light
curve. The default is `LCTOPKEYS` above.
apkeys : list
A list of FITS header keys that describe the flux measurement apertures
used by the TESS pipeline. The default is `LCAPERTUREKEYS` above.
Returns
-------
lcdict
Returns an `lcdict` (this is useable by most astrobase functions for LC
processing).
'''
# if the lclist is a string, assume that we're passing in a fileglob
if isinstance(lclist, str):
if sys.version_info[:2] > (3,4):
matching = glob.glob(lclist,
recursive=True)
LOGINFO('found %s LCs: %r' % (len(matching), matching))
else:
lcfitsdir = os.path.dirname(lclist)
lcfitsfile = os.path.basename(lclist)
walker = os.walk(lcfitsdir)
matching = []
for root, dirs, _files in walker:
for sdir in dirs:
searchpath = os.path.join(root,
sdir,
lcfitsfile)
foundfiles = glob.glob(searchpath)
if foundfiles:
matching.extend(foundfiles)
LOGINFO(
'found %s in dir: %s' % (repr(foundfiles),
os.path.join(root,sdir))
)
if len(matching) == 0:
LOGERROR('could not find any TESS LC files matching glob: %s' %
lclist)
return None
# if the lclist is an actual list of LCs, then use it directly
else:
matching = lclist
# get the first file
consolidated = read_tess_fitslc(matching[0],
normalize=normalize,
                                    headerkeys=headerkeys,
                                    datakeys=datakeys,
                                    sapkeys=sapkeys,
                                    pdckeys=pdckeys,
                                    topkeys=topkeys,
                                    apkeys=apkeys)
# get the rest of the files
if len(matching) > 1:
for lcf in matching[1:]:
consolidated = read_tess_fitslc(lcf,
appendto=consolidated,
normalize=normalize,
                                            headerkeys=headerkeys,
                                            datakeys=datakeys,
                                            sapkeys=sapkeys,
                                            pdckeys=pdckeys,
                                            topkeys=topkeys,
                                            apkeys=apkeys)
# get the sort indices. we use time for the columns and sectors for the
# bits in lcinfo and varinfo
LOGINFO('sorting by time...')
# NOTE: nans in time will be sorted to the end of the array
finiteind = np.isfinite(consolidated['time'])
if np.sum(finiteind) < consolidated['time'].size:
LOGWARNING('some time values are nan! '
'measurements at these times will be '
'sorted to the end of the column arrays.')
# get the time sort index
column_sort_ind = np.argsort(consolidated['time'])
# sort the columns by time
for col in consolidated['columns']:
if '.' in col:
key, subkey = col.split('.')
consolidated[key][subkey] = (
consolidated[key][subkey][column_sort_ind]
)
else:
consolidated[col] = consolidated[col][column_sort_ind]
info_sort_ind = np.argsort(consolidated['lcinfo']['sector'])
# sort the keys in lcinfo
for key in consolidated['lcinfo']:
consolidated['lcinfo'][key] = (
np.array(consolidated['lcinfo'][key])[info_sort_ind].tolist()
)
# sort the keys in varinfo
for key in consolidated['varinfo']:
consolidated['varinfo'][key] = (
np.array(consolidated['varinfo'][key])[info_sort_ind].tolist()
)
# filter the LC dict if requested
# we do this at the end
if (filterqualityflags is not False or
nanfilter is not None or
timestoignore is not None):
consolidated = filter_tess_lcdict(consolidated,
filterqualityflags,
nanfilter=nanfilter,
timestoignore=timestoignore)
return consolidated
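
The NOTE about nan times relies on numpy's argsort placing nans at the end of the sort order; a quick self-contained check:

import numpy as np

t = np.array([2.0, np.nan, 1.0, 3.0])
print(t[np.argsort(t)])  # [ 1.  2.  3. nan]: nans sort to the end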
|
[
"This",
"consolidates",
"a",
"list",
"of",
"LCs",
"for",
"a",
"single",
"TIC",
"object",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/astrotess.py#L798-L996
|
[
"def",
"consolidate_tess_fitslc",
"(",
"lclist",
",",
"normalize",
"=",
"True",
",",
"filterqualityflags",
"=",
"False",
",",
"nanfilter",
"=",
"None",
",",
"timestoignore",
"=",
"None",
",",
"headerkeys",
"=",
"LCHEADERKEYS",
",",
"datakeys",
"=",
"LCDATAKEYS",
",",
"sapkeys",
"=",
"LCSAPKEYS",
",",
"pdckeys",
"=",
"LCPDCKEYS",
",",
"topkeys",
"=",
"LCTOPKEYS",
",",
"apkeys",
"=",
"LCAPERTUREKEYS",
")",
":",
"# if the lclist is a string, assume that we're passing in a fileglob",
"if",
"isinstance",
"(",
"lclist",
",",
"str",
")",
":",
"if",
"sys",
".",
"version_info",
"[",
":",
"2",
"]",
">",
"(",
"3",
",",
"4",
")",
":",
"matching",
"=",
"glob",
".",
"glob",
"(",
"lclist",
",",
"recursive",
"=",
"True",
")",
"LOGINFO",
"(",
"'found %s LCs: %r'",
"%",
"(",
"len",
"(",
"matching",
")",
",",
"matching",
")",
")",
"else",
":",
"lcfitsdir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"lclist",
")",
"lcfitsfile",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"lclist",
")",
"walker",
"=",
"os",
".",
"walk",
"(",
"lcfitsdir",
")",
"matching",
"=",
"[",
"]",
"for",
"root",
",",
"dirs",
",",
"_files",
"in",
"walker",
":",
"for",
"sdir",
"in",
"dirs",
":",
"searchpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"sdir",
",",
"lcfitsfile",
")",
"foundfiles",
"=",
"glob",
".",
"glob",
"(",
"searchpath",
")",
"if",
"foundfiles",
":",
"matching",
".",
"extend",
"(",
"foundfiles",
")",
"LOGINFO",
"(",
"'found %s in dir: %s'",
"%",
"(",
"repr",
"(",
"foundfiles",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"sdir",
")",
")",
")",
"if",
"len",
"(",
"matching",
")",
"==",
"0",
":",
"LOGERROR",
"(",
"'could not find any TESS LC files matching glob: %s'",
"%",
"lclist",
")",
"return",
"None",
"# if the lclist is an actual list of LCs, then use it directly",
"else",
":",
"matching",
"=",
"lclist",
"# get the first file",
"consolidated",
"=",
"read_tess_fitslc",
"(",
"matching",
"[",
"0",
"]",
",",
"normalize",
"=",
"normalize",
",",
"headerkeys",
"=",
"LCHEADERKEYS",
",",
"datakeys",
"=",
"LCDATAKEYS",
",",
"sapkeys",
"=",
"LCSAPKEYS",
",",
"pdckeys",
"=",
"LCPDCKEYS",
",",
"topkeys",
"=",
"LCTOPKEYS",
",",
"apkeys",
"=",
"LCAPERTUREKEYS",
")",
"# get the rest of the files",
"if",
"len",
"(",
"matching",
")",
">",
"1",
":",
"for",
"lcf",
"in",
"matching",
"[",
"1",
":",
"]",
":",
"consolidated",
"=",
"read_tess_fitslc",
"(",
"lcf",
",",
"appendto",
"=",
"consolidated",
",",
"normalize",
"=",
"normalize",
",",
"headerkeys",
"=",
"LCHEADERKEYS",
",",
"datakeys",
"=",
"LCDATAKEYS",
",",
"sapkeys",
"=",
"LCSAPKEYS",
",",
"pdckeys",
"=",
"LCPDCKEYS",
",",
"topkeys",
"=",
"LCTOPKEYS",
",",
"apkeys",
"=",
"LCAPERTUREKEYS",
")",
"# get the sort indices. we use time for the columns and sectors for the",
"# bits in lcinfo and varinfo",
"LOGINFO",
"(",
"'sorting by time...'",
")",
"# NOTE: nans in time will be sorted to the end of the array",
"finiteind",
"=",
"np",
".",
"isfinite",
"(",
"consolidated",
"[",
"'time'",
"]",
")",
"if",
"np",
".",
"sum",
"(",
"finiteind",
")",
"<",
"consolidated",
"[",
"'time'",
"]",
".",
"size",
":",
"LOGWARNING",
"(",
"'some time values are nan! '",
"'measurements at these times will be '",
"'sorted to the end of the column arrays.'",
")",
"# get the time sort index",
"column_sort_ind",
"=",
"np",
".",
"argsort",
"(",
"consolidated",
"[",
"'time'",
"]",
")",
"# sort the columns by time",
"for",
"col",
"in",
"consolidated",
"[",
"'columns'",
"]",
":",
"if",
"'.'",
"in",
"col",
":",
"key",
",",
"subkey",
"=",
"col",
".",
"split",
"(",
"'.'",
")",
"consolidated",
"[",
"key",
"]",
"[",
"subkey",
"]",
"=",
"(",
"consolidated",
"[",
"key",
"]",
"[",
"subkey",
"]",
"[",
"column_sort_ind",
"]",
")",
"else",
":",
"consolidated",
"[",
"col",
"]",
"=",
"consolidated",
"[",
"col",
"]",
"[",
"column_sort_ind",
"]",
"info_sort_ind",
"=",
"np",
".",
"argsort",
"(",
"consolidated",
"[",
"'lcinfo'",
"]",
"[",
"'sector'",
"]",
")",
"# sort the keys in lcinfo",
"for",
"key",
"in",
"consolidated",
"[",
"'lcinfo'",
"]",
":",
"consolidated",
"[",
"'lcinfo'",
"]",
"[",
"key",
"]",
"=",
"(",
"np",
".",
"array",
"(",
"consolidated",
"[",
"'lcinfo'",
"]",
"[",
"key",
"]",
")",
"[",
"info_sort_ind",
"]",
".",
"tolist",
"(",
")",
")",
"# sort the keys in varinfo",
"for",
"key",
"in",
"consolidated",
"[",
"'varinfo'",
"]",
":",
"consolidated",
"[",
"'varinfo'",
"]",
"[",
"key",
"]",
"=",
"(",
"np",
".",
"array",
"(",
"consolidated",
"[",
"'varinfo'",
"]",
"[",
"key",
"]",
")",
"[",
"info_sort_ind",
"]",
".",
"tolist",
"(",
")",
")",
"# filter the LC dict if requested",
"# we do this at the end",
"if",
"(",
"filterqualityflags",
"is",
"not",
"False",
"or",
"nanfilter",
"is",
"not",
"None",
"or",
"timestoignore",
"is",
"not",
"None",
")",
":",
"consolidated",
"=",
"filter_tess_lcdict",
"(",
"consolidated",
",",
"filterqualityflags",
",",
"nanfilter",
"=",
"nanfilter",
",",
"timestoignore",
"=",
"timestoignore",
")",
"return",
"consolidated"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
filter_tess_lcdict
|
This filters the provided TESS `lcdict`, removing nans and bad
observations.
By default, this function removes points in the TESS LC that have ANY
quality flags set.
Parameters
----------
lcdict : lcdict
An `lcdict` produced by `consolidate_tess_fitslc` or
`read_tess_fitslc`.
filterqualityflags : bool
If True, will remove any measurements that have non-zero quality flags
present. This usually indicates an issue with the instrument or
spacecraft.
nanfilter : {'sap,time','pdc,time','sap,pdc,time'} or None
Indicates the flux measurement type(s) to apply the filtering to.
timestoignore : list of tuples or None
This is of the form::
[(time1_start, time1_end), (time2_start, time2_end), ...]
and indicates the start and end times to mask out of the final
lcdict. Use this to remove anything that wasn't caught by the quality
flags.
quiet : bool
    If True, suppresses the informational log messages about the filtering.
Returns
-------
lcdict
Returns an `lcdict` (this is useable by most astrobase functions for LC
processing). The `lcdict` is filtered IN PLACE!
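
A hedged usage sketch (the time windows are hypothetical BTJD values, and lcd is an lcdict from read_tess_fitslc or consolidate_tess_fitslc):

# Drop flagged cadences, nan rows, and two hypothetical time windows in place.
lcd = filter_tess_lcdict(lcd,
                         filterqualityflags=True,
                         nanfilter='sap,pdc,time',
                         timestoignore=[(1338.5, 1339.0), (1346.0, 1350.0)])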
|
astrobase/astrotess.py
|
def filter_tess_lcdict(lcdict,
filterqualityflags=True,
nanfilter='sap,pdc,time',
timestoignore=None,
quiet=False):
'''This filters the provided TESS `lcdict`, removing nans and bad
observations.
By default, this function removes points in the TESS LC that have ANY
quality flags set.
Parameters
----------
lcdict : lcdict
An `lcdict` produced by `consolidate_tess_fitslc` or
`read_tess_fitslc`.
    filterqualityflags : bool
If True, will remove any measurements that have non-zero quality flags
present. This usually indicates an issue with the instrument or
spacecraft.
nanfilter : {'sap,pdc,time','sap,time','pdc,time'} or None
Indicates the flux measurement type(s) to apply the nan filtering to; if None, no nan filtering is done.
timestoignore : list of tuples or None
This is of the form::
[(time1_start, time1_end), (time2_start, time2_end), ...]
and indicates the start and end times to mask out of the final
lcdict. Use this to remove anything that wasn't caught by the quality
flags.
Returns
-------
lcdict
Returns an `lcdict` (this is useable by most astrobase functions for LC
processing). The `lcdict` is filtered IN PLACE!
'''
cols = lcdict['columns']
# filter all bad LC points as noted by quality flags
if filterqualityflags:
nbefore = lcdict['time'].size
filterind = lcdict['quality'] == 0
for col in cols:
if '.' in col:
key, subkey = col.split('.')
lcdict[key][subkey] = lcdict[key][subkey][filterind]
else:
lcdict[col] = lcdict[col][filterind]
nafter = lcdict['time'].size
if not quiet:
LOGINFO('applied quality flag filter, '
'ndet before = %s, ndet after = %s'
% (nbefore, nafter))
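# build a boolean mask of cadences that are finite in every requested
# flux/time column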
if nanfilter and nanfilter == 'sap,pdc,time':
notnanind = (
np.isfinite(lcdict['sap']['sap_flux']) &
np.isfinite(lcdict['sap']['sap_flux_err']) &
np.isfinite(lcdict['pdc']['pdcsap_flux']) &
np.isfinite(lcdict['pdc']['pdcsap_flux_err']) &
np.isfinite(lcdict['time'])
)
elif nanfilter and nanfilter == 'sap,time':
notnanind = (
np.isfinite(lcdict['sap']['sap_flux']) &
np.isfinite(lcdict['sap']['sap_flux_err']) &
np.isfinite(lcdict['time'])
)
elif nanfilter and nanfilter == 'pdc,time':
notnanind = (
np.isfinite(lcdict['pdc']['pdcsap_flux']) &
np.isfinite(lcdict['pdc']['pdcsap_flux_err']) &
np.isfinite(lcdict['time'])
)
elif nanfilter is None:
pass
else:
raise NotImplementedError
# remove nans from all columns
if nanfilter:
nbefore = lcdict['time'].size
for col in cols:
if '.' in col:
key, subkey = col.split('.')
lcdict[key][subkey] = lcdict[key][subkey][notnanind]
else:
lcdict[col] = lcdict[col][notnanind]
nafter = lcdict['time'].size
if not quiet:
LOGINFO('removed nans, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
# exclude all times in timestoignore
if (timestoignore and
isinstance(timestoignore, list) and
len(timestoignore) > 0):
exclind = np.full_like(lcdict['time'],True).astype(bool)
nbefore = exclind.size
# get all the masks
for ignoretime in timestoignore:
time0, time1 = ignoretime[0], ignoretime[1]
thismask = ~((lcdict['time'] >= time0) & (lcdict['time'] <= time1))
exclind = exclind & thismask
# apply the masks
for col in cols:
if '.' in col:
key, subkey = col.split('.')
lcdict[key][subkey] = lcdict[key][subkey][exclind]
else:
lcdict[col] = lcdict[col][exclind]
nafter = lcdict['time'].size
if not quiet:
LOGINFO('removed timestoignore, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
return lcdict
|
def filter_tess_lcdict(lcdict,
filterqualityflags=True,
nanfilter='sap,pdc,time',
timestoignore=None,
quiet=False):
'''This filters the provided TESS `lcdict`, removing nans and bad
observations.
By default, this function removes points in the TESS LC that have ANY
quality flags set.
Parameters
----------
lcdict : lcdict
An `lcdict` produced by `consolidate_tess_fitslc` or
`read_tess_fitslc`.
filterqualityflags : bool
If True, will remove any measurements that have non-zero quality flags
present. This usually indicates an issue with the instrument or
spacecraft.
nanfilter : {'sap,pdc,time','sap,time','pdc,time'} or None
Indicates the flux measurement type(s) to apply the nan filtering to; if None, no nan filtering is done.
timestoignore : list of tuples or None
This is of the form::
[(time1_start, time1_end), (time2_start, time2_end), ...]
and indicates the start and end times to mask out of the final
lcdict. Use this to remove anything that wasn't caught by the quality
flags.
Returns
-------
lcdict
Returns an `lcdict` (this is useable by most astrobase functions for LC
processing). The `lcdict` is filtered IN PLACE!
'''
cols = lcdict['columns']
# filter all bad LC points as noted by quality flags
if filterqualityflags:
nbefore = lcdict['time'].size
filterind = lcdict['quality'] == 0
for col in cols:
if '.' in col:
key, subkey = col.split('.')
lcdict[key][subkey] = lcdict[key][subkey][filterind]
else:
lcdict[col] = lcdict[col][filterind]
nafter = lcdict['time'].size
if not quiet:
LOGINFO('applied quality flag filter, '
'ndet before = %s, ndet after = %s'
% (nbefore, nafter))
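# build a boolean mask of cadences that are finite in every requested
# flux/time column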
if nanfilter and nanfilter == 'sap,pdc,time':
notnanind = (
np.isfinite(lcdict['sap']['sap_flux']) &
np.isfinite(lcdict['sap']['sap_flux_err']) &
np.isfinite(lcdict['pdc']['pdcsap_flux']) &
np.isfinite(lcdict['pdc']['pdcsap_flux_err']) &
np.isfinite(lcdict['time'])
)
elif nanfilter and nanfilter == 'sap,time':
notnanind = (
np.isfinite(lcdict['sap']['sap_flux']) &
np.isfinite(lcdict['sap']['sap_flux_err']) &
np.isfinite(lcdict['time'])
)
elif nanfilter and nanfilter == 'pdc,time':
notnanind = (
np.isfinite(lcdict['pdc']['pdcsap_flux']) &
np.isfinite(lcdict['pdc']['pdcsap_flux_err']) &
np.isfinite(lcdict['time'])
)
elif nanfilter is None:
pass
else:
raise NotImplementedError
# remove nans from all columns
if nanfilter:
nbefore = lcdict['time'].size
for col in cols:
if '.' in col:
key, subkey = col.split('.')
lcdict[key][subkey] = lcdict[key][subkey][notnanind]
else:
lcdict[col] = lcdict[col][notnanind]
nafter = lcdict['time'].size
if not quiet:
LOGINFO('removed nans, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
# exclude all times in timestoignore
if (timestoignore and
isinstance(timestoignore, list) and
len(timestoignore) > 0):
exclind = np.full_like(lcdict['time'],True).astype(bool)
nbefore = exclind.size
# get all the masks
for ignoretime in timestoignore:
time0, time1 = ignoretime[0], ignoretime[1]
thismask = ~((lcdict['time'] >= time0) & (lcdict['time'] <= time1))
exclind = exclind & thismask
# apply the masks
for col in cols:
if '.' in col:
key, subkey = col.split('.')
lcdict[key][subkey] = lcdict[key][subkey][exclind]
else:
lcdict[col] = lcdict[col][exclind]
nafter = lcdict['time'].size
if not quiet:
LOGINFO('removed timestoignore, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
return lcdict
|
[
"This",
"filters",
"the",
"provided",
"TESS",
"lcdict",
"removing",
"nans",
"and",
"bad",
"observations",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/astrotess.py#L1085-L1221
|
[
"def",
"filter_tess_lcdict",
"(",
"lcdict",
",",
"filterqualityflags",
"=",
"True",
",",
"nanfilter",
"=",
"'sap,pdc,time'",
",",
"timestoignore",
"=",
"None",
",",
"quiet",
"=",
"False",
")",
":",
"cols",
"=",
"lcdict",
"[",
"'columns'",
"]",
"# filter all bad LC points as noted by quality flags",
"if",
"filterqualityflags",
":",
"nbefore",
"=",
"lcdict",
"[",
"'time'",
"]",
".",
"size",
"filterind",
"=",
"lcdict",
"[",
"'quality'",
"]",
"==",
"0",
"for",
"col",
"in",
"cols",
":",
"if",
"'.'",
"in",
"col",
":",
"key",
",",
"subkey",
"=",
"col",
".",
"split",
"(",
"'.'",
")",
"lcdict",
"[",
"key",
"]",
"[",
"subkey",
"]",
"=",
"lcdict",
"[",
"key",
"]",
"[",
"subkey",
"]",
"[",
"filterind",
"]",
"else",
":",
"lcdict",
"[",
"col",
"]",
"=",
"lcdict",
"[",
"col",
"]",
"[",
"filterind",
"]",
"nafter",
"=",
"lcdict",
"[",
"'time'",
"]",
".",
"size",
"if",
"not",
"quiet",
":",
"LOGINFO",
"(",
"'applied quality flag filter, '",
"'ndet before = %s, ndet after = %s'",
"%",
"(",
"nbefore",
",",
"nafter",
")",
")",
"if",
"nanfilter",
"and",
"nanfilter",
"==",
"'sap,pdc,time'",
":",
"notnanind",
"=",
"(",
"np",
".",
"isfinite",
"(",
"lcdict",
"[",
"'sap'",
"]",
"[",
"'sap_flux'",
"]",
")",
"&",
"np",
".",
"isfinite",
"(",
"lcdict",
"[",
"'sap'",
"]",
"[",
"'sap_flux_err'",
"]",
")",
"&",
"np",
".",
"isfinite",
"(",
"lcdict",
"[",
"'pdc'",
"]",
"[",
"'pdcsap_flux'",
"]",
")",
"&",
"np",
".",
"isfinite",
"(",
"lcdict",
"[",
"'pdc'",
"]",
"[",
"'pdcsap_flux_err'",
"]",
")",
"&",
"np",
".",
"isfinite",
"(",
"lcdict",
"[",
"'time'",
"]",
")",
")",
"elif",
"nanfilter",
"and",
"nanfilter",
"==",
"'sap,time'",
":",
"notnanind",
"=",
"(",
"np",
".",
"isfinite",
"(",
"lcdict",
"[",
"'sap'",
"]",
"[",
"'sap_flux'",
"]",
")",
"&",
"np",
".",
"isfinite",
"(",
"lcdict",
"[",
"'sap'",
"]",
"[",
"'sap_flux_err'",
"]",
")",
"&",
"np",
".",
"isfinite",
"(",
"lcdict",
"[",
"'time'",
"]",
")",
")",
"elif",
"nanfilter",
"and",
"nanfilter",
"==",
"'pdc,time'",
":",
"notnanind",
"=",
"(",
"np",
".",
"isfinite",
"(",
"lcdict",
"[",
"'pdc'",
"]",
"[",
"'pdcsap_flux'",
"]",
")",
"&",
"np",
".",
"isfinite",
"(",
"lcdict",
"[",
"'pdc'",
"]",
"[",
"'pdcsap_flux_err'",
"]",
")",
"&",
"np",
".",
"isfinite",
"(",
"lcdict",
"[",
"'time'",
"]",
")",
")",
"elif",
"nanfilter",
"is",
"None",
":",
"pass",
"else",
":",
"raise",
"NotImplementedError",
"# remove nans from all columns",
"if",
"nanfilter",
":",
"nbefore",
"=",
"lcdict",
"[",
"'time'",
"]",
".",
"size",
"for",
"col",
"in",
"cols",
":",
"if",
"'.'",
"in",
"col",
":",
"key",
",",
"subkey",
"=",
"col",
".",
"split",
"(",
"'.'",
")",
"lcdict",
"[",
"key",
"]",
"[",
"subkey",
"]",
"=",
"lcdict",
"[",
"key",
"]",
"[",
"subkey",
"]",
"[",
"notnanind",
"]",
"else",
":",
"lcdict",
"[",
"col",
"]",
"=",
"lcdict",
"[",
"col",
"]",
"[",
"notnanind",
"]",
"nafter",
"=",
"lcdict",
"[",
"'time'",
"]",
".",
"size",
"if",
"not",
"quiet",
":",
"LOGINFO",
"(",
"'removed nans, ndet before = %s, ndet after = %s'",
"%",
"(",
"nbefore",
",",
"nafter",
")",
")",
"# exclude all times in timestoignore",
"if",
"(",
"timestoignore",
"and",
"isinstance",
"(",
"timestoignore",
",",
"list",
")",
"and",
"len",
"(",
"timestoignore",
")",
">",
"0",
")",
":",
"exclind",
"=",
"np",
".",
"full_like",
"(",
"lcdict",
"[",
"'time'",
"]",
",",
"True",
")",
".",
"astype",
"(",
"bool",
")",
"nbefore",
"=",
"exclind",
".",
"size",
"# get all the masks",
"for",
"ignoretime",
"in",
"timestoignore",
":",
"time0",
",",
"time1",
"=",
"ignoretime",
"[",
"0",
"]",
",",
"ignoretime",
"[",
"1",
"]",
"thismask",
"=",
"~",
"(",
"(",
"lcdict",
"[",
"'time'",
"]",
">=",
"time0",
")",
"&",
"(",
"lcdict",
"[",
"'time'",
"]",
"<=",
"time1",
")",
")",
"exclind",
"=",
"exclind",
"&",
"thismask",
"# apply the masks",
"for",
"col",
"in",
"cols",
":",
"if",
"'.'",
"in",
"col",
":",
"key",
",",
"subkey",
"=",
"col",
".",
"split",
"(",
"'.'",
")",
"lcdict",
"[",
"key",
"]",
"[",
"subkey",
"]",
"=",
"lcdict",
"[",
"key",
"]",
"[",
"subkey",
"]",
"[",
"exclind",
"]",
"else",
":",
"lcdict",
"[",
"col",
"]",
"=",
"lcdict",
"[",
"col",
"]",
"[",
"exclind",
"]",
"nafter",
"=",
"lcdict",
"[",
"'time'",
"]",
".",
"size",
"if",
"not",
"quiet",
":",
"LOGINFO",
"(",
"'removed timestoignore, ndet before = %s, ndet after = %s'",
"%",
"(",
"nbefore",
",",
"nafter",
")",
")",
"return",
"lcdict"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
_pkl_finder_objectinfo
|
This returns the finder chart and object information as a dict.
Parameters
----------
objectinfo : dict or None
If provided, this is a dict containing information on the object whose
light curve is being processed. This function will then be able to look
up and download a finder chart for this object and write that to the
output checkplotdict. External services such as GAIA, SIMBAD, TIC@MAST,
etc. will also be used to look up this object by its coordinates, and
will add in information available from those services.
The `objectinfo` dict must be of the form and contain at least the keys
described below::
{'objectid': the name of the object,
'ra': the right ascension of the object in decimal degrees,
'decl': the declination of the object in decimal degrees,
'ndet': the number of observations of this object}
You can also provide magnitudes and proper motions of the object using
the following keys and the appropriate values in the `objectinfo`
dict. These will be used to calculate colors, total and reduced proper
motion, etc. and display these in the output checkplot PNG::
'pmra' -> the proper motion in mas/yr in right ascension,
'pmdecl' -> the proper motion in mas/yr in declination,
'umag' -> U mag -> colors: U-B, U-V, U-g
'bmag' -> B mag -> colors: U-B, B-V
'vmag' -> V mag -> colors: U-V, B-V, V-R, V-I, V-K
'rmag' -> R mag -> colors: V-R, R-I
'imag' -> I mag -> colors: g-I, V-I, R-I, B-I
'jmag' -> 2MASS J mag -> colors: J-H, J-K, g-J, i-J
'hmag' -> 2MASS H mag -> colors: J-H, H-K
'kmag' -> 2MASS Ks mag -> colors: g-Ks, H-Ks, J-Ks, V-Ks
'sdssu' -> SDSS u mag -> colors: u-g, u-V
'sdssg' -> SDSS g mag -> colors: g-r, g-i, g-K, u-g, U-g, g-J
'sdssr' -> SDSS r mag -> colors: r-i, g-r
'sdssi' -> SDSS i mag -> colors: r-i, i-z, g-i, i-J, i-W1
'sdssz' -> SDSS z mag -> colors: i-z, z-W2, g-z
'ujmag' -> UKIRT J mag -> colors: J-H, H-K, J-K, g-J, i-J
'uhmag' -> UKIRT H mag -> colors: J-H, H-K
'ukmag' -> UKIRT K mag -> colors: g-K, H-K, J-K, V-K
'irac1' -> Spitzer IRAC1 mag -> colors: i-I1, I1-I2
'irac2' -> Spitzer IRAC2 mag -> colors: I1-I2, I2-I3
'irac3' -> Spitzer IRAC3 mag -> colors: I2-I3
'irac4' -> Spitzer IRAC4 mag -> colors: I3-I4
'wise1' -> WISE W1 mag -> colors: i-W1, W1-W2
'wise2' -> WISE W2 mag -> colors: W1-W2, W2-W3
'wise3' -> WISE W3 mag -> colors: W2-W3
'wise4' -> WISE W4 mag -> colors: W3-W4
If you have magnitude measurements in other bands, use the
`custom_bandpasses` kwarg to pass these in.
If this is None, no object information will be incorporated into the
checkplot (kind of making it effectively useless for anything other than
glancing at the phased light curves at various 'best' periods from the
period-finder results).
varinfo : dict or None
If this is None, a blank dict of the form below will be added to the
checkplotdict::
{'objectisvar': None -> variability flag (None indicates unset),
'vartags': CSV str containing variability type tags from review,
'varisperiodic': None -> periodic variability flag (None -> unset),
'varperiod': the period associated with the periodic variability,
'varepoch': the epoch associated with the periodic variability}
If you provide a dict matching this format in this kwarg, this will be
passed unchanged to the output checkplotdict produced.
findercmap : str or matplotlib.cm.ColorMap object
The Colormap object to use for the finder chart image.
finderconvolve : astropy.convolution.Kernel object or None
If not None, the Kernel object to use for convolving the finder image.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
normto : {'globalmedian', 'zero'} or a float
This is specified as below::
'globalmedian' -> norms each mag to global median of the LC column
'zero' -> norms each mag to zero
a float -> norms each mag to this specified float value.
normmingap : float
This defines how much the difference between consecutive measurements is
allowed to be to consider them as parts of different timegroups. By
default it is set to 4.0 days.
deredden_object : bool
If this is True, will use the 2MASS DUST service to get extinction
coefficients in various bands, and then try to deredden the magnitudes
and colors of the object already present in the checkplot's objectinfo
dict.
custom_bandpasses : dict
This is a dict used to provide custom bandpass definitions for any
magnitude measurements in the objectinfo dict that are not automatically
recognized by :py:func:`astrobase.varclass.starfeatures.color_features`.
lclistpkl : dict or str
If this is provided, must be a dict resulting from reading a catalog
produced by the `lcproc.catalogs.make_lclist` function or a str path
pointing to the pickle file produced by that function. This catalog is
used to find neighbors of the current object in the current light curve
collection. Looking at neighbors of the object within the radius
specified by `nbrradiusarcsec` is useful for light curves produced by
instruments that have a large pixel scale, so are susceptible to
blending of variability and potential confusion of neighbor variability
with that of the actual object being looked at. If this is None, no
neighbor lookups will be performed.
nbrradiusarcsec : float
The radius in arcseconds to use for a search conducted around the
coordinates of this object to look for any potential confusion and
blending of variability amplitude caused by their proximity.
maxnumneighbors : int
The maximum number of neighbors that will have their light curves and
magnitudes noted in this checkplot as potential blends with the target
object.
plotdpi : int
The resolution in DPI of the plots to generate in this function
(e.g. the finder chart, etc.)
findercachedir : str
The path to the astrobase cache directory for finder chart downloads
from the NASA SkyView service.
verbose : bool
If True, will indicate progress and warn about potential problems.
gaia_submit_timeout : float
Sets the timeout in seconds to use when submitting a request to look up
the object's information to the GAIA service. Note that if `fast_mode`
is set, this is ignored.
gaia_submit_tries : int
Sets the maximum number of times the GAIA services will be contacted to
obtain this object's information. If `fast_mode` is set, this is
ignored, and the services will be contacted only once (meaning that a
failure to respond will be silently ignored and no GAIA data will be
added to the checkplot's objectinfo dict).
gaia_max_timeout : float
Sets the timeout in seconds to use when waiting for the GAIA service to
respond to our request for the object's information. Note that if
`fast_mode` is set, this is ignored.
gaia_mirror : str
This sets the GAIA mirror to use. This is a key in the
`services.gaia.GAIA_URLS` dict which defines the URLs to hit for each
mirror.
fast_mode : bool or float
This runs the external catalog operations in a "fast" mode, with short
timeouts and not trying to hit external catalogs that take a long time
to respond.
If this is set to True, the default settings for the external requests
will then become::
skyview_lookup = False
skyview_timeout = 10.0
skyview_retry_failed = False
dust_timeout = 10.0
gaia_submit_timeout = 7.0
gaia_max_timeout = 10.0
gaia_submit_tries = 2
complete_query_later = False
search_simbad = False
If this is a float, will run in "fast" mode with the provided timeout
value in seconds and the following settings::
skyview_lookup = True
skyview_timeout = fast_mode
skyview_retry_failed = False
dust_timeout = fast_mode
gaia_submit_timeout = 0.66*fast_mode
gaia_max_timeout = fast_mode
gaia_submit_tries = 2
complete_query_later = False
search_simbad = False
complete_query_later : bool
If this is True, saves the state of GAIA queries that are not yet
complete when `gaia_max_timeout` is reached while waiting for the GAIA
service to respond to our request. A later call for GAIA info on the
same object will attempt to pick up the results from the existing query
if it's completed. If `fast_mode` is True, this is ignored.
Returns
-------
dict
A checkplotdict is returned containing the objectinfo and varinfo dicts,
ready to use with the functions below to add in light curve plots,
phased LC plots, xmatch info, etc.
|
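A hedged sketch of the minimal call described above (the object id, coordinates, and detection count are all invented; the positional arguments follow the signature shown in the code below):

from astrobase.checkplot.pkl_utils import _pkl_finder_objectinfo

# minimal objectinfo dict per the docstring; all values are made up
objectinfo = {'objectid': 'OBJ-0001',
              'ra': 289.217,
              'decl': -45.113,
              'ndet': 18000}

cpd = _pkl_finder_objectinfo(
    objectinfo,
    None,            # varinfo: a blank dict will be filled in
    'gray_r',        # findercmap: any matplotlib colormap name
    None,            # finderconvolve: no convolution kernel
    10.0,            # sigclip: symmetric 10-sigma clip
    'globalmedian',  # normto
    4.0,             # normmingap (days)
    fast_mode=True,  # skip slow external catalog lookups
)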
astrobase/checkplot/pkl_utils.py
|
def _pkl_finder_objectinfo(
objectinfo,
varinfo,
findercmap,
finderconvolve,
sigclip,
normto,
normmingap,
deredden_object=True,
custom_bandpasses=None,
lclistpkl=None,
nbrradiusarcsec=30.0,
maxnumneighbors=5,
plotdpi=100,
findercachedir='~/.astrobase/stamp-cache',
verbose=True,
gaia_submit_timeout=10.0,
gaia_submit_tries=3,
gaia_max_timeout=180.0,
gaia_mirror=None,
fast_mode=False,
complete_query_later=True
):
'''This returns the finder chart and object information as a dict.
Parameters
----------
objectinfo : dict or None
If provided, this is a dict containing information on the object whose
light curve is being processed. This function will then be able to look
up and download a finder chart for this object and write that to the
output checkplotdict. External services such as GAIA, SIMBAD, TIC@MAST,
etc. will also be used to look up this object by its coordinates, and
will add in information available from those services.
The `objectinfo` dict must be of the form and contain at least the keys
described below::
{'objectid': the name of the object,
'ra': the right ascension of the object in decimal degrees,
'decl': the declination of the object in decimal degrees,
'ndet': the number of observations of this object}
You can also provide magnitudes and proper motions of the object using
the following keys and the appropriate values in the `objectinfo`
dict. These will be used to calculate colors, total and reduced proper
motion, etc. and display these in the output checkplot PNG::
'pmra' -> the proper motion in mas/yr in right ascension,
'pmdecl' -> the proper motion in mas/yr in declination,
'umag' -> U mag -> colors: U-B, U-V, U-g
'bmag' -> B mag -> colors: U-B, B-V
'vmag' -> V mag -> colors: U-V, B-V, V-R, V-I, V-K
'rmag' -> R mag -> colors: V-R, R-I
'imag' -> I mag -> colors: g-I, V-I, R-I, B-I
'jmag' -> 2MASS J mag -> colors: J-H, J-K, g-J, i-J
'hmag' -> 2MASS H mag -> colors: J-H, H-K
'kmag' -> 2MASS Ks mag -> colors: g-Ks, H-Ks, J-Ks, V-Ks
'sdssu' -> SDSS u mag -> colors: u-g, u-V
'sdssg' -> SDSS g mag -> colors: g-r, g-i, g-K, u-g, U-g, g-J
'sdssr' -> SDSS r mag -> colors: r-i, g-r
'sdssi' -> SDSS i mag -> colors: r-i, i-z, g-i, i-J, i-W1
'sdssz' -> SDSS z mag -> colors: i-z, z-W2, g-z
'ujmag' -> UKIRT J mag -> colors: J-H, H-K, J-K, g-J, i-J
'uhmag' -> UKIRT H mag -> colors: J-H, H-K
'ukmag' -> UKIRT K mag -> colors: g-K, H-K, J-K, V-K
'irac1' -> Spitzer IRAC1 mag -> colors: i-I1, I1-I2
'irac2' -> Spitzer IRAC2 mag -> colors: I1-I2, I2-I3
'irac3' -> Spitzer IRAC3 mag -> colors: I2-I3
'irac4' -> Spitzer IRAC4 mag -> colors: I3-I4
'wise1' -> WISE W1 mag -> colors: i-W1, W1-W2
'wise2' -> WISE W2 mag -> colors: W1-W2, W2-W3
'wise3' -> WISE W3 mag -> colors: W2-W3
'wise4' -> WISE W4 mag -> colors: W3-W4
If you have magnitude measurements in other bands, use the
`custom_bandpasses` kwarg to pass these in.
If this is None, no object information will be incorporated into the
checkplot (kind of making it effectively useless for anything other than
glancing at the phased light curves at various 'best' periods from the
period-finder results).
varinfo : dict or None
If this is None, a blank dict of the form below will be added to the
checkplotdict::
{'objectisvar': None -> variability flag (None indicates unset),
'vartags': CSV str containing variability type tags from review,
'varisperiodic': None -> periodic variability flag (None -> unset),
'varperiod': the period associated with the periodic variability,
'varepoch': the epoch associated with the periodic variability}
If you provide a dict matching this format in this kwarg, this will be
passed unchanged to the output checkplotdict produced.
findercmap : str or matplotlib.cm.ColorMap object
The Colormap object to use for the finder chart image.
finderconvolve : astropy.convolution.Kernel object or None
If not None, the Kernel object to use for convolving the finder image.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
normto : {'globalmedian', 'zero'} or a float
This is specified as below::
'globalmedian' -> norms each mag to global median of the LC column
'zero' -> norms each mag to zero
a float -> norms each mag to this specified float value.
normmingap : float
This defines how much the difference between consecutive measurements is
allowed to be to consider them as parts of different timegroups. By
default it is set to 4.0 days.
deredden_object : bool
If this is True, will use the 2MASS DUST service to get extinction
coefficients in various bands, and then try to deredden the magnitudes
and colors of the object already present in the checkplot's objectinfo
dict.
custom_bandpasses : dict
This is a dict used to provide custom bandpass definitions for any
magnitude measurements in the objectinfo dict that are not automatically
recognized by :py:func:`astrobase.varclass.starfeatures.color_features`.
lclistpkl : dict or str
If this is provided, must be a dict resulting from reading a catalog
produced by the `lcproc.catalogs.make_lclist` function or a str path
pointing to the pickle file produced by that function. This catalog is
used to find neighbors of the current object in the current light curve
collection. Looking at neighbors of the object within the radius
specified by `nbrradiusarcsec` is useful for light curves produced by
instruments that have a large pixel scale, so are susceptible to
blending of variability and potential confusion of neighbor variability
with that of the actual object being looked at. If this is None, no
neighbor lookups will be performed.
nbrradiusarcsec : float
The radius in arcseconds to use for a search conducted around the
coordinates of this object to look for any potential confusion and
blending of variability amplitude caused by their proximity.
maxnumneighbors : int
The maximum number of neighbors that will have their light curves and
magnitudes noted in this checkplot as potential blends with the target
object.
plotdpi : int
The resolution in DPI of the plots to generate in this function
(e.g. the finder chart, etc.)
findercachedir : str
The path to the astrobase cache directory for finder chart downloads
from the NASA SkyView service.
verbose : bool
If True, will indicate progress and warn about potential problems.
gaia_submit_timeout : float
Sets the timeout in seconds to use when submitting a request to look up
the object's information to the GAIA service. Note that if `fast_mode`
is set, this is ignored.
gaia_submit_tries : int
Sets the maximum number of times the GAIA services will be contacted to
obtain this object's information. If `fast_mode` is set, this is
ignored, and the services will be contacted only once (meaning that a
failure to respond will be silently ignored and no GAIA data will be
added to the checkplot's objectinfo dict).
gaia_max_timeout : float
Sets the timeout in seconds to use when waiting for the GAIA service to
respond to our request for the object's information. Note that if
`fast_mode` is set, this is ignored.
gaia_mirror : str
This sets the GAIA mirror to use. This is a key in the
`services.gaia.GAIA_URLS` dict which defines the URLs to hit for each
mirror.
fast_mode : bool or float
This runs the external catalog operations in a "fast" mode, with short
timeouts and not trying to hit external catalogs that take a long time
to respond.
If this is set to True, the default settings for the external requests
will then become::
skyview_lookup = False
skyview_timeout = 10.0
skyview_retry_failed = False
dust_timeout = 10.0
gaia_submit_timeout = 7.0
gaia_max_timeout = 10.0
gaia_submit_tries = 2
complete_query_later = False
search_simbad = False
If this is a float, will run in "fast" mode with the provided timeout
value in seconds and the following settings::
skyview_lookup = True
skyview_timeout = fast_mode
skyview_retry_failed = False
dust_timeout = fast_mode
gaia_submit_timeout = 0.66*fast_mode
gaia_max_timeout = fast_mode
gaia_submit_tries = 2
complete_query_later = False
search_simbad = False
complete_query_later : bool
If this is True, saves the state of GAIA queries that are not yet
complete when `gaia_max_timeout` is reached while waiting for the GAIA
service to respond to our request. A later call for GAIA info on the
same object will attempt to pick up the results from the existing query
if it's completed. If `fast_mode` is True, this is ignored.
Returns
-------
dict
A checkplotdict is returned containing the objectinfo and varinfo dicts,
ready to use with the functions below to add in light curve plots,
phased LC plots, xmatch info, etc.
'''
# optional mode to hit external services and fail fast if they timeout
if fast_mode is True:
skyview_lookup = False
skyview_timeout = 10.0
skyview_retry_failed = False
dust_timeout = 10.0
gaia_submit_timeout = 7.0
gaia_max_timeout = 10.0
gaia_submit_tries = 2
complete_query_later = False
search_simbad = False
elif isinstance(fast_mode, (int, float)) and fast_mode > 0.0:
skyview_lookup = True
skyview_timeout = fast_mode
skyview_retry_failed = False
dust_timeout = fast_mode
gaia_submit_timeout = 0.66*fast_mode
gaia_max_timeout = fast_mode
gaia_submit_tries = 2
complete_query_later = False
search_simbad = False
else:
skyview_lookup = True
skyview_timeout = 10.0
skyview_retry_failed = True
dust_timeout = 10.0
search_simbad = True
if (isinstance(objectinfo, dict) and
('objectid' in objectinfo or 'hatid' in objectinfo) and
'ra' in objectinfo and 'decl' in objectinfo and
objectinfo['ra'] and objectinfo['decl']):
if 'objectid' not in objectinfo:
objectid = objectinfo['hatid']
else:
objectid = objectinfo['objectid']
if verbose and skyview_lookup:
LOGINFO('adding in object information and '
'finder chart for %s at RA: %.3f, DEC: %.3f' %
(objectid, objectinfo['ra'], objectinfo['decl']))
elif verbose and not skyview_lookup:
LOGINFO('adding in object information '
'for %s at RA: %.3f, DEC: %.3f. '
'skipping finder chart because skyview_lookup = False' %
(objectid, objectinfo['ra'], objectinfo['decl']))
# get the finder chart
try:
if skyview_lookup:
try:
# generate the finder chart
finder, finderheader = skyview_stamp(
objectinfo['ra'],
objectinfo['decl'],
convolvewith=finderconvolve,
verbose=verbose,
flip=False,
cachedir=findercachedir,
timeout=skyview_timeout,
retry_failed=skyview_retry_failed,
)
except OSError as e:
if not fast_mode:
LOGERROR(
'finder image appears to be corrupt, retrying...'
)
# generate the finder chart
finder, finderheader = skyview_stamp(
objectinfo['ra'],
objectinfo['decl'],
convolvewith=finderconvolve,
verbose=verbose,
flip=False,
cachedir=findercachedir,
forcefetch=True,
timeout=skyview_timeout,
retry_failed=False # do not start an infinite loop
)
finderfig = plt.figure(figsize=(3,3),dpi=plotdpi)
# initialize the finder WCS
finderwcs = WCS(finderheader)
# use the WCS transform for the plot
ax = finderfig.add_subplot(111, frameon=False)
ax.imshow(finder, cmap=findercmap, origin='lower')
else:
finder, finderheader, finderfig, finderwcs = (
None, None, None, None
)
# skip down to after nbr stuff for the rest of the finderchart...
# search around the target's location and get its neighbors if
# lclistpkl is provided and it exists
if (lclistpkl is not None and
nbrradiusarcsec is not None and
nbrradiusarcsec > 0.0):
# if lclistpkl is a string, open it as a pickle
if isinstance(lclistpkl, str) and os.path.exists(lclistpkl):
if lclistpkl.endswith('.gz'):
infd = gzip.open(lclistpkl,'rb')
else:
infd = open(lclistpkl,'rb')
lclist = pickle.load(infd)
infd.close()
# otherwise, if it's a dict, we get it directly
elif isinstance(lclistpkl, dict):
lclist = lclistpkl
# finally, if it's nothing we recognize, ignore it
else:
LOGERROR('could not understand lclistpkl kwarg, '
'not getting neighbor info')
lclist = dict()
# check if we have a KDTree to use
# if we don't, skip neighbor stuff
if 'kdtree' not in lclist:
LOGERROR('neighbors within %.1f arcsec for %s could '
'not be found, no kdtree in lclistpkl: %s'
% (nbrradiusarcsec, objectid, lclistpkl))
neighbors = None
kdt = None
# otherwise, do neighbor processing
else:
kdt = lclist['kdtree']
obj_cosdecl = np.cos(np.radians(objectinfo['decl']))
obj_sindecl = np.sin(np.radians(objectinfo['decl']))
obj_cosra = np.cos(np.radians(objectinfo['ra']))
obj_sinra = np.sin(np.radians(objectinfo['ra']))
obj_xyz = np.column_stack((obj_cosra*obj_cosdecl,
obj_sinra*obj_cosdecl,
obj_sindecl))
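# convert the angular search radius to a 3D chord length on the unit
# sphere (chord = 2*sin(theta/2)); the kdtree holds unit xyz vectors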
match_xyzdist = (
2.0 * np.sin(np.radians(nbrradiusarcsec/3600.0)/2.0)
)
matchdists, matchinds = kdt.query(
obj_xyz,
k=maxnumneighbors+1, # get maxnumneighbors + tgt
distance_upper_bound=match_xyzdist
)
# sort by matchdist
mdsorted = np.argsort(matchdists[0])
matchdists = matchdists[0][mdsorted]
matchinds = matchinds[0][mdsorted]
# luckily, the indices to the kdtree are the same as that
# for the objects (I think)
neighbors = []
nbrind = 0
for md, mi in zip(matchdists, matchinds):
if np.isfinite(md) and md > 0.0:
if skyview_lookup:
# generate the xy for the finder we'll use a
# HTML5 canvas and these pixcoords to highlight
# each neighbor when we mouse over its row in
# the neighbors tab
# we use coord origin = 0 here and not the usual
# 1 because we're annotating a numpy array
pixcoords = finderwcs.all_world2pix(
np.array([[lclist['objects']['ra'][mi],
lclist['objects']['decl'][mi]]]),
0
)
# each elem is {'objectid',
# 'ra','decl',
# 'xpix','ypix',
# 'dist','lcfpath'}
thisnbr = {
'objectid':(
lclist['objects']['objectid'][mi]
),
'ra':lclist['objects']['ra'][mi],
'decl':lclist['objects']['decl'][mi],
'xpix':pixcoords[0,0],
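# flip y for the HTML5 canvas (origin at top-left); the 300 px extent
# assumes the 3 in stamp at the default plotdpi of 100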
'ypix':300.0 - pixcoords[0,1],
'dist':_xyzdist_to_distarcsec(md),
'lcfpath': lclist['objects']['lcfname'][mi]
}
neighbors.append(thisnbr)
nbrind = nbrind+1
# put in a nice marker for this neighbor into
# the overall finder chart
annotatex = pixcoords[0,0]
annotatey = pixcoords[0,1]
if ((300.0 - annotatex) > 50.0):
offx = annotatex + 30.0
xha = 'center'
else:
offx = annotatex - 30.0
xha = 'center'
if ((300.0 - annotatey) > 50.0):
offy = annotatey - 30.0
yha = 'center'
else:
offy = annotatey + 30.0
yha = 'center'
ax.annotate('N%s' % nbrind,
(annotatex, annotatey),
xytext=(offx, offy),
arrowprops={'facecolor':'blue',
'edgecolor':'blue',
'width':1.0,
'headwidth':1.0,
'headlength':0.1,
'shrink':0.0},
color='blue',
horizontalalignment=xha,
verticalalignment=yha)
else:
thisnbr = {
'objectid':(
lclist['objects']['objectid'][mi]
),
'ra':lclist['objects']['ra'][mi],
'decl':lclist['objects']['decl'][mi],
'xpix':0.0,
'ypix':0.0,
'dist':_xyzdist_to_distarcsec(md),
'lcfpath': lclist['objects']['lcfname'][mi]
}
neighbors.append(thisnbr)
nbrind = nbrind+1
# if there are no neighbors, set the 'neighbors' key to None
else:
neighbors = None
kdt = None
if skyview_lookup:
#
# finish up the finder chart after neighbors are processed
#
ax.set_xticks([])
ax.set_yticks([])
# add a reticle pointing to the object's coordinates
# we use coord origin = 0 here and not the usual
# 1 because we're annotating a numpy array
object_pixcoords = finderwcs.all_world2pix(
[[objectinfo['ra'],
objectinfo['decl']]],
0
)
ax.axvline(
# x=150.0,
x=object_pixcoords[0,0],
ymin=0.375,
ymax=0.45,
linewidth=1,
color='b'
)
ax.axhline(
# y=150.0,
y=object_pixcoords[0,1],
xmin=0.375,
xmax=0.45,
linewidth=1,
color='b'
)
ax.set_frame_on(False)
# this is the output instance
finderpng = StrIO()
finderfig.savefig(finderpng,
bbox_inches='tight',
pad_inches=0.0, format='png')
plt.close()
# encode the finderpng instance to base64
finderpng.seek(0)
finderb64 = base64.b64encode(finderpng.read())
# close the stringio buffer
finderpng.close()
else:
finderb64 = None
except Exception as e:
LOGEXCEPTION('could not fetch a DSS stamp for this '
'object %s using coords (%.3f,%.3f)' %
(objectid, objectinfo['ra'], objectinfo['decl']))
finderb64 = None
neighbors = None
kdt = None
# if we don't have ra, dec info, then everything is none up to this point
else:
finderb64 = None
neighbors = None
kdt = None
#
# end of finder chart operations
#
# now that we have the finder chart, get the rest of the object
# information
# get the rest of the features, these don't necessarily rely on ra, dec and
# should degrade gracefully if these aren't provided
if isinstance(objectinfo, dict):
if 'objectid' not in objectinfo and 'hatid' in objectinfo:
objectid = objectinfo['hatid']
objectinfo['objectid'] = objectid
elif 'objectid' in objectinfo:
objectid = objectinfo['objectid']
else:
objectid = os.urandom(12).hex()[:7]
objectinfo['objectid'] = objectid
LOGWARNING('no objectid found in objectinfo dict, '
'making up a random one: %s' % objectid)
# get the neighbor features and GAIA info
nbrfeat = neighbor_gaia_features(
objectinfo,
kdt,
nbrradiusarcsec,
verbose=False,
gaia_submit_timeout=gaia_submit_timeout,
gaia_submit_tries=gaia_submit_tries,
gaia_max_timeout=gaia_max_timeout,
gaia_mirror=gaia_mirror,
complete_query_later=complete_query_later,
search_simbad=search_simbad
)
objectinfo.update(nbrfeat)
# see if the objectinfo dict has pmra/pmdecl entries. if it doesn't,
# then we'll see if the nbrfeat dict has pmra/pmdecl from GAIA. we'll
# set the appropriate provenance keys as well so we know where the PM
# came from
if ( ('pmra' not in objectinfo) or
( ('pmra' in objectinfo) and
( (objectinfo['pmra'] is None) or
(not np.isfinite(objectinfo['pmra'])) ) ) ):
if 'ok' in nbrfeat['gaia_status']:
objectinfo['pmra'] = nbrfeat['gaia_pmras'][0]
objectinfo['pmra_err'] = nbrfeat['gaia_pmra_errs'][0]
objectinfo['pmra_source'] = 'gaia'
if verbose:
LOGWARNING('pmRA not found in provided objectinfo dict, '
'using value from GAIA')
else:
objectinfo['pmra_source'] = 'light curve'
if ( ('pmdecl' not in objectinfo) or
( ('pmdecl' in objectinfo) and
( (objectinfo['pmdecl'] is None) or
(not np.isfinite(objectinfo['pmdecl'])) ) ) ):
if 'ok' in nbrfeat['gaia_status']:
objectinfo['pmdecl'] = nbrfeat['gaia_pmdecls'][0]
objectinfo['pmdecl_err'] = nbrfeat['gaia_pmdecl_errs'][0]
objectinfo['pmdecl_source'] = 'gaia'
if verbose:
LOGWARNING('pmDEC not found in provided objectinfo dict, '
'using value from GAIA')
else:
objectinfo['pmdecl_source'] = 'light curve'
#
# update GAIA info so it's available at the first level
#
if 'ok' in objectinfo['gaia_status']:
objectinfo['gaiaid'] = objectinfo['gaia_ids'][0]
objectinfo['gaiamag'] = objectinfo['gaia_mags'][0]
objectinfo['gaia_absmag'] = objectinfo['gaia_absolute_mags'][0]
objectinfo['gaia_parallax'] = objectinfo['gaia_parallaxes'][0]
objectinfo['gaia_parallax_err'] = (
objectinfo['gaia_parallax_errs'][0]
)
objectinfo['gaia_pmra'] = objectinfo['gaia_pmras'][0]
objectinfo['gaia_pmra_err'] = objectinfo['gaia_pmra_errs'][0]
objectinfo['gaia_pmdecl'] = objectinfo['gaia_pmdecls'][0]
objectinfo['gaia_pmdecl_err'] = objectinfo['gaia_pmdecl_errs'][0]
else:
objectinfo['gaiaid'] = None
objectinfo['gaiamag'] = np.nan
objectinfo['gaia_absmag'] = np.nan
objectinfo['gaia_parallax'] = np.nan
objectinfo['gaia_parallax_err'] = np.nan
objectinfo['gaia_pmra'] = np.nan
objectinfo['gaia_pmra_err'] = np.nan
objectinfo['gaia_pmdecl'] = np.nan
objectinfo['gaia_pmdecl_err'] = np.nan
#
# get the object's TIC information
#
if ('ra' in objectinfo and
objectinfo['ra'] is not None and
np.isfinite(objectinfo['ra']) and
'decl' in objectinfo and
objectinfo['decl'] is not None and
np.isfinite(objectinfo['decl'])):
try:
ticres = tic_conesearch(objectinfo['ra'],
objectinfo['decl'],
radius_arcmin=5.0/60.0,
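# 5/60 arcmin = 5 arcsec cone search radius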
verbose=verbose,
timeout=gaia_max_timeout,
maxtries=gaia_submit_tries)
if ticres is not None:
with open(ticres['cachefname'],'r') as infd:
ticinfo = json.load(infd)
if ('data' in ticinfo and
len(ticinfo['data']) > 0 and
isinstance(ticinfo['data'][0], dict)):
objectinfo['ticid'] = str(ticinfo['data'][0]['ID'])
objectinfo['tessmag'] = ticinfo['data'][0]['Tmag']
objectinfo['tic_version'] = (
ticinfo['data'][0]['version']
)
objectinfo['tic_distarcsec'] = (
ticinfo['data'][0]['dstArcSec']
)
objectinfo['tessmag_origin'] = (
ticinfo['data'][0]['TESSflag']
)
objectinfo['tic_starprop_origin'] = (
ticinfo['data'][0]['SPFlag']
)
objectinfo['tic_lumclass'] = (
ticinfo['data'][0]['lumclass']
)
objectinfo['tic_teff'] = (
ticinfo['data'][0]['Teff']
)
objectinfo['tic_teff_err'] = (
ticinfo['data'][0]['e_Teff']
)
objectinfo['tic_logg'] = (
ticinfo['data'][0]['logg']
)
objectinfo['tic_logg_err'] = (
ticinfo['data'][0]['e_logg']
)
objectinfo['tic_mh'] = (
ticinfo['data'][0]['MH']
)
objectinfo['tic_mh_err'] = (
ticinfo['data'][0]['e_MH']
)
objectinfo['tic_radius'] = (
ticinfo['data'][0]['rad']
)
objectinfo['tic_radius_err'] = (
ticinfo['data'][0]['e_rad']
)
objectinfo['tic_mass'] = (
ticinfo['data'][0]['mass']
)
objectinfo['tic_mass_err'] = (
ticinfo['data'][0]['e_mass']
)
objectinfo['tic_density'] = (
ticinfo['data'][0]['rho']
)
objectinfo['tic_density_err'] = (
ticinfo['data'][0]['e_rho']
)
objectinfo['tic_luminosity'] = (
ticinfo['data'][0]['lum']
)
objectinfo['tic_luminosity_err'] = (
ticinfo['data'][0]['e_lum']
)
objectinfo['tic_distancepc'] = (
ticinfo['data'][0]['d']
)
objectinfo['tic_distancepc_err'] = (
ticinfo['data'][0]['e_d']
)
#
# fill in any missing info using the TIC entry
#
if ('gaiaid' not in objectinfo or
('gaiaid' in objectinfo and
(objectinfo['gaiaid'] is None))):
objectinfo['gaiaid'] = ticinfo['data'][0]['GAIA']
if ('gaiamag' not in objectinfo or
('gaiamag' in objectinfo and
(objectinfo['gaiamag'] is None or
not np.isfinite(objectinfo['gaiamag'])))):
objectinfo['gaiamag'] = (
ticinfo['data'][0]['GAIAmag']
)
objectinfo['gaiamag_err'] = (
ticinfo['data'][0]['e_GAIAmag']
)
if ('gaia_parallax' not in objectinfo or
('gaia_parallax' in objectinfo and
(objectinfo['gaia_parallax'] is None or
not np.isfinite(objectinfo['gaia_parallax'])))):
objectinfo['gaia_parallax'] = (
ticinfo['data'][0]['plx']
)
objectinfo['gaia_parallax_err'] = (
ticinfo['data'][0]['e_plx']
)
if (objectinfo['gaiamag'] is not None and
np.isfinite(objectinfo['gaiamag']) and
objectinfo['gaia_parallax'] is not None and
np.isfinite(objectinfo['gaia_parallax'])):
objectinfo['gaia_absmag'] = (
magnitudes.absolute_gaia_magnitude(
objectinfo['gaiamag'],
objectinfo['gaia_parallax']
)
)
if ('pmra' not in objectinfo or
('pmra' in objectinfo and
(objectinfo['pmra'] is None or
not np.isfinite(objectinfo['pmra'])))):
objectinfo['pmra'] = ticinfo['data'][0]['pmRA']
objectinfo['pmra_err'] = (
ticinfo['data'][0]['e_pmRA']
)
objectinfo['pmra_source'] = 'TIC'
if ('pmdecl' not in objectinfo or
('pmdecl' in objectinfo and
(objectinfo['pmdecl'] is None or
not np.isfinite(objectinfo['pmdecl'])))):
objectinfo['pmdecl'] = ticinfo['data'][0]['pmDEC']
objectinfo['pmdecl_err'] = (
ticinfo['data'][0]['e_pmDEC']
)
objectinfo['pmdecl_source'] = 'TIC'
if ('bmag' not in objectinfo or
('bmag' in objectinfo and
(objectinfo['bmag'] is None or
not np.isfinite(objectinfo['bmag'])))):
objectinfo['bmag'] = ticinfo['data'][0]['Bmag']
objectinfo['bmag_err'] = (
ticinfo['data'][0]['e_Bmag']
)
if ('vmag' not in objectinfo or
('vmag' in objectinfo and
(objectinfo['vmag'] is None or
not np.isfinite(objectinfo['vmag'])))):
objectinfo['vmag'] = ticinfo['data'][0]['Vmag']
objectinfo['vmag_err'] = (
ticinfo['data'][0]['e_Vmag']
)
if ('sdssu' not in objectinfo or
('sdssu' in objectinfo and
(objectinfo['sdssu'] is None or
not np.isfinite(objectinfo['sdssu'])))):
objectinfo['sdssu'] = ticinfo['data'][0]['umag']
objectinfo['sdssu_err'] = (
ticinfo['data'][0]['e_umag']
)
if ('sdssg' not in objectinfo or
('sdssg' in objectinfo and
(objectinfo['sdssg'] is None or
not np.isfinite(objectinfo['sdssg'])))):
objectinfo['sdssg'] = ticinfo['data'][0]['gmag']
objectinfo['sdssg_err'] = (
ticinfo['data'][0]['e_gmag']
)
if ('sdssr' not in objectinfo or
('sdssr' in objectinfo and
(objectinfo['sdssr'] is None or
not np.isfinite(objectinfo['sdssr'])))):
objectinfo['sdssr'] = ticinfo['data'][0]['rmag']
objectinfo['sdssr_err'] = (
ticinfo['data'][0]['e_rmag']
)
if ('sdssi' not in objectinfo or
('sdssi' in objectinfo and
(objectinfo['sdssi'] is None or
not np.isfinite(objectinfo['sdssi'])))):
objectinfo['sdssi'] = ticinfo['data'][0]['imag']
objectinfo['sdssi_err'] = (
ticinfo['data'][0]['e_imag']
)
if ('sdssz' not in objectinfo or
('sdssz' in objectinfo and
(objectinfo['sdssz'] is None or
not np.isfinite(objectinfo['sdssz'])))):
objectinfo['sdssz'] = ticinfo['data'][0]['zmag']
objectinfo['sdssz_err'] = (
ticinfo['data'][0]['e_zmag']
)
if ('jmag' not in objectinfo or
('jmag' in objectinfo and
(objectinfo['jmag'] is None or
not np.isfinite(objectinfo['jmag'])))):
objectinfo['jmag'] = ticinfo['data'][0]['Jmag']
objectinfo['jmag_err'] = (
ticinfo['data'][0]['e_Jmag']
)
if ('hmag' not in objectinfo or
('hmag' in objectinfo and
(objectinfo['hmag'] is None or
not np.isfinite(objectinfo['hmag'])))):
objectinfo['hmag'] = ticinfo['data'][0]['Hmag']
objectinfo['hmag_err'] = (
ticinfo['data'][0]['e_Hmag']
)
if ('kmag' not in objectinfo or
('kmag' in objectinfo and
(objectinfo['kmag'] is None or
not np.isfinite(objectinfo['kmag'])))):
objectinfo['kmag'] = ticinfo['data'][0]['Kmag']
objectinfo['kmag_err'] = (
ticinfo['data'][0]['e_Kmag']
)
if ('wise1' not in objectinfo or
('wise1' in objectinfo and
(objectinfo['wise1'] is None or
not np.isfinite(objectinfo['wise1'])))):
objectinfo['wise1'] = ticinfo['data'][0]['w1mag']
objectinfo['wise1_err'] = (
ticinfo['data'][0]['e_w1mag']
)
if ('wise2' not in objectinfo or
('wise2' in objectinfo and
(objectinfo['wise2'] is None or
not np.isfinite(objectinfo['wise2'])))):
objectinfo['wise2'] = ticinfo['data'][0]['w2mag']
objectinfo['wise2_err'] = (
ticinfo['data'][0]['e_w2mag']
)
if ('wise3' not in objectinfo or
('wise3' in objectinfo and
(objectinfo['wise3'] is None or
not np.isfinite(objectinfo['wise3'])))):
objectinfo['wise3'] = ticinfo['data'][0]['w3mag']
objectinfo['wise3_err'] = (
ticinfo['data'][0]['e_w3mag']
)
if ('wise4' not in objectinfo or
('wise4' in objectinfo and
(objectinfo['wise4'] is None or
not np.isfinite(objectinfo['wise4'])))):
objectinfo['wise4'] = ticinfo['data'][0]['w4mag']
objectinfo['wise4_err'] = (
ticinfo['data'][0]['e_w4mag']
)
else:
LOGERROR('could not look up TIC '
'information for object: %s '
'at (%.3f, %.3f)' %
(objectinfo['objectid'],
objectinfo['ra'],
objectinfo['decl']))
except Exception as e:
LOGEXCEPTION('could not look up TIC '
'information for object: %s '
'at (%.3f, %.3f)' %
(objectinfo['objectid'],
objectinfo['ra'],
objectinfo['decl']))
# try to get the object's coord features
coordfeat = coord_features(objectinfo)
# get the color features
colorfeat = color_features(objectinfo,
deredden=deredden_object,
custom_bandpasses=custom_bandpasses,
dust_timeout=dust_timeout)
# get the object's color classification
colorclass = color_classification(colorfeat, coordfeat)
# update the objectinfo dict with everything
objectinfo.update(colorfeat)
objectinfo.update(coordfeat)
objectinfo.update(colorclass)
# put together the initial checkplot pickle dictionary
# this will be updated by the functions below as appropriate
# and will written out as a gzipped pickle at the end of processing
checkplotdict = {'objectid':objectid,
'neighbors':neighbors,
'objectinfo':objectinfo,
'finderchart':finderb64,
'sigclip':sigclip,
'normto':normto,
'normmingap':normmingap}
# add the objecttags key to objectinfo
checkplotdict['objectinfo']['objecttags'] = None
# if there's no objectinfo, we can't do anything.
else:
# empty objectinfo dict
checkplotdict = {'objectid':None,
'neighbors':None,
'objectinfo':{
'available_bands':[],
'available_band_labels':[],
'available_dereddened_bands':[],
'available_dereddened_band_labels':[],
'available_colors':[],
'available_color_labels':[],
'bmag':None,
'bmag-vmag':None,
'decl':None,
'hatid':None,
'hmag':None,
'imag-jmag':None,
'jmag-kmag':None,
'jmag':None,
'kmag':None,
'ndet':None,
'network':None,
'objecttags':None,
'pmdecl':None,
'pmdecl_err':None,
'pmra':None,
'pmra_err':None,
'propermotion':None,
'ra':None,
'rpmj':None,
'sdssg':None,
'sdssi':None,
'sdssr':None,
'stations':None,
'twomassid':None,
'ucac4id':None,
'vmag':None
},
'finderchart':None,
'sigclip':sigclip,
'normto':normto,
'normmingap':normmingap}
# end of objectinfo processing
# add the varinfo dict
if isinstance(varinfo, dict):
checkplotdict['varinfo'] = varinfo
else:
checkplotdict['varinfo'] = {
'objectisvar':None,
'vartags':None,
'varisperiodic':None,
'varperiod':None,
'varepoch':None,
}
return checkplotdict
|
def _pkl_finder_objectinfo(
objectinfo,
varinfo,
findercmap,
finderconvolve,
sigclip,
normto,
normmingap,
deredden_object=True,
custom_bandpasses=None,
lclistpkl=None,
nbrradiusarcsec=30.0,
maxnumneighbors=5,
plotdpi=100,
findercachedir='~/.astrobase/stamp-cache',
verbose=True,
gaia_submit_timeout=10.0,
gaia_submit_tries=3,
gaia_max_timeout=180.0,
gaia_mirror=None,
fast_mode=False,
complete_query_later=True
):
'''This returns the finder chart and object information as a dict.
Parameters
----------
objectinfo : dict or None
If provided, this is a dict containing information on the object whose
light curve is being processed. This function will then be able to look
up and download a finder chart for this object and write that to the
output checkplotdict. External services such as GAIA, SIMBAD, TIC@MAST,
etc. will also be used to look up this object by its coordinates, and
will add in information available from those services.
The `objectinfo` dict must be of the form and contain at least the keys
described below::
{'objectid': the name of the object,
'ra': the right ascension of the object in decimal degrees,
'decl': the declination of the object in decimal degrees,
'ndet': the number of observations of this object}
You can also provide magnitudes and proper motions of the object using
the following keys and the appropriate values in the `objectinfo`
dict. These will be used to calculate colors, total and reduced proper
motion, etc. and display these in the output checkplot PNG::
'pmra' -> the proper motion in mas/yr in right ascension,
'pmdecl' -> the proper motion in mas/yr in declination,
'umag' -> U mag -> colors: U-B, U-V, U-g
'bmag' -> B mag -> colors: U-B, B-V
'vmag' -> V mag -> colors: U-V, B-V, V-R, V-I, V-K
'rmag' -> R mag -> colors: V-R, R-I
'imag' -> I mag -> colors: g-I, V-I, R-I, B-I
'jmag' -> 2MASS J mag -> colors: J-H, J-K, g-J, i-J
'hmag' -> 2MASS H mag -> colors: J-H, H-K
'kmag' -> 2MASS Ks mag -> colors: g-Ks, H-Ks, J-Ks, V-Ks
'sdssu' -> SDSS u mag -> colors: u-g, u-V
'sdssg' -> SDSS g mag -> colors: g-r, g-i, g-K, u-g, U-g, g-J
'sdssr' -> SDSS r mag -> colors: r-i, g-r
'sdssi' -> SDSS i mag -> colors: r-i, i-z, g-i, i-J, i-W1
'sdssz' -> SDSS z mag -> colors: i-z, z-W2, g-z
'ujmag' -> UKIRT J mag -> colors: J-H, H-K, J-K, g-J, i-J
'uhmag' -> UKIRT H mag -> colors: J-H, H-K
'ukmag' -> UKIRT K mag -> colors: g-K, H-K, J-K, V-K
'irac1' -> Spitzer IRAC1 mag -> colors: i-I1, I1-I2
'irac2' -> Spitzer IRAC2 mag -> colors: I1-I2, I2-I3
'irac3' -> Spitzer IRAC3 mag -> colors: I2-I3
'irac4' -> Spitzer IRAC4 mag -> colors: I3-I4
'wise1' -> WISE W1 mag -> colors: i-W1, W1-W2
'wise2' -> WISE W2 mag -> colors: W1-W2, W2-W3
'wise3' -> WISE W3 mag -> colors: W2-W3
'wise4' -> WISE W4 mag -> colors: W3-W4
If you have magnitude measurements in other bands, use the
`custom_bandpasses` kwarg to pass these in.
If this is None, no object information will be incorporated into the
checkplot (kind of making it effectively useless for anything other than
glancing at the phased light curves at various 'best' periods from the
period-finder results).
varinfo : dict or None
If this is None, a blank dict of the form below will be added to the
checkplotdict::
{'objectisvar': None -> variability flag (None indicates unset),
'vartags': CSV str containing variability type tags from review,
'varisperiodic': None -> periodic variability flag (None -> unset),
'varperiod': the period associated with the periodic variability,
'varepoch': the epoch associated with the periodic variability}
If you provide a dict matching this format in this kwarg, this will be
passed unchanged to the output checkplotdict produced.
findercmap : str or matplotlib.cm.ColorMap object
The Colormap object to use for the finder chart image.
finderconvolve : astropy.convolution.Kernel object or None
If not None, the Kernel object to use for convolving the finder image.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
normto : {'globalmedian', 'zero'} or a float
This is specified as below::
'globalmedian' -> norms each mag to global median of the LC column
'zero' -> norms each mag to zero
a float -> norms each mag to this specified float value.
normmingap : float
This defines how much the difference between consecutive measurements is
allowed to be to consider them as parts of different timegroups. By
default it is set to 4.0 days.
deredden_object : bool
If this is True, will use the 2MASS DUST service to get extinction
coefficients in various bands, and then try to deredden the magnitudes
and colors of the object already present in the checkplot's objectinfo
dict.
custom_bandpasses : dict
This is a dict used to provide custom bandpass definitions for any
magnitude measurements in the objectinfo dict that are not automatically
recognized by :py:func:`astrobase.varclass.starfeatures.color_features`.
lclistpkl : dict or str
If this is provided, must be a dict resulting from reading a catalog
produced by the `lcproc.catalogs.make_lclist` function or a str path
pointing to the pickle file produced by that function. This catalog is
used to find neighbors of the current object in the current light curve
collection. Looking at neighbors of the object within the radius
specified by `nbrradiusarcsec` is useful for light curves produced by
instruments that have a large pixel scale, so are susceptible to
blending of variability and potential confusion of neighbor variability
with that of the actual object being looked at. If this is None, no
neighbor lookups will be performed.
nbrradiusarcsec : float
The radius in arcseconds to use for a search conducted around the
coordinates of this object to look for any potential confusion and
blending of variability amplitude caused by their proximity.
maxnumneighbors : int
The maximum number of neighbors that will have their light curves and
magnitudes noted in this checkplot as potential blends with the target
object.
plotdpi : int
The resolution in DPI of the plots to generate in this function
(e.g. the finder chart, etc.)
findercachedir : str
The path to the astrobase cache directory for finder chart downloads
from the NASA SkyView service.
verbose : bool
If True, will indicate progress and warn about potential problems.
gaia_submit_timeout : float
Sets the timeout in seconds to use when submitting a request to look up
the object's information to the GAIA service. Note that if `fast_mode`
is set, this is ignored.
gaia_submit_tries : int
Sets the maximum number of times the GAIA services will be contacted to
obtain this object's information. If `fast_mode` is set, this is
ignored, and the services will be contacted only once (meaning that a
failure to respond will be silently ignored and no GAIA data will be
added to the checkplot's objectinfo dict).
gaia_max_timeout : float
Sets the timeout in seconds to use when waiting for the GAIA service to
respond to our request for the object's information. Note that if
`fast_mode` is set, this is ignored.
gaia_mirror : str
This sets the GAIA mirror to use. This is a key in the
`services.gaia.GAIA_URLS` dict which defines the URLs to hit for each
mirror.
fast_mode : bool or float
This runs the external catalog operations in a "fast" mode, with short
timeouts and not trying to hit external catalogs that take a long time
to respond.
If this is set to True, the default settings for the external requests
will then become::
skyview_lookup = False
skyview_timeout = 10.0
skyview_retry_failed = False
dust_timeout = 10.0
gaia_submit_timeout = 7.0
gaia_max_timeout = 10.0
gaia_submit_tries = 2
complete_query_later = False
search_simbad = False
If this is a float, will run in "fast" mode with the provided timeout
value in seconds and the following settings::
skyview_lookup = True
skyview_timeout = fast_mode
skyview_retry_failed = False
dust_timeout = fast_mode
gaia_submit_timeout = 0.66*fast_mode
gaia_max_timeout = fast_mode
gaia_submit_tries = 2
complete_query_later = False
search_simbad = False
complete_query_later : bool
If this is True, saves the state of GAIA queries that are not yet
complete when `gaia_max_timeout` is reached while waiting for the GAIA
service to respond to our request. A later call for GAIA info on the
same object will attempt to pick up the results from the existing query
if it's completed. If `fast_mode` is True, this is ignored.
Returns
-------
dict
A checkplotdict is returned containing the objectinfo and varinfo dicts,
ready to use with the functions below to add in light curve plots,
phased LC plots, xmatch info, etc.
'''
# optional mode to hit external services and fail fast if they timeout
if fast_mode is True:
skyview_lookup = False
skyview_timeout = 10.0
skyview_retry_failed = False
dust_timeout = 10.0
gaia_submit_timeout = 7.0
gaia_max_timeout = 10.0
gaia_submit_tries = 2
complete_query_later = False
search_simbad = False
elif isinstance(fast_mode, (int, float)) and fast_mode > 0.0:
skyview_lookup = True
skyview_timeout = fast_mode
skyview_retry_failed = False
dust_timeout = fast_mode
gaia_submit_timeout = 0.66*fast_mode
gaia_max_timeout = fast_mode
gaia_submit_tries = 2
complete_query_later = False
search_simbad = False
else:
skyview_lookup = True
skyview_timeout = 10.0
skyview_retry_failed = True
dust_timeout = 10.0
search_simbad = True
if (isinstance(objectinfo, dict) and
('objectid' in objectinfo or 'hatid' in objectinfo) and
'ra' in objectinfo and 'decl' in objectinfo and
objectinfo['ra'] and objectinfo['decl']):
if 'objectid' not in objectinfo:
objectid = objectinfo['hatid']
else:
objectid = objectinfo['objectid']
if verbose and skyview_lookup:
LOGINFO('adding in object information and '
'finder chart for %s at RA: %.3f, DEC: %.3f' %
(objectid, objectinfo['ra'], objectinfo['decl']))
elif verbose and not skyview_lookup:
LOGINFO('adding in object information '
'for %s at RA: %.3f, DEC: %.3f. '
'skipping finder chart because skyview_lookup = False' %
(objectid, objectinfo['ra'], objectinfo['decl']))
# get the finder chart
try:
if skyview_lookup:
try:
# generate the finder chart
finder, finderheader = skyview_stamp(
objectinfo['ra'],
objectinfo['decl'],
convolvewith=finderconvolve,
verbose=verbose,
flip=False,
cachedir=findercachedir,
timeout=skyview_timeout,
retry_failed=skyview_retry_failed,
)
except OSError as e:
if not fast_mode:
LOGERROR(
'finder image appears to be corrupt, retrying...'
)
# generate the finder chart
finder, finderheader = skyview_stamp(
objectinfo['ra'],
objectinfo['decl'],
convolvewith=finderconvolve,
verbose=verbose,
flip=False,
cachedir=findercachedir,
forcefetch=True,
timeout=skyview_timeout,
retry_failed=False # do not start an infinite loop
)
finderfig = plt.figure(figsize=(3,3),dpi=plotdpi)
# initialize the finder WCS
finderwcs = WCS(finderheader)
# use the WCS transform for the plot
ax = finderfig.add_subplot(111, frameon=False)
ax.imshow(finder, cmap=findercmap, origin='lower')
else:
finder, finderheader, finderfig, finderwcs = (
None, None, None, None
)
# skip down to after nbr stuff for the rest of the finderchart...
# search around the target's location and get its neighbors if
# lclistpkl is provided and it exists
if (lclistpkl is not None and
nbrradiusarcsec is not None and
nbrradiusarcsec > 0.0):
# if lclistpkl is a string, open it as a pickle
if isinstance(lclistpkl, str) and os.path.exists(lclistpkl):
if lclistpkl.endswith('.gz'):
infd = gzip.open(lclistpkl,'rb')
else:
infd = open(lclistpkl,'rb')
lclist = pickle.load(infd)
infd.close()
# otherwise, if it's a dict, we get it directly
elif isinstance(lclistpkl, dict):
lclist = lclistpkl
# finally, if it's nothing we recognize, ignore it
else:
LOGERROR('could not understand lclistpkl kwarg, '
'not getting neighbor info')
lclist = dict()
# check if we have a KDTree to use
# if we don't, skip neighbor stuff
if 'kdtree' not in lclist:
LOGERROR('neighbors within %.1f arcsec for %s could '
'not be found, no kdtree in lclistpkl: %s'
                             % (nbrradiusarcsec, objectid, lclistpkl))
neighbors = None
kdt = None
# otherwise, do neighbor processing
else:
kdt = lclist['kdtree']
obj_cosdecl = np.cos(np.radians(objectinfo['decl']))
obj_sindecl = np.sin(np.radians(objectinfo['decl']))
obj_cosra = np.cos(np.radians(objectinfo['ra']))
obj_sinra = np.sin(np.radians(objectinfo['ra']))
obj_xyz = np.column_stack((obj_cosra*obj_cosdecl,
obj_sinra*obj_cosdecl,
obj_sindecl))
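                    # convert the angular search radius to a chord length on
                    # the unit sphere: chord = 2*sin(theta/2), which is the
                    # distance bound the cartesian kdtree query expects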
match_xyzdist = (
2.0 * np.sin(np.radians(nbrradiusarcsec/3600.0)/2.0)
)
matchdists, matchinds = kdt.query(
obj_xyz,
k=maxnumneighbors+1, # get maxnumneighbors + tgt
distance_upper_bound=match_xyzdist
)
# sort by matchdist
mdsorted = np.argsort(matchdists[0])
matchdists = matchdists[0][mdsorted]
matchinds = matchinds[0][mdsorted]
                    # luckily, the indices to the kdtree are the same as those
# for the objects (I think)
neighbors = []
nbrind = 0
for md, mi in zip(matchdists, matchinds):
if np.isfinite(md) and md > 0.0:
if skyview_lookup:
# generate the xy for the finder we'll use a
# HTML5 canvas and these pixcoords to highlight
# each neighbor when we mouse over its row in
# the neighbors tab
# we use coord origin = 0 here and not the usual
# 1 because we're annotating a numpy array
pixcoords = finderwcs.all_world2pix(
np.array([[lclist['objects']['ra'][mi],
lclist['objects']['decl'][mi]]]),
0
)
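                                # the y pixel coordinate is flipped below
                                # against the stamp height (assumed to be
                                # 300 px here) to match the HTML5 canvas
                                # convention of an origin at the top-left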
# each elem is {'objectid',
# 'ra','decl',
# 'xpix','ypix',
# 'dist','lcfpath'}
thisnbr = {
'objectid':(
lclist['objects']['objectid'][mi]
),
'ra':lclist['objects']['ra'][mi],
'decl':lclist['objects']['decl'][mi],
'xpix':pixcoords[0,0],
'ypix':300.0 - pixcoords[0,1],
'dist':_xyzdist_to_distarcsec(md),
'lcfpath': lclist['objects']['lcfname'][mi]
}
neighbors.append(thisnbr)
nbrind = nbrind+1
# put in a nice marker for this neighbor into
# the overall finder chart
annotatex = pixcoords[0,0]
annotatey = pixcoords[0,1]
if ((300.0 - annotatex) > 50.0):
offx = annotatex + 30.0
xha = 'center'
else:
offx = annotatex - 30.0
xha = 'center'
if ((300.0 - annotatey) > 50.0):
offy = annotatey - 30.0
yha = 'center'
else:
offy = annotatey + 30.0
yha = 'center'
ax.annotate('N%s' % nbrind,
(annotatex, annotatey),
xytext=(offx, offy),
arrowprops={'facecolor':'blue',
'edgecolor':'blue',
'width':1.0,
'headwidth':1.0,
'headlength':0.1,
'shrink':0.0},
color='blue',
horizontalalignment=xha,
verticalalignment=yha)
else:
thisnbr = {
'objectid':(
lclist['objects']['objectid'][mi]
),
'ra':lclist['objects']['ra'][mi],
'decl':lclist['objects']['decl'][mi],
'xpix':0.0,
'ypix':0.0,
'dist':_xyzdist_to_distarcsec(md),
'lcfpath': lclist['objects']['lcfname'][mi]
}
neighbors.append(thisnbr)
nbrind = nbrind+1
# if there are no neighbors, set the 'neighbors' key to None
else:
neighbors = None
kdt = None
if skyview_lookup:
#
# finish up the finder chart after neighbors are processed
#
ax.set_xticks([])
ax.set_yticks([])
# add a reticle pointing to the object's coordinates
# we use coord origin = 0 here and not the usual
# 1 because we're annotating a numpy array
object_pixcoords = finderwcs.all_world2pix(
[[objectinfo['ra'],
objectinfo['decl']]],
0
)
ax.axvline(
# x=150.0,
x=object_pixcoords[0,0],
ymin=0.375,
ymax=0.45,
linewidth=1,
color='b'
)
ax.axhline(
# y=150.0,
y=object_pixcoords[0,1],
xmin=0.375,
xmax=0.45,
linewidth=1,
color='b'
)
ax.set_frame_on(False)
# this is the output instance
finderpng = StrIO()
finderfig.savefig(finderpng,
bbox_inches='tight',
pad_inches=0.0, format='png')
plt.close()
# encode the finderpng instance to base64
finderpng.seek(0)
finderb64 = base64.b64encode(finderpng.read())
# close the stringio buffer
finderpng.close()
else:
finderb64 = None
except Exception as e:
LOGEXCEPTION('could not fetch a DSS stamp for this '
'object %s using coords (%.3f,%.3f)' %
(objectid, objectinfo['ra'], objectinfo['decl']))
finderb64 = None
neighbors = None
kdt = None
# if we don't have ra, dec info, then everything is none up to this point
else:
finderb64 = None
neighbors = None
kdt = None
#
# end of finder chart operations
#
# now that we have the finder chart, get the rest of the object
# information
# get the rest of the features, these don't necessarily rely on ra, dec and
# should degrade gracefully if these aren't provided
if isinstance(objectinfo, dict):
if 'objectid' not in objectinfo and 'hatid' in objectinfo:
objectid = objectinfo['hatid']
objectinfo['objectid'] = objectid
elif 'objectid' in objectinfo:
objectid = objectinfo['objectid']
else:
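            # no recognizable ID anywhere: fall back to a short random hex
            # string (12 random bytes -> hex digest, first 7 characters)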
objectid = os.urandom(12).hex()[:7]
objectinfo['objectid'] = objectid
LOGWARNING('no objectid found in objectinfo dict, '
                       'making up a random one: %s' % objectid)
# get the neighbor features and GAIA info
nbrfeat = neighbor_gaia_features(
objectinfo,
kdt,
nbrradiusarcsec,
verbose=False,
gaia_submit_timeout=gaia_submit_timeout,
gaia_submit_tries=gaia_submit_tries,
gaia_max_timeout=gaia_max_timeout,
gaia_mirror=gaia_mirror,
complete_query_later=complete_query_later,
search_simbad=search_simbad
)
objectinfo.update(nbrfeat)
# see if the objectinfo dict has pmra/pmdecl entries. if it doesn't,
# then we'll see if the nbrfeat dict has pmra/pmdecl from GAIA. we'll
# set the appropriate provenance keys as well so we know where the PM
# came from
if ( ('pmra' not in objectinfo) or
( ('pmra' in objectinfo) and
( (objectinfo['pmra'] is None) or
(not np.isfinite(objectinfo['pmra'])) ) ) ):
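            # the gaia_status string starts with 'ok' for a successful query,
            # so a substring check is sufficient here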
if 'ok' in nbrfeat['gaia_status']:
objectinfo['pmra'] = nbrfeat['gaia_pmras'][0]
objectinfo['pmra_err'] = nbrfeat['gaia_pmra_errs'][0]
objectinfo['pmra_source'] = 'gaia'
if verbose:
LOGWARNING('pmRA not found in provided objectinfo dict, '
'using value from GAIA')
else:
objectinfo['pmra_source'] = 'light curve'
if ( ('pmdecl' not in objectinfo) or
( ('pmdecl' in objectinfo) and
( (objectinfo['pmdecl'] is None) or
(not np.isfinite(objectinfo['pmdecl'])) ) ) ):
if 'ok' in nbrfeat['gaia_status']:
objectinfo['pmdecl'] = nbrfeat['gaia_pmdecls'][0]
objectinfo['pmdecl_err'] = nbrfeat['gaia_pmdecl_errs'][0]
objectinfo['pmdecl_source'] = 'gaia'
if verbose:
LOGWARNING('pmDEC not found in provided objectinfo dict, '
'using value from GAIA')
else:
objectinfo['pmdecl_source'] = 'light curve'
#
# update GAIA info so it's available at the first level
#
if 'ok' in objectinfo['gaia_status']:
objectinfo['gaiaid'] = objectinfo['gaia_ids'][0]
objectinfo['gaiamag'] = objectinfo['gaia_mags'][0]
objectinfo['gaia_absmag'] = objectinfo['gaia_absolute_mags'][0]
objectinfo['gaia_parallax'] = objectinfo['gaia_parallaxes'][0]
objectinfo['gaia_parallax_err'] = (
objectinfo['gaia_parallax_errs'][0]
)
objectinfo['gaia_pmra'] = objectinfo['gaia_pmras'][0]
objectinfo['gaia_pmra_err'] = objectinfo['gaia_pmra_errs'][0]
objectinfo['gaia_pmdecl'] = objectinfo['gaia_pmdecls'][0]
objectinfo['gaia_pmdecl_err'] = objectinfo['gaia_pmdecl_errs'][0]
else:
objectinfo['gaiaid'] = None
objectinfo['gaiamag'] = np.nan
objectinfo['gaia_absmag'] = np.nan
objectinfo['gaia_parallax'] = np.nan
objectinfo['gaia_parallax_err'] = np.nan
objectinfo['gaia_pmra'] = np.nan
objectinfo['gaia_pmra_err'] = np.nan
objectinfo['gaia_pmdecl'] = np.nan
objectinfo['gaia_pmdecl_err'] = np.nan
#
# get the object's TIC information
#
if ('ra' in objectinfo and
objectinfo['ra'] is not None and
np.isfinite(objectinfo['ra']) and
'decl' in objectinfo and
objectinfo['decl'] is not None and
np.isfinite(objectinfo['decl'])):
try:
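                # 5.0/60.0 arcmin = 5 arcsec: a tight cone search around the
                # object's position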
ticres = tic_conesearch(objectinfo['ra'],
objectinfo['decl'],
radius_arcmin=5.0/60.0,
verbose=verbose,
timeout=gaia_max_timeout,
maxtries=gaia_submit_tries)
if ticres is not None:
with open(ticres['cachefname'],'r') as infd:
ticinfo = json.load(infd)
if ('data' in ticinfo and
len(ticinfo['data']) > 0 and
isinstance(ticinfo['data'][0], dict)):
objectinfo['ticid'] = str(ticinfo['data'][0]['ID'])
objectinfo['tessmag'] = ticinfo['data'][0]['Tmag']
objectinfo['tic_version'] = (
ticinfo['data'][0]['version']
)
objectinfo['tic_distarcsec'] = (
ticinfo['data'][0]['dstArcSec']
)
objectinfo['tessmag_origin'] = (
ticinfo['data'][0]['TESSflag']
)
objectinfo['tic_starprop_origin'] = (
ticinfo['data'][0]['SPFlag']
)
objectinfo['tic_lumclass'] = (
ticinfo['data'][0]['lumclass']
)
objectinfo['tic_teff'] = (
ticinfo['data'][0]['Teff']
)
objectinfo['tic_teff_err'] = (
ticinfo['data'][0]['e_Teff']
)
objectinfo['tic_logg'] = (
ticinfo['data'][0]['logg']
)
objectinfo['tic_logg_err'] = (
ticinfo['data'][0]['e_logg']
)
objectinfo['tic_mh'] = (
ticinfo['data'][0]['MH']
)
objectinfo['tic_mh_err'] = (
ticinfo['data'][0]['e_MH']
)
objectinfo['tic_radius'] = (
ticinfo['data'][0]['rad']
)
objectinfo['tic_radius_err'] = (
ticinfo['data'][0]['e_rad']
)
objectinfo['tic_mass'] = (
ticinfo['data'][0]['mass']
)
objectinfo['tic_mass_err'] = (
ticinfo['data'][0]['e_mass']
)
objectinfo['tic_density'] = (
ticinfo['data'][0]['rho']
)
objectinfo['tic_density_err'] = (
ticinfo['data'][0]['e_rho']
)
objectinfo['tic_luminosity'] = (
ticinfo['data'][0]['lum']
)
objectinfo['tic_luminosity_err'] = (
ticinfo['data'][0]['e_lum']
)
objectinfo['tic_distancepc'] = (
ticinfo['data'][0]['d']
)
objectinfo['tic_distancepc_err'] = (
ticinfo['data'][0]['e_d']
)
#
# fill in any missing info using the TIC entry
#
if ('gaiaid' not in objectinfo or
('gaiaid' in objectinfo and
(objectinfo['gaiaid'] is None))):
objectinfo['gaiaid'] = ticinfo['data'][0]['GAIA']
if ('gaiamag' not in objectinfo or
('gaiamag' in objectinfo and
(objectinfo['gaiamag'] is None or
not np.isfinite(objectinfo['gaiamag'])))):
objectinfo['gaiamag'] = (
ticinfo['data'][0]['GAIAmag']
)
objectinfo['gaiamag_err'] = (
ticinfo['data'][0]['e_GAIAmag']
)
if ('gaia_parallax' not in objectinfo or
('gaia_parallax' in objectinfo and
(objectinfo['gaia_parallax'] is None or
not np.isfinite(objectinfo['gaia_parallax'])))):
objectinfo['gaia_parallax'] = (
ticinfo['data'][0]['plx']
)
objectinfo['gaia_parallax_err'] = (
ticinfo['data'][0]['e_plx']
)
if (objectinfo['gaiamag'] is not None and
np.isfinite(objectinfo['gaiamag']) and
objectinfo['gaia_parallax'] is not None and
np.isfinite(objectinfo['gaia_parallax'])):
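                            # recompute the absolute magnitude now that the
                            # GAIA mag and parallax may have been filled in
                            # from the TIC entry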
objectinfo['gaia_absmag'] = (
magnitudes.absolute_gaia_magnitude(
objectinfo['gaiamag'],
objectinfo['gaia_parallax']
)
)
if ('pmra' not in objectinfo or
('pmra' in objectinfo and
(objectinfo['pmra'] is None or
not np.isfinite(objectinfo['pmra'])))):
objectinfo['pmra'] = ticinfo['data'][0]['pmRA']
objectinfo['pmra_err'] = (
ticinfo['data'][0]['e_pmRA']
)
objectinfo['pmra_source'] = 'TIC'
if ('pmdecl' not in objectinfo or
('pmdecl' in objectinfo and
(objectinfo['pmdecl'] is None or
not np.isfinite(objectinfo['pmdecl'])))):
objectinfo['pmdecl'] = ticinfo['data'][0]['pmDEC']
objectinfo['pmdecl_err'] = (
ticinfo['data'][0]['e_pmDEC']
)
objectinfo['pmdecl_source'] = 'TIC'
if ('bmag' not in objectinfo or
('bmag' in objectinfo and
(objectinfo['bmag'] is None or
not np.isfinite(objectinfo['bmag'])))):
objectinfo['bmag'] = ticinfo['data'][0]['Bmag']
objectinfo['bmag_err'] = (
ticinfo['data'][0]['e_Bmag']
)
if ('vmag' not in objectinfo or
('vmag' in objectinfo and
(objectinfo['vmag'] is None or
not np.isfinite(objectinfo['vmag'])))):
objectinfo['vmag'] = ticinfo['data'][0]['Vmag']
objectinfo['vmag_err'] = (
ticinfo['data'][0]['e_Vmag']
)
if ('sdssu' not in objectinfo or
('sdssu' in objectinfo and
(objectinfo['sdssu'] is None or
not np.isfinite(objectinfo['sdssu'])))):
objectinfo['sdssu'] = ticinfo['data'][0]['umag']
objectinfo['sdssu_err'] = (
ticinfo['data'][0]['e_umag']
)
if ('sdssg' not in objectinfo or
('sdssg' in objectinfo and
(objectinfo['sdssg'] is None or
not np.isfinite(objectinfo['sdssg'])))):
objectinfo['sdssg'] = ticinfo['data'][0]['gmag']
objectinfo['sdssg_err'] = (
ticinfo['data'][0]['e_gmag']
)
if ('sdssr' not in objectinfo or
('sdssr' in objectinfo and
(objectinfo['sdssr'] is None or
not np.isfinite(objectinfo['sdssr'])))):
objectinfo['sdssr'] = ticinfo['data'][0]['rmag']
objectinfo['sdssr_err'] = (
ticinfo['data'][0]['e_rmag']
)
if ('sdssi' not in objectinfo or
('sdssi' in objectinfo and
(objectinfo['sdssi'] is None or
not np.isfinite(objectinfo['sdssi'])))):
objectinfo['sdssi'] = ticinfo['data'][0]['imag']
objectinfo['sdssi_err'] = (
ticinfo['data'][0]['e_imag']
)
if ('sdssz' not in objectinfo or
('sdssz' in objectinfo and
(objectinfo['sdssz'] is None or
not np.isfinite(objectinfo['sdssz'])))):
objectinfo['sdssz'] = ticinfo['data'][0]['zmag']
objectinfo['sdssz_err'] = (
ticinfo['data'][0]['e_zmag']
)
if ('jmag' not in objectinfo or
('jmag' in objectinfo and
(objectinfo['jmag'] is None or
not np.isfinite(objectinfo['jmag'])))):
objectinfo['jmag'] = ticinfo['data'][0]['Jmag']
objectinfo['jmag_err'] = (
ticinfo['data'][0]['e_Jmag']
)
if ('hmag' not in objectinfo or
('hmag' in objectinfo and
(objectinfo['hmag'] is None or
not np.isfinite(objectinfo['hmag'])))):
objectinfo['hmag'] = ticinfo['data'][0]['Hmag']
objectinfo['hmag_err'] = (
ticinfo['data'][0]['e_Hmag']
)
if ('kmag' not in objectinfo or
('kmag' in objectinfo and
(objectinfo['kmag'] is None or
not np.isfinite(objectinfo['kmag'])))):
objectinfo['kmag'] = ticinfo['data'][0]['Kmag']
objectinfo['kmag_err'] = (
ticinfo['data'][0]['e_Kmag']
)
if ('wise1' not in objectinfo or
('wise1' in objectinfo and
(objectinfo['wise1'] is None or
not np.isfinite(objectinfo['wise1'])))):
objectinfo['wise1'] = ticinfo['data'][0]['w1mag']
objectinfo['wise1_err'] = (
ticinfo['data'][0]['e_w1mag']
)
if ('wise2' not in objectinfo or
('wise2' in objectinfo and
(objectinfo['wise2'] is None or
not np.isfinite(objectinfo['wise2'])))):
objectinfo['wise2'] = ticinfo['data'][0]['w2mag']
objectinfo['wise2_err'] = (
ticinfo['data'][0]['e_w2mag']
)
if ('wise3' not in objectinfo or
('wise3' in objectinfo and
(objectinfo['wise3'] is None or
not np.isfinite(objectinfo['wise3'])))):
objectinfo['wise3'] = ticinfo['data'][0]['w3mag']
objectinfo['wise3_err'] = (
ticinfo['data'][0]['e_w3mag']
)
if ('wise4' not in objectinfo or
('wise4' in objectinfo and
(objectinfo['wise4'] is None or
not np.isfinite(objectinfo['wise4'])))):
objectinfo['wise4'] = ticinfo['data'][0]['w4mag']
objectinfo['wise4_err'] = (
ticinfo['data'][0]['e_w4mag']
)
else:
LOGERROR('could not look up TIC '
'information for object: %s '
'at (%.3f, %.3f)' %
(objectinfo['objectid'],
objectinfo['ra'],
objectinfo['decl']))
except Exception as e:
LOGEXCEPTION('could not look up TIC '
'information for object: %s '
'at (%.3f, %.3f)' %
(objectinfo['objectid'],
objectinfo['ra'],
objectinfo['decl']))
# try to get the object's coord features
coordfeat = coord_features(objectinfo)
# get the color features
colorfeat = color_features(objectinfo,
deredden=deredden_object,
custom_bandpasses=custom_bandpasses,
dust_timeout=dust_timeout)
# get the object's color classification
colorclass = color_classification(colorfeat, coordfeat)
# update the objectinfo dict with everything
objectinfo.update(colorfeat)
objectinfo.update(coordfeat)
objectinfo.update(colorclass)
# put together the initial checkplot pickle dictionary
# this will be updated by the functions below as appropriate
    # and will be written out as a gzipped pickle at the end of processing
checkplotdict = {'objectid':objectid,
'neighbors':neighbors,
'objectinfo':objectinfo,
'finderchart':finderb64,
'sigclip':sigclip,
'normto':normto,
'normmingap':normmingap}
# add the objecttags key to objectinfo
checkplotdict['objectinfo']['objecttags'] = None
# if there's no objectinfo, we can't do anything.
else:
# empty objectinfo dict
checkplotdict = {'objectid':None,
'neighbors':None,
'objectinfo':{
'available_bands':[],
'available_band_labels':[],
'available_dereddened_bands':[],
'available_dereddened_band_labels':[],
'available_colors':[],
'available_color_labels':[],
'bmag':None,
'bmag-vmag':None,
'decl':None,
'hatid':None,
'hmag':None,
'imag-jmag':None,
'jmag-kmag':None,
'jmag':None,
'kmag':None,
'ndet':None,
'network':None,
'objecttags':None,
'pmdecl':None,
'pmdecl_err':None,
'pmra':None,
'pmra_err':None,
'propermotion':None,
'ra':None,
'rpmj':None,
'sdssg':None,
'sdssi':None,
'sdssr':None,
'stations':None,
'twomassid':None,
'ucac4id':None,
'vmag':None
},
'finderchart':None,
'sigclip':sigclip,
'normto':normto,
'normmingap':normmingap}
# end of objectinfo processing
# add the varinfo dict
if isinstance(varinfo, dict):
checkplotdict['varinfo'] = varinfo
else:
checkplotdict['varinfo'] = {
'objectisvar':None,
'vartags':None,
'varisperiodic':None,
'varperiod':None,
'varepoch':None,
}
return checkplotdict
|
[
"This",
"returns",
"the",
"finder",
"chart",
"and",
"object",
"information",
"as",
"a",
"dict",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/checkplot/pkl_utils.py#L120-L1203
|
[
"def",
"_pkl_finder_objectinfo",
"(",
"objectinfo",
",",
"varinfo",
",",
"findercmap",
",",
"finderconvolve",
",",
"sigclip",
",",
"normto",
",",
"normmingap",
",",
"deredden_object",
"=",
"True",
",",
"custom_bandpasses",
"=",
"None",
",",
"lclistpkl",
"=",
"None",
",",
"nbrradiusarcsec",
"=",
"30.0",
",",
"maxnumneighbors",
"=",
"5",
",",
"plotdpi",
"=",
"100",
",",
"findercachedir",
"=",
"'~/.astrobase/stamp-cache'",
",",
"verbose",
"=",
"True",
",",
"gaia_submit_timeout",
"=",
"10.0",
",",
"gaia_submit_tries",
"=",
"3",
",",
"gaia_max_timeout",
"=",
"180.0",
",",
"gaia_mirror",
"=",
"None",
",",
"fast_mode",
"=",
"False",
",",
"complete_query_later",
"=",
"True",
")",
":",
"# optional mode to hit external services and fail fast if they timeout",
"if",
"fast_mode",
"is",
"True",
":",
"skyview_lookup",
"=",
"False",
"skyview_timeout",
"=",
"10.0",
"skyview_retry_failed",
"=",
"False",
"dust_timeout",
"=",
"10.0",
"gaia_submit_timeout",
"=",
"7.0",
"gaia_max_timeout",
"=",
"10.0",
"gaia_submit_tries",
"=",
"2",
"complete_query_later",
"=",
"False",
"search_simbad",
"=",
"False",
"elif",
"isinstance",
"(",
"fast_mode",
",",
"(",
"int",
",",
"float",
")",
")",
"and",
"fast_mode",
">",
"0.0",
":",
"skyview_lookup",
"=",
"True",
"skyview_timeout",
"=",
"fast_mode",
"skyview_retry_failed",
"=",
"False",
"dust_timeout",
"=",
"fast_mode",
"gaia_submit_timeout",
"=",
"0.66",
"*",
"fast_mode",
"gaia_max_timeout",
"=",
"fast_mode",
"gaia_submit_tries",
"=",
"2",
"complete_query_later",
"=",
"False",
"search_simbad",
"=",
"False",
"else",
":",
"skyview_lookup",
"=",
"True",
"skyview_timeout",
"=",
"10.0",
"skyview_retry_failed",
"=",
"True",
"dust_timeout",
"=",
"10.0",
"search_simbad",
"=",
"True",
"if",
"(",
"isinstance",
"(",
"objectinfo",
",",
"dict",
")",
"and",
"(",
"'objectid'",
"in",
"objectinfo",
"or",
"'hatid'",
"in",
"objectinfo",
")",
"and",
"'ra'",
"in",
"objectinfo",
"and",
"'decl'",
"in",
"objectinfo",
"and",
"objectinfo",
"[",
"'ra'",
"]",
"and",
"objectinfo",
"[",
"'decl'",
"]",
")",
":",
"if",
"'objectid'",
"not",
"in",
"objectinfo",
":",
"objectid",
"=",
"objectinfo",
"[",
"'hatid'",
"]",
"else",
":",
"objectid",
"=",
"objectinfo",
"[",
"'objectid'",
"]",
"if",
"verbose",
"and",
"skyview_lookup",
":",
"LOGINFO",
"(",
"'adding in object information and '",
"'finder chart for %s at RA: %.3f, DEC: %.3f'",
"%",
"(",
"objectid",
",",
"objectinfo",
"[",
"'ra'",
"]",
",",
"objectinfo",
"[",
"'decl'",
"]",
")",
")",
"elif",
"verbose",
"and",
"not",
"skyview_lookup",
":",
"LOGINFO",
"(",
"'adding in object information '",
"'for %s at RA: %.3f, DEC: %.3f. '",
"'skipping finder chart because skyview_lookup = False'",
"%",
"(",
"objectid",
",",
"objectinfo",
"[",
"'ra'",
"]",
",",
"objectinfo",
"[",
"'decl'",
"]",
")",
")",
"# get the finder chart",
"try",
":",
"if",
"skyview_lookup",
":",
"try",
":",
"# generate the finder chart",
"finder",
",",
"finderheader",
"=",
"skyview_stamp",
"(",
"objectinfo",
"[",
"'ra'",
"]",
",",
"objectinfo",
"[",
"'decl'",
"]",
",",
"convolvewith",
"=",
"finderconvolve",
",",
"verbose",
"=",
"verbose",
",",
"flip",
"=",
"False",
",",
"cachedir",
"=",
"findercachedir",
",",
"timeout",
"=",
"skyview_timeout",
",",
"retry_failed",
"=",
"skyview_retry_failed",
",",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"not",
"fast_mode",
":",
"LOGERROR",
"(",
"'finder image appears to be corrupt, retrying...'",
")",
"# generate the finder chart",
"finder",
",",
"finderheader",
"=",
"skyview_stamp",
"(",
"objectinfo",
"[",
"'ra'",
"]",
",",
"objectinfo",
"[",
"'decl'",
"]",
",",
"convolvewith",
"=",
"finderconvolve",
",",
"verbose",
"=",
"verbose",
",",
"flip",
"=",
"False",
",",
"cachedir",
"=",
"findercachedir",
",",
"forcefetch",
"=",
"True",
",",
"timeout",
"=",
"skyview_timeout",
",",
"retry_failed",
"=",
"False",
"# do not start an infinite loop",
")",
"finderfig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"3",
",",
"3",
")",
",",
"dpi",
"=",
"plotdpi",
")",
"# initialize the finder WCS",
"finderwcs",
"=",
"WCS",
"(",
"finderheader",
")",
"# use the WCS transform for the plot",
"ax",
"=",
"finderfig",
".",
"add_subplot",
"(",
"111",
",",
"frameon",
"=",
"False",
")",
"ax",
".",
"imshow",
"(",
"finder",
",",
"cmap",
"=",
"findercmap",
",",
"origin",
"=",
"'lower'",
")",
"else",
":",
"finder",
",",
"finderheader",
",",
"finderfig",
",",
"finderwcs",
"=",
"(",
"None",
",",
"None",
",",
"None",
",",
"None",
")",
"# skip down to after nbr stuff for the rest of the finderchart...",
"# search around the target's location and get its neighbors if",
"# lclistpkl is provided and it exists",
"if",
"(",
"lclistpkl",
"is",
"not",
"None",
"and",
"nbrradiusarcsec",
"is",
"not",
"None",
"and",
"nbrradiusarcsec",
">",
"0.0",
")",
":",
"# if lclistpkl is a string, open it as a pickle",
"if",
"isinstance",
"(",
"lclistpkl",
",",
"str",
")",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"lclistpkl",
")",
":",
"if",
"lclistpkl",
".",
"endswith",
"(",
"'.gz'",
")",
":",
"infd",
"=",
"gzip",
".",
"open",
"(",
"lclistpkl",
",",
"'rb'",
")",
"else",
":",
"infd",
"=",
"open",
"(",
"lclistpkl",
",",
"'rb'",
")",
"lclist",
"=",
"pickle",
".",
"load",
"(",
"infd",
")",
"infd",
".",
"close",
"(",
")",
"# otherwise, if it's a dict, we get it directly",
"elif",
"isinstance",
"(",
"lclistpkl",
",",
"dict",
")",
":",
"lclist",
"=",
"lclistpkl",
"# finally, if it's nothing we recognize, ignore it",
"else",
":",
"LOGERROR",
"(",
"'could not understand lclistpkl kwarg, '",
"'not getting neighbor info'",
")",
"lclist",
"=",
"dict",
"(",
")",
"# check if we have a KDTree to use",
"# if we don't, skip neighbor stuff",
"if",
"'kdtree'",
"not",
"in",
"lclist",
":",
"LOGERROR",
"(",
"'neighbors within %.1f arcsec for %s could '",
"'not be found, no kdtree in lclistpkl: %s'",
"%",
"(",
"objectid",
",",
"lclistpkl",
")",
")",
"neighbors",
"=",
"None",
"kdt",
"=",
"None",
"# otherwise, do neighbor processing",
"else",
":",
"kdt",
"=",
"lclist",
"[",
"'kdtree'",
"]",
"obj_cosdecl",
"=",
"np",
".",
"cos",
"(",
"np",
".",
"radians",
"(",
"objectinfo",
"[",
"'decl'",
"]",
")",
")",
"obj_sindecl",
"=",
"np",
".",
"sin",
"(",
"np",
".",
"radians",
"(",
"objectinfo",
"[",
"'decl'",
"]",
")",
")",
"obj_cosra",
"=",
"np",
".",
"cos",
"(",
"np",
".",
"radians",
"(",
"objectinfo",
"[",
"'ra'",
"]",
")",
")",
"obj_sinra",
"=",
"np",
".",
"sin",
"(",
"np",
".",
"radians",
"(",
"objectinfo",
"[",
"'ra'",
"]",
")",
")",
"obj_xyz",
"=",
"np",
".",
"column_stack",
"(",
"(",
"obj_cosra",
"*",
"obj_cosdecl",
",",
"obj_sinra",
"*",
"obj_cosdecl",
",",
"obj_sindecl",
")",
")",
"match_xyzdist",
"=",
"(",
"2.0",
"*",
"np",
".",
"sin",
"(",
"np",
".",
"radians",
"(",
"nbrradiusarcsec",
"/",
"3600.0",
")",
"/",
"2.0",
")",
")",
"matchdists",
",",
"matchinds",
"=",
"kdt",
".",
"query",
"(",
"obj_xyz",
",",
"k",
"=",
"maxnumneighbors",
"+",
"1",
",",
"# get maxnumneighbors + tgt",
"distance_upper_bound",
"=",
"match_xyzdist",
")",
"# sort by matchdist",
"mdsorted",
"=",
"np",
".",
"argsort",
"(",
"matchdists",
"[",
"0",
"]",
")",
"matchdists",
"=",
"matchdists",
"[",
"0",
"]",
"[",
"mdsorted",
"]",
"matchinds",
"=",
"matchinds",
"[",
"0",
"]",
"[",
"mdsorted",
"]",
"# luckily, the indices to the kdtree are the same as that",
"# for the objects (I think)",
"neighbors",
"=",
"[",
"]",
"nbrind",
"=",
"0",
"for",
"md",
",",
"mi",
"in",
"zip",
"(",
"matchdists",
",",
"matchinds",
")",
":",
"if",
"np",
".",
"isfinite",
"(",
"md",
")",
"and",
"md",
">",
"0.0",
":",
"if",
"skyview_lookup",
":",
"# generate the xy for the finder we'll use a",
"# HTML5 canvas and these pixcoords to highlight",
"# each neighbor when we mouse over its row in",
"# the neighbors tab",
"# we use coord origin = 0 here and not the usual",
"# 1 because we're annotating a numpy array",
"pixcoords",
"=",
"finderwcs",
".",
"all_world2pix",
"(",
"np",
".",
"array",
"(",
"[",
"[",
"lclist",
"[",
"'objects'",
"]",
"[",
"'ra'",
"]",
"[",
"mi",
"]",
",",
"lclist",
"[",
"'objects'",
"]",
"[",
"'decl'",
"]",
"[",
"mi",
"]",
"]",
"]",
")",
",",
"0",
")",
"# each elem is {'objectid',",
"# 'ra','decl',",
"# 'xpix','ypix',",
"# 'dist','lcfpath'}",
"thisnbr",
"=",
"{",
"'objectid'",
":",
"(",
"lclist",
"[",
"'objects'",
"]",
"[",
"'objectid'",
"]",
"[",
"mi",
"]",
")",
",",
"'ra'",
":",
"lclist",
"[",
"'objects'",
"]",
"[",
"'ra'",
"]",
"[",
"mi",
"]",
",",
"'decl'",
":",
"lclist",
"[",
"'objects'",
"]",
"[",
"'decl'",
"]",
"[",
"mi",
"]",
",",
"'xpix'",
":",
"pixcoords",
"[",
"0",
",",
"0",
"]",
",",
"'ypix'",
":",
"300.0",
"-",
"pixcoords",
"[",
"0",
",",
"1",
"]",
",",
"'dist'",
":",
"_xyzdist_to_distarcsec",
"(",
"md",
")",
",",
"'lcfpath'",
":",
"lclist",
"[",
"'objects'",
"]",
"[",
"'lcfname'",
"]",
"[",
"mi",
"]",
"}",
"neighbors",
".",
"append",
"(",
"thisnbr",
")",
"nbrind",
"=",
"nbrind",
"+",
"1",
"# put in a nice marker for this neighbor into",
"# the overall finder chart",
"annotatex",
"=",
"pixcoords",
"[",
"0",
",",
"0",
"]",
"annotatey",
"=",
"pixcoords",
"[",
"0",
",",
"1",
"]",
"if",
"(",
"(",
"300.0",
"-",
"annotatex",
")",
">",
"50.0",
")",
":",
"offx",
"=",
"annotatex",
"+",
"30.0",
"xha",
"=",
"'center'",
"else",
":",
"offx",
"=",
"annotatex",
"-",
"30.0",
"xha",
"=",
"'center'",
"if",
"(",
"(",
"300.0",
"-",
"annotatey",
")",
">",
"50.0",
")",
":",
"offy",
"=",
"annotatey",
"-",
"30.0",
"yha",
"=",
"'center'",
"else",
":",
"offy",
"=",
"annotatey",
"+",
"30.0",
"yha",
"=",
"'center'",
"ax",
".",
"annotate",
"(",
"'N%s'",
"%",
"nbrind",
",",
"(",
"annotatex",
",",
"annotatey",
")",
",",
"xytext",
"=",
"(",
"offx",
",",
"offy",
")",
",",
"arrowprops",
"=",
"{",
"'facecolor'",
":",
"'blue'",
",",
"'edgecolor'",
":",
"'blue'",
",",
"'width'",
":",
"1.0",
",",
"'headwidth'",
":",
"1.0",
",",
"'headlength'",
":",
"0.1",
",",
"'shrink'",
":",
"0.0",
"}",
",",
"color",
"=",
"'blue'",
",",
"horizontalalignment",
"=",
"xha",
",",
"verticalalignment",
"=",
"yha",
")",
"else",
":",
"thisnbr",
"=",
"{",
"'objectid'",
":",
"(",
"lclist",
"[",
"'objects'",
"]",
"[",
"'objectid'",
"]",
"[",
"mi",
"]",
")",
",",
"'ra'",
":",
"lclist",
"[",
"'objects'",
"]",
"[",
"'ra'",
"]",
"[",
"mi",
"]",
",",
"'decl'",
":",
"lclist",
"[",
"'objects'",
"]",
"[",
"'decl'",
"]",
"[",
"mi",
"]",
",",
"'xpix'",
":",
"0.0",
",",
"'ypix'",
":",
"0.0",
",",
"'dist'",
":",
"_xyzdist_to_distarcsec",
"(",
"md",
")",
",",
"'lcfpath'",
":",
"lclist",
"[",
"'objects'",
"]",
"[",
"'lcfname'",
"]",
"[",
"mi",
"]",
"}",
"neighbors",
".",
"append",
"(",
"thisnbr",
")",
"nbrind",
"=",
"nbrind",
"+",
"1",
"# if there are no neighbors, set the 'neighbors' key to None",
"else",
":",
"neighbors",
"=",
"None",
"kdt",
"=",
"None",
"if",
"skyview_lookup",
":",
"#",
"# finish up the finder chart after neighbors are processed",
"#",
"ax",
".",
"set_xticks",
"(",
"[",
"]",
")",
"ax",
".",
"set_yticks",
"(",
"[",
"]",
")",
"# add a reticle pointing to the object's coordinates",
"# we use coord origin = 0 here and not the usual",
"# 1 because we're annotating a numpy array",
"object_pixcoords",
"=",
"finderwcs",
".",
"all_world2pix",
"(",
"[",
"[",
"objectinfo",
"[",
"'ra'",
"]",
",",
"objectinfo",
"[",
"'decl'",
"]",
"]",
"]",
",",
"0",
")",
"ax",
".",
"axvline",
"(",
"# x=150.0,",
"x",
"=",
"object_pixcoords",
"[",
"0",
",",
"0",
"]",
",",
"ymin",
"=",
"0.375",
",",
"ymax",
"=",
"0.45",
",",
"linewidth",
"=",
"1",
",",
"color",
"=",
"'b'",
")",
"ax",
".",
"axhline",
"(",
"# y=150.0,",
"y",
"=",
"object_pixcoords",
"[",
"0",
",",
"1",
"]",
",",
"xmin",
"=",
"0.375",
",",
"xmax",
"=",
"0.45",
",",
"linewidth",
"=",
"1",
",",
"color",
"=",
"'b'",
")",
"ax",
".",
"set_frame_on",
"(",
"False",
")",
"# this is the output instance",
"finderpng",
"=",
"StrIO",
"(",
")",
"finderfig",
".",
"savefig",
"(",
"finderpng",
",",
"bbox_inches",
"=",
"'tight'",
",",
"pad_inches",
"=",
"0.0",
",",
"format",
"=",
"'png'",
")",
"plt",
".",
"close",
"(",
")",
"# encode the finderpng instance to base64",
"finderpng",
".",
"seek",
"(",
"0",
")",
"finderb64",
"=",
"base64",
".",
"b64encode",
"(",
"finderpng",
".",
"read",
"(",
")",
")",
"# close the stringio buffer",
"finderpng",
".",
"close",
"(",
")",
"else",
":",
"finderb64",
"=",
"None",
"except",
"Exception",
"as",
"e",
":",
"LOGEXCEPTION",
"(",
"'could not fetch a DSS stamp for this '",
"'object %s using coords (%.3f,%.3f)'",
"%",
"(",
"objectid",
",",
"objectinfo",
"[",
"'ra'",
"]",
",",
"objectinfo",
"[",
"'decl'",
"]",
")",
")",
"finderb64",
"=",
"None",
"neighbors",
"=",
"None",
"kdt",
"=",
"None",
"# if we don't have ra, dec info, then everything is none up to this point",
"else",
":",
"finderb64",
"=",
"None",
"neighbors",
"=",
"None",
"kdt",
"=",
"None",
"#",
"# end of finder chart operations",
"#",
"# now that we have the finder chart, get the rest of the object",
"# information",
"# get the rest of the features, these don't necessarily rely on ra, dec and",
"# should degrade gracefully if these aren't provided",
"if",
"isinstance",
"(",
"objectinfo",
",",
"dict",
")",
":",
"if",
"'objectid'",
"not",
"in",
"objectinfo",
"and",
"'hatid'",
"in",
"objectinfo",
":",
"objectid",
"=",
"objectinfo",
"[",
"'hatid'",
"]",
"objectinfo",
"[",
"'objectid'",
"]",
"=",
"objectid",
"elif",
"'objectid'",
"in",
"objectinfo",
":",
"objectid",
"=",
"objectinfo",
"[",
"'objectid'",
"]",
"else",
":",
"objectid",
"=",
"os",
".",
"urandom",
"(",
"12",
")",
".",
"hex",
"(",
")",
"[",
":",
"7",
"]",
"objectinfo",
"[",
"'objectid'",
"]",
"=",
"objectid",
"LOGWARNING",
"(",
"'no objectid found in objectinfo dict, '",
"'making up a random one: %s'",
")",
"# get the neighbor features and GAIA info",
"nbrfeat",
"=",
"neighbor_gaia_features",
"(",
"objectinfo",
",",
"kdt",
",",
"nbrradiusarcsec",
",",
"verbose",
"=",
"False",
",",
"gaia_submit_timeout",
"=",
"gaia_submit_timeout",
",",
"gaia_submit_tries",
"=",
"gaia_submit_tries",
",",
"gaia_max_timeout",
"=",
"gaia_max_timeout",
",",
"gaia_mirror",
"=",
"gaia_mirror",
",",
"complete_query_later",
"=",
"complete_query_later",
",",
"search_simbad",
"=",
"search_simbad",
")",
"objectinfo",
".",
"update",
"(",
"nbrfeat",
")",
"# see if the objectinfo dict has pmra/pmdecl entries. if it doesn't,",
"# then we'll see if the nbrfeat dict has pmra/pmdecl from GAIA. we'll",
"# set the appropriate provenance keys as well so we know where the PM",
"# came from",
"if",
"(",
"(",
"'pmra'",
"not",
"in",
"objectinfo",
")",
"or",
"(",
"(",
"'pmra'",
"in",
"objectinfo",
")",
"and",
"(",
"(",
"objectinfo",
"[",
"'pmra'",
"]",
"is",
"None",
")",
"or",
"(",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'pmra'",
"]",
")",
")",
")",
")",
")",
":",
"if",
"'ok'",
"in",
"nbrfeat",
"[",
"'gaia_status'",
"]",
":",
"objectinfo",
"[",
"'pmra'",
"]",
"=",
"nbrfeat",
"[",
"'gaia_pmras'",
"]",
"[",
"0",
"]",
"objectinfo",
"[",
"'pmra_err'",
"]",
"=",
"nbrfeat",
"[",
"'gaia_pmra_errs'",
"]",
"[",
"0",
"]",
"objectinfo",
"[",
"'pmra_source'",
"]",
"=",
"'gaia'",
"if",
"verbose",
":",
"LOGWARNING",
"(",
"'pmRA not found in provided objectinfo dict, '",
"'using value from GAIA'",
")",
"else",
":",
"objectinfo",
"[",
"'pmra_source'",
"]",
"=",
"'light curve'",
"if",
"(",
"(",
"'pmdecl'",
"not",
"in",
"objectinfo",
")",
"or",
"(",
"(",
"'pmdecl'",
"in",
"objectinfo",
")",
"and",
"(",
"(",
"objectinfo",
"[",
"'pmdecl'",
"]",
"is",
"None",
")",
"or",
"(",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'pmdecl'",
"]",
")",
")",
")",
")",
")",
":",
"if",
"'ok'",
"in",
"nbrfeat",
"[",
"'gaia_status'",
"]",
":",
"objectinfo",
"[",
"'pmdecl'",
"]",
"=",
"nbrfeat",
"[",
"'gaia_pmdecls'",
"]",
"[",
"0",
"]",
"objectinfo",
"[",
"'pmdecl_err'",
"]",
"=",
"nbrfeat",
"[",
"'gaia_pmdecl_errs'",
"]",
"[",
"0",
"]",
"objectinfo",
"[",
"'pmdecl_source'",
"]",
"=",
"'gaia'",
"if",
"verbose",
":",
"LOGWARNING",
"(",
"'pmDEC not found in provided objectinfo dict, '",
"'using value from GAIA'",
")",
"else",
":",
"objectinfo",
"[",
"'pmdecl_source'",
"]",
"=",
"'light curve'",
"#",
"# update GAIA info so it's available at the first level",
"#",
"if",
"'ok'",
"in",
"objectinfo",
"[",
"'gaia_status'",
"]",
":",
"objectinfo",
"[",
"'gaiaid'",
"]",
"=",
"objectinfo",
"[",
"'gaia_ids'",
"]",
"[",
"0",
"]",
"objectinfo",
"[",
"'gaiamag'",
"]",
"=",
"objectinfo",
"[",
"'gaia_mags'",
"]",
"[",
"0",
"]",
"objectinfo",
"[",
"'gaia_absmag'",
"]",
"=",
"objectinfo",
"[",
"'gaia_absolute_mags'",
"]",
"[",
"0",
"]",
"objectinfo",
"[",
"'gaia_parallax'",
"]",
"=",
"objectinfo",
"[",
"'gaia_parallaxes'",
"]",
"[",
"0",
"]",
"objectinfo",
"[",
"'gaia_parallax_err'",
"]",
"=",
"(",
"objectinfo",
"[",
"'gaia_parallax_errs'",
"]",
"[",
"0",
"]",
")",
"objectinfo",
"[",
"'gaia_pmra'",
"]",
"=",
"objectinfo",
"[",
"'gaia_pmras'",
"]",
"[",
"0",
"]",
"objectinfo",
"[",
"'gaia_pmra_err'",
"]",
"=",
"objectinfo",
"[",
"'gaia_pmra_errs'",
"]",
"[",
"0",
"]",
"objectinfo",
"[",
"'gaia_pmdecl'",
"]",
"=",
"objectinfo",
"[",
"'gaia_pmdecls'",
"]",
"[",
"0",
"]",
"objectinfo",
"[",
"'gaia_pmdecl_err'",
"]",
"=",
"objectinfo",
"[",
"'gaia_pmdecl_errs'",
"]",
"[",
"0",
"]",
"else",
":",
"objectinfo",
"[",
"'gaiaid'",
"]",
"=",
"None",
"objectinfo",
"[",
"'gaiamag'",
"]",
"=",
"np",
".",
"nan",
"objectinfo",
"[",
"'gaia_absmag'",
"]",
"=",
"np",
".",
"nan",
"objectinfo",
"[",
"'gaia_parallax'",
"]",
"=",
"np",
".",
"nan",
"objectinfo",
"[",
"'gaia_parallax_err'",
"]",
"=",
"np",
".",
"nan",
"objectinfo",
"[",
"'gaia_pmra'",
"]",
"=",
"np",
".",
"nan",
"objectinfo",
"[",
"'gaia_pmra_err'",
"]",
"=",
"np",
".",
"nan",
"objectinfo",
"[",
"'gaia_pmdecl'",
"]",
"=",
"np",
".",
"nan",
"objectinfo",
"[",
"'gaia_pmdecl_err'",
"]",
"=",
"np",
".",
"nan",
"#",
"# get the object's TIC information",
"#",
"if",
"(",
"'ra'",
"in",
"objectinfo",
"and",
"objectinfo",
"[",
"'ra'",
"]",
"is",
"not",
"None",
"and",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'ra'",
"]",
")",
"and",
"'decl'",
"in",
"objectinfo",
"and",
"objectinfo",
"[",
"'decl'",
"]",
"is",
"not",
"None",
"and",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'decl'",
"]",
")",
")",
":",
"try",
":",
"ticres",
"=",
"tic_conesearch",
"(",
"objectinfo",
"[",
"'ra'",
"]",
",",
"objectinfo",
"[",
"'decl'",
"]",
",",
"radius_arcmin",
"=",
"5.0",
"/",
"60.0",
",",
"verbose",
"=",
"verbose",
",",
"timeout",
"=",
"gaia_max_timeout",
",",
"maxtries",
"=",
"gaia_submit_tries",
")",
"if",
"ticres",
"is",
"not",
"None",
":",
"with",
"open",
"(",
"ticres",
"[",
"'cachefname'",
"]",
",",
"'r'",
")",
"as",
"infd",
":",
"ticinfo",
"=",
"json",
".",
"load",
"(",
"infd",
")",
"if",
"(",
"'data'",
"in",
"ticinfo",
"and",
"len",
"(",
"ticinfo",
"[",
"'data'",
"]",
")",
">",
"0",
"and",
"isinstance",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
",",
"dict",
")",
")",
":",
"objectinfo",
"[",
"'ticid'",
"]",
"=",
"str",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'ID'",
"]",
")",
"objectinfo",
"[",
"'tessmag'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'Tmag'",
"]",
"objectinfo",
"[",
"'tic_version'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'version'",
"]",
")",
"objectinfo",
"[",
"'tic_distarcsec'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'dstArcSec'",
"]",
")",
"objectinfo",
"[",
"'tessmag_origin'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'TESSflag'",
"]",
")",
"objectinfo",
"[",
"'tic_starprop_origin'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'SPFlag'",
"]",
")",
"objectinfo",
"[",
"'tic_lumclass'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'lumclass'",
"]",
")",
"objectinfo",
"[",
"'tic_teff'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'Teff'",
"]",
")",
"objectinfo",
"[",
"'tic_teff_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_Teff'",
"]",
")",
"objectinfo",
"[",
"'tic_logg'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'logg'",
"]",
")",
"objectinfo",
"[",
"'tic_logg_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_logg'",
"]",
")",
"objectinfo",
"[",
"'tic_mh'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'MH'",
"]",
")",
"objectinfo",
"[",
"'tic_mh_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_MH'",
"]",
")",
"objectinfo",
"[",
"'tic_radius'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'rad'",
"]",
")",
"objectinfo",
"[",
"'tic_radius_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_rad'",
"]",
")",
"objectinfo",
"[",
"'tic_mass'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'mass'",
"]",
")",
"objectinfo",
"[",
"'tic_mass_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_mass'",
"]",
")",
"objectinfo",
"[",
"'tic_density'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'rho'",
"]",
")",
"objectinfo",
"[",
"'tic_density_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_rho'",
"]",
")",
"objectinfo",
"[",
"'tic_luminosity'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'lum'",
"]",
")",
"objectinfo",
"[",
"'tic_luminosity_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_lum'",
"]",
")",
"objectinfo",
"[",
"'tic_distancepc'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'d'",
"]",
")",
"objectinfo",
"[",
"'tic_distancepc_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_d'",
"]",
")",
"#",
"# fill in any missing info using the TIC entry",
"#",
"if",
"(",
"'gaiaid'",
"not",
"in",
"objectinfo",
"or",
"(",
"'gaiaid'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'gaiaid'",
"]",
"is",
"None",
")",
")",
")",
":",
"objectinfo",
"[",
"'gaiaid'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'GAIA'",
"]",
"if",
"(",
"'gaiamag'",
"not",
"in",
"objectinfo",
"or",
"(",
"'gaiamag'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'gaiamag'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'gaiamag'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'gaiamag'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'GAIAmag'",
"]",
")",
"objectinfo",
"[",
"'gaiamag_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_GAIAmag'",
"]",
")",
"if",
"(",
"'gaia_parallax'",
"not",
"in",
"objectinfo",
"or",
"(",
"'gaia_parallax'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'gaia_parallax'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'gaia_parallax'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'gaia_parallax'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'plx'",
"]",
")",
"objectinfo",
"[",
"'gaia_parallax_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_plx'",
"]",
")",
"if",
"(",
"objectinfo",
"[",
"'gaiamag'",
"]",
"is",
"not",
"None",
"and",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'gaiamag'",
"]",
")",
"and",
"objectinfo",
"[",
"'gaia_parallax'",
"]",
"is",
"not",
"None",
"and",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'gaia_parallax'",
"]",
")",
")",
":",
"objectinfo",
"[",
"'gaia_absmag'",
"]",
"=",
"(",
"magnitudes",
".",
"absolute_gaia_magnitude",
"(",
"objectinfo",
"[",
"'gaiamag'",
"]",
",",
"objectinfo",
"[",
"'gaia_parallax'",
"]",
")",
")",
"if",
"(",
"'pmra'",
"not",
"in",
"objectinfo",
"or",
"(",
"'pmra'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'pmra'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'pmra'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'pmra'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'pmRA'",
"]",
"objectinfo",
"[",
"'pmra_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_pmRA'",
"]",
")",
"objectinfo",
"[",
"'pmra_source'",
"]",
"=",
"'TIC'",
"if",
"(",
"'pmdecl'",
"not",
"in",
"objectinfo",
"or",
"(",
"'pmdecl'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'pmdecl'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'pmdecl'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'pmdecl'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'pmDEC'",
"]",
"objectinfo",
"[",
"'pmdecl_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_pmDEC'",
"]",
")",
"objectinfo",
"[",
"'pmdecl_source'",
"]",
"=",
"'TIC'",
"if",
"(",
"'bmag'",
"not",
"in",
"objectinfo",
"or",
"(",
"'bmag'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'bmag'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'bmag'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'bmag'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'Bmag'",
"]",
"objectinfo",
"[",
"'bmag_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_Bmag'",
"]",
")",
"if",
"(",
"'vmag'",
"not",
"in",
"objectinfo",
"or",
"(",
"'vmag'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'vmag'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'vmag'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'vmag'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'Vmag'",
"]",
"objectinfo",
"[",
"'vmag_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_Vmag'",
"]",
")",
"if",
"(",
"'sdssu'",
"not",
"in",
"objectinfo",
"or",
"(",
"'sdssu'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'sdssu'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'sdssu'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'sdssu'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'umag'",
"]",
"objectinfo",
"[",
"'sdssu_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_umag'",
"]",
")",
"if",
"(",
"'sdssg'",
"not",
"in",
"objectinfo",
"or",
"(",
"'sdssg'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'sdssg'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'sdssg'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'sdssg'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'gmag'",
"]",
"objectinfo",
"[",
"'sdssg_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_gmag'",
"]",
")",
"if",
"(",
"'sdssr'",
"not",
"in",
"objectinfo",
"or",
"(",
"'sdssr'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'sdssr'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'sdssr'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'sdssr'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'rmag'",
"]",
"objectinfo",
"[",
"'sdssr_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_rmag'",
"]",
")",
"if",
"(",
"'sdssi'",
"not",
"in",
"objectinfo",
"or",
"(",
"'sdssi'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'sdssi'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'sdssi'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'sdssi'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'imag'",
"]",
"objectinfo",
"[",
"'sdssi_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_imag'",
"]",
")",
"if",
"(",
"'sdssz'",
"not",
"in",
"objectinfo",
"or",
"(",
"'sdssz'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'sdssz'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'sdssz'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'sdssz'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'zmag'",
"]",
"objectinfo",
"[",
"'sdssz_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_zmag'",
"]",
")",
"if",
"(",
"'jmag'",
"not",
"in",
"objectinfo",
"or",
"(",
"'jmag'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'jmag'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'jmag'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'jmag'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'Jmag'",
"]",
"objectinfo",
"[",
"'jmag_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_Jmag'",
"]",
")",
"if",
"(",
"'hmag'",
"not",
"in",
"objectinfo",
"or",
"(",
"'hmag'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'hmag'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'hmag'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'hmag'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'Hmag'",
"]",
"objectinfo",
"[",
"'hmag_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_Hmag'",
"]",
")",
"if",
"(",
"'kmag'",
"not",
"in",
"objectinfo",
"or",
"(",
"'kmag'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'kmag'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'kmag'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'kmag'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'Kmag'",
"]",
"objectinfo",
"[",
"'kmag_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_Kmag'",
"]",
")",
"if",
"(",
"'wise1'",
"not",
"in",
"objectinfo",
"or",
"(",
"'wise1'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'wise1'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'wise1'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'wise1'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'w1mag'",
"]",
"objectinfo",
"[",
"'wise1_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_w1mag'",
"]",
")",
"if",
"(",
"'wise2'",
"not",
"in",
"objectinfo",
"or",
"(",
"'wise2'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'wise2'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'wise2'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'wise2'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'w2mag'",
"]",
"objectinfo",
"[",
"'wise2_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_w2mag'",
"]",
")",
"if",
"(",
"'wise3'",
"not",
"in",
"objectinfo",
"or",
"(",
"'wise3'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'wise3'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'wise3'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'wise3'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'w3mag'",
"]",
"objectinfo",
"[",
"'wise3_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_w3mag'",
"]",
")",
"if",
"(",
"'wise4'",
"not",
"in",
"objectinfo",
"or",
"(",
"'wise4'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'wise4'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'wise4'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'wise4'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'w4mag'",
"]",
"objectinfo",
"[",
"'wise4_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_w4mag'",
"]",
")",
"else",
":",
"LOGERROR",
"(",
"'could not look up TIC '",
"'information for object: %s '",
"'at (%.3f, %.3f)'",
"%",
"(",
"objectinfo",
"[",
"'objectid'",
"]",
",",
"objectinfo",
"[",
"'ra'",
"]",
",",
"objectinfo",
"[",
"'decl'",
"]",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"LOGEXCEPTION",
"(",
"'could not look up TIC '",
"'information for object: %s '",
"'at (%.3f, %.3f)'",
"%",
"(",
"objectinfo",
"[",
"'objectid'",
"]",
",",
"objectinfo",
"[",
"'ra'",
"]",
",",
"objectinfo",
"[",
"'decl'",
"]",
")",
")",
"# try to get the object's coord features",
"coordfeat",
"=",
"coord_features",
"(",
"objectinfo",
")",
"# get the color features",
"colorfeat",
"=",
"color_features",
"(",
"objectinfo",
",",
"deredden",
"=",
"deredden_object",
",",
"custom_bandpasses",
"=",
"custom_bandpasses",
",",
"dust_timeout",
"=",
"dust_timeout",
")",
"# get the object's color classification",
"colorclass",
"=",
"color_classification",
"(",
"colorfeat",
",",
"coordfeat",
")",
"# update the objectinfo dict with everything",
"objectinfo",
".",
"update",
"(",
"colorfeat",
")",
"objectinfo",
".",
"update",
"(",
"coordfeat",
")",
"objectinfo",
".",
"update",
"(",
"colorclass",
")",
"# put together the initial checkplot pickle dictionary",
"# this will be updated by the functions below as appropriate",
"# and will written out as a gzipped pickle at the end of processing",
"checkplotdict",
"=",
"{",
"'objectid'",
":",
"objectid",
",",
"'neighbors'",
":",
"neighbors",
",",
"'objectinfo'",
":",
"objectinfo",
",",
"'finderchart'",
":",
"finderb64",
",",
"'sigclip'",
":",
"sigclip",
",",
"'normto'",
":",
"normto",
",",
"'normmingap'",
":",
"normmingap",
"}",
"# add the objecttags key to objectinfo",
"checkplotdict",
"[",
"'objectinfo'",
"]",
"[",
"'objecttags'",
"]",
"=",
"None",
"# if there's no objectinfo, we can't do anything.",
"else",
":",
"# empty objectinfo dict",
"checkplotdict",
"=",
"{",
"'objectid'",
":",
"None",
",",
"'neighbors'",
":",
"None",
",",
"'objectinfo'",
":",
"{",
"'available_bands'",
":",
"[",
"]",
",",
"'available_band_labels'",
":",
"[",
"]",
",",
"'available_dereddened_bands'",
":",
"[",
"]",
",",
"'available_dereddened_band_labels'",
":",
"[",
"]",
",",
"'available_colors'",
":",
"[",
"]",
",",
"'available_color_labels'",
":",
"[",
"]",
",",
"'bmag'",
":",
"None",
",",
"'bmag-vmag'",
":",
"None",
",",
"'decl'",
":",
"None",
",",
"'hatid'",
":",
"None",
",",
"'hmag'",
":",
"None",
",",
"'imag-jmag'",
":",
"None",
",",
"'jmag-kmag'",
":",
"None",
",",
"'jmag'",
":",
"None",
",",
"'kmag'",
":",
"None",
",",
"'ndet'",
":",
"None",
",",
"'network'",
":",
"None",
",",
"'objecttags'",
":",
"None",
",",
"'pmdecl'",
":",
"None",
",",
"'pmdecl_err'",
":",
"None",
",",
"'pmra'",
":",
"None",
",",
"'pmra_err'",
":",
"None",
",",
"'propermotion'",
":",
"None",
",",
"'ra'",
":",
"None",
",",
"'rpmj'",
":",
"None",
",",
"'sdssg'",
":",
"None",
",",
"'sdssi'",
":",
"None",
",",
"'sdssr'",
":",
"None",
",",
"'stations'",
":",
"None",
",",
"'twomassid'",
":",
"None",
",",
"'ucac4id'",
":",
"None",
",",
"'vmag'",
":",
"None",
"}",
",",
"'finderchart'",
":",
"None",
",",
"'sigclip'",
":",
"sigclip",
",",
"'normto'",
":",
"normto",
",",
"'normmingap'",
":",
"normmingap",
"}",
"# end of objectinfo processing",
"# add the varinfo dict",
"if",
"isinstance",
"(",
"varinfo",
",",
"dict",
")",
":",
"checkplotdict",
"[",
"'varinfo'",
"]",
"=",
"varinfo",
"else",
":",
"checkplotdict",
"[",
"'varinfo'",
"]",
"=",
"{",
"'objectisvar'",
":",
"None",
",",
"'vartags'",
":",
"None",
",",
"'varisperiodic'",
":",
"None",
",",
"'varperiod'",
":",
"None",
",",
"'varepoch'",
":",
"None",
",",
"}",
"return",
"checkplotdict"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
_pkl_periodogram
|
This returns the periodogram plot PNG as base64, plus info as a dict.
Parameters
----------
lspinfo : dict
This is an lspinfo dict containing results from a period-finding
function. If it's from an astrobase period-finding function in
periodbase, this will already be in the correct format. To use external
period-finder results with this function, the `lspinfo` dict must be of
the following form, with at least the keys listed below::
{'periods': np.array of all periods searched by the period-finder,
'lspvals': np.array of periodogram power value for each period,
'bestperiod': a float value that is the period with the highest
peak in the periodogram, i.e. the most-likely actual
period,
'method': a three-letter code naming the period-finder used; must
be one of the keys in the
`astrobase.periodbase.METHODLABELS` dict,
'nbestperiods': a list of the periods corresponding to periodogram
peaks (`nbestlspvals` below) to annotate on the
periodogram plot so they can be called out
visually,
'nbestlspvals': a list of the power values associated with
periodogram peaks to annotate on the periodogram
plot so they can be called out visually; should be
the same length as `nbestperiods` above}
`nbestperiods` and `nbestlspvals` must have at least 5 elements each,
e.g. describing the five 'best' (highest power) peaks in the
periodogram.
plotdpi : int
The resolution in DPI of the output periodogram plot to make.
override_pfmethod : str or None
This is used to set a custom label for this periodogram
method. Normally, this is taken from the 'method' key in the input
`lspinfo` dict, but if you want to override the output method name,
provide this as a string here. This can be useful if you have multiple
results you want to incorporate into a checkplotdict from a single
period-finder (e.g. if you ran BLS over several period ranges
separately).
Returns
-------
dict
Returns a dict that contains the following items::
{methodname: {'periods':the period array from lspinfo,
'lspvals': the periodogram power array from lspinfo,
'bestperiod': the best period from lspinfo,
'nbestperiods': the 'nbestperiods' list from lspinfo,
'nbestlspvals': the 'nbestlspvals' list from lspinfo,
'periodogram': base64 encoded string representation of
the periodogram plot}}
The dict is returned in this format so it can be directly incorporated
under the period-finder's label `methodname` in a checkplotdict, using
Python's dict `update()` method.
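To make the expected input concrete, here is a minimal sketch of an `lspinfo` dict built from an external period-finder's output (the 'gls' method code is assumed to be one of the `astrobase.periodbase.METHODLABELS` keys, and the arrays are synthetic):

import numpy as np

periods = np.linspace(0.1, 10.0, 5000)
lspvals = np.random.default_rng(42).random(5000)
best5 = np.argsort(lspvals)[::-1][:5]          # indices of the 5 highest peaks

lspinfo = {
    'periods': periods,
    'lspvals': lspvals,
    'bestperiod': float(periods[best5[0]]),
    'method': 'gls',                           # assumed METHODLABELS key
    'nbestperiods': periods[best5].tolist(),   # at least 5 elements, per above
    'nbestlspvals': lspvals[best5].tolist(),
}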
|
astrobase/checkplot/pkl_utils.py
|
def _pkl_periodogram(lspinfo,
plotdpi=100,
override_pfmethod=None):
'''This returns the periodogram plot PNG as base64, plus info as a dict.
Parameters
----------
lspinfo : dict
This is an lspinfo dict containing results from a period-finding
function. If it's from an astrobase period-finding function in
periodbase, this will already be in the correct format. To use external
period-finder results with this function, the `lspinfo` dict must be of
the following form, with at least the keys listed below::
{'periods': np.array of all periods searched by the period-finder,
'lspvals': np.array of periodogram power value for each period,
'bestperiod': a float value that is the period with the highest
peak in the periodogram, i.e. the most-likely actual
period,
'method': a three-letter code naming the period-finder used; must
be one of the keys in the
`astrobase.periodbase.METHODLABELS` dict,
'nbestperiods': a list of the periods corresponding to periodogram
peaks (`nbestlspvals` below) to annotate on the
periodogram plot so they can be called out
visually,
'nbestlspvals': a list of the power values associated with
periodogram peaks to annotate on the periodogram
plot so they can be called out visually; should be
the same length as `nbestperiods` above}
`nbestperiods` and `nbestlspvals` must have at least 5 elements each,
e.g. describing the five 'best' (highest power) peaks in the
periodogram.
plotdpi : int
The resolution in DPI of the output periodogram plot to make.
override_pfmethod : str or None
This is used to set a custom label for this periodogram
method. Normally, this is taken from the 'method' key in the input
`lspinfo` dict, but if you want to override the output method name,
provide this as a string here. This can be useful if you have multiple
results you want to incorporate into a checkplotdict from a single
period-finder (e.g. if you ran BLS over several period ranges
separately).
Returns
-------
dict
Returns a dict that contains the following items::
{methodname: {'periods':the period array from lspinfo,
'lspvals': the periodogram power array from lspinfo,
'bestperiod': the best period from lspinfo,
'nbestperiods': the 'nbestperiods' list from lspinfo,
'nbestlspvals': the 'nbestlspvals' list from lspinfo,
'periodogram': base64 encoded string representation of
the periodogram plot}}
The dict is returned in this format so it can be directly incorporated
under the period-finder's label `methodname` in a checkplotdict, using
Python's dict `update()` method.
'''
# get the appropriate plot ylabel
pgramylabel = PLOTYLABELS[lspinfo['method']]
# get the periods and lspvals from lspinfo
periods = lspinfo['periods']
lspvals = lspinfo['lspvals']
bestperiod = lspinfo['bestperiod']
nbestperiods = lspinfo['nbestperiods']
nbestlspvals = lspinfo['nbestlspvals']
# open the figure instance
pgramfig = plt.figure(figsize=(7.5,4.8),dpi=plotdpi)
# make the plot
plt.plot(periods,lspvals)
plt.xscale('log',basex=10)
plt.xlabel('Period [days]')
plt.ylabel(pgramylabel)
plottitle = '%s - %.6f d' % (METHODLABELS[lspinfo['method']],
bestperiod)
plt.title(plottitle)
# show the best five peaks on the plot
for xbestperiod, xbestpeak in zip(nbestperiods,
nbestlspvals):
plt.annotate('%.6f' % xbestperiod,
xy=(xbestperiod, xbestpeak), xycoords='data',
xytext=(0.0,25.0), textcoords='offset points',
arrowprops=dict(arrowstyle="->"),fontsize='14.0')
# make a grid
plt.grid(color='#a9a9a9',
alpha=0.9,
zorder=0,
linewidth=1.0,
linestyle=':')
# this is the output instance
pgrampng = StrIO()
pgramfig.savefig(pgrampng,
# bbox_inches='tight',
pad_inches=0.0, format='png')
plt.close()
# encode the pgrampng instance to base64
pgrampng.seek(0)
pgramb64 = base64.b64encode(pgrampng.read())
# close the stringio buffer
pgrampng.close()
if not override_pfmethod:
# this is the dict to return
checkplotdict = {
lspinfo['method']:{
'periods':periods,
'lspvals':lspvals,
'bestperiod':bestperiod,
'nbestperiods':nbestperiods,
'nbestlspvals':nbestlspvals,
'periodogram':pgramb64,
}
}
else:
# this is the dict to return
checkplotdict = {
override_pfmethod:{
'periods':periods,
'lspvals':lspvals,
'bestperiod':bestperiod,
'nbestperiods':nbestperiods,
'nbestlspvals':nbestlspvals,
'periodogram':pgramb64,
}
}
return checkplotdict
|
def _pkl_periodogram(lspinfo,
plotdpi=100,
override_pfmethod=None):
'''This returns the periodogram plot PNG as base64, plus info as a dict.
Parameters
----------
lspinfo : dict
This is an lspinfo dict containing results from a period-finding
function. If it's from an astrobase period-finding function in
periodbase, this will already be in the correct format. To use external
period-finder results with this function, the `lspinfo` dict must be of
the following form, with at least the keys listed below::
{'periods': np.array of all periods searched by the period-finder,
'lspvals': np.array of periodogram power value for each period,
'bestperiod': a float value that is the period with the highest
peak in the periodogram, i.e. the most-likely actual
period,
'method': a three-letter code naming the period-finder used; must
be one of the keys in the
`astrobase.periodbase.METHODLABELS` dict,
'nbestperiods': a list of the periods corresponding to periodogram
peaks (`nbestlspvals` below) to annotate on the
periodogram plot so they can be called out
visually,
'nbestlspvals': a list of the power values associated with
periodogram peaks to annotate on the periodogram
plot so they can be called out visually; should be
the same length as `nbestperiods` above}
`nbestperiods` and `nbestlspvals` must have at least 5 elements each,
e.g. describing the five 'best' (highest power) peaks in the
periodogram.
plotdpi : int
The resolution in DPI of the output periodogram plot to make.
override_pfmethod : str or None
This is used to set a custom label for this periodogram
method. Normally, this is taken from the 'method' key in the input
`lspinfo` dict, but if you want to override the output method name,
provide this as a string here. This can be useful if you have multiple
results you want to incorporate into a checkplotdict from a single
period-finder (e.g. if you ran BLS over several period ranges
separately).
Returns
-------
dict
Returns a dict that contains the following items::
{methodname: {'periods':the period array from lspinfo,
'lspvals': the periodogram power array from lspinfo,
'bestperiod': the best period from lspinfo,
'nbestperiods': the 'nbestperiods' list from lspinfo,
'nbestlspvals': the 'nbestlspvals' list from lspinfo,
'periodogram': base64 encoded string representation of
the periodogram plot}}
The dict is returned in this format so it can be directly incorporated
under the period-finder's label `methodname` in a checkplotdict, using
Python's dict `update()` method.
'''
# get the appropriate plot ylabel
pgramylabel = PLOTYLABELS[lspinfo['method']]
# get the periods and lspvals from lspinfo
periods = lspinfo['periods']
lspvals = lspinfo['lspvals']
bestperiod = lspinfo['bestperiod']
nbestperiods = lspinfo['nbestperiods']
nbestlspvals = lspinfo['nbestlspvals']
# open the figure instance
pgramfig = plt.figure(figsize=(7.5,4.8),dpi=plotdpi)
# make the plot
plt.plot(periods,lspvals)
plt.xscale('log',basex=10)
plt.xlabel('Period [days]')
plt.ylabel(pgramylabel)
plottitle = '%s - %.6f d' % (METHODLABELS[lspinfo['method']],
bestperiod)
plt.title(plottitle)
# show the best five peaks on the plot
for xbestperiod, xbestpeak in zip(nbestperiods,
nbestlspvals):
plt.annotate('%.6f' % xbestperiod,
xy=(xbestperiod, xbestpeak), xycoords='data',
xytext=(0.0,25.0), textcoords='offset points',
arrowprops=dict(arrowstyle="->"),fontsize='14.0')
# make a grid
plt.grid(color='#a9a9a9',
alpha=0.9,
zorder=0,
linewidth=1.0,
linestyle=':')
# this is the output instance
pgrampng = StrIO()
pgramfig.savefig(pgrampng,
# bbox_inches='tight',
pad_inches=0.0, format='png')
plt.close()
# encode the pgrampng instance to base64
pgrampng.seek(0)
pgramb64 = base64.b64encode(pgrampng.read())
# close the stringio buffer
pgrampng.close()
if not override_pfmethod:
# this is the dict to return
checkplotdict = {
lspinfo['method']:{
'periods':periods,
'lspvals':lspvals,
'bestperiod':bestperiod,
'nbestperiods':nbestperiods,
'nbestlspvals':nbestlspvals,
'periodogram':pgramb64,
}
}
else:
# this is the dict to return
checkplotdict = {
override_pfmethod:{
'periods':periods,
'lspvals':lspvals,
'bestperiod':bestperiod,
'nbestperiods':nbestperiods,
'nbestlspvals':nbestlspvals,
'periodogram':pgramb64,
}
}
return checkplotdict
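Because the result is keyed by the method label, merging it into an existing checkplotdict is a single `update()` call; a usage sketch, assuming `lspinfo` is shaped as in the docstring (with `'method': 'gls'`, as in the earlier sketch):

from astrobase.checkplot.pkl_utils import _pkl_periodogram

checkplotdict = {'objectid': 'test-object'}           # stand-in checkplotdict
checkplotdict.update(_pkl_periodogram(lspinfo, plotdpi=100))
# checkplotdict['gls'] now holds 'periods', 'lspvals', 'bestperiod',
# 'nbestperiods', 'nbestlspvals', and the base64-encoded 'periodogram' PNG

# with override_pfmethod, two runs of the same finder can coexist:
checkplotdict.update(_pkl_periodogram(lspinfo, override_pfmethod='gls-short'))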
|
[
"This",
"returns",
"the",
"periodogram",
"plot",
"PNG",
"as",
"base64",
"plus",
"info",
"as",
"a",
"dict",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/checkplot/pkl_utils.py#L1207-L1355
|
[
"def",
"_pkl_periodogram",
"(",
"lspinfo",
",",
"plotdpi",
"=",
"100",
",",
"override_pfmethod",
"=",
"None",
")",
":",
"# get the appropriate plot ylabel",
"pgramylabel",
"=",
"PLOTYLABELS",
"[",
"lspinfo",
"[",
"'method'",
"]",
"]",
"# get the periods and lspvals from lspinfo",
"periods",
"=",
"lspinfo",
"[",
"'periods'",
"]",
"lspvals",
"=",
"lspinfo",
"[",
"'lspvals'",
"]",
"bestperiod",
"=",
"lspinfo",
"[",
"'bestperiod'",
"]",
"nbestperiods",
"=",
"lspinfo",
"[",
"'nbestperiods'",
"]",
"nbestlspvals",
"=",
"lspinfo",
"[",
"'nbestlspvals'",
"]",
"# open the figure instance",
"pgramfig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"7.5",
",",
"4.8",
")",
",",
"dpi",
"=",
"plotdpi",
")",
"# make the plot",
"plt",
".",
"plot",
"(",
"periods",
",",
"lspvals",
")",
"plt",
".",
"xscale",
"(",
"'log'",
",",
"basex",
"=",
"10",
")",
"plt",
".",
"xlabel",
"(",
"'Period [days]'",
")",
"plt",
".",
"ylabel",
"(",
"pgramylabel",
")",
"plottitle",
"=",
"'%s - %.6f d'",
"%",
"(",
"METHODLABELS",
"[",
"lspinfo",
"[",
"'method'",
"]",
"]",
",",
"bestperiod",
")",
"plt",
".",
"title",
"(",
"plottitle",
")",
"# show the best five peaks on the plot",
"for",
"xbestperiod",
",",
"xbestpeak",
"in",
"zip",
"(",
"nbestperiods",
",",
"nbestlspvals",
")",
":",
"plt",
".",
"annotate",
"(",
"'%.6f'",
"%",
"xbestperiod",
",",
"xy",
"=",
"(",
"xbestperiod",
",",
"xbestpeak",
")",
",",
"xycoords",
"=",
"'data'",
",",
"xytext",
"=",
"(",
"0.0",
",",
"25.0",
")",
",",
"textcoords",
"=",
"'offset points'",
",",
"arrowprops",
"=",
"dict",
"(",
"arrowstyle",
"=",
"\"->\"",
")",
",",
"fontsize",
"=",
"'14.0'",
")",
"# make a grid",
"plt",
".",
"grid",
"(",
"color",
"=",
"'#a9a9a9'",
",",
"alpha",
"=",
"0.9",
",",
"zorder",
"=",
"0",
",",
"linewidth",
"=",
"1.0",
",",
"linestyle",
"=",
"':'",
")",
"# this is the output instance",
"pgrampng",
"=",
"StrIO",
"(",
")",
"pgramfig",
".",
"savefig",
"(",
"pgrampng",
",",
"# bbox_inches='tight',",
"pad_inches",
"=",
"0.0",
",",
"format",
"=",
"'png'",
")",
"plt",
".",
"close",
"(",
")",
"# encode the finderpng instance to base64",
"pgrampng",
".",
"seek",
"(",
"0",
")",
"pgramb64",
"=",
"base64",
".",
"b64encode",
"(",
"pgrampng",
".",
"read",
"(",
")",
")",
"# close the stringio buffer",
"pgrampng",
".",
"close",
"(",
")",
"if",
"not",
"override_pfmethod",
":",
"# this is the dict to return",
"checkplotdict",
"=",
"{",
"lspinfo",
"[",
"'method'",
"]",
":",
"{",
"'periods'",
":",
"periods",
",",
"'lspvals'",
":",
"lspvals",
",",
"'bestperiod'",
":",
"bestperiod",
",",
"'nbestperiods'",
":",
"nbestperiods",
",",
"'nbestlspvals'",
":",
"nbestlspvals",
",",
"'periodogram'",
":",
"pgramb64",
",",
"}",
"}",
"else",
":",
"# this is the dict to return",
"checkplotdict",
"=",
"{",
"override_pfmethod",
":",
"{",
"'periods'",
":",
"periods",
",",
"'lspvals'",
":",
"lspvals",
",",
"'bestperiod'",
":",
"bestperiod",
",",
"'nbestperiods'",
":",
"nbestperiods",
",",
"'nbestlspvals'",
":",
"nbestlspvals",
",",
"'periodogram'",
":",
"pgramb64",
",",
"}",
"}",
"return",
"checkplotdict"
] |
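One Python 3 detail worth noting in the tokens above: `base64.b64encode` returns `bytes`, so the stored `'periodogram'` value needs an explicit decode before it can be embedded in a text format such as JSON; a minimal sketch:

import base64
import json

png_bytes = b'\x89PNG\r\n\x1a\n...'        # stand-in for the saved figure bytes
b64_value = base64.b64encode(png_bytes)    # bytes, exactly as stored in the dict
payload = json.dumps({'periodogram': b64_value.decode('ascii')})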
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
_pkl_magseries_plot
|
This returns the magseries plot PNG as base64, plus arrays as dict.
Parameters
----------
stimes,smags,serrs : np.array
The mag/flux time-series arrays along with associated errors. These
should all have been run through nan-stripping and sigma-clipping
beforehand.
plotdpi : int
The resolution of the plot to make in DPI.
magsarefluxes : bool
If True, indicates the input time-series is fluxes and not mags so the
plot y-axis direction and range can be set appropriately.
Returns
-------
dict
A dict of the following form is returned::
{'magseries': {'plot': base64 encoded str representation of the
magnitude/flux time-series plot,
'times': the `stimes` array,
'mags': the `smags` array,
'errs': the `serrs` array}}
The dict is returned in this format so it can be directly incorporated
in a checkplotdict, using Python's dict `update()` method.
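Since `'plot'` holds a base64-encoded PNG, a viewable image can be recovered directly from the returned dict; a sketch using synthetic inputs in place of real, sigma-clipped arrays:

import base64
import numpy as np
from astrobase.checkplot.pkl_utils import _pkl_magseries_plot

rng = np.random.default_rng(7)
stimes = np.sort(rng.uniform(2455000.0, 2455100.0, 500))   # JD-like timestamps
smags = 12.0 + 0.05 * rng.standard_normal(500)
serrs = np.full(500, 0.01)

cpd = _pkl_magseries_plot(stimes, smags, serrs, plotdpi=100)
with open('magseries.png', 'wb') as outf:
    outf.write(base64.b64decode(cpd['magseries']['plot']))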
|
astrobase/checkplot/pkl_utils.py
|
def _pkl_magseries_plot(stimes, smags, serrs,
plotdpi=100,
magsarefluxes=False):
'''This returns the magseries plot PNG as base64, plus arrays as dict.
Parameters
----------
stimes,smags,serrs : np.array
The mag/flux time-series arrays along with associated errors. These
should all have been run through nan-stripping and sigma-clipping
beforehand.
plotdpi : int
The resolution of the plot to make in DPI.
magsarefluxes : bool
If True, indicates the input time-series is fluxes and not mags so the
plot y-axis direction and range can be set appropriately.
Returns
-------
dict
A dict of the following form is returned::
{'magseries': {'plot': base64 encoded str representation of the
magnitude/flux time-series plot,
'times': the `stimes` array,
'mags': the `smags` array,
'errs': the `serrs` array}}
The dict is returned in this format so it can be directly incorporated
in a checkplotdict, using Python's dict `update()` method.
'''
scaledplottime = stimes - npmin(stimes)
# open the figure instance
magseriesfig = plt.figure(figsize=(7.5,4.8),dpi=plotdpi)
plt.plot(scaledplottime,
smags,
marker='o',
ms=2.0, ls='None',mew=0,
color='green',
rasterized=True)
# flip y axis for mags
if not magsarefluxes:
plot_ylim = plt.ylim()
plt.ylim((plot_ylim[1], plot_ylim[0]))
# set the x axis limit
plt.xlim((npmin(scaledplottime)-2.0,
npmax(scaledplottime)+2.0))
# make a grid
plt.grid(color='#a9a9a9',
alpha=0.9,
zorder=0,
linewidth=1.0,
linestyle=':')
# make the x and y axis labels
plot_xlabel = 'JD - %.3f' % npmin(stimes)
if magsarefluxes:
plot_ylabel = 'flux'
else:
plot_ylabel = 'magnitude'
plt.xlabel(plot_xlabel)
plt.ylabel(plot_ylabel)
# fix the yaxis ticks (turns off offset and uses the full
# value of the yaxis tick)
plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)
plt.gca().get_xaxis().get_major_formatter().set_useOffset(False)
# this is the output instance
magseriespng = StrIO()
magseriesfig.savefig(magseriespng,
# bbox_inches='tight',
pad_inches=0.05, format='png')
plt.close()
# encode the magseriespng instance to base64
magseriespng.seek(0)
magseriesb64 = base64.b64encode(magseriespng.read())
# close the stringio buffer
magseriespng.close()
checkplotdict = {
'magseries':{
'plot':magseriesb64,
'times':stimes,
'mags':smags,
'errs':serrs
}
}
return checkplotdict
|
def _pkl_magseries_plot(stimes, smags, serrs,
plotdpi=100,
magsarefluxes=False):
'''This returns the magseries plot PNG as base64, plus arrays as dict.
Parameters
----------
stimes,smags,serrs : np.array
The mag/flux time-series arrays along with associated errors. These
should all have been run through nan-stripping and sigma-clipping
beforehand.
plotdpi : int
The resolution of the plot to make in DPI.
magsarefluxes : bool
If True, indicates the input time-series is fluxes and not mags so the
plot y-axis direction and range can be set appropriately.
Returns
-------
dict
A dict of the following form is returned::
{'magseries': {'plot': base64 encoded str representation of the
magnitude/flux time-series plot,
'times': the `stimes` array,
'mags': the `smags` array,
'errs': the `serrs` array}}
The dict is returned in this format so it can be directly incorporated
in a checkplotdict, using Python's dict `update()` method.
'''
scaledplottime = stimes - npmin(stimes)
# open the figure instance
magseriesfig = plt.figure(figsize=(7.5,4.8),dpi=plotdpi)
plt.plot(scaledplottime,
smags,
marker='o',
ms=2.0, ls='None',mew=0,
color='green',
rasterized=True)
# flip y axis for mags
if not magsarefluxes:
plot_ylim = plt.ylim()
plt.ylim((plot_ylim[1], plot_ylim[0]))
# set the x axis limit
plt.xlim((npmin(scaledplottime)-2.0,
npmax(scaledplottime)+2.0))
# make a grid
plt.grid(color='#a9a9a9',
alpha=0.9,
zorder=0,
linewidth=1.0,
linestyle=':')
# make the x and y axis labels
plot_xlabel = 'JD - %.3f' % npmin(stimes)
if magsarefluxes:
plot_ylabel = 'flux'
else:
plot_ylabel = 'magnitude'
plt.xlabel(plot_xlabel)
plt.ylabel(plot_ylabel)
# fix the yaxis ticks (turns off offset and uses the full
# value of the yaxis tick)
plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)
plt.gca().get_xaxis().get_major_formatter().set_useOffset(False)
# this is the output instance
magseriespng = StrIO()
magseriesfig.savefig(magseriespng,
# bbox_inches='tight',
pad_inches=0.05, format='png')
plt.close()
# encode the magseriespng instance to base64
magseriespng.seek(0)
magseriesb64 = base64.b64encode(magseriespng.read())
# close the stringio buffer
magseriespng.close()
checkplotdict = {
'magseries':{
'plot':magseriesb64,
'times':stimes,
'mags':smags,
'errs':serrs
}
}
return checkplotdict
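The y-axis flip above implements the astronomical magnitude convention (smaller magnitudes are brighter, so they should plot higher); the same idiom works on any Matplotlib axes:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0.0, 1.0, 2.0], [12.1, 11.9, 12.0], 'o')
ymin, ymax = ax.get_ylim()
ax.set_ylim(ymax, ymin)      # flip: brighter (smaller) mags now plot higher
# equivalently: ax.invert_yaxis()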
|
[
"This",
"returns",
"the",
"magseries",
"plot",
"PNG",
"as",
"base64",
"plus",
"arrays",
"as",
"dict",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/checkplot/pkl_utils.py#L1359-L1462
|
[
"def",
"_pkl_magseries_plot",
"(",
"stimes",
",",
"smags",
",",
"serrs",
",",
"plotdpi",
"=",
"100",
",",
"magsarefluxes",
"=",
"False",
")",
":",
"scaledplottime",
"=",
"stimes",
"-",
"npmin",
"(",
"stimes",
")",
"# open the figure instance",
"magseriesfig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"7.5",
",",
"4.8",
")",
",",
"dpi",
"=",
"plotdpi",
")",
"plt",
".",
"plot",
"(",
"scaledplottime",
",",
"smags",
",",
"marker",
"=",
"'o'",
",",
"ms",
"=",
"2.0",
",",
"ls",
"=",
"'None'",
",",
"mew",
"=",
"0",
",",
"color",
"=",
"'green'",
",",
"rasterized",
"=",
"True",
")",
"# flip y axis for mags",
"if",
"not",
"magsarefluxes",
":",
"plot_ylim",
"=",
"plt",
".",
"ylim",
"(",
")",
"plt",
".",
"ylim",
"(",
"(",
"plot_ylim",
"[",
"1",
"]",
",",
"plot_ylim",
"[",
"0",
"]",
")",
")",
"# set the x axis limit",
"plt",
".",
"xlim",
"(",
"(",
"npmin",
"(",
"scaledplottime",
")",
"-",
"2.0",
",",
"npmax",
"(",
"scaledplottime",
")",
"+",
"2.0",
")",
")",
"# make a grid",
"plt",
".",
"grid",
"(",
"color",
"=",
"'#a9a9a9'",
",",
"alpha",
"=",
"0.9",
",",
"zorder",
"=",
"0",
",",
"linewidth",
"=",
"1.0",
",",
"linestyle",
"=",
"':'",
")",
"# make the x and y axis labels",
"plot_xlabel",
"=",
"'JD - %.3f'",
"%",
"npmin",
"(",
"stimes",
")",
"if",
"magsarefluxes",
":",
"plot_ylabel",
"=",
"'flux'",
"else",
":",
"plot_ylabel",
"=",
"'magnitude'",
"plt",
".",
"xlabel",
"(",
"plot_xlabel",
")",
"plt",
".",
"ylabel",
"(",
"plot_ylabel",
")",
"# fix the yaxis ticks (turns off offset and uses the full",
"# value of the yaxis tick)",
"plt",
".",
"gca",
"(",
")",
".",
"get_yaxis",
"(",
")",
".",
"get_major_formatter",
"(",
")",
".",
"set_useOffset",
"(",
"False",
")",
"plt",
".",
"gca",
"(",
")",
".",
"get_xaxis",
"(",
")",
".",
"get_major_formatter",
"(",
")",
".",
"set_useOffset",
"(",
"False",
")",
"# this is the output instance",
"magseriespng",
"=",
"StrIO",
"(",
")",
"magseriesfig",
".",
"savefig",
"(",
"magseriespng",
",",
"# bbox_inches='tight',",
"pad_inches",
"=",
"0.05",
",",
"format",
"=",
"'png'",
")",
"plt",
".",
"close",
"(",
")",
"# encode the finderpng instance to base64",
"magseriespng",
".",
"seek",
"(",
"0",
")",
"magseriesb64",
"=",
"base64",
".",
"b64encode",
"(",
"magseriespng",
".",
"read",
"(",
")",
")",
"# close the stringio buffer",
"magseriespng",
".",
"close",
"(",
")",
"checkplotdict",
"=",
"{",
"'magseries'",
":",
"{",
"'plot'",
":",
"magseriesb64",
",",
"'times'",
":",
"stimes",
",",
"'mags'",
":",
"smags",
",",
"'errs'",
":",
"serrs",
"}",
"}",
"return",
"checkplotdict"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
_pkl_phased_magseries_plot
|
This returns the phased magseries plot PNG as base64 plus info as a dict.
Parameters
----------
checkplotdict : dict
This is an existing checkplotdict to update. If it's None or
`directreturn` = True, then the generated dict result for this magseries
plot will be returned directly.
lspmethod : str
lspmethod is a string indicating the type of period-finding algorithm
that produced the period. If this is not in
`astrobase.plotbase.METHODSHORTLABELS`, it will be used verbatim. In
most cases, this will come directly from the lspinfo dict produced by a
period-finder function.
periodind : int
This is the index of the current periodogram period being operated
on::
If == 0 -> best period and `bestperiodhighlight` is applied if not
None
If > 0 -> some other peak of the periodogram
If == -1 -> special mode w/ no periodogram labels and enabled
highlight
stimes,smags,serrs : np.array
The mag/flux time-series arrays along with associated errors. These
should all have been run through nan-stripping and sigma-clipping
beforehand.
varperiod : float or None
The period to use for this phased light curve plot tile.
varepoch : 'min' or float or list of lists or None
The epoch to use for this phased light curve plot tile. If this is a
float, will use the provided value directly. If this is 'min', will
automatically figure out the time-of-minimum of the phased light
curve. If this is None, will use the minimum value of `stimes` as the
epoch of the phased light curve plot. If this is a list of lists, will
use the provided value of `lspmethodind` to look up the current
period-finder method and the provided value of `periodind` to look up
the epoch associated with that method and the current period. This is
mostly only useful when `twolspmode` is True.
phasewrap : bool
If this is True, the phased time-series will be wrapped around
phase 0.0.
phasesort : bool
If True, will sort the phased light curve in order of increasing phase.
phasebin : float
The bin size to use to group together measurements closer than this
amount in phase. This is in units of phase. If this is a float, a
phase-binned version of the phased light curve will be overplotted on
top of the regular phased light curve.
minbinelems : int
The minimum number of elements required per phase bin to include it in
the phased LC plot.
plotxlim : sequence of two floats or None
The x-range (min, max) of the phased light curve plot. If None, will be
determined automatically.
plotdpi : int
The resolution of the output plot PNGs in dots per inch.
bestperiodhighlight : str or None
If not None, this is a str with a matplotlib color specification to use
as the background color to highlight the phased light curve plot of the
'best' period and epoch combination. If None, no highlight will be
applied.
xgridlines : list of floats or None
If this is provided, must be a list of floats corresponding to the phase
values at which to draw vertical dashed lines as a means of highlighting
these.
xliminsetmode : bool
If this is True, the generated phased light curve plot will use the
values of `plotxlim` as the main plot x-axis limits (i.e. zoomed-in if
`plotxlim` is a range smaller than the full phase range), and will show
the full phased light curve plot as a smaller inset. Useful for
planetary transit light curves.
magsarefluxes : bool
If True, indicates the input time-series is fluxes and not mags so the
plot y-axis direction and range can be set appropriately.
directreturn : bool
If this set to True, will return only the dict corresponding to the
phased LC plot for the input `periodind` and `lspmethod` and not return
this result embedded in a checkplotdict.
overplotfit : dict
If this is provided, it must be a dict of the form returned by one of
the astrobase.lcfit.fit_XXXXX_magseries functions. This can be used to
overplot a light curve model fit on top of the phased light curve plot
returned by this function. The `overplotfit` dict has the following
form, including at least the keys listed here::
{'fittype':str: name of fit method,
'fitchisq':float: the chi-squared value of the fit,
'fitredchisq':float: the reduced chi-squared value of the fit,
'fitinfo':{'fitmags':array: model mags or fluxes from fit function},
'magseries':{'times':array: times where the fitmags are evaluated}}
`fitmags` and `times` should all be of the same size. The input
`overplotfit` dict is copied over to the checkplotdict for each specific
phased LC plot to save all of this information for use later.
verbose : bool
If True, will indicate progress and warn about problems.
override_pfmethod : str or None
This is used to set a custom label for the periodogram method. Normally,
this is taken from the 'method' key in the input `lspinfo` dict, but if
you want to override the output method name, provide this as a string
here. This can be useful if you have multiple results you want to
incorporate into a checkplotdict from a single period-finder (e.g. if
you ran BLS over several period ranges separately).
Returns
-------
dict
Returns a dict of the following form::
{lspmethod: {'plot': the phased LC plot as base64 str,
'period': the period used for this phased LC,
'epoch': the epoch used for this phased LC,
'phase': phase value array,
'phasedmags': mags/fluxes sorted in phase order,
'binphase': array of binned phase values,
'binphasedmags': mags/fluxes sorted in binphase order,
'phasewrap': value of the input `phasewrap` kwarg,
'phasesort': value of the input `phasesort` kwarg,
'phasebin': value of the input `phasebin` kwarg,
'minbinelems': value of the input `minbinelems` kwarg,
'plotxlim': value of the input `plotxlim` kwarg,
'lcfit': the provided `overplotfit` dict}}
The dict is in this form because we can use Python dicts' `update()`
method to update an existing checkplotdict. If `directreturn` is True,
only the inner dict is returned.
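A minimal call that returns just the inner dict looks like the following (a sketch: the 'gls' label, the 0.5-day period, and the synthetic arrays are illustrative assumptions):

import numpy as np
from astrobase.checkplot.pkl_utils import _pkl_phased_magseries_plot

rng = np.random.default_rng(3)
stimes = np.sort(rng.uniform(2455000.0, 2455100.0, 500))
smags = (12.0 + 0.1 * np.sin(2.0 * np.pi * stimes / 0.5) +
         0.02 * rng.standard_normal(500))
serrs = np.full(500, 0.02)

phased = _pkl_phased_magseries_plot(
    None,                  # no existing checkplotdict to update
    'gls',                 # period-finder label (assumed)
    0,                     # periodind 0 -> the 'best period' tile
    stimes, smags, serrs,
    0.5,                   # varperiod in days (matches the synthetic signal)
    'min',                 # fit for the epoch of minimum light
    directreturn=True,
)
# phased['plot'] is the base64 PNG; phased['period'], phased['epoch'],
# phased['phase'], and phased['phasedmags'] carry the folded series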
|
astrobase/checkplot/pkl_utils.py
|
def _pkl_phased_magseries_plot(
checkplotdict,
lspmethod,
periodind,
stimes, smags, serrs,
varperiod, varepoch,
lspmethodind=0,
phasewrap=True,
phasesort=True,
phasebin=0.002,
minbinelems=7,
plotxlim=(-0.8,0.8),
plotdpi=100,
bestperiodhighlight=None,
xgridlines=None,
xliminsetmode=False,
magsarefluxes=False,
directreturn=False,
overplotfit=None,
verbose=True,
override_pfmethod=None
):
'''This returns the phased magseries plot PNG as base64 plus info as a dict.
Parameters
----------
checkplotdict : dict
This is an existing checkplotdict to update. If it's None or
`directreturn` = True, then the generated dict result for this magseries
plot will be returned directly.
lspmethod : str
lspmethod is a string indicating the type of period-finding algorithm
that produced the period. If this is not in
`astrobase.plotbase.METHODSHORTLABELS`, it will be used verbatim. In
most cases, this will come directly from the lspinfo dict produced by a
period-finder function.
periodind : int
This is the index of the current periodogram period being operated
on::
If == 0 -> best period and `bestperiodhighlight` is applied if not
None
If > 0 -> some other peak of the periodogram
If == -1 -> special mode w/ no periodogram labels and enabled
highlight
stimes,smags,serrs : np.array
The mag/flux time-series arrays along with associated errors. These
should all have been run through nan-stripping and sigma-clipping
beforehand.
varperiod : float or None
The period to use for this phased light curve plot tile.
varepoch : 'min' or float or list of lists or None
The epoch to use for this phased light curve plot tile. If this is a
float, will use the provided value directly. If this is 'min', will
automatically figure out the time-of-minimum of the phased light
curve. If this is None, will use the minimum value of `stimes` as the
epoch of the phased light curve plot. If this is a list of lists, will
use the provided value of `lspmethodind` to look up the current
period-finder method and the provided value of `periodind` to look up
the epoch associated with that method and the current period. This is
mostly only useful when `twolspmode` is True.
phasewrap : bool
If this is True, the phased time-series will be wrapped around
phase 0.0.
phasesort : bool
If True, will sort the phased light curve in order of increasing phase.
phasebin : float
The bin size to use to group together measurements closer than this
amount in phase. This is in units of phase. If this is a float, a
phase-binned version of the phased light curve will be overplotted on
top of the regular phased light curve.
minbinelems : int
The minimum number of elements required per phase bin to include it in
the phased LC plot.
plotxlim : sequence of two floats or None
The x-range (min, max) of the phased light curve plot. If None, will be
determined automatically.
plotdpi : int
The resolution of the output plot PNGs in dots per inch.
bestperiodhighlight : str or None
If not None, this is a str with a matplotlib color specification to use
as the background color to highlight the phased light curve plot of the
'best' period and epoch combination. If None, no highlight will be
applied.
xgridlines : list of floats or None
If this is provided, must be a list of floats corresponding to the phase
values at which to draw vertical dashed lines as a means of highlighting
these.
xliminsetmode : bool
If this is True, the generated phased light curve plot will use the
values of `plotxlim` as the main plot x-axis limits (i.e. zoomed-in if
`plotxlim` is a range smaller than the full phase range), and will show
the full phased light curve plot as a smaller inset. Useful for
planetary transit light curves.
magsarefluxes : bool
If True, indicates the input time-series is fluxes and not mags so the
plot y-axis direction and range can be set appropriately.
directreturn : bool
If this set to True, will return only the dict corresponding to the
phased LC plot for the input `periodind` and `lspmethod` and not return
this result embedded in a checkplotdict.
overplotfit : dict
If this is provided, it must be a dict of the form returned by one of
the astrobase.lcfit.fit_XXXXX_magseries functions. This can be used to
overplot a light curve model fit on top of the phased light curve plot
returned by this function. The `overplotfit` dict has the following
form, including at least the keys listed here::
{'fittype':str: name of fit method,
'fitchisq':float: the chi-squared value of the fit,
'fitredchisq':float: the reduced chi-squared value of the fit,
'fitinfo':{'fitmags':array: model mags or fluxes from fit function},
'magseries':{'times':array: times where the fitmags are evaluated}}
`fitmags` and `times` should all be of the same size. The input
`overplotfit` dict is copied over to the checkplotdict for each specific
phased LC plot to save all of this information for use later.
verbose : bool
If True, will indicate progress and warn about problems.
override_pfmethod : str or None
This is used to set a custom label for the periodogram method. Normally,
this is taken from the 'method' key in the input `lspinfo` dict, but if
you want to override the output method name, provide this as a string
here. This can be useful if you have multiple results you want to
incorporate into a checkplotdict from a single period-finder (e.g. if
you ran BLS over several period ranges separately).
Returns
-------
dict
Returns a dict of the following form::
{lspmethod: {'plot': the phased LC plot as base64 str,
'period': the period used for this phased LC,
'epoch': the epoch used for this phased LC,
'phase': phase value array,
'phasedmags': mags/fluxes sorted in phase order,
'binphase': array of binned phase values,
'binphasedmags': mags/fluxes sorted in binphase order,
'phasewrap': value of the input `phasewrap` kwarg,
'phasesort': value of the input `phasesort` kwarg,
'phasebin': value of the input `phasebin` kwarg,
'minbinelems': value of the input `minbinelems` kwarg,
'plotxlim': value of the input `plotxlim` kwarg,
'lcfit': the provided `overplotfit` dict}}
The dict is in this form because we can use Python dicts' `update()`
method to update an existing checkplotdict. If `directreturn` is True,
only the inner dict is returned.
'''
# open the figure instance
phasedseriesfig = plt.figure(figsize=(7.5,4.8),dpi=plotdpi)
plotvarepoch = None
# figure out the epoch; if it's None, use the min of the times
if varepoch is None:
plotvarepoch = npmin(stimes)
# if the varepoch is 'min', then fit a spline to the light curve
# phased using the min of the time, find the fit mag minimum and use
# the time for that as the varepoch
elif isinstance(varepoch,str) and varepoch == 'min':
try:
spfit = spline_fit_magseries(stimes,
smags,
serrs,
varperiod,
magsarefluxes=magsarefluxes,
sigclip=None,
verbose=verbose)
plotvarepoch = spfit['fitinfo']['fitepoch']
if len(plotvarepoch) != 1:
plotvarepoch = plotvarepoch[0]
except Exception as e:
LOGERROR('spline fit failed, trying SavGol fit')
sgfit = savgol_fit_magseries(stimes,
smags,
serrs,
varperiod,
sigclip=None,
magsarefluxes=magsarefluxes,
verbose=verbose)
plotvarepoch = sgfit['fitinfo']['fitepoch']
if len(plotvarepoch) != 1:
plotvarepoch = plotvarepoch[0]
finally:
if plotvarepoch is None:
LOGERROR('could not find a min epoch time, '
'using min(times) as the epoch for '
'the phase-folded LC')
plotvarepoch = npmin(stimes)
# special case with varepoch lists for each period-finder method
elif isinstance(varepoch, list):
try:
thisvarepochlist = varepoch[lspmethodind]
plotvarepoch = thisvarepochlist[periodind]
except Exception as e:
LOGEXCEPTION(
"varepoch provided in list form either doesn't match "
"the length of nbestperiods from the period-finder "
"result, or something else went wrong. using min(times) "
"as the epoch instead"
)
plotvarepoch = npmin(stimes)
# the final case is to use the provided varepoch directly
else:
plotvarepoch = varepoch
if verbose:
LOGINFO('plotting %s phased LC with period %s: %.6f, epoch: %.5f' %
(lspmethod, periodind, varperiod, plotvarepoch))
# make the plot title based on the lspmethod
if periodind == 0:
plottitle = '%s best period: %.6f d - epoch: %.5f' % (
(METHODSHORTLABELS[lspmethod] if lspmethod in METHODSHORTLABELS
else lspmethod),
varperiod,
plotvarepoch
)
elif periodind > 0:
plottitle = '%s peak %s: %.6f d - epoch: %.5f' % (
(METHODSHORTLABELS[lspmethod] if lspmethod in METHODSHORTLABELS
else lspmethod),
periodind+1,
varperiod,
plotvarepoch
)
elif periodind == -1:
plottitle = '%s period: %.6f d - epoch: %.5f' % (
lspmethod,
varperiod,
plotvarepoch
)
# phase the magseries
phasedlc = phase_magseries(stimes,
smags,
varperiod,
plotvarepoch,
wrap=phasewrap,
sort=phasesort)
plotphase = phasedlc['phase']
plotmags = phasedlc['mags']
# if we're supposed to bin the phases, do so
if phasebin:
binphasedlc = phase_bin_magseries(plotphase,
plotmags,
binsize=phasebin,
minbinelems=minbinelems)
binplotphase = binphasedlc['binnedphases']
binplotmags = binphasedlc['binnedmags']
else:
binplotphase = None
binplotmags = None
# finally, make the phased LC plot
plt.plot(plotphase,
plotmags,
marker='o',
ms=2.0, ls='None',mew=0,
color='gray',
rasterized=True)
# overlay the binned phased LC plot if we're making one
if phasebin:
plt.plot(binplotphase,
binplotmags,
marker='o',
ms=4.0, ls='None',mew=0,
color='#1c1e57',
rasterized=True)
# if we're making an overplotfit, then plot the fit over the other stuff
if overplotfit and isinstance(overplotfit, dict):
fitmethod = overplotfit['fittype']
fitredchisq = overplotfit['fitredchisq']
plotfitmags = overplotfit['fitinfo']['fitmags']
plotfittimes = overplotfit['magseries']['times']
# phase the fit magseries
fitphasedlc = phase_magseries(plotfittimes,
plotfitmags,
varperiod,
plotvarepoch,
wrap=phasewrap,
sort=phasesort)
plotfitphase = fitphasedlc['phase']
plotfitmags = fitphasedlc['mags']
plotfitlabel = (r'%s fit ${\chi}^2/{\mathrm{dof}} = %.3f$' %
(fitmethod, fitredchisq))
# plot the fit phase and mags
plt.plot(plotfitphase, plotfitmags,'k-',
linewidth=3, rasterized=True,label=plotfitlabel)
plt.legend(loc='upper left', frameon=False)
# flip y axis for mags
if not magsarefluxes:
plot_ylim = plt.ylim()
plt.ylim((plot_ylim[1], plot_ylim[0]))
# set the x axis limit
if not plotxlim:
plt.xlim((npmin(plotphase)-0.1,
npmax(plotphase)+0.1))
else:
plt.xlim((plotxlim[0],plotxlim[1]))
# make a grid
ax = plt.gca()
if isinstance(xgridlines, (list, tuple)):
ax.set_xticks(xgridlines, minor=False)
plt.grid(color='#a9a9a9',
alpha=0.9,
zorder=0,
linewidth=1.0,
linestyle=':')
# make the x and y axis labels
plot_xlabel = 'phase'
if magsarefluxes:
plot_ylabel = 'flux'
else:
plot_ylabel = 'magnitude'
plt.xlabel(plot_xlabel)
plt.ylabel(plot_ylabel)
# fix the yaxis ticks (turns off offset and uses the full
# value of the yaxis tick)
plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)
plt.gca().get_xaxis().get_major_formatter().set_useOffset(False)
# set the plot title
plt.title(plottitle)
# make sure the best period phased LC plot stands out
if (periodind == 0 or periodind == -1) and bestperiodhighlight:
if MPLVERSION >= (2,0,0):
plt.gca().set_facecolor(bestperiodhighlight)
else:
plt.gca().set_axis_bgcolor(bestperiodhighlight)
# if we're making an inset plot showing the full range
if (plotxlim and isinstance(plotxlim, (list, tuple)) and
len(plotxlim) == 2 and xliminsetmode is True):
# bump the ylim of the plot so that the inset can fit in this axes plot
axesylim = plt.gca().get_ylim()
if magsarefluxes:
plt.gca().set_ylim(
axesylim[0],
axesylim[1] + 0.5*npabs(axesylim[1]-axesylim[0])
)
else:
plt.gca().set_ylim(
axesylim[0],
axesylim[1] - 0.5*npabs(axesylim[1]-axesylim[0])
)
# put the inset axes in
inset = inset_axes(plt.gca(), width="40%", height="40%", loc=1)
# make the scatter plot for the phased LC plot
inset.plot(plotphase,
plotmags,
marker='o',
ms=2.0, ls='None',mew=0,
color='gray',
rasterized=True)
if phasebin:
# make the scatter plot for the phased LC plot
inset.plot(binplotphase,
binplotmags,
marker='o',
ms=4.0, ls='None',mew=0,
color='#1c1e57',
rasterized=True)
# show the full phase coverage
if phasewrap:
inset.set_xlim(-0.2,0.8)
else:
inset.set_xlim(-0.1,1.1)
# flip y axis for mags
if not magsarefluxes:
inset_ylim = inset.get_ylim()
inset.set_ylim((inset_ylim[1], inset_ylim[0]))
# set the plot title
inset.text(0.5,0.9,'full phased light curve',
ha='center',va='center',transform=inset.transAxes)
# don't show axes labels or ticks
inset.set_xticks([])
inset.set_yticks([])
# this is the output instance
phasedseriespng = StrIO()
phasedseriesfig.savefig(phasedseriespng,
# bbox_inches='tight',
pad_inches=0.0, format='png')
plt.close()
# encode the phasedseriespng instance to base64
phasedseriespng.seek(0)
phasedseriesb64 = base64.b64encode(phasedseriespng.read())
# close the stringio buffer
phasedseriespng.close()
# this includes a fitinfo dict if one is provided in overplotfit
retdict = {
'plot':phasedseriesb64,
'period':varperiod,
'epoch':plotvarepoch,
'phase':plotphase,
'phasedmags':plotmags,
'binphase':binplotphase,
'binphasedmags':binplotmags,
'phasewrap':phasewrap,
'phasesort':phasesort,
'phasebin':phasebin,
'minbinelems':minbinelems,
'plotxlim':plotxlim,
'lcfit':overplotfit,
}
# if we're returning stuff directly, i.e. not being used embedded within
# the checkplot_dict function
if directreturn or checkplotdict is None:
return retdict
# this requires the checkplotdict to be present already, we'll just update
# it at the appropriate lspmethod and periodind
else:
if override_pfmethod:
checkplotdict[override_pfmethod][periodind] = retdict
else:
checkplotdict[lspmethod][periodind] = retdict
return checkplotdict
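`phase_magseries` itself is defined elsewhere in astrobase; the essential folding computation it performs is sketched below (simplified: the library's exact wrap and sort handling differs in detail):

import numpy as np

def fold_magseries(times, mags, period, epoch, wrap=True, sort=True):
    '''Fold a time series: phase is the fractional part of (t - epoch)/period.'''
    phase = (times - epoch) / period
    phase = phase - np.floor(phase)              # fractional phase in [0, 1)
    if wrap:
        # shift the upper half so the fold is centered on phase 0.0
        phase = np.where(phase > 0.5, phase - 1.0, phase)
    if sort:
        order = np.argsort(phase)
        phase, mags = phase[order], np.asarray(mags)[order]
    return phase, mags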
|
def _pkl_phased_magseries_plot(
checkplotdict,
lspmethod,
periodind,
stimes, smags, serrs,
varperiod, varepoch,
lspmethodind=0,
phasewrap=True,
phasesort=True,
phasebin=0.002,
minbinelems=7,
plotxlim=(-0.8,0.8),
plotdpi=100,
bestperiodhighlight=None,
xgridlines=None,
xliminsetmode=False,
magsarefluxes=False,
directreturn=False,
overplotfit=None,
verbose=True,
override_pfmethod=None
):
'''This returns the phased magseries plot PNG as base64 plus info as a dict.
Parameters
----------
checkplotdict : dict
This is an existing checkplotdict to update. If it's None or
`directreturn` = True, then the generated dict result for this magseries
plot will be returned directly.
lspmethod : str
lspmethod is a string indicating the type of period-finding algorithm
that produced the period. If this is not in
`astrobase.plotbase.METHODSHORTLABELS`, it will be used verbatim. In
most cases, this will come directly from the lspinfo dict produced by a
period-finder function.
periodind : int
This is the index of the current periodogram period being operated
on::
If == 0 -> best period and `bestperiodhighlight` is applied if not
None
If > 0 -> some other peak of the periodogram
If == -1 -> special mode w/ no periodogram labels and enabled
highlight
stimes,smags,serrs : np.array
The mag/flux time-series arrays along with associated errors. These
should all have been run through nan-stripping and sigma-clipping
beforehand.
varperiod : float or None
The period to use for this phased light curve plot tile.
varepoch : 'min' or float or list of lists or None
The epoch to use for this phased light curve plot tile. If this is a
float, will use the provided value directly. If this is 'min', will
automatically figure out the time-of-minimum of the phased light
curve. If this is None, will use the minimum value of `stimes` as the
epoch of the phased light curve plot. If this is a list of lists, will
use the provided value of `lspmethodind` to look up the current
period-finder method and the provided value of `periodind` to look up
the epoch associated with that method and the current period. This is
mostly only useful when `twolspmode` is True.
phasewrap : bool
If this is True, the phased time-series will be wrapped around
phase 0.0.
phasesort : bool
If True, will sort the phased light curve in order of increasing phase.
phasebin : float
The bin size to use to group together measurements closer than this
amount in phase. This is in units of phase. If this is a float, a
phase-binned version of the phased light curve will be overplotted on
top of the regular phased light curve.
minbinelems : int
The minimum number of elements required per phase bin to include it in
the phased LC plot.
plotxlim : sequence of two floats or None
The x-range (min, max) of the phased light curve plot. If None, will be
determined automatically.
plotdpi : int
The resolution of the output plot PNGs in dots per inch.
bestperiodhighlight : str or None
If not None, this is a str with a matplotlib color specification to use
as the background color to highlight the phased light curve plot of the
'best' period and epoch combination. If None, no highlight will be
applied.
xgridlines : list of floats or None
If this is provided, must be a list of floats corresponding to the phase
values at which to draw vertical dashed lines as a means of highlighting
these.
xliminsetmode : bool
If this is True, the generated phased light curve plot will use the
values of `plotxlim` as the main plot x-axis limits (i.e. zoomed-in if
`plotxlim` is a range smaller than the full phase range), and will show
the full phased light curve plot as a smaller inset. Useful for
planetary transit light curves.
magsarefluxes : bool
If True, indicates the input time-series is fluxes and not mags so the
plot y-axis direction and range can be set appropriately.
directreturn : bool
If this set to True, will return only the dict corresponding to the
phased LC plot for the input `periodind` and `lspmethod` and not return
this result embedded in a checkplotdict.
overplotfit : dict
If this is provided, it must be a dict of the form returned by one of
the astrobase.lcfit.fit_XXXXX_magseries functions. This can be used to
overplot a light curve model fit on top of the phased light curve plot
returned by this function. The `overplotfit` dict has the following
form, including at least the keys listed here::
{'fittype':str: name of fit method,
'fitchisq':float: the chi-squared value of the fit,
'fitredchisq':float: the reduced chi-squared value of the fit,
'fitinfo':{'fitmags':array: model mags or fluxes from fit function},
'magseries':{'times':array: times where the fitmags are evaluated}}
`fitmags` and `times` should all be of the same size. The input
`overplotfit` dict is copied over to the checkplotdict for each specific
phased LC plot to save all of this information for use later.
verbose : bool
If True, will indicate progress and warn about problems.
override_pfmethod : str or None
This is used to set a custom label for the periodogram method. Normally,
this is taken from the 'method' key in the input `lspinfo` dict, but if
you want to override the output method name, provide this as a string
here. This can be useful if you have multiple results you want to
incorporate into a checkplotdict from a single period-finder (e.g. if
you ran BLS over several period ranges separately).
Returns
-------
dict
Returns a dict of the following form::
{lspmethod: {'plot': the phased LC plot as base64 str,
'period': the period used for this phased LC,
'epoch': the epoch used for this phased LC,
'phase': phase value array,
'phasedmags': mags/fluxes sorted in phase order,
'binphase': array of binned phase values,
'binphasedmags': mags/fluxes sorted in binphase order,
'phasewrap': value of the input `phasewrap` kwarg,
'phasesort': value of the input `phasesort` kwarg,
'phasebin': value of the input `phasebin` kwarg,
'minbinelems': value of the input `minbinelems` kwarg,
'plotxlim': value of the input `plotxlim` kwarg,
'lcfit': the provided `overplotfit` dict}}
The dict is in this form because we can use Python dicts' `update()`
method to update an existing checkplotdict. If `directreturn` is True,
only the inner dict is returned.
'''
# open the figure instance
phasedseriesfig = plt.figure(figsize=(7.5,4.8),dpi=plotdpi)
plotvarepoch = None
# figure out the epoch; if it's None, use the min of the times
if varepoch is None:
plotvarepoch = npmin(stimes)
# if the varepoch is 'min', then fit a spline to the light curve
# phased using the min of the time, find the fit mag minimum and use
# the time for that as the varepoch
elif isinstance(varepoch,str) and varepoch == 'min':
try:
spfit = spline_fit_magseries(stimes,
smags,
serrs,
varperiod,
magsarefluxes=magsarefluxes,
sigclip=None,
verbose=verbose)
plotvarepoch = spfit['fitinfo']['fitepoch']
if len(plotvarepoch) != 1:
plotvarepoch = plotvarepoch[0]
except Exception as e:
LOGERROR('spline fit failed, trying SavGol fit')
sgfit = savgol_fit_magseries(stimes,
smags,
serrs,
varperiod,
sigclip=None,
magsarefluxes=magsarefluxes,
verbose=verbose)
plotvarepoch = sgfit['fitinfo']['fitepoch']
if len(plotvarepoch) != 1:
plotvarepoch = plotvarepoch[0]
finally:
if plotvarepoch is None:
LOGERROR('could not find a min epoch time, '
'using min(times) as the epoch for '
'the phase-folded LC')
plotvarepoch = npmin(stimes)
# special case with varepoch lists for each period-finder method
elif isinstance(varepoch, list):
try:
thisvarepochlist = varepoch[lspmethodind]
plotvarepoch = thisvarepochlist[periodind]
except Exception as e:
LOGEXCEPTION(
"varepoch provided in list form either doesn't match "
"the length of nbestperiods from the period-finder "
"result, or something else went wrong. using min(times) "
"as the epoch instead"
)
plotvarepoch = npmin(stimes)
# the final case is to use the provided varepoch directly
else:
plotvarepoch = varepoch
if verbose:
LOGINFO('plotting %s phased LC with period %s: %.6f, epoch: %.5f' %
(lspmethod, periodind, varperiod, plotvarepoch))
# make the plot title based on the lspmethod
if periodind == 0:
plottitle = '%s best period: %.6f d - epoch: %.5f' % (
(METHODSHORTLABELS[lspmethod] if lspmethod in METHODSHORTLABELS
else lspmethod),
varperiod,
plotvarepoch
)
elif periodind > 0:
plottitle = '%s peak %s: %.6f d - epoch: %.5f' % (
(METHODSHORTLABELS[lspmethod] if lspmethod in METHODSHORTLABELS
else lspmethod),
periodind+1,
varperiod,
plotvarepoch
)
elif periodind == -1:
plottitle = '%s period: %.6f d - epoch: %.5f' % (
lspmethod,
varperiod,
plotvarepoch
)
# phase the magseries
phasedlc = phase_magseries(stimes,
smags,
varperiod,
plotvarepoch,
wrap=phasewrap,
sort=phasesort)
plotphase = phasedlc['phase']
plotmags = phasedlc['mags']
# if we're supposed to bin the phases, do so
if phasebin:
binphasedlc = phase_bin_magseries(plotphase,
plotmags,
binsize=phasebin,
minbinelems=minbinelems)
binplotphase = binphasedlc['binnedphases']
binplotmags = binphasedlc['binnedmags']
else:
binplotphase = None
binplotmags = None
# finally, make the phased LC plot
plt.plot(plotphase,
plotmags,
marker='o',
ms=2.0, ls='None',mew=0,
color='gray',
rasterized=True)
# overlay the binned phased LC plot if we're making one
if phasebin:
plt.plot(binplotphase,
binplotmags,
marker='o',
ms=4.0, ls='None',mew=0,
color='#1c1e57',
rasterized=True)
# if we're making an overplotfit, then plot the fit over the other stuff
if overplotfit and isinstance(overplotfit, dict):
fitmethod = overplotfit['fittype']
fitredchisq = overplotfit['fitredchisq']
plotfitmags = overplotfit['fitinfo']['fitmags']
plotfittimes = overplotfit['magseries']['times']
# phase the fit magseries
fitphasedlc = phase_magseries(plotfittimes,
plotfitmags,
varperiod,
plotvarepoch,
wrap=phasewrap,
sort=phasesort)
plotfitphase = fitphasedlc['phase']
plotfitmags = fitphasedlc['mags']
plotfitlabel = (r'%s fit ${\chi}^2/{\mathrm{dof}} = %.3f$' %
(fitmethod, fitredchisq))
# plot the fit phase and mags
plt.plot(plotfitphase, plotfitmags,'k-',
linewidth=3, rasterized=True,label=plotfitlabel)
plt.legend(loc='upper left', frameon=False)
# flip y axis for mags
if not magsarefluxes:
plot_ylim = plt.ylim()
plt.ylim((plot_ylim[1], plot_ylim[0]))
# set the x axis limit
if not plotxlim:
plt.xlim((npmin(plotphase)-0.1,
npmax(plotphase)+0.1))
else:
plt.xlim((plotxlim[0],plotxlim[1]))
# make a grid
ax = plt.gca()
if isinstance(xgridlines, (list, tuple)):
ax.set_xticks(xgridlines, minor=False)
plt.grid(color='#a9a9a9',
alpha=0.9,
zorder=0,
linewidth=1.0,
linestyle=':')
# make the x and y axis labels
plot_xlabel = 'phase'
if magsarefluxes:
plot_ylabel = 'flux'
else:
plot_ylabel = 'magnitude'
plt.xlabel(plot_xlabel)
plt.ylabel(plot_ylabel)
# fix the yaxis ticks (turns off offset and uses the full
# value of the yaxis tick)
plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)
plt.gca().get_xaxis().get_major_formatter().set_useOffset(False)
# set the plot title
plt.title(plottitle)
# make sure the best period phased LC plot stands out
if (periodind == 0 or periodind == -1) and bestperiodhighlight:
if MPLVERSION >= (2,0,0):
plt.gca().set_facecolor(bestperiodhighlight)
else:
plt.gca().set_axis_bgcolor(bestperiodhighlight)
# if we're making an inset plot showing the full range
if (plotxlim and isinstance(plotxlim, (list, tuple)) and
len(plotxlim) == 2 and xliminsetmode is True):
# bump the ylim of the plot so that the inset can fit in this axes plot
axesylim = plt.gca().get_ylim()
if magsarefluxes:
plt.gca().set_ylim(
axesylim[0],
axesylim[1] + 0.5*npabs(axesylim[1]-axesylim[0])
)
else:
plt.gca().set_ylim(
axesylim[0],
axesylim[1] - 0.5*npabs(axesylim[1]-axesylim[0])
)
# put the inset axes in
inset = inset_axes(plt.gca(), width="40%", height="40%", loc=1)
# make the scatter plot for the phased LC plot
inset.plot(plotphase,
plotmags,
marker='o',
ms=2.0, ls='None',mew=0,
color='gray',
rasterized=True)
if phasebin:
# make the scatter plot for the phased LC plot
inset.plot(binplotphase,
binplotmags,
marker='o',
ms=4.0, ls='None',mew=0,
color='#1c1e57',
rasterized=True)
# show the full phase coverage
if phasewrap:
inset.set_xlim(-0.2,0.8)
else:
inset.set_xlim(-0.1,1.1)
# flip y axis for mags
if not magsarefluxes:
inset_ylim = inset.get_ylim()
inset.set_ylim((inset_ylim[1], inset_ylim[0]))
# set the plot title
inset.text(0.5,0.9,'full phased light curve',
ha='center',va='center',transform=inset.transAxes)
# don't show axes labels or ticks
inset.set_xticks([])
inset.set_yticks([])
# this is the output instance
phasedseriespng = StrIO()
phasedseriesfig.savefig(phasedseriespng,
# bbox_inches='tight',
pad_inches=0.0, format='png')
plt.close()
# encode the phasedseriespng instance to base64
phasedseriespng.seek(0)
phasedseriesb64 = base64.b64encode(phasedseriespng.read())
# close the stringio buffer
phasedseriespng.close()
# this includes a fitinfo dict if one is provided in overplotfit
retdict = {
'plot':phasedseriesb64,
'period':varperiod,
'epoch':plotvarepoch,
'phase':plotphase,
'phasedmags':plotmags,
'binphase':binplotphase,
'binphasedmags':binplotmags,
'phasewrap':phasewrap,
'phasesort':phasesort,
'phasebin':phasebin,
'minbinelems':minbinelems,
'plotxlim':plotxlim,
'lcfit':overplotfit,
}
# if we're returning stuff directly, i.e. not being used embedded within
# the checkplot_dict function
if directreturn or checkplotdict is None:
return retdict
# this requires the checkplotdict to be present already, we'll just update
# it at the appropriate lspmethod and periodind
else:
if override_pfmethod:
checkplotdict[override_pfmethod][periodind] = retdict
else:
checkplotdict[lspmethod][periodind] = retdict
return checkplotdict
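Note the nesting convention in the final branch: each phased tile is stored at `checkplotdict[lspmethod][periodind]`, i.e. as an integer key inside the per-method dict that `_pkl_periodogram` created. A sketch of how the two functions chain (mirroring, under assumption, how the higher-level checkplot code wires them together; `lspinfo`, `stimes`, `smags`, and `serrs` are as in the earlier sketches):

from astrobase.checkplot.pkl_utils import (_pkl_periodogram,
                                           _pkl_phased_magseries_plot)

checkplotdict = {'objectid': 'test-object'}
checkplotdict.update(_pkl_periodogram(lspinfo))        # creates the 'gls' slot
checkplotdict = _pkl_phased_magseries_plot(
    checkplotdict, 'gls', 0,
    stimes, smags, serrs,
    lspinfo['bestperiod'], 'min',
    bestperiodhighlight='#dff3f5',                     # illustrative color
)
# checkplotdict['gls'][0]['plot'] is the base64 PNG of the best-period fold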
|
[
"This",
"returns",
"the",
"phased",
"magseries",
"plot",
"PNG",
"as",
"base64",
"plus",
"info",
"as",
"a",
"dict",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/checkplot/pkl_utils.py#L1466-L1961
|
[
"def",
"_pkl_phased_magseries_plot",
"(",
"checkplotdict",
",",
"lspmethod",
",",
"periodind",
",",
"stimes",
",",
"smags",
",",
"serrs",
",",
"varperiod",
",",
"varepoch",
",",
"lspmethodind",
"=",
"0",
",",
"phasewrap",
"=",
"True",
",",
"phasesort",
"=",
"True",
",",
"phasebin",
"=",
"0.002",
",",
"minbinelems",
"=",
"7",
",",
"plotxlim",
"=",
"(",
"-",
"0.8",
",",
"0.8",
")",
",",
"plotdpi",
"=",
"100",
",",
"bestperiodhighlight",
"=",
"None",
",",
"xgridlines",
"=",
"None",
",",
"xliminsetmode",
"=",
"False",
",",
"magsarefluxes",
"=",
"False",
",",
"directreturn",
"=",
"False",
",",
"overplotfit",
"=",
"None",
",",
"verbose",
"=",
"True",
",",
"override_pfmethod",
"=",
"None",
")",
":",
"# open the figure instance",
"phasedseriesfig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"7.5",
",",
"4.8",
")",
",",
"dpi",
"=",
"plotdpi",
")",
"plotvarepoch",
"=",
"None",
"# figure out the epoch, if it's None, use the min of the time",
"if",
"varepoch",
"is",
"None",
":",
"plotvarepoch",
"=",
"npmin",
"(",
"stimes",
")",
"# if the varepoch is 'min', then fit a spline to the light curve",
"# phased using the min of the time, find the fit mag minimum and use",
"# the time for that as the varepoch",
"elif",
"isinstance",
"(",
"varepoch",
",",
"str",
")",
"and",
"varepoch",
"==",
"'min'",
":",
"try",
":",
"spfit",
"=",
"spline_fit_magseries",
"(",
"stimes",
",",
"smags",
",",
"serrs",
",",
"varperiod",
",",
"magsarefluxes",
"=",
"magsarefluxes",
",",
"sigclip",
"=",
"None",
",",
"verbose",
"=",
"verbose",
")",
"plotvarepoch",
"=",
"spfit",
"[",
"'fitinfo'",
"]",
"[",
"'fitepoch'",
"]",
"if",
"len",
"(",
"plotvarepoch",
")",
"!=",
"1",
":",
"plotvarepoch",
"=",
"plotvarepoch",
"[",
"0",
"]",
"except",
"Exception",
"as",
"e",
":",
"LOGERROR",
"(",
"'spline fit failed, trying SavGol fit'",
")",
"sgfit",
"=",
"savgol_fit_magseries",
"(",
"stimes",
",",
"smags",
",",
"serrs",
",",
"varperiod",
",",
"sigclip",
"=",
"None",
",",
"magsarefluxes",
"=",
"magsarefluxes",
",",
"verbose",
"=",
"verbose",
")",
"plotvarepoch",
"=",
"sgfit",
"[",
"'fitinfo'",
"]",
"[",
"'fitepoch'",
"]",
"if",
"len",
"(",
"plotvarepoch",
")",
"!=",
"1",
":",
"plotvarepoch",
"=",
"plotvarepoch",
"[",
"0",
"]",
"finally",
":",
"if",
"plotvarepoch",
"is",
"None",
":",
"LOGERROR",
"(",
"'could not find a min epoch time, '",
"'using min(times) as the epoch for '",
"'the phase-folded LC'",
")",
"plotvarepoch",
"=",
"npmin",
"(",
"stimes",
")",
"# special case with varepoch lists per each period-finder method",
"elif",
"isinstance",
"(",
"varepoch",
",",
"list",
")",
":",
"try",
":",
"thisvarepochlist",
"=",
"varepoch",
"[",
"lspmethodind",
"]",
"plotvarepoch",
"=",
"thisvarepochlist",
"[",
"periodind",
"]",
"except",
"Exception",
"as",
"e",
":",
"LOGEXCEPTION",
"(",
"\"varepoch provided in list form either doesn't match \"",
"\"the length of nbestperiods from the period-finder \"",
"\"result, or something else went wrong. using min(times) \"",
"\"as the epoch instead\"",
")",
"plotvarepoch",
"=",
"npmin",
"(",
"stimes",
")",
"# the final case is to use the provided varepoch directly",
"else",
":",
"plotvarepoch",
"=",
"varepoch",
"if",
"verbose",
":",
"LOGINFO",
"(",
"'plotting %s phased LC with period %s: %.6f, epoch: %.5f'",
"%",
"(",
"lspmethod",
",",
"periodind",
",",
"varperiod",
",",
"plotvarepoch",
")",
")",
"# make the plot title based on the lspmethod",
"if",
"periodind",
"==",
"0",
":",
"plottitle",
"=",
"'%s best period: %.6f d - epoch: %.5f'",
"%",
"(",
"(",
"METHODSHORTLABELS",
"[",
"lspmethod",
"]",
"if",
"lspmethod",
"in",
"METHODSHORTLABELS",
"else",
"lspmethod",
")",
",",
"varperiod",
",",
"plotvarepoch",
")",
"elif",
"periodind",
">",
"0",
":",
"plottitle",
"=",
"'%s peak %s: %.6f d - epoch: %.5f'",
"%",
"(",
"(",
"METHODSHORTLABELS",
"[",
"lspmethod",
"]",
"if",
"lspmethod",
"in",
"METHODSHORTLABELS",
"else",
"lspmethod",
")",
",",
"periodind",
"+",
"1",
",",
"varperiod",
",",
"plotvarepoch",
")",
"elif",
"periodind",
"==",
"-",
"1",
":",
"plottitle",
"=",
"'%s period: %.6f d - epoch: %.5f'",
"%",
"(",
"lspmethod",
",",
"varperiod",
",",
"plotvarepoch",
")",
"# phase the magseries",
"phasedlc",
"=",
"phase_magseries",
"(",
"stimes",
",",
"smags",
",",
"varperiod",
",",
"plotvarepoch",
",",
"wrap",
"=",
"phasewrap",
",",
"sort",
"=",
"phasesort",
")",
"plotphase",
"=",
"phasedlc",
"[",
"'phase'",
"]",
"plotmags",
"=",
"phasedlc",
"[",
"'mags'",
"]",
"# if we're supposed to bin the phases, do so",
"if",
"phasebin",
":",
"binphasedlc",
"=",
"phase_bin_magseries",
"(",
"plotphase",
",",
"plotmags",
",",
"binsize",
"=",
"phasebin",
",",
"minbinelems",
"=",
"minbinelems",
")",
"binplotphase",
"=",
"binphasedlc",
"[",
"'binnedphases'",
"]",
"binplotmags",
"=",
"binphasedlc",
"[",
"'binnedmags'",
"]",
"else",
":",
"binplotphase",
"=",
"None",
"binplotmags",
"=",
"None",
"# finally, make the phased LC plot",
"plt",
".",
"plot",
"(",
"plotphase",
",",
"plotmags",
",",
"marker",
"=",
"'o'",
",",
"ms",
"=",
"2.0",
",",
"ls",
"=",
"'None'",
",",
"mew",
"=",
"0",
",",
"color",
"=",
"'gray'",
",",
"rasterized",
"=",
"True",
")",
"# overlay the binned phased LC plot if we're making one",
"if",
"phasebin",
":",
"plt",
".",
"plot",
"(",
"binplotphase",
",",
"binplotmags",
",",
"marker",
"=",
"'o'",
",",
"ms",
"=",
"4.0",
",",
"ls",
"=",
"'None'",
",",
"mew",
"=",
"0",
",",
"color",
"=",
"'#1c1e57'",
",",
"rasterized",
"=",
"True",
")",
"# if we're making a overplotfit, then plot the fit over the other stuff",
"if",
"overplotfit",
"and",
"isinstance",
"(",
"overplotfit",
",",
"dict",
")",
":",
"fitmethod",
"=",
"overplotfit",
"[",
"'fittype'",
"]",
"fitredchisq",
"=",
"overplotfit",
"[",
"'fitredchisq'",
"]",
"plotfitmags",
"=",
"overplotfit",
"[",
"'fitinfo'",
"]",
"[",
"'fitmags'",
"]",
"plotfittimes",
"=",
"overplotfit",
"[",
"'magseries'",
"]",
"[",
"'times'",
"]",
"# phase the fit magseries",
"fitphasedlc",
"=",
"phase_magseries",
"(",
"plotfittimes",
",",
"plotfitmags",
",",
"varperiod",
",",
"plotvarepoch",
",",
"wrap",
"=",
"phasewrap",
",",
"sort",
"=",
"phasesort",
")",
"plotfitphase",
"=",
"fitphasedlc",
"[",
"'phase'",
"]",
"plotfitmags",
"=",
"fitphasedlc",
"[",
"'mags'",
"]",
"plotfitlabel",
"=",
"(",
"r'%s fit ${\\chi}^2/{\\mathrm{dof}} = %.3f$'",
"%",
"(",
"fitmethod",
",",
"fitredchisq",
")",
")",
"# plot the fit phase and mags",
"plt",
".",
"plot",
"(",
"plotfitphase",
",",
"plotfitmags",
",",
"'k-'",
",",
"linewidth",
"=",
"3",
",",
"rasterized",
"=",
"True",
",",
"label",
"=",
"plotfitlabel",
")",
"plt",
".",
"legend",
"(",
"loc",
"=",
"'upper left'",
",",
"frameon",
"=",
"False",
")",
"# flip y axis for mags",
"if",
"not",
"magsarefluxes",
":",
"plot_ylim",
"=",
"plt",
".",
"ylim",
"(",
")",
"plt",
".",
"ylim",
"(",
"(",
"plot_ylim",
"[",
"1",
"]",
",",
"plot_ylim",
"[",
"0",
"]",
")",
")",
"# set the x axis limit",
"if",
"not",
"plotxlim",
":",
"plt",
".",
"xlim",
"(",
"(",
"npmin",
"(",
"plotphase",
")",
"-",
"0.1",
",",
"npmax",
"(",
"plotphase",
")",
"+",
"0.1",
")",
")",
"else",
":",
"plt",
".",
"xlim",
"(",
"(",
"plotxlim",
"[",
"0",
"]",
",",
"plotxlim",
"[",
"1",
"]",
")",
")",
"# make a grid",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"if",
"isinstance",
"(",
"xgridlines",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"ax",
".",
"set_xticks",
"(",
"xgridlines",
",",
"minor",
"=",
"False",
")",
"plt",
".",
"grid",
"(",
"color",
"=",
"'#a9a9a9'",
",",
"alpha",
"=",
"0.9",
",",
"zorder",
"=",
"0",
",",
"linewidth",
"=",
"1.0",
",",
"linestyle",
"=",
"':'",
")",
"# make the x and y axis labels",
"plot_xlabel",
"=",
"'phase'",
"if",
"magsarefluxes",
":",
"plot_ylabel",
"=",
"'flux'",
"else",
":",
"plot_ylabel",
"=",
"'magnitude'",
"plt",
".",
"xlabel",
"(",
"plot_xlabel",
")",
"plt",
".",
"ylabel",
"(",
"plot_ylabel",
")",
"# fix the yaxis ticks (turns off offset and uses the full",
"# value of the yaxis tick)",
"plt",
".",
"gca",
"(",
")",
".",
"get_yaxis",
"(",
")",
".",
"get_major_formatter",
"(",
")",
".",
"set_useOffset",
"(",
"False",
")",
"plt",
".",
"gca",
"(",
")",
".",
"get_xaxis",
"(",
")",
".",
"get_major_formatter",
"(",
")",
".",
"set_useOffset",
"(",
"False",
")",
"# set the plot title",
"plt",
".",
"title",
"(",
"plottitle",
")",
"# make sure the best period phased LC plot stands out",
"if",
"(",
"periodind",
"==",
"0",
"or",
"periodind",
"==",
"-",
"1",
")",
"and",
"bestperiodhighlight",
":",
"if",
"MPLVERSION",
">=",
"(",
"2",
",",
"0",
",",
"0",
")",
":",
"plt",
".",
"gca",
"(",
")",
".",
"set_facecolor",
"(",
"bestperiodhighlight",
")",
"else",
":",
"plt",
".",
"gca",
"(",
")",
".",
"set_axis_bgcolor",
"(",
"bestperiodhighlight",
")",
"# if we're making an inset plot showing the full range",
"if",
"(",
"plotxlim",
"and",
"isinstance",
"(",
"plotxlim",
",",
"(",
"list",
",",
"tuple",
")",
")",
"and",
"len",
"(",
"plotxlim",
")",
"==",
"2",
"and",
"xliminsetmode",
"is",
"True",
")",
":",
"# bump the ylim of the plot so that the inset can fit in this axes plot",
"axesylim",
"=",
"plt",
".",
"gca",
"(",
")",
".",
"get_ylim",
"(",
")",
"if",
"magsarefluxes",
":",
"plt",
".",
"gca",
"(",
")",
".",
"set_ylim",
"(",
"axesylim",
"[",
"0",
"]",
",",
"axesylim",
"[",
"1",
"]",
"+",
"0.5",
"*",
"npabs",
"(",
"axesylim",
"[",
"1",
"]",
"-",
"axesylim",
"[",
"0",
"]",
")",
")",
"else",
":",
"plt",
".",
"gca",
"(",
")",
".",
"set_ylim",
"(",
"axesylim",
"[",
"0",
"]",
",",
"axesylim",
"[",
"1",
"]",
"-",
"0.5",
"*",
"npabs",
"(",
"axesylim",
"[",
"1",
"]",
"-",
"axesylim",
"[",
"0",
"]",
")",
")",
"# put the inset axes in",
"inset",
"=",
"inset_axes",
"(",
"plt",
".",
"gca",
"(",
")",
",",
"width",
"=",
"\"40%\"",
",",
"height",
"=",
"\"40%\"",
",",
"loc",
"=",
"1",
")",
"# make the scatter plot for the phased LC plot",
"inset",
".",
"plot",
"(",
"plotphase",
",",
"plotmags",
",",
"marker",
"=",
"'o'",
",",
"ms",
"=",
"2.0",
",",
"ls",
"=",
"'None'",
",",
"mew",
"=",
"0",
",",
"color",
"=",
"'gray'",
",",
"rasterized",
"=",
"True",
")",
"if",
"phasebin",
":",
"# make the scatter plot for the phased LC plot",
"inset",
".",
"plot",
"(",
"binplotphase",
",",
"binplotmags",
",",
"marker",
"=",
"'o'",
",",
"ms",
"=",
"4.0",
",",
"ls",
"=",
"'None'",
",",
"mew",
"=",
"0",
",",
"color",
"=",
"'#1c1e57'",
",",
"rasterized",
"=",
"True",
")",
"# show the full phase coverage",
"if",
"phasewrap",
":",
"inset",
".",
"set_xlim",
"(",
"-",
"0.2",
",",
"0.8",
")",
"else",
":",
"inset",
".",
"set_xlim",
"(",
"-",
"0.1",
",",
"1.1",
")",
"# flip y axis for mags",
"if",
"not",
"magsarefluxes",
":",
"inset_ylim",
"=",
"inset",
".",
"get_ylim",
"(",
")",
"inset",
".",
"set_ylim",
"(",
"(",
"inset_ylim",
"[",
"1",
"]",
",",
"inset_ylim",
"[",
"0",
"]",
")",
")",
"# set the plot title",
"inset",
".",
"text",
"(",
"0.5",
",",
"0.9",
",",
"'full phased light curve'",
",",
"ha",
"=",
"'center'",
",",
"va",
"=",
"'center'",
",",
"transform",
"=",
"inset",
".",
"transAxes",
")",
"# don't show axes labels or ticks",
"inset",
".",
"set_xticks",
"(",
"[",
"]",
")",
"inset",
".",
"set_yticks",
"(",
"[",
"]",
")",
"# this is the output instance",
"phasedseriespng",
"=",
"StrIO",
"(",
")",
"phasedseriesfig",
".",
"savefig",
"(",
"phasedseriespng",
",",
"# bbox_inches='tight',",
"pad_inches",
"=",
"0.0",
",",
"format",
"=",
"'png'",
")",
"plt",
".",
"close",
"(",
")",
"# encode the finderpng instance to base64",
"phasedseriespng",
".",
"seek",
"(",
"0",
")",
"phasedseriesb64",
"=",
"base64",
".",
"b64encode",
"(",
"phasedseriespng",
".",
"read",
"(",
")",
")",
"# close the stringio buffer",
"phasedseriespng",
".",
"close",
"(",
")",
"# this includes a fitinfo dict if one is provided in overplotfit",
"retdict",
"=",
"{",
"'plot'",
":",
"phasedseriesb64",
",",
"'period'",
":",
"varperiod",
",",
"'epoch'",
":",
"plotvarepoch",
",",
"'phase'",
":",
"plotphase",
",",
"'phasedmags'",
":",
"plotmags",
",",
"'binphase'",
":",
"binplotphase",
",",
"'binphasedmags'",
":",
"binplotmags",
",",
"'phasewrap'",
":",
"phasewrap",
",",
"'phasesort'",
":",
"phasesort",
",",
"'phasebin'",
":",
"phasebin",
",",
"'minbinelems'",
":",
"minbinelems",
",",
"'plotxlim'",
":",
"plotxlim",
",",
"'lcfit'",
":",
"overplotfit",
",",
"}",
"# if we're returning stuff directly, i.e. not being used embedded within",
"# the checkplot_dict function",
"if",
"directreturn",
"or",
"checkplotdict",
"is",
"None",
":",
"return",
"retdict",
"# this requires the checkplotdict to be present already, we'll just update",
"# it at the appropriate lspmethod and periodind",
"else",
":",
"if",
"override_pfmethod",
":",
"checkplotdict",
"[",
"override_pfmethod",
"]",
"[",
"periodind",
"]",
"=",
"retdict",
"else",
":",
"checkplotdict",
"[",
"lspmethod",
"]",
"[",
"periodind",
"]",
"=",
"retdict",
"return",
"checkplotdict"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
prewhiten_magseries
|
Removes a periodic sinusoidal signal generated using whitenparams from
the input magnitude time series.
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to prewhiten.
whitenperiod : float
The period of the sinusoidal signal to remove.
whitenparams : list of floats
This contains the Fourier amplitude and phase coefficients of the
sinusoidal signal to remove::
[ampl_1, ampl_2, ampl_3, ..., ampl_X,
pha_1, pha_2, pha_3, ..., pha_X]
where `X` is the Fourier order. These are usually the output of a
previous Fourier fit to the light curve (from
:py:func:`astrobase.lcfit.sinusoidal.fourier_fit_magseries` for
example).
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]` will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
If True, will treat the input values of `mags` as fluxes for purposes of
plotting the fit and sig-clipping.
plotfit : str or None
If this is a string, this function will make a plot showing the effect
of the pre-whitening on the mag/flux time-series and write the plot to
the path specified here.
plotfitphasedlconly : bool
If True, will plot only the phased LC for showing the effect of
pre-whitening, and skip plotting the unphased LC.
rescaletomedian : bool
If this is True, then we add back the constant median term of the
magnitudes to the final pre-whitened mag series.
Returns
-------
dict
Returns a dict of the form::
{'wtimes':times array after pre-whitening,
'wphase':phase array after pre-whitening,
'wmags':mags array after pre-whitening,
'werrs':errs array after pre-whitening,
'whitenparams':the input pre-whitening params used,
'whitenperiod':the input pre-whitening period used,
'fitplotfile':the output plot file if plotfit was set}
|
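The asymmetric `sigclip` behaviour described in the docstring can be illustrated with a short numpy sketch. This is a simplified stand-in, not the actual `sigclip_magseries` implementation, and it assumes magnitudes (where a dimming is a positive deviation from the median):

import numpy as np

def asymmetric_sigclip_sketch(mags, sigclip=(10., 3.)):
    # robust sigma estimate from the median absolute deviation
    med = np.median(mags)
    sigma = 1.4826 * np.median(np.abs(mags - med))
    dev = (mags - med) / sigma
    dim_sig, bright_sig = sigclip  # [fainter, brighter], as documented above
    # for magnitudes: fainter = larger values, brighter = smaller values
    return (dev < dim_sig) & (dev > -bright_sig)

mags = np.array([10.00, 10.01, 9.99, 10.50, 9.70, 10.02])
print(asymmetric_sigclip_sketch(mags))  # clips the 10.50 dimming and the 9.70 brightening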
astrobase/varbase/signals.py
|
def prewhiten_magseries(times, mags, errs,
whitenperiod,
whitenparams,
sigclip=3.0,
magsarefluxes=False,
plotfit=None,
plotfitphasedlconly=True,
rescaletomedian=True):
'''Removes a periodic sinusoidal signal generated using whitenparams from
the input magnitude time series.
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to prewhiten.
whitenperiod : float
The period of the sinusoidal signal to remove.
whitenparams : list of floats
This contains the Fourier amplitude and phase coefficients of the
sinusoidal signal to remove::
[ampl_1, ampl_2, ampl_3, ..., ampl_X,
pha_1, pha_2, pha_3, ..., pha_X]
where `X` is the Fourier order. These are usually the output of a
previous Fourier fit to the light curve (from
:py:func:`astrobase.lcfit.sinusoidal.fourier_fit_magseries` for
example).
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]` will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
If True, will treat the input values of `mags` as fluxes for purposes of
plotting the fit and sig-clipping.
plotfit : str or None
If this is a string, this function will make a plot showing the effect
of the pre-whitening on the mag/flux time-series and write the plot to
the path specified here.
plotfitphasedlconly : bool
If True, will plot only the phased LC for showing the effect of
pre-whitening, and skip plotting the unphased LC.
rescaletomedian : bool
If this is True, then we add back the constant median term of the
magnitudes to the final pre-whitened mag series.
Returns
-------
dict
Returns a dict of the form::
{'wtimes':times array after pre-whitening,
'wphase':phase array after pre-whitening,
'wmags':mags array after pre-whitening,
'werrs':errs array after pre-whitening,
'whitenparams':the input pre-whitening params used,
'whitenperiod':the input pre-whitening period used,
'fitplotfile':the output plot file if plotfit was set}
'''
stimes, smags, serrs = sigclip_magseries(times, mags, errs,
sigclip=sigclip,
magsarefluxes=magsarefluxes)
median_mag = np.median(smags)
# phase the mag series using the given period and epoch = min(stimes)
mintime = np.min(stimes)
# calculate the unsorted phase, then sort it
iphase = (
(stimes - mintime)/whitenperiod -
np.floor((stimes - mintime)/whitenperiod)
)
phasesortind = np.argsort(iphase)
# these are the final quantities to use for the Fourier fits
phase = iphase[phasesortind]
pmags = smags[phasesortind]
perrs = serrs[phasesortind]
# get the times sorted in phase order (useful to get the fit mag minimum
# with respect to phase -- the light curve minimum)
ptimes = stimes[phasesortind]
# now subtract the harmonic series from the phased LC
# these are still in phase order
wmags = pmags - _fourier_func(whitenparams, phase, pmags)
# resort everything by time order
wtimeorder = np.argsort(ptimes)
wtimes = ptimes[wtimeorder]
wphase = phase[wtimeorder]
wmags = wmags[wtimeorder]
werrs = perrs[wtimeorder]
if rescaletomedian:
wmags = wmags + median_mag
# prepare the returndict
returndict = {'wtimes':wtimes, # these are in the new time order
'wphase':wphase,
'wmags':wmags,
'werrs':werrs,
'whitenparams':whitenparams,
'whitenperiod':whitenperiod}
# make the fit plot if required
if plotfit and (isinstance(plotfit, str) or isinstance(plotfit, Strio)):
if plotfitphasedlconly:
plt.figure(figsize=(10,4.8))
else:
plt.figure(figsize=(16,9.6))
if plotfitphasedlconly:
# phased series before whitening
plt.subplot(121)
plt.plot(phase,pmags,
marker='.',
color='k',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('phase')
plt.title('phased LC before pre-whitening')
# phased series after whitening
plt.subplot(122)
plt.plot(wphase,wmags,
marker='.',
color='g',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('phase')
plt.title('phased LC after pre-whitening')
else:
# time series before whitening
plt.subplot(221)
plt.plot(stimes,smags,
marker='.',
color='k',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('JD')
plt.title('LC before pre-whitening')
# time series after whitening
plt.subplot(222)
plt.plot(wtimes,wmags,
marker='.',
color='g',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('JD')
plt.title('LC after pre-whitening with period: %.6f' % whitenperiod)
# phased series before whitening
plt.subplot(223)
plt.plot(phase,pmags,
marker='.',
color='k',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('phase')
plt.title('phased LC before pre-whitening')
# phased series after whitening
plt.subplot(224)
plt.plot(wphase,wmags,
marker='.',
color='g',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('phase')
plt.title('phased LC after pre-whitening')
plt.tight_layout()
plt.savefig(plotfit, format='png', pad_inches=0.0)
plt.close()
if isinstance(plotfit, str) or isinstance(plotfit, Strio):
returndict['fitplotfile'] = plotfit
return returndict
|
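The core of the function above is two numpy steps: fold the times into phase as the fractional part of (t - t0)/P, then subtract the Fourier model evaluated at those phases. A self-contained sketch of that logic, using a simple stand-in for astrobase's private `_fourier_func` (the real function's signature and normalization may differ):

import numpy as np

def fourier_series(params, phase):
    # stand-in for _fourier_func: params = [ampl_1..ampl_X, pha_1..pha_X]
    order = len(params) // 2
    amps, phas = params[:order], params[order:]
    model = np.zeros_like(phase)
    for k in range(order):
        model += amps[k] * np.cos(2.0 * np.pi * (k + 1) * phase + phas[k])
    return model

period, fparams = 1.2345, [0.10, 0.02, 0.3, 1.1]
times = np.sort(np.random.uniform(0.0, 30.0, 500))
truephase = ((times - times.min()) / period) % 1.0
mags = 12.0 + fourier_series(fparams, truephase)

# phase-fold exactly as above: epoch = min(times), phase = frac((t - t0)/P)
iphase = (times - times.min()) / period
iphase = iphase - np.floor(iphase)
sortix = np.argsort(iphase)
phase, pmags = iphase[sortix], mags[sortix]

# subtract the harmonic model; here the residuals collapse to the constant 12.0
wmags = pmags - fourier_series(fparams, phase)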
def prewhiten_magseries(times, mags, errs,
whitenperiod,
whitenparams,
sigclip=3.0,
magsarefluxes=False,
plotfit=None,
plotfitphasedlconly=True,
rescaletomedian=True):
'''Removes a periodic sinusoidal signal generated using whitenparams from
the input magnitude time series.
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to prewhiten.
whitenperiod : float
The period of the sinusoidal signal to remove.
whitenparams : list of floats
This contains the Fourier amplitude and phase coefficients of the
sinusoidal signal to remove::
[ampl_1, ampl_2, ampl_3, ..., ampl_X,
pha_1, pha_2, pha_3, ..., pha_X]
where `X` is the Fourier order. These are usually the output of a
previous Fourier fit to the light curve (from
:py:func:`astrobase.lcfit.sinusoidal.fourier_fit_magseries` for
example).
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]` will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
If True, will treat the input values of `mags` as fluxes for purposes of
plotting the fit and sig-clipping.
plotfit : str or None
If this is a string, this function will make a plot showing the effect
of the pre-whitening on the mag/flux time-series and write the plot to
the path specified here.
plotfitphasedlconly : bool
If True, will plot only the phased LC for showing the effect of
pre-whitening, and skip plotting the unphased LC.
rescaletomedian : bool
If this is True, then we add back the constant median term of the
magnitudes to the final pre-whitened mag series.
Returns
-------
dict
Returns a dict of the form::
{'wtimes':times array after pre-whitening,
'wphase':phase array after pre-whitening,
'wmags':mags array after pre-whitening,
'werrs':errs array after pre-whitening,
'whitenparams':the input pre-whitening params used,
'whitenperiod':the input pre-whitening period used,
'fitplotfile':the output plot file if plotfit was set}
'''
stimes, smags, serrs = sigclip_magseries(times, mags, errs,
sigclip=sigclip,
magsarefluxes=magsarefluxes)
median_mag = np.median(smags)
# phase the mag series using the given period and epoch = min(stimes)
mintime = np.min(stimes)
# calculate the unsorted phase, then sort it
iphase = (
(stimes - mintime)/whitenperiod -
np.floor((stimes - mintime)/whitenperiod)
)
phasesortind = np.argsort(iphase)
# these are the final quantities to use for the Fourier fits
phase = iphase[phasesortind]
pmags = smags[phasesortind]
perrs = serrs[phasesortind]
# get the times sorted in phase order (useful to get the fit mag minimum
# with respect to phase -- the light curve minimum)
ptimes = stimes[phasesortind]
# now subtract the harmonic series from the phased LC
# these are still in phase order
wmags = pmags - _fourier_func(whitenparams, phase, pmags)
# resort everything by time order
wtimeorder = np.argsort(ptimes)
wtimes = ptimes[wtimeorder]
wphase = phase[wtimeorder]
wmags = wmags[wtimeorder]
werrs = perrs[wtimeorder]
if rescaletomedian:
wmags = wmags + median_mag
# prepare the returndict
returndict = {'wtimes':wtimes, # these are in the new time order
'wphase':wphase,
'wmags':wmags,
'werrs':werrs,
'whitenparams':whitenparams,
'whitenperiod':whitenperiod}
# make the fit plot if required
if plotfit and (isinstance(plotfit, str) or isinstance(plotfit, Strio)):
if plotfitphasedlconly:
plt.figure(figsize=(10,4.8))
else:
plt.figure(figsize=(16,9.6))
if plotfitphasedlconly:
# phased series before whitening
plt.subplot(121)
plt.plot(phase,pmags,
marker='.',
color='k',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('phase')
plt.title('phased LC before pre-whitening')
# phased series after whitening
plt.subplot(122)
plt.plot(wphase,wmags,
marker='.',
color='g',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('phase')
plt.title('phased LC after pre-whitening')
else:
# time series before whitening
plt.subplot(221)
plt.plot(stimes,smags,
marker='.',
color='k',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('JD')
plt.title('LC before pre-whitening')
# time series after whitening
plt.subplot(222)
plt.plot(wtimes,wmags,
marker='.',
color='g',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('JD')
plt.title('LC after pre-whitening with period: %.6f' % whitenperiod)
# phased series before whitening
plt.subplot(223)
plt.plot(phase,pmags,
marker='.',
color='k',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('phase')
plt.title('phased LC before pre-whitening')
# phased series after whitening
plt.subplot(224)
plt.plot(wphase,wmags,
marker='.',
color='g',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('phase')
plt.title('phased LC after pre-whitening')
plt.tight_layout()
plt.savefig(plotfit, format='png', pad_inches=0.0)
plt.close()
if isinstance(plotfit, str) or isinstance(plotfit, Strio):
returndict['fitplotfile'] = plotfit
return returndict
|
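A hedged usage sketch for the function above. The Fourier coefficients normally come from a prior `fourier_fit_magseries` call, as the docstring notes; `times`, `mags`, `errs` are assumed to be numpy arrays loaded elsewhere:

from astrobase.lcfit.sinusoidal import fourier_fit_magseries
from astrobase.varbase.signals import prewhiten_magseries

period = 1.2345  # e.g. the best peak from a period-finder run
fit = fourier_fit_magseries(times, mags, errs, period, fourierorder=3)
white = prewhiten_magseries(times, mags, errs,
                            period,
                            fit['fitinfo']['finalparams'],
                            magsarefluxes=False,
                            plotfit='prewhiten_check.png')
wtimes, wmags, werrs = white['wtimes'], white['wmags'], white['werrs']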
[
"Removes",
"a",
"periodic",
"sinusoidal",
"signal",
"generated",
"using",
"whitenparams",
"from",
"the",
"input",
"magnitude",
"time",
"series",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/varbase/signals.py#L69-L328
|
[
"def",
"prewhiten_magseries",
"(",
"times",
",",
"mags",
",",
"errs",
",",
"whitenperiod",
",",
"whitenparams",
",",
"sigclip",
"=",
"3.0",
",",
"magsarefluxes",
"=",
"False",
",",
"plotfit",
"=",
"None",
",",
"plotfitphasedlconly",
"=",
"True",
",",
"rescaletomedian",
"=",
"True",
")",
":",
"stimes",
",",
"smags",
",",
"serrs",
"=",
"sigclip_magseries",
"(",
"times",
",",
"mags",
",",
"errs",
",",
"sigclip",
"=",
"sigclip",
",",
"magsarefluxes",
"=",
"magsarefluxes",
")",
"median_mag",
"=",
"np",
".",
"median",
"(",
"smags",
")",
"# phase the mag series using the given period and epoch = min(stimes)",
"mintime",
"=",
"np",
".",
"min",
"(",
"stimes",
")",
"# calculate the unsorted phase, then sort it",
"iphase",
"=",
"(",
"(",
"stimes",
"-",
"mintime",
")",
"/",
"whitenperiod",
"-",
"np",
".",
"floor",
"(",
"(",
"stimes",
"-",
"mintime",
")",
"/",
"whitenperiod",
")",
")",
"phasesortind",
"=",
"np",
".",
"argsort",
"(",
"iphase",
")",
"# these are the final quantities to use for the Fourier fits",
"phase",
"=",
"iphase",
"[",
"phasesortind",
"]",
"pmags",
"=",
"smags",
"[",
"phasesortind",
"]",
"perrs",
"=",
"serrs",
"[",
"phasesortind",
"]",
"# get the times sorted in phase order (useful to get the fit mag minimum",
"# with respect to phase -- the light curve minimum)",
"ptimes",
"=",
"stimes",
"[",
"phasesortind",
"]",
"# now subtract the harmonic series from the phased LC",
"# these are still in phase order",
"wmags",
"=",
"pmags",
"-",
"_fourier_func",
"(",
"whitenparams",
",",
"phase",
",",
"pmags",
")",
"# resort everything by time order",
"wtimeorder",
"=",
"np",
".",
"argsort",
"(",
"ptimes",
")",
"wtimes",
"=",
"ptimes",
"[",
"wtimeorder",
"]",
"wphase",
"=",
"phase",
"[",
"wtimeorder",
"]",
"wmags",
"=",
"wmags",
"[",
"wtimeorder",
"]",
"werrs",
"=",
"perrs",
"[",
"wtimeorder",
"]",
"if",
"rescaletomedian",
":",
"wmags",
"=",
"wmags",
"+",
"median_mag",
"# prepare the returndict",
"returndict",
"=",
"{",
"'wtimes'",
":",
"wtimes",
",",
"# these are in the new time order",
"'wphase'",
":",
"wphase",
",",
"'wmags'",
":",
"wmags",
",",
"'werrs'",
":",
"werrs",
",",
"'whitenparams'",
":",
"whitenparams",
",",
"'whitenperiod'",
":",
"whitenperiod",
"}",
"# make the fit plot if required",
"if",
"plotfit",
"and",
"(",
"isinstance",
"(",
"plotfit",
",",
"str",
")",
"or",
"isinstance",
"(",
"plotfit",
",",
"Strio",
")",
")",
":",
"if",
"plotfitphasedlconly",
":",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"10",
",",
"4.8",
")",
")",
"else",
":",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"16",
",",
"9.6",
")",
")",
"if",
"plotfitphasedlconly",
":",
"# phased series before whitening",
"plt",
".",
"subplot",
"(",
"121",
")",
"plt",
".",
"plot",
"(",
"phase",
",",
"pmags",
",",
"marker",
"=",
"'.'",
",",
"color",
"=",
"'k'",
",",
"linestyle",
"=",
"'None'",
",",
"markersize",
"=",
"2.0",
",",
"markeredgewidth",
"=",
"0",
")",
"if",
"not",
"magsarefluxes",
":",
"plt",
".",
"gca",
"(",
")",
".",
"invert_yaxis",
"(",
")",
"plt",
".",
"ylabel",
"(",
"'magnitude'",
")",
"else",
":",
"plt",
".",
"ylabel",
"(",
"'fluxes'",
")",
"plt",
".",
"xlabel",
"(",
"'phase'",
")",
"plt",
".",
"title",
"(",
"'phased LC before pre-whitening'",
")",
"# phased series after whitening",
"plt",
".",
"subplot",
"(",
"122",
")",
"plt",
".",
"plot",
"(",
"wphase",
",",
"wmags",
",",
"marker",
"=",
"'.'",
",",
"color",
"=",
"'g'",
",",
"linestyle",
"=",
"'None'",
",",
"markersize",
"=",
"2.0",
",",
"markeredgewidth",
"=",
"0",
")",
"if",
"not",
"magsarefluxes",
":",
"plt",
".",
"gca",
"(",
")",
".",
"invert_yaxis",
"(",
")",
"plt",
".",
"ylabel",
"(",
"'magnitude'",
")",
"else",
":",
"plt",
".",
"ylabel",
"(",
"'fluxes'",
")",
"plt",
".",
"xlabel",
"(",
"'phase'",
")",
"plt",
".",
"title",
"(",
"'phased LC after pre-whitening'",
")",
"else",
":",
"# time series before whitening",
"plt",
".",
"subplot",
"(",
"221",
")",
"plt",
".",
"plot",
"(",
"stimes",
",",
"smags",
",",
"marker",
"=",
"'.'",
",",
"color",
"=",
"'k'",
",",
"linestyle",
"=",
"'None'",
",",
"markersize",
"=",
"2.0",
",",
"markeredgewidth",
"=",
"0",
")",
"if",
"not",
"magsarefluxes",
":",
"plt",
".",
"gca",
"(",
")",
".",
"invert_yaxis",
"(",
")",
"plt",
".",
"ylabel",
"(",
"'magnitude'",
")",
"else",
":",
"plt",
".",
"ylabel",
"(",
"'fluxes'",
")",
"plt",
".",
"xlabel",
"(",
"'JD'",
")",
"plt",
".",
"title",
"(",
"'LC before pre-whitening'",
")",
"# time series after whitening",
"plt",
".",
"subplot",
"(",
"222",
")",
"plt",
".",
"plot",
"(",
"wtimes",
",",
"wmags",
",",
"marker",
"=",
"'.'",
",",
"color",
"=",
"'g'",
",",
"linestyle",
"=",
"'None'",
",",
"markersize",
"=",
"2.0",
",",
"markeredgewidth",
"=",
"0",
")",
"if",
"not",
"magsarefluxes",
":",
"plt",
".",
"gca",
"(",
")",
".",
"invert_yaxis",
"(",
")",
"plt",
".",
"ylabel",
"(",
"'magnitude'",
")",
"else",
":",
"plt",
".",
"ylabel",
"(",
"'fluxes'",
")",
"plt",
".",
"xlabel",
"(",
"'JD'",
")",
"plt",
".",
"title",
"(",
"'LC after pre-whitening with period: %.6f'",
"%",
"whitenperiod",
")",
"# phased series before whitening",
"plt",
".",
"subplot",
"(",
"223",
")",
"plt",
".",
"plot",
"(",
"phase",
",",
"pmags",
",",
"marker",
"=",
"'.'",
",",
"color",
"=",
"'k'",
",",
"linestyle",
"=",
"'None'",
",",
"markersize",
"=",
"2.0",
",",
"markeredgewidth",
"=",
"0",
")",
"if",
"not",
"magsarefluxes",
":",
"plt",
".",
"gca",
"(",
")",
".",
"invert_yaxis",
"(",
")",
"plt",
".",
"ylabel",
"(",
"'magnitude'",
")",
"else",
":",
"plt",
".",
"ylabel",
"(",
"'fluxes'",
")",
"plt",
".",
"xlabel",
"(",
"'phase'",
")",
"plt",
".",
"title",
"(",
"'phased LC before pre-whitening'",
")",
"# phased series after whitening",
"plt",
".",
"subplot",
"(",
"224",
")",
"plt",
".",
"plot",
"(",
"wphase",
",",
"wmags",
",",
"marker",
"=",
"'.'",
",",
"color",
"=",
"'g'",
",",
"linestyle",
"=",
"'None'",
",",
"markersize",
"=",
"2.0",
",",
"markeredgewidth",
"=",
"0",
")",
"if",
"not",
"magsarefluxes",
":",
"plt",
".",
"gca",
"(",
")",
".",
"invert_yaxis",
"(",
")",
"plt",
".",
"ylabel",
"(",
"'magnitude'",
")",
"else",
":",
"plt",
".",
"ylabel",
"(",
"'fluxes'",
")",
"plt",
".",
"xlabel",
"(",
"'phase'",
")",
"plt",
".",
"title",
"(",
"'phased LC after pre-whitening'",
")",
"plt",
".",
"tight_layout",
"(",
")",
"plt",
".",
"savefig",
"(",
"plotfit",
",",
"format",
"=",
"'png'",
",",
"pad_inches",
"=",
"0.0",
")",
"plt",
".",
"close",
"(",
")",
"if",
"isinstance",
"(",
"plotfit",
",",
"str",
")",
"or",
"isinstance",
"(",
"plotfit",
",",
"Strio",
")",
":",
"returndict",
"[",
"'fitplotfile'",
"]",
"=",
"plotfit",
"return",
"returndict"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
gls_prewhiten
|
Iterative pre-whitening of a magnitude series using the L-S periodogram.
This finds the best period, fits a Fourier series using that period, then
whitens the time series with the fit, and repeats for `nbestpeaks` rounds.
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to iteratively pre-whiten.
fourierorder : int
The Fourier order of the sinusoidal signal to fit to the time-series and
iteratively remove.
initfparams : list or None
If this is provided, should be a list of Fourier amplitudes and phases
in the following format::
[ampl_1, ampl_2, ampl_3, ..., ampl_X,
pha_1, pha_2, pha_3, ..., pha_X]
where `X` is the Fourier order. These are usually the output of a
previous Fourier fit to the light curve (from
:py:func:`astrobase.lcfit.sinusoidal.fourier_fit_magseries` for
example). You MUST provide ONE of `fourierorder` and `initfparams`, but
not both. If both are provided or both are None, a sinusoidal signal of
Fourier order 3 will be used by default.
startp_gls, endp_gls : float or None
If these are provided, will serve as input to the Generalized
Lomb-Scargle function that will attempt to find the best `nbestpeaks`
periods in the time-series. These set the minimum and maximum period to
search for in the time-series.
stepsize : float
The step-size in frequency to use when constructing a frequency grid for
the period search.
autofreq : bool
If this is True, the value of `stepsize` will be ignored and the
:py:func:`astrobase.periodbase.get_frequency_grid` function will be used
to generate a frequency grid based on `startp` and `endp`. If these are
None as well, `startp` will be set to 0.1 and `endp` will be set to
`times.max() - times.min()`.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]` will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
If the input measurement values in `mags` and `errs` are in fluxes, set
this to True.
nbestpeaks : int
The number of 'best' peaks to return from the periodogram results,
starting from the global maximum of the periodogram peak values.
nworkers : int
The number of parallel workers to use when calculating the periodogram.
plotfits : None or str
If this is a str, it should indicate the file to which a plot of the
successive iterations of pre-whitening will be written. This will
contain a row of plots indicating the before/after states of the light
curves for each round of pre-whitening.
Returns
-------
(bestperiods, plotfile) : tuple
This returns a list of the best periods (with the "highest" peak in the
periodogram) after each round of pre-whitening is done. If `plotfits` is a
str, will also return the path to the generated plot file.
|
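To see what `stepsize` controls, here is a minimal sketch of a uniform-in-frequency search grid between the period bounds. This is an illustration only, not the actual `astrobase.periodbase.get_frequency_grid`:

import numpy as np

startp, endp, stepsize = 0.1, 100.0, 1.0e-4
# frequencies run from 1/longest period up to 1/shortest period
freqs = np.arange(1.0 / endp, 1.0 / startp, stepsize)
periods = 1.0 / freqs
print(freqs.size)                    # ~100,000 trial frequencies
print(periods.min(), periods.max())  # close to (startp, endp)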
astrobase/varbase/signals.py
|
def gls_prewhiten(times, mags, errs,
fourierorder=3, # 3rd order series to start with
initfparams=None,
startp_gls=None,
endp_gls=None,
stepsize=1.0e-4,
autofreq=True,
sigclip=30.0,
magsarefluxes=False,
nbestpeaks=5,
nworkers=4,
plotfits=None):
'''Iterative pre-whitening of a magnitude series using the L-S periodogram.
This finds the best period, fits a Fourier series using that period, then
whitens the time series with the fit, and repeats for `nbestpeaks` rounds.
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to iteratively pre-whiten.
fourierorder : int
The Fourier order of the sinusoidal signal to fit to the time-series and
iteratively remove.
initfparams : list or None
If this is provided, should be a list of Fourier amplitudes and phases
in the following format::
[ampl_1, ampl_2, ampl_3, ..., ampl_X,
pha_1, pha_2, pha_3, ..., pha_X]
where `X` is the Fourier order. These are usually the output of a
previous Fourier fit to the light curve (from
:py:func:`astrobase.lcfit.sinusoidal.fourier_fit_magseries` for
example). You MUST provide ONE of `fourierorder` and `initfparams`, but
not both. If both are provided or both are None, a sinusoidal signal of
Fourier order 3 will be used by default.
startp_gls, endp_gls : float or None
If these are provided, will serve as input to the Generalized
Lomb-Scargle function that will attempt to find the best `nbestpeaks`
periods in the time-series. These set the minimum and maximum period to
search for in the time-series.
stepsize : float
The step-size in frequency to use when constructing a frequency grid for
the period search.
autofreq : bool
If this is True, the value of `stepsize` will be ignored and the
:py:func:`astrobase.periodbase.get_frequency_grid` function will be used
to generate a frequency grid based on `startp` and `endp`. If these are
None as well, `startp` will be set to 0.1 and `endp` will be set to
`times.max() - times.min()`.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]` will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
If the input measurement values in `mags` and `errs` are in fluxes, set
this to True.
nbestpeaks : int
The number of 'best' peaks to return from the periodogram results,
starting from the global maximum of the periodogram peak values.
nworkers : int
The number of parallel workers to use when calculating the periodogram.
plotfits : None or str
If this is a str, it should indicate the file to which a plot of the
successive iterations of pre-whitening will be written. This will
contain a row of plots indicating the before/after states of the light
curves for each round of pre-whitening.
Returns
-------
(bestperiods, plotfile) : tuple
This returns a list of the best periods (with the "highest" peak in the
periodogram) after each round of pre-whitening is done. If `plotfits` is a
str, will also return the path to the generated plot file.
'''
stimes, smags, serrs = sigclip_magseries(times, mags, errs,
sigclip=sigclip,
magsarefluxes=magsarefluxes)
# now start the cycle by doing a GLS on the initial timeseries
gls = pgen_lsp(stimes, smags, serrs,
magsarefluxes=magsarefluxes,
startp=startp_gls,
endp=endp_gls,
autofreq=autofreq,
sigclip=sigclip,
stepsize=stepsize,
nworkers=nworkers)
LOGINFO('round %s: period = %.6f' % (0, gls['bestperiod']))
if plotfits and isinstance(plotfits, str):
plt.figure(figsize=(20,6*nbestpeaks))
nplots = nbestpeaks + 1
# periodogram
plt.subplot(nplots,3,1)
plt.plot(gls['periods'],gls['lspvals'])
plt.xlabel('period [days]')
plt.ylabel('GLS power')
plt.xscale('log')
plt.title('round 0, best period = %.6f' % gls['bestperiod'])
# unphased LC
plt.subplot(nplots,3,2)
plt.plot(stimes, smags,
linestyle='none', marker='o',ms=1.0,rasterized=True)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('flux')
plt.xlabel('JD')
plt.title('unphased LC before whitening')
# phased LC
plt.subplot(nplots,3,3)
phased = phase_magseries(stimes, smags,
gls['bestperiod'], stimes.min())
plt.plot(phased['phase'], phased['mags'],
linestyle='none', marker='o',ms=1.0,rasterized=True)
if not magsarefluxes:
plt.ylabel('magnitude')
plt.gca().invert_yaxis()
else:
plt.ylabel('flux')
plt.xlabel('phase')
plt.title('phased LC before whitening: P = %.6f' % gls['bestperiod'])
# set up the initial times, mags, errs, period
wtimes, wmags, werrs = stimes, smags, serrs
wperiod = gls['bestperiod']
# start the best periods list
bestperiods = []
# now go through the rest of the cycles
for fitind in range(nbestpeaks):
wfseries = fourier_fit_magseries(wtimes, wmags, werrs, wperiod,
fourierorder=fourierorder,
fourierparams=initfparams,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
wffitparams = wfseries['fitinfo']['finalparams']
wseries = prewhiten_magseries(wtimes, wmags, werrs,
wperiod,
wffitparams,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
LOGINFO('round %s: period = %.6f' % (fitind+1, wperiod))
# update the mag series with whitened version
wtimes, wmags, werrs = (
wseries['wtimes'], wseries['wmags'], wseries['werrs']
)
# redo the periodogram
wgls = pgen_lsp(wtimes, wmags, werrs,
magsarefluxes=magsarefluxes,
startp=startp_gls,
endp=endp_gls,
autofreq=autofreq,
sigclip=sigclip,
stepsize=stepsize,
nworkers=nworkers)
wperiod = wgls['bestperiod']
bestperiods.append(wperiod)
# make plots if requested
if plotfits and isinstance(plotfits, str):
# periodogram
plt.subplot(nplots,3,4+fitind*3)
plt.plot(wgls['periods'],wgls['lspvals'])
plt.xlabel('period [days]')
plt.ylabel('LSP power')
plt.xscale('log')
plt.title('round %s, best period = %.6f' % (fitind+1,
wgls['bestperiod']))
# unphased LC
plt.subplot(nplots,3,5+fitind*3)
plt.plot(wtimes, wmags,
linestyle='none', marker='o',ms=1.0,rasterized=True)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('flux')
plt.xlabel('JD')
plt.title('unphased LC after whitening')
# phased LC
plt.subplot(nplots,3,6+fitind*3)
wphased = phase_magseries(wtimes, wmags,
wperiod, stimes.min())
plt.plot(wphased['phase'], wphased['mags'],
linestyle='none', marker='o',ms=1.0,rasterized=True)
if not magsarefluxes:
plt.ylabel('magnitude')
plt.gca().invert_yaxis()
else:
plt.ylabel('flux')
plt.xlabel('phase')
plt.title('phased LC after whitening: P = %.6f' % wperiod)
# in the end, write out the plot
if plotfits and isinstance(plotfits, str):
plt.subplots_adjust(hspace=0.2,wspace=0.4)
plt.savefig(plotfits, bbox_inches='tight')
plt.close('all')
return bestperiods, os.path.abspath(plotfits)
else:
return bestperiods
|
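A hedged usage sketch for the function above (assumes astrobase is installed and `times`, `mags`, `errs` are numpy arrays loaded elsewhere; with `plotfits` set to a path, the function returns both the periods and the plot file):

from astrobase.varbase.signals import gls_prewhiten

bestperiods, plotfile = gls_prewhiten(times, mags, errs,
                                      fourierorder=3,
                                      nbestpeaks=3,
                                      nworkers=4,
                                      plotfits='prewhiten_rounds.png')
print(bestperiods)  # one best period per pre-whitening round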
def gls_prewhiten(times, mags, errs,
fourierorder=3, # 3rd order series to start with
initfparams=None,
startp_gls=None,
endp_gls=None,
stepsize=1.0e-4,
autofreq=True,
sigclip=30.0,
magsarefluxes=False,
nbestpeaks=5,
nworkers=4,
plotfits=None):
'''Iterative pre-whitening of a magnitude series using the L-S periodogram.
This finds the best period, fits a Fourier series using that period, then
whitens the time series with the fit, and repeats for `nbestpeaks` rounds.
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to iteratively pre-whiten.
fourierorder : int
The Fourier order of the sinusoidal signal to fit to the time-series and
iteratively remove.
initfparams : list or None
If this is provided, should be a list of Fourier amplitudes and phases
in the following format::
[ampl_1, ampl_2, ampl_3, ..., ampl_X,
pha_1, pha_2, pha_3, ..., pha_X]
where `X` is the Fourier order. These are usually the output of a
previous Fourier fit to the light curve (from
:py:func:`astrobase.lcfit.sinusoidal.fourier_fit_magseries` for
example). You MUST provide ONE of `fourierorder` and `initfparams`, but
not both. If both are provided or both are None, a sinusoidal signal of
Fourier order 3 will be used by default.
startp_gls, endp_gls : float or None
If these are provided, will serve as input to the Generalized
Lomb-Scargle function that will attempt to find the best `nbestpeaks`
periods in the time-series. These set the minimum and maximum period to
search for in the time-series.
stepsize : float
The step-size in frequency to use when constructing a frequency grid for
the period search.
autofreq : bool
If this is True, the value of `stepsize` will be ignored and the
:py:func:`astrobase.periodbase.get_frequency_grid` function will be used
to generate a frequency grid based on `startp` and `endp`. If these are
None as well, `startp` will be set to 0.1 and `endp` will be set to
`times.max() - times.min()`.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]` will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
If the input measurement values in `mags` and `errs` are in fluxes, set
this to True.
nbestpeaks : int
The number of 'best' peaks to return from the periodogram results,
starting from the global maximum of the periodogram peak values.
nworkers : int
The number of parallel workers to use when calculating the periodogram.
plotfits : None or str
If this is a str, it should indicate the file to which a plot of the
successive iterations of pre-whitening will be written. This will
contain a row of plots indicating the before/after states of the light
curves for each round of pre-whitening.
Returns
-------
(bestperiods, plotfile) : tuple
This returns a list of the best periods (with the "highest" peak in the
periodogram) after each round of pre-whitening is done. If `plotfits` is a
str, will also return the path to the generated plot file.
'''
stimes, smags, serrs = sigclip_magseries(times, mags, errs,
sigclip=sigclip,
magsarefluxes=magsarefluxes)
# now start the cycle by doing a GLS on the initial timeseries
gls = pgen_lsp(stimes, smags, serrs,
magsarefluxes=magsarefluxes,
startp=startp_gls,
endp=endp_gls,
autofreq=autofreq,
sigclip=sigclip,
stepsize=stepsize,
nworkers=nworkers)
LOGINFO('round %s: period = %.6f' % (0, gls['bestperiod']))
if plotfits and isinstance(plotfits, str):
plt.figure(figsize=(20,6*nbestpeaks))
nplots = nbestpeaks + 1
# periodogram
plt.subplot(nplots,3,1)
plt.plot(gls['periods'],gls['lspvals'])
plt.xlabel('period [days]')
plt.ylabel('GLS power')
plt.xscale('log')
plt.title('round 0, best period = %.6f' % gls['bestperiod'])
# unphased LC
plt.subplot(nplots,3,2)
plt.plot(stimes, smags,
linestyle='none', marker='o',ms=1.0,rasterized=True)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('flux')
plt.xlabel('JD')
plt.title('unphased LC before whitening')
# phased LC
plt.subplot(nplots,3,3)
phased = phase_magseries(stimes, smags,
gls['bestperiod'], stimes.min())
plt.plot(phased['phase'], phased['mags'],
linestyle='none', marker='o',ms=1.0,rasterized=True)
if not magsarefluxes:
plt.ylabel('magnitude')
plt.gca().invert_yaxis()
else:
plt.ylabel('flux')
plt.xlabel('phase')
plt.title('phased LC before whitening: P = %.6f' % gls['bestperiod'])
# set up the initial times, mags, errs, period
wtimes, wmags, werrs = stimes, smags, serrs
wperiod = gls['bestperiod']
# start the best periods list
bestperiods = []
# now go through the rest of the cycles
for fitind in range(nbestpeaks):
wfseries = fourier_fit_magseries(wtimes, wmags, werrs, wperiod,
fourierorder=fourierorder,
fourierparams=initfparams,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
wffitparams = wfseries['fitinfo']['finalparams']
wseries = prewhiten_magseries(wtimes, wmags, werrs,
wperiod,
wffitparams,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
LOGINFO('round %s: period = %.6f' % (fitind+1, wperiod))
# update the mag series with whitened version
wtimes, wmags, werrs = (
wseries['wtimes'], wseries['wmags'], wseries['werrs']
)
# redo the periodogram
wgls = pgen_lsp(wtimes, wmags, werrs,
magsarefluxes=magsarefluxes,
startp=startp_gls,
endp=endp_gls,
autofreq=autofreq,
sigclip=sigclip,
stepsize=stepsize,
nworkers=nworkers)
wperiod = wgls['bestperiod']
bestperiods.append(wperiod)
# make plots if requested
if plotfits and isinstance(plotfits, str):
# periodogram
plt.subplot(nplots,3,4+fitind*3)
plt.plot(wgls['periods'],wgls['lspvals'])
plt.xlabel('period [days]')
plt.ylabel('LSP power')
plt.xscale('log')
plt.title('round %s, best period = %.6f' % (fitind+1,
wgls['bestperiod']))
# unphased LC
plt.subplot(nplots,3,5+fitind*3)
plt.plot(wtimes, wmags,
linestyle='none', marker='o',ms=1.0,rasterized=True)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('flux')
plt.xlabel('JD')
plt.title('unphased LC after whitening')
# phased LC
plt.subplot(nplots,3,6+fitind*3)
wphased = phase_magseries(wtimes, wmags,
wperiod, stimes.min())
plt.plot(wphased['phase'], wphased['mags'],
linestyle='none', marker='o',ms=1.0,rasterized=True)
if not magsarefluxes:
plt.ylabel('magnitude')
plt.gca().invert_yaxis()
else:
plt.ylabel('flux')
plt.xlabel('phase')
plt.title('phased LC after whitening: P = %.6f' % wperiod)
# in the end, write out the plot
if plotfits and isinstance(plotfits, str):
plt.subplots_adjust(hspace=0.2,wspace=0.4)
plt.savefig(plotfits, bbox_inches='tight')
plt.close('all')
return bestperiods, os.path.abspath(plotfits)
else:
return bestperiods
|
[
"Iterative",
"pre",
"-",
"whitening",
"of",
"a",
"magnitude",
"series",
"using",
"the",
"L",
"-",
"S",
"periodogram",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/varbase/signals.py#L332-L590
|
[
"def",
"gls_prewhiten",
"(",
"times",
",",
"mags",
",",
"errs",
",",
"fourierorder",
"=",
"3",
",",
"# 3rd order series to start with",
"initfparams",
"=",
"None",
",",
"startp_gls",
"=",
"None",
",",
"endp_gls",
"=",
"None",
",",
"stepsize",
"=",
"1.0e-4",
",",
"autofreq",
"=",
"True",
",",
"sigclip",
"=",
"30.0",
",",
"magsarefluxes",
"=",
"False",
",",
"nbestpeaks",
"=",
"5",
",",
"nworkers",
"=",
"4",
",",
"plotfits",
"=",
"None",
")",
":",
"stimes",
",",
"smags",
",",
"serrs",
"=",
"sigclip_magseries",
"(",
"times",
",",
"mags",
",",
"errs",
",",
"sigclip",
"=",
"sigclip",
",",
"magsarefluxes",
"=",
"magsarefluxes",
")",
"# now start the cycle by doing an GLS on the initial timeseries",
"gls",
"=",
"pgen_lsp",
"(",
"stimes",
",",
"smags",
",",
"serrs",
",",
"magsarefluxes",
"=",
"magsarefluxes",
",",
"startp",
"=",
"startp_gls",
",",
"endp",
"=",
"endp_gls",
",",
"autofreq",
"=",
"autofreq",
",",
"sigclip",
"=",
"sigclip",
",",
"stepsize",
"=",
"stepsize",
",",
"nworkers",
"=",
"nworkers",
")",
"LOGINFO",
"(",
"'round %s: period = %.6f'",
"%",
"(",
"0",
",",
"gls",
"[",
"'bestperiod'",
"]",
")",
")",
"if",
"plotfits",
"and",
"isinstance",
"(",
"plotfits",
",",
"str",
")",
":",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"20",
",",
"6",
"*",
"nbestpeaks",
")",
")",
"nplots",
"=",
"nbestpeaks",
"+",
"1",
"# periodogram",
"plt",
".",
"subplot",
"(",
"nplots",
",",
"3",
",",
"1",
")",
"plt",
".",
"plot",
"(",
"gls",
"[",
"'periods'",
"]",
",",
"gls",
"[",
"'lspvals'",
"]",
")",
"plt",
".",
"xlabel",
"(",
"'period [days]'",
")",
"plt",
".",
"ylabel",
"(",
"'GLS power'",
")",
"plt",
".",
"xscale",
"(",
"'log'",
")",
"plt",
".",
"title",
"(",
"'round 0, best period = %.6f'",
"%",
"gls",
"[",
"'bestperiod'",
"]",
")",
"# unphased LC",
"plt",
".",
"subplot",
"(",
"nplots",
",",
"3",
",",
"2",
")",
"plt",
".",
"plot",
"(",
"stimes",
",",
"smags",
",",
"linestyle",
"=",
"'none'",
",",
"marker",
"=",
"'o'",
",",
"ms",
"=",
"1.0",
",",
"rasterized",
"=",
"True",
")",
"if",
"not",
"magsarefluxes",
":",
"plt",
".",
"gca",
"(",
")",
".",
"invert_yaxis",
"(",
")",
"plt",
".",
"ylabel",
"(",
"'magnitude'",
")",
"else",
":",
"plt",
".",
"ylabel",
"(",
"'flux'",
")",
"plt",
".",
"xlabel",
"(",
"'JD'",
")",
"plt",
".",
"title",
"(",
"'unphased LC before whitening'",
")",
"# phased LC",
"plt",
".",
"subplot",
"(",
"nplots",
",",
"3",
",",
"3",
")",
"phased",
"=",
"phase_magseries",
"(",
"stimes",
",",
"smags",
",",
"gls",
"[",
"'bestperiod'",
"]",
",",
"stimes",
".",
"min",
"(",
")",
")",
"plt",
".",
"plot",
"(",
"phased",
"[",
"'phase'",
"]",
",",
"phased",
"[",
"'mags'",
"]",
",",
"linestyle",
"=",
"'none'",
",",
"marker",
"=",
"'o'",
",",
"ms",
"=",
"1.0",
",",
"rasterized",
"=",
"True",
")",
"if",
"not",
"magsarefluxes",
":",
"plt",
".",
"ylabel",
"(",
"'magnitude'",
")",
"plt",
".",
"gca",
"(",
")",
".",
"invert_yaxis",
"(",
")",
"else",
":",
"plt",
".",
"ylabel",
"(",
"'flux'",
")",
"plt",
".",
"xlabel",
"(",
"'phase'",
")",
"plt",
".",
"title",
"(",
"'phased LC before whitening: P = %.6f'",
"%",
"gls",
"[",
"'bestperiod'",
"]",
")",
"# set up the initial times, mags, errs, period",
"wtimes",
",",
"wmags",
",",
"werrs",
"=",
"stimes",
",",
"smags",
",",
"serrs",
"wperiod",
"=",
"gls",
"[",
"'bestperiod'",
"]",
"# start the best periods list",
"bestperiods",
"=",
"[",
"]",
"# now go through the rest of the cycles",
"for",
"fitind",
"in",
"range",
"(",
"nbestpeaks",
")",
":",
"wfseries",
"=",
"fourier_fit_magseries",
"(",
"wtimes",
",",
"wmags",
",",
"werrs",
",",
"wperiod",
",",
"fourierorder",
"=",
"fourierorder",
",",
"fourierparams",
"=",
"initfparams",
",",
"magsarefluxes",
"=",
"magsarefluxes",
",",
"sigclip",
"=",
"sigclip",
")",
"wffitparams",
"=",
"wfseries",
"[",
"'fitinfo'",
"]",
"[",
"'finalparams'",
"]",
"wseries",
"=",
"prewhiten_magseries",
"(",
"wtimes",
",",
"wmags",
",",
"werrs",
",",
"wperiod",
",",
"wffitparams",
",",
"magsarefluxes",
"=",
"magsarefluxes",
",",
"sigclip",
"=",
"sigclip",
")",
"LOGINFO",
"(",
"'round %s: period = %.6f'",
"%",
"(",
"fitind",
"+",
"1",
",",
"wperiod",
")",
")",
"bestperiods",
".",
"append",
"(",
"wperiod",
")",
"# update the mag series with whitened version",
"wtimes",
",",
"wmags",
",",
"werrs",
"=",
"(",
"wseries",
"[",
"'wtimes'",
"]",
",",
"wseries",
"[",
"'wmags'",
"]",
",",
"wseries",
"[",
"'werrs'",
"]",
")",
"# redo the periodogram",
"wgls",
"=",
"pgen_lsp",
"(",
"wtimes",
",",
"wmags",
",",
"werrs",
",",
"magsarefluxes",
"=",
"magsarefluxes",
",",
"startp",
"=",
"startp_gls",
",",
"endp",
"=",
"endp_gls",
",",
"autofreq",
"=",
"autofreq",
",",
"sigclip",
"=",
"sigclip",
",",
"stepsize",
"=",
"stepsize",
",",
"nworkers",
"=",
"nworkers",
")",
"wperiod",
"=",
"wgls",
"[",
"'bestperiod'",
"]",
"bestperiods",
".",
"append",
"(",
"wperiod",
")",
"# make plots if requested",
"if",
"plotfits",
"and",
"isinstance",
"(",
"plotfits",
",",
"str",
")",
":",
"# periodogram",
"plt",
".",
"subplot",
"(",
"nplots",
",",
"3",
",",
"4",
"+",
"fitind",
"*",
"3",
")",
"plt",
".",
"plot",
"(",
"wgls",
"[",
"'periods'",
"]",
",",
"wgls",
"[",
"'lspvals'",
"]",
")",
"plt",
".",
"xlabel",
"(",
"'period [days]'",
")",
"plt",
".",
"ylabel",
"(",
"'LSP power'",
")",
"plt",
".",
"xscale",
"(",
"'log'",
")",
"plt",
".",
"title",
"(",
"'round %s, best period = %.6f'",
"%",
"(",
"fitind",
"+",
"1",
",",
"wgls",
"[",
"'bestperiod'",
"]",
")",
")",
"# unphased LC",
"plt",
".",
"subplot",
"(",
"nplots",
",",
"3",
",",
"5",
"+",
"fitind",
"*",
"3",
")",
"plt",
".",
"plot",
"(",
"wtimes",
",",
"wmags",
",",
"linestyle",
"=",
"'none'",
",",
"marker",
"=",
"'o'",
",",
"ms",
"=",
"1.0",
",",
"rasterized",
"=",
"True",
")",
"if",
"not",
"magsarefluxes",
":",
"plt",
".",
"gca",
"(",
")",
".",
"invert_yaxis",
"(",
")",
"plt",
".",
"ylabel",
"(",
"'magnitude'",
")",
"else",
":",
"plt",
".",
"ylabel",
"(",
"'flux'",
")",
"plt",
".",
"xlabel",
"(",
"'JD'",
")",
"plt",
".",
"title",
"(",
"'unphased LC after whitening'",
")",
"# phased LC",
"plt",
".",
"subplot",
"(",
"nplots",
",",
"3",
",",
"6",
"+",
"fitind",
"*",
"3",
")",
"wphased",
"=",
"phase_magseries",
"(",
"wtimes",
",",
"wmags",
",",
"wperiod",
",",
"stimes",
".",
"min",
"(",
")",
")",
"plt",
".",
"plot",
"(",
"wphased",
"[",
"'phase'",
"]",
",",
"wphased",
"[",
"'mags'",
"]",
",",
"linestyle",
"=",
"'none'",
",",
"marker",
"=",
"'o'",
",",
"ms",
"=",
"1.0",
",",
"rasterized",
"=",
"True",
")",
"if",
"not",
"magsarefluxes",
":",
"plt",
".",
"ylabel",
"(",
"'magnitude'",
")",
"plt",
".",
"gca",
"(",
")",
".",
"invert_yaxis",
"(",
")",
"else",
":",
"plt",
".",
"ylabel",
"(",
"'flux'",
")",
"plt",
".",
"xlabel",
"(",
"'phase'",
")",
"plt",
".",
"title",
"(",
"'phased LC after whitening: P = %.6f'",
"%",
"wperiod",
")",
"# in the end, write out the plot",
"if",
"plotfits",
"and",
"isinstance",
"(",
"plotfits",
",",
"str",
")",
":",
"plt",
".",
"subplots_adjust",
"(",
"hspace",
"=",
"0.2",
",",
"wspace",
"=",
"0.4",
")",
"plt",
".",
"savefig",
"(",
"plotfits",
",",
"bbox_inches",
"=",
"'tight'",
")",
"plt",
".",
"close",
"(",
"'all'",
")",
"return",
"bestperiods",
",",
"os",
".",
"path",
".",
"abspath",
"(",
"plotfits",
")",
"else",
":",
"return",
"bestperiods"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
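The tokenized function above implements iterative prewhitening: each round fits a Fourier series at the current best period, subtracts that fit from the light curve, and re-runs the GLS periodogram on the residuals to find the next period. A minimal sketch of that round structure, assuming the astrobase import paths below are correct for this revision and that stimes, smags, serrs are already sigma-clipped arrays:

from astrobase.lcfit import fourier_fit_magseries
from astrobase.varbase.signals import prewhiten_magseries
from astrobase.periodbase import pgen_lsp

def prewhiten_rounds(stimes, smags, serrs, initperiod,
                     nrounds=3, fourierorder=3, magsarefluxes=False):
    # hypothetical helper for illustration, not part of astrobase itself
    wtimes, wmags, werrs = stimes, smags, serrs
    wperiod = initperiod
    bestperiods = [wperiod]
    for _ in range(nrounds):
        # fit a Fourier series at the current best period
        wf = fourier_fit_magseries(wtimes, wmags, werrs, wperiod,
                                   fourierorder=fourierorder,
                                   magsarefluxes=magsarefluxes)
        fparams = wf['fitinfo']['finalparams']
        # subtract the fitted signal (prewhiten) from the light curve
        ws = prewhiten_magseries(wtimes, wmags, werrs, wperiod, fparams,
                                 magsarefluxes=magsarefluxes)
        wtimes, wmags, werrs = ws['wtimes'], ws['wmags'], ws['werrs']
        # re-run the periodogram on the residuals for the next round
        wgls = pgen_lsp(wtimes, wmags, werrs, magsarefluxes=magsarefluxes)
        wperiod = wgls['bestperiod']
        bestperiods.append(wperiod)
    return bestperiods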
valid
|
mask_signal
|
This removes repeating signals in the magnitude time series.
Useful for masking planetary transit signals in light curves to search for
other variability.
A small worked example of using this and `prewhiten_magseries` above:
https://github.com/waqasbhatti/astrobase/issues/77#issuecomment-463803558
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to run the masking on.
signalperiod : float
The period of the signal to mask.
signalepoch : float
The epoch of the signal to mask.
magsarefluxes : bool
Set to True if `mags` is actually an array of fluxes.
maskphases : sequence of floats
This defines which phase values will be masked. For each item in this
sequence, this function will mask a length of phase given by
`maskphaselength` centered on each `maskphases` value, and remove all LC
points in these regions from the light curve.
maskphaselength : float
The length in phase to mask for each phase value provided in
`maskphases`.
plotfit : str or None
If provided as a str, indicates the output plot file.
plotfitphasedlconly : bool
If True, will only plot the effect of masking the signal as requested on
the phased LC. If False, will also plot the unphased LC.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]` will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
|
astrobase/varbase/signals.py
|
def mask_signal(times, mags, errs,
signalperiod,
signalepoch,
magsarefluxes=False,
maskphases=(0,0,0.5,1.0),
maskphaselength=0.1,
plotfit=None,
plotfitphasedlconly=True,
sigclip=30.0):
'''This removes repeating signals in the magnitude time series.
Useful for masking planetary transit signals in light curves to search for
other variability.
A small worked example of using this and `prewhiten_magseries` above:
https://github.com/waqasbhatti/astrobase/issues/77#issuecomment-463803558
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to run the masking on.
signalperiod : float
The period of the signal to mask.
signalepoch : float
The epoch of the signal to mask.
magsarefluxes : bool
Set to True if `mags` is actually an array of fluxes.
maskphases : sequence of floats
This defines which phase values will be masked. For each item in this
sequence, this function will mask a length of phase given by
`maskphaselength` centered on each `maskphases` value, and remove all LC
points in these regions from the light curve.
maskphaselength : float
The length in phase to mask for each phase value provided in
`maskphases`.
plotfit : str or None
If provided as a str, indicates the output plot file.
plotfitphasedlconly : bool
If True, will only plot the effect of masking the signal as requested on
the phased LC. If False, will also plot the unphased LC.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]` will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
'''
stimes, smags, serrs = sigclip_magseries(times, mags, errs,
sigclip=sigclip,
magsarefluxes=magsarefluxes)
# now phase the light curve using the period and epoch provided
phases = (
(stimes - signalepoch)/signalperiod -
np.floor((stimes - signalepoch)/signalperiod)
)
# mask the requested phases using the mask length (in phase units)
# this gets all the masks into one array
masks = np.array([(np.abs(phases - x) > maskphaselength)
for x in maskphases])
# this flattens the masks to a single array for all combinations
masks = np.all(masks,axis=0)
# apply the mask to the times, mags, and errs
mphases = phases[masks]
mtimes = stimes[masks]
mmags = smags[masks]
merrs = serrs[masks]
returndict = {'mphases':mphases,
'mtimes':mtimes,
'mmags':mmags,
'merrs':merrs}
# make the fit plot if required
if plotfit and isinstance(plotfit, str) or isinstance(plotfit, Strio):
if plotfitphasedlconly:
plt.figure(figsize=(10,4.8))
else:
plt.figure(figsize=(16,9.6))
if plotfitphasedlconly:
# phased series before whitening
plt.subplot(121)
plt.plot(phases,smags,
marker='.',
color='k',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('phase')
plt.title('phased LC before signal masking')
# phased series after whitening
plt.subplot(122)
plt.plot(mphases,mmags,
marker='.',
color='g',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('phase')
plt.title('phased LC after signal masking')
else:
# time series before whitening
plt.subplot(221)
plt.plot(stimes,smags,
marker='.',
color='k',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('JD')
plt.title('LC before signal masking')
# time series after whitening
plt.subplot(222)
plt.plot(mtimes,mmags,
marker='.',
color='g',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('JD')
plt.title('LC after signal masking')
# phased series before whitening
plt.subplot(223)
plt.plot(phases,smags,
marker='.',
color='k',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('phase')
plt.title('phased LC before signal masking')
# phased series after whitening
plt.subplot(224)
plt.plot(mphases,mmags,
marker='.',
color='g',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('phase')
plt.title('phased LC after signal masking')
plt.tight_layout()
plt.savefig(plotfit, format='png', pad_inches=0.0)
plt.close()
if isinstance(plotfit, str) or isinstance(plotfit, Strio):
returndict['fitplotfile'] = plotfit
return returndict
|
def mask_signal(times, mags, errs,
signalperiod,
signalepoch,
magsarefluxes=False,
maskphases=(0,0,0.5,1.0),
maskphaselength=0.1,
plotfit=None,
plotfitphasedlconly=True,
sigclip=30.0):
'''This removes repeating signals in the magnitude time series.
Useful for masking planetary transit signals in light curves to search for
other variability.
A small worked example of using this and `prewhiten_magseries` above:
https://github.com/waqasbhatti/astrobase/issues/77#issuecomment-463803558
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to run the masking on.
signalperiod : float
The period of the signal to mask.
signalepoch : float
The epoch of the signal to mask.
magsarefluxes : bool
Set to True if `mags` is actually an array of fluxes.
maskphases : sequence of floats
This defines which phase values will be masked. For each item in this
sequence, this function will mask a length of phase given by
`maskphaselength` centered on each `maskphases` value, and remove all LC
points in these regions from the light curve.
maskphaselength : float
The length in phase to mask for each phase value provided in
`maskphases`.
plotfit : str or None
If provided as a str, indicates the output plot file.
plotfitphasedlconly : bool
If True, will only plot the effect of masking the signal as requested on
the phased LC. If False, will also plot the unphased LC.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]` will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
'''
stimes, smags, serrs = sigclip_magseries(times, mags, errs,
sigclip=sigclip,
magsarefluxes=magsarefluxes)
# now phase the light curve using the period and epoch provided
phases = (
(stimes - signalepoch)/signalperiod -
np.floor((stimes - signalepoch)/signalperiod)
)
# mask the requested phases using the mask length (in phase units)
# this gets all the masks into one array
masks = np.array([(np.abs(phases - x) > maskphaselength)
for x in maskphases])
# this flattens the masks to a single array for all combinations
masks = np.all(masks,axis=0)
# apply the mask to the times, mags, and errs
mphases = phases[masks]
mtimes = stimes[masks]
mmags = smags[masks]
merrs = serrs[masks]
returndict = {'mphases':mphases,
'mtimes':mtimes,
'mmags':mmags,
'merrs':merrs}
# make the fit plot if required
if plotfit and isinstance(plotfit, str) or isinstance(plotfit, Strio):
if plotfitphasedlconly:
plt.figure(figsize=(10,4.8))
else:
plt.figure(figsize=(16,9.6))
if plotfitphasedlconly:
# phased series before whitening
plt.subplot(121)
plt.plot(phases,smags,
marker='.',
color='k',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('phase')
plt.title('phased LC before signal masking')
# phased series after whitening
plt.subplot(122)
plt.plot(mphases,mmags,
marker='.',
color='g',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('phase')
plt.title('phased LC after signal masking')
else:
# time series before whitening
plt.subplot(221)
plt.plot(stimes,smags,
marker='.',
color='k',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('JD')
plt.title('LC before signal masking')
# time series after whitening
plt.subplot(222)
plt.plot(mtimes,mmags,
marker='.',
color='g',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('JD')
plt.title('LC after signal masking')
# phased series before whitening
plt.subplot(223)
plt.plot(phases,smags,
marker='.',
color='k',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('phase')
plt.title('phased LC before signal masking')
# phased series after whitening
plt.subplot(224)
plt.plot(mphases,mmags,
marker='.',
color='g',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('phase')
plt.title('phased LC after signal masking')
plt.tight_layout()
plt.savefig(plotfit, format='png', pad_inches=0.0)
plt.close()
if isinstance(plotfit, str) or isinstance(plotfit, Strio):
returndict['fitplotfile'] = plotfit
return returndict
|
[
"This",
"removes",
"repeating",
"signals",
"in",
"the",
"magnitude",
"time",
"series",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/varbase/signals.py#L594-L821
|
[
"def",
"mask_signal",
"(",
"times",
",",
"mags",
",",
"errs",
",",
"signalperiod",
",",
"signalepoch",
",",
"magsarefluxes",
"=",
"False",
",",
"maskphases",
"=",
"(",
"0",
",",
"0",
",",
"0.5",
",",
"1.0",
")",
",",
"maskphaselength",
"=",
"0.1",
",",
"plotfit",
"=",
"None",
",",
"plotfitphasedlconly",
"=",
"True",
",",
"sigclip",
"=",
"30.0",
")",
":",
"stimes",
",",
"smags",
",",
"serrs",
"=",
"sigclip_magseries",
"(",
"times",
",",
"mags",
",",
"errs",
",",
"sigclip",
"=",
"sigclip",
",",
"magsarefluxes",
"=",
"magsarefluxes",
")",
"# now phase the light curve using the period and epoch provided",
"phases",
"=",
"(",
"(",
"stimes",
"-",
"signalepoch",
")",
"/",
"signalperiod",
"-",
"np",
".",
"floor",
"(",
"(",
"stimes",
"-",
"signalepoch",
")",
"/",
"signalperiod",
")",
")",
"# mask the requested phases using the mask length (in phase units)",
"# this gets all the masks into one array",
"masks",
"=",
"np",
".",
"array",
"(",
"[",
"(",
"np",
".",
"abs",
"(",
"phases",
"-",
"x",
")",
">",
"maskphaselength",
")",
"for",
"x",
"in",
"maskphases",
"]",
")",
"# this flattens the masks to a single array for all combinations",
"masks",
"=",
"np",
".",
"all",
"(",
"masks",
",",
"axis",
"=",
"0",
")",
"# apply the mask to the times, mags, and errs",
"mphases",
"=",
"phases",
"[",
"masks",
"]",
"mtimes",
"=",
"stimes",
"[",
"masks",
"]",
"mmags",
"=",
"smags",
"[",
"masks",
"]",
"merrs",
"=",
"serrs",
"[",
"masks",
"]",
"returndict",
"=",
"{",
"'mphases'",
":",
"mphases",
",",
"'mtimes'",
":",
"mtimes",
",",
"'mmags'",
":",
"mmags",
",",
"'merrs'",
":",
"merrs",
"}",
"# make the fit plot if required",
"if",
"plotfit",
"and",
"isinstance",
"(",
"plotfit",
",",
"str",
")",
"or",
"isinstance",
"(",
"plotfit",
",",
"Strio",
")",
":",
"if",
"plotfitphasedlconly",
":",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"10",
",",
"4.8",
")",
")",
"else",
":",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"16",
",",
"9.6",
")",
")",
"if",
"plotfitphasedlconly",
":",
"# phased series before whitening",
"plt",
".",
"subplot",
"(",
"121",
")",
"plt",
".",
"plot",
"(",
"phases",
",",
"smags",
",",
"marker",
"=",
"'.'",
",",
"color",
"=",
"'k'",
",",
"linestyle",
"=",
"'None'",
",",
"markersize",
"=",
"2.0",
",",
"markeredgewidth",
"=",
"0",
")",
"if",
"not",
"magsarefluxes",
":",
"plt",
".",
"gca",
"(",
")",
".",
"invert_yaxis",
"(",
")",
"plt",
".",
"ylabel",
"(",
"'magnitude'",
")",
"else",
":",
"plt",
".",
"ylabel",
"(",
"'fluxes'",
")",
"plt",
".",
"xlabel",
"(",
"'phase'",
")",
"plt",
".",
"title",
"(",
"'phased LC before signal masking'",
")",
"# phased series after whitening",
"plt",
".",
"subplot",
"(",
"122",
")",
"plt",
".",
"plot",
"(",
"mphases",
",",
"mmags",
",",
"marker",
"=",
"'.'",
",",
"color",
"=",
"'g'",
",",
"linestyle",
"=",
"'None'",
",",
"markersize",
"=",
"2.0",
",",
"markeredgewidth",
"=",
"0",
")",
"if",
"not",
"magsarefluxes",
":",
"plt",
".",
"gca",
"(",
")",
".",
"invert_yaxis",
"(",
")",
"plt",
".",
"ylabel",
"(",
"'magnitude'",
")",
"else",
":",
"plt",
".",
"ylabel",
"(",
"'fluxes'",
")",
"plt",
".",
"xlabel",
"(",
"'phase'",
")",
"plt",
".",
"title",
"(",
"'phased LC after signal masking'",
")",
"else",
":",
"# time series before whitening",
"plt",
".",
"subplot",
"(",
"221",
")",
"plt",
".",
"plot",
"(",
"stimes",
",",
"smags",
",",
"marker",
"=",
"'.'",
",",
"color",
"=",
"'k'",
",",
"linestyle",
"=",
"'None'",
",",
"markersize",
"=",
"2.0",
",",
"markeredgewidth",
"=",
"0",
")",
"if",
"not",
"magsarefluxes",
":",
"plt",
".",
"gca",
"(",
")",
".",
"invert_yaxis",
"(",
")",
"plt",
".",
"ylabel",
"(",
"'magnitude'",
")",
"else",
":",
"plt",
".",
"ylabel",
"(",
"'fluxes'",
")",
"plt",
".",
"xlabel",
"(",
"'JD'",
")",
"plt",
".",
"title",
"(",
"'LC before signal masking'",
")",
"# time series after whitening",
"plt",
".",
"subplot",
"(",
"222",
")",
"plt",
".",
"plot",
"(",
"mtimes",
",",
"mmags",
",",
"marker",
"=",
"'.'",
",",
"color",
"=",
"'g'",
",",
"linestyle",
"=",
"'None'",
",",
"markersize",
"=",
"2.0",
",",
"markeredgewidth",
"=",
"0",
")",
"if",
"not",
"magsarefluxes",
":",
"plt",
".",
"gca",
"(",
")",
".",
"invert_yaxis",
"(",
")",
"plt",
".",
"ylabel",
"(",
"'magnitude'",
")",
"else",
":",
"plt",
".",
"ylabel",
"(",
"'fluxes'",
")",
"plt",
".",
"xlabel",
"(",
"'JD'",
")",
"plt",
".",
"title",
"(",
"'LC after signal masking'",
")",
"# phased series before whitening",
"plt",
".",
"subplot",
"(",
"223",
")",
"plt",
".",
"plot",
"(",
"phases",
",",
"smags",
",",
"marker",
"=",
"'.'",
",",
"color",
"=",
"'k'",
",",
"linestyle",
"=",
"'None'",
",",
"markersize",
"=",
"2.0",
",",
"markeredgewidth",
"=",
"0",
")",
"if",
"not",
"magsarefluxes",
":",
"plt",
".",
"gca",
"(",
")",
".",
"invert_yaxis",
"(",
")",
"plt",
".",
"ylabel",
"(",
"'magnitude'",
")",
"else",
":",
"plt",
".",
"ylabel",
"(",
"'fluxes'",
")",
"plt",
".",
"xlabel",
"(",
"'phase'",
")",
"plt",
".",
"title",
"(",
"'phased LC before signal masking'",
")",
"# phased series after whitening",
"plt",
".",
"subplot",
"(",
"224",
")",
"plt",
".",
"plot",
"(",
"mphases",
",",
"mmags",
",",
"marker",
"=",
"'.'",
",",
"color",
"=",
"'g'",
",",
"linestyle",
"=",
"'None'",
",",
"markersize",
"=",
"2.0",
",",
"markeredgewidth",
"=",
"0",
")",
"if",
"not",
"magsarefluxes",
":",
"plt",
".",
"gca",
"(",
")",
".",
"invert_yaxis",
"(",
")",
"plt",
".",
"ylabel",
"(",
"'magnitude'",
")",
"else",
":",
"plt",
".",
"ylabel",
"(",
"'fluxes'",
")",
"plt",
".",
"xlabel",
"(",
"'phase'",
")",
"plt",
".",
"title",
"(",
"'phased LC after signal masking'",
")",
"plt",
".",
"tight_layout",
"(",
")",
"plt",
".",
"savefig",
"(",
"plotfit",
",",
"format",
"=",
"'png'",
",",
"pad_inches",
"=",
"0.0",
")",
"plt",
".",
"close",
"(",
")",
"if",
"isinstance",
"(",
"plotfit",
",",
"str",
")",
"or",
"isinstance",
"(",
"plotfit",
",",
"Strio",
")",
":",
"returndict",
"[",
"'fitplotfile'",
"]",
"=",
"plotfit",
"return",
"returndict"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
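The core of mask_signal above is independent of the plotting: phase the light curve at the signal's period and epoch, then keep only the points that lie farther than maskphaselength in phase from every requested mask center. A standalone sketch of that step using only numpy, with synthetic arrays standing in for a sigma-clipped light curve:

import numpy as np

# synthetic stand-ins for a sigma-clipped light curve
stimes = np.sort(np.random.uniform(0.0, 30.0, size=2000))
smags = 12.0 + 0.01*np.random.randn(stimes.size)
signalperiod, signalepoch = 3.4, 0.7
maskphases, maskphaselength = (0.0, 0.5, 1.0), 0.1

# phase the light curve onto [0, 1)
phases = ((stimes - signalepoch)/signalperiod -
          np.floor((stimes - signalepoch)/signalperiod))

# a point survives only if it is farther than maskphaselength from
# every mask center; np.all combines the per-center boolean masks
masks = np.all(np.array([np.abs(phases - x) > maskphaselength
                         for x in maskphases]), axis=0)
mtimes, mmags, mphases = stimes[masks], smags[masks], phases[masks]
print('%d -> %d points after masking' % (stimes.size, mtimes.size))

Listing both 0.0 and 1.0 among the mask centers covers points on either side of the phase wrap, which is why the function's default maskphases includes both ends.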
valid
|
main
|
This launches the server. The current script args are shown below::
Usage: checkplotserver [OPTIONS]
Options:
--help show this help information
checkplotserver.py options:
--assetpath Sets the asset (server images, css, js, DB)
path for checkplotserver.
(default <astrobase install dir>
/astrobase/cpserver/cps-assets)
--baseurl Set the base URL of the checkplotserver.
This is useful when you're running
checkplotserver on a remote machine and are
reverse-proxying more than one instance of
it so you can access them using HTTP from
outside on different base URLs like
/cpserver1/, /cpserver2/, etc. If this is
set, all URLs will take the form
[baseurl]/..., instead of /... (default /)
--checkplotlist The path to the checkplot-filelist.json file
listing checkplots to load and serve. If
this is not provided, checkplotserver will
look for a checkplot-pickle-flist.json in
the directory that it was started in
--debugmode start up in debug mode if set to 1. (default
0)
--maxprocs Number of background processes to use for
saving/loading checkplot files and running
light curve tools (default 2)
--port Run on the given port. (default 5225)
--readonly Run the server in readonly mode. This is
useful for a public-facing instance of
checkplotserver where you just want to allow
collaborators to review objects but not edit
them. (default False)
--serve Bind to given address and serve content.
(default 127.0.0.1)
--sharedsecret a file containing a cryptographically secure
string that is used to authenticate requests
that come into the special standalone mode.
--standalone This starts the server in standalone mode.
(default 0)
tornado/log.py options:
--log-file-max-size max size of log files before rollover
(default 100000000)
--log-file-num-backups number of log files to keep (default 10)
--log-file-prefix=PATH Path prefix for log files. Note that if you
are running multiple tornado processes,
log_file_prefix must be different for each
of them (e.g. include the port number)
--log-rotate-interval The interval value of timed rotating
(default 1)
--log-rotate-mode The mode of rotating files (time or size)
(default size)
--log-rotate-when specify the type of TimedRotatingFileHandler
interval; other options: ('S', 'M', 'H', 'D',
'W0'-'W6') (default midnight)
--log-to-stderr Send log output to stderr (colorized if
possible). By default use stderr if
--log_file_prefix is not set and no other
logging is configured.
--logging=debug|info|warning|error|none
Set the Python log level. If 'none', tornado
won't touch the logging configuration.
(default info)
|
astrobase/cpserver/checkplotserver.py
|
def main():
'''
This launches the server. The current script args are shown below::
Usage: checkplotserver [OPTIONS]
Options:
--help show this help information
checkplotserver.py options:
--assetpath Sets the asset (server images, css, js, DB)
path for checkplotserver.
(default <astrobase install dir>
/astrobase/cpserver/cps-assets)
--baseurl Set the base URL of the checkplotserver.
This is useful when you're running
checkplotserver on a remote machine and are
reverse-proxying more than one instance of
it so you can access them using HTTP from
outside on different base URLs like
/cpserver1/, /cpserver2/, etc. If this is
set, all URLs will take the form
[baseurl]/..., instead of /... (default /)
--checkplotlist The path to the checkplot-filelist.json file
listing checkplots to load and serve. If
this is not provided, checkplotserver will
look for a checkplot-pickle-flist.json in
the directory that it was started in
--debugmode start up in debug mode if set to 1. (default
0)
--maxprocs Number of background processes to use for
saving/loading checkplot files and running
light curve tools (default 2)
--port Run on the given port. (default 5225)
--readonly Run the server in readonly mode. This is
useful for a public-facing instance of
checkplotserver where you just want to allow
collaborators to review objects but not edit
them. (default False)
--serve Bind to given address and serve content.
(default 127.0.0.1)
--sharedsecret a file containing a cryptographically secure
string that is used to authenticate requests
that come into the special standalone mode.
--standalone This starts the server in standalone mode.
(default 0)
tornado/log.py options:
--log-file-max-size max size of log files before rollover
(default 100000000)
--log-file-num-backups number of log files to keep (default 10)
--log-file-prefix=PATH Path prefix for log files. Note that if you
are running multiple tornado processes,
log_file_prefix must be different for each
of them (e.g. include the port number)
--log-rotate-interval The interval value of timed rotating
(default 1)
--log-rotate-mode The mode of rotating files (time or size)
(default size)
--log-rotate-when specify the type of TimedRotatingFileHandler
interval; other options: ('S', 'M', 'H', 'D',
'W0'-'W6') (default midnight)
--log-to-stderr Send log output to stderr (colorized if
possible). By default use stderr if
--log_file_prefix is not set and no other
logging is configured.
--logging=debug|info|warning|error|none
Set the Python log level. If 'none', tornado
won't touch the logging configuration.
(default info)
'''
# parse the command line
tornado.options.parse_command_line()
DEBUG = True if options.debugmode == 1 else False
# get a logger
LOGGER = logging.getLogger('checkplotserver')
if DEBUG:
LOGGER.setLevel(logging.DEBUG)
else:
LOGGER.setLevel(logging.INFO)
###################
## SET UP CONFIG ##
###################
MAXPROCS = options.maxprocs
ASSETPATH = options.assetpath
BASEURL = options.baseurl
###################################
## PERSISTENT CHECKPLOT EXECUTOR ##
###################################
EXECUTOR = ProcessPoolExecutor(MAXPROCS)
#######################################
## CHECK IF WE'RE IN STANDALONE MODE ##
#######################################
if options.standalone:
if ( (not options.sharedsecret) or
(options.sharedsecret and
not os.path.exists(options.sharedsecret)) ):
LOGGER.error('Could not find a shared secret file to use in \n'
'standalone mode. Generate one using: \n\n'
'python3 -c "import secrets; '
'print(secrets.token_urlsafe(32))" '
'> secret-key-file.txt\n\nSet user-only ro '
'permissions on the generated file (chmod 400)')
sys.exit(1)
elif options.sharedsecret and os.path.exists(options.sharedsecret):
# check if this file is readable/writeable by user only
fileperm = oct(os.stat(options.sharedsecret)[stat.ST_MODE])
if fileperm == '0100400' or fileperm == '0o100400':
with open(options.sharedsecret,'r') as infd:
SHAREDSECRET = infd.read().strip('\n')
# this is the URLSpec for the standalone Handler
standalonespec = (
r'/standalone',
cphandlers.StandaloneHandler,
{'executor':EXECUTOR,
'secret':SHAREDSECRET}
)
else:
LOGGER.error('permissions on the shared secret file '
'should be 0100400')
sys.exit(1)
else:
LOGGER.error('could not find the specified '
'shared secret file: %s' %
options.sharedsecret)
sys.exit(1)
# only one handler in standalone mode
HANDLERS = [standalonespec]
# if we're not in standalone mode, proceed normally
else:
if not BASEURL.endswith('/'):
BASEURL = BASEURL + '/'
READONLY = options.readonly
if READONLY:
LOGGER.warning('checkplotserver running in readonly mode.')
# this is the directory checkplotserver.py was executed from. used to
# figure out checkplot locations
CURRENTDIR = os.getcwd()
# if a checkplotlist is provided, then load it. NOTE: all paths in this
# file are relative to the path of the checkplotlist file itself.
cplistfile = options.checkplotlist
# if the provided cplistfile is OK
if cplistfile and os.path.exists(cplistfile):
with open(cplistfile,'r') as infd:
CHECKPLOTLIST = json.load(infd)
LOGGER.info('using provided checkplot list file: %s' % cplistfile)
# if a cplist is provided, but doesn't exist
elif cplistfile and not os.path.exists(cplistfile):
helpmsg = (
"Couldn't find the file %s\n"
"NOTE: To make a checkplot list file, "
"try running the following command:\n"
"python %s pkl "
"/path/to/folder/where/the/checkplot.pkl.gz/files/are" %
(cplistfile, os.path.join(modpath,'checkplotlist.py'))
)
LOGGER.error(helpmsg)
sys.exit(1)
# finally, if no cplistfile is provided at all, search for a
# checkplot-filelist.json in the current directory
else:
LOGGER.warning('No checkplot list file provided!\n'
'(use --checkplotlist=... for this, '
'or use --help to see all options)\n'
'looking for checkplot-filelist.json in the '
'current directory %s ...' % CURRENTDIR)
# this is for single checkplot lists
if os.path.exists(
os.path.join(CURRENTDIR,'checkplot-filelist.json')
):
cplistfile = os.path.join(CURRENTDIR,'checkplot-filelist.json')
with open(cplistfile,'r') as infd:
CHECKPLOTLIST = json.load(infd)
LOGGER.info('using checkplot list file: %s' % cplistfile)
# this is for chunked checkplot lists
elif os.path.exists(os.path.join(CURRENTDIR,
'checkplot-filelist-00.json')):
cplistfile = os.path.join(CURRENTDIR,
'checkplot-filelist-00.json')
with open(cplistfile,'r') as infd:
CHECKPLOTLIST = json.load(infd)
LOGGER.info('using checkplot list file: %s' % cplistfile)
# if we can't find a checkplot list, bail out
else:
helpmsg = (
"No checkplot file list JSON found, "
"can't continue without one.\n"
"Did you make a checkplot list file? "
"To make one, try running the following command:\n"
"checkplotlist pkl "
"/path/to/folder/where/the/checkplot.pkl.gz/files/are"
)
LOGGER.error(helpmsg)
sys.exit(1)
##################################
## URL HANDLERS FOR NORMAL MODE ##
##################################
HANDLERS = [
# index page
(r'{baseurl}'.format(baseurl=BASEURL),
cphandlers.IndexHandler,
{'currentdir':CURRENTDIR,
'assetpath':ASSETPATH,
'cplist':CHECKPLOTLIST,
'cplistfile':cplistfile,
'executor':EXECUTOR,
'readonly':READONLY,
'baseurl':BASEURL}),
# loads and interacts with checkplot pickles
(r'{baseurl}cp/?(.*)'.format(baseurl=BASEURL),
cphandlers.CheckplotHandler,
{'currentdir':CURRENTDIR,
'assetpath':ASSETPATH,
'cplist':CHECKPLOTLIST,
'cplistfile':cplistfile,
'executor':EXECUTOR,
'readonly':READONLY}),
# loads and interacts with the current checkplot list JSON file
(r'{baseurl}list'.format(baseurl=BASEURL),
cphandlers.CheckplotListHandler,
{'currentdir':CURRENTDIR,
'assetpath':ASSETPATH,
'cplist':CHECKPLOTLIST,
'cplistfile':cplistfile,
'executor':EXECUTOR,
'readonly':READONLY}),
# light curve variability and period-finding tool endpoints
(r'{baseurl}tools/?(.*)'.format(baseurl=BASEURL),
cphandlers.LCToolHandler,
{'currentdir':CURRENTDIR,
'assetpath':ASSETPATH,
'cplist':CHECKPLOTLIST,
'cplistfile':cplistfile,
'executor':EXECUTOR,
'readonly':READONLY}),
# download any file in the current base directory, mostly used for
# downloading checkplot pickles and updated checkplot list JSONs
(r'{baseurl}download/(.*)'.format(baseurl=BASEURL),
tornado.web.StaticFileHandler, {'path': CURRENTDIR})
]
#######################
## APPLICATION SETUP ##
#######################
app = tornado.web.Application(
handlers=HANDLERS,
static_path=ASSETPATH,
template_path=ASSETPATH,
static_url_prefix='{baseurl}static/'.format(baseurl=BASEURL),
compress_response=True,
debug=DEBUG,
)
# start up the HTTP server and our application. xheaders = True turns on
# X-Forwarded-For support so we can see the remote IP in the logs
http_server = tornado.httpserver.HTTPServer(app, xheaders=True)
######################
## start the server ##
######################
# make sure the port we're going to listen on is ok
# inspired by how Jupyter notebook does this
portok = False
serverport = options.port
maxtrys = 5
thistry = 0
while not portok and thistry < maxtrys:
try:
http_server.listen(serverport, options.serve)
portok = True
except socket.error as e:
LOGGER.warning('%s:%s is already in use, trying port %s' %
(options.serve, serverport, serverport + 1))
serverport = serverport + 1
if not portok:
LOGGER.error('could not find a free port after 5 tries, giving up')
sys.exit(1)
LOGGER.info('started checkplotserver. listening on http://%s:%s%s' %
(options.serve, serverport, BASEURL))
# register the signal callbacks
signal.signal(signal.SIGINT,_recv_sigint)
signal.signal(signal.SIGTERM,_recv_sigint)
# start the IOLoop and begin serving requests
try:
tornado.ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
LOGGER.info('received Ctrl-C: shutting down...')
tornado.ioloop.IOLoop.instance().stop()
# close down the processpool
EXECUTOR.shutdown()
time.sleep(3)
|
def main():
'''
This launches the server. The current script args are shown below::
Usage: checkplotserver [OPTIONS]
Options:
--help show this help information
checkplotserver.py options:
--assetpath Sets the asset (server images, css, js, DB)
path for checkplotserver.
(default <astrobase install dir>
/astrobase/cpserver/cps-assets)
--baseurl Set the base URL of the checkplotserver.
This is useful when you're running
checkplotserver on a remote machine and are
reverse-proxying more than one instance of
it so you can access them using HTTP from
outside on different base URLs like
/cpserver1/, /cpserver2/, etc. If this is
set, all URLs will take the form
[baseurl]/..., instead of /... (default /)
--checkplotlist The path to the checkplot-filelist.json file
listing checkplots to load and serve. If
this is not provided, checkplotserver will
look for a checkplot-pickle-flist.json in
the directory that it was started in
--debugmode start up in debug mode if set to 1. (default
0)
--maxprocs Number of background processes to use for
saving/loading checkplot files and running
light curve tools (default 2)
--port Run on the given port. (default 5225)
--readonly Run the server in readonly mode. This is
useful for a public-facing instance of
checkplotserver where you just want to allow
collaborators to review objects but not edit
them. (default False)
--serve Bind to given address and serve content.
(default 127.0.0.1)
--sharedsecret a file containing a cryptographically secure
string that is used to authenticate requests
that come into the special standalone mode.
--standalone This starts the server in standalone mode.
(default 0)
tornado/log.py options:
--log-file-max-size max size of log files before rollover
(default 100000000)
--log-file-num-backups number of log files to keep (default 10)
--log-file-prefix=PATH Path prefix for log files. Note that if you
are running multiple tornado processes,
log_file_prefix must be different for each
of them (e.g. include the port number)
--log-rotate-interval The interval value of timed rotating
(default 1)
--log-rotate-mode The mode of rotating files (time or size)
(default size)
--log-rotate-when specify the type of TimedRotatingFileHandler
interval; other options: ('S', 'M', 'H', 'D',
'W0'-'W6') (default midnight)
--log-to-stderr Send log output to stderr (colorized if
possible). By default use stderr if
--log_file_prefix is not set and no other
logging is configured.
--logging=debug|info|warning|error|none
Set the Python log level. If 'none', tornado
won't touch the logging configuration.
(default info)
'''
# parse the command line
tornado.options.parse_command_line()
DEBUG = True if options.debugmode == 1 else False
# get a logger
LOGGER = logging.getLogger('checkplotserver')
if DEBUG:
LOGGER.setLevel(logging.DEBUG)
else:
LOGGER.setLevel(logging.INFO)
###################
## SET UP CONFIG ##
###################
MAXPROCS = options.maxprocs
ASSETPATH = options.assetpath
BASEURL = options.baseurl
###################################
## PERSISTENT CHECKPLOT EXECUTOR ##
###################################
EXECUTOR = ProcessPoolExecutor(MAXPROCS)
#######################################
## CHECK IF WE'RE IN STANDALONE MODE ##
#######################################
if options.standalone:
if ( (not options.sharedsecret) or
(options.sharedsecret and
not os.path.exists(options.sharedsecret)) ):
LOGGER.error('Could not find a shared secret file to use in \n'
'standalone mode. Generate one using: \n\n'
'python3 -c "import secrets; '
'print(secrets.token_urlsafe(32))" '
'> secret-key-file.txt\n\nSet user-only ro '
'permissions on the generated file (chmod 400)')
sys.exit(1)
elif options.sharedsecret and os.path.exists(options.sharedsecret):
# check if this file is readable/writeable by user only
fileperm = oct(os.stat(options.sharedsecret)[stat.ST_MODE])
if fileperm == '0100400' or fileperm == '0o100400':
with open(options.sharedsecret,'r') as infd:
SHAREDSECRET = infd.read().strip('\n')
# this is the URLSpec for the standalone Handler
standalonespec = (
r'/standalone',
cphandlers.StandaloneHandler,
{'executor':EXECUTOR,
'secret':SHAREDSECRET}
)
else:
LOGGER.error('permissions on the shared secret file '
'should be 0100400')
sys.exit(1)
else:
LOGGER.error('could not find the specified '
'shared secret file: %s' %
options.sharedsecret)
sys.exit(1)
# only one handler in standalone mode
HANDLERS = [standalonespec]
# if we're not in standalone mode, proceed normally
else:
if not BASEURL.endswith('/'):
BASEURL = BASEURL + '/'
READONLY = options.readonly
if READONLY:
LOGGER.warning('checkplotserver running in readonly mode.')
# this is the directory checkplotserver.py was executed from. used to
# figure out checkplot locations
CURRENTDIR = os.getcwd()
# if a checkplotlist is provided, then load it. NOTE: all paths in this
# file are relative to the path of the checkplotlist file itself.
cplistfile = options.checkplotlist
# if the provided cplistfile is OK
if cplistfile and os.path.exists(cplistfile):
with open(cplistfile,'r') as infd:
CHECKPLOTLIST = json.load(infd)
LOGGER.info('using provided checkplot list file: %s' % cplistfile)
# if a cplist is provided, but doesn't exist
elif cplistfile and not os.path.exists(cplistfile):
helpmsg = (
"Couldn't find the file %s\n"
"NOTE: To make a checkplot list file, "
"try running the following command:\n"
"python %s pkl "
"/path/to/folder/where/the/checkplot.pkl.gz/files/are" %
(cplistfile, os.path.join(modpath,'checkplotlist.py'))
)
LOGGER.error(helpmsg)
sys.exit(1)
# finally, if no cplistfile is provided at all, search for a
# checkplot-filelist.json in the current directory
else:
LOGGER.warning('No checkplot list file provided!\n'
'(use --checkplotlist=... for this, '
'or use --help to see all options)\n'
'looking for checkplot-filelist.json in the '
'current directory %s ...' % CURRENTDIR)
# this is for single checkplot lists
if os.path.exists(
os.path.join(CURRENTDIR,'checkplot-filelist.json')
):
cplistfile = os.path.join(CURRENTDIR,'checkplot-filelist.json')
with open(cplistfile,'r') as infd:
CHECKPLOTLIST = json.load(infd)
LOGGER.info('using checkplot list file: %s' % cplistfile)
# this is for chunked checkplot lists
elif os.path.exists(os.path.join(CURRENTDIR,
'checkplot-filelist-00.json')):
cplistfile = os.path.join(CURRENTDIR,
'checkplot-filelist-00.json')
with open(cplistfile,'r') as infd:
CHECKPLOTLIST = json.load(infd)
LOGGER.info('using checkplot list file: %s' % cplistfile)
# if we can't find a checkplot list, bail out
else:
helpmsg = (
"No checkplot file list JSON found, "
"can't continue without one.\n"
"Did you make a checkplot list file? "
"To make one, try running the following command:\n"
"checkplotlist pkl "
"/path/to/folder/where/the/checkplot.pkl.gz/files/are"
)
LOGGER.error(helpmsg)
sys.exit(1)
##################################
## URL HANDLERS FOR NORMAL MODE ##
##################################
HANDLERS = [
# index page
(r'{baseurl}'.format(baseurl=BASEURL),
cphandlers.IndexHandler,
{'currentdir':CURRENTDIR,
'assetpath':ASSETPATH,
'cplist':CHECKPLOTLIST,
'cplistfile':cplistfile,
'executor':EXECUTOR,
'readonly':READONLY,
'baseurl':BASEURL}),
# loads and interacts with checkplot pickles
(r'{baseurl}cp/?(.*)'.format(baseurl=BASEURL),
cphandlers.CheckplotHandler,
{'currentdir':CURRENTDIR,
'assetpath':ASSETPATH,
'cplist':CHECKPLOTLIST,
'cplistfile':cplistfile,
'executor':EXECUTOR,
'readonly':READONLY}),
# loads and interacts with the current checkplot list JSON file
(r'{baseurl}list'.format(baseurl=BASEURL),
cphandlers.CheckplotListHandler,
{'currentdir':CURRENTDIR,
'assetpath':ASSETPATH,
'cplist':CHECKPLOTLIST,
'cplistfile':cplistfile,
'executor':EXECUTOR,
'readonly':READONLY}),
# light curve variability and period-finding tool endpoints
(r'{baseurl}tools/?(.*)'.format(baseurl=BASEURL),
cphandlers.LCToolHandler,
{'currentdir':CURRENTDIR,
'assetpath':ASSETPATH,
'cplist':CHECKPLOTLIST,
'cplistfile':cplistfile,
'executor':EXECUTOR,
'readonly':READONLY}),
# download any file in the current base directory, mostly used for
# downloading checkplot pickles and updated checkplot list JSONs
(r'{baseurl}download/(.*)'.format(baseurl=BASEURL),
tornado.web.StaticFileHandler, {'path': CURRENTDIR})
]
#######################
## APPLICATION SETUP ##
#######################
app = tornado.web.Application(
handlers=HANDLERS,
static_path=ASSETPATH,
template_path=ASSETPATH,
static_url_prefix='{baseurl}static/'.format(baseurl=BASEURL),
compress_response=True,
debug=DEBUG,
)
# start up the HTTP server and our application. xheaders = True turns on
# X-Forwarded-For support so we can see the remote IP in the logs
http_server = tornado.httpserver.HTTPServer(app, xheaders=True)
######################
## start the server ##
######################
# make sure the port we're going to listen on is ok
# inspired by how Jupyter notebook does this
portok = False
serverport = options.port
maxtrys = 5
thistry = 0
while not portok and thistry < maxtrys:
try:
http_server.listen(serverport, options.serve)
portok = True
except socket.error as e:
LOGGER.warning('%s:%s is already in use, trying port %s' %
(options.serve, serverport, serverport + 1))
serverport = serverport + 1
if not portok:
LOGGER.error('could not find a free port after 5 tries, giving up')
sys.exit(1)
LOGGER.info('started checkplotserver. listening on http://%s:%s%s' %
(options.serve, serverport, BASEURL))
# register the signal callbacks
signal.signal(signal.SIGINT,_recv_sigint)
signal.signal(signal.SIGTERM,_recv_sigint)
# start the IOLoop and begin serving requests
try:
tornado.ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
LOGGER.info('received Ctrl-C: shutting down...')
tornado.ioloop.IOLoop.instance().stop()
# close down the processpool
EXECUTOR.shutdown()
time.sleep(3)
|
[
"This",
"launches",
"the",
"server",
".",
"The",
"current",
"script",
"args",
"are",
"shown",
"below",
"::"
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/cpserver/checkplotserver.py#L153-L498
|
[
"def",
"main",
"(",
")",
":",
"# parse the command line",
"tornado",
".",
"options",
".",
"parse_command_line",
"(",
")",
"DEBUG",
"=",
"True",
"if",
"options",
".",
"debugmode",
"==",
"1",
"else",
"False",
"# get a logger",
"LOGGER",
"=",
"logging",
".",
"getLogger",
"(",
"'checkplotserver'",
")",
"if",
"DEBUG",
":",
"LOGGER",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")",
"else",
":",
"LOGGER",
".",
"setLevel",
"(",
"logging",
".",
"INFO",
")",
"###################",
"## SET UP CONFIG ##",
"###################",
"MAXPROCS",
"=",
"options",
".",
"maxprocs",
"ASSETPATH",
"=",
"options",
".",
"assetpath",
"BASEURL",
"=",
"options",
".",
"baseurl",
"###################################",
"## PERSISTENT CHECKPLOT EXECUTOR ##",
"###################################",
"EXECUTOR",
"=",
"ProcessPoolExecutor",
"(",
"MAXPROCS",
")",
"#######################################",
"## CHECK IF WE'RE IN STANDALONE MODE ##",
"#######################################",
"if",
"options",
".",
"standalone",
":",
"if",
"(",
"(",
"not",
"options",
".",
"sharedsecret",
")",
"or",
"(",
"options",
".",
"sharedsecret",
"and",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"options",
".",
"sharedsecret",
")",
")",
")",
":",
"LOGGER",
".",
"error",
"(",
"'Could not find a shared secret file to use in \\n'",
"'standalone mode. Generate one using: \\n\\n'",
"'python3 -c \"import secrets; '",
"'print(secrets.token_urlsafe(32))\" '",
"'> secret-key-file.txt\\n\\nSet user-only ro '",
"'permissions on the generated file (chmod 400)'",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"elif",
"options",
".",
"sharedsecret",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"options",
".",
"sharedsecret",
")",
":",
"# check if this file is readable/writeable by user only",
"fileperm",
"=",
"oct",
"(",
"os",
".",
"stat",
"(",
"options",
".",
"sharedsecret",
")",
"[",
"stat",
".",
"ST_MODE",
"]",
")",
"if",
"fileperm",
"==",
"'0100400'",
"or",
"fileperm",
"==",
"'0o100400'",
":",
"with",
"open",
"(",
"options",
".",
"sharedsecret",
",",
"'r'",
")",
"as",
"infd",
":",
"SHAREDSECRET",
"=",
"infd",
".",
"read",
"(",
")",
".",
"strip",
"(",
"'\\n'",
")",
"# this is the URLSpec for the standalone Handler",
"standalonespec",
"=",
"(",
"r'/standalone'",
",",
"cphandlers",
".",
"StandaloneHandler",
",",
"{",
"'executor'",
":",
"EXECUTOR",
",",
"'secret'",
":",
"SHAREDSECRET",
"}",
")",
"else",
":",
"LOGGER",
".",
"error",
"(",
"'permissions on the shared secret file '",
"'should be 0100400'",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"else",
":",
"LOGGER",
".",
"error",
"(",
"'could not find the specified '",
"'shared secret file: %s'",
"%",
"options",
".",
"sharedsecret",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# only one handler in standalone mode",
"HANDLERS",
"=",
"[",
"standalonespec",
"]",
"# if we're not in standalone mode, proceed normally",
"else",
":",
"if",
"not",
"BASEURL",
".",
"endswith",
"(",
"'/'",
")",
":",
"BASEURL",
"=",
"BASEURL",
"+",
"'/'",
"READONLY",
"=",
"options",
".",
"readonly",
"if",
"READONLY",
":",
"LOGGER",
".",
"warning",
"(",
"'checkplotserver running in readonly mode.'",
")",
"# this is the directory checkplotserver.py was executed from. used to",
"# figure out checkplot locations",
"CURRENTDIR",
"=",
"os",
".",
"getcwd",
"(",
")",
"# if a checkplotlist is provided, then load it. NOTE: all paths in this",
"# file are relative to the path of the checkplotlist file itself.",
"cplistfile",
"=",
"options",
".",
"checkplotlist",
"# if the provided cplistfile is OK",
"if",
"cplistfile",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"cplistfile",
")",
":",
"with",
"open",
"(",
"cplistfile",
",",
"'r'",
")",
"as",
"infd",
":",
"CHECKPLOTLIST",
"=",
"json",
".",
"load",
"(",
"infd",
")",
"LOGGER",
".",
"info",
"(",
"'using provided checkplot list file: %s'",
"%",
"cplistfile",
")",
"# if a cplist is provided, but doesn't exist",
"elif",
"cplistfile",
"and",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"cplistfile",
")",
":",
"helpmsg",
"=",
"(",
"\"Couldn't find the file %s\\n\"",
"\"NOTE: To make a checkplot list file, \"",
"\"try running the following command:\\n\"",
"\"python %s pkl \"",
"\"/path/to/folder/where/the/checkplot.pkl.gz/files/are\"",
"%",
"(",
"cplistfile",
",",
"os",
".",
"path",
".",
"join",
"(",
"modpath",
",",
"'checkplotlist.py'",
")",
")",
")",
"LOGGER",
".",
"error",
"(",
"helpmsg",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# finally, if no cplistfile is provided at all, search for a",
"# checkplot-filelist.json in the current directory",
"else",
":",
"LOGGER",
".",
"warning",
"(",
"'No checkplot list file provided!\\n'",
"'(use --checkplotlist=... for this, '",
"'or use --help to see all options)\\n'",
"'looking for checkplot-filelist.json in the '",
"'current directory %s ...'",
"%",
"CURRENTDIR",
")",
"# this is for single checkplot lists",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"CURRENTDIR",
",",
"'checkplot-filelist.json'",
")",
")",
":",
"cplistfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"CURRENTDIR",
",",
"'checkplot-filelist.json'",
")",
"with",
"open",
"(",
"cplistfile",
",",
"'r'",
")",
"as",
"infd",
":",
"CHECKPLOTLIST",
"=",
"json",
".",
"load",
"(",
"infd",
")",
"LOGGER",
".",
"info",
"(",
"'using checkplot list file: %s'",
"%",
"cplistfile",
")",
"# this is for chunked checkplot lists",
"elif",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"CURRENTDIR",
",",
"'checkplot-filelist-00.json'",
")",
")",
":",
"cplistfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"CURRENTDIR",
",",
"'checkplot-filelist-00.json'",
")",
"with",
"open",
"(",
"cplistfile",
",",
"'r'",
")",
"as",
"infd",
":",
"CHECKPLOTLIST",
"=",
"json",
".",
"load",
"(",
"infd",
")",
"LOGGER",
".",
"info",
"(",
"'using checkplot list file: %s'",
"%",
"cplistfile",
")",
"# if we can't find a checkplot list, bail out",
"else",
":",
"helpmsg",
"=",
"(",
"\"No checkplot file list JSON found, \"",
"\"can't continue without one.\\n\"",
"\"Did you make a checkplot list file? \"",
"\"To make one, try running the following command:\\n\"",
"\"checkplotlist pkl \"",
"\"/path/to/folder/where/the/checkplot.pkl.gz/files/are\"",
")",
"LOGGER",
".",
"error",
"(",
"helpmsg",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"##################################",
"## URL HANDLERS FOR NORMAL MODE ##",
"##################################",
"HANDLERS",
"=",
"[",
"# index page",
"(",
"r'{baseurl}'",
".",
"format",
"(",
"baseurl",
"=",
"BASEURL",
")",
",",
"cphandlers",
".",
"IndexHandler",
",",
"{",
"'currentdir'",
":",
"CURRENTDIR",
",",
"'assetpath'",
":",
"ASSETPATH",
",",
"'cplist'",
":",
"CHECKPLOTLIST",
",",
"'cplistfile'",
":",
"cplistfile",
",",
"'executor'",
":",
"EXECUTOR",
",",
"'readonly'",
":",
"READONLY",
",",
"'baseurl'",
":",
"BASEURL",
"}",
")",
",",
"# loads and interacts with checkplot pickles",
"(",
"r'{baseurl}cp/?(.*)'",
".",
"format",
"(",
"baseurl",
"=",
"BASEURL",
")",
",",
"cphandlers",
".",
"CheckplotHandler",
",",
"{",
"'currentdir'",
":",
"CURRENTDIR",
",",
"'assetpath'",
":",
"ASSETPATH",
",",
"'cplist'",
":",
"CHECKPLOTLIST",
",",
"'cplistfile'",
":",
"cplistfile",
",",
"'executor'",
":",
"EXECUTOR",
",",
"'readonly'",
":",
"READONLY",
"}",
")",
",",
"# loads and interacts with the current checkplot list JSON file",
"(",
"r'{baseurl}list'",
".",
"format",
"(",
"baseurl",
"=",
"BASEURL",
")",
",",
"cphandlers",
".",
"CheckplotListHandler",
",",
"{",
"'currentdir'",
":",
"CURRENTDIR",
",",
"'assetpath'",
":",
"ASSETPATH",
",",
"'cplist'",
":",
"CHECKPLOTLIST",
",",
"'cplistfile'",
":",
"cplistfile",
",",
"'executor'",
":",
"EXECUTOR",
",",
"'readonly'",
":",
"READONLY",
"}",
")",
",",
"# light curve variability and period-finding tool endpoints",
"(",
"r'{baseurl}tools/?(.*)'",
".",
"format",
"(",
"baseurl",
"=",
"BASEURL",
")",
",",
"cphandlers",
".",
"LCToolHandler",
",",
"{",
"'currentdir'",
":",
"CURRENTDIR",
",",
"'assetpath'",
":",
"ASSETPATH",
",",
"'cplist'",
":",
"CHECKPLOTLIST",
",",
"'cplistfile'",
":",
"cplistfile",
",",
"'executor'",
":",
"EXECUTOR",
",",
"'readonly'",
":",
"READONLY",
"}",
")",
",",
"# download any file in the current base directory, mostly used for",
"# downloading checkplot pickles and updated checkplot list JSONs",
"(",
"r'{baseurl}download/(.*)'",
".",
"format",
"(",
"baseurl",
"=",
"BASEURL",
")",
",",
"tornado",
".",
"web",
".",
"StaticFileHandler",
",",
"{",
"'path'",
":",
"CURRENTDIR",
"}",
")",
"]",
"#######################",
"## APPLICATION SETUP ##",
"#######################",
"app",
"=",
"tornado",
".",
"web",
".",
"Application",
"(",
"handlers",
"=",
"HANDLERS",
",",
"static_path",
"=",
"ASSETPATH",
",",
"template_path",
"=",
"ASSETPATH",
",",
"static_url_prefix",
"=",
"'{baseurl}static/'",
".",
"format",
"(",
"baseurl",
"=",
"BASEURL",
")",
",",
"compress_response",
"=",
"True",
",",
"debug",
"=",
"DEBUG",
",",
")",
"# start up the HTTP server and our application. xheaders = True turns on",
"# X-Forwarded-For support so we can see the remote IP in the logs",
"http_server",
"=",
"tornado",
".",
"httpserver",
".",
"HTTPServer",
"(",
"app",
",",
"xheaders",
"=",
"True",
")",
"######################",
"## start the server ##",
"######################",
"# make sure the port we're going to listen on is ok",
"# inspired by how Jupyter notebook does this",
"portok",
"=",
"False",
"serverport",
"=",
"options",
".",
"port",
"maxtrys",
"=",
"5",
"thistry",
"=",
"0",
"while",
"not",
"portok",
"and",
"thistry",
"<",
"maxtrys",
":",
"try",
":",
"http_server",
".",
"listen",
"(",
"serverport",
",",
"options",
".",
"serve",
")",
"portok",
"=",
"True",
"except",
"socket",
".",
"error",
"as",
"e",
":",
"LOGGER",
".",
"warning",
"(",
"'%s:%s is already in use, trying port %s'",
"%",
"(",
"options",
".",
"serve",
",",
"serverport",
",",
"serverport",
"+",
"1",
")",
")",
"serverport",
"=",
"serverport",
"+",
"1",
"if",
"not",
"portok",
":",
"LOGGER",
".",
"error",
"(",
"'could not find a free port after 5 tries, giving up'",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"LOGGER",
".",
"info",
"(",
"'started checkplotserver. listening on http://%s:%s%s'",
"%",
"(",
"options",
".",
"serve",
",",
"serverport",
",",
"BASEURL",
")",
")",
"# register the signal callbacks",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGINT",
",",
"_recv_sigint",
")",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGTERM",
",",
"_recv_sigint",
")",
"# start the IOLoop and begin serving requests",
"try",
":",
"tornado",
".",
"ioloop",
".",
"IOLoop",
".",
"instance",
"(",
")",
".",
"start",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"LOGGER",
".",
"info",
"(",
"'received Ctrl-C: shutting down...'",
")",
"tornado",
".",
"ioloop",
".",
"IOLoop",
".",
"instance",
"(",
")",
".",
"stop",
"(",
")",
"# close down the processpool",
"EXECUTOR",
".",
"shutdown",
"(",
")",
"time",
".",
"sleep",
"(",
"3",
")"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
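One detail worth noting in main above: the port-retry loop never increments thistry, so its maxtrys guard is inert and the loop exits only once listen succeeds. The retry pattern itself is easy to demonstrate in isolation; this sketch uses a plain standard-library socket instead of tornado's HTTPServer, so only the bind call differs, and it bounds the retries explicitly:

import socket

def bind_first_free_port(address='127.0.0.1', startport=5225, maxtries=5):
    # try startport, startport+1, ... until a bind succeeds
    for offset in range(maxtries):
        port = startport + offset
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.bind((address, port))
            return sock, port
        except OSError:
            # port already in use (or otherwise unbindable); try the next
            sock.close()
    raise RuntimeError('no free port after %d tries' % maxtries)

sock, port = bind_first_free_port()
print('bound to port', port)
sock.close()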
valid
|
find_lc_timegroups
|
Finds gaps in the provided time-series and indexes them into groups.
This finds the gaps in the provided `lctimes` array, so we can figure out
which times are for consecutive observations and which represent gaps
between seasons or observing eras.
Parameters
----------
lctimes : array-like
This contains the times to analyze for gaps; assumed to be some form of
Julian date.
mingap : float
This defines the minimum difference between consecutive measurements
required to consider them as parts of different timegroups. By
default it is set to 4.0 days.
Returns
-------
tuple
A tuple of the form: `(ngroups, [slice(start_ind_1, end_ind_1), ...])`
is returned. This contains the number of groups as the first element,
and a list of Python `slice` objects for each time-group found. These
can be used directly to index into the array of times to quickly get
measurements associated with each group.
|
astrobase/lcmath.py
|
def find_lc_timegroups(lctimes, mingap=4.0):
'''Finds gaps in the provided time-series and indexes them into groups.
This finds the gaps in the provided `lctimes` array, so we can figure out
which times are for consecutive observations and which represent gaps
between seasons or observing eras.
Parameters
----------
lctimes : array-like
This contains the times to analyze for gaps; assumed to be some form of
Julian date.
mingap : float
This defines the minimum difference between consecutive measurements
required to consider them as parts of different timegroups. By
default it is set to 4.0 days.
Returns
-------
tuple
A tuple of the form: `(ngroups, [slice(start_ind_1, end_ind_1), ...])`
is returned. This contains the number of groups as the first element,
and a list of Python `slice` objects for each time-group found. These
can be used directly to index into the array of times to quickly get
measurements associated with each group.
'''
lc_time_diffs = np.diff(lctimes)
group_start_indices = np.where(lc_time_diffs > mingap)[0]
if len(group_start_indices) > 0:
group_indices = []
for i, gindex in enumerate(group_start_indices):
if i == 0:
group_indices.append(slice(0,gindex+1))
else:
group_indices.append(slice(group_start_indices[i-1]+1,gindex+1))
# at the end, add the slice for the last group to the end of the times
# array
group_indices.append(slice(group_start_indices[-1]+1,len(lctimes)))
# if there's no large gap in the LC, then there's only one group to worry
# about
else:
group_indices = [slice(0,len(lctimes))]
return len(group_indices), group_indices
|
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/lcmath.py#L58-L114
|
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
normalize_magseries
|
This normalizes the magnitude time-series to a specified value.
This is used to normalize time series measurements that may have large time
gaps and vertical offsets in mag/flux measurement between these
'timegroups', either due to instrument changes or different filters.
NOTE: this works in-place! The mags array will be replaced with normalized
mags when this function finishes.
Parameters
----------
times,mags : array-like
The times (assumed to be some form of JD) and mags (or flux)
measurements to be normalized.
mingap : float
This defines the largest gap allowed between consecutive measurements for
them to still be considered parts of the same timegroup; any larger gap
starts a new timegroup. By default it is set to 4.0 days.
normto : {'globalmedian', 'zero'} or a float
Specifies the normalization type::
'globalmedian' -> norms each mag to the global median of the LC column
'zero' -> norms each mag to zero
a float -> norms each mag to this specified float value.
magsarefluxes : bool
Indicates if the input `mags` array is actually an array of flux
measurements instead of magnitude measurements. If this is set to True,
then:
- if `normto` is 'zero', then each observation's flux value is divided by
the median flux to yield normalized fluxes with 1.0 as the global
median.
- if `normto` is 'globalmedian', then the global median flux value
across the entire time series is multiplied with each measurement.
- if `normto` is set to a `float`, then this number is multiplied with the
flux value for each measurement.
debugmode : bool
If this is True, will print out verbose info on each timegroup found.
Returns
-------
times,normalized_mags : np.arrays
Normalized magnitude values after normalization. If normalization fails
for some reason, `times` and `normalized_mags` will both be None.
|
astrobase/lcmath.py
|
def normalize_magseries(times,
mags,
mingap=4.0,
normto='globalmedian',
magsarefluxes=False,
debugmode=False):
'''This normalizes the magnitude time-series to a specified value.
This is used to normalize time series measurements that may have large time
gaps and vertical offsets in mag/flux measurement between these
'timegroups', either due to instrument changes or different filters.
NOTE: this works in-place! The mags array will be replaced with normalized
mags when this function finishes.
Parameters
----------
times,mags : array-like
The times (assumed to be some form of JD) and mags (or flux)
measurements to be normalized.
mingap : float
This defines the largest gap allowed between consecutive measurements for
them to still be considered parts of the same timegroup; any larger gap
starts a new timegroup. By default it is set to 4.0 days.
normto : {'globalmedian', 'zero'} or a float
Specifies the normalization type::
'globalmedian' -> norms each mag to the global median of the LC column
'zero' -> norms each mag to zero
a float -> norms each mag to this specified float value.
magsarefluxes : bool
Indicates if the input `mags` array is actually an array of flux
measurements instead of magnitude measurements. If this is set to True,
then:
- if `normto` is 'zero', then each observation's flux value is divided by
the median flux to yield normalized fluxes with 1.0 as the global
median.
- if `normto` is 'globalmedian', then the global median flux value
across the entire time series is multiplied with each measurement.
- if `normto` is set to a `float`, then this number is multiplied with the
flux value for each measurement.
debugmode : bool
If this is True, will print out verbose info on each timegroup found.
Returns
-------
times,normalized_mags : np.arrays
Normalized magnitude values after normalization. If normalization fails
for some reason, `times` and `normalized_mags` will both be None.
'''
ngroups, timegroups = find_lc_timegroups(times,
mingap=mingap)
# find all the non-nan indices
finite_ind = np.isfinite(mags)
if any(finite_ind):
# find the global median
global_mag_median = np.median(mags[finite_ind])
# go through the groups and normalize them to the median for
# each group
for tgind, tg in enumerate(timegroups):
finite_ind = np.isfinite(mags[tg])
# find this timegroup's median mag and normalize the mags in
# it to this median
group_median = np.median((mags[tg])[finite_ind])
if magsarefluxes:
mags[tg] = mags[tg]/group_median
else:
mags[tg] = mags[tg] - group_median
if debugmode:
LOGDEBUG('group %s: elems %s, '
'finite elems %s, median mag %s' %
(tgind,
len(mags[tg]),
len(finite_ind),
group_median))
# now that each group is normalized to 0.0, add the requested offset
# (the global median or a given float) back to all the mags
if isinstance(normto, str) and normto == 'globalmedian':
if magsarefluxes:
mags = mags * global_mag_median
else:
mags = mags + global_mag_median
# if the normto is a float, add everything to that float and return
elif isinstance(normto, float):
if magsarefluxes:
mags = mags * normto
else:
mags = mags + normto
# anything else just returns the normalized mags as usual
return times, mags
else:
LOGERROR('measurements are all nan!')
return None, None
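
A short, hedged sketch of normalizing a two-season light curve (the arrays are made up; note the `.copy()`, since the function modifies the mags array in place):

import numpy as np
from astrobase.lcmath import normalize_magseries

times = np.array([100.0, 101.0, 102.0, 130.0, 131.0, 132.0])
# the second timegroup is offset by +0.5 mag (e.g. a filter change)
mags = np.array([12.0, 12.1, 11.9, 12.5, 12.6, 12.4])

ntimes, nmags = normalize_magseries(times, mags.copy(),
                                    mingap=4.0, normto='globalmedian')
# each timegroup is median-subtracted, then the global median (12.25 here)
# is added back, so both seasons end up on a common level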
|
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/lcmath.py#L118-L235
|
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
sigclip_magseries
|
Sigma-clips a magnitude or flux time-series.
Selects the finite times, magnitudes (or fluxes), and errors from the passed
values, and applies symmetric or asymmetric sigma clipping to them.
Parameters
----------
times,mags,errs : np.array
The magnitude or flux time-series arrays to sigma-clip. This doesn't
assume all values are finite or that they're positive/negative. All of
these arrays will have their non-finite elements removed, and then will
be sigma-clipped based on the arguments to this function.
`errs` is optional. Set it to None if you don't have values for these. A
'faked' `errs` array will be generated if necessary, which can be
ignored in the output as well.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]` will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
iterative : bool
If this is set to True, will perform iterative sigma-clipping. If
`niterations` is not set and this is True, sigma-clipping is iterated
until no more points are removed.
niterations : int
The maximum number of iterations to perform for sigma-clipping. If None,
the `iterative` arg takes precedence, and `iterative=True` will
sigma-clip until no more points are removed. If `niterations` is not
None and `iterative` is False, `niterations` takes precedence and
iteration will occur for the specified number of iterations.
meanormedian : {'mean', 'median'}
Use 'mean' for sigma-clipping based on the mean value, or 'median' for
sigma-clipping based on the median value. Default is 'median'.
magsarefluxes : bool
True if your "mags" are in fact fluxes, i.e. if "fainter" corresponds to
`mags` getting smaller.
Returns
-------
(stimes, smags, serrs) : tuple
The sigma-clipped and nan-stripped time-series.
|
astrobase/lcmath.py
|
def sigclip_magseries(times, mags, errs,
sigclip=None,
iterative=False,
niterations=None,
meanormedian='median',
magsarefluxes=False):
'''Sigma-clips a magnitude or flux time-series.
Selects the finite times, magnitudes (or fluxes), and errors from the passed
values, and applies symmetric or asymmetric sigma clipping to them.
Parameters
----------
times,mags,errs : np.array
The magnitude or flux time-series arrays to sigma-clip. This doesn't
assume all values are finite or that they're positive/negative. All of
these arrays will have their non-finite elements removed, and then will
be sigma-clipped based on the arguments to this function.
`errs` is optional. Set it to None if you don't have values for these. A
'faked' `errs` array will be generated if necessary, which can be
ignored in the output as well.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]` will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
iterative : bool
If this is set to True, will perform iterative sigma-clipping. If
`niterations` is not set and this is True, sigma-clipping is iterated
until no more points are removed.
niterations : int
The maximum number of iterations to perform for sigma-clipping. If None,
the `iterative` arg takes precedence, and `iterative=True` will
sigma-clip until no more points are removed. If `niterations` is not
None and `iterative` is False, `niterations` takes precedence and
iteration will occur for the specified number of iterations.
meanormedian : {'mean', 'median'}
Use 'mean' for sigma-clipping based on the mean value, or 'median' for
sigma-clipping based on the median value. Default is 'median'.
magsarefluxes : bool
True if your "mags" are in fact fluxes, i.e. if "fainter" corresponds to
`mags` getting smaller.
Returns
-------
(stimes, smags, serrs) : tuple
The sigma-clipped and nan-stripped time-series.
'''
returnerrs = True
# fake the errors if they don't exist
# this is inconsequential to sigma-clipping
# we don't return these dummy values if the input errs are None
if errs is None:
# assume 0.1% errors if not given
# this should work for mags and fluxes
errs = 0.001*mags
returnerrs = False
# filter the input times, mags, errs; do sigclipping and normalization
find = npisfinite(times) & npisfinite(mags) & npisfinite(errs)
ftimes, fmags, ferrs = times[find], mags[find], errs[find]
# get the center value and stdev
if meanormedian == 'median': # stddev = 1.483 x MAD
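        # (for normally distributed data, sigma ~= 1.4826 * MAD, hence 1.483)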
center_mag = npmedian(fmags)
stddev_mag = (npmedian(npabs(fmags - center_mag))) * 1.483
elif meanormedian == 'mean':
center_mag = npmean(fmags)
stddev_mag = npstddev(fmags)
else:
LOGWARNING("unrecognized meanormedian value given to "
"sigclip_magseries: %s, defaulting to 'median'" %
meanormedian)
meanormedian = 'median'
center_mag = npmedian(fmags)
stddev_mag = (npmedian(npabs(fmags - center_mag))) * 1.483
# sigclip next for a single sigclip value
if sigclip and isinstance(sigclip, (float, int)):
if not iterative and niterations is None:
sigind = (npabs(fmags - center_mag)) < (sigclip * stddev_mag)
stimes = ftimes[sigind]
smags = fmags[sigind]
serrs = ferrs[sigind]
else:
#
# iterative version adapted from scipy.stats.sigmaclip
#
# First, if niterations is not set, iterate until convergence
if niterations is None:
delta = 1
this_times = ftimes
this_mags = fmags
this_errs = ferrs
while delta:
if meanormedian == 'mean':
this_center = npmean(this_mags)
this_stdev = npstddev(this_mags)
elif meanormedian == 'median':
this_center = npmedian(this_mags)
this_stdev = (
npmedian(npabs(this_mags - this_center))
) * 1.483
this_size = this_mags.size
# apply the sigclip
tsi = (
(npabs(this_mags - this_center)) <
(sigclip * this_stdev)
)
# update the arrays
this_times = this_times[tsi]
this_mags = this_mags[tsi]
this_errs = this_errs[tsi]
# update delta and go to the top of the loop
delta = this_size - this_mags.size
else: # If iterating only a certain number of times
this_times = ftimes
this_mags = fmags
this_errs = ferrs
iter_num = 0
delta = 1
while iter_num < niterations and delta:
if meanormedian == 'mean':
this_center = npmean(this_mags)
this_stdev = npstddev(this_mags)
elif meanormedian == 'median':
this_center = npmedian(this_mags)
this_stdev = (npmedian(npabs(this_mags -
this_center))) * 1.483
this_size = this_mags.size
# apply the sigclip
tsi = (
(npabs(this_mags - this_center)) <
(sigclip * this_stdev)
)
# update the arrays
this_times = this_times[tsi]
this_mags = this_mags[tsi]
this_errs = this_errs[tsi]
# update the number of iterations and delta and
# go to the top of the loop
delta = this_size - this_mags.size
iter_num += 1
# final sigclipped versions
stimes, smags, serrs = this_times, this_mags, this_errs
# this handles sigclipping for asymmetric +ve and -ve clip values
elif sigclip and isinstance(sigclip, (list,tuple)) and len(sigclip) == 2:
# sigclip is passed as [dimmingclip, brighteningclip]
dimmingclip = sigclip[0]
brighteningclip = sigclip[1]
if not iterative and niterations is None:
if magsarefluxes:
nottoodimind = (
(fmags - center_mag) > (-dimmingclip*stddev_mag)
)
nottoobrightind = (
(fmags - center_mag) < (brighteningclip*stddev_mag)
)
else:
nottoodimind = (
(fmags - center_mag) < (dimmingclip*stddev_mag)
)
nottoobrightind = (
(fmags - center_mag) > (-brighteningclip*stddev_mag)
)
sigind = nottoodimind & nottoobrightind
stimes = ftimes[sigind]
smags = fmags[sigind]
serrs = ferrs[sigind]
else:
#
# iterative version adapted from scipy.stats.sigmaclip
#
if niterations is None:
delta = 1
this_times = ftimes
this_mags = fmags
this_errs = ferrs
while delta:
if meanormedian == 'mean':
this_center = npmean(this_mags)
this_stdev = npstddev(this_mags)
elif meanormedian == 'median':
this_center = npmedian(this_mags)
this_stdev = (npmedian(npabs(this_mags -
this_center))) * 1.483
this_size = this_mags.size
if magsarefluxes:
nottoodimind = (
(this_mags - this_center) >
(-dimmingclip*this_stdev)
)
nottoobrightind = (
(this_mags - this_center) <
(brighteningclip*this_stdev)
)
else:
nottoodimind = (
(this_mags - this_center) <
(dimmingclip*this_stdev)
)
nottoobrightind = (
(this_mags - this_center) >
(-brighteningclip*this_stdev)
)
# apply the sigclip
tsi = nottoodimind & nottoobrightind
# update the arrays
this_times = this_times[tsi]
this_mags = this_mags[tsi]
this_errs = this_errs[tsi]
# update delta and go to top of the loop
delta = this_size - this_mags.size
else: # If iterating only a certain number of times
this_times = ftimes
this_mags = fmags
this_errs = ferrs
iter_num = 0
delta = 1
while iter_num < niterations and delta:
if meanormedian == 'mean':
this_center = npmean(this_mags)
this_stdev = npstddev(this_mags)
elif meanormedian == 'median':
this_center = npmedian(this_mags)
this_stdev = (npmedian(npabs(this_mags -
this_center))) * 1.483
this_size = this_mags.size
if magsarefluxes:
nottoodimind = (
(this_mags - this_center) >
(-dimmingclip*this_stdev)
)
nottoobrightind = (
(this_mags - this_center) <
(brighteningclip*this_stdev)
)
else:
nottoodimind = (
(this_mags - this_center) < (dimmingclip*this_stdev)
)
nottoobrightind = (
(this_mags - this_center) >
(-brighteningclip*this_stdev)
)
# apply the sigclip
tsi = nottoodimind & nottoobrightind
# update the arrays
this_times = this_times[tsi]
this_mags = this_mags[tsi]
this_errs = this_errs[tsi]
# update the number of iterations and delta
# and go to top of the loop
delta = this_size - this_mags.size
iter_num += 1
# final sigclipped versions
stimes, smags, serrs = this_times, this_mags, this_errs
else:
stimes = ftimes
smags = fmags
serrs = ferrs
if returnerrs:
return stimes, smags, serrs
else:
return stimes, smags, None
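
A usage sketch on synthetic data (the values and seed are illustrative only):

import numpy as np
from astrobase.lcmath import sigclip_magseries

rng = np.random.default_rng(42)
times = np.arange(200.0)
mags = 12.0 + rng.normal(0.0, 0.01, size=200)
mags[50] = 12.5  # inject a deep outlier (a dimming, in mag units)

# symmetric 3-sigma clip around the median
stimes, smags, serrs = sigclip_magseries(times, mags, None, sigclip=3.0)

# asymmetric clip: 10-sigma for dimmings, 3-sigma for brightenings
stimes2, smags2, serrs2 = sigclip_magseries(times, mags, None,
                                            sigclip=[10., 3.],
                                            magsarefluxes=False)
# serrs and serrs2 are None here because errs was passed as None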
|
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/lcmath.py#L243-L589
|
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
sigclip_magseries_with_extparams
|
Sigma-clips a magnitude or flux time-series and associated measurement
arrays.
Selects the finite times, magnitudes (or fluxes), and errors from the passed
values, and applies symmetric or asymmetric sigma clipping to them. Uses the
same array indices as these values to filter out the values of all arrays in
the `extparams` list. This can be useful for simultaneously sigma-clipping a
magnitude/flux time-series along with their associated values of external
parameters, such as telescope hour angle, zenith distance, temperature, moon
phase, etc.
Parameters
----------
times,mags,errs : np.array
The magnitude or flux time-series arrays to sigma-clip. This doesn't
assume all values are finite or that they're positive/negative. All of
these arrays will have their non-finite elements removed, and then will
be sigma-clipped based on the arguments to this function.
`errs` is optional. Set it to None if you don't have values for these. A
'faked' `errs` array will be generated if necessary, which can be
ignored in the output as well.
extparams : list of np.array
This is a list of all external parameter arrays to simultaneously filter
along with the magnitude/flux time-series. All of these arrays should
have the same length as the `times`, `mags`, and `errs` arrays.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]` will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
iterative : bool
If this is set to True, will perform iterative sigma-clipping, repeating
until no more points are removed.
magsarefluxes : bool
True if your "mags" are in fact fluxes, i.e. if "fainter" corresponds to
`mags` getting smaller.
Returns
-------
(stimes, smags, serrs, extparams) : tuple
The sigma-clipped and nan-stripped time-series in `stimes`, `smags`,
`serrs`, along with the correspondingly filtered external parameter
arrays in `extparams`.
|
astrobase/lcmath.py
|
def sigclip_magseries_with_extparams(times, mags, errs, extparams,
sigclip=None,
iterative=False,
magsarefluxes=False):
'''Sigma-clips a magnitude or flux time-series and associated measurement
arrays.
Selects the finite times, magnitudes (or fluxes), and errors from the passed
values, and applies symmetric or asymmetric sigma clipping to them. Uses the
same array indices as these values to filter out the values of all arrays in
the `extparams` list. This can be useful for simultaneously sigma-clipping a
magnitude/flux time-series along with their associated values of external
parameters, such as telescope hour angle, zenith distance, temperature, moon
phase, etc.
Parameters
----------
times,mags,errs : np.array
The magnitude or flux time-series arrays to sigma-clip. This doesn't
assume all values are finite or that they're positive/negative. All of
these arrays will have their non-finite elements removed, and then will
be sigma-clipped based on the arguments to this function.
`errs` is optional. Set it to None if you don't have values for these. A
'faked' `errs` array will be generated if necessary, which can be
ignored in the output as well.
extparams : list of np.array
This is a list of all external parameter arrays to simultaneously filter
along with the magnitude/flux time-series. All of these arrays should
have the same length as the `times`, `mags`, and `errs` arrays.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]` will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
iterative : bool
If this is set to True, will perform iterative sigma-clipping, repeating
until no more points are removed.
magsarefluxes : bool
True if your "mags" are in fact fluxes, i.e. if "fainter" corresponds to
`mags` getting smaller.
Returns
-------
(stimes, smags, serrs, extparams) : tuple
The sigma-clipped and nan-stripped time-series in `stimes`, `smags`,
`serrs`, along with the correspondingly filtered external parameter
arrays in `extparams`.
'''
returnerrs = True
# fake the errors if they don't exist
# this is inconsequential to sigma-clipping
# we don't return these dummy values if the input errs are None
if errs is None:
# assume 0.1% errors if not given
# this should work for mags and fluxes
errs = 0.001*mags
returnerrs = False
# filter the input times, mags, errs; do sigclipping and normalization
find = npisfinite(times) & npisfinite(mags) & npisfinite(errs)
ftimes, fmags, ferrs = times[find], mags[find], errs[find]
# apply the same indices to the external parameters
for epi, eparr in enumerate(extparams):
extparams[epi] = eparr[find]
# get the median and stdev = 1.483 x MAD
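    # (1.483 ~= 1/0.6745, the factor converting MAD to a Gaussian sigma)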
median_mag = npmedian(fmags)
stddev_mag = (npmedian(npabs(fmags - median_mag))) * 1.483
# sigclip next for a single sigclip value
if sigclip and isinstance(sigclip, (float, int)):
if not iterative:
sigind = (npabs(fmags - median_mag)) < (sigclip * stddev_mag)
stimes = ftimes[sigind]
smags = fmags[sigind]
serrs = ferrs[sigind]
# apply the same indices to the external parameters
for epi, eparr in enumerate(extparams):
extparams[epi] = eparr[sigind]
else:
#
# iterative version adapted from scipy.stats.sigmaclip
#
delta = 1
this_times = ftimes
this_mags = fmags
this_errs = ferrs
while delta:
this_median = npmedian(this_mags)
this_stdev = (npmedian(npabs(this_mags - this_median))) * 1.483
this_size = this_mags.size
# apply the sigclip
tsi = (npabs(this_mags - this_median)) < (sigclip * this_stdev)
# update the arrays
this_times = this_times[tsi]
this_mags = this_mags[tsi]
this_errs = this_errs[tsi]
# apply the same indices to the external parameters
for epi, eparr in enumerate(extparams):
extparams[epi] = eparr[tsi]
# update delta and go to the top of the loop
delta = this_size - this_mags.size
# final sigclipped versions
stimes, smags, serrs = this_times, this_mags, this_errs
# this handles sigclipping for asymmetric +ve and -ve clip values
elif sigclip and isinstance(sigclip, (list, tuple)) and len(sigclip) == 2:
# sigclip is passed as [dimmingclip, brighteningclip]
dimmingclip = sigclip[0]
brighteningclip = sigclip[1]
if not iterative:
if magsarefluxes:
nottoodimind = (
(fmags - median_mag) > (-dimmingclip*stddev_mag)
)
nottoobrightind = (
(fmags - median_mag) < (brighteningclip*stddev_mag)
)
else:
nottoodimind = (
(fmags - median_mag) < (dimmingclip*stddev_mag)
)
nottoobrightind = (
(fmags - median_mag) > (-brighteningclip*stddev_mag)
)
sigind = nottoodimind & nottoobrightind
stimes = ftimes[sigind]
smags = fmags[sigind]
serrs = ferrs[sigind]
# apply the same indices to the external parameters
for epi, eparr in enumerate(extparams):
extparams[epi] = eparr[sigind]
else:
#
# iterative version adapted from scipy.stats.sigmaclip
#
delta = 1
this_times = ftimes
this_mags = fmags
this_errs = ferrs
while delta:
this_median = npmedian(this_mags)
this_stdev = (npmedian(npabs(this_mags - this_median))) * 1.483
this_size = this_mags.size
if magsarefluxes:
nottoodimind = (
(this_mags - this_median) > (-dimmingclip*this_stdev)
)
nottoobrightind = (
(this_mags - this_median) < (brighteningclip*this_stdev)
)
else:
nottoodimind = (
(this_mags - this_median) < (dimmingclip*this_stdev)
)
nottoobrightind = (
(this_mags - this_median) >
(-brighteningclip*this_stdev)
)
# apply the sigclip
tsi = nottoodimind & nottoobrightind
# update the arrays
this_times = this_times[tsi]
this_mags = this_mags[tsi]
this_errs = this_errs[tsi]
# apply the same indices to the external parameters
for epi, eparr in enumerate(extparams):
extparams[epi] = eparr[tsi]
# update delta and go to top of the loop
delta = this_size - this_mags.size
# final sigclipped versions
stimes, smags, serrs = this_times, this_mags, this_errs
else:
stimes = ftimes
smags = fmags
serrs = ferrs
if returnerrs:
return stimes, smags, serrs, extparams
else:
return stimes, smags, None, extparams
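
A hedged usage sketch (all values invented) showing how the external-parameter arrays track the clipping:

import numpy as np
from astrobase.lcmath import sigclip_magseries_with_extparams

times = np.linspace(0.0, 10.0, 100)
mags = 12.0 + 0.01*np.sin(times)
mags[10] = 13.0  # outlier
airmass = np.linspace(1.0, 2.0, 100)

# pass a copy: the arrays inside extparams are filtered in place
stimes, smags, serrs, sext = sigclip_magseries_with_extparams(
    times, mags, None, [airmass.copy()], sigclip=3.0
)
# sext[0] holds the airmass values that survived the same finite/sigma-clip
# mask applied to times and mags; serrs is None because errs was None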
|
def sigclip_magseries_with_extparams(times, mags, errs, extparams,
sigclip=None,
iterative=False,
magsarefluxes=False):
'''Sigma-clips a magnitude or flux time-series and associated measurement
arrays.
Selects the finite times, magnitudes (or fluxes), and errors from the passed
values, and apply symmetric or asymmetric sigma clipping to them. Uses the
same array indices as these values to filter out the values of all arrays in
the `extparams` list. This can be useful for simultaneously sigma-clipping a
magnitude/flux time-series along with their associated values of external
parameters, such as telescope hour angle, zenith distance, temperature, moon
phase, etc.
Parameters
----------
times,mags,errs : np.array
The magnitude or flux time-series arrays to sigma-clip. This doesn't
assume all values are finite or if they're positive/negative. All of
these arrays will have their non-finite elements removed, and then will
be sigma-clipped based on the arguments to this function.
`errs` is optional. Set it to None if you don't have values for these. A
'faked' `errs` array will be generated if necessary, which can be
ignored in the output as well.
extparams : list of np.array
This is a list of all external parameter arrays to simultaneously filter
along with the magnitude/flux time-series. All of these arrays should
have the same length as the `times`, `mags`, and `errs` arrays.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
iterative : bool
If this is set to True, will perform iterative sigma-clipping. If
`niterations` is not set and this is True, sigma-clipping is iterated
until no more points are removed.
magsareflux : bool
True if your "mags" are in fact fluxes, i.e. if "fainter" corresponds to
`mags` getting smaller.
Returns
-------
(stimes, smags, serrs) : tuple
The sigma-clipped and nan-stripped time-series in `stimes`, `smags`,
`serrs` and the associated values of the `extparams` in `sextparams`.
'''
returnerrs = True
# fake the errors if they don't exist
# this is inconsequential to sigma-clipping
# we don't return these dummy values if the input errs are None
if errs is None:
# assume 0.1% errors if not given
# this should work for mags and fluxes
errs = 0.001*mags
returnerrs = False
# filter the input times, mags, errs; do sigclipping and normalization
find = npisfinite(times) & npisfinite(mags) & npisfinite(errs)
ftimes, fmags, ferrs = times[find], mags[find], errs[find]
# apply the same indices to the external parameters
for epi, eparr in enumerate(extparams):
extparams[epi] = eparr[find]
# get the median and stdev = 1.483 x MAD
median_mag = npmedian(fmags)
stddev_mag = (npmedian(npabs(fmags - median_mag))) * 1.483
# sigclip next for a single sigclip value
if sigclip and isinstance(sigclip, (float, int)):
if not iterative:
sigind = (npabs(fmags - median_mag)) < (sigclip * stddev_mag)
stimes = ftimes[sigind]
smags = fmags[sigind]
serrs = ferrs[sigind]
# apply the same indices to the external parameters
for epi, eparr in enumerate(extparams):
extparams[epi] = eparr[sigind]
else:
#
# iterative version adapted from scipy.stats.sigmaclip
#
delta = 1
this_times = ftimes
this_mags = fmags
this_errs = ferrs
while delta:
this_median = npmedian(this_mags)
this_stdev = (npmedian(npabs(this_mags - this_median))) * 1.483
this_size = this_mags.size
# apply the sigclip
tsi = (npabs(this_mags - this_median)) < (sigclip * this_stdev)
# update the arrays
this_times = this_times[tsi]
this_mags = this_mags[tsi]
this_errs = this_errs[tsi]
# apply the same indices to the external parameters
for epi, eparr in enumerate(extparams):
extparams[epi] = eparr[tsi]
# update delta and go to the top of the loop
delta = this_size - this_mags.size
# final sigclipped versions
stimes, smags, serrs = this_times, this_mags, this_errs
# this handles sigclipping for asymmetric +ve and -ve clip values
elif sigclip and isinstance(sigclip, (list, tuple)) and len(sigclip) == 2:
# sigclip is passed as [dimmingclip, brighteningclip]
dimmingclip = sigclip[0]
brighteningclip = sigclip[1]
if not iterative:
if magsarefluxes:
nottoodimind = (
(fmags - median_mag) > (-dimmingclip*stddev_mag)
)
nottoobrightind = (
(fmags - median_mag) < (brighteningclip*stddev_mag)
)
else:
nottoodimind = (
(fmags - median_mag) < (dimmingclip*stddev_mag)
)
nottoobrightind = (
(fmags - median_mag) > (-brighteningclip*stddev_mag)
)
sigind = nottoodimind & nottoobrightind
stimes = ftimes[sigind]
smags = fmags[sigind]
serrs = ferrs[sigind]
# apply the same indices to the external parameters
for epi, eparr in enumerate(extparams):
extparams[epi] = eparr[sigind]
else:
#
# iterative version adapted from scipy.stats.sigmaclip
#
delta = 1
this_times = ftimes
this_mags = fmags
this_errs = ferrs
while delta:
this_median = npmedian(this_mags)
this_stdev = (npmedian(npabs(this_mags - this_median))) * 1.483
this_size = this_mags.size
if magsarefluxes:
nottoodimind = (
(this_mags - this_median) > (-dimmingclip*this_stdev)
)
nottoobrightind = (
(this_mags - this_median) < (brighteningclip*this_stdev)
)
else:
nottoodimind = (
(this_mags - this_median) < (dimmingclip*this_stdev)
)
nottoobrightind = (
(this_mags - this_median) >
(-brighteningclip*this_stdev)
)
# apply the sigclip
tsi = nottoodimind & nottoobrightind
# update the arrays
this_times = this_times[tsi]
this_mags = this_mags[tsi]
this_errs = this_errs[tsi]
# apply the same indices to the external parameters
for epi, eparr in enumerate(extparams):
extparams[epi] = eparr[tsi]
# update delta and go to top of the loop
delta = this_size - this_mags.size
# final sigclipped versions
stimes, smags, serrs = this_times, this_mags, this_errs
else:
stimes = ftimes
smags = fmags
serrs = ferrs
if returnerrs:
return stimes, smags, serrs, extparams
else:
return stimes, smags, None, extparams
|
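As a quick illustration of the asymmetric-clip behavior described in the docstring above, here is a minimal usage sketch. The synthetic light curve, the injected 0.5 mag dimmings, and the `xpos` external-parameter array are all made up for this example; only the function and its signature come from `astrobase.lcmath`.

import numpy as np
from astrobase.lcmath import sigclip_magseries_with_extparams

# synthetic flat light curve: mag 12.0 with 0.01 mag Gaussian scatter
rng = np.random.RandomState(42)
times = np.arange(1000, dtype=float)
mags = 12.0 + 0.01 * rng.randn(1000)
mags[::100] += 0.5  # inject 50-sigma dimmings (fainter = numerically larger)
errs = np.full_like(mags, 0.01)
# one external parameter array (e.g. detector x position), same length
xpos = rng.uniform(0.0, 9.0, size=1000)

# asymmetric clip: remove > 3-sigma dimmings but keep up to 10-sigma
# brightenings; the extparams list is filtered with the same indices
stimes, smags, serrs, sextparams = sigclip_magseries_with_extparams(
    times, mags, errs, [xpos],
    sigclip=[3.0, 10.0],
    iterative=False,
    magsarefluxes=False,
)
print(times.size, '->', stimes.size)  # injected dimmings are clipped out
assert sextparams[0].size == stimes.size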
[
"Sigma",
"-",
"clips",
"a",
"magnitude",
"or",
"flux",
"time",
"-",
"series",
"and",
"associated",
"measurement",
"arrays",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/lcmath.py#L593-L830
|
[
"def",
"sigclip_magseries_with_extparams",
"(",
"times",
",",
"mags",
",",
"errs",
",",
"extparams",
",",
"sigclip",
"=",
"None",
",",
"iterative",
"=",
"False",
",",
"magsarefluxes",
"=",
"False",
")",
":",
"returnerrs",
"=",
"True",
"# fake the errors if they don't exist",
"# this is inconsequential to sigma-clipping",
"# we don't return these dummy values if the input errs are None",
"if",
"errs",
"is",
"None",
":",
"# assume 0.1% errors if not given",
"# this should work for mags and fluxes",
"errs",
"=",
"0.001",
"*",
"mags",
"returnerrs",
"=",
"False",
"# filter the input times, mags, errs; do sigclipping and normalization",
"find",
"=",
"npisfinite",
"(",
"times",
")",
"&",
"npisfinite",
"(",
"mags",
")",
"&",
"npisfinite",
"(",
"errs",
")",
"ftimes",
",",
"fmags",
",",
"ferrs",
"=",
"times",
"[",
"find",
"]",
",",
"mags",
"[",
"find",
"]",
",",
"errs",
"[",
"find",
"]",
"# apply the same indices to the external parameters",
"for",
"epi",
",",
"eparr",
"in",
"enumerate",
"(",
"extparams",
")",
":",
"extparams",
"[",
"epi",
"]",
"=",
"eparr",
"[",
"find",
"]",
"# get the median and stdev = 1.483 x MAD",
"median_mag",
"=",
"npmedian",
"(",
"fmags",
")",
"stddev_mag",
"=",
"(",
"npmedian",
"(",
"npabs",
"(",
"fmags",
"-",
"median_mag",
")",
")",
")",
"*",
"1.483",
"# sigclip next for a single sigclip value",
"if",
"sigclip",
"and",
"isinstance",
"(",
"sigclip",
",",
"(",
"float",
",",
"int",
")",
")",
":",
"if",
"not",
"iterative",
":",
"sigind",
"=",
"(",
"npabs",
"(",
"fmags",
"-",
"median_mag",
")",
")",
"<",
"(",
"sigclip",
"*",
"stddev_mag",
")",
"stimes",
"=",
"ftimes",
"[",
"sigind",
"]",
"smags",
"=",
"fmags",
"[",
"sigind",
"]",
"serrs",
"=",
"ferrs",
"[",
"sigind",
"]",
"# apply the same indices to the external parameters",
"for",
"epi",
",",
"eparr",
"in",
"enumerate",
"(",
"extparams",
")",
":",
"extparams",
"[",
"epi",
"]",
"=",
"eparr",
"[",
"sigind",
"]",
"else",
":",
"#",
"# iterative version adapted from scipy.stats.sigmaclip",
"#",
"delta",
"=",
"1",
"this_times",
"=",
"ftimes",
"this_mags",
"=",
"fmags",
"this_errs",
"=",
"ferrs",
"while",
"delta",
":",
"this_median",
"=",
"npmedian",
"(",
"this_mags",
")",
"this_stdev",
"=",
"(",
"npmedian",
"(",
"npabs",
"(",
"this_mags",
"-",
"this_median",
")",
")",
")",
"*",
"1.483",
"this_size",
"=",
"this_mags",
".",
"size",
"# apply the sigclip",
"tsi",
"=",
"(",
"npabs",
"(",
"this_mags",
"-",
"this_median",
")",
")",
"<",
"(",
"sigclip",
"*",
"this_stdev",
")",
"# update the arrays",
"this_times",
"=",
"this_times",
"[",
"tsi",
"]",
"this_mags",
"=",
"this_mags",
"[",
"tsi",
"]",
"this_errs",
"=",
"this_errs",
"[",
"tsi",
"]",
"# apply the same indices to the external parameters",
"for",
"epi",
",",
"eparr",
"in",
"enumerate",
"(",
"extparams",
")",
":",
"extparams",
"[",
"epi",
"]",
"=",
"eparr",
"[",
"tsi",
"]",
"# update delta and go to the top of the loop",
"delta",
"=",
"this_size",
"-",
"this_mags",
".",
"size",
"# final sigclipped versions",
"stimes",
",",
"smags",
",",
"serrs",
"=",
"this_times",
",",
"this_mags",
",",
"this_errs",
"# this handles sigclipping for asymmetric +ve and -ve clip values",
"elif",
"sigclip",
"and",
"isinstance",
"(",
"sigclip",
",",
"(",
"list",
",",
"tuple",
")",
")",
"and",
"len",
"(",
"sigclip",
")",
"==",
"2",
":",
"# sigclip is passed as [dimmingclip, brighteningclip]",
"dimmingclip",
"=",
"sigclip",
"[",
"0",
"]",
"brighteningclip",
"=",
"sigclip",
"[",
"1",
"]",
"if",
"not",
"iterative",
":",
"if",
"magsarefluxes",
":",
"nottoodimind",
"=",
"(",
"(",
"fmags",
"-",
"median_mag",
")",
">",
"(",
"-",
"dimmingclip",
"*",
"stddev_mag",
")",
")",
"nottoobrightind",
"=",
"(",
"(",
"fmags",
"-",
"median_mag",
")",
"<",
"(",
"brighteningclip",
"*",
"stddev_mag",
")",
")",
"else",
":",
"nottoodimind",
"=",
"(",
"(",
"fmags",
"-",
"median_mag",
")",
"<",
"(",
"dimmingclip",
"*",
"stddev_mag",
")",
")",
"nottoobrightind",
"=",
"(",
"(",
"fmags",
"-",
"median_mag",
")",
">",
"(",
"-",
"brighteningclip",
"*",
"stddev_mag",
")",
")",
"sigind",
"=",
"nottoodimind",
"&",
"nottoobrightind",
"stimes",
"=",
"ftimes",
"[",
"sigind",
"]",
"smags",
"=",
"fmags",
"[",
"sigind",
"]",
"serrs",
"=",
"ferrs",
"[",
"sigind",
"]",
"# apply the same indices to the external parameters",
"for",
"epi",
",",
"eparr",
"in",
"enumerate",
"(",
"extparams",
")",
":",
"extparams",
"[",
"epi",
"]",
"=",
"eparr",
"[",
"sigind",
"]",
"else",
":",
"#",
"# iterative version adapted from scipy.stats.sigmaclip",
"#",
"delta",
"=",
"1",
"this_times",
"=",
"ftimes",
"this_mags",
"=",
"fmags",
"this_errs",
"=",
"ferrs",
"while",
"delta",
":",
"this_median",
"=",
"npmedian",
"(",
"this_mags",
")",
"this_stdev",
"=",
"(",
"npmedian",
"(",
"npabs",
"(",
"this_mags",
"-",
"this_median",
")",
")",
")",
"*",
"1.483",
"this_size",
"=",
"this_mags",
".",
"size",
"if",
"magsarefluxes",
":",
"nottoodimind",
"=",
"(",
"(",
"this_mags",
"-",
"this_median",
")",
">",
"(",
"-",
"dimmingclip",
"*",
"this_stdev",
")",
")",
"nottoobrightind",
"=",
"(",
"(",
"this_mags",
"-",
"this_median",
")",
"<",
"(",
"brighteningclip",
"*",
"this_stdev",
")",
")",
"else",
":",
"nottoodimind",
"=",
"(",
"(",
"this_mags",
"-",
"this_median",
")",
"<",
"(",
"dimmingclip",
"*",
"this_stdev",
")",
")",
"nottoobrightind",
"=",
"(",
"(",
"this_mags",
"-",
"this_median",
")",
">",
"(",
"-",
"brighteningclip",
"*",
"this_stdev",
")",
")",
"# apply the sigclip",
"tsi",
"=",
"nottoodimind",
"&",
"nottoobrightind",
"# update the arrays",
"this_times",
"=",
"this_times",
"[",
"tsi",
"]",
"this_mags",
"=",
"this_mags",
"[",
"tsi",
"]",
"this_errs",
"=",
"this_errs",
"[",
"tsi",
"]",
"# apply the same indices to the external parameters",
"for",
"epi",
",",
"eparr",
"in",
"enumerate",
"(",
"extparams",
")",
":",
"extparams",
"[",
"epi",
"]",
"=",
"eparr",
"[",
"tsi",
"]",
"# update delta and go to top of the loop",
"delta",
"=",
"this_size",
"-",
"this_mags",
".",
"size",
"# final sigclipped versions",
"stimes",
",",
"smags",
",",
"serrs",
"=",
"this_times",
",",
"this_mags",
",",
"this_errs",
"else",
":",
"stimes",
"=",
"ftimes",
"smags",
"=",
"fmags",
"serrs",
"=",
"ferrs",
"if",
"returnerrs",
":",
"return",
"stimes",
",",
"smags",
",",
"serrs",
",",
"extparams",
"else",
":",
"return",
"stimes",
",",
"smags",
",",
"None",
",",
"extparams"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
phase_magseries
|
Phases a magnitude/flux time-series using a given period and epoch.
The equation used is::
phase = (times - epoch)/period - floor((times - epoch)/period)
This phases the given magnitude timeseries using the given period and
epoch. If wrap is True, wraps the result around 0.0 (and returns an array
that has twice the number of the original elements). If sort is True,
returns the magnitude timeseries in phase sorted order.
Parameters
----------
times,mags : np.array
The magnitude/flux time-series values to phase using the provided
`period` and `epoch`. Non-finite values will be removed.
period : float
The period to use to phase the time-series.
epoch : float
The epoch to phase the time-series. This is usually the time-of-minimum
or time-of-maximum of some periodic light curve
phenomenon. Alternatively, one can use the minimum time value in
`times`.
wrap : bool
If this is True, the returned phased time-series will be wrapped around
phase 0.0, which is useful for plotting purposes. The arrays returned
will have twice the number of input elements because of this wrapping.
sort : bool
If this is True, the returned phased time-series will be sorted in
increasing phase order.
Returns
-------
dict
A dict of the following form is returned::
{'phase': the phase values,
'mags': the mags/flux values at each phase,
'period': the input `period` used to phase the time-series,
'epoch': the input `epoch` used to phase the time-series}
|
astrobase/lcmath.py
|
def phase_magseries(times, mags, period, epoch, wrap=True, sort=True):
'''Phases a magnitude/flux time-series using a given period and epoch.
The equation used is::
phase = (times - epoch)/period - floor((times - epoch)/period)
This phases the given magnitude timeseries using the given period and
epoch. If wrap is True, wraps the result around 0.0 (and returns an array
that has twice the number of the original elements). If sort is True,
returns the magnitude timeseries in phase sorted order.
Parameters
----------
times,mags : np.array
The magnitude/flux time-series values to phase using the provided
`period` and `epoch`. Non-finite values will be removed.
period : float
The period to use to phase the time-series.
epoch : float
The epoch to phase the time-series. This is usually the time-of-minimum
or time-of-maximum of some periodic light curve
phenomenon. Alternatively, one can use the minimum time value in
`times`.
wrap : bool
If this is True, the returned phased time-series will be wrapped around
phase 0.0, which is useful for plotting purposes. The arrays returned
will have twice the number of input elements because of this wrapping.
sort : bool
If this is True, the returned phased time-series will be sorted in
increasing phase order.
Returns
-------
dict
A dict of the following form is returned::
{'phase': the phase values,
'mags': the mags/flux values at each phase,
'period': the input `period` used to phase the time-series,
'epoch': the input `epoch` used to phase the time-series}
'''
# find all the finite values of the magnitudes and times
finiteind = np.isfinite(mags) & np.isfinite(times)
finite_times = times[finiteind]
finite_mags = mags[finiteind]
magseries_phase = (
(finite_times - epoch)/period -
np.floor(((finite_times - epoch)/period))
)
outdict = {'phase':magseries_phase,
'mags':finite_mags,
'period':period,
'epoch':epoch}
if sort:
sortorder = np.argsort(outdict['phase'])
outdict['phase'] = outdict['phase'][sortorder]
outdict['mags'] = outdict['mags'][sortorder]
if wrap:
outdict['phase'] = np.concatenate((outdict['phase']-1.0,
outdict['phase']))
outdict['mags'] = np.concatenate((outdict['mags'],
outdict['mags']))
return outdict
|
def phase_magseries(times, mags, period, epoch, wrap=True, sort=True):
'''Phases a magnitude/flux time-series using a given period and epoch.
The equation used is::
phase = (times - epoch)/period - floor((times - epoch)/period)
This phases the given magnitude timeseries using the given period and
epoch. If wrap is True, wraps the result around 0.0 (and returns an array
that has twice the number of the original elements). If sort is True,
returns the magnitude timeseries in phase sorted order.
Parameters
----------
times,mags : np.array
The magnitude/flux time-series values to phase using the provided
`period` and `epoch`. Non-finite values will be removed.
period : float
The period to use to phase the time-series.
epoch : float
The epoch to phase the time-series. This is usually the time-of-minimum
or time-of-maximum of some periodic light curve
phenomenon. Alternatively, one can use the minimum time value in
`times`.
wrap : bool
If this is True, the returned phased time-series will be wrapped around
phase 0.0, which is useful for plotting purposes. The arrays returned
will have twice the number of input elements because of this wrapping.
sort : bool
If this is True, the returned phased time-series will be sorted in
increasing phase order.
Returns
-------
dict
A dict of the following form is returned::
{'phase': the phase values,
'mags': the mags/flux values at each phase,
'period': the input `period` used to phase the time-series,
'epoch': the input `epoch` used to phase the time-series}
'''
# find all the finite values of the magnitudes and times
finiteind = np.isfinite(mags) & np.isfinite(times)
finite_times = times[finiteind]
finite_mags = mags[finiteind]
magseries_phase = (
(finite_times - epoch)/period -
np.floor(((finite_times - epoch)/period))
)
outdict = {'phase':magseries_phase,
'mags':finite_mags,
'period':period,
'epoch':epoch}
if sort:
sortorder = np.argsort(outdict['phase'])
outdict['phase'] = outdict['phase'][sortorder]
outdict['mags'] = outdict['mags'][sortorder]
if wrap:
outdict['phase'] = np.concatenate((outdict['phase']-1.0,
outdict['phase']))
outdict['mags'] = np.concatenate((outdict['mags'],
outdict['mags']))
return outdict
|
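For context, a short usage sketch of `phase_magseries` follows; the sinusoidal signal, period, and epoch below are invented for illustration.

import numpy as np
from astrobase.lcmath import phase_magseries

# a sinusoidal variable with a 2.5-day period observed over 100 days
rng = np.random.RandomState(0)
times = np.sort(rng.uniform(2455000.0, 2455100.0, size=500))
period, epoch = 2.5, 2455000.75
mags = 12.0 + 0.1 * np.sin(2.0 * np.pi * (times - epoch) / period)

phased = phase_magseries(times, mags, period, epoch, wrap=True, sort=True)
# wrap=True doubles the arrays so the phased curve covers [-1, 1) for plotting
print(phased['phase'].size)                          # 1000
print(phased['phase'].min(), phased['phase'].max())  # ~-1.0, <1.0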
[
"Phases",
"a",
"magnitude",
"/",
"flux",
"time",
"-",
"series",
"using",
"a",
"given",
"period",
"and",
"epoch",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/lcmath.py#L838-L915
|
[
"def",
"phase_magseries",
"(",
"times",
",",
"mags",
",",
"period",
",",
"epoch",
",",
"wrap",
"=",
"True",
",",
"sort",
"=",
"True",
")",
":",
"# find all the finite values of the magnitudes and times",
"finiteind",
"=",
"np",
".",
"isfinite",
"(",
"mags",
")",
"&",
"np",
".",
"isfinite",
"(",
"times",
")",
"finite_times",
"=",
"times",
"[",
"finiteind",
"]",
"finite_mags",
"=",
"mags",
"[",
"finiteind",
"]",
"magseries_phase",
"=",
"(",
"(",
"finite_times",
"-",
"epoch",
")",
"/",
"period",
"-",
"np",
".",
"floor",
"(",
"(",
"(",
"finite_times",
"-",
"epoch",
")",
"/",
"period",
")",
")",
")",
"outdict",
"=",
"{",
"'phase'",
":",
"magseries_phase",
",",
"'mags'",
":",
"finite_mags",
",",
"'period'",
":",
"period",
",",
"'epoch'",
":",
"epoch",
"}",
"if",
"sort",
":",
"sortorder",
"=",
"np",
".",
"argsort",
"(",
"outdict",
"[",
"'phase'",
"]",
")",
"outdict",
"[",
"'phase'",
"]",
"=",
"outdict",
"[",
"'phase'",
"]",
"[",
"sortorder",
"]",
"outdict",
"[",
"'mags'",
"]",
"=",
"outdict",
"[",
"'mags'",
"]",
"[",
"sortorder",
"]",
"if",
"wrap",
":",
"outdict",
"[",
"'phase'",
"]",
"=",
"np",
".",
"concatenate",
"(",
"(",
"outdict",
"[",
"'phase'",
"]",
"-",
"1.0",
",",
"outdict",
"[",
"'phase'",
"]",
")",
")",
"outdict",
"[",
"'mags'",
"]",
"=",
"np",
".",
"concatenate",
"(",
"(",
"outdict",
"[",
"'mags'",
"]",
",",
"outdict",
"[",
"'mags'",
"]",
")",
")",
"return",
"outdict"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
time_bin_magseries
|
Bins the given mag/flux time-series in time using the bin size given.
Parameters
----------
times,mags : np.array
The magnitude/flux time-series to bin in time. Non-finite elements will
be removed from these arrays. At least 10 elements in each array are
required for this function to operate.
binsize : float
The bin size to use to group together measurements closer than this
amount in time. This is in seconds.
minbinelems : int
The minimum number of elements required per bin to include it in the
output.
Returns
-------
dict
A dict of the following form is returned::
{'jdbins_indices': a list of the index arrays into the nan-filtered
input arrays per each bin,
'jdbins': list of bin boundaries for each bin,
'nbins': the number of bins generated,
'binnedtimes': the time values associated with each time bin;
this is the median of the times in each bin,
'binnedmags': the mag/flux values associated with each time bin;
this is the median of the mags/fluxes in each bin}
|
astrobase/lcmath.py
|
def time_bin_magseries(times, mags,
binsize=540.0,
minbinelems=7):
'''Bins the given mag/flux time-series in time using the bin size given.
Parameters
----------
times,mags : np.array
The magnitude/flux time-series to bin in time. Non-finite elements will
be removed from these arrays. At least 10 elements in each array are
required for this function to operate.
binsize : float
The bin size to use to group together measurements closer than this
amount in time. This is in seconds.
minbinelems : int
The minimum number of elements required per bin to include it in the
output.
Returns
-------
dict
A dict of the following form is returned::
{'jdbins_indices': a list of the index arrays into the nan-filtered
input arrays per each bin,
'jdbins': list of bin boundaries for each bin,
'nbins': the number of bins generated,
'binnedtimes': the time values associated with each time bin;
this is the median of the times in each bin,
'binnedmags': the mag/flux values associated with each time bin;
this is the median of the mags/fluxes in each bin}
'''
# check if the input arrays are ok
if not(times.shape and mags.shape and len(times) > 9 and len(mags) > 9):
LOGERROR("input time/mag arrays don't have enough elements")
return
# find all the finite values of the magnitudes and times
finiteind = np.isfinite(mags) & np.isfinite(times)
finite_times = times[finiteind]
finite_mags = mags[finiteind]
# convert binsize in seconds to JD units
binsizejd = binsize/(86400.0)
nbins = int(np.ceil((np.nanmax(finite_times) -
np.nanmin(finite_times))/binsizejd) + 1)
minjd = np.nanmin(finite_times)
jdbins = [(minjd + x*binsizejd) for x in range(nbins)]
# make a KD-tree on the JDs so we can do fast distance calculations. we
# need to add a bogus y coord to make this a problem that KD-trees can
# solve.
time_coords = np.array([[x,1.0] for x in finite_times])
jdtree = cKDTree(time_coords)
binned_finite_timeseries_indices = []
collected_binned_mags = {}
for jd in jdbins:
# find all bin indices close to within binsizejd of this point
# using the cKDTree query. we use the p-norm = 1 (Manhattan
# distance); since the bogus y coords are all equal, this reduces
# to the absolute time difference between points.
bin_indices = jdtree.query_ball_point(np.array([jd,1.0]),
binsizejd/2.0, p=1.0)
# if the bin_indices have already been collected, then we're
# done with this bin, move to the next one. if they haven't,
# then this is the start of a new bin.
if (bin_indices not in binned_finite_timeseries_indices and
len(bin_indices) >= minbinelems):
binned_finite_timeseries_indices.append(bin_indices)
# convert to ndarrays
binned_finite_timeseries_indices = [np.array(x) for x in
binned_finite_timeseries_indices]
collected_binned_mags['jdbins_indices'] = binned_finite_timeseries_indices
collected_binned_mags['jdbins'] = jdbins
collected_binned_mags['nbins'] = len(binned_finite_timeseries_indices)
# collect the finite_times
binned_jd = np.array([np.median(finite_times[x])
for x in binned_finite_timeseries_indices])
collected_binned_mags['binnedtimes'] = binned_jd
collected_binned_mags['binsize'] = binsize
# median bin the magnitudes according to the calculated indices
collected_binned_mags['binnedmags'] = (
np.array([np.median(finite_mags[x])
for x in binned_finite_timeseries_indices])
)
return collected_binned_mags
|
def time_bin_magseries(times, mags,
binsize=540.0,
minbinelems=7):
'''Bins the given mag/flux time-series in time using the bin size given.
Parameters
----------
times,mags : np.array
The magnitude/flux time-series to bin in time. Non-finite elements will
be removed from these arrays. At least 10 elements in each array are
required for this function to operate.
binsize : float
The bin size to use to group together measurements closer than this
amount in time. This is in seconds.
minbinelems : int
The minimum number of elements required per bin to include it in the
output.
Returns
-------
dict
A dict of the following form is returned::
{'jdbins_indices': a list of the index arrays into the nan-filtered
input arrays per each bin,
'jdbins': list of bin boundaries for each bin,
'nbins': the number of bins generated,
'binnedtimes': the time values associated with each time bin;
this is the median of the times in each bin,
'binnedmags': the mag/flux values associated with each time bin;
this is the median of the mags/fluxes in each bin}
'''
# check if the input arrays are ok
if not(times.shape and mags.shape and len(times) > 9 and len(mags) > 9):
LOGERROR("input time/mag arrays don't have enough elements")
return
# find all the finite values of the magnitudes and times
finiteind = np.isfinite(mags) & np.isfinite(times)
finite_times = times[finiteind]
finite_mags = mags[finiteind]
# convert binsize in seconds to JD units
binsizejd = binsize/(86400.0)
nbins = int(np.ceil((np.nanmax(finite_times) -
np.nanmin(finite_times))/binsizejd) + 1)
minjd = np.nanmin(finite_times)
jdbins = [(minjd + x*binsizejd) for x in range(nbins)]
# make a KD-tree on the JDs so we can do fast distance calculations. we
# need to add a bogus y coord to make this a problem that KD-trees can
# solve.
time_coords = np.array([[x,1.0] for x in finite_times])
jdtree = cKDTree(time_coords)
binned_finite_timeseries_indices = []
collected_binned_mags = {}
for jd in jdbins:
# find all bin indices close to within binsizejd of this point
# using the cKDTree query. we use the p-norm = 1 (Manhattan
# distance); since the bogus y coords are all equal, this reduces
# to the absolute time difference between points.
bin_indices = jdtree.query_ball_point(np.array([jd,1.0]),
binsizejd/2.0, p=1.0)
# if the bin_indices have already been collected, then we're
# done with this bin, move to the next one. if they haven't,
# then this is the start of a new bin.
if (bin_indices not in binned_finite_timeseries_indices and
len(bin_indices) >= minbinelems):
binned_finite_timeseries_indices.append(bin_indices)
# convert to ndarrays
binned_finite_timeseries_indices = [np.array(x) for x in
binned_finite_timeseries_indices]
collected_binned_mags['jdbins_indices'] = binned_finite_timeseries_indices
collected_binned_mags['jdbins'] = jdbins
collected_binned_mags['nbins'] = len(binned_finite_timeseries_indices)
# collect the finite_times
binned_jd = np.array([np.median(finite_times[x])
for x in binned_finite_timeseries_indices])
collected_binned_mags['binnedtimes'] = binned_jd
collected_binned_mags['binsize'] = binsize
# median bin the magnitudes according to the calculated indices
collected_binned_mags['binnedmags'] = (
np.array([np.median(finite_mags[x])
for x in binned_finite_timeseries_indices])
)
return collected_binned_mags
|
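A minimal sketch of calling `time_bin_magseries` on an evenly sampled series; the Kepler-like 29.4-minute cadence and the 3-hour bin size are illustrative choices, not values from the source.

import numpy as np
from astrobase.lcmath import time_bin_magseries

# ~29.4-minute cadence expressed in days, over ~41 days
rng = np.random.RandomState(0)
times = 2455000.0 + np.arange(2000) * (29.4 / 1440.0)
mags = 12.0 + 0.01 * rng.randn(2000)

# binsize is in seconds: bin to 3 hours, requiring >= 5 points per bin
binned = time_bin_magseries(times, mags, binsize=3 * 3600.0, minbinelems=5)
print(binned['nbins'], 'bins')
print(binned['binnedtimes'][:3])  # median time in each bin
print(binned['binnedmags'][:3])   # median mag in each bin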
[
"Bins",
"the",
"given",
"mag",
"/",
"flux",
"time",
"-",
"series",
"in",
"time",
"using",
"the",
"bin",
"size",
"given",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/lcmath.py#L1011-L1112
|
[
"def",
"time_bin_magseries",
"(",
"times",
",",
"mags",
",",
"binsize",
"=",
"540.0",
",",
"minbinelems",
"=",
"7",
")",
":",
"# check if the input arrays are ok",
"if",
"not",
"(",
"times",
".",
"shape",
"and",
"mags",
".",
"shape",
"and",
"len",
"(",
"times",
")",
">",
"9",
"and",
"len",
"(",
"mags",
")",
">",
"9",
")",
":",
"LOGERROR",
"(",
"\"input time/mag arrays don't have enough elements\"",
")",
"return",
"# find all the finite values of the magnitudes and times",
"finiteind",
"=",
"np",
".",
"isfinite",
"(",
"mags",
")",
"&",
"np",
".",
"isfinite",
"(",
"times",
")",
"finite_times",
"=",
"times",
"[",
"finiteind",
"]",
"finite_mags",
"=",
"mags",
"[",
"finiteind",
"]",
"# convert binsize in seconds to JD units",
"binsizejd",
"=",
"binsize",
"/",
"(",
"86400.0",
")",
"nbins",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"(",
"np",
".",
"nanmax",
"(",
"finite_times",
")",
"-",
"np",
".",
"nanmin",
"(",
"finite_times",
")",
")",
"/",
"binsizejd",
")",
"+",
"1",
")",
"minjd",
"=",
"np",
".",
"nanmin",
"(",
"finite_times",
")",
"jdbins",
"=",
"[",
"(",
"minjd",
"+",
"x",
"*",
"binsizejd",
")",
"for",
"x",
"in",
"range",
"(",
"nbins",
")",
"]",
"# make a KD-tree on the JDs so we can do fast distance calculations. we",
"# need to add a bogus y coord to make this a problem that KD-trees can",
"# solve.",
"time_coords",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"x",
",",
"1.0",
"]",
"for",
"x",
"in",
"finite_times",
"]",
")",
"jdtree",
"=",
"cKDTree",
"(",
"time_coords",
")",
"binned_finite_timeseries_indices",
"=",
"[",
"]",
"collected_binned_mags",
"=",
"{",
"}",
"for",
"jd",
"in",
"jdbins",
":",
"# find all bin indices close to within binsizejd of this point",
"# using the cKDTree query. we use the p-norm = 1 (I think this",
"# means straight-up pairwise distance? FIXME: check this)",
"bin_indices",
"=",
"jdtree",
".",
"query_ball_point",
"(",
"np",
".",
"array",
"(",
"[",
"jd",
",",
"1.0",
"]",
")",
",",
"binsizejd",
"/",
"2.0",
",",
"p",
"=",
"1.0",
")",
"# if the bin_indices have already been collected, then we're",
"# done with this bin, move to the next one. if they haven't,",
"# then this is the start of a new bin.",
"if",
"(",
"bin_indices",
"not",
"in",
"binned_finite_timeseries_indices",
"and",
"len",
"(",
"bin_indices",
")",
">=",
"minbinelems",
")",
":",
"binned_finite_timeseries_indices",
".",
"append",
"(",
"bin_indices",
")",
"# convert to ndarrays",
"binned_finite_timeseries_indices",
"=",
"[",
"np",
".",
"array",
"(",
"x",
")",
"for",
"x",
"in",
"binned_finite_timeseries_indices",
"]",
"collected_binned_mags",
"[",
"'jdbins_indices'",
"]",
"=",
"binned_finite_timeseries_indices",
"collected_binned_mags",
"[",
"'jdbins'",
"]",
"=",
"jdbins",
"collected_binned_mags",
"[",
"'nbins'",
"]",
"=",
"len",
"(",
"binned_finite_timeseries_indices",
")",
"# collect the finite_times",
"binned_jd",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"median",
"(",
"finite_times",
"[",
"x",
"]",
")",
"for",
"x",
"in",
"binned_finite_timeseries_indices",
"]",
")",
"collected_binned_mags",
"[",
"'binnedtimes'",
"]",
"=",
"binned_jd",
"collected_binned_mags",
"[",
"'binsize'",
"]",
"=",
"binsize",
"# median bin the magnitudes according to the calculated indices",
"collected_binned_mags",
"[",
"'binnedmags'",
"]",
"=",
"(",
"np",
".",
"array",
"(",
"[",
"np",
".",
"median",
"(",
"finite_mags",
"[",
"x",
"]",
")",
"for",
"x",
"in",
"binned_finite_timeseries_indices",
"]",
")",
")",
"return",
"collected_binned_mags"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
phase_bin_magseries
|
Bins a phased magnitude/flux time-series using the bin size provided.
Parameters
----------
phases,mags : np.array
The phased magnitude/flux time-series to bin in phase. Non-finite
elements will be removed from these arrays. At least 10 elements in each
array are required for this function to operate.
binsize : float
The bin size to use to group together measurements closer than this
amount in phase. This is in units of phase.
minbinelems : int
The minimum number of elements required per bin to include it in the
output.
Returns
-------
dict
A dict of the following form is returned::
{'phasebins_indices': a list of the index arrays into the
nan-filtered input arrays per each bin,
'phasebins': list of bin boundaries for each bin,
'nbins': the number of bins generated,
'binnedphases': the phase values associated with each phase bin;
this is the median of the phase value in each bin,
'binnedmags': the mag/flux values associated with each phase bin;
this is the median of the mags/fluxes in each bin}
|
astrobase/lcmath.py
|
def phase_bin_magseries(phases, mags,
binsize=0.005,
minbinelems=7):
'''Bins a phased magnitude/flux time-series using the bin size provided.
Parameters
----------
phases,mags : np.array
The phased magnitude/flux time-series to bin in phase. Non-finite
elements will be removed from these arrays. At least 10 elements in each
array are required for this function to operate.
binsize : float
The bin size to use to group together measurements closer than this
amount in phase. This is in units of phase.
minbinelems : int
The minimum number of elements required per bin to include it in the
output.
Returns
-------
dict
A dict of the following form is returned::
{'phasebins_indices': a list of the index arrays into the
nan-filtered input arrays per each bin,
'phasebins': list of bin boundaries for each bin,
'nbins': the number of bins generated,
'binnedphases': the phase values associated with each phase bin;
this is the median of the phase value in each bin,
'binnedmags': the mag/flux values associated with each phase bin;
this is the median of the mags/fluxes in each bin}
'''
# check if the input arrays are ok
if not(phases.shape and mags.shape and len(phases) > 10 and len(mags) > 10):
LOGERROR("input time/mag arrays don't have enough elements")
return
# find all the finite values of the magnitudes and phases
finiteind = np.isfinite(mags) & np.isfinite(phases)
finite_phases = phases[finiteind]
finite_mags = mags[finiteind]
nbins = int(np.ceil((np.nanmax(finite_phases) -
np.nanmin(finite_phases))/binsize) + 1)
minphase = np.nanmin(finite_phases)
phasebins = [(minphase + x*binsize) for x in range(nbins)]
# make a KD-tree on the PHASEs so we can do fast distance calculations. we
# need to add a bogus y coord to make this a problem that KD-trees can
# solve.
time_coords = np.array([[x,1.0] for x in finite_phases])
phasetree = cKDTree(time_coords)
binned_finite_phaseseries_indices = []
collected_binned_mags = {}
for phase in phasebins:
# find all bin indices close to within binsize of this point using the
# cKDTree query. we use the p-norm = 1 (Manhattan distance); since the
# bogus y coords are all equal, this reduces to the absolute phase
# difference between points.
bin_indices = phasetree.query_ball_point(np.array([phase,1.0]),
binsize/2.0, p=1.0)
# if the bin_indices have already been collected, then we're
# done with this bin, move to the next one. if they haven't,
# then this is the start of a new bin.
if (bin_indices not in binned_finite_phaseseries_indices and
len(bin_indices) >= minbinelems):
binned_finite_phaseseries_indices.append(bin_indices)
# convert to ndarrays
binned_finite_phaseseries_indices = [np.array(x) for x in
binned_finite_phaseseries_indices]
collected_binned_mags['phasebins_indices'] = (
binned_finite_phaseseries_indices
)
collected_binned_mags['phasebins'] = phasebins
collected_binned_mags['nbins'] = len(binned_finite_phaseseries_indices)
# collect the finite_phases
binned_phase = np.array([np.median(finite_phases[x])
for x in binned_finite_phaseseries_indices])
collected_binned_mags['binnedphases'] = binned_phase
collected_binned_mags['binsize'] = binsize
# median bin the magnitudes according to the calculated indices
collected_binned_mags['binnedmags'] = (
np.array([np.median(finite_mags[x])
for x in binned_finite_phaseseries_indices])
)
return collected_binned_mags
|
def phase_bin_magseries(phases, mags,
binsize=0.005,
minbinelems=7):
'''Bins a phased magnitude/flux time-series using the bin size provided.
Parameters
----------
phases,mags : np.array
The phased magnitude/flux time-series to bin in phase. Non-finite
elements will be removed from these arrays. At least 10 elements in each
array are required for this function to operate.
binsize : float
The bin size to use to group together measurements closer than this
amount in phase. This is in units of phase.
minbinelems : int
The minimum number of elements required per bin to include it in the
output.
Returns
-------
dict
A dict of the following form is returned::
{'phasebins_indices': a list of the index arrays into the
nan-filtered input arrays per each bin,
'phasebins': list of bin boundaries for each bin,
'nbins': the number of bins generated,
'binnedphases': the phase values associated with each phase bin;
this is the median of the phase value in each bin,
'binnedmags': the mag/flux values associated with each phase bin;
this is the median of the mags/fluxes in each bin}
'''
# check if the input arrays are ok
if not(phases.shape and mags.shape and len(phases) > 10 and len(mags) > 10):
LOGERROR("input time/mag arrays don't have enough elements")
return
# find all the finite values of the magnitudes and phases
finiteind = np.isfinite(mags) & np.isfinite(phases)
finite_phases = phases[finiteind]
finite_mags = mags[finiteind]
nbins = int(np.ceil((np.nanmax(finite_phases) -
np.nanmin(finite_phases))/binsize) + 1)
minphase = np.nanmin(finite_phases)
phasebins = [(minphase + x*binsize) for x in range(nbins)]
# make a KD-tree on the PHASEs so we can do fast distance calculations. we
# need to add a bogus y coord to make this a problem that KD-trees can
# solve.
time_coords = np.array([[x,1.0] for x in finite_phases])
phasetree = cKDTree(time_coords)
binned_finite_phaseseries_indices = []
collected_binned_mags = {}
for phase in phasebins:
# find all bin indices close to within binsize of this point using the
# cKDTree query. we use the p-norm = 1 (Manhattan distance); since the
# bogus y coords are all equal, this reduces to the absolute phase
# difference between points.
bin_indices = phasetree.query_ball_point(np.array([phase,1.0]),
binsize/2.0, p=1.0)
# if the bin_indices have already been collected, then we're
# done with this bin, move to the next one. if they haven't,
# then this is the start of a new bin.
if (bin_indices not in binned_finite_phaseseries_indices and
len(bin_indices) >= minbinelems):
binned_finite_phaseseries_indices.append(bin_indices)
# convert to ndarrays
binned_finite_phaseseries_indices = [np.array(x) for x in
binned_finite_phaseseries_indices]
collected_binned_mags['phasebins_indices'] = (
binned_finite_phaseseries_indices
)
collected_binned_mags['phasebins'] = phasebins
collected_binned_mags['nbins'] = len(binned_finite_phaseseries_indices)
# collect the finite_phases
binned_phase = np.array([np.median(finite_phases[x])
for x in binned_finite_phaseseries_indices])
collected_binned_mags['binnedphases'] = binned_phase
collected_binned_mags['binsize'] = binsize
# median bin the magnitudes according to the calculated indices
collected_binned_mags['binnedmags'] = (
np.array([np.median(finite_mags[x])
for x in binned_finite_phaseseries_indices])
)
return collected_binned_mags
|
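Since `phase_bin_magseries` expects an already-phased series, a natural pattern is to feed it the output of `phase_magseries`; the sketch below does exactly that with made-up numbers.

import numpy as np
from astrobase.lcmath import phase_magseries, phase_bin_magseries

# phase a noisy 2.5-day sinusoid, then bin the phased curve
rng = np.random.RandomState(0)
times = np.sort(rng.uniform(0.0, 100.0, size=3000))
period, epoch = 2.5, 0.0
mags = (12.0 + 0.1 * np.sin(2.0 * np.pi * times / period)
        + 0.02 * rng.randn(3000))

phased = phase_magseries(times, mags, period, epoch, wrap=False, sort=True)
binned = phase_bin_magseries(phased['phase'], phased['mags'],
                             binsize=0.01, minbinelems=7)
print(binned['nbins'])  # roughly 1/binsize, i.e. ~100 phase bins
print(binned['binnedphases'][:3], binned['binnedmags'][:3])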
[
"Bins",
"a",
"phased",
"magnitude",
"/",
"flux",
"time",
"-",
"series",
"using",
"the",
"bin",
"size",
"provided",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/lcmath.py#L1233-L1334
|
[
"def",
"phase_bin_magseries",
"(",
"phases",
",",
"mags",
",",
"binsize",
"=",
"0.005",
",",
"minbinelems",
"=",
"7",
")",
":",
"# check if the input arrays are ok",
"if",
"not",
"(",
"phases",
".",
"shape",
"and",
"mags",
".",
"shape",
"and",
"len",
"(",
"phases",
")",
">",
"10",
"and",
"len",
"(",
"mags",
")",
">",
"10",
")",
":",
"LOGERROR",
"(",
"\"input time/mag arrays don't have enough elements\"",
")",
"return",
"# find all the finite values of the magnitudes and phases",
"finiteind",
"=",
"np",
".",
"isfinite",
"(",
"mags",
")",
"&",
"np",
".",
"isfinite",
"(",
"phases",
")",
"finite_phases",
"=",
"phases",
"[",
"finiteind",
"]",
"finite_mags",
"=",
"mags",
"[",
"finiteind",
"]",
"nbins",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"(",
"np",
".",
"nanmax",
"(",
"finite_phases",
")",
"-",
"np",
".",
"nanmin",
"(",
"finite_phases",
")",
")",
"/",
"binsize",
")",
"+",
"1",
")",
"minphase",
"=",
"np",
".",
"nanmin",
"(",
"finite_phases",
")",
"phasebins",
"=",
"[",
"(",
"minphase",
"+",
"x",
"*",
"binsize",
")",
"for",
"x",
"in",
"range",
"(",
"nbins",
")",
"]",
"# make a KD-tree on the PHASEs so we can do fast distance calculations. we",
"# need to add a bogus y coord to make this a problem that KD-trees can",
"# solve.",
"time_coords",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"x",
",",
"1.0",
"]",
"for",
"x",
"in",
"finite_phases",
"]",
")",
"phasetree",
"=",
"cKDTree",
"(",
"time_coords",
")",
"binned_finite_phaseseries_indices",
"=",
"[",
"]",
"collected_binned_mags",
"=",
"{",
"}",
"for",
"phase",
"in",
"phasebins",
":",
"# find all bin indices close to within binsize of this point using the",
"# cKDTree query. we use the p-norm = 1 for pairwise Euclidean distance.",
"bin_indices",
"=",
"phasetree",
".",
"query_ball_point",
"(",
"np",
".",
"array",
"(",
"[",
"phase",
",",
"1.0",
"]",
")",
",",
"binsize",
"/",
"2.0",
",",
"p",
"=",
"1.0",
")",
"# if the bin_indices have already been collected, then we're",
"# done with this bin, move to the next one. if they haven't,",
"# then this is the start of a new bin.",
"if",
"(",
"bin_indices",
"not",
"in",
"binned_finite_phaseseries_indices",
"and",
"len",
"(",
"bin_indices",
")",
">=",
"minbinelems",
")",
":",
"binned_finite_phaseseries_indices",
".",
"append",
"(",
"bin_indices",
")",
"# convert to ndarrays",
"binned_finite_phaseseries_indices",
"=",
"[",
"np",
".",
"array",
"(",
"x",
")",
"for",
"x",
"in",
"binned_finite_phaseseries_indices",
"]",
"collected_binned_mags",
"[",
"'phasebins_indices'",
"]",
"=",
"(",
"binned_finite_phaseseries_indices",
")",
"collected_binned_mags",
"[",
"'phasebins'",
"]",
"=",
"phasebins",
"collected_binned_mags",
"[",
"'nbins'",
"]",
"=",
"len",
"(",
"binned_finite_phaseseries_indices",
")",
"# collect the finite_phases",
"binned_phase",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"median",
"(",
"finite_phases",
"[",
"x",
"]",
")",
"for",
"x",
"in",
"binned_finite_phaseseries_indices",
"]",
")",
"collected_binned_mags",
"[",
"'binnedphases'",
"]",
"=",
"binned_phase",
"collected_binned_mags",
"[",
"'binsize'",
"]",
"=",
"binsize",
"# median bin the magnitudes according to the calculated indices",
"collected_binned_mags",
"[",
"'binnedmags'",
"]",
"=",
"(",
"np",
".",
"array",
"(",
"[",
"np",
".",
"median",
"(",
"finite_mags",
"[",
"x",
"]",
")",
"for",
"x",
"in",
"binned_finite_phaseseries_indices",
"]",
")",
")",
"return",
"collected_binned_mags"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
fill_magseries_gaps
|
This fills in gaps in a light curve.
This is mainly intended for use in ACF period-finding, but may be useful
otherwise (i.e. when we figure out ARMA stuff for LCs). The main steps here
are:
- normalize the light curve to zero
- remove giant outliers
- interpolate gaps in the light curve
(since ACF requires evenly spaced sampling)
From McQuillan+ 2013a (https://doi.org/10.1093/mnras/stt536):
"The ACF calculation requires the light curves to be regularly sampled
and normalized to zero. We divided the flux in each quarter by its
median and subtracted unity. Gaps in the light curve longer than the
Kepler long cadence were filled using linear interpolation with added
white Gaussian noise. This noise level was estimated using the variance
of the residuals following subtraction of a smoothed version of the
flux. To smooth the flux, we applied an iterative non-linear filter
which consists of a median filter followed by a boxcar filter, both with
11-point windows, with iterative 3σ clipping of outliers."
Parameters
----------
times,mags,errs : np.array
The magnitude/flux time-series and associated measurement errors to
operate on. Non-finite elements will be removed from these arrays. At
least 10 elements in each array are required for this function to
operate.
fillgaps : {'noiselevel', 'nan'} or float
If `fillgaps='noiselevel'`, fills the gaps with the noise level obtained
via the procedure above. If `fillgaps='nan'`, fills the gaps with
`np.nan`. Otherwise, if `fillgaps` is a float, will use that value to
fill the gaps. The default is to fill the gaps with 0.0 (as in
McQuillan+ 2014) to "...prevent them contributing to the ACF".
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
True if your "mags" are in fact fluxes, i.e. if "fainter" corresponds to
`mags` getting smaller.
filterwindow : int
The number of time-series points to include in the Savitzky-Golay filter
operation when smoothing the light curve. This should be an odd integer.
forcetimebin : float or None
If `forcetimebin` is a float, this value will be used to generate the
interpolated time series, effectively binning the light curve to this
cadence. If `forcetimebin` is None, the mode of the gaps (the forward
difference between successive time values in `times`) in the provided
light curve will be used as the effective cadence. NOTE: `forcetimebin`
must be in the same units as `times`, e.g. if times are JD then
`forcetimebin` must be in days as well
verbose : bool
If this is True, will indicate progress at various stages in the
operation.
Returns
-------
dict
A dict of the following form is returned::
{'itimes': the interpolated time values after gap-filling,
'imags': the interpolated mag/flux values after gap-filling,
'ierrs': the interpolated measurement errors after gap-filling,
'cadence': the cadence of the output mag/flux time-series}
|
astrobase/lcmath.py
|
def fill_magseries_gaps(times, mags, errs,
fillgaps=0.0,
sigclip=3.0,
magsarefluxes=False,
filterwindow=11,
forcetimebin=None,
verbose=True):
'''This fills in gaps in a light curve.
This is mainly intended for use in ACF period-finding, but may be useful
otherwise (i.e. when we figure out ARMA stuff for LCs). The main steps here
are:
- normalize the light curve to zero
- remove giant outliers
- interpolate gaps in the light curve
(since ACF requires evenly spaced sampling)
From McQuillan+ 2013a (https://doi.org/10.1093/mnras/stt536):
"The ACF calculation requires the light curves to be regularly sampled
and normalized to zero. We divided the flux in each quarter by its
median and subtracted unity. Gaps in the light curve longer than the
Kepler long cadence were filled using linear interpolation with added
white Gaussian noise. This noise level was estimated using the variance
of the residuals following subtraction of a smoothed version of the
flux. To smooth the flux, we applied an iterative non-linear filter
which consists of a median filter followed by a boxcar filter, both with
11-point windows, with iterative 3σ clipping of outliers."
Parameters
----------
times,mags,errs : np.array
The magnitude/flux time-series and associated measurement errors to
operate on. Non-finite elements will be removed from these arrays. At
least 10 elements in each array are required for this function to
operate.
fillgaps : {'noiselevel', 'nan'} or float
If `fillgaps='noiselevel'`, fills the gaps with the noise level obtained
via the procedure above. If `fillgaps='nan'`, fills the gaps with
`np.nan`. Otherwise, if `fillgaps` is a float, will use that value to
fill the gaps. The default is to fill the gaps with 0.0 (as in
McQuillan+ 2014) to "...prevent them contributing to the ACF".
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
True if your "mags" are in fact fluxes, i.e. if "fainter" corresponds to
`mags` getting smaller.
filterwindow : int
The number of time-series points to include in the Savitzky-Golay filter
operation when smoothing the light curve. This should be an odd integer.
forcetimebin : float or None
If `forcetimebin` is a float, this value will be used to generate the
interpolated time series, effectively binning the light curve to this
cadence. If `forcetimebin` is None, the mode of the gaps (the forward
difference between successive time values in `times`) in the provided
light curve will be used as the effective cadence. NOTE: `forcetimebin`
must be in the same units as `times`, e.g. if times are JD then
`forcetimebin` must be in days as well
verbose : bool
If this is True, will indicate progress at various stages in the
operation.
Returns
-------
dict
A dict of the following form is returned::
{'itimes': the interpolated time values after gap-filling,
'imags': the interpolated mag/flux values after gap-filling,
'ierrs': the interpolated measurement errors after gap-filling,
'cadence': the cadence of the output mag/flux time-series}
'''
# remove nans
finind = np.isfinite(times) & np.isfinite(mags) & np.isfinite(errs)
ftimes, fmags, ferrs = times[finind], mags[finind], errs[finind]
# remove zero errs
nzind = np.nonzero(ferrs)
ftimes, fmags, ferrs = ftimes[nzind], fmags[nzind], ferrs[nzind]
# sigma-clip
stimes, smags, serrs = sigclip_magseries(ftimes, fmags, ferrs,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
# normalize to zero
if magsarefluxes:
smags = smags / np.median(smags) - 1.0
else:
smags = smags - np.median(smags)
if isinstance(fillgaps, float):
gapfiller = fillgaps
elif isinstance(fillgaps, str) and fillgaps == 'noiselevel':
# figure out the Gaussian noise level by subtracting a Savitzky-Golay
# filtered version of the light curve
smoothed = smags - savgol_filter(smags, filterwindow, 2)
noiselevel = 1.483 * np.median(np.abs(smoothed - np.median(smoothed)))
gapfiller = noiselevel
elif isinstance(fillgaps, str) and fillgaps == 'nan':
gapfiller = np.nan
# figure out the gap size and where to interpolate. we do this by figuring
# out the most common gap (this should be the cadence). to do this, we need
# to calculate the mode of the gap distribution.
# get the gaps
gaps = np.diff(stimes)
# just use scipy.stats.mode instead of our hacked together nonsense earlier.
gapmoderes = scipy.stats.mode(gaps)
gapmode = gapmoderes[0].item()
LOGINFO('auto-cadence for mag series: %.5f' % gapmode)
# if forcetimebin is set, override the auto-detected cadence
if forcetimebin:
LOGWARNING('forcetimebin is set, forcing cadence to %.5f' %
forcetimebin)
gapmode = forcetimebin
if gapmode == 0.0:
LOGERROR('the smallest cadence of this light curve appears to be 0.0, '
'the automatic cadence finder probably failed. '
'try setting forcetimebin?')
return None
starttime, endtime = np.min(stimes), np.max(stimes)
ntimes = int(np.ceil((endtime - starttime)/gapmode) + 1)
if verbose:
LOGINFO('generating new time series with %s measurements' % ntimes)
# first, generate the full time series
interpolated_times = np.linspace(starttime, endtime, ntimes)
interpolated_mags = np.full_like(interpolated_times, gapfiller)
interpolated_errs = np.full_like(interpolated_times, gapfiller)
for ind, itime in enumerate(interpolated_times[:-1]):
nextitime = itime + gapmode
# find the mags between this and the next time bin
itimeind = np.where((stimes > itime) & (stimes < nextitime))
# if there's more than one elem in this time bin, median them
if itimeind[0].size > 1:
interpolated_mags[ind] = np.median(smags[itimeind[0]])
interpolated_errs[ind] = np.median(serrs[itimeind[0]])
# otherwise, if there's only one elem in this time bin, take it
elif itimeind[0].size == 1:
interpolated_mags[ind] = smags[itimeind[0]]
interpolated_errs[ind] = serrs[itimeind[0]]
return {'itimes':interpolated_times,
'imags':interpolated_mags,
'ierrs':interpolated_errs,
'cadence':gapmode}
|
def fill_magseries_gaps(times, mags, errs,
fillgaps=0.0,
sigclip=3.0,
magsarefluxes=False,
filterwindow=11,
forcetimebin=None,
verbose=True):
'''This fills in gaps in a light curve.
This is mainly intended for use in ACF period-finding, but may be useful
otherwise (i.e. when we figure out ARMA stuff for LCs). The main steps here
are:
- normalize the light curve to zero
- remove giant outliers
- interpolate gaps in the light curve
(since ACF requires evenly spaced sampling)
From McQuillan+ 2013a (https://doi.org/10.1093/mnras/stt536):
"The ACF calculation requires the light curves to be regularly sampled
and normalized to zero. We divided the flux in each quarter by its
median and subtracted unity. Gaps in the light curve longer than the
Kepler long cadence were filled using linear interpolation with added
white Gaussian noise. This noise level was estimated using the variance
of the residuals following subtraction of a smoothed version of the
flux. To smooth the flux, we applied an iterative non-linear filter
which consists of a median filter followed by a boxcar filter, both with
11-point windows, with iterative 3σ clipping of outliers."
Parameters
----------
times,mags,errs : np.array
The magnitude/flux time-series and associated measurement errors to
operate on. Non-finite elements will be removed from these arrays. At
least 10 elements in each array are required for this function to
operate.
fillgaps : {'noiselevel', 'nan'} or float
If `fillgaps='noiselevel'`, fills the gaps with the noise level obtained
via the procedure above. If `fillgaps='nan'`, fills the gaps with
`np.nan`. Otherwise, if `fillgaps` is a float, will use that value to
fill the gaps. The default is to fill the gaps with 0.0 (as in
McQuillan+ 2014) to "...prevent them contributing to the ACF".
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
True if your "mags" are in fact fluxes, i.e. if "fainter" corresponds to
`mags` getting smaller.
filterwindow : int
The number of time-series points to include in the Savitzky-Golay filter
operation when smoothing the light curve. This should be an odd integer.
forcetimebin : float or None
If `forcetimebin` is a float, this value will be used to generate the
interpolated time series, effectively binning the light curve to this
cadence. If `forcetimebin` is None, the mode of the gaps (the forward
difference between successive time values in `times`) in the provided
light curve will be used as the effective cadence. NOTE: `forcetimebin`
must be in the same units as `times`, e.g. if times are JD then
`forcetimebin` must be in days as well
verbose : bool
If this is True, will indicate progress at various stages in the
operation.
Returns
-------
dict
A dict of the following form is returned::
{'itimes': the interpolated time values after gap-filling,
'imags': the interpolated mag/flux values after gap-filling,
'ierrs': the interpolated measurement errors after gap-filling,
'cadence': the cadence of the output mag/flux time-series}
'''
# remove nans
finind = np.isfinite(times) & np.isfinite(mags) & np.isfinite(errs)
ftimes, fmags, ferrs = times[finind], mags[finind], errs[finind]
# remove zero errs
nzind = np.nonzero(ferrs)
ftimes, fmags, ferrs = ftimes[nzind], fmags[nzind], ferrs[nzind]
# sigma-clip
stimes, smags, serrs = sigclip_magseries(ftimes, fmags, ferrs,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
# normalize to zero
if magsarefluxes:
smags = smags / np.median(smags) - 1.0
else:
smags = smags - np.median(smags)
if isinstance(fillgaps, float):
gapfiller = fillgaps
elif isinstance(fillgaps, str) and fillgaps == 'noiselevel':
# figure out the Gaussian noise level by subtracting a Savitzky-Golay
# filtered version of the light curve
smoothed = smags - savgol_filter(smags, filterwindow, 2)
noiselevel = 1.483 * np.median(np.abs(smoothed - np.median(smoothed)))
gapfiller = noiselevel
elif isinstance(fillgaps, str) and fillgaps == 'nan':
gapfiller = np.nan
# figure out the gap size and where to interpolate. we do this by figuring
# out the most common gap (this should be the cadence). to do this, we need
# to calculate the mode of the gap distribution.
# get the gaps
gaps = np.diff(stimes)
# just use scipy.stats.mode instead of our hacked together nonsense earlier.
gapmoderes = scipy.stats.mode(gaps)
gapmode = gapmoderes[0].item()
LOGINFO('auto-cadence for mag series: %.5f' % gapmode)
# if forcetimebin is set, override the auto-detected cadence
if forcetimebin:
LOGWARNING('forcetimebin is set, forcing cadence to %.5f' %
forcetimebin)
gapmode = forcetimebin
if gapmode == 0.0:
LOGERROR('the smallest cadence of this light curve appears to be 0.0, '
'the automatic cadence finder probably failed. '
'try setting forcetimebin?')
return None
starttime, endtime = np.min(stimes), np.max(stimes)
ntimes = int(np.ceil((endtime - starttime)/gapmode) + 1)
if verbose:
LOGINFO('generating new time series with %s measurements' % ntimes)
# first, generate the full time series
interpolated_times = np.linspace(starttime, endtime, ntimes)
interpolated_mags = np.full_like(interpolated_times, gapfiller)
interpolated_errs = np.full_like(interpolated_times, gapfiller)
for ind, itime in enumerate(interpolated_times[:-1]):
nextitime = itime + gapmode
# find the mags between this and the next time bin
itimeind = np.where((stimes > itime) & (stimes < nextitime))
# if there's more than one elem in this time bin, median them
if itimeind[0].size > 1:
interpolated_mags[ind] = np.median(smags[itimeind[0]])
interpolated_errs[ind] = np.median(serrs[itimeind[0]])
# otherwise, if there's only one elem in this time bin, take it
elif itimeind[0].size == 1:
interpolated_mags[ind] = smags[itimeind[0]]
interpolated_errs[ind] = serrs[itimeind[0]]
return {'itimes':interpolated_times,
'imags':interpolated_mags,
'ierrs':interpolated_errs,
'cadence':gapmode}
|
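To round off this row, a hedged usage sketch of `fill_magseries_gaps`: the 0.02-day cadence, the injected 5-day gap, and the flux amplitudes are invented. Note that the `fillgaps` check in the code above is `isinstance(fillgaps, float)`, so pass `0.0` rather than the int `0`.

import numpy as np
from astrobase.lcmath import fill_magseries_gaps

# regular 0.02-day cadence with a 5-day chunk of missing observations
rng = np.random.RandomState(0)
times = np.arange(0.0, 40.0, 0.02)
times = times[(times < 10.0) | (times > 15.0)]  # carve out the gap
fluxes = 1.0 + 1e-3 * np.sin(2.0 * np.pi * times / 3.0)
fluxes += 1e-4 * rng.randn(times.size)
errs = np.full_like(fluxes, 1e-4)

filled = fill_magseries_gaps(times, fluxes, errs,
                             fillgaps=0.0,        # fill value for the gap
                             sigclip=None,        # skip outlier clipping
                             magsarefluxes=True,  # normalize via division
                             verbose=False)
print(filled['cadence'])      # ~0.02: the mode of the time differences
print(filled['itimes'].size)  # evenly sampled over the whole baseline
# imags inside the 5-day gap are the fill value (0.0 after normalization)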
[
"This",
"fills",
"in",
"gaps",
"in",
"a",
"light",
"curve",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/lcmath.py#L1455-L1645
|
[
"def",
"fill_magseries_gaps",
"(",
"times",
",",
"mags",
",",
"errs",
",",
"fillgaps",
"=",
"0.0",
",",
"sigclip",
"=",
"3.0",
",",
"magsarefluxes",
"=",
"False",
",",
"filterwindow",
"=",
"11",
",",
"forcetimebin",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
":",
"# remove nans",
"finind",
"=",
"np",
".",
"isfinite",
"(",
"times",
")",
"&",
"np",
".",
"isfinite",
"(",
"mags",
")",
"&",
"np",
".",
"isfinite",
"(",
"errs",
")",
"ftimes",
",",
"fmags",
",",
"ferrs",
"=",
"times",
"[",
"finind",
"]",
",",
"mags",
"[",
"finind",
"]",
",",
"errs",
"[",
"finind",
"]",
"# remove zero errs",
"nzind",
"=",
"np",
".",
"nonzero",
"(",
"ferrs",
")",
"ftimes",
",",
"fmags",
",",
"ferrs",
"=",
"ftimes",
"[",
"nzind",
"]",
",",
"fmags",
"[",
"nzind",
"]",
",",
"ferrs",
"[",
"nzind",
"]",
"# sigma-clip",
"stimes",
",",
"smags",
",",
"serrs",
"=",
"sigclip_magseries",
"(",
"ftimes",
",",
"fmags",
",",
"ferrs",
",",
"magsarefluxes",
"=",
"magsarefluxes",
",",
"sigclip",
"=",
"sigclip",
")",
"# normalize to zero",
"if",
"magsarefluxes",
":",
"smags",
"=",
"smags",
"/",
"np",
".",
"median",
"(",
"smags",
")",
"-",
"1.0",
"else",
":",
"smags",
"=",
"smags",
"-",
"np",
".",
"median",
"(",
"smags",
")",
"if",
"isinstance",
"(",
"fillgaps",
",",
"float",
")",
":",
"gapfiller",
"=",
"fillgaps",
"elif",
"isinstance",
"(",
"fillgaps",
",",
"str",
")",
"and",
"fillgaps",
"==",
"'noiselevel'",
":",
"# figure out the gaussian noise level by subtracting a Savitsky-Golay",
"# filtered version of the light curve",
"smoothed",
"=",
"smags",
"-",
"savgol_filter",
"(",
"smags",
",",
"filterwindow",
",",
"2",
")",
"noiselevel",
"=",
"1.483",
"*",
"np",
".",
"median",
"(",
"np",
".",
"abs",
"(",
"smoothed",
"-",
"np",
".",
"median",
"(",
"smoothed",
")",
")",
")",
"gapfiller",
"=",
"noiselevel",
"elif",
"isinstance",
"(",
"fillgaps",
",",
"str",
")",
"and",
"fillgaps",
"==",
"'nan'",
":",
"gapfiller",
"=",
"np",
".",
"nan",
"# figure out the gap size and where to interpolate. we do this by figuring",
"# out the most common gap (this should be the cadence). to do this, we need",
"# to calculate the mode of the gap distribution.",
"# get the gaps",
"gaps",
"=",
"np",
".",
"diff",
"(",
"stimes",
")",
"# just use scipy.stats.mode instead of our hacked together nonsense earlier.",
"gapmoderes",
"=",
"scipy",
".",
"stats",
".",
"mode",
"(",
"gaps",
")",
"gapmode",
"=",
"gapmoderes",
"[",
"0",
"]",
".",
"item",
"(",
")",
"LOGINFO",
"(",
"'auto-cadence for mag series: %.5f'",
"%",
"gapmode",
")",
"# sort the gaps",
"if",
"forcetimebin",
":",
"LOGWARNING",
"(",
"'forcetimebin is set, forcing cadence to %.5f'",
"%",
"forcetimebin",
")",
"gapmode",
"=",
"forcetimebin",
"if",
"gapmode",
"==",
"0.0",
":",
"LOGERROR",
"(",
"'the smallest cadence of this light curve appears to be 0.0, '",
"'the automatic cadence finder probably failed. '",
"'try setting forcetimebin?'",
")",
"return",
"None",
"starttime",
",",
"endtime",
"=",
"np",
".",
"min",
"(",
"stimes",
")",
",",
"np",
".",
"max",
"(",
"stimes",
")",
"ntimes",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"(",
"endtime",
"-",
"starttime",
")",
"/",
"gapmode",
")",
"+",
"1",
")",
"if",
"verbose",
":",
"LOGINFO",
"(",
"'generating new time series with %s measurements'",
"%",
"ntimes",
")",
"# first, generate the full time series",
"interpolated_times",
"=",
"np",
".",
"linspace",
"(",
"starttime",
",",
"endtime",
",",
"ntimes",
")",
"interpolated_mags",
"=",
"np",
".",
"full_like",
"(",
"interpolated_times",
",",
"gapfiller",
")",
"interpolated_errs",
"=",
"np",
".",
"full_like",
"(",
"interpolated_times",
",",
"gapfiller",
")",
"for",
"ind",
",",
"itime",
"in",
"enumerate",
"(",
"interpolated_times",
"[",
":",
"-",
"1",
"]",
")",
":",
"nextitime",
"=",
"itime",
"+",
"gapmode",
"# find the mags between this and the next time bin",
"itimeind",
"=",
"np",
".",
"where",
"(",
"(",
"stimes",
">",
"itime",
")",
"&",
"(",
"stimes",
"<",
"nextitime",
")",
")",
"# if there's more than one elem in this time bin, median them",
"if",
"itimeind",
"[",
"0",
"]",
".",
"size",
">",
"1",
":",
"interpolated_mags",
"[",
"ind",
"]",
"=",
"np",
".",
"median",
"(",
"smags",
"[",
"itimeind",
"[",
"0",
"]",
"]",
")",
"interpolated_errs",
"[",
"ind",
"]",
"=",
"np",
".",
"median",
"(",
"serrs",
"[",
"itimeind",
"[",
"0",
"]",
"]",
")",
"# otherwise, if there's only one elem in this time bin, take it",
"elif",
"itimeind",
"[",
"0",
"]",
".",
"size",
"==",
"1",
":",
"interpolated_mags",
"[",
"ind",
"]",
"=",
"smags",
"[",
"itimeind",
"[",
"0",
"]",
"]",
"interpolated_errs",
"[",
"ind",
"]",
"=",
"serrs",
"[",
"itimeind",
"[",
"0",
"]",
"]",
"return",
"{",
"'itimes'",
":",
"interpolated_times",
",",
"'imags'",
":",
"interpolated_mags",
",",
"'ierrs'",
":",
"interpolated_errs",
",",
"'cadence'",
":",
"gapmode",
"}"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
get_snr_of_dip
|
Calculate the total SNR of a transit assuming gaussian uncertainties.
`modelmags` gets interpolated onto the cadence of `mags`. The noise is
calculated as the 1-sigma std deviation of the residual (see below).
Following Carter et al. 2009::
Q = sqrt( Γ T ) * δ / σ
for Q the total SNR of the transit in the r->0 limit, where::
r = Rp/Rstar,
T = transit duration,
δ = transit depth,
σ = RMS of the lightcurve in transit.
Γ = sampling rate
Thus Γ * T is roughly the number of points obtained during transit.
(This doesn't correctly account for the SNR during ingress/egress, but this
is a second-order correction).
Note this is the same total SNR as described by e.g., Kovacs et al. 2002,
their Equation 11.
NOTE: this only works with fluxes at the moment.
Parameters
----------
times,mags : np.array
The input flux time-series to process.
modeltimes,modelmags : np.array
A transiting planet model, either from BLS, a trapezoid model, or a
Mandel-Agol model.
atol_normalization : float
The absolute tolerance to which the median of the passed model fluxes
must be equal to 1.
indsforrms : np.array
An array of bools of `len(mags)` used to select points for the RMS
measurement. If not passed, the RMS of the entire passed timeseries is
used as an approximation. Generally, it's best to use out-of-transit
points, so the RMS measurement is not model-dependent.
magsarefluxes : bool
Currently forced to be True because this function only works with
fluxes.
verbose : bool
If True, indicates progress and warns about problems.
transitdepth : float or None
If the transit depth is known, pass it in here. Otherwise, it is
calculated assuming OOT flux is 1.
npoints_in_transit : int or None
If the number of points in transit is known, pass it in here. Otherwise,
the function will guess at this value.
Returns
-------
(snr, transit_depth, noise) : tuple
The returned tuple contains the calculated SNR, transit depth, and noise
of the residual lightcurve calculated using the relation described
above.
|
astrobase/varbase/transits.py
|
def get_snr_of_dip(times,
mags,
modeltimes,
modelmags,
atol_normalization=1e-8,
indsforrms=None,
magsarefluxes=False,
verbose=True,
transitdepth=None,
npoints_in_transit=None):
'''Calculate the total SNR of a transit assuming gaussian uncertainties.
`modelmags` gets interpolated onto the cadence of `mags`. The noise is
calculated as the 1-sigma std deviation of the residual (see below).
Following Carter et al. 2009::
Q = sqrt( Γ T ) * δ / σ
for Q the total SNR of the transit in the r->0 limit, where::
r = Rp/Rstar,
T = transit duration,
δ = transit depth,
σ = RMS of the lightcurve in transit.
Γ = sampling rate
Thus Γ * T is roughly the number of points obtained during transit.
(This doesn't correctly account for the SNR during ingress/egress, but this
is a second-order correction).
Note this is the same total SNR as described by e.g., Kovacs et al. 2002,
their Equation 11.
NOTE: this only works with fluxes at the moment.
Parameters
----------
times,mags : np.array
The input flux time-series to process.
modeltimes,modelmags : np.array
A transiting planet model, either from BLS, a trapezoid model, or a
Mandel-Agol model.
atol_normalization : float
The absolute tolerance to which the median of the passed model fluxes
must be equal to 1.
indsforrms : np.array
        An array of bools of `len(mags)` used to select points for the RMS
        measurement. If not passed, the RMS of the entire passed timeseries is
        used as an approximation. Generally, it's best to use out-of-transit
        points, so the RMS measurement is not model-dependent.
magsarefluxes : bool
Currently forced to be True because this function only works with
fluxes.
verbose : bool
If True, indicates progress and warns about problems.
transitdepth : float or None
If the transit depth is known, pass it in here. Otherwise, it is
calculated assuming OOT flux is 1.
    npoints_in_transit : int or None
If the number of points in transit is known, pass it in here. Otherwise,
the function will guess at this value.
Returns
-------
(snr, transit_depth, noise) : tuple
The returned tuple contains the calculated SNR, transit depth, and noise
of the residual lightcurve calculated using the relation described
above.
'''
if magsarefluxes:
if not np.isclose(np.nanmedian(modelmags), 1, atol=atol_normalization):
raise AssertionError('snr calculation assumes modelmags are '
'median-normalized')
else:
raise NotImplementedError(
            'need to implement a method for identifying in-transit points '
            'when mags are mags, and not fluxes'
)
if not transitdepth:
# calculate transit depth from whatever model magnitudes are passed.
transitdepth = np.abs(np.max(modelmags) - np.min(modelmags))
# generally, mags (data) and modelmags are at different cadence.
# interpolate modelmags onto the cadence of mags.
if not len(mags) == len(modelmags):
from scipy.interpolate import interp1d
fn = interp1d(modeltimes, modelmags, kind='cubic', bounds_error=True,
fill_value=np.nan)
modelmags = fn(times)
if verbose:
LOGINFO('interpolated model timeseries onto the data timeseries')
subtractedmags = mags - modelmags
if isinstance(indsforrms, np.ndarray):
subtractedrms = np.std(subtractedmags[indsforrms])
if verbose:
LOGINFO('using selected points to measure RMS')
else:
subtractedrms = np.std(subtractedmags)
if verbose:
LOGINFO('using all points to measure RMS')
def _get_npoints_in_transit(modelmags):
# assumes median-normalized fluxes are input
if np.nanmedian(modelmags) == 1:
return len(modelmags[(modelmags != 1)])
else:
raise NotImplementedError
if not npoints_in_transit:
npoints_in_transit = _get_npoints_in_transit(modelmags)
snr = np.sqrt(npoints_in_transit) * transitdepth/subtractedrms
if verbose:
LOGINFO('\npoints in transit: {:d}'.format(npoints_in_transit) +
'\ndepth: {:.2e}'.format(transitdepth) +
'\nrms in residual: {:.2e}'.format(subtractedrms) +
'\n\t SNR: {:.2e}'.format(snr))
return snr, transitdepth, subtractedrms
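
A hedged illustration of the function above (the toy arrays are invented, not from the repo): a box-shaped transit model plus gaussian noise, with `indsforrms` selecting the out-of-transit points so the RMS is model-independent, as the docstring recommends.

import numpy as np
from astrobase.varbase.transits import get_snr_of_dip

rng = np.random.RandomState(42)
times = np.linspace(0.0, 1.0, 1000)
modelmags = np.ones_like(times)
in_transit = (times > 0.45) & (times < 0.55)
modelmags[in_transit] = 0.99  # a 1% deep box-shaped transit
fluxes = modelmags + rng.normal(0.0, 1.0e-3, times.size)

# expect snr near sqrt(100) * 0.01 / 0.001 ~ 100 for this toy setup
snr, depth, noise = get_snr_of_dip(times, fluxes, times, modelmags,
                                   magsarefluxes=True,
                                   indsforrms=~in_transit)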
|
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/varbase/transits.py#L107-L245
|
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
estimate_achievable_tmid_precision
|
Using Carter et al. 2009's estimate, calculate the theoretical optimal
precision on mid-transit time measurement possible given a transit of a
particular SNR.
The relation used is::
sigma_tc = Q^{-1} * T * sqrt(θ/2)
Q = SNR of the transit.
T = transit duration, which is 2.14 hours from discovery paper.
θ = τ/T = ratio of ingress to total duration
~= (few minutes [guess]) / 2.14 hours
Parameters
----------
snr : float
The measured signal-to-noise of the transit, e.g. from
:py:func:`astrobase.periodbase.kbls.bls_stats_singleperiod` or from
running the `.compute_stats()` method on an Astropy BoxLeastSquares
object.
t_ingress_min : float
The ingress duration in minutes. This is t_I to t_II in Winn (2010)
nomenclature.
t_duration_hr : float
The transit duration in hours. This is t_I to t_IV in Winn (2010)
nomenclature.
Returns
-------
float
Returns the precision achievable for transit-center time as calculated
from the relation above. This is in days.
|
astrobase/varbase/transits.py
|
def estimate_achievable_tmid_precision(snr, t_ingress_min=10,
t_duration_hr=2.14):
'''Using Carter et al. 2009's estimate, calculate the theoretical optimal
precision on mid-transit time measurement possible given a transit of a
particular SNR.
The relation used is::
sigma_tc = Q^{-1} * T * sqrt(θ/2)
Q = SNR of the transit.
T = transit duration, which is 2.14 hours from discovery paper.
θ = τ/T = ratio of ingress to total duration
~= (few minutes [guess]) / 2.14 hours
Parameters
----------
snr : float
        The measured signal-to-noise of the transit, e.g. from
:py:func:`astrobase.periodbase.kbls.bls_stats_singleperiod` or from
running the `.compute_stats()` method on an Astropy BoxLeastSquares
object.
t_ingress_min : float
The ingress duration in minutes. This is t_I to t_II in Winn (2010)
nomenclature.
t_duration_hr : float
The transit duration in hours. This is t_I to t_IV in Winn (2010)
nomenclature.
Returns
-------
float
Returns the precision achievable for transit-center time as calculated
from the relation above. This is in days.
'''
t_ingress = t_ingress_min*u.minute
t_duration = t_duration_hr*u.hour
theta = t_ingress/t_duration
sigma_tc = (1/snr * t_duration * np.sqrt(theta/2))
LOGINFO('assuming t_ingress = {:.1f}'.format(t_ingress))
LOGINFO('assuming t_duration = {:.1f}'.format(t_duration))
LOGINFO('measured SNR={:.2f}\n\t'.format(snr) +
'-->theoretical sigma_tc = {:.2e} = {:.2e} = {:.2e}'.format(
sigma_tc.to(u.minute), sigma_tc.to(u.hour), sigma_tc.to(u.day)))
return sigma_tc.to(u.day).value
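
An illustrative call, with a made-up SNR of 12 and the function's default ingress/duration guesses; the return value is in days, so it is converted for display.

from astrobase.varbase.transits import estimate_achievable_tmid_precision

sigma_tc_days = estimate_achievable_tmid_precision(12.0)
print('theoretical sigma_tc: %.2f minutes' % (sigma_tc_days * 24.0 * 60.0))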
|
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/varbase/transits.py#L249-L303
|
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
get_transit_times
|
Given a BLS period, epoch, and transit ingress/egress points (usually
from :py:func:`astrobase.periodbase.kbls.bls_stats_singleperiod`), return
the times within transit durations + `extra_maskfrac` of each transit.
Optionally, can use the (more accurate) trapezoidal fit period and epoch, if
it's passed. Useful for inspecting individual transits, and masking them
out if desired.
Parameters
----------
blsd : dict
This is the dict returned by
:py:func:`astrobase.periodbase.kbls.bls_stats_singleperiod`.
time : np.array
The times from the time-series of transit observations used to calculate
the initial period.
extra_maskfrac : float
This is the separation from in-transit points you desire, in units of
the transit duration. `extra_maskfrac = 0` if you just want points
inside transit (see below).
trapd : dict
This is a dict returned by
:py:func:`astrobase.lcfit.transits.traptransit_fit_magseries` containing
the trapezoid transit model.
nperiodint : int
This indicates how many periods backwards/forwards to try and identify
transits from the epochs reported in `blsd` or `trapd`.
Returns
-------
(tmids_obsd, t_starts, t_ends) : tuple of np.array
The returned items are::
tmids_obsd (np.ndarray): best guess of transit midtimes in
lightcurve. Has length number of transits in lightcurve.
t_starts (np.ndarray): t_Is - extra_maskfrac*tdur, for t_Is transit
first contact point.
t_ends (np.ndarray): t_IVs + extra_maskfrac*tdur, for t_IVs transit
last contact point.
|
astrobase/varbase/transits.py
|
def get_transit_times(
blsd,
time,
extra_maskfrac,
trapd=None,
nperiodint=1000
):
'''Given a BLS period, epoch, and transit ingress/egress points (usually
from :py:func:`astrobase.periodbase.kbls.bls_stats_singleperiod`), return
the times within transit durations + `extra_maskfrac` of each transit.
Optionally, can use the (more accurate) trapezoidal fit period and epoch, if
it's passed. Useful for inspecting individual transits, and masking them
out if desired.
Parameters
----------
blsd : dict
This is the dict returned by
:py:func:`astrobase.periodbase.kbls.bls_stats_singleperiod`.
time : np.array
The times from the time-series of transit observations used to calculate
the initial period.
extra_maskfrac : float
This is the separation from in-transit points you desire, in units of
the transit duration. `extra_maskfrac = 0` if you just want points
inside transit (see below).
trapd : dict
This is a dict returned by
:py:func:`astrobase.lcfit.transits.traptransit_fit_magseries` containing
the trapezoid transit model.
nperiodint : int
This indicates how many periods backwards/forwards to try and identify
transits from the epochs reported in `blsd` or `trapd`.
Returns
-------
(tmids_obsd, t_starts, t_ends) : tuple of np.array
The returned items are::
tmids_obsd (np.ndarray): best guess of transit midtimes in
lightcurve. Has length number of transits in lightcurve.
t_starts (np.ndarray): t_Is - extra_maskfrac*tdur, for t_Is transit
first contact point.
        t_ends (np.ndarray): t_IVs + extra_maskfrac*tdur, for t_IVs transit
            last contact point.
'''
if trapd:
period = trapd['fitinfo']['finalparams'][0]
t0 = trapd['fitinfo']['fitepoch']
transitduration_phase = trapd['fitinfo']['finalparams'][3]
tdur = period * transitduration_phase
else:
period = blsd['period']
t0 = blsd['epoch']
tdur = (
period *
(blsd['transegressbin']-blsd['transingressbin'])/blsd['nphasebins']
)
if not blsd['transegressbin'] > blsd['transingressbin']:
raise NotImplementedError(
'careful of the width. '
'this edge case must be dealt with separately.'
)
tmids = [t0 + ix*period for ix in range(-nperiodint,nperiodint)]
sel = (tmids > np.nanmin(time)) & (tmids < np.nanmax(time))
tmids_obsd = np.array(tmids)[sel]
t_Is = tmids_obsd - tdur/2
t_IVs = tmids_obsd + tdur/2
# focus on the times around transit
t_starts = t_Is - extra_maskfrac * tdur
t_ends = t_IVs + extra_maskfrac * tdur
return tmids_obsd, t_starts, t_ends
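
A sketch of calling the function above with a hand-rolled `blsd` dict standing in for the output of `kbls.bls_stats_singleperiod`; every value in the dict is invented for illustration.

import numpy as np
from astrobase.varbase.transits import get_transit_times

time = np.linspace(2458000.0, 2458027.0, 5000)  # 27 days of fake timestamps
blsd = {'period': 3.5, 'epoch': 2458001.2,
        'transingressbin': 95, 'transegressbin': 105, 'nphasebins': 200}

tmids, t_starts, t_ends = get_transit_times(blsd, time, extra_maskfrac=0.03)
print('%d transits in the observed window' % tmids.size)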
|
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/varbase/transits.py#L307-L395
|
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
given_lc_get_transit_tmids_tstarts_tends
|
Gets the transit start, middle, and end times for transits in a given
time-series of observations.
Parameters
----------
time,flux,err_flux : np.array
The input flux time-series measurements and their associated measurement
errors
blsfit_savpath : str or None
If provided as a str, indicates the path of the fit plot to make for a
simple BLS model fit to the transit using the obtained period and epoch.
trapfit_savpath : str or None
If provided as a str, indicates the path of the fit plot to make for a
trapezoidal transit model fit to the transit using the obtained period
and epoch.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
This is by default True for this function, since it works on fluxes only
at the moment.
nworkers : int
The number of parallel BLS period-finder workers to use.
extra_maskfrac : float
This is the separation (N) from in-transit points you desire, in units
of the transit duration. `extra_maskfrac = 0` if you just want points
inside transit, otherwise::
t_starts = t_Is - N*tdur, t_ends = t_IVs + N*tdur
Thus setting N=0.03 masks slightly more than the guessed transit
duration.
Returns
-------
(tmids_obsd, t_starts, t_ends) : tuple
The returned items are::
tmids_obsd (np.ndarray): best guess of transit midtimes in
lightcurve. Has length number of transits in lightcurve.
t_starts (np.ndarray): t_Is - extra_maskfrac*tdur, for t_Is transit
first contact point.
t_ends (np.ndarray): t_IVs + extra_maskfrac*tdur, for t_IVs transit
last contact point.
|
astrobase/varbase/transits.py
|
def given_lc_get_transit_tmids_tstarts_tends(
time,
flux,
err_flux,
blsfit_savpath=None,
trapfit_savpath=None,
magsarefluxes=True,
nworkers=1,
sigclip=None,
extra_maskfrac=0.03
):
'''Gets the transit start, middle, and end times for transits in a given
time-series of observations.
Parameters
----------
time,flux,err_flux : np.array
The input flux time-series measurements and their associated measurement
errors
blsfit_savpath : str or None
If provided as a str, indicates the path of the fit plot to make for a
simple BLS model fit to the transit using the obtained period and epoch.
trapfit_savpath : str or None
If provided as a str, indicates the path of the fit plot to make for a
trapezoidal transit model fit to the transit using the obtained period
and epoch.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
This is by default True for this function, since it works on fluxes only
at the moment.
nworkers : int
The number of parallel BLS period-finder workers to use.
extra_maskfrac : float
This is the separation (N) from in-transit points you desire, in units
of the transit duration. `extra_maskfrac = 0` if you just want points
inside transit, otherwise::
t_starts = t_Is - N*tdur, t_ends = t_IVs + N*tdur
Thus setting N=0.03 masks slightly more than the guessed transit
duration.
Returns
-------
(tmids_obsd, t_starts, t_ends) : tuple
The returned items are::
tmids_obsd (np.ndarray): best guess of transit midtimes in
lightcurve. Has length number of transits in lightcurve.
t_starts (np.ndarray): t_Is - extra_maskfrac*tdur, for t_Is transit
first contact point.
        t_ends (np.ndarray): t_IVs + extra_maskfrac*tdur, for t_IVs transit
            last contact point.
'''
# first, run BLS to get an initial epoch and period.
endp = 1.05*(np.nanmax(time) - np.nanmin(time))/2
blsdict = kbls.bls_parallel_pfind(time, flux, err_flux,
magsarefluxes=magsarefluxes, startp=0.1,
endp=endp, maxtransitduration=0.3,
nworkers=nworkers, sigclip=sigclip)
blsd = kbls.bls_stats_singleperiod(time, flux, err_flux,
blsdict['bestperiod'],
magsarefluxes=True, sigclip=sigclip,
perioddeltapercent=5)
# plot the BLS model.
if blsfit_savpath:
make_fit_plot(blsd['phases'], blsd['phasedmags'], None,
blsd['blsmodel'], blsd['period'], blsd['epoch'],
blsd['epoch'], blsfit_savpath,
magsarefluxes=magsarefluxes)
ingduration_guess = blsd['transitduration'] * 0.2 # a guesstimate.
transitparams = [
blsd['period'], blsd['epoch'], blsd['transitdepth'],
blsd['transitduration'], ingduration_guess
]
    # fit a trapezoidal transit model; plot the resulting phased LC.
    trapd = None  # ensure trapd is defined even when no trapezoid fit is made
    if trapfit_savpath:
trapd = traptransit_fit_magseries(time, flux, err_flux,
transitparams,
magsarefluxes=magsarefluxes,
sigclip=sigclip,
plotfit=trapfit_savpath)
# use the trapezoidal model's epoch as the guess to identify (roughly) in
# and out of transit points
tmids, t_starts, t_ends = get_transit_times(blsd,
time,
extra_maskfrac,
trapd=trapd)
return tmids, t_starts, t_ends
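
A minimal sketch, assuming `time`, `flux`, and `err_flux` already hold a real flux light curve as numpy arrays (a full BLS search is too slow to demo on inline toy data); the plot paths are hypothetical. Passing `trapfit_savpath` also guarantees the trapezoid fit runs before `get_transit_times` is called.

from astrobase.varbase.transits import given_lc_get_transit_tmids_tstarts_tends

tmids, t_starts, t_ends = given_lc_get_transit_tmids_tstarts_tends(
    time, flux, err_flux,
    blsfit_savpath='bls_fit.png',    # hypothetical output path
    trapfit_savpath='trap_fit.png',  # hypothetical output path
    sigclip=[10., 3.],
    nworkers=4
)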
|
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/varbase/transits.py#L399-L521
|
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
given_lc_get_out_of_transit_points
|
This gets the out-of-transit light curve points.
Relevant during iterative masking of transits when searching for
multiple-planet systems.
Parameters
----------
time,flux,err_flux : np.array
The input flux time-series measurements and their associated measurement
errors
blsfit_savpath : str or None
If provided as a str, indicates the path of the fit plot to make for a
simple BLS model fit to the transit using the obtained period and epoch.
trapfit_savpath : str or None
If provided as a str, indicates the path of the fit plot to make for a
trapezoidal transit model fit to the transit using the obtained period
and epoch.
in_out_transit_savpath : str or None
If provided as a str, indicates the path of the plot file that will be
made for a plot showing the in-transit points and out-of-transit points
tagged separately.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
This is by default True for this function, since it works on fluxes only
at the moment.
nworkers : int
The number of parallel BLS period-finder workers to use.
extra_maskfrac : float
This is the separation (N) from in-transit points you desire, in units
of the transit duration. `extra_maskfrac = 0` if you just want points
inside transit, otherwise::
t_starts = t_Is - N*tdur, t_ends = t_IVs + N*tdur
Thus setting N=0.03 masks slightly more than the guessed transit
duration.
Returns
-------
(times_oot, fluxes_oot, errs_oot) : tuple of np.array
The `times`, `flux`, `err_flux` values from the input at the time values
out-of-transit are returned.
|
astrobase/varbase/transits.py
|
def given_lc_get_out_of_transit_points(
time, flux, err_flux,
blsfit_savpath=None,
trapfit_savpath=None,
in_out_transit_savpath=None,
sigclip=None,
magsarefluxes=True,
nworkers=1,
extra_maskfrac=0.03
):
'''This gets the out-of-transit light curve points.
    Relevant during iterative masking of transits when searching for
    multiple-planet systems.
Parameters
----------
time,flux,err_flux : np.array
The input flux time-series measurements and their associated measurement
errors
blsfit_savpath : str or None
If provided as a str, indicates the path of the fit plot to make for a
simple BLS model fit to the transit using the obtained period and epoch.
trapfit_savpath : str or None
If provided as a str, indicates the path of the fit plot to make for a
trapezoidal transit model fit to the transit using the obtained period
and epoch.
in_out_transit_savpath : str or None
If provided as a str, indicates the path of the plot file that will be
made for a plot showing the in-transit points and out-of-transit points
tagged separately.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
This is by default True for this function, since it works on fluxes only
at the moment.
nworkers : int
The number of parallel BLS period-finder workers to use.
extra_maskfrac : float
This is the separation (N) from in-transit points you desire, in units
of the transit duration. `extra_maskfrac = 0` if you just want points
inside transit, otherwise::
t_starts = t_Is - N*tdur, t_ends = t_IVs + N*tdur
Thus setting N=0.03 masks slightly more than the guessed transit
duration.
Returns
-------
(times_oot, fluxes_oot, errs_oot) : tuple of np.array
The `times`, `flux`, `err_flux` values from the input at the time values
out-of-transit are returned.
'''
tmids_obsd, t_starts, t_ends = (
given_lc_get_transit_tmids_tstarts_tends(
time, flux, err_flux, blsfit_savpath=blsfit_savpath,
trapfit_savpath=trapfit_savpath, magsarefluxes=magsarefluxes,
nworkers=nworkers, sigclip=sigclip, extra_maskfrac=extra_maskfrac
)
)
in_transit = np.zeros_like(time).astype(bool)
for t_start, t_end in zip(t_starts, t_ends):
this_transit = ( (time > t_start) & (time < t_end) )
in_transit |= this_transit
out_of_transit = ~in_transit
if in_out_transit_savpath:
_in_out_transit_plot(time, flux, in_transit, out_of_transit,
in_out_transit_savpath)
return time[out_of_transit], flux[out_of_transit], err_flux[out_of_transit]
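
The same hedged assumptions apply here: `time`, `flux`, and `err_flux` are pre-loaded numpy arrays and the save paths are hypothetical. The returned arrays hold only the out-of-transit points, ready for a second planet search.

from astrobase.varbase.transits import given_lc_get_out_of_transit_points

times_oot, fluxes_oot, errs_oot = given_lc_get_out_of_transit_points(
    time, flux, err_flux,
    trapfit_savpath='trap_fit.png',       # hypothetical output path
    in_out_transit_savpath='in_out.png',  # hypothetical output path
    extra_maskfrac=0.03
)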
|
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/varbase/transits.py#L555-L657
|
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
_pycompress_sqlitecurve
|
This just compresses the sqlitecurve. Should be independent of OS.
|
astrobase/hatsurveys/hatlc.py
|
def _pycompress_sqlitecurve(sqlitecurve, force=False):
'''This just compresses the sqlitecurve. Should be independent of OS.
'''
outfile = '%s.gz' % sqlitecurve
try:
if os.path.exists(outfile) and not force:
os.remove(sqlitecurve)
return outfile
else:
with open(sqlitecurve,'rb') as infd:
with gzip.open(outfile,'wb') as outfd:
shutil.copyfileobj(infd, outfd)
if os.path.exists(outfile):
os.remove(sqlitecurve)
return outfile
except Exception as e:
return None
|
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/hatsurveys/hatlc.py#L429-L453
|
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
_pyuncompress_sqlitecurve
|
This just uncompresses the sqlitecurve. Should be independent of OS.
|
astrobase/hatsurveys/hatlc.py
|
def _pyuncompress_sqlitecurve(sqlitecurve, force=False):
'''This just uncompresses the sqlitecurve. Should be independent of OS.
'''
outfile = sqlitecurve.replace('.gz','')
try:
if os.path.exists(outfile) and not force:
return outfile
else:
with gzip.open(sqlitecurve,'rb') as infd:
with open(outfile,'wb') as outfd:
shutil.copyfileobj(infd, outfd)
        # do not remove the input file yet
if os.path.exists(outfile):
return outfile
except Exception as e:
return None
|
def _pyuncompress_sqlitecurve(sqlitecurve, force=False):
'''This just uncompresses the sqlitecurve. Should be independent of OS.
'''
outfile = sqlitecurve.replace('.gz','')
try:
if os.path.exists(outfile) and not force:
return outfile
else:
with gzip.open(sqlitecurve,'rb') as infd:
with open(outfile,'wb') as outfd:
shutil.copyfileobj(infd, outfd)
        # do not remove the input file yet
if os.path.exists(outfile):
return outfile
except Exception as e:
return None
|
[
"This",
"just",
"uncompresses",
"the",
"sqlitecurve",
".",
"Should",
"be",
"independent",
"of",
"OS",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/hatsurveys/hatlc.py#L456-L479
|
[
"def",
"_pyuncompress_sqlitecurve",
"(",
"sqlitecurve",
",",
"force",
"=",
"False",
")",
":",
"outfile",
"=",
"sqlitecurve",
".",
"replace",
"(",
"'.gz'",
",",
"''",
")",
"try",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"outfile",
")",
"and",
"not",
"force",
":",
"return",
"outfile",
"else",
":",
"with",
"gzip",
".",
"open",
"(",
"sqlitecurve",
",",
"'rb'",
")",
"as",
"infd",
":",
"with",
"open",
"(",
"outfile",
",",
"'wb'",
")",
"as",
"outfd",
":",
"shutil",
".",
"copyfileobj",
"(",
"infd",
",",
"outfd",
")",
"# do not remove the intput file yet",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"outfile",
")",
":",
"return",
"outfile",
"except",
"Exception",
"as",
"e",
":",
"return",
"None"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
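A round-trip sketch pairing the two pure-Python helpers (assumptions: astrobase is importable and a hypothetical 'lc.sqlite' exists on disk):

from astrobase.hatsurveys.hatlc import (_pycompress_sqlitecurve,
                                        _pyuncompress_sqlitecurve)

gzpath = _pycompress_sqlitecurve('lc.sqlite')      # -> 'lc.sqlite.gz' or None
if gzpath is not None:
    # note: the .gz input is deliberately kept on disk after uncompression
    sqlpath = _pyuncompress_sqlitecurve(gzpath)    # -> 'lc.sqlite' or None
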
valid
|
_gzip_sqlitecurve
|
This just compresses the sqlitecurve in gzip format.
FIXME: this doesn't work with gzip < 1.6 or non-GNU gzip (probably).
|
astrobase/hatsurveys/hatlc.py
|
def _gzip_sqlitecurve(sqlitecurve, force=False):
'''This just compresses the sqlitecurve in gzip format.
FIXME: this doesn't work with gzip < 1.6 or non-GNU gzip (probably).
'''
# -k to keep the input file just in case something explodes
if force:
cmd = 'gzip -k -f %s' % sqlitecurve
else:
cmd = 'gzip -k %s' % sqlitecurve
try:
outfile = '%s.gz' % sqlitecurve
if os.path.exists(outfile) and not force:
# get rid of the .sqlite file only
os.remove(sqlitecurve)
return outfile
else:
subprocess.check_output(cmd, shell=True)
# check if the output file was successfully created
if os.path.exists(outfile):
return outfile
else:
return None
except subprocess.CalledProcessError:
return None
|
def _gzip_sqlitecurve(sqlitecurve, force=False):
'''This just compresses the sqlitecurve in gzip format.
FIXME: this doesn't work with gzip < 1.6 or non-GNU gzip (probably).
'''
# -k to keep the input file just in case something explodes
if force:
cmd = 'gzip -k -f %s' % sqlitecurve
else:
cmd = 'gzip -k %s' % sqlitecurve
try:
outfile = '%s.gz' % sqlitecurve
if os.path.exists(outfile) and not force:
# get rid of the .sqlite file only
os.remove(sqlitecurve)
return outfile
else:
subprocess.check_output(cmd, shell=True)
# check if the output file was successfully created
if os.path.exists(outfile):
return outfile
else:
return None
except subprocess.CalledProcessError:
return None
|
[
"This",
"just",
"compresses",
"the",
"sqlitecurve",
"in",
"gzip",
"format",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/hatsurveys/hatlc.py#L482-L514
|
[
"def",
"_gzip_sqlitecurve",
"(",
"sqlitecurve",
",",
"force",
"=",
"False",
")",
":",
"# -k to keep the input file just in case something explodes",
"if",
"force",
":",
"cmd",
"=",
"'gzip -k -f %s'",
"%",
"sqlitecurve",
"else",
":",
"cmd",
"=",
"'gzip -k %s'",
"%",
"sqlitecurve",
"try",
":",
"outfile",
"=",
"'%s.gz'",
"%",
"sqlitecurve",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"outfile",
")",
"and",
"not",
"force",
":",
"# get rid of the .sqlite file only",
"os",
".",
"remove",
"(",
"sqlitecurve",
")",
"return",
"outfile",
"else",
":",
"subprocess",
".",
"check_output",
"(",
"cmd",
",",
"shell",
"=",
"True",
")",
"# check if the output file was successfully created",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"outfile",
")",
":",
"return",
"outfile",
"else",
":",
"return",
"None",
"except",
"subprocess",
".",
"CalledProcessError",
":",
"return",
"None"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
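The record above shells out with shell=True, so a filename containing shell metacharacters would be interpreted by the shell. Below is a sketch of the same call with an argument list instead; this is an alternative pattern, not the dataset's code, and it still assumes a gzip binary with -k support (GNU gzip >= 1.6):

import subprocess

def gzip_keep(path, force=False):
    # same flags as the record (-k keeps the input), but passed as a list so
    # the filename is never parsed by a shell
    cmd = ['gzip', '-k', '-f', path] if force else ['gzip', '-k', path]
    try:
        subprocess.check_output(cmd)
        return '%s.gz' % path
    except subprocess.CalledProcessError:
        return None
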
valid
|
_gunzip_sqlitecurve
|
This just uncompresses the sqlitecurve in gzip format.
FIXME: this doesn't work with gzip < 1.6 or non-GNU gzip (probably).
|
astrobase/hatsurveys/hatlc.py
|
def _gunzip_sqlitecurve(sqlitecurve):
'''This just uncompresses the sqlitecurve in gzip format.
FIXME: this doesn't work with gzip < 1.6 or non-GNU gzip (probably).
'''
# -k to keep the input .gz just in case something explodes
cmd = 'gunzip -k %s' % sqlitecurve
try:
subprocess.check_output(cmd, shell=True)
return sqlitecurve.replace('.gz','')
except subprocess.CalledProcessError:
return None
|
def _gunzip_sqlitecurve(sqlitecurve):
'''This just uncompresses the sqlitecurve in gzip format.
FIXME: this doesn't work with gzip < 1.6 or non-GNU gzip (probably).
'''
# -k to keep the input .gz just in case something explodes
cmd = 'gunzip -k %s' % sqlitecurve
try:
subprocess.check_output(cmd, shell=True)
return sqlitecurve.replace('.gz','')
except subprocess.CalledProcessError:
return None
|
[
"This",
"just",
"uncompresses",
"the",
"sqlitecurve",
"in",
"gzip",
"format",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/hatsurveys/hatlc.py#L518-L532
|
[
"def",
"_gunzip_sqlitecurve",
"(",
"sqlitecurve",
")",
":",
"# -k to keep the input .gz just in case something explodes",
"cmd",
"=",
"'gunzip -k %s'",
"%",
"sqlitecurve",
"try",
":",
"subprocess",
".",
"check_output",
"(",
"cmd",
",",
"shell",
"=",
"True",
")",
"return",
"sqlitecurve",
".",
"replace",
"(",
"'.gz'",
",",
"''",
")",
"except",
"subprocess",
".",
"CalledProcessError",
":",
"return",
"None"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
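Usage sketch for the record above (assumptions: astrobase is importable, a gunzip binary with -k support is on the PATH, and the filename is hypothetical):

from astrobase.hatsurveys.hatlc import _gunzip_sqlitecurve

sqlpath = _gunzip_sqlitecurve('lc.sqlite.gz')  # -> 'lc.sqlite', or None on error
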
valid
|
_validate_sqlitecurve_filters
|
This validates the sqlitecurve filter string.
This MUST be valid SQL but not contain any commands.
|
astrobase/hatsurveys/hatlc.py
|
def _validate_sqlitecurve_filters(filterstring, lccolumns):
'''This validates the sqlitecurve filter string.
This MUST be valid SQL but not contain any commands.
'''
# first, lowercase, then _squeeze to single spaces
stringelems = _squeeze(filterstring).lower()
# replace shady characters
    stringelems = stringelems.replace('(','')
stringelems = stringelems.replace(')','')
stringelems = stringelems.replace(',','')
stringelems = stringelems.replace("'",'"')
stringelems = stringelems.replace('\n',' ')
stringelems = stringelems.replace('\t',' ')
stringelems = _squeeze(stringelems)
# split into words
stringelems = stringelems.split(' ')
stringelems = [x.strip() for x in stringelems]
# get rid of all numbers
stringwords = []
for x in stringelems:
try:
float(x)
except ValueError as e:
stringwords.append(x)
# get rid of everything within quotes
stringwords2 = []
for x in stringwords:
if not(x.startswith('"') and x.endswith('"')):
stringwords2.append(x)
stringwords2 = [x for x in stringwords2 if len(x) > 0]
# check the filterstring words against the allowed words
wordset = set(stringwords2)
# generate the allowed word set for these LC columns
allowedwords = SQLITE_ALLOWED_WORDS + lccolumns
checkset = set(allowedwords)
validatecheck = list(wordset - checkset)
# if there are words left over, then this filter string is suspicious
if len(validatecheck) > 0:
# check if validatecheck contains an elem with % in it
LOGWARNING("provided SQL filter string '%s' "
"contains non-allowed keywords" % filterstring)
return None
else:
return filterstring
|
def _validate_sqlitecurve_filters(filterstring, lccolumns):
'''This validates the sqlitecurve filter string.
This MUST be valid SQL but not contain any commands.
'''
# first, lowercase, then _squeeze to single spaces
stringelems = _squeeze(filterstring).lower()
# replace shady characters
    stringelems = stringelems.replace('(','')
stringelems = stringelems.replace(')','')
stringelems = stringelems.replace(',','')
stringelems = stringelems.replace("'",'"')
stringelems = stringelems.replace('\n',' ')
stringelems = stringelems.replace('\t',' ')
stringelems = _squeeze(stringelems)
# split into words
stringelems = stringelems.split(' ')
stringelems = [x.strip() for x in stringelems]
# get rid of all numbers
stringwords = []
for x in stringelems:
try:
float(x)
except ValueError as e:
stringwords.append(x)
# get rid of everything within quotes
stringwords2 = []
for x in stringwords:
if not(x.startswith('"') and x.endswith('"')):
stringwords2.append(x)
stringwords2 = [x for x in stringwords2 if len(x) > 0]
# check the filterstring words against the allowed words
wordset = set(stringwords2)
# generate the allowed word set for these LC columns
allowedwords = SQLITE_ALLOWED_WORDS + lccolumns
checkset = set(allowedwords)
validatecheck = list(wordset - checkset)
# if there are words left over, then this filter string is suspicious
if len(validatecheck) > 0:
# check if validatecheck contains an elem with % in it
LOGWARNING("provided SQL filter string '%s' "
"contains non-allowed keywords" % filterstring)
return None
else:
return filterstring
|
[
"This",
"validates",
"the",
"sqlitecurve",
"filter",
"string",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/hatsurveys/hatlc.py#L564-L620
|
[
"def",
"_validate_sqlitecurve_filters",
"(",
"filterstring",
",",
"lccolumns",
")",
":",
"# first, lowercase, then _squeeze to single spaces",
"stringelems",
"=",
"_squeeze",
"(",
"filterstring",
")",
".",
"lower",
"(",
")",
"# replace shady characters",
"stringelems",
"=",
"filterstring",
".",
"replace",
"(",
"'('",
",",
"''",
")",
"stringelems",
"=",
"stringelems",
".",
"replace",
"(",
"')'",
",",
"''",
")",
"stringelems",
"=",
"stringelems",
".",
"replace",
"(",
"','",
",",
"''",
")",
"stringelems",
"=",
"stringelems",
".",
"replace",
"(",
"\"'\"",
",",
"'\"'",
")",
"stringelems",
"=",
"stringelems",
".",
"replace",
"(",
"'\\n'",
",",
"' '",
")",
"stringelems",
"=",
"stringelems",
".",
"replace",
"(",
"'\\t'",
",",
"' '",
")",
"stringelems",
"=",
"_squeeze",
"(",
"stringelems",
")",
"# split into words",
"stringelems",
"=",
"stringelems",
".",
"split",
"(",
"' '",
")",
"stringelems",
"=",
"[",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"stringelems",
"]",
"# get rid of all numbers",
"stringwords",
"=",
"[",
"]",
"for",
"x",
"in",
"stringelems",
":",
"try",
":",
"float",
"(",
"x",
")",
"except",
"ValueError",
"as",
"e",
":",
"stringwords",
".",
"append",
"(",
"x",
")",
"# get rid of everything within quotes",
"stringwords2",
"=",
"[",
"]",
"for",
"x",
"in",
"stringwords",
":",
"if",
"not",
"(",
"x",
".",
"startswith",
"(",
"'\"'",
")",
"and",
"x",
".",
"endswith",
"(",
"'\"'",
")",
")",
":",
"stringwords2",
".",
"append",
"(",
"x",
")",
"stringwords2",
"=",
"[",
"x",
"for",
"x",
"in",
"stringwords2",
"if",
"len",
"(",
"x",
")",
">",
"0",
"]",
"# check the filterstring words against the allowed words",
"wordset",
"=",
"set",
"(",
"stringwords2",
")",
"# generate the allowed word set for these LC columns",
"allowedwords",
"=",
"SQLITE_ALLOWED_WORDS",
"+",
"lccolumns",
"checkset",
"=",
"set",
"(",
"allowedwords",
")",
"validatecheck",
"=",
"list",
"(",
"wordset",
"-",
"checkset",
")",
"# if there are words left over, then this filter string is suspicious",
"if",
"len",
"(",
"validatecheck",
")",
">",
"0",
":",
"# check if validatecheck contains an elem with % in it",
"LOGWARNING",
"(",
"\"provided SQL filter string '%s' \"",
"\"contains non-allowed keywords\"",
"%",
"filterstring",
")",
"return",
"None",
"else",
":",
"return",
"filterstring"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
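A behavior sketch for the allow-list check above (assumptions: astrobase is importable; 'rjd' and 'aep_000' stand in for real LC columns; SQLITE_ALLOWED_WORDS, defined elsewhere in hatlc.py, contains the usual SQL operator keywords but not DDL words like 'drop'):

from astrobase.hatsurveys.hatlc import _validate_sqlitecurve_filters

# numbers and double-quoted literals are ignored, so this should come back
# unchanged if 'and' and the comparison operators are in the allow-list
ok = _validate_sqlitecurve_filters('(rjd > 56000.0) and (aep_000 < 12.0)',
                                   ['rjd', 'aep_000'])

# words outside the allow-list and the column list make the validator
# return None instead of the filter string
bad = _validate_sqlitecurve_filters('rjd > 0 ; drop table lightcurve',
                                    ['rjd'])
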
valid
|
read_and_filter_sqlitecurve
|
This reads a HAT sqlitecurve and optionally filters it.
Parameters
----------
lcfile : str
The path to the HAT sqlitecurve file.
columns : list
    A list of columns to extract from the light curve file. If None, then
    returns all columns present in the latest `columnlist` in the light
    curve.
sqlfilters : str
    If not None, it must be a text SQL filter that applies to the
columns in the lightcurve.
raiseonfail : bool
If this is True, an Exception when reading the LC will crash the
function instead of failing silently and returning None as the result.
returnarrays : bool
If this is True, the output lcdict contains columns as np.arrays instead
of lists. You generally want this to be True.
forcerecompress : bool
If True, the sqlitecurve will be recompressed even if a compressed
version of it is found. This usually happens when sqlitecurve opening is
interrupted by the OS for some reason, leaving behind a gzipped and
un-gzipped copy. By default, this function refuses to overwrite the
existing gzipped version so if the un-gzipped version is corrupt but
that one isn't, it can be safely recovered.
quiet : bool
If True, will not warn about any problems, even if the light curve
reading fails (the only clue then will be the return value of
None). Useful for batch processing of many many light curves.
Returns
-------
tuple : (lcdict, status_message)
A two-element tuple is returned, with the first element being the
lcdict.
|
astrobase/hatsurveys/hatlc.py
|
def read_and_filter_sqlitecurve(lcfile,
columns=None,
sqlfilters=None,
raiseonfail=False,
returnarrays=True,
forcerecompress=False,
quiet=True):
'''This reads a HAT sqlitecurve and optionally filters it.
Parameters
----------
lcfile : str
The path to the HAT sqlitecurve file.
columns : list
        A list of columns to extract from the light curve file. If None, then
        returns all columns present in the latest `columnlist` in the light
        curve.
    sqlfilters : str
        If not None, it must be a text SQL filter that applies to the
columns in the lightcurve.
raiseonfail : bool
If this is True, an Exception when reading the LC will crash the
function instead of failing silently and returning None as the result.
returnarrays : bool
If this is True, the output lcdict contains columns as np.arrays instead
of lists. You generally want this to be True.
forcerecompress : bool
If True, the sqlitecurve will be recompressed even if a compressed
version of it is found. This usually happens when sqlitecurve opening is
interrupted by the OS for some reason, leaving behind a gzipped and
un-gzipped copy. By default, this function refuses to overwrite the
existing gzipped version so if the un-gzipped version is corrupt but
that one isn't, it can be safely recovered.
quiet : bool
If True, will not warn about any problems, even if the light curve
reading fails (the only clue then will be the return value of
None). Useful for batch processing of many many light curves.
Returns
-------
tuple : (lcdict, status_message)
A two-element tuple is returned, with the first element being the
lcdict.
'''
# we're proceeding with reading the LC...
try:
# if this file is a gzipped sqlite3 db, then gunzip it
if '.gz' in lcfile[-4:]:
lcf = _uncompress_sqlitecurve(lcfile)
else:
lcf = lcfile
db = sql.connect(lcf)
cur = db.cursor()
# get the objectinfo from the sqlitecurve
query = ("select * from objectinfo")
cur.execute(query)
objectinfo = cur.fetchone()
# get the lcinfo from the sqlitecurve
query = ("select * from lcinfo "
"order by version desc limit 1")
cur.execute(query)
lcinfo = cur.fetchone()
(lcversion, lcdatarelease, lccols, lcsortcol,
lcapertures, lcbestaperture,
objinfocols, objidcol,
lcunixtime, lcgitrev, lccomment) = lcinfo
# load the JSON dicts
lcapertures = json.loads(lcapertures)
lcbestaperture = json.loads(lcbestaperture)
objectinfokeys = objinfocols.split(',')
objectinfodict = {x:y for (x,y) in zip(objectinfokeys, objectinfo)}
objectid = objectinfodict[objidcol]
# need to generate the objectinfo dict and the objectid from the lcinfo
# columns
# get the filters from the sqlitecurve
query = ("select * from filters")
cur.execute(query)
filterinfo = cur.fetchall()
# validate the requested columns
if columns and all([x in lccols.split(',') for x in columns]):
LOGINFO('retrieving columns %s' % columns)
proceed = True
elif columns is None:
columns = lccols.split(',')
proceed = True
else:
proceed = False
# bail out if there's a problem and tell the user what happened
if not proceed:
# recompress the lightcurve at the end
if '.gz' in lcfile[-4:] and lcf:
_compress_sqlitecurve(lcf, force=forcerecompress)
LOGERROR('requested columns are invalid!')
return None, "requested columns are invalid"
# create the lcdict with the object, lc, and filter info
lcdict = {'objectid':objectid,
'objectinfo':objectinfodict,
'objectinfokeys':objectinfokeys,
'lcversion':lcversion,
'datarelease':lcdatarelease,
'columns':columns,
'lcsortcol':lcsortcol,
'lcapertures':lcapertures,
'lcbestaperture':lcbestaperture,
'lastupdated':lcunixtime,
'lcserver':lcgitrev,
'comment':lccomment,
'filters':filterinfo}
# validate the SQL filters for this LC
if ((sqlfilters is not None) and
(isinstance(sqlfilters,str) or
isinstance(sqlfilters, unicode))):
# give the validator the sqlfilters string and a list of lccols in
# the lightcurve
validatedfilters = _validate_sqlitecurve_filters(sqlfilters,
lccols.split(','))
if validatedfilters is not None:
LOGINFO('filtering LC using: %s' % validatedfilters)
filtersok = True
else:
filtersok = False
else:
validatedfilters = None
filtersok = None
# now read all the required columns in the order indicated
# we use the validated SQL filter string here
if validatedfilters is not None:
query = (
"select {columns} from lightcurve where {sqlfilter} "
"order by {sortcol} asc"
).format(
columns=','.join(columns), # columns is always a list
sqlfilter=validatedfilters,
sortcol=lcsortcol
)
lcdict['lcfiltersql'] = validatedfilters
else:
query = ("select %s from lightcurve order by %s asc") % (
','.join(columns),
lcsortcol
)
cur.execute(query)
lightcurve = cur.fetchall()
if lightcurve and len(lightcurve) > 0:
lightcurve = list(zip(*lightcurve))
lcdict.update({x:y for (x,y) in zip(lcdict['columns'],
lightcurve)})
lcok = True
# update the ndet after filtering
lcdict['objectinfo']['ndet'] = len(lightcurve[0])
else:
LOGWARNING('LC for %s has no detections' % lcdict['objectid'])
# fill the lightcurve with empty lists to indicate that it is empty
lcdict.update({x:y for (x,y) in
zip(lcdict['columns'],
[[] for x in lcdict['columns']])})
lcok = False
# generate the returned lcdict and status message
if filtersok is True and lcok:
statusmsg = 'SQL filters OK, LC OK'
elif filtersok is None and lcok:
statusmsg = 'no SQL filters, LC OK'
elif filtersok is False and lcok:
statusmsg = 'SQL filters invalid, LC OK'
else:
statusmsg = 'LC retrieval failed'
returnval = (lcdict, statusmsg)
# recompress the lightcurve at the end
if '.gz' in lcfile[-4:] and lcf:
_compress_sqlitecurve(lcf, force=forcerecompress)
# return ndarrays if that's set
if returnarrays:
for column in lcdict['columns']:
lcdict[column] = np.array([x if x is not None else np.nan
for x in lcdict[column]])
except Exception as e:
if not quiet:
LOGEXCEPTION('could not open sqlitecurve %s' % lcfile)
returnval = (None, 'error while reading lightcurve file')
# recompress the lightcurve at the end
if '.gz' in lcfile[-4:] and lcf:
_compress_sqlitecurve(lcf, force=forcerecompress)
if raiseonfail:
raise
return returnval
|
def read_and_filter_sqlitecurve(lcfile,
columns=None,
sqlfilters=None,
raiseonfail=False,
returnarrays=True,
forcerecompress=False,
quiet=True):
'''This reads a HAT sqlitecurve and optionally filters it.
Parameters
----------
lcfile : str
The path to the HAT sqlitecurve file.
columns : list
        A list of columns to extract from the light curve file. If None, then
        returns all columns present in the latest `columnlist` in the light
        curve.
    sqlfilters : str
        If not None, it must be a text SQL filter that applies to the
columns in the lightcurve.
raiseonfail : bool
If this is True, an Exception when reading the LC will crash the
function instead of failing silently and returning None as the result.
returnarrays : bool
If this is True, the output lcdict contains columns as np.arrays instead
of lists. You generally want this to be True.
forcerecompress : bool
If True, the sqlitecurve will be recompressed even if a compressed
version of it is found. This usually happens when sqlitecurve opening is
interrupted by the OS for some reason, leaving behind a gzipped and
un-gzipped copy. By default, this function refuses to overwrite the
existing gzipped version so if the un-gzipped version is corrupt but
that one isn't, it can be safely recovered.
quiet : bool
If True, will not warn about any problems, even if the light curve
reading fails (the only clue then will be the return value of
None). Useful for batch processing of many many light curves.
Returns
-------
tuple : (lcdict, status_message)
A two-element tuple is returned, with the first element being the
lcdict.
'''
# we're proceeding with reading the LC...
try:
# if this file is a gzipped sqlite3 db, then gunzip it
if '.gz' in lcfile[-4:]:
lcf = _uncompress_sqlitecurve(lcfile)
else:
lcf = lcfile
db = sql.connect(lcf)
cur = db.cursor()
# get the objectinfo from the sqlitecurve
query = ("select * from objectinfo")
cur.execute(query)
objectinfo = cur.fetchone()
# get the lcinfo from the sqlitecurve
query = ("select * from lcinfo "
"order by version desc limit 1")
cur.execute(query)
lcinfo = cur.fetchone()
(lcversion, lcdatarelease, lccols, lcsortcol,
lcapertures, lcbestaperture,
objinfocols, objidcol,
lcunixtime, lcgitrev, lccomment) = lcinfo
# load the JSON dicts
lcapertures = json.loads(lcapertures)
lcbestaperture = json.loads(lcbestaperture)
objectinfokeys = objinfocols.split(',')
objectinfodict = {x:y for (x,y) in zip(objectinfokeys, objectinfo)}
objectid = objectinfodict[objidcol]
# need to generate the objectinfo dict and the objectid from the lcinfo
# columns
# get the filters from the sqlitecurve
query = ("select * from filters")
cur.execute(query)
filterinfo = cur.fetchall()
# validate the requested columns
if columns and all([x in lccols.split(',') for x in columns]):
LOGINFO('retrieving columns %s' % columns)
proceed = True
elif columns is None:
columns = lccols.split(',')
proceed = True
else:
proceed = False
# bail out if there's a problem and tell the user what happened
if not proceed:
# recompress the lightcurve at the end
if '.gz' in lcfile[-4:] and lcf:
_compress_sqlitecurve(lcf, force=forcerecompress)
LOGERROR('requested columns are invalid!')
return None, "requested columns are invalid"
# create the lcdict with the object, lc, and filter info
lcdict = {'objectid':objectid,
'objectinfo':objectinfodict,
'objectinfokeys':objectinfokeys,
'lcversion':lcversion,
'datarelease':lcdatarelease,
'columns':columns,
'lcsortcol':lcsortcol,
'lcapertures':lcapertures,
'lcbestaperture':lcbestaperture,
'lastupdated':lcunixtime,
'lcserver':lcgitrev,
'comment':lccomment,
'filters':filterinfo}
# validate the SQL filters for this LC
if ((sqlfilters is not None) and
(isinstance(sqlfilters,str) or
isinstance(sqlfilters, unicode))):
# give the validator the sqlfilters string and a list of lccols in
# the lightcurve
validatedfilters = _validate_sqlitecurve_filters(sqlfilters,
lccols.split(','))
if validatedfilters is not None:
LOGINFO('filtering LC using: %s' % validatedfilters)
filtersok = True
else:
filtersok = False
else:
validatedfilters = None
filtersok = None
# now read all the required columns in the order indicated
# we use the validated SQL filter string here
if validatedfilters is not None:
query = (
"select {columns} from lightcurve where {sqlfilter} "
"order by {sortcol} asc"
).format(
columns=','.join(columns), # columns is always a list
sqlfilter=validatedfilters,
sortcol=lcsortcol
)
lcdict['lcfiltersql'] = validatedfilters
else:
query = ("select %s from lightcurve order by %s asc") % (
','.join(columns),
lcsortcol
)
cur.execute(query)
lightcurve = cur.fetchall()
if lightcurve and len(lightcurve) > 0:
lightcurve = list(zip(*lightcurve))
lcdict.update({x:y for (x,y) in zip(lcdict['columns'],
lightcurve)})
lcok = True
# update the ndet after filtering
lcdict['objectinfo']['ndet'] = len(lightcurve[0])
else:
LOGWARNING('LC for %s has no detections' % lcdict['objectid'])
# fill the lightcurve with empty lists to indicate that it is empty
lcdict.update({x:y for (x,y) in
zip(lcdict['columns'],
[[] for x in lcdict['columns']])})
lcok = False
# generate the returned lcdict and status message
if filtersok is True and lcok:
statusmsg = 'SQL filters OK, LC OK'
elif filtersok is None and lcok:
statusmsg = 'no SQL filters, LC OK'
elif filtersok is False and lcok:
statusmsg = 'SQL filters invalid, LC OK'
else:
statusmsg = 'LC retrieval failed'
returnval = (lcdict, statusmsg)
# recompress the lightcurve at the end
if '.gz' in lcfile[-4:] and lcf:
_compress_sqlitecurve(lcf, force=forcerecompress)
# return ndarrays if that's set
if returnarrays:
for column in lcdict['columns']:
lcdict[column] = np.array([x if x is not None else np.nan
for x in lcdict[column]])
except Exception as e:
if not quiet:
LOGEXCEPTION('could not open sqlitecurve %s' % lcfile)
returnval = (None, 'error while reading lightcurve file')
# recompress the lightcurve at the end
if '.gz' in lcfile[-4:] and lcf:
_compress_sqlitecurve(lcf, force=forcerecompress)
if raiseonfail:
raise
return returnval
|
[
"This",
"reads",
"a",
"HAT",
"sqlitecurve",
"and",
"optionally",
"filters",
"it",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/hatsurveys/hatlc.py#L624-L853
|
[
"def",
"read_and_filter_sqlitecurve",
"(",
"lcfile",
",",
"columns",
"=",
"None",
",",
"sqlfilters",
"=",
"None",
",",
"raiseonfail",
"=",
"False",
",",
"returnarrays",
"=",
"True",
",",
"forcerecompress",
"=",
"False",
",",
"quiet",
"=",
"True",
")",
":",
"# we're proceeding with reading the LC...",
"try",
":",
"# if this file is a gzipped sqlite3 db, then gunzip it",
"if",
"'.gz'",
"in",
"lcfile",
"[",
"-",
"4",
":",
"]",
":",
"lcf",
"=",
"_uncompress_sqlitecurve",
"(",
"lcfile",
")",
"else",
":",
"lcf",
"=",
"lcfile",
"db",
"=",
"sql",
".",
"connect",
"(",
"lcf",
")",
"cur",
"=",
"db",
".",
"cursor",
"(",
")",
"# get the objectinfo from the sqlitecurve",
"query",
"=",
"(",
"\"select * from objectinfo\"",
")",
"cur",
".",
"execute",
"(",
"query",
")",
"objectinfo",
"=",
"cur",
".",
"fetchone",
"(",
")",
"# get the lcinfo from the sqlitecurve",
"query",
"=",
"(",
"\"select * from lcinfo \"",
"\"order by version desc limit 1\"",
")",
"cur",
".",
"execute",
"(",
"query",
")",
"lcinfo",
"=",
"cur",
".",
"fetchone",
"(",
")",
"(",
"lcversion",
",",
"lcdatarelease",
",",
"lccols",
",",
"lcsortcol",
",",
"lcapertures",
",",
"lcbestaperture",
",",
"objinfocols",
",",
"objidcol",
",",
"lcunixtime",
",",
"lcgitrev",
",",
"lccomment",
")",
"=",
"lcinfo",
"# load the JSON dicts",
"lcapertures",
"=",
"json",
".",
"loads",
"(",
"lcapertures",
")",
"lcbestaperture",
"=",
"json",
".",
"loads",
"(",
"lcbestaperture",
")",
"objectinfokeys",
"=",
"objinfocols",
".",
"split",
"(",
"','",
")",
"objectinfodict",
"=",
"{",
"x",
":",
"y",
"for",
"(",
"x",
",",
"y",
")",
"in",
"zip",
"(",
"objectinfokeys",
",",
"objectinfo",
")",
"}",
"objectid",
"=",
"objectinfodict",
"[",
"objidcol",
"]",
"# need to generate the objectinfo dict and the objectid from the lcinfo",
"# columns",
"# get the filters from the sqlitecurve",
"query",
"=",
"(",
"\"select * from filters\"",
")",
"cur",
".",
"execute",
"(",
"query",
")",
"filterinfo",
"=",
"cur",
".",
"fetchall",
"(",
")",
"# validate the requested columns",
"if",
"columns",
"and",
"all",
"(",
"[",
"x",
"in",
"lccols",
".",
"split",
"(",
"','",
")",
"for",
"x",
"in",
"columns",
"]",
")",
":",
"LOGINFO",
"(",
"'retrieving columns %s'",
"%",
"columns",
")",
"proceed",
"=",
"True",
"elif",
"columns",
"is",
"None",
":",
"columns",
"=",
"lccols",
".",
"split",
"(",
"','",
")",
"proceed",
"=",
"True",
"else",
":",
"proceed",
"=",
"False",
"# bail out if there's a problem and tell the user what happened",
"if",
"not",
"proceed",
":",
"# recompress the lightcurve at the end",
"if",
"'.gz'",
"in",
"lcfile",
"[",
"-",
"4",
":",
"]",
"and",
"lcf",
":",
"_compress_sqlitecurve",
"(",
"lcf",
",",
"force",
"=",
"forcerecompress",
")",
"LOGERROR",
"(",
"'requested columns are invalid!'",
")",
"return",
"None",
",",
"\"requested columns are invalid\"",
"# create the lcdict with the object, lc, and filter info",
"lcdict",
"=",
"{",
"'objectid'",
":",
"objectid",
",",
"'objectinfo'",
":",
"objectinfodict",
",",
"'objectinfokeys'",
":",
"objectinfokeys",
",",
"'lcversion'",
":",
"lcversion",
",",
"'datarelease'",
":",
"lcdatarelease",
",",
"'columns'",
":",
"columns",
",",
"'lcsortcol'",
":",
"lcsortcol",
",",
"'lcapertures'",
":",
"lcapertures",
",",
"'lcbestaperture'",
":",
"lcbestaperture",
",",
"'lastupdated'",
":",
"lcunixtime",
",",
"'lcserver'",
":",
"lcgitrev",
",",
"'comment'",
":",
"lccomment",
",",
"'filters'",
":",
"filterinfo",
"}",
"# validate the SQL filters for this LC",
"if",
"(",
"(",
"sqlfilters",
"is",
"not",
"None",
")",
"and",
"(",
"isinstance",
"(",
"sqlfilters",
",",
"str",
")",
"or",
"isinstance",
"(",
"sqlfilters",
",",
"unicode",
")",
")",
")",
":",
"# give the validator the sqlfilters string and a list of lccols in",
"# the lightcurve",
"validatedfilters",
"=",
"_validate_sqlitecurve_filters",
"(",
"sqlfilters",
",",
"lccols",
".",
"split",
"(",
"','",
")",
")",
"if",
"validatedfilters",
"is",
"not",
"None",
":",
"LOGINFO",
"(",
"'filtering LC using: %s'",
"%",
"validatedfilters",
")",
"filtersok",
"=",
"True",
"else",
":",
"filtersok",
"=",
"False",
"else",
":",
"validatedfilters",
"=",
"None",
"filtersok",
"=",
"None",
"# now read all the required columns in the order indicated",
"# we use the validated SQL filter string here",
"if",
"validatedfilters",
"is",
"not",
"None",
":",
"query",
"=",
"(",
"\"select {columns} from lightcurve where {sqlfilter} \"",
"\"order by {sortcol} asc\"",
")",
".",
"format",
"(",
"columns",
"=",
"','",
".",
"join",
"(",
"columns",
")",
",",
"# columns is always a list",
"sqlfilter",
"=",
"validatedfilters",
",",
"sortcol",
"=",
"lcsortcol",
")",
"lcdict",
"[",
"'lcfiltersql'",
"]",
"=",
"validatedfilters",
"else",
":",
"query",
"=",
"(",
"\"select %s from lightcurve order by %s asc\"",
")",
"%",
"(",
"','",
".",
"join",
"(",
"columns",
")",
",",
"lcsortcol",
")",
"cur",
".",
"execute",
"(",
"query",
")",
"lightcurve",
"=",
"cur",
".",
"fetchall",
"(",
")",
"if",
"lightcurve",
"and",
"len",
"(",
"lightcurve",
")",
">",
"0",
":",
"lightcurve",
"=",
"list",
"(",
"zip",
"(",
"*",
"lightcurve",
")",
")",
"lcdict",
".",
"update",
"(",
"{",
"x",
":",
"y",
"for",
"(",
"x",
",",
"y",
")",
"in",
"zip",
"(",
"lcdict",
"[",
"'columns'",
"]",
",",
"lightcurve",
")",
"}",
")",
"lcok",
"=",
"True",
"# update the ndet after filtering",
"lcdict",
"[",
"'objectinfo'",
"]",
"[",
"'ndet'",
"]",
"=",
"len",
"(",
"lightcurve",
"[",
"0",
"]",
")",
"else",
":",
"LOGWARNING",
"(",
"'LC for %s has no detections'",
"%",
"lcdict",
"[",
"'objectid'",
"]",
")",
"# fill the lightcurve with empty lists to indicate that it is empty",
"lcdict",
".",
"update",
"(",
"{",
"x",
":",
"y",
"for",
"(",
"x",
",",
"y",
")",
"in",
"zip",
"(",
"lcdict",
"[",
"'columns'",
"]",
",",
"[",
"[",
"]",
"for",
"x",
"in",
"lcdict",
"[",
"'columns'",
"]",
"]",
")",
"}",
")",
"lcok",
"=",
"False",
"# generate the returned lcdict and status message",
"if",
"filtersok",
"is",
"True",
"and",
"lcok",
":",
"statusmsg",
"=",
"'SQL filters OK, LC OK'",
"elif",
"filtersok",
"is",
"None",
"and",
"lcok",
":",
"statusmsg",
"=",
"'no SQL filters, LC OK'",
"elif",
"filtersok",
"is",
"False",
"and",
"lcok",
":",
"statusmsg",
"=",
"'SQL filters invalid, LC OK'",
"else",
":",
"statusmsg",
"=",
"'LC retrieval failed'",
"returnval",
"=",
"(",
"lcdict",
",",
"statusmsg",
")",
"# recompress the lightcurve at the end",
"if",
"'.gz'",
"in",
"lcfile",
"[",
"-",
"4",
":",
"]",
"and",
"lcf",
":",
"_compress_sqlitecurve",
"(",
"lcf",
",",
"force",
"=",
"forcerecompress",
")",
"# return ndarrays if that's set",
"if",
"returnarrays",
":",
"for",
"column",
"in",
"lcdict",
"[",
"'columns'",
"]",
":",
"lcdict",
"[",
"column",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"x",
"if",
"x",
"is",
"not",
"None",
"else",
"np",
".",
"nan",
"for",
"x",
"in",
"lcdict",
"[",
"column",
"]",
"]",
")",
"except",
"Exception",
"as",
"e",
":",
"if",
"not",
"quiet",
":",
"LOGEXCEPTION",
"(",
"'could not open sqlitecurve %s'",
"%",
"lcfile",
")",
"returnval",
"=",
"(",
"None",
",",
"'error while reading lightcurve file'",
")",
"# recompress the lightcurve at the end",
"if",
"'.gz'",
"in",
"lcfile",
"[",
"-",
"4",
":",
"]",
"and",
"lcf",
":",
"_compress_sqlitecurve",
"(",
"lcf",
",",
"force",
"=",
"forcerecompress",
")",
"if",
"raiseonfail",
":",
"raise",
"return",
"returnval"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
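End-to-end usage sketch for the main reader above (assumptions: astrobase is installed; the filename is a hypothetical HAT sqlitecurve, and 'rjd'/'aep_000' are stand-in column names):

from astrobase.hatsurveys.hatlc import read_and_filter_sqlitecurve

lcdict, msg = read_and_filter_sqlitecurve(
    'HAT-123-0000001-hatlc.sqlite.gz',
    columns=['rjd', 'aep_000'],
    sqlfilters='aep_000 < 12.0',   # a single SQL filter string, per the code
)
if lcdict is not None:
    print(msg, '-', lcdict['objectid'], 'ndet:', lcdict['objectinfo']['ndet'])
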
valid
|
describe
|
This describes the light curve object and columns present.
Parameters
----------
lcdict : dict
The input lcdict to parse for column and metadata info.
returndesc : bool
    If True, returns the description string as a str instead of just
printing it to stdout.
offsetwith : str
This is a character to offset the output description lines by. This is
useful to add comment characters like '#' to the output description
lines.
Returns
-------
str or None
If returndesc is True, returns the description lines as a str, otherwise
returns nothing.
|
astrobase/hatsurveys/hatlc.py
|
def describe(lcdict, returndesc=False, offsetwith=None):
'''This describes the light curve object and columns present.
Parameters
----------
lcdict : dict
The input lcdict to parse for column and metadata info.
returndesc : bool
        If True, returns the description string as a str instead of just
printing it to stdout.
offsetwith : str
This is a character to offset the output description lines by. This is
useful to add comment characters like '#' to the output description
lines.
Returns
-------
str or None
If returndesc is True, returns the description lines as a str, otherwise
returns nothing.
'''
# transparently read LCC CSV format description
if 'lcformat' in lcdict and 'lcc-csv' in lcdict['lcformat'].lower():
return describe_lcc_csv(lcdict, returndesc=returndesc)
# figure out the columndefs part of the header string
columndefs = []
for colind, column in enumerate(lcdict['columns']):
if '_' in column:
colkey, colap = column.split('_')
coldesc = COLUMNDEFS[colkey][0] % colap
else:
coldesc = COLUMNDEFS[column][0]
columndefstr = '%03i - %s - %s' % (colind,
column,
coldesc)
columndefs.append(columndefstr)
columndefs = '\n'.join(columndefs)
# figure out the filterdefs
filterdefs = []
for row in lcdict['filters']:
filterid, filtername, filterdesc = row
filterdefstr = '%s - %s - %s' % (filterid,
filtername,
filterdesc)
filterdefs.append(filterdefstr)
filterdefs = '\n'.join(filterdefs)
# figure out the apertures
aperturedefs = []
for key in sorted(lcdict['lcapertures'].keys()):
aperturedefstr = '%s - %.2f px' % (key, lcdict['lcapertures'][key])
aperturedefs.append(aperturedefstr)
aperturedefs = '\n'.join(aperturedefs)
# now fill in the description
description = DESCTEMPLATE.format(
objectid=lcdict['objectid'],
hatid=lcdict['objectinfo']['hatid'],
twomassid=lcdict['objectinfo']['twomassid'].strip(),
ra=lcdict['objectinfo']['ra'],
decl=lcdict['objectinfo']['decl'],
pmra=lcdict['objectinfo']['pmra'],
pmra_err=lcdict['objectinfo']['pmra_err'],
pmdecl=lcdict['objectinfo']['pmdecl'],
pmdecl_err=lcdict['objectinfo']['pmdecl_err'],
jmag=lcdict['objectinfo']['jmag'],
hmag=lcdict['objectinfo']['hmag'],
kmag=lcdict['objectinfo']['kmag'],
bmag=lcdict['objectinfo']['bmag'],
vmag=lcdict['objectinfo']['vmag'],
sdssg=lcdict['objectinfo']['sdssg'],
sdssr=lcdict['objectinfo']['sdssr'],
sdssi=lcdict['objectinfo']['sdssi'],
ndet=lcdict['objectinfo']['ndet'],
lcsortcol=lcdict['lcsortcol'],
lcbestaperture=json.dumps(lcdict['lcbestaperture'],ensure_ascii=True),
network=lcdict['objectinfo']['network'],
stations=lcdict['objectinfo']['stations'],
lastupdated=lcdict['lastupdated'],
datarelease=lcdict['datarelease'],
lcversion=lcdict['lcversion'],
lcserver=lcdict['lcserver'],
comment=lcdict['comment'],
lcfiltersql=(lcdict['lcfiltersql'] if 'lcfiltersql' in lcdict else ''),
lcnormcols=(lcdict['lcnormcols'] if 'lcnormcols' in lcdict else ''),
filterdefs=filterdefs,
columndefs=columndefs,
aperturedefs=aperturedefs
)
if offsetwith is not None:
description = textwrap.indent(
description,
'%s ' % offsetwith,
lambda line: True
)
print(description)
else:
print(description)
if returndesc:
return description
|
def describe(lcdict, returndesc=False, offsetwith=None):
'''This describes the light curve object and columns present.
Parameters
----------
lcdict : dict
The input lcdict to parse for column and metadata info.
returndesc : bool
        If True, returns the description string as a str instead of just
printing it to stdout.
offsetwith : str
This is a character to offset the output description lines by. This is
useful to add comment characters like '#' to the output description
lines.
Returns
-------
str or None
If returndesc is True, returns the description lines as a str, otherwise
returns nothing.
'''
# transparently read LCC CSV format description
if 'lcformat' in lcdict and 'lcc-csv' in lcdict['lcformat'].lower():
return describe_lcc_csv(lcdict, returndesc=returndesc)
# figure out the columndefs part of the header string
columndefs = []
for colind, column in enumerate(lcdict['columns']):
if '_' in column:
colkey, colap = column.split('_')
coldesc = COLUMNDEFS[colkey][0] % colap
else:
coldesc = COLUMNDEFS[column][0]
columndefstr = '%03i - %s - %s' % (colind,
column,
coldesc)
columndefs.append(columndefstr)
columndefs = '\n'.join(columndefs)
# figure out the filterdefs
filterdefs = []
for row in lcdict['filters']:
filterid, filtername, filterdesc = row
filterdefstr = '%s - %s - %s' % (filterid,
filtername,
filterdesc)
filterdefs.append(filterdefstr)
filterdefs = '\n'.join(filterdefs)
# figure out the apertures
aperturedefs = []
for key in sorted(lcdict['lcapertures'].keys()):
aperturedefstr = '%s - %.2f px' % (key, lcdict['lcapertures'][key])
aperturedefs.append(aperturedefstr)
aperturedefs = '\n'.join(aperturedefs)
# now fill in the description
description = DESCTEMPLATE.format(
objectid=lcdict['objectid'],
hatid=lcdict['objectinfo']['hatid'],
twomassid=lcdict['objectinfo']['twomassid'].strip(),
ra=lcdict['objectinfo']['ra'],
decl=lcdict['objectinfo']['decl'],
pmra=lcdict['objectinfo']['pmra'],
pmra_err=lcdict['objectinfo']['pmra_err'],
pmdecl=lcdict['objectinfo']['pmdecl'],
pmdecl_err=lcdict['objectinfo']['pmdecl_err'],
jmag=lcdict['objectinfo']['jmag'],
hmag=lcdict['objectinfo']['hmag'],
kmag=lcdict['objectinfo']['kmag'],
bmag=lcdict['objectinfo']['bmag'],
vmag=lcdict['objectinfo']['vmag'],
sdssg=lcdict['objectinfo']['sdssg'],
sdssr=lcdict['objectinfo']['sdssr'],
sdssi=lcdict['objectinfo']['sdssi'],
ndet=lcdict['objectinfo']['ndet'],
lcsortcol=lcdict['lcsortcol'],
lcbestaperture=json.dumps(lcdict['lcbestaperture'],ensure_ascii=True),
network=lcdict['objectinfo']['network'],
stations=lcdict['objectinfo']['stations'],
lastupdated=lcdict['lastupdated'],
datarelease=lcdict['datarelease'],
lcversion=lcdict['lcversion'],
lcserver=lcdict['lcserver'],
comment=lcdict['comment'],
lcfiltersql=(lcdict['lcfiltersql'] if 'lcfiltersql' in lcdict else ''),
lcnormcols=(lcdict['lcnormcols'] if 'lcnormcols' in lcdict else ''),
filterdefs=filterdefs,
columndefs=columndefs,
aperturedefs=aperturedefs
)
if offsetwith is not None:
description = textwrap.indent(
description,
'%s ' % offsetwith,
lambda line: True
)
print(description)
else:
print(description)
if returndesc:
return description
|
[
"This",
"describes",
"the",
"light",
"curve",
"object",
"and",
"columns",
"present",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/hatsurveys/hatlc.py#L918-L1037
|
[
"def",
"describe",
"(",
"lcdict",
",",
"returndesc",
"=",
"False",
",",
"offsetwith",
"=",
"None",
")",
":",
"# transparently read LCC CSV format description",
"if",
"'lcformat'",
"in",
"lcdict",
"and",
"'lcc-csv'",
"in",
"lcdict",
"[",
"'lcformat'",
"]",
".",
"lower",
"(",
")",
":",
"return",
"describe_lcc_csv",
"(",
"lcdict",
",",
"returndesc",
"=",
"returndesc",
")",
"# figure out the columndefs part of the header string",
"columndefs",
"=",
"[",
"]",
"for",
"colind",
",",
"column",
"in",
"enumerate",
"(",
"lcdict",
"[",
"'columns'",
"]",
")",
":",
"if",
"'_'",
"in",
"column",
":",
"colkey",
",",
"colap",
"=",
"column",
".",
"split",
"(",
"'_'",
")",
"coldesc",
"=",
"COLUMNDEFS",
"[",
"colkey",
"]",
"[",
"0",
"]",
"%",
"colap",
"else",
":",
"coldesc",
"=",
"COLUMNDEFS",
"[",
"column",
"]",
"[",
"0",
"]",
"columndefstr",
"=",
"'%03i - %s - %s'",
"%",
"(",
"colind",
",",
"column",
",",
"coldesc",
")",
"columndefs",
".",
"append",
"(",
"columndefstr",
")",
"columndefs",
"=",
"'\\n'",
".",
"join",
"(",
"columndefs",
")",
"# figure out the filterdefs",
"filterdefs",
"=",
"[",
"]",
"for",
"row",
"in",
"lcdict",
"[",
"'filters'",
"]",
":",
"filterid",
",",
"filtername",
",",
"filterdesc",
"=",
"row",
"filterdefstr",
"=",
"'%s - %s - %s'",
"%",
"(",
"filterid",
",",
"filtername",
",",
"filterdesc",
")",
"filterdefs",
".",
"append",
"(",
"filterdefstr",
")",
"filterdefs",
"=",
"'\\n'",
".",
"join",
"(",
"filterdefs",
")",
"# figure out the apertures",
"aperturedefs",
"=",
"[",
"]",
"for",
"key",
"in",
"sorted",
"(",
"lcdict",
"[",
"'lcapertures'",
"]",
".",
"keys",
"(",
")",
")",
":",
"aperturedefstr",
"=",
"'%s - %.2f px'",
"%",
"(",
"key",
",",
"lcdict",
"[",
"'lcapertures'",
"]",
"[",
"key",
"]",
")",
"aperturedefs",
".",
"append",
"(",
"aperturedefstr",
")",
"aperturedefs",
"=",
"'\\n'",
".",
"join",
"(",
"aperturedefs",
")",
"# now fill in the description",
"description",
"=",
"DESCTEMPLATE",
".",
"format",
"(",
"objectid",
"=",
"lcdict",
"[",
"'objectid'",
"]",
",",
"hatid",
"=",
"lcdict",
"[",
"'objectinfo'",
"]",
"[",
"'hatid'",
"]",
",",
"twomassid",
"=",
"lcdict",
"[",
"'objectinfo'",
"]",
"[",
"'twomassid'",
"]",
".",
"strip",
"(",
")",
",",
"ra",
"=",
"lcdict",
"[",
"'objectinfo'",
"]",
"[",
"'ra'",
"]",
",",
"decl",
"=",
"lcdict",
"[",
"'objectinfo'",
"]",
"[",
"'decl'",
"]",
",",
"pmra",
"=",
"lcdict",
"[",
"'objectinfo'",
"]",
"[",
"'pmra'",
"]",
",",
"pmra_err",
"=",
"lcdict",
"[",
"'objectinfo'",
"]",
"[",
"'pmra_err'",
"]",
",",
"pmdecl",
"=",
"lcdict",
"[",
"'objectinfo'",
"]",
"[",
"'pmdecl'",
"]",
",",
"pmdecl_err",
"=",
"lcdict",
"[",
"'objectinfo'",
"]",
"[",
"'pmdecl_err'",
"]",
",",
"jmag",
"=",
"lcdict",
"[",
"'objectinfo'",
"]",
"[",
"'jmag'",
"]",
",",
"hmag",
"=",
"lcdict",
"[",
"'objectinfo'",
"]",
"[",
"'hmag'",
"]",
",",
"kmag",
"=",
"lcdict",
"[",
"'objectinfo'",
"]",
"[",
"'kmag'",
"]",
",",
"bmag",
"=",
"lcdict",
"[",
"'objectinfo'",
"]",
"[",
"'bmag'",
"]",
",",
"vmag",
"=",
"lcdict",
"[",
"'objectinfo'",
"]",
"[",
"'vmag'",
"]",
",",
"sdssg",
"=",
"lcdict",
"[",
"'objectinfo'",
"]",
"[",
"'sdssg'",
"]",
",",
"sdssr",
"=",
"lcdict",
"[",
"'objectinfo'",
"]",
"[",
"'sdssr'",
"]",
",",
"sdssi",
"=",
"lcdict",
"[",
"'objectinfo'",
"]",
"[",
"'sdssi'",
"]",
",",
"ndet",
"=",
"lcdict",
"[",
"'objectinfo'",
"]",
"[",
"'ndet'",
"]",
",",
"lcsortcol",
"=",
"lcdict",
"[",
"'lcsortcol'",
"]",
",",
"lcbestaperture",
"=",
"json",
".",
"dumps",
"(",
"lcdict",
"[",
"'lcbestaperture'",
"]",
",",
"ensure_ascii",
"=",
"True",
")",
",",
"network",
"=",
"lcdict",
"[",
"'objectinfo'",
"]",
"[",
"'network'",
"]",
",",
"stations",
"=",
"lcdict",
"[",
"'objectinfo'",
"]",
"[",
"'stations'",
"]",
",",
"lastupdated",
"=",
"lcdict",
"[",
"'lastupdated'",
"]",
",",
"datarelease",
"=",
"lcdict",
"[",
"'datarelease'",
"]",
",",
"lcversion",
"=",
"lcdict",
"[",
"'lcversion'",
"]",
",",
"lcserver",
"=",
"lcdict",
"[",
"'lcserver'",
"]",
",",
"comment",
"=",
"lcdict",
"[",
"'comment'",
"]",
",",
"lcfiltersql",
"=",
"(",
"lcdict",
"[",
"'lcfiltersql'",
"]",
"if",
"'lcfiltersql'",
"in",
"lcdict",
"else",
"''",
")",
",",
"lcnormcols",
"=",
"(",
"lcdict",
"[",
"'lcnormcols'",
"]",
"if",
"'lcnormcols'",
"in",
"lcdict",
"else",
"''",
")",
",",
"filterdefs",
"=",
"filterdefs",
",",
"columndefs",
"=",
"columndefs",
",",
"aperturedefs",
"=",
"aperturedefs",
")",
"if",
"offsetwith",
"is",
"not",
"None",
":",
"description",
"=",
"textwrap",
".",
"indent",
"(",
"description",
",",
"'%s '",
"%",
"offsetwith",
",",
"lambda",
"line",
":",
"True",
")",
"print",
"(",
"description",
")",
"else",
":",
"print",
"(",
"description",
")",
"if",
"returndesc",
":",
"return",
"description"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
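Usage sketch, continuing from the reader sketch above: print the metadata with '#' prefixes (handy when writing a commented file header) and also keep the text.

from astrobase.hatsurveys.hatlc import describe

# `lcdict` is assumed to come from read_and_filter_sqlitecurve
desc = describe(lcdict, returndesc=True, offsetwith='#')
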
valid
|
_smartcast
|
This just tries to apply the caster function to castee.
Returns nan, '', or subval on failure, depending on the caster.
|
astrobase/hatsurveys/hatlc.py
|
def _smartcast(castee, caster, subval=None):
'''
This just tries to apply the caster function to castee.
    Returns nan, '', or subval on failure, depending on the caster.
'''
try:
return caster(castee)
except Exception as e:
if caster is float or caster is int:
return nan
elif caster is str:
return ''
else:
return subval
|
def _smartcast(castee, caster, subval=None):
'''
This just tries to apply the caster function to castee.
    Returns nan, '', or subval on failure, depending on the caster.
'''
try:
return caster(castee)
except Exception as e:
if caster is float or caster is int:
return nan
elif caster is str:
return ''
else:
return subval
|
[
"This",
"just",
"tries",
"to",
"apply",
"the",
"caster",
"function",
"to",
"castee",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/hatsurveys/hatlc.py#L1045-L1061
|
[
"def",
"_smartcast",
"(",
"castee",
",",
"caster",
",",
"subval",
"=",
"None",
")",
":",
"try",
":",
"return",
"caster",
"(",
"castee",
")",
"except",
"Exception",
"as",
"e",
":",
"if",
"caster",
"is",
"float",
"or",
"caster",
"is",
"int",
":",
"return",
"nan",
"elif",
"caster",
"is",
"str",
":",
"return",
"''",
"else",
":",
"return",
"subval"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
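A behavior sketch for the casting fallbacks above (assumptions: astrobase is importable, and in hatlc.py `nan` is bound to a float NaN):

import datetime
from astrobase.hatsurveys.hatlc import _smartcast

_smartcast('12.5', float)                              # -> 12.5
_smartcast('N/A', float)                               # -> nan (numeric fallback)
_smartcast('x', int)                                   # -> nan
_smartcast('nope', datetime.date.fromisoformat,
           subval=None)                                # -> subval, i.e. None
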
valid
|
_parse_csv_header
|
This parses the CSV header from the CSV HAT sqlitecurve.
Returns a dict that can be used to update an existing lcdict with the
relevant metadata info needed to form a full LC.
|
astrobase/hatsurveys/hatlc.py
|
def _parse_csv_header(header):
'''
This parses the CSV header from the CSV HAT sqlitecurve.
Returns a dict that can be used to update an existing lcdict with the
relevant metadata info needed to form a full LC.
'''
# first, break into lines
headerlines = header.split('\n')
headerlines = [x.lstrip('# ') for x in headerlines]
# next, find the indices of the metadata sections
objectstart = headerlines.index('OBJECT')
metadatastart = headerlines.index('METADATA')
camfilterstart = headerlines.index('CAMFILTERS')
photaperturestart = headerlines.index('PHOTAPERTURES')
columnstart = headerlines.index('COLUMNS')
lcstart = headerlines.index('LIGHTCURVE')
# get the lines for the header sections
objectinfo = headerlines[objectstart+1:metadatastart-1]
metadatainfo = headerlines[metadatastart+1:camfilterstart-1]
camfilterinfo = headerlines[camfilterstart+1:photaperturestart-1]
photapertureinfo = headerlines[photaperturestart+1:columnstart-1]
columninfo = headerlines[columnstart+1:lcstart-1]
# parse the header sections and insert the appropriate key-val pairs into
# the lcdict
metadict = {'objectinfo':{}}
# first, the objectinfo section
objectinfo = [x.split(';') for x in objectinfo]
for elem in objectinfo:
for kvelem in elem:
key, val = kvelem.split(' = ',1)
metadict['objectinfo'][key.strip()] = (
_smartcast(val, METAKEYS[key.strip()])
)
# the objectid belongs at the top level
metadict['objectid'] = metadict['objectinfo']['objectid'][:]
del metadict['objectinfo']['objectid']
# get the lightcurve metadata
metadatainfo = [x.split(';') for x in metadatainfo]
for elem in metadatainfo:
for kvelem in elem:
try:
key, val = kvelem.split(' = ',1)
# get the lcbestaperture into a dict again
if key.strip() == 'lcbestaperture':
val = json.loads(val)
# get the lcversion and datarelease as integers
if key.strip() in ('datarelease', 'lcversion'):
val = int(val)
# get the lastupdated as a float
if key.strip() == 'lastupdated':
val = float(val)
# put the key-val into the dict
metadict[key.strip()] = val
except Exception as e:
LOGWARNING('could not understand header element "%s",'
' skipped.' % kvelem)
# get the camera filters
metadict['filters'] = []
for row in camfilterinfo:
filterid, filtername, filterdesc = row.split(' - ')
metadict['filters'].append((int(filterid),
filtername,
filterdesc))
# get the photometric apertures
metadict['lcapertures'] = {}
for row in photapertureinfo:
apnum, appix = row.split(' - ')
appix = float(appix.rstrip(' px'))
metadict['lcapertures'][apnum.strip()] = appix
# get the columns
metadict['columns'] = []
for row in columninfo:
colnum, colname, coldesc = row.split(' - ')
metadict['columns'].append(colname)
return metadict
|
def _parse_csv_header(header):
'''
This parses the CSV header from the CSV HAT sqlitecurve.
Returns a dict that can be used to update an existing lcdict with the
relevant metadata info needed to form a full LC.
'''
# first, break into lines
headerlines = header.split('\n')
headerlines = [x.lstrip('# ') for x in headerlines]
# next, find the indices of the metadata sections
objectstart = headerlines.index('OBJECT')
metadatastart = headerlines.index('METADATA')
camfilterstart = headerlines.index('CAMFILTERS')
photaperturestart = headerlines.index('PHOTAPERTURES')
columnstart = headerlines.index('COLUMNS')
lcstart = headerlines.index('LIGHTCURVE')
# get the lines for the header sections
objectinfo = headerlines[objectstart+1:metadatastart-1]
metadatainfo = headerlines[metadatastart+1:camfilterstart-1]
camfilterinfo = headerlines[camfilterstart+1:photaperturestart-1]
photapertureinfo = headerlines[photaperturestart+1:columnstart-1]
columninfo = headerlines[columnstart+1:lcstart-1]
# parse the header sections and insert the appropriate key-val pairs into
# the lcdict
metadict = {'objectinfo':{}}
# first, the objectinfo section
objectinfo = [x.split(';') for x in objectinfo]
for elem in objectinfo:
for kvelem in elem:
key, val = kvelem.split(' = ',1)
metadict['objectinfo'][key.strip()] = (
_smartcast(val, METAKEYS[key.strip()])
)
# the objectid belongs at the top level
metadict['objectid'] = metadict['objectinfo']['objectid'][:]
del metadict['objectinfo']['objectid']
# get the lightcurve metadata
metadatainfo = [x.split(';') for x in metadatainfo]
for elem in metadatainfo:
for kvelem in elem:
try:
key, val = kvelem.split(' = ',1)
# get the lcbestaperture into a dict again
if key.strip() == 'lcbestaperture':
val = json.loads(val)
# get the lcversion and datarelease as integers
if key.strip() in ('datarelease', 'lcversion'):
val = int(val)
# get the lastupdated as a float
if key.strip() == 'lastupdated':
val = float(val)
# put the key-val into the dict
metadict[key.strip()] = val
except Exception as e:
LOGWARNING('could not understand header element "%s",'
' skipped.' % kvelem)
# get the camera filters
metadict['filters'] = []
for row in camfilterinfo:
filterid, filtername, filterdesc = row.split(' - ')
metadict['filters'].append((int(filterid),
filtername,
filterdesc))
# get the photometric apertures
metadict['lcapertures'] = {}
for row in photapertureinfo:
apnum, appix = row.split(' - ')
appix = float(appix.rstrip(' px'))
metadict['lcapertures'][apnum.strip()] = appix
# get the columns
metadict['columns'] = []
for row in columninfo:
colnum, colname, coldesc = row.split(' - ')
metadict['columns'].append(colname)
return metadict
|
[
"This",
"parses",
"the",
"CSV",
"header",
"from",
"the",
"CSV",
"HAT",
"sqlitecurve",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/hatsurveys/hatlc.py#L1090-L1187
|
[
"def",
"_parse_csv_header",
"(",
"header",
")",
":",
"# first, break into lines",
"headerlines",
"=",
"header",
".",
"split",
"(",
"'\\n'",
")",
"headerlines",
"=",
"[",
"x",
".",
"lstrip",
"(",
"'# '",
")",
"for",
"x",
"in",
"headerlines",
"]",
"# next, find the indices of the metadata sections",
"objectstart",
"=",
"headerlines",
".",
"index",
"(",
"'OBJECT'",
")",
"metadatastart",
"=",
"headerlines",
".",
"index",
"(",
"'METADATA'",
")",
"camfilterstart",
"=",
"headerlines",
".",
"index",
"(",
"'CAMFILTERS'",
")",
"photaperturestart",
"=",
"headerlines",
".",
"index",
"(",
"'PHOTAPERTURES'",
")",
"columnstart",
"=",
"headerlines",
".",
"index",
"(",
"'COLUMNS'",
")",
"lcstart",
"=",
"headerlines",
".",
"index",
"(",
"'LIGHTCURVE'",
")",
"# get the lines for the header sections",
"objectinfo",
"=",
"headerlines",
"[",
"objectstart",
"+",
"1",
":",
"metadatastart",
"-",
"1",
"]",
"metadatainfo",
"=",
"headerlines",
"[",
"metadatastart",
"+",
"1",
":",
"camfilterstart",
"-",
"1",
"]",
"camfilterinfo",
"=",
"headerlines",
"[",
"camfilterstart",
"+",
"1",
":",
"photaperturestart",
"-",
"1",
"]",
"photapertureinfo",
"=",
"headerlines",
"[",
"photaperturestart",
"+",
"1",
":",
"columnstart",
"-",
"1",
"]",
"columninfo",
"=",
"headerlines",
"[",
"columnstart",
"+",
"1",
":",
"lcstart",
"-",
"1",
"]",
"# parse the header sections and insert the appropriate key-val pairs into",
"# the lcdict",
"metadict",
"=",
"{",
"'objectinfo'",
":",
"{",
"}",
"}",
"# first, the objectinfo section",
"objectinfo",
"=",
"[",
"x",
".",
"split",
"(",
"';'",
")",
"for",
"x",
"in",
"objectinfo",
"]",
"for",
"elem",
"in",
"objectinfo",
":",
"for",
"kvelem",
"in",
"elem",
":",
"key",
",",
"val",
"=",
"kvelem",
".",
"split",
"(",
"' = '",
",",
"1",
")",
"metadict",
"[",
"'objectinfo'",
"]",
"[",
"key",
".",
"strip",
"(",
")",
"]",
"=",
"(",
"_smartcast",
"(",
"val",
",",
"METAKEYS",
"[",
"key",
".",
"strip",
"(",
")",
"]",
")",
")",
"# the objectid belongs at the top level",
"metadict",
"[",
"'objectid'",
"]",
"=",
"metadict",
"[",
"'objectinfo'",
"]",
"[",
"'objectid'",
"]",
"[",
":",
"]",
"del",
"metadict",
"[",
"'objectinfo'",
"]",
"[",
"'objectid'",
"]",
"# get the lightcurve metadata",
"metadatainfo",
"=",
"[",
"x",
".",
"split",
"(",
"';'",
")",
"for",
"x",
"in",
"metadatainfo",
"]",
"for",
"elem",
"in",
"metadatainfo",
":",
"for",
"kvelem",
"in",
"elem",
":",
"try",
":",
"key",
",",
"val",
"=",
"kvelem",
".",
"split",
"(",
"' = '",
",",
"1",
")",
"# get the lcbestaperture into a dict again",
"if",
"key",
".",
"strip",
"(",
")",
"==",
"'lcbestaperture'",
":",
"val",
"=",
"json",
".",
"loads",
"(",
"val",
")",
"# get the lcversion and datarelease as integers",
"if",
"key",
".",
"strip",
"(",
")",
"in",
"(",
"'datarelease'",
",",
"'lcversion'",
")",
":",
"val",
"=",
"int",
"(",
"val",
")",
"# get the lastupdated as a float",
"if",
"key",
".",
"strip",
"(",
")",
"==",
"'lastupdated'",
":",
"val",
"=",
"float",
"(",
"val",
")",
"# put the key-val into the dict",
"metadict",
"[",
"key",
".",
"strip",
"(",
")",
"]",
"=",
"val",
"except",
"Exception",
"as",
"e",
":",
"LOGWARNING",
"(",
"'could not understand header element \"%s\",'",
"' skipped.'",
"%",
"kvelem",
")",
"# get the camera filters",
"metadict",
"[",
"'filters'",
"]",
"=",
"[",
"]",
"for",
"row",
"in",
"camfilterinfo",
":",
"filterid",
",",
"filtername",
",",
"filterdesc",
"=",
"row",
".",
"split",
"(",
"' - '",
")",
"metadict",
"[",
"'filters'",
"]",
".",
"append",
"(",
"(",
"int",
"(",
"filterid",
")",
",",
"filtername",
",",
"filterdesc",
")",
")",
"# get the photometric apertures",
"metadict",
"[",
"'lcapertures'",
"]",
"=",
"{",
"}",
"for",
"row",
"in",
"photapertureinfo",
":",
"apnum",
",",
"appix",
"=",
"row",
".",
"split",
"(",
"' - '",
")",
"appix",
"=",
"float",
"(",
"appix",
".",
"rstrip",
"(",
"' px'",
")",
")",
"metadict",
"[",
"'lcapertures'",
"]",
"[",
"apnum",
".",
"strip",
"(",
")",
"]",
"=",
"appix",
"# get the columns",
"metadict",
"[",
"'columns'",
"]",
"=",
"[",
"]",
"for",
"row",
"in",
"columninfo",
":",
"colnum",
",",
"colname",
",",
"coldesc",
"=",
"row",
".",
"split",
"(",
"' - '",
")",
"metadict",
"[",
"'columns'",
"]",
".",
"append",
"(",
"colname",
")",
"return",
"metadict"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
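The section slicing above assumes one blank line between header sections, which is why every slice stops at `index - 1`. A standalone toy illustration (the header content here is hypothetical):

headerlines = ['OBJECT', 'objectid = HAT-123-0000001', '',
               'METADATA', 'lcversion = 2', '',
               'CAMFILTERS']
objectstart = headerlines.index('OBJECT')        # 0
metadatastart = headerlines.index('METADATA')    # 3
objectinfo = headerlines[objectstart+1:metadatastart-1]
# -> ['objectid = HAT-123-0000001']; the '-1' stop drops the blank separator
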
valid
|
_parse_csv_header_lcc_csv_v1
|
This parses the header of the LCC CSV V1 LC format.
|
astrobase/hatsurveys/hatlc.py
|
def _parse_csv_header_lcc_csv_v1(headerlines):
'''
This parses the header of the LCC CSV V1 LC format.
'''
# the first three lines indicate the format name, comment char, separator
commentchar = headerlines[1]
separator = headerlines[2]
headerlines = [x.lstrip('%s ' % commentchar) for x in headerlines[3:]]
# next, find the indices of the various LC sections
metadatastart = headerlines.index('OBJECT METADATA')
columnstart = headerlines.index('COLUMN DEFINITIONS')
lcstart = headerlines.index('LIGHTCURVE')
    metadata = ' '.join(headerlines[metadatastart+1:columnstart-1])
    columns = ' '.join(headerlines[columnstart+1:lcstart-1])
metadata = json.loads(metadata)
columns = json.loads(columns)
return metadata, columns, separator
|
def _parse_csv_header_lcc_csv_v1(headerlines):
'''
This parses the header of the LCC CSV V1 LC format.
'''
# the first three lines indicate the format name, comment char, separator
commentchar = headerlines[1]
separator = headerlines[2]
headerlines = [x.lstrip('%s ' % commentchar) for x in headerlines[3:]]
# next, find the indices of the various LC sections
metadatastart = headerlines.index('OBJECT METADATA')
columnstart = headerlines.index('COLUMN DEFINITIONS')
lcstart = headerlines.index('LIGHTCURVE')
    metadata = ' '.join(headerlines[metadatastart+1:columnstart-1])
    columns = ' '.join(headerlines[columnstart+1:lcstart-1])
metadata = json.loads(metadata)
columns = json.loads(columns)
return metadata, columns, separator
|
[
"This",
"parses",
"the",
"header",
"of",
"the",
"LCC",
"CSV",
"V1",
"LC",
"format",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/hatsurveys/hatlc.py#L1195-L1217
|
[
"def",
"_parse_csv_header_lcc_csv_v1",
"(",
"headerlines",
")",
":",
"# the first three lines indicate the format name, comment char, separator",
"commentchar",
"=",
"headerlines",
"[",
"1",
"]",
"separator",
"=",
"headerlines",
"[",
"2",
"]",
"headerlines",
"=",
"[",
"x",
".",
"lstrip",
"(",
"'%s '",
"%",
"commentchar",
")",
"for",
"x",
"in",
"headerlines",
"[",
"3",
":",
"]",
"]",
"# next, find the indices of the various LC sections",
"metadatastart",
"=",
"headerlines",
".",
"index",
"(",
"'OBJECT METADATA'",
")",
"columnstart",
"=",
"headerlines",
".",
"index",
"(",
"'COLUMN DEFINITIONS'",
")",
"lcstart",
"=",
"headerlines",
".",
"index",
"(",
"'LIGHTCURVE'",
")",
"metadata",
"=",
"' '",
".",
"join",
"(",
"headerlines",
"[",
"metadatastart",
"+",
"1",
":",
"columnstart",
"-",
"1",
"]",
")",
"columns",
"=",
"' '",
".",
"join",
"(",
"headerlines",
"[",
"columnstart",
"+",
"1",
":",
"lcstart",
"-",
"1",
"]",
")",
"metadata",
"=",
"json",
".",
"loads",
"(",
"metadata",
")",
"columns",
"=",
"json",
".",
"loads",
"(",
"columns",
")",
"return",
"metadata",
",",
"columns",
",",
"separator"
] |
2922a14619d183fb28005fa7d02027ac436f2265
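A quick way to see what this parser expects is to feed it a hand-built header. The sketch below is hypothetical but follows the layout the code requires: format name, comment char, and separator on the first three lines, then the 'OBJECT METADATA' and 'COLUMN DEFINITIONS' sections as JSON, each followed by one filler comment line (the columnstart-1 and lcstart-1 slice ends drop the last line before each section marker):

from astrobase.hatsurveys.hatlc import _parse_csv_header_lcc_csv_v1

# a hypothetical, minimal LCC CSV V1 header
headerlines = [
    'LCC-CSVLC-V1',                                             # format name
    '#',                                                        # comment char
    ',',                                                        # separator
    '# OBJECT METADATA',
    '# {"objectid": {"val": "HAT-123-0000001", "desc": "ID"}}',
    '#',                              # filler line dropped by columnstart-1
    '# COLUMN DEFINITIONS',
    '# {"rjd": {"colnum": 0, "dtype": "f8", "desc": "time"}}',
    '#',                              # filler line dropped by lcstart-1
    '# LIGHTCURVE',
]

metadata, columns, separator = _parse_csv_header_lcc_csv_v1(headerlines)
print(metadata['objectid']['val'])   # HAT-123-0000001
print(columns['rjd']['colnum'])      # 0
print(separator)                     # ,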
|
valid
|
read_lcc_csvlc
|
This reads a CSV LC produced by an `LCC-Server
<https://github.com/waqasbhatti/lcc-server>`_ instance.
Parameters
----------
lcfile : str
The LC file to read.
Returns
-------
dict
Returns an lcdict that's readable by most astrobase functions for
further processing.
|
astrobase/hatsurveys/hatlc.py
|
def read_lcc_csvlc(lcfile):
'''This reads a CSV LC produced by an `LCC-Server
<https://github.com/waqasbhatti/lcc-server>`_ instance.
Parameters
----------
lcfile : str
The LC file to read.
Returns
-------
dict
Returns an lcdict that's readable by most astrobase functions for
further processing.
'''
# read in the file and split by lines
if '.gz' in os.path.basename(lcfile):
infd = gzip.open(lcfile,'rb')
else:
infd = open(lcfile,'rb')
lctext = infd.read().decode()
infd.close()
lctextlines = lctext.split('\n')
lcformat = lctextlines[0]
commentchar = lctextlines[1]
lcstart = lctextlines.index('%s LIGHTCURVE' % commentchar)
headerlines = lctextlines[:lcstart+1]
lclines = lctextlines[lcstart+1:]
metadata, columns, separator = _parse_csv_header_lcc_csv_v1(headerlines)
# break out the objectid and objectinfo
objectid = metadata['objectid']['val']
objectinfo = {key:metadata[key]['val'] for key in metadata}
# figure out the column dtypes
colnames = []
colnum = []
coldtypes = []
# generate the args for np.genfromtxt
for k in columns:
coldef = columns[k]
colnames.append(k)
colnum.append(coldef['colnum'])
coldtypes.append(coldef['dtype'])
coldtypes = ','.join(coldtypes)
# read in the LC
recarr = np.genfromtxt(
lclines,
comments=commentchar,
delimiter=separator,
usecols=colnum,
autostrip=True,
names=colnames,
dtype=coldtypes
)
lcdict = {x:recarr[x] for x in colnames}
lcdict['lcformat'] = lcformat
lcdict['objectid'] = objectid
lcdict['objectinfo'] = objectinfo
lcdict['columns'] = colnames
lcdict['coldefs'] = columns
lcdict['metadata'] = metadata
return lcdict
|
def read_lcc_csvlc(lcfile):
'''This reads a CSV LC produced by an `LCC-Server
<https://github.com/waqasbhatti/lcc-server>`_ instance.
Parameters
----------
lcfile : str
The LC file to read.
Returns
-------
dict
Returns an lcdict that's readable by most astrobase functions for
further processing.
'''
# read in the file and split by lines
if '.gz' in os.path.basename(lcfile):
infd = gzip.open(lcfile,'rb')
else:
infd = open(lcfile,'rb')
lctext = infd.read().decode()
infd.close()
lctextlines = lctext.split('\n')
lcformat = lctextlines[0]
commentchar = lctextlines[1]
lcstart = lctextlines.index('%s LIGHTCURVE' % commentchar)
headerlines = lctextlines[:lcstart+1]
lclines = lctextlines[lcstart+1:]
metadata, columns, separator = _parse_csv_header_lcc_csv_v1(headerlines)
# break out the objectid and objectinfo
objectid = metadata['objectid']['val']
objectinfo = {key:metadata[key]['val'] for key in metadata}
# figure out the column dtypes
colnames = []
colnum = []
coldtypes = []
# generate the args for np.genfromtxt
for k in columns:
coldef = columns[k]
colnames.append(k)
colnum.append(coldef['colnum'])
coldtypes.append(coldef['dtype'])
coldtypes = ','.join(coldtypes)
# read in the LC
recarr = np.genfromtxt(
lclines,
comments=commentchar,
delimiter=separator,
usecols=colnum,
autostrip=True,
names=colnames,
dtype=coldtypes
)
lcdict = {x:recarr[x] for x in colnames}
lcdict['lcformat'] = lcformat
lcdict['objectid'] = objectid
lcdict['objectinfo'] = objectinfo
lcdict['columns'] = colnames
lcdict['coldefs'] = columns
lcdict['metadata'] = metadata
return lcdict
|
[
"This",
"reads",
"a",
"CSV",
"LC",
"produced",
"by",
"an",
"LCC",
"-",
"Server",
"<https",
":",
"//",
"github",
".",
"com",
"/",
"waqasbhatti",
"/",
"lcc",
"-",
"server",
">",
"_",
"instance",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/hatsurveys/hatlc.py#L1221-L1299
|
[
"def",
"read_lcc_csvlc",
"(",
"lcfile",
")",
":",
"# read in the file and split by lines",
"if",
"'.gz'",
"in",
"os",
".",
"path",
".",
"basename",
"(",
"lcfile",
")",
":",
"infd",
"=",
"gzip",
".",
"open",
"(",
"lcfile",
",",
"'rb'",
")",
"else",
":",
"infd",
"=",
"open",
"(",
"lcfile",
",",
"'rb'",
")",
"lctext",
"=",
"infd",
".",
"read",
"(",
")",
".",
"decode",
"(",
")",
"infd",
".",
"close",
"(",
")",
"lctextlines",
"=",
"lctext",
".",
"split",
"(",
"'\\n'",
")",
"lcformat",
"=",
"lctextlines",
"[",
"0",
"]",
"commentchar",
"=",
"lctextlines",
"[",
"1",
"]",
"lcstart",
"=",
"lctextlines",
".",
"index",
"(",
"'%s LIGHTCURVE'",
"%",
"commentchar",
")",
"headerlines",
"=",
"lctextlines",
"[",
":",
"lcstart",
"+",
"1",
"]",
"lclines",
"=",
"lctextlines",
"[",
"lcstart",
"+",
"1",
":",
"]",
"metadata",
",",
"columns",
",",
"separator",
"=",
"_parse_csv_header_lcc_csv_v1",
"(",
"headerlines",
")",
"# break out the objectid and objectinfo",
"objectid",
"=",
"metadata",
"[",
"'objectid'",
"]",
"[",
"'val'",
"]",
"objectinfo",
"=",
"{",
"key",
":",
"metadata",
"[",
"key",
"]",
"[",
"'val'",
"]",
"for",
"key",
"in",
"metadata",
"}",
"# figure out the column dtypes",
"colnames",
"=",
"[",
"]",
"colnum",
"=",
"[",
"]",
"coldtypes",
"=",
"[",
"]",
"# generate the args for np.genfromtxt",
"for",
"k",
"in",
"columns",
":",
"coldef",
"=",
"columns",
"[",
"k",
"]",
"colnames",
".",
"append",
"(",
"k",
")",
"colnum",
".",
"append",
"(",
"coldef",
"[",
"'colnum'",
"]",
")",
"coldtypes",
".",
"append",
"(",
"coldef",
"[",
"'dtype'",
"]",
")",
"coldtypes",
"=",
"','",
".",
"join",
"(",
"coldtypes",
")",
"# read in the LC",
"recarr",
"=",
"np",
".",
"genfromtxt",
"(",
"lclines",
",",
"comments",
"=",
"commentchar",
",",
"delimiter",
"=",
"separator",
",",
"usecols",
"=",
"colnum",
",",
"autostrip",
"=",
"True",
",",
"names",
"=",
"colnames",
",",
"dtype",
"=",
"coldtypes",
")",
"lcdict",
"=",
"{",
"x",
":",
"recarr",
"[",
"x",
"]",
"for",
"x",
"in",
"colnames",
"}",
"lcdict",
"[",
"'lcformat'",
"]",
"=",
"lcformat",
"lcdict",
"[",
"'objectid'",
"]",
"=",
"objectid",
"lcdict",
"[",
"'objectinfo'",
"]",
"=",
"objectinfo",
"lcdict",
"[",
"'columns'",
"]",
"=",
"colnames",
"lcdict",
"[",
"'coldefs'",
"]",
"=",
"columns",
"lcdict",
"[",
"'metadata'",
"]",
"=",
"metadata",
"return",
"lcdict"
] |
2922a14619d183fb28005fa7d02027ac436f2265
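Typical use is a one-liner; the sketch below assumes a hypothetical downloaded file name and hypothetical column names:

from astrobase.hatsurveys.hatlc import read_lcc_csvlc

lcdict = read_lcc_csvlc('HAT-123-0000001-csvlc.gz')

print(lcdict['objectid'])
print(lcdict['columns'])   # parsed column names, e.g. ['rjd', 'aep_000', ...]
print(lcdict['rjd'][:5])   # each column is a numpy array keyed by its name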
|
valid
|
describe_lcc_csv
|
This describes the LCC CSV format light curve file.
Parameters
----------
lcdict : dict
The input lcdict to parse for column and metadata info.
returndesc : bool
If True, returns the description string as a str instead of just
printing it to stdout.
Returns
-------
str or None
If returndesc is True, returns the description lines as a str, otherwise
returns nothing.
|
astrobase/hatsurveys/hatlc.py
|
def describe_lcc_csv(lcdict, returndesc=False):
'''
This describes the LCC CSV format light curve file.
Parameters
----------
lcdict : dict
The input lcdict to parse for column and metadata info.
returndesc : bool
    If True, returns the description string as a str instead of just
printing it to stdout.
Returns
-------
str or None
If returndesc is True, returns the description lines as a str, otherwise
returns nothing.
'''
metadata_lines = []
coldef_lines = []
if 'lcformat' in lcdict and 'lcc-csv' in lcdict['lcformat'].lower():
metadata = lcdict['metadata']
metakeys = lcdict['objectinfo'].keys()
coldefs = lcdict['coldefs']
for mk in metakeys:
metadata_lines.append(
'%20s | %s' % (
mk,
metadata[mk]['desc']
)
)
for ck in lcdict['columns']:
coldef_lines.append('column %02d | %8s | numpy dtype: %3s | %s'
% (coldefs[ck]['colnum'],
ck,
coldefs[ck]['dtype'],
coldefs[ck]['desc']))
desc = LCC_CSVLC_DESCTEMPLATE.format(
objectid=lcdict['objectid'],
metadata_desc='\n'.join(metadata_lines),
metadata=pformat(lcdict['objectinfo']),
columndefs='\n'.join(coldef_lines)
)
print(desc)
if returndesc:
return desc
else:
LOGERROR("this lcdict is not from an LCC CSV, can't figure it out...")
return None
|
def describe_lcc_csv(lcdict, returndesc=False):
'''
This describes the LCC CSV format light curve file.
Parameters
----------
lcdict : dict
The input lcdict to parse for column and metadata info.
returndesc : bool
    If True, returns the description string as a str instead of just
printing it to stdout.
Returns
-------
str or None
If returndesc is True, returns the description lines as a str, otherwise
returns nothing.
'''
metadata_lines = []
coldef_lines = []
if 'lcformat' in lcdict and 'lcc-csv' in lcdict['lcformat'].lower():
metadata = lcdict['metadata']
metakeys = lcdict['objectinfo'].keys()
coldefs = lcdict['coldefs']
for mk in metakeys:
metadata_lines.append(
'%20s | %s' % (
mk,
metadata[mk]['desc']
)
)
for ck in lcdict['columns']:
coldef_lines.append('column %02d | %8s | numpy dtype: %3s | %s'
% (coldefs[ck]['colnum'],
ck,
coldefs[ck]['dtype'],
coldefs[ck]['desc']))
desc = LCC_CSVLC_DESCTEMPLATE.format(
objectid=lcdict['objectid'],
metadata_desc='\n'.join(metadata_lines),
metadata=pformat(lcdict['objectinfo']),
columndefs='\n'.join(coldef_lines)
)
print(desc)
if returndesc:
return desc
else:
LOGERROR("this lcdict is not from an LCC CSV, can't figure it out...")
return None
|
[
"This",
"describes",
"the",
"LCC",
"CSV",
"format",
"light",
"curve",
"file",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/hatsurveys/hatlc.py#L1303-L1369
|
[
"def",
"describe_lcc_csv",
"(",
"lcdict",
",",
"returndesc",
"=",
"False",
")",
":",
"metadata_lines",
"=",
"[",
"]",
"coldef_lines",
"=",
"[",
"]",
"if",
"'lcformat'",
"in",
"lcdict",
"and",
"'lcc-csv'",
"in",
"lcdict",
"[",
"'lcformat'",
"]",
".",
"lower",
"(",
")",
":",
"metadata",
"=",
"lcdict",
"[",
"'metadata'",
"]",
"metakeys",
"=",
"lcdict",
"[",
"'objectinfo'",
"]",
".",
"keys",
"(",
")",
"coldefs",
"=",
"lcdict",
"[",
"'coldefs'",
"]",
"for",
"mk",
"in",
"metakeys",
":",
"metadata_lines",
".",
"append",
"(",
"'%20s | %s'",
"%",
"(",
"mk",
",",
"metadata",
"[",
"mk",
"]",
"[",
"'desc'",
"]",
")",
")",
"for",
"ck",
"in",
"lcdict",
"[",
"'columns'",
"]",
":",
"coldef_lines",
".",
"append",
"(",
"'column %02d | %8s | numpy dtype: %3s | %s'",
"%",
"(",
"coldefs",
"[",
"ck",
"]",
"[",
"'colnum'",
"]",
",",
"ck",
",",
"coldefs",
"[",
"ck",
"]",
"[",
"'dtype'",
"]",
",",
"coldefs",
"[",
"ck",
"]",
"[",
"'desc'",
"]",
")",
")",
"desc",
"=",
"LCC_CSVLC_DESCTEMPLATE",
".",
"format",
"(",
"objectid",
"=",
"lcdict",
"[",
"'objectid'",
"]",
",",
"metadata_desc",
"=",
"'\\n'",
".",
"join",
"(",
"metadata_lines",
")",
",",
"metadata",
"=",
"pformat",
"(",
"lcdict",
"[",
"'objectinfo'",
"]",
")",
",",
"columndefs",
"=",
"'\\n'",
".",
"join",
"(",
"coldef_lines",
")",
")",
"print",
"(",
"desc",
")",
"if",
"returndesc",
":",
"return",
"desc",
"else",
":",
"LOGERROR",
"(",
"\"this lcdict is not from an LCC CSV, can't figure it out...\"",
")",
"return",
"None"
] |
2922a14619d183fb28005fa7d02027ac436f2265
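Since this function prints by default and only returns the text on request, a short usage sketch (the file names are hypothetical):

from astrobase.hatsurveys.hatlc import read_lcc_csvlc, describe_lcc_csv

lcdict = read_lcc_csvlc('HAT-123-0000001-csvlc.gz')

# prints the formatted description to stdout; with returndesc=True the
# same text is also returned so it can be logged or saved
desc = describe_lcc_csv(lcdict, returndesc=True)
with open('HAT-123-0000001-description.txt', 'w') as outf:
    outf.write(desc)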
|
valid
|
read_csvlc
|
This reads a HAT data server or LCC-Server produced CSV light curve
into an lcdict.
This will automatically figure out the format of the file
provided. Currently, it can read:
- legacy HAT data server CSV LCs (e.g. from
https://hatsouth.org/planets/lightcurves.html) with an extension of the
form: `.hatlc.csv.gz`.
- all LCC-Server produced LCC-CSV-V1 LCs (e.g. from
https://data.hatsurveys.org) with an extension of the form: `-csvlc.gz`.
Parameters
----------
lcfile : str
The light curve file to read.
Returns
-------
dict
Returns an lcdict that can be read and used by many astrobase processing
functions.
|
astrobase/hatsurveys/hatlc.py
|
def read_csvlc(lcfile):
'''This reads a HAT data server or LCC-Server produced CSV light curve
into an lcdict.
This will automatically figure out the format of the file
provided. Currently, it can read:
- legacy HAT data server CSV LCs (e.g. from
https://hatsouth.org/planets/lightcurves.html) with an extension of the
form: `.hatlc.csv.gz`.
- all LCC-Server produced LCC-CSV-V1 LCs (e.g. from
https://data.hatsurveys.org) with an extension of the form: `-csvlc.gz`.
Parameters
----------
lcfile : str
The light curve file to read.
Returns
-------
dict
Returns an lcdict that can be read and used by many astrobase processing
functions.
'''
# read in the file and split by lines
if '.gz' in os.path.basename(lcfile):
LOGINFO('reading gzipped HATLC: %s' % lcfile)
infd = gzip.open(lcfile,'rb')
else:
LOGINFO('reading HATLC: %s' % lcfile)
infd = open(lcfile,'rb')
# this transparently reads LCC CSVLCs
lcformat_check = infd.read(12).decode()
if 'LCC-CSVLC' in lcformat_check:
infd.close()
return read_lcc_csvlc(lcfile)
else:
infd.seek(0)
# below is reading the HATLC v2 CSV LCs
lctext = infd.read().decode() # argh Python 3
infd.close()
# figure out the header and get the LC columns
lcstart = lctext.index('# LIGHTCURVE\n')
lcheader = lctext[:lcstart+12]
lccolumns = lctext[lcstart+13:].split('\n')
lccolumns = [x for x in lccolumns if len(x) > 0]
# initialize the lcdict and parse the CSV header
lcdict = _parse_csv_header(lcheader)
# transpose the LC rows into columns
lccolumns = [x.split(',') for x in lccolumns]
lccolumns = list(zip(*lccolumns)) # argh more Python 3
# write the columns to the dict
for colind, col in enumerate(lcdict['columns']):
if (col.split('_')[0] in LC_MAG_COLUMNS or
col.split('_')[0] in LC_ERR_COLUMNS or
col.split('_')[0] in LC_FLAG_COLUMNS):
lcdict[col] = np.array([_smartcast(x,
COLUMNDEFS[col.split('_')[0]][2])
for x in lccolumns[colind]])
elif col in COLUMNDEFS:
lcdict[col] = np.array([_smartcast(x,COLUMNDEFS[col][2])
for x in lccolumns[colind]])
else:
LOGWARNING('lcdict col %s has no formatter available' % col)
continue
return lcdict
|
def read_csvlc(lcfile):
'''This reads a HAT data server or LCC-Server produced CSV light curve
into an lcdict.
This will automatically figure out the format of the file
provided. Currently, it can read:
- legacy HAT data server CSV LCs (e.g. from
https://hatsouth.org/planets/lightcurves.html) with an extension of the
form: `.hatlc.csv.gz`.
- all LCC-Server produced LCC-CSV-V1 LCs (e.g. from
https://data.hatsurveys.org) with an extension of the form: `-csvlc.gz`.
Parameters
----------
lcfile : str
The light curve file to read.
Returns
-------
dict
Returns an lcdict that can be read and used by many astrobase processing
functions.
'''
# read in the file and split by lines
if '.gz' in os.path.basename(lcfile):
LOGINFO('reading gzipped HATLC: %s' % lcfile)
infd = gzip.open(lcfile,'rb')
else:
LOGINFO('reading HATLC: %s' % lcfile)
infd = open(lcfile,'rb')
# this transparently reads LCC CSVLCs
lcformat_check = infd.read(12).decode()
if 'LCC-CSVLC' in lcformat_check:
infd.close()
return read_lcc_csvlc(lcfile)
else:
infd.seek(0)
# below is reading the HATLC v2 CSV LCs
lctext = infd.read().decode() # argh Python 3
infd.close()
# figure out the header and get the LC columns
lcstart = lctext.index('# LIGHTCURVE\n')
lcheader = lctext[:lcstart+12]
lccolumns = lctext[lcstart+13:].split('\n')
lccolumns = [x for x in lccolumns if len(x) > 0]
# initialize the lcdict and parse the CSV header
lcdict = _parse_csv_header(lcheader)
# transpose the LC rows into columns
lccolumns = [x.split(',') for x in lccolumns]
lccolumns = list(zip(*lccolumns)) # argh more Python 3
# write the columns to the dict
for colind, col in enumerate(lcdict['columns']):
if (col.split('_')[0] in LC_MAG_COLUMNS or
col.split('_')[0] in LC_ERR_COLUMNS or
col.split('_')[0] in LC_FLAG_COLUMNS):
lcdict[col] = np.array([_smartcast(x,
COLUMNDEFS[col.split('_')[0]][2])
for x in lccolumns[colind]])
elif col in COLUMNDEFS:
lcdict[col] = np.array([_smartcast(x,COLUMNDEFS[col][2])
for x in lccolumns[colind]])
else:
LOGWARNING('lcdict col %s has no formatter available' % col)
continue
return lcdict
|
[
"This",
"reads",
"a",
"HAT",
"data",
"server",
"or",
"LCC",
"-",
"Server",
"produced",
"CSV",
"light",
"curve",
"into",
"an",
"lcdict",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/hatsurveys/hatlc.py#L1377-L1459
|
[
"def",
"read_csvlc",
"(",
"lcfile",
")",
":",
"# read in the file and split by lines",
"if",
"'.gz'",
"in",
"os",
".",
"path",
".",
"basename",
"(",
"lcfile",
")",
":",
"LOGINFO",
"(",
"'reading gzipped HATLC: %s'",
"%",
"lcfile",
")",
"infd",
"=",
"gzip",
".",
"open",
"(",
"lcfile",
",",
"'rb'",
")",
"else",
":",
"LOGINFO",
"(",
"'reading HATLC: %s'",
"%",
"lcfile",
")",
"infd",
"=",
"open",
"(",
"lcfile",
",",
"'rb'",
")",
"# this transparently reads LCC CSVLCs",
"lcformat_check",
"=",
"infd",
".",
"read",
"(",
"12",
")",
".",
"decode",
"(",
")",
"if",
"'LCC-CSVLC'",
"in",
"lcformat_check",
":",
"infd",
".",
"close",
"(",
")",
"return",
"read_lcc_csvlc",
"(",
"lcfile",
")",
"else",
":",
"infd",
".",
"seek",
"(",
"0",
")",
"# below is reading the HATLC v2 CSV LCs",
"lctext",
"=",
"infd",
".",
"read",
"(",
")",
".",
"decode",
"(",
")",
"# argh Python 3",
"infd",
".",
"close",
"(",
")",
"# figure out the header and get the LC columns",
"lcstart",
"=",
"lctext",
".",
"index",
"(",
"'# LIGHTCURVE\\n'",
")",
"lcheader",
"=",
"lctext",
"[",
":",
"lcstart",
"+",
"12",
"]",
"lccolumns",
"=",
"lctext",
"[",
"lcstart",
"+",
"13",
":",
"]",
".",
"split",
"(",
"'\\n'",
")",
"lccolumns",
"=",
"[",
"x",
"for",
"x",
"in",
"lccolumns",
"if",
"len",
"(",
"x",
")",
">",
"0",
"]",
"# initialize the lcdict and parse the CSV header",
"lcdict",
"=",
"_parse_csv_header",
"(",
"lcheader",
")",
"# tranpose the LC rows into columns",
"lccolumns",
"=",
"[",
"x",
".",
"split",
"(",
"','",
")",
"for",
"x",
"in",
"lccolumns",
"]",
"lccolumns",
"=",
"list",
"(",
"zip",
"(",
"*",
"lccolumns",
")",
")",
"# argh more Python 3",
"# write the columns to the dict",
"for",
"colind",
",",
"col",
"in",
"enumerate",
"(",
"lcdict",
"[",
"'columns'",
"]",
")",
":",
"if",
"(",
"col",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
"in",
"LC_MAG_COLUMNS",
"or",
"col",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
"in",
"LC_ERR_COLUMNS",
"or",
"col",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
"in",
"LC_FLAG_COLUMNS",
")",
":",
"lcdict",
"[",
"col",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"_smartcast",
"(",
"x",
",",
"COLUMNDEFS",
"[",
"col",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
"]",
"[",
"2",
"]",
")",
"for",
"x",
"in",
"lccolumns",
"[",
"colind",
"]",
"]",
")",
"elif",
"col",
"in",
"COLUMNDEFS",
":",
"lcdict",
"[",
"col",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"_smartcast",
"(",
"x",
",",
"COLUMNDEFS",
"[",
"col",
"]",
"[",
"2",
"]",
")",
"for",
"x",
"in",
"lccolumns",
"[",
"colind",
"]",
"]",
")",
"else",
":",
"LOGWARNING",
"(",
"'lcdict col %s has no formatter available'",
"%",
"col",
")",
"continue",
"return",
"lcdict"
] |
2922a14619d183fb28005fa7d02027ac436f2265
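Because the format check reads the first few bytes rather than trusting the file name, both flavors go through the same call; the file names below are hypothetical:

from astrobase.hatsurveys.hatlc import read_csvlc

# a legacy HAT data server CSV (HATLC v2) and an LCC-Server CSV; read_csvlc
# sniffs the 'LCC-CSVLC' marker in the first 12 bytes to pick the parser
legacy_lc = read_csvlc('HAT-123-0000001.hatlc.csv.gz')
lcc_lc = read_csvlc('HAT-123-0000001-csvlc.gz')

for lc in (legacy_lc, lcc_lc):
    print(lc['objectid'], lc['columns'])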
|
valid
|
find_lc_timegroups
|
This finds the time gaps in the light curve, so we can figure out which
times are for consecutive observations and which represent gaps
between seasons.
Parameters
----------
lctimes : np.array
This is the input array of times, assumed to be in some form of JD.
mingap : float
This is the minimum gap between consecutive measurements required to
place them in different timegroups. By default it is set to 4.0 days.
Returns
-------
tuple
A tuple of the form below is returned, containing the number of time
groups found and Python slice objects for each group::
(ngroups, [slice(start_ind_1, end_ind_1), ...])
|
astrobase/hatsurveys/hatlc.py
|
def find_lc_timegroups(lctimes, mingap=4.0):
'''This finds the time gaps in the light curve, so we can figure out which
times are for consecutive observations and which represent gaps
between seasons.
Parameters
----------
lctimes : np.array
This is the input array of times, assumed to be in some form of JD.
mingap : float
    This is the minimum gap between consecutive measurements required to
    place them in different timegroups. By default it is set to 4.0 days.
Returns
-------
tuple
A tuple of the form below is returned, containing the number of time
groups found and Python slice objects for each group::
(ngroups, [slice(start_ind_1, end_ind_1), ...])
'''
lc_time_diffs = [(lctimes[x] - lctimes[x-1]) for x in range(1,len(lctimes))]
lc_time_diffs = np.array(lc_time_diffs)
group_start_indices = np.where(lc_time_diffs > mingap)[0]
if len(group_start_indices) > 0:
group_indices = []
for i, gindex in enumerate(group_start_indices):
if i == 0:
group_indices.append(slice(0,gindex+1))
else:
group_indices.append(slice(group_start_indices[i-1]+1,gindex+1))
# at the end, add the slice for the last group to the end of the times
# array
group_indices.append(slice(group_start_indices[-1]+1,len(lctimes)))
# if there's no large gap in the LC, then there's only one group to worry
# about
else:
group_indices = [slice(0,len(lctimes))]
return len(group_indices), group_indices
|
def find_lc_timegroups(lctimes, mingap=4.0):
'''This finds the time gaps in the light curve, so we can figure out which
times are for consecutive observations and which represent gaps
between seasons.
Parameters
----------
lctimes : np.array
This is the input array of times, assumed to be in some form of JD.
mingap : float
    This is the minimum gap between consecutive measurements required to
    place them in different timegroups. By default it is set to 4.0 days.
Returns
-------
tuple
A tuple of the form below is returned, containing the number of time
groups found and Python slice objects for each group::
(ngroups, [slice(start_ind_1, end_ind_1), ...])
'''
lc_time_diffs = [(lctimes[x] - lctimes[x-1]) for x in range(1,len(lctimes))]
lc_time_diffs = np.array(lc_time_diffs)
group_start_indices = np.where(lc_time_diffs > mingap)[0]
if len(group_start_indices) > 0:
group_indices = []
for i, gindex in enumerate(group_start_indices):
if i == 0:
group_indices.append(slice(0,gindex+1))
else:
group_indices.append(slice(group_start_indices[i-1]+1,gindex+1))
# at the end, add the slice for the last group to the end of the times
# array
group_indices.append(slice(group_start_indices[-1]+1,len(lctimes)))
# if there's no large gap in the LC, then there's only one group to worry
# about
else:
group_indices = [slice(0,len(lctimes))]
return len(group_indices), group_indices
|
[
"This",
"finds",
"the",
"time",
"gaps",
"in",
"the",
"light",
"curve",
"so",
"we",
"can",
"figure",
"out",
"which",
"times",
"are",
"for",
"consecutive",
"observations",
"and",
"which",
"represent",
"gaps",
"between",
"seasons",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/hatsurveys/hatlc.py#L1467-L1521
|
[
"def",
"find_lc_timegroups",
"(",
"lctimes",
",",
"mingap",
"=",
"4.0",
")",
":",
"lc_time_diffs",
"=",
"[",
"(",
"lctimes",
"[",
"x",
"]",
"-",
"lctimes",
"[",
"x",
"-",
"1",
"]",
")",
"for",
"x",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"lctimes",
")",
")",
"]",
"lc_time_diffs",
"=",
"np",
".",
"array",
"(",
"lc_time_diffs",
")",
"group_start_indices",
"=",
"np",
".",
"where",
"(",
"lc_time_diffs",
">",
"mingap",
")",
"[",
"0",
"]",
"if",
"len",
"(",
"group_start_indices",
")",
">",
"0",
":",
"group_indices",
"=",
"[",
"]",
"for",
"i",
",",
"gindex",
"in",
"enumerate",
"(",
"group_start_indices",
")",
":",
"if",
"i",
"==",
"0",
":",
"group_indices",
".",
"append",
"(",
"slice",
"(",
"0",
",",
"gindex",
"+",
"1",
")",
")",
"else",
":",
"group_indices",
".",
"append",
"(",
"slice",
"(",
"group_start_indices",
"[",
"i",
"-",
"1",
"]",
"+",
"1",
",",
"gindex",
"+",
"1",
")",
")",
"# at the end, add the slice for the last group to the end of the times",
"# array",
"group_indices",
".",
"append",
"(",
"slice",
"(",
"group_start_indices",
"[",
"-",
"1",
"]",
"+",
"1",
",",
"len",
"(",
"lctimes",
")",
")",
")",
"# if there's no large gap in the LC, then there's only one group to worry",
"# about",
"else",
":",
"group_indices",
"=",
"[",
"slice",
"(",
"0",
",",
"len",
"(",
"lctimes",
")",
")",
"]",
"return",
"len",
"(",
"group_indices",
")",
",",
"group_indices"
] |
2922a14619d183fb28005fa7d02027ac436f2265
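A self-contained check with synthetic times, two 30-day 'seasons' separated by a roughly 200-day gap:

import numpy as np
from astrobase.hatsurveys.hatlc import find_lc_timegroups

times = np.concatenate([
    2455000.0 + np.arange(0.0, 30.0, 0.1),   # season 1: 300 points
    2455230.0 + np.arange(0.0, 30.0, 0.1),   # season 2: 300 points
])

ngroups, groups = find_lc_timegroups(times, mingap=4.0)
print(ngroups)                         # 2
for g in groups:                       # each group is a Python slice
    print(times[g].min(), times[g].max())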
|
valid
|
normalize_lcdict
|
This normalizes magcols in `lcdict` using `timecol` to find timegroups.
Parameters
----------
lcdict : dict
The input lcdict to process.
timecol : str
The key in the lcdict that is to be used to extract the time column.
magcols : {'all', 'redmags', 'epdmags', 'tfamags', 'epdtfa'} or str
    If this is 'all', all of the columns in the lcdict that are indicated to
    be magnitude measurement columns are normalized. The other group names
    pick the matching subsets of magnitude columns. Otherwise, this must be
    a comma-separated str of lcdict keys specifying which magnitude columns
    will be normalized.
mingap : float
    This is the minimum gap between consecutive measurements required to
    place them in different timegroups. By default it is set to 4.0 days.
normto : {'globalmedian', 'zero', 'jmag', 'hmag', 'kmag', 'bmag', 'vmag', 'sdssg', 'sdssr', 'sdssi'}
This indicates which column will be the normalization target. If this is
'globalmedian', the normalization will be to the global median of each
LC column. If this is 'zero', will normalize to 0.0 for each LC
column. Otherwise, will normalize to the value of one of the other keys
in the lcdict['objectinfo'][magkey], meaning the normalization will be
to some form of catalog magnitude.
debugmode : bool
If True, will indicate progress as time-groups are found and processed.
quiet : bool
If True, will not emit any messages when processing.
Returns
-------
dict
Returns the lcdict with the magnitude measurements normalized as
specified. The normalization happens IN PLACE.
|
astrobase/hatsurveys/hatlc.py
|
def normalize_lcdict(lcdict,
timecol='rjd',
magcols='all',
mingap=4.0,
normto='sdssr',
debugmode=False,
quiet=False):
'''This normalizes magcols in `lcdict` using `timecol` to find timegroups.
Parameters
----------
lcdict : dict
The input lcdict to process.
timecol : str
The key in the lcdict that is to be used to extract the time column.
    magcols : {'all', 'redmags', 'epdmags', 'tfamags', 'epdtfa'} or str
        If this is 'all', all of the columns in the lcdict that are indicated to
        be magnitude measurement columns are normalized. The other group names
        pick the matching subsets of magnitude columns. Otherwise, this must be
        a comma-separated str of lcdict keys specifying which magnitude columns
        will be normalized.
mingap : float
        This is the minimum gap between consecutive measurements required to
        place them in different timegroups. By default it is set to 4.0 days.
normto : {'globalmedian', 'zero', 'jmag', 'hmag', 'kmag', 'bmag', 'vmag', 'sdssg', 'sdssr', 'sdssi'}
This indicates which column will be the normalization target. If this is
'globalmedian', the normalization will be to the global median of each
LC column. If this is 'zero', will normalize to 0.0 for each LC
column. Otherwise, will normalize to the value of one of the other keys
in the lcdict['objectinfo'][magkey], meaning the normalization will be
to some form of catalog magnitude.
debugmode : bool
If True, will indicate progress as time-groups are found and processed.
quiet : bool
If True, will not emit any messages when processing.
Returns
-------
dict
Returns the lcdict with the magnitude measurements normalized as
specified. The normalization happens IN PLACE.
'''
# check if this lc has been normalized already. return as-is if so
if 'lcnormcols' in lcdict and len(lcdict['lcnormcols']) > 0:
if not quiet:
LOGWARNING('this lightcurve is already normalized, returning...')
return lcdict
# first, get the LC timegroups
if timecol in lcdict:
times = lcdict[timecol]
elif 'rjd' in lcdict:
times = lcdict['rjd']
# if there aren't any time columns in this lcdict, then we can't do any
# normalization, return it as-is
else:
LOGERROR("can't figure out the time column to use, lcdict cols = %s" %
lcdict['columns'])
return lcdict
ngroups, timegroups = find_lc_timegroups(np.array(times),
mingap=mingap)
# HATLC V2 format
if 'lcapertures' in lcdict:
apertures = sorted(lcdict['lcapertures'].keys())
# LCC-CSV-V1 format HATLC
elif 'objectinfo' in lcdict and 'lcapertures' in lcdict['objectinfo']:
apertures = sorted(lcdict['objectinfo']['lcapertures'].keys())
aimcols = [('aim_%s' % x) for x in apertures if ('aim_%s' % x) in lcdict]
armcols = [('arm_%s' % x) for x in apertures if ('arm_%s' % x) in lcdict]
    aepcols = [('aep_%s' % x) for x in apertures if ('aep_%s' % x) in lcdict]
atfcols = [('atf_%s' % x) for x in apertures if ('atf_%s' % x) in lcdict]
psimcols = [x for x in ['psim','psrm','psep','pstf'] if x in lcdict]
irmcols = [('irm_%s' % x) for x in apertures if ('irm_%s' % x) in lcdict]
iepcols = [('iep_%s' % x) for x in apertures if ('iep_%s' % x) in lcdict]
itfcols = [('itf_%s' % x) for x in apertures if ('itf_%s' % x) in lcdict]
# next, find all the mag columns to normalize
if magcols == 'all':
cols_to_normalize = (aimcols + armcols + aepcols + atfcols +
psimcols + irmcols + iepcols + itfcols)
elif magcols == 'redmags':
        cols_to_normalize = (armcols + (['psrm'] if 'psrm' in lcdict else []) +
irmcols)
elif magcols == 'epdmags':
cols_to_normalize = (aepcols + (['psep'] if 'psep' in lcdict else []) +
iepcols)
elif magcols == 'tfamags':
cols_to_normalize = (atfcols + (['pstf'] if 'pstf' in lcdict else []) +
itfcols)
elif magcols == 'epdtfa':
cols_to_normalize = (aepcols + (['psep'] if 'psep' in lcdict else []) +
iepcols + atfcols +
(['pstf'] if 'pstf' in lcdict else []) +
itfcols)
else:
cols_to_normalize = magcols.split(',')
cols_to_normalize = [x.strip() for x in cols_to_normalize]
colsnormalized = []
# now, normalize each column
for col in cols_to_normalize:
if col in lcdict:
mags = lcdict[col]
mags = [(nan if x is None else x) for x in mags]
mags = np.array(mags)
colsnormalized.append(col)
# find all the non-nan indices
finite_ind = np.isfinite(mags)
if any(finite_ind):
# find the global median
global_mag_median = np.median(mags[finite_ind])
# go through the groups and normalize them to the median for
# each group
for tgind, tg in enumerate(timegroups):
finite_ind = np.isfinite(mags[tg])
# find this timegroup's median mag and normalize the mags in
# it to this median
group_median = np.median((mags[tg])[finite_ind])
mags[tg] = mags[tg] - group_median
if debugmode:
LOGDEBUG('%s group %s: elems %s, '
'finite elems %s, median mag %s' %
(col, tgind,
len(mags[tg]),
len(finite_ind),
group_median))
else:
LOGWARNING('column %s is all nan, skipping...' % col)
continue
# now that everything is normalized to 0.0, add the global median
# offset back to all the mags and write the result back to the dict
if normto == 'globalmedian':
mags = mags + global_mag_median
elif normto in ('jmag', 'hmag', 'kmag',
'bmag', 'vmag',
'sdssg', 'sdssr', 'sdssi'):
if (normto in lcdict['objectinfo'] and
lcdict['objectinfo'][normto] is not None):
mags = mags + lcdict['objectinfo'][normto]
else:
if not quiet:
LOGWARNING('no %s available in lcdict, '
'normalizing to global mag median' % normto)
normto = 'globalmedian'
mags = mags + global_mag_median
lcdict[col] = mags
else:
if not quiet:
LOGWARNING('column %s is not present, skipping...' % col)
continue
# add the lcnormcols key to the lcdict
lcnormcols = ('cols normalized: %s - '
'min day gap: %s - '
'normalized to: %s') % (
repr(colsnormalized),
mingap,
normto
)
lcdict['lcnormcols'] = lcnormcols
return lcdict
|
def normalize_lcdict(lcdict,
timecol='rjd',
magcols='all',
mingap=4.0,
normto='sdssr',
debugmode=False,
quiet=False):
'''This normalizes magcols in `lcdict` using `timecol` to find timegroups.
Parameters
----------
lcdict : dict
The input lcdict to process.
timecol : str
The key in the lcdict that is to be used to extract the time column.
    magcols : {'all', 'redmags', 'epdmags', 'tfamags', 'epdtfa'} or str
        If this is 'all', all of the columns in the lcdict that are indicated to
        be magnitude measurement columns are normalized. The other group names
        pick the matching subsets of magnitude columns. Otherwise, this must be
        a comma-separated str of lcdict keys specifying which magnitude columns
        will be normalized.
mingap : float
        This is the minimum gap between consecutive measurements required to
        place them in different timegroups. By default it is set to 4.0 days.
normto : {'globalmedian', 'zero', 'jmag', 'hmag', 'kmag', 'bmag', 'vmag', 'sdssg', 'sdssr', 'sdssi'}
This indicates which column will be the normalization target. If this is
'globalmedian', the normalization will be to the global median of each
LC column. If this is 'zero', will normalize to 0.0 for each LC
column. Otherwise, will normalize to the value of one of the other keys
in the lcdict['objectinfo'][magkey], meaning the normalization will be
to some form of catalog magnitude.
debugmode : bool
If True, will indicate progress as time-groups are found and processed.
quiet : bool
If True, will not emit any messages when processing.
Returns
-------
dict
Returns the lcdict with the magnitude measurements normalized as
specified. The normalization happens IN PLACE.
'''
# check if this lc has been normalized already. return as-is if so
if 'lcnormcols' in lcdict and len(lcdict['lcnormcols']) > 0:
if not quiet:
LOGWARNING('this lightcurve is already normalized, returning...')
return lcdict
# first, get the LC timegroups
if timecol in lcdict:
times = lcdict[timecol]
elif 'rjd' in lcdict:
times = lcdict['rjd']
# if there aren't any time columns in this lcdict, then we can't do any
# normalization, return it as-is
else:
LOGERROR("can't figure out the time column to use, lcdict cols = %s" %
lcdict['columns'])
return lcdict
ngroups, timegroups = find_lc_timegroups(np.array(times),
mingap=mingap)
# HATLC V2 format
if 'lcapertures' in lcdict:
apertures = sorted(lcdict['lcapertures'].keys())
# LCC-CSV-V1 format HATLC
elif 'objectinfo' in lcdict and 'lcapertures' in lcdict['objectinfo']:
apertures = sorted(lcdict['objectinfo']['lcapertures'].keys())
aimcols = [('aim_%s' % x) for x in apertures if ('aim_%s' % x) in lcdict]
armcols = [('arm_%s' % x) for x in apertures if ('arm_%s' % x) in lcdict]
    aepcols = [('aep_%s' % x) for x in apertures if ('aep_%s' % x) in lcdict]
atfcols = [('atf_%s' % x) for x in apertures if ('atf_%s' % x) in lcdict]
psimcols = [x for x in ['psim','psrm','psep','pstf'] if x in lcdict]
irmcols = [('irm_%s' % x) for x in apertures if ('irm_%s' % x) in lcdict]
iepcols = [('iep_%s' % x) for x in apertures if ('iep_%s' % x) in lcdict]
itfcols = [('itf_%s' % x) for x in apertures if ('itf_%s' % x) in lcdict]
# next, find all the mag columns to normalize
if magcols == 'all':
cols_to_normalize = (aimcols + armcols + aepcols + atfcols +
psimcols + irmcols + iepcols + itfcols)
elif magcols == 'redmags':
        cols_to_normalize = (armcols + (['psrm'] if 'psrm' in lcdict else []) +
irmcols)
elif magcols == 'epdmags':
cols_to_normalize = (aepcols + (['psep'] if 'psep' in lcdict else []) +
iepcols)
elif magcols == 'tfamags':
cols_to_normalize = (atfcols + (['pstf'] if 'pstf' in lcdict else []) +
itfcols)
elif magcols == 'epdtfa':
cols_to_normalize = (aepcols + (['psep'] if 'psep' in lcdict else []) +
iepcols + atfcols +
(['pstf'] if 'pstf' in lcdict else []) +
itfcols)
else:
cols_to_normalize = magcols.split(',')
cols_to_normalize = [x.strip() for x in cols_to_normalize]
colsnormalized = []
# now, normalize each column
for col in cols_to_normalize:
if col in lcdict:
mags = lcdict[col]
mags = [(nan if x is None else x) for x in mags]
mags = np.array(mags)
colsnormalized.append(col)
# find all the non-nan indices
finite_ind = np.isfinite(mags)
if any(finite_ind):
# find the global median
global_mag_median = np.median(mags[finite_ind])
# go through the groups and normalize them to the median for
# each group
for tgind, tg in enumerate(timegroups):
finite_ind = np.isfinite(mags[tg])
# find this timegroup's median mag and normalize the mags in
# it to this median
group_median = np.median((mags[tg])[finite_ind])
mags[tg] = mags[tg] - group_median
if debugmode:
LOGDEBUG('%s group %s: elems %s, '
'finite elems %s, median mag %s' %
(col, tgind,
len(mags[tg]),
len(finite_ind),
group_median))
else:
LOGWARNING('column %s is all nan, skipping...' % col)
continue
# now that everything is normalized to 0.0, add the global median
# offset back to all the mags and write the result back to the dict
if normto == 'globalmedian':
mags = mags + global_mag_median
elif normto in ('jmag', 'hmag', 'kmag',
'bmag', 'vmag',
'sdssg', 'sdssr', 'sdssi'):
if (normto in lcdict['objectinfo'] and
lcdict['objectinfo'][normto] is not None):
mags = mags + lcdict['objectinfo'][normto]
else:
if not quiet:
LOGWARNING('no %s available in lcdict, '
'normalizing to global mag median' % normto)
normto = 'globalmedian'
mags = mags + global_mag_median
lcdict[col] = mags
else:
if not quiet:
LOGWARNING('column %s is not present, skipping...' % col)
continue
# add the lcnormcols key to the lcdict
lcnormcols = ('cols normalized: %s - '
'min day gap: %s - '
'normalized to: %s') % (
repr(colsnormalized),
mingap,
normto
)
lcdict['lcnormcols'] = lcnormcols
return lcdict
|
[
"This",
"normalizes",
"magcols",
"in",
"lcdict",
"using",
"timecol",
"to",
"find",
"timegroups",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/hatsurveys/hatlc.py#L1525-L1718
|
[
"def",
"normalize_lcdict",
"(",
"lcdict",
",",
"timecol",
"=",
"'rjd'",
",",
"magcols",
"=",
"'all'",
",",
"mingap",
"=",
"4.0",
",",
"normto",
"=",
"'sdssr'",
",",
"debugmode",
"=",
"False",
",",
"quiet",
"=",
"False",
")",
":",
"# check if this lc has been normalized already. return as-is if so",
"if",
"'lcnormcols'",
"in",
"lcdict",
"and",
"len",
"(",
"lcdict",
"[",
"'lcnormcols'",
"]",
")",
">",
"0",
":",
"if",
"not",
"quiet",
":",
"LOGWARNING",
"(",
"'this lightcurve is already normalized, returning...'",
")",
"return",
"lcdict",
"# first, get the LC timegroups",
"if",
"timecol",
"in",
"lcdict",
":",
"times",
"=",
"lcdict",
"[",
"timecol",
"]",
"elif",
"'rjd'",
"in",
"lcdict",
":",
"times",
"=",
"lcdict",
"[",
"'rjd'",
"]",
"# if there aren't any time columns in this lcdict, then we can't do any",
"# normalization, return it as-is",
"else",
":",
"LOGERROR",
"(",
"\"can't figure out the time column to use, lcdict cols = %s\"",
"%",
"lcdict",
"[",
"'columns'",
"]",
")",
"return",
"lcdict",
"ngroups",
",",
"timegroups",
"=",
"find_lc_timegroups",
"(",
"np",
".",
"array",
"(",
"times",
")",
",",
"mingap",
"=",
"mingap",
")",
"# HATLC V2 format",
"if",
"'lcapertures'",
"in",
"lcdict",
":",
"apertures",
"=",
"sorted",
"(",
"lcdict",
"[",
"'lcapertures'",
"]",
".",
"keys",
"(",
")",
")",
"# LCC-CSV-V1 format HATLC",
"elif",
"'objectinfo'",
"in",
"lcdict",
"and",
"'lcapertures'",
"in",
"lcdict",
"[",
"'objectinfo'",
"]",
":",
"apertures",
"=",
"sorted",
"(",
"lcdict",
"[",
"'objectinfo'",
"]",
"[",
"'lcapertures'",
"]",
".",
"keys",
"(",
")",
")",
"aimcols",
"=",
"[",
"(",
"'aim_%s'",
"%",
"x",
")",
"for",
"x",
"in",
"apertures",
"if",
"(",
"'aim_%s'",
"%",
"x",
")",
"in",
"lcdict",
"]",
"armcols",
"=",
"[",
"(",
"'arm_%s'",
"%",
"x",
")",
"for",
"x",
"in",
"apertures",
"if",
"(",
"'arm_%s'",
"%",
"x",
")",
"in",
"lcdict",
"]",
"aepcols",
"=",
"[",
"(",
"'aep_%s'",
"%",
"x",
")",
"for",
"x",
"in",
"apertures",
"if",
"(",
"'aep_%s'",
"%",
"x",
")",
"in",
"lcdict",
"]",
"atfcols",
"=",
"[",
"(",
"'atf_%s'",
"%",
"x",
")",
"for",
"x",
"in",
"apertures",
"if",
"(",
"'atf_%s'",
"%",
"x",
")",
"in",
"lcdict",
"]",
"psimcols",
"=",
"[",
"x",
"for",
"x",
"in",
"[",
"'psim'",
",",
"'psrm'",
",",
"'psep'",
",",
"'pstf'",
"]",
"if",
"x",
"in",
"lcdict",
"]",
"irmcols",
"=",
"[",
"(",
"'irm_%s'",
"%",
"x",
")",
"for",
"x",
"in",
"apertures",
"if",
"(",
"'irm_%s'",
"%",
"x",
")",
"in",
"lcdict",
"]",
"iepcols",
"=",
"[",
"(",
"'iep_%s'",
"%",
"x",
")",
"for",
"x",
"in",
"apertures",
"if",
"(",
"'iep_%s'",
"%",
"x",
")",
"in",
"lcdict",
"]",
"itfcols",
"=",
"[",
"(",
"'itf_%s'",
"%",
"x",
")",
"for",
"x",
"in",
"apertures",
"if",
"(",
"'itf_%s'",
"%",
"x",
")",
"in",
"lcdict",
"]",
"# next, find all the mag columns to normalize",
"if",
"magcols",
"==",
"'all'",
":",
"cols_to_normalize",
"=",
"(",
"aimcols",
"+",
"armcols",
"+",
"aepcols",
"+",
"atfcols",
"+",
"psimcols",
"+",
"irmcols",
"+",
"iepcols",
"+",
"itfcols",
")",
"elif",
"magcols",
"==",
"'redmags'",
":",
"cols_to_normalize",
"=",
"(",
"irmcols",
"+",
"(",
"[",
"'psrm'",
"]",
"if",
"'psrm'",
"in",
"lcdict",
"else",
"[",
"]",
")",
"+",
"irmcols",
")",
"elif",
"magcols",
"==",
"'epdmags'",
":",
"cols_to_normalize",
"=",
"(",
"aepcols",
"+",
"(",
"[",
"'psep'",
"]",
"if",
"'psep'",
"in",
"lcdict",
"else",
"[",
"]",
")",
"+",
"iepcols",
")",
"elif",
"magcols",
"==",
"'tfamags'",
":",
"cols_to_normalize",
"=",
"(",
"atfcols",
"+",
"(",
"[",
"'pstf'",
"]",
"if",
"'pstf'",
"in",
"lcdict",
"else",
"[",
"]",
")",
"+",
"itfcols",
")",
"elif",
"magcols",
"==",
"'epdtfa'",
":",
"cols_to_normalize",
"=",
"(",
"aepcols",
"+",
"(",
"[",
"'psep'",
"]",
"if",
"'psep'",
"in",
"lcdict",
"else",
"[",
"]",
")",
"+",
"iepcols",
"+",
"atfcols",
"+",
"(",
"[",
"'pstf'",
"]",
"if",
"'pstf'",
"in",
"lcdict",
"else",
"[",
"]",
")",
"+",
"itfcols",
")",
"else",
":",
"cols_to_normalize",
"=",
"magcols",
".",
"split",
"(",
"','",
")",
"cols_to_normalize",
"=",
"[",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"cols_to_normalize",
"]",
"colsnormalized",
"=",
"[",
"]",
"# now, normalize each column",
"for",
"col",
"in",
"cols_to_normalize",
":",
"if",
"col",
"in",
"lcdict",
":",
"mags",
"=",
"lcdict",
"[",
"col",
"]",
"mags",
"=",
"[",
"(",
"nan",
"if",
"x",
"is",
"None",
"else",
"x",
")",
"for",
"x",
"in",
"mags",
"]",
"mags",
"=",
"np",
".",
"array",
"(",
"mags",
")",
"colsnormalized",
".",
"append",
"(",
"col",
")",
"# find all the non-nan indices",
"finite_ind",
"=",
"np",
".",
"isfinite",
"(",
"mags",
")",
"if",
"any",
"(",
"finite_ind",
")",
":",
"# find the global median",
"global_mag_median",
"=",
"np",
".",
"median",
"(",
"mags",
"[",
"finite_ind",
"]",
")",
"# go through the groups and normalize them to the median for",
"# each group",
"for",
"tgind",
",",
"tg",
"in",
"enumerate",
"(",
"timegroups",
")",
":",
"finite_ind",
"=",
"np",
".",
"isfinite",
"(",
"mags",
"[",
"tg",
"]",
")",
"# find this timegroup's median mag and normalize the mags in",
"# it to this median",
"group_median",
"=",
"np",
".",
"median",
"(",
"(",
"mags",
"[",
"tg",
"]",
")",
"[",
"finite_ind",
"]",
")",
"mags",
"[",
"tg",
"]",
"=",
"mags",
"[",
"tg",
"]",
"-",
"group_median",
"if",
"debugmode",
":",
"LOGDEBUG",
"(",
"'%s group %s: elems %s, '",
"'finite elems %s, median mag %s'",
"%",
"(",
"col",
",",
"tgind",
",",
"len",
"(",
"mags",
"[",
"tg",
"]",
")",
",",
"len",
"(",
"finite_ind",
")",
",",
"group_median",
")",
")",
"else",
":",
"LOGWARNING",
"(",
"'column %s is all nan, skipping...'",
"%",
"col",
")",
"continue",
"# now that everything is normalized to 0.0, add the global median",
"# offset back to all the mags and write the result back to the dict",
"if",
"normto",
"==",
"'globalmedian'",
":",
"mags",
"=",
"mags",
"+",
"global_mag_median",
"elif",
"normto",
"in",
"(",
"'jmag'",
",",
"'hmag'",
",",
"'kmag'",
",",
"'bmag'",
",",
"'vmag'",
",",
"'sdssg'",
",",
"'sdssr'",
",",
"'sdssi'",
")",
":",
"if",
"(",
"normto",
"in",
"lcdict",
"[",
"'objectinfo'",
"]",
"and",
"lcdict",
"[",
"'objectinfo'",
"]",
"[",
"normto",
"]",
"is",
"not",
"None",
")",
":",
"mags",
"=",
"mags",
"+",
"lcdict",
"[",
"'objectinfo'",
"]",
"[",
"normto",
"]",
"else",
":",
"if",
"not",
"quiet",
":",
"LOGWARNING",
"(",
"'no %s available in lcdict, '",
"'normalizing to global mag median'",
"%",
"normto",
")",
"normto",
"=",
"'globalmedian'",
"mags",
"=",
"mags",
"+",
"global_mag_median",
"lcdict",
"[",
"col",
"]",
"=",
"mags",
"else",
":",
"if",
"not",
"quiet",
":",
"LOGWARNING",
"(",
"'column %s is not present, skipping...'",
"%",
"col",
")",
"continue",
"# add the lcnormcols key to the lcdict",
"lcnormcols",
"=",
"(",
"'cols normalized: %s - '",
"'min day gap: %s - '",
"'normalized to: %s'",
")",
"%",
"(",
"repr",
"(",
"colsnormalized",
")",
",",
"mingap",
",",
"normto",
")",
"lcdict",
"[",
"'lcnormcols'",
"]",
"=",
"lcnormcols",
"return",
"lcdict"
] |
2922a14619d183fb28005fa7d02027ac436f2265
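A usage sketch; the input file name is hypothetical, and 'rjd' and 'sdssr' are the defaults anyway:

from astrobase.hatsurveys.hatlc import read_csvlc, normalize_lcdict

lcdict = read_csvlc('HAT-123-0000001.hatlc.csv.gz')

# remove the per-timegroup median from every magnitude column, then shift
# everything back up to the catalog SDSS r magnitude; note that this
# modifies lcdict in place and stamps it with the 'lcnormcols' key
lcdict = normalize_lcdict(lcdict, timecol='rjd', magcols='all',
                          mingap=4.0, normto='sdssr')
print(lcdict['lcnormcols'])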
|
valid
|
normalize_lcdict_byinst
|
This is a function to normalize light curves across all instrument
combinations present.
Use this to normalize a light curve containing a variety of:
- HAT station IDs ('stf')
- camera IDs ('ccd')
- filters ('flt')
- observed field names ('fld')
- HAT project IDs ('prj')
- exposure times ('exp')
Parameters
----------
lcdict : dict
The input lcdict to process.
magcols : {'all', 'redmags', 'epdmags', 'tfamags', 'epdtfa'} or str
    If this is 'all', all of the columns in the lcdict that are indicated to
    be magnitude measurement columns are normalized. The other group names
    pick the matching subsets of magnitude columns. Otherwise, this must be
    a comma-separated str of lcdict keys specifying which magnitude columns
    will be normalized.
normto : {'zero', 'jmag', 'hmag', 'kmag', 'bmag', 'vmag', 'sdssg', 'sdssr', 'sdssi'}
This indicates which column will be the normalization target. If this is
'zero', will normalize to 0.0 for each LC column. Otherwise, will
normalize to the value of one of the other keys in the
lcdict['objectinfo'][magkey], meaning the normalization will be to some
form of catalog magnitude.
normkeylist : list of str
These are the column keys to use to form the normalization
index. Measurements in the specified `magcols` with identical
normalization index values will be considered as part of a single
measurement 'era', and will be normalized to zero. Once all eras have
been normalized this way, the final light curve will be re-normalized as
specified in `normto`.
debugmode : bool
If True, will indicate progress as time-groups are found and processed.
quiet : bool
If True, will not emit any messages when processing.
Returns
-------
dict
Returns the lcdict with the magnitude measurements normalized as
specified. The normalization happens IN PLACE.
|
astrobase/hatsurveys/hatlc.py
|
def normalize_lcdict_byinst(
lcdict,
magcols='all',
normto='sdssr',
normkeylist=('stf','ccd','flt','fld','prj','exp'),
debugmode=False,
quiet=False
):
'''This is a function to normalize light curves across all instrument
combinations present.
Use this to normalize a light curve containing a variety of:
- HAT station IDs ('stf')
- camera IDs ('ccd')
- filters ('flt')
- observed field names ('fld')
- HAT project IDs ('prj')
- exposure times ('exp')
Parameters
----------
lcdict : dict
The input lcdict to process.
    magcols : {'all', 'redmags', 'epdmags', 'tfamags', 'epdtfa'} or str
        If this is 'all', all of the columns in the lcdict that are indicated to
        be magnitude measurement columns are normalized. The other group names
        pick the matching subsets of magnitude columns. Otherwise, this must be
        a comma-separated str of lcdict keys specifying which magnitude columns
        will be normalized.
normto : {'zero', 'jmag', 'hmag', 'kmag', 'bmag', 'vmag', 'sdssg', 'sdssr', 'sdssi'}
This indicates which column will be the normalization target. If this is
'zero', will normalize to 0.0 for each LC column. Otherwise, will
normalize to the value of one of the other keys in the
lcdict['objectinfo'][magkey], meaning the normalization will be to some
form of catalog magnitude.
normkeylist : list of str
These are the column keys to use to form the normalization
index. Measurements in the specified `magcols` with identical
normalization index values will be considered as part of a single
measurement 'era', and will be normalized to zero. Once all eras have
been normalized this way, the final light curve will be re-normalized as
specified in `normto`.
debugmode : bool
If True, will indicate progress as time-groups are found and processed.
quiet : bool
If True, will not emit any messages when processing.
Returns
-------
dict
Returns the lcdict with the magnitude measurements normalized as
specified. The normalization happens IN PLACE.
'''
# check if this lc has been normalized already. return as-is if so
if 'lcinstnormcols' in lcdict and len(lcdict['lcinstnormcols']) > 0:
if not quiet:
LOGWARNING('this lightcurve is already '
'normalized by instrument keys, '
'returning...')
return lcdict
# generate the normalization key
normkeycols = []
availablenormkeys = []
for key in normkeylist:
if key in lcdict and lcdict[key] is not None:
normkeycols.append(lcdict[key])
availablenormkeys.append(key)
# transpose to turn these into rows
normkeycols = list(zip(*normkeycols))
# convert to a string rep for each key and post-process for simplicity
allkeys = [repr(x) for x in normkeycols]
allkeys = [a.replace('(','').replace(')','').replace("'",'').replace(' ','')
for a in allkeys]
# turn these into a numpy array and get the unique values
allkeys = np.array(allkeys)
normkeys = np.unique(allkeys)
# figure out the apertures
# HATLC V2 format
if 'lcapertures' in lcdict:
apertures = sorted(lcdict['lcapertures'].keys())
# LCC-CSV-V1 format HATLC
elif 'objectinfo' in lcdict and 'lcapertures' in lcdict['objectinfo']:
apertures = sorted(lcdict['objectinfo']['lcapertures'].keys())
# put together the column names
aimcols = [('aim_%s' % x) for x in apertures if ('aim_%s' % x) in lcdict]
armcols = [('arm_%s' % x) for x in apertures if ('arm_%s' % x) in lcdict]
    aepcols = [('aep_%s' % x) for x in apertures if ('aep_%s' % x) in lcdict]
atfcols = [('atf_%s' % x) for x in apertures if ('atf_%s' % x) in lcdict]
psimcols = [x for x in ['psim','psrm','psep','pstf'] if x in lcdict]
irmcols = [('irm_%s' % x) for x in apertures if ('irm_%s' % x) in lcdict]
iepcols = [('iep_%s' % x) for x in apertures if ('iep_%s' % x) in lcdict]
itfcols = [('itf_%s' % x) for x in apertures if ('itf_%s' % x) in lcdict]
# next, find all the mag columns to normalize
if magcols == 'all':
cols_to_normalize = (aimcols + armcols + aepcols + atfcols +
psimcols + irmcols + iepcols + itfcols)
elif magcols == 'redmags':
        cols_to_normalize = (armcols + (['psrm'] if 'psrm' in lcdict else []) +
irmcols)
elif magcols == 'epdmags':
cols_to_normalize = (aepcols + (['psep'] if 'psep' in lcdict else []) +
iepcols)
elif magcols == 'tfamags':
cols_to_normalize = (atfcols + (['pstf'] if 'pstf' in lcdict else []) +
itfcols)
elif magcols == 'epdtfa':
cols_to_normalize = (aepcols + (['psep'] if 'psep' in lcdict else []) +
iepcols + atfcols +
(['pstf'] if 'pstf' in lcdict else []) +
itfcols)
else:
cols_to_normalize = magcols.split(',')
cols_to_normalize = [x.strip() for x in cols_to_normalize]
colsnormalized = []
# go through each column and normalize them
for col in cols_to_normalize:
if col in lcdict:
# note: this requires the columns in ndarray format
# unlike normalize_lcdict
thismags = lcdict[col]
# go through each key in normusing
for nkey in normkeys:
thisind = allkeys == nkey
# make sure we have at least 3 elements in the matched set of
# magnitudes corresponding to this key. also make sure that the
# magnitudes corresponding to this key aren't all nan.
thismagsize = thismags[thisind].size
thismagfinite = np.where(np.isfinite(thismags[thisind]))[0].size
if thismagsize > 2 and thismagfinite > 2:
# do the normalization and update the thismags in the lcdict
medmag = np.nanmedian(thismags[thisind])
lcdict[col][thisind] = lcdict[col][thisind] - medmag
if debugmode:
LOGDEBUG('magcol: %s, currkey: "%s", nelem: %s, '
'medmag: %s' %
(col, nkey, len(thismags[thisind]), medmag))
# we remove mags that correspond to keys with less than 3
# (finite) elements because we can't get the median mag
# correctly and renormalizing them to zero would just set them
# to zero
else:
lcdict[col][thisind] = np.nan
# everything should now be normalized to zero
# add back the requested normto
if normto in ('jmag', 'hmag', 'kmag',
'bmag', 'vmag',
'sdssg', 'sdssr', 'sdssi'):
if (normto in lcdict['objectinfo'] and
lcdict['objectinfo'][normto] is not None):
lcdict[col] = lcdict[col] + lcdict['objectinfo'][normto]
else:
if not quiet:
LOGWARNING('no %s available in lcdict, '
'normalizing to 0.0' % normto)
normto = 'zero'
# update the colsnormalized list
colsnormalized.append(col)
else:
if not quiet:
LOGWARNING('column %s is not present, skipping...' % col)
continue
# add the lcnormcols key to the lcdict
lcinstnormcols = ('cols normalized: %s - '
'normalized to: %s - '
'norm keys used: %s') % (repr(colsnormalized),
normto,
repr(availablenormkeys))
lcdict['lcinstnormcols'] = lcinstnormcols
return lcdict
|
def normalize_lcdict_byinst(
lcdict,
magcols='all',
normto='sdssr',
normkeylist=('stf','ccd','flt','fld','prj','exp'),
debugmode=False,
quiet=False
):
'''This is a function to normalize light curves across all instrument
combinations present.
Use this to normalize a light curve containing a variety of:
- HAT station IDs ('stf')
- camera IDs ('ccd')
- filters ('flt')
- observed field names ('fld')
- HAT project IDs ('prj')
- exposure times ('exp')
Parameters
----------
lcdict : dict
The input lcdict to process.
    magcols : {'all', 'redmags', 'epdmags', 'tfamags', 'epdtfa'} or str
        If this is 'all', all of the columns in the lcdict that are indicated to
        be magnitude measurement columns are normalized. The other group names
        pick the matching subsets of magnitude columns. Otherwise, this must be
        a comma-separated str of lcdict keys specifying which magnitude columns
        will be normalized.
normto : {'zero', 'jmag', 'hmag', 'kmag', 'bmag', 'vmag', 'sdssg', 'sdssr', 'sdssi'}
This indicates which column will be the normalization target. If this is
'zero', will normalize to 0.0 for each LC column. Otherwise, will
normalize to the value of one of the other keys in the
lcdict['objectinfo'][magkey], meaning the normalization will be to some
form of catalog magnitude.
normkeylist : list of str
These are the column keys to use to form the normalization
index. Measurements in the specified `magcols` with identical
normalization index values will be considered as part of a single
measurement 'era', and will be normalized to zero. Once all eras have
been normalized this way, the final light curve will be re-normalized as
specified in `normto`.
debugmode : bool
If True, will indicate progress as time-groups are found and processed.
quiet : bool
If True, will not emit any messages when processing.
Returns
-------
dict
Returns the lcdict with the magnitude measurements normalized as
specified. The normalization happens IN PLACE.
'''
# check if this lc has been normalized already. return as-is if so
if 'lcinstnormcols' in lcdict and len(lcdict['lcinstnormcols']) > 0:
if not quiet:
LOGWARNING('this lightcurve is already '
'normalized by instrument keys, '
'returning...')
return lcdict
# generate the normalization key
normkeycols = []
availablenormkeys = []
for key in normkeylist:
if key in lcdict and lcdict[key] is not None:
normkeycols.append(lcdict[key])
availablenormkeys.append(key)
# transpose to turn these into rows
normkeycols = list(zip(*normkeycols))
# convert to a string rep for each key and post-process for simplicity
allkeys = [repr(x) for x in normkeycols]
allkeys = [a.replace('(','').replace(')','').replace("'",'').replace(' ','')
for a in allkeys]
# turn these into a numpy array and get the unique values
allkeys = np.array(allkeys)
normkeys = np.unique(allkeys)
# figure out the apertures
# HATLC V2 format
if 'lcapertures' in lcdict:
apertures = sorted(lcdict['lcapertures'].keys())
# LCC-CSV-V1 format HATLC
elif 'objectinfo' in lcdict and 'lcapertures' in lcdict['objectinfo']:
apertures = sorted(lcdict['objectinfo']['lcapertures'].keys())
# neither format: fall back to an empty aperture list so the column
# collection below still works
else:
apertures = []
# put together the column names
aimcols = [('aim_%s' % x) for x in apertures if ('aim_%s' % x) in lcdict]
armcols = [('arm_%s' % x) for x in apertures if ('arm_%s' % x) in lcdict]
aepcols = [('aep_%s' % x) for x in apertures if ('aep_%s' % x) in lcdict]
atfcols = [('atf_%s' % x) for x in apertures if ('atf_%s' % x) in lcdict]
psimcols = [x for x in ['psim','psrm','psep','pstf'] if x in lcdict]
irmcols = [('irm_%s' % x) for x in apertures if ('irm_%s' % x) in lcdict]
iepcols = [('iep_%s' % x) for x in apertures if ('iep_%s' % x) in lcdict]
itfcols = [('itf_%s' % x) for x in apertures if ('itf_%s' % x) in lcdict]
# next, find all the mag columns to normalize
if magcols == 'all':
cols_to_normalize = (aimcols + armcols + aepcols + atfcols +
psimcols + irmcols + iepcols + itfcols)
elif magcols == 'redmags':
cols_to_normalize = (armcols + (['psrm'] if 'psrm' in lcdict else []) +
irmcols)
elif magcols == 'epdmags':
cols_to_normalize = (aepcols + (['psep'] if 'psep' in lcdict else []) +
iepcols)
elif magcols == 'tfamags':
cols_to_normalize = (atfcols + (['pstf'] if 'pstf' in lcdict else []) +
itfcols)
elif magcols == 'epdtfa':
cols_to_normalize = (aepcols + (['psep'] if 'psep' in lcdict else []) +
iepcols + atfcols +
(['pstf'] if 'pstf' in lcdict else []) +
itfcols)
else:
cols_to_normalize = magcols.split(',')
cols_to_normalize = [x.strip() for x in cols_to_normalize]
colsnormalized = []
# go through each column and normalize them
for col in cols_to_normalize:
if col in lcdict:
# note: this requires the columns in ndarray format
# unlike normalize_lcdict
thismags = lcdict[col]
# go through each key in normusing
for nkey in normkeys:
thisind = allkeys == nkey
# make sure we have at least 3 elements in the matched set of
# magnitudes corresponding to this key. also make sure that the
# magnitudes corresponding to this key aren't all nan.
thismagsize = thismags[thisind].size
thismagfinite = np.where(np.isfinite(thismags[thisind]))[0].size
if thismagsize > 2 and thismagfinite > 2:
# do the normalization and update the thismags in the lcdict
medmag = np.nanmedian(thismags[thisind])
lcdict[col][thisind] = lcdict[col][thisind] - medmag
if debugmode:
LOGDEBUG('magcol: %s, currkey: "%s", nelem: %s, '
'medmag: %s' %
(col, nkey, len(thismags[thisind]), medmag))
# we remove mags that correspond to keys with less than 3
# (finite) elements because we can't get the median mag
# correctly and renormalizing them to zero would just set them
# to zero
else:
lcdict[col][thisind] = np.nan
# everything should now be normalized to zero
# add back the requested normto
if normto in ('jmag', 'hmag', 'kmag',
'bmag', 'vmag',
'sdssg', 'sdssr', 'sdssi'):
if (normto in lcdict['objectinfo'] and
lcdict['objectinfo'][normto] is not None):
lcdict[col] = lcdict[col] + lcdict['objectinfo'][normto]
else:
if not quiet:
LOGWARNING('no %s available in lcdict, '
'normalizing to 0.0' % normto)
normto = 'zero'
# update the colsnormalized list
colsnormalized.append(col)
else:
if not quiet:
LOGWARNING('column %s is not present, skipping...' % col)
continue
# add the lcinstnormcols key to the lcdict
lcinstnormcols = ('cols normalized: %s - '
'normalized to: %s - '
'norm keys used: %s') % (repr(colsnormalized),
normto,
repr(availablenormkeys))
lcdict['lcinstnormcols'] = lcinstnormcols
return lcdict
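A small sketch of the final add-back step above: once every era is centered on zero, the column is shifted to the requested catalog magnitude if objectinfo provides one (all values below are made up):

import numpy as np

normto = 'sdssr'
objectinfo = {'sdssr': 11.75}        # hypothetical catalog magnitude
col = np.array([-0.02, 0.01, 0.00])  # a zero-centered magnitude column

if objectinfo.get(normto) is not None:
    col = col + objectinfo[normto]   # re-anchor at the catalog magnitude
else:
    normto = 'zero'                  # no catalog mag available: stay at zero

print(col)  # [11.73 11.76 11.75]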
|
[
"This",
"is",
"a",
"function",
"to",
"normalize",
"light",
"curves",
"across",
"all",
"instrument",
"combinations",
"present",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/hatsurveys/hatlc.py#L1722-L1925
|
[
"def",
"normalize_lcdict_byinst",
"(",
"lcdict",
",",
"magcols",
"=",
"'all'",
",",
"normto",
"=",
"'sdssr'",
",",
"normkeylist",
"=",
"(",
"'stf'",
",",
"'ccd'",
",",
"'flt'",
",",
"'fld'",
",",
"'prj'",
",",
"'exp'",
")",
",",
"debugmode",
"=",
"False",
",",
"quiet",
"=",
"False",
")",
":",
"# check if this lc has been normalized already. return as-is if so",
"if",
"'lcinstnormcols'",
"in",
"lcdict",
"and",
"len",
"(",
"lcdict",
"[",
"'lcinstnormcols'",
"]",
")",
">",
"0",
":",
"if",
"not",
"quiet",
":",
"LOGWARNING",
"(",
"'this lightcurve is already '",
"'normalized by instrument keys, '",
"'returning...'",
")",
"return",
"lcdict",
"# generate the normalization key",
"normkeycols",
"=",
"[",
"]",
"availablenormkeys",
"=",
"[",
"]",
"for",
"key",
"in",
"normkeylist",
":",
"if",
"key",
"in",
"lcdict",
"and",
"lcdict",
"[",
"key",
"]",
"is",
"not",
"None",
":",
"normkeycols",
".",
"append",
"(",
"lcdict",
"[",
"key",
"]",
")",
"availablenormkeys",
".",
"append",
"(",
"key",
")",
"# transpose to turn these into rows",
"normkeycols",
"=",
"list",
"(",
"zip",
"(",
"*",
"normkeycols",
")",
")",
"# convert to a string rep for each key and post-process for simplicity",
"allkeys",
"=",
"[",
"repr",
"(",
"x",
")",
"for",
"x",
"in",
"normkeycols",
"]",
"allkeys",
"=",
"[",
"a",
".",
"replace",
"(",
"'('",
",",
"''",
")",
".",
"replace",
"(",
"')'",
",",
"''",
")",
".",
"replace",
"(",
"\"'\"",
",",
"''",
")",
".",
"replace",
"(",
"' '",
",",
"''",
")",
"for",
"a",
"in",
"allkeys",
"]",
"# turn these into a numpy array and get the unique values",
"allkeys",
"=",
"np",
".",
"array",
"(",
"allkeys",
")",
"normkeys",
"=",
"np",
".",
"unique",
"(",
"allkeys",
")",
"# figure out the apertures",
"# HATLC V2 format",
"if",
"'lcapertures'",
"in",
"lcdict",
":",
"apertures",
"=",
"sorted",
"(",
"lcdict",
"[",
"'lcapertures'",
"]",
".",
"keys",
"(",
")",
")",
"# LCC-CSV-V1 format HATLC",
"elif",
"'objectinfo'",
"in",
"lcdict",
"and",
"'lcapertures'",
"in",
"lcdict",
"[",
"'objectinfo'",
"]",
":",
"apertures",
"=",
"sorted",
"(",
"lcdict",
"[",
"'objectinfo'",
"]",
"[",
"'lcapertures'",
"]",
".",
"keys",
"(",
")",
")",
"# put together the column names",
"aimcols",
"=",
"[",
"(",
"'aim_%s'",
"%",
"x",
")",
"for",
"x",
"in",
"apertures",
"if",
"(",
"'aim_%s'",
"%",
"x",
")",
"in",
"lcdict",
"]",
"armcols",
"=",
"[",
"(",
"'arm_%s'",
"%",
"x",
")",
"for",
"x",
"in",
"apertures",
"if",
"(",
"'arm_%s'",
"%",
"x",
")",
"in",
"lcdict",
"]",
"aepcols",
"=",
"[",
"(",
"'aep_%s'",
"%",
"x",
")",
"for",
"x",
"in",
"apertures",
"if",
"(",
"'aep_%s'",
"%",
"x",
")",
"in",
"lcdict",
"]",
"atfcols",
"=",
"[",
"(",
"'atf_%s'",
"%",
"x",
")",
"for",
"x",
"in",
"apertures",
"if",
"(",
"'atf_%s'",
"%",
"x",
")",
"in",
"lcdict",
"]",
"psimcols",
"=",
"[",
"x",
"for",
"x",
"in",
"[",
"'psim'",
",",
"'psrm'",
",",
"'psep'",
",",
"'pstf'",
"]",
"if",
"x",
"in",
"lcdict",
"]",
"irmcols",
"=",
"[",
"(",
"'irm_%s'",
"%",
"x",
")",
"for",
"x",
"in",
"apertures",
"if",
"(",
"'irm_%s'",
"%",
"x",
")",
"in",
"lcdict",
"]",
"iepcols",
"=",
"[",
"(",
"'iep_%s'",
"%",
"x",
")",
"for",
"x",
"in",
"apertures",
"if",
"(",
"'iep_%s'",
"%",
"x",
")",
"in",
"lcdict",
"]",
"itfcols",
"=",
"[",
"(",
"'itf_%s'",
"%",
"x",
")",
"for",
"x",
"in",
"apertures",
"if",
"(",
"'itf_%s'",
"%",
"x",
")",
"in",
"lcdict",
"]",
"# next, find all the mag columns to normalize",
"if",
"magcols",
"==",
"'all'",
":",
"cols_to_normalize",
"=",
"(",
"aimcols",
"+",
"armcols",
"+",
"aepcols",
"+",
"atfcols",
"+",
"psimcols",
"+",
"irmcols",
"+",
"iepcols",
"+",
"itfcols",
")",
"elif",
"magcols",
"==",
"'redmags'",
":",
"cols_to_normalize",
"=",
"(",
"irmcols",
"+",
"(",
"[",
"'psrm'",
"]",
"if",
"'psrm'",
"in",
"lcdict",
"else",
"[",
"]",
")",
"+",
"irmcols",
")",
"elif",
"magcols",
"==",
"'epdmags'",
":",
"cols_to_normalize",
"=",
"(",
"aepcols",
"+",
"(",
"[",
"'psep'",
"]",
"if",
"'psep'",
"in",
"lcdict",
"else",
"[",
"]",
")",
"+",
"iepcols",
")",
"elif",
"magcols",
"==",
"'tfamags'",
":",
"cols_to_normalize",
"=",
"(",
"atfcols",
"+",
"(",
"[",
"'pstf'",
"]",
"if",
"'pstf'",
"in",
"lcdict",
"else",
"[",
"]",
")",
"+",
"itfcols",
")",
"elif",
"magcols",
"==",
"'epdtfa'",
":",
"cols_to_normalize",
"=",
"(",
"aepcols",
"+",
"(",
"[",
"'psep'",
"]",
"if",
"'psep'",
"in",
"lcdict",
"else",
"[",
"]",
")",
"+",
"iepcols",
"+",
"atfcols",
"+",
"(",
"[",
"'pstf'",
"]",
"if",
"'pstf'",
"in",
"lcdict",
"else",
"[",
"]",
")",
"+",
"itfcols",
")",
"else",
":",
"cols_to_normalize",
"=",
"magcols",
".",
"split",
"(",
"','",
")",
"cols_to_normalize",
"=",
"[",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"cols_to_normalize",
"]",
"colsnormalized",
"=",
"[",
"]",
"# go through each column and normalize them",
"for",
"col",
"in",
"cols_to_normalize",
":",
"if",
"col",
"in",
"lcdict",
":",
"# note: this requires the columns in ndarray format",
"# unlike normalize_lcdict",
"thismags",
"=",
"lcdict",
"[",
"col",
"]",
"# go through each key in normusing",
"for",
"nkey",
"in",
"normkeys",
":",
"thisind",
"=",
"allkeys",
"==",
"nkey",
"# make sure we have at least 3 elements in the matched set of",
"# magnitudes corresponding to this key. also make sure that the",
"# magnitudes corresponding to this key aren't all nan.",
"thismagsize",
"=",
"thismags",
"[",
"thisind",
"]",
".",
"size",
"thismagfinite",
"=",
"np",
".",
"where",
"(",
"np",
".",
"isfinite",
"(",
"thismags",
"[",
"thisind",
"]",
")",
")",
"[",
"0",
"]",
".",
"size",
"if",
"thismagsize",
">",
"2",
"and",
"thismagfinite",
">",
"2",
":",
"# do the normalization and update the thismags in the lcdict",
"medmag",
"=",
"np",
".",
"nanmedian",
"(",
"thismags",
"[",
"thisind",
"]",
")",
"lcdict",
"[",
"col",
"]",
"[",
"thisind",
"]",
"=",
"lcdict",
"[",
"col",
"]",
"[",
"thisind",
"]",
"-",
"medmag",
"if",
"debugmode",
":",
"LOGDEBUG",
"(",
"'magcol: %s, currkey: \"%s\", nelem: %s, '",
"'medmag: %s'",
"%",
"(",
"col",
",",
"nkey",
",",
"len",
"(",
"thismags",
"[",
"thisind",
"]",
")",
",",
"medmag",
")",
")",
"# we remove mags that correspond to keys with less than 3",
"# (finite) elements because we can't get the median mag",
"# correctly and renormalizing them to zero would just set them",
"# to zero",
"else",
":",
"lcdict",
"[",
"col",
"]",
"[",
"thisind",
"]",
"=",
"np",
".",
"nan",
"# everything should now be normalized to zero",
"# add back the requested normto",
"if",
"normto",
"in",
"(",
"'jmag'",
",",
"'hmag'",
",",
"'kmag'",
",",
"'bmag'",
",",
"'vmag'",
",",
"'sdssg'",
",",
"'sdssr'",
",",
"'sdssi'",
")",
":",
"if",
"(",
"normto",
"in",
"lcdict",
"[",
"'objectinfo'",
"]",
"and",
"lcdict",
"[",
"'objectinfo'",
"]",
"[",
"normto",
"]",
"is",
"not",
"None",
")",
":",
"lcdict",
"[",
"col",
"]",
"=",
"lcdict",
"[",
"col",
"]",
"+",
"lcdict",
"[",
"'objectinfo'",
"]",
"[",
"normto",
"]",
"else",
":",
"if",
"not",
"quiet",
":",
"LOGWARNING",
"(",
"'no %s available in lcdict, '",
"'normalizing to 0.0'",
"%",
"normto",
")",
"normto",
"=",
"'zero'",
"# update the colsnormalized list",
"colsnormalized",
".",
"append",
"(",
"col",
")",
"else",
":",
"if",
"not",
"quiet",
":",
"LOGWARNING",
"(",
"'column %s is not present, skipping...'",
"%",
"col",
")",
"continue",
"# add the lcnormcols key to the lcdict",
"lcinstnormcols",
"=",
"(",
"'cols normalized: %s - '",
"'normalized to: %s - '",
"'norm keys used: %s'",
")",
"%",
"(",
"repr",
"(",
"colsnormalized",
")",
",",
"normto",
",",
"repr",
"(",
"availablenormkeys",
")",
")",
"lcdict",
"[",
"'lcinstnormcols'",
"]",
"=",
"lcinstnormcols",
"return",
"lcdict"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
main
|
This is called when we're executed from the commandline.
The current usage from the command-line is described below::
usage: hatlc [-h] [--describe] hatlcfile
read a HAT LC of any format and output to stdout
positional arguments:
hatlcfile path to the light curve you want to read and pipe to stdout
optional arguments:
-h, --help show this help message and exit
--describe don't dump the columns, show only object info and LC metadata
|
astrobase/hatsurveys/hatlc.py
|
def main():
'''
This is called when we're executed from the commandline.
The current usage from the command-line is described below::
usage: hatlc [-h] [--describe] hatlcfile
read a HAT LC of any format and output to stdout
positional arguments:
hatlcfile path to the light curve you want to read and pipe to stdout
optional arguments:
-h, --help show this help message and exit
--describe don't dump the columns, show only object info and LC metadata
'''
# handle SIGPIPE sent by less, head, et al.
import signal
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
import argparse
aparser = argparse.ArgumentParser(
description='read a HAT LC of any format and output to stdout'
)
aparser.add_argument(
'hatlcfile',
action='store',
type=str,
help=("path to the light curve you want to read and pipe to stdout")
)
aparser.add_argument(
'--describe',
action='store_true',
default=False,
help=("don't dump the columns, show only object info and LC metadata")
)
args = aparser.parse_args()
filetoread = args.hatlcfile
if not os.path.exists(filetoread):
LOGERROR("file provided: %s doesn't seem to exist" % filetoread)
sys.exit(1)
# figure out the type of LC this is
filename = os.path.basename(filetoread)
# switch based on filetype
if filename.endswith('-hatlc.csv.gz') or filename.endswith('-csvlc.gz'):
# filename is only the basename used for the suffix checks;
# always open the full filetoread path
if args.describe:
describe(read_csvlc(filetoread))
sys.exit(0)
else:
with gzip.open(filetoread, 'rb') as infd:
for line in infd:
print(line.decode(), end='')
elif filename.endswith('-hatlc.sqlite.gz'):
lcdict, msg = read_and_filter_sqlitecurve(filetoread)
# dump the description
describe(lcdict, offsetwith='#')
# stop here if describe is True
if args.describe:
sys.exit(0)
# otherwise, continue to parse the cols, etc.
# get the aperture names
apertures = sorted(lcdict['lcapertures'].keys())
# update column defs per aperture
for aper in apertures:
COLUMNDEFS.update({'%s_%s' % (x, aper): COLUMNDEFS[x] for x in
LC_MAG_COLUMNS})
COLUMNDEFS.update({'%s_%s' % (x, aper): COLUMNDEFS[x] for x in
LC_ERR_COLUMNS})
COLUMNDEFS.update({'%s_%s' % (x, aper): COLUMNDEFS[x] for x in
LC_FLAG_COLUMNS})
formstr = ','.join([COLUMNDEFS[x][1] for x in lcdict['columns']])
ndet = lcdict['objectinfo']['ndet']
for ind in range(ndet):
line = [lcdict[x][ind] for x in lcdict['columns']]
formline = formstr % tuple(line)
print(formline)
else:
LOGERROR('unrecognized HATLC file: %s' % filetoread)
sys.exit(1)
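The signal.signal(...) line at the top of main() deserves a quick illustration. A minimal, POSIX-only sketch of the same pattern, independent of hatlc:

import signal

# restore default SIGPIPE handling so that piping this script's output
# into `head` or `less` terminates it quietly instead of raising
# BrokenPipeError on a later print(); SIGPIPE does not exist on Windows
if hasattr(signal, 'SIGPIPE'):
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)

for i in range(1000000):
    print(i)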
|
def main():
'''
This is called when we're executed from the commandline.
The current usage from the command-line is described below::
usage: hatlc [-h] [--describe] hatlcfile
read a HAT LC of any format and output to stdout
positional arguments:
hatlcfile path to the light curve you want to read and pipe to stdout
optional arguments:
-h, --help show this help message and exit
--describe don't dump the columns, show only object info and LC metadata
'''
# handle SIGPIPE sent by less, head, et al.
import signal
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
import argparse
aparser = argparse.ArgumentParser(
description='read a HAT LC of any format and output to stdout'
)
aparser.add_argument(
'hatlcfile',
action='store',
type=str,
help=("path to the light curve you want to read and pipe to stdout")
)
aparser.add_argument(
'--describe',
action='store_true',
default=False,
help=("don't dump the columns, show only object info and LC metadata")
)
args = aparser.parse_args()
filetoread = args.hatlcfile
if not os.path.exists(filetoread):
LOGERROR("file provided: %s doesn't seem to exist" % filetoread)
sys.exit(1)
# figure out the type of LC this is
filename = os.path.basename(filetoread)
# switch based on filetype
if filename.endswith('-hatlc.csv.gz') or filename.endswith('-csvlc.gz'):
# filename is only the basename used for the suffix checks;
# always open the full filetoread path
if args.describe:
describe(read_csvlc(filetoread))
sys.exit(0)
else:
with gzip.open(filetoread, 'rb') as infd:
for line in infd:
print(line.decode(), end='')
elif filename.endswith('-hatlc.sqlite.gz'):
lcdict, msg = read_and_filter_sqlitecurve(filetoread)
# dump the description
describe(lcdict, offsetwith='#')
# stop here if describe is True
if args.describe:
sys.exit(0)
# otherwise, continue to parse the cols, etc.
# get the aperture names
apertures = sorted(lcdict['lcapertures'].keys())
# update column defs per aperture
for aper in apertures:
COLUMNDEFS.update({'%s_%s' % (x, aper): COLUMNDEFS[x] for x in
LC_MAG_COLUMNS})
COLUMNDEFS.update({'%s_%s' % (x, aper): COLUMNDEFS[x] for x in
LC_ERR_COLUMNS})
COLUMNDEFS.update({'%s_%s' % (x, aper): COLUMNDEFS[x] for x in
LC_FLAG_COLUMNS})
formstr = ','.join([COLUMNDEFS[x][1] for x in lcdict['columns']])
ndet = lcdict['objectinfo']['ndet']
for ind in range(ndet):
line = [lcdict[x][ind] for x in lcdict['columns']]
formline = formstr % tuple(line)
print(formline)
else:
LOGERROR('unrecognized HATLC file: %s' % filetoread)
sys.exit(1)
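A hedged sketch of the formstr assembly in the sqlite branch above, with hypothetical column definitions standing in for the real COLUMNDEFS entries; as in the code, only the [1] slot of each definition (a %-style format spec) is used:

# hypothetical column definitions; only the [1] slot is consumed here
columndefs = {
    'rjd': ('reduced Julian date', '%.7f'),
    'aim_000': ('aperture 000 instrumental mag', '%.5f'),
}
columns = ['rjd', 'aim_000']

formstr = ','.join(columndefs[x][1] for x in columns)
print(formstr % (2455000.1234567, 10.12345))
# -> 2455000.1234567,10.12345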
|
[
"This",
"is",
"called",
"when",
"we",
"re",
"executed",
"from",
"the",
"commandline",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/hatsurveys/hatlc.py#L1929-L2031
|
[
"def",
"main",
"(",
")",
":",
"# handle SIGPIPE sent by less, head, et al.",
"import",
"signal",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGPIPE",
",",
"signal",
".",
"SIG_DFL",
")",
"import",
"argparse",
"aparser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'read a HAT LC of any format and output to stdout'",
")",
"aparser",
".",
"add_argument",
"(",
"'hatlcfile'",
",",
"action",
"=",
"'store'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"(",
"\"path to the light curve you want to read and pipe to stdout\"",
")",
")",
"aparser",
".",
"add_argument",
"(",
"'--describe'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
",",
"help",
"=",
"(",
"\"don't dump the columns, show only object info and LC metadata\"",
")",
")",
"args",
"=",
"aparser",
".",
"parse_args",
"(",
")",
"filetoread",
"=",
"args",
".",
"hatlcfile",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"filetoread",
")",
":",
"LOGERROR",
"(",
"\"file provided: %s doesn't seem to exist\"",
"%",
"filetoread",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# figure out the type of LC this is",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"filetoread",
")",
"# switch based on filetype",
"if",
"filename",
".",
"endswith",
"(",
"'-hatlc.csv.gz'",
")",
"or",
"filename",
".",
"endswith",
"(",
"'-csvlc.gz'",
")",
":",
"if",
"args",
".",
"describe",
":",
"describe",
"(",
"read_csvlc",
"(",
"filename",
")",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"else",
":",
"with",
"gzip",
".",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"infd",
":",
"for",
"line",
"in",
"infd",
":",
"print",
"(",
"line",
".",
"decode",
"(",
")",
",",
"end",
"=",
"''",
")",
"elif",
"filename",
".",
"endswith",
"(",
"'-hatlc.sqlite.gz'",
")",
":",
"lcdict",
",",
"msg",
"=",
"read_and_filter_sqlitecurve",
"(",
"filetoread",
")",
"# dump the description",
"describe",
"(",
"lcdict",
",",
"offsetwith",
"=",
"'#'",
")",
"# stop here if describe is True",
"if",
"args",
".",
"describe",
":",
"sys",
".",
"exit",
"(",
"0",
")",
"# otherwise, continue to parse the cols, etc.",
"# get the aperture names",
"apertures",
"=",
"sorted",
"(",
"lcdict",
"[",
"'lcapertures'",
"]",
".",
"keys",
"(",
")",
")",
"# update column defs per aperture",
"for",
"aper",
"in",
"apertures",
":",
"COLUMNDEFS",
".",
"update",
"(",
"{",
"'%s_%s'",
"%",
"(",
"x",
",",
"aper",
")",
":",
"COLUMNDEFS",
"[",
"x",
"]",
"for",
"x",
"in",
"LC_MAG_COLUMNS",
"}",
")",
"COLUMNDEFS",
".",
"update",
"(",
"{",
"'%s_%s'",
"%",
"(",
"x",
",",
"aper",
")",
":",
"COLUMNDEFS",
"[",
"x",
"]",
"for",
"x",
"in",
"LC_ERR_COLUMNS",
"}",
")",
"COLUMNDEFS",
".",
"update",
"(",
"{",
"'%s_%s'",
"%",
"(",
"x",
",",
"aper",
")",
":",
"COLUMNDEFS",
"[",
"x",
"]",
"for",
"x",
"in",
"LC_FLAG_COLUMNS",
"}",
")",
"formstr",
"=",
"','",
".",
"join",
"(",
"[",
"COLUMNDEFS",
"[",
"x",
"]",
"[",
"1",
"]",
"for",
"x",
"in",
"lcdict",
"[",
"'columns'",
"]",
"]",
")",
"ndet",
"=",
"lcdict",
"[",
"'objectinfo'",
"]",
"[",
"'ndet'",
"]",
"for",
"ind",
"in",
"range",
"(",
"ndet",
")",
":",
"line",
"=",
"[",
"lcdict",
"[",
"x",
"]",
"[",
"ind",
"]",
"for",
"x",
"in",
"lcdict",
"[",
"'columns'",
"]",
"]",
"formline",
"=",
"formstr",
"%",
"tuple",
"(",
"line",
")",
"print",
"(",
"formline",
")",
"else",
":",
"LOGERROR",
"(",
"'unrecognized HATLC file: %s'",
"%",
"filetoread",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
coord_features
|
Calculates object coordinate features, including:
- galactic coordinates
- total proper motion from pmra, pmdecl
- reduced J proper motion from propermotion and Jmag
Parameters
----------
objectinfo : dict
This is an objectinfo dict from a light curve file read into an
`lcdict`. The format and the minimum keys required are::
{'ra': the right ascension of the object in decimal degrees,
'decl': the declination of the object in decimal degrees,
'pmra': the proper motion in right ascension in mas/yr,
'pmdecl': the proper motion in declination in mas/yr,
'jmag': the 2MASS J mag of this object}
Returns
-------
dict
A dict containing the galactic coordinates, the total proper motion, and
the reduced J-band proper motion.
|
astrobase/varclass/starfeatures.py
|
def coord_features(objectinfo):
'''Calculates object coordinate features, including:
- galactic coordinates
- total proper motion from pmra, pmdecl
- reduced J proper motion from propermotion and Jmag
Parameters
----------
objectinfo : dict
This is an objectinfo dict from a light curve file read into an
`lcdict`. The format and the minimum keys required are::
{'ra': the right ascension of the object in decimal degrees,
'decl': the declination of the object in decimal degrees,
'pmra': the proper motion in right ascension in mas/yr,
'pmdecl': the proper motion in declination in mas/yr,
'jmag': the 2MASS J mag of this object}
Returns
-------
dict
A dict containing the galactic coordinates, the total proper motion, and
the reduced J-band proper motion.
'''
retdict = {'propermotion': np.nan,
'gl':np.nan,
'gb':np.nan,
'rpmj':np.nan}
if ('ra' in objectinfo and
objectinfo['ra'] is not None and
np.isfinite(objectinfo['ra']) and
'decl' in objectinfo and
objectinfo['decl'] is not None and
np.isfinite(objectinfo['decl'])):
retdict['gl'], retdict['gb'] = coordutils.equatorial_to_galactic(
objectinfo['ra'],
objectinfo['decl']
)
if ('pmra' in objectinfo and
objectinfo['pmra'] is not None and
np.isfinite(objectinfo['pmra']) and
'pmdecl' in objectinfo and
objectinfo['pmdecl'] is not None and
np.isfinite(objectinfo['pmdecl']) and
'decl' in objectinfo and
objectinfo['decl'] is not None and
np.isfinite(objectinfo['decl'])):
retdict['propermotion'] = coordutils.total_proper_motion(
objectinfo['pmra'],
objectinfo['pmdecl'],
objectinfo['decl']
)
if ('jmag' in objectinfo and
objectinfo['jmag'] is not None and
np.isfinite(objectinfo['jmag']) and
np.isfinite(retdict['propermotion'])):
retdict['rpmj'] = coordutils.reduced_proper_motion(
objectinfo['jmag'],
retdict['propermotion']
)
return retdict
|
def coord_features(objectinfo):
'''Calculates object coordinate features, including:
- galactic coordinates
- total proper motion from pmra, pmdecl
- reduced J proper motion from propermotion and Jmag
Parameters
----------
objectinfo : dict
This is an objectinfo dict from a light curve file read into an
`lcdict`. The format and the minimum keys required are::
{'ra': the right ascension of the object in decimal degrees,
'decl': the declination of the object in decimal degrees,
'pmra': the proper motion in right ascension in mas/yr,
'pmdecl': the proper motion in declination in mas/yr,
'jmag': the 2MASS J mag of this object}
Returns
-------
dict
A dict containing the galactic coordinates, the total proper motion, and
the reduced J-band proper motion.
'''
retdict = {'propermotion': np.nan,
'gl':np.nan,
'gb':np.nan,
'rpmj':np.nan}
if ('ra' in objectinfo and
objectinfo['ra'] is not None and
np.isfinite(objectinfo['ra']) and
'decl' in objectinfo and
objectinfo['decl'] is not None and
np.isfinite(objectinfo['decl'])):
retdict['gl'], retdict['gb'] = coordutils.equatorial_to_galactic(
objectinfo['ra'],
objectinfo['decl']
)
if ('pmra' in objectinfo and
objectinfo['pmra'] is not None and
np.isfinite(objectinfo['pmra']) and
'pmdecl' in objectinfo and
objectinfo['pmdecl'] is not None and
np.isfinite(objectinfo['pmdecl']) and
'decl' in objectinfo and
objectinfo['decl'] is not None and
np.isfinite(objectinfo['decl'])):
retdict['propermotion'] = coordutils.total_proper_motion(
objectinfo['pmra'],
objectinfo['pmdecl'],
objectinfo['decl']
)
if ('jmag' in objectinfo and
objectinfo['jmag'] is not None and
np.isfinite(objectinfo['jmag']) and
np.isfinite(retdict['propermotion'])):
retdict['rpmj'] = coordutils.reduced_proper_motion(
objectinfo['jmag'],
retdict['propermotion']
)
return retdict
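For reference, a sketch of the textbook quantities that coordutils computes here. These are the standard definitions, written under the assumption that pmra is the raw mu_alpha in mas/yr; the actual coordutils implementation may differ in detail.

import numpy as np

def total_pm_masyr(pmra, pmdecl, decl):
    # total proper motion, assuming pmra is the raw mu_alpha in mas/yr
    # (not pre-multiplied by cos(decl)); decl is in degrees
    return np.hypot(pmdecl, pmra * np.cos(np.radians(decl)))

def reduced_pm_j(jmag, pm_masyr):
    # reduced proper motion: H_J = J + 5*log10(mu[arcsec/yr]) + 5
    return jmag + 5.0 * np.log10(pm_masyr / 1000.0) + 5.0

mu = total_pm_masyr(pmra=25.0, pmdecl=-40.0, decl=30.0)
print(mu, reduced_pm_j(12.3, mu))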
|
[
"Calculates",
"object",
"coordinates",
"features",
"including",
":"
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/varclass/starfeatures.py#L102-L174
|
[
"def",
"coord_features",
"(",
"objectinfo",
")",
":",
"retdict",
"=",
"{",
"'propermotion'",
":",
"np",
".",
"nan",
",",
"'gl'",
":",
"np",
".",
"nan",
",",
"'gb'",
":",
"np",
".",
"nan",
",",
"'rpmj'",
":",
"np",
".",
"nan",
"}",
"if",
"(",
"'ra'",
"in",
"objectinfo",
"and",
"objectinfo",
"[",
"'ra'",
"]",
"is",
"not",
"None",
"and",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'ra'",
"]",
")",
"and",
"'decl'",
"in",
"objectinfo",
"and",
"objectinfo",
"[",
"'decl'",
"]",
"is",
"not",
"None",
"and",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'decl'",
"]",
")",
")",
":",
"retdict",
"[",
"'gl'",
"]",
",",
"retdict",
"[",
"'gb'",
"]",
"=",
"coordutils",
".",
"equatorial_to_galactic",
"(",
"objectinfo",
"[",
"'ra'",
"]",
",",
"objectinfo",
"[",
"'decl'",
"]",
")",
"if",
"(",
"'pmra'",
"in",
"objectinfo",
"and",
"objectinfo",
"[",
"'pmra'",
"]",
"is",
"not",
"None",
"and",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'pmra'",
"]",
")",
"and",
"'pmdecl'",
"in",
"objectinfo",
"and",
"objectinfo",
"[",
"'pmdecl'",
"]",
"is",
"not",
"None",
"and",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'pmdecl'",
"]",
")",
"and",
"'decl'",
"in",
"objectinfo",
"and",
"objectinfo",
"[",
"'decl'",
"]",
"is",
"not",
"None",
"and",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'decl'",
"]",
")",
")",
":",
"retdict",
"[",
"'propermotion'",
"]",
"=",
"coordutils",
".",
"total_proper_motion",
"(",
"objectinfo",
"[",
"'pmra'",
"]",
",",
"objectinfo",
"[",
"'pmdecl'",
"]",
",",
"objectinfo",
"[",
"'decl'",
"]",
")",
"if",
"(",
"'jmag'",
"in",
"objectinfo",
"and",
"objectinfo",
"[",
"'jmag'",
"]",
"is",
"not",
"None",
"and",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'jmag'",
"]",
")",
"and",
"np",
".",
"isfinite",
"(",
"retdict",
"[",
"'propermotion'",
"]",
")",
")",
":",
"retdict",
"[",
"'rpmj'",
"]",
"=",
"coordutils",
".",
"reduced_proper_motion",
"(",
"objectinfo",
"[",
"'jmag'",
"]",
",",
"retdict",
"[",
"'propermotion'",
"]",
")",
"return",
"retdict"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
color_features
|
Stellar colors and dereddened stellar colors using 2MASS DUST API:
http://irsa.ipac.caltech.edu/applications/DUST/docs/dustProgramInterface.html
Parameters
----------
in_objectinfo : dict
This is a dict that contains the object's magnitudes and positions. This
requires at least 'ra', and 'decl' as keys which correspond to the right
ascension and declination of the object, and one or more of the
following keys for object magnitudes::
'umag' -> U mag -> colors: U-B, U-V, U-g
'bmag' -> B mag -> colors: U-B, B-V
'vmag' -> V mag -> colors: U-V, B-V, V-R, V-I, V-K
'rmag' -> R mag -> colors: V-R, R-I
'imag' -> I mag -> colors: g-I, V-I, R-I, B-I
'jmag' -> 2MASS J mag -> colors: J-H, J-K, g-J, i-J
'hmag' -> 2MASS H mag -> colors: J-H, H-K
'kmag' -> 2MASS Ks mag -> colors: g-Ks, H-Ks, J-Ks, V-Ks
'sdssu' -> SDSS u mag -> colors: u-g, u-V
'sdssg' -> SDSS g mag -> colors: g-r, g-i, g-K, u-g, U-g, g-J
'sdssr' -> SDSS r mag -> colors: r-i, g-r
'sdssi' -> SDSS i mag -> colors: r-i, i-z, g-i, i-J, i-W1
'sdssz' -> SDSS z mag -> colors: i-z, z-W2, g-z
'ujmag' -> UKIRT J mag -> colors: J-H, H-K, J-K, g-J, i-J
'uhmag' -> UKIRT H mag -> colors: J-H, H-K
'ukmag' -> UKIRT K mag -> colors: g-K, H-K, J-K, V-K
'irac1' -> Spitzer IRAC1 mag -> colors: i-I1, I1-I2
'irac2' -> Spitzer IRAC2 mag -> colors: I1-I2, I2-I3
'irac3' -> Spitzer IRAC3 mag -> colors: I2-I3
'irac4' -> Spitzer IRAC4 mag -> colors: I3-I4
'wise1' -> WISE W1 mag -> colors: i-W1, W1-W2
'wise2' -> WISE W2 mag -> colors: W1-W2, W2-W3
'wise3' -> WISE W3 mag -> colors: W2-W3
'wise4' -> WISE W4 mag -> colors: W3-W4
These are basically taken from the available reddening bandpasses from
the 2MASS DUST service. If B, V, u, g, r, i, z aren't provided but 2MASS
J, H, Ks are all provided, the former will be calculated using the 2MASS
JHKs -> BVugriz conversion functions in :py:mod:`astrobase.magnitudes`.
deredden : bool
If True, will make sure all colors use dereddened mags where possible.
custom_bandpasses : dict
This is a dict used to define any custom bandpasses in the
`in_objectinfo` dict you want to make this function aware of and
generate colors for. Use the format below for this dict::
{
'<bandpass_key_1>':{'dustkey':'<twomass_dust_key_1>',
'label':'<band_label_1>'
'colors':[['<bandkey1>-<bandkey2>',
'<BAND1> - <BAND2>'],
['<bandkey3>-<bandkey4>',
'<BAND3> - <BAND4>']]},
.
...
.
'<bandpass_key_N>':{'dustkey':'<twomass_dust_key_N>',
'label':'<band_label_N>'
'colors':[['<bandkey1>-<bandkey2>',
'<BAND1> - <BAND2>'],
['<bandkey3>-<bandkey4>',
'<BAND3> - <BAND4>']]},
}
Where:
`bandpass_key` is a key to use to refer to this bandpass in the
`objectinfo` dict, e.g. 'sdssg' for SDSS g band
`twomass_dust_key` is the key to use in the 2MASS DUST result table for
reddening per band-pass. For example, given the following DUST result
table (using http://irsa.ipac.caltech.edu/applications/DUST/)::
|Filter_name|LamEff |A_over_E_B_V_SandF|A_SandF|A_over_E_B_V_SFD|A_SFD|
|char |float |float |float |float |float|
| |microns| |mags | |mags |
CTIO U 0.3734 4.107 0.209 4.968 0.253
CTIO B 0.4309 3.641 0.186 4.325 0.221
CTIO V 0.5517 2.682 0.137 3.240 0.165
.
.
...
The `twomass_dust_key` for 'vmag' would be 'CTIO V'. If you want to
skip DUST lookup and want to pass in a specific reddening magnitude
for your bandpass, use a float for the value of
`twomass_dust_key`. If you want to skip DUST lookup entirely for
this bandpass, use None for the value of `twomass_dust_key`.
`band_label` is the label to use for this bandpass, e.g. 'W1' for
WISE-1 band, 'u' for SDSS u, etc.
The 'colors' list contains color definitions for all colors you want
to generate using this bandpass. This list contains elements of the
form::
['<bandkey1>-<bandkey2>','<BAND1> - <BAND2>']
where the first item gives the bandpass keys making up this color,
and the second item is the label for this color to be used by the
frontends. An example::
['sdssu-sdssg','u - g']
dust_timeout : float
The timeout to use when contacting the 2MASS DUST web service.
Returns
-------
dict
An `objectinfo` dict with all of the generated colors, dereddened
magnitudes, and dereddened colors, as specified in the input args, is
returned.
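For concreteness, a hypothetical custom_bandpasses entry following the format described above; the band key, label, dustkey choice, and color pairing are illustrative only:

custom_bandpasses = {
    'gaiag': {'dustkey': None,  # None: skip the DUST lookup for this band
              'label': 'G',
              'colors': [['sdssg-gaiag', 'g - G']]},
}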
|
astrobase/varclass/starfeatures.py
|
def color_features(in_objectinfo,
deredden=True,
custom_bandpasses=None,
dust_timeout=10.0):
'''Stellar colors and dereddened stellar colors using 2MASS DUST API:
http://irsa.ipac.caltech.edu/applications/DUST/docs/dustProgramInterface.html
Parameters
----------
in_objectinfo : dict
This is a dict that contains the object's magnitudes and positions. This
requires at least 'ra', and 'decl' as keys which correspond to the right
ascension and declination of the object, and one or more of the
following keys for object magnitudes::
'umag' -> U mag -> colors: U-B, U-V, U-g
'bmag' -> B mag -> colors: U-B, B-V
'vmag' -> V mag -> colors: U-V, B-V, V-R, V-I, V-K
'rmag' -> R mag -> colors: V-R, R-I
'imag' -> I mag -> colors: g-I, V-I, R-I, B-I
'jmag' -> 2MASS J mag -> colors: J-H, J-K, g-J, i-J
'hmag' -> 2MASS H mag -> colors: J-H, H-K
'kmag' -> 2MASS Ks mag -> colors: g-Ks, H-Ks, J-Ks, V-Ks
'sdssu' -> SDSS u mag -> colors: u-g, u-V
'sdssg' -> SDSS g mag -> colors: g-r, g-i, g-K, u-g, U-g, g-J
'sdssr' -> SDSS r mag -> colors: r-i, g-r
'sdssi' -> SDSS i mag -> colors: r-i, i-z, g-i, i-J, i-W1
'sdssz' -> SDSS z mag -> colors: i-z, z-W2, g-z
'ujmag' -> UKIRT J mag -> colors: J-H, H-K, J-K, g-J, i-J
'uhmag' -> UKIRT H mag -> colors: J-H, H-K
'ukmag' -> UKIRT K mag -> colors: g-K, H-K, J-K, V-K
'irac1' -> Spitzer IRAC1 mag -> colors: i-I1, I1-I2
'irac2' -> Spitzer IRAC2 mag -> colors: I1-I2, I2-I3
'irac3' -> Spitzer IRAC3 mag -> colors: I2-I3
'irac4' -> Spitzer IRAC4 mag -> colors: I3-I4
'wise1' -> WISE W1 mag -> colors: i-W1, W1-W2
'wise2' -> WISE W2 mag -> colors: W1-W2, W2-W3
'wise3' -> WISE W3 mag -> colors: W2-W3
'wise4' -> WISE W4 mag -> colors: W3-W4
These are basically taken from the available reddening bandpasses from
the 2MASS DUST service. If B, V, u, g, r, i, z aren't provided but 2MASS
J, H, Ks are all provided, the former will be calculated using the 2MASS
JHKs -> BVugriz conversion functions in :py:mod:`astrobase.magnitudes`.
deredden : bool
If True, will make sure all colors use dereddened mags where possible.
custom_bandpasses : dict
This is a dict used to define any custom bandpasses in the
`in_objectinfo` dict you want to make this function aware of and
generate colors for. Use the format below for this dict::
{
'<bandpass_key_1>':{'dustkey':'<twomass_dust_key_1>',
'label':'<band_label_1>'
'colors':[['<bandkey1>-<bandkey2>',
'<BAND1> - <BAND2>'],
['<bandkey3>-<bandkey4>',
'<BAND3> - <BAND4>']]},
.
...
.
'<bandpass_key_N>':{'dustkey':'<twomass_dust_key_N>',
'label':'<band_label_N>'
'colors':[['<bandkey1>-<bandkey2>',
'<BAND1> - <BAND2>'],
['<bandkey3>-<bandkey4>',
'<BAND3> - <BAND4>']]},
}
Where:
`bandpass_key` is a key to use to refer to this bandpass in the
`objectinfo` dict, e.g. 'sdssg' for SDSS g band
`twomass_dust_key` is the key to use in the 2MASS DUST result table for
reddening per band-pass. For example, given the following DUST result
table (using http://irsa.ipac.caltech.edu/applications/DUST/)::
|Filter_name|LamEff |A_over_E_B_V_SandF|A_SandF|A_over_E_B_V_SFD|A_SFD|
|char |float |float |float |float |float|
| |microns| |mags | |mags |
CTIO U 0.3734 4.107 0.209 4.968 0.253
CTIO B 0.4309 3.641 0.186 4.325 0.221
CTIO V 0.5517 2.682 0.137 3.240 0.165
.
.
...
The `twomass_dust_key` for 'vmag' would be 'CTIO V'. If you want to
skip DUST lookup and want to pass in a specific reddening magnitude
for your bandpass, use a float for the value of
`twomass_dust_key`. If you want to skip DUST lookup entirely for
this bandpass, use None for the value of `twomass_dust_key`.
`band_label` is the label to use for this bandpass, e.g. 'W1' for
WISE-1 band, 'u' for SDSS u, etc.
The 'colors' list contains color definitions for all colors you want
to generate using this bandpass. This list contains elements of the
form::
['<bandkey1>-<bandkey2>','<BAND1> - <BAND2>']
where the first item gives the bandpass keys making up this color,
and the second item is the label for this color to be used by the
frontends. An example::
['sdssu-sdssg','u - g']
dust_timeout : float
The timeout to use when contacting the 2MASS DUST web service.
Returns
-------
dict
An `objectinfo` dict with all of the generated colors, dereddened
magnitudes, and dereddened colors, as specified in the input args, is
returned.
'''
objectinfo = in_objectinfo.copy()
# this is the initial output dict
outdict = {
'available_bands':[],
'available_band_labels':[],
'available_dereddened_bands':[],
'available_dereddened_band_labels':[],
'available_colors':[],
'available_color_labels':[],
'dereddened':False
}
#
# get the BVugriz mags from the JHK mags if necessary
#
# FIXME: should these be direct dered mag_0 = f(J_0, H_0, K_0) instead?
# Bilir+ 2008 uses dereddened colors for their transforms, should check if
# we need to do so here
if ('jmag' in objectinfo and
objectinfo['jmag'] is not None and
np.isfinite(objectinfo['jmag']) and
'hmag' in objectinfo and
objectinfo['hmag'] is not None and
np.isfinite(objectinfo['hmag']) and
'kmag' in objectinfo and
objectinfo['kmag'] is not None and
np.isfinite(objectinfo['kmag'])):
if ('bmag' not in objectinfo or
('bmag' in objectinfo and objectinfo['bmag'] is None) or
('bmag' in objectinfo and not np.isfinite(objectinfo['bmag']))):
objectinfo['bmag'] = magnitudes.jhk_to_bmag(objectinfo['jmag'],
objectinfo['hmag'],
objectinfo['kmag'])
outdict['bmagfromjhk'] = True
else:
outdict['bmagfromjhk'] = False
if ('vmag' not in objectinfo or
('vmag' in objectinfo and objectinfo['vmag'] is None) or
('vmag' in objectinfo and not np.isfinite(objectinfo['vmag']))):
objectinfo['vmag'] = magnitudes.jhk_to_vmag(objectinfo['jmag'],
objectinfo['hmag'],
objectinfo['kmag'])
outdict['vmagfromjhk'] = True
else:
outdict['vmagfromjhk'] = False
if ('sdssu' not in objectinfo or
('sdssu' in objectinfo and objectinfo['sdssu'] is None) or
('sdssu' in objectinfo and not np.isfinite(objectinfo['sdssu']))):
objectinfo['sdssu'] = magnitudes.jhk_to_sdssu(objectinfo['jmag'],
objectinfo['hmag'],
objectinfo['kmag'])
outdict['sdssufromjhk'] = True
else:
outdict['sdssufromjhk'] = False
if ('sdssg' not in objectinfo or
('sdssg' in objectinfo and objectinfo['sdssg'] is None) or
('sdssg' in objectinfo and not np.isfinite(objectinfo['sdssg']))):
objectinfo['sdssg'] = magnitudes.jhk_to_sdssg(objectinfo['jmag'],
objectinfo['hmag'],
objectinfo['kmag'])
outdict['sdssgfromjhk'] = True
else:
outdict['sdssgfromjhk'] = False
if ('sdssr' not in objectinfo or
('sdssr' in objectinfo and objectinfo['sdssr'] is None) or
('sdssr' in objectinfo and not np.isfinite(objectinfo['sdssr']))):
objectinfo['sdssr'] = magnitudes.jhk_to_sdssr(objectinfo['jmag'],
objectinfo['hmag'],
objectinfo['kmag'])
outdict['sdssrfromjhk'] = True
else:
outdict['sdssrfromjhk'] = False
if ('sdssi' not in objectinfo or
('sdssi' in objectinfo and objectinfo['sdssi'] is None) or
('sdssi' in objectinfo and not np.isfinite(objectinfo['sdssi']))):
objectinfo['sdssi'] = magnitudes.jhk_to_sdssi(objectinfo['jmag'],
objectinfo['hmag'],
objectinfo['kmag'])
outdict['sdssifromjhk'] = True
else:
outdict['sdssifromjhk'] = False
if ('sdssz' not in objectinfo or
('sdssz' in objectinfo and objectinfo['sdssz'] is None) or
('sdssz' in objectinfo and not np.isfinite(objectinfo['sdssz']))):
objectinfo['sdssz'] = magnitudes.jhk_to_sdssz(objectinfo['jmag'],
objectinfo['hmag'],
objectinfo['kmag'])
outdict['sdsszfromjhk'] = True
else:
outdict['sdsszfromjhk'] = False
# now handle dereddening if possible
if deredden:
try:
# first, get the extinction table for this object
extinction = dust.extinction_query(objectinfo['ra'],
objectinfo['decl'],
verbose=False,
timeout=dust_timeout)
except Exception:
LOGERROR("deredden = True but the DUST extinction query failed "
"('ra'/'decl' missing or invalid, or the service errored), "
"ignoring reddening...")
extinction = None
outdict['dereddened'] = False
else:
extinction = None
outdict['dereddened'] = False
# handle timeout from DUST service
if not extinction:
outdict['dereddened'] = False
# go through the objectdict and pick out the mags we have available from the
# BANDPASSES_COLORS dict
# update our bandpasses_colors dict with any custom ones the user defined
our_bandpasses_colors = BANDPASSES_COLORS.copy()
our_bandpass_list = BANDPASS_LIST[::]
if custom_bandpasses is not None and isinstance(custom_bandpasses, dict):
our_bandpasses_colors.update(custom_bandpasses)
# also update the list
for key in custom_bandpasses:
if key not in our_bandpass_list:
our_bandpass_list.append(key)
for mk in our_bandpass_list:
if (mk in objectinfo and
objectinfo[mk] is not None and
np.isfinite(objectinfo[mk])):
thisbandlabel = our_bandpasses_colors[mk]['label']
thisdustkey = our_bandpasses_colors[mk]['dustkey']
# add this to the outdict
outdict[mk] = objectinfo[mk]
outdict['available_bands'].append(mk)
outdict['available_band_labels'].append(thisbandlabel)
#
# deredden if possible
#
# calculating dereddened mags:
# A_x = m - m0_x where m is measured mag, m0 is intrinsic mag
# m0_x = m - A_x
#
# so for two bands x, y:
# intrinsic color (m_x - m_y)_0 = (m_x - m_y) - (A_x - A_y)
if (deredden and extinction):
outdict['dereddened'] = True
# check if the dustkey is None, float, or str to figure out how
# to retrieve the reddening
if (thisdustkey is not None and
isinstance(thisdustkey, str) and
thisdustkey in extinction['Amag'] and
np.isfinite(extinction['Amag'][thisdustkey]['sf11'])):
outdict['extinction_%s' % mk] = (
extinction['Amag'][thisdustkey]['sf11']
)
elif (thisdustkey is not None and
isinstance(thisdustkey, float)):
outdict['extinction_%s' % mk] = thisdustkey
else:
outdict['extinction_%s' % mk] = 0.0
# apply the extinction
outdict['dered_%s' % mk] = (
outdict[mk] - outdict['extinction_%s' % mk]
)
outdict['available_dereddened_bands'].append('dered_%s' % mk)
outdict['available_dereddened_band_labels'].append(
thisbandlabel
)
# get all the colors to generate for this bandpass
for colorspec in our_bandpasses_colors[mk]['colors']:
# only add this if the color's not there already
if colorspec[0] not in outdict:
colorkey, colorlabel = colorspec
# look for the bands to make this color
# if it's not found now, this should work when we come
# around for the next bandpass for this color
band1, band2 = colorkey.split('-')
if ('dered_%s' % band1 in outdict and
'dered_%s' % band2 in outdict and
np.isfinite(outdict['dered_%s' % band1]) and
np.isfinite(outdict['dered_%s' % band2])):
outdict[colorkey] = (
outdict['dered_%s' % band1] -
outdict['dered_%s' % band2]
)
outdict['available_colors'].append(colorkey)
outdict['available_color_labels'].append(colorlabel)
# handle no dereddening
else:
outdict['dereddened'] = False
outdict['extinction_%s' % mk] = 0.0
outdict['dered_%s' % mk] = np.nan
# get all the colors to generate for this bandpass
for colorspec in our_bandpasses_colors[mk]['colors']:
# only add this if the color's not there already
if colorspec[0] not in outdict:
colorkey, colorlabel = colorspec
# look for the bands to make this color
# if it's not found now, this should work when we come
# around for the next bandpass for this color
band1, band2 = colorkey.split('-')
if (band1 in outdict and
band2 in outdict and
outdict[band1] is not None and
outdict[band2] is not None and
np.isfinite(outdict[band1]) and
np.isfinite(outdict[band2])):
outdict[colorkey] = (
outdict[band1] -
outdict[band2]
)
outdict['available_colors'].append(colorkey)
outdict['available_color_labels'].append(colorlabel)
# if this bandpass was not found in the objectinfo dict, ignore it
else:
outdict[mk] = np.nan
return outdict
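The repeated JHK-conversion guards in color_features all test the same three conditions on a magnitude key. A small sketch of that test factored into a helper; the helper name and toy dict are hypothetical:

import numpy as np

def magkey_missing(d, key):
    # absent, None, or non-finite: the availability test that each of
    # the conversion blocks above spells out inline
    return key not in d or d[key] is None or not np.isfinite(d[key])

objectinfo = {'bmag': None, 'vmag': 12.1, 'rmag': float('nan')}
print(magkey_missing(objectinfo, 'bmag'),  # True
      magkey_missing(objectinfo, 'vmag'),  # False
      magkey_missing(objectinfo, 'rmag'),  # True
      magkey_missing(objectinfo, 'imag'))  # True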
|
def color_features(in_objectinfo,
deredden=True,
custom_bandpasses=None,
dust_timeout=10.0):
'''Stellar colors and dereddened stellar colors using 2MASS DUST API:
http://irsa.ipac.caltech.edu/applications/DUST/docs/dustProgramInterface.html
Parameters
----------
in_objectinfo : dict
This is a dict that contains the object's magnitudes and positions. This
requires at least 'ra', and 'decl' as keys which correspond to the right
ascension and declination of the object, and one or more of the
following keys for object magnitudes::
'umag' -> U mag -> colors: U-B, U-V, U-g
'bmag' -> B mag -> colors: U-B, B-V
'vmag' -> V mag -> colors: U-V, B-V, V-R, V-I, V-K
'rmag' -> R mag -> colors: V-R, R-I
'imag' -> I mag -> colors: g-I, V-I, R-I, B-I
'jmag' -> 2MASS J mag -> colors: J-H, J-K, g-J, i-J
'hmag' -> 2MASS H mag -> colors: J-H, H-K
'kmag' -> 2MASS Ks mag -> colors: g-Ks, H-Ks, J-Ks, V-Ks
'sdssu' -> SDSS u mag -> colors: u-g, u-V
'sdssg' -> SDSS g mag -> colors: g-r, g-i, g-K, u-g, U-g, g-J
'sdssr' -> SDSS r mag -> colors: r-i, g-r
'sdssi' -> SDSS i mag -> colors: r-i, i-z, g-i, i-J, i-W1
'sdssz' -> SDSS z mag -> colors: i-z, z-W2, g-z
'ujmag' -> UKIRT J mag -> colors: J-H, H-K, J-K, g-J, i-J
'uhmag' -> UKIRT H mag -> colors: J-H, H-K
'ukmag' -> UKIRT K mag -> colors: g-K, H-K, J-K, V-K
'irac1' -> Spitzer IRAC1 mag -> colors: i-I1, I1-I2
'irac2' -> Spitzer IRAC2 mag -> colors: I1-I2, I2-I3
'irac3' -> Spitzer IRAC3 mag -> colors: I2-I3
'irac4' -> Spitzer IRAC4 mag -> colors: I3-I4
'wise1' -> WISE W1 mag -> colors: i-W1, W1-W2
'wise2' -> WISE W2 mag -> colors: W1-W2, W2-W3
'wise3' -> WISE W3 mag -> colors: W2-W3
'wise4' -> WISE W4 mag -> colors: W3-W4
These are basically taken from the available reddening bandpasses from
the 2MASS DUST service. If B, V, u, g, r, i, z aren't provided but 2MASS
J, H, Ks are all provided, the former will be calculated using the 2MASS
JHKs -> BVugriz conversion functions in :py:mod:`astrobase.magnitudes`.
deredden : bool
If True, will make sure all colors use dereddened mags where possible.
custom_bandpasses : dict
This is a dict used to define any custom bandpasses in the
`in_objectinfo` dict you want to make this function aware of and
generate colors for. Use the format below for this dict::
{
'<bandpass_key_1>':{'dustkey':'<twomass_dust_key_1>',
'label':'<band_label_1>'
'colors':[['<bandkey1>-<bandkey2>',
'<BAND1> - <BAND2>'],
['<bandkey3>-<bandkey4>',
'<BAND3> - <BAND4>']]},
.
...
.
'<bandpass_key_N>':{'dustkey':'<twomass_dust_key_N>',
'label':'<band_label_N>'
'colors':[['<bandkey1>-<bandkey2>',
'<BAND1> - <BAND2>'],
['<bandkey3>-<bandkey4>',
'<BAND3> - <BAND4>']]},
}
Where:
`bandpass_key` is a key to use to refer to this bandpass in the
`objectinfo` dict, e.g. 'sdssg' for SDSS g band
`twomass_dust_key` is the key to use in the 2MASS DUST result table for
reddening per band-pass. For example, given the following DUST result
table (using http://irsa.ipac.caltech.edu/applications/DUST/)::
|Filter_name|LamEff |A_over_E_B_V_SandF|A_SandF|A_over_E_B_V_SFD|A_SFD|
|char |float |float |float |float |float|
| |microns| |mags | |mags |
CTIO U 0.3734 4.107 0.209 4.968 0.253
CTIO B 0.4309 3.641 0.186 4.325 0.221
CTIO V 0.5517 2.682 0.137 3.240 0.165
.
.
...
The `twomass_dust_key` for 'vmag' would be 'CTIO V'. If you want to
skip DUST lookup and want to pass in a specific reddening magnitude
for your bandpass, use a float for the value of
`twomass_dust_key`. If you want to skip DUST lookup entirely for
this bandpass, use None for the value of `twomass_dust_key`.
`band_label` is the label to use for this bandpass, e.g. 'W1' for
WISE-1 band, 'u' for SDSS u, etc.
The 'colors' list contains color definitions for all colors you want
to generate using this bandpass. This list contains elements of the
form::
['<bandkey1>-<bandkey2>','<BAND1> - <BAND2>']
where the first item gives the bandpass keys making up this color,
and the second item is the label for this color to be used by the
frontends. An example::
['sdssu-sdssg','u - g']
dust_timeout : float
The timeout to use when contacting the 2MASS DUST web service.
Returns
-------
dict
An `objectinfo` dict with all of the generated colors, dereddened
magnitudes, and dereddened colors, as specified in the input args, is
returned.
'''
objectinfo = in_objectinfo.copy()
# this is the initial output dict
outdict = {
'available_bands':[],
'available_band_labels':[],
'available_dereddened_bands':[],
'available_dereddened_band_labels':[],
'available_colors':[],
'available_color_labels':[],
'dereddened':False
}
#
# get the BVugriz mags from the JHK mags if necessary
#
# FIXME: should these be direct dered mag_0 = f(J_0, H_0, K_0) instead?
# Bilir+ 2008 uses dereddened colors for their transforms, should check if
# we need to do so here
if ('jmag' in objectinfo and
objectinfo['jmag'] is not None and
np.isfinite(objectinfo['jmag']) and
'hmag' in objectinfo and
objectinfo['hmag'] is not None and
np.isfinite(objectinfo['hmag']) and
'kmag' in objectinfo and
objectinfo['kmag'] is not None and
np.isfinite(objectinfo['kmag'])):
if ('bmag' not in objectinfo or
('bmag' in objectinfo and objectinfo['bmag'] is None) or
('bmag' in objectinfo and not np.isfinite(objectinfo['bmag']))):
objectinfo['bmag'] = magnitudes.jhk_to_bmag(objectinfo['jmag'],
objectinfo['hmag'],
objectinfo['kmag'])
outdict['bmagfromjhk'] = True
else:
outdict['bmagfromjhk'] = False
if ('vmag' not in objectinfo or
('vmag' in objectinfo and objectinfo['vmag'] is None) or
('vmag' in objectinfo and not np.isfinite(objectinfo['vmag']))):
objectinfo['vmag'] = magnitudes.jhk_to_vmag(objectinfo['jmag'],
objectinfo['hmag'],
objectinfo['kmag'])
outdict['vmagfromjhk'] = True
else:
outdict['vmagfromjhk'] = False
if ('sdssu' not in objectinfo or
('sdssu' in objectinfo and objectinfo['sdssu'] is None) or
('sdssu' in objectinfo and not np.isfinite(objectinfo['sdssu']))):
objectinfo['sdssu'] = magnitudes.jhk_to_sdssu(objectinfo['jmag'],
objectinfo['hmag'],
objectinfo['kmag'])
outdict['sdssufromjhk'] = True
else:
outdict['sdssufromjhk'] = False
if ('sdssg' not in objectinfo or
('sdssg' in objectinfo and objectinfo['sdssg'] is None) or
('sdssg' in objectinfo and not np.isfinite(objectinfo['sdssg']))):
objectinfo['sdssg'] = magnitudes.jhk_to_sdssg(objectinfo['jmag'],
objectinfo['hmag'],
objectinfo['kmag'])
outdict['sdssgfromjhk'] = True
else:
outdict['sdssgfromjhk'] = False
if ('sdssr' not in objectinfo or
('sdssr' in objectinfo and objectinfo['sdssr'] is None) or
('sdssr' in objectinfo and not np.isfinite(objectinfo['sdssr']))):
objectinfo['sdssr'] = magnitudes.jhk_to_sdssr(objectinfo['jmag'],
objectinfo['hmag'],
objectinfo['kmag'])
outdict['sdssrfromjhk'] = True
else:
outdict['sdssrfromjhk'] = False
if ('sdssi' not in objectinfo or
('sdssi' in objectinfo and objectinfo['sdssi'] is None) or
('sdssi' in objectinfo and not np.isfinite(objectinfo['sdssi']))):
objectinfo['sdssi'] = magnitudes.jhk_to_sdssi(objectinfo['jmag'],
objectinfo['hmag'],
objectinfo['kmag'])
outdict['sdssifromjhk'] = True
else:
outdict['sdssifromjhk'] = False
if ('sdssz' not in objectinfo or
('sdssz' in objectinfo and objectinfo['sdssz'] is None) or
('sdssz' in objectinfo and not np.isfinite(objectinfo['sdssz']))):
objectinfo['sdssz'] = magnitudes.jhk_to_sdssz(objectinfo['jmag'],
objectinfo['hmag'],
objectinfo['kmag'])
outdict['sdsszfromjhk'] = True
else:
outdict['sdsszfromjhk'] = False
# now handle dereddening if possible
if deredden:
try:
# first, get the extinction table for this object
extinction = dust.extinction_query(objectinfo['ra'],
objectinfo['decl'],
verbose=False,
timeout=dust_timeout)
except Exception:
LOGERROR("deredden = True but the DUST extinction query failed "
"('ra'/'decl' missing or invalid, or the service errored), "
"ignoring reddening...")
extinction = None
outdict['dereddened'] = False
else:
extinction = None
outdict['dereddened'] = False
# handle timeout from DUST service
if not extinction:
outdict['dereddened'] = False
# go through the objectdict and pick out the mags we have available from the
# BANDPASSES_COLORS dict
# update our bandpasses_colors dict with any custom ones the user defined
our_bandpasses_colors = BANDPASSES_COLORS.copy()
our_bandpass_list = BANDPASS_LIST[::]
if custom_bandpasses is not None and isinstance(custom_bandpasses, dict):
our_bandpasses_colors.update(custom_bandpasses)
# also update the list
for key in custom_bandpasses:
if key not in our_bandpass_list:
our_bandpass_list.append(key)
for mk in our_bandpass_list:
if (mk in objectinfo and
objectinfo[mk] is not None and
np.isfinite(objectinfo[mk])):
thisbandlabel = our_bandpasses_colors[mk]['label']
thisdustkey = our_bandpasses_colors[mk]['dustkey']
# add this to the outdict
outdict[mk] = objectinfo[mk]
outdict['available_bands'].append(mk)
outdict['available_band_labels'].append(thisbandlabel)
#
# deredden if possible
#
# calculating dereddened mags:
# A_x = m - m0_x where m is measured mag, m0 is intrinsic mag
# m0_x = m - A_x
#
# so for two bands x, y:
# intrinsic color (m_x - m_y)_0 = (m_x - m_y) - (A_x - A_y)
if (deredden and extinction):
outdict['dereddened'] = True
# check if the dustkey is None, float, or str to figure out how
# to retrieve the reddening
if (thisdustkey is not None and
isinstance(thisdustkey, str) and
thisdustkey in extinction['Amag'] and
np.isfinite(extinction['Amag'][thisdustkey]['sf11'])):
outdict['extinction_%s' % mk] = (
extinction['Amag'][thisdustkey]['sf11']
)
elif (thisdustkey is not None and
isinstance(thisdustkey, float)):
outdict['extinction_%s' % mk] = thisdustkey
else:
outdict['extinction_%s' % mk] = 0.0
# apply the extinction
outdict['dered_%s' % mk] = (
outdict[mk] - outdict['extinction_%s' % mk]
)
outdict['available_dereddened_bands'].append('dered_%s' % mk)
outdict['available_dereddened_band_labels'].append(
thisbandlabel
)
# get all the colors to generate for this bandpass
for colorspec in our_bandpasses_colors[mk]['colors']:
# only add this if the color's not there already
if colorspec[0] not in outdict:
colorkey, colorlabel = colorspec
# look for the bands to make this color
# if it's not found now, this should work when we come
# around for the next bandpass for this color
band1, band2 = colorkey.split('-')
if ('dered_%s' % band1 in outdict and
'dered_%s' % band2 in outdict and
np.isfinite(outdict['dered_%s' % band1]) and
np.isfinite(outdict['dered_%s' % band2])):
outdict[colorkey] = (
outdict['dered_%s' % band1] -
outdict['dered_%s' % band2]
)
outdict['available_colors'].append(colorkey)
outdict['available_color_labels'].append(colorlabel)
# handle no dereddening
else:
outdict['dereddened'] = False
outdict['extinction_%s' % mk] = 0.0
outdict['dered_%s' % mk] = np.nan
# get all the colors to generate for this bandpass
for colorspec in our_bandpasses_colors[mk]['colors']:
# only add this if the color's not there already
if colorspec[0] not in outdict:
colorkey, colorlabel = colorspec
# look for the bands to make this color
# if it's not found now, this should work when we come
# around for the next bandpass for this color
band1, band2 = colorkey.split('-')
if (band1 in outdict and
band2 in outdict and
outdict[band1] is not None and
outdict[band2] is not None and
np.isfinite(outdict[band1]) and
np.isfinite(outdict[band2])):
outdict[colorkey] = (
outdict[band1] -
outdict[band2]
)
outdict['available_colors'].append(colorkey)
outdict['available_color_labels'].append(colorlabel)
# if this bandpass was not found in the objectinfo dict, ignore it
else:
outdict[mk] = np.nan
return outdict
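A quick numeric check of the dereddening arithmetic laid out in the comments above (A_x = m - m0_x), with made-up magnitudes and extinction values:

# made-up observed magnitudes and per-band extinctions
g, r = 14.80, 14.20
A_g, A_r = 0.25, 0.17

g0 = g - A_g    # dereddened magnitude: m0 = m - A
r0 = r - A_r
print(g0 - r0)  # intrinsic color: (g - r) - (A_g - A_r) = 0.52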
|
[
"Stellar",
"colors",
"and",
"dereddened",
"stellar",
"colors",
"using",
"2MASS",
"DUST",
"API",
":"
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/varclass/starfeatures.py#L324-L723
|
[
"def",
"color_features",
"(",
"in_objectinfo",
",",
"deredden",
"=",
"True",
",",
"custom_bandpasses",
"=",
"None",
",",
"dust_timeout",
"=",
"10.0",
")",
":",
"objectinfo",
"=",
"in_objectinfo",
".",
"copy",
"(",
")",
"# this is the initial output dict",
"outdict",
"=",
"{",
"'available_bands'",
":",
"[",
"]",
",",
"'available_band_labels'",
":",
"[",
"]",
",",
"'available_dereddened_bands'",
":",
"[",
"]",
",",
"'available_dereddened_band_labels'",
":",
"[",
"]",
",",
"'available_colors'",
":",
"[",
"]",
",",
"'available_color_labels'",
":",
"[",
"]",
",",
"'dereddened'",
":",
"False",
"}",
"#",
"# get the BVugriz mags from the JHK mags if necessary",
"#",
"# FIXME: should these be direct dered mag_0 = f(J_0, H_0, K_0) instead?",
"# Bilir+ 2008 uses dereddened colors for their transforms, should check if",
"# we need to do so here",
"if",
"(",
"'jmag'",
"in",
"objectinfo",
"and",
"objectinfo",
"[",
"'jmag'",
"]",
"is",
"not",
"None",
"and",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'jmag'",
"]",
")",
"and",
"'hmag'",
"in",
"objectinfo",
"and",
"objectinfo",
"[",
"'hmag'",
"]",
"is",
"not",
"None",
"and",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'hmag'",
"]",
")",
"and",
"'kmag'",
"in",
"objectinfo",
"and",
"objectinfo",
"[",
"'kmag'",
"]",
"is",
"not",
"None",
"and",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'kmag'",
"]",
")",
")",
":",
"if",
"(",
"'bmag'",
"not",
"in",
"objectinfo",
"or",
"(",
"'bmag'",
"in",
"objectinfo",
"and",
"objectinfo",
"[",
"'bmag'",
"]",
"is",
"None",
")",
"or",
"(",
"'bmag'",
"in",
"objectinfo",
"and",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'bmag'",
"]",
")",
")",
")",
":",
"objectinfo",
"[",
"'bmag'",
"]",
"=",
"magnitudes",
".",
"jhk_to_bmag",
"(",
"objectinfo",
"[",
"'jmag'",
"]",
",",
"objectinfo",
"[",
"'hmag'",
"]",
",",
"objectinfo",
"[",
"'kmag'",
"]",
")",
"outdict",
"[",
"'bmagfromjhk'",
"]",
"=",
"True",
"else",
":",
"outdict",
"[",
"'bmagfromjhk'",
"]",
"=",
"False",
"if",
"(",
"'vmag'",
"not",
"in",
"objectinfo",
"or",
"(",
"'vmag'",
"in",
"objectinfo",
"and",
"objectinfo",
"[",
"'vmag'",
"]",
"is",
"None",
")",
"or",
"(",
"'vmag'",
"in",
"objectinfo",
"and",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'vmag'",
"]",
")",
")",
")",
":",
"objectinfo",
"[",
"'vmag'",
"]",
"=",
"magnitudes",
".",
"jhk_to_vmag",
"(",
"objectinfo",
"[",
"'jmag'",
"]",
",",
"objectinfo",
"[",
"'hmag'",
"]",
",",
"objectinfo",
"[",
"'kmag'",
"]",
")",
"outdict",
"[",
"'vmagfromjhk'",
"]",
"=",
"True",
"else",
":",
"outdict",
"[",
"'vmagfromjhk'",
"]",
"=",
"False",
"if",
"(",
"'sdssu'",
"not",
"in",
"objectinfo",
"or",
"(",
"'sdssu'",
"in",
"objectinfo",
"and",
"objectinfo",
"[",
"'sdssu'",
"]",
"is",
"None",
")",
"or",
"(",
"'sdssu'",
"in",
"objectinfo",
"and",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'sdssu'",
"]",
")",
")",
")",
":",
"objectinfo",
"[",
"'sdssu'",
"]",
"=",
"magnitudes",
".",
"jhk_to_sdssu",
"(",
"objectinfo",
"[",
"'jmag'",
"]",
",",
"objectinfo",
"[",
"'hmag'",
"]",
",",
"objectinfo",
"[",
"'kmag'",
"]",
")",
"outdict",
"[",
"'sdssufromjhk'",
"]",
"=",
"True",
"else",
":",
"outdict",
"[",
"'sdssufromjhk'",
"]",
"=",
"False",
"if",
"(",
"'sdssg'",
"not",
"in",
"objectinfo",
"or",
"(",
"'sdssg'",
"in",
"objectinfo",
"and",
"objectinfo",
"[",
"'sdssg'",
"]",
"is",
"None",
")",
"or",
"(",
"'sdssg'",
"in",
"objectinfo",
"and",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'sdssg'",
"]",
")",
")",
")",
":",
"objectinfo",
"[",
"'sdssg'",
"]",
"=",
"magnitudes",
".",
"jhk_to_sdssg",
"(",
"objectinfo",
"[",
"'jmag'",
"]",
",",
"objectinfo",
"[",
"'hmag'",
"]",
",",
"objectinfo",
"[",
"'kmag'",
"]",
")",
"outdict",
"[",
"'sdssgfromjhk'",
"]",
"=",
"True",
"else",
":",
"outdict",
"[",
"'sdssgfromjhk'",
"]",
"=",
"False",
"if",
"(",
"'sdssr'",
"not",
"in",
"objectinfo",
"or",
"(",
"'sdssr'",
"in",
"objectinfo",
"and",
"objectinfo",
"[",
"'sdssr'",
"]",
"is",
"None",
")",
"or",
"(",
"'sdssr'",
"in",
"objectinfo",
"and",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'sdssr'",
"]",
")",
")",
")",
":",
"objectinfo",
"[",
"'sdssr'",
"]",
"=",
"magnitudes",
".",
"jhk_to_sdssr",
"(",
"objectinfo",
"[",
"'jmag'",
"]",
",",
"objectinfo",
"[",
"'hmag'",
"]",
",",
"objectinfo",
"[",
"'kmag'",
"]",
")",
"outdict",
"[",
"'sdssrfromjhk'",
"]",
"=",
"True",
"else",
":",
"outdict",
"[",
"'sdssrfromjhk'",
"]",
"=",
"False",
"if",
"(",
"'sdssi'",
"not",
"in",
"objectinfo",
"or",
"(",
"'sdssi'",
"in",
"objectinfo",
"and",
"objectinfo",
"[",
"'sdssi'",
"]",
"is",
"None",
")",
"or",
"(",
"'sdssi'",
"in",
"objectinfo",
"and",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'sdssi'",
"]",
")",
")",
")",
":",
"objectinfo",
"[",
"'sdssi'",
"]",
"=",
"magnitudes",
".",
"jhk_to_sdssi",
"(",
"objectinfo",
"[",
"'jmag'",
"]",
",",
"objectinfo",
"[",
"'hmag'",
"]",
",",
"objectinfo",
"[",
"'kmag'",
"]",
")",
"outdict",
"[",
"'sdssifromjhk'",
"]",
"=",
"True",
"else",
":",
"outdict",
"[",
"'sdssifromjhk'",
"]",
"=",
"False",
"if",
"(",
"'sdssz'",
"not",
"in",
"objectinfo",
"or",
"(",
"'sdssz'",
"in",
"objectinfo",
"and",
"objectinfo",
"[",
"'sdssz'",
"]",
"is",
"None",
")",
"or",
"(",
"'sdssz'",
"in",
"objectinfo",
"and",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'sdssz'",
"]",
")",
")",
")",
":",
"objectinfo",
"[",
"'sdssz'",
"]",
"=",
"magnitudes",
".",
"jhk_to_sdssz",
"(",
"objectinfo",
"[",
"'jmag'",
"]",
",",
"objectinfo",
"[",
"'hmag'",
"]",
",",
"objectinfo",
"[",
"'kmag'",
"]",
")",
"outdict",
"[",
"'sdsszfromjhk'",
"]",
"=",
"True",
"else",
":",
"outdict",
"[",
"'sdsszfromjhk'",
"]",
"=",
"False",
"# now handle dereddening if possible",
"if",
"deredden",
":",
"try",
":",
"# first, get the extinction table for this object",
"extinction",
"=",
"dust",
".",
"extinction_query",
"(",
"objectinfo",
"[",
"'ra'",
"]",
",",
"objectinfo",
"[",
"'decl'",
"]",
",",
"verbose",
"=",
"False",
",",
"timeout",
"=",
"dust_timeout",
")",
"except",
"Exception",
"as",
"e",
":",
"LOGERROR",
"(",
"\"deredden = True but 'ra', 'decl' keys not present \"",
"\"or invalid in objectinfo dict, ignoring reddening...\"",
")",
"extinction",
"=",
"None",
"outdict",
"[",
"'dereddened'",
"]",
"=",
"False",
"else",
":",
"extinction",
"=",
"None",
"outdict",
"[",
"'dereddened'",
"]",
"=",
"False",
"# handle timeout from DUST service",
"if",
"not",
"extinction",
":",
"outdict",
"[",
"'dereddened'",
"]",
"=",
"False",
"# go through the objectdict and pick out the mags we have available from the",
"# BANDPASSES_COLORS dict",
"# update our bandpasses_colors dict with any custom ones the user defined",
"our_bandpasses_colors",
"=",
"BANDPASSES_COLORS",
".",
"copy",
"(",
")",
"our_bandpass_list",
"=",
"BANDPASS_LIST",
"[",
":",
":",
"]",
"if",
"custom_bandpasses",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"custom_bandpasses",
",",
"dict",
")",
":",
"our_bandpasses_colors",
".",
"update",
"(",
"custom_bandpasses",
")",
"# also update the list",
"for",
"key",
"in",
"custom_bandpasses",
":",
"if",
"key",
"not",
"in",
"our_bandpass_list",
":",
"our_bandpass_list",
".",
"append",
"(",
"key",
")",
"for",
"mk",
"in",
"our_bandpass_list",
":",
"if",
"(",
"mk",
"in",
"objectinfo",
"and",
"objectinfo",
"[",
"mk",
"]",
"is",
"not",
"None",
"and",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"mk",
"]",
")",
")",
":",
"thisbandlabel",
"=",
"our_bandpasses_colors",
"[",
"mk",
"]",
"[",
"'label'",
"]",
"thisdustkey",
"=",
"our_bandpasses_colors",
"[",
"mk",
"]",
"[",
"'dustkey'",
"]",
"# add this to the outdict",
"outdict",
"[",
"mk",
"]",
"=",
"objectinfo",
"[",
"mk",
"]",
"outdict",
"[",
"'available_bands'",
"]",
".",
"append",
"(",
"mk",
")",
"outdict",
"[",
"'available_band_labels'",
"]",
".",
"append",
"(",
"thisbandlabel",
")",
"#",
"# deredden if possible",
"#",
"# calculating dereddened mags:",
"# A_x = m - m0_x where m is measured mag, m0 is intrinsic mag",
"# m0_x = m - A_x",
"#",
"# so for two bands x, y:",
"# intrinsic color (m_x - m_y)_0 = (m_x - m_y) - (A_x - A_y)",
"if",
"(",
"deredden",
"and",
"extinction",
")",
":",
"outdict",
"[",
"'dereddened'",
"]",
"=",
"True",
"# check if the dustkey is None, float, or str to figure out how",
"# to retrieve the reddening",
"if",
"(",
"thisdustkey",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"thisdustkey",
",",
"str",
")",
"and",
"thisdustkey",
"in",
"extinction",
"[",
"'Amag'",
"]",
"and",
"np",
".",
"isfinite",
"(",
"extinction",
"[",
"'Amag'",
"]",
"[",
"thisdustkey",
"]",
"[",
"'sf11'",
"]",
")",
")",
":",
"outdict",
"[",
"'extinction_%s'",
"%",
"mk",
"]",
"=",
"(",
"extinction",
"[",
"'Amag'",
"]",
"[",
"thisdustkey",
"]",
"[",
"'sf11'",
"]",
")",
"elif",
"(",
"thisdustkey",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"thisdustkey",
",",
"float",
")",
")",
":",
"outdict",
"[",
"'extinction_%s'",
"%",
"mk",
"]",
"=",
"thisdustkey",
"else",
":",
"outdict",
"[",
"'extinction_%s'",
"%",
"mk",
"]",
"=",
"0.0",
"# apply the extinction",
"outdict",
"[",
"'dered_%s'",
"%",
"mk",
"]",
"=",
"(",
"outdict",
"[",
"mk",
"]",
"-",
"outdict",
"[",
"'extinction_%s'",
"%",
"mk",
"]",
")",
"outdict",
"[",
"'available_dereddened_bands'",
"]",
".",
"append",
"(",
"'dered_%s'",
"%",
"mk",
")",
"outdict",
"[",
"'available_dereddened_band_labels'",
"]",
".",
"append",
"(",
"thisbandlabel",
")",
"# get all the colors to generate for this bandpass",
"for",
"colorspec",
"in",
"BANDPASSES_COLORS",
"[",
"mk",
"]",
"[",
"'colors'",
"]",
":",
"# only add this if the color's not there already",
"if",
"colorspec",
"[",
"0",
"]",
"not",
"in",
"outdict",
":",
"colorkey",
",",
"colorlabel",
"=",
"colorspec",
"# look for the bands to make this color",
"# if it's not found now, this should work when we come",
"# around for the next bandpass for this color",
"band1",
",",
"band2",
"=",
"colorkey",
".",
"split",
"(",
"'-'",
")",
"if",
"(",
"'dered_%s'",
"%",
"band1",
"in",
"outdict",
"and",
"'dered_%s'",
"%",
"band2",
"in",
"outdict",
"and",
"np",
".",
"isfinite",
"(",
"outdict",
"[",
"'dered_%s'",
"%",
"band1",
"]",
")",
"and",
"np",
".",
"isfinite",
"(",
"outdict",
"[",
"'dered_%s'",
"%",
"band2",
"]",
")",
")",
":",
"outdict",
"[",
"colorkey",
"]",
"=",
"(",
"outdict",
"[",
"'dered_%s'",
"%",
"band1",
"]",
"-",
"outdict",
"[",
"'dered_%s'",
"%",
"band2",
"]",
")",
"outdict",
"[",
"'available_colors'",
"]",
".",
"append",
"(",
"colorkey",
")",
"outdict",
"[",
"'available_color_labels'",
"]",
".",
"append",
"(",
"colorlabel",
")",
"# handle no dereddening",
"else",
":",
"outdict",
"[",
"'dereddened'",
"]",
"=",
"False",
"outdict",
"[",
"'extinction_%s'",
"%",
"mk",
"]",
"=",
"0.0",
"outdict",
"[",
"'dered_%s'",
"%",
"mk",
"]",
"=",
"np",
".",
"nan",
"# get all the colors to generate for this bandpass",
"for",
"colorspec",
"in",
"our_bandpasses_colors",
"[",
"mk",
"]",
"[",
"'colors'",
"]",
":",
"# only add this if the color's not there already",
"if",
"colorspec",
"[",
"0",
"]",
"not",
"in",
"outdict",
":",
"colorkey",
",",
"colorlabel",
"=",
"colorspec",
"# look for the bands to make this color",
"# if it's not found now, this should work when we come",
"# around for the next bandpass for this color",
"band1",
",",
"band2",
"=",
"colorkey",
".",
"split",
"(",
"'-'",
")",
"if",
"(",
"band1",
"in",
"outdict",
"and",
"band2",
"in",
"outdict",
"and",
"outdict",
"[",
"band1",
"]",
"is",
"not",
"None",
"and",
"outdict",
"[",
"band2",
"]",
"is",
"not",
"None",
"and",
"np",
".",
"isfinite",
"(",
"outdict",
"[",
"band1",
"]",
")",
"and",
"np",
".",
"isfinite",
"(",
"outdict",
"[",
"band2",
"]",
")",
")",
":",
"outdict",
"[",
"colorkey",
"]",
"=",
"(",
"outdict",
"[",
"band1",
"]",
"-",
"outdict",
"[",
"band2",
"]",
")",
"outdict",
"[",
"'available_colors'",
"]",
".",
"append",
"(",
"colorkey",
")",
"outdict",
"[",
"'available_color_labels'",
"]",
".",
"append",
"(",
"colorlabel",
")",
"# if this bandpass was not found in the objectinfo dict, ignore it",
"else",
":",
"outdict",
"[",
"mk",
"]",
"=",
"np",
".",
"nan",
"return",
"outdict"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
mdwarf_subtype_from_sdsscolor
|
This calculates the M-dwarf subtype given SDSS `r-i` and `i-z` colors.
Parameters
----------
ri_color : float
The SDSS `r-i` color of the object.
iz_color : float
The SDSS `i-z` color of the object.
Returns
-------
(subtype, index1, index2) : tuple
`subtype`: if the star appears to be an M dwarf, will return a string
between 'M0' and 'M9' indicating its subtype, e.g. will return 'M4' for
an M4 dwarf. If the object isn't an M dwarf, will return None
`index1`, `index2`: the M-dwarf color locus value and spread of this
object calculated from the `r-i` and `i-z` colors.
|
astrobase/varclass/starfeatures.py
|
def mdwarf_subtype_from_sdsscolor(ri_color, iz_color):
'''This calculates the M-dwarf subtype given SDSS `r-i` and `i-z` colors.
Parameters
----------
ri_color : float
The SDSS `r-i` color of the object.
iz_color : float
The SDSS `i-z` color of the object.
Returns
-------
(subtype, index1, index2) : tuple
`subtype`: if the star appears to be an M dwarf, will return a string
between 'M0' and 'M9' indicating its subtype, e.g. will return 'M4' for
an M4 dwarf. If the object isn't an M dwarf, will return None
`index1`, `index2`: the M-dwarf color locus value and spread of this
object calculated from the `r-i` and `i-z` colors.
'''
# calculate the spectral type index and the spectral type spread of the
# object. sti is calculated by fitting a line to the locus in r-i and i-z
# space for M dwarfs in West+ 2007
if np.isfinite(ri_color) and np.isfinite(iz_color):
obj_sti = 0.875274*ri_color + 0.483628*(iz_color + 0.00438)
obj_sts = -0.483628*ri_color + 0.875274*(iz_color + 0.00438)
else:
obj_sti = np.nan
obj_sts = np.nan
# possible M star if sti is > 0.666 but < 3.4559
if (np.isfinite(obj_sti) and np.isfinite(obj_sts) and
(obj_sti > 0.666) and (obj_sti < 3.4559)):
# decide which M subclass object this is
if ((obj_sti > 0.6660) and (obj_sti < 0.8592)):
m_class = 'M0'
if ((obj_sti > 0.8592) and (obj_sti < 1.0822)):
m_class = 'M1'
if ((obj_sti > 1.0822) and (obj_sti < 1.2998)):
m_class = 'M2'
if ((obj_sti > 1.2998) and (obj_sti < 1.6378)):
m_class = 'M3'
if ((obj_sti > 1.6378) and (obj_sti < 2.0363)):
m_class = 'M4'
if ((obj_sti > 2.0363) and (obj_sti < 2.2411)):
m_class = 'M5'
if ((obj_sti > 2.2411) and (obj_sti < 2.4126)):
m_class = 'M6'
if ((obj_sti > 2.4126) and (obj_sti < 2.9213)):
m_class = 'M7'
if ((obj_sti > 2.9213) and (obj_sti < 3.2418)):
m_class = 'M8'
if ((obj_sti > 3.2418) and (obj_sti < 3.4559)):
m_class = 'M9'
else:
m_class = None
return m_class, obj_sti, obj_sts
|
def mdwarf_subtype_from_sdsscolor(ri_color, iz_color):
'''This calculates the M-dwarf subtype given SDSS `r-i` and `i-z` colors.
Parameters
----------
ri_color : float
The SDSS `r-i` color of the object.
iz_color : float
The SDSS `i-z` color of the object.
Returns
-------
(subtype, index1, index2) : tuple
`subtype`: if the star appears to be an M dwarf, will return a string
between 'M0' and 'M9' indicating its subtype, e.g. will return 'M4' for
an M4 dwarf. If the object isn't an M dwarf, will return None
`index1`, `index2`: the M-dwarf color locus value and spread of this
object calculated from the `r-i` and `i-z` colors.
'''
# calculate the spectral type index and the spectral type spread of the
# object. sti is calculated by fitting a line to the locus in r-i and i-z
# space for M dwarfs in West+ 2007
if np.isfinite(ri_color) and np.isfinite(iz_color):
obj_sti = 0.875274*ri_color + 0.483628*(iz_color + 0.00438)
obj_sts = -0.483628*ri_color + 0.875274*(iz_color + 0.00438)
else:
obj_sti = np.nan
obj_sts = np.nan
# possible M star if sti is > 0.666 but < 3.4559
if (np.isfinite(obj_sti) and np.isfinite(obj_sts) and
(obj_sti > 0.666) and (obj_sti < 3.4559)):
# decide which M subclass object this is
if ((obj_sti > 0.6660) and (obj_sti < 0.8592)):
m_class = 'M0'
if ((obj_sti > 0.8592) and (obj_sti < 1.0822)):
m_class = 'M1'
if ((obj_sti > 1.0822) and (obj_sti < 1.2998)):
m_class = 'M2'
if ((obj_sti > 1.2998) and (obj_sti < 1.6378)):
m_class = 'M3'
if ((obj_sti > 1.6378) and (obj_sti < 2.0363)):
m_class = 'M4'
if ((obj_sti > 2.0363) and (obj_sti < 2.2411)):
m_class = 'M5'
if ((obj_sti > 2.2411) and (obj_sti < 2.4126)):
m_class = 'M6'
if ((obj_sti > 2.4126) and (obj_sti < 2.9213)):
m_class = 'M7'
if ((obj_sti > 2.9213) and (obj_sti < 3.2418)):
m_class = 'M8'
if ((obj_sti > 3.2418) and (obj_sti < 3.4559)):
m_class = 'M9'
else:
m_class = None
return m_class, obj_sti, obj_sts
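As a quick sanity check of the locus transform above, take a hypothetical object with r-i = 1.5 and i-z = 0.7: obj_sti = 0.875274*1.5 + 0.483628*(0.7 + 0.00438) ≈ 1.654, which lands in the (1.6378, 2.0363) bin, so the subtype comes back as 'M4':

from astrobase.varclass.starfeatures import mdwarf_subtype_from_sdsscolor

subtype, sti, sts = mdwarf_subtype_from_sdsscolor(1.5, 0.7)
# sti is ~1.654, inside the M4 bin, so subtype == 'M4'
print(subtype, sti, sts)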
|
["This", "calculates", "the", "M", "-", "dwarf", "subtype", "given", "SDSS", "r", "-", "i", "and", "i", "-", "z", "colors", "."] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/varclass/starfeatures.py#L727-L800
|
[
"def",
"mdwarf_subtype_from_sdsscolor",
"(",
"ri_color",
",",
"iz_color",
")",
":",
"# calculate the spectral type index and the spectral type spread of the",
"# object. sti is calculated by fitting a line to the locus in r-i and i-z",
"# space for M dwarfs in West+ 2007",
"if",
"np",
".",
"isfinite",
"(",
"ri_color",
")",
"and",
"np",
".",
"isfinite",
"(",
"iz_color",
")",
":",
"obj_sti",
"=",
"0.875274",
"*",
"ri_color",
"+",
"0.483628",
"*",
"(",
"iz_color",
"+",
"0.00438",
")",
"obj_sts",
"=",
"-",
"0.483628",
"*",
"ri_color",
"+",
"0.875274",
"*",
"(",
"iz_color",
"+",
"0.00438",
")",
"else",
":",
"obj_sti",
"=",
"np",
".",
"nan",
"obj_sts",
"=",
"np",
".",
"nan",
"# possible M star if sti is >= 0.666 but <= 3.4559",
"if",
"(",
"np",
".",
"isfinite",
"(",
"obj_sti",
")",
"and",
"np",
".",
"isfinite",
"(",
"obj_sts",
")",
"and",
"(",
"obj_sti",
">",
"0.666",
")",
"and",
"(",
"obj_sti",
"<",
"3.4559",
")",
")",
":",
"# decide which M subclass object this is",
"if",
"(",
"(",
"obj_sti",
">",
"0.6660",
")",
"and",
"(",
"obj_sti",
"<",
"0.8592",
")",
")",
":",
"m_class",
"=",
"'M0'",
"if",
"(",
"(",
"obj_sti",
">",
"0.8592",
")",
"and",
"(",
"obj_sti",
"<",
"1.0822",
")",
")",
":",
"m_class",
"=",
"'M1'",
"if",
"(",
"(",
"obj_sti",
">",
"1.0822",
")",
"and",
"(",
"obj_sti",
"<",
"1.2998",
")",
")",
":",
"m_class",
"=",
"'M2'",
"if",
"(",
"(",
"obj_sti",
">",
"1.2998",
")",
"and",
"(",
"obj_sti",
"<",
"1.6378",
")",
")",
":",
"m_class",
"=",
"'M3'",
"if",
"(",
"(",
"obj_sti",
">",
"1.6378",
")",
"and",
"(",
"obj_sti",
"<",
"2.0363",
")",
")",
":",
"m_class",
"=",
"'M4'",
"if",
"(",
"(",
"obj_sti",
">",
"2.0363",
")",
"and",
"(",
"obj_sti",
"<",
"2.2411",
")",
")",
":",
"m_class",
"=",
"'M5'",
"if",
"(",
"(",
"obj_sti",
">",
"2.2411",
")",
"and",
"(",
"obj_sti",
"<",
"2.4126",
")",
")",
":",
"m_class",
"=",
"'M6'",
"if",
"(",
"(",
"obj_sti",
">",
"2.4126",
")",
"and",
"(",
"obj_sti",
"<",
"2.9213",
")",
")",
":",
"m_class",
"=",
"'M7'",
"if",
"(",
"(",
"obj_sti",
">",
"2.9213",
")",
"and",
"(",
"obj_sti",
"<",
"3.2418",
")",
")",
":",
"m_class",
"=",
"'M8'",
"if",
"(",
"(",
"obj_sti",
">",
"3.2418",
")",
"and",
"(",
"obj_sti",
"<",
"3.4559",
")",
")",
":",
"m_class",
"=",
"'M9'",
"else",
":",
"m_class",
"=",
"None",
"return",
"m_class",
",",
"obj_sti",
",",
"obj_sts"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
color_classification
|
This calculates rough star type classifications based on star colors
in the ugrizJHK bands.
Uses the output from `color_features` and `coord_features`. By default,
`color_features` will use dereddened colors, as are expected by most
relations here.
Based on the color cuts from:
- SDSS SEGUE (Yanny+ 2009)
- SDSS QSO catalog (Schneider+ 2007)
- SDSS RR Lyrae catalog (Sesar+ 2011)
- SDSS M-dwarf catalog (West+ 2008)
- Helmi+ 2003
- Bochanski+ 2014
Parameters
----------
colorfeatures : dict
This is the dict produced by the `color_features` function.
pmfeatures : dict
This is the dict produced by the `coord_features` function.
Returns
-------
dict
A dict containing all of the possible classes this object can belong to
as a list in the `color_classes` key, and values of the various color
indices used to arrive at that conclusion as the other keys. Note that
if either `colorfeatures` or `pmfeatures` is empty or None, the bare
(empty) `possible_classes` list is returned instead of a dict.
|
astrobase/varclass/starfeatures.py
|
def color_classification(colorfeatures, pmfeatures):
'''This calculates rough star type classifications based on star colors
in the ugrizJHK bands.
Uses the output from `color_features` and `coord_features`. By default,
`color_features` will use dereddened colors, as are expected by most
relations here.
Based on the color cuts from:
- SDSS SEGUE (Yanny+ 2009)
- SDSS QSO catalog (Schneider+ 2007)
- SDSS RR Lyrae catalog (Sesar+ 2011)
- SDSS M-dwarf catalog (West+ 2008)
- Helmi+ 2003
- Bochanski+ 2014
Parameters
----------
colorfeatures : dict
This is the dict produced by the `color_features` function.
pmfeatures : dict
This is the dict produced by the `coord_features` function.
Returns
-------
dict
A dict containing all of the possible classes this object can belong to
as a list in the `color_classes` key, and values of the various color
indices used to arrive at that conclusion as the other keys. Note that
if either `colorfeatures` or `pmfeatures` is empty or None, the bare
(empty) `possible_classes` list is returned instead of a dict.
'''
possible_classes = []
if not colorfeatures:
return possible_classes
if not pmfeatures:
return possible_classes
# dered mags
if ( ('dered_sdssu' in colorfeatures) and
(colorfeatures['dered_sdssu'] is not None) and
(np.isfinite(colorfeatures['dered_sdssu'])) ):
u = colorfeatures['dered_sdssu']
else:
u = np.nan
if ( ('dered_sdssg' in colorfeatures) and
(colorfeatures['dered_sdssg'] is not None) and
(np.isfinite(colorfeatures['dered_sdssg'])) ):
g = colorfeatures['dered_sdssg']
else:
g = np.nan
if ( ('dered_sdssr' in colorfeatures) and
(colorfeatures['dered_sdssr'] is not None) and
(np.isfinite(colorfeatures['dered_sdssr'])) ):
r = colorfeatures['dered_sdssr']
else:
r = np.nan
if ( ('dered_sdssi' in colorfeatures) and
(colorfeatures['dered_sdssi'] is not None) and
(np.isfinite(colorfeatures['dered_sdssi'])) ):
i = colorfeatures['dered_sdssi']
else:
i = np.nan
if ( ('dered_sdssz' in colorfeatures) and
(colorfeatures['dered_sdssz'] is not None) and
(np.isfinite(colorfeatures['dered_sdssz'])) ):
z = colorfeatures['dered_sdssz']
else:
z = np.nan
if ( ('dered_jmag' in colorfeatures) and
(colorfeatures['dered_jmag'] is not None) and
(np.isfinite(colorfeatures['dered_jmag'])) ):
j = colorfeatures['dered_jmag']
else:
j = np.nan
if ( ('dered_hmag' in colorfeatures) and
(colorfeatures['dered_hmag'] is not None) and
(np.isfinite(colorfeatures['dered_hmag'])) ):
h = colorfeatures['dered_hmag']
else:
h = np.nan
if ( ('dered_kmag' in colorfeatures) and
(colorfeatures['dered_kmag'] is not None) and
(np.isfinite(colorfeatures['dered_kmag'])) ):
k = colorfeatures['dered_kmag']
else:
k = np.nan
# measured mags
if 'sdssu' in colorfeatures and colorfeatures['sdssu'] is not None:
um = colorfeatures['sdssu']
else:
um = np.nan
if 'sdssg' in colorfeatures and colorfeatures['sdssg'] is not None:
gm = colorfeatures['sdssg']
else:
gm = np.nan
if 'sdssr' in colorfeatures and colorfeatures['sdssr'] is not None:
rm = colorfeatures['sdssr']
else:
rm = np.nan
if 'sdssi' in colorfeatures and colorfeatures['sdssi'] is not None:
im = colorfeatures['sdssi']
else:
im = np.nan
if 'sdssz' in colorfeatures and colorfeatures['sdssz'] is not None:
zm = colorfeatures['sdssz']
else:
zm = np.nan
if 'jmag' in colorfeatures and colorfeatures['jmag'] is not None:
jm = colorfeatures['jmag']
else:
jm = np.nan
if 'hmag' in colorfeatures and colorfeatures['hmag'] is not None:
hm = colorfeatures['hmag']
else:
hm = np.nan
if 'kmag' in colorfeatures and colorfeatures['kmag'] is not None:
km = colorfeatures['kmag']
else:
km = np.nan
# reduced proper motion
rpmj = pmfeatures['rpmj'] if np.isfinite(pmfeatures['rpmj']) else None
# now generate the various color indices
# color-gravity index
if (np.isfinite(u) and np.isfinite(g) and
np.isfinite(r) and np.isfinite(i) and
np.isfinite(z)):
v_color = 0.283*(u-g)-0.354*(g-r)+0.455*(r-i)+0.766*(i-z)
else:
v_color = np.nan
# metallicity index p1
if (np.isfinite(u) and np.isfinite(g) and np.isfinite(r)):
p1_color = 0.91*(u-g)+0.415*(g-r)-1.28
else:
p1_color = np.nan
# metallicity index l
if (np.isfinite(u) and np.isfinite(g) and
np.isfinite(r) and np.isfinite(i)):
l_color = -0.436*u + 1.129*g - 0.119*r - 0.574*i + 0.1984
else:
l_color = np.nan
# metallicity index s
if (np.isfinite(u) and np.isfinite(g) and np.isfinite(r)):
s_color = -0.249*u + 0.794*g - 0.555*r + 0.124
else:
s_color = np.nan
# RR Lyrae ug and gr indexes
if (np.isfinite(u) and np.isfinite(g) and np.isfinite(r)):
d_ug = (u-g) + 0.67*(g-r) - 1.07
d_gr = 0.45*(u-g) - (g-r) - 0.12
else:
d_ug, d_gr = np.nan, np.nan
# check the M subtype
m_subtype, m_sti, m_sts = mdwarf_subtype_from_sdsscolor(r-i, i-z)
# now check if this is a likely M dwarf
if m_subtype and rpmj and rpmj > 1.0:
possible_classes.append('d' + m_subtype)
# white dwarf
if ( np.isfinite(u) and np.isfinite(g) and np.isfinite(r) and
((g-r) < -0.2) and ((g-r) > -1.0) and
((u-g) < 0.7) and ((u-g) > -1) and
((u-g+2*(g-r)) < -0.1) ):
possible_classes.append('WD/sdO/sdB')
# A/BHB/BStrg
if ( np.isfinite(u) and np.isfinite(g) and np.isfinite(r) and
((u-g) < 1.5) and ((u-g) > 0.8) and
((g-r) < 0.2) and ((g-r) > -0.5) ):
possible_classes.append('A/BHB/blustrg')
# F turnoff/sub-dwarf
if ( (np.isfinite(p1_color) and np.isfinite(p1_color) and
np.isfinite(u) and np.isfinite(g) and np.isfinite(r) ) and
(p1_color < -0.25) and (p1_color > -0.7) and
((u-g) < 1.4) and ((u-g) > 0.4) and
((g-r) < 0.7) and ((g-r) > 0.2) ):
possible_classes.append('Fturnoff/sdF')
# low metallicity
if ( (np.isfinite(u) and np.isfinite(g) and np.isfinite(r) and
np.isfinite(l_color)) and
((g-r) < 0.75) and ((g-r) > -0.5) and
((u-g) < 3.0) and ((u-g) > 0.6) and
(l_color > 0.135) ):
possible_classes.append('lowmetal')
# low metallicity giants from Helmi+ 2003
if ( (np.isfinite(p1_color) and np.isfinite(s_color)) and
(-0.1 < p1_color < 0.6) and (s_color > 0.05) ):
possible_classes.append('lowmetalgiant')
# F/G star
if ( (np.isfinite(g) and np.isfinite(g) and np.isfinite(r)) and
((g-r) < 0.48) and ((g-r) > 0.2) ):
possible_classes.append('F/G')
# G dwarf
if ( (np.isfinite(g) and np.isfinite(r)) and
((g-r) < 0.55) and ((g-r) > 0.48) ):
possible_classes.append('dG')
# K giant
if ( (np.isfinite(u) and np.isfinite(g) and
np.isfinite(r) and np.isfinite(i) and
np.isfinite(l_color)) and
((g-r) > 0.35) and ((g-r) < 0.7) and
(l_color > 0.07) and ((u-g) > 0.7) and ((u-g) < 4.0) and
((r-i) > 0.15) and ((r-i) < 0.6) ):
possible_classes.append('gK')
# AGB
if ( (np.isfinite(u) and np.isfinite(g) and
np.isfinite(r) and np.isfinite(s_color)) and
((u-g) < 3.5) and ((u-g) > 2.5) and
((g-r) < 1.3) and ((g-r) > 0.9) and
(s_color < -0.06) ):
possible_classes.append('AGB')
# K dwarf
if ( (np.isfinite(g) and np.isfinite(r)) and
((g-r) < 0.75) and ((g-r) > 0.55) ):
possible_classes.append('dK')
# M subdwarf
if ( (np.isfinite(g) and np.isfinite(r) and np.isfinite(i)) and
((g-r) > 1.6) and ((r-i) < 1.3) and ((r-i) > 0.95) ):
possible_classes.append('sdM')
# M giant colors from Bochanski+ 2014
if ( (np.isfinite(j) and np.isfinite(h) and np.isfinite(k) and
np.isfinite(g) and np.isfinite(i)) and
((j-k) > 1.02) and
((j-h) < (0.561*(j-k) + 0.46)) and
((j-h) > (0.561*(j-k) + 0.14)) and
((g-i) > (0.932*(i-k) - 0.872)) ):
possible_classes.append('gM')
# MS+WD pair
if ( (np.isfinite(um) and np.isfinite(gm) and
np.isfinite(rm) and np.isfinite(im)) and
((um-gm) < 2.25) and ((gm-rm) > -0.2) and
((gm-rm) < 1.2) and ((rm-im) > 0.5) and
((rm-im) < 2.0) and
((gm-rm) > (-19.78*(rm-im)+11.13)) and
((gm-rm) < (0.95*(rm-im)+0.5)) ):
possible_classes.append('MSWD')
# brown dwarf
if ( (np.isfinite(um) and np.isfinite(gm) and np.isfinite(rm) and
np.isfinite(im) and np.isfinite(zm)) and
(zm < 19.5) and (um > 21.0) and (gm > 22.0) and
(rm > 21.0) and ((im - zm) > 1.7) ):
possible_classes.append('BD')
# RR Lyrae candidate
if ( (np.isfinite(u) and np.isfinite(g) and np.isfinite(r) and
np.isfinite(i) and np.isfinite(z) and np.isfinite(d_ug) and
np.isfinite(d_gr)) and
((u-g) > 0.98) and ((u-g) < 1.3) and
(d_ug > -0.05) and (d_ug < 0.35) and
(d_gr > 0.06) and (d_gr < 0.55) and
((r-i) > -0.15) and ((r-i) < 0.22) and
((i-z) > -0.21) and ((i-z) < 0.25) ):
possible_classes.append('RRL')
# QSO color
if ( (np.isfinite(u) and np.isfinite(g) and np.isfinite(r)) and
( (((u-g) > -0.1) and ((u-g) < 0.7) and
((g-r) > -0.3) and ((g-r) < 0.5)) or
((u-g) > (1.6*(g-r) + 1.34)) ) ):
possible_classes.append('QSO')
return {'color_classes':possible_classes,
'v_color':v_color,
'p1_color':p1_color,
's_color':s_color,
'l_color':l_color,
'd_ug':d_ug,
'd_gr':d_gr,
'm_sti':m_sti,
'm_sts':m_sts}
|
def color_classification(colorfeatures, pmfeatures):
'''This calculates rough star type classifications based on star colors
in the ugrizJHK bands.
Uses the output from `color_features` and `coord_features`. By default,
`color_features` will use dereddened colors, as are expected by most
relations here.
Based on the color cuts from:
- SDSS SEGUE (Yanny+ 2009)
- SDSS QSO catalog (Schneider+ 2007)
- SDSS RR Lyrae catalog (Sesar+ 2011)
- SDSS M-dwarf catalog (West+ 2008)
- Helmi+ 2003
- Bochanski+ 2014
Parameters
----------
colorfeatures : dict
This is the dict produced by the `color_features` function.
pmfeatures : dict
This is the dict produced by the `coord_features` function.
Returns
-------
dict
A dict containing all of the possible classes this object can belong to
as a list in the `color_classes` key, and values of the various color
indices used to arrive at that conclusion as the other keys. Note that
if either `colorfeatures` or `pmfeatures` is empty or None, the bare
(empty) `possible_classes` list is returned instead of a dict.
'''
possible_classes = []
if not colorfeatures:
return possible_classes
if not pmfeatures:
return possible_classes
# dered mags
if ( ('dered_sdssu' in colorfeatures) and
(colorfeatures['dered_sdssu'] is not None) and
(np.isfinite(colorfeatures['dered_sdssu'])) ):
u = colorfeatures['dered_sdssu']
else:
u = np.nan
if ( ('dered_sdssg' in colorfeatures) and
(colorfeatures['dered_sdssg'] is not None) and
(np.isfinite(colorfeatures['dered_sdssg'])) ):
g = colorfeatures['dered_sdssg']
else:
g = np.nan
if ( ('dered_sdssr' in colorfeatures) and
(colorfeatures['dered_sdssr'] is not None) and
(np.isfinite(colorfeatures['dered_sdssr'])) ):
r = colorfeatures['dered_sdssr']
else:
r = np.nan
if ( ('dered_sdssi' in colorfeatures) and
(colorfeatures['dered_sdssi'] is not None) and
(np.isfinite(colorfeatures['dered_sdssi'])) ):
i = colorfeatures['dered_sdssi']
else:
i = np.nan
if ( ('dered_sdssz' in colorfeatures) and
(colorfeatures['dered_sdssz'] is not None) and
(np.isfinite(colorfeatures['dered_sdssz'])) ):
z = colorfeatures['dered_sdssz']
else:
z = np.nan
if ( ('dered_jmag' in colorfeatures) and
(colorfeatures['dered_jmag'] is not None) and
(np.isfinite(colorfeatures['dered_jmag'])) ):
j = colorfeatures['dered_jmag']
else:
j = np.nan
if ( ('dered_hmag' in colorfeatures) and
(colorfeatures['dered_hmag'] is not None) and
(np.isfinite(colorfeatures['dered_hmag'])) ):
h = colorfeatures['dered_hmag']
else:
h = np.nan
if ( ('dered_kmag' in colorfeatures) and
(colorfeatures['dered_kmag'] is not None) and
(np.isfinite(colorfeatures['dered_kmag'])) ):
k = colorfeatures['dered_kmag']
else:
k = np.nan
# measured mags
if 'sdssu' in colorfeatures and colorfeatures['sdssu'] is not None:
um = colorfeatures['sdssu']
else:
um = np.nan
if 'sdssg' in colorfeatures and colorfeatures['sdssg'] is not None:
gm = colorfeatures['sdssg']
else:
gm = np.nan
if 'sdssr' in colorfeatures and colorfeatures['sdssr'] is not None:
rm = colorfeatures['sdssr']
else:
rm = np.nan
if 'sdssi' in colorfeatures and colorfeatures['sdssi'] is not None:
im = colorfeatures['sdssi']
else:
im = np.nan
if 'sdssz' in colorfeatures and colorfeatures['sdssz'] is not None:
zm = colorfeatures['sdssz']
else:
zm = np.nan
if 'jmag' in colorfeatures and colorfeatures['jmag'] is not None:
jm = colorfeatures['jmag']
else:
jm = np.nan
if 'hmag' in colorfeatures and colorfeatures['hmag'] is not None:
hm = colorfeatures['hmag']
else:
hm = np.nan
if 'kmag' in colorfeatures and colorfeatures['kmag'] is not None:
km = colorfeatures['kmag']
else:
km = np.nan
# reduced proper motion
rpmj = pmfeatures['rpmj'] if np.isfinite(pmfeatures['rpmj']) else None
# now generate the various color indices
# color-gravity index
if (np.isfinite(u) and np.isfinite(g) and
np.isfinite(r) and np.isfinite(i) and
np.isfinite(z)):
v_color = 0.283*(u-g)-0.354*(g-r)+0.455*(r-i)+0.766*(i-z)
else:
v_color = np.nan
# metallicity index p1
if (np.isfinite(u) and np.isfinite(g) and np.isfinite(r)):
p1_color = 0.91*(u-g)+0.415*(g-r)-1.28
else:
p1_color = np.nan
# metallicity index l
if (np.isfinite(u) and np.isfinite(g) and
np.isfinite(r) and np.isfinite(i)):
l_color = -0.436*u + 1.129*g - 0.119*r - 0.574*i + 0.1984
else:
l_color = np.nan
# metallicity index s
if (np.isfinite(u) and np.isfinite(g) and np.isfinite(r)):
s_color = -0.249*u + 0.794*g - 0.555*r + 0.124
else:
s_color = np.nan
# RR Lyrae ug and gr indexes
if (np.isfinite(u) and np.isfinite(g) and np.isfinite(r)):
d_ug = (u-g) + 0.67*(g-r) - 1.07
d_gr = 0.45*(u-g) - (g-r) - 0.12
else:
d_ug, d_gr = np.nan, np.nan
# check the M subtype
m_subtype, m_sti, m_sts = mdwarf_subtype_from_sdsscolor(r-i, i-z)
# now check if this is a likely M dwarf
if m_subtype and rpmj and rpmj > 1.0:
possible_classes.append('d' + m_subtype)
# white dwarf
if ( np.isfinite(u) and np.isfinite(g) and np.isfinite(r) and
((g-r) < -0.2) and ((g-r) > -1.0) and
((u-g) < 0.7) and ((u-g) > -1) and
((u-g+2*(g-r)) < -0.1) ):
possible_classes.append('WD/sdO/sdB')
# A/BHB/BStrg
if ( np.isfinite(u) and np.isfinite(g) and np.isfinite(r) and
((u-g) < 1.5) and ((u-g) > 0.8) and
((g-r) < 0.2) and ((g-r) > -0.5) ):
possible_classes.append('A/BHB/blustrg')
# F turnoff/sub-dwarf
if ( (np.isfinite(p1_color) and np.isfinite(p1_color) and
np.isfinite(u) and np.isfinite(g) and np.isfinite(r) ) and
(p1_color < -0.25) and (p1_color > -0.7) and
((u-g) < 1.4) and ((u-g) > 0.4) and
((g-r) < 0.7) and ((g-r) > 0.2) ):
possible_classes.append('Fturnoff/sdF')
# low metallicity
if ( (np.isfinite(u) and np.isfinite(g) and np.isfinite(r) and
np.isfinite(l_color)) and
((g-r) < 0.75) and ((g-r) > -0.5) and
((u-g) < 3.0) and ((u-g) > 0.6) and
(l_color > 0.135) ):
possible_classes.append('lowmetal')
# low metallicity giants from Helmi+ 2003
if ( (np.isfinite(p1_color) and np.isfinite(s_color)) and
(-0.1 < p1_color < 0.6) and (s_color > 0.05) ):
possible_classes.append('lowmetalgiant')
# F/G star
if ( (np.isfinite(g) and np.isfinite(g) and np.isfinite(r)) and
((g-r) < 0.48) and ((g-r) > 0.2) ):
possible_classes.append('F/G')
# G dwarf
if ( (np.isfinite(g) and np.isfinite(r)) and
((g-r) < 0.55) and ((g-r) > 0.48) ):
possible_classes.append('dG')
# K giant
if ( (np.isfinite(u) and np.isfinite(g) and
np.isfinite(r) and np.isfinite(i) and
np.isfinite(l_color)) and
((g-r) > 0.35) and ((g-r) < 0.7) and
(l_color > 0.07) and ((u-g) > 0.7) and ((u-g) < 4.0) and
((r-i) > 0.15) and ((r-i) < 0.6) ):
possible_classes.append('gK')
# AGB
if ( (np.isfinite(u) and np.isfinite(g) and
np.isfinite(r) and np.isfinite(s_color)) and
((u-g) < 3.5) and ((u-g) > 2.5) and
((g-r) < 1.3) and ((g-r) > 0.9) and
(s_color < -0.06) ):
possible_classes.append('AGB')
# K dwarf
if ( (np.isfinite(g) and np.isfinite(r)) and
((g-r) < 0.75) and ((g-r) > 0.55) ):
possible_classes.append('dK')
# M subdwarf
if ( (np.isfinite(g) and np.isfinite(r) and np.isfinite(i)) and
((g-r) > 1.6) and ((r-i) < 1.3) and ((r-i) > 0.95) ):
possible_classes.append('sdM')
# M giant colors from Bochanski+ 2014
if ( (np.isfinite(j) and np.isfinite(h) and np.isfinite(k) and
np.isfinite(g) and np.isfinite(i)) and
((j-k) > 1.02) and
((j-h) < (0.561*(j-k) + 0.46)) and
((j-h) > (0.561*(j-k) + 0.14)) and
((g-i) > (0.932*(i-k) - 0.872)) ):
possible_classes.append('gM')
# MS+WD pair
if ( (np.isfinite(um) and np.isfinite(gm) and
np.isfinite(rm) and np.isfinite(im)) and
((um-gm) < 2.25) and ((gm-rm) > -0.2) and
((gm-rm) < 1.2) and ((rm-im) > 0.5) and
((rm-im) < 2.0) and
((gm-rm) > (-19.78*(rm-im)+11.13)) and
((gm-rm) < (0.95*(rm-im)+0.5)) ):
possible_classes.append('MSWD')
# brown dwarf
if ( (np.isfinite(um) and np.isfinite(gm) and np.isfinite(rm) and
np.isfinite(im) and np.isfinite(zm)) and
(zm < 19.5) and (um > 21.0) and (gm > 22.0) and
(rm > 21.0) and ((im - zm) > 1.7) ):
possible_classes.append('BD')
# RR Lyrae candidate
if ( (np.isfinite(u) and np.isfinite(g) and np.isfinite(r) and
np.isfinite(i) and np.isfinite(z) and np.isfinite(d_ug) and
np.isfinite(d_gr)) and
((u-g) > 0.98) and ((u-g) < 1.3) and
(d_ug > -0.05) and (d_ug < 0.35) and
(d_gr > 0.06) and (d_gr < 0.55) and
((r-i) > -0.15) and ((r-i) < 0.22) and
((i-z) > -0.21) and ((i-z) < 0.25) ):
possible_classes.append('RRL')
# QSO color
if ( (np.isfinite(u) and np.isfinite(g) and np.isfinite(r)) and
( (((u-g) > -0.1) and ((u-g) < 0.7) and
((g-r) > -0.3) and ((g-r) < 0.5)) or
((u-g) > (1.6*(g-r) + 1.34)) ) ):
possible_classes.append('QSO')
return {'color_classes':possible_classes,
'v_color':v_color,
'p1_color':p1_color,
's_color':s_color,
'l_color':l_color,
'd_ug':d_ug,
'd_gr':d_gr,
'm_sti':m_sti,
'm_sts':m_sts}
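A hedged usage sketch for `color_classification`; the two input dicts are hypothetical stand-ins for the outputs of `color_features` and `coord_features`, with only dereddened g and r supplied:

from astrobase.varclass.starfeatures import color_classification

# hypothetical inputs: g-r = 0.35 falls in the F/G cut (0.2 < g-r < 0.48);
# every other cut fails because the remaining bands default to NaN
colorfeatures = {'dered_sdssg': 14.10, 'dered_sdssr': 13.75}
pmfeatures = {'rpmj': 0.5}
result = color_classification(colorfeatures, pmfeatures)
print(result['color_classes'])  # -> ['F/G']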
|
["This", "calculates", "rough", "star", "type", "classifications", "based", "on", "star", "colors", "in", "the", "ugrizJHK", "bands", "."] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/varclass/starfeatures.py#L804-L1110
|
[
"def",
"color_classification",
"(",
"colorfeatures",
",",
"pmfeatures",
")",
":",
"possible_classes",
"=",
"[",
"]",
"if",
"not",
"colorfeatures",
":",
"return",
"possible_classes",
"if",
"not",
"pmfeatures",
":",
"return",
"possible_classes",
"# dered mags",
"if",
"(",
"(",
"'dered_sdssu'",
"in",
"colorfeatures",
")",
"and",
"(",
"colorfeatures",
"[",
"'dered_sdssu'",
"]",
"is",
"not",
"None",
")",
"and",
"(",
"np",
".",
"isfinite",
"(",
"colorfeatures",
"[",
"'dered_sdssu'",
"]",
")",
")",
")",
":",
"u",
"=",
"colorfeatures",
"[",
"'dered_sdssu'",
"]",
"else",
":",
"u",
"=",
"np",
".",
"nan",
"if",
"(",
"(",
"'dered_sdssg'",
"in",
"colorfeatures",
")",
"and",
"(",
"colorfeatures",
"[",
"'dered_sdssg'",
"]",
"is",
"not",
"None",
")",
"and",
"(",
"np",
".",
"isfinite",
"(",
"colorfeatures",
"[",
"'dered_sdssg'",
"]",
")",
")",
")",
":",
"g",
"=",
"colorfeatures",
"[",
"'dered_sdssg'",
"]",
"else",
":",
"g",
"=",
"np",
".",
"nan",
"if",
"(",
"(",
"'dered_sdssr'",
"in",
"colorfeatures",
")",
"and",
"(",
"colorfeatures",
"[",
"'dered_sdssr'",
"]",
"is",
"not",
"None",
")",
"and",
"(",
"np",
".",
"isfinite",
"(",
"colorfeatures",
"[",
"'dered_sdssr'",
"]",
")",
")",
")",
":",
"r",
"=",
"colorfeatures",
"[",
"'dered_sdssr'",
"]",
"else",
":",
"r",
"=",
"np",
".",
"nan",
"if",
"(",
"(",
"'dered_sdssi'",
"in",
"colorfeatures",
")",
"and",
"(",
"colorfeatures",
"[",
"'dered_sdssi'",
"]",
"is",
"not",
"None",
")",
"and",
"(",
"np",
".",
"isfinite",
"(",
"colorfeatures",
"[",
"'dered_sdssi'",
"]",
")",
")",
")",
":",
"i",
"=",
"colorfeatures",
"[",
"'dered_sdssi'",
"]",
"else",
":",
"i",
"=",
"np",
".",
"nan",
"if",
"(",
"(",
"'dered_sdssz'",
"in",
"colorfeatures",
")",
"and",
"(",
"colorfeatures",
"[",
"'dered_sdssz'",
"]",
"is",
"not",
"None",
")",
"and",
"(",
"np",
".",
"isfinite",
"(",
"colorfeatures",
"[",
"'dered_sdssz'",
"]",
")",
")",
")",
":",
"z",
"=",
"colorfeatures",
"[",
"'dered_sdssz'",
"]",
"else",
":",
"z",
"=",
"np",
".",
"nan",
"if",
"(",
"(",
"'dered_jmag'",
"in",
"colorfeatures",
")",
"and",
"(",
"colorfeatures",
"[",
"'dered_jmag'",
"]",
"is",
"not",
"None",
")",
"and",
"(",
"np",
".",
"isfinite",
"(",
"colorfeatures",
"[",
"'dered_jmag'",
"]",
")",
")",
")",
":",
"j",
"=",
"colorfeatures",
"[",
"'dered_jmag'",
"]",
"else",
":",
"j",
"=",
"np",
".",
"nan",
"if",
"(",
"(",
"'dered_hmag'",
"in",
"colorfeatures",
")",
"and",
"(",
"colorfeatures",
"[",
"'dered_hmag'",
"]",
"is",
"not",
"None",
")",
"and",
"(",
"np",
".",
"isfinite",
"(",
"colorfeatures",
"[",
"'dered_hmag'",
"]",
")",
")",
")",
":",
"h",
"=",
"colorfeatures",
"[",
"'dered_hmag'",
"]",
"else",
":",
"h",
"=",
"np",
".",
"nan",
"if",
"(",
"(",
"'dered_kmag'",
"in",
"colorfeatures",
")",
"and",
"(",
"colorfeatures",
"[",
"'dered_kmag'",
"]",
"is",
"not",
"None",
")",
"and",
"(",
"np",
".",
"isfinite",
"(",
"colorfeatures",
"[",
"'dered_kmag'",
"]",
")",
")",
")",
":",
"k",
"=",
"colorfeatures",
"[",
"'dered_kmag'",
"]",
"else",
":",
"k",
"=",
"np",
".",
"nan",
"# measured mags",
"if",
"'sdssu'",
"in",
"colorfeatures",
"and",
"colorfeatures",
"[",
"'sdssu'",
"]",
"is",
"not",
"None",
":",
"um",
"=",
"colorfeatures",
"[",
"'sdssu'",
"]",
"else",
":",
"um",
"=",
"np",
".",
"nan",
"if",
"'sdssg'",
"in",
"colorfeatures",
"and",
"colorfeatures",
"[",
"'sdssg'",
"]",
"is",
"not",
"None",
":",
"gm",
"=",
"colorfeatures",
"[",
"'sdssg'",
"]",
"else",
":",
"gm",
"=",
"np",
".",
"nan",
"if",
"'sdssr'",
"in",
"colorfeatures",
"and",
"colorfeatures",
"[",
"'sdssr'",
"]",
"is",
"not",
"None",
":",
"rm",
"=",
"colorfeatures",
"[",
"'sdssr'",
"]",
"else",
":",
"rm",
"=",
"np",
".",
"nan",
"if",
"'sdssi'",
"in",
"colorfeatures",
"and",
"colorfeatures",
"[",
"'sdssi'",
"]",
"is",
"not",
"None",
":",
"im",
"=",
"colorfeatures",
"[",
"'sdssi'",
"]",
"else",
":",
"im",
"=",
"np",
".",
"nan",
"if",
"'sdssz'",
"in",
"colorfeatures",
"and",
"colorfeatures",
"[",
"'sdssz'",
"]",
"is",
"not",
"None",
":",
"zm",
"=",
"colorfeatures",
"[",
"'sdssz'",
"]",
"else",
":",
"zm",
"=",
"np",
".",
"nan",
"if",
"'jmag'",
"in",
"colorfeatures",
"and",
"colorfeatures",
"[",
"'jmag'",
"]",
"is",
"not",
"None",
":",
"jm",
"=",
"colorfeatures",
"[",
"'jmag'",
"]",
"else",
":",
"jm",
"=",
"np",
".",
"nan",
"if",
"'hmag'",
"in",
"colorfeatures",
"and",
"colorfeatures",
"[",
"'hmag'",
"]",
"is",
"not",
"None",
":",
"hm",
"=",
"colorfeatures",
"[",
"'hmag'",
"]",
"else",
":",
"hm",
"=",
"np",
".",
"nan",
"if",
"'kmag'",
"in",
"colorfeatures",
"and",
"colorfeatures",
"[",
"'kmag'",
"]",
"is",
"not",
"None",
":",
"km",
"=",
"colorfeatures",
"[",
"'kmag'",
"]",
"else",
":",
"km",
"=",
"np",
".",
"nan",
"# reduced proper motion",
"rpmj",
"=",
"pmfeatures",
"[",
"'rpmj'",
"]",
"if",
"np",
".",
"isfinite",
"(",
"pmfeatures",
"[",
"'rpmj'",
"]",
")",
"else",
"None",
"# now generate the various color indices",
"# color-gravity index",
"if",
"(",
"np",
".",
"isfinite",
"(",
"u",
")",
"and",
"np",
".",
"isfinite",
"(",
"g",
")",
"and",
"np",
".",
"isfinite",
"(",
"r",
")",
"and",
"np",
".",
"isfinite",
"(",
"i",
")",
"and",
"np",
".",
"isfinite",
"(",
"z",
")",
")",
":",
"v_color",
"=",
"0.283",
"*",
"(",
"u",
"-",
"g",
")",
"-",
"0.354",
"*",
"(",
"g",
"-",
"r",
")",
"+",
"0.455",
"*",
"(",
"r",
"-",
"i",
")",
"+",
"0.766",
"*",
"(",
"i",
"-",
"z",
")",
"else",
":",
"v_color",
"=",
"np",
".",
"nan",
"# metallicity index p1",
"if",
"(",
"np",
".",
"isfinite",
"(",
"u",
")",
"and",
"np",
".",
"isfinite",
"(",
"g",
")",
"and",
"np",
".",
"isfinite",
"(",
"r",
")",
")",
":",
"p1_color",
"=",
"0.91",
"*",
"(",
"u",
"-",
"g",
")",
"+",
"0.415",
"*",
"(",
"g",
"-",
"r",
")",
"-",
"1.28",
"else",
":",
"p1_color",
"=",
"np",
".",
"nan",
"# metallicity index l",
"if",
"(",
"np",
".",
"isfinite",
"(",
"u",
")",
"and",
"np",
".",
"isfinite",
"(",
"g",
")",
"and",
"np",
".",
"isfinite",
"(",
"r",
")",
"and",
"np",
".",
"isfinite",
"(",
"i",
")",
")",
":",
"l_color",
"=",
"-",
"0.436",
"*",
"u",
"+",
"1.129",
"*",
"g",
"-",
"0.119",
"*",
"r",
"-",
"0.574",
"*",
"i",
"+",
"0.1984",
"else",
":",
"l_color",
"=",
"np",
".",
"nan",
"# metallicity index s",
"if",
"(",
"np",
".",
"isfinite",
"(",
"u",
")",
"and",
"np",
".",
"isfinite",
"(",
"g",
")",
"and",
"np",
".",
"isfinite",
"(",
"r",
")",
")",
":",
"s_color",
"=",
"-",
"0.249",
"*",
"u",
"+",
"0.794",
"*",
"g",
"-",
"0.555",
"*",
"r",
"+",
"0.124",
"else",
":",
"s_color",
"=",
"np",
".",
"nan",
"# RR Lyrae ug and gr indexes",
"if",
"(",
"np",
".",
"isfinite",
"(",
"u",
")",
"and",
"np",
".",
"isfinite",
"(",
"g",
")",
"and",
"np",
".",
"isfinite",
"(",
"r",
")",
")",
":",
"d_ug",
"=",
"(",
"u",
"-",
"g",
")",
"+",
"0.67",
"*",
"(",
"g",
"-",
"r",
")",
"-",
"1.07",
"d_gr",
"=",
"0.45",
"*",
"(",
"u",
"-",
"g",
")",
"-",
"(",
"g",
"-",
"r",
")",
"-",
"0.12",
"else",
":",
"d_ug",
",",
"d_gr",
"=",
"np",
".",
"nan",
",",
"np",
".",
"nan",
"# check the M subtype",
"m_subtype",
",",
"m_sti",
",",
"m_sts",
"=",
"mdwarf_subtype_from_sdsscolor",
"(",
"r",
"-",
"i",
",",
"i",
"-",
"z",
")",
"# now check if this is a likely M dwarf",
"if",
"m_subtype",
"and",
"rpmj",
"and",
"rpmj",
">",
"1.0",
":",
"possible_classes",
".",
"append",
"(",
"'d'",
"+",
"m_subtype",
")",
"# white dwarf",
"if",
"(",
"np",
".",
"isfinite",
"(",
"u",
")",
"and",
"np",
".",
"isfinite",
"(",
"g",
")",
"and",
"np",
".",
"isfinite",
"(",
"r",
")",
"and",
"(",
"(",
"g",
"-",
"r",
")",
"<",
"-",
"0.2",
")",
"and",
"(",
"(",
"g",
"-",
"r",
")",
">",
"-",
"1.0",
")",
"and",
"(",
"(",
"u",
"-",
"g",
")",
"<",
"0.7",
")",
"and",
"(",
"(",
"u",
"-",
"g",
")",
">",
"-",
"1",
")",
"and",
"(",
"(",
"u",
"-",
"g",
"+",
"2",
"*",
"(",
"g",
"-",
"r",
")",
")",
"<",
"-",
"0.1",
")",
")",
":",
"possible_classes",
".",
"append",
"(",
"'WD/sdO/sdB'",
")",
"# A/BHB/BStrg",
"if",
"(",
"np",
".",
"isfinite",
"(",
"u",
")",
"and",
"np",
".",
"isfinite",
"(",
"g",
")",
"and",
"np",
".",
"isfinite",
"(",
"r",
")",
"and",
"(",
"(",
"u",
"-",
"g",
")",
"<",
"1.5",
")",
"and",
"(",
"(",
"u",
"-",
"g",
")",
">",
"0.8",
")",
"and",
"(",
"(",
"g",
"-",
"r",
")",
"<",
"0.2",
")",
"and",
"(",
"(",
"g",
"-",
"r",
")",
">",
"-",
"0.5",
")",
")",
":",
"possible_classes",
".",
"append",
"(",
"'A/BHB/blustrg'",
")",
"# F turnoff/sub-dwarf",
"if",
"(",
"(",
"np",
".",
"isfinite",
"(",
"p1_color",
")",
"and",
"np",
".",
"isfinite",
"(",
"p1_color",
")",
"and",
"np",
".",
"isfinite",
"(",
"u",
")",
"and",
"np",
".",
"isfinite",
"(",
"g",
")",
"and",
"np",
".",
"isfinite",
"(",
"r",
")",
")",
"and",
"(",
"p1_color",
"<",
"-",
"0.25",
")",
"and",
"(",
"p1_color",
">",
"-",
"0.7",
")",
"and",
"(",
"(",
"u",
"-",
"g",
")",
"<",
"1.4",
")",
"and",
"(",
"(",
"u",
"-",
"g",
")",
">",
"0.4",
")",
"and",
"(",
"(",
"g",
"-",
"r",
")",
"<",
"0.7",
")",
"and",
"(",
"(",
"g",
"-",
"r",
")",
">",
"0.2",
")",
")",
":",
"possible_classes",
".",
"append",
"(",
"'Fturnoff/sdF'",
")",
"# low metallicity",
"if",
"(",
"(",
"np",
".",
"isfinite",
"(",
"u",
")",
"and",
"np",
".",
"isfinite",
"(",
"g",
")",
"and",
"np",
".",
"isfinite",
"(",
"r",
")",
"and",
"np",
".",
"isfinite",
"(",
"l_color",
")",
")",
"and",
"(",
"(",
"g",
"-",
"r",
")",
"<",
"0.75",
")",
"and",
"(",
"(",
"g",
"-",
"r",
")",
">",
"-",
"0.5",
")",
"and",
"(",
"(",
"u",
"-",
"g",
")",
"<",
"3.0",
")",
"and",
"(",
"(",
"u",
"-",
"g",
")",
">",
"0.6",
")",
"and",
"(",
"l_color",
">",
"0.135",
")",
")",
":",
"possible_classes",
".",
"append",
"(",
"'lowmetal'",
")",
"# low metallicity giants from Helmi+ 2003",
"if",
"(",
"(",
"np",
".",
"isfinite",
"(",
"p1_color",
")",
"and",
"np",
".",
"isfinite",
"(",
"s_color",
")",
")",
"and",
"(",
"-",
"0.1",
"<",
"p1_color",
"<",
"0.6",
")",
"and",
"(",
"s_color",
">",
"0.05",
")",
")",
":",
"possible_classes",
".",
"append",
"(",
"'lowmetalgiant'",
")",
"# F/G star",
"if",
"(",
"(",
"np",
".",
"isfinite",
"(",
"g",
")",
"and",
"np",
".",
"isfinite",
"(",
"g",
")",
"and",
"np",
".",
"isfinite",
"(",
"r",
")",
")",
"and",
"(",
"(",
"g",
"-",
"r",
")",
"<",
"0.48",
")",
"and",
"(",
"(",
"g",
"-",
"r",
")",
">",
"0.2",
")",
")",
":",
"possible_classes",
".",
"append",
"(",
"'F/G'",
")",
"# G dwarf",
"if",
"(",
"(",
"np",
".",
"isfinite",
"(",
"g",
")",
"and",
"np",
".",
"isfinite",
"(",
"r",
")",
")",
"and",
"(",
"(",
"g",
"-",
"r",
")",
"<",
"0.55",
")",
"and",
"(",
"(",
"g",
"-",
"r",
")",
">",
"0.48",
")",
")",
":",
"possible_classes",
".",
"append",
"(",
"'dG'",
")",
"# K giant",
"if",
"(",
"(",
"np",
".",
"isfinite",
"(",
"u",
")",
"and",
"np",
".",
"isfinite",
"(",
"g",
")",
"and",
"np",
".",
"isfinite",
"(",
"r",
")",
"and",
"np",
".",
"isfinite",
"(",
"i",
")",
"and",
"np",
".",
"isfinite",
"(",
"l_color",
")",
")",
"and",
"(",
"(",
"g",
"-",
"r",
")",
">",
"0.35",
")",
"and",
"(",
"(",
"g",
"-",
"r",
")",
"<",
"0.7",
")",
"and",
"(",
"l_color",
">",
"0.07",
")",
"and",
"(",
"(",
"u",
"-",
"g",
")",
">",
"0.7",
")",
"and",
"(",
"(",
"u",
"-",
"g",
")",
"<",
"4.0",
")",
"and",
"(",
"(",
"r",
"-",
"i",
")",
">",
"0.15",
")",
"and",
"(",
"(",
"r",
"-",
"i",
")",
"<",
"0.6",
")",
")",
":",
"possible_classes",
".",
"append",
"(",
"'gK'",
")",
"# AGB",
"if",
"(",
"(",
"np",
".",
"isfinite",
"(",
"u",
")",
"and",
"np",
".",
"isfinite",
"(",
"g",
")",
"and",
"np",
".",
"isfinite",
"(",
"r",
")",
"and",
"np",
".",
"isfinite",
"(",
"s_color",
")",
")",
"and",
"(",
"(",
"u",
"-",
"g",
")",
"<",
"3.5",
")",
"and",
"(",
"(",
"u",
"-",
"g",
")",
">",
"2.5",
")",
"and",
"(",
"(",
"g",
"-",
"r",
")",
"<",
"1.3",
")",
"and",
"(",
"(",
"g",
"-",
"r",
")",
">",
"0.9",
")",
"and",
"(",
"s_color",
"<",
"-",
"0.06",
")",
")",
":",
"possible_classes",
".",
"append",
"(",
"'AGB'",
")",
"# K dwarf",
"if",
"(",
"(",
"np",
".",
"isfinite",
"(",
"g",
")",
"and",
"np",
".",
"isfinite",
"(",
"r",
")",
")",
"and",
"(",
"(",
"g",
"-",
"r",
")",
"<",
"0.75",
")",
"and",
"(",
"(",
"g",
"-",
"r",
")",
">",
"0.55",
")",
")",
":",
"possible_classes",
".",
"append",
"(",
"'dK'",
")",
"# M subdwarf",
"if",
"(",
"(",
"np",
".",
"isfinite",
"(",
"g",
")",
"and",
"np",
".",
"isfinite",
"(",
"r",
")",
"and",
"np",
".",
"isfinite",
"(",
"i",
")",
")",
"and",
"(",
"(",
"g",
"-",
"r",
")",
">",
"1.6",
")",
"and",
"(",
"(",
"r",
"-",
"i",
")",
"<",
"1.3",
")",
"and",
"(",
"(",
"r",
"-",
"i",
")",
">",
"0.95",
")",
")",
":",
"possible_classes",
".",
"append",
"(",
"'sdM'",
")",
"# M giant colors from Bochanski+ 2014",
"if",
"(",
"(",
"np",
".",
"isfinite",
"(",
"j",
")",
"and",
"np",
".",
"isfinite",
"(",
"h",
")",
"and",
"np",
".",
"isfinite",
"(",
"k",
")",
"and",
"np",
".",
"isfinite",
"(",
"g",
")",
"and",
"np",
".",
"isfinite",
"(",
"i",
")",
")",
"and",
"(",
"(",
"j",
"-",
"k",
")",
">",
"1.02",
")",
"and",
"(",
"(",
"j",
"-",
"h",
")",
"<",
"(",
"0.561",
"*",
"(",
"j",
"-",
"k",
")",
"+",
"0.46",
")",
")",
"and",
"(",
"(",
"j",
"-",
"h",
")",
">",
"(",
"0.561",
"*",
"(",
"j",
"-",
"k",
")",
"+",
"0.14",
")",
")",
"and",
"(",
"(",
"g",
"-",
"i",
")",
">",
"(",
"0.932",
"*",
"(",
"i",
"-",
"k",
")",
"-",
"0.872",
")",
")",
")",
":",
"possible_classes",
".",
"append",
"(",
"'gM'",
")",
"# MS+WD pair",
"if",
"(",
"(",
"np",
".",
"isfinite",
"(",
"um",
")",
"and",
"np",
".",
"isfinite",
"(",
"gm",
")",
"and",
"np",
".",
"isfinite",
"(",
"rm",
")",
"and",
"np",
".",
"isfinite",
"(",
"im",
")",
")",
"and",
"(",
"(",
"um",
"-",
"gm",
")",
"<",
"2.25",
")",
"and",
"(",
"(",
"gm",
"-",
"rm",
")",
">",
"-",
"0.2",
")",
"and",
"(",
"(",
"gm",
"-",
"rm",
")",
"<",
"1.2",
")",
"and",
"(",
"(",
"rm",
"-",
"im",
")",
">",
"0.5",
")",
"and",
"(",
"(",
"rm",
"-",
"im",
")",
"<",
"2.0",
")",
"and",
"(",
"(",
"gm",
"-",
"rm",
")",
">",
"(",
"-",
"19.78",
"*",
"(",
"rm",
"-",
"im",
")",
"+",
"11.13",
")",
")",
"and",
"(",
"(",
"gm",
"-",
"rm",
")",
"<",
"(",
"0.95",
"*",
"(",
"rm",
"-",
"im",
")",
"+",
"0.5",
")",
")",
")",
":",
"possible_classes",
".",
"append",
"(",
"'MSWD'",
")",
"# brown dwarf",
"if",
"(",
"(",
"np",
".",
"isfinite",
"(",
"um",
")",
"and",
"np",
".",
"isfinite",
"(",
"gm",
")",
"and",
"np",
".",
"isfinite",
"(",
"rm",
")",
"and",
"np",
".",
"isfinite",
"(",
"im",
")",
"and",
"np",
".",
"isfinite",
"(",
"zm",
")",
")",
"and",
"(",
"zm",
"<",
"19.5",
")",
"and",
"(",
"um",
">",
"21.0",
")",
"and",
"(",
"gm",
">",
"22.0",
")",
"and",
"(",
"rm",
">",
"21.0",
")",
"and",
"(",
"(",
"im",
"-",
"zm",
")",
">",
"1.7",
")",
")",
":",
"possible_classes",
".",
"append",
"(",
"'BD'",
")",
"# RR Lyrae candidate",
"if",
"(",
"(",
"np",
".",
"isfinite",
"(",
"u",
")",
"and",
"np",
".",
"isfinite",
"(",
"g",
")",
"and",
"np",
".",
"isfinite",
"(",
"r",
")",
"and",
"np",
".",
"isfinite",
"(",
"i",
")",
"and",
"np",
".",
"isfinite",
"(",
"z",
")",
"and",
"np",
".",
"isfinite",
"(",
"d_ug",
")",
"and",
"np",
".",
"isfinite",
"(",
"d_gr",
")",
")",
"and",
"(",
"(",
"u",
"-",
"g",
")",
">",
"0.98",
")",
"and",
"(",
"(",
"u",
"-",
"g",
")",
"<",
"1.3",
")",
"and",
"(",
"d_ug",
">",
"-",
"0.05",
")",
"and",
"(",
"d_ug",
"<",
"0.35",
")",
"and",
"(",
"d_gr",
">",
"0.06",
")",
"and",
"(",
"d_gr",
"<",
"0.55",
")",
"and",
"(",
"(",
"r",
"-",
"i",
")",
">",
"-",
"0.15",
")",
"and",
"(",
"(",
"r",
"-",
"i",
")",
"<",
"0.22",
")",
"and",
"(",
"(",
"i",
"-",
"z",
")",
">",
"-",
"0.21",
")",
"and",
"(",
"(",
"i",
"-",
"z",
")",
"<",
"0.25",
")",
")",
":",
"possible_classes",
".",
"append",
"(",
"'RRL'",
")",
"# QSO color",
"if",
"(",
"(",
"np",
".",
"isfinite",
"(",
"u",
")",
"and",
"np",
".",
"isfinite",
"(",
"g",
")",
"and",
"np",
".",
"isfinite",
"(",
"r",
")",
")",
"and",
"(",
"(",
"(",
"(",
"u",
"-",
"g",
")",
">",
"-",
"0.1",
")",
"and",
"(",
"(",
"u",
"-",
"g",
")",
"<",
"0.7",
")",
"and",
"(",
"(",
"g",
"-",
"r",
")",
">",
"-",
"0.3",
")",
"and",
"(",
"(",
"g",
"-",
"r",
")",
"<",
"0.5",
")",
")",
"or",
"(",
"(",
"u",
"-",
"g",
")",
">",
"(",
"1.6",
"*",
"(",
"g",
"-",
"r",
")",
"+",
"1.34",
")",
")",
")",
")",
":",
"possible_classes",
".",
"append",
"(",
"'QSO'",
")",
"return",
"{",
"'color_classes'",
":",
"possible_classes",
",",
"'v_color'",
":",
"v_color",
",",
"'p1_color'",
":",
"p1_color",
",",
"'s_color'",
":",
"s_color",
",",
"'l_color'",
":",
"l_color",
",",
"'d_ug'",
":",
"d_ug",
",",
"'d_gr'",
":",
"d_gr",
",",
"'m_sti'",
":",
"m_sti",
",",
"'m_sts'",
":",
"m_sts",
"}"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
neighbor_gaia_features
|
Gets several neighbor, GAIA, and SIMBAD features:
From the KD-Tree in the given light curve catalog the object is in:
`lclist_kdtree`:
- distance to closest neighbor in arcsec
- total number of all neighbors within 2 x `neighbor_radius_arcsec`
From the GAIA DR2 catalog:
- distance to closest neighbor in arcsec
- total number of all neighbors within 2 x `neighbor_radius_arcsec`
- gets the parallax for the object and neighbors
- calculates the absolute GAIA mag and `G-K` color for use in CMDs
- gets the proper motion in RA/Dec if available
From the SIMBAD catalog:
- the name of the object
- the type of the object
Parameters
----------
objectinfo : dict
This is the objectinfo dict from an object's light curve. This must
contain at least the following keys::
{'ra': the right ascension of the object,
'decl': the declination of the object}
lclist_kdtree : scipy.spatial.cKDTree object
This is a KD-Tree built on the Cartesian xyz coordinates from (ra, dec)
of all objects in the same field as this object. It is similar to that
produced by :py:func:`astrobase.lcproc.catalogs.make_lclist`, and is
used to carry out the spatial search required to find neighbors for this
object.
neighbor_radius_arcsec : float
The maximum radius in arcseconds around this object to search for
neighbors in both the light curve catalog and in the GAIA DR2 catalog.
gaia_matchdist_arcsec : float
The maximum distance in arcseconds to use for a GAIA cross-match to this
object.
verbose : bool
If True, indicates progress and warns of problems.
gaia_submit_timeout : float
Sets the timeout in seconds to use when submitting a request for the
object's information to the GAIA service. Note that if `fast_mode` is
set, this is ignored.
gaia_submit_tries : int
Sets the maximum number of times the GAIA services will be contacted to
obtain this object's information. If `fast_mode` is set, this is
ignored, and the services will be contacted only once (meaning that a
failure to respond will be silently ignored and no GAIA data will be
added to the checkplot's objectinfo dict).
gaia_max_timeout : float
Sets the timeout in seconds to use when waiting for the GAIA service to
respond to our request for the object's information. Note that if
`fast_mode` is set, this is ignored.
gaia_mirror : str
This sets the GAIA mirror to use. This is a key in the
`services.gaia.GAIA_URLS` dict which defines the URLs to hit for each
mirror.
search_simbad : bool
If this is True, searches for objects in SIMBAD at this object's
location and gets the object's SIMBAD main ID, type, and stellar
classification if available.
Returns
-------
dict
Returns a dict with neighbor, GAIA, and SIMBAD features.
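A sketch of how the `lclist_kdtree` argument can be built by hand when it does not come from `make_lclist`; the coordinate arrays are hypothetical, and the xyz layout matches the `[cosra*cosdecl, sinra*cosdecl, sindecl]` ordering the function queries with:

import numpy as np
from scipy.spatial import cKDTree

# hypothetical field-catalog coordinates in decimal degrees
field_ra = np.array([290.01, 290.02, 290.50])
field_decl = np.array([44.99, 45.01, 45.20])

cosdecl = np.cos(np.radians(field_decl))
xyz = np.column_stack((np.cos(np.radians(field_ra)) * cosdecl,
                       np.sin(np.radians(field_ra)) * cosdecl,
                       np.sin(np.radians(field_decl))))
lclist_kdtree = cKDTree(xyz)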
|
astrobase/varclass/starfeatures.py
|
def neighbor_gaia_features(objectinfo,
lclist_kdtree,
neighbor_radius_arcsec,
gaia_matchdist_arcsec=3.0,
verbose=True,
gaia_submit_timeout=10.0,
gaia_submit_tries=3,
gaia_max_timeout=180.0,
gaia_mirror=None,
complete_query_later=True,
search_simbad=False):
'''Gets several neighbor, GAIA, and SIMBAD features:
From the KD-Tree in the given light curve catalog the object is in:
`lclist_kdtree`:
- distance to closest neighbor in arcsec
- total number of all neighbors within 2 x `neighbor_radius_arcsec`
From the GAIA DR2 catalog:
- distance to closest neighbor in arcsec
- total number of all neighbors within 2 x `neighbor_radius_arcsec`
- gets the parallax for the object and neighbors
- calculates the absolute GAIA mag and `G-K` color for use in CMDs
- gets the proper motion in RA/Dec if available
From the SIMBAD catalog:
- the name of the object
- the type of the object
Parameters
----------
objectinfo : dict
This is the objectinfo dict from an object's light curve. This must
contain at least the following keys::
{'ra': the right ascension of the object,
'decl': the declination of the object}
lclist_kdtree : scipy.spatial.cKDTree object
This is a KD-Tree built on the Cartesian xyz coordinates from (ra, dec)
of all objects in the same field as this object. It is similar to that
produced by :py:func:`astrobase.lcproc.catalogs.make_lclist`, and is
used to carry out the spatial search required to find neighbors for this
object.
neighbor_radius_arcsec : float
The maximum radius in arcseconds around this object to search for
neighbors in both the light curve catalog and in the GAIA DR2 catalog.
gaia_matchdist_arcsec : float
The maximum distance in arcseconds to use for a GAIA cross-match to this
object.
verbose : bool
If True, indicates progress and warns of problems.
gaia_submit_timeout : float
Sets the timeout in seconds to use when submitting a request for the
object's information to the GAIA service. Note that if `fast_mode` is
set, this is ignored.
gaia_submit_tries : int
Sets the maximum number of times the GAIA services will be contacted to
obtain this object's information. If `fast_mode` is set, this is
ignored, and the services will be contacted only once (meaning that a
failure to respond will be silently ignored and no GAIA data will be
added to the checkplot's objectinfo dict).
gaia_max_timeout : float
Sets the timeout in seconds to use when waiting for the GAIA service to
respond to our request for the object's information. Note that if
`fast_mode` is set, this is ignored.
gaia_mirror : str
This sets the GAIA mirror to use. This is a key in the
`services.gaia.GAIA_URLS` dict which defines the URLs to hit for each
mirror.
search_simbad : bool
If this is True, searches for objects in SIMBAD at this object's
location and gets the object's SIMBAD main ID, type, and stellar
classification if available.
Returns
-------
dict
Returns a dict with neighbor, GAIA, and SIMBAD features.
'''
# kdtree search for neighbors in light curve catalog
if ('ra' in objectinfo and 'decl' in objectinfo and
objectinfo['ra'] is not None and objectinfo['decl'] is not None and
(isinstance(lclist_kdtree, cKDTree) or
isinstance(lclist_kdtree, KDTree))):
ra, decl = objectinfo['ra'], objectinfo['decl']
cosdecl = np.cos(np.radians(decl))
sindecl = np.sin(np.radians(decl))
cosra = np.cos(np.radians(ra))
sinra = np.sin(np.radians(ra))
# this is the search distance in xyz unit vectors
xyzdist = 2.0 * np.sin(np.radians(neighbor_radius_arcsec/3600.0)/2.0)
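# (chord length on the unit sphere: two unit vectors separated by angle
# theta lie a straight-line distance c = 2*sin(theta/2) apart; this is
# inverted below via theta = 2*arcsin(c/2) to convert the KD-Tree
# distances back to angular separations in arcsec)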
# look up the coordinates for the closest 100 objects in the kdtree
# within 2 x neighbor_radius_arcsec
kdt_dist, kdt_ind = lclist_kdtree.query(
[cosra*cosdecl,
sinra*cosdecl,
sindecl],
k=100,
distance_upper_bound=xyzdist
)
# the first match is the object itself
finite_distind = (np.isfinite(kdt_dist)) & (kdt_dist > 0)
finite_dists = kdt_dist[finite_distind]
nbrindices = kdt_ind[finite_distind]
n_neighbors = finite_dists.size
if n_neighbors > 0:
closest_dist = finite_dists.min()
closest_dist_arcsec = (
np.degrees(2.0*np.arcsin(closest_dist/2.0))*3600.0
)
closest_dist_nbrind = nbrindices[finite_dists == finite_dists.min()]
resultdict = {
'neighbors':n_neighbors,
'nbrindices':nbrindices,
'distarcsec':np.degrees(2.0*np.arcsin(finite_dists/2.0))*3600.0,
'closestdistarcsec':closest_dist_arcsec,
'closestdistnbrind':closest_dist_nbrind,
'searchradarcsec':neighbor_radius_arcsec,
}
else:
resultdict = {
'neighbors':0,
'nbrindices':np.array([]),
'distarcsec':np.array([]),
'closestdistarcsec':np.nan,
'closestdistnbrind':np.array([]),
'searchradarcsec':neighbor_radius_arcsec,
}
else:
if verbose:
LOGWARNING("one of ra, decl, kdtree is missing in "
"objectinfo dict or lclistpkl, "
"can't get observed neighbors")
resultdict = {
'neighbors':np.nan,
'nbrindices':np.array([]),
'distarcsec':np.array([]),
'closestdistarcsec':np.nan,
'closestdistnbrind':np.array([]),
'searchradarcsec':neighbor_radius_arcsec,
}
# next, search for this object in GAIA
if ('ra' in objectinfo and 'decl' in objectinfo and
objectinfo['ra'] is not None and objectinfo['decl'] is not None):
gaia_result = gaia.objectlist_conesearch(
objectinfo['ra'],
objectinfo['decl'],
neighbor_radius_arcsec,
verbose=verbose,
timeout=gaia_submit_timeout,
maxtimeout=gaia_max_timeout,
maxtries=gaia_submit_tries,
gaia_mirror=gaia_mirror,
complete_query_later=complete_query_later
)
if gaia_result:
gaia_objlistf = gaia_result['result']
with gzip.open(gaia_objlistf,'rb') as infd:
try:
gaia_objlist = np.genfromtxt(
infd,
names=True,
delimiter=',',
dtype='U20,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8',
usecols=(0,1,2,3,4,5,6,7,8,9,10,11,12)
)
except Exception as e:
gaia_objlist = []
gaia_objlist = np.atleast_1d(gaia_objlist)
if gaia_objlist.size > 0:
# if we have GAIA results, we can get xypositions of all of
# these objects on the object skyview stamp
stampres = skyview.get_stamp(objectinfo['ra'],
objectinfo['decl'])
if (stampres and
'fitsfile' in stampres and
stampres['fitsfile'] is not None and
os.path.exists(stampres['fitsfile'])):
stampwcs = WCS(stampres['fitsfile'])
gaia_xypos = stampwcs.all_world2pix(
np.column_stack((gaia_objlist['ra'],
gaia_objlist['dec'])),
1
)
else:
gaia_xypos = None
# the first object is likely the match to the object itself
if gaia_objlist['dist_arcsec'][0] < gaia_matchdist_arcsec:
if gaia_objlist.size > 1:
gaia_nneighbors = gaia_objlist[1:].size
gaia_status = (
'ok: object found with %s neighbors' %
gaia_nneighbors
)
# the first in each array is the object
gaia_ids = gaia_objlist['source_id']
gaia_mags = gaia_objlist['phot_g_mean_mag']
gaia_parallaxes = gaia_objlist['parallax']
gaia_parallax_errs = gaia_objlist['parallax_error']
gaia_pmra = gaia_objlist['pmra']
gaia_pmra_err = gaia_objlist['pmra_error']
gaia_pmdecl = gaia_objlist['pmdec']
gaia_pmdecl_err = gaia_objlist['pmdec_error']
gaia_absolute_mags = magnitudes.absolute_gaia_magnitude(
gaia_mags, gaia_parallaxes
)
if ('kmag' in objectinfo and
objectinfo['kmag'] is not None and
np.isfinite(objectinfo['kmag'])):
gaiak_colors = gaia_mags - objectinfo['kmag']
else:
gaiak_colors = None
gaia_dists = gaia_objlist['dist_arcsec']
gaia_closest_distarcsec = gaia_objlist['dist_arcsec'][1]
gaia_closest_gmagdiff = (
gaia_objlist['phot_g_mean_mag'][0] -
gaia_objlist['phot_g_mean_mag'][1]
)
else:
LOGWARNING('object found in GAIA at (%.3f,%.3f), '
'but no neighbors' % (objectinfo['ra'],
objectinfo['decl']))
gaia_nneighbors = 0
gaia_status = (
'ok: object found but no neighbors'
)
# the first in each array is the object
gaia_ids = gaia_objlist['source_id']
gaia_mags = gaia_objlist['phot_g_mean_mag']
gaia_parallaxes = gaia_objlist['parallax']
gaia_parallax_errs = gaia_objlist['parallax_error']
gaia_pmra = gaia_objlist['pmra']
gaia_pmra_err = gaia_objlist['pmra_error']
gaia_pmdecl = gaia_objlist['pmdec']
gaia_pmdecl_err = gaia_objlist['pmdec_error']
gaia_absolute_mags = magnitudes.absolute_gaia_magnitude(
gaia_mags, gaia_parallaxes
)
if ('kmag' in objectinfo and
objectinfo['kmag'] is not None and
np.isfinite(objectinfo['kmag'])):
gaiak_colors = gaia_mags - objectinfo['kmag']
else:
gaiak_colors = None
gaia_dists = gaia_objlist['dist_arcsec']
gaia_closest_distarcsec = np.nan
gaia_closest_gmagdiff = np.nan
# otherwise, the object wasn't found in GAIA for some reason
else:
LOGWARNING('no GAIA objects found within '
'%.3f arcsec of object position (%.3f, %.3f), '
'closest object is at %.3f arcsec away' %
(gaia_matchdist_arcsec,
objectinfo['ra'], objectinfo['decl'],
gaia_objlist['dist_arcsec'][0]))
gaia_status = ('failed: no object within %.3f '
'arcsec, closest = %.3f arcsec' %
(gaia_matchdist_arcsec,
gaia_objlist['dist_arcsec'][0]))
gaia_nneighbors = np.nan
gaia_ids = gaia_objlist['source_id']
gaia_mags = gaia_objlist['phot_g_mean_mag']
gaia_parallaxes = gaia_objlist['parallax']
gaia_parallax_errs = gaia_objlist['parallax_error']
gaia_pmra = gaia_objlist['pmra']
gaia_pmra_err = gaia_objlist['pmra_error']
gaia_pmdecl = gaia_objlist['pmdec']
gaia_pmdecl_err = gaia_objlist['pmdec_error']
gaia_absolute_mags = magnitudes.absolute_gaia_magnitude(
gaia_mags, gaia_parallaxes
)
if ('kmag' in objectinfo and
objectinfo['kmag'] is not None and
np.isfinite(objectinfo['kmag'])):
gaiak_colors = gaia_mags - objectinfo['kmag']
else:
gaiak_colors = None
gaia_dists = gaia_objlist['dist_arcsec']
gaia_closest_distarcsec = np.nan
gaia_closest_gmagdiff = np.nan
# if there are no GAIA objects within neighbor_radius_arcsec,
# or this object is not covered by GAIA, return nothing
else:
LOGERROR('no GAIA objects at this '
'position or GAIA query failed')
gaia_status = (
'failed: no GAIA objects at this '
'position or GAIA query failed.'
)
gaia_nneighbors = np.nan
gaia_ids = None
gaia_mags = None
gaia_xypos = None
gaia_parallaxes = None
gaia_parallax_errs = None
gaia_pmra = None
gaia_pmra_err = None
gaia_pmdecl = None
gaia_pmdecl_err = None
gaia_absolute_mags = None
gaiak_colors = None
gaia_dists = None
gaia_closest_distarcsec = np.nan
gaia_closest_gmagdiff = np.nan
# update the resultdict with gaia stuff
resultdict.update(
{'gaia_status':gaia_status,
'gaia_neighbors':gaia_nneighbors,
'gaia_ids':gaia_ids,
'gaia_xypos':gaia_xypos,
'gaia_mags':gaia_mags,
'gaia_parallaxes':gaia_parallaxes,
'gaia_parallax_errs':gaia_parallax_errs,
'gaia_pmras':gaia_pmra,
'gaia_pmra_errs':gaia_pmra_err,
'gaia_pmdecls':gaia_pmdecl,
'gaia_pmdecl_errs':gaia_pmdecl_err,
'gaia_absolute_mags':gaia_absolute_mags,
'gaiak_colors':gaiak_colors,
'gaia_dists':gaia_dists,
'gaia_closest_distarcsec':gaia_closest_distarcsec,
'gaia_closest_gmagdiff':gaia_closest_gmagdiff}
)
else:
LOGERROR('GAIA query did not return a '
'result for object at (%.3f, %.3f)' % (objectinfo['ra'],
objectinfo['decl']))
resultdict.update(
{'gaia_status':'failed: GAIA TAP query failed',
'gaia_neighbors':np.nan,
'gaia_ids':None,
'gaia_xypos':None,
'gaia_mags':None,
'gaia_parallaxes':None,
'gaia_parallax_errs':None,
'gaia_pmras':None,
'gaia_pmra_errs':None,
'gaia_pmdecls':None,
'gaia_pmdecl_errs':None,
'gaia_absolute_mags':None,
'gaiak_colors':None,
'gaia_dists':None,
'gaia_closest_distarcsec':np.nan,
'gaia_closest_gmagdiff':np.nan}
)
else:
LOGERROR("one or more of the 'ra', 'decl' keys "
"are missing from the objectinfo dict, "
"can't get GAIA or LC collection neighbor features")
resultdict.update(
{'gaia_status':'failed: no ra/decl for object',
'gaia_neighbors':np.nan,
'gaia_ids':None,
'gaia_xypos':None,
'gaia_mags':None,
'gaia_parallaxes':None,
'gaia_parallax_errs':None,
'gaia_pmras':None,
'gaia_pmra_errs':None,
'gaia_pmdecls':None,
'gaia_pmdecl_errs':None,
'gaia_absolute_mags':None,
'gaiak_colors':None,
'gaia_dists':None,
'gaia_closest_distarcsec':np.nan,
'gaia_closest_gmagdiff':np.nan}
)
# finally, search for this object in SIMBAD
if ('ra' in objectinfo and 'decl' in objectinfo and
objectinfo['ra'] is not None and objectinfo['decl'] is not None and
search_simbad):
simbad_result = simbad.objectnames_conesearch(
objectinfo['ra'],
objectinfo['decl'],
neighbor_radius_arcsec,
verbose=verbose,
timeout=gaia_submit_timeout,
maxtimeout=gaia_max_timeout,
maxtries=gaia_submit_tries,
complete_query_later=complete_query_later
)
else:
simbad_result = None
if (simbad_result and
simbad_result['result'] and
os.path.exists(simbad_result['result'])):
with gzip.open(simbad_result['result'],'rb') as infd:
try:
simbad_objectnames = np.genfromtxt(
infd,
names=True,
delimiter=',',
dtype='U20,f8,f8,U20,U20,U20,i8,U600,f8',
usecols=(0,1,2,3,4,5,6,7,8),
comments='?',
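# (the comment char is set to '?' presumably so that '#'
# characters inside SIMBAD identifiers aren't treated as
# comment markers)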
)
except Exception as e:
simbad_objectnames = []
simbad_objectnames = np.atleast_1d(simbad_objectnames)
if simbad_objectnames.size > 0:
simbad_mainid = simbad_objectnames['main_id'].tolist()
simbad_allids = simbad_objectnames['all_ids'].tolist()
simbad_objtype = simbad_objectnames['otype_txt'].tolist()
simbad_distarcsec = simbad_objectnames['dist_arcsec'].tolist()
simbad_nmatches = len(simbad_mainid)
simbad_mainid = [x.replace('"','') for x in simbad_mainid]
simbad_allids = [x.replace('"','') for x in simbad_allids]
simbad_objtype = [x.replace('"','') for x in simbad_objtype]
resultdict.update({
'simbad_nmatches':simbad_nmatches,
'simbad_mainid':simbad_mainid,
'simbad_objtype':simbad_objtype,
'simbad_allids':simbad_allids,
'simbad_distarcsec':simbad_distarcsec
})
if simbad_nmatches > 1:
resultdict['simbad_status'] = (
'ok: multiple SIMBAD matches found'
)
else:
resultdict['simbad_status'] = 'ok: single SIMBAD match'
# get the closest match
if simbad_distarcsec[0] < gaia_matchdist_arcsec:
resultdict.update({
'simbad_best_mainid':simbad_mainid[0],
'simbad_best_objtype':simbad_objtype[0],
'simbad_best_allids':simbad_allids[0],
'simbad_best_distarcsec':simbad_distarcsec[0],
'simbad_status':'ok: object found within match radius'
})
else:
LOGWARNING('no SIMBAD objects found within '
'%.3f arcsec of object position (%.3f, %.3f), '
'closest object: %s at %.3f arcsec away' %
(gaia_matchdist_arcsec,
objectinfo['ra'],
objectinfo['decl'],
simbad_mainid[0],
simbad_distarcsec[0]))
simbad_status = ('failed: no object within %.3f '
'arcsec, closest = %.3f arcsec' %
(gaia_matchdist_arcsec,
simbad_distarcsec[0]))
resultdict.update({
'simbad_best_mainid':None,
'simbad_best_objtype':None,
'simbad_best_allids':None,
'simbad_best_distarcsec':None,
'simbad_status':simbad_status
})
else:
resultdict.update({
'simbad_status':'failed: no SIMBAD matches found',
'simbad_nmatches':None,
'simbad_mainid':None,
'simbad_objtype':None,
'simbad_allids':None,
'simbad_distarcsec':None,
'simbad_best_mainid':None,
'simbad_best_objtype':None,
'simbad_best_allids':None,
'simbad_best_distarcsec':None,
})
else:
if search_simbad:
simbad_status = 'failed: SIMBAD query failed'
else:
simbad_status = 'failed: SIMBAD query not tried'
resultdict.update({
'simbad_status':simbad_status,
'simbad_nmatches':None,
'simbad_mainid':None,
'simbad_objtype':None,
'simbad_allids':None,
'simbad_distarcsec':None,
'simbad_best_mainid':None,
'simbad_best_objtype':None,
'simbad_best_allids':None,
'simbad_best_distarcsec':None,
})
return resultdict
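A minimal usage sketch (the field positions and objectinfo values below are
hypothetical; the KD-Tree must be built on unit-sphere xyz coordinates,
matching the query vector the function constructs internally):

import numpy as np
from scipy.spatial import cKDTree
from astrobase.varclass.starfeatures import neighbor_gaia_features

# hypothetical (ra, decl) positions for all objects in the field, in degrees
ras = np.array([290.123, 290.125, 290.500])
decls = np.array([44.512, 44.514, 44.900])

# build the KD-Tree on unit-sphere Cartesian coordinates
cosd = np.cos(np.radians(decls))
xyz = np.column_stack((np.cos(np.radians(ras))*cosd,
                       np.sin(np.radians(ras))*cosd,
                       np.sin(np.radians(decls))))
lclist_kdtree = cKDTree(xyz)

objectinfo = {'ra': 290.123, 'decl': 44.512, 'kmag': 10.2}  # hypothetical
features = neighbor_gaia_features(objectinfo,
                                  lclist_kdtree,
                                  neighbor_radius_arcsec=30.0,
                                  search_simbad=False)
print(features['neighbors'], features['gaia_status'])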
|
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/varclass/starfeatures.py#L1114-L1709
|
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
apply_epd_magseries
|
This applies external parameter decorrelation (EPD) to a light curve.
Parameters
----------
lcfile : str
The filename of the light curve file to process.
timecol,magcol,errcol : str
The keys in the lcdict produced by your light curve reader function that
correspond to the times, mags/fluxes, and associated measurement errors
that will be used as input to the EPD process.
externalparams : dict or None
This is a dict that indicates which keys in the lcdict obtained from the
lcfile correspond to the required external parameters. As with timecol,
magcol, and errcol, these can be simple keys (e.g. 'rjd') or compound
keys ('magaperture1.mags'). The dict should look something like::
{'fsv':'<lcdict key>' array: S values for each observation,
'fdv':'<lcdict key>' array: D values for each observation,
'fkv':'<lcdict key>' array: K values for each observation,
'xcc':'<lcdict key>' array: x coords for each observation,
'ycc':'<lcdict key>' array: y coords for each observation,
'bgv':'<lcdict key>' array: sky background for each observation,
'bge':'<lcdict key>' array: sky background err for each observation,
'iha':'<lcdict key>' array: hour angle for each observation,
'izd':'<lcdict key>' array: zenith distance for each observation}
Alternatively, if these exact keys are already present in the lcdict,
indicate this by setting externalparams to None.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
epdsmooth_sigclip : float or int or sequence of two floats/ints or None
This specifies how to sigma-clip the input LC before fitting the EPD
function to it.
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]` will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
epdsmooth_windowsize : int
This is the number of LC points to smooth over to generate a smoothed
light curve that will be used to fit the EPD function.
epdsmooth_func : Python function
This sets the smoothing filter function to use. A Savitzky-Golay filter
is used to smooth the light curve by default. The functions that can be
used with this kwarg are listed in `varbase.trends`. If you want to use
your own function, it MUST have the following signature::
def smoothfunc(mags_array, window_size, **extraparams)
and return a numpy array of the same size as `mags_array` with the
smoothed time-series. Any extra params can be provided using the
`extraparams` dict.
epdsmooth_extraparams : dict
This is a dict of any extra filter params to supply to the smoothing
function.
Returns
-------
str
Writes the output EPD light curve to a pickle that contains the lcdict
with an added `lcdict['epd']` key, which contains the EPD times,
mags/fluxes, and errs as `lcdict['epd']['times']`,
`lcdict['epd']['mags']`, and `lcdict['epd']['errs']`. Returns the
filename of this generated EPD LC pickle file.
Notes
-----
- S -> measure of PSF sharpness (~1/sigma^2, so smaller S = wider PSF)
- D -> measure of PSF ellipticity in xy direction
- K -> measure of PSF ellipticity in cross direction
S, D, K are related to the PSF's variance and covariance, see eqns. 30-33 in
A. Pal's thesis: https://arxiv.org/abs/0906.3486
|
astrobase/lcproc/epd.py
|
def apply_epd_magseries(lcfile,
timecol,
magcol,
errcol,
externalparams,
lcformat='hat-sql',
lcformatdir=None,
epdsmooth_sigclip=3.0,
epdsmooth_windowsize=21,
epdsmooth_func=smooth_magseries_savgol,
epdsmooth_extraparams=None):
'''This applies external parameter decorrelation (EPD) to a light curve.
Parameters
----------
lcfile : str
The filename of the light curve file to process.
timecol,magcol,errcol : str
The keys in the lcdict produced by your light curve reader function that
correspond to the times, mags/fluxes, and associated measurement errors
that will be used as input to the EPD process.
externalparams : dict or None
This is a dict that indicates which keys in the lcdict obtained from the
lcfile correspond to the required external parameters. As with timecol,
magcol, and errcol, these can be simple keys (e.g. 'rjd') or compound
keys ('magaperture1.mags'). The dict should look something like::
{'fsv':'<lcdict key>' array: S values for each observation,
'fdv':'<lcdict key>' array: D values for each observation,
'fkv':'<lcdict key>' array: K values for each observation,
'xcc':'<lcdict key>' array: x coords for each observation,
'ycc':'<lcdict key>' array: y coords for each observation,
'bgv':'<lcdict key>' array: sky background for each observation,
'bge':'<lcdict key>' array: sky background err for each observation,
'iha':'<lcdict key>' array: hour angle for each observation,
'izd':'<lcdict key>' array: zenith distance for each observation}
Alternatively, if these exact keys are already present in the lcdict,
indicate this by setting externalparams to None.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
epdsmooth_sigclip : float or int or sequence of two floats/ints or None
This specifies how to sigma-clip the input LC before fitting the EPD
function to it.
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]` will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
epdsmooth_windowsize : int
This is the number of LC points to smooth over to generate a smoothed
light curve that will be used to fit the EPD function.
epdsmooth_func : Python function
This sets the smoothing filter function to use. A Savitzky-Golay filter
is used to smooth the light curve by default. The functions that can be
used with this kwarg are listed in `varbase.trends`. If you want to use
your own function, it MUST have the following signature::
def smoothfunc(mags_array, window_size, **extraparams)
and return a numpy array of the same size as `mags_array` with the
smoothed time-series. Any extra params can be provided using the
`extraparams` dict.
epdsmooth_extraparams : dict
This is a dict of any extra filter params to supply to the smoothing
function.
Returns
-------
str
Writes the output EPD light curve to a pickle that contains the lcdict
with an added `lcdict['epd']` key, which contains the EPD times,
mags/fluxes, and errs as `lcdict['epd']['times']`,
`lcdict['epd']['mags']`, and `lcdict['epd']['errs']`. Returns the
filename of this generated EPD LC pickle file.
Notes
-----
- S -> measure of PSF sharpness (~1/sigma^2, so smaller S = wider PSF)
- D -> measure of PSF ellipticity in xy direction
- K -> measure of PSF ellipticity in cross direction
S, D, K are related to the PSF's variance and covariance, see eqns. 30-33 in
A. Pal's thesis: https://arxiv.org/abs/0906.3486
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
lcdict = readerfunc(lcfile)
if ((isinstance(lcdict, (tuple, list))) and
isinstance(lcdict[0], dict)):
lcdict = lcdict[0]
objectid = lcdict['objectid']
times, mags, errs = lcdict[timecol], lcdict[magcol], lcdict[errcol]
if externalparams is not None:
fsv = lcdict[externalparams['fsv']]
fdv = lcdict[externalparams['fdv']]
fkv = lcdict[externalparams['fkv']]
xcc = lcdict[externalparams['xcc']]
ycc = lcdict[externalparams['ycc']]
bgv = lcdict[externalparams['bgv']]
bge = lcdict[externalparams['bge']]
iha = lcdict[externalparams['iha']]
izd = lcdict[externalparams['izd']]
else:
fsv = lcdict['fsv']
fdv = lcdict['fdv']
fkv = lcdict['fkv']
xcc = lcdict['xcc']
ycc = lcdict['ycc']
bgv = lcdict['bgv']
bge = lcdict['bge']
iha = lcdict['iha']
izd = lcdict['izd']
# apply the corrections for EPD
epd = epd_magseries(
times,
mags,
errs,
fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd,
magsarefluxes=magsarefluxes,
epdsmooth_sigclip=epdsmooth_sigclip,
epdsmooth_windowsize=epdsmooth_windowsize,
epdsmooth_func=epdsmooth_func,
epdsmooth_extraparams=epdsmooth_extraparams
)
# save the EPD magseries to a pickle LC
lcdict['epd'] = epd
outfile = os.path.join(
os.path.dirname(lcfile),
'%s-epd-%s-pklc.pkl' % (
squeeze(objectid).replace(' ','-'),
magcol
)
)
with open(outfile,'wb') as outfd:
pickle.dump(lcdict, outfd,
protocol=pickle.HIGHEST_PROTOCOL)
return outfile
|
def apply_epd_magseries(lcfile,
timecol,
magcol,
errcol,
externalparams,
lcformat='hat-sql',
lcformatdir=None,
epdsmooth_sigclip=3.0,
epdsmooth_windowsize=21,
epdsmooth_func=smooth_magseries_savgol,
epdsmooth_extraparams=None):
'''This applies external parameter decorrelation (EPD) to a light curve.
Parameters
----------
lcfile : str
The filename of the light curve file to process.
timecol,magcol,errcol : str
The keys in the lcdict produced by your light curve reader function that
correspond to the times, mags/fluxes, and associated measurement errors
that will be used as input to the EPD process.
externalparams : dict or None
This is a dict that indicates which keys in the lcdict obtained from the
lcfile correspond to the required external parameters. As with timecol,
magcol, and errcol, these can be simple keys (e.g. 'rjd') or compound
keys ('magaperture1.mags'). The dict should look something like::
{'fsv':'<lcdict key>' array: S values for each observation,
'fdv':'<lcdict key>' array: D values for each observation,
'fkv':'<lcdict key>' array: K values for each observation,
'xcc':'<lcdict key>' array: x coords for each observation,
'ycc':'<lcdict key>' array: y coords for each observation,
'bgv':'<lcdict key>' array: sky background for each observation,
'bge':'<lcdict key>' array: sky background err for each observation,
'iha':'<lcdict key>' array: hour angle for each observation,
'izd':'<lcdict key>' array: zenith distance for each observation}
Alternatively, if these exact keys are already present in the lcdict,
indicate this by setting externalparams to None.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the input
light curve file.
lcformatdir : str or None
If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
epdsmooth_sigclip : float or int or sequence of two floats/ints or None
This specifies how to sigma-clip the input LC before fitting the EPD
function to it.
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]` will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
epdsmooth_windowsize : int
This is the number of LC points to smooth over to generate a smoothed
light curve that will be used to fit the EPD function.
epdsmooth_func : Python function
This sets the smoothing filter function to use. A Savitzky-Golay filter
is used to smooth the light curve by default. The functions that can be
used with this kwarg are listed in `varbase.trends`. If you want to use
your own function, it MUST have the following signature::
def smoothfunc(mags_array, window_size, **extraparams)
and return a numpy array of the same size as `mags_array` with the
smoothed time-series. Any extra params can be provided using the
`extraparams` dict.
epdsmooth_extraparams : dict
This is a dict of any extra filter params to supply to the smoothing
function.
Returns
-------
str
Writes the output EPD light curve to a pickle that contains the lcdict
with an added `lcdict['epd']` key, which contains the EPD times,
mags/fluxes, and errs as `lcdict['epd']['times']`,
`lcdict['epd']['mags']`, and `lcdict['epd']['errs']`. Returns the
filename of this generated EPD LC pickle file.
Notes
-----
- S -> measure of PSF sharpness (~1/sigma^2, so smaller S = wider PSF)
- D -> measure of PSF ellipticity in the xy direction
- K -> measure of PSF ellipticity in the cross direction
S, D, K are related to the PSF's variance and covariance; see eqns. 30-33 in
A. Pal's thesis: https://arxiv.org/abs/0906.3486
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
lcdict = readerfunc(lcfile)
if ((isinstance(lcdict, (tuple, list))) and
isinstance(lcdict[0], dict)):
lcdict = lcdict[0]
objectid = lcdict['objectid']
times, mags, errs = lcdict[timecol], lcdict[magcol], lcdict[errcol]
if externalparams is not None:
fsv = lcdict[externalparams['fsv']]
fdv = lcdict[externalparams['fdv']]
fkv = lcdict[externalparams['fkv']]
xcc = lcdict[externalparams['xcc']]
ycc = lcdict[externalparams['ycc']]
bgv = lcdict[externalparams['bgv']]
bge = lcdict[externalparams['bge']]
iha = lcdict[externalparams['iha']]
izd = lcdict[externalparams['izd']]
else:
fsv = lcdict['fsv']
fdv = lcdict['fdv']
fkv = lcdict['fkv']
xcc = lcdict['xcc']
ycc = lcdict['ycc']
bgv = lcdict['bgv']
bge = lcdict['bge']
iha = lcdict['iha']
izd = lcdict['izd']
# apply the corrections for EPD
epd = epd_magseries(
times,
mags,
errs,
fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd,
magsarefluxes=magsarefluxes,
epdsmooth_sigclip=epdsmooth_sigclip,
epdsmooth_windowsize=epdsmooth_windowsize,
epdsmooth_func=epdsmooth_func,
epdsmooth_extraparams=epdsmooth_extraparams
)
# save the EPD magseries to a pickle LC
lcdict['epd'] = epd
outfile = os.path.join(
os.path.dirname(lcfile),
'%s-epd-%s-pklc.pkl' % (
squeeze(objectid).replace(' ','-'),
magcol
)
)
with open(outfile,'wb') as outfd:
pickle.dump(lcdict, outfd,
protocol=pickle.HIGHEST_PROTOCOL)
return outfile
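# --- illustrative usage sketch; not part of the astrobase source ---
# The pickle filename and the lcdict keys below are hypothetical
# placeholders: substitute the keys your own LC reader actually produces.
extparams = {
    'fsv': 'frame_s', 'fdv': 'frame_d', 'fkv': 'frame_k',  # PSF shape S, D, K
    'xcc': 'x_coord', 'ycc': 'y_coord',                    # detector coords
    'bgv': 'background', 'bge': 'background_err',          # sky background
    'iha': 'hour_angle', 'izd': 'zenith_dist',             # airmass proxies
}
epdlc = apply_epd_magseries(
    'HAT-123-0000001-pklc.pkl',       # hypothetical input pickle LC
    'rjd', 'aep_000', 'aie_000',      # time, mag, err keys in the lcdict
    extparams,
    lcformat='hat-sql',
)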
|
[
"This",
"applies",
"external",
"parameter",
"decorrelation",
"(",
"EPD",
")",
"to",
"a",
"light",
"curve",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/lcproc/epd.py#L84-L280
|
[
"def",
"apply_epd_magseries",
"(",
"lcfile",
",",
"timecol",
",",
"magcol",
",",
"errcol",
",",
"externalparams",
",",
"lcformat",
"=",
"'hat-sql'",
",",
"lcformatdir",
"=",
"None",
",",
"epdsmooth_sigclip",
"=",
"3.0",
",",
"epdsmooth_windowsize",
"=",
"21",
",",
"epdsmooth_func",
"=",
"smooth_magseries_savgol",
",",
"epdsmooth_extraparams",
"=",
"None",
")",
":",
"try",
":",
"formatinfo",
"=",
"get_lcformat",
"(",
"lcformat",
",",
"use_lcformat_dir",
"=",
"lcformatdir",
")",
"if",
"formatinfo",
":",
"(",
"dfileglob",
",",
"readerfunc",
",",
"dtimecols",
",",
"dmagcols",
",",
"derrcols",
",",
"magsarefluxes",
",",
"normfunc",
")",
"=",
"formatinfo",
"else",
":",
"LOGERROR",
"(",
"\"can't figure out the light curve format\"",
")",
"return",
"None",
"except",
"Exception",
"as",
"e",
":",
"LOGEXCEPTION",
"(",
"\"can't figure out the light curve format\"",
")",
"return",
"None",
"lcdict",
"=",
"readerfunc",
"(",
"lcfile",
")",
"if",
"(",
"(",
"isinstance",
"(",
"lcdict",
",",
"(",
"tuple",
",",
"list",
")",
")",
")",
"and",
"isinstance",
"(",
"lcdict",
"[",
"0",
"]",
",",
"dict",
")",
")",
":",
"lcdict",
"=",
"lcdict",
"[",
"0",
"]",
"objectid",
"=",
"lcdict",
"[",
"'objectid'",
"]",
"times",
",",
"mags",
",",
"errs",
"=",
"lcdict",
"[",
"timecol",
"]",
",",
"lcdict",
"[",
"magcol",
"]",
",",
"lcdict",
"[",
"errcol",
"]",
"if",
"externalparams",
"is",
"not",
"None",
":",
"fsv",
"=",
"lcdict",
"[",
"externalparams",
"[",
"'fsv'",
"]",
"]",
"fdv",
"=",
"lcdict",
"[",
"externalparams",
"[",
"'fdv'",
"]",
"]",
"fkv",
"=",
"lcdict",
"[",
"externalparams",
"[",
"'fkv'",
"]",
"]",
"xcc",
"=",
"lcdict",
"[",
"externalparams",
"[",
"'xcc'",
"]",
"]",
"ycc",
"=",
"lcdict",
"[",
"externalparams",
"[",
"'ycc'",
"]",
"]",
"bgv",
"=",
"lcdict",
"[",
"externalparams",
"[",
"'bgv'",
"]",
"]",
"bge",
"=",
"lcdict",
"[",
"externalparams",
"[",
"'bge'",
"]",
"]",
"iha",
"=",
"lcdict",
"[",
"externalparams",
"[",
"'iha'",
"]",
"]",
"izd",
"=",
"lcdict",
"[",
"externalparams",
"[",
"'izd'",
"]",
"]",
"else",
":",
"fsv",
"=",
"lcdict",
"[",
"'fsv'",
"]",
"fdv",
"=",
"lcdict",
"[",
"'fdv'",
"]",
"fkv",
"=",
"lcdict",
"[",
"'fkv'",
"]",
"xcc",
"=",
"lcdict",
"[",
"'xcc'",
"]",
"ycc",
"=",
"lcdict",
"[",
"'ycc'",
"]",
"bgv",
"=",
"lcdict",
"[",
"'bgv'",
"]",
"bge",
"=",
"lcdict",
"[",
"'bge'",
"]",
"iha",
"=",
"lcdict",
"[",
"'iha'",
"]",
"izd",
"=",
"lcdict",
"[",
"'izd'",
"]",
"# apply the corrections for EPD",
"epd",
"=",
"epd_magseries",
"(",
"times",
",",
"mags",
",",
"errs",
",",
"fsv",
",",
"fdv",
",",
"fkv",
",",
"xcc",
",",
"ycc",
",",
"bgv",
",",
"bge",
",",
"iha",
",",
"izd",
",",
"magsarefluxes",
"=",
"magsarefluxes",
",",
"epdsmooth_sigclip",
"=",
"epdsmooth_sigclip",
",",
"epdsmooth_windowsize",
"=",
"epdsmooth_windowsize",
",",
"epdsmooth_func",
"=",
"epdsmooth_func",
",",
"epdsmooth_extraparams",
"=",
"epdsmooth_extraparams",
")",
"# save the EPD magseries to a pickle LC",
"lcdict",
"[",
"'epd'",
"]",
"=",
"epd",
"outfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"lcfile",
")",
",",
"'%s-epd-%s-pklc.pkl'",
"%",
"(",
"squeeze",
"(",
"objectid",
")",
".",
"replace",
"(",
"' '",
",",
"'-'",
")",
",",
"magcol",
")",
")",
"with",
"open",
"(",
"outfile",
",",
"'wb'",
")",
"as",
"outfd",
":",
"pickle",
".",
"dump",
"(",
"lcdict",
",",
"outfd",
",",
"protocol",
"=",
"pickle",
".",
"HIGHEST_PROTOCOL",
")",
"return",
"outfile"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
parallel_epd_worker
|
This is a parallel worker for the function below.
Parameters
----------
task : tuple
- task[0] = lcfile
- task[1] = timecol
- task[2] = magcol
- task[3] = errcol
- task[4] = externalparams
- task[5] = lcformat
- task[6] = lcformatdir
- task[7] = epdsmooth_sigclip
- task[8] = epdsmooth_windowsize
- task[9] = epdsmooth_func
- task[10] = epdsmooth_extraparams
Returns
-------
str or None
If EPD succeeds for an input LC, returns the filename of the output EPD
LC pickle file. If it fails, returns None.
|
astrobase/lcproc/epd.py
|
def parallel_epd_worker(task):
'''This is a parallel worker for the function below.
Parameters
----------
task : tuple
- task[0] = lcfile
- task[1] = timecol
- task[2] = magcol
- task[3] = errcol
- task[4] = externalparams
- task[5] = lcformat
- task[6] = lcformatdir
- task[7] = epdsmooth_sigclip
- task[8] = epdsmooth_windowsize
- task[9] = epdsmooth_func
- task[10] = epdsmooth_extraparams
Returns
-------
str or None
If EPD succeeds for an input LC, returns the filename of the output EPD
LC pickle file. If it fails, returns None.
'''
(lcfile, timecol, magcol, errcol,
     externalparams, lcformat, lcformatdir,
epdsmooth_sigclip, epdsmooth_windowsize,
epdsmooth_func, epdsmooth_extraparams) = task
try:
epd = apply_epd_magseries(lcfile,
timecol,
magcol,
errcol,
externalparams,
lcformat=lcformat,
lcformatdir=lcformatdir,
epdsmooth_sigclip=epdsmooth_sigclip,
epdsmooth_windowsize=epdsmooth_windowsize,
epdsmooth_func=epdsmooth_func,
epdsmooth_extraparams=epdsmooth_extraparams)
if epd is not None:
LOGINFO('%s -> %s EPD OK' % (lcfile, epd))
return epd
else:
LOGERROR('EPD failed for %s' % lcfile)
return None
except Exception as e:
LOGEXCEPTION('EPD failed for %s' % lcfile)
return None
|
def parallel_epd_worker(task):
'''This is a parallel worker for the function below.
Parameters
----------
task : tuple
- task[0] = lcfile
- task[1] = timecol
- task[2] = magcol
- task[3] = errcol
- task[4] = externalparams
- task[5] = lcformat
- task[6] = lcformatdir
- task[7] = epdsmooth_sigclip
- task[8] = epdsmooth_windowsize
- task[9] = epdsmooth_func
- task[10] = epdsmooth_extraparams
Returns
-------
str or None
If EPD succeeds for an input LC, returns the filename of the output EPD
LC pickle file. If it fails, returns None.
'''
(lcfile, timecol, magcol, errcol,
     externalparams, lcformat, lcformatdir,
epdsmooth_sigclip, epdsmooth_windowsize,
epdsmooth_func, epdsmooth_extraparams) = task
try:
epd = apply_epd_magseries(lcfile,
timecol,
magcol,
errcol,
externalparams,
lcformat=lcformat,
lcformatdir=lcformatdir,
epdsmooth_sigclip=epdsmooth_sigclip,
epdsmooth_windowsize=epdsmooth_windowsize,
epdsmooth_func=epdsmooth_func,
epdsmooth_extraparams=epdsmooth_extraparams)
if epd is not None:
LOGINFO('%s -> %s EPD OK' % (lcfile, epd))
return epd
else:
LOGERROR('EPD failed for %s' % lcfile)
return None
except Exception as e:
LOGEXCEPTION('EPD failed for %s' % lcfile)
return None
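# --- illustrative usage sketch; not part of the astrobase source ---
# One task is an 11-element tuple laid out exactly as documented above;
# the LC filename is a hypothetical placeholder.
task = ('HAT-123-0000001-pklc.pkl',        # task[0]: lcfile
        'rjd', 'aep_000', 'aie_000',       # task[1:4]: timecol, magcol, errcol
        None,                              # task[4]: externalparams (keys already in lcdict)
        'hat-sql', None,                   # task[5:7]: lcformat, lcformatdir
        3.0, 21,                           # task[7:9]: epdsmooth_sigclip, windowsize
        smooth_magseries_savgol, None)     # task[9:11]: epdsmooth_func, extraparams
epd_lcfile = parallel_epd_worker(task)     # returns the EPD pickle path or None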
|
[
"This",
"is",
"a",
"parallel",
"worker",
"for",
"the",
"function",
"below",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/lcproc/epd.py#L284-L340
|
[
"def",
"parallel_epd_worker",
"(",
"task",
")",
":",
"(",
"lcfile",
",",
"timecol",
",",
"magcol",
",",
"errcol",
",",
"externalparams",
",",
"lcformat",
",",
"lcformatdir",
",",
"magsarefluxes",
",",
"epdsmooth_sigclip",
",",
"epdsmooth_windowsize",
",",
"epdsmooth_func",
",",
"epdsmooth_extraparams",
")",
"=",
"task",
"try",
":",
"epd",
"=",
"apply_epd_magseries",
"(",
"lcfile",
",",
"timecol",
",",
"magcol",
",",
"errcol",
",",
"externalparams",
",",
"lcformat",
"=",
"lcformat",
",",
"lcformatdir",
"=",
"lcformatdir",
",",
"epdsmooth_sigclip",
"=",
"epdsmooth_sigclip",
",",
"epdsmooth_windowsize",
"=",
"epdsmooth_windowsize",
",",
"epdsmooth_func",
"=",
"epdsmooth_func",
",",
"epdsmooth_extraparams",
"=",
"epdsmooth_extraparams",
")",
"if",
"epd",
"is",
"not",
"None",
":",
"LOGINFO",
"(",
"'%s -> %s EPD OK'",
"%",
"(",
"lcfile",
",",
"epd",
")",
")",
"return",
"epd",
"else",
":",
"LOGERROR",
"(",
"'EPD failed for %s'",
"%",
"lcfile",
")",
"return",
"None",
"except",
"Exception",
"as",
"e",
":",
"LOGEXCEPTION",
"(",
"'EPD failed for %s'",
"%",
"lcfile",
")",
"return",
"None"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
parallel_epd_lclist
|
This applies EPD in parallel to all LCs in the input list.
Parameters
----------
lclist : list of str
This is the list of light curve files to run EPD on.
externalparams : dict or None
This is a dict that indicates which keys in the lcdict obtained from the
lcfile correspond to the required external parameters. As with timecol,
magcol, and errcol, these can be simple keys (e.g. 'rjd') or compound
keys ('magaperture1.mags'). The dict should look something like::
{'fsv':'<lcdict key>' array: S values for each observation,
'fdv':'<lcdict key>' array: D values for each observation,
'fkv':'<lcdict key>' array: K values for each observation,
'xcc':'<lcdict key>' array: x coords for each observation,
'ycc':'<lcdict key>' array: y coords for each observation,
'bgv':'<lcdict key>' array: sky background for each observation,
'bge':'<lcdict key>' array: sky background err for each observation,
'iha':'<lcdict key>' array: hour angle for each observation,
'izd':'<lcdict key>' array: zenith distance for each observation}
Alternatively, if these exact keys are already present in the lcdict,
indicate this by setting externalparams to None.
timecols,magcols,errcols : lists of str
The keys in the lcdict produced by your light curve reader function that
correspond to the times, mags/fluxes, and associated measurement errors
that will be used as inputs to the EPD process. If these are None, the
default values for `timecols`, `magcols`, and `errcols` for your light
curve format will be used here.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curve files.
lcformatdir : str or None
If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
epdsmooth_sigclip : float or int or sequence of two floats/ints or None
This specifies how to sigma-clip the input LC before fitting the EPD
function to it.
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]` will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
epdsmooth_windowsize : int
This is the number of LC points to smooth over to generate a smoothed
light curve that will be used to fit the EPD function.
epdsmooth_func : Python function
This sets the smoothing filter function to use. A Savitzky-Golay filter
is used to smooth the light curve by default. The functions that can be
used with this kwarg are listed in `varbase.trends`. If you want to use
your own function, it MUST have the following signature::
def smoothfunc(mags_array, window_size, **extraparams)
and return a numpy array of the same size as `mags_array` with the
smoothed time-series. Any extra params can be provided using the
`extraparams` dict.
epdsmooth_extraparams : dict
This is a dict of any extra filter params to supply to the smoothing
function.
nworkers : int
The number of parallel workers to launch when processing the LCs.
maxworkertasks : int
The maximum number of tasks a parallel worker will complete before it is
replaced with a new one (sometimes helps with memory-leaks).
Returns
-------
dict
Returns a dict organized by all the keys in the input `magcols` list,
containing lists of EPD pickle light curves for that `magcol`.
Notes
-----
- S -> measure of PSF sharpness (~1/sigma^2, so smaller S = wider PSF)
- D -> measure of PSF ellipticity in the xy direction
- K -> measure of PSF ellipticity in the cross direction
S, D, K are related to the PSF's variance and covariance; see eqns. 30-33 in
A. Pal's thesis: https://arxiv.org/abs/0906.3486
|
astrobase/lcproc/epd.py
|
def parallel_epd_lclist(lclist,
externalparams,
timecols=None,
magcols=None,
errcols=None,
lcformat='hat-sql',
lcformatdir=None,
epdsmooth_sigclip=3.0,
epdsmooth_windowsize=21,
epdsmooth_func=smooth_magseries_savgol,
epdsmooth_extraparams=None,
nworkers=NCPUS,
maxworkertasks=1000):
'''This applies EPD in parallel to all LCs in the input list.
Parameters
----------
lclist : list of str
This is the list of light curve files to run EPD on.
externalparams : dict or None
This is a dict that indicates which keys in the lcdict obtained from the
lcfile correspond to the required external parameters. As with timecol,
magcol, and errcol, these can be simple keys (e.g. 'rjd') or compound
keys ('magaperture1.mags'). The dict should look something like::
{'fsv':'<lcdict key>' array: S values for each observation,
'fdv':'<lcdict key>' array: D values for each observation,
'fkv':'<lcdict key>' array: K values for each observation,
'xcc':'<lcdict key>' array: x coords for each observation,
'ycc':'<lcdict key>' array: y coords for each observation,
'bgv':'<lcdict key>' array: sky background for each observation,
'bge':'<lcdict key>' array: sky background err for each observation,
'iha':'<lcdict key>' array: hour angle for each observation,
'izd':'<lcdict key>' array: zenith distance for each observation}
Alternatively, if these exact keys are already present in the lcdict,
indicate this by setting externalparams to None.
timecols,magcols,errcols : lists of str
The keys in the lcdict produced by your light curve reader function that
correspond to the times, mags/fluxes, and associated measurement errors
that will be used as inputs to the EPD process. If these are None, the
default values for `timecols`, `magcols`, and `errcols` for your light
curve format will be used here.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curve files.
lcformatdir : str or None
If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
epdsmooth_sigclip : float or int or sequence of two floats/ints or None
This specifies how to sigma-clip the input LC before fitting the EPD
function to it.
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]` will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
epdsmooth_windowsize : int
This is the number of LC points to smooth over to generate a smoothed
light curve that will be used to fit the EPD function.
epdsmooth_func : Python function
This sets the smoothing filter function to use. A Savitzky-Golay filter
is used to smooth the light curve by default. The functions that can be
used with this kwarg are listed in `varbase.trends`. If you want to use
your own function, it MUST have the following signature::
def smoothfunc(mags_array, window_size, **extraparams)
and return a numpy array of the same size as `mags_array` with the
smoothed time-series. Any extra params can be provided using the
`extraparams` dict.
epdsmooth_extraparams : dict
This is a dict of any extra filter params to supply to the smoothing
function.
nworkers : int
The number of parallel workers to launch when processing the LCs.
maxworkertasks : int
The maximum number of tasks a parallel worker will complete before it is
replaced with a new one (sometimes helps with memory-leaks).
Returns
-------
dict
Returns a dict organized by all the keys in the input `magcols` list,
containing lists of EPD pickle light curves for that `magcol`.
Notes
-----
- S -> measure of PSF sharpness (~1/sigma^2, so smaller S = wider PSF)
- D -> measure of PSF ellipticity in the xy direction
- K -> measure of PSF ellipticity in the cross direction
S, D, K are related to the PSF's variance and covariance; see eqns. 30-33 in
A. Pal's thesis: https://arxiv.org/abs/0906.3486
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(fileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
# override the default timecols, magcols, and errcols
# using the ones provided to the function
if timecols is None:
timecols = dtimecols
if magcols is None:
magcols = dmagcols
if errcols is None:
errcols = derrcols
outdict = {}
# run by magcol
for t, m, e in zip(timecols, magcols, errcols):
tasks = [(x, t, m, e, externalparams, lcformat, lcformatdir,
epdsmooth_sigclip, epdsmooth_windowsize,
epdsmooth_func, epdsmooth_extraparams) for
x in lclist]
pool = mp.Pool(nworkers, maxtasksperchild=maxworkertasks)
results = pool.map(parallel_epd_worker, tasks)
pool.close()
pool.join()
outdict[m] = results
return outdict
|
def parallel_epd_lclist(lclist,
externalparams,
timecols=None,
magcols=None,
errcols=None,
lcformat='hat-sql',
lcformatdir=None,
epdsmooth_sigclip=3.0,
epdsmooth_windowsize=21,
epdsmooth_func=smooth_magseries_savgol,
epdsmooth_extraparams=None,
nworkers=NCPUS,
maxworkertasks=1000):
'''This applies EPD in parallel to all LCs in the input list.
Parameters
----------
lclist : list of str
This is the list of light curve files to run EPD on.
externalparams : dict or None
This is a dict that indicates which keys in the lcdict obtained from the
lcfile correspond to the required external parameters. As with timecol,
magcol, and errcol, these can be simple keys (e.g. 'rjd') or compound
keys ('magaperture1.mags'). The dict should look something like::
{'fsv':'<lcdict key>' array: S values for each observation,
'fdv':'<lcdict key>' array: D values for each observation,
'fkv':'<lcdict key>' array: K values for each observation,
'xcc':'<lcdict key>' array: x coords for each observation,
'ycc':'<lcdict key>' array: y coords for each observation,
'bgv':'<lcdict key>' array: sky background for each observation,
'bge':'<lcdict key>' array: sky background err for each observation,
'iha':'<lcdict key>' array: hour angle for each observation,
'izd':'<lcdict key>' array: zenith distance for each observation}
Alternatively, if these exact keys are already present in the lcdict,
indicate this by setting externalparams to None.
timecols,magcols,errcols : lists of str
The keys in the lcdict produced by your light curve reader function that
correspond to the times, mags/fluxes, and associated measurement errors
that will be used as inputs to the EPD process. If these are None, the
default values for `timecols`, `magcols`, and `errcols` for your light
curve format will be used here.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curve files.
lcformatdir : str or None
If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
epdsmooth_sigclip : float or int or sequence of two floats/ints or None
This specifies how to sigma-clip the input LC before fitting the EPD
function to it.
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]` will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
epdsmooth_windowsize : int
This is the number of LC points to smooth over to generate a smoothed
light curve that will be used to fit the EPD function.
epdsmooth_func : Python function
This sets the smoothing filter function to use. A Savitzky-Golay filter
is used to smooth the light curve by default. The functions that can be
used with this kwarg are listed in `varbase.trends`. If you want to use
your own function, it MUST have the following signature::
def smoothfunc(mags_array, window_size, **extraparams)
and return a numpy array of the same size as `mags_array` with the
smoothed time-series. Any extra params can be provided using the
`extraparams` dict.
epdsmooth_extraparams : dict
This is a dict of any extra filter params to supply to the smoothing
function.
nworkers : int
The number of parallel workers to launch when processing the LCs.
maxworkertasks : int
The maximum number of tasks a parallel worker will complete before it is
replaced with a new one (sometimes helps with memory-leaks).
Returns
-------
dict
Returns a dict organized by all the keys in the input `magcols` list,
containing lists of EPD pickle light curves for that `magcol`.
Notes
-----
- S -> measure of PSF sharpness (~1/sigma^2, so smaller S = wider PSF)
- D -> measure of PSF ellipticity in the xy direction
- K -> measure of PSF ellipticity in the cross direction
S, D, K are related to the PSF's variance and covariance; see eqns. 30-33 in
A. Pal's thesis: https://arxiv.org/abs/0906.3486
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(fileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
# override the default timecols, magcols, and errcols
# using the ones provided to the function
if timecols is None:
timecols = dtimecols
if magcols is None:
magcols = dmagcols
if errcols is None:
errcols = derrcols
outdict = {}
# run by magcol
for t, m, e in zip(timecols, magcols, errcols):
tasks = [(x, t, m, e, externalparams, lcformat, lcformatdir,
epdsmooth_sigclip, epdsmooth_windowsize,
epdsmooth_func, epdsmooth_extraparams) for
x in lclist]
pool = mp.Pool(nworkers, maxtasksperchild=maxworkertasks)
results = pool.map(parallel_epd_worker, tasks)
pool.close()
pool.join()
outdict[m] = results
return outdict
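# --- illustrative usage sketch; not part of the astrobase source ---
# Any custom smoother passed via `epdsmooth_func` must follow the
# documented signature: smoothfunc(mags_array, window_size, **extraparams)
# and return a same-size array. A minimal median-filter example:
from scipy.signal import medfilt

def smooth_magseries_median(mags_array, window_size, **extraparams):
    # medfilt requires an odd kernel size
    ksize = window_size if window_size % 2 else window_size + 1
    return medfilt(mags_array, kernel_size=ksize)

# the LC filenames below are hypothetical placeholders
epd_results = parallel_epd_lclist(
    ['lc-0001-pklc.pkl', 'lc-0002-pklc.pkl'],
    None,                                   # EPD param keys already in the lcdicts
    epdsmooth_func=smooth_magseries_median,
    epdsmooth_windowsize=25,
    nworkers=4,
)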
|
[
"This",
"applies",
"EPD",
"in",
"parallel",
"to",
"all",
"LCs",
"in",
"the",
"input",
"list",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/lcproc/epd.py#L344-L510
|
[
"def",
"parallel_epd_lclist",
"(",
"lclist",
",",
"externalparams",
",",
"timecols",
"=",
"None",
",",
"magcols",
"=",
"None",
",",
"errcols",
"=",
"None",
",",
"lcformat",
"=",
"'hat-sql'",
",",
"lcformatdir",
"=",
"None",
",",
"epdsmooth_sigclip",
"=",
"3.0",
",",
"epdsmooth_windowsize",
"=",
"21",
",",
"epdsmooth_func",
"=",
"smooth_magseries_savgol",
",",
"epdsmooth_extraparams",
"=",
"None",
",",
"nworkers",
"=",
"NCPUS",
",",
"maxworkertasks",
"=",
"1000",
")",
":",
"try",
":",
"formatinfo",
"=",
"get_lcformat",
"(",
"lcformat",
",",
"use_lcformat_dir",
"=",
"lcformatdir",
")",
"if",
"formatinfo",
":",
"(",
"fileglob",
",",
"readerfunc",
",",
"dtimecols",
",",
"dmagcols",
",",
"derrcols",
",",
"magsarefluxes",
",",
"normfunc",
")",
"=",
"formatinfo",
"else",
":",
"LOGERROR",
"(",
"\"can't figure out the light curve format\"",
")",
"return",
"None",
"except",
"Exception",
"as",
"e",
":",
"LOGEXCEPTION",
"(",
"\"can't figure out the light curve format\"",
")",
"return",
"None",
"# override the default timecols, magcols, and errcols",
"# using the ones provided to the function",
"if",
"timecols",
"is",
"None",
":",
"timecols",
"=",
"dtimecols",
"if",
"magcols",
"is",
"None",
":",
"magcols",
"=",
"dmagcols",
"if",
"errcols",
"is",
"None",
":",
"errcols",
"=",
"derrcols",
"outdict",
"=",
"{",
"}",
"# run by magcol",
"for",
"t",
",",
"m",
",",
"e",
"in",
"zip",
"(",
"timecols",
",",
"magcols",
",",
"errcols",
")",
":",
"tasks",
"=",
"[",
"(",
"x",
",",
"t",
",",
"m",
",",
"e",
",",
"externalparams",
",",
"lcformat",
",",
"lcformatdir",
",",
"epdsmooth_sigclip",
",",
"epdsmooth_windowsize",
",",
"epdsmooth_func",
",",
"epdsmooth_extraparams",
")",
"for",
"x",
"in",
"lclist",
"]",
"pool",
"=",
"mp",
".",
"Pool",
"(",
"nworkers",
",",
"maxtasksperchild",
"=",
"maxworkertasks",
")",
"results",
"=",
"pool",
".",
"map",
"(",
"parallel_epd_worker",
",",
"tasks",
")",
"pool",
".",
"close",
"(",
")",
"pool",
".",
"join",
"(",
")",
"outdict",
"[",
"m",
"]",
"=",
"results",
"return",
"outdict"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
parallel_epd_lcdir
|
This applies EPD in parallel to all LCs in a directory.
Parameters
----------
lcdir : str
The light curve directory to process.
externalparams : dict or None
This is a dict that indicates which keys in the lcdict obtained from the
lcfile correspond to the required external parameters. As with timecol,
magcol, and errcol, these can be simple keys (e.g. 'rjd') or compound
keys ('magaperture1.mags'). The dict should look something like::
{'fsv':'<lcdict key>' array: S values for each observation,
'fdv':'<lcdict key>' array: D values for each observation,
'fkv':'<lcdict key>' array: K values for each observation,
'xcc':'<lcdict key>' array: x coords for each observation,
'ycc':'<lcdict key>' array: y coords for each observation,
'bgv':'<lcdict key>' array: sky background for each observation,
'bge':'<lcdict key>' array: sky background err for each observation,
'iha':'<lcdict key>' array: hour angle for each observation,
'izd':'<lcdict key>' array: zenith distance for each observation}
lcfileglob : str or None
A UNIX fileglob to use to select light curve files in `lcdir`. If this
is not None, the value provided will override the default fileglob for
your light curve format.
timecols,magcols,errcols : lists of str
The keys in the lcdict produced by your light curve reader function that
correspond to the times, mags/fluxes, and associated measurement errors
that will be used as inputs to the EPD process. If these are None, the
default values for `timecols`, `magcols`, and `errcols` for your light
curve format will be used here.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curve files in `lcdir`.
lcformatdir : str or None
If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
epdsmooth_sigclip : float or int or sequence of two floats/ints or None
This specifies how to sigma-clip the input LC before fitting the EPD
function to it.
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]` will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
epdsmooth_windowsize : int
This is the number of LC points to smooth over to generate a smoothed
light curve that will be used to fit the EPD function.
epdsmooth_func : Python function
This sets the smoothing filter function to use. A Savitzky-Golay filter
is used to smooth the light curve by default. The functions that can be
used with this kwarg are listed in `varbase.trends`. If you want to use
your own function, it MUST have the following signature::
def smoothfunc(mags_array, window_size, **extraparams)
and return a numpy array of the same size as `mags_array` with the
smoothed time-series. Any extra params can be provided using the
`extraparams` dict.
epdsmooth_extraparams : dict
This is a dict of any extra filter params to supply to the smoothing
function.
nworkers : int
The number of parallel workers to launch when processing the LCs.
maxworkertasks : int
The maximum number of tasks a parallel worker will complete before it is
replaced with a new one (sometimes helps with memory-leaks).
Returns
-------
dict
Returns a dict organized by all the keys in the input `magcols` list,
containing lists of EPD pickle light curves for that `magcol`.
Notes
-----
- S -> measure of PSF sharpness (~1/sigma^2, so smaller S = wider PSF)
- D -> measure of PSF ellipticity in the xy direction
- K -> measure of PSF ellipticity in the cross direction
S, D, K are related to the PSF's variance and covariance; see eqns. 30-33 in
A. Pal's thesis: https://arxiv.org/abs/0906.3486
|
astrobase/lcproc/epd.py
|
def parallel_epd_lcdir(
lcdir,
externalparams,
lcfileglob=None,
timecols=None,
magcols=None,
errcols=None,
lcformat='hat-sql',
lcformatdir=None,
epdsmooth_sigclip=3.0,
epdsmooth_windowsize=21,
epdsmooth_func=smooth_magseries_savgol,
epdsmooth_extraparams=None,
nworkers=NCPUS,
maxworkertasks=1000
):
'''This applies EPD in parallel to all LCs in a directory.
Parameters
----------
lcdir : str
The light curve directory to process.
externalparams : dict or None
This is a dict that indicates which keys in the lcdict obtained from the
lcfile correspond to the required external parameters. As with timecol,
magcol, and errcol, these can be simple keys (e.g. 'rjd') or compound
keys ('magaperture1.mags'). The dict should look something like::
{'fsv':'<lcdict key>' array: S values for each observation,
'fdv':'<lcdict key>' array: D values for each observation,
'fkv':'<lcdict key>' array: K values for each observation,
'xcc':'<lcdict key>' array: x coords for each observation,
'ycc':'<lcdict key>' array: y coords for each observation,
'bgv':'<lcdict key>' array: sky background for each observation,
'bge':'<lcdict key>' array: sky background err for each observation,
'iha':'<lcdict key>' array: hour angle for each observation,
'izd':'<lcdict key>' array: zenith distance for each observation}
lcfileglob : str or None
A UNIX fileglob to use to select light curve files in `lcdir`. If this
is not None, the value provided will override the default fileglob for
your light curve format.
timecols,magcols,errcols : lists of str
The keys in the lcdict produced by your light curve reader function that
correspond to the times, mags/fluxes, and associated measurement errors
that will be used as inputs to the EPD process. If these are None, the
default values for `timecols`, `magcols`, and `errcols` for your light
curve format will be used here.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curve files in `lcdir`.
lcformatdir : str or None
If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
epdsmooth_sigclip : float or int or sequence of two floats/ints or None
This specifies how to sigma-clip the input LC before fitting the EPD
function to it.
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]` will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
epdsmooth_windowsize : int
This is the number of LC points to smooth over to generate a smoothed
light curve that will be used to fit the EPD function.
epdsmooth_func : Python function
This sets the smoothing filter function to use. A Savitzky-Golay filter
is used to smooth the light curve by default. The functions that can be
used with this kwarg are listed in `varbase.trends`. If you want to use
your own function, it MUST have the following signature::
def smoothfunc(mags_array, window_size, **extraparams)
and return a numpy array of the same size as `mags_array` with the
smoothed time-series. Any extra params can be provided using the
`extraparams` dict.
epdsmooth_extraparams : dict
This is a dict of any extra filter params to supply to the smoothing
function.
nworkers : int
The number of parallel workers to launch when processing the LCs.
maxworkertasks : int
The maximum number of tasks a parallel worker will complete before it is
replaced with a new one (sometimes helps with memory-leaks).
Returns
-------
dict
Returns a dict organized by all the keys in the input `magcols` list,
containing lists of EPD pickle light curves for that `magcol`.
Notes
-----
- S -> measure of PSF sharpness (~1/sigma^2, so smaller S = wider PSF)
- D -> measure of PSF ellipticity in the xy direction
- K -> measure of PSF ellipticity in the cross direction
S, D, K are related to the PSF's variance and covariance; see eqns. 30-33 in
A. Pal's thesis: https://arxiv.org/abs/0906.3486
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(fileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
# find all the files matching the lcglob in lcdir
if lcfileglob is None:
lcfileglob = fileglob
lclist = sorted(glob.glob(os.path.join(lcdir, lcfileglob)))
return parallel_epd_lclist(
lclist,
externalparams,
timecols=timecols,
magcols=magcols,
errcols=errcols,
        lcformat=lcformat,
        lcformatdir=lcformatdir,
epdsmooth_sigclip=epdsmooth_sigclip,
epdsmooth_windowsize=epdsmooth_windowsize,
epdsmooth_func=epdsmooth_func,
epdsmooth_extraparams=epdsmooth_extraparams,
nworkers=nworkers,
maxworkertasks=maxworkertasks
)
|
def parallel_epd_lcdir(
lcdir,
externalparams,
lcfileglob=None,
timecols=None,
magcols=None,
errcols=None,
lcformat='hat-sql',
lcformatdir=None,
epdsmooth_sigclip=3.0,
epdsmooth_windowsize=21,
epdsmooth_func=smooth_magseries_savgol,
epdsmooth_extraparams=None,
nworkers=NCPUS,
maxworkertasks=1000
):
'''This applies EPD in parallel to all LCs in a directory.
Parameters
----------
lcdir : str
The light curve directory to process.
externalparams : dict or None
This is a dict that indicates which keys in the lcdict obtained from the
lcfile correspond to the required external parameters. As with timecol,
magcol, and errcol, these can be simple keys (e.g. 'rjd') or compound
keys ('magaperture1.mags'). The dict should look something like::
{'fsv':'<lcdict key>' array: S values for each observation,
'fdv':'<lcdict key>' array: D values for each observation,
'fkv':'<lcdict key>' array: K values for each observation,
'xcc':'<lcdict key>' array: x coords for each observation,
'ycc':'<lcdict key>' array: y coords for each observation,
'bgv':'<lcdict key>' array: sky background for each observation,
'bge':'<lcdict key>' array: sky background err for each observation,
'iha':'<lcdict key>' array: hour angle for each observation,
'izd':'<lcdict key>' array: zenith distance for each observation}
lcfileglob : str or None
A UNIX fileglob to use to select light curve files in `lcdir`. If this
is not None, the value provided will override the default fileglob for
your light curve format.
timecols,magcols,errcols : lists of str
The keys in the lcdict produced by your light curve reader function that
correspond to the times, mags/fluxes, and associated measurement errors
that will be used as inputs to the EPD process. If these are None, the
default values for `timecols`, `magcols`, and `errcols` for your light
curve format will be used here.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curve files in `lcdir`.
lcformatdir : str or None
If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
epdsmooth_sigclip : float or int or sequence of two floats/ints or None
This specifies how to sigma-clip the input LC before fitting the EPD
function to it.
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]` will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
epdsmooth_windowsize : int
This is the number of LC points to smooth over to generate a smoothed
light curve that will be used to fit the EPD function.
epdsmooth_func : Python function
This sets the smoothing filter function to use. A Savitzky-Golay filter
is used to smooth the light curve by default. The functions that can be
used with this kwarg are listed in `varbase.trends`. If you want to use
your own function, it MUST have the following signature::
def smoothfunc(mags_array, window_size, **extraparams)
and return a numpy array of the same size as `mags_array` with the
smoothed time-series. Any extra params can be provided using the
`extraparams` dict.
epdsmooth_extraparams : dict
This is a dict of any extra filter params to supply to the smoothing
function.
nworkers : int
The number of parallel workers to launch when processing the LCs.
maxworkertasks : int
The maximum number of tasks a parallel worker will complete before it is
replaced with a new one (sometimes helps with memory-leaks).
Returns
-------
dict
Returns a dict organized by all the keys in the input `magcols` list,
containing lists of EPD pickle light curves for that `magcol`.
Notes
-----
- S -> measure of PSF sharpness (~1/sigma^2, so smaller S = wider PSF)
- D -> measure of PSF ellipticity in the xy direction
- K -> measure of PSF ellipticity in the cross direction
S, D, K are related to the PSF's variance and covariance; see eqns. 30-33 in
A. Pal's thesis: https://arxiv.org/abs/0906.3486
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(fileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
# find all the files matching the lcglob in lcdir
if lcfileglob is None:
lcfileglob = fileglob
lclist = sorted(glob.glob(os.path.join(lcdir, lcfileglob)))
return parallel_epd_lclist(
lclist,
externalparams,
timecols=timecols,
magcols=magcols,
errcols=errcols,
        lcformat=lcformat,
        lcformatdir=lcformatdir,
epdsmooth_sigclip=epdsmooth_sigclip,
epdsmooth_windowsize=epdsmooth_windowsize,
epdsmooth_func=epdsmooth_func,
epdsmooth_extraparams=epdsmooth_extraparams,
nworkers=nworkers,
maxworkertasks=maxworkertasks
)
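# --- illustrative usage sketch; not part of the astrobase source ---
# Run EPD over every matching LC in a directory; the directory and the
# fileglob below are hypothetical placeholders.
epd_results = parallel_epd_lcdir(
    '/data/project/lightcurves',
    None,                         # EPD param keys already in the lcdicts
    lcfileglob='*-pklc.pkl',      # overrides the format's default fileglob
    lcformat='hat-sql',
    nworkers=NCPUS,
)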
|
[
"This",
"applies",
"EPD",
"in",
"parallel",
"to",
"all",
"LCs",
"in",
"a",
"directory",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/lcproc/epd.py#L514-L678
|
[
"def",
"parallel_epd_lcdir",
"(",
"lcdir",
",",
"externalparams",
",",
"lcfileglob",
"=",
"None",
",",
"timecols",
"=",
"None",
",",
"magcols",
"=",
"None",
",",
"errcols",
"=",
"None",
",",
"lcformat",
"=",
"'hat-sql'",
",",
"lcformatdir",
"=",
"None",
",",
"epdsmooth_sigclip",
"=",
"3.0",
",",
"epdsmooth_windowsize",
"=",
"21",
",",
"epdsmooth_func",
"=",
"smooth_magseries_savgol",
",",
"epdsmooth_extraparams",
"=",
"None",
",",
"nworkers",
"=",
"NCPUS",
",",
"maxworkertasks",
"=",
"1000",
")",
":",
"try",
":",
"formatinfo",
"=",
"get_lcformat",
"(",
"lcformat",
",",
"use_lcformat_dir",
"=",
"lcformatdir",
")",
"if",
"formatinfo",
":",
"(",
"fileglob",
",",
"readerfunc",
",",
"dtimecols",
",",
"dmagcols",
",",
"derrcols",
",",
"magsarefluxes",
",",
"normfunc",
")",
"=",
"formatinfo",
"else",
":",
"LOGERROR",
"(",
"\"can't figure out the light curve format\"",
")",
"return",
"None",
"except",
"Exception",
"as",
"e",
":",
"LOGEXCEPTION",
"(",
"\"can't figure out the light curve format\"",
")",
"return",
"None",
"# find all the files matching the lcglob in lcdir",
"if",
"lcfileglob",
"is",
"None",
":",
"lcfileglob",
"=",
"fileglob",
"lclist",
"=",
"sorted",
"(",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"lcdir",
",",
"lcfileglob",
")",
")",
")",
"return",
"parallel_epd_lclist",
"(",
"lclist",
",",
"externalparams",
",",
"timecols",
"=",
"timecols",
",",
"magcols",
"=",
"magcols",
",",
"errcols",
"=",
"errcols",
",",
"lcformat",
"=",
"lcformat",
",",
"epdsmooth_sigclip",
"=",
"epdsmooth_sigclip",
",",
"epdsmooth_windowsize",
"=",
"epdsmooth_windowsize",
",",
"epdsmooth_func",
"=",
"epdsmooth_func",
",",
"epdsmooth_extraparams",
"=",
"epdsmooth_extraparams",
",",
"nworkers",
"=",
"nworkers",
",",
"maxworkertasks",
"=",
"maxworkertasks",
")"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
bls_serial_pfind
|
Runs the Box Least Squares Fitting Search for transit-shaped signals.
Based on the version of BLS in Astropy 3.1:
`astropy.stats.BoxLeastSquares`. If you don't have Astropy 3.1, this module
will fail to import. Note that by default, this implementation of
`bls_serial_pfind` doesn't use the `.autoperiod()` function from
`BoxLeastSquares` but uses the same auto frequency-grid generation as the
functions in `periodbase.kbls`. If you want to use Astropy's implementation,
set the value of `autofreq` kwarg to 'astropy'.
The dict returned from this function contains a `blsmodel` key, which is the
generated model from Astropy's BLS. Use the `.compute_stats()` method to
calculate the required stats like SNR, depth, duration, etc.
Parameters
----------
times,mags,errs : np.array
The magnitude/flux time-series to search for transits.
magsarefluxes : bool
If the input measurement values in `mags` and `errs` are in fluxes, set
this to True.
startp,endp : float
The minimum and maximum periods to consider for the transit search.
stepsize : float
The step-size in frequency to use when constructing a frequency grid for
the period search.
mintransitduration,maxtransitduration : float
The minimum and maximum transit durations (in units of phase) to consider
for the transit search.
ndurations : int
The number of transit durations to use in the period-search.
autofreq : bool or str
If this is True, the values of `stepsize` and `nphasebins` will be
ignored, and these, along with a frequency-grid, will be determined
based on the following relations::
nphasebins = int(ceil(2.0/mintransitduration))
if nphasebins > 3000:
nphasebins = 3000
stepsize = 0.25*mintransitduration/(times.max()-times.min())
minfreq = 1.0/endp
maxfreq = 1.0/startp
nfreq = int(ceil((maxfreq - minfreq)/stepsize))
If this is False, you must set `startp`, `endp`, and `stepsize` as
appropriate.
If this is the string 'astropy', this function will use the
`astropy.stats.BoxLeastSquares.autoperiod()` function to calculate the
frequency grid instead of the kbls method.
blsobjective : {'likelihood','snr'}
Sets the type of objective to optimize in the `BoxLeastSquares.power()`
function.
blsmethod : {'fast','slow'}
Sets the type of method to use in the `BoxLeastSquares.power()`
function.
blsoversample : int
Sets the `oversample` kwarg for the `BoxLeastSquares.power()` function.
blsmintransits : int
Sets the `minimum_n_transit` kwarg for the `BoxLeastSquares.autoperiod()`
function.
blsfreqfactor : float
Sets the `frequency_factor` kwarg for the `BoxLeastSquares.autoperiod()`
function.
periodepsilon : float
The fractional difference between successive values of 'best' periods
when sorting by periodogram power to consider them as separate periods
(as opposed to part of the same periodogram peak). This is used to avoid
broad peaks in the periodogram and make sure the 'best' periods returned
are all actually independent.
nbestpeaks : int
The number of 'best' peaks to return from the periodogram results,
starting from the global maximum of the periodogram peak values.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]` will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
verbose : bool
If this is True, will indicate progress and details about the frequency
grid used for the period search.
raiseonfail : bool
If True, raises an exception if something goes wrong. Otherwise, returns
None.
Returns
-------
dict
This function returns a dict, referred to as an `lspinfo` dict in other
astrobase functions that operate on periodogram results. This is a
standardized format across all astrobase period-finders, and is of the
form below::
{'bestperiod': the best period value in the periodogram,
'bestlspval': the periodogram peak associated with the best period,
'nbestpeaks': the input value of nbestpeaks,
'nbestlspvals': nbestpeaks-size list of best period peak values,
'nbestperiods': nbestpeaks-size list of best periods,
'lspvals': the full array of periodogram powers,
'frequencies': the full array of frequencies considered,
'periods': the full array of periods considered,
'durations': the array of durations used to run BLS,
'blsresult': Astropy BLS result object (BoxLeastSquaresResult),
'blsmodel': Astropy BLS BoxLeastSquares object used for work,
'stepsize': the actual stepsize used,
'nfreq': the actual nfreq used,
'mintransitduration': the input mintransitduration,
'maxtransitduration': the input maxtransitduration,
'method':'bls' -> the name of the period-finder method,
'kwargs':{ dict of all of the input kwargs for record-keeping}}
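As a hedged illustration of the `autofreq=True` relations above, here is
how the kbls-style frequency grid works out for the default kwargs and a
hypothetical 25-day observing baseline::
    mintransitduration, startp, endp = 0.01, 0.1, 100.0
    timebase = 25.0                                     # days (hypothetical)
    stepsize = 0.25*mintransitduration/timebase         # = 1.0e-4
    minfreq, maxfreq = 1.0/endp, 1.0/startp             # = 0.01, 10.0
    nfreq = int(ceil((maxfreq - minfreq)/stepsize))     # = 99900 frequencies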
|
astrobase/periodbase/abls.py
|
def bls_serial_pfind(times, mags, errs,
magsarefluxes=False,
startp=0.1, # search from 0.1 d to...
endp=100.0, # ... 100.0 d -- don't search full timebase
stepsize=5.0e-4,
mintransitduration=0.01, # minimum transit length in phase
maxtransitduration=0.4, # maximum transit length in phase
ndurations=100,
autofreq=True, # figure out f0, nf, and df automatically
blsobjective='likelihood',
blsmethod='fast',
blsoversample=10,
blsmintransits=3,
blsfreqfactor=10.0,
periodepsilon=0.1,
nbestpeaks=5,
sigclip=10.0,
verbose=True,
raiseonfail=False):
'''Runs the Box Least Squares Fitting Search for transit-shaped signals.
Based on the version of BLS in Astropy 3.1:
`astropy.stats.BoxLeastSquares`. If you don't have Astropy 3.1, this module
will fail to import. Note that by default, this implementation of
`bls_serial_pfind` doesn't use the `.autoperiod()` function from
`BoxLeastSquares` but uses the same auto frequency-grid generation as the
functions in `periodbase.kbls`. If you want to use Astropy's implementation,
set the value of `autofreq` kwarg to 'astropy'.
The dict returned from this function contains a `blsmodel` key, which is the
generated model from Astropy's BLS. Use the `.compute_stats()` method to
calculate the required stats like SNR, depth, duration, etc.
Parameters
----------
times,mags,errs : np.array
The magnitude/flux time-series to search for transits.
magsarefluxes : bool
If the input measurement values in `mags` and `errs` are in fluxes, set
this to True.
startp,endp : float
The minimum and maximum periods to consider for the transit search.
stepsize : float
The step-size in frequency to use when constructing a frequency grid for
the period search.
mintransitduration,maxtransitduration : float
The minimum and maximum transitdurations (in units of phase) to consider
for the transit search.
ndurations : int
The number of transit durations to use in the period-search.
autofreq : bool or str
If this is True, the values of `stepsize` and `nphasebins` will be
ignored, and these, along with a frequency-grid, will be determined
based on the following relations::
nphasebins = int(ceil(2.0/mintransitduration))
if nphasebins > 3000:
nphasebins = 3000
stepsize = 0.25*mintransitduration/(times.max()-times.min())
minfreq = 1.0/endp
maxfreq = 1.0/startp
nfreq = int(ceil((maxfreq - minfreq)/stepsize))
If this is False, you must set `startp`, `endp`, and `stepsize` as
appropriate.
If this is str == 'astropy', will use the
`astropy.stats.BoxLeastSquares.autoperiod()` function to calculate the
frequency grid instead of the kbls method.
blsobjective : {'likelihood','snr'}
Sets the type of objective to optimize in the `BoxLeastSquares.power()`
function.
blsmethod : {'fast','slow'}
Sets the type of method to use in the `BoxLeastSquares.power()`
function.
blsoversample : int
Sets the `oversample` kwarg for the `BoxLeastSquares.power()` function.
blsmintransits : int
Sets the `minimum_n_transit` kwarg for the `BoxLeastSquares.autoperiod()`
function.
blsfreqfactor : float
Sets the `frequency_factor` kwarg for the `BoxLeastSquares.autoperiod()`
function.
periodepsilon : float
The fractional difference between successive values of 'best' periods
when sorting by periodogram power to consider them as separate periods
(as opposed to part of the same periodogram peak). This is used to avoid
broad peaks in the periodogram and make sure the 'best' periods returned
are all actually independent.
nbestpeaks : int
The number of 'best' peaks to return from the periodogram results,
starting from the global maximum of the periodogram peak values.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
verbose : bool
If this is True, will indicate progress and details about the frequency
grid used for the period search.
raiseonfail : bool
If True, raises an exception if something goes wrong. Otherwise, returns
None.
Returns
-------
dict
This function returns a dict, referred to as an `lspinfo` dict in other
astrobase functions that operate on periodogram results. This is a
standardized format across all astrobase period-finders, and is of the
form below::
{'bestperiod': the best period value in the periodogram,
'bestlspval': the periodogram peak associated with the best period,
'nbestpeaks': the input value of nbestpeaks,
'nbestlspvals': nbestpeaks-size list of best period peak values,
'nbestperiods': nbestpeaks-size list of best periods,
'lspvals': the full array of periodogram powers,
'frequencies': the full array of frequencies considered,
'periods': the full array of periods considered,
'durations': the array of durations used to run BLS,
'blsresult': Astropy BLS result object (BoxLeastSquaresResult),
'blsmodel': Astropy BLS BoxLeastSquares object used for work,
'stepsize': the actual stepsize used,
'nfreq': the actual nfreq used,
'mintransitduration': the input mintransitduration,
'maxtransitduration': the input maxtransitduration,
'method':'bls' -> the name of the period-finder method,
'kwargs':{ dict of all of the input kwargs for record-keeping}}
'''
# get rid of nans first and sigclip
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
# make sure there are enough points to calculate a spectrum
if len(stimes) > 9 and len(smags) > 9 and len(serrs) > 9:
# if we're setting up everything automatically
if isinstance(autofreq, bool) and autofreq:
# use heuristic to figure out best timestep
stepsize = 0.25*mintransitduration/(stimes.max()-stimes.min())
# now figure out the frequencies to use
minfreq = 1.0/endp
maxfreq = 1.0/startp
nfreq = int(npceil((maxfreq - minfreq)/stepsize))
# say what we're using
if verbose:
LOGINFO('min P: %s, max P: %s, nfreq: %s, '
'minfreq: %s, maxfreq: %s' % (startp, endp, nfreq,
minfreq, maxfreq))
LOGINFO('autofreq = True: using AUTOMATIC values for '
'freq stepsize: %s, ndurations: %s, '
'min transit duration: %s, max transit duration: %s' %
(stepsize, ndurations,
mintransitduration, maxtransitduration))
use_autoperiod = False
elif isinstance(autofreq, bool) and not autofreq:
minfreq = 1.0/endp
maxfreq = 1.0/startp
nfreq = int(npceil((maxfreq - minfreq)/stepsize))
# say what we're using
if verbose:
LOGINFO('min P: %s, max P: %s, nfreq: %s, '
'minfreq: %s, maxfreq: %s' % (startp, endp, nfreq,
minfreq, maxfreq))
LOGINFO('autofreq = False: using PROVIDED values for '
'freq stepsize: %s, ndurations: %s, '
'min transit duration: %s, max transit duration: %s' %
(stepsize, ndurations,
mintransitduration, maxtransitduration))
use_autoperiod = False
elif isinstance(autofreq, str) and autofreq == 'astropy':
use_autoperiod = True
minfreq = 1.0/endp
maxfreq = 1.0/startp
else:
LOGERROR("unknown autofreq kwarg encountered. can't continue...")
return None
# check the time-base vs. endp value
if minfreq < (1.0/(stimes.max() - stimes.min())):
if verbose:
LOGWARNING('the requested max P = %.3f is larger than '
'the time base of the observations = %.3f, '
' will make minfreq = 2 x 1/timebase'
% (endp, stimes.max() - stimes.min()))
minfreq = 2.0/(stimes.max() - stimes.min())
if verbose:
LOGINFO('new minfreq: %s, maxfreq: %s' %
(minfreq, maxfreq))
# run BLS
try:
# astropy's BLS requires durations in units of time
durations = nplinspace(mintransitduration*startp,
maxtransitduration*startp,
ndurations)
# set up the correct units for the BLS model
if magsarefluxes:
blsmodel = BoxLeastSquares(
stimes*u.day,
smags*u.dimensionless_unscaled,
dy=serrs*u.dimensionless_unscaled
)
else:
blsmodel = BoxLeastSquares(
stimes*u.day,
smags*u.mag,
dy=serrs*u.mag
)
# use autoperiod if requested
if use_autoperiod:
periods = nparray(
blsmodel.autoperiod(
durations,
minimum_period=startp,
maximum_period=endp,
minimum_n_transit=blsmintransits,
frequency_factor=blsfreqfactor
)
)
nfreq = periods.size
if verbose:
LOGINFO(
"autofreq = 'astropy', used .autoperiod() with "
"minimum_n_transit = %s, freq_factor = %s "
"to generate the frequency grid" %
(blsmintransits, blsfreqfactor)
)
LOGINFO('stepsize = %.5f, nfreq = %s, minfreq = %.5f, '
'maxfreq = %.5f, ndurations = %s' %
(abs(1.0/periods[1] - 1.0/periods[0]),
nfreq,
1.0/periods.max(),
1.0/periods.min(),
durations.size))
# otherwise, use kbls method
else:
frequencies = minfreq + nparange(nfreq)*stepsize
periods = 1.0/frequencies
if nfreq > 5.0e5:
if verbose:
LOGWARNING('more than 5.0e5 frequencies to go through; '
'this will take a while. '
'you might want to use the '
'abls.bls_parallel_pfind function instead')
# run the periodogram
blsresult = blsmodel.power(
periods*u.day,
durations*u.day,
objective=blsobjective,
method=blsmethod,
oversample=blsoversample
)
# get the peak values
lsp = nparray(blsresult.power)
# find the nbestpeaks for the periodogram: 1. sort the lsp array
# by highest value first 2. go down the values until we find
# five values that are separated by at least periodepsilon in
# period
# make sure to get only the finite peaks in the periodogram
# this is needed because BLS may produce infs for some peaks
finitepeakind = npisfinite(lsp)
finlsp = lsp[finitepeakind]
finperiods = periods[finitepeakind]
# make sure that finlsp has finite values before we work on it
try:
bestperiodind = npargmax(finlsp)
except ValueError:
LOGERROR('no finite periodogram values '
'for this mag series, skipping...')
return {'bestperiod':npnan,
'bestlspval':npnan,
'nbestpeaks':nbestpeaks,
'nbestinds':None,
'nbestlspvals':None,
'nbestperiods':None,
'lspvals':None,
'periods':None,
'durations':None,
'method':'bls',
'blsresult':None,
'blsmodel':None,
'kwargs':{'startp':startp,
'endp':endp,
'stepsize':stepsize,
'mintransitduration':mintransitduration,
'maxtransitduration':maxtransitduration,
'ndurations':ndurations,
'blsobjective':blsobjective,
'blsmethod':blsmethod,
'blsoversample':blsoversample,
'blsntransits':blsmintransits,
'blsfreqfactor':blsfreqfactor,
'autofreq':autofreq,
'periodepsilon':periodepsilon,
'nbestpeaks':nbestpeaks,
'sigclip':sigclip,
'magsarefluxes':magsarefluxes}}
sortedlspind = npargsort(finlsp)[::-1]
sortedlspperiods = finperiods[sortedlspind]
sortedlspvals = finlsp[sortedlspind]
# now get the nbestpeaks
nbestperiods, nbestlspvals, nbestinds, peakcount = (
[finperiods[bestperiodind]],
[finlsp[bestperiodind]],
[bestperiodind],
1
)
prevperiod = sortedlspperiods[0]
# find the best nbestpeaks in the lsp and their periods
for period, lspval, ind in zip(sortedlspperiods,
sortedlspvals,
sortedlspind):
if peakcount == nbestpeaks:
break
perioddiff = abs(period - prevperiod)
bestperiodsdiff = [abs(period - x) for x in nbestperiods]
# print('prevperiod = %s, thisperiod = %s, '
# 'perioddiff = %s, peakcount = %s' %
# (prevperiod, period, perioddiff, peakcount))
# this ensures that this period is different from the last
# period and from all the other existing best periods by
# periodepsilon to make sure we jump to an entire different
# peak in the periodogram
if (perioddiff > (periodepsilon*prevperiod) and
all(x > (periodepsilon*period)
for x in bestperiodsdiff)):
nbestperiods.append(period)
nbestlspvals.append(lspval)
nbestinds.append(ind)
peakcount = peakcount + 1
prevperiod = period
# generate the return dict
resultdict = {
'bestperiod':finperiods[bestperiodind],
'bestlspval':finlsp[bestperiodind],
'nbestpeaks':nbestpeaks,
'nbestinds':nbestinds,
'nbestlspvals':nbestlspvals,
'nbestperiods':nbestperiods,
'lspvals':lsp,
'frequencies':frequencies,
'periods':periods,
'durations':durations,
'blsresult':blsresult,
'blsmodel':blsmodel,
'stepsize':stepsize,
'nfreq':nfreq,
'mintransitduration':mintransitduration,
'maxtransitduration':maxtransitduration,
'method':'bls',
'kwargs':{'startp':startp,
'endp':endp,
'stepsize':stepsize,
'mintransitduration':mintransitduration,
'maxtransitduration':maxtransitduration,
'ndurations':ndurations,
'blsobjective':blsobjective,
'blsmethod':blsmethod,
'blsoversample':blsoversample,
'blsntransits':blsmintransits,
'blsfreqfactor':blsfreqfactor,
'autofreq':autofreq,
'periodepsilon':periodepsilon,
'nbestpeaks':nbestpeaks,
'sigclip':sigclip,
'magsarefluxes':magsarefluxes}
}
return resultdict
except Exception as e:
LOGEXCEPTION('BLS failed!')
if raiseonfail:
raise
return {'bestperiod':npnan,
'bestlspval':npnan,
'nbestinds':None,
'nbestpeaks':nbestpeaks,
'nbestlspvals':None,
'nbestperiods':None,
'lspvals':None,
'periods':None,
'durations':None,
'blsresult':None,
'blsmodel':None,
'stepsize':stepsize,
'nfreq':nfreq,
'mintransitduration':mintransitduration,
'maxtransitduration':maxtransitduration,
'method':'bls',
'kwargs':{'startp':startp,
'endp':endp,
'stepsize':stepsize,
'mintransitduration':mintransitduration,
'maxtransitduration':maxtransitduration,
'ndurations':ndurations,
'blsobjective':blsobjective,
'blsmethod':blsmethod,
'blsoversample':blsoversample,
'blsntransits':blsmintransits,
'blsfreqfactor':blsfreqfactor,
'autofreq':autofreq,
'periodepsilon':periodepsilon,
'nbestpeaks':nbestpeaks,
'sigclip':sigclip,
'magsarefluxes':magsarefluxes}}
else:
LOGERROR('no good detections for these times and mags, skipping...')
return {'bestperiod':npnan,
'bestlspval':npnan,
'nbestinds':None,
'nbestpeaks':nbestpeaks,
'nbestlspvals':None,
'nbestperiods':None,
'lspvals':None,
'periods':None,
'durations':None,
'blsresult':None,
'blsmodel':None,
'stepsize':stepsize,
'nfreq':None,
'nphasebins':None,
'mintransitduration':mintransitduration,
'maxtransitduration':maxtransitduration,
'method':'bls',
'kwargs':{'startp':startp,
'endp':endp,
'stepsize':stepsize,
'mintransitduration':mintransitduration,
'maxtransitduration':maxtransitduration,
'ndurations':ndurations,
'blsobjective':blsobjective,
'blsmethod':blsmethod,
'blsoversample':blsoversample,
'blsntransits':blsmintransits,
'blsfreqfactor':blsfreqfactor,
'autofreq':autofreq,
'periodepsilon':periodepsilon,
'nbestpeaks':nbestpeaks,
'sigclip':sigclip,
'magsarefluxes':magsarefluxes}}
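The docstring above points to `.compute_stats()` for SNR/depth/duration follow-up. A minimal sketch, assuming `lspinfo` is a successful result from `bls_serial_pfind` above and using the key names documented for Astropy's `BoxLeastSquares.compute_stats()`::
import numpy as np
# pick the strongest raw peak; 'lspvals' mirrors blsresult.power
best = int(np.argmax(lspinfo['lspvals']))
blsres = lspinfo['blsresult']
stats = lspinfo['blsmodel'].compute_stats(blsres.period[best],
                                          blsres.duration[best],
                                          blsres.transit_time[best])
print(stats['depth'])          # (depth, depth uncertainty)
print(stats['transit_times'])  # mid-transit times found in the series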
|
[
"Runs",
"the",
"Box",
"Least",
"Squares",
"Fitting",
"Search",
"for",
"transit",
"-",
"shaped",
"signals",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/periodbase/abls.py#L75-L601
|
[
"def",
"bls_serial_pfind",
"(",
"times",
",",
"mags",
",",
"errs",
",",
"magsarefluxes",
"=",
"False",
",",
"startp",
"=",
"0.1",
",",
"# search from 0.1 d to...",
"endp",
"=",
"100.0",
",",
"# ... 100.0 d -- don't search full timebase",
"stepsize",
"=",
"5.0e-4",
",",
"mintransitduration",
"=",
"0.01",
",",
"# minimum transit length in phase",
"maxtransitduration",
"=",
"0.4",
",",
"# maximum transit length in phase",
"ndurations",
"=",
"100",
",",
"autofreq",
"=",
"True",
",",
"# figure out f0, nf, and df automatically",
"blsobjective",
"=",
"'likelihood'",
",",
"blsmethod",
"=",
"'fast'",
",",
"blsoversample",
"=",
"10",
",",
"blsmintransits",
"=",
"3",
",",
"blsfreqfactor",
"=",
"10.0",
",",
"periodepsilon",
"=",
"0.1",
",",
"nbestpeaks",
"=",
"5",
",",
"sigclip",
"=",
"10.0",
",",
"verbose",
"=",
"True",
",",
"raiseonfail",
"=",
"False",
")",
":",
"# get rid of nans first and sigclip",
"stimes",
",",
"smags",
",",
"serrs",
"=",
"sigclip_magseries",
"(",
"times",
",",
"mags",
",",
"errs",
",",
"magsarefluxes",
"=",
"magsarefluxes",
",",
"sigclip",
"=",
"sigclip",
")",
"# make sure there are enough points to calculate a spectrum",
"if",
"len",
"(",
"stimes",
")",
">",
"9",
"and",
"len",
"(",
"smags",
")",
">",
"9",
"and",
"len",
"(",
"serrs",
")",
">",
"9",
":",
"# if we're setting up everything automatically",
"if",
"isinstance",
"(",
"autofreq",
",",
"bool",
")",
"and",
"autofreq",
":",
"# use heuristic to figure out best timestep",
"stepsize",
"=",
"0.25",
"*",
"mintransitduration",
"/",
"(",
"stimes",
".",
"max",
"(",
")",
"-",
"stimes",
".",
"min",
"(",
")",
")",
"# now figure out the frequencies to use",
"minfreq",
"=",
"1.0",
"/",
"endp",
"maxfreq",
"=",
"1.0",
"/",
"startp",
"nfreq",
"=",
"int",
"(",
"npceil",
"(",
"(",
"maxfreq",
"-",
"minfreq",
")",
"/",
"stepsize",
")",
")",
"# say what we're using",
"if",
"verbose",
":",
"LOGINFO",
"(",
"'min P: %s, max P: %s, nfreq: %s, '",
"'minfreq: %s, maxfreq: %s'",
"%",
"(",
"startp",
",",
"endp",
",",
"nfreq",
",",
"minfreq",
",",
"maxfreq",
")",
")",
"LOGINFO",
"(",
"'autofreq = True: using AUTOMATIC values for '",
"'freq stepsize: %s, ndurations: %s, '",
"'min transit duration: %s, max transit duration: %s'",
"%",
"(",
"stepsize",
",",
"ndurations",
",",
"mintransitduration",
",",
"maxtransitduration",
")",
")",
"use_autoperiod",
"=",
"False",
"elif",
"isinstance",
"(",
"autofreq",
",",
"bool",
")",
"and",
"not",
"autofreq",
":",
"minfreq",
"=",
"1.0",
"/",
"endp",
"maxfreq",
"=",
"1.0",
"/",
"startp",
"nfreq",
"=",
"int",
"(",
"npceil",
"(",
"(",
"maxfreq",
"-",
"minfreq",
")",
"/",
"stepsize",
")",
")",
"# say what we're using",
"if",
"verbose",
":",
"LOGINFO",
"(",
"'min P: %s, max P: %s, nfreq: %s, '",
"'minfreq: %s, maxfreq: %s'",
"%",
"(",
"startp",
",",
"endp",
",",
"nfreq",
",",
"minfreq",
",",
"maxfreq",
")",
")",
"LOGINFO",
"(",
"'autofreq = False: using PROVIDED values for '",
"'freq stepsize: %s, ndurations: %s, '",
"'min transit duration: %s, max transit duration: %s'",
"%",
"(",
"stepsize",
",",
"ndurations",
",",
"mintransitduration",
",",
"maxtransitduration",
")",
")",
"use_autoperiod",
"=",
"False",
"elif",
"isinstance",
"(",
"autofreq",
",",
"str",
")",
"and",
"autofreq",
"==",
"'astropy'",
":",
"use_autoperiod",
"=",
"True",
"minfreq",
"=",
"1.0",
"/",
"endp",
"maxfreq",
"=",
"1.0",
"/",
"startp",
"else",
":",
"LOGERROR",
"(",
"\"unknown autofreq kwarg encountered. can't continue...\"",
")",
"return",
"None",
"# check the time-base vs. endp value",
"if",
"minfreq",
"<",
"(",
"1.0",
"/",
"(",
"stimes",
".",
"max",
"(",
")",
"-",
"stimes",
".",
"min",
"(",
")",
")",
")",
":",
"if",
"verbose",
":",
"LOGWARNING",
"(",
"'the requested max P = %.3f is larger than '",
"'the time base of the observations = %.3f, '",
"' will make minfreq = 2 x 1/timebase'",
"%",
"(",
"endp",
",",
"stimes",
".",
"max",
"(",
")",
"-",
"stimes",
".",
"min",
"(",
")",
")",
")",
"minfreq",
"=",
"2.0",
"/",
"(",
"stimes",
".",
"max",
"(",
")",
"-",
"stimes",
".",
"min",
"(",
")",
")",
"if",
"verbose",
":",
"LOGINFO",
"(",
"'new minfreq: %s, maxfreq: %s'",
"%",
"(",
"minfreq",
",",
"maxfreq",
")",
")",
"# run BLS",
"try",
":",
"# astropy's BLS requires durations in units of time",
"durations",
"=",
"nplinspace",
"(",
"mintransitduration",
"*",
"startp",
",",
"maxtransitduration",
"*",
"startp",
",",
"ndurations",
")",
"# set up the correct units for the BLS model",
"if",
"magsarefluxes",
":",
"blsmodel",
"=",
"BoxLeastSquares",
"(",
"stimes",
"*",
"u",
".",
"day",
",",
"smags",
"*",
"u",
".",
"dimensionless_unscaled",
",",
"dy",
"=",
"serrs",
"*",
"u",
".",
"dimensionless_unscaled",
")",
"else",
":",
"blsmodel",
"=",
"BoxLeastSquares",
"(",
"stimes",
"*",
"u",
".",
"day",
",",
"smags",
"*",
"u",
".",
"mag",
",",
"dy",
"=",
"serrs",
"*",
"u",
".",
"mag",
")",
"# use autoperiod if requested",
"if",
"use_autoperiod",
":",
"periods",
"=",
"nparray",
"(",
"blsmodel",
".",
"autoperiod",
"(",
"durations",
",",
"minimum_period",
"=",
"startp",
",",
"maximum_period",
"=",
"endp",
",",
"minimum_n_transit",
"=",
"blsmintransits",
",",
"frequency_factor",
"=",
"blsfreqfactor",
")",
")",
"nfreq",
"=",
"periods",
".",
"size",
"if",
"verbose",
":",
"LOGINFO",
"(",
"\"autofreq = 'astropy', used .autoperiod() with \"",
"\"minimum_n_transit = %s, freq_factor = %s \"",
"\"to generate the frequency grid\"",
"%",
"(",
"blsmintransits",
",",
"blsfreqfactor",
")",
")",
"LOGINFO",
"(",
"'stepsize = %.5f, nfreq = %s, minfreq = %.5f, '",
"'maxfreq = %.5f, ndurations = %s'",
"%",
"(",
"abs",
"(",
"1.0",
"/",
"periods",
"[",
"1",
"]",
"-",
"1.0",
"/",
"periods",
"[",
"0",
"]",
")",
",",
"nfreq",
",",
"1.0",
"/",
"periods",
".",
"max",
"(",
")",
",",
"1.0",
"/",
"periods",
".",
"min",
"(",
")",
",",
"durations",
".",
"size",
")",
")",
"# otherwise, use kbls method",
"else",
":",
"frequencies",
"=",
"minfreq",
"+",
"nparange",
"(",
"nfreq",
")",
"*",
"stepsize",
"periods",
"=",
"1.0",
"/",
"frequencies",
"if",
"nfreq",
">",
"5.0e5",
":",
"if",
"verbose",
":",
"LOGWARNING",
"(",
"'more than 5.0e5 frequencies to go through; '",
"'this will take a while. '",
"'you might want to use the '",
"'abls.bls_parallel_pfind function instead'",
")",
"# run the periodogram",
"blsresult",
"=",
"blsmodel",
".",
"power",
"(",
"periods",
"*",
"u",
".",
"day",
",",
"durations",
"*",
"u",
".",
"day",
",",
"objective",
"=",
"blsobjective",
",",
"method",
"=",
"blsmethod",
",",
"oversample",
"=",
"blsoversample",
")",
"# get the peak values",
"lsp",
"=",
"nparray",
"(",
"blsresult",
".",
"power",
")",
"# find the nbestpeaks for the periodogram: 1. sort the lsp array",
"# by highest value first 2. go down the values until we find",
"# five values that are separated by at least periodepsilon in",
"# period",
"# make sure to get only the finite peaks in the periodogram",
"# this is needed because BLS may produce infs for some peaks",
"finitepeakind",
"=",
"npisfinite",
"(",
"lsp",
")",
"finlsp",
"=",
"lsp",
"[",
"finitepeakind",
"]",
"finperiods",
"=",
"periods",
"[",
"finitepeakind",
"]",
"# make sure that finlsp has finite values before we work on it",
"try",
":",
"bestperiodind",
"=",
"npargmax",
"(",
"finlsp",
")",
"except",
"ValueError",
":",
"LOGERROR",
"(",
"'no finite periodogram values '",
"'for this mag series, skipping...'",
")",
"return",
"{",
"'bestperiod'",
":",
"npnan",
",",
"'bestlspval'",
":",
"npnan",
",",
"'nbestpeaks'",
":",
"nbestpeaks",
",",
"'nbestinds'",
":",
"None",
",",
"'nbestlspvals'",
":",
"None",
",",
"'nbestperiods'",
":",
"None",
",",
"'lspvals'",
":",
"None",
",",
"'periods'",
":",
"None",
",",
"'durations'",
":",
"None",
",",
"'method'",
":",
"'bls'",
",",
"'blsresult'",
":",
"None",
",",
"'blsmodel'",
":",
"None",
",",
"'kwargs'",
":",
"{",
"'startp'",
":",
"startp",
",",
"'endp'",
":",
"endp",
",",
"'stepsize'",
":",
"stepsize",
",",
"'mintransitduration'",
":",
"mintransitduration",
",",
"'maxtransitduration'",
":",
"maxtransitduration",
",",
"'ndurations'",
":",
"ndurations",
",",
"'blsobjective'",
":",
"blsobjective",
",",
"'blsmethod'",
":",
"blsmethod",
",",
"'blsoversample'",
":",
"blsoversample",
",",
"'blsntransits'",
":",
"blsmintransits",
",",
"'blsfreqfactor'",
":",
"blsfreqfactor",
",",
"'autofreq'",
":",
"autofreq",
",",
"'periodepsilon'",
":",
"periodepsilon",
",",
"'nbestpeaks'",
":",
"nbestpeaks",
",",
"'sigclip'",
":",
"sigclip",
",",
"'magsarefluxes'",
":",
"magsarefluxes",
"}",
"}",
"sortedlspind",
"=",
"npargsort",
"(",
"finlsp",
")",
"[",
":",
":",
"-",
"1",
"]",
"sortedlspperiods",
"=",
"finperiods",
"[",
"sortedlspind",
"]",
"sortedlspvals",
"=",
"finlsp",
"[",
"sortedlspind",
"]",
"# now get the nbestpeaks",
"nbestperiods",
",",
"nbestlspvals",
",",
"nbestinds",
",",
"peakcount",
"=",
"(",
"[",
"finperiods",
"[",
"bestperiodind",
"]",
"]",
",",
"[",
"finlsp",
"[",
"bestperiodind",
"]",
"]",
",",
"[",
"bestperiodind",
"]",
",",
"1",
")",
"prevperiod",
"=",
"sortedlspperiods",
"[",
"0",
"]",
"# find the best nbestpeaks in the lsp and their periods",
"for",
"period",
",",
"lspval",
",",
"ind",
"in",
"zip",
"(",
"sortedlspperiods",
",",
"sortedlspvals",
",",
"sortedlspind",
")",
":",
"if",
"peakcount",
"==",
"nbestpeaks",
":",
"break",
"perioddiff",
"=",
"abs",
"(",
"period",
"-",
"prevperiod",
")",
"bestperiodsdiff",
"=",
"[",
"abs",
"(",
"period",
"-",
"x",
")",
"for",
"x",
"in",
"nbestperiods",
"]",
"# print('prevperiod = %s, thisperiod = %s, '",
"# 'perioddiff = %s, peakcount = %s' %",
"# (prevperiod, period, perioddiff, peakcount))",
"# this ensures that this period is different from the last",
"# period and from all the other existing best periods by",
"# periodepsilon to make sure we jump to an entire different",
"# peak in the periodogram",
"if",
"(",
"perioddiff",
">",
"(",
"periodepsilon",
"*",
"prevperiod",
")",
"and",
"all",
"(",
"x",
">",
"(",
"periodepsilon",
"*",
"period",
")",
"for",
"x",
"in",
"bestperiodsdiff",
")",
")",
":",
"nbestperiods",
".",
"append",
"(",
"period",
")",
"nbestlspvals",
".",
"append",
"(",
"lspval",
")",
"nbestinds",
".",
"append",
"(",
"ind",
")",
"peakcount",
"=",
"peakcount",
"+",
"1",
"prevperiod",
"=",
"period",
"# generate the return dict",
"resultdict",
"=",
"{",
"'bestperiod'",
":",
"finperiods",
"[",
"bestperiodind",
"]",
",",
"'bestlspval'",
":",
"finlsp",
"[",
"bestperiodind",
"]",
",",
"'nbestpeaks'",
":",
"nbestpeaks",
",",
"'nbestinds'",
":",
"nbestinds",
",",
"'nbestlspvals'",
":",
"nbestlspvals",
",",
"'nbestperiods'",
":",
"nbestperiods",
",",
"'lspvals'",
":",
"lsp",
",",
"'frequencies'",
":",
"frequencies",
",",
"'periods'",
":",
"periods",
",",
"'durations'",
":",
"durations",
",",
"'blsresult'",
":",
"blsresult",
",",
"'blsmodel'",
":",
"blsmodel",
",",
"'stepsize'",
":",
"stepsize",
",",
"'nfreq'",
":",
"nfreq",
",",
"'mintransitduration'",
":",
"mintransitduration",
",",
"'maxtransitduration'",
":",
"maxtransitduration",
",",
"'method'",
":",
"'bls'",
",",
"'kwargs'",
":",
"{",
"'startp'",
":",
"startp",
",",
"'endp'",
":",
"endp",
",",
"'stepsize'",
":",
"stepsize",
",",
"'mintransitduration'",
":",
"mintransitduration",
",",
"'maxtransitduration'",
":",
"maxtransitduration",
",",
"'ndurations'",
":",
"ndurations",
",",
"'blsobjective'",
":",
"blsobjective",
",",
"'blsmethod'",
":",
"blsmethod",
",",
"'blsoversample'",
":",
"blsoversample",
",",
"'blsntransits'",
":",
"blsmintransits",
",",
"'blsfreqfactor'",
":",
"blsfreqfactor",
",",
"'autofreq'",
":",
"autofreq",
",",
"'periodepsilon'",
":",
"periodepsilon",
",",
"'nbestpeaks'",
":",
"nbestpeaks",
",",
"'sigclip'",
":",
"sigclip",
",",
"'magsarefluxes'",
":",
"magsarefluxes",
"}",
"}",
"return",
"resultdict",
"except",
"Exception",
"as",
"e",
":",
"LOGEXCEPTION",
"(",
"'BLS failed!'",
")",
"if",
"raiseonfail",
":",
"raise",
"return",
"{",
"'bestperiod'",
":",
"npnan",
",",
"'bestlspval'",
":",
"npnan",
",",
"'nbestinds'",
":",
"None",
",",
"'nbestpeaks'",
":",
"nbestpeaks",
",",
"'nbestlspvals'",
":",
"None",
",",
"'nbestperiods'",
":",
"None",
",",
"'lspvals'",
":",
"None",
",",
"'periods'",
":",
"None",
",",
"'durations'",
":",
"None",
",",
"'blsresult'",
":",
"None",
",",
"'blsmodel'",
":",
"None",
",",
"'stepsize'",
":",
"stepsize",
",",
"'nfreq'",
":",
"nfreq",
",",
"'mintransitduration'",
":",
"mintransitduration",
",",
"'maxtransitduration'",
":",
"maxtransitduration",
",",
"'method'",
":",
"'bls'",
",",
"'kwargs'",
":",
"{",
"'startp'",
":",
"startp",
",",
"'endp'",
":",
"endp",
",",
"'stepsize'",
":",
"stepsize",
",",
"'mintransitduration'",
":",
"mintransitduration",
",",
"'maxtransitduration'",
":",
"maxtransitduration",
",",
"'ndurations'",
":",
"ndurations",
",",
"'blsobjective'",
":",
"blsobjective",
",",
"'blsmethod'",
":",
"blsmethod",
",",
"'blsoversample'",
":",
"blsoversample",
",",
"'blsntransits'",
":",
"blsmintransits",
",",
"'blsfreqfactor'",
":",
"blsfreqfactor",
",",
"'autofreq'",
":",
"autofreq",
",",
"'periodepsilon'",
":",
"periodepsilon",
",",
"'nbestpeaks'",
":",
"nbestpeaks",
",",
"'sigclip'",
":",
"sigclip",
",",
"'magsarefluxes'",
":",
"magsarefluxes",
"}",
"}",
"else",
":",
"LOGERROR",
"(",
"'no good detections for these times and mags, skipping...'",
")",
"return",
"{",
"'bestperiod'",
":",
"npnan",
",",
"'bestlspval'",
":",
"npnan",
",",
"'nbestinds'",
":",
"None",
",",
"'nbestpeaks'",
":",
"nbestpeaks",
",",
"'nbestlspvals'",
":",
"None",
",",
"'nbestperiods'",
":",
"None",
",",
"'lspvals'",
":",
"None",
",",
"'periods'",
":",
"None",
",",
"'durations'",
":",
"None",
",",
"'blsresult'",
":",
"None",
",",
"'blsmodel'",
":",
"None",
",",
"'stepsize'",
":",
"stepsize",
",",
"'nfreq'",
":",
"None",
",",
"'nphasebins'",
":",
"None",
",",
"'mintransitduration'",
":",
"mintransitduration",
",",
"'maxtransitduration'",
":",
"maxtransitduration",
",",
"'method'",
":",
"'bls'",
",",
"'kwargs'",
":",
"{",
"'startp'",
":",
"startp",
",",
"'endp'",
":",
"endp",
",",
"'stepsize'",
":",
"stepsize",
",",
"'mintransitduration'",
":",
"mintransitduration",
",",
"'maxtransitduration'",
":",
"maxtransitduration",
",",
"'ndurations'",
":",
"ndurations",
",",
"'blsobjective'",
":",
"blsobjective",
",",
"'blsmethod'",
":",
"blsmethod",
",",
"'blsoversample'",
":",
"blsoversample",
",",
"'blsntransits'",
":",
"blsmintransits",
",",
"'blsfreqfactor'",
":",
"blsfreqfactor",
",",
"'autofreq'",
":",
"autofreq",
",",
"'periodepsilon'",
":",
"periodepsilon",
",",
"'nbestpeaks'",
":",
"nbestpeaks",
",",
"'sigclip'",
":",
"sigclip",
",",
"'magsarefluxes'",
":",
"magsarefluxes",
"}",
"}"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|
valid
|
_parallel_bls_worker
|
This wraps Astropy's BoxLeastSquares for use with bls_parallel_pfind below.
`task` is a tuple::
task[0] = times
task[1] = mags
task[2] = errs
task[3] = magsarefluxes
task[4] = minfreq
task[5] = nfreq
task[6] = stepsize
task[7] = ndurations
task[8] = mintransitduration
task[9] = maxtransitduration
task[10] = blsobjective
task[11] = blsmethod
task[12] = blsoversample
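A hedged sketch of building one such task tuple and calling the worker directly (normally `bls_parallel_pfind` constructs these; the grid values below are illustrative)::
import numpy as np
from astrobase.periodbase.abls import _parallel_bls_worker
rng = np.random.RandomState(0)
times = np.sort(rng.uniform(0.0, 30.0, size=1000))
mags = 12.0 + rng.normal(0.0, 0.01, size=times.size)
errs = np.full_like(times, 0.01)
task = (times, mags, errs,
        False,         # magsarefluxes
        0.1,           # minfreq [1/day]
        500,           # nfreq
        5.0e-4,        # stepsize [1/day]
        100,           # ndurations
        0.01,          # mintransitduration (phase)
        0.4,           # maxtransitduration (phase)
        'likelihood',  # blsobjective
        'fast',        # blsmethod
        10)            # blsoversample
result = _parallel_bls_worker(task)
print(result['power'].shape)  # -> (500,)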
|
astrobase/periodbase/abls.py
|
def _parallel_bls_worker(task):
'''
This wraps Astropy's BoxLeastSquares for use with bls_parallel_pfind below.
`task` is a tuple::
task[0] = times
task[1] = mags
task[2] = errs
task[3] = magsarefluxes
task[4] = minfreq
task[5] = nfreq
task[6] = stepsize
task[7] = ndurations
task[8] = mintransitduration
task[9] = maxtransitduration
task[10] = blsobjective
task[11] = blsmethod
task[12] = blsoversample
'''
try:
times, mags, errs = task[:3]
magsarefluxes = task[3]
minfreq, nfreq, stepsize = task[4:7]
ndurations, mintransitduration, maxtransitduration = task[7:10]
blsobjective, blsmethod, blsoversample = task[10:]
frequencies = minfreq + nparange(nfreq)*stepsize
periods = 1.0/frequencies
# astropy's BLS requires durations in units of time
durations = nplinspace(mintransitduration*periods.min(),
maxtransitduration*periods.min(),
ndurations)
# set up the correct units for the BLS model
if magsarefluxes:
blsmodel = BoxLeastSquares(
times*u.day,
mags*u.dimensionless_unscaled,
dy=errs*u.dimensionless_unscaled
)
else:
blsmodel = BoxLeastSquares(
times*u.day,
mags*u.mag,
dy=errs*u.mag
)
blsresult = blsmodel.power(
periods*u.day,
durations*u.day,
objective=blsobjective,
method=blsmethod,
oversample=blsoversample
)
return {
'blsresult': blsresult,
'blsmodel': blsmodel,
'durations': durations,
'power': nparray(blsresult.power)
}
except Exception as e:
LOGEXCEPTION('BLS for frequency chunk: (%.6f, %.6f) failed.' %
(frequencies[0], frequencies[-1]))
return {
'blsresult': None,
'blsmodel': None,
'durations': durations,
'power': nparray([npnan for x in range(nfreq)]),
}
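The worker is designed to be mapped over frequency chunks. A hedged sketch of that pattern (the chunking scheme here is illustrative; `bls_parallel_pfind`'s actual splitting may differ)::
import numpy as np
from astrobase.periodbase.abls import _parallel_bls_worker
rng = np.random.RandomState(0)
times = np.sort(rng.uniform(0.0, 30.0, size=1000))
mags = 12.0 + rng.normal(0.0, 0.01, size=times.size)
errs = np.full_like(times, 0.01)
def make_tasks(times, mags, errs, minfreq, nfreq, stepsize, nchunks=4):
    # split [minfreq, minfreq + nfreq*stepsize) into contiguous chunks
    csize = int(np.ceil(nfreq / nchunks))
    for i in range(nchunks):
        f0 = minfreq + i * csize * stepsize
        nf = min(csize, nfreq - i * csize)
        if nf <= 0:
            break
        yield (times, mags, errs, False, f0, nf, stepsize,
               100, 0.01, 0.4, 'likelihood', 'fast', 10)
tasks = list(make_tasks(times, mags, errs, 0.1, 2000, 5.0e-4))
# swap the builtin map for multiprocessing.Pool.map to parallelize
results = list(map(_parallel_bls_worker, tasks))
lsp = np.concatenate([r['power'] for r in results])
print(lsp.shape)  # -> (2000,)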
|
[
"This",
"wraps",
"Astropy",
"s",
"BoxLeastSquares",
"for",
"use",
"with",
"bls_parallel_pfind",
"below",
"."
] |
waqasbhatti/astrobase
|
python
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/periodbase/abls.py#L605-L691
|
[
"def",
"_parallel_bls_worker",
"(",
"task",
")",
":",
"try",
":",
"times",
",",
"mags",
",",
"errs",
"=",
"task",
"[",
":",
"3",
"]",
"magsarefluxes",
"=",
"task",
"[",
"3",
"]",
"minfreq",
",",
"nfreq",
",",
"stepsize",
"=",
"task",
"[",
"4",
":",
"7",
"]",
"ndurations",
",",
"mintransitduration",
",",
"maxtransitduration",
"=",
"task",
"[",
"7",
":",
"10",
"]",
"blsobjective",
",",
"blsmethod",
",",
"blsoversample",
"=",
"task",
"[",
"10",
":",
"]",
"frequencies",
"=",
"minfreq",
"+",
"nparange",
"(",
"nfreq",
")",
"*",
"stepsize",
"periods",
"=",
"1.0",
"/",
"frequencies",
"# astropy's BLS requires durations in units of time",
"durations",
"=",
"nplinspace",
"(",
"mintransitduration",
"*",
"periods",
".",
"min",
"(",
")",
",",
"maxtransitduration",
"*",
"periods",
".",
"min",
"(",
")",
",",
"ndurations",
")",
"# set up the correct units for the BLS model",
"if",
"magsarefluxes",
":",
"blsmodel",
"=",
"BoxLeastSquares",
"(",
"times",
"*",
"u",
".",
"day",
",",
"mags",
"*",
"u",
".",
"dimensionless_unscaled",
",",
"dy",
"=",
"errs",
"*",
"u",
".",
"dimensionless_unscaled",
")",
"else",
":",
"blsmodel",
"=",
"BoxLeastSquares",
"(",
"times",
"*",
"u",
".",
"day",
",",
"mags",
"*",
"u",
".",
"mag",
",",
"dy",
"=",
"errs",
"*",
"u",
".",
"mag",
")",
"blsresult",
"=",
"blsmodel",
".",
"power",
"(",
"periods",
"*",
"u",
".",
"day",
",",
"durations",
"*",
"u",
".",
"day",
",",
"objective",
"=",
"blsobjective",
",",
"method",
"=",
"blsmethod",
",",
"oversample",
"=",
"blsoversample",
")",
"return",
"{",
"'blsresult'",
":",
"blsresult",
",",
"'blsmodel'",
":",
"blsmodel",
",",
"'durations'",
":",
"durations",
",",
"'power'",
":",
"nparray",
"(",
"blsresult",
".",
"power",
")",
"}",
"except",
"Exception",
"as",
"e",
":",
"LOGEXCEPTION",
"(",
"'BLS for frequency chunk: (%.6f, %.6f) failed.'",
"%",
"(",
"frequencies",
"[",
"0",
"]",
",",
"frequencies",
"[",
"-",
"1",
"]",
")",
")",
"return",
"{",
"'blsresult'",
":",
"None",
",",
"'blsmodel'",
":",
"None",
",",
"'durations'",
":",
"durations",
",",
"'power'",
":",
"nparray",
"(",
"[",
"npnan",
"for",
"x",
"in",
"range",
"(",
"nfreq",
")",
"]",
")",
",",
"}"
] |
2922a14619d183fb28005fa7d02027ac436f2265
|