| column | type | size |
|---|---|---|
| partition | string | 3 classes |
| func_name | string | 1-134 chars |
| docstring | string | 1-46.9k chars |
| path | string | 4-223 chars |
| original_string | string | 75-104k chars |
| code | string | 75-104k chars (identical to `original_string` in the rows previewed) |
| docstring_tokens | list | 1-1.97k items |
| repo | string | 7-55 chars |
| language | string | 1 class (python) |
| url | string | 87-315 chars |
| code_tokens | list | 19-28.4k items |
| sha | string | 40 chars |
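The schema above follows the CodeSearchNet column layout: each preview row pairs a function's source and docstring with whitespace-split token lists, the owning repo and file path, a blob URL, and the commit sha. A hedged sketch of inspecting rows with these columns using the Hugging Face datasets library (the dataset path is a placeholder, not taken from this preview):

from datasets import load_dataset

ds = load_dataset('path/to/this-dataset', split='test')  # placeholder path
row = ds[0]
print(row['repo'], row['path'], row['func_name'])
print(row['docstring'])
# In the rows previewed below, 'code' and 'original_string' hold identical text.
assert row['code'] == row['original_string']
print(len(row['code_tokens']), 'code tokens,', len(row['docstring_tokens']), 'docstring tokens')

The preview rows follow, one field per block, separated by | lines.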
test
|
request
|
same as requests/requests/api.py request(...)
|
searx/poolrequests.py
|
def request(method, url, **kwargs):
"""same as requests/requests/api.py request(...)"""
time_before_request = time()
# session start
session = SessionSinglePool()
# proxies
kwargs['proxies'] = settings['outgoing'].get('proxies') or None
# timeout
if 'timeout' in kwargs:
timeout = kwargs['timeout']
else:
timeout = getattr(threadLocal, 'timeout', None)
if timeout is not None:
kwargs['timeout'] = timeout
# do request
response = session.request(method=method, url=url, **kwargs)
time_after_request = time()
# is there a timeout for this engine ?
if timeout is not None:
timeout_overhead = 0.2 # seconds
# start_time = when the user request started
start_time = getattr(threadLocal, 'start_time', time_before_request)
search_duration = time_after_request - start_time
if search_duration > timeout + timeout_overhead:
raise requests.exceptions.Timeout(response=response)
# session end
session.close()
if hasattr(threadLocal, 'total_time'):
threadLocal.total_time += time_after_request - time_before_request
return response
|
[
"same",
"as",
"requests",
"/",
"requests",
"/",
"api",
".",
"py",
"request",
"(",
"...",
")"
] |
asciimoo/searx
|
python
|
https://github.com/asciimoo/searx/blob/a84caa22cf947e973c10aa968d35fb2bdda6d048/searx/poolrequests.py#L90-L128
|
[
"def",
"request",
"(",
"method",
",",
"url",
",",
"*",
"*",
"kwargs",
")",
":",
"time_before_request",
"=",
"time",
"(",
")",
"# session start",
"session",
"=",
"SessionSinglePool",
"(",
")",
"# proxies",
"kwargs",
"[",
"'proxies'",
"]",
"=",
"settings",
"[",
"'outgoing'",
"]",
".",
"get",
"(",
"'proxies'",
")",
"or",
"None",
"# timeout",
"if",
"'timeout'",
"in",
"kwargs",
":",
"timeout",
"=",
"kwargs",
"[",
"'timeout'",
"]",
"else",
":",
"timeout",
"=",
"getattr",
"(",
"threadLocal",
",",
"'timeout'",
",",
"None",
")",
"if",
"timeout",
"is",
"not",
"None",
":",
"kwargs",
"[",
"'timeout'",
"]",
"=",
"timeout",
"# do request",
"response",
"=",
"session",
".",
"request",
"(",
"method",
"=",
"method",
",",
"url",
"=",
"url",
",",
"*",
"*",
"kwargs",
")",
"time_after_request",
"=",
"time",
"(",
")",
"# is there a timeout for this engine ?",
"if",
"timeout",
"is",
"not",
"None",
":",
"timeout_overhead",
"=",
"0.2",
"# seconds",
"# start_time = when the user request started",
"start_time",
"=",
"getattr",
"(",
"threadLocal",
",",
"'start_time'",
",",
"time_before_request",
")",
"search_duration",
"=",
"time_after_request",
"-",
"start_time",
"if",
"search_duration",
">",
"timeout",
"+",
"timeout_overhead",
":",
"raise",
"requests",
".",
"exceptions",
".",
"Timeout",
"(",
"response",
"=",
"response",
")",
"# session end",
"session",
".",
"close",
"(",
")",
"if",
"hasattr",
"(",
"threadLocal",
",",
"'total_time'",
")",
":",
"threadLocal",
".",
"total_time",
"+=",
"time_after_request",
"-",
"time_before_request",
"return",
"response"
] |
a84caa22cf947e973c10aa968d35fb2bdda6d048
|
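A note on the timeout bookkeeping in request() above: the per-engine timeout, the user request's start_time, and the accumulated total_time all live on a module-level threadLocal object, so a caller is expected to seed them before issuing requests. A minimal sketch of that contract, assuming a plain threading.local() (the seeding helper is hypothetical; only threadLocal itself appears in the snippet above):

import threading
from time import time

threadLocal = threading.local()  # stands in for the module-level object used above

def seed_request_budget(timeout, start_time=None):
    # Hypothetical helper: request() reads these attributes via getattr().
    threadLocal.timeout = timeout                  # per-engine timeout, in seconds
    threadLocal.start_time = start_time or time()  # when the user's search began
    threadLocal.total_time = 0.0                   # request() accumulates call durations here

# seed_request_budget(3.0)
# request('GET', 'https://example.org')  # raises requests.exceptions.Timeout once the
#                                        # whole search exceeds timeout + 0.2 s of overhead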
test
|
get_current_theme_name
|
Returns theme name.
Checks in this order:
1. override
2. cookies
3. settings
|
searx/webapp.py
|
def get_current_theme_name(override=None):
"""Returns theme name.
Checks in this order:
1. override
2. cookies
3. settings"""
if override and (override in themes or override == '__common__'):
return override
theme_name = request.args.get('theme', request.preferences.get_value('theme'))
if theme_name not in themes:
theme_name = default_theme
return theme_name
|
[
"Returns",
"theme",
"name",
"."
] |
asciimoo/searx
|
python
|
https://github.com/asciimoo/searx/blob/a84caa22cf947e973c10aa968d35fb2bdda6d048/searx/webapp.py#L240-L253
|
[
"def",
"get_current_theme_name",
"(",
"override",
"=",
"None",
")",
":",
"if",
"override",
"and",
"(",
"override",
"in",
"themes",
"or",
"override",
"==",
"'__common__'",
")",
":",
"return",
"override",
"theme_name",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'theme'",
",",
"request",
".",
"preferences",
".",
"get_value",
"(",
"'theme'",
")",
")",
"if",
"theme_name",
"not",
"in",
"themes",
":",
"theme_name",
"=",
"default_theme",
"return",
"theme_name"
] |
a84caa22cf947e973c10aa968d35fb2bdda6d048
|
test
|
index
|
Render index page.
Supported outputs: html, json, csv, rss.
|
searx/webapp.py
|
def index():
"""Render index page.
Supported outputs: html, json, csv, rss.
"""
# output_format
output_format = request.form.get('format', 'html')
if output_format not in ['html', 'csv', 'json', 'rss']:
output_format = 'html'
# check if there is query
if request.form.get('q') is None:
if output_format == 'html':
return render(
'index.html',
)
else:
return index_error(output_format, 'No query'), 400
# search
search_query = None
result_container = None
try:
search_query = get_search_query_from_webapp(request.preferences, request.form)
# search = Search(search_query) # without plugins
search = SearchWithPlugins(search_query, request.user_plugins, request)
result_container = search.search()
except Exception as e:
# log exception
logger.exception('search error')
# is it an invalid input parameter or something else ?
if (issubclass(e.__class__, SearxParameterException)):
return index_error(output_format, e.message), 400
else:
return index_error(output_format, gettext('search error')), 500
# results
results = result_container.get_ordered_results()
number_of_results = result_container.results_number()
if number_of_results < result_container.results_length():
number_of_results = 0
# UI
advanced_search = request.form.get('advanced_search', None)
# output
for result in results:
if output_format == 'html':
if 'content' in result and result['content']:
result['content'] = highlight_content(escape(result['content'][:1024]), search_query.query)
result['title'] = highlight_content(escape(result['title'] or u''), search_query.query)
else:
if result.get('content'):
result['content'] = html_to_text(result['content']).strip()
# removing html content and whitespace duplications
result['title'] = ' '.join(html_to_text(result['title']).strip().split())
result['pretty_url'] = prettify_url(result['url'])
# TODO, check if timezone is calculated right
if 'publishedDate' in result:
try: # test if publishedDate >= 1900 (datetime module bug)
result['pubdate'] = result['publishedDate'].strftime('%Y-%m-%d %H:%M:%S%z')
except ValueError:
result['publishedDate'] = None
else:
if result['publishedDate'].replace(tzinfo=None) >= datetime.now() - timedelta(days=1):
timedifference = datetime.now() - result['publishedDate'].replace(tzinfo=None)
minutes = int((timedifference.seconds / 60) % 60)
hours = int(timedifference.seconds / 60 / 60)
if hours == 0:
result['publishedDate'] = gettext(u'{minutes} minute(s) ago').format(minutes=minutes)
else:
result['publishedDate'] = gettext(u'{hours} hour(s), {minutes} minute(s) ago').format(hours=hours, minutes=minutes) # noqa
else:
result['publishedDate'] = format_date(result['publishedDate'])
if output_format == 'json':
return Response(json.dumps({'query': search_query.query.decode('utf-8'),
'number_of_results': number_of_results,
'results': results,
'answers': list(result_container.answers),
'corrections': list(result_container.corrections),
'infoboxes': result_container.infoboxes,
'suggestions': list(result_container.suggestions),
'unresponsive_engines': list(result_container.unresponsive_engines)},
default=lambda item: list(item) if isinstance(item, set) else item),
mimetype='application/json')
elif output_format == 'csv':
csv = UnicodeWriter(StringIO())
keys = ('title', 'url', 'content', 'host', 'engine', 'score')
csv.writerow(keys)
for row in results:
row['host'] = row['parsed_url'].netloc
csv.writerow([row.get(key, '') for key in keys])
csv.stream.seek(0)
response = Response(csv.stream.read(), mimetype='application/csv')
cont_disp = 'attachment;Filename=searx_-_{0}.csv'.format(search_query.query)
response.headers.add('Content-Disposition', cont_disp)
return response
elif output_format == 'rss':
response_rss = render(
'opensearch_response_rss.xml',
results=results,
q=request.form['q'],
number_of_results=number_of_results,
base_url=get_base_url(),
override_theme='__common__',
)
return Response(response_rss, mimetype='text/xml')
return render(
'results.html',
results=results,
q=request.form['q'],
selected_categories=search_query.categories,
pageno=search_query.pageno,
time_range=search_query.time_range,
number_of_results=format_decimal(number_of_results),
advanced_search=advanced_search,
suggestions=result_container.suggestions,
answers=result_container.answers,
corrections=result_container.corrections,
infoboxes=result_container.infoboxes,
paging=result_container.paging,
unresponsive_engines=result_container.unresponsive_engines,
current_language=match_language(search_query.lang,
LANGUAGE_CODES,
fallback=settings['search']['language']),
base_url=get_base_url(),
theme=get_current_theme_name(),
favicons=global_favicons[themes.index(get_current_theme_name())]
)
|
[
"Render",
"index",
"page",
"."
] |
asciimoo/searx
|
python
|
https://github.com/asciimoo/searx/blob/a84caa22cf947e973c10aa968d35fb2bdda6d048/searx/webapp.py#L470-L604
|
[
"def",
"index",
"(",
")",
":",
"# output_format",
"output_format",
"=",
"request",
".",
"form",
".",
"get",
"(",
"'format'",
",",
"'html'",
")",
"if",
"output_format",
"not",
"in",
"[",
"'html'",
",",
"'csv'",
",",
"'json'",
",",
"'rss'",
"]",
":",
"output_format",
"=",
"'html'",
"# check if there is query",
"if",
"request",
".",
"form",
".",
"get",
"(",
"'q'",
")",
"is",
"None",
":",
"if",
"output_format",
"==",
"'html'",
":",
"return",
"render",
"(",
"'index.html'",
",",
")",
"else",
":",
"return",
"index_error",
"(",
"output_format",
",",
"'No query'",
")",
",",
"400",
"# search",
"search_query",
"=",
"None",
"result_container",
"=",
"None",
"try",
":",
"search_query",
"=",
"get_search_query_from_webapp",
"(",
"request",
".",
"preferences",
",",
"request",
".",
"form",
")",
"# search = Search(search_query) # without plugins",
"search",
"=",
"SearchWithPlugins",
"(",
"search_query",
",",
"request",
".",
"user_plugins",
",",
"request",
")",
"result_container",
"=",
"search",
".",
"search",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"# log exception",
"logger",
".",
"exception",
"(",
"'search error'",
")",
"# is it an invalid input parameter or something else ?",
"if",
"(",
"issubclass",
"(",
"e",
".",
"__class__",
",",
"SearxParameterException",
")",
")",
":",
"return",
"index_error",
"(",
"output_format",
",",
"e",
".",
"message",
")",
",",
"400",
"else",
":",
"return",
"index_error",
"(",
"output_format",
",",
"gettext",
"(",
"'search error'",
")",
")",
",",
"500",
"# results",
"results",
"=",
"result_container",
".",
"get_ordered_results",
"(",
")",
"number_of_results",
"=",
"result_container",
".",
"results_number",
"(",
")",
"if",
"number_of_results",
"<",
"result_container",
".",
"results_length",
"(",
")",
":",
"number_of_results",
"=",
"0",
"# UI",
"advanced_search",
"=",
"request",
".",
"form",
".",
"get",
"(",
"'advanced_search'",
",",
"None",
")",
"# output",
"for",
"result",
"in",
"results",
":",
"if",
"output_format",
"==",
"'html'",
":",
"if",
"'content'",
"in",
"result",
"and",
"result",
"[",
"'content'",
"]",
":",
"result",
"[",
"'content'",
"]",
"=",
"highlight_content",
"(",
"escape",
"(",
"result",
"[",
"'content'",
"]",
"[",
":",
"1024",
"]",
")",
",",
"search_query",
".",
"query",
")",
"result",
"[",
"'title'",
"]",
"=",
"highlight_content",
"(",
"escape",
"(",
"result",
"[",
"'title'",
"]",
"or",
"u''",
")",
",",
"search_query",
".",
"query",
")",
"else",
":",
"if",
"result",
".",
"get",
"(",
"'content'",
")",
":",
"result",
"[",
"'content'",
"]",
"=",
"html_to_text",
"(",
"result",
"[",
"'content'",
"]",
")",
".",
"strip",
"(",
")",
"# removing html content and whitespace duplications",
"result",
"[",
"'title'",
"]",
"=",
"' '",
".",
"join",
"(",
"html_to_text",
"(",
"result",
"[",
"'title'",
"]",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
")",
"result",
"[",
"'pretty_url'",
"]",
"=",
"prettify_url",
"(",
"result",
"[",
"'url'",
"]",
")",
"# TODO, check if timezone is calculated right",
"if",
"'publishedDate'",
"in",
"result",
":",
"try",
":",
"# test if publishedDate >= 1900 (datetime module bug)",
"result",
"[",
"'pubdate'",
"]",
"=",
"result",
"[",
"'publishedDate'",
"]",
".",
"strftime",
"(",
"'%Y-%m-%d %H:%M:%S%z'",
")",
"except",
"ValueError",
":",
"result",
"[",
"'publishedDate'",
"]",
"=",
"None",
"else",
":",
"if",
"result",
"[",
"'publishedDate'",
"]",
".",
"replace",
"(",
"tzinfo",
"=",
"None",
")",
">=",
"datetime",
".",
"now",
"(",
")",
"-",
"timedelta",
"(",
"days",
"=",
"1",
")",
":",
"timedifference",
"=",
"datetime",
".",
"now",
"(",
")",
"-",
"result",
"[",
"'publishedDate'",
"]",
".",
"replace",
"(",
"tzinfo",
"=",
"None",
")",
"minutes",
"=",
"int",
"(",
"(",
"timedifference",
".",
"seconds",
"/",
"60",
")",
"%",
"60",
")",
"hours",
"=",
"int",
"(",
"timedifference",
".",
"seconds",
"/",
"60",
"/",
"60",
")",
"if",
"hours",
"==",
"0",
":",
"result",
"[",
"'publishedDate'",
"]",
"=",
"gettext",
"(",
"u'{minutes} minute(s) ago'",
")",
".",
"format",
"(",
"minutes",
"=",
"minutes",
")",
"else",
":",
"result",
"[",
"'publishedDate'",
"]",
"=",
"gettext",
"(",
"u'{hours} hour(s), {minutes} minute(s) ago'",
")",
".",
"format",
"(",
"hours",
"=",
"hours",
",",
"minutes",
"=",
"minutes",
")",
"# noqa",
"else",
":",
"result",
"[",
"'publishedDate'",
"]",
"=",
"format_date",
"(",
"result",
"[",
"'publishedDate'",
"]",
")",
"if",
"output_format",
"==",
"'json'",
":",
"return",
"Response",
"(",
"json",
".",
"dumps",
"(",
"{",
"'query'",
":",
"search_query",
".",
"query",
".",
"decode",
"(",
"'utf-8'",
")",
",",
"'number_of_results'",
":",
"number_of_results",
",",
"'results'",
":",
"results",
",",
"'answers'",
":",
"list",
"(",
"result_container",
".",
"answers",
")",
",",
"'corrections'",
":",
"list",
"(",
"result_container",
".",
"corrections",
")",
",",
"'infoboxes'",
":",
"result_container",
".",
"infoboxes",
",",
"'suggestions'",
":",
"list",
"(",
"result_container",
".",
"suggestions",
")",
",",
"'unresponsive_engines'",
":",
"list",
"(",
"result_container",
".",
"unresponsive_engines",
")",
"}",
",",
"default",
"=",
"lambda",
"item",
":",
"list",
"(",
"item",
")",
"if",
"isinstance",
"(",
"item",
",",
"set",
")",
"else",
"item",
")",
",",
"mimetype",
"=",
"'application/json'",
")",
"elif",
"output_format",
"==",
"'csv'",
":",
"csv",
"=",
"UnicodeWriter",
"(",
"StringIO",
"(",
")",
")",
"keys",
"=",
"(",
"'title'",
",",
"'url'",
",",
"'content'",
",",
"'host'",
",",
"'engine'",
",",
"'score'",
")",
"csv",
".",
"writerow",
"(",
"keys",
")",
"for",
"row",
"in",
"results",
":",
"row",
"[",
"'host'",
"]",
"=",
"row",
"[",
"'parsed_url'",
"]",
".",
"netloc",
"csv",
".",
"writerow",
"(",
"[",
"row",
".",
"get",
"(",
"key",
",",
"''",
")",
"for",
"key",
"in",
"keys",
"]",
")",
"csv",
".",
"stream",
".",
"seek",
"(",
"0",
")",
"response",
"=",
"Response",
"(",
"csv",
".",
"stream",
".",
"read",
"(",
")",
",",
"mimetype",
"=",
"'application/csv'",
")",
"cont_disp",
"=",
"'attachment;Filename=searx_-_{0}.csv'",
".",
"format",
"(",
"search_query",
".",
"query",
")",
"response",
".",
"headers",
".",
"add",
"(",
"'Content-Disposition'",
",",
"cont_disp",
")",
"return",
"response",
"elif",
"output_format",
"==",
"'rss'",
":",
"response_rss",
"=",
"render",
"(",
"'opensearch_response_rss.xml'",
",",
"results",
"=",
"results",
",",
"q",
"=",
"request",
".",
"form",
"[",
"'q'",
"]",
",",
"number_of_results",
"=",
"number_of_results",
",",
"base_url",
"=",
"get_base_url",
"(",
")",
",",
"override_theme",
"=",
"'__common__'",
",",
")",
"return",
"Response",
"(",
"response_rss",
",",
"mimetype",
"=",
"'text/xml'",
")",
"return",
"render",
"(",
"'results.html'",
",",
"results",
"=",
"results",
",",
"q",
"=",
"request",
".",
"form",
"[",
"'q'",
"]",
",",
"selected_categories",
"=",
"search_query",
".",
"categories",
",",
"pageno",
"=",
"search_query",
".",
"pageno",
",",
"time_range",
"=",
"search_query",
".",
"time_range",
",",
"number_of_results",
"=",
"format_decimal",
"(",
"number_of_results",
")",
",",
"advanced_search",
"=",
"advanced_search",
",",
"suggestions",
"=",
"result_container",
".",
"suggestions",
",",
"answers",
"=",
"result_container",
".",
"answers",
",",
"corrections",
"=",
"result_container",
".",
"corrections",
",",
"infoboxes",
"=",
"result_container",
".",
"infoboxes",
",",
"paging",
"=",
"result_container",
".",
"paging",
",",
"unresponsive_engines",
"=",
"result_container",
".",
"unresponsive_engines",
",",
"current_language",
"=",
"match_language",
"(",
"search_query",
".",
"lang",
",",
"LANGUAGE_CODES",
",",
"fallback",
"=",
"settings",
"[",
"'search'",
"]",
"[",
"'language'",
"]",
")",
",",
"base_url",
"=",
"get_base_url",
"(",
")",
",",
"theme",
"=",
"get_current_theme_name",
"(",
")",
",",
"favicons",
"=",
"global_favicons",
"[",
"themes",
".",
"index",
"(",
"get_current_theme_name",
"(",
")",
")",
"]",
")"
] |
a84caa22cf947e973c10aa968d35fb2bdda6d048
|
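One detail of the JSON branch in index() above is worth isolating: the result-container fields (answers, corrections, suggestions, unresponsive_engines) are sets; the code wraps the top-level ones in list() and also passes a default= hook so any set still nested in the payload serializes as a list. A self-contained sketch of that technique:

import json

payload = {'answers': {'42'}, 'suggestions': {'paris', 'paris weather'}, 'results': [1, 2]}
print(json.dumps(payload, default=lambda item: list(item) if isinstance(item, set) else item))
# e.g. {"answers": ["42"], "suggestions": ["paris", "paris weather"], "results": [1, 2]}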
test
|
autocompleter
|
Return autocompleter results
|
searx/webapp.py
|
def autocompleter():
"""Return autocompleter results"""
# set blocked engines
disabled_engines = request.preferences.engines.get_disabled()
# parse query
if PY3:
raw_text_query = RawTextQuery(request.form.get('q', b''), disabled_engines)
else:
raw_text_query = RawTextQuery(request.form.get('q', u'').encode('utf-8'), disabled_engines)
raw_text_query.parse_query()
# check if search query is set
if not raw_text_query.getSearchQuery():
return '', 400
# run autocompleter
completer = autocomplete_backends.get(request.preferences.get_value('autocomplete'))
# parse searx specific autocompleter results like !bang
raw_results = searx_bang(raw_text_query)
# normal autocompletion results only appear if max 3 inner results returned
if len(raw_results) <= 3 and completer:
# get language from cookie
language = request.preferences.get_value('language')
if not language or language == 'all':
language = 'en'
else:
language = language.split('-')[0]
# run autocompletion
raw_results.extend(completer(raw_text_query.getSearchQuery(), language))
# parse results (write :language and !engine back to result string)
results = []
for result in raw_results:
raw_text_query.changeSearchQuery(result)
# add parsed result
results.append(raw_text_query.getFullQuery())
# return autocompleter results
if request.form.get('format') == 'x-suggestions':
return Response(json.dumps([raw_text_query.query, results]),
mimetype='application/json')
return Response(json.dumps(results),
mimetype='application/json')
|
[
"Return",
"autocompleter",
"results"
] |
asciimoo/searx
|
python
|
https://github.com/asciimoo/searx/blob/a84caa22cf947e973c10aa968d35fb2bdda6d048/searx/webapp.py#L616-L664
|
[
"def",
"autocompleter",
"(",
")",
":",
"# set blocked engines",
"disabled_engines",
"=",
"request",
".",
"preferences",
".",
"engines",
".",
"get_disabled",
"(",
")",
"# parse query",
"if",
"PY3",
":",
"raw_text_query",
"=",
"RawTextQuery",
"(",
"request",
".",
"form",
".",
"get",
"(",
"'q'",
",",
"b''",
")",
",",
"disabled_engines",
")",
"else",
":",
"raw_text_query",
"=",
"RawTextQuery",
"(",
"request",
".",
"form",
".",
"get",
"(",
"'q'",
",",
"u''",
")",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"disabled_engines",
")",
"raw_text_query",
".",
"parse_query",
"(",
")",
"# check if search query is set",
"if",
"not",
"raw_text_query",
".",
"getSearchQuery",
"(",
")",
":",
"return",
"''",
",",
"400",
"# run autocompleter",
"completer",
"=",
"autocomplete_backends",
".",
"get",
"(",
"request",
".",
"preferences",
".",
"get_value",
"(",
"'autocomplete'",
")",
")",
"# parse searx specific autocompleter results like !bang",
"raw_results",
"=",
"searx_bang",
"(",
"raw_text_query",
")",
"# normal autocompletion results only appear if max 3 inner results returned",
"if",
"len",
"(",
"raw_results",
")",
"<=",
"3",
"and",
"completer",
":",
"# get language from cookie",
"language",
"=",
"request",
".",
"preferences",
".",
"get_value",
"(",
"'language'",
")",
"if",
"not",
"language",
"or",
"language",
"==",
"'all'",
":",
"language",
"=",
"'en'",
"else",
":",
"language",
"=",
"language",
".",
"split",
"(",
"'-'",
")",
"[",
"0",
"]",
"# run autocompletion",
"raw_results",
".",
"extend",
"(",
"completer",
"(",
"raw_text_query",
".",
"getSearchQuery",
"(",
")",
",",
"language",
")",
")",
"# parse results (write :language and !engine back to result string)",
"results",
"=",
"[",
"]",
"for",
"result",
"in",
"raw_results",
":",
"raw_text_query",
".",
"changeSearchQuery",
"(",
"result",
")",
"# add parsed result",
"results",
".",
"append",
"(",
"raw_text_query",
".",
"getFullQuery",
"(",
")",
")",
"# return autocompleter results",
"if",
"request",
".",
"form",
".",
"get",
"(",
"'format'",
")",
"==",
"'x-suggestions'",
":",
"return",
"Response",
"(",
"json",
".",
"dumps",
"(",
"[",
"raw_text_query",
".",
"query",
",",
"results",
"]",
")",
",",
"mimetype",
"=",
"'application/json'",
")",
"return",
"Response",
"(",
"json",
".",
"dumps",
"(",
"results",
")",
",",
"mimetype",
"=",
"'application/json'",
")"
] |
a84caa22cf947e973c10aa968d35fb2bdda6d048
|
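The x-suggestions branch of autocompleter() above emits the two-element [query, completions] array defined by the OpenSearch suggestions extension, while the default branch returns the bare completion list. A hedged illustration of both response shapes (the values are invented):

import json

completions = ['!wikipedia', '!wikipedia paris']
print(json.dumps(['!wik', completions]))  # format=x-suggestions -> ["!wik", ["!wikipedia", ...]]
print(json.dumps(completions))            # default -> ["!wikipedia", "!wikipedia paris"]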
test
|
preferences
|
Render preferences page && save user preferences
|
searx/webapp.py
|
def preferences():
"""Render preferences page && save user preferences"""
# save preferences
if request.method == 'POST':
resp = make_response(redirect(urljoin(settings['server']['base_url'], url_for('index'))))
try:
request.preferences.parse_form(request.form)
except ValidationException:
request.errors.append(gettext('Invalid settings, please edit your preferences'))
return resp
return request.preferences.save(resp)
# render preferences
image_proxy = request.preferences.get_value('image_proxy')
lang = request.preferences.get_value('language')
disabled_engines = request.preferences.engines.get_disabled()
allowed_plugins = request.preferences.plugins.get_enabled()
# stats for preferences page
stats = {}
for c in categories:
for e in categories[c]:
stats[e.name] = {'time': None,
'warn_timeout': False,
'warn_time': False}
if e.timeout > settings['outgoing']['request_timeout']:
stats[e.name]['warn_timeout'] = True
stats[e.name]['supports_selected_language'] = _is_selected_language_supported(e, request.preferences)
# get first element [0], the engine time,
# and then the second element [1] : the time (the first one is the label)
for engine_stat in get_engines_stats()[0][1]:
stats[engine_stat.get('name')]['time'] = round(engine_stat.get('avg'), 3)
if engine_stat.get('avg') > settings['outgoing']['request_timeout']:
stats[engine_stat.get('name')]['warn_time'] = True
# end of stats
return render('preferences.html',
locales=settings['locales'],
current_locale=get_locale(),
image_proxy=image_proxy,
engines_by_category=categories,
stats=stats,
answerers=[{'info': a.self_info(), 'keywords': a.keywords} for a in answerers],
disabled_engines=disabled_engines,
autocomplete_backends=autocomplete_backends,
shortcuts={y: x for x, y in engine_shortcuts.items()},
themes=themes,
plugins=plugins,
doi_resolvers=settings['doi_resolvers'],
current_doi_resolver=get_doi_resolver(request.args, request.preferences.get_value('doi_resolver')),
allowed_plugins=allowed_plugins,
theme=get_current_theme_name(),
preferences_url_params=request.preferences.get_as_url_params(),
base_url=get_base_url(),
preferences=True)
|
[
"Render",
"preferences",
"page",
"&&",
"save",
"user",
"preferences"
] |
asciimoo/searx
|
python
|
https://github.com/asciimoo/searx/blob/a84caa22cf947e973c10aa968d35fb2bdda6d048/searx/webapp.py#L668-L725
|
[
"def",
"preferences",
"(",
")",
":",
"# save preferences",
"if",
"request",
".",
"method",
"==",
"'POST'",
":",
"resp",
"=",
"make_response",
"(",
"redirect",
"(",
"urljoin",
"(",
"settings",
"[",
"'server'",
"]",
"[",
"'base_url'",
"]",
",",
"url_for",
"(",
"'index'",
")",
")",
")",
")",
"try",
":",
"request",
".",
"preferences",
".",
"parse_form",
"(",
"request",
".",
"form",
")",
"except",
"ValidationException",
":",
"request",
".",
"errors",
".",
"append",
"(",
"gettext",
"(",
"'Invalid settings, please edit your preferences'",
")",
")",
"return",
"resp",
"return",
"request",
".",
"preferences",
".",
"save",
"(",
"resp",
")",
"# render preferences",
"image_proxy",
"=",
"request",
".",
"preferences",
".",
"get_value",
"(",
"'image_proxy'",
")",
"lang",
"=",
"request",
".",
"preferences",
".",
"get_value",
"(",
"'language'",
")",
"disabled_engines",
"=",
"request",
".",
"preferences",
".",
"engines",
".",
"get_disabled",
"(",
")",
"allowed_plugins",
"=",
"request",
".",
"preferences",
".",
"plugins",
".",
"get_enabled",
"(",
")",
"# stats for preferences page",
"stats",
"=",
"{",
"}",
"for",
"c",
"in",
"categories",
":",
"for",
"e",
"in",
"categories",
"[",
"c",
"]",
":",
"stats",
"[",
"e",
".",
"name",
"]",
"=",
"{",
"'time'",
":",
"None",
",",
"'warn_timeout'",
":",
"False",
",",
"'warn_time'",
":",
"False",
"}",
"if",
"e",
".",
"timeout",
">",
"settings",
"[",
"'outgoing'",
"]",
"[",
"'request_timeout'",
"]",
":",
"stats",
"[",
"e",
".",
"name",
"]",
"[",
"'warn_timeout'",
"]",
"=",
"True",
"stats",
"[",
"e",
".",
"name",
"]",
"[",
"'supports_selected_language'",
"]",
"=",
"_is_selected_language_supported",
"(",
"e",
",",
"request",
".",
"preferences",
")",
"# get first element [0], the engine time,",
"# and then the second element [1] : the time (the first one is the label)",
"for",
"engine_stat",
"in",
"get_engines_stats",
"(",
")",
"[",
"0",
"]",
"[",
"1",
"]",
":",
"stats",
"[",
"engine_stat",
".",
"get",
"(",
"'name'",
")",
"]",
"[",
"'time'",
"]",
"=",
"round",
"(",
"engine_stat",
".",
"get",
"(",
"'avg'",
")",
",",
"3",
")",
"if",
"engine_stat",
".",
"get",
"(",
"'avg'",
")",
">",
"settings",
"[",
"'outgoing'",
"]",
"[",
"'request_timeout'",
"]",
":",
"stats",
"[",
"engine_stat",
".",
"get",
"(",
"'name'",
")",
"]",
"[",
"'warn_time'",
"]",
"=",
"True",
"# end of stats",
"return",
"render",
"(",
"'preferences.html'",
",",
"locales",
"=",
"settings",
"[",
"'locales'",
"]",
",",
"current_locale",
"=",
"get_locale",
"(",
")",
",",
"image_proxy",
"=",
"image_proxy",
",",
"engines_by_category",
"=",
"categories",
",",
"stats",
"=",
"stats",
",",
"answerers",
"=",
"[",
"{",
"'info'",
":",
"a",
".",
"self_info",
"(",
")",
",",
"'keywords'",
":",
"a",
".",
"keywords",
"}",
"for",
"a",
"in",
"answerers",
"]",
",",
"disabled_engines",
"=",
"disabled_engines",
",",
"autocomplete_backends",
"=",
"autocomplete_backends",
",",
"shortcuts",
"=",
"{",
"y",
":",
"x",
"for",
"x",
",",
"y",
"in",
"engine_shortcuts",
".",
"items",
"(",
")",
"}",
",",
"themes",
"=",
"themes",
",",
"plugins",
"=",
"plugins",
",",
"doi_resolvers",
"=",
"settings",
"[",
"'doi_resolvers'",
"]",
",",
"current_doi_resolver",
"=",
"get_doi_resolver",
"(",
"request",
".",
"args",
",",
"request",
".",
"preferences",
".",
"get_value",
"(",
"'doi_resolver'",
")",
")",
",",
"allowed_plugins",
"=",
"allowed_plugins",
",",
"theme",
"=",
"get_current_theme_name",
"(",
")",
",",
"preferences_url_params",
"=",
"request",
".",
"preferences",
".",
"get_as_url_params",
"(",
")",
",",
"base_url",
"=",
"get_base_url",
"(",
")",
",",
"preferences",
"=",
"True",
")"
] |
a84caa22cf947e973c10aa968d35fb2bdda6d048
|
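The stats loop in preferences() above indexes get_engines_stats()[0][1], i.e. the rows of the first labelled series, and expects each row to carry an engine name and an avg time. A hedged illustration of the shape being consumed (the values and the exact return structure are assumptions; only the 'name' and 'avg' keys appear in the snippet):

# get_engines_stats() is assumed to return [(label, rows), ...]; the first rows list
# looks roughly like:
page_load_rows = [{'name': 'duden', 'avg': 0.4231}, {'name': 'wikipedia', 'avg': 2.7}]
request_timeout = 2.0  # stand-in for settings['outgoing']['request_timeout']
stats = {r['name']: {'time': round(r['avg'], 3), 'warn_time': r['avg'] > request_timeout}
         for r in page_load_rows}
# stats -> {'duden': {'time': 0.423, 'warn_time': False},
#           'wikipedia': {'time': 2.7, 'warn_time': True}}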
test
|
request
|
pre-request callback
params<dict>:
method : POST/GET
headers : {}
data : {} # if method == POST
url : ''
category: 'search category'
pageno : 1 # number of the requested page
|
searx/engines/duden.py
|
def request(query, params):
'''pre-request callback
params<dict>:
method : POST/GET
headers : {}
data : {} # if method == POST
url : ''
category: 'search category'
pageno : 1 # number of the requested page
'''
offset = (params['pageno'] - 1)
params['url'] = search_url.format(offset=offset, query=quote(query))
return params
|
[
"pre",
"-",
"request",
"callback",
"params<dict",
">",
":",
"method",
":",
"POST",
"/",
"GET",
"headers",
":",
"{}",
"data",
":",
"{}",
"#",
"if",
"method",
"==",
"POST",
"url",
":",
"category",
":",
"search",
"category",
"pageno",
":",
"1",
"#",
"number",
"of",
"the",
"requested",
"page"
] |
asciimoo/searx
|
python
|
https://github.com/asciimoo/searx/blob/a84caa22cf947e973c10aa968d35fb2bdda6d048/searx/engines/duden.py#L26-L39
|
[
"def",
"request",
"(",
"query",
",",
"params",
")",
":",
"offset",
"=",
"(",
"params",
"[",
"'pageno'",
"]",
"-",
"1",
")",
"params",
"[",
"'url'",
"]",
"=",
"search_url",
".",
"format",
"(",
"offset",
"=",
"offset",
",",
"query",
"=",
"quote",
"(",
"query",
")",
")",
"return",
"params"
] |
a84caa22cf947e973c10aa968d35fb2bdda6d048
|
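The docstring above spells out searx's engine request-callback contract: the engine receives the raw query plus a pre-filled params dict and returns it with url (and, where needed, method, headers, and data) populated. A minimal sketch of exercising the function from the row above (the search_url template value is an assumption; only the name appears in the snippet):

from urllib.parse import quote  # Python 3 source of the quote() used above (assumption)

search_url = 'https://www.duden.de/suchen/dudenonline/{query}?page={offset}'  # assumed template

# with def request(query, params) from the row above in scope:
params = {'method': 'GET', 'headers': {}, 'data': {},
          'url': '', 'category': 'general', 'pageno': 2}
params = request('hallo welt', params)
# offset = pageno - 1 = 1; params['url'] == search_url filled with the quoted query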
test
|
response
|
post-response callback
resp: requests response object
|
searx/engines/duden.py
|
def response(resp):
'''post-response callback
resp: requests response object
'''
results = []
dom = html.fromstring(resp.text)
try:
number_of_results_string = re.sub('[^0-9]', '', dom.xpath(
'//a[@class="active" and contains(@href,"/suchen/dudenonline")]/span/text()')[0]
)
results.append({'number_of_results': int(number_of_results_string)})
except:
logger.debug("Couldn't read number of results.")
pass
for result in dom.xpath('//section[@class="wide" and not(contains(@style,"overflow:hidden"))]'):
try:
logger.debug("running for %s" % str(result))
link = result.xpath('.//h2/a')[0]
url = link.attrib.get('href')
title = result.xpath('string(.//h2/a)')
content = extract_text(result.xpath('.//p'))
# append result
results.append({'url': url,
'title': title,
'content': content})
except:
logger.debug('result parse error in:\n%s', etree.tostring(result, pretty_print=True))
continue
return results
|
[
"post",
"-",
"response",
"callback",
"resp",
":",
"requests",
"response",
"object"
] |
asciimoo/searx
|
python
|
https://github.com/asciimoo/searx/blob/a84caa22cf947e973c10aa968d35fb2bdda6d048/searx/engines/duden.py#L42-L76
|
[
"def",
"response",
"(",
"resp",
")",
":",
"results",
"=",
"[",
"]",
"dom",
"=",
"html",
".",
"fromstring",
"(",
"resp",
".",
"text",
")",
"try",
":",
"number_of_results_string",
"=",
"re",
".",
"sub",
"(",
"'[^0-9]'",
",",
"''",
",",
"dom",
".",
"xpath",
"(",
"'//a[@class=\"active\" and contains(@href,\"/suchen/dudenonline\")]/span/text()'",
")",
"[",
"0",
"]",
")",
"results",
".",
"append",
"(",
"{",
"'number_of_results'",
":",
"int",
"(",
"number_of_results_string",
")",
"}",
")",
"except",
":",
"logger",
".",
"debug",
"(",
"\"Couldn't read number of results.\"",
")",
"pass",
"for",
"result",
"in",
"dom",
".",
"xpath",
"(",
"'//section[@class=\"wide\" and not(contains(@style,\"overflow:hidden\"))]'",
")",
":",
"try",
":",
"logger",
".",
"debug",
"(",
"\"running for %s\"",
"%",
"str",
"(",
"result",
")",
")",
"link",
"=",
"result",
".",
"xpath",
"(",
"'.//h2/a'",
")",
"[",
"0",
"]",
"url",
"=",
"link",
".",
"attrib",
".",
"get",
"(",
"'href'",
")",
"title",
"=",
"result",
".",
"xpath",
"(",
"'string(.//h2/a)'",
")",
"content",
"=",
"extract_text",
"(",
"result",
".",
"xpath",
"(",
"'.//p'",
")",
")",
"# append result",
"results",
".",
"append",
"(",
"{",
"'url'",
":",
"url",
",",
"'title'",
":",
"title",
",",
"'content'",
":",
"content",
"}",
")",
"except",
":",
"logger",
".",
"debug",
"(",
"'result parse error in:\\n%s'",
",",
"etree",
".",
"tostring",
"(",
"result",
",",
"pretty_print",
"=",
"True",
")",
")",
"continue",
"return",
"results"
] |
a84caa22cf947e973c10aa968d35fb2bdda6d048
|
test
|
get_themes
|
Returns available themes list.
|
searx/utils.py
|
def get_themes(templates_path):
"""Returns available themes list."""
themes = os.listdir(templates_path)
if '__common__' in themes:
themes.remove('__common__')
return themes
|
[
"Returns",
"available",
"themes",
"list",
"."
] |
asciimoo/searx
|
python
|
https://github.com/asciimoo/searx/blob/a84caa22cf947e973c10aa968d35fb2bdda6d048/searx/utils.py#L190-L195
|
[
"def",
"get_themes",
"(",
"templates_path",
")",
":",
"themes",
"=",
"os",
".",
"listdir",
"(",
"templates_path",
")",
"if",
"'__common__'",
"in",
"themes",
":",
"themes",
".",
"remove",
"(",
"'__common__'",
")",
"return",
"themes"
] |
a84caa22cf947e973c10aa968d35fb2bdda6d048
|
test
|
searx_bang
|
check if the searchQuery contain a bang, and create fitting autocompleter results
|
searx/autocomplete.py
|
def searx_bang(full_query):
'''check if the searchQuery contain a bang, and create fitting autocompleter results'''
# check if there is a query which can be parsed
if len(full_query.getSearchQuery()) == 0:
return []
results = []
# check if current query stats with !bang
first_char = full_query.getSearchQuery()[0]
if first_char == '!' or first_char == '?':
if len(full_query.getSearchQuery()) == 1:
# show some example queries
# TODO, check if engine is not avaliable
results.append(first_char + "images")
results.append(first_char + "wikipedia")
results.append(first_char + "osm")
else:
engine_query = full_query.getSearchQuery()[1:]
# check if query starts with categorie name
for categorie in categories:
if categorie.startswith(engine_query):
results.append(first_char + '{categorie}'.format(categorie=categorie))
# check if query starts with engine name
for engine in engines:
if engine.startswith(engine_query.replace('_', ' ')):
results.append(first_char + '{engine}'.format(engine=engine.replace(' ', '_')))
# check if query starts with engine shortcut
for engine_shortcut in engine_shortcuts:
if engine_shortcut.startswith(engine_query):
results.append(first_char + '{engine_shortcut}'.format(engine_shortcut=engine_shortcut))
# check if current query stats with :bang
elif first_char == ':':
if len(full_query.getSearchQuery()) == 1:
# show some example queries
results.append(":en")
results.append(":en_us")
results.append(":english")
results.append(":united_kingdom")
else:
engine_query = full_query.getSearchQuery()[1:]
for lc in language_codes:
lang_id, lang_name, country, english_name = map(unicode.lower, lc)
# check if query starts with language-id
if lang_id.startswith(engine_query):
if len(engine_query) <= 2:
results.append(u':{lang_id}'.format(lang_id=lang_id.split('-')[0]))
else:
results.append(u':{lang_id}'.format(lang_id=lang_id))
# check if query starts with language name
if lang_name.startswith(engine_query) or english_name.startswith(engine_query):
results.append(u':{lang_name}'.format(lang_name=lang_name))
# check if query starts with country
if country.startswith(engine_query.replace('_', ' ')):
results.append(u':{country}'.format(country=country.replace(' ', '_')))
# remove duplicates
result_set = set(results)
# remove results which are already contained in the query
for query_part in full_query.query_parts:
if query_part in result_set:
result_set.remove(query_part)
# convert result_set back to list
return list(result_set)
|
[
"check",
"if",
"the",
"searchQuery",
"contain",
"a",
"bang",
"and",
"create",
"fitting",
"autocompleter",
"results"
] |
asciimoo/searx
|
python
|
https://github.com/asciimoo/searx/blob/a84caa22cf947e973c10aa968d35fb2bdda6d048/searx/autocomplete.py#L37-L110
|
[
"def",
"searx_bang",
"(",
"full_query",
")",
":",
"# check if there is a query which can be parsed",
"if",
"len",
"(",
"full_query",
".",
"getSearchQuery",
"(",
")",
")",
"==",
"0",
":",
"return",
"[",
"]",
"results",
"=",
"[",
"]",
"# check if current query stats with !bang",
"first_char",
"=",
"full_query",
".",
"getSearchQuery",
"(",
")",
"[",
"0",
"]",
"if",
"first_char",
"==",
"'!'",
"or",
"first_char",
"==",
"'?'",
":",
"if",
"len",
"(",
"full_query",
".",
"getSearchQuery",
"(",
")",
")",
"==",
"1",
":",
"# show some example queries",
"# TODO, check if engine is not avaliable",
"results",
".",
"append",
"(",
"first_char",
"+",
"\"images\"",
")",
"results",
".",
"append",
"(",
"first_char",
"+",
"\"wikipedia\"",
")",
"results",
".",
"append",
"(",
"first_char",
"+",
"\"osm\"",
")",
"else",
":",
"engine_query",
"=",
"full_query",
".",
"getSearchQuery",
"(",
")",
"[",
"1",
":",
"]",
"# check if query starts with categorie name",
"for",
"categorie",
"in",
"categories",
":",
"if",
"categorie",
".",
"startswith",
"(",
"engine_query",
")",
":",
"results",
".",
"append",
"(",
"first_char",
"+",
"'{categorie}'",
".",
"format",
"(",
"categorie",
"=",
"categorie",
")",
")",
"# check if query starts with engine name",
"for",
"engine",
"in",
"engines",
":",
"if",
"engine",
".",
"startswith",
"(",
"engine_query",
".",
"replace",
"(",
"'_'",
",",
"' '",
")",
")",
":",
"results",
".",
"append",
"(",
"first_char",
"+",
"'{engine}'",
".",
"format",
"(",
"engine",
"=",
"engine",
".",
"replace",
"(",
"' '",
",",
"'_'",
")",
")",
")",
"# check if query starts with engine shortcut",
"for",
"engine_shortcut",
"in",
"engine_shortcuts",
":",
"if",
"engine_shortcut",
".",
"startswith",
"(",
"engine_query",
")",
":",
"results",
".",
"append",
"(",
"first_char",
"+",
"'{engine_shortcut}'",
".",
"format",
"(",
"engine_shortcut",
"=",
"engine_shortcut",
")",
")",
"# check if current query stats with :bang",
"elif",
"first_char",
"==",
"':'",
":",
"if",
"len",
"(",
"full_query",
".",
"getSearchQuery",
"(",
")",
")",
"==",
"1",
":",
"# show some example queries",
"results",
".",
"append",
"(",
"\":en\"",
")",
"results",
".",
"append",
"(",
"\":en_us\"",
")",
"results",
".",
"append",
"(",
"\":english\"",
")",
"results",
".",
"append",
"(",
"\":united_kingdom\"",
")",
"else",
":",
"engine_query",
"=",
"full_query",
".",
"getSearchQuery",
"(",
")",
"[",
"1",
":",
"]",
"for",
"lc",
"in",
"language_codes",
":",
"lang_id",
",",
"lang_name",
",",
"country",
",",
"english_name",
"=",
"map",
"(",
"unicode",
".",
"lower",
",",
"lc",
")",
"# check if query starts with language-id",
"if",
"lang_id",
".",
"startswith",
"(",
"engine_query",
")",
":",
"if",
"len",
"(",
"engine_query",
")",
"<=",
"2",
":",
"results",
".",
"append",
"(",
"u':{lang_id}'",
".",
"format",
"(",
"lang_id",
"=",
"lang_id",
".",
"split",
"(",
"'-'",
")",
"[",
"0",
"]",
")",
")",
"else",
":",
"results",
".",
"append",
"(",
"u':{lang_id}'",
".",
"format",
"(",
"lang_id",
"=",
"lang_id",
")",
")",
"# check if query starts with language name",
"if",
"lang_name",
".",
"startswith",
"(",
"engine_query",
")",
"or",
"english_name",
".",
"startswith",
"(",
"engine_query",
")",
":",
"results",
".",
"append",
"(",
"u':{lang_name}'",
".",
"format",
"(",
"lang_name",
"=",
"lang_name",
")",
")",
"# check if query starts with country",
"if",
"country",
".",
"startswith",
"(",
"engine_query",
".",
"replace",
"(",
"'_'",
",",
"' '",
")",
")",
":",
"results",
".",
"append",
"(",
"u':{country}'",
".",
"format",
"(",
"country",
"=",
"country",
".",
"replace",
"(",
"' '",
",",
"'_'",
")",
")",
")",
"# remove duplicates",
"result_set",
"=",
"set",
"(",
"results",
")",
"# remove results which are already contained in the query",
"for",
"query_part",
"in",
"full_query",
".",
"query_parts",
":",
"if",
"query_part",
"in",
"result_set",
":",
"result_set",
".",
"remove",
"(",
"query_part",
")",
"# convert result_set back to list",
"return",
"list",
"(",
"result_set",
")"
] |
a84caa22cf947e973c10aa968d35fb2bdda6d048
|
test
|
response
|
remove first and last lines to get only json
|
searx/engines/currency_convert.py
|
def response(resp):
"""remove first and last lines to get only json"""
json_resp = resp.text[resp.text.find('\n') + 1:resp.text.rfind('\n') - 2]
results = []
try:
conversion_rate = float(json.loads(json_resp)['conversion']['converted-amount'])
except:
return results
answer = '{0} {1} = {2} {3}, 1 {1} ({5}) = {4} {3} ({6})'.format(
resp.search_params['amount'],
resp.search_params['from'],
resp.search_params['amount'] * conversion_rate,
resp.search_params['to'],
conversion_rate,
resp.search_params['from_name'],
resp.search_params['to_name'],
)
url = 'https://duckduckgo.com/js/spice/currency/1/{0}/{1}'.format(
resp.search_params['from'].upper(), resp.search_params['to'])
results.append({'answer': answer, 'url': url})
return results
|
[
"remove",
"first",
"and",
"last",
"lines",
"to",
"get",
"only",
"json"
] |
asciimoo/searx
|
python
|
https://github.com/asciimoo/searx/blob/a84caa22cf947e973c10aa968d35fb2bdda6d048/searx/engines/currency_convert.py#L64-L87
|
[
"def",
"response",
"(",
"resp",
")",
":",
"json_resp",
"=",
"resp",
".",
"text",
"[",
"resp",
".",
"text",
".",
"find",
"(",
"'\\n'",
")",
"+",
"1",
":",
"resp",
".",
"text",
".",
"rfind",
"(",
"'\\n'",
")",
"-",
"2",
"]",
"results",
"=",
"[",
"]",
"try",
":",
"conversion_rate",
"=",
"float",
"(",
"json",
".",
"loads",
"(",
"json_resp",
")",
"[",
"'conversion'",
"]",
"[",
"'converted-amount'",
"]",
")",
"except",
":",
"return",
"results",
"answer",
"=",
"'{0} {1} = {2} {3}, 1 {1} ({5}) = {4} {3} ({6})'",
".",
"format",
"(",
"resp",
".",
"search_params",
"[",
"'amount'",
"]",
",",
"resp",
".",
"search_params",
"[",
"'from'",
"]",
",",
"resp",
".",
"search_params",
"[",
"'amount'",
"]",
"*",
"conversion_rate",
",",
"resp",
".",
"search_params",
"[",
"'to'",
"]",
",",
"conversion_rate",
",",
"resp",
".",
"search_params",
"[",
"'from_name'",
"]",
",",
"resp",
".",
"search_params",
"[",
"'to_name'",
"]",
",",
")",
"url",
"=",
"'https://duckduckgo.com/js/spice/currency/1/{0}/{1}'",
".",
"format",
"(",
"resp",
".",
"search_params",
"[",
"'from'",
"]",
".",
"upper",
"(",
")",
",",
"resp",
".",
"search_params",
"[",
"'to'",
"]",
")",
"results",
".",
"append",
"(",
"{",
"'answer'",
":",
"answer",
",",
"'url'",
":",
"url",
"}",
")",
"return",
"results"
] |
a84caa22cf947e973c10aa968d35fb2bdda6d048
|
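The slice at the top of response() above unwraps a JSONP-style payload: it keeps everything between the first newline and two characters before the last newline, so for a body ending in ');\n' it drops the wrapper's opening line and the trailing ');'. A hedged, self-contained illustration (the wrapper name and payload are invented; the 'conversion' and 'converted-amount' keys come from the snippet):

import json

text = 'ddg_spice_currency(\n{"conversion": {"converted-amount": "1.085"}});\n'
json_resp = text[text.find('\n') + 1:text.rfind('\n') - 2]
print(json.loads(json_resp)['conversion']['converted-amount'])  # prints: 1.085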
test
|
custom_gradient
|
Embeds a custom gradient into a `Tensor`.
This function works by clever application of `stop_gradient`. I.e., observe
that:
```none
h(x) = stop_gradient(f(x)) + stop_gradient(g(x)) * (x - stop_gradient(x))
```
is such that `h(x) == stop_gradient(f(x))` and
`grad[h(x), x] == stop_gradient(g(x)).`
In addition to scalar-domain/scalar-range functions, this function also
supports tensor-domain/scalar-range functions.
Partial Custom Gradient:
Suppose `h(x) = htilde(x, y)`. Note that `dh/dx = stop(g(x))` but `dh/dy =
None`. This is because a `Tensor` cannot have only a portion of its gradient
stopped. To circumvent this issue, one must manually `stop_gradient` the
relevant portions of `f`, `g`. For example see the unit-test,
`test_works_correctly_fx_gx_manually_stopped`.
Args:
fx: `Tensor`. Output of function evaluated at `x`.
gx: `Tensor` or list of `Tensor`s. Gradient of function at (each) `x`.
x: `Tensor` or list of `Tensor`s. Args of evaluation for `f`.
fx_gx_manually_stopped: Python `bool` indicating that `fx`, `gx` manually
have `stop_gradient` applied.
name: Python `str` name prefixed to Ops created by this function.
Returns:
fx: Floating-type `Tensor` equal to `f(x)` but which has gradient
`stop_gradient(g(x))`.
|
tensorflow_probability/python/math/custom_gradient.py
|
def custom_gradient(fx, gx, x, fx_gx_manually_stopped=False, name=None):
"""Embeds a custom gradient into a `Tensor`.
This function works by clever application of `stop_gradient`. I.e., observe
that:
```none
h(x) = stop_gradient(f(x)) + stop_gradient(g(x)) * (x - stop_gradient(x))
```
is such that `h(x) == stop_gradient(f(x))` and
`grad[h(x), x] == stop_gradient(g(x)).`
In addition to scalar-domain/scalar-range functions, this function also
supports tensor-domain/scalar-range functions.
Partial Custom Gradient:
Suppose `h(x) = htilde(x, y)`. Note that `dh/dx = stop(g(x))` but `dh/dy =
None`. This is because a `Tensor` cannot have only a portion of its gradient
stopped. To circumvent this issue, one must manually `stop_gradient` the
relevant portions of `f`, `g`. For example see the unit-test,
`test_works_correctly_fx_gx_manually_stopped`.
Args:
fx: `Tensor`. Output of function evaluated at `x`.
gx: `Tensor` or list of `Tensor`s. Gradient of function at (each) `x`.
x: `Tensor` or list of `Tensor`s. Args of evaluation for `f`.
fx_gx_manually_stopped: Python `bool` indicating that `fx`, `gx` manually
have `stop_gradient` applied.
name: Python `str` name prefixed to Ops created by this function.
Returns:
fx: Floating-type `Tensor` equal to `f(x)` but which has gradient
`stop_gradient(g(x))`.
"""
def maybe_stop(x):
if fx_gx_manually_stopped:
return x
return tf.stop_gradient(x)
with tf.compat.v1.name_scope(name, 'custom_gradient', [fx, gx, x]):
fx = tf.convert_to_tensor(value=fx, name='fx')
# We don't want to bother eagerly computing `gx` since we may not even need
# it.
with tf.control_dependencies([fx]):
if is_list_like(x):
x = [identity(x_, name='x') for x_ in x]
else:
x = [identity(x, name='x')]
if is_list_like(gx):
gx = [identity(gx_, dtype=fx.dtype, name='gx')
for gx_ in gx]
else:
gx = [identity(gx, dtype=fx.dtype, name='gx')]
override_grad = []
for x_, gx_ in zip(x, gx):
# Observe: tf.gradients(f(x), x)[i].shape == x[i].shape
# thus we check that the user is supplying correct shapes.
equal_shape = tf.compat.v1.assert_equal(
tf.shape(input=x_),
tf.shape(input=gx_),
message='Each `x` must have the same shape as each `gx`.')
with tf.control_dependencies([equal_shape]):
# IEEE754 ensures `(x-x)==0.` and that `0.*x==0.` so we make sure to
# write the code this way, rather than, e.g.,
# `sum_x * stop(gx) + stop(fx - sum_x * gx)`.
# For more discussion regarding the relevant portions of the IEEE754
# standard, see the StackOverflow question,
# "Is there a floating point value of x, for which x-x == 0 is false?"
# http://stackoverflow.com/q/2686644
zeros_like_x_ = x_ - tf.stop_gradient(x_)
override_grad.append(
tf.reduce_sum(input_tensor=maybe_stop(gx_) * zeros_like_x_))
override_grad = sum(override_grad)
override_grad /= tf.cast(tf.size(input=fx), dtype=fx.dtype.base_dtype)
# Proof of correctness:
#
# f(x) = x * stop[gx] + stop[fx - x * gx]
# = stop[fx]
#
# g(x) = grad[fx]
# = stop[gx] + grad[stop[fx - x * gx]]
# = stop[gx] + 0
#
# Notice that when x is zero it still works:
# grad[x * stop(gx) + stop(fx - x * gx)] = 1 * stop[gx] + 0 = stop[gx]
#
# The proof is similar for the tensor-domain case, except that we
# `reduce_sum` the `stop[gx] * (x - stop[x])` then rescale by
# `tf.size(fx)` since this reduced version is broadcast to `fx`.
return maybe_stop(fx) + override_grad
|
def custom_gradient(fx, gx, x, fx_gx_manually_stopped=False, name=None):
"""Embeds a custom gradient into a `Tensor`.
This function works by clever application of `stop_gradient`. I.e., observe
that:
```none
h(x) = stop_gradient(f(x)) + stop_gradient(g(x)) * (x - stop_gradient(x))
```
is such that `h(x) == stop_gradient(f(x))` and
`grad[h(x), x] == stop_gradient(g(x)).`
In addition to scalar-domain/scalar-range functions, this function also
supports tensor-domain/scalar-range functions.
Partial Custom Gradient:
Suppose `h(x) = htilde(x, y)`. Note that `dh/dx = stop(g(x))` but `dh/dy =
None`. This is because a `Tensor` cannot have only a portion of its gradient
stopped. To circumvent this issue, one must manually `stop_gradient` the
relevant portions of `f`, `g`. For example see the unit-test,
`test_works_correctly_fx_gx_manually_stopped`.
Args:
fx: `Tensor`. Output of function evaluated at `x`.
gx: `Tensor` or list of `Tensor`s. Gradient of function at (each) `x`.
x: `Tensor` or list of `Tensor`s. Args of evaluation for `f`.
fx_gx_manually_stopped: Python `bool` indicating that `fx`, `gx` manually
have `stop_gradient` applied.
name: Python `str` name prefixed to Ops created by this function.
Returns:
fx: Floating-type `Tensor` equal to `f(x)` but which has gradient
`stop_gradient(g(x))`.
"""
def maybe_stop(x):
if fx_gx_manually_stopped:
return x
return tf.stop_gradient(x)
with tf.compat.v1.name_scope(name, 'custom_gradient', [fx, gx, x]):
fx = tf.convert_to_tensor(value=fx, name='fx')
# We don't want to bother eagerly computing `gx` since we may not even need
# it.
with tf.control_dependencies([fx]):
if is_list_like(x):
x = [identity(x_, name='x') for x_ in x]
else:
x = [identity(x, name='x')]
if is_list_like(gx):
gx = [identity(gx_, dtype=fx.dtype, name='gx')
for gx_ in gx]
else:
gx = [identity(gx, dtype=fx.dtype, name='gx')]
override_grad = []
for x_, gx_ in zip(x, gx):
# Observe: tf.gradients(f(x), x)[i].shape == x[i].shape
# thus we check that the user is supplying correct shapes.
equal_shape = tf.compat.v1.assert_equal(
tf.shape(input=x_),
tf.shape(input=gx_),
message='Each `x` must have the same shape as each `gx`.')
with tf.control_dependencies([equal_shape]):
# IEEE754 ensures `(x-x)==0.` and that `0.*x==0.` so we make sure to
# write the code this way, rather than, e.g.,
# `sum_x * stop(gx) + stop(fx - sum_x * gx)`.
# For more discussion regarding the relevant portions of the IEEE754
# standard, see the StackOverflow question,
# "Is there a floating point value of x, for which x-x == 0 is false?"
# http://stackoverflow.com/q/2686644
zeros_like_x_ = x_ - tf.stop_gradient(x_)
override_grad.append(
tf.reduce_sum(input_tensor=maybe_stop(gx_) * zeros_like_x_))
override_grad = sum(override_grad)
override_grad /= tf.cast(tf.size(input=fx), dtype=fx.dtype.base_dtype)
# Proof of correctness:
#
# f(x) = x * stop[gx] + stop[fx - x * gx]
# = stop[fx]
#
# g(x) = grad[fx]
# = stop[gx] + grad[stop[fx - x * gx]]
# = stop[gx] + 0
#
# Notice that when x is zero it still works:
# grad[x * stop(gx) + stop(fx - x * gx)] = 1 * stop[gx] + 0 = stop[gx]
#
# The proof is similar for the tensor-domain case, except that we
# `reduce_sum` the `stop[gx] * (x - stop[x])` then rescale by
# `tf.size(fx)` since this reduced version is broadcast to `fx`.
return maybe_stop(fx) + override_grad
|
[
"Embeds",
"a",
"custom",
"gradient",
"into",
"a",
"Tensor",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/custom_gradient.py#L39-L133
|
[
"def",
"custom_gradient",
"(",
"fx",
",",
"gx",
",",
"x",
",",
"fx_gx_manually_stopped",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"def",
"maybe_stop",
"(",
"x",
")",
":",
"if",
"fx_gx_manually_stopped",
":",
"return",
"x",
"return",
"tf",
".",
"stop_gradient",
"(",
"x",
")",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'custom_gradient'",
",",
"[",
"fx",
",",
"gx",
",",
"x",
"]",
")",
":",
"fx",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"fx",
",",
"name",
"=",
"'fx'",
")",
"# We don't want to bother eagerly computing `gx` since we may not even need",
"# it.",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"fx",
"]",
")",
":",
"if",
"is_list_like",
"(",
"x",
")",
":",
"x",
"=",
"[",
"identity",
"(",
"x_",
",",
"name",
"=",
"'x'",
")",
"for",
"x_",
"in",
"x",
"]",
"else",
":",
"x",
"=",
"[",
"identity",
"(",
"x",
",",
"name",
"=",
"'x'",
")",
"]",
"if",
"is_list_like",
"(",
"gx",
")",
":",
"gx",
"=",
"[",
"identity",
"(",
"gx_",
",",
"dtype",
"=",
"fx",
".",
"dtype",
",",
"name",
"=",
"'gx'",
")",
"for",
"gx_",
"in",
"gx",
"]",
"else",
":",
"gx",
"=",
"[",
"identity",
"(",
"gx",
",",
"dtype",
"=",
"fx",
".",
"dtype",
",",
"name",
"=",
"'gx'",
")",
"]",
"override_grad",
"=",
"[",
"]",
"for",
"x_",
",",
"gx_",
"in",
"zip",
"(",
"x",
",",
"gx",
")",
":",
"# Observe: tf.gradients(f(x), x)[i].shape == x[i].shape",
"# thus we check that the user is supplying correct shapes.",
"equal_shape",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"assert_equal",
"(",
"tf",
".",
"shape",
"(",
"input",
"=",
"x_",
")",
",",
"tf",
".",
"shape",
"(",
"input",
"=",
"gx_",
")",
",",
"message",
"=",
"'Each `x` must have the same shape as each `gx`.'",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"equal_shape",
"]",
")",
":",
"# IEEE754 ensures `(x-x)==0.` and that `0.*x==0.` so we make sure to",
"# write the code this way, rather than, e.g.,",
"# `sum_x * stop(gx) + stop(fx - sum_x * gx)`.",
"# For more discussion regarding the relevant portions of the IEEE754",
"# standard, see the StackOverflow question,",
"# \"Is there a floating point value of x, for which x-x == 0 is false?\"",
"# http://stackoverflow.com/q/2686644",
"zeros_like_x_",
"=",
"x_",
"-",
"tf",
".",
"stop_gradient",
"(",
"x_",
")",
"override_grad",
".",
"append",
"(",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"maybe_stop",
"(",
"gx_",
")",
"*",
"zeros_like_x_",
")",
")",
"override_grad",
"=",
"sum",
"(",
"override_grad",
")",
"override_grad",
"/=",
"tf",
".",
"cast",
"(",
"tf",
".",
"size",
"(",
"input",
"=",
"fx",
")",
",",
"dtype",
"=",
"fx",
".",
"dtype",
".",
"base_dtype",
")",
"# Proof of correctness:",
"#",
"# f(x) = x * stop[gx] + stop[fx - x * gx]",
"# = stop[fx]",
"#",
"# g(x) = grad[fx]",
"# = stop[gx] + grad[stop[fx - x * gx]]",
"# = stop[gx] + 0",
"#",
"# Notice that when x is zero it still works:",
"# grad[x * stop(gx) + stop(fx - x * gx)] = 1 * stop[gx] + 0 = stop[gx]",
"#",
"# The proof is similar for the tensor-domain case, except that we",
"# `reduce_sum` the `stop[gx] * (x - stop[x])` then rescale by",
"# `tf.size(fx)` since this reduced version is broadcast to `fx`.",
"return",
"maybe_stop",
"(",
"fx",
")",
"+",
"override_grad"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
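As a usage sketch (assuming TF2 eager execution and that `custom_gradient` above is in scope), the following overrides the reported gradient of `exp(x)` to be `2*x` while leaving the forward value untouched:

```python
import tensorflow as tf

x = tf.constant(3.)
with tf.GradientTape() as tape:
  tape.watch(x)
  # Forward value is exp(3.); the reported gradient is overridden to 2*x.
  fx = custom_gradient(fx=tf.exp(x), gx=2. * x, x=x)
dfdx = tape.gradient(fx, x)  # == 6., i.e. stop_gradient(2 * x) at x = 3
```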
test
|
value_and_gradient
|
Computes `f(*xs)` and its gradients with respect to `*xs`.
Args:
f: Python `callable` to be differentiated. If `f` returns a scalar, this
scalar will be differentiated. If `f` returns a tensor or list of tensors,
by default a scalar will be computed by adding all their values to produce
a single scalar. If desired, the tensors can be elementwise multiplied by
the tensors passed as the `dy` keyword argument to the returned gradient
function.
xs: Python list of parameters of f for which to differentiate. (Can also
    be a single `Tensor`.)
use_gradient_tape: Python `bool` indicating that `tf.GradientTape`
should be used regardless of `tf.executing_eagerly()` status.
Default value: `False`.
name: Python `str` name prefixed to ops created by this function.
Default value: `None` (i.e., `'value_and_gradient'`).
Returns:
y: `y = f(*xs)`.
dydx: Gradient of `y` wrt each of `xs`.
|
tensorflow_probability/python/math/gradient.py
|
def value_and_gradient(f, xs, use_gradient_tape=False, name=None):
"""Computes `f(*xs)` and its gradients wrt to `*xs`.
Args:
f: Python `callable` to be differentiated. If `f` returns a scalar, this
scalar will be differentiated. If `f` returns a tensor or list of tensors,
by default a scalar will be computed by adding all their values to produce
a single scalar. If desired, the tensors can be elementwise multiplied by
the tensors passed as the `dy` keyword argument to the returned gradient
function.
xs: Python list of parameters of f for which to differentiate. (Can also
      be a single `Tensor`.)
use_gradient_tape: Python `bool` indicating that `tf.GradientTape`
should be used regardless of `tf.executing_eagerly()` status.
Default value: `False`.
name: Python `str` name prefixed to ops created by this function.
Default value: `None` (i.e., `'value_and_gradient'`).
Returns:
y: `y = f(*xs)`.
dydx: Gradient of `y` wrt each of `xs`.
"""
with tf.compat.v1.name_scope(name, 'value_and_gradient', [xs]):
is_xs_list_like = isinstance(xs, (tuple, list))
if not is_xs_list_like:
xs = [xs]
xs = [tf.convert_to_tensor(value=x, name='x{}'.format(i))
for i, x in enumerate(xs)]
if tf.executing_eagerly() or use_gradient_tape:
with tf.GradientTape(watch_accessed_variables=False) as tape:
for x in xs:
tape.watch(x)
y = f(*xs)
dydx = tape.gradient(y, xs)
else:
y = f(*xs)
dydx = tf.gradients(ys=y, xs=xs)
if not is_xs_list_like:
dydx = dydx[0]
return y, dydx
|
def value_and_gradient(f, xs, use_gradient_tape=False, name=None):
"""Computes `f(*xs)` and its gradients wrt to `*xs`.
Args:
f: Python `callable` to be differentiated. If `f` returns a scalar, this
scalar will be differentiated. If `f` returns a tensor or list of tensors,
by default a scalar will be computed by adding all their values to produce
a single scalar. If desired, the tensors can be elementwise multiplied by
the tensors passed as the `dy` keyword argument to the returned gradient
function.
xs: Python list of parameters of f for which to differentiate. (Can also
      be a single `Tensor`.)
use_gradient_tape: Python `bool` indicating that `tf.GradientTape`
should be used regardless of `tf.executing_eagerly()` status.
Default value: `False`.
name: Python `str` name prefixed to ops created by this function.
Default value: `None` (i.e., `'value_and_gradient'`).
Returns:
y: `y = f(*xs)`.
dydx: Gradient of `y` wrt each of `xs`.
"""
with tf.compat.v1.name_scope(name, 'value_and_gradient', [xs]):
is_xs_list_like = isinstance(xs, (tuple, list))
if not is_xs_list_like:
xs = [xs]
xs = [tf.convert_to_tensor(value=x, name='x{}'.format(i))
for i, x in enumerate(xs)]
if tf.executing_eagerly() or use_gradient_tape:
with tf.GradientTape(watch_accessed_variables=False) as tape:
for x in xs:
tape.watch(x)
y = f(*xs)
dydx = tape.gradient(y, xs)
else:
y = f(*xs)
dydx = tf.gradients(ys=y, xs=xs)
if not is_xs_list_like:
dydx = dydx[0]
return y, dydx
|
[
"Computes",
"f",
"(",
"*",
"xs",
")",
"and",
"its",
"gradients",
"wrt",
"to",
"*",
"xs",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/gradient.py#L30-L69
|
[
"def",
"value_and_gradient",
"(",
"f",
",",
"xs",
",",
"use_gradient_tape",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'value_and_gradient'",
",",
"[",
"xs",
"]",
")",
":",
"is_xs_list_like",
"=",
"isinstance",
"(",
"xs",
",",
"(",
"tuple",
",",
"list",
")",
")",
"if",
"not",
"is_xs_list_like",
":",
"xs",
"=",
"[",
"xs",
"]",
"xs",
"=",
"[",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
",",
"name",
"=",
"'x{}'",
".",
"format",
"(",
"i",
")",
")",
"for",
"i",
",",
"x",
"in",
"enumerate",
"(",
"xs",
")",
"]",
"if",
"tf",
".",
"executing_eagerly",
"(",
")",
"or",
"use_gradient_tape",
":",
"with",
"tf",
".",
"GradientTape",
"(",
"watch_accessed_variables",
"=",
"False",
")",
"as",
"tape",
":",
"for",
"x",
"in",
"xs",
":",
"tape",
".",
"watch",
"(",
"x",
")",
"y",
"=",
"f",
"(",
"*",
"xs",
")",
"dydx",
"=",
"tape",
".",
"gradient",
"(",
"y",
",",
"xs",
")",
"else",
":",
"y",
"=",
"f",
"(",
"*",
"xs",
")",
"dydx",
"=",
"tf",
".",
"gradients",
"(",
"ys",
"=",
"y",
",",
"xs",
"=",
"xs",
")",
"if",
"not",
"is_xs_list_like",
":",
"dydx",
"=",
"dydx",
"[",
"0",
"]",
"return",
"y",
",",
"dydx"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
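A brief usage sketch (eager mode assumed; `value_and_gradient` as defined above):

```python
import tensorflow as tf

y, dydx = value_and_gradient(lambda x: x**2 + x, tf.constant(3.))
# y == 12.0 and dydx == 7.0 (i.e. 2*x + 1 evaluated at x = 3)
```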
test
|
mvn
|
Convenience function to efficiently construct a MultivariateNormalDiag.
|
tensorflow_probability/python/mcmc/eight_schools_hmc.py
|
def mvn(*args, **kwargs):
"""Convenience function to efficiently construct a MultivariateNormalDiag."""
# Faster than using `tfd.MultivariateNormalDiag`.
return tfd.Independent(tfd.Normal(*args, **kwargs),
reinterpreted_batch_ndims=1)
|
def mvn(*args, **kwargs):
"""Convenience function to efficiently construct a MultivariateNormalDiag."""
# Faster than using `tfd.MultivariateNormalDiag`.
return tfd.Independent(tfd.Normal(*args, **kwargs),
reinterpreted_batch_ndims=1)
|
[
"Convenience",
"function",
"to",
"efficiently",
"construct",
"a",
"MultivariateNormalDiag",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/eight_schools_hmc.py#L37-L41
|
[
"def",
"mvn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Faster than using `tfd.MultivariateNormalDiag`.",
"return",
"tfd",
".",
"Independent",
"(",
"tfd",
".",
"Normal",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
",",
"reinterpreted_batch_ndims",
"=",
"1",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
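The claimed equivalence can be checked directly; a small sketch, assuming `mvn` above and the module's `tfd` import:

```python
import tensorflow as tf
from tensorflow_probability import distributions as tfd

loc, scale = tf.zeros([3]), tf.ones([3])
d1 = mvn(loc=loc, scale=scale)                              # Independent(Normal)
d2 = tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale)  # reference
x = tf.constant([0.5, -1.0, 2.0])
# d1.log_prob(x) and d2.log_prob(x) agree; both have event_shape [3].
```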
test
|
eight_schools_joint_log_prob
|
Eight-schools joint log-prob.
|
tensorflow_probability/python/mcmc/eight_schools_hmc.py
|
def eight_schools_joint_log_prob(
treatment_effects, treatment_stddevs,
avg_effect, avg_stddev, school_effects_standard):
"""Eight-schools joint log-prob."""
rv_avg_effect = tfd.Normal(loc=0., scale=10.)
rv_avg_stddev = tfd.Normal(loc=5., scale=1.)
rv_school_effects_standard = mvn(
loc=tf.zeros_like(school_effects_standard),
scale=tf.ones_like(school_effects_standard))
rv_treatment_effects = mvn(
loc=(avg_effect + tf.exp(avg_stddev) * school_effects_standard),
scale=treatment_stddevs)
return (
rv_avg_effect.log_prob(avg_effect) +
rv_avg_stddev.log_prob(avg_stddev) +
rv_school_effects_standard.log_prob(school_effects_standard) +
rv_treatment_effects.log_prob(treatment_effects))
|
def eight_schools_joint_log_prob(
treatment_effects, treatment_stddevs,
avg_effect, avg_stddev, school_effects_standard):
"""Eight-schools joint log-prob."""
rv_avg_effect = tfd.Normal(loc=0., scale=10.)
rv_avg_stddev = tfd.Normal(loc=5., scale=1.)
rv_school_effects_standard = mvn(
loc=tf.zeros_like(school_effects_standard),
scale=tf.ones_like(school_effects_standard))
rv_treatment_effects = mvn(
loc=(avg_effect + tf.exp(avg_stddev) * school_effects_standard),
scale=treatment_stddevs)
return (
rv_avg_effect.log_prob(avg_effect) +
rv_avg_stddev.log_prob(avg_stddev) +
rv_school_effects_standard.log_prob(school_effects_standard) +
rv_treatment_effects.log_prob(treatment_effects))
|
[
"Eight",
"-",
"schools",
"joint",
"log",
"-",
"prob",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/eight_schools_hmc.py#L44-L60
|
[
"def",
"eight_schools_joint_log_prob",
"(",
"treatment_effects",
",",
"treatment_stddevs",
",",
"avg_effect",
",",
"avg_stddev",
",",
"school_effects_standard",
")",
":",
"rv_avg_effect",
"=",
"tfd",
".",
"Normal",
"(",
"loc",
"=",
"0.",
",",
"scale",
"=",
"10.",
")",
"rv_avg_stddev",
"=",
"tfd",
".",
"Normal",
"(",
"loc",
"=",
"5.",
",",
"scale",
"=",
"1.",
")",
"rv_school_effects_standard",
"=",
"mvn",
"(",
"loc",
"=",
"tf",
".",
"zeros_like",
"(",
"school_effects_standard",
")",
",",
"scale",
"=",
"tf",
".",
"ones_like",
"(",
"school_effects_standard",
")",
")",
"rv_treatment_effects",
"=",
"mvn",
"(",
"loc",
"=",
"(",
"avg_effect",
"+",
"tf",
".",
"exp",
"(",
"avg_stddev",
")",
"*",
"school_effects_standard",
")",
",",
"scale",
"=",
"treatment_stddevs",
")",
"return",
"(",
"rv_avg_effect",
".",
"log_prob",
"(",
"avg_effect",
")",
"+",
"rv_avg_stddev",
".",
"log_prob",
"(",
"avg_stddev",
")",
"+",
"rv_school_effects_standard",
".",
"log_prob",
"(",
"school_effects_standard",
")",
"+",
"rv_treatment_effects",
".",
"log_prob",
"(",
"treatment_effects",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
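A sketch of evaluating the joint log-prob at an arbitrary state, using the same eight-schools data as the benchmark below:

```python
import tensorflow as tf

treatment_effects = tf.constant([28., 8., -3., 7., -1., 1., 18., 12.])
treatment_stddevs = tf.constant([15., 10., 16., 11., 9., 11., 10., 18.])
lp = eight_schools_joint_log_prob(
    treatment_effects, treatment_stddevs,
    avg_effect=tf.constant(0.),
    avg_stddev=tf.constant(0.),
    school_effects_standard=tf.zeros([8]))
# `lp` is a scalar Tensor: the unnormalized posterior log density here.
```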
test
|
benchmark_eight_schools_hmc
|
Runs HMC on the eight-schools unnormalized posterior.
|
tensorflow_probability/python/mcmc/eight_schools_hmc.py
|
def benchmark_eight_schools_hmc(
num_results=int(5e3),
num_burnin_steps=int(3e3),
num_leapfrog_steps=3,
step_size=0.4):
"""Runs HMC on the eight-schools unnormalized posterior."""
num_schools = 8
treatment_effects = tf.constant(
[28, 8, -3, 7, -1, 1, 18, 12],
dtype=np.float32,
name='treatment_effects')
treatment_stddevs = tf.constant(
[15, 10, 16, 11, 9, 11, 10, 18],
dtype=np.float32,
name='treatment_stddevs')
def unnormalized_posterior_log_prob(
avg_effect, avg_stddev, school_effects_standard):
"""Eight-schools unnormalized log posterior."""
return eight_schools_joint_log_prob(
treatment_effects, treatment_stddevs,
avg_effect, avg_stddev, school_effects_standard)
if tf.executing_eagerly():
sample_chain = tf.function(tfp.mcmc.sample_chain)
else:
sample_chain = tfp.mcmc.sample_chain
def computation():
"""The benchmark computation."""
_, kernel_results = sample_chain(
num_results=num_results,
num_burnin_steps=num_burnin_steps,
current_state=(
tf.zeros([], name='init_avg_effect'),
tf.zeros([], name='init_avg_stddev'),
tf.ones([num_schools], name='init_school_effects_standard'),
),
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=unnormalized_posterior_log_prob,
step_size=step_size,
num_leapfrog_steps=num_leapfrog_steps))
return kernel_results.is_accepted
# Let's force evaluation of graph to ensure build time is not part of our time
# trial.
is_accepted_tensor = computation()
if not tf.executing_eagerly():
session = tf.compat.v1.Session()
session.run(is_accepted_tensor)
start_time = time.time()
if tf.executing_eagerly():
is_accepted = computation()
else:
is_accepted = session.run(is_accepted_tensor)
wall_time = time.time() - start_time
num_accepted = np.sum(is_accepted)
acceptance_rate = np.float32(num_accepted) / np.float32(num_results)
return dict(
iters=(num_results + num_burnin_steps) * num_leapfrog_steps,
extras={'acceptance_rate': acceptance_rate},
wall_time=wall_time)
|
def benchmark_eight_schools_hmc(
num_results=int(5e3),
num_burnin_steps=int(3e3),
num_leapfrog_steps=3,
step_size=0.4):
"""Runs HMC on the eight-schools unnormalized posterior."""
num_schools = 8
treatment_effects = tf.constant(
[28, 8, -3, 7, -1, 1, 18, 12],
dtype=np.float32,
name='treatment_effects')
treatment_stddevs = tf.constant(
[15, 10, 16, 11, 9, 11, 10, 18],
dtype=np.float32,
name='treatment_stddevs')
def unnormalized_posterior_log_prob(
avg_effect, avg_stddev, school_effects_standard):
"""Eight-schools unnormalized log posterior."""
return eight_schools_joint_log_prob(
treatment_effects, treatment_stddevs,
avg_effect, avg_stddev, school_effects_standard)
if tf.executing_eagerly():
sample_chain = tf.function(tfp.mcmc.sample_chain)
else:
sample_chain = tfp.mcmc.sample_chain
def computation():
"""The benchmark computation."""
_, kernel_results = sample_chain(
num_results=num_results,
num_burnin_steps=num_burnin_steps,
current_state=(
tf.zeros([], name='init_avg_effect'),
tf.zeros([], name='init_avg_stddev'),
tf.ones([num_schools], name='init_school_effects_standard'),
),
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=unnormalized_posterior_log_prob,
step_size=step_size,
num_leapfrog_steps=num_leapfrog_steps))
return kernel_results.is_accepted
# Let's force evaluation of graph to ensure build time is not part of our time
# trial.
is_accepted_tensor = computation()
if not tf.executing_eagerly():
session = tf.compat.v1.Session()
session.run(is_accepted_tensor)
start_time = time.time()
if tf.executing_eagerly():
is_accepted = computation()
else:
is_accepted = session.run(is_accepted_tensor)
wall_time = time.time() - start_time
num_accepted = np.sum(is_accepted)
acceptance_rate = np.float32(num_accepted) / np.float32(num_results)
return dict(
iters=(num_results + num_burnin_steps) * num_leapfrog_steps,
extras={'acceptance_rate': acceptance_rate},
wall_time=wall_time)
|
[
"Runs",
"HMC",
"on",
"the",
"eight",
"-",
"schools",
"unnormalized",
"posterior",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/eight_schools_hmc.py#L63-L129
|
[
"def",
"benchmark_eight_schools_hmc",
"(",
"num_results",
"=",
"int",
"(",
"5e3",
")",
",",
"num_burnin_steps",
"=",
"int",
"(",
"3e3",
")",
",",
"num_leapfrog_steps",
"=",
"3",
",",
"step_size",
"=",
"0.4",
")",
":",
"num_schools",
"=",
"8",
"treatment_effects",
"=",
"tf",
".",
"constant",
"(",
"[",
"28",
",",
"8",
",",
"-",
"3",
",",
"7",
",",
"-",
"1",
",",
"1",
",",
"18",
",",
"12",
"]",
",",
"dtype",
"=",
"np",
".",
"float32",
",",
"name",
"=",
"'treatment_effects'",
")",
"treatment_stddevs",
"=",
"tf",
".",
"constant",
"(",
"[",
"15",
",",
"10",
",",
"16",
",",
"11",
",",
"9",
",",
"11",
",",
"10",
",",
"18",
"]",
",",
"dtype",
"=",
"np",
".",
"float32",
",",
"name",
"=",
"'treatment_stddevs'",
")",
"def",
"unnormalized_posterior_log_prob",
"(",
"avg_effect",
",",
"avg_stddev",
",",
"school_effects_standard",
")",
":",
"\"\"\"Eight-schools unnormalized log posterior.\"\"\"",
"return",
"eight_schools_joint_log_prob",
"(",
"treatment_effects",
",",
"treatment_stddevs",
",",
"avg_effect",
",",
"avg_stddev",
",",
"school_effects_standard",
")",
"if",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"sample_chain",
"=",
"tf",
".",
"function",
"(",
"tfp",
".",
"mcmc",
".",
"sample_chain",
")",
"else",
":",
"sample_chain",
"=",
"tfp",
".",
"mcmc",
".",
"sample_chain",
"def",
"computation",
"(",
")",
":",
"\"\"\"The benchmark computation.\"\"\"",
"_",
",",
"kernel_results",
"=",
"sample_chain",
"(",
"num_results",
"=",
"num_results",
",",
"num_burnin_steps",
"=",
"num_burnin_steps",
",",
"current_state",
"=",
"(",
"tf",
".",
"zeros",
"(",
"[",
"]",
",",
"name",
"=",
"'init_avg_effect'",
")",
",",
"tf",
".",
"zeros",
"(",
"[",
"]",
",",
"name",
"=",
"'init_avg_stddev'",
")",
",",
"tf",
".",
"ones",
"(",
"[",
"num_schools",
"]",
",",
"name",
"=",
"'init_school_effects_standard'",
")",
",",
")",
",",
"kernel",
"=",
"tfp",
".",
"mcmc",
".",
"HamiltonianMonteCarlo",
"(",
"target_log_prob_fn",
"=",
"unnormalized_posterior_log_prob",
",",
"step_size",
"=",
"step_size",
",",
"num_leapfrog_steps",
"=",
"num_leapfrog_steps",
")",
")",
"return",
"kernel_results",
".",
"is_accepted",
"# Let's force evaluation of graph to ensure build time is not part of our time",
"# trial.",
"is_accepted_tensor",
"=",
"computation",
"(",
")",
"if",
"not",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"session",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"Session",
"(",
")",
"session",
".",
"run",
"(",
"is_accepted_tensor",
")",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"if",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"is_accepted",
"=",
"computation",
"(",
")",
"else",
":",
"is_accepted",
"=",
"session",
".",
"run",
"(",
"is_accepted_tensor",
")",
"wall_time",
"=",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
"num_accepted",
"=",
"np",
".",
"sum",
"(",
"is_accepted",
")",
"acceptance_rate",
"=",
"np",
".",
"float32",
"(",
"num_accepted",
")",
"/",
"np",
".",
"float32",
"(",
"num_results",
")",
"return",
"dict",
"(",
"iters",
"=",
"(",
"num_results",
"+",
"num_burnin_steps",
")",
"*",
"num_leapfrog_steps",
",",
"extras",
"=",
"{",
"'acceptance_rate'",
":",
"acceptance_rate",
"}",
",",
"wall_time",
"=",
"wall_time",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
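A quick smoke-run sketch with reduced settings; the defaults of 5000 results and 3000 burn-in steps take considerably longer:

```python
stats = benchmark_eight_schools_hmc(num_results=100, num_burnin_steps=50)
print(stats['iters'])                      # (100 + 50) * 3 leapfrog steps = 450
print(stats['extras']['acceptance_rate'])  # fraction of accepted proposals
print(stats['wall_time'])                  # seconds spent sampling
```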
test
|
expand_docstring
|
Decorator to programmatically expand the docstring.
Args:
**kwargs: Keyword arguments to set. For each key-value pair `k` and `v`,
the key is found as `${k}` in the docstring and replaced with `v`.
Returns:
Decorated function.
|
tensorflow_probability/python/util/docstring.py
|
def expand_docstring(**kwargs):
"""Decorator to programmatically expand the docstring.
Args:
**kwargs: Keyword arguments to set. For each key-value pair `k` and `v`,
the key is found as `${k}` in the docstring and replaced with `v`.
Returns:
Decorated function.
"""
def _fn_wrapped(fn):
"""Original function with modified `__doc__` attribute."""
doc = inspect.cleandoc(fn.__doc__)
for k, v in six.iteritems(kwargs):
# Capture each ${k} reference to replace with v.
# We wrap the replacement in a function so no backslash escapes
# are processed.
pattern = r'\$\{' + str(k) + r'\}'
doc = re.sub(pattern, lambda match: v, doc) # pylint: disable=cell-var-from-loop
fn.__doc__ = doc
return fn
return _fn_wrapped
|
def expand_docstring(**kwargs):
"""Decorator to programmatically expand the docstring.
Args:
**kwargs: Keyword arguments to set. For each key-value pair `k` and `v`,
the key is found as `${k}` in the docstring and replaced with `v`.
Returns:
Decorated function.
"""
def _fn_wrapped(fn):
"""Original function with modified `__doc__` attribute."""
doc = inspect.cleandoc(fn.__doc__)
for k, v in six.iteritems(kwargs):
# Capture each ${k} reference to replace with v.
# We wrap the replacement in a function so no backslash escapes
# are processed.
pattern = r'\$\{' + str(k) + r'\}'
doc = re.sub(pattern, lambda match: v, doc) # pylint: disable=cell-var-from-loop
fn.__doc__ = doc
return fn
return _fn_wrapped
|
[
"Decorator",
"to",
"programmatically",
"expand",
"the",
"docstring",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/util/docstring.py#L30-L51
|
[
"def",
"expand_docstring",
"(",
"*",
"*",
"kwargs",
")",
":",
"def",
"_fn_wrapped",
"(",
"fn",
")",
":",
"\"\"\"Original function with modified `__doc__` attribute.\"\"\"",
"doc",
"=",
"inspect",
".",
"cleandoc",
"(",
"fn",
".",
"__doc__",
")",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"kwargs",
")",
":",
"# Capture each ${k} reference to replace with v.",
"# We wrap the replacement in a function so no backslash escapes",
"# are processed.",
"pattern",
"=",
"r'\\$\\{'",
"+",
"str",
"(",
"k",
")",
"+",
"r'\\}'",
"doc",
"=",
"re",
".",
"sub",
"(",
"pattern",
",",
"lambda",
"match",
":",
"v",
",",
"doc",
")",
"# pylint: disable=cell-var-from-loop",
"fn",
".",
"__doc__",
"=",
"doc",
"return",
"fn",
"return",
"_fn_wrapped"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
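A minimal usage sketch of the decorator above:

```python
@expand_docstring(version='1.0')
def fn():
  """Docs for version ${version}."""

print(fn.__doc__)  # -> Docs for version 1.0.
```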
test
|
_simple_name
|
Infer the original name passed into a distribution constructor.
Distributions typically follow the pattern of
with tf.name_scope(name) as name:
super(name=name)
so we attempt to reverse the name-scope transformation to allow
addressing of RVs by the distribution's original, user-visible
name kwarg.
Args:
distribution: a tfd.Distribution instance.
Returns:
simple_name: the original name passed into the Distribution.
#### Example
```
d1 = tfd.Normal(0., 1., name='x') # d1.name = 'x/'
d2 = tfd.Normal(0., 1., name='x') # d2.name = 'x_2/'
_simple_name(d2) # returns 'x'
```
|
tensorflow_probability/python/edward2/generated_random_variables.py
|
def _simple_name(distribution):
"""Infer the original name passed into a distribution constructor.
Distributions typically follow the pattern of
  with tf.name_scope(name) as name:
super(name=name)
so we attempt to reverse the name-scope transformation to allow
addressing of RVs by the distribution's original, user-visible
name kwarg.
Args:
distribution: a tfd.Distribution instance.
Returns:
simple_name: the original name passed into the Distribution.
#### Example
```
d1 = tfd.Normal(0., 1., name='x') # d1.name = 'x/'
d2 = tfd.Normal(0., 1., name='x') # d2.name = 'x_2/'
_simple_name(d2) # returns 'x'
```
"""
simple_name = distribution.name
# turn 'scope/x/' into 'x'
if simple_name.endswith('/'):
simple_name = simple_name.split('/')[-2]
# turn 'x_3' into 'x'
parts = simple_name.split('_')
if parts[-1].isdigit():
simple_name = '_'.join(parts[:-1])
return simple_name
|
def _simple_name(distribution):
"""Infer the original name passed into a distribution constructor.
Distributions typically follow the pattern of
  with tf.name_scope(name) as name:
super(name=name)
so we attempt to reverse the name-scope transformation to allow
addressing of RVs by the distribution's original, user-visible
name kwarg.
Args:
distribution: a tfd.Distribution instance.
Returns:
simple_name: the original name passed into the Distribution.
#### Example
```
d1 = tfd.Normal(0., 1., name='x') # d1.name = 'x/'
d2 = tfd.Normal(0., 1., name='x') # d2.name = 'x_2/'
_simple_name(d2) # returns 'x'
```
"""
simple_name = distribution.name
# turn 'scope/x/' into 'x'
if simple_name.endswith('/'):
simple_name = simple_name.split('/')[-2]
# turn 'x_3' into 'x'
parts = simple_name.split('_')
if parts[-1].isdigit():
simple_name = '_'.join(parts[:-1])
return simple_name
|
[
"Infer",
"the",
"original",
"name",
"passed",
"into",
"a",
"distribution",
"constructor",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/edward2/generated_random_variables.py#L43-L79
|
[
"def",
"_simple_name",
"(",
"distribution",
")",
":",
"simple_name",
"=",
"distribution",
".",
"name",
"# turn 'scope/x/' into 'x'",
"if",
"simple_name",
".",
"endswith",
"(",
"'/'",
")",
":",
"simple_name",
"=",
"simple_name",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"2",
"]",
"# turn 'x_3' into 'x'",
"parts",
"=",
"simple_name",
".",
"split",
"(",
"'_'",
")",
"if",
"parts",
"[",
"-",
"1",
"]",
".",
"isdigit",
"(",
")",
":",
"simple_name",
"=",
"'_'",
".",
"join",
"(",
"parts",
"[",
":",
"-",
"1",
"]",
")",
"return",
"simple_name"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_build_custom_rv
|
RandomVariable constructor with a dummy name argument.
|
tensorflow_probability/python/edward2/generated_random_variables.py
|
def _build_custom_rv(distribution, sample_shape, value, name):
"""RandomVariable constructor with a dummy name argument."""
# Program transformations (e.g., `make_log_joint_fn`) assume that
# the traced constructor has `name` and `value` kwargs, enabling
# them to override the value of an RV according to its name.
# User-defined RVs inherit their name from the provided
# distribution; this helper method exposes the name as a dummy kwarg
# so that it's visible to program transformations.
del name # unused
return RandomVariable(distribution=distribution,
sample_shape=sample_shape,
value=value)
|
def _build_custom_rv(distribution, sample_shape, value, name):
"""RandomVariable constructor with a dummy name argument."""
# Program transformations (e.g., `make_log_joint_fn`) assume that
# the traced constructor has `name` and `value` kwargs, enabling
# them to override the value of an RV according to its name.
# User-defined RVs inherit their name from the provided
# distribution; this helper method exposes the name as a dummy kwarg
# so that it's visible to program transformations.
del name # unused
return RandomVariable(distribution=distribution,
sample_shape=sample_shape,
value=value)
|
[
"RandomVariable",
"constructor",
"with",
"a",
"dummy",
"name",
"argument",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/edward2/generated_random_variables.py#L83-L94
|
[
"def",
"_build_custom_rv",
"(",
"distribution",
",",
"sample_shape",
",",
"value",
",",
"name",
")",
":",
"# Program transformations (e.g., `make_log_joint_fn`) assume that",
"# the traced constructor has `name` and `value` kwargs, enabling",
"# them to override the value of an RV according to its name.",
"# User-defined RVs inherit their name from the provided",
"# distribution; this helper method exposes the name as a dummy kwarg",
"# so that it's visible to program transformations.",
"del",
"name",
"# unused",
"return",
"RandomVariable",
"(",
"distribution",
"=",
"distribution",
",",
"sample_shape",
"=",
"sample_shape",
",",
"value",
"=",
"value",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
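A sketch of the contract (assuming the module's imports): the `name` argument is accepted purely so program transformations can key on it, and the RV's actual name comes from the wrapped distribution:

```python
from tensorflow_probability import distributions as tfd

rv = _build_custom_rv(
    distribution=tfd.Normal(0., 1., name='x'),
    sample_shape=(),
    value=None,
    name='ignored')  # dropped inside; tracers may still intercept on it
# rv.distribution.name reflects 'x', not 'ignored'.
```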
test
|
as_random_variable
|
Wrap an existing distribution as a traceable random variable.
This enables the use of custom or user-provided distributions in
Edward models. Unlike a bare `RandomVariable` object, this method
wraps the constructor so it is included in the Edward trace and its
values can be properly intercepted and overridden.
Where possible, you should prefer the built-in constructors
(`ed.Normal`, etc); these simultaneously construct a Distribution
and a RandomVariable object so that the distribution parameters
themselves may be intercepted and overridden. RVs constructed via
`as_random_variable()` have a fixed distribution and may not support
program transformations (e.g., conjugate marginalization) that rely
on overriding distribution parameters.
Args:
distribution: tfd.Distribution governing the distribution of the random
variable, such as sampling and log-probabilities.
sample_shape: tf.TensorShape of samples to draw from the random variable.
Default is `()` corresponding to a single sample.
value: Fixed tf.Tensor to associate with random variable. Must have shape
`sample_shape + distribution.batch_shape + distribution.event_shape`.
Default is to sample from random variable according to `sample_shape`.
Returns:
rv: a `RandomVariable` wrapping the provided distribution.
#### Example
```python
from tensorflow_probability import distributions as tfd
from tensorflow_probability import edward2 as ed
def model():
# equivalent to ed.Normal(0., 1., name='x')
return ed.as_random_variable(tfd.Normal(0., 1., name='x'))
log_joint = ed.make_log_joint_fn(model)
output = log_joint(x=2.)
```
|
tensorflow_probability/python/edward2/generated_random_variables.py
|
def as_random_variable(distribution,
sample_shape=(),
value=None):
"""Wrap an existing distribution as a traceable random variable.
This enables the use of custom or user-provided distributions in
Edward models. Unlike a bare `RandomVariable` object, this method
wraps the constructor so it is included in the Edward trace and its
values can be properly intercepted and overridden.
Where possible, you should prefer the built-in constructors
(`ed.Normal`, etc); these simultaneously construct a Distribution
and a RandomVariable object so that the distribution parameters
themselves may be intercepted and overridden. RVs constructed via
`as_random_variable()` have a fixed distribution and may not support
`as_random_variable()` have a fixed distribution and may not support
program transformations (e.g., conjugate marginalization) that rely
on overriding distribution parameters.
Args:
distribution: tfd.Distribution governing the distribution of the random
variable, such as sampling and log-probabilities.
sample_shape: tf.TensorShape of samples to draw from the random variable.
Default is `()` corresponding to a single sample.
value: Fixed tf.Tensor to associate with random variable. Must have shape
`sample_shape + distribution.batch_shape + distribution.event_shape`.
Default is to sample from random variable according to `sample_shape`.
Returns:
rv: a `RandomVariable` wrapping the provided distribution.
#### Example
```python
from tensorflow_probability import distributions as tfd
from tensorflow_probability import edward2 as ed
def model():
# equivalent to ed.Normal(0., 1., name='x')
return ed.as_random_variable(tfd.Normal(0., 1., name='x'))
log_joint = ed.make_log_joint_fn(model)
output = log_joint(x=2.)
```
"""
return _build_custom_rv(distribution=distribution,
sample_shape=sample_shape,
value=value,
name=_simple_name(distribution))
|
def as_random_variable(distribution,
sample_shape=(),
value=None):
"""Wrap an existing distribution as a traceable random variable.
This enables the use of custom or user-provided distributions in
Edward models. Unlike a bare `RandomVariable` object, this method
wraps the constructor so it is included in the Edward trace and its
values can be properly intercepted and overridden.
Where possible, you should prefer the built-in constructors
(`ed.Normal`, etc); these simultaneously construct a Distribution
and a RandomVariable object so that the distribution parameters
themselves may be intercepted and overridden. RVs constructed via
`as_random_variable()` have a fixed distribution and may not support
program transformations (e.g., conjugate marginalization) that rely
on overriding distribution parameters.
Args:
distribution: tfd.Distribution governing the distribution of the random
variable, such as sampling and log-probabilities.
sample_shape: tf.TensorShape of samples to draw from the random variable.
Default is `()` corresponding to a single sample.
value: Fixed tf.Tensor to associate with random variable. Must have shape
`sample_shape + distribution.batch_shape + distribution.event_shape`.
Default is to sample from random variable according to `sample_shape`.
Returns:
rv: a `RandomVariable` wrapping the provided distribution.
#### Example
```python
from tensorflow_probability import distributions as tfd
from tensorflow_probability import edward2 as ed
def model():
# equivalent to ed.Normal(0., 1., name='x')
return ed.as_random_variable(tfd.Normal(0., 1., name='x'))
log_joint = ed.make_log_joint_fn(model)
output = log_joint(x=2.)
```
"""
return _build_custom_rv(distribution=distribution,
sample_shape=sample_shape,
value=value,
name=_simple_name(distribution))
|
[
"Wrap",
"an",
"existing",
"distribution",
"as",
"a",
"traceable",
"random",
"variable",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/edward2/generated_random_variables.py#L97-L145
|
[
"def",
"as_random_variable",
"(",
"distribution",
",",
"sample_shape",
"=",
"(",
")",
",",
"value",
"=",
"None",
")",
":",
"return",
"_build_custom_rv",
"(",
"distribution",
"=",
"distribution",
",",
"sample_shape",
"=",
"sample_shape",
",",
"value",
"=",
"value",
",",
"name",
"=",
"_simple_name",
"(",
"distribution",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_make_random_variable
|
Factory function to make random variable given distribution class.
|
tensorflow_probability/python/edward2/generated_random_variables.py
|
def _make_random_variable(distribution_cls):
"""Factory function to make random variable given distribution class."""
@interceptable
@functools.wraps(distribution_cls, assigned=('__module__', '__name__'))
@docstring_util.expand_docstring(
cls=distribution_cls.__name__,
doc=inspect.cleandoc(distribution_cls.__init__.__doc__ or ''))
def func(*args, **kwargs):
# pylint: disable=g-doc-args
"""Create a random variable for ${cls}.
See ${cls} for more details.
Returns:
RandomVariable.
#### Original Docstring for Distribution
${doc}
"""
# pylint: enable=g-doc-args
sample_shape = kwargs.pop('sample_shape', ())
value = kwargs.pop('value', None)
return RandomVariable(distribution=distribution_cls(*args, **kwargs),
sample_shape=sample_shape,
value=value)
return func
|
def _make_random_variable(distribution_cls):
"""Factory function to make random variable given distribution class."""
@interceptable
@functools.wraps(distribution_cls, assigned=('__module__', '__name__'))
@docstring_util.expand_docstring(
cls=distribution_cls.__name__,
doc=inspect.cleandoc(distribution_cls.__init__.__doc__ or ''))
def func(*args, **kwargs):
# pylint: disable=g-doc-args
"""Create a random variable for ${cls}.
See ${cls} for more details.
Returns:
RandomVariable.
#### Original Docstring for Distribution
${doc}
"""
# pylint: enable=g-doc-args
sample_shape = kwargs.pop('sample_shape', ())
value = kwargs.pop('value', None)
return RandomVariable(distribution=distribution_cls(*args, **kwargs),
sample_shape=sample_shape,
value=value)
return func
|
[
"Factory",
"function",
"to",
"make",
"random",
"variable",
"given",
"distribution",
"class",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/edward2/generated_random_variables.py#L148-L175
|
[
"def",
"_make_random_variable",
"(",
"distribution_cls",
")",
":",
"@",
"interceptable",
"@",
"functools",
".",
"wraps",
"(",
"distribution_cls",
",",
"assigned",
"=",
"(",
"'__module__'",
",",
"'__name__'",
")",
")",
"@",
"docstring_util",
".",
"expand_docstring",
"(",
"cls",
"=",
"distribution_cls",
".",
"__name__",
",",
"doc",
"=",
"inspect",
".",
"cleandoc",
"(",
"distribution_cls",
".",
"__init__",
".",
"__doc__",
"or",
"''",
")",
")",
"def",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=g-doc-args",
"\"\"\"Create a random variable for ${cls}.\n\n See ${cls} for more details.\n\n Returns:\n RandomVariable.\n\n #### Original Docstring for Distribution\n\n ${doc}\n \"\"\"",
"# pylint: enable=g-doc-args",
"sample_shape",
"=",
"kwargs",
".",
"pop",
"(",
"'sample_shape'",
",",
"(",
")",
")",
"value",
"=",
"kwargs",
".",
"pop",
"(",
"'value'",
",",
"None",
")",
"return",
"RandomVariable",
"(",
"distribution",
"=",
"distribution_cls",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
",",
"sample_shape",
"=",
"sample_shape",
",",
"value",
"=",
"value",
")",
"return",
"func"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
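A sketch of the factory in action (assuming the module's imports, including the `interceptable` decorator):

```python
from tensorflow_probability import distributions as tfd

Normal = _make_random_variable(tfd.Normal)
x = Normal(loc=0., scale=1., sample_shape=(5,), name='x')
# `x` is a RandomVariable holding 5 i.i.d. draws from tfd.Normal(0., 1.).
```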
test
|
VectorExponentialLinearOperator._mode_mean_shape
|
Shape for the mode/mean Tensors.
|
tensorflow_probability/python/distributions/vector_exponential_linear_operator.py
|
def _mode_mean_shape(self):
"""Shape for the mode/mean Tensors."""
shape = tensorshape_util.concatenate(self.batch_shape, self.event_shape)
has_static_shape = tensorshape_util.is_fully_defined(shape)
if not has_static_shape:
shape = tf.concat([
self.batch_shape_tensor(),
self.event_shape_tensor(),
], 0)
return shape
|
def _mode_mean_shape(self):
"""Shape for the mode/mean Tensors."""
shape = tensorshape_util.concatenate(self.batch_shape, self.event_shape)
has_static_shape = tensorshape_util.is_fully_defined(shape)
if not has_static_shape:
shape = tf.concat([
self.batch_shape_tensor(),
self.event_shape_tensor(),
], 0)
return shape
|
[
"Shape",
"for",
"the",
"mode",
"/",
"mean",
"Tensors",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/vector_exponential_linear_operator.py#L278-L287
|
[
"def",
"_mode_mean_shape",
"(",
"self",
")",
":",
"shape",
"=",
"tensorshape_util",
".",
"concatenate",
"(",
"self",
".",
"batch_shape",
",",
"self",
".",
"event_shape",
")",
"has_static_shape",
"=",
"tensorshape_util",
".",
"is_fully_defined",
"(",
"shape",
")",
"if",
"not",
"has_static_shape",
":",
"shape",
"=",
"tf",
".",
"concat",
"(",
"[",
"self",
".",
"batch_shape_tensor",
"(",
")",
",",
"self",
".",
"event_shape_tensor",
"(",
")",
",",
"]",
",",
"0",
")",
"return",
"shape"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
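The static-then-dynamic fallback used above is a common TF shape idiom; a standalone sketch of the same pattern, outside the class:

```python
import tensorflow as tf

def full_shape(dist):
  """Batch + event shape: static when fully known, else a shape Tensor."""
  shape = dist.batch_shape.concatenate(dist.event_shape)
  if shape.is_fully_defined():
    return shape  # known at trace time; adds no ops to the graph
  # Fall back to runtime shape Tensors when any dimension is unknown.
  return tf.concat([dist.batch_shape_tensor(), dist.event_shape_tensor()], 0)
```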
test
|
one_step_predictive
|
Compute one-step-ahead predictive distributions for all timesteps.
Given samples from the posterior over parameters, return the predictive
distribution over observations at each time `T`, given observations up
through time `T-1`.
Args:
model: An instance of `StructuralTimeSeries` representing a
time-series model. This represents a joint distribution over
time-series and their parameters with batch shape `[b1, ..., bN]`.
observed_time_series: `float` `Tensor` of shape
`concat([sample_shape, model.batch_shape, [num_timesteps, 1]])` where
`sample_shape` corresponds to i.i.d. observations, and the trailing `[1]`
dimension may (optionally) be omitted if `num_timesteps > 1`. May
optionally be an instance of `tfp.sts.MaskedTimeSeries` including a
mask `Tensor` to encode the locations of missing observations.
parameter_samples: Python `list` of `Tensors` representing posterior samples
of model parameters, with shapes `[concat([[num_posterior_draws],
param.prior.batch_shape, param.prior.event_shape]) for param in
model.parameters]`. This may optionally also be a map (Python `dict`) of
parameter names to `Tensor` values.
Returns:
forecast_dist: a `tfd.MixtureSameFamily` instance with event shape
[num_timesteps] and
batch shape `concat([sample_shape, model.batch_shape])`, with
`num_posterior_draws` mixture components. The `t`th step represents the
forecast distribution `p(observed_time_series[t] |
observed_time_series[0:t-1], parameter_samples)`.
#### Examples
Suppose we've built a model and fit it to data using HMC:
```python
day_of_week = tfp.sts.Seasonal(
num_seasons=7,
observed_time_series=observed_time_series,
name='day_of_week')
local_linear_trend = tfp.sts.LocalLinearTrend(
observed_time_series=observed_time_series,
name='local_linear_trend')
model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],
observed_time_series=observed_time_series)
samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series)
```
Passing the posterior samples into `one_step_predictive`, we construct a
one-step-ahead predictive distribution:
```python
one_step_predictive_dist = tfp.sts.one_step_predictive(
model, observed_time_series, parameter_samples=samples)
predictive_means = one_step_predictive_dist.mean()
predictive_scales = one_step_predictive_dist.stddev()
```
If using variational inference instead of HMC, we'd construct a forecast using
samples from the variational posterior:
```python
(variational_loss,
variational_distributions) = tfp.sts.build_factored_variational_loss(
model=model, observed_time_series=observed_time_series)
# OMITTED: take steps to optimize variational loss
samples = {k: q.sample(30) for (k, q) in variational_distributions.items()}
one_step_predictive_dist = tfp.sts.one_step_predictive(
model, observed_time_series, parameter_samples=samples)
```
We can visualize the forecast by plotting:
```python
from matplotlib import pylab as plt
def plot_one_step_predictive(observed_time_series,
forecast_mean,
forecast_scale):
plt.figure(figsize=(12, 6))
num_timesteps = forecast_mean.shape[-1]
c1, c2 = (0.12, 0.47, 0.71), (1.0, 0.5, 0.05)
plt.plot(observed_time_series, label="observed time series", color=c1)
plt.plot(forecast_mean, label="one-step prediction", color=c2)
plt.fill_between(np.arange(num_timesteps),
forecast_mean - 2 * forecast_scale,
forecast_mean + 2 * forecast_scale,
alpha=0.1, color=c2)
plt.legend()
plot_one_step_predictive(observed_time_series,
forecast_mean=predictive_means,
forecast_scale=predictive_scales)
```
To detect anomalous timesteps, we check whether the observed value at each
step is within a 95% predictive interval, i.e., two standard deviations from
the mean:
```python
z_scores = ((observed_time_series[..., 1:] - predictive_means[..., :-1])
/ predictive_scales[..., :-1])
anomalous_timesteps = tf.boolean_mask(
tf.range(1, num_timesteps),
tf.abs(z_scores) > 2.0)
```
|
tensorflow_probability/python/sts/forecast.py
|
def one_step_predictive(model, observed_time_series, parameter_samples):
"""Compute one-step-ahead predictive distributions for all timesteps.
Given samples from the posterior over parameters, return the predictive
distribution over observations at each time `T`, given observations up
through time `T-1`.
Args:
model: An instance of `StructuralTimeSeries` representing a
time-series model. This represents a joint distribution over
time-series and their parameters with batch shape `[b1, ..., bN]`.
observed_time_series: `float` `Tensor` of shape
      `concat([sample_shape, model.batch_shape, [num_timesteps, 1]])` where
`sample_shape` corresponds to i.i.d. observations, and the trailing `[1]`
dimension may (optionally) be omitted if `num_timesteps > 1`. May
optionally be an instance of `tfp.sts.MaskedTimeSeries` including a
mask `Tensor` to encode the locations of missing observations.
parameter_samples: Python `list` of `Tensors` representing posterior samples
of model parameters, with shapes `[concat([[num_posterior_draws],
param.prior.batch_shape, param.prior.event_shape]) for param in
model.parameters]`. This may optionally also be a map (Python `dict`) of
parameter names to `Tensor` values.
Returns:
forecast_dist: a `tfd.MixtureSameFamily` instance with event shape
[num_timesteps] and
batch shape `concat([sample_shape, model.batch_shape])`, with
`num_posterior_draws` mixture components. The `t`th step represents the
forecast distribution `p(observed_time_series[t] |
observed_time_series[0:t-1], parameter_samples)`.
#### Examples
Suppose we've built a model and fit it to data using HMC:
```python
day_of_week = tfp.sts.Seasonal(
num_seasons=7,
observed_time_series=observed_time_series,
name='day_of_week')
local_linear_trend = tfp.sts.LocalLinearTrend(
observed_time_series=observed_time_series,
name='local_linear_trend')
model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],
observed_time_series=observed_time_series)
samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series)
```
Passing the posterior samples into `one_step_predictive`, we construct a
one-step-ahead predictive distribution:
```python
one_step_predictive_dist = tfp.sts.one_step_predictive(
model, observed_time_series, parameter_samples=samples)
predictive_means = one_step_predictive_dist.mean()
predictive_scales = one_step_predictive_dist.stddev()
```
If using variational inference instead of HMC, we'd construct a forecast using
samples from the variational posterior:
```python
(variational_loss,
variational_distributions) = tfp.sts.build_factored_variational_loss(
model=model, observed_time_series=observed_time_series)
# OMITTED: take steps to optimize variational loss
samples = {k: q.sample(30) for (k, q) in variational_distributions.items()}
one_step_predictive_dist = tfp.sts.one_step_predictive(
model, observed_time_series, parameter_samples=samples)
```
We can visualize the forecast by plotting:
```python
from matplotlib import pylab as plt
def plot_one_step_predictive(observed_time_series,
forecast_mean,
forecast_scale):
plt.figure(figsize=(12, 6))
num_timesteps = forecast_mean.shape[-1]
c1, c2 = (0.12, 0.47, 0.71), (1.0, 0.5, 0.05)
plt.plot(observed_time_series, label="observed time series", color=c1)
plt.plot(forecast_mean, label="one-step prediction", color=c2)
plt.fill_between(np.arange(num_timesteps),
forecast_mean - 2 * forecast_scale,
forecast_mean + 2 * forecast_scale,
alpha=0.1, color=c2)
plt.legend()
plot_one_step_predictive(observed_time_series,
forecast_mean=predictive_means,
forecast_scale=predictive_scales)
```
To detect anomalous timesteps, we check whether the observed value at each
step is within a 95% predictive interval, i.e., two standard deviations from
the mean:
```python
z_scores = ((observed_time_series[..., 1:] - predictive_means[..., :-1])
/ predictive_scales[..., :-1])
anomalous_timesteps = tf.boolean_mask(
tf.range(1, num_timesteps),
tf.abs(z_scores) > 2.0)
```
"""
with tf.compat.v1.name_scope(
'one_step_predictive', values=[observed_time_series, parameter_samples]):
[
observed_time_series,
is_missing
] = sts_util.canonicalize_observed_time_series_with_mask(
observed_time_series)
# Run filtering over the training timesteps to extract the
# predictive means and variances.
num_timesteps = dist_util.prefer_static_value(
tf.shape(input=observed_time_series))[-2]
lgssm = model.make_state_space_model(
num_timesteps=num_timesteps, param_vals=parameter_samples)
(_, _, _, _, _, observation_means, observation_covs
) = lgssm.forward_filter(observed_time_series, mask=is_missing)
# Squeeze dims to convert from LGSSM's event shape `[num_timesteps, 1]`
# to a scalar time series.
return sts_util.mix_over_posterior_draws(
means=observation_means[..., 0],
variances=observation_covs[..., 0, 0])
|
def one_step_predictive(model, observed_time_series, parameter_samples):
"""Compute one-step-ahead predictive distributions for all timesteps.
Given samples from the posterior over parameters, return the predictive
distribution over observations at each time `T`, given observations up
through time `T-1`.
Args:
model: An instance of `StructuralTimeSeries` representing a
time-series model. This represents a joint distribution over
time-series and their parameters with batch shape `[b1, ..., bN]`.
observed_time_series: `float` `Tensor` of shape
      `concat([sample_shape, model.batch_shape, [num_timesteps, 1]])` where
`sample_shape` corresponds to i.i.d. observations, and the trailing `[1]`
dimension may (optionally) be omitted if `num_timesteps > 1`. May
optionally be an instance of `tfp.sts.MaskedTimeSeries` including a
mask `Tensor` to encode the locations of missing observations.
parameter_samples: Python `list` of `Tensors` representing posterior samples
of model parameters, with shapes `[concat([[num_posterior_draws],
param.prior.batch_shape, param.prior.event_shape]) for param in
model.parameters]`. This may optionally also be a map (Python `dict`) of
parameter names to `Tensor` values.
Returns:
forecast_dist: a `tfd.MixtureSameFamily` instance with event shape
[num_timesteps] and
batch shape `concat([sample_shape, model.batch_shape])`, with
`num_posterior_draws` mixture components. The `t`th step represents the
forecast distribution `p(observed_time_series[t] |
observed_time_series[0:t-1], parameter_samples)`.
#### Examples
Suppose we've built a model and fit it to data using HMC:
```python
day_of_week = tfp.sts.Seasonal(
num_seasons=7,
observed_time_series=observed_time_series,
name='day_of_week')
local_linear_trend = tfp.sts.LocalLinearTrend(
observed_time_series=observed_time_series,
name='local_linear_trend')
model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],
observed_time_series=observed_time_series)
samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series)
```
Passing the posterior samples into `one_step_predictive`, we construct a
one-step-ahead predictive distribution:
```python
one_step_predictive_dist = tfp.sts.one_step_predictive(
model, observed_time_series, parameter_samples=samples)
predictive_means = one_step_predictive_dist.mean()
predictive_scales = one_step_predictive_dist.stddev()
```
If using variational inference instead of HMC, we'd construct a forecast using
samples from the variational posterior:
```python
(variational_loss,
variational_distributions) = tfp.sts.build_factored_variational_loss(
model=model, observed_time_series=observed_time_series)
# OMITTED: take steps to optimize variational loss
samples = {k: q.sample(30) for (k, q) in variational_distributions.items()}
one_step_predictive_dist = tfp.sts.one_step_predictive(
model, observed_time_series, parameter_samples=samples)
```
We can visualize the forecast by plotting:
```python
from matplotlib import pylab as plt
def plot_one_step_predictive(observed_time_series,
forecast_mean,
forecast_scale):
plt.figure(figsize=(12, 6))
num_timesteps = forecast_mean.shape[-1]
c1, c2 = (0.12, 0.47, 0.71), (1.0, 0.5, 0.05)
plt.plot(observed_time_series, label="observed time series", color=c1)
plt.plot(forecast_mean, label="one-step prediction", color=c2)
plt.fill_between(np.arange(num_timesteps),
forecast_mean - 2 * forecast_scale,
forecast_mean + 2 * forecast_scale,
alpha=0.1, color=c2)
plt.legend()
plot_one_step_predictive(observed_time_series,
forecast_mean=predictive_means,
forecast_scale=predictive_scales)
```
To detect anomalous timesteps, we check whether the observed value at each
step is within a 95% predictive interval, i.e., two standard deviations from
the mean:
```python
z_scores = ((observed_time_series[..., 1:] - predictive_means[..., :-1])
/ predictive_scales[..., :-1])
anomalous_timesteps = tf.boolean_mask(
tf.range(1, num_timesteps),
tf.abs(z_scores) > 2.0)
```
"""
with tf.compat.v1.name_scope(
'one_step_predictive', values=[observed_time_series, parameter_samples]):
[
observed_time_series,
is_missing
] = sts_util.canonicalize_observed_time_series_with_mask(
observed_time_series)
# Run filtering over the training timesteps to extract the
# predictive means and variances.
num_timesteps = dist_util.prefer_static_value(
tf.shape(input=observed_time_series))[-2]
lgssm = model.make_state_space_model(
num_timesteps=num_timesteps, param_vals=parameter_samples)
(_, _, _, _, _, observation_means, observation_covs
) = lgssm.forward_filter(observed_time_series, mask=is_missing)
# Squeeze dims to convert from LGSSM's event shape `[num_timesteps, 1]`
# to a scalar time series.
return sts_util.mix_over_posterior_draws(
means=observation_means[..., 0],
variances=observation_covs[..., 0, 0])
|
[
"Compute",
"one",
"-",
"step",
"-",
"ahead",
"predictive",
"distributions",
"for",
"all",
"timesteps",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/forecast.py#L35-L169
|
[
"def",
"one_step_predictive",
"(",
"model",
",",
"observed_time_series",
",",
"parameter_samples",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"'one_step_predictive'",
",",
"values",
"=",
"[",
"observed_time_series",
",",
"parameter_samples",
"]",
")",
":",
"[",
"observed_time_series",
",",
"is_missing",
"]",
"=",
"sts_util",
".",
"canonicalize_observed_time_series_with_mask",
"(",
"observed_time_series",
")",
"# Run filtering over the training timesteps to extract the",
"# predictive means and variances.",
"num_timesteps",
"=",
"dist_util",
".",
"prefer_static_value",
"(",
"tf",
".",
"shape",
"(",
"input",
"=",
"observed_time_series",
")",
")",
"[",
"-",
"2",
"]",
"lgssm",
"=",
"model",
".",
"make_state_space_model",
"(",
"num_timesteps",
"=",
"num_timesteps",
",",
"param_vals",
"=",
"parameter_samples",
")",
"(",
"_",
",",
"_",
",",
"_",
",",
"_",
",",
"_",
",",
"observation_means",
",",
"observation_covs",
")",
"=",
"lgssm",
".",
"forward_filter",
"(",
"observed_time_series",
",",
"mask",
"=",
"is_missing",
")",
"# Squeeze dims to convert from LGSSM's event shape `[num_timesteps, 1]`",
"# to a scalar time series.",
"return",
"sts_util",
".",
"mix_over_posterior_draws",
"(",
"means",
"=",
"observation_means",
"[",
"...",
",",
"0",
"]",
",",
"variances",
"=",
"observation_covs",
"[",
"...",
",",
"0",
",",
"0",
"]",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
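The return value mixes the per-draw Gaussian predictives over posterior draws. As a rough illustration of what such a mixture reduction involves, here is a hedged NumPy sketch that collapses the mixture by moment matching via the law of total variance. This is illustrative only: the library's `sts_util.mix_over_posterior_draws` (whose internals are not shown here) returns an exact mixture distribution rather than these matched moments.
```python
import numpy as np

def moment_match_mixture(means, variances):
  """Collapse a uniform mixture of Gaussians to matched moments.

  means, variances: [num_posterior_draws, num_timesteps] arrays of the
  per-draw predictive moments (hypothetical inputs for illustration).
  """
  mixture_mean = np.mean(means, axis=0)
  # Law of total variance: Var[X] = E[Var[X|draw]] + Var[E[X|draw]].
  mixture_var = np.mean(variances + means**2, axis=0) - mixture_mean**2
  return mixture_mean, np.sqrt(mixture_var)

means = np.array([[0.0, 1.0], [2.0, 1.0]])      # two posterior draws
variances = np.array([[1.0, 0.5], [1.0, 0.5]])
mean, stddev = moment_match_mixture(means, variances)
print(mean)    # [1. 1.]
print(stddev)  # [1.414..., 0.707...]
```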
|
test
|
forecast
|
Construct predictive distribution over future observations.
Given samples from the posterior over parameters, return the predictive
distribution over future observations for `num_steps_forecast` timesteps.
Args:
model: An instance of `StructuralTimeSeries` representing a
time-series model. This represents a joint distribution over
time-series and their parameters with batch shape `[b1, ..., bN]`.
observed_time_series: `float` `Tensor` of shape
`concat([sample_shape, model.batch_shape, [num_timesteps, 1]])` where
`sample_shape` corresponds to i.i.d. observations, and the trailing `[1]`
dimension may (optionally) be omitted if `num_timesteps > 1`. May
optionally be an instance of `tfp.sts.MaskedTimeSeries` including a
mask `Tensor` to encode the locations of missing observations.
parameter_samples: Python `list` of `Tensors` representing posterior samples
of model parameters, with shapes `[concat([[num_posterior_draws],
param.prior.batch_shape, param.prior.event_shape]) for param in
model.parameters]`. This may optionally also be a map (Python `dict`) of
parameter names to `Tensor` values.
num_steps_forecast: scalar `int` `Tensor` number of steps to forecast.
Returns:
forecast_dist: a `tfd.MixtureSameFamily` instance with event shape
[num_steps_forecast, 1] and batch shape
`concat([sample_shape, model.batch_shape])`, with `num_posterior_draws`
mixture components.
#### Examples
Suppose we've built a model and fit it to data using HMC:
```python
day_of_week = tfp.sts.Seasonal(
num_seasons=7,
observed_time_series=observed_time_series,
name='day_of_week')
local_linear_trend = tfp.sts.LocalLinearTrend(
observed_time_series=observed_time_series,
name='local_linear_trend')
model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],
observed_time_series=observed_time_series)
samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series)
```
Passing the posterior samples into `forecast`, we construct a forecast
distribution:
```python
forecast_dist = tfp.sts.forecast(model, observed_time_series,
parameter_samples=samples,
num_steps_forecast=50)
forecast_mean = forecast_dist.mean()[..., 0] # shape: [50]
forecast_scale = forecast_dist.stddev()[..., 0] # shape: [50]
forecast_samples = forecast_dist.sample(10)[..., 0] # shape: [10, 50]
```
If using variational inference instead of HMC, we'd construct a forecast using
samples from the variational posterior:
```python
(variational_loss,
variational_distributions) = tfp.sts.build_factored_variational_loss(
model=model, observed_time_series=observed_time_series)
# OMITTED: take steps to optimize variational loss
samples = {k: q.sample(30) for (k, q) in variational_distributions.items()}
forecast_dist = tfp.sts.forecast(model, observed_time_series,
parameter_samples=samples,
num_steps_forecast=50)
```
We can visualize the forecast by plotting:
```python
from matplotlib import pylab as plt
def plot_forecast(observed_time_series,
forecast_mean,
forecast_scale,
forecast_samples):
plt.figure(figsize=(12, 6))
num_steps = observed_time_series.shape[-1]
num_steps_forecast = forecast_mean.shape[-1]
num_steps_train = num_steps - num_steps_forecast
c1, c2 = (0.12, 0.47, 0.71), (1.0, 0.5, 0.05)
plt.plot(np.arange(num_steps), observed_time_series,
lw=2, color=c1, label='ground truth')
forecast_steps = np.arange(num_steps_train,
num_steps_train+num_steps_forecast)
plt.plot(forecast_steps, forecast_samples.T, lw=1, color=c2, alpha=0.1)
plt.plot(forecast_steps, forecast_mean, lw=2, ls='--', color=c2,
label='forecast')
plt.fill_between(forecast_steps,
forecast_mean - 2 * forecast_scale,
forecast_mean + 2 * forecast_scale, color=c2, alpha=0.2)
plt.xlim([0, num_steps])
plt.legend()
plot_forecast(observed_time_series,
forecast_mean=forecast_mean,
forecast_scale=forecast_scale,
forecast_samples=forecast_samples)
```
|
tensorflow_probability/python/sts/forecast.py
|
def forecast(model,
observed_time_series,
parameter_samples,
num_steps_forecast):
"""Construct predictive distribution over future observations.
Given samples from the posterior over parameters, return the predictive
  distribution over future observations for `num_steps_forecast` timesteps.
Args:
model: An instance of `StructuralTimeSeries` representing a
time-series model. This represents a joint distribution over
time-series and their parameters with batch shape `[b1, ..., bN]`.
observed_time_series: `float` `Tensor` of shape
`concat([sample_shape, model.batch_shape, [num_timesteps, 1]])` where
`sample_shape` corresponds to i.i.d. observations, and the trailing `[1]`
dimension may (optionally) be omitted if `num_timesteps > 1`. May
optionally be an instance of `tfp.sts.MaskedTimeSeries` including a
mask `Tensor` to encode the locations of missing observations.
parameter_samples: Python `list` of `Tensors` representing posterior samples
of model parameters, with shapes `[concat([[num_posterior_draws],
param.prior.batch_shape, param.prior.event_shape]) for param in
model.parameters]`. This may optionally also be a map (Python `dict`) of
parameter names to `Tensor` values.
num_steps_forecast: scalar `int` `Tensor` number of steps to forecast.
Returns:
forecast_dist: a `tfd.MixtureSameFamily` instance with event shape
[num_steps_forecast, 1] and batch shape
`concat([sample_shape, model.batch_shape])`, with `num_posterior_draws`
mixture components.
#### Examples
Suppose we've built a model and fit it to data using HMC:
```python
day_of_week = tfp.sts.Seasonal(
num_seasons=7,
observed_time_series=observed_time_series,
name='day_of_week')
local_linear_trend = tfp.sts.LocalLinearTrend(
observed_time_series=observed_time_series,
name='local_linear_trend')
model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],
observed_time_series=observed_time_series)
samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series)
```
Passing the posterior samples into `forecast`, we construct a forecast
distribution:
```python
forecast_dist = tfp.sts.forecast(model, observed_time_series,
parameter_samples=samples,
num_steps_forecast=50)
forecast_mean = forecast_dist.mean()[..., 0] # shape: [50]
forecast_scale = forecast_dist.stddev()[..., 0] # shape: [50]
forecast_samples = forecast_dist.sample(10)[..., 0] # shape: [10, 50]
```
If using variational inference instead of HMC, we'd construct a forecast using
samples from the variational posterior:
```python
(variational_loss,
variational_distributions) = tfp.sts.build_factored_variational_loss(
model=model, observed_time_series=observed_time_series)
# OMITTED: take steps to optimize variational loss
samples = {k: q.sample(30) for (k, q) in variational_distributions.items()}
forecast_dist = tfp.sts.forecast(model, observed_time_series,
parameter_samples=samples,
num_steps_forecast=50)
```
We can visualize the forecast by plotting:
```python
from matplotlib import pylab as plt
def plot_forecast(observed_time_series,
forecast_mean,
forecast_scale,
forecast_samples):
plt.figure(figsize=(12, 6))
num_steps = observed_time_series.shape[-1]
num_steps_forecast = forecast_mean.shape[-1]
num_steps_train = num_steps - num_steps_forecast
c1, c2 = (0.12, 0.47, 0.71), (1.0, 0.5, 0.05)
plt.plot(np.arange(num_steps), observed_time_series,
lw=2, color=c1, label='ground truth')
forecast_steps = np.arange(num_steps_train,
num_steps_train+num_steps_forecast)
plt.plot(forecast_steps, forecast_samples.T, lw=1, color=c2, alpha=0.1)
plt.plot(forecast_steps, forecast_mean, lw=2, ls='--', color=c2,
label='forecast')
plt.fill_between(forecast_steps,
forecast_mean - 2 * forecast_scale,
forecast_mean + 2 * forecast_scale, color=c2, alpha=0.2)
plt.xlim([0, num_steps])
plt.legend()
plot_forecast(observed_time_series,
forecast_mean=forecast_mean,
forecast_scale=forecast_scale,
forecast_samples=forecast_samples)
```
"""
with tf.compat.v1.name_scope(
'forecast',
values=[observed_time_series, parameter_samples, num_steps_forecast]):
[
observed_time_series,
mask
] = sts_util.canonicalize_observed_time_series_with_mask(
observed_time_series)
# Run filtering over the observed timesteps to extract the
# latent state posterior at timestep T+1 (i.e., the final
# filtering distribution, pushed through the transition model).
# This is the prior for the forecast model ("today's prior
# is yesterday's posterior").
num_observed_steps = dist_util.prefer_static_value(
tf.shape(input=observed_time_series))[-2]
observed_data_ssm = model.make_state_space_model(
num_timesteps=num_observed_steps, param_vals=parameter_samples)
(_, _, _, predictive_means, predictive_covs, _, _
) = observed_data_ssm.forward_filter(observed_time_series, mask=mask)
# Build a batch of state-space models over the forecast period. Because
# we'll use MixtureSameFamily to mix over the posterior draws, we need to
# do some shenanigans to move the `[num_posterior_draws]` batch dimension
# from the leftmost to the rightmost side of the model's batch shape.
# TODO(b/120245392): enhance `MixtureSameFamily` to reduce along an
# arbitrary axis, and eliminate `move_dimension` calls here.
parameter_samples = model._canonicalize_param_vals_as_map(parameter_samples) # pylint: disable=protected-access
parameter_samples_with_reordered_batch_dimension = {
param.name: dist_util.move_dimension(
parameter_samples[param.name],
0, -(1 + _prefer_static_event_ndims(param.prior)))
for param in model.parameters}
forecast_prior = tfd.MultivariateNormalFullCovariance(
loc=dist_util.move_dimension(predictive_means[..., -1, :], 0, -2),
covariance_matrix=dist_util.move_dimension(
predictive_covs[..., -1, :, :], 0, -3))
# Ugly hack: because we moved `num_posterior_draws` to the trailing (rather
# than leading) dimension of parameters, the parameter batch shapes no
# longer broadcast against the `constant_offset` attribute used in `sts.Sum`
# models. We fix this by manually adding an extra broadcasting dim to
# `constant_offset` if present.
# The root cause of this hack is that we mucked with param dimensions above
# and are now passing params that are 'invalid' in the sense that they don't
# match the shapes of the model's param priors. The fix (as above) will be
# to update MixtureSameFamily so we can avoid changing param dimensions
# altogether.
# TODO(b/120245392): enhance `MixtureSameFamily` to reduce along an
# arbitrary axis, and eliminate this hack.
kwargs = {}
if hasattr(model, 'constant_offset'):
kwargs['constant_offset'] = tf.convert_to_tensor(
value=model.constant_offset,
dtype=forecast_prior.dtype)[..., tf.newaxis]
# We assume that any STS model that has a `constant_offset` attribute
# will allow it to be overridden as a kwarg. This is currently just
# `sts.Sum`.
# TODO(b/120245392): when kwargs hack is removed, switch back to calling
# the public version of `_make_state_space_model`.
forecast_ssm = model._make_state_space_model( # pylint: disable=protected-access
num_timesteps=num_steps_forecast,
param_map=parameter_samples_with_reordered_batch_dimension,
initial_state_prior=forecast_prior,
initial_step=num_observed_steps,
**kwargs)
num_posterior_draws = dist_util.prefer_static_value(
forecast_ssm.batch_shape_tensor())[-1]
return tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(
logits=tf.zeros([num_posterior_draws], dtype=forecast_ssm.dtype)),
components_distribution=forecast_ssm)
|
def forecast(model,
observed_time_series,
parameter_samples,
num_steps_forecast):
"""Construct predictive distribution over future observations.
Given samples from the posterior over parameters, return the predictive
  distribution over future observations for `num_steps_forecast` timesteps.
Args:
model: An instance of `StructuralTimeSeries` representing a
time-series model. This represents a joint distribution over
time-series and their parameters with batch shape `[b1, ..., bN]`.
observed_time_series: `float` `Tensor` of shape
`concat([sample_shape, model.batch_shape, [num_timesteps, 1]])` where
`sample_shape` corresponds to i.i.d. observations, and the trailing `[1]`
dimension may (optionally) be omitted if `num_timesteps > 1`. May
optionally be an instance of `tfp.sts.MaskedTimeSeries` including a
mask `Tensor` to encode the locations of missing observations.
parameter_samples: Python `list` of `Tensors` representing posterior samples
of model parameters, with shapes `[concat([[num_posterior_draws],
param.prior.batch_shape, param.prior.event_shape]) for param in
model.parameters]`. This may optionally also be a map (Python `dict`) of
parameter names to `Tensor` values.
num_steps_forecast: scalar `int` `Tensor` number of steps to forecast.
Returns:
forecast_dist: a `tfd.MixtureSameFamily` instance with event shape
[num_steps_forecast, 1] and batch shape
`concat([sample_shape, model.batch_shape])`, with `num_posterior_draws`
mixture components.
#### Examples
Suppose we've built a model and fit it to data using HMC:
```python
day_of_week = tfp.sts.Seasonal(
num_seasons=7,
observed_time_series=observed_time_series,
name='day_of_week')
local_linear_trend = tfp.sts.LocalLinearTrend(
observed_time_series=observed_time_series,
name='local_linear_trend')
model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],
observed_time_series=observed_time_series)
samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series)
```
Passing the posterior samples into `forecast`, we construct a forecast
distribution:
```python
forecast_dist = tfp.sts.forecast(model, observed_time_series,
parameter_samples=samples,
num_steps_forecast=50)
forecast_mean = forecast_dist.mean()[..., 0] # shape: [50]
forecast_scale = forecast_dist.stddev()[..., 0] # shape: [50]
forecast_samples = forecast_dist.sample(10)[..., 0] # shape: [10, 50]
```
If using variational inference instead of HMC, we'd construct a forecast using
samples from the variational posterior:
```python
(variational_loss,
variational_distributions) = tfp.sts.build_factored_variational_loss(
model=model, observed_time_series=observed_time_series)
# OMITTED: take steps to optimize variational loss
samples = {k: q.sample(30) for (k, q) in variational_distributions.items()}
forecast_dist = tfp.sts.forecast(model, observed_time_series,
parameter_samples=samples,
num_steps_forecast=50)
```
We can visualize the forecast by plotting:
```python
from matplotlib import pylab as plt
def plot_forecast(observed_time_series,
forecast_mean,
forecast_scale,
forecast_samples):
plt.figure(figsize=(12, 6))
num_steps = observed_time_series.shape[-1]
num_steps_forecast = forecast_mean.shape[-1]
num_steps_train = num_steps - num_steps_forecast
c1, c2 = (0.12, 0.47, 0.71), (1.0, 0.5, 0.05)
plt.plot(np.arange(num_steps), observed_time_series,
lw=2, color=c1, label='ground truth')
forecast_steps = np.arange(num_steps_train,
num_steps_train+num_steps_forecast)
plt.plot(forecast_steps, forecast_samples.T, lw=1, color=c2, alpha=0.1)
plt.plot(forecast_steps, forecast_mean, lw=2, ls='--', color=c2,
label='forecast')
plt.fill_between(forecast_steps,
forecast_mean - 2 * forecast_scale,
forecast_mean + 2 * forecast_scale, color=c2, alpha=0.2)
plt.xlim([0, num_steps])
plt.legend()
plot_forecast(observed_time_series,
forecast_mean=forecast_mean,
forecast_scale=forecast_scale,
forecast_samples=forecast_samples)
```
"""
with tf.compat.v1.name_scope(
'forecast',
values=[observed_time_series, parameter_samples, num_steps_forecast]):
[
observed_time_series,
mask
] = sts_util.canonicalize_observed_time_series_with_mask(
observed_time_series)
# Run filtering over the observed timesteps to extract the
# latent state posterior at timestep T+1 (i.e., the final
# filtering distribution, pushed through the transition model).
# This is the prior for the forecast model ("today's prior
# is yesterday's posterior").
num_observed_steps = dist_util.prefer_static_value(
tf.shape(input=observed_time_series))[-2]
observed_data_ssm = model.make_state_space_model(
num_timesteps=num_observed_steps, param_vals=parameter_samples)
(_, _, _, predictive_means, predictive_covs, _, _
) = observed_data_ssm.forward_filter(observed_time_series, mask=mask)
# Build a batch of state-space models over the forecast period. Because
# we'll use MixtureSameFamily to mix over the posterior draws, we need to
# do some shenanigans to move the `[num_posterior_draws]` batch dimension
# from the leftmost to the rightmost side of the model's batch shape.
# TODO(b/120245392): enhance `MixtureSameFamily` to reduce along an
# arbitrary axis, and eliminate `move_dimension` calls here.
parameter_samples = model._canonicalize_param_vals_as_map(parameter_samples) # pylint: disable=protected-access
parameter_samples_with_reordered_batch_dimension = {
param.name: dist_util.move_dimension(
parameter_samples[param.name],
0, -(1 + _prefer_static_event_ndims(param.prior)))
for param in model.parameters}
forecast_prior = tfd.MultivariateNormalFullCovariance(
loc=dist_util.move_dimension(predictive_means[..., -1, :], 0, -2),
covariance_matrix=dist_util.move_dimension(
predictive_covs[..., -1, :, :], 0, -3))
# Ugly hack: because we moved `num_posterior_draws` to the trailing (rather
# than leading) dimension of parameters, the parameter batch shapes no
# longer broadcast against the `constant_offset` attribute used in `sts.Sum`
# models. We fix this by manually adding an extra broadcasting dim to
# `constant_offset` if present.
# The root cause of this hack is that we mucked with param dimensions above
# and are now passing params that are 'invalid' in the sense that they don't
# match the shapes of the model's param priors. The fix (as above) will be
# to update MixtureSameFamily so we can avoid changing param dimensions
# altogether.
# TODO(b/120245392): enhance `MixtureSameFamily` to reduce along an
# arbitrary axis, and eliminate this hack.
kwargs = {}
if hasattr(model, 'constant_offset'):
kwargs['constant_offset'] = tf.convert_to_tensor(
value=model.constant_offset,
dtype=forecast_prior.dtype)[..., tf.newaxis]
# We assume that any STS model that has a `constant_offset` attribute
# will allow it to be overridden as a kwarg. This is currently just
# `sts.Sum`.
# TODO(b/120245392): when kwargs hack is removed, switch back to calling
# the public version of `_make_state_space_model`.
forecast_ssm = model._make_state_space_model( # pylint: disable=protected-access
num_timesteps=num_steps_forecast,
param_map=parameter_samples_with_reordered_batch_dimension,
initial_state_prior=forecast_prior,
initial_step=num_observed_steps,
**kwargs)
num_posterior_draws = dist_util.prefer_static_value(
forecast_ssm.batch_shape_tensor())[-1]
return tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(
logits=tf.zeros([num_posterior_draws], dtype=forecast_ssm.dtype)),
components_distribution=forecast_ssm)
|
[
"Construct",
"predictive",
"distribution",
"over",
"future",
"observations",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/forecast.py#L172-L362
|
[
"def",
"forecast",
"(",
"model",
",",
"observed_time_series",
",",
"parameter_samples",
",",
"num_steps_forecast",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"'forecast'",
",",
"values",
"=",
"[",
"observed_time_series",
",",
"parameter_samples",
",",
"num_steps_forecast",
"]",
")",
":",
"[",
"observed_time_series",
",",
"mask",
"]",
"=",
"sts_util",
".",
"canonicalize_observed_time_series_with_mask",
"(",
"observed_time_series",
")",
"# Run filtering over the observed timesteps to extract the",
"# latent state posterior at timestep T+1 (i.e., the final",
"# filtering distribution, pushed through the transition model).",
"# This is the prior for the forecast model (\"today's prior",
"# is yesterday's posterior\").",
"num_observed_steps",
"=",
"dist_util",
".",
"prefer_static_value",
"(",
"tf",
".",
"shape",
"(",
"input",
"=",
"observed_time_series",
")",
")",
"[",
"-",
"2",
"]",
"observed_data_ssm",
"=",
"model",
".",
"make_state_space_model",
"(",
"num_timesteps",
"=",
"num_observed_steps",
",",
"param_vals",
"=",
"parameter_samples",
")",
"(",
"_",
",",
"_",
",",
"_",
",",
"predictive_means",
",",
"predictive_covs",
",",
"_",
",",
"_",
")",
"=",
"observed_data_ssm",
".",
"forward_filter",
"(",
"observed_time_series",
",",
"mask",
"=",
"mask",
")",
"# Build a batch of state-space models over the forecast period. Because",
"# we'll use MixtureSameFamily to mix over the posterior draws, we need to",
"# do some shenanigans to move the `[num_posterior_draws]` batch dimension",
"# from the leftmost to the rightmost side of the model's batch shape.",
"# TODO(b/120245392): enhance `MixtureSameFamily` to reduce along an",
"# arbitrary axis, and eliminate `move_dimension` calls here.",
"parameter_samples",
"=",
"model",
".",
"_canonicalize_param_vals_as_map",
"(",
"parameter_samples",
")",
"# pylint: disable=protected-access",
"parameter_samples_with_reordered_batch_dimension",
"=",
"{",
"param",
".",
"name",
":",
"dist_util",
".",
"move_dimension",
"(",
"parameter_samples",
"[",
"param",
".",
"name",
"]",
",",
"0",
",",
"-",
"(",
"1",
"+",
"_prefer_static_event_ndims",
"(",
"param",
".",
"prior",
")",
")",
")",
"for",
"param",
"in",
"model",
".",
"parameters",
"}",
"forecast_prior",
"=",
"tfd",
".",
"MultivariateNormalFullCovariance",
"(",
"loc",
"=",
"dist_util",
".",
"move_dimension",
"(",
"predictive_means",
"[",
"...",
",",
"-",
"1",
",",
":",
"]",
",",
"0",
",",
"-",
"2",
")",
",",
"covariance_matrix",
"=",
"dist_util",
".",
"move_dimension",
"(",
"predictive_covs",
"[",
"...",
",",
"-",
"1",
",",
":",
",",
":",
"]",
",",
"0",
",",
"-",
"3",
")",
")",
"# Ugly hack: because we moved `num_posterior_draws` to the trailing (rather",
"# than leading) dimension of parameters, the parameter batch shapes no",
"# longer broadcast against the `constant_offset` attribute used in `sts.Sum`",
"# models. We fix this by manually adding an extra broadcasting dim to",
"# `constant_offset` if present.",
"# The root cause of this hack is that we mucked with param dimensions above",
"# and are now passing params that are 'invalid' in the sense that they don't",
"# match the shapes of the model's param priors. The fix (as above) will be",
"# to update MixtureSameFamily so we can avoid changing param dimensions",
"# altogether.",
"# TODO(b/120245392): enhance `MixtureSameFamily` to reduce along an",
"# arbitrary axis, and eliminate this hack.",
"kwargs",
"=",
"{",
"}",
"if",
"hasattr",
"(",
"model",
",",
"'constant_offset'",
")",
":",
"kwargs",
"[",
"'constant_offset'",
"]",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"model",
".",
"constant_offset",
",",
"dtype",
"=",
"forecast_prior",
".",
"dtype",
")",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
"# We assume that any STS model that has a `constant_offset` attribute",
"# will allow it to be overridden as a kwarg. This is currently just",
"# `sts.Sum`.",
"# TODO(b/120245392): when kwargs hack is removed, switch back to calling",
"# the public version of `_make_state_space_model`.",
"forecast_ssm",
"=",
"model",
".",
"_make_state_space_model",
"(",
"# pylint: disable=protected-access",
"num_timesteps",
"=",
"num_steps_forecast",
",",
"param_map",
"=",
"parameter_samples_with_reordered_batch_dimension",
",",
"initial_state_prior",
"=",
"forecast_prior",
",",
"initial_step",
"=",
"num_observed_steps",
",",
"*",
"*",
"kwargs",
")",
"num_posterior_draws",
"=",
"dist_util",
".",
"prefer_static_value",
"(",
"forecast_ssm",
".",
"batch_shape_tensor",
"(",
")",
")",
"[",
"-",
"1",
"]",
"return",
"tfd",
".",
"MixtureSameFamily",
"(",
"mixture_distribution",
"=",
"tfd",
".",
"Categorical",
"(",
"logits",
"=",
"tf",
".",
"zeros",
"(",
"[",
"num_posterior_draws",
"]",
",",
"dtype",
"=",
"forecast_ssm",
".",
"dtype",
")",
")",
",",
"components_distribution",
"=",
"forecast_ssm",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
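The `move_dimension` gymnastics above exist only to relocate the `[num_posterior_draws]` axis so `MixtureSameFamily` can reduce over it. A minimal NumPy sketch of that reindexing, assuming `dist_util.move_dimension` has the same single-axis semantics as `np.moveaxis` (an assumption, since its implementation isn't shown here):
```python
import numpy as np

# Hypothetical parameter sample with a leading posterior-draws axis:
# shape [num_posterior_draws, b1, b2].
param = np.zeros([30, 4, 7])

# Mirrors dist_util.move_dimension(param, 0, -1): the draws axis moves
# from the front of the batch shape to the back.
reordered = np.moveaxis(param, 0, -1)
print(reordered.shape)  # (4, 7, 30)
```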
|
test
|
_max_mask_non_finite
|
Returns `max` or `mask` if `max` is not finite.
|
tensorflow_probability/python/internal/backend/numpy/math.py
|
def _max_mask_non_finite(x, axis=-1, keepdims=False, mask=0):
"""Returns `max` or `mask` if `max` is not finite."""
m = np.max(x, axis=_astuple(axis), keepdims=keepdims)
needs_masking = ~np.isfinite(m)
if needs_masking.ndim > 0:
m[needs_masking] = mask
elif needs_masking:
m = mask
return m
|
def _max_mask_non_finite(x, axis=-1, keepdims=False, mask=0):
"""Returns `max` or `mask` if `max` is not finite."""
m = np.max(x, axis=_astuple(axis), keepdims=keepdims)
needs_masking = ~np.isfinite(m)
if needs_masking.ndim > 0:
m[needs_masking] = mask
elif needs_masking:
m = mask
return m
|
[
"Returns",
"max",
"or",
"mask",
"if",
"max",
"is",
"not",
"finite",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/backend/numpy/math.py#L172-L180
|
[
"def",
"_max_mask_non_finite",
"(",
"x",
",",
"axis",
"=",
"-",
"1",
",",
"keepdims",
"=",
"False",
",",
"mask",
"=",
"0",
")",
":",
"m",
"=",
"np",
".",
"max",
"(",
"x",
",",
"axis",
"=",
"_astuple",
"(",
"axis",
")",
",",
"keepdims",
"=",
"keepdims",
")",
"needs_masking",
"=",
"~",
"np",
".",
"isfinite",
"(",
"m",
")",
"if",
"needs_masking",
".",
"ndim",
">",
"0",
":",
"m",
"[",
"needs_masking",
"]",
"=",
"mask",
"elif",
"needs_masking",
":",
"m",
"=",
"mask",
"return",
"m"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
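To see the masking behavior concretely, here is a self-contained re-implementation (dropping the internal `_astuple` helper, so plain `axis` values only) applied to a row with no finite maximum:
```python
import numpy as np

def max_mask_non_finite(x, axis=-1, keepdims=False, mask=0):
  m = np.max(x, axis=axis, keepdims=keepdims)
  needs_masking = ~np.isfinite(m)
  if needs_masking.ndim > 0:
    m[needs_masking] = mask
  elif needs_masking:
    m = mask
  return m

x = np.array([[1.0, 2.0],
              [-np.inf, -np.inf]])  # second row: no finite maximum
print(max_mask_non_finite(x))       # [2. 0.]
```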
|
test
|
_reduce_logsumexp
|
Computes `log(sum(exp(input_tensor)))` along the specified axis.
|
tensorflow_probability/python/internal/backend/numpy/math.py
|
def _reduce_logsumexp(input_tensor, axis=None, keepdims=False, name=None): # pylint: disable=unused-argument
"""Computes `log(sum(exp(input_tensor))) along the specified axis."""
try:
return scipy_special.logsumexp(
input_tensor, axis=_astuple(axis), keepdims=keepdims)
except NotImplementedError:
    # We offer a non-SciPy fallback in case SciPy isn't installed, since
    # logsumexp is frequently used.
m = _max_mask_non_finite(input_tensor, axis=axis, keepdims=True)
y = input_tensor - m
y = np.exp(y, out=y)
return m + np.log(np.sum(y, axis=_astuple(axis), keepdims=keepdims))
|
def _reduce_logsumexp(input_tensor, axis=None, keepdims=False, name=None): # pylint: disable=unused-argument
"""Computes `log(sum(exp(input_tensor))) along the specified axis."""
try:
return scipy_special.logsumexp(
input_tensor, axis=_astuple(axis), keepdims=keepdims)
except NotImplementedError:
    # We offer a non-SciPy fallback in case SciPy isn't installed, since
    # logsumexp is frequently used.
m = _max_mask_non_finite(input_tensor, axis=axis, keepdims=True)
y = input_tensor - m
y = np.exp(y, out=y)
return m + np.log(np.sum(y, axis=_astuple(axis), keepdims=keepdims))
|
[
"Computes",
"log",
"(",
"sum",
"(",
"exp",
"(",
"input_tensor",
")))",
"along",
"the",
"specified",
"axis",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/backend/numpy/math.py#L191-L202
|
[
"def",
"_reduce_logsumexp",
"(",
"input_tensor",
",",
"axis",
"=",
"None",
",",
"keepdims",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"# pylint: disable=unused-argument",
"try",
":",
"return",
"scipy_special",
".",
"logsumexp",
"(",
"input_tensor",
",",
"axis",
"=",
"_astuple",
"(",
"axis",
")",
",",
"keepdims",
"=",
"keepdims",
")",
"except",
"NotImplementedError",
":",
"# We offer a non SP version just in case SP isn't installed and this",
"# because logsumexp is often used.",
"m",
"=",
"_max_mask_non_finite",
"(",
"input_tensor",
",",
"axis",
"=",
"axis",
",",
"keepdims",
"=",
"True",
")",
"y",
"=",
"input_tensor",
"-",
"m",
"y",
"=",
"np",
".",
"exp",
"(",
"y",
",",
"out",
"=",
"y",
")",
"return",
"m",
"+",
"np",
".",
"log",
"(",
"np",
".",
"sum",
"(",
"y",
",",
"axis",
"=",
"_astuple",
"(",
"axis",
")",
",",
"keepdims",
"=",
"keepdims",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
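The fallback branch is the standard max-shift trick: subtract the (masked) maximum before exponentiating so large inputs don't overflow, then add it back outside the log. A standalone sketch contrasting it with the naive formula:
```python
import numpy as np

def stable_logsumexp(x, axis=None):
  m = np.max(x, axis=axis, keepdims=True)
  m = np.where(np.isfinite(m), m, 0.0)  # guard against -inf maxima
  out = m + np.log(np.sum(np.exp(x - m), axis=axis, keepdims=True))
  return np.squeeze(out, axis=axis)

x = np.array([1000.0, 1001.0])
# Naive np.log(np.sum(np.exp(x))) overflows exp() and returns inf;
# the shifted version recovers the true value.
print(stable_logsumexp(x))  # ~1001.3133
```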
|
test
|
assert_finite
|
Assert all elements of `x` are finite.
Args:
x: Numeric `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional).
Defaults to "assert_finite".
Returns:
  Op raising `InvalidArgumentError` unless all elements of `x` are finite.
  If static checks determine all elements of `x` are finite, `x` is returned
  unchanged.
Raises:
  ValueError: If static checks determine `x` contains non-finite elements.
|
tensorflow_probability/python/internal/assert_util.py
|
def assert_finite(x, data=None, summarize=None, message=None, name=None):
"""Assert all elements of `x` are finite.
Args:
x: Numeric `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional).
Defaults to "assert_finite".
Returns:
    Op raising `InvalidArgumentError` unless all elements of `x` are finite.
    If static checks determine all elements of `x` are finite, `x` is returned
    unchanged.
  Raises:
    ValueError: If static checks determine `x` contains non-finite elements.
"""
with tf.compat.v2.name_scope(name or 'assert_finite'):
x_ = tf.get_static_value(x)
if x_ is not None:
if ~np.all(np.isfinite(x_)):
raise ValueError(message)
return x
assertion = tf.compat.v1.assert_equal(
tf.math.is_finite(x), tf.ones_like(x, tf.bool),
data=data, summarize=summarize, message=message)
with tf.control_dependencies([assertion]):
return tf.identity(x)
|
def assert_finite(x, data=None, summarize=None, message=None, name=None):
"""Assert all elements of `x` are finite.
Args:
x: Numeric `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional).
Defaults to "assert_finite".
Returns:
    Op raising `InvalidArgumentError` unless all elements of `x` are finite.
    If static checks determine all elements of `x` are finite, `x` is returned
    unchanged.
  Raises:
    ValueError: If static checks determine `x` contains non-finite elements.
"""
with tf.compat.v2.name_scope(name or 'assert_finite'):
x_ = tf.get_static_value(x)
if x_ is not None:
if ~np.all(np.isfinite(x_)):
raise ValueError(message)
return x
assertion = tf.compat.v1.assert_equal(
tf.math.is_finite(x), tf.ones_like(x, tf.bool),
data=data, summarize=summarize, message=message)
with tf.control_dependencies([assertion]):
return tf.identity(x)
|
[
"Assert",
"all",
"elements",
"of",
"x",
"are",
"finite",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/assert_util.py#L44-L73
|
[
"def",
"assert_finite",
"(",
"x",
",",
"data",
"=",
"None",
",",
"summarize",
"=",
"None",
",",
"message",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v2",
".",
"name_scope",
"(",
"name",
"or",
"'assert_finite'",
")",
":",
"x_",
"=",
"tf",
".",
"get_static_value",
"(",
"x",
")",
"if",
"x_",
"is",
"not",
"None",
":",
"if",
"~",
"np",
".",
"all",
"(",
"np",
".",
"isfinite",
"(",
"x_",
")",
")",
":",
"raise",
"ValueError",
"(",
"message",
")",
"return",
"x",
"assertion",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"assert_equal",
"(",
"tf",
".",
"math",
".",
"is_finite",
"(",
"x",
")",
",",
"tf",
".",
"ones_like",
"(",
"x",
",",
"tf",
".",
"bool",
")",
",",
"data",
"=",
"data",
",",
"summarize",
"=",
"summarize",
",",
"message",
"=",
"message",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"assertion",
"]",
")",
":",
"return",
"tf",
".",
"identity",
"(",
"x",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
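Note the two paths: when `tf.get_static_value` succeeds the check fires immediately with a `ValueError`, otherwise an assertion op is wired in and failure surfaces at run time as `InvalidArgumentError`. A hedged usage sketch of the static path (assuming `assert_finite` is imported from the module above):
```python
import numpy as np
import tensorflow as tf

x = tf.constant([1.0, 2.0, np.inf])
# The value is statically known, so this raises ValueError right away
# instead of deferring to a runtime InvalidArgumentError.
try:
  x = assert_finite(x, message='x must be finite')
except ValueError as e:
  print('caught:', e)
```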
|
test
|
assert_rank_at_most
|
Assert `x` has rank equal to `rank` or smaller.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_rank_at_most(x, 2)]):
output = tf.reduce_sum(x)
```
Args:
x: Numeric `Tensor`.
rank: Scalar `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional).
Defaults to "assert_rank_at_most".
Returns:
Op raising `InvalidArgumentError` unless `x` has specified rank or lower.
If static checks determine `x` has correct rank, a `no_op` is returned.
Raises:
ValueError: If static checks determine `x` has wrong rank.
|
tensorflow_probability/python/internal/assert_util.py
|
def assert_rank_at_most(x, rank, data=None, summarize=None, message=None,
name=None):
"""Assert `x` has rank equal to `rank` or smaller.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_rank_at_most(x, 2)]):
output = tf.reduce_sum(x)
```
Args:
x: Numeric `Tensor`.
rank: Scalar `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional).
Defaults to "assert_rank_at_most".
Returns:
Op raising `InvalidArgumentError` unless `x` has specified rank or lower.
If static checks determine `x` has correct rank, a `no_op` is returned.
Raises:
ValueError: If static checks determine `x` has wrong rank.
"""
with tf.compat.v2.name_scope(name or 'assert_rank_at_most'):
return tf.compat.v1.assert_less_equal(
tf.rank(x), rank, data=data, summarize=summarize, message=message)
|
def assert_rank_at_most(x, rank, data=None, summarize=None, message=None,
name=None):
"""Assert `x` has rank equal to `rank` or smaller.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_rank_at_most(x, 2)]):
output = tf.reduce_sum(x)
```
Args:
x: Numeric `Tensor`.
rank: Scalar `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional).
Defaults to "assert_rank_at_most".
Returns:
Op raising `InvalidArgumentError` unless `x` has specified rank or lower.
If static checks determine `x` has correct rank, a `no_op` is returned.
Raises:
ValueError: If static checks determine `x` has wrong rank.
"""
with tf.compat.v2.name_scope(name or 'assert_rank_at_most'):
return tf.compat.v1.assert_less_equal(
tf.rank(x), rank, data=data, summarize=summarize, message=message)
|
[
"Assert",
"x",
"has",
"rank",
"equal",
"to",
"rank",
"or",
"smaller",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/assert_util.py#L76-L106
|
[
"def",
"assert_rank_at_most",
"(",
"x",
",",
"rank",
",",
"data",
"=",
"None",
",",
"summarize",
"=",
"None",
",",
"message",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v2",
".",
"name_scope",
"(",
"name",
"or",
"'assert_rank_at_most'",
")",
":",
"return",
"tf",
".",
"compat",
".",
"v1",
".",
"assert_less_equal",
"(",
"tf",
".",
"rank",
"(",
"x",
")",
",",
"rank",
",",
"data",
"=",
"data",
",",
"summarize",
"=",
"summarize",
",",
"message",
"=",
"message",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
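The docstring shows the success path; for completeness, a hedged sketch of the failure path in eager mode (shapes here are hypothetical, and `assert_rank_at_most` is assumed to be in scope), where the wrapped `assert_less_equal` raises immediately:
```python
import tensorflow as tf

x = tf.zeros([2, 3, 4])  # rank 3
try:
  assert_rank_at_most(x, 2, message='rank too high')
except tf.errors.InvalidArgumentError as e:
  print('caught:', e.message)
```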
|
test
|
_event_size
|
Computes the number of elements in a tensor with shape `event_shape`.
Args:
event_shape: A tensor shape.
name: The name to use for the tensor op to compute the number of elements
(if such an op needs to be created).
Returns:
event_size: The number of elements in `event_shape`. Returns a numpy int
when the number of elements can be computed immediately. Otherwise, returns
a scalar tensor.
|
tensorflow_probability/python/layers/distribution_layer.py
|
def _event_size(event_shape, name=None):
"""Computes the number of elements in a tensor with shape `event_shape`.
Args:
event_shape: A tensor shape.
name: The name to use for the tensor op to compute the number of elements
(if such an op needs to be created).
Returns:
    event_size: The number of elements in `event_shape`. Returns a numpy int
when the number of elements can be computed immediately. Otherwise, returns
a scalar tensor.
"""
with tf.compat.v1.name_scope(name, 'event_size', [event_shape]):
event_shape = tf.convert_to_tensor(
value=event_shape, dtype=tf.int32, name='event_shape')
event_shape_const = tf.get_static_value(event_shape)
if event_shape_const is not None:
return np.prod(event_shape_const)
else:
return tf.reduce_prod(input_tensor=event_shape)
|
def _event_size(event_shape, name=None):
"""Computes the number of elements in a tensor with shape `event_shape`.
Args:
event_shape: A tensor shape.
name: The name to use for the tensor op to compute the number of elements
(if such an op needs to be created).
Returns:
    event_size: The number of elements in `event_shape`. Returns a numpy int
when the number of elements can be computed immediately. Otherwise, returns
a scalar tensor.
"""
with tf.compat.v1.name_scope(name, 'event_size', [event_shape]):
event_shape = tf.convert_to_tensor(
value=event_shape, dtype=tf.int32, name='event_shape')
event_shape_const = tf.get_static_value(event_shape)
if event_shape_const is not None:
return np.prod(event_shape_const)
else:
return tf.reduce_prod(input_tensor=event_shape)
|
[
"Computes",
"the",
"number",
"of",
"elements",
"in",
"a",
"tensor",
"with",
"shape",
"event_shape",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L65-L86
|
[
"def",
"_event_size",
"(",
"event_shape",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'event_size'",
",",
"[",
"event_shape",
"]",
")",
":",
"event_shape",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"event_shape",
",",
"dtype",
"=",
"tf",
".",
"int32",
",",
"name",
"=",
"'event_shape'",
")",
"event_shape_const",
"=",
"tf",
".",
"get_static_value",
"(",
"event_shape",
")",
"if",
"event_shape_const",
"is",
"not",
"None",
":",
"return",
"np",
".",
"prod",
"(",
"event_shape_const",
")",
"else",
":",
"return",
"tf",
".",
"reduce_prod",
"(",
"input_tensor",
"=",
"event_shape",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
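The static/dynamic split is worth spelling out: a fully known shape short-circuits to a NumPy product, while an unknown shape yields a deferred op. A hedged sketch (assuming `_event_size` is in scope; the graph-mode half is needed because placeholders don't exist eagerly):
```python
import tensorflow as tf

# Static path: a Python list resolves immediately to a numpy int.
print(_event_size([2, 3, 4]))  # 24

# Dynamic path: inside a graph, a shape fed at run time yields a
# scalar `tf.reduce_prod` op instead of a concrete number.
with tf.Graph().as_default():
  shape_ph = tf.compat.v1.placeholder(tf.int32, shape=[3])
  size_op = _event_size(shape_ph)  # evaluated later via session.run
```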
|
test
|
_eval_all_one_hot
|
OneHotCategorical helper computing probs, cdf, etc over its support.
|
tensorflow_probability/python/layers/distribution_layer.py
|
def _eval_all_one_hot(fn, dist, name=None):
"""OneHotCategorical helper computing probs, cdf, etc over its support."""
with tf.compat.v1.name_scope(name, 'eval_all_one_hot'):
event_size = dist.event_shape_tensor()[-1]
batch_ndims = tf.size(input=dist.batch_shape_tensor())
# Reshape `eye(d)` to: `[d] + [1]*batch_ndims + [d]`.
x = tf.reshape(
tf.eye(event_size, dtype=dist.dtype),
shape=tf.pad(
tensor=tf.ones(batch_ndims, tf.int32),
paddings=[[1, 1]],
constant_values=event_size))
# Compute `fn(x)` then cyclically left-transpose one dim.
perm = tf.pad(tensor=tf.range(1, batch_ndims + 1), paddings=[[0, 1]])
return tf.transpose(a=fn(dist, x), perm=perm)
|
def _eval_all_one_hot(fn, dist, name=None):
"""OneHotCategorical helper computing probs, cdf, etc over its support."""
with tf.compat.v1.name_scope(name, 'eval_all_one_hot'):
event_size = dist.event_shape_tensor()[-1]
batch_ndims = tf.size(input=dist.batch_shape_tensor())
# Reshape `eye(d)` to: `[d] + [1]*batch_ndims + [d]`.
x = tf.reshape(
tf.eye(event_size, dtype=dist.dtype),
shape=tf.pad(
tensor=tf.ones(batch_ndims, tf.int32),
paddings=[[1, 1]],
constant_values=event_size))
# Compute `fn(x)` then cyclically left-transpose one dim.
perm = tf.pad(tensor=tf.range(1, batch_ndims + 1), paddings=[[0, 1]])
return tf.transpose(a=fn(dist, x), perm=perm)
|
[
"OneHotCategorical",
"helper",
"computing",
"probs",
"cdf",
"etc",
"over",
"its",
"support",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L754-L768
|
[
"def",
"_eval_all_one_hot",
"(",
"fn",
",",
"dist",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'eval_all_one_hot'",
")",
":",
"event_size",
"=",
"dist",
".",
"event_shape_tensor",
"(",
")",
"[",
"-",
"1",
"]",
"batch_ndims",
"=",
"tf",
".",
"size",
"(",
"input",
"=",
"dist",
".",
"batch_shape_tensor",
"(",
")",
")",
"# Reshape `eye(d)` to: `[d] + [1]*batch_ndims + [d]`.",
"x",
"=",
"tf",
".",
"reshape",
"(",
"tf",
".",
"eye",
"(",
"event_size",
",",
"dtype",
"=",
"dist",
".",
"dtype",
")",
",",
"shape",
"=",
"tf",
".",
"pad",
"(",
"tensor",
"=",
"tf",
".",
"ones",
"(",
"batch_ndims",
",",
"tf",
".",
"int32",
")",
",",
"paddings",
"=",
"[",
"[",
"1",
",",
"1",
"]",
"]",
",",
"constant_values",
"=",
"event_size",
")",
")",
"# Compute `fn(x)` then cyclically left-transpose one dim.",
"perm",
"=",
"tf",
".",
"pad",
"(",
"tensor",
"=",
"tf",
".",
"range",
"(",
"1",
",",
"batch_ndims",
"+",
"1",
")",
",",
"paddings",
"=",
"[",
"[",
"0",
",",
"1",
"]",
"]",
")",
"return",
"tf",
".",
"transpose",
"(",
"a",
"=",
"fn",
"(",
"dist",
",",
"x",
")",
",",
"perm",
"=",
"perm",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
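In words: `tf.eye(event_size)` enumerates every one-hot vector in the support, the reshape inserts singleton dims so it broadcasts against any batch shape, and the final transpose rotates the support axis to the back. A hedged usage sketch for a scalar batch (assuming the helper and `tfd` are in scope):
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

dist = tfd.OneHotCategorical(logits=[0.0, 1.0, 2.0], dtype=tf.float32)
# Probability of each point in the support, i.e. each one-hot vector;
# the support axis ends up trailing, and the values sum to 1.
probs = _eval_all_one_hot(tfd.Distribution.prob, dist)
```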
|
test
|
_make_kl_divergence_fn
|
Creates a callable computing `KL[a,b]` from `a`, a `tfd.Distribution`.
|
tensorflow_probability/python/layers/distribution_layer.py
|
def _make_kl_divergence_fn(
distribution_b,
use_exact_kl=False,
test_points_reduce_axis=(), # `None` == "all"; () == "none".
test_points_fn=tf.convert_to_tensor,
weight=None):
"""Creates a callable computing `KL[a,b]` from `a`, a `tfd.Distribution`."""
if use_exact_kl is None:
kl_divergence_fn = tfd.kl_divergence
else:
# Closure over: test_points_fn, test_points_reduce_axis.
def kl_divergence_fn(distribution_a, distribution_b):
z = test_points_fn(distribution_a)
return tf.reduce_mean(
input_tensor=distribution_a.log_prob(z) - distribution_b.log_prob(z),
axis=test_points_reduce_axis)
# Closure over: distribution_b, kl_divergence_fn, weight.
def _fn(distribution_a):
"""Closure that computes KLDiv as a function of `a` as in `KL[a, b]`."""
with tf.compat.v1.name_scope('kldivergence_loss'):
# TODO(b/119756336): Due to eager/graph Jacobian graph caching bug
# we add here the capability for deferred construction of the prior.
# This capability can probably be removed once b/119756336 is resolved.
distribution_b_ = (distribution_b() if callable(distribution_b)
else distribution_b)
kl = kl_divergence_fn(distribution_a, distribution_b_)
if weight is not None:
kl = tf.cast(weight, dtype=kl.dtype) * kl
      # Losses appended via model.add_loss are expected to be a single
# scalar, unlike model.loss, which is expected to be the loss per sample.
# Therefore, we reduce over all dimensions, regardless of the shape.
# We take the sum because (apparently) Keras will add this to the *post*
# `reduce_sum` (total) loss.
# TODO(b/126259176): Add end-to-end Keras/TFP test to ensure the API's
# align, particularly wrt how losses are aggregated (across batch
# members).
return tf.reduce_sum(input_tensor=kl, name='batch_total_kl_divergence')
return _fn
|
def _make_kl_divergence_fn(
distribution_b,
use_exact_kl=False,
test_points_reduce_axis=(), # `None` == "all"; () == "none".
test_points_fn=tf.convert_to_tensor,
weight=None):
"""Creates a callable computing `KL[a,b]` from `a`, a `tfd.Distribution`."""
if use_exact_kl is None:
kl_divergence_fn = tfd.kl_divergence
else:
# Closure over: test_points_fn, test_points_reduce_axis.
def kl_divergence_fn(distribution_a, distribution_b):
z = test_points_fn(distribution_a)
return tf.reduce_mean(
input_tensor=distribution_a.log_prob(z) - distribution_b.log_prob(z),
axis=test_points_reduce_axis)
# Closure over: distribution_b, kl_divergence_fn, weight.
def _fn(distribution_a):
"""Closure that computes KLDiv as a function of `a` as in `KL[a, b]`."""
with tf.compat.v1.name_scope('kldivergence_loss'):
# TODO(b/119756336): Due to eager/graph Jacobian graph caching bug
# we add here the capability for deferred construction of the prior.
# This capability can probably be removed once b/119756336 is resolved.
distribution_b_ = (distribution_b() if callable(distribution_b)
else distribution_b)
kl = kl_divergence_fn(distribution_a, distribution_b_)
if weight is not None:
kl = tf.cast(weight, dtype=kl.dtype) * kl
      # Losses appended via model.add_loss are expected to be a single
# scalar, unlike model.loss, which is expected to be the loss per sample.
# Therefore, we reduce over all dimensions, regardless of the shape.
# We take the sum because (apparently) Keras will add this to the *post*
# `reduce_sum` (total) loss.
# TODO(b/126259176): Add end-to-end Keras/TFP test to ensure the API's
# align, particularly wrt how losses are aggregated (across batch
# members).
return tf.reduce_sum(input_tensor=kl, name='batch_total_kl_divergence')
return _fn
|
[
"Creates",
"a",
"callable",
"computing",
"KL",
"[",
"a",
"b",
"]",
"from",
"a",
"a",
"tfd",
".",
"Distribution",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L1309-L1349
|
[
"def",
"_make_kl_divergence_fn",
"(",
"distribution_b",
",",
"use_exact_kl",
"=",
"False",
",",
"test_points_reduce_axis",
"=",
"(",
")",
",",
"# `None` == \"all\"; () == \"none\".",
"test_points_fn",
"=",
"tf",
".",
"convert_to_tensor",
",",
"weight",
"=",
"None",
")",
":",
"if",
"use_exact_kl",
"is",
"None",
":",
"kl_divergence_fn",
"=",
"tfd",
".",
"kl_divergence",
"else",
":",
"# Closure over: test_points_fn, test_points_reduce_axis.",
"def",
"kl_divergence_fn",
"(",
"distribution_a",
",",
"distribution_b",
")",
":",
"z",
"=",
"test_points_fn",
"(",
"distribution_a",
")",
"return",
"tf",
".",
"reduce_mean",
"(",
"input_tensor",
"=",
"distribution_a",
".",
"log_prob",
"(",
"z",
")",
"-",
"distribution_b",
".",
"log_prob",
"(",
"z",
")",
",",
"axis",
"=",
"test_points_reduce_axis",
")",
"# Closure over: distribution_b, kl_divergence_fn, weight.",
"def",
"_fn",
"(",
"distribution_a",
")",
":",
"\"\"\"Closure that computes KLDiv as a function of `a` as in `KL[a, b]`.\"\"\"",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"'kldivergence_loss'",
")",
":",
"# TODO(b/119756336): Due to eager/graph Jacobian graph caching bug",
"# we add here the capability for deferred construction of the prior.",
"# This capability can probably be removed once b/119756336 is resolved.",
"distribution_b_",
"=",
"(",
"distribution_b",
"(",
")",
"if",
"callable",
"(",
"distribution_b",
")",
"else",
"distribution_b",
")",
"kl",
"=",
"kl_divergence_fn",
"(",
"distribution_a",
",",
"distribution_b_",
")",
"if",
"weight",
"is",
"not",
"None",
":",
"kl",
"=",
"tf",
".",
"cast",
"(",
"weight",
",",
"dtype",
"=",
"kl",
".",
"dtype",
")",
"*",
"kl",
"# Losses appended with the model.add_loss and are expected to be a single",
"# scalar, unlike model.loss, which is expected to be the loss per sample.",
"# Therefore, we reduce over all dimensions, regardless of the shape.",
"# We take the sum because (apparently) Keras will add this to the *post*",
"# `reduce_sum` (total) loss.",
"# TODO(b/126259176): Add end-to-end Keras/TFP test to ensure the API's",
"# align, particularly wrt how losses are aggregated (across batch",
"# members).",
"return",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"kl",
",",
"name",
"=",
"'batch_total_kl_divergence'",
")",
"return",
"_fn"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
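When the analytic branch isn't selected, the KL is a Monte Carlo estimate: the mean log-density ratio at test points drawn from `a`. A standalone sketch comparing that estimator against the closed form:
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

a = tfd.Normal(loc=0.0, scale=1.0)
b = tfd.Normal(loc=1.0, scale=2.0)

z = a.sample(10000, seed=42)
mc_kl = tf.reduce_mean(a.log_prob(z) - b.log_prob(z))  # MC estimate
exact_kl = tfd.kl_divergence(a, b)                     # analytic value
# mc_kl approximates exact_kl (~0.443) up to sampling noise.
```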
|
test
|
_get_convert_to_tensor_fn
|
Return a convert-to-tensor func, given a name, config, callable, etc.
|
tensorflow_probability/python/layers/distribution_layer.py
|
def _get_convert_to_tensor_fn(identifier):
"""Return a convert-to-tensor func, given a name, config, callable, etc."""
if identifier is None:
return None
if isinstance(identifier, six.string_types):
identifier = str(identifier)
return _deserialize(identifier)
if isinstance(identifier, dict):
return _deserialize(identifier)
if isinstance(identifier, property):
identifier = identifier.fget
if callable(identifier):
return identifier
raise ValueError('Could not interpret '
'convert-to-tensor function identifier:', identifier)
|
def _get_convert_to_tensor_fn(identifier):
"""Return a convert-to-tensor func, given a name, config, callable, etc."""
if identifier is None:
return None
if isinstance(identifier, six.string_types):
identifier = str(identifier)
return _deserialize(identifier)
if isinstance(identifier, dict):
return _deserialize(identifier)
if isinstance(identifier, property):
identifier = identifier.fget
if callable(identifier):
return identifier
raise ValueError('Could not interpret '
'convert-to-tensor function identifier:', identifier)
|
[
"Return",
"a",
"convert",
"-",
"to",
"-",
"tensor",
"func",
"given",
"a",
"name",
"config",
"callable",
"etc",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L1912-L1930
|
[
"def",
"_get_convert_to_tensor_fn",
"(",
"identifier",
")",
":",
"if",
"identifier",
"is",
"None",
":",
"return",
"None",
"if",
"isinstance",
"(",
"identifier",
",",
"six",
".",
"string_types",
")",
":",
"identifier",
"=",
"str",
"(",
"identifier",
")",
"return",
"_deserialize",
"(",
"identifier",
")",
"if",
"isinstance",
"(",
"identifier",
",",
"dict",
")",
":",
"return",
"_deserialize",
"(",
"identifier",
")",
"if",
"isinstance",
"(",
"identifier",
",",
"property",
")",
":",
"identifier",
"=",
"identifier",
".",
"fget",
"if",
"callable",
"(",
"identifier",
")",
":",
"return",
"identifier",
"raise",
"ValueError",
"(",
"'Could not interpret '",
"'convert-to-tensor function identifier:'",
",",
"identifier",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
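The dispatch accepts several spellings of the same intent. A hedged sketch of the branches (assuming the helper is in scope and that its `_deserialize` recognizes the standard names listed in `get_config` below):
```python
import tensorflow_probability as tfp
tfd = tfp.distributions

fn_by_name = _get_convert_to_tensor_fn('mean')                     # string
fn_callable = _get_convert_to_tensor_fn(tfd.Distribution.sample)   # callable
fn_none = _get_convert_to_tensor_fn(None)                          # passthrough
```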
|
test
|
DistributionLambda.get_config
|
Returns the config of this layer.
This Layer's `make_distribution_fn` is serialized via a library built on
Python pickle. This serialization of Python functions is provided for
convenience, but:
1. The use of this format for long-term storage of models is discouraged.
In particular, it may not be possible to deserialize in a different
version of Python.
2. While serialization is generally supported for lambdas, local
functions, and static methods (and closures over these constructs),
complex functions may fail to serialize.
3. `Tensor` objects (and functions referencing `Tensor` objects) can only
be serialized when the tensor value is statically known. (Such Tensors
are serialized as numpy arrays.)
Instead of relying on `DistributionLambda.get_config`, consider subclassing
`DistributionLambda` and directly implementing Keras serialization via
`get_config` / `from_config`.
NOTE: At the moment, `DistributionLambda` can only be serialized if the
`convert_to_tensor_fn` is a serializable Keras object (i.e., implements
`get_config`) or one of the standard values:
- `Distribution.sample` (or `"sample"`)
- `Distribution.mean` (or `"mean"`)
- `Distribution.mode` (or `"mode"`)
- `Distribution.stddev` (or `"stddev"`)
- `Distribution.variance` (or `"variance"`)
|
tensorflow_probability/python/layers/distribution_layer.py
|
def get_config(self):
"""Returns the config of this layer.
This Layer's `make_distribution_fn` is serialized via a library built on
Python pickle. This serialization of Python functions is provided for
convenience, but:
1. The use of this format for long-term storage of models is discouraged.
In particular, it may not be possible to deserialize in a different
version of Python.
2. While serialization is generally supported for lambdas, local
functions, and static methods (and closures over these constructs),
complex functions may fail to serialize.
3. `Tensor` objects (and functions referencing `Tensor` objects) can only
be serialized when the tensor value is statically known. (Such Tensors
are serialized as numpy arrays.)
Instead of relying on `DistributionLambda.get_config`, consider subclassing
`DistributionLambda` and directly implementing Keras serialization via
`get_config` / `from_config`.
NOTE: At the moment, `DistributionLambda` can only be serialized if the
`convert_to_tensor_fn` is a serializable Keras object (i.e., implements
`get_config`) or one of the standard values:
- `Distribution.sample` (or `"sample"`)
- `Distribution.mean` (or `"mean"`)
- `Distribution.mode` (or `"mode"`)
- `Distribution.stddev` (or `"stddev"`)
- `Distribution.variance` (or `"variance"`)
"""
config = {
'make_distribution_fn': _serialize_function(self._make_distribution_fn),
'convert_to_tensor_fn': _serialize(self._convert_to_tensor_fn),
}
base_config = super(DistributionLambda, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
|
def get_config(self):
"""Returns the config of this layer.
This Layer's `make_distribution_fn` is serialized via a library built on
Python pickle. This serialization of Python functions is provided for
convenience, but:
1. The use of this format for long-term storage of models is discouraged.
In particular, it may not be possible to deserialize in a different
version of Python.
2. While serialization is generally supported for lambdas, local
functions, and static methods (and closures over these constructs),
complex functions may fail to serialize.
3. `Tensor` objects (and functions referencing `Tensor` objects) can only
be serialized when the tensor value is statically known. (Such Tensors
are serialized as numpy arrays.)
Instead of relying on `DistributionLambda.get_config`, consider subclassing
`DistributionLambda` and directly implementing Keras serialization via
`get_config` / `from_config`.
NOTE: At the moment, `DistributionLambda` can only be serialized if the
`convert_to_tensor_fn` is a serializable Keras object (i.e., implements
`get_config`) or one of the standard values:
- `Distribution.sample` (or `"sample"`)
- `Distribution.mean` (or `"mean"`)
- `Distribution.mode` (or `"mode"`)
- `Distribution.stddev` (or `"stddev"`)
- `Distribution.variance` (or `"variance"`)
"""
config = {
'make_distribution_fn': _serialize_function(self._make_distribution_fn),
'convert_to_tensor_fn': _serialize(self._convert_to_tensor_fn),
}
base_config = super(DistributionLambda, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
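The closing merge uses the Python-2-compatible idiom `dict(list(a.items()) + list(b.items()))`, in which later entries win on key collisions; a tiny sketch with hypothetical values:

base_config = {'name': 'layer_1', 'trainable': True}
config = {'convert_to_tensor_fn': 'sample', 'trainable': False}
merged = dict(list(base_config.items()) + list(config.items()))
# Later items override earlier ones, so `config` takes precedence.
assert merged == {'name': 'layer_1', 'trainable': False,
                  'convert_to_tensor_fn': 'sample'}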
|
[
"Returns",
"the",
"config",
"of",
"this",
"layer",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L221-L258
|
[
"def",
"get_config",
"(",
"self",
")",
":",
"config",
"=",
"{",
"'make_distribution_fn'",
":",
"_serialize_function",
"(",
"self",
".",
"_make_distribution_fn",
")",
",",
"'convert_to_tensor_fn'",
":",
"_serialize",
"(",
"self",
".",
"_convert_to_tensor_fn",
")",
",",
"}",
"base_config",
"=",
"super",
"(",
"DistributionLambda",
",",
"self",
")",
".",
"get_config",
"(",
")",
"return",
"dict",
"(",
"list",
"(",
"base_config",
".",
"items",
"(",
")",
")",
"+",
"list",
"(",
"config",
".",
"items",
"(",
")",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
MultivariateNormalTriL.new
|
Create the distribution instance from a `params` vector.
|
tensorflow_probability/python/layers/distribution_layer.py
|
def new(params, event_size, validate_args=False, name=None):
"""Create the distribution instance from a `params` vector."""
with tf.compat.v1.name_scope(name, 'MultivariateNormalTriL',
[params, event_size]):
params = tf.convert_to_tensor(value=params, name='params')
scale_tril = tfb.ScaleTriL(
diag_shift=np.array(1e-5, params.dtype.as_numpy_dtype()),
validate_args=validate_args)
return tfd.MultivariateNormalTriL(
loc=params[..., :event_size],
scale_tril=scale_tril(params[..., event_size:]),
validate_args=validate_args)
|
def new(params, event_size, validate_args=False, name=None):
"""Create the distribution instance from a `params` vector."""
with tf.compat.v1.name_scope(name, 'MultivariateNormalTriL',
[params, event_size]):
params = tf.convert_to_tensor(value=params, name='params')
scale_tril = tfb.ScaleTriL(
diag_shift=np.array(1e-5, params.dtype.as_numpy_dtype()),
validate_args=validate_args)
return tfd.MultivariateNormalTriL(
loc=params[..., :event_size],
scale_tril=scale_tril(params[..., event_size:]),
validate_args=validate_args)
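A plain-NumPy sketch (no TFP required; sizes hypothetical) of how the flat `params` vector is carved up for event size d, matching the slicing above:

import numpy as np

d = 3
params = np.arange(d + d * (d + 1) // 2, dtype=np.float32)  # 3 + 6 = 9 entries
loc = params[:d]        # first d entries parameterize the mean
tril_flat = params[d:]  # remaining d*(d+1)//2 entries feed ScaleTriL
assert loc.shape == (3,) and tril_flat.shape == (6,)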
|
[
"Create",
"the",
"distribution",
"instance",
"from",
"a",
"params",
"vector",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L349-L360
|
[
"def",
"new",
"(",
"params",
",",
"event_size",
",",
"validate_args",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'MultivariateNormalTriL'",
",",
"[",
"params",
",",
"event_size",
"]",
")",
":",
"params",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"params",
",",
"name",
"=",
"'params'",
")",
"scale_tril",
"=",
"tfb",
".",
"ScaleTriL",
"(",
"diag_shift",
"=",
"np",
".",
"array",
"(",
"1e-5",
",",
"params",
".",
"dtype",
".",
"as_numpy_dtype",
"(",
")",
")",
",",
"validate_args",
"=",
"validate_args",
")",
"return",
"tfd",
".",
"MultivariateNormalTriL",
"(",
"loc",
"=",
"params",
"[",
"...",
",",
":",
"event_size",
"]",
",",
"scale_tril",
"=",
"scale_tril",
"(",
"params",
"[",
"...",
",",
"event_size",
":",
"]",
")",
",",
"validate_args",
"=",
"validate_args",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
MultivariateNormalTriL.params_size
|
The number of `params` needed to create a single distribution.
|
tensorflow_probability/python/layers/distribution_layer.py
|
def params_size(event_size, name=None):
"""The number of `params` needed to create a single distribution."""
with tf.compat.v1.name_scope(name, 'MultivariateNormalTriL_params_size',
[event_size]):
return event_size + event_size * (event_size + 1) // 2
|
def params_size(event_size, name=None):
"""The number of `params` needed to create a single distribution."""
with tf.compat.v1.name_scope(name, 'MultivariateNormalTriL_params_size',
[event_size]):
return event_size + event_size * (event_size + 1) // 2
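The count above is the mean entries plus the lower-triangular scale entries; a self-checking restatement:

def mvn_tril_params_size(event_size):
  # event_size loc entries + event_size*(event_size+1)//2 scale_tril entries.
  return event_size + event_size * (event_size + 1) // 2

assert mvn_tril_params_size(3) == 9   # 3 loc + 6 scale_tril
assert mvn_tril_params_size(5) == 20  # 5 loc + 15 scale_tril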
|
[
"The",
"number",
"of",
"params",
"needed",
"to",
"create",
"a",
"single",
"distribution",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L363-L367
|
[
"def",
"params_size",
"(",
"event_size",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'MultivariateNormalTriL_params_size'",
",",
"[",
"event_size",
"]",
")",
":",
"return",
"event_size",
"+",
"event_size",
"*",
"(",
"event_size",
"+",
"1",
")",
"//",
"2"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
OneHotCategorical.new
|
Create the distribution instance from a `params` vector.
|
tensorflow_probability/python/layers/distribution_layer.py
|
def new(params, event_size, dtype=None, validate_args=False, name=None):
"""Create the distribution instance from a `params` vector."""
with tf.compat.v1.name_scope(name, 'OneHotCategorical',
[params, event_size]):
return tfd.OneHotCategorical(
logits=params,
dtype=dtype or params.dtype.base_dtype,
validate_args=validate_args)
|
def new(params, event_size, dtype=None, validate_args=False, name=None):
"""Create the distribution instance from a `params` vector."""
with tf.compat.v1.name_scope(name, 'OneHotCategorical',
[params, event_size]):
return tfd.OneHotCategorical(
logits=params,
dtype=dtype or params.dtype.base_dtype,
validate_args=validate_args)
|
[
"Create",
"the",
"distribution",
"instance",
"from",
"a",
"params",
"vector",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L456-L463
|
[
"def",
"new",
"(",
"params",
",",
"event_size",
",",
"dtype",
"=",
"None",
",",
"validate_args",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'OneHotCategorical'",
",",
"[",
"params",
",",
"event_size",
"]",
")",
":",
"return",
"tfd",
".",
"OneHotCategorical",
"(",
"logits",
"=",
"params",
",",
"dtype",
"=",
"dtype",
"or",
"params",
".",
"dtype",
".",
"base_dtype",
",",
"validate_args",
"=",
"validate_args",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
CategoricalMixtureOfOneHotCategorical.new
|
Create the distribution instance from a `params` vector.
|
tensorflow_probability/python/layers/distribution_layer.py
|
def new(params, event_size, num_components,
dtype=None, validate_args=False, name=None):
"""Create the distribution instance from a `params` vector."""
with tf.compat.v1.name_scope(name, 'CategoricalMixtureOfOneHotCategorical',
[params, event_size, num_components]):
dist = MixtureSameFamily.new(
params,
num_components,
OneHotCategorical(
event_size,
validate_args=False, # So we can eval on simplex interior.
name=name),
validate_args=validate_args,
name=name)
# pylint: disable=protected-access
dist._mean = functools.partial(
_eval_all_one_hot, tfd.Distribution.prob, dist)
dist.log_mean = functools.partial(
_eval_all_one_hot, tfd.Distribution.log_prob, dist)
# pylint: enable=protected-access
return dist
|
def new(params, event_size, num_components,
dtype=None, validate_args=False, name=None):
"""Create the distribution instance from a `params` vector."""
with tf.compat.v1.name_scope(name, 'CategoricalMixtureOfOneHotCategorical',
[params, event_size, num_components]):
dist = MixtureSameFamily.new(
params,
num_components,
OneHotCategorical(
event_size,
validate_args=False, # So we can eval on simplex interior.
name=name),
validate_args=validate_args,
name=name)
# pylint: disable=protected-access
dist._mean = functools.partial(
_eval_all_one_hot, tfd.Distribution.prob, dist)
dist.log_mean = functools.partial(
_eval_all_one_hot, tfd.Distribution.log_prob, dist)
# pylint: enable=protected-access
return dist
|
[
"Create",
"the",
"distribution",
"instance",
"from",
"a",
"params",
"vector",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L563-L583
|
[
"def",
"new",
"(",
"params",
",",
"event_size",
",",
"num_components",
",",
"dtype",
"=",
"None",
",",
"validate_args",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'CategoricalMixtureOfOneHotCategorical'",
",",
"[",
"params",
",",
"event_size",
",",
"num_components",
"]",
")",
":",
"dist",
"=",
"MixtureSameFamily",
".",
"new",
"(",
"params",
",",
"num_components",
",",
"OneHotCategorical",
"(",
"event_size",
",",
"validate_args",
"=",
"False",
",",
"# So we can eval on simplex interior.",
"name",
"=",
"name",
")",
",",
"validate_args",
"=",
"validate_args",
",",
"name",
"=",
"name",
")",
"# pylint: disable=protected-access",
"dist",
".",
"_mean",
"=",
"functools",
".",
"partial",
"(",
"_eval_all_one_hot",
",",
"tfd",
".",
"Distribution",
".",
"prob",
",",
"dist",
")",
"dist",
".",
"log_mean",
"=",
"functools",
".",
"partial",
"(",
"_eval_all_one_hot",
",",
"tfd",
".",
"Distribution",
".",
"log_prob",
",",
"dist",
")",
"# pylint: enable=protected-access",
"return",
"dist"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
CategoricalMixtureOfOneHotCategorical.params_size
|
The number of `params` needed to create a single distribution.
|
tensorflow_probability/python/layers/distribution_layer.py
|
def params_size(event_size, num_components, name=None):
"""The number of `params` needed to create a single distribution."""
with tf.compat.v1.name_scope(
name, 'CategoricalMixtureOfOneHotCategorical_params_size',
[event_size, num_components]):
return MixtureSameFamily.params_size(
num_components,
OneHotCategorical.params_size(event_size, name=name),
name=name)
|
def params_size(event_size, num_components, name=None):
"""The number of `params` needed to create a single distribution."""
with tf.compat.v1.name_scope(
name, 'CategoricalMixtureOfOneHotCategorical_params_size',
[event_size, num_components]):
return MixtureSameFamily.params_size(
num_components,
OneHotCategorical.params_size(event_size, name=name),
name=name)
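Unfolding the composition (assuming `OneHotCategorical.params_size(d) == d`, i.e. one logit per class, and `MixtureSameFamily.params_size(k, p) == k + k * p` as defined elsewhere in this file):

def mixture_of_one_hot_params_size(num_components, event_size):
  # k mixing logits plus k blocks of per-component class logits.
  return num_components + num_components * event_size

assert mixture_of_one_hot_params_size(4, 10) == 44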
|
[
"The",
"number",
"of",
"params",
"needed",
"to",
"create",
"a",
"single",
"distribution",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L586-L594
|
[
"def",
"params_size",
"(",
"event_size",
",",
"num_components",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'CategoricalMixtureOfOneHotCategorical_params_size'",
",",
"[",
"event_size",
",",
"num_components",
"]",
")",
":",
"return",
"MixtureSameFamily",
".",
"params_size",
"(",
"num_components",
",",
"OneHotCategorical",
".",
"params_size",
"(",
"event_size",
",",
"name",
"=",
"name",
")",
",",
"name",
"=",
"name",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
IndependentBernoulli.new
|
Create the distribution instance from a `params` vector.
|
tensorflow_probability/python/layers/distribution_layer.py
|
def new(params, event_shape=(), dtype=None, validate_args=False, name=None):
"""Create the distribution instance from a `params` vector."""
with tf.compat.v1.name_scope(name, 'IndependentBernoulli',
[params, event_shape]):
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(
tf.convert_to_tensor(
value=event_shape, name='event_shape', dtype_hint=tf.int32),
tensor_name='event_shape')
new_shape = tf.concat([
tf.shape(input=params)[:-1],
event_shape,
], axis=0)
dist = tfd.Independent(
tfd.Bernoulli(
logits=tf.reshape(params, new_shape),
dtype=dtype or params.dtype.base_dtype,
validate_args=validate_args),
reinterpreted_batch_ndims=tf.size(input=event_shape),
validate_args=validate_args)
dist._logits = dist.distribution._logits # pylint: disable=protected-access
dist._probs = dist.distribution._probs # pylint: disable=protected-access
dist.logits = tfd.Bernoulli.logits
dist.probs = tfd.Bernoulli.probs
return dist
|
def new(params, event_shape=(), dtype=None, validate_args=False, name=None):
"""Create the distribution instance from a `params` vector."""
with tf.compat.v1.name_scope(name, 'IndependentBernoulli',
[params, event_shape]):
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(
tf.convert_to_tensor(
value=event_shape, name='event_shape', dtype_hint=tf.int32),
tensor_name='event_shape')
new_shape = tf.concat([
tf.shape(input=params)[:-1],
event_shape,
], axis=0)
dist = tfd.Independent(
tfd.Bernoulli(
logits=tf.reshape(params, new_shape),
dtype=dtype or params.dtype.base_dtype,
validate_args=validate_args),
reinterpreted_batch_ndims=tf.size(input=event_shape),
validate_args=validate_args)
dist._logits = dist.distribution._logits # pylint: disable=protected-access
dist._probs = dist.distribution._probs # pylint: disable=protected-access
dist.logits = tfd.Bernoulli.logits
dist.probs = tfd.Bernoulli.probs
return dist
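The reshape step just re-attaches the event shape to the trailing axis; a NumPy-only sketch with hypothetical sizes:

import numpy as np

batch, event_shape = 2, (4, 5)
params = np.zeros((batch, int(np.prod(event_shape))))  # flat logits per example
new_shape = params.shape[:-1] + event_shape            # batch dims + event dims
logits = params.reshape(new_shape)
assert logits.shape == (2, 4, 5)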
|
[
"Create",
"the",
"distribution",
"instance",
"from",
"a",
"params",
"vector",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L696-L720
|
[
"def",
"new",
"(",
"params",
",",
"event_shape",
"=",
"(",
")",
",",
"dtype",
"=",
"None",
",",
"validate_args",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'IndependentBernoulli'",
",",
"[",
"params",
",",
"event_shape",
"]",
")",
":",
"params",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"params",
",",
"name",
"=",
"'params'",
")",
"event_shape",
"=",
"dist_util",
".",
"expand_to_vector",
"(",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"event_shape",
",",
"name",
"=",
"'event_shape'",
",",
"dtype_hint",
"=",
"tf",
".",
"int32",
")",
",",
"tensor_name",
"=",
"'event_shape'",
")",
"new_shape",
"=",
"tf",
".",
"concat",
"(",
"[",
"tf",
".",
"shape",
"(",
"input",
"=",
"params",
")",
"[",
":",
"-",
"1",
"]",
",",
"event_shape",
",",
"]",
",",
"axis",
"=",
"0",
")",
"dist",
"=",
"tfd",
".",
"Independent",
"(",
"tfd",
".",
"Bernoulli",
"(",
"logits",
"=",
"tf",
".",
"reshape",
"(",
"params",
",",
"new_shape",
")",
",",
"dtype",
"=",
"dtype",
"or",
"params",
".",
"dtype",
".",
"base_dtype",
",",
"validate_args",
"=",
"validate_args",
")",
",",
"reinterpreted_batch_ndims",
"=",
"tf",
".",
"size",
"(",
"input",
"=",
"event_shape",
")",
",",
"validate_args",
"=",
"validate_args",
")",
"dist",
".",
"_logits",
"=",
"dist",
".",
"distribution",
".",
"_logits",
"# pylint: disable=protected-access",
"dist",
".",
"_probs",
"=",
"dist",
".",
"distribution",
".",
"_probs",
"# pylint: disable=protected-access",
"dist",
".",
"logits",
"=",
"tfd",
".",
"Bernoulli",
".",
"logits",
"dist",
".",
"probs",
"=",
"tfd",
".",
"Bernoulli",
".",
"probs",
"return",
"dist"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
IndependentBernoulli.get_config
|
Returns the config of this layer.
NOTE: At the moment, this configuration can only be serialized if the
Layer's `convert_to_tensor_fn` is a serializable Keras object (i.e.,
implements `get_config`) or one of the standard values:
- `Distribution.sample` (or `"sample"`)
- `Distribution.mean` (or `"mean"`)
- `Distribution.mode` (or `"mode"`)
- `Distribution.stddev` (or `"stddev"`)
- `Distribution.variance` (or `"variance"`)
|
tensorflow_probability/python/layers/distribution_layer.py
|
def get_config(self):
"""Returns the config of this layer.
NOTE: At the moment, this configuration can only be serialized if the
Layer's `convert_to_tensor_fn` is a serializable Keras object (i.e.,
implements `get_config`) or one of the standard values:
- `Distribution.sample` (or `"sample"`)
- `Distribution.mean` (or `"mean"`)
- `Distribution.mode` (or `"mode"`)
- `Distribution.stddev` (or `"stddev"`)
- `Distribution.variance` (or `"variance"`)
"""
config = {
'event_shape': self._event_shape,
'convert_to_tensor_fn': _serialize(self._convert_to_tensor_fn),
'sample_dtype': self._sample_dtype,
'validate_args': self._validate_args
}
base_config = super(IndependentBernoulli, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
|
def get_config(self):
"""Returns the config of this layer.
NOTE: At the moment, this configuration can only be serialized if the
Layer's `convert_to_tensor_fn` is a serializable Keras object (i.e.,
implements `get_config`) or one of the standard values:
- `Distribution.sample` (or `"sample"`)
- `Distribution.mean` (or `"mean"`)
- `Distribution.mode` (or `"mode"`)
- `Distribution.stddev` (or `"stddev"`)
- `Distribution.variance` (or `"variance"`)
"""
config = {
'event_shape': self._event_shape,
'convert_to_tensor_fn': _serialize(self._convert_to_tensor_fn),
'sample_dtype': self._sample_dtype,
'validate_args': self._validate_args
}
base_config = super(IndependentBernoulli, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
|
[
"Returns",
"the",
"config",
"of",
"this",
"layer",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L732-L751
|
[
"def",
"get_config",
"(",
"self",
")",
":",
"config",
"=",
"{",
"'event_shape'",
":",
"self",
".",
"_event_shape",
",",
"'convert_to_tensor_fn'",
":",
"_serialize",
"(",
"self",
".",
"_convert_to_tensor_fn",
")",
",",
"'sample_dtype'",
":",
"self",
".",
"_sample_dtype",
",",
"'validate_args'",
":",
"self",
".",
"_validate_args",
"}",
"base_config",
"=",
"super",
"(",
"IndependentBernoulli",
",",
"self",
")",
".",
"get_config",
"(",
")",
"return",
"dict",
"(",
"list",
"(",
"base_config",
".",
"items",
"(",
")",
")",
"+",
"list",
"(",
"config",
".",
"items",
"(",
")",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
IndependentLogistic.new
|
Create the distribution instance from a `params` vector.
|
tensorflow_probability/python/layers/distribution_layer.py
|
def new(params, event_shape=(), validate_args=False, name=None):
"""Create the distribution instance from a `params` vector."""
with tf.compat.v1.name_scope(name, 'IndependentLogistic',
[params, event_shape]):
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(
tf.convert_to_tensor(
value=event_shape, name='event_shape', dtype_hint=tf.int32),
tensor_name='event_shape')
output_shape = tf.concat([
tf.shape(input=params)[:-1],
event_shape,
],
axis=0)
loc_params, scale_params = tf.split(params, 2, axis=-1)
return tfd.Independent(
tfd.Logistic(
loc=tf.reshape(loc_params, output_shape),
scale=tf.math.softplus(tf.reshape(scale_params, output_shape)),
validate_args=validate_args),
reinterpreted_batch_ndims=tf.size(input=event_shape),
validate_args=validate_args)
|
def new(params, event_shape=(), validate_args=False, name=None):
"""Create the distribution instance from a `params` vector."""
with tf.compat.v1.name_scope(name, 'IndependentLogistic',
[params, event_shape]):
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(
tf.convert_to_tensor(
value=event_shape, name='event_shape', dtype_hint=tf.int32),
tensor_name='event_shape')
output_shape = tf.concat([
tf.shape(input=params)[:-1],
event_shape,
],
axis=0)
loc_params, scale_params = tf.split(params, 2, axis=-1)
return tfd.Independent(
tfd.Logistic(
loc=tf.reshape(loc_params, output_shape),
scale=tf.math.softplus(tf.reshape(scale_params, output_shape)),
validate_args=validate_args),
reinterpreted_batch_ndims=tf.size(input=event_shape),
validate_args=validate_args)
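The parameter layout here is a half/half split into locations and pre-softplus scales; a NumPy-only sketch (softplus written out by hand, sizes hypothetical):

import numpy as np

def softplus(x):
  return np.log1p(np.exp(x))  # keeps scales strictly positive

event_size = 3
params = np.random.randn(8, 2 * event_size)              # batch of 8
loc_params, scale_params = np.split(params, 2, axis=-1)  # halves of the last axis
scale = softplus(scale_params)
assert loc_params.shape == (8, 3) and scale.shape == (8, 3)
assert (scale > 0).all()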
|
[
"Create",
"the",
"distribution",
"instance",
"from",
"a",
"params",
"vector",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L834-L855
|
[
"def",
"new",
"(",
"params",
",",
"event_shape",
"=",
"(",
")",
",",
"validate_args",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'IndependentLogistic'",
",",
"[",
"params",
",",
"event_shape",
"]",
")",
":",
"params",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"params",
",",
"name",
"=",
"'params'",
")",
"event_shape",
"=",
"dist_util",
".",
"expand_to_vector",
"(",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"event_shape",
",",
"name",
"=",
"'event_shape'",
",",
"dtype_hint",
"=",
"tf",
".",
"int32",
")",
",",
"tensor_name",
"=",
"'event_shape'",
")",
"output_shape",
"=",
"tf",
".",
"concat",
"(",
"[",
"tf",
".",
"shape",
"(",
"input",
"=",
"params",
")",
"[",
":",
"-",
"1",
"]",
",",
"event_shape",
",",
"]",
",",
"axis",
"=",
"0",
")",
"loc_params",
",",
"scale_params",
"=",
"tf",
".",
"split",
"(",
"params",
",",
"2",
",",
"axis",
"=",
"-",
"1",
")",
"return",
"tfd",
".",
"Independent",
"(",
"tfd",
".",
"Logistic",
"(",
"loc",
"=",
"tf",
".",
"reshape",
"(",
"loc_params",
",",
"output_shape",
")",
",",
"scale",
"=",
"tf",
".",
"math",
".",
"softplus",
"(",
"tf",
".",
"reshape",
"(",
"scale_params",
",",
"output_shape",
")",
")",
",",
"validate_args",
"=",
"validate_args",
")",
",",
"reinterpreted_batch_ndims",
"=",
"tf",
".",
"size",
"(",
"input",
"=",
"event_shape",
")",
",",
"validate_args",
"=",
"validate_args",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
IndependentNormal.params_size
|
The number of `params` needed to create a single distribution.
|
tensorflow_probability/python/layers/distribution_layer.py
|
def params_size(event_shape=(), name=None):
"""The number of `params` needed to create a single distribution."""
with tf.compat.v1.name_scope(name, 'IndependentNormal_params_size',
[event_shape]):
event_shape = tf.convert_to_tensor(
value=event_shape, name='event_shape', dtype_hint=tf.int32)
return 2 * _event_size(
event_shape, name=name or 'IndependentNormal_params_size')
|
def params_size(event_shape=(), name=None):
"""The number of `params` needed to create a single distribution."""
with tf.compat.v1.name_scope(name, 'IndependentNormal_params_size',
[event_shape]):
event_shape = tf.convert_to_tensor(
value=event_shape, name='event_shape', dtype_hint=tf.int32)
return 2 * _event_size(
event_shape, name=name or 'IndependentNormal_params_size')
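The result is two parameters (loc and scale) per event dimension; a self-checking restatement using `np.prod`, which maps the empty shape `()` to 1:

import numpy as np

def independent_normal_params_size(event_shape=()):
  return 2 * int(np.prod(event_shape))  # np.prod(()) == 1, so scalars need 2

assert independent_normal_params_size(()) == 2
assert independent_normal_params_size((4, 5)) == 40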
|
[
"The",
"number",
"of",
"params",
"needed",
"to",
"create",
"a",
"single",
"distribution",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L975-L982
|
[
"def",
"params_size",
"(",
"event_shape",
"=",
"(",
")",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'IndependentNormal_params_size'",
",",
"[",
"event_shape",
"]",
")",
":",
"event_shape",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"event_shape",
",",
"name",
"=",
"'event_shape'",
",",
"dtype_hint",
"=",
"tf",
".",
"int32",
")",
"return",
"2",
"*",
"_event_size",
"(",
"event_shape",
",",
"name",
"=",
"name",
"or",
"'IndependentNormal_params_size'",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
IndependentPoisson.new
|
Create the distribution instance from a `params` vector.
|
tensorflow_probability/python/layers/distribution_layer.py
|
def new(params, event_shape=(), validate_args=False, name=None):
"""Create the distribution instance from a `params` vector."""
with tf.compat.v1.name_scope(name, 'IndependentPoisson',
[params, event_shape]):
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(
tf.convert_to_tensor(
value=event_shape, name='event_shape', dtype_hint=tf.int32),
tensor_name='event_shape')
output_shape = tf.concat([
tf.shape(input=params)[:-1],
event_shape,
],
axis=0)
return tfd.Independent(
tfd.Poisson(
log_rate=tf.reshape(params, output_shape),
validate_args=validate_args),
reinterpreted_batch_ndims=tf.size(input=event_shape),
validate_args=validate_args)
|
def new(params, event_shape=(), validate_args=False, name=None):
"""Create the distribution instance from a `params` vector."""
with tf.compat.v1.name_scope(name, 'IndependentPoisson',
[params, event_shape]):
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(
tf.convert_to_tensor(
value=event_shape, name='event_shape', dtype_hint=tf.int32),
tensor_name='event_shape')
output_shape = tf.concat([
tf.shape(input=params)[:-1],
event_shape,
],
axis=0)
return tfd.Independent(
tfd.Poisson(
log_rate=tf.reshape(params, output_shape),
validate_args=validate_args),
reinterpreted_batch_ndims=tf.size(input=event_shape),
validate_args=validate_args)
|
[
"Create",
"the",
"distribution",
"instance",
"from",
"a",
"params",
"vector",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L1084-L1103
|
[
"def",
"new",
"(",
"params",
",",
"event_shape",
"=",
"(",
")",
",",
"validate_args",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'IndependentPoisson'",
",",
"[",
"params",
",",
"event_shape",
"]",
")",
":",
"params",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"params",
",",
"name",
"=",
"'params'",
")",
"event_shape",
"=",
"dist_util",
".",
"expand_to_vector",
"(",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"event_shape",
",",
"name",
"=",
"'event_shape'",
",",
"dtype_hint",
"=",
"tf",
".",
"int32",
")",
",",
"tensor_name",
"=",
"'event_shape'",
")",
"output_shape",
"=",
"tf",
".",
"concat",
"(",
"[",
"tf",
".",
"shape",
"(",
"input",
"=",
"params",
")",
"[",
":",
"-",
"1",
"]",
",",
"event_shape",
",",
"]",
",",
"axis",
"=",
"0",
")",
"return",
"tfd",
".",
"Independent",
"(",
"tfd",
".",
"Poisson",
"(",
"log_rate",
"=",
"tf",
".",
"reshape",
"(",
"params",
",",
"output_shape",
")",
",",
"validate_args",
"=",
"validate_args",
")",
",",
"reinterpreted_batch_ndims",
"=",
"tf",
".",
"size",
"(",
"input",
"=",
"event_shape",
")",
",",
"validate_args",
"=",
"validate_args",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
MixtureSameFamily.new
|
Create the distribution instance from a `params` vector.
|
tensorflow_probability/python/layers/distribution_layer.py
|
def new(params, num_components, component_layer,
validate_args=False, name=None):
"""Create the distribution instance from a `params` vector."""
with tf.compat.v1.name_scope(name, 'MixtureSameFamily',
[params, num_components, component_layer]):
params = tf.convert_to_tensor(value=params, name='params')
num_components = tf.convert_to_tensor(
value=num_components, name='num_components', dtype_hint=tf.int32)
components_dist = component_layer(
tf.reshape(
params[..., num_components:],
tf.concat([tf.shape(input=params)[:-1], [num_components, -1]],
axis=0)))
mixture_dist = tfd.Categorical(logits=params[..., :num_components])
return tfd.MixtureSameFamily(
mixture_dist,
components_dist,
# TODO(b/120154797): Change following to `validate_args=True` after
# fixing: "ValueError: `mixture_distribution` must have scalar
# `event_dim`s." assertion in MixtureSameFamily.
validate_args=False)
|
def new(params, num_components, component_layer,
validate_args=False, name=None):
"""Create the distribution instance from a `params` vector."""
with tf.compat.v1.name_scope(name, 'MixtureSameFamily',
[params, num_components, component_layer]):
params = tf.convert_to_tensor(value=params, name='params')
num_components = tf.convert_to_tensor(
value=num_components, name='num_components', dtype_hint=tf.int32)
components_dist = component_layer(
tf.reshape(
params[..., num_components:],
tf.concat([tf.shape(input=params)[:-1], [num_components, -1]],
axis=0)))
mixture_dist = tfd.Categorical(logits=params[..., :num_components])
return tfd.MixtureSameFamily(
mixture_dist,
components_dist,
# TODO(b/120154797): Change following to `validate_args=True` after
# fixing: "ValueError: `mixture_distribution` must have scalar
# `event_dim`s." assertion in MixtureSameFamily.
validate_args=False)
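The slicing convention above: the first `num_components` entries of the last axis are the Categorical mixing logits, and the remainder is reshaped into one parameter block per component. A NumPy-only sketch with hypothetical sizes:

import numpy as np

batch, k, p = 8, 3, 4  # batch, components, per-component param size
params = np.random.randn(batch, k + k * p)
mixture_logits = params[..., :k]                         # mixing logits
component_params = params[..., k:].reshape(batch, k, p)  # component blocks
assert mixture_logits.shape == (8, 3)
assert component_params.shape == (8, 3, 4)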
|
[
"Create",
"the",
"distribution",
"instance",
"from",
"a",
"params",
"vector",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L1427-L1448
|
[
"def",
"new",
"(",
"params",
",",
"num_components",
",",
"component_layer",
",",
"validate_args",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'MixtureSameFamily'",
",",
"[",
"params",
",",
"num_components",
",",
"component_layer",
"]",
")",
":",
"params",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"params",
",",
"name",
"=",
"'params'",
")",
"num_components",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"num_components",
",",
"name",
"=",
"'num_components'",
",",
"dtype_hint",
"=",
"tf",
".",
"int32",
")",
"components_dist",
"=",
"component_layer",
"(",
"tf",
".",
"reshape",
"(",
"params",
"[",
"...",
",",
"num_components",
":",
"]",
",",
"tf",
".",
"concat",
"(",
"[",
"tf",
".",
"shape",
"(",
"input",
"=",
"params",
")",
"[",
":",
"-",
"1",
"]",
",",
"[",
"num_components",
",",
"-",
"1",
"]",
"]",
",",
"axis",
"=",
"0",
")",
")",
")",
"mixture_dist",
"=",
"tfd",
".",
"Categorical",
"(",
"logits",
"=",
"params",
"[",
"...",
",",
":",
"num_components",
"]",
")",
"return",
"tfd",
".",
"MixtureSameFamily",
"(",
"mixture_dist",
",",
"components_dist",
",",
"# TODO(b/120154797): Change following to `validate_args=True` after",
"# fixing: \"ValueError: `mixture_distribution` must have scalar",
"# `event_dim`s.\" assertion in MixtureSameFamily.",
"validate_args",
"=",
"False",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
MixtureSameFamily.params_size
|
Number of `params` needed to create a `MixtureSameFamily` distribution.
Arguments:
num_components: Number of component distributions in the mixture
distribution.
component_params_size: Number of parameters needed to create a single
component distribution.
name: The name to use for the op to compute the number of parameters
(if such an op needs to be created).
Returns:
params_size: The number of parameters needed to create the mixture
distribution.
|
tensorflow_probability/python/layers/distribution_layer.py
|
def params_size(num_components, component_params_size, name=None):
"""Number of `params` needed to create a `MixtureSameFamily` distribution.
Arguments:
num_components: Number of component distributions in the mixture
distribution.
component_params_size: Number of parameters needed to create a single
component distribution.
name: The name to use for the op to compute the number of parameters
(if such an op needs to be created).
Returns:
params_size: The number of parameters needed to create the mixture
distribution.
"""
with tf.compat.v1.name_scope(name, 'MixtureSameFamily_params_size',
[num_components, component_params_size]):
num_components = tf.convert_to_tensor(
value=num_components, name='num_components', dtype_hint=tf.int32)
component_params_size = tf.convert_to_tensor(
value=component_params_size, name='component_params_size')
num_components = dist_util.prefer_static_value(num_components)
component_params_size = dist_util.prefer_static_value(
component_params_size)
return num_components + num_components * component_params_size
|
def params_size(num_components, component_params_size, name=None):
"""Number of `params` needed to create a `MixtureSameFamily` distribution.
Arguments:
num_components: Number of component distributions in the mixture
distribution.
component_params_size: Number of parameters needed to create a single
component distribution.
name: The name to use for the op to compute the number of parameters
(if such an op needs to be created).
Returns:
params_size: The number of parameters needed to create the mixture
distribution.
"""
with tf.compat.v1.name_scope(name, 'MixtureSameFamily_params_size',
[num_components, component_params_size]):
num_components = tf.convert_to_tensor(
value=num_components, name='num_components', dtype_hint=tf.int32)
component_params_size = tf.convert_to_tensor(
value=component_params_size, name='component_params_size')
num_components = dist_util.prefer_static_value(num_components)
component_params_size = dist_util.prefer_static_value(
component_params_size)
return num_components + num_components * component_params_size
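So the total is `num_components` mixing logits plus `num_components` copies of the component parameterization; a worked check:

def mixture_same_family_params_size(num_components, component_params_size):
  return num_components + num_components * component_params_size

# A 3-component mixture of IndependentNormal over a (4,)-event: each
# component needs 2 * 4 = 8 params, so the mixture needs 3 + 3 * 8 = 27.
assert mixture_same_family_params_size(3, 8) == 27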
|
[
"Number",
"of",
"params",
"needed",
"to",
"create",
"a",
"MixtureSameFamily",
"distribution",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L1451-L1477
|
[
"def",
"params_size",
"(",
"num_components",
",",
"component_params_size",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'MixtureSameFamily_params_size'",
",",
"[",
"num_components",
",",
"component_params_size",
"]",
")",
":",
"num_components",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"num_components",
",",
"name",
"=",
"'num_components'",
",",
"dtype_hint",
"=",
"tf",
".",
"int32",
")",
"component_params_size",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"component_params_size",
",",
"name",
"=",
"'component_params_size'",
")",
"num_components",
"=",
"dist_util",
".",
"prefer_static_value",
"(",
"num_components",
")",
"component_params_size",
"=",
"dist_util",
".",
"prefer_static_value",
"(",
"component_params_size",
")",
"return",
"num_components",
"+",
"num_components",
"*",
"component_params_size"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
MixtureNormal.params_size
|
The number of `params` needed to create a single distribution.
|
tensorflow_probability/python/layers/distribution_layer.py
|
def params_size(num_components, event_shape=(), name=None):
"""The number of `params` needed to create a single distribution."""
return MixtureSameFamily.params_size(
num_components,
IndependentNormal.params_size(event_shape, name=name),
name=name)
|
def params_size(num_components, event_shape=(), name=None):
"""The number of `params` needed to create a single distribution."""
return MixtureSameFamily.params_size(
num_components,
IndependentNormal.params_size(event_shape, name=name),
name=name)
|
[
"The",
"number",
"of",
"params",
"needed",
"to",
"create",
"a",
"single",
"distribution",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L1574-L1579
|
[
"def",
"params_size",
"(",
"num_components",
",",
"event_shape",
"=",
"(",
")",
",",
"name",
"=",
"None",
")",
":",
"return",
"MixtureSameFamily",
".",
"params_size",
"(",
"num_components",
",",
"IndependentNormal",
".",
"params_size",
"(",
"event_shape",
",",
"name",
"=",
"name",
")",
",",
"name",
"=",
"name",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
MixtureLogistic.new
|
Create the distribution instance from a `params` vector.
|
tensorflow_probability/python/layers/distribution_layer.py
|
def new(params, num_components, event_shape=(),
validate_args=False, name=None):
"""Create the distribution instance from a `params` vector."""
return MixtureSameFamily.new(
params,
num_components,
IndependentLogistic(
event_shape, validate_args=validate_args, name=name),
validate_args=validate_args,
name=name)
|
def new(params, num_components, event_shape=(),
validate_args=False, name=None):
"""Create the distribution instance from a `params` vector."""
return MixtureSameFamily.new(
params,
num_components,
IndependentLogistic(
event_shape, validate_args=validate_args, name=name),
validate_args=validate_args,
name=name)
|
[
"Create",
"the",
"distribution",
"instance",
"from",
"a",
"params",
"vector",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L1686-L1695
|
[
"def",
"new",
"(",
"params",
",",
"num_components",
",",
"event_shape",
"=",
"(",
")",
",",
"validate_args",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"return",
"MixtureSameFamily",
".",
"new",
"(",
"params",
",",
"num_components",
",",
"IndependentLogistic",
"(",
"event_shape",
",",
"validate_args",
"=",
"validate_args",
",",
"name",
"=",
"name",
")",
",",
"validate_args",
"=",
"validate_args",
",",
"name",
"=",
"name",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
MixtureLogistic.params_size
|
The number of `params` needed to create a single distribution.
|
tensorflow_probability/python/layers/distribution_layer.py
|
def params_size(num_components, event_shape=(), name=None):
"""The number of `params` needed to create a single distribution."""
return MixtureSameFamily.params_size(
num_components,
IndependentLogistic.params_size(event_shape, name=name),
name=name)
|
def params_size(num_components, event_shape=(), name=None):
"""The number of `params` needed to create a single distribution."""
return MixtureSameFamily.params_size(
num_components,
IndependentLogistic.params_size(event_shape, name=name),
name=name)
|
[
"The",
"number",
"of",
"params",
"needed",
"to",
"create",
"a",
"single",
"distribution",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L1698-L1703
|
[
"def",
"params_size",
"(",
"num_components",
",",
"event_shape",
"=",
"(",
")",
",",
"name",
"=",
"None",
")",
":",
"return",
"MixtureSameFamily",
".",
"params_size",
"(",
"num_components",
",",
"IndependentLogistic",
".",
"params_size",
"(",
"event_shape",
",",
"name",
"=",
"name",
")",
",",
"name",
"=",
"name",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
get_next_interceptor
|
Yields the top-most interceptor on the thread-local interceptor stack.
Operations may be intercepted by multiple nested interceptors. Once reached,
an operation can be forwarded through nested interceptors until resolved.
To allow for nesting, implement interceptors by re-wrapping their first
argument (`f`) as an `interceptable`. To avoid nesting, manipulate the
computation without using `interceptable`.
This function allows for nesting by manipulating the thread-local interceptor
stack, so that operations are intercepted in the order of interceptor nesting.
#### Examples
```python
from tensorflow_probability import edward2 as ed
def model():
x = ed.Normal(loc=0., scale=1., name="x")
y = ed.Normal(loc=x, scale=1., name="y")
return x + y
def double(f, *args, **kwargs):
return 2. * interceptable(f)(*args, **kwargs)
def set_y(f, *args, **kwargs):
if kwargs.get("name") == "y":
kwargs["value"] = 0.42
return interceptable(f)(*args, **kwargs)
with interception(double):
with interception(set_y):
z = model()
```
This will firstly put `double` on the stack, and then `set_y`,
resulting in the stack:
(TOP) set_y -> double -> apply (BOTTOM)
The execution of `model` is then (top lines are current stack state):
1) (TOP) set_y -> double -> apply (BOTTOM);
`ed.Normal(0., 1., "x")` is intercepted by `set_y`, and as the name is not "y"
the operation is simply forwarded to the next interceptor on the stack.
2) (TOP) double -> apply (BOTTOM);
`ed.Normal(0., 1., "x")` is intercepted by `double`, to produce
`2*ed.Normal(0., 1., "x")`, with the operation being forwarded down the stack.
3) (TOP) apply (BOTTOM);
`ed.Normal(0., 1., "x")` is intercepted by `apply`, which simply calls the
constructor.
(At this point, the nested calls to `get_next_interceptor()`, produced by
forwarding operations, exit, and the current stack is again:
(TOP) set_y -> double -> apply (BOTTOM))
4) (TOP) set_y -> double -> apply (BOTTOM);
`ed.Normal(0., 1., "y")` is intercepted by `set_y`,
the value of `y` is set to 0.42 and the operation is forwarded down the stack.
5) (TOP) double -> apply (BOTTOM);
`ed.Normal(0., 1., "y")` is intercepted by `double`, to produce
`2*ed.Normal(0., 1., "y")`, with the operation being forwarded down the stack.
6) (TOP) apply (BOTTOM);
`ed.Normal(0., 1., "y")` is intercepted by `apply`, which simply calls the
constructor.
The final values for `x` and `y` inside of `model()` are tensors where `x` is
a random draw from Normal(0., 1.) doubled, and `y` is a constant 0.84, thus
z = 2 * Normal(0., 1.) + 0.84.
|
tensorflow_probability/python/edward2/interceptor.py
|
def get_next_interceptor():
"""Yields the top-most interceptor on the thread-local interceptor stack.
Operations may be intercepted by multiple nested interceptors. Once reached,
an operation can be forwarded through nested interceptors until resolved.
To allow for nesting, implement interceptors by re-wrapping their first
argument (`f`) as an `interceptable`. To avoid nesting, manipulate the
computation without using `interceptable`.
This function allows for nesting by manipulating the thread-local interceptor
stack, so that operations are intercepted in the order of interceptor nesting.
#### Examples
```python
from tensorflow_probability import edward2 as ed
def model():
x = ed.Normal(loc=0., scale=1., name="x")
y = ed.Normal(loc=x, scale=1., name="y")
return x + y
def double(f, *args, **kwargs):
return 2. * interceptable(f)(*args, **kwargs)
def set_y(f, *args, **kwargs):
if kwargs.get("name") == "y":
kwargs["value"] = 0.42
return interceptable(f)(*args, **kwargs)
with interception(double):
with interception(set_y):
z = model()
```
This will firstly put `double` on the stack, and then `set_y`,
resulting in the stack:
(TOP) set_y -> double -> apply (BOTTOM)
The execution of `model` is then (top lines are current stack state):
1) (TOP) set_y -> double -> apply (BOTTOM);
`ed.Normal(0., 1., "x")` is intercepted by `set_y`, and as the name is not "y"
the operation is simply forwarded to the next interceptor on the stack.
2) (TOP) double -> apply (BOTTOM);
`ed.Normal(0., 1., "x")` is intercepted by `double`, to produce
`2*ed.Normal(0., 1., "x")`, with the operation being forwarded down the stack.
3) (TOP) apply (BOTTOM);
`ed.Normal(0., 1., "x")` is intercepted by `apply`, which simply calls the
constructor.
(At this point, the nested calls to `get_next_interceptor()`, produced by
forwarding operations, exit, and the current stack is again:
(TOP) set_y -> double -> apply (BOTTOM))
4) (TOP) set_y -> double -> apply (BOTTOM);
`ed.Normal(0., 1., "y")` is intercepted by `set_y`,
the value of `y` is set to 0.42 and the operation is forwarded down the stack.
5) (TOP) double -> apply (BOTTOM);
`ed.Normal(0., 1., "y")` is intercepted by `double`, to produce
`2*ed.Normal(0., 1., "y")`, with the operation being forwarded down the stack.
6) (TOP) apply (BOTTOM);
`ed.Normal(0., 1., "y")` is intercepted by `apply`, which simply calls the
constructor.
The final values for `x` and `y` inside of `model()` are tensors where `x` is
a random draw from Normal(0., 1.) doubled, and `y` is a constant 0.84, thus
z = 2 * Normal(0., 1.) + 0.84.
"""
try:
interceptor = _interceptor_stack.stack.pop()
yield interceptor
finally:
_interceptor_stack.stack.append(interceptor)
|
def get_next_interceptor():
"""Yields the top-most interceptor on the thread-local interceptor stack.
Operations may be intercepted by multiple nested interceptors. Once reached,
an operation can be forwarded through nested interceptors until resolved.
To allow for nesting, implement interceptors by re-wrapping their first
argument (`f`) as an `interceptable`. To avoid nesting, manipulate the
computation without using `interceptable`.
This function allows for nesting by manipulating the thread-local interceptor
stack, so that operations are intercepted in the order of interceptor nesting.
#### Examples
```python
from tensorflow_probability import edward2 as ed
def model():
x = ed.Normal(loc=0., scale=1., name="x")
y = ed.Normal(loc=x, scale=1., name="y")
return x + y
def double(f, *args, **kwargs):
return 2. * interceptable(f)(*args, **kwargs)
def set_y(f, *args, **kwargs):
if kwargs.get("name") == "y":
kwargs["value"] = 0.42
return interceptable(f)(*args, **kwargs)
with interception(double):
with interception(set_y):
z = model()
```
This will firstly put `double` on the stack, and then `set_y`,
resulting in the stack:
(TOP) set_y -> double -> apply (BOTTOM)
The execution of `model` is then (top lines are current stack state):
1) (TOP) set_y -> double -> apply (BOTTOM);
`ed.Normal(0., 1., "x")` is intercepted by `set_y`, and as the name is not "y"
the operation is simply forwarded to the next interceptor on the stack.
2) (TOP) double -> apply (BOTTOM);
`ed.Normal(0., 1., "x")` is intercepted by `double`, to produce
`2*ed.Normal(0., 1., "x")`, with the operation being forwarded down the stack.
3) (TOP) apply (BOTTOM);
`ed.Normal(0., 1., "x")` is intercepted by `apply`, which simply calls the
constructor.
(At this point, the nested calls to `get_next_interceptor()`, produced by
forwarding operations, exit, and the current stack is again:
(TOP) set_y -> double -> apply (BOTTOM))
4) (TOP) set_y -> double -> apply (BOTTOM);
`ed.Normal(0., 1., "y")` is intercepted by `set_y`,
the value of `y` is set to 0.42 and the operation is forwarded down the stack.
5) (TOP) double -> apply (BOTTOM);
`ed.Normal(0., 1., "y")` is intercepted by `double`, to produce
`2*ed.Normal(0., 1., "y")`, with the operation being forwarded down the stack.
6) (TOP) apply (BOTTOM);
`ed.Normal(0., 1., "y")` is intercepted by `apply`, which simply calls the
constructor.
The final values for `x` and `y` inside of `model()` are tensors where `x` is
a random draw from Normal(0., 1.) doubled, and `y` is a constant 0.84, thus
z = 2 * Normal(0., 1.) + 0.84.
"""
try:
interceptor = _interceptor_stack.stack.pop()
yield interceptor
finally:
_interceptor_stack.stack.append(interceptor)
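The pop-then-re-append dance is what makes nesting unwind in stack order; a stripped-down, Edward2-free sketch of the same mechanics (all names hypothetical):

import contextlib

stack = ['apply', 'double', 'set_y']  # bottom -> top

@contextlib.contextmanager
def next_interceptor():
  interceptor = stack.pop()  # hide the top interceptor from nested lookups
  try:
    yield interceptor
  finally:
    stack.append(interceptor)  # restore it on exit, even on error

with next_interceptor() as outer:    # outer == 'set_y'
  with next_interceptor() as inner:  # inner == 'double'
    assert stack == ['apply']
assert stack == ['apply', 'double', 'set_y']  # fully restored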
|
[
"Yields",
"the",
"top",
"-",
"most",
"interceptor",
"on",
"the",
"thread",
"-",
"local",
"interceptor",
"stack",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/edward2/interceptor.py#L96-L172
|
[
"def",
"get_next_interceptor",
"(",
")",
":",
"try",
":",
"interceptor",
"=",
"_interceptor_stack",
".",
"stack",
".",
"pop",
"(",
")",
"yield",
"interceptor",
"finally",
":",
"_interceptor_stack",
".",
"stack",
".",
"append",
"(",
"interceptor",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
interceptable
|
Decorator that wraps `func` so that its execution is intercepted.
The wrapper passes `func` to the interceptor for the current thread.
If there is no next interceptor, we perform an "immediate" call to `func`.
That is, `func` terminates without forwarding its execution to another
interceptor.
Args:
func: Function to wrap.
Returns:
The decorated function.
|
tensorflow_probability/python/edward2/interceptor.py
|
def interceptable(func):
"""Decorator that wraps `func` so that its execution is intercepted.
The wrapper passes `func` to the interceptor for the current thread.
If there is no next interceptor, we perform an "immediate" call to `func`.
That is, `func` terminates without forwarding its execution to another
interceptor.
Args:
func: Function to wrap.
Returns:
The decorated function.
"""
@functools.wraps(func)
def func_wrapped(*args, **kwargs):
with get_next_interceptor() as interceptor:
return interceptor(func, *args, **kwargs)
return func_wrapped
|
def interceptable(func):
"""Decorator that wraps `func` so that its execution is intercepted.
The wrapper passes `func` to the interceptor for the current thread.
If there is no next interceptor, we perform an "immediate" call to `func`.
That is, `func` terminates without forwarding its execution to another
interceptor.
Args:
func: Function to wrap.
Returns:
The decorated function.
"""
@functools.wraps(func)
def func_wrapped(*args, **kwargs):
with get_next_interceptor() as interceptor:
return interceptor(func, *args, **kwargs)
return func_wrapped
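A self-contained mini-demo of the wrapping pattern, independent of Edward2 (the single-element interceptor list here is hypothetical and stands in for the thread-local stack):

import functools

_interceptors = [lambda f, *a, **k: f(*a, **k)]  # bottom-most: plain apply

def interceptable(func):
  @functools.wraps(func)
  def func_wrapped(*args, **kwargs):
    return _interceptors[-1](func, *args, **kwargs)
  return func_wrapped

@interceptable
def add(a, b):
  return a + b

assert add(1, 2) == 3  # routed through the top interceptor, then applied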
|
[
"Decorator",
"that",
"wraps",
"func",
"so",
"that",
"its",
"execution",
"is",
"intercepted",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/edward2/interceptor.py#L175-L195
|
[
"def",
"interceptable",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"func_wrapped",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"get_next_interceptor",
"(",
")",
"as",
"interceptor",
":",
"return",
"interceptor",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"func_wrapped"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
tape
|
Context manager for recording interceptable executions onto a tape.
Similar to `tf.GradientTape`, operations are recorded if they are executed
within this context manager. In addition, the operation must be registered
(wrapped) as `ed.interceptable`.
Yields:
tape: OrderedDict where operations are recorded in sequence. Keys are
the `name` keyword argument to the operation (typically, a random
variable's `name`) and values are the corresponding output of the
operation. If the operation has no name, it is not recorded.
#### Examples
```python
from tensorflow_probability import edward2 as ed
def probabilistic_matrix_factorization():
users = ed.Normal(0., 1., sample_shape=[5000, 128], name="users")
items = ed.Normal(0., 1., sample_shape=[7500, 128], name="items")
ratings = ed.Normal(loc=tf.matmul(users, items, transpose_b=True),
scale=0.1,
name="ratings")
return ratings
with ed.tape() as model_tape:
ratings = probabilistic_matrix_factorization()
assert model_tape["users"].shape == (5000, 128)
assert model_tape["items"].shape == (7500, 128)
assert model_tape["ratings"] == ratings
```
|
tensorflow_probability/python/edward2/interceptor.py
|
def tape():
"""Context manager for recording interceptable executions onto a tape.
Similar to `tf.GradientTape`, operations are recorded if they are executed
within this context manager. In addition, the operation must be registered
(wrapped) as `ed.interceptable`.
Yields:
tape: OrderedDict where operations are recorded in sequence. Keys are
the `name` keyword argument to the operation (typically, a random
variable's `name`) and values are the corresponding output of the
operation. If the operation has no name, it is not recorded.
#### Examples
```python
from tensorflow_probability import edward2 as ed
def probabilistic_matrix_factorization():
users = ed.Normal(0., 1., sample_shape=[5000, 128], name="users")
items = ed.Normal(0., 1., sample_shape=[7500, 128], name="items")
ratings = ed.Normal(loc=tf.matmul(users, items, transpose_b=True),
scale=0.1,
name="ratings")
return ratings
with ed.tape() as model_tape:
ratings = probabilistic_matrix_factorization()
assert model_tape["users"].shape == (5000, 128)
assert model_tape["items"].shape == (7500, 128)
assert model_tape["ratings"] == ratings
```
"""
tape_data = collections.OrderedDict({})
def record(f, *args, **kwargs):
"""Records execution to a tape."""
name = kwargs.get("name")
output = interceptable(f)(*args, **kwargs)
if name:
tape_data[name] = output
return output
with interception(record):
yield tape_data
|
def tape():
"""Context manager for recording interceptable executions onto a tape.
Similar to `tf.GradientTape`, operations are recorded if they are executed
within this context manager. In addition, the operation must be registered
(wrapped) as `ed.interceptable`.
Yields:
tape: OrderedDict where operations are recorded in sequence. Keys are
the `name` keyword argument to the operation (typically, a random
variable's `name`) and values are the corresponding output of the
operation. If the operation has no name, it is not recorded.
#### Examples
```python
from tensorflow_probability import edward2 as ed
def probabilistic_matrix_factorization():
users = ed.Normal(0., 1., sample_shape=[5000, 128], name="users")
items = ed.Normal(0., 1., sample_shape=[7500, 128], name="items")
ratings = ed.Normal(loc=tf.matmul(users, items, transpose_b=True),
scale=0.1,
name="ratings")
return ratings
with ed.tape() as model_tape:
ratings = probabilistic_matrix_factorization()
assert model_tape["users"].shape == (5000, 128)
assert model_tape["items"].shape == (7500, 128)
assert model_tape["ratings"] == ratings
```
"""
tape_data = collections.OrderedDict({})
def record(f, *args, **kwargs):
"""Records execution to a tape."""
name = kwargs.get("name")
output = interceptable(f)(*args, **kwargs)
if name:
tape_data[name] = output
return output
with interception(record):
yield tape_data
|
[
"Context",
"manager",
"for",
"recording",
"interceptable",
"executions",
"onto",
"a",
"tape",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/edward2/interceptor.py#L199-L245
|
[
"def",
"tape",
"(",
")",
":",
"tape_data",
"=",
"collections",
".",
"OrderedDict",
"(",
"{",
"}",
")",
"def",
"record",
"(",
"f",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Records execution to a tape.\"\"\"",
"name",
"=",
"kwargs",
".",
"get",
"(",
"\"name\"",
")",
"output",
"=",
"interceptable",
"(",
"f",
")",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"name",
":",
"tape_data",
"[",
"name",
"]",
"=",
"output",
"return",
"output",
"with",
"interception",
"(",
"record",
")",
":",
"yield",
"tape_data"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
toy_logistic_data
|
Generates synthetic data for binary classification.
Args:
num_examples: The number of samples to generate (scalar Python `int`).
input_size: The input space dimension (scalar Python `int`).
weights_prior_stddev: The prior standard deviation of the weight
vector. (scalar Python `float`).
Returns:
random_weights: Sampled weights as a Numpy `array` of shape
`[input_size]`.
random_bias: Sampled bias as a scalar Python `float`.
design_matrix: Points sampled uniformly from the cube `[-1,
1]^{input_size}`, as a Numpy `array` of shape `(num_examples,
input_size)`.
labels: Labels sampled from the logistic model `p(label=1) =
logistic(dot(features, random_weights) + random_bias)`, as a Numpy
`int32` `array` of shape `(num_examples, 1)`.
|
tensorflow_probability/examples/logistic_regression.py
|
def toy_logistic_data(num_examples, input_size=2, weights_prior_stddev=5.0):
"""Generates synthetic data for binary classification.
Args:
num_examples: The number of samples to generate (scalar Python `int`).
input_size: The input space dimension (scalar Python `int`).
weights_prior_stddev: The prior standard deviation of the weight
vector (scalar Python `float`).
Returns:
random_weights: Sampled weights as a Numpy `array` of shape
`[input_size]`.
random_bias: Sampled bias as a scalar Python `float`.
design_matrix: Points sampled uniformly from the cube `[-1,
1]^{input_size}`, as a Numpy `array` of shape `(num_examples,
input_size)`.
labels: Labels sampled from the logistic model `p(label=1) =
logistic(dot(features, random_weights) + random_bias)`, as a Numpy
`int32` `array` of shape `(num_examples, 1)`.
"""
random_weights = weights_prior_stddev * np.random.randn(input_size)
random_bias = np.random.randn()
design_matrix = np.random.rand(num_examples, input_size) * 2 - 1
logits = np.reshape(
np.dot(design_matrix, random_weights) + random_bias,
(-1, 1))
p_labels = 1. / (1 + np.exp(-logits))
labels = np.int32(p_labels > np.random.rand(num_examples, 1))
return random_weights, random_bias, np.float32(design_matrix), labels
|
def toy_logistic_data(num_examples, input_size=2, weights_prior_stddev=5.0):
"""Generates synthetic data for binary classification.
Args:
num_examples: The number of samples to generate (scalar Python `int`).
input_size: The input space dimension (scalar Python `int`).
weights_prior_stddev: The prior standard deviation of the weight
vector (scalar Python `float`).
Returns:
random_weights: Sampled weights as a Numpy `array` of shape
`[input_size]`.
random_bias: Sampled bias as a scalar Python `float`.
design_matrix: Points sampled uniformly from the cube `[-1,
1]^{input_size}`, as a Numpy `array` of shape `(num_examples,
input_size)`.
labels: Labels sampled from the logistic model `p(label=1) =
logistic(dot(features, random_weights) + random_bias)`, as a Numpy
`int32` `array` of shape `(num_examples, 1)`.
"""
random_weights = weights_prior_stddev * np.random.randn(input_size)
random_bias = np.random.randn()
design_matrix = np.random.rand(num_examples, input_size) * 2 - 1
logits = np.reshape(
np.dot(design_matrix, random_weights) + random_bias,
(-1, 1))
p_labels = 1. / (1 + np.exp(-logits))
labels = np.int32(p_labels > np.random.rand(num_examples, 1))
return random_weights, random_bias, np.float32(design_matrix), labels
|
[
"Generates",
"synthetic",
"data",
"for",
"binary",
"classification",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/logistic_regression.py#L58-L86
|
[
"def",
"toy_logistic_data",
"(",
"num_examples",
",",
"input_size",
"=",
"2",
",",
"weights_prior_stddev",
"=",
"5.0",
")",
":",
"random_weights",
"=",
"weights_prior_stddev",
"*",
"np",
".",
"random",
".",
"randn",
"(",
"input_size",
")",
"random_bias",
"=",
"np",
".",
"random",
".",
"randn",
"(",
")",
"design_matrix",
"=",
"np",
".",
"random",
".",
"rand",
"(",
"num_examples",
",",
"input_size",
")",
"*",
"2",
"-",
"1",
"logits",
"=",
"np",
".",
"reshape",
"(",
"np",
".",
"dot",
"(",
"design_matrix",
",",
"random_weights",
")",
"+",
"random_bias",
",",
"(",
"-",
"1",
",",
"1",
")",
")",
"p_labels",
"=",
"1.",
"/",
"(",
"1",
"+",
"np",
".",
"exp",
"(",
"-",
"logits",
")",
")",
"labels",
"=",
"np",
".",
"int32",
"(",
"p_labels",
">",
"np",
".",
"random",
".",
"rand",
"(",
"num_examples",
",",
"1",
")",
")",
"return",
"random_weights",
",",
"random_bias",
",",
"np",
".",
"float32",
"(",
"design_matrix",
")",
",",
"labels"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
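A hypothetical usage sketch for `toy_logistic_data`, checking the shapes and dtype promised by the docstring; it assumes the function and NumPy are in scope.
```python
import numpy as np

np.random.seed(0)  # make the synthetic draw reproducible
weights, bias, features, labels = toy_logistic_data(
    num_examples=100, input_size=2)

assert weights.shape == (2,)       # sampled weight vector
assert features.shape == (100, 2)  # points in [-1, 1]^2, float32
assert labels.shape == (100, 1)
assert labels.dtype == np.int32
print("positive fraction:", labels.mean())
```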
|
test
|
visualize_decision
|
Utility method to visualize decision boundaries in R^2.
Args:
features: Input points, as a Numpy `array` of shape `[num_examples, 2]`.
labels: Numpy `float`-like array of shape `[num_examples, 1]` giving a
label for each point.
true_w_b: A `tuple` `(w, b)` where `w` is a Numpy array of
shape `[2]` and `b` is a scalar `float`, interpreted as a
decision rule of the form `dot(features, w) + b > 0`.
candidate_w_bs: Python `iterable` containing tuples of the same form as
true_w_b.
fname: The filename to save the plot as a PNG image (Python `str`).
|
tensorflow_probability/examples/logistic_regression.py
|
def visualize_decision(features, labels, true_w_b, candidate_w_bs, fname):
"""Utility method to visualize decision boundaries in R^2.
Args:
features: Input points, as a Numpy `array` of shape `[num_examples, 2]`.
labels: Numpy `float`-like array of shape `[num_examples, 1]` giving a
label for each point.
true_w_b: A `tuple` `(w, b)` where `w` is a Numpy array of
shape `[2]` and `b` is a scalar `float`, interpreted as a
decision rule of the form `dot(features, w) + b > 0`.
candidate_w_bs: Python `iterable` containing tuples of the same form as
true_w_b.
fname: The filename to save the plot as a PNG image (Python `str`).
"""
fig = figure.Figure(figsize=(6, 6))
canvas = backend_agg.FigureCanvasAgg(fig)
ax = fig.add_subplot(1, 1, 1)
ax.scatter(features[:, 0], features[:, 1],
c=np.float32(labels[:, 0]),
cmap=cm.get_cmap("binary"),
edgecolors="k")
def plot_weights(w, b, **kwargs):
w1, w2 = w
x1s = np.linspace(-1, 1, 100)
x2s = -(w1 * x1s + b) / w2
ax.plot(x1s, x2s, **kwargs)
for w, b in candidate_w_bs:
plot_weights(w, b,
alpha=1./np.sqrt(len(candidate_w_bs)),
lw=1, color="blue")
if true_w_b is not None:
plot_weights(*true_w_b, lw=4,
color="green", label="true separator")
ax.set_xlim([-1.5, 1.5])
ax.set_ylim([-1.5, 1.5])
ax.legend()
canvas.print_figure(fname, format="png")
print("saved {}".format(fname))
|
def visualize_decision(features, labels, true_w_b, candidate_w_bs, fname):
"""Utility method to visualize decision boundaries in R^2.
Args:
features: Input points, as a Numpy `array` of shape `[num_examples, 2]`.
labels: Numpy `float`-like array of shape `[num_examples, 1]` giving a
label for each point.
true_w_b: A `tuple` `(w, b)` where `w` is a Numpy array of
shape `[2]` and `b` is a scalar `float`, interpreted as a
decision rule of the form `dot(features, w) + b > 0`.
candidate_w_bs: Python `iterable` containing tuples of the same form as
true_w_b.
fname: The filename to save the plot as a PNG image (Python `str`).
"""
fig = figure.Figure(figsize=(6, 6))
canvas = backend_agg.FigureCanvasAgg(fig)
ax = fig.add_subplot(1, 1, 1)
ax.scatter(features[:, 0], features[:, 1],
c=np.float32(labels[:, 0]),
cmap=cm.get_cmap("binary"),
edgecolors="k")
def plot_weights(w, b, **kwargs):
w1, w2 = w
x1s = np.linspace(-1, 1, 100)
x2s = -(w1 * x1s + b) / w2
ax.plot(x1s, x2s, **kwargs)
for w, b in candidate_w_bs:
plot_weights(w, b,
alpha=1./np.sqrt(len(candidate_w_bs)),
lw=1, color="blue")
if true_w_b is not None:
plot_weights(*true_w_b, lw=4,
color="green", label="true separator")
ax.set_xlim([-1.5, 1.5])
ax.set_ylim([-1.5, 1.5])
ax.legend()
canvas.print_figure(fname, format="png")
print("saved {}".format(fname))
|
[
"Utility",
"method",
"to",
"visualize",
"decision",
"boundaries",
"in",
"R^2",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/logistic_regression.py#L89-L131
|
[
"def",
"visualize_decision",
"(",
"features",
",",
"labels",
",",
"true_w_b",
",",
"candidate_w_bs",
",",
"fname",
")",
":",
"fig",
"=",
"figure",
".",
"Figure",
"(",
"figsize",
"=",
"(",
"6",
",",
"6",
")",
")",
"canvas",
"=",
"backend_agg",
".",
"FigureCanvasAgg",
"(",
"fig",
")",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"1",
",",
"1",
",",
"1",
")",
"ax",
".",
"scatter",
"(",
"features",
"[",
":",
",",
"0",
"]",
",",
"features",
"[",
":",
",",
"1",
"]",
",",
"c",
"=",
"np",
".",
"float32",
"(",
"labels",
"[",
":",
",",
"0",
"]",
")",
",",
"cmap",
"=",
"cm",
".",
"get_cmap",
"(",
"\"binary\"",
")",
",",
"edgecolors",
"=",
"\"k\"",
")",
"def",
"plot_weights",
"(",
"w",
",",
"b",
",",
"*",
"*",
"kwargs",
")",
":",
"w1",
",",
"w2",
"=",
"w",
"x1s",
"=",
"np",
".",
"linspace",
"(",
"-",
"1",
",",
"1",
",",
"100",
")",
"x2s",
"=",
"-",
"(",
"w1",
"*",
"x1s",
"+",
"b",
")",
"/",
"w2",
"ax",
".",
"plot",
"(",
"x1s",
",",
"x2s",
",",
"*",
"*",
"kwargs",
")",
"for",
"w",
",",
"b",
"in",
"candidate_w_bs",
":",
"plot_weights",
"(",
"w",
",",
"b",
",",
"alpha",
"=",
"1.",
"/",
"np",
".",
"sqrt",
"(",
"len",
"(",
"candidate_w_bs",
")",
")",
",",
"lw",
"=",
"1",
",",
"color",
"=",
"\"blue\"",
")",
"if",
"true_w_b",
"is",
"not",
"None",
":",
"plot_weights",
"(",
"*",
"true_w_b",
",",
"lw",
"=",
"4",
",",
"color",
"=",
"\"green\"",
",",
"label",
"=",
"\"true separator\"",
")",
"ax",
".",
"set_xlim",
"(",
"[",
"-",
"1.5",
",",
"1.5",
"]",
")",
"ax",
".",
"set_ylim",
"(",
"[",
"-",
"1.5",
",",
"1.5",
"]",
")",
"ax",
".",
"legend",
"(",
")",
"canvas",
".",
"print_figure",
"(",
"fname",
",",
"format",
"=",
"\"png\"",
")",
"print",
"(",
"\"saved {}\"",
".",
"format",
"(",
"fname",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
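A hypothetical sketch pairing `visualize_decision` with `toy_logistic_data`: plot the true separator plus one jittered candidate. It assumes matplotlib is installed (the function draws on the Agg backend), both functions are in scope, and the output path is illustrative.
```python
import numpy as np

weights, bias, features, labels = toy_logistic_data(
    num_examples=200, input_size=2)
# One perturbed copy of the true rule, standing in for posterior draws.
candidates = [(weights + 0.1 * np.random.randn(2), bias)]
visualize_decision(features, labels,
                   true_w_b=(weights, bias),
                   candidate_w_bs=candidates,
                   fname="/tmp/decision.png")  # prints: saved /tmp/decision.png
```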
|
test
|
build_input_pipeline
|
Build a Dataset iterator for supervised classification.
Args:
x: Numpy `array` of features, indexed by the first dimension.
y: Numpy `array` of labels, with the same first dimension as `x`.
batch_size: Number of elements in each training batch.
Returns:
batch_features: `Tensor` feed of features, of shape
`[batch_size] + x.shape[1:]`.
batch_labels: `Tensor` feed of labels, of shape
`[batch_size] + y.shape[1:]`.
|
tensorflow_probability/examples/logistic_regression.py
|
def build_input_pipeline(x, y, batch_size):
"""Build a Dataset iterator for supervised classification.
Args:
x: Numpy `array` of features, indexed by the first dimension.
y: Numpy `array` of labels, with the same first dimension as `x`.
batch_size: Number of elements in each training batch.
Returns:
batch_features: `Tensor` feed of features, of shape
`[batch_size] + x.shape[1:]`.
batch_labels: `Tensor` feed of labels, of shape
`[batch_size] + y.shape[1:]`.
"""
training_dataset = tf.data.Dataset.from_tensor_slices((x, y))
training_batches = training_dataset.repeat().batch(batch_size)
training_iterator = tf.compat.v1.data.make_one_shot_iterator(training_batches)
batch_features, batch_labels = training_iterator.get_next()
return batch_features, batch_labels
|
def build_input_pipeline(x, y, batch_size):
"""Build a Dataset iterator for supervised classification.
Args:
x: Numpy `array` of features, indexed by the first dimension.
y: Numpy `array` of labels, with the same first dimension as `x`.
batch_size: Number of elements in each training batch.
Returns:
batch_features: `Tensor` feed of features, of shape
`[batch_size] + x.shape[1:]`.
batch_labels: `Tensor` feed of labels, of shape
`[batch_size] + y.shape[1:]`.
"""
training_dataset = tf.data.Dataset.from_tensor_slices((x, y))
training_batches = training_dataset.repeat().batch(batch_size)
training_iterator = tf.compat.v1.data.make_one_shot_iterator(training_batches)
batch_features, batch_labels = training_iterator.get_next()
return batch_features, batch_labels
|
[
"Build",
"a",
"Dataset",
"iterator",
"for",
"supervised",
"classification",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/logistic_regression.py#L134-L152
|
[
"def",
"build_input_pipeline",
"(",
"x",
",",
"y",
",",
"batch_size",
")",
":",
"training_dataset",
"=",
"tf",
".",
"data",
".",
"Dataset",
".",
"from_tensor_slices",
"(",
"(",
"x",
",",
"y",
")",
")",
"training_batches",
"=",
"training_dataset",
".",
"repeat",
"(",
")",
".",
"batch",
"(",
"batch_size",
")",
"training_iterator",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"data",
".",
"make_one_shot_iterator",
"(",
"training_batches",
")",
"batch_features",
",",
"batch_labels",
"=",
"training_iterator",
".",
"get_next",
"(",
")",
"return",
"batch_features",
",",
"batch_labels"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
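A hypothetical usage sketch for `build_input_pipeline` with small in-memory arrays. Under TF 2.x eager execution the `tf.compat.v1` one-shot iterator yields concrete tensors; under TF 1.x graph mode the returns are symbolic and must be run in a session.
```python
import numpy as np
import tensorflow as tf

x = np.random.rand(10, 3).astype(np.float32)
y = np.random.randint(0, 2, size=(10, 1)).astype(np.int32)

batch_x, batch_y = build_input_pipeline(x, y, batch_size=4)
print(batch_x.shape)  # (4, 3)
print(batch_y.shape)  # (4, 1)
```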
|
test
|
_maybe_check_valid_map_values
|
Validate `map_values` if `validate_args`==True.
|
tensorflow_probability/python/bijectors/categorical_to_discrete.py
|
def _maybe_check_valid_map_values(map_values, validate_args):
"""Validate `map_values` if `validate_args`==True."""
assertions = []
message = 'Rank of map_values must be 1.'
if tensorshape_util.rank(map_values.shape) is not None:
if tensorshape_util.rank(map_values.shape) != 1:
raise ValueError(message)
elif validate_args:
assertions.append(assert_util.assert_rank(map_values, 1, message=message))
message = 'Size of map_values must be greater than 0.'
if tensorshape_util.num_elements(map_values.shape) is not None:
if tensorshape_util.num_elements(map_values.shape) == 0:
raise ValueError(message)
elif validate_args:
assertions.append(
assert_util.assert_greater(
tf.size(input=map_values), 0, message=message))
if validate_args:
assertions.append(
assert_util.assert_equal(
tf.math.is_strictly_increasing(map_values),
True,
message='map_values is not strictly increasing.'))
return assertions
|
def _maybe_check_valid_map_values(map_values, validate_args):
"""Validate `map_values` if `validate_args`==True."""
assertions = []
message = 'Rank of map_values must be 1.'
if tensorshape_util.rank(map_values.shape) is not None:
if tensorshape_util.rank(map_values.shape) != 1:
raise ValueError(message)
elif validate_args:
assertions.append(assert_util.assert_rank(map_values, 1, message=message))
message = 'Size of map_values must be greater than 0.'
if tensorshape_util.num_elements(map_values.shape) is not None:
if tensorshape_util.num_elements(map_values.shape) == 0:
raise ValueError(message)
elif validate_args:
assertions.append(
assert_util.assert_greater(
tf.size(input=map_values), 0, message=message))
if validate_args:
assertions.append(
assert_util.assert_equal(
tf.math.is_strictly_increasing(map_values),
True,
message='map_values is not strictly increasing.'))
return assertions
|
[
"Validate",
"map_values",
"if",
"validate_args",
"==",
"True",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/categorical_to_discrete.py#L127-L154
|
[
"def",
"_maybe_check_valid_map_values",
"(",
"map_values",
",",
"validate_args",
")",
":",
"assertions",
"=",
"[",
"]",
"message",
"=",
"'Rank of map_values must be 1.'",
"if",
"tensorshape_util",
".",
"rank",
"(",
"map_values",
".",
"shape",
")",
"is",
"not",
"None",
":",
"if",
"tensorshape_util",
".",
"rank",
"(",
"map_values",
".",
"shape",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"message",
")",
"elif",
"validate_args",
":",
"assertions",
".",
"append",
"(",
"assert_util",
".",
"assert_rank",
"(",
"map_values",
",",
"1",
",",
"message",
"=",
"message",
")",
")",
"message",
"=",
"'Size of map_values must be greater than 0.'",
"if",
"tensorshape_util",
".",
"num_elements",
"(",
"map_values",
".",
"shape",
")",
"is",
"not",
"None",
":",
"if",
"tensorshape_util",
".",
"num_elements",
"(",
"map_values",
".",
"shape",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"message",
")",
"elif",
"validate_args",
":",
"assertions",
".",
"append",
"(",
"assert_util",
".",
"assert_greater",
"(",
"tf",
".",
"size",
"(",
"input",
"=",
"map_values",
")",
",",
"0",
",",
"message",
"=",
"message",
")",
")",
"if",
"validate_args",
":",
"assertions",
".",
"append",
"(",
"assert_util",
".",
"assert_equal",
"(",
"tf",
".",
"math",
".",
"is_strictly_increasing",
"(",
"map_values",
")",
",",
"True",
",",
"message",
"=",
"'map_values is not strictly increasing.'",
")",
")",
"return",
"assertions"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
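A behavior sketch for `_maybe_check_valid_map_values` in TF 2.x eager mode: statically detectable violations raise immediately even with `validate_args=False`, while the strictly-increasing check is only appended (and, eagerly, executed) when `validate_args=True`.
```python
import tensorflow as tf

ok = tf.constant([0.1, 0.5, 0.9])
assert _maybe_check_valid_map_values(ok, validate_args=False) == []
# With validation on, the strictly-increasing assertion is appended.
assert len(_maybe_check_valid_map_values(ok, validate_args=True)) == 1

try:
    _maybe_check_valid_map_values(tf.zeros([2, 2]), validate_args=False)
except ValueError as e:
    print(e)  # Rank of map_values must be 1.
```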
|
test
|
trace
|
`TransitionOperator` that runs `fn` repeatedly and traces its outputs.
Args:
state: A nest of `Tensor`s or None.
fn: A `TransitionOperator`.
num_steps: Number of steps to run the function for. Must be greater than 1.
trace_fn: Callable that takes the unpacked outputs of `fn` and returns a
nest of `Tensor`s. These will be stacked and returned.
Returns:
state: The final state returned by `fn`.
traces: Stacked outputs of `trace_fn`.
|
experimental/fun_mcmc/fun_mcmc_lib.py
|
def trace(state: State, fn: TransitionOperator, num_steps: IntTensor,
trace_fn: Callable[[State, TensorNest], TensorNest]
) -> Tuple[State, TensorNest]:
"""`TransitionOperator` that runs `fn` repeatedly and traces its outputs.
Args:
state: A nest of `Tensor`s or None.
fn: A `TransitionOperator`.
num_steps: Number of steps to run the function for. Must be greater than 1.
trace_fn: Callable that takes the unpacked outputs of `fn` and returns a
nest of `Tensor`s. These will be stacked and returned.
Returns:
state: The final state returned by `fn`.
traces: Stacked outputs of `trace_fn`.
"""
def fn_wrapper(args, _):
return tf.nest.map_structure(tf.convert_to_tensor, call_fn(fn, args[0]))
def trace_fn_wrapper(args):
return tf.nest.map_structure(tf.convert_to_tensor, call_fn(trace_fn, args))
state = call_fn(fn, state)
first_trace = trace_fn_wrapper(state)
state, full_trace = mcmc_util.trace_scan(
fn_wrapper, state, tf.ones(num_steps - 1), trace_fn=trace_fn_wrapper)
prepend = lambda x, y: tf.concat( # pylint: disable=g-long-lambda
[tf.convert_to_tensor(value=x)[tf.newaxis], y], 0)
return state, tf.nest.map_structure(prepend, first_trace, full_trace)
|
def trace(state: State, fn: TransitionOperator, num_steps: IntTensor,
trace_fn: Callable[[State, TensorNest], TensorNest]
) -> Tuple[State, TensorNest]:
"""`TransitionOperator` that runs `fn` repeatedly and traces its outputs.
Args:
state: A nest of `Tensor`s or None.
fn: A `TransitionOperator`.
num_steps: Number of steps to run the function for. Must be greater than 1.
trace_fn: Callable that takes the unpacked outputs of `fn` and returns a
nest of `Tensor`s. These will be stacked and returned.
Returns:
state: The final state returned by `fn`.
traces: Stacked outputs of `trace_fn`.
"""
def fn_wrapper(args, _):
return tf.nest.map_structure(tf.convert_to_tensor, call_fn(fn, args[0]))
def trace_fn_wrapper(args):
return tf.nest.map_structure(tf.convert_to_tensor, call_fn(trace_fn, args))
state = call_fn(fn, state)
first_trace = trace_fn_wrapper(state)
state, full_trace = mcmc_util.trace_scan(
fn_wrapper, state, tf.ones(num_steps - 1), trace_fn=trace_fn_wrapper)
prepend = lambda x, y: tf.concat( # pylint: disable=g-long-lambda
[tf.convert_to_tensor(value=x)[tf.newaxis], y], 0)
return state, tf.nest.map_structure(prepend, first_trace, full_trace)
|
[
"TransitionOperator",
"that",
"runs",
"fn",
"repeatedly",
"and",
"traces",
"its",
"outputs",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/fun_mcmc/fun_mcmc_lib.py#L87-L119
|
[
"def",
"trace",
"(",
"state",
":",
"State",
",",
"fn",
":",
"TransitionOperator",
",",
"num_steps",
":",
"IntTensor",
",",
"trace_fn",
":",
"Callable",
"[",
"[",
"State",
",",
"TensorNest",
"]",
",",
"TensorNest",
"]",
")",
"->",
"Tuple",
"[",
"State",
",",
"TensorNest",
"]",
":",
"def",
"fn_wrapper",
"(",
"args",
",",
"_",
")",
":",
"return",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"tf",
".",
"convert_to_tensor",
",",
"call_fn",
"(",
"fn",
",",
"args",
"[",
"0",
"]",
")",
")",
"def",
"trace_fn_wrapper",
"(",
"args",
")",
":",
"return",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"tf",
".",
"convert_to_tensor",
",",
"call_fn",
"(",
"trace_fn",
",",
"args",
")",
")",
"state",
"=",
"call_fn",
"(",
"fn",
",",
"state",
")",
"first_trace",
"=",
"trace_fn_wrapper",
"(",
"state",
")",
"state",
",",
"full_trace",
"=",
"mcmc_util",
".",
"trace_scan",
"(",
"fn_wrapper",
",",
"state",
",",
"tf",
".",
"ones",
"(",
"num_steps",
"-",
"1",
")",
",",
"trace_fn",
"=",
"trace_fn_wrapper",
")",
"prepend",
"=",
"lambda",
"x",
",",
"y",
":",
"tf",
".",
"concat",
"(",
"# pylint: disable=g-long-lambda",
"[",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
")",
"[",
"tf",
".",
"newaxis",
"]",
",",
"y",
"]",
",",
"0",
")",
"return",
"state",
",",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"prepend",
",",
"first_trace",
",",
"full_trace",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
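A hypothetical usage sketch for `trace` in TF 2.x eager mode: a scalar counter advanced for five steps. The kernel follows the `(state, extra)` `TransitionOperator` convention used throughout this module, so the returned `final` is the last `(state, extra)` pair.
```python
def kernel(x):
    return x + 1., ()  # (new_state, extra)

final, counts = trace(
    state=0.,
    fn=kernel,
    num_steps=5,
    trace_fn=lambda x, extra: x)
print(final[0])  # 5.0
print(counts)    # stacked trace: [1. 2. 3. 4. 5.]
```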
|
test
|
call_fn
|
Calls a transition operator with args, unpacking args if it's a sequence.
Args:
fn: A `TransitionOperator`.
args: Arguments to `fn`.
Returns:
ret: Return value of `fn`.
|
experimental/fun_mcmc/fun_mcmc_lib.py
|
def call_fn(fn: TransitionOperator, args: Union[Tuple[Any], Any]) -> Any:
"""Calls a transition operator with args, unpacking args if its a sequence.
Args:
fn: A `TransitionOperator`.
args: Arguments to `fn`
Returns:
ret: Return value of `fn`.
"""
if isinstance(args, (list, tuple)) and not mcmc_util.is_namedtuple_like(args):
args = args # type: Tuple[Any]
return fn(*args)
else:
return fn(args)
|
def call_fn(fn: TransitionOperator, args: Union[Tuple[Any], Any]) -> Any:
"""Calls a transition operator with args, unpacking args if its a sequence.
Args:
fn: A `TransitionOperator`.
args: Arguments to `fn`
Returns:
ret: Return value of `fn`.
"""
if isinstance(args, (list, tuple)) and not mcmc_util.is_namedtuple_like(args):
args = args # type: Tuple[Any]
return fn(*args)
else:
return fn(args)
|
[
"Calls",
"a",
"transition",
"operator",
"with",
"args",
"unpacking",
"args",
"if",
"its",
"a",
"sequence",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/fun_mcmc/fun_mcmc_lib.py#L122-L137
|
[
"def",
"call_fn",
"(",
"fn",
":",
"TransitionOperator",
",",
"args",
":",
"Union",
"[",
"Tuple",
"[",
"Any",
"]",
",",
"Any",
"]",
")",
"->",
"Any",
":",
"if",
"isinstance",
"(",
"args",
",",
"(",
"list",
",",
"tuple",
")",
")",
"and",
"not",
"mcmc_util",
".",
"is_namedtuple_like",
"(",
"args",
")",
":",
"args",
"=",
"args",
"# type: Tuple[Any]",
"return",
"fn",
"(",
"*",
"args",
")",
"else",
":",
"return",
"fn",
"(",
"args",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
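A minimal behavior sketch for `call_fn`: plain tuples and lists are splatted into `fn`, while namedtuples (which `mcmc_util.is_namedtuple_like` detects) and non-sequences are passed through whole.
```python
import collections

add = lambda a, b: a + b
assert call_fn(add, (1, 2)) == 3         # tuple -> add(1, 2)

Point = collections.namedtuple("Point", ["a", "b"])
area = lambda p: p.a * p.b
assert call_fn(area, Point(3, 4)) == 12  # namedtuple -> area(point)
```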
|
test
|
call_and_grads
|
Calls `fn` and returns the gradients with respect to `fn`'s first output.
Args:
fn: A `TransitionOperator`.
args: Arguments to `fn`.
Returns:
ret: First output of `fn`.
extra: Second output of `fn`.
grads: Gradients of `ret` with respect to `args`.
|
experimental/fun_mcmc/fun_mcmc_lib.py
|
def call_and_grads(fn: TransitionOperator, args: Union[Tuple[Any], Any]
) -> Tuple[tf.Tensor, TensorNest, TensorNest]:
"""Calls `fn` and returns the gradients with respect to `fn`'s first output.
Args:
fn: A `TransitionOperator`.
args: Arguments to `fn`.
Returns:
ret: First output of `fn`.
extra: Second output of `fn`.
grads: Gradients of `ret` with respect to `args`.
"""
with tf.GradientTape() as tape:
tape.watch(args)
ret, extra = call_fn(fn, args)
grads = tape.gradient(ret, args)
return ret, extra, grads
|
def call_and_grads(fn: TransitionOperator, args: Union[Tuple[Any], Any]
) -> Tuple[tf.Tensor, TensorNest, TensorNest]:
"""Calls `fn` and returns the gradients with respect to `fn`'s first output.
Args:
fn: A `TransitionOperator`.
args: Arguments to `fn`
Returns:
ret: First output of `fn`.
extra: Second output of `fn`.
grads: Gradients of `ret` with respect to `args`.
"""
with tf.GradientTape() as tape:
tape.watch(args)
ret, extra = call_fn(fn, args)
grads = tape.gradient(ret, args)
return ret, extra, grads
|
[
"Calls",
"fn",
"and",
"returns",
"the",
"gradients",
"with",
"respect",
"to",
"fn",
"s",
"first",
"output",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/fun_mcmc/fun_mcmc_lib.py#L140-L157
|
[
"def",
"call_and_grads",
"(",
"fn",
":",
"TransitionOperator",
",",
"args",
":",
"Union",
"[",
"Tuple",
"[",
"Any",
"]",
",",
"Any",
"]",
")",
"->",
"Tuple",
"[",
"tf",
".",
"Tensor",
",",
"TensorNest",
",",
"TensorNest",
"]",
":",
"with",
"tf",
".",
"GradientTape",
"(",
")",
"as",
"tape",
":",
"tape",
".",
"watch",
"(",
"args",
")",
"ret",
",",
"extra",
"=",
"call_fn",
"(",
"fn",
",",
"args",
")",
"grads",
"=",
"tape",
".",
"gradient",
"(",
"ret",
",",
"args",
")",
"return",
"ret",
",",
"extra",
",",
"grads"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
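A hypothetical usage sketch for `call_and_grads` in TF 2.x eager mode, differentiating a quadratic potential that follows the `(value, extra)` convention.
```python
import tensorflow as tf

def potential(x):
    return tf.reduce_sum(x ** 2), ()

x = tf.constant([1., 2.])
value, extra, grads = call_and_grads(potential, x)
print(value.numpy())  # 5.0
print(grads.numpy())  # [2. 4.]
```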
|
test
|
maybe_broadcast_structure
|
Maybe broadcasts `from_structure` to `to_structure`.
If `from_structure` is a singleton, it is tiled to match the structure of
`to_structure`. Note that the elements in `from_structure` are not copied if
this tiling occurs.
Args:
from_structure: A structure.
to_structure: A structure.
Returns:
new_from_structure: Same structure as `to_structure`.
|
experimental/fun_mcmc/fun_mcmc_lib.py
|
def maybe_broadcast_structure(from_structure: Any, to_structure: Any) -> Any:
"""Maybe broadcasts `from_structure` to `to_structure`.
If `from_structure` is a singleton, it is tiled to match the structure of
`to_structure`. Note that the elements in `from_structure` are not copied if
this tiling occurs.
Args:
from_structure: A structure.
to_structure: A structure.
Returns:
new_from_structure: Same structure as `to_structure`.
"""
flat_from = tf.nest.flatten(from_structure)
flat_to = tf.nest.flatten(to_structure)
if len(flat_from) == 1:
flat_from *= len(flat_to)
return tf.nest.pack_sequence_as(to_structure, flat_from)
|
def maybe_broadcast_structure(from_structure: Any, to_structure: Any) -> Any:
"""Maybe broadcasts `from_structure` to `to_structure`.
If `from_structure` is a singleton, it is tiled to match the structure of
`to_structure`. Note that the elements in `from_structure` are not copied if
this tiling occurs.
Args:
from_structure: A structure.
to_structure: A structure.
Returns:
new_from_structure: Same structure as `to_structure`.
"""
flat_from = tf.nest.flatten(from_structure)
flat_to = tf.nest.flatten(to_structure)
if len(flat_from) == 1:
flat_from *= len(flat_to)
return tf.nest.pack_sequence_as(to_structure, flat_from)
|
[
"Maybe",
"broadcasts",
"from_structure",
"to",
"to_structure",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/fun_mcmc/fun_mcmc_lib.py#L160-L178
|
[
"def",
"maybe_broadcast_structure",
"(",
"from_structure",
":",
"Any",
",",
"to_structure",
":",
"Any",
")",
"->",
"Any",
":",
"flat_from",
"=",
"tf",
".",
"nest",
".",
"flatten",
"(",
"from_structure",
")",
"flat_to",
"=",
"tf",
".",
"nest",
".",
"flatten",
"(",
"to_structure",
")",
"if",
"len",
"(",
"flat_from",
")",
"==",
"1",
":",
"flat_from",
"*=",
"len",
"(",
"flat_to",
")",
"return",
"tf",
".",
"nest",
".",
"pack_sequence_as",
"(",
"to_structure",
",",
"flat_from",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
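A minimal behavior sketch for `maybe_broadcast_structure`, assuming the function is in scope: a singleton is tiled to the target structure (sharing, not copying, the element), while an already matching structure passes through unchanged.
```python
assert maybe_broadcast_structure(0.1, [0., 0.]) == [0.1, 0.1]
assert maybe_broadcast_structure([0.1, 0.2], [0., 0.]) == [0.1, 0.2]
```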
|
test
|
transform_log_prob_fn
|
Transforms a log-prob function using a bijector.
This takes a log-prob function and creates a new log-prob function that now
takes state in the domain of the bijector, forward-transforms that state
and calls the original log-prob function. It then returns the log-probability
that correctly accounts for this transformation.
The forward-transformed state is pre-pended to the original log-prob
function's extra returns and returned as the new extra return.
For convenience you can also pass the initial state (in the original space),
and this function will return the inverse-transformed state as the second return value.
You'd use this to initialize MCMC operators that operate in the transformed
space.
Args:
log_prob_fn: Log prob fn.
bijector: Bijector(s), must be of the same structure as the `log_prob_fn`
inputs.
init_state: Initial state, in the original space.
Returns:
transformed_log_prob_fn: Transformed log prob fn.
transformed_init_state: If `init_state` is provided. Initial state in the
transformed space.
|
experimental/fun_mcmc/fun_mcmc_lib.py
|
def transform_log_prob_fn(log_prob_fn: PotentialFn,
bijector: BijectorNest,
init_state: State = None
) -> Union[PotentialFn, Tuple[PotentialFn, State]]:
"""Transforms a log-prob function using a bijector.
This takes a log-prob function and creates a new log-prob function that now
takes state in the domain of the bijector, forward-transforms that state
and calls the original log-prob function. It then returns the log-probability
that correctly accounts for this transformation.
The forward-transformed state is pre-pended to the original log-prob
function's extra returns and returned as the new extra return.
For convenience you can also pass the initial state (in the original space),
and this function will return the inverse-transformed state as the second return value.
You'd use this to initialize MCMC operators that operate in the transformed
space.
Args:
log_prob_fn: Log prob fn.
bijector: Bijector(s), must be of the same structure as the `log_prob_fn`
inputs.
init_state: Initial state, in the original space.
Returns:
transformed_log_prob_fn: Transformed log prob fn.
transformed_init_state: If `init_state` is provided. Initial state in the
transformed space.
"""
def wrapper(*args):
"""Transformed wrapper."""
bijector_ = bijector
args = tf.nest.map_structure(lambda x: 0. + x, args)
if len(args) == 1:
args = args[0]
elif isinstance(bijector_, list):
bijector_ = tuple(bijector_)
original_space_args = tf.nest.map_structure(lambda b, x: b.forward(x),
bijector_, args)
original_space_args = original_space_args # type: Tuple[Any]
original_space_log_prob, extra = call_fn(log_prob_fn, original_space_args)
event_ndims = tf.nest.map_structure(
lambda x: tf.rank(x) - tf.rank(original_space_log_prob), args)
return original_space_log_prob + sum(
tf.nest.flatten(
tf.nest.map_structure(
lambda b, x, e: b.forward_log_det_jacobian(x, event_ndims=e),
bijector_, args, event_ndims))), [original_space_args, extra]
if init_state is None:
return wrapper
else:
return wrapper, tf.nest.map_structure(lambda b, s: b.inverse(s), bijector,
init_state)
|
def transform_log_prob_fn(log_prob_fn: PotentialFn,
bijector: BijectorNest,
init_state: State = None
) -> Union[PotentialFn, Tuple[PotentialFn, State]]:
"""Transforms a log-prob function using a bijector.
This takes a log-prob function and creates a new log-prob function that now
takes state in the domain of the bijector, forward-transforms that state
and calls the original log-prob function. It then returns the log-probability
that correctly accounts for this transformation.
The forward-transformed state is pre-pended to the original log-prob
function's extra returns and returned as the new extra return.
For convenience you can also pass the initial state (in the original space),
and this function will return the inverse-transformed state as the second return value.
You'd use this to initialize MCMC operators that operate in the transformed
space.
Args:
log_prob_fn: Log prob fn.
bijector: Bijector(s), must be of the same structure as the `log_prob_fn`
inputs.
init_state: Initial state, in the original space.
Returns:
transformed_log_prob_fn: Transformed log prob fn.
transformed_init_state: If `init_state` is provided. Initial state in the
transformed space.
"""
def wrapper(*args):
"""Transformed wrapper."""
bijector_ = bijector
args = tf.nest.map_structure(lambda x: 0. + x, args)
if len(args) == 1:
args = args[0]
elif isinstance(bijector_, list):
bijector_ = tuple(bijector_)
original_space_args = tf.nest.map_structure(lambda b, x: b.forward(x),
bijector_, args)
original_space_args = original_space_args # type: Tuple[Any]
original_space_log_prob, extra = call_fn(log_prob_fn, original_space_args)
event_ndims = tf.nest.map_structure(
lambda x: tf.rank(x) - tf.rank(original_space_log_prob), args)
return original_space_log_prob + sum(
tf.nest.flatten(
tf.nest.map_structure(
lambda b, x, e: b.forward_log_det_jacobian(x, event_ndims=e),
bijector_, args, event_ndims))), [original_space_args, extra]
if init_state is None:
return wrapper
else:
return wrapper, tf.nest.map_structure(lambda b, s: b.inverse(s), bijector,
init_state)
|
[
"Transforms",
"a",
"log",
"-",
"prob",
"function",
"using",
"a",
"bijector",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/fun_mcmc/fun_mcmc_lib.py#L181-L239
|
[
"def",
"transform_log_prob_fn",
"(",
"log_prob_fn",
":",
"PotentialFn",
",",
"bijector",
":",
"BijectorNest",
",",
"init_state",
":",
"State",
"=",
"None",
")",
"->",
"Union",
"[",
"PotentialFn",
",",
"Tuple",
"[",
"PotentialFn",
",",
"State",
"]",
"]",
":",
"def",
"wrapper",
"(",
"*",
"args",
")",
":",
"\"\"\"Transformed wrapper.\"\"\"",
"bijector_",
"=",
"bijector",
"args",
"=",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"lambda",
"x",
":",
"0.",
"+",
"x",
",",
"args",
")",
"if",
"len",
"(",
"args",
")",
"==",
"1",
":",
"args",
"=",
"args",
"[",
"0",
"]",
"elif",
"isinstance",
"(",
"bijector_",
",",
"list",
")",
":",
"bijector_",
"=",
"tuple",
"(",
"bijector_",
")",
"original_space_args",
"=",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"lambda",
"b",
",",
"x",
":",
"b",
".",
"forward",
"(",
"x",
")",
",",
"bijector_",
",",
"args",
")",
"original_space_args",
"=",
"original_space_args",
"# type: Tuple[Any]",
"original_space_log_prob",
",",
"extra",
"=",
"call_fn",
"(",
"log_prob_fn",
",",
"original_space_args",
")",
"event_ndims",
"=",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"lambda",
"x",
":",
"tf",
".",
"rank",
"(",
"x",
")",
"-",
"tf",
".",
"rank",
"(",
"original_space_log_prob",
")",
",",
"args",
")",
"return",
"original_space_log_prob",
"+",
"sum",
"(",
"tf",
".",
"nest",
".",
"flatten",
"(",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"lambda",
"b",
",",
"x",
",",
"e",
":",
"b",
".",
"forward_log_det_jacobian",
"(",
"x",
",",
"event_ndims",
"=",
"e",
")",
",",
"bijector_",
",",
"args",
",",
"event_ndims",
")",
")",
")",
",",
"[",
"original_space_args",
",",
"extra",
"]",
"if",
"init_state",
"is",
"None",
":",
"return",
"wrapper",
"else",
":",
"return",
"wrapper",
",",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"lambda",
"b",
",",
"s",
":",
"b",
".",
"inverse",
"(",
"s",
")",
",",
"bijector",
",",
"init_state",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
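A hypothetical usage sketch for `transform_log_prob_fn` in TF 2.x eager mode: a positive-support Gamma target is run in unconstrained space via a `Softplus` bijector, and the forward-transformed (constrained) state comes back in the extra returns.
```python
import tensorflow as tf
import tensorflow_probability as tfp

tfd, tfb = tfp.distributions, tfp.bijectors

def log_prob_fn(x):
    return tfd.Gamma(2., 2.).log_prob(x), ()

init = tf.constant([1.0, 2.0])
transformed_fn, unconstrained = transform_log_prob_fn(
    log_prob_fn, tfb.Softplus(), init)

lp, (constrained, extra) = transformed_fn(unconstrained)
print(constrained.numpy())  # ~= [1. 2.], recovered by the forward transform
```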
|
test
|
leapfrog_step
|
Leapfrog `TransitionOperator`.
Args:
leapfrog_step_state: LeapFrogStepState.
step_size: Step size, structure broadcastable to the `target_log_prob_fn`
state.
target_log_prob_fn: Target log prob fn.
kinetic_energy_fn: Kinetic energy fn.
Returns:
leapfrog_step_state: LeapFrogStepState.
leapfrog_step_extras: LeapFrogStepExtras.
|
experimental/fun_mcmc/fun_mcmc_lib.py
|
def leapfrog_step(leapfrog_step_state: LeapFrogStepState,
step_size: FloatTensor, target_log_prob_fn: PotentialFn,
kinetic_energy_fn: PotentialFn
) -> Tuple[LeapFrogStepState, LeapFrogStepExtras]:
"""Leapfrog `TransitionOperator`.
Args:
leapfrog_step_state: LeapFrogStepState.
step_size: Step size, structure broadcastable to the `target_log_prob_fn`
state.
target_log_prob_fn: Target log prob fn.
kinetic_energy_fn: Kinetic energy fn.
Returns:
leapfrog_step_state: LeapFrogStepState.
leapfrog_step_extras: LeapFrogStepExtras.
"""
state = leapfrog_step_state.state
state_grads = leapfrog_step_state.state_grads
momentum = leapfrog_step_state.momentum
step_size = maybe_broadcast_structure(step_size, state)
state = tf.nest.map_structure(tf.convert_to_tensor, state)
momentum = tf.nest.map_structure(tf.convert_to_tensor, momentum)
if state_grads is None:
_, _, state_grads = call_and_grads(target_log_prob_fn, state)
else:
state_grads = tf.nest.map_structure(tf.convert_to_tensor, state_grads)
momentum = tf.nest.map_structure(lambda m, sg, s: m + 0.5 * sg * s, momentum,
state_grads, step_size)
kinetic_energy, kinetic_energy_extra, momentum_grads = call_and_grads(
kinetic_energy_fn, momentum)
state = tf.nest.map_structure(lambda x, mg, s: x + mg * s, state,
momentum_grads, step_size)
target_log_prob, state_extra, state_grads = call_and_grads(
target_log_prob_fn, state)
momentum = tf.nest.map_structure(lambda m, sg, s: m + 0.5 * sg * s, momentum,
state_grads, step_size)
return LeapFrogStepState(state, state_grads, momentum), LeapFrogStepExtras(
target_log_prob, state_extra, kinetic_energy, kinetic_energy_extra)
|
def leapfrog_step(leapfrog_step_state: LeapFrogStepState,
step_size: FloatTensor, target_log_prob_fn: PotentialFn,
kinetic_energy_fn: PotentialFn
) -> Tuple[LeapFrogStepState, LeapFrogStepExtras]:
"""Leapfrog `TransitionOperator`.
Args:
leapfrog_step_state: LeapFrogStepState.
step_size: Step size, structure broadcastable to the `target_log_prob_fn`
state.
target_log_prob_fn: Target log prob fn.
kinetic_energy_fn: Kinetic energy fn.
Returns:
leapfrog_step_state: LeapFrogStepState.
leapfrog_step_extras: LeapFrogStepExtras.
"""
state = leapfrog_step_state.state
state_grads = leapfrog_step_state.state_grads
momentum = leapfrog_step_state.momentum
step_size = maybe_broadcast_structure(step_size, state)
state = tf.nest.map_structure(tf.convert_to_tensor, state)
momentum = tf.nest.map_structure(tf.convert_to_tensor, momentum)
if state_grads is None:
_, _, state_grads = call_and_grads(target_log_prob_fn, state)
else:
state_grads = tf.nest.map_structure(tf.convert_to_tensor, state_grads)
momentum = tf.nest.map_structure(lambda m, sg, s: m + 0.5 * sg * s, momentum,
state_grads, step_size)
kinetic_energy, kinetic_energy_extra, momentum_grads = call_and_grads(
kinetic_energy_fn, momentum)
state = tf.nest.map_structure(lambda x, mg, s: x + mg * s, state,
momentum_grads, step_size)
target_log_prob, state_extra, state_grads = call_and_grads(
target_log_prob_fn, state)
momentum = tf.nest.map_structure(lambda m, sg, s: m + 0.5 * sg * s, momentum,
state_grads, step_size)
return LeapFrogStepState(state, state_grads, momentum), LeapFrogStepExtras(
target_log_prob, state_extra, kinetic_energy, kinetic_energy_extra)
|
[
"Leapfrog",
"TransitionOperator",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/fun_mcmc/fun_mcmc_lib.py#L249-L296
|
[
"def",
"leapfrog_step",
"(",
"leapfrog_step_state",
":",
"LeapFrogStepState",
",",
"step_size",
":",
"FloatTensor",
",",
"target_log_prob_fn",
":",
"PotentialFn",
",",
"kinetic_energy_fn",
":",
"PotentialFn",
")",
"->",
"Tuple",
"[",
"LeapFrogStepState",
",",
"LeapFrogStepExtras",
"]",
":",
"state",
"=",
"leapfrog_step_state",
".",
"state",
"state_grads",
"=",
"leapfrog_step_state",
".",
"state_grads",
"momentum",
"=",
"leapfrog_step_state",
".",
"momentum",
"step_size",
"=",
"maybe_broadcast_structure",
"(",
"step_size",
",",
"state",
")",
"state",
"=",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"tf",
".",
"convert_to_tensor",
",",
"state",
")",
"momentum",
"=",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"tf",
".",
"convert_to_tensor",
",",
"momentum",
")",
"state",
"=",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"tf",
".",
"convert_to_tensor",
",",
"state",
")",
"if",
"state_grads",
"is",
"None",
":",
"_",
",",
"_",
",",
"state_grads",
"=",
"call_and_grads",
"(",
"target_log_prob_fn",
",",
"state",
")",
"else",
":",
"state_grads",
"=",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"tf",
".",
"convert_to_tensor",
",",
"state_grads",
")",
"momentum",
"=",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"lambda",
"m",
",",
"sg",
",",
"s",
":",
"m",
"+",
"0.5",
"*",
"sg",
"*",
"s",
",",
"momentum",
",",
"state_grads",
",",
"step_size",
")",
"kinetic_energy",
",",
"kinetic_energy_extra",
",",
"momentum_grads",
"=",
"call_and_grads",
"(",
"kinetic_energy_fn",
",",
"momentum",
")",
"state",
"=",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"lambda",
"x",
",",
"mg",
",",
"s",
":",
"x",
"+",
"mg",
"*",
"s",
",",
"state",
",",
"momentum_grads",
",",
"step_size",
")",
"target_log_prob",
",",
"state_extra",
",",
"state_grads",
"=",
"call_and_grads",
"(",
"target_log_prob_fn",
",",
"state",
")",
"momentum",
"=",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"lambda",
"m",
",",
"sg",
",",
"s",
":",
"m",
"+",
"0.5",
"*",
"sg",
"*",
"s",
",",
"momentum",
",",
"state_grads",
",",
"step_size",
")",
"return",
"LeapFrogStepState",
"(",
"state",
",",
"state_grads",
",",
"momentum",
")",
",",
"LeapFrogStepExtras",
"(",
"target_log_prob",
",",
"state_extra",
",",
"kinetic_energy",
",",
"kinetic_energy_extra",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
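A hypothetical sketch of a single `leapfrog_step` on a standard-normal target with a quadratic kinetic energy, in TF 2.x eager mode; it assumes `LeapFrogStepState` is the namedtuple defined earlier in this module. Passing `state_grads=None` makes the step recompute the gradients itself.
```python
import tensorflow as tf

def target_log_prob_fn(x):
    return -0.5 * tf.reduce_sum(x ** 2), ()

def kinetic_energy_fn(m):
    return 0.5 * tf.reduce_sum(m ** 2), ()

start = LeapFrogStepState(state=tf.constant([1.0]),
                          state_grads=None,  # recomputed when None
                          momentum=tf.constant([0.5]))
new_state, extras = leapfrog_step(
    start,
    step_size=0.1,
    target_log_prob_fn=target_log_prob_fn,
    kinetic_energy_fn=kinetic_energy_fn)
print(new_state.state.numpy(), extras.target_log_prob.numpy())
```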
|
test
|
metropolis_hastings_step
|
Metropolis-Hastings step.
This probabilistically chooses between `current_state` and `proposed_state`
based on the `energy_change` so as to preserve detailed balance.
Energy change is the negative of `log_accept_ratio`.
Args:
current_state: Current state.
proposed_state: Proposed state.
energy_change: E(proposed_state) - E(current_state).
seed: For reproducibility.
Returns:
new_state: The chosen state.
is_accepted: Whether the proposed state was accepted.
log_uniform: The random number that was used to select between the two
states.
|
experimental/fun_mcmc/fun_mcmc_lib.py
|
def metropolis_hastings_step(current_state: State,
proposed_state: State,
energy_change: FloatTensor,
seed=None) -> Tuple[State, tf.Tensor, tf.Tensor]:
"""Metropolis-Hastings step.
This probabilistically chooses between `current_state` and `proposed_state`
based on the `energy_change` so as to preserve detailed balance.
Energy change is the negative of `log_accept_ratio`.
Args:
current_state: Current state.
proposed_state: Proposed state.
energy_change: E(proposed_state) - E(current_state).
seed: For reproducibility.
Returns:
new_state: The chosen state.
is_accepted: Whether the proposed state was accepted.
log_uniform: The random number that was used to select between the two
states.
"""
flat_current = tf.nest.flatten(current_state)
flat_proposed = nest.flatten_up_to(current_state, proposed_state)
# Impute the None's in the current state.
flat_current = [
p if c is None else c for p, c in zip(flat_proposed, flat_current)
]
current_state = tf.nest.pack_sequence_as(current_state, flat_current)
current_state = tf.nest.map_structure(tf.convert_to_tensor, current_state)
proposed_state = tf.nest.map_structure(tf.convert_to_tensor, proposed_state)
energy_change = tf.convert_to_tensor(value=energy_change)
log_accept_ratio = -energy_change
log_uniform = tf.math.log(
tf.random.uniform(
shape=tf.shape(input=log_accept_ratio),
dtype=log_accept_ratio.dtype.base_dtype,
seed=seed))
is_accepted = log_uniform < log_accept_ratio
next_state = mcmc_util.choose(
is_accepted, proposed_state, current_state, name='choose_next_state')
return next_state, is_accepted, log_uniform
|
def metropolis_hastings_step(current_state: State,
proposed_state: State,
energy_change: FloatTensor,
seed=None) -> Tuple[State, tf.Tensor, tf.Tensor]:
"""Metropolis-Hastings step.
This probabilistically chooses between `current_state` and `proposed_state`
based on the `energy_change` so as to preserve detailed balance.
Energy change is the negative of `log_accept_ratio`.
Args:
current_state: Current state.
proposed_state: Proposed state.
energy_change: E(proposed_state) - E(current_state).
seed: For reproducibility.
Returns:
new_state: The chosen state.
is_accepted: Whether the proposed state was accepted.
log_uniform: The random number that was used to select between the two
states.
"""
flat_current = tf.nest.flatten(current_state)
flat_proposed = nest.flatten_up_to(current_state, proposed_state)
# Impute the None's in the current state.
flat_current = [
p if c is None else c for p, c in zip(flat_proposed, flat_current)
]
current_state = tf.nest.pack_sequence_as(current_state, flat_current)
current_state = tf.nest.map_structure(tf.convert_to_tensor, current_state)
proposed_state = tf.nest.map_structure(tf.convert_to_tensor, proposed_state)
energy_change = tf.convert_to_tensor(value=energy_change)
log_accept_ratio = -energy_change
log_uniform = tf.math.log(
tf.random.uniform(
shape=tf.shape(input=log_accept_ratio),
dtype=log_accept_ratio.dtype.base_dtype,
seed=seed))
is_accepted = log_uniform < log_accept_ratio
next_state = mcmc_util.choose(
is_accepted, proposed_state, current_state, name='choose_next_state')
return next_state, is_accepted, log_uniform
|
[
"Metropolis",
"-",
"Hastings",
"step",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/fun_mcmc/fun_mcmc_lib.py#L299-L345
|
[
"def",
"metropolis_hastings_step",
"(",
"current_state",
":",
"State",
",",
"proposed_state",
":",
"State",
",",
"energy_change",
":",
"FloatTensor",
",",
"seed",
"=",
"None",
")",
"->",
"Tuple",
"[",
"State",
",",
"tf",
".",
"Tensor",
",",
"tf",
".",
"Tensor",
"]",
":",
"flat_current",
"=",
"tf",
".",
"nest",
".",
"flatten",
"(",
"current_state",
")",
"flat_proposed",
"=",
"nest",
".",
"flatten_up_to",
"(",
"current_state",
",",
"proposed_state",
")",
"# Impute the None's in the current state.",
"flat_current",
"=",
"[",
"p",
"if",
"c",
"is",
"None",
"else",
"c",
"for",
"p",
",",
"c",
"in",
"zip",
"(",
"flat_proposed",
",",
"flat_current",
")",
"]",
"current_state",
"=",
"tf",
".",
"nest",
".",
"pack_sequence_as",
"(",
"current_state",
",",
"flat_current",
")",
"current_state",
"=",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"tf",
".",
"convert_to_tensor",
",",
"current_state",
")",
"proposed_state",
"=",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"tf",
".",
"convert_to_tensor",
",",
"proposed_state",
")",
"energy_change",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"energy_change",
")",
"log_accept_ratio",
"=",
"-",
"energy_change",
"log_uniform",
"=",
"tf",
".",
"math",
".",
"log",
"(",
"tf",
".",
"random",
".",
"uniform",
"(",
"shape",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"log_accept_ratio",
")",
",",
"dtype",
"=",
"log_accept_ratio",
".",
"dtype",
".",
"base_dtype",
",",
"seed",
"=",
"seed",
")",
")",
"is_accepted",
"=",
"log_uniform",
"<",
"log_accept_ratio",
"next_state",
"=",
"mcmc_util",
".",
"choose",
"(",
"is_accepted",
",",
"proposed_state",
",",
"current_state",
",",
"name",
"=",
"'choose_next_state'",
")",
"return",
"next_state",
",",
"is_accepted",
",",
"log_uniform"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
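A hypothetical sketch of `metropolis_hastings_step` in TF 2.x eager mode. With a strongly negative energy change the log accept ratio is +50, and since `log_uniform` is always negative the proposal is accepted deterministically.
```python
import tensorflow as tf

new_state, is_accepted, log_u = metropolis_hastings_step(
    current_state=tf.constant(0.),
    proposed_state=tf.constant(1.),
    energy_change=tf.constant(-50.),
    seed=42)
print(new_state.numpy(), bool(is_accepted))  # 1.0 True
```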
|
test
|
hamiltonian_monte_carlo
|
Hamiltonian Monte Carlo `TransitionOperator`.
#### Example
```python
step_size = 0.2
num_steps = 2000
num_leapfrog_steps = 10
state = tf.ones([16, 2])
base_mean = [1., 0]
base_cov = [[1, 0.5], [0.5, 1]]
bijector = tfb.Softplus()
base_dist = tfd.MultivariateNormalFullCovariance(
loc=base_mean, covariance_matrix=base_cov)
target_dist = bijector(base_dist)
def orig_target_log_prob_fn(x):
return target_dist.log_prob(x), ()
target_log_prob_fn, state = fun_mcmc.transform_log_prob_fn(
orig_target_log_prob_fn, bijector, state)
kernel = tf.function(lambda state: fun_mcmc.hamiltonian_monte_carlo(
state,
step_size=step_size,
num_leapfrog_steps=num_leapfrog_steps,
target_log_prob_fn=target_log_prob_fn,
seed=tfp_test_util.test_seed()))
_, chain = fun_mcmc.trace(
state=fun_mcmc.HamiltonianMonteCarloState(
state=state,
state_grads=None,
target_log_prob=None,
state_extra=None),
fn=kernel,
num_steps=num_steps,
trace_fn=lambda state, extra: state.state_extra[0])
```
Args:
hmc_state: HamiltonianMonteCarloState.
target_log_prob_fn: Target log prob fn.
step_size: Step size, structure broadcastable to the `target_log_prob_fn`
state.
num_leapfrog_steps: Number of leapfrog steps to take.
momentum: Initial momentum, passed to `momentum_sample_fn`. Default: zeroes.
kinetic_energy_fn: Kinetic energy function.
momentum_sample_fn: Sampler for the momentum.
leapfrog_trace_fn: Trace function for the leapfrog integrator.
seed: For reproducibility.
Returns:
hmc_state: HamiltonianMonteCarloState
hmc_extra: HamiltonianMonteCarloExtra
|
experimental/fun_mcmc/fun_mcmc_lib.py
|
def hamiltonian_monte_carlo(
hmc_state: HamiltonianMonteCarloState,
target_log_prob_fn: PotentialFn,
step_size: Any,
num_leapfrog_steps: IntTensor,
momentum: State = None,
kinetic_energy_fn: PotentialFn = None,
momentum_sample_fn: MomentumSampleFn = None,
leapfrog_trace_fn: Callable[[LeapFrogStepState, LeapFrogStepExtras],
TensorNest] = lambda *args: (),
seed=None,
) -> Tuple[HamiltonianMonteCarloState, HamiltonianMonteCarloExtra]:
"""Hamiltonian Monte Carlo `TransitionOperator`.
#### Example
```python
step_size = 0.2
num_steps = 2000
num_leapfrog_steps = 10
state = tf.ones([16, 2])
base_mean = [1., 0]
base_cov = [[1, 0.5], [0.5, 1]]
bijector = tfb.Softplus()
base_dist = tfd.MultivariateNormalFullCovariance(
loc=base_mean, covariance_matrix=base_cov)
target_dist = bijector(base_dist)
def orig_target_log_prob_fn(x):
return target_dist.log_prob(x), ()
target_log_prob_fn, state = fun_mcmc.transform_log_prob_fn(
orig_target_log_prob_fn, bijector, state)
kernel = tf.function(lambda state: fun_mcmc.hamiltonian_monte_carlo(
state,
step_size=step_size,
num_leapfrog_steps=num_leapfrog_steps,
target_log_prob_fn=target_log_prob_fn,
seed=tfp_test_util.test_seed()))
_, chain = fun_mcmc.trace(
state=fun_mcmc.HamiltonianMonteCarloState(
state=state,
state_grads=None,
target_log_prob=None,
state_extra=None),
fn=kernel,
num_steps=num_steps,
trace_fn=lambda state, extra: state.state_extra[0])
```
Args:
hmc_state: HamiltonianMonteCarloState.
target_log_prob_fn: Target log prob fn.
step_size: Step size, structure broadcastable to the `target_log_prob_fn`
state.
num_leapfrog_steps: Number of leapfrog steps to take.
momentum: Initial momentum, passed to `momentum_sample_fn`. Default: zeroes.
kinetic_energy_fn: Kinetic energy function.
momentum_sample_fn: Sampler for the momentum.
leapfrog_trace_fn: Trace function for the leapfrog integrator.
seed: For reproducibility.
Returns:
hmc_state: HamiltonianMonteCarloState
hmc_extra: HamiltonianMonteCarloExtra
"""
state = hmc_state.state
state_grads = hmc_state.state_grads
target_log_prob = hmc_state.target_log_prob
state_extra = hmc_state.state_extra
if kinetic_energy_fn is None:
# pylint: disable=function-redefined
def kinetic_energy_fn(*momentum):
return tf.add_n([
tf.reduce_sum(input_tensor=tf.square(x), axis=-1) / 2.
for x in tf.nest.flatten(momentum)
]), ()
if momentum_sample_fn is None:
# pylint: disable=function-redefined
def momentum_sample_fn(*momentum):
ret = tf.nest.map_structure(
lambda x: tf.random.normal(tf.shape(input=x), dtype=x.dtype),
momentum)
if len(ret) == 1:
return ret[0]
else:
return ret
if momentum is None:
momentum = call_fn(momentum_sample_fn,
tf.nest.map_structure(tf.zeros_like, state))
if target_log_prob is None:
target_log_prob, state_extra, state_grads = call_and_grads(
target_log_prob_fn, state)
kinetic_energy, _ = call_fn(kinetic_energy_fn, momentum)
current_energy = -target_log_prob + kinetic_energy
current_state = HamiltonianMonteCarloState(
state=state,
state_grads=state_grads,
state_extra=state_extra,
target_log_prob=target_log_prob)
def leapfrog_wrapper(leapfrog_state, target_log_prob, state_extra):
"""Leapfrog wrapper that tracks extra state."""
del target_log_prob
del state_extra
leapfrog_state, leapfrog_extra = leapfrog_step(
leapfrog_state,
step_size=step_size,
target_log_prob_fn=target_log_prob_fn,
kinetic_energy_fn=kinetic_energy_fn)
return [
leapfrog_state, leapfrog_extra.target_log_prob,
leapfrog_extra.state_extra
], leapfrog_extra
def leapfrog_trace_wrapper_fn(args, leapfrog_extra):
return leapfrog_trace_fn(args[0], leapfrog_extra)
leapfrog_wrapper_state = (LeapFrogStepState(state, state_grads, momentum),
target_log_prob, state_extra)
[[leapfrog_state, target_log_prob, state_extra], _], leapfrog_trace = trace(
leapfrog_wrapper_state,
leapfrog_wrapper,
num_leapfrog_steps,
trace_fn=leapfrog_trace_wrapper_fn)
kinetic_energy, _ = call_fn(kinetic_energy_fn, leapfrog_state.momentum)
proposed_energy = -target_log_prob + kinetic_energy
proposed_state = HamiltonianMonteCarloState(
state=leapfrog_state.state,
state_grads=leapfrog_state.state_grads,
target_log_prob=target_log_prob,
state_extra=state_extra)
energy_change = proposed_energy - current_energy
hmc_state, is_accepted, _ = metropolis_hastings_step(
current_state, proposed_state, energy_change, seed=seed)
hmc_state = hmc_state # type: HamiltonianMonteCarloState
return hmc_state, HamiltonianMonteCarloExtra(
is_accepted=is_accepted,
proposed_hmc_state=proposed_state,
log_accept_ratio=-energy_change,
leapfrog_trace=leapfrog_trace)
|
def hamiltonian_monte_carlo(
hmc_state: HamiltonianMonteCarloState,
target_log_prob_fn: PotentialFn,
step_size: Any,
num_leapfrog_steps: IntTensor,
momentum: State = None,
kinetic_energy_fn: PotentialFn = None,
momentum_sample_fn: MomentumSampleFn = None,
leapfrog_trace_fn: Callable[[LeapFrogStepState, LeapFrogStepExtras],
TensorNest] = lambda *args: (),
seed=None,
) -> Tuple[HamiltonianMonteCarloState, HamiltonianMonteCarloExtra]:
"""Hamiltonian Monte Carlo `TransitionOperator`.
#### Example
```python
step_size = 0.2
num_steps = 2000
num_leapfrog_steps = 10
state = tf.ones([16, 2])
base_mean = [1., 0]
base_cov = [[1, 0.5], [0.5, 1]]
bijector = tfb.Softplus()
base_dist = tfd.MultivariateNormalFullCovariance(
loc=base_mean, covariance_matrix=base_cov)
target_dist = bijector(base_dist)
def orig_target_log_prob_fn(x):
return target_dist.log_prob(x), ()
target_log_prob_fn, state = fun_mcmc.transform_log_prob_fn(
orig_target_log_prob_fn, bijector, state)
kernel = tf.function(lambda state: fun_mcmc.hamiltonian_monte_carlo(
state,
step_size=step_size,
num_leapfrog_steps=num_leapfrog_steps,
target_log_prob_fn=target_log_prob_fn,
seed=tfp_test_util.test_seed()))
_, chain = fun_mcmc.trace(
state=fun_mcmc.HamiltonianMonteCarloState(
state=state,
state_grads=None,
target_log_prob=None,
state_extra=None),
fn=kernel,
num_steps=num_steps,
trace_fn=lambda state, extra: state.state_extra[0])
```
Args:
hmc_state: HamiltonianMonteCarloState.
target_log_prob_fn: Target log prob fn.
step_size: Step size, structure broadcastable to the `target_log_prob_fn`
state.
num_leapfrog_steps: Number of leapfrog steps to take.
momentum: Initial momentum, passed to `momentum_sample_fn`. Default: zeroes.
kinetic_energy_fn: Kinetic energy function.
momentum_sample_fn: Sampler for the momentum.
leapfrog_trace_fn: Trace function for the leapfrog integrator.
seed: For reproducibility.
Returns:
hmc_state: HamiltonianMonteCarloState
hmc_extra: HamiltonianMonteCarloExtra
"""
state = hmc_state.state
state_grads = hmc_state.state_grads
target_log_prob = hmc_state.target_log_prob
state_extra = hmc_state.state_extra
if kinetic_energy_fn is None:
# pylint: disable=function-redefined
def kinetic_energy_fn(*momentum):
return tf.add_n([
tf.reduce_sum(input_tensor=tf.square(x), axis=-1) / 2.
for x in tf.nest.flatten(momentum)
]), ()
if momentum_sample_fn is None:
# pylint: disable=function-redefined
def momentum_sample_fn(*momentum):
ret = tf.nest.map_structure(
lambda x: tf.random.normal(tf.shape(input=x), dtype=x.dtype),
momentum)
if len(ret) == 1:
return ret[0]
else:
return ret
if momentum is None:
momentum = call_fn(momentum_sample_fn,
tf.nest.map_structure(tf.zeros_like, state))
if target_log_prob is None:
target_log_prob, state_extra, state_grads = call_and_grads(
target_log_prob_fn, state)
kinetic_energy, _ = call_fn(kinetic_energy_fn, momentum)
current_energy = -target_log_prob + kinetic_energy
current_state = HamiltonianMonteCarloState(
state=state,
state_grads=state_grads,
state_extra=state_extra,
target_log_prob=target_log_prob)
def leapfrog_wrapper(leapfrog_state, target_log_prob, state_extra):
"""Leapfrog wrapper that tracks extra state."""
del target_log_prob
del state_extra
leapfrog_state, leapfrog_extra = leapfrog_step(
leapfrog_state,
step_size=step_size,
target_log_prob_fn=target_log_prob_fn,
kinetic_energy_fn=kinetic_energy_fn)
return [
leapfrog_state, leapfrog_extra.target_log_prob,
leapfrog_extra.state_extra
], leapfrog_extra
def leapfrog_trace_wrapper_fn(args, leapfrog_extra):
return leapfrog_trace_fn(args[0], leapfrog_extra)
leapfrog_wrapper_state = (LeapFrogStepState(state, state_grads, momentum),
target_log_prob, state_extra)
[[leapfrog_state, target_log_prob, state_extra], _], leapfrog_trace = trace(
leapfrog_wrapper_state,
leapfrog_wrapper,
num_leapfrog_steps,
trace_fn=leapfrog_trace_wrapper_fn)
kinetic_energy, _ = call_fn(kinetic_energy_fn, leapfrog_state.momentum)
proposed_energy = -target_log_prob + kinetic_energy
proposed_state = HamiltonianMonteCarloState(
state=leapfrog_state.state,
state_grads=leapfrog_state.state_grads,
target_log_prob=target_log_prob,
state_extra=state_extra)
energy_change = proposed_energy - current_energy
hmc_state, is_accepted, _ = metropolis_hastings_step(
current_state, proposed_state, energy_change, seed=seed)
hmc_state = hmc_state # type: HamiltonianMonteCarloState
return hmc_state, HamiltonianMonteCarloExtra(
is_accepted=is_accepted,
proposed_hmc_state=proposed_state,
log_accept_ratio=-energy_change,
leapfrog_trace=leapfrog_trace)
|
[
"Hamiltonian",
"Monte",
"Carlo",
"TransitionOperator",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/fun_mcmc/fun_mcmc_lib.py#L361-L517
|
[
"def",
"hamiltonian_monte_carlo",
"(",
"hmc_state",
":",
"HamiltonianMonteCarloState",
",",
"target_log_prob_fn",
":",
"PotentialFn",
",",
"step_size",
":",
"Any",
",",
"num_leapfrog_steps",
":",
"IntTensor",
",",
"momentum",
":",
"State",
"=",
"None",
",",
"kinetic_energy_fn",
":",
"PotentialFn",
"=",
"None",
",",
"momentum_sample_fn",
":",
"MomentumSampleFn",
"=",
"None",
",",
"leapfrog_trace_fn",
":",
"Callable",
"[",
"[",
"LeapFrogStepState",
",",
"LeapFrogStepExtras",
"]",
",",
"TensorNest",
"]",
"=",
"lambda",
"*",
"args",
":",
"(",
")",
",",
"seed",
"=",
"None",
",",
")",
"->",
"Tuple",
"[",
"HamiltonianMonteCarloState",
",",
"HamiltonianMonteCarloExtra",
"]",
":",
"state",
"=",
"hmc_state",
".",
"state",
"state_grads",
"=",
"hmc_state",
".",
"state_grads",
"target_log_prob",
"=",
"hmc_state",
".",
"target_log_prob",
"state_extra",
"=",
"hmc_state",
".",
"state_extra",
"if",
"kinetic_energy_fn",
"is",
"None",
":",
"# pylint: disable=function-redefined",
"def",
"kinetic_energy_fn",
"(",
"*",
"momentum",
")",
":",
"return",
"tf",
".",
"add_n",
"(",
"[",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"tf",
".",
"square",
"(",
"x",
")",
",",
"axis",
"=",
"-",
"1",
")",
"/",
"2.",
"for",
"x",
"in",
"tf",
".",
"nest",
".",
"flatten",
"(",
"momentum",
")",
"]",
")",
",",
"(",
")",
"if",
"momentum_sample_fn",
"is",
"None",
":",
"# pylint: disable=function-redefined",
"def",
"momentum_sample_fn",
"(",
"*",
"momentum",
")",
":",
"ret",
"=",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"lambda",
"x",
":",
"tf",
".",
"random",
".",
"normal",
"(",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
")",
",",
"dtype",
"=",
"x",
".",
"dtype",
")",
",",
"momentum",
")",
"if",
"len",
"(",
"ret",
")",
"==",
"1",
":",
"return",
"ret",
"[",
"0",
"]",
"else",
":",
"return",
"ret",
"if",
"momentum",
"is",
"None",
":",
"momentum",
"=",
"call_fn",
"(",
"momentum_sample_fn",
",",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"tf",
".",
"zeros_like",
",",
"state",
")",
")",
"if",
"target_log_prob",
"is",
"None",
":",
"target_log_prob",
",",
"state_extra",
",",
"state_grads",
"=",
"call_and_grads",
"(",
"target_log_prob_fn",
",",
"state",
")",
"kinetic_energy",
",",
"_",
"=",
"call_fn",
"(",
"kinetic_energy_fn",
",",
"momentum",
")",
"current_energy",
"=",
"-",
"target_log_prob",
"+",
"kinetic_energy",
"current_state",
"=",
"HamiltonianMonteCarloState",
"(",
"state",
"=",
"state",
",",
"state_grads",
"=",
"state_grads",
",",
"state_extra",
"=",
"state_extra",
",",
"target_log_prob",
"=",
"target_log_prob",
")",
"def",
"leapfrog_wrapper",
"(",
"leapfrog_state",
",",
"target_log_prob",
",",
"state_extra",
")",
":",
"\"\"\"Leapfrog wrapper that tracks extra state.\"\"\"",
"del",
"target_log_prob",
"del",
"state_extra",
"leapfrog_state",
",",
"leapfrog_extra",
"=",
"leapfrog_step",
"(",
"leapfrog_state",
",",
"step_size",
"=",
"step_size",
",",
"target_log_prob_fn",
"=",
"target_log_prob_fn",
",",
"kinetic_energy_fn",
"=",
"kinetic_energy_fn",
")",
"return",
"[",
"leapfrog_state",
",",
"leapfrog_extra",
".",
"target_log_prob",
",",
"leapfrog_extra",
".",
"state_extra",
"]",
",",
"leapfrog_extra",
"def",
"leapfrog_trace_wrapper_fn",
"(",
"args",
",",
"leapfrog_extra",
")",
":",
"return",
"leapfrog_trace_fn",
"(",
"args",
"[",
"0",
"]",
",",
"leapfrog_extra",
")",
"leapfrog_wrapper_state",
"=",
"(",
"LeapFrogStepState",
"(",
"state",
",",
"state_grads",
",",
"momentum",
")",
",",
"target_log_prob",
",",
"state_extra",
")",
"[",
"[",
"leapfrog_state",
",",
"target_log_prob",
",",
"state_extra",
"]",
",",
"_",
"]",
",",
"leapfrog_trace",
"=",
"trace",
"(",
"leapfrog_wrapper_state",
",",
"leapfrog_wrapper",
",",
"num_leapfrog_steps",
",",
"trace_fn",
"=",
"leapfrog_trace_wrapper_fn",
")",
"kinetic_energy",
",",
"_",
"=",
"call_fn",
"(",
"kinetic_energy_fn",
",",
"leapfrog_state",
".",
"momentum",
")",
"proposed_energy",
"=",
"-",
"target_log_prob",
"+",
"kinetic_energy",
"proposed_state",
"=",
"HamiltonianMonteCarloState",
"(",
"state",
"=",
"leapfrog_state",
".",
"state",
",",
"state_grads",
"=",
"leapfrog_state",
".",
"state_grads",
",",
"target_log_prob",
"=",
"target_log_prob",
",",
"state_extra",
"=",
"state_extra",
")",
"energy_change",
"=",
"proposed_energy",
"-",
"current_energy",
"hmc_state",
",",
"is_accepted",
",",
"_",
"=",
"metropolis_hastings_step",
"(",
"current_state",
",",
"proposed_state",
",",
"energy_change",
",",
"seed",
"=",
"seed",
")",
"hmc_state",
"=",
"hmc_state",
"# type: HamiltonianMonteCarloState",
"return",
"hmc_state",
",",
"HamiltonianMonteCarloExtra",
"(",
"is_accepted",
"=",
"is_accepted",
",",
"proposed_hmc_state",
"=",
"proposed_state",
",",
"log_accept_ratio",
"=",
"-",
"energy_change",
",",
"leapfrog_trace",
"=",
"leapfrog_trace",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
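The accept/reject above depends only on the scalar `energy_change`. A minimal NumPy sketch of that Metropolis-Hastings rule, independent of the `fun_mcmc` API (all names here are illustrative):
```python
import numpy as np

def metropolis_accept(energy_change, rng):
    # Accept with probability min(1, exp(-energy_change)): draw u ~ U(0, 1)
    # and accept when log(u) < -energy_change.
    return np.log(rng.uniform()) < -energy_change

rng = np.random.default_rng(0)
assert metropolis_accept(-1.0, rng)  # lower energy is always accepted
accept_rate = np.mean([metropolis_accept(1.0, rng) for _ in range(10_000)])
print(accept_rate)  # ~exp(-1) ~= 0.37 for a proposal that raises the energy
```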
test
|
sign_adaptation
|
A function to do simple sign-based control of a variable.
```
control = control * (1. + adaptation_rate) ** sign(output - set_point)
```
Args:
control: The control variable.
output: The output variable.
set_point: The set point for `output`. This function will adjust `control`
so that `output` matches `set_point`.
adaptation_rate: Adaptation rate.
Returns:
control: New control.
|
experimental/fun_mcmc/fun_mcmc_lib.py
|
def sign_adaptation(control: FloatNest,
output: FloatTensor,
set_point: FloatTensor,
adaptation_rate: FloatTensor = 0.01) -> FloatNest:
"""A function to do simple sign-based control of a variable.
```
control = control * (1. + adaptation_rate) ** sign(output - set_point)
```
Args:
control: The control variable.
output: The output variable.
set_point: The set point for `output`. This function will adjust `control`
so that `output` matches `set_point`.
adaptation_rate: Adaptation rate.
Returns:
control: New control.
"""
def _get_new_control(control, output, set_point):
new_control = mcmc_util.choose(output > set_point,
control * (1. + adaptation_rate),
control / (1. + adaptation_rate))
return new_control
output = maybe_broadcast_structure(output, control)
set_point = maybe_broadcast_structure(set_point, control)
return tf.nest.map_structure(_get_new_control, control, output, set_point)
|
def sign_adaptation(control: FloatNest,
output: FloatTensor,
set_point: FloatTensor,
adaptation_rate: FloatTensor = 0.01) -> FloatNest:
"""A function to do simple sign-based control of a variable.
```
control = control * (1. + adaptation_rate) ** sign(output - set_point)
```
Args:
control: The control variable.
output: The output variable.
set_point: The set point for `output`. This function will adjust `control`
so that `output` matches `set_point`.
adaptation_rate: Adaptation rate.
Returns:
control: New control.
"""
def _get_new_control(control, output, set_point):
new_control = mcmc_util.choose(output > set_point,
control * (1. + adaptation_rate),
control / (1. + adaptation_rate))
return new_control
output = maybe_broadcast_structure(output, control)
set_point = maybe_broadcast_structure(set_point, control)
return tf.nest.map_structure(_get_new_control, control, output, set_point)
|
[
"A",
"function",
"to",
"do",
"simple",
"sign",
"-",
"based",
"control",
"of",
"a",
"variable",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/fun_mcmc/fun_mcmc_lib.py#L520-L550
|
[
"def",
"sign_adaptation",
"(",
"control",
":",
"FloatNest",
",",
"output",
":",
"FloatTensor",
",",
"set_point",
":",
"FloatTensor",
",",
"adaptation_rate",
":",
"FloatTensor",
"=",
"0.01",
")",
"->",
"FloatNest",
":",
"def",
"_get_new_control",
"(",
"control",
",",
"output",
",",
"set_point",
")",
":",
"new_control",
"=",
"mcmc_util",
".",
"choose",
"(",
"output",
">",
"set_point",
",",
"control",
"*",
"(",
"1.",
"+",
"adaptation_rate",
")",
",",
"control",
"/",
"(",
"1.",
"+",
"adaptation_rate",
")",
")",
"return",
"new_control",
"output",
"=",
"maybe_broadcast_structure",
"(",
"output",
",",
"control",
")",
"set_point",
"=",
"maybe_broadcast_structure",
"(",
"set_point",
",",
"control",
")",
"return",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"_get_new_control",
",",
"control",
",",
"output",
",",
"set_point",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
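The rule is a multiplicative, sign-only controller: `control` grows by a fixed factor while `output` exceeds the set point and shrinks otherwise. A self-contained sketch under a toy model (the linear acceptance stand-in is an assumption for illustration only):
```python
import numpy as np

def sign_adapt(control, output, set_point, adaptation_rate=0.01):
    # control * (1 + rate) ** sign(output - set_point)
    return control * (1. + adaptation_rate) ** np.sign(output - set_point)

rng = np.random.default_rng(0)
step_size = 2.0
for _ in range(2000):
    # Toy stand-in: acceptance falls as the step size grows.
    accept_prob = np.clip(1.5 - step_size + 0.05 * rng.normal(), 0., 1.)
    step_size = sign_adapt(step_size, accept_prob, set_point=0.75)
print(step_size)  # hovers near 0.75, where accept_prob matches the set point
```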
test
|
_ConvVariational.compute_output_shape
|
Computes the output shape of the layer.
Args:
input_shape: Shape tuple (tuple of integers) or list of shape tuples
(one per output tensor of the layer). Shape tuples can include None for
free dimensions, instead of an integer.
Returns:
output_shape: A tuple representing the output shape.
|
tensorflow_probability/python/layers/conv_variational.py
|
def compute_output_shape(self, input_shape):
"""Computes the output shape of the layer.
Args:
input_shape: Shape tuple (tuple of integers) or list of shape tuples
(one per output tensor of the layer). Shape tuples can include None for
free dimensions, instead of an integer.
Returns:
output_shape: A tuple representing the output shape.
"""
input_shape = tf.TensorShape(input_shape).as_list()
if self.data_format == 'channels_last':
space = input_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = tf_layers_util.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return tf.TensorShape([input_shape[0]] + new_space + [self.filters])
else:
space = input_shape[2:]
new_space = []
for i in range(len(space)):
new_dim = tf_layers_util.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return tf.TensorShape([input_shape[0], self.filters] + new_space)
|
def compute_output_shape(self, input_shape):
"""Computes the output shape of the layer.
Args:
input_shape: Shape tuple (tuple of integers) or list of shape tuples
(one per output tensor of the layer). Shape tuples can include None for
free dimensions, instead of an integer.
Returns:
output_shape: A tuple representing the output shape.
"""
input_shape = tf.TensorShape(input_shape).as_list()
if self.data_format == 'channels_last':
space = input_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = tf_layers_util.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return tf.TensorShape([input_shape[0]] + new_space + [self.filters])
else:
space = input_shape[2:]
new_space = []
for i in range(len(space)):
new_dim = tf_layers_util.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return tf.TensorShape([input_shape[0], self.filters] + new_space)
|
[
"Computes",
"the",
"output",
"shape",
"of",
"the",
"layer",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/conv_variational.py#L249-L284
|
[
"def",
"compute_output_shape",
"(",
"self",
",",
"input_shape",
")",
":",
"input_shape",
"=",
"tf",
".",
"TensorShape",
"(",
"input_shape",
")",
".",
"as_list",
"(",
")",
"if",
"self",
".",
"data_format",
"==",
"'channels_last'",
":",
"space",
"=",
"input_shape",
"[",
"1",
":",
"-",
"1",
"]",
"new_space",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"space",
")",
")",
":",
"new_dim",
"=",
"tf_layers_util",
".",
"conv_output_length",
"(",
"space",
"[",
"i",
"]",
",",
"self",
".",
"kernel_size",
"[",
"i",
"]",
",",
"padding",
"=",
"self",
".",
"padding",
",",
"stride",
"=",
"self",
".",
"strides",
"[",
"i",
"]",
",",
"dilation",
"=",
"self",
".",
"dilation_rate",
"[",
"i",
"]",
")",
"new_space",
".",
"append",
"(",
"new_dim",
")",
"return",
"tf",
".",
"TensorShape",
"(",
"[",
"input_shape",
"[",
"0",
"]",
"]",
"+",
"new_space",
"+",
"[",
"self",
".",
"filters",
"]",
")",
"else",
":",
"space",
"=",
"input_shape",
"[",
"2",
":",
"]",
"new_space",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"space",
")",
")",
":",
"new_dim",
"=",
"tf_layers_util",
".",
"conv_output_length",
"(",
"space",
"[",
"i",
"]",
",",
"self",
".",
"kernel_size",
"[",
"i",
"]",
",",
"padding",
"=",
"self",
".",
"padding",
",",
"stride",
"=",
"self",
".",
"strides",
"[",
"i",
"]",
",",
"dilation",
"=",
"self",
".",
"dilation_rate",
"[",
"i",
"]",
")",
"new_space",
".",
"append",
"(",
"new_dim",
")",
"return",
"tf",
".",
"TensorShape",
"(",
"[",
"input_shape",
"[",
"0",
"]",
",",
"self",
".",
"filters",
"]",
"+",
"new_space",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
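Both branches delegate the per-dimension arithmetic to `conv_output_length`. A sketch of the formula it is commonly defined by (assuming the standard Keras `'valid'`/`'same'` semantics):
```python
import math

def conv_output_length(input_length, filter_size, padding, stride, dilation=1):
    # Effective kernel extent once dilation spreads the taps apart.
    dilated = filter_size + (filter_size - 1) * (dilation - 1)
    if padding == 'same':
        length = input_length                  # padded to keep the extent
    elif padding == 'valid':
        length = input_length - dilated + 1    # borders are dropped
    else:
        raise ValueError(padding)
    return math.ceil(length / stride)

print(conv_output_length(10, 3, 'valid', stride=1))  # 8
print(conv_output_length(10, 3, 'same', stride=2))   # 5
```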
test
|
_ConvVariational.get_config
|
Returns the config of the layer.
A layer config is a Python dictionary (serializable) containing the
configuration of a layer. The same layer can be reinstantiated later
(without its trained weights) from this configuration.
Returns:
config: A Python dictionary of class keyword arguments and their
serialized values.
|
tensorflow_probability/python/layers/conv_variational.py
|
def get_config(self):
"""Returns the config of the layer.
A layer config is a Python dictionary (serializable) containing the
configuration of a layer. The same layer can be reinstantiated later
(without its trained weights) from this configuration.
Returns:
config: A Python dictionary of class keyword arguments and their
serialized values.
"""
config = {
'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': (tf.keras.activations.serialize(self.activation)
if self.activation else None),
'activity_regularizer':
tf.keras.initializers.serialize(self.activity_regularizer),
}
function_keys = [
'kernel_posterior_fn',
'kernel_posterior_tensor_fn',
'kernel_prior_fn',
'kernel_divergence_fn',
'bias_posterior_fn',
'bias_posterior_tensor_fn',
'bias_prior_fn',
'bias_divergence_fn',
]
for function_key in function_keys:
function = getattr(self, function_key)
if function is None:
function_name = None
function_type = None
else:
function_name, function_type = tfp_layers_util.serialize_function(
function)
config[function_key] = function_name
config[function_key + '_type'] = function_type
base_config = super(_ConvVariational, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
|
def get_config(self):
"""Returns the config of the layer.
A layer config is a Python dictionary (serializable) containing the
configuration of a layer. The same layer can be reinstantiated later
(without its trained weights) from this configuration.
Returns:
config: A Python dictionary of class keyword arguments and their
serialized values.
"""
config = {
'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': (tf.keras.activations.serialize(self.activation)
if self.activation else None),
'activity_regularizer':
tf.keras.initializers.serialize(self.activity_regularizer),
}
function_keys = [
'kernel_posterior_fn',
'kernel_posterior_tensor_fn',
'kernel_prior_fn',
'kernel_divergence_fn',
'bias_posterior_fn',
'bias_posterior_tensor_fn',
'bias_prior_fn',
'bias_divergence_fn',
]
for function_key in function_keys:
function = getattr(self, function_key)
if function is None:
function_name = None
function_type = None
else:
function_name, function_type = tfp_layers_util.serialize_function(
function)
config[function_key] = function_name
config[function_key + '_type'] = function_type
base_config = super(_ConvVariational, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
|
[
"Returns",
"the",
"config",
"of",
"the",
"layer",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/conv_variational.py#L286-L330
|
[
"def",
"get_config",
"(",
"self",
")",
":",
"config",
"=",
"{",
"'filters'",
":",
"self",
".",
"filters",
",",
"'kernel_size'",
":",
"self",
".",
"kernel_size",
",",
"'strides'",
":",
"self",
".",
"strides",
",",
"'padding'",
":",
"self",
".",
"padding",
",",
"'data_format'",
":",
"self",
".",
"data_format",
",",
"'dilation_rate'",
":",
"self",
".",
"dilation_rate",
",",
"'activation'",
":",
"(",
"tf",
".",
"keras",
".",
"activations",
".",
"serialize",
"(",
"self",
".",
"activation",
")",
"if",
"self",
".",
"activation",
"else",
"None",
")",
",",
"'activity_regularizer'",
":",
"tf",
".",
"keras",
".",
"initializers",
".",
"serialize",
"(",
"self",
".",
"activity_regularizer",
")",
",",
"}",
"function_keys",
"=",
"[",
"'kernel_posterior_fn'",
",",
"'kernel_posterior_tensor_fn'",
",",
"'kernel_prior_fn'",
",",
"'kernel_divergence_fn'",
",",
"'bias_posterior_fn'",
",",
"'bias_posterior_tensor_fn'",
",",
"'bias_prior_fn'",
",",
"'bias_divergence_fn'",
",",
"]",
"for",
"function_key",
"in",
"function_keys",
":",
"function",
"=",
"getattr",
"(",
"self",
",",
"function_key",
")",
"if",
"function",
"is",
"None",
":",
"function_name",
"=",
"None",
"function_type",
"=",
"None",
"else",
":",
"function_name",
",",
"function_type",
"=",
"tfp_layers_util",
".",
"serialize_function",
"(",
"function",
")",
"config",
"[",
"function_key",
"]",
"=",
"function_name",
"config",
"[",
"function_key",
"+",
"'_type'",
"]",
"=",
"function_type",
"base_config",
"=",
"super",
"(",
"_ConvVariational",
",",
"self",
")",
".",
"get_config",
"(",
")",
"return",
"dict",
"(",
"list",
"(",
"base_config",
".",
"items",
"(",
")",
")",
"+",
"list",
"(",
"config",
".",
"items",
"(",
")",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
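The paired `<key>`/`<key>_type` entries exist because the function-valued arguments may be lambdas, which cannot be recovered by name alone. A toy sketch of the idea (not the actual `tfp_layers_util` implementation):
```python
import inspect

def serialize_function(fn):
    # Named functions round-trip by name; lambdas need their source text.
    if fn.__name__ == '<lambda>':
        return inspect.getsource(fn).strip(), 'lambda'
    return fn.__name__, 'function'

def my_divergence(q, p, ignore):
    return 0.

print(serialize_function(my_divergence))  # ('my_divergence', 'function')
```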
test
|
_ConvVariational.from_config
|
Creates a layer from its config.
This method is the reverse of `get_config`, capable of instantiating the
same layer from the config dictionary.
Args:
config: A Python dictionary, typically the output of `get_config`.
Returns:
layer: A layer instance.
|
tensorflow_probability/python/layers/conv_variational.py
|
def from_config(cls, config):
"""Creates a layer from its config.
This method is the reverse of `get_config`, capable of instantiating the
same layer from the config dictionary.
Args:
config: A Python dictionary, typically the output of `get_config`.
Returns:
layer: A layer instance.
"""
config = config.copy()
function_keys = [
'kernel_posterior_fn',
'kernel_posterior_tensor_fn',
'kernel_prior_fn',
'kernel_divergence_fn',
'bias_posterior_fn',
'bias_posterior_tensor_fn',
'bias_prior_fn',
'bias_divergence_fn',
]
for function_key in function_keys:
serial = config[function_key]
function_type = config.pop(function_key + '_type')
if serial is not None:
config[function_key] = tfp_layers_util.deserialize_function(
serial,
function_type=function_type)
return cls(**config)
|
def from_config(cls, config):
"""Creates a layer from its config.
This method is the reverse of `get_config`, capable of instantiating the
same layer from the config dictionary.
Args:
config: A Python dictionary, typically the output of `get_config`.
Returns:
layer: A layer instance.
"""
config = config.copy()
function_keys = [
'kernel_posterior_fn',
'kernel_posterior_tensor_fn',
'kernel_prior_fn',
'kernel_divergence_fn',
'bias_posterior_fn',
'bias_posterior_tensor_fn',
'bias_prior_fn',
'bias_divergence_fn',
]
for function_key in function_keys:
serial = config[function_key]
function_type = config.pop(function_key + '_type')
if serial is not None:
config[function_key] = tfp_layers_util.deserialize_function(
serial,
function_type=function_type)
return cls(**config)
|
[
"Creates",
"a",
"layer",
"from",
"its",
"config",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/conv_variational.py#L333-L363
|
[
"def",
"from_config",
"(",
"cls",
",",
"config",
")",
":",
"config",
"=",
"config",
".",
"copy",
"(",
")",
"function_keys",
"=",
"[",
"'kernel_posterior_fn'",
",",
"'kernel_posterior_tensor_fn'",
",",
"'kernel_prior_fn'",
",",
"'kernel_divergence_fn'",
",",
"'bias_posterior_fn'",
",",
"'bias_posterior_tensor_fn'",
",",
"'bias_prior_fn'",
",",
"'bias_divergence_fn'",
",",
"]",
"for",
"function_key",
"in",
"function_keys",
":",
"serial",
"=",
"config",
"[",
"function_key",
"]",
"function_type",
"=",
"config",
".",
"pop",
"(",
"function_key",
"+",
"'_type'",
")",
"if",
"serial",
"is",
"not",
"None",
":",
"config",
"[",
"function_key",
"]",
"=",
"tfp_layers_util",
".",
"deserialize_function",
"(",
"serial",
",",
"function_type",
"=",
"function_type",
")",
"return",
"cls",
"(",
"*",
"*",
"config",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
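Together with `get_config`, this gives the usual Keras round trip. A hedged usage sketch (assumes a TensorFlow Probability install where `tfp.layers.Convolution1DFlipout` is available):
```python
import tensorflow_probability as tfp

layer = tfp.layers.Convolution1DFlipout(filters=4, kernel_size=3)
config = layer.get_config()  # plain, serializable dict
# Rebuilds an equivalent layer (with fresh, untrained weights).
clone = tfp.layers.Convolution1DFlipout.from_config(config)
print(clone.filters, clone.kernel_size)  # 4 (3,)
```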
test
|
_ConvFlipout.get_config
|
Returns the config of the layer.
A layer config is a Python dictionary (serializable) containing the
configuration of a layer. The same layer can be reinstantiated later
(without its trained weights) from this configuration.
Returns:
config: A Python dictionary of class keyword arguments and their
serialized values.
|
tensorflow_probability/python/layers/conv_variational.py
|
def get_config(self):
"""Returns the config of the layer.
A layer config is a Python dictionary (serializable) containing the
configuration of a layer. The same layer can be reinstantiated later
(without its trained weights) from this configuration.
Returns:
config: A Python dictionary of class keyword arguments and their
serialized values.
"""
config = {
'seed': self.seed,
}
base_config = super(_ConvFlipout, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
|
def get_config(self):
"""Returns the config of the layer.
A layer config is a Python dictionary (serializable) containing the
configuration of a layer. The same layer can be reinstantiated later
(without its trained weights) from this configuration.
Returns:
config: A Python dictionary of class keyword arguments and their
serialized values.
"""
config = {
'seed': self.seed,
}
base_config = super(_ConvFlipout, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
|
[
"Returns",
"the",
"config",
"of",
"the",
"layer",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/conv_variational.py#L1111-L1126
|
[
"def",
"get_config",
"(",
"self",
")",
":",
"config",
"=",
"{",
"'seed'",
":",
"self",
".",
"seed",
",",
"}",
"base_config",
"=",
"super",
"(",
"_ConvFlipout",
",",
"self",
")",
".",
"get_config",
"(",
")",
"return",
"dict",
"(",
"list",
"(",
"base_config",
".",
"items",
"(",
")",
")",
"+",
"list",
"(",
"config",
".",
"items",
"(",
")",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_as_tensor
|
Convenience to convert to `Tensor` or leave as `None`.
|
tensorflow_probability/python/bijectors/affine.py
|
def _as_tensor(x, name, dtype):
"""Convenience to convert to `Tensor` or leave as `None`."""
return None if x is None else tf.convert_to_tensor(
value=x, name=name, dtype=dtype)
|
def _as_tensor(x, name, dtype):
"""Convenience to convert to `Tensor` or leave as `None`."""
return None if x is None else tf.convert_to_tensor(
value=x, name=name, dtype=dtype)
|
[
"Convenience",
"to",
"convert",
"to",
"Tensor",
"or",
"leave",
"as",
"None",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/affine.py#L34-L37
|
[
"def",
"_as_tensor",
"(",
"x",
",",
"name",
",",
"dtype",
")",
":",
"return",
"None",
"if",
"x",
"is",
"None",
"else",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
",",
"name",
"=",
"name",
",",
"dtype",
"=",
"dtype",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
Affine._create_scale_operator
|
Construct `scale` from various components.
Args:
    identity_multiplier: Floating-point rank-0 `Tensor` representing a scaling
done to the identity matrix.
    diag: Floating-point `Tensor` representing the diagonal matrix. `diag` has
shape `[N1, N2, ... k]`, which represents a k x k diagonal matrix.
tril: Floating-point `Tensor` representing the lower triangular matrix.
`tril` has shape `[N1, N2, ... k, k]`, which represents a k x k lower
triangular matrix.
perturb_diag: Floating-point `Tensor` representing the diagonal matrix of
the low rank update.
    perturb_factor: Floating-point `Tensor` representing the factor matrix.
    shift: Floating-point `Tensor` representing the `shift` in `scale @ X + shift`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
dtype: `DType` for arg `Tensor` conversions.
Returns:
scale. In the case of scaling by a constant, scale is a
floating point `Tensor`. Otherwise, scale is a `LinearOperator`.
Raises:
ValueError: if all of `tril`, `diag` and `identity_multiplier` are `None`.
|
tensorflow_probability/python/bijectors/affine.py
|
def _create_scale_operator(self, identity_multiplier, diag, tril,
perturb_diag, perturb_factor, shift, validate_args,
dtype):
"""Construct `scale` from various components.
Args:
    identity_multiplier: Floating-point rank-0 `Tensor` representing a scaling
done to the identity matrix.
    diag: Floating-point `Tensor` representing the diagonal matrix. `diag` has
shape `[N1, N2, ... k]`, which represents a k x k diagonal matrix.
tril: Floating-point `Tensor` representing the lower triangular matrix.
`tril` has shape `[N1, N2, ... k, k]`, which represents a k x k lower
triangular matrix.
perturb_diag: Floating-point `Tensor` representing the diagonal matrix of
the low rank update.
    perturb_factor: Floating-point `Tensor` representing the factor matrix.
    shift: Floating-point `Tensor` representing the `shift` in `scale @ X + shift`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
dtype: `DType` for arg `Tensor` conversions.
Returns:
scale. In the case of scaling by a constant, scale is a
floating point `Tensor`. Otherwise, scale is a `LinearOperator`.
Raises:
ValueError: if all of `tril`, `diag` and `identity_multiplier` are `None`.
"""
identity_multiplier = _as_tensor(identity_multiplier, "identity_multiplier",
dtype)
diag = _as_tensor(diag, "diag", dtype)
tril = _as_tensor(tril, "tril", dtype)
perturb_diag = _as_tensor(perturb_diag, "perturb_diag", dtype)
perturb_factor = _as_tensor(perturb_factor, "perturb_factor", dtype)
# If possible, use the low rank update to infer the shape of
# the identity matrix, when scale represents a scaled identity matrix
# with a low rank update.
shape_hint = None
if perturb_factor is not None:
shape_hint = distribution_util.dimension_size(perturb_factor, axis=-2)
if self._is_only_identity_multiplier:
if validate_args:
return distribution_util.with_dependencies([
assert_util.assert_none_equal(
identity_multiplier, tf.zeros([], identity_multiplier.dtype),
["identity_multiplier should be non-zero."])
], identity_multiplier)
return identity_multiplier
scale = distribution_util.make_tril_scale(
loc=shift,
scale_tril=tril,
scale_diag=diag,
scale_identity_multiplier=identity_multiplier,
validate_args=validate_args,
assert_positive=False,
shape_hint=shape_hint)
if perturb_factor is not None:
return tf.linalg.LinearOperatorLowRankUpdate(
scale,
u=perturb_factor,
diag_update=perturb_diag,
is_diag_update_positive=perturb_diag is None,
is_non_singular=True, # Implied by is_positive_definite=True.
is_self_adjoint=True,
is_positive_definite=True,
is_square=True)
return scale
|
def _create_scale_operator(self, identity_multiplier, diag, tril,
perturb_diag, perturb_factor, shift, validate_args,
dtype):
"""Construct `scale` from various components.
Args:
    identity_multiplier: Floating-point rank-0 `Tensor` representing a scaling
done to the identity matrix.
    diag: Floating-point `Tensor` representing the diagonal matrix. `diag` has
shape `[N1, N2, ... k]`, which represents a k x k diagonal matrix.
tril: Floating-point `Tensor` representing the lower triangular matrix.
`tril` has shape `[N1, N2, ... k, k]`, which represents a k x k lower
triangular matrix.
perturb_diag: Floating-point `Tensor` representing the diagonal matrix of
the low rank update.
    perturb_factor: Floating-point `Tensor` representing the factor matrix.
    shift: Floating-point `Tensor` representing the `shift` in `scale @ X + shift`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
dtype: `DType` for arg `Tensor` conversions.
Returns:
scale. In the case of scaling by a constant, scale is a
floating point `Tensor`. Otherwise, scale is a `LinearOperator`.
Raises:
ValueError: if all of `tril`, `diag` and `identity_multiplier` are `None`.
"""
identity_multiplier = _as_tensor(identity_multiplier, "identity_multiplier",
dtype)
diag = _as_tensor(diag, "diag", dtype)
tril = _as_tensor(tril, "tril", dtype)
perturb_diag = _as_tensor(perturb_diag, "perturb_diag", dtype)
perturb_factor = _as_tensor(perturb_factor, "perturb_factor", dtype)
# If possible, use the low rank update to infer the shape of
# the identity matrix, when scale represents a scaled identity matrix
# with a low rank update.
shape_hint = None
if perturb_factor is not None:
shape_hint = distribution_util.dimension_size(perturb_factor, axis=-2)
if self._is_only_identity_multiplier:
if validate_args:
return distribution_util.with_dependencies([
assert_util.assert_none_equal(
identity_multiplier, tf.zeros([], identity_multiplier.dtype),
["identity_multiplier should be non-zero."])
], identity_multiplier)
return identity_multiplier
scale = distribution_util.make_tril_scale(
loc=shift,
scale_tril=tril,
scale_diag=diag,
scale_identity_multiplier=identity_multiplier,
validate_args=validate_args,
assert_positive=False,
shape_hint=shape_hint)
if perturb_factor is not None:
return tf.linalg.LinearOperatorLowRankUpdate(
scale,
u=perturb_factor,
diag_update=perturb_diag,
is_diag_update_positive=perturb_diag is None,
is_non_singular=True, # Implied by is_positive_definite=True.
is_self_adjoint=True,
is_positive_definite=True,
is_square=True)
return scale
|
[
"Construct",
"scale",
"from",
"various",
"components",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/affine.py#L238-L309
|
[
"def",
"_create_scale_operator",
"(",
"self",
",",
"identity_multiplier",
",",
"diag",
",",
"tril",
",",
"perturb_diag",
",",
"perturb_factor",
",",
"shift",
",",
"validate_args",
",",
"dtype",
")",
":",
"identity_multiplier",
"=",
"_as_tensor",
"(",
"identity_multiplier",
",",
"\"identity_multiplier\"",
",",
"dtype",
")",
"diag",
"=",
"_as_tensor",
"(",
"diag",
",",
"\"diag\"",
",",
"dtype",
")",
"tril",
"=",
"_as_tensor",
"(",
"tril",
",",
"\"tril\"",
",",
"dtype",
")",
"perturb_diag",
"=",
"_as_tensor",
"(",
"perturb_diag",
",",
"\"perturb_diag\"",
",",
"dtype",
")",
"perturb_factor",
"=",
"_as_tensor",
"(",
"perturb_factor",
",",
"\"perturb_factor\"",
",",
"dtype",
")",
"# If possible, use the low rank update to infer the shape of",
"# the identity matrix, when scale represents a scaled identity matrix",
"# with a low rank update.",
"shape_hint",
"=",
"None",
"if",
"perturb_factor",
"is",
"not",
"None",
":",
"shape_hint",
"=",
"distribution_util",
".",
"dimension_size",
"(",
"perturb_factor",
",",
"axis",
"=",
"-",
"2",
")",
"if",
"self",
".",
"_is_only_identity_multiplier",
":",
"if",
"validate_args",
":",
"return",
"distribution_util",
".",
"with_dependencies",
"(",
"[",
"assert_util",
".",
"assert_none_equal",
"(",
"identity_multiplier",
",",
"tf",
".",
"zeros",
"(",
"[",
"]",
",",
"identity_multiplier",
".",
"dtype",
")",
",",
"[",
"\"identity_multiplier should be non-zero.\"",
"]",
")",
"]",
",",
"identity_multiplier",
")",
"return",
"identity_multiplier",
"scale",
"=",
"distribution_util",
".",
"make_tril_scale",
"(",
"loc",
"=",
"shift",
",",
"scale_tril",
"=",
"tril",
",",
"scale_diag",
"=",
"diag",
",",
"scale_identity_multiplier",
"=",
"identity_multiplier",
",",
"validate_args",
"=",
"validate_args",
",",
"assert_positive",
"=",
"False",
",",
"shape_hint",
"=",
"shape_hint",
")",
"if",
"perturb_factor",
"is",
"not",
"None",
":",
"return",
"tf",
".",
"linalg",
".",
"LinearOperatorLowRankUpdate",
"(",
"scale",
",",
"u",
"=",
"perturb_factor",
",",
"diag_update",
"=",
"perturb_diag",
",",
"is_diag_update_positive",
"=",
"perturb_diag",
"is",
"None",
",",
"is_non_singular",
"=",
"True",
",",
"# Implied by is_positive_definite=True.",
"is_self_adjoint",
"=",
"True",
",",
"is_positive_definite",
"=",
"True",
",",
"is_square",
"=",
"True",
")",
"return",
"scale"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
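When `perturb_factor` is supplied, the returned operator represents `scale = base + U @ diag(d) @ U^T`. A dense NumPy sketch of that composition (illustrative shapes; the real code keeps everything as structured `LinearOperator`s):
```python
import numpy as np

k, r = 3, 1
base = np.diag([2., 2., 2.])        # diag / identity_multiplier part
u = np.array([[1.], [0.5], [0.]])   # perturb_factor, shape [k, r]
d = np.array([0.3])                 # perturb_diag, shape [r]

scale = base + u @ np.diag(d) @ u.T  # dense low-rank update
print(scale @ np.ones(k))            # what the operator applies to a vector
```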
test
|
random_walk_normal_fn
|
Returns a callable that adds a random normal perturbation to the input.
This function returns a callable that accepts a Python `list` of `Tensor`s of
any shapes and `dtypes` representing the state parts of the `current_state`
and a random seed. The supplied argument `scale` must be a `Tensor` or Python
`list` of `Tensor`s representing the scale of the generated
proposal. `scale` must broadcast with the state parts of `current_state`.
The callable adds a sample from a zero-mean normal distribution with the
supplied scales to each state part and returns a same-type `list` of `Tensor`s
as the state parts of `current_state`.
Args:
scale: a `Tensor` or Python `list` of `Tensor`s of any shapes and `dtypes`
controlling the scale of the normal proposal distribution.
name: Python `str` name prefixed to Ops created by this function.
Default value: 'random_walk_normal_fn'.
Returns:
random_walk_normal_fn: A callable accepting a Python `list` of `Tensor`s
representing the state parts of the `current_state` and an `int`
representing the random seed to be used to generate the proposal. The
callable returns the same-type `list` of `Tensor`s as the input and
represents the proposal for the RWM algorithm.
|
tensorflow_probability/python/mcmc/random_walk_metropolis.py
|
def random_walk_normal_fn(scale=1., name=None):
"""Returns a callable that adds a random normal perturbation to the input.
This function returns a callable that accepts a Python `list` of `Tensor`s of
any shapes and `dtypes` representing the state parts of the `current_state`
and a random seed. The supplied argument `scale` must be a `Tensor` or Python
`list` of `Tensor`s representing the scale of the generated
proposal. `scale` must broadcast with the state parts of `current_state`.
The callable adds a sample from a zero-mean normal distribution with the
supplied scales to each state part and returns a same-type `list` of `Tensor`s
as the state parts of `current_state`.
Args:
scale: a `Tensor` or Python `list` of `Tensor`s of any shapes and `dtypes`
controlling the scale of the normal proposal distribution.
name: Python `str` name prefixed to Ops created by this function.
Default value: 'random_walk_normal_fn'.
Returns:
random_walk_normal_fn: A callable accepting a Python `list` of `Tensor`s
representing the state parts of the `current_state` and an `int`
representing the random seed to be used to generate the proposal. The
callable returns the same-type `list` of `Tensor`s as the input and
represents the proposal for the RWM algorithm.
"""
def _fn(state_parts, seed):
"""Adds a normal perturbation to the input state.
Args:
state_parts: A list of `Tensor`s of any shape and real dtype representing
the state parts of the `current_state` of the Markov chain.
seed: `int` or None. The random seed for this `Op`. If `None`, no seed is
applied.
Default value: `None`.
Returns:
      perturbed_state_parts: A Python `list` of `Tensor`s. Has the same
shape and type as the `state_parts`.
Raises:
ValueError: if `scale` does not broadcast with `state_parts`.
"""
with tf.compat.v1.name_scope(
name, 'random_walk_normal_fn', values=[state_parts, scale, seed]):
scales = scale if mcmc_util.is_list_like(scale) else [scale]
if len(scales) == 1:
scales *= len(state_parts)
if len(state_parts) != len(scales):
raise ValueError('`scale` must broadcast with `state_parts`.')
seed_stream = distributions.SeedStream(seed, salt='RandomWalkNormalFn')
next_state_parts = [
tf.random.normal(
mean=state_part,
stddev=scale_part,
shape=tf.shape(input=state_part),
dtype=state_part.dtype.base_dtype,
seed=seed_stream())
for scale_part, state_part in zip(scales, state_parts)
]
return next_state_parts
return _fn
|
def random_walk_normal_fn(scale=1., name=None):
"""Returns a callable that adds a random normal perturbation to the input.
This function returns a callable that accepts a Python `list` of `Tensor`s of
any shapes and `dtypes` representing the state parts of the `current_state`
and a random seed. The supplied argument `scale` must be a `Tensor` or Python
`list` of `Tensor`s representing the scale of the generated
proposal. `scale` must broadcast with the state parts of `current_state`.
The callable adds a sample from a zero-mean normal distribution with the
supplied scales to each state part and returns a same-type `list` of `Tensor`s
as the state parts of `current_state`.
Args:
scale: a `Tensor` or Python `list` of `Tensor`s of any shapes and `dtypes`
controlling the scale of the normal proposal distribution.
name: Python `str` name prefixed to Ops created by this function.
Default value: 'random_walk_normal_fn'.
Returns:
random_walk_normal_fn: A callable accepting a Python `list` of `Tensor`s
representing the state parts of the `current_state` and an `int`
representing the random seed to be used to generate the proposal. The
callable returns the same-type `list` of `Tensor`s as the input and
represents the proposal for the RWM algorithm.
"""
def _fn(state_parts, seed):
"""Adds a normal perturbation to the input state.
Args:
state_parts: A list of `Tensor`s of any shape and real dtype representing
the state parts of the `current_state` of the Markov chain.
seed: `int` or None. The random seed for this `Op`. If `None`, no seed is
applied.
Default value: `None`.
Returns:
      perturbed_state_parts: A Python `list` of `Tensor`s. Has the same
shape and type as the `state_parts`.
Raises:
ValueError: if `scale` does not broadcast with `state_parts`.
"""
with tf.compat.v1.name_scope(
name, 'random_walk_normal_fn', values=[state_parts, scale, seed]):
scales = scale if mcmc_util.is_list_like(scale) else [scale]
if len(scales) == 1:
scales *= len(state_parts)
if len(state_parts) != len(scales):
raise ValueError('`scale` must broadcast with `state_parts`.')
seed_stream = distributions.SeedStream(seed, salt='RandomWalkNormalFn')
next_state_parts = [
tf.random.normal(
mean=state_part,
stddev=scale_part,
shape=tf.shape(input=state_part),
dtype=state_part.dtype.base_dtype,
seed=seed_stream())
for scale_part, state_part in zip(scales, state_parts)
]
return next_state_parts
return _fn
|
[
"Returns",
"a",
"callable",
"that",
"adds",
"a",
"random",
"normal",
"perturbation",
"to",
"the",
"input",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/random_walk_metropolis.py#L48-L109
|
[
"def",
"random_walk_normal_fn",
"(",
"scale",
"=",
"1.",
",",
"name",
"=",
"None",
")",
":",
"def",
"_fn",
"(",
"state_parts",
",",
"seed",
")",
":",
"\"\"\"Adds a normal perturbation to the input state.\n\n Args:\n state_parts: A list of `Tensor`s of any shape and real dtype representing\n the state parts of the `current_state` of the Markov chain.\n seed: `int` or None. The random seed for this `Op`. If `None`, no seed is\n applied.\n Default value: `None`.\n\n Returns:\n perturbed_state_parts: A Python `list` of The `Tensor`s. Has the same\n shape and type as the `state_parts`.\n\n Raises:\n ValueError: if `scale` does not broadcast with `state_parts`.\n \"\"\"",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'random_walk_normal_fn'",
",",
"values",
"=",
"[",
"state_parts",
",",
"scale",
",",
"seed",
"]",
")",
":",
"scales",
"=",
"scale",
"if",
"mcmc_util",
".",
"is_list_like",
"(",
"scale",
")",
"else",
"[",
"scale",
"]",
"if",
"len",
"(",
"scales",
")",
"==",
"1",
":",
"scales",
"*=",
"len",
"(",
"state_parts",
")",
"if",
"len",
"(",
"state_parts",
")",
"!=",
"len",
"(",
"scales",
")",
":",
"raise",
"ValueError",
"(",
"'`scale` must broadcast with `state_parts`.'",
")",
"seed_stream",
"=",
"distributions",
".",
"SeedStream",
"(",
"seed",
",",
"salt",
"=",
"'RandomWalkNormalFn'",
")",
"next_state_parts",
"=",
"[",
"tf",
".",
"random",
".",
"normal",
"(",
"mean",
"=",
"state_part",
",",
"stddev",
"=",
"scale_part",
",",
"shape",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"state_part",
")",
",",
"dtype",
"=",
"state_part",
".",
"dtype",
".",
"base_dtype",
",",
"seed",
"=",
"seed_stream",
"(",
")",
")",
"for",
"scale_part",
",",
"state_part",
"in",
"zip",
"(",
"scales",
",",
"state_parts",
")",
"]",
"return",
"next_state_parts",
"return",
"_fn"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
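A hedged usage sketch: the returned callable plugs into `tfp.mcmc.RandomWalkMetropolis` as its `new_state_fn` (the standard-normal target and the scale are illustrative):
```python
import tensorflow as tf
import tensorflow_probability as tfp

kernel = tfp.mcmc.RandomWalkMetropolis(
    target_log_prob_fn=lambda x: -0.5 * tf.reduce_sum(x ** 2),
    new_state_fn=tfp.mcmc.random_walk_normal_fn(scale=0.5))
samples, _ = tfp.mcmc.sample_chain(
    num_results=500, current_state=tf.zeros([2]),
    kernel=kernel, num_burnin_steps=100)
print(tf.reduce_mean(samples, axis=0))  # near zero for this target
```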
test
|
random_walk_uniform_fn
|
Returns a callable that adds a random uniform perturbation to the input.
For more details on `random_walk_uniform_fn`, see
`random_walk_normal_fn`. `scale` might
be a `Tensor` or a list of `Tensor`s that should broadcast with state parts
of the `current_state`. The generated uniform perturbation is sampled as a
uniform point on the rectangle `[-scale, scale]`.
Args:
scale: a `Tensor` or Python `list` of `Tensor`s of any shapes and `dtypes`
controlling the upper and lower bound of the uniform proposal
distribution.
name: Python `str` name prefixed to Ops created by this function.
Default value: 'random_walk_uniform_fn'.
Returns:
random_walk_uniform_fn: A callable accepting a Python `list` of `Tensor`s
representing the state parts of the `current_state` and an `int`
representing the random seed used to generate the proposal. The callable
returns the same-type `list` of `Tensor`s as the input and represents the
proposal for the RWM algorithm.
|
tensorflow_probability/python/mcmc/random_walk_metropolis.py
|
def random_walk_uniform_fn(scale=1., name=None):
"""Returns a callable that adds a random uniform perturbation to the input.
For more details on `random_walk_uniform_fn`, see
`random_walk_normal_fn`. `scale` might
be a `Tensor` or a list of `Tensor`s that should broadcast with state parts
of the `current_state`. The generated uniform perturbation is sampled as a
uniform point on the rectangle `[-scale, scale]`.
Args:
scale: a `Tensor` or Python `list` of `Tensor`s of any shapes and `dtypes`
controlling the upper and lower bound of the uniform proposal
distribution.
name: Python `str` name prefixed to Ops created by this function.
Default value: 'random_walk_uniform_fn'.
Returns:
random_walk_uniform_fn: A callable accepting a Python `list` of `Tensor`s
representing the state parts of the `current_state` and an `int`
representing the random seed used to generate the proposal. The callable
returns the same-type `list` of `Tensor`s as the input and represents the
proposal for the RWM algorithm.
"""
def _fn(state_parts, seed):
"""Adds a uniform perturbation to the input state.
Args:
state_parts: A list of `Tensor`s of any shape and real dtype representing
the state parts of the `current_state` of the Markov chain.
seed: `int` or None. The random seed for this `Op`. If `None`, no seed is
applied.
Default value: `None`.
Returns:
      perturbed_state_parts: A Python `list` of `Tensor`s. Has the same
shape and type as the `state_parts`.
Raises:
ValueError: if `scale` does not broadcast with `state_parts`.
"""
with tf.compat.v1.name_scope(
name, 'random_walk_uniform_fn', values=[state_parts, scale, seed]):
scales = scale if mcmc_util.is_list_like(scale) else [scale]
if len(scales) == 1:
scales *= len(state_parts)
if len(state_parts) != len(scales):
raise ValueError('`scale` must broadcast with `state_parts`.')
seed_stream = distributions.SeedStream(seed, salt='RandomWalkUniformFn')
next_state_parts = [
tf.random.uniform(
minval=state_part - scale_part,
maxval=state_part + scale_part,
shape=tf.shape(input=state_part),
dtype=state_part.dtype.base_dtype,
seed=seed_stream())
for scale_part, state_part in zip(scales, state_parts)
]
return next_state_parts
return _fn
|
def random_walk_uniform_fn(scale=1., name=None):
"""Returns a callable that adds a random uniform perturbation to the input.
For more details on `random_walk_uniform_fn`, see
`random_walk_normal_fn`. `scale` might
be a `Tensor` or a list of `Tensor`s that should broadcast with state parts
of the `current_state`. The generated uniform perturbation is sampled as a
uniform point on the rectangle `[-scale, scale]`.
Args:
scale: a `Tensor` or Python `list` of `Tensor`s of any shapes and `dtypes`
controlling the upper and lower bound of the uniform proposal
distribution.
name: Python `str` name prefixed to Ops created by this function.
Default value: 'random_walk_uniform_fn'.
Returns:
random_walk_uniform_fn: A callable accepting a Python `list` of `Tensor`s
representing the state parts of the `current_state` and an `int`
representing the random seed used to generate the proposal. The callable
returns the same-type `list` of `Tensor`s as the input and represents the
proposal for the RWM algorithm.
"""
def _fn(state_parts, seed):
"""Adds a uniform perturbation to the input state.
Args:
state_parts: A list of `Tensor`s of any shape and real dtype representing
the state parts of the `current_state` of the Markov chain.
seed: `int` or None. The random seed for this `Op`. If `None`, no seed is
applied.
Default value: `None`.
Returns:
      perturbed_state_parts: A Python `list` of `Tensor`s. Has the same
shape and type as the `state_parts`.
Raises:
ValueError: if `scale` does not broadcast with `state_parts`.
"""
with tf.compat.v1.name_scope(
name, 'random_walk_uniform_fn', values=[state_parts, scale, seed]):
scales = scale if mcmc_util.is_list_like(scale) else [scale]
if len(scales) == 1:
scales *= len(state_parts)
if len(state_parts) != len(scales):
raise ValueError('`scale` must broadcast with `state_parts`.')
seed_stream = distributions.SeedStream(seed, salt='RandomWalkUniformFn')
next_state_parts = [
tf.random.uniform(
minval=state_part - scale_part,
maxval=state_part + scale_part,
shape=tf.shape(input=state_part),
dtype=state_part.dtype.base_dtype,
seed=seed_stream())
for scale_part, state_part in zip(scales, state_parts)
]
return next_state_parts
return _fn
|
[
"Returns",
"a",
"callable",
"that",
"adds",
"a",
"random",
"uniform",
"perturbation",
"to",
"the",
"input",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/random_walk_metropolis.py#L112-L170
|
[
"def",
"random_walk_uniform_fn",
"(",
"scale",
"=",
"1.",
",",
"name",
"=",
"None",
")",
":",
"def",
"_fn",
"(",
"state_parts",
",",
"seed",
")",
":",
"\"\"\"Adds a uniform perturbation to the input state.\n\n Args:\n state_parts: A list of `Tensor`s of any shape and real dtype representing\n the state parts of the `current_state` of the Markov chain.\n seed: `int` or None. The random seed for this `Op`. If `None`, no seed is\n applied.\n Default value: `None`.\n\n Returns:\n perturbed_state_parts: A Python `list` of The `Tensor`s. Has the same\n shape and type as the `state_parts`.\n\n Raises:\n ValueError: if `scale` does not broadcast with `state_parts`.\n \"\"\"",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'random_walk_uniform_fn'",
",",
"values",
"=",
"[",
"state_parts",
",",
"scale",
",",
"seed",
"]",
")",
":",
"scales",
"=",
"scale",
"if",
"mcmc_util",
".",
"is_list_like",
"(",
"scale",
")",
"else",
"[",
"scale",
"]",
"if",
"len",
"(",
"scales",
")",
"==",
"1",
":",
"scales",
"*=",
"len",
"(",
"state_parts",
")",
"if",
"len",
"(",
"state_parts",
")",
"!=",
"len",
"(",
"scales",
")",
":",
"raise",
"ValueError",
"(",
"'`scale` must broadcast with `state_parts`.'",
")",
"seed_stream",
"=",
"distributions",
".",
"SeedStream",
"(",
"seed",
",",
"salt",
"=",
"'RandomWalkUniformFn'",
")",
"next_state_parts",
"=",
"[",
"tf",
".",
"random",
".",
"uniform",
"(",
"minval",
"=",
"state_part",
"-",
"scale_part",
",",
"maxval",
"=",
"state_part",
"+",
"scale_part",
",",
"shape",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"state_part",
")",
",",
"dtype",
"=",
"state_part",
".",
"dtype",
".",
"base_dtype",
",",
"seed",
"=",
"seed_stream",
"(",
")",
")",
"for",
"scale_part",
",",
"state_part",
"in",
"zip",
"(",
"scales",
",",
"state_parts",
")",
"]",
"return",
"next_state_parts",
"return",
"_fn"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
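The proposal itself is just a uniform draw on the box around the current state. A NumPy sketch of one perturbation (values illustrative):
```python
import numpy as np

rng = np.random.default_rng(0)
state, scale = np.array([0., 1.]), 0.25
# Uniform proposal on [state - scale, state + scale], elementwise.
proposal = rng.uniform(low=state - scale, high=state + scale)
print(proposal)  # each coordinate moved by at most 0.25
```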
test
|
_kl_independent
|
Batched KL divergence `KL(a || b)` for Independent distributions.
We can leverage the fact that
```
KL(Independent(a) || Independent(b)) = sum(KL(a || b))
```
where the sum is over the `reinterpreted_batch_ndims`.
Args:
a: Instance of `Independent`.
b: Instance of `Independent`.
name: (optional) name to use for created ops. Default "kl_independent".
Returns:
Batchwise `KL(a || b)`.
Raises:
ValueError: If the event space for `a` and `b`, or their underlying
distributions don't match.
|
tensorflow_probability/python/distributions/independent.py
|
def _kl_independent(a, b, name="kl_independent"):
"""Batched KL divergence `KL(a || b)` for Independent distributions.
We can leverage the fact that
```
KL(Independent(a) || Independent(b)) = sum(KL(a || b))
```
where the sum is over the `reinterpreted_batch_ndims`.
Args:
a: Instance of `Independent`.
b: Instance of `Independent`.
name: (optional) name to use for created ops. Default "kl_independent".
Returns:
Batchwise `KL(a || b)`.
Raises:
ValueError: If the event space for `a` and `b`, or their underlying
distributions don't match.
"""
p = a.distribution
q = b.distribution
# The KL between any two (non)-batched distributions is a scalar.
# Given that the KL between two factored distributions is the sum, i.e.
  # KL(p1(x)p2(y) || q1(x)q2(y)) = KL(p1 || q1) + KL(p2 || q2), we compute
# KL(p || q) and do a `reduce_sum` on the reinterpreted batch dimensions.
if (tensorshape_util.is_fully_defined(a.event_shape) and
tensorshape_util.is_fully_defined(b.event_shape)):
if a.event_shape == b.event_shape:
if p.event_shape == q.event_shape:
num_reduce_dims = (tensorshape_util.rank(a.event_shape) -
tensorshape_util.rank(p.event_shape))
reduce_dims = [-i - 1 for i in range(0, num_reduce_dims)]
return tf.reduce_sum(
input_tensor=kullback_leibler.kl_divergence(p, q, name=name),
axis=reduce_dims)
else:
raise NotImplementedError("KL between Independents with different "
"event shapes not supported.")
else:
raise ValueError("Event shapes do not match.")
else:
with tf.control_dependencies(
[
assert_util.assert_equal(a.event_shape_tensor(),
b.event_shape_tensor()),
assert_util.assert_equal(p.event_shape_tensor(),
q.event_shape_tensor())
]):
num_reduce_dims = (
prefer_static.rank_from_shape(
a.event_shape_tensor, a.event_shape) -
prefer_static.rank_from_shape(
p.event_shape_tensor, a.event_shape))
reduce_dims = prefer_static.range(-num_reduce_dims - 1, -1, 1)
return tf.reduce_sum(
input_tensor=kullback_leibler.kl_divergence(p, q, name=name),
axis=reduce_dims)
|
def _kl_independent(a, b, name="kl_independent"):
"""Batched KL divergence `KL(a || b)` for Independent distributions.
We can leverage the fact that
```
KL(Independent(a) || Independent(b)) = sum(KL(a || b))
```
where the sum is over the `reinterpreted_batch_ndims`.
Args:
a: Instance of `Independent`.
b: Instance of `Independent`.
name: (optional) name to use for created ops. Default "kl_independent".
Returns:
Batchwise `KL(a || b)`.
Raises:
ValueError: If the event space for `a` and `b`, or their underlying
distributions don't match.
"""
p = a.distribution
q = b.distribution
# The KL between any two (non)-batched distributions is a scalar.
# Given that the KL between two factored distributions is the sum, i.e.
  # KL(p1(x)p2(y) || q1(x)q2(y)) = KL(p1 || q1) + KL(p2 || q2), we compute
# KL(p || q) and do a `reduce_sum` on the reinterpreted batch dimensions.
if (tensorshape_util.is_fully_defined(a.event_shape) and
tensorshape_util.is_fully_defined(b.event_shape)):
if a.event_shape == b.event_shape:
if p.event_shape == q.event_shape:
num_reduce_dims = (tensorshape_util.rank(a.event_shape) -
tensorshape_util.rank(p.event_shape))
reduce_dims = [-i - 1 for i in range(0, num_reduce_dims)]
return tf.reduce_sum(
input_tensor=kullback_leibler.kl_divergence(p, q, name=name),
axis=reduce_dims)
else:
raise NotImplementedError("KL between Independents with different "
"event shapes not supported.")
else:
raise ValueError("Event shapes do not match.")
else:
with tf.control_dependencies(
[
assert_util.assert_equal(a.event_shape_tensor(),
b.event_shape_tensor()),
assert_util.assert_equal(p.event_shape_tensor(),
q.event_shape_tensor())
]):
num_reduce_dims = (
prefer_static.rank_from_shape(
a.event_shape_tensor, a.event_shape) -
prefer_static.rank_from_shape(
p.event_shape_tensor, a.event_shape))
reduce_dims = prefer_static.range(-num_reduce_dims - 1, -1, 1)
return tf.reduce_sum(
input_tensor=kullback_leibler.kl_divergence(p, q, name=name),
axis=reduce_dims)
|
[
"Batched",
"KL",
"divergence",
"KL",
"(",
"a",
"||",
"b",
")",
"for",
"Independent",
"distributions",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/independent.py#L279-L339
|
[
"def",
"_kl_independent",
"(",
"a",
",",
"b",
",",
"name",
"=",
"\"kl_independent\"",
")",
":",
"p",
"=",
"a",
".",
"distribution",
"q",
"=",
"b",
".",
"distribution",
"# The KL between any two (non)-batched distributions is a scalar.",
"# Given that the KL between two factored distributions is the sum, i.e.",
"# KL(p1(x)p2(y) || q1(x)q2(y)) = KL(p1 || q1) + KL(q1 || q2), we compute",
"# KL(p || q) and do a `reduce_sum` on the reinterpreted batch dimensions.",
"if",
"(",
"tensorshape_util",
".",
"is_fully_defined",
"(",
"a",
".",
"event_shape",
")",
"and",
"tensorshape_util",
".",
"is_fully_defined",
"(",
"b",
".",
"event_shape",
")",
")",
":",
"if",
"a",
".",
"event_shape",
"==",
"b",
".",
"event_shape",
":",
"if",
"p",
".",
"event_shape",
"==",
"q",
".",
"event_shape",
":",
"num_reduce_dims",
"=",
"(",
"tensorshape_util",
".",
"rank",
"(",
"a",
".",
"event_shape",
")",
"-",
"tensorshape_util",
".",
"rank",
"(",
"p",
".",
"event_shape",
")",
")",
"reduce_dims",
"=",
"[",
"-",
"i",
"-",
"1",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"num_reduce_dims",
")",
"]",
"return",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"kullback_leibler",
".",
"kl_divergence",
"(",
"p",
",",
"q",
",",
"name",
"=",
"name",
")",
",",
"axis",
"=",
"reduce_dims",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"\"KL between Independents with different \"",
"\"event shapes not supported.\"",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Event shapes do not match.\"",
")",
"else",
":",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"assert_util",
".",
"assert_equal",
"(",
"a",
".",
"event_shape_tensor",
"(",
")",
",",
"b",
".",
"event_shape_tensor",
"(",
")",
")",
",",
"assert_util",
".",
"assert_equal",
"(",
"p",
".",
"event_shape_tensor",
"(",
")",
",",
"q",
".",
"event_shape_tensor",
"(",
")",
")",
"]",
")",
":",
"num_reduce_dims",
"=",
"(",
"prefer_static",
".",
"rank_from_shape",
"(",
"a",
".",
"event_shape_tensor",
",",
"a",
".",
"event_shape",
")",
"-",
"prefer_static",
".",
"rank_from_shape",
"(",
"p",
".",
"event_shape_tensor",
",",
"a",
".",
"event_shape",
")",
")",
"reduce_dims",
"=",
"prefer_static",
".",
"range",
"(",
"-",
"num_reduce_dims",
"-",
"1",
",",
"-",
"1",
",",
"1",
")",
"return",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"kullback_leibler",
".",
"kl_divergence",
"(",
"p",
",",
"q",
",",
"name",
"=",
"name",
")",
",",
"axis",
"=",
"reduce_dims",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
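The factorization identity is easy to check against the closed-form normal KL. A NumPy sketch (parameters illustrative):
```python
import numpy as np

def kl_normal(m0, s0, m1, s1):
    # KL(N(m0, s0^2) || N(m1, s1^2)), elementwise.
    return np.log(s1 / s0) + (s0**2 + (m0 - m1)**2) / (2. * s1**2) - 0.5

m0, s0 = np.array([0., 1.]), np.array([1., 2.])
m1, s1 = np.array([1., 0.]), np.array([2., 1.])
# KL between the factored (Independent) distributions is the sum of the
# per-dimension KLs over the reinterpreted batch dimension.
print(kl_normal(m0, s0, m1, s1).sum())
```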
test
|
Independent._get_default_reinterpreted_batch_ndims
|
Computes the default value for reinterpreted_batch_ndim __init__ arg.
|
tensorflow_probability/python/distributions/independent.py
|
def _get_default_reinterpreted_batch_ndims(self, distribution):
"""Computes the default value for reinterpreted_batch_ndim __init__ arg."""
ndims = prefer_static.rank_from_shape(
distribution.batch_shape_tensor, distribution.batch_shape)
return prefer_static.maximum(0, ndims - 1)
|
def _get_default_reinterpreted_batch_ndims(self, distribution):
"""Computes the default value for reinterpreted_batch_ndim __init__ arg."""
ndims = prefer_static.rank_from_shape(
distribution.batch_shape_tensor, distribution.batch_shape)
return prefer_static.maximum(0, ndims - 1)
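
The rule above amounts to `max(0, rank(batch_shape) - 1)`: by default, all but the leftmost batch dimension are reinterpreted as event dimensions. A framework-free restatement (function name hypothetical):

```python
# Pure-Python restatement of the default rule: reinterpret all but the
# leftmost batch dimension as event dimensions.
def default_reinterpreted_batch_ndims(batch_rank):
    return max(0, batch_rank - 1)

assert default_reinterpreted_batch_ndims(0) == 0  # scalar batch: nothing to reinterpret
assert default_reinterpreted_batch_ndims(1) == 0  # keep the single batch dim
assert default_reinterpreted_batch_ndims(3) == 2  # batch (4, 2, 3) -> batch (4,), event (2, 3)
```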
|
[
"Computes",
"the",
"default",
"value",
"for",
"reinterpreted_batch_ndim",
"__init__",
"arg",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/independent.py#L271-L275
|
[
"def",
"_get_default_reinterpreted_batch_ndims",
"(",
"self",
",",
"distribution",
")",
":",
"ndims",
"=",
"prefer_static",
".",
"rank_from_shape",
"(",
"distribution",
".",
"batch_shape_tensor",
",",
"distribution",
".",
"batch_shape",
")",
"return",
"prefer_static",
".",
"maximum",
"(",
"0",
",",
"ndims",
"-",
"1",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
Mixture._expand_to_event_rank
|
Expand the rank of x up to static_event_rank times for broadcasting.
The static event rank was checked to not be None at construction time.
Args:
x: A tensor to expand.
Returns:
The expanded tensor.
|
tensorflow_probability/python/distributions/mixture.py
|
def _expand_to_event_rank(self, x):
"""Expand the rank of x up to static_event_rank times for broadcasting.
The static event rank was checked to not be None at construction time.
Args:
x: A tensor to expand.
Returns:
The expanded tensor.
"""
expanded_x = x
for _ in range(tensorshape_util.rank(self.event_shape)):
expanded_x = tf.expand_dims(expanded_x, -1)
return expanded_x
|
def _expand_to_event_rank(self, x):
"""Expand the rank of x up to static_event_rank times for broadcasting.
The static event rank was checked to not be None at construction time.
Args:
x: A tensor to expand.
Returns:
The expanded tensor.
"""
expanded_x = x
for _ in range(tensorshape_util.rank(self.event_shape)):
expanded_x = tf.expand_dims(expanded_x, -1)
return expanded_x
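
As a sketch of why the expansion is needed (NumPy, hypothetical shapes): appending one singleton axis per event dimension lets per-component weights broadcast against values of shape `batch + event`.

```python
import numpy as np

cat_prob = np.array([0.3, 0.7])   # shape (2,): one weight per batch member
event_rank = 2                    # e.g. event_shape = (3, 4)

# Append one axis per event dimension, exactly like the loop above.
expanded = cat_prob
for _ in range(event_rank):
    expanded = np.expand_dims(expanded, -1)

assert expanded.shape == (2, 1, 1)
values = np.zeros((2, 3, 4))                  # batch + event
assert (expanded * values).shape == (2, 3, 4)  # broadcasts cleanly
```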
|
[
"Expand",
"the",
"rank",
"of",
"x",
"up",
"to",
"static_event_rank",
"times",
"for",
"broadcasting",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/mixture.py#L235-L248
|
[
"def",
"_expand_to_event_rank",
"(",
"self",
",",
"x",
")",
":",
"expanded_x",
"=",
"x",
"for",
"_",
"in",
"range",
"(",
"tensorshape_util",
".",
"rank",
"(",
"self",
".",
"event_shape",
")",
")",
":",
"expanded_x",
"=",
"tf",
".",
"expand_dims",
"(",
"expanded_x",
",",
"-",
"1",
")",
"return",
"expanded_x"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
Mixture.entropy_lower_bound
|
r"""A lower bound on the entropy of this mixture model.
The bound below is not always very tight, and its usefulness depends
on the mixture probabilities and the components in use.
A lower bound is useful for ELBO when the `Mixture` is the variational
distribution:
\\(
\log p(x) >= ELBO = \int q(z) \log p(x, z) dz + H[q]
\\)
where \\( p \\) is the prior distribution, \\( q \\) is the variational,
and \\( H[q] \\) is the entropy of \\( q \\). If there is a lower bound
\\( G[q] \\) such that \\( H[q] \geq G[q] \\) then it can be used in
place of \\( H[q] \\).
For a mixture of distributions \\( q(Z) = \sum_i c_i q_i(Z) \\) with
\\( \sum_i c_i = 1 \\), by the concavity of \\( f(x) = -x \log x \\), a
simple lower bound is:
\\(
\begin{align}
H[q] & = - \int q(z) \log q(z) dz \\\
& = - \int (\sum_i c_i q_i(z)) \log(\sum_i c_i q_i(z)) dz \\\
& \geq - \sum_i c_i \int q_i(z) \log q_i(z) dz \\\
& = \sum_i c_i H[q_i]
\end{align}
\\)
This is the term we calculate below for \\( G[q] \\).
Args:
name: A name for this operation (optional).
Returns:
A lower bound on the Mixture's entropy.
|
tensorflow_probability/python/distributions/mixture.py
|
def entropy_lower_bound(self, name="entropy_lower_bound"):
r"""A lower bound on the entropy of this mixture model.
The bound below is not always very tight, and its usefulness depends
on the mixture probabilities and the components in use.
A lower bound is useful for ELBO when the `Mixture` is the variational
distribution:
\\(
\log p(x) >= ELBO = \int q(z) \log p(x, z) dz + H[q]
\\)
where \\( p \\) is the prior distribution, \\( q \\) is the variational,
and \\( H[q] \\) is the entropy of \\( q \\). If there is a lower bound
\\( G[q] \\) such that \\( H[q] \geq G[q] \\) then it can be used in
place of \\( H[q] \\).
For a mixture of distributions \\( q(Z) = \sum_i c_i q_i(Z) \\) with
\\( \sum_i c_i = 1 \\), by the concavity of \\( f(x) = -x \log x \\), a
simple lower bound is:
\\(
\begin{align}
H[q] & = - \int q(z) \log q(z) dz \\\
& = - \int (\sum_i c_i q_i(z)) \log(\sum_i c_i q_i(z)) dz \\\
& \geq - \sum_i c_i \int q_i(z) \log q_i(z) dz \\\
& = \sum_i c_i H[q_i]
\end{align}
\\)
This is the term we calculate below for \\( G[q] \\).
Args:
name: A name for this operation (optional).
Returns:
A lower bound on the Mixture's entropy.
"""
with self._name_scope(name):
with tf.control_dependencies(self._assertions):
distribution_entropies = [d.entropy() for d in self.components]
cat_probs = self._cat_probs(log_probs=False)
partial_entropies = [
c_p * m for (c_p, m) in zip(cat_probs, distribution_entropies)
]
# These are all the same shape by virtue of matching batch_shape
return tf.add_n(partial_entropies)
|
def entropy_lower_bound(self, name="entropy_lower_bound"):
r"""A lower bound on the entropy of this mixture model.
The bound below is not always very tight, and its usefulness depends
on the mixture probabilities and the components in use.
A lower bound is useful for ELBO when the `Mixture` is the variational
distribution:
\\(
\log p(x) >= ELBO = \int q(z) \log p(x, z) dz + H[q]
\\)
where \\( p \\) is the prior distribution, \\( q \\) is the variational,
and \\( H[q] \\) is the entropy of \\( q \\). If there is a lower bound
\\( G[q] \\) such that \\( H[q] \geq G[q] \\) then it can be used in
place of \\( H[q] \\).
For a mixture of distributions \\( q(Z) = \sum_i c_i q_i(Z) \\) with
\\( \sum_i c_i = 1 \\), by the concavity of \\( f(x) = -x \log x \\), a
simple lower bound is:
\\(
\begin{align}
H[q] & = - \int q(z) \log q(z) dz \\\
& = - \int (\sum_i c_i q_i(z)) \log(\sum_i c_i q_i(z)) dz \\\
& \geq - \sum_i c_i \int q_i(z) \log q_i(z) dz \\\
& = \sum_i c_i H[q_i]
\end{align}
\\)
This is the term we calculate below for \\( G[q] \\).
Args:
name: A name for this operation (optional).
Returns:
A lower bound on the Mixture's entropy.
"""
with self._name_scope(name):
with tf.control_dependencies(self._assertions):
distribution_entropies = [d.entropy() for d in self.components]
cat_probs = self._cat_probs(log_probs=False)
partial_entropies = [
c_p * m for (c_p, m) in zip(cat_probs, distribution_entropies)
]
# These are all the same shape by virtue of matching batch_shape
return tf.add_n(partial_entropies)
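
A small numeric sketch of the bound `H[q] >= sum_i c_i H[q_i]` for a two-component Gaussian mixture (pure Python; the closed-form Normal entropy is `0.5 * log(2 * pi * e * sigma**2)`):

```python
import math

def normal_entropy(sigma):
    # Differential entropy of a univariate Normal with scale sigma.
    return 0.5 * math.log(2.0 * math.pi * math.e * sigma ** 2)

cat_probs = [0.25, 0.75]
component_entropies = [normal_entropy(1.0), normal_entropy(2.0)]

# sum_i c_i H[q_i]: a lower bound on the (intractable) mixture entropy.
lower_bound = sum(c * h for c, h in zip(cat_probs, component_entropies))
print(lower_bound)
```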
|
[
"r",
"A",
"lower",
"bound",
"on",
"the",
"entropy",
"of",
"this",
"mixture",
"model",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/mixture.py#L448-L495
|
[
"def",
"entropy_lower_bound",
"(",
"self",
",",
"name",
"=",
"\"entropy_lower_bound\"",
")",
":",
"with",
"self",
".",
"_name_scope",
"(",
"name",
")",
":",
"with",
"tf",
".",
"control_dependencies",
"(",
"self",
".",
"_assertions",
")",
":",
"distribution_entropies",
"=",
"[",
"d",
".",
"entropy",
"(",
")",
"for",
"d",
"in",
"self",
".",
"components",
"]",
"cat_probs",
"=",
"self",
".",
"_cat_probs",
"(",
"log_probs",
"=",
"False",
")",
"partial_entropies",
"=",
"[",
"c_p",
"*",
"m",
"for",
"(",
"c_p",
",",
"m",
")",
"in",
"zip",
"(",
"cat_probs",
",",
"distribution_entropies",
")",
"]",
"# These are all the same shape by virtue of matching batch_shape",
"return",
"tf",
".",
"add_n",
"(",
"partial_entropies",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
Mixture._cat_probs
|
Get a list of num_components batchwise probabilities.
|
tensorflow_probability/python/distributions/mixture.py
|
def _cat_probs(self, log_probs):
"""Get a list of num_components batchwise probabilities."""
which_softmax = tf.nn.log_softmax if log_probs else tf.nn.softmax
cat_probs = which_softmax(self.cat.logits)
cat_probs = tf.unstack(cat_probs, num=self.num_components, axis=-1)
return cat_probs
|
def _cat_probs(self, log_probs):
"""Get a list of num_components batchwise probabilities."""
which_softmax = tf.nn.log_softmax if log_probs else tf.nn.softmax
cat_probs = which_softmax(self.cat.logits)
cat_probs = tf.unstack(cat_probs, num=self.num_components, axis=-1)
return cat_probs
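
A NumPy sketch of the same softmax-then-unstack pattern (hypothetical logits):

```python
import numpy as np

logits = np.array([[0.0, 1.0, 2.0],
                   [1.0, 1.0, 1.0]])          # batch of 2, 3 components

# Numerically stable softmax over the last axis.
exp = np.exp(logits - logits.max(axis=-1, keepdims=True))
probs = exp / exp.sum(axis=-1, keepdims=True)  # shape (2, 3)

# Split into a list of per-component tensors, like tf.unstack(..., axis=-1).
cat_probs = [probs[..., k] for k in range(probs.shape[-1])]
assert len(cat_probs) == 3 and cat_probs[0].shape == (2,)
assert np.allclose(sum(cat_probs), 1.0)
```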
|
[
"Get",
"a",
"list",
"of",
"num_components",
"batchwise",
"probabilities",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/mixture.py#L497-L502
|
[
"def",
"_cat_probs",
"(",
"self",
",",
"log_probs",
")",
":",
"which_softmax",
"=",
"tf",
".",
"nn",
".",
"log_softmax",
"if",
"log_probs",
"else",
"tf",
".",
"nn",
".",
"softmax",
"cat_probs",
"=",
"which_softmax",
"(",
"self",
".",
"cat",
".",
"logits",
")",
"cat_probs",
"=",
"tf",
".",
"unstack",
"(",
"cat_probs",
",",
"num",
"=",
"self",
".",
"num_components",
",",
"axis",
"=",
"-",
"1",
")",
"return",
"cat_probs"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_maybe_validate_args
|
Validate `outcomes`, `logits` and `probs`'s shapes.
|
tensorflow_probability/python/distributions/finite_discrete.py
|
def _maybe_validate_args(outcomes, logits, probs, validate_args):
"""Validate `outcomes`, `logits` and `probs`'s shapes."""
assertions = []
def validate_equal_last_dim(tensor_a, tensor_b, message):
if tensor_a.shape.is_fully_defined() and tensor_b.shape.is_fully_defined():
if tensor_a.shape[-1] != tensor_b.shape[-1]:
raise ValueError(message)
elif validate_args:
assertions.append(
tf.compat.v1.assert_equal(
tf.shape(input=tensor_a)[-1],
tf.shape(input=tensor_b)[-1],
message=message))
if logits is not None:
validate_equal_last_dim(
outcomes,
logits,
message='Last dimension of outcomes and logits must be equal size.')
if probs is not None:
validate_equal_last_dim(
outcomes,
probs,
message='Last dimension of outcomes and probs must be equal size.')
message = 'Rank of outcomes must be 1.'
if outcomes.shape.ndims is not None:
if outcomes.shape.ndims != 1:
raise ValueError(message)
elif validate_args:
assertions.append(tf.compat.v1.assert_rank(outcomes, 1, message=message))
message = 'Size of outcomes must be greater than 0.'
if outcomes.shape.num_elements() is not None:
if outcomes.shape.num_elements() == 0:
raise ValueError(message)
elif validate_args:
assertions.append(
tf.compat.v1.assert_greater(
tf.size(input=outcomes), 0, message=message))
if validate_args:
assertions.append(
tf.compat.v1.assert_equal(
tf.math.is_strictly_increasing(outcomes),
True,
message='outcomes is not strictly increasing.'))
return assertions
|
def _maybe_validate_args(outcomes, logits, probs, validate_args):
"""Validate `outcomes`, `logits` and `probs`'s shapes."""
assertions = []
def validate_equal_last_dim(tensor_a, tensor_b, message):
if tensor_a.shape.is_fully_defined() and tensor_b.shape.is_fully_defined():
if tensor_a.shape[-1] != tensor_b.shape[-1]:
raise ValueError(message)
elif validate_args:
assertions.append(
tf.compat.v1.assert_equal(
tf.shape(input=tensor_a)[-1],
tf.shape(input=tensor_b)[-1],
message=message))
if logits is not None:
validate_equal_last_dim(
outcomes,
logits,
message='Last dimension of outcomes and logits must be equal size.')
if probs is not None:
validate_equal_last_dim(
outcomes,
probs,
message='Last dimension of outcomes and probs must be equal size.')
message = 'Rank of outcomes must be 1.'
if outcomes.shape.ndims is not None:
if outcomes.shape.ndims != 1:
raise ValueError(message)
elif validate_args:
assertions.append(tf.compat.v1.assert_rank(outcomes, 1, message=message))
message = 'Size of outcomes must be greater than 0.'
if outcomes.shape.num_elements() is not None:
if outcomes.shape.num_elements() == 0:
raise ValueError(message)
elif validate_args:
assertions.append(
tf.compat.v1.assert_greater(
tf.size(input=outcomes), 0, message=message))
if validate_args:
assertions.append(
tf.compat.v1.assert_equal(
tf.math.is_strictly_increasing(outcomes),
True,
message='outcomes is not strictly increasing.'))
return assertions
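
The helper above follows a common static-vs-deferred validation split: fail eagerly when shapes are statically known, otherwise collect an assertion to run later. A framework-free sketch of that split (names and shape encoding hypothetical, with `None` marking an unknown dimension):

```python
def validate_last_dims_equal(shape_a, shape_b, message):
    # Static path: both last dims known -> fail eagerly with ValueError.
    if shape_a[-1] is not None and shape_b[-1] is not None:
        if shape_a[-1] != shape_b[-1]:
            raise ValueError(message)
        return []
    # Dynamic path: defer the check until concrete shapes are available.
    def deferred(concrete_a, concrete_b):
        if concrete_a[-1] != concrete_b[-1]:
            raise ValueError(message)
    return [deferred]

assert validate_last_dims_equal((3, 4), (2, 4), "mismatch") == []
assert len(validate_last_dims_equal((3, None), (2, 4), "mismatch")) == 1
```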
|
[
"Validate",
"outcomes",
"logits",
"and",
"probs",
"s",
"shapes",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/finite_discrete.py#L248-L297
|
[
"def",
"_maybe_validate_args",
"(",
"outcomes",
",",
"logits",
",",
"probs",
",",
"validate_args",
")",
":",
"assertions",
"=",
"[",
"]",
"def",
"validate_equal_last_dim",
"(",
"tensor_a",
",",
"tensor_b",
",",
"message",
")",
":",
"if",
"tensor_a",
".",
"shape",
".",
"is_fully_defined",
"(",
")",
"and",
"tensor_b",
".",
"shape",
".",
"is_fully_defined",
"(",
")",
":",
"if",
"tensor_a",
".",
"shape",
"[",
"-",
"1",
"]",
"!=",
"tensor_b",
".",
"shape",
"[",
"-",
"1",
"]",
":",
"raise",
"ValueError",
"(",
"message",
")",
"elif",
"validate_args",
":",
"assertions",
".",
"append",
"(",
"tf",
".",
"compat",
".",
"v1",
".",
"assert_equal",
"(",
"tf",
".",
"shape",
"(",
"input",
"=",
"tensor_a",
")",
"[",
"-",
"1",
"]",
",",
"tf",
".",
"shape",
"(",
"input",
"=",
"tensor_b",
")",
"[",
"-",
"1",
"]",
",",
"message",
"=",
"message",
")",
")",
"if",
"logits",
"is",
"not",
"None",
":",
"validate_equal_last_dim",
"(",
"outcomes",
",",
"logits",
",",
"message",
"=",
"'Last dimension of outcomes and logits must be equal size.'",
")",
"if",
"probs",
"is",
"not",
"None",
":",
"validate_equal_last_dim",
"(",
"outcomes",
",",
"probs",
",",
"message",
"=",
"'Last dimension of outcomes and probs must be equal size.'",
")",
"message",
"=",
"'Rank of outcomes must be 1.'",
"if",
"outcomes",
".",
"shape",
".",
"ndims",
"is",
"not",
"None",
":",
"if",
"outcomes",
".",
"shape",
".",
"ndims",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"message",
")",
"elif",
"validate_args",
":",
"assertions",
".",
"append",
"(",
"tf",
".",
"compat",
".",
"v1",
".",
"assert_rank",
"(",
"outcomes",
",",
"1",
",",
"message",
"=",
"message",
")",
")",
"message",
"=",
"'Size of outcomes must be greater than 0.'",
"if",
"outcomes",
".",
"shape",
".",
"num_elements",
"(",
")",
"is",
"not",
"None",
":",
"if",
"outcomes",
".",
"shape",
".",
"num_elements",
"(",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"message",
")",
"elif",
"validate_args",
":",
"assertions",
".",
"append",
"(",
"tf",
".",
"compat",
".",
"v1",
".",
"assert_greater",
"(",
"tf",
".",
"size",
"(",
"input",
"=",
"outcomes",
")",
",",
"0",
",",
"message",
"=",
"message",
")",
")",
"if",
"validate_args",
":",
"assertions",
".",
"append",
"(",
"tf",
".",
"compat",
".",
"v1",
".",
"assert_equal",
"(",
"tf",
".",
"math",
".",
"is_strictly_increasing",
"(",
"outcomes",
")",
",",
"True",
",",
"message",
"=",
"'outcomes is not strictly increasing.'",
")",
")",
"return",
"assertions"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_ensure_tf_install
|
Attempt to import tensorflow, and ensure its version is sufficient.
Raises:
ImportError: if either tensorflow is not importable or its version is
inadequate.
|
tensorflow_probability/__init__.py
|
def _ensure_tf_install(): # pylint: disable=g-statement-before-imports
"""Attempt to import tensorflow, and ensure its version is sufficient.
Raises:
ImportError: if either tensorflow is not importable or its version is
inadequate.
"""
try:
import tensorflow as tf
except ImportError:
# Print more informative error message, then reraise.
print("\n\nFailed to import TensorFlow. Please note that TensorFlow is not "
"installed by default when you install TensorFlow Probability. This "
"is so that users can decide whether to install the GPU-enabled "
"TensorFlow package. To use TensorFlow Probability, please install "
"the most recent version of TensorFlow, by following instructions at "
"https://tensorflow.org/install.\n\n")
raise
import distutils.version
#
# Update this whenever we need to depend on a newer TensorFlow release.
#
required_tensorflow_version = "1.13"
if (distutils.version.LooseVersion(tf.__version__) <
distutils.version.LooseVersion(required_tensorflow_version)):
raise ImportError(
"This version of TensorFlow Probability requires TensorFlow "
"version >= {required}; Detected an installation of version {present}. "
"Please upgrade TensorFlow to proceed.".format(
required=required_tensorflow_version,
present=tf.__version__))
|
def _ensure_tf_install(): # pylint: disable=g-statement-before-imports
"""Attempt to import tensorflow, and ensure its version is sufficient.
Raises:
ImportError: if either tensorflow is not importable or its version is
inadequate.
"""
try:
import tensorflow as tf
except ImportError:
# Print more informative error message, then reraise.
print("\n\nFailed to import TensorFlow. Please note that TensorFlow is not "
"installed by default when you install TensorFlow Probability. This "
"is so that users can decide whether to install the GPU-enabled "
"TensorFlow package. To use TensorFlow Probability, please install "
"the most recent version of TensorFlow, by following instructions at "
"https://tensorflow.org/install.\n\n")
raise
import distutils.version
#
# Update this whenever we need to depend on a newer TensorFlow release.
#
required_tensorflow_version = "1.13"
if (distutils.version.LooseVersion(tf.__version__) <
distutils.version.LooseVersion(required_tensorflow_version)):
raise ImportError(
"This version of TensorFlow Probability requires TensorFlow "
"version >= {required}; Detected an installation of version {present}. "
"Please upgrade TensorFlow to proceed.".format(
required=required_tensorflow_version,
present=tf.__version__))
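
A standalone sketch of the version comparison the guard performs (`distutils` is what this code uses; note it is deprecated in recent Pythons):

```python
import distutils.version

required = distutils.version.LooseVersion("1.13")
installed = distutils.version.LooseVersion("1.12.0")

# The same comparison _ensure_tf_install performs before raising ImportError.
print(installed < required)  # True -> the guard would raise
assert distutils.version.LooseVersion("1.14.0") >= required
```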
|
[
"Attempt",
"to",
"import",
"tensorflow",
"and",
"ensure",
"its",
"version",
"is",
"sufficient",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/__init__.py#L32-L65
|
[
"def",
"_ensure_tf_install",
"(",
")",
":",
"# pylint: disable=g-statement-before-imports",
"try",
":",
"import",
"tensorflow",
"as",
"tf",
"except",
"ImportError",
":",
"# Print more informative error message, then reraise.",
"print",
"(",
"\"\\n\\nFailed to import TensorFlow. Please note that TensorFlow is not \"",
"\"installed by default when you install TensorFlow Probability. This \"",
"\"is so that users can decide whether to install the GPU-enabled \"",
"\"TensorFlow package. To use TensorFlow Probability, please install \"",
"\"the most recent version of TensorFlow, by following instructions at \"",
"\"https://tensorflow.org/install.\\n\\n\"",
")",
"raise",
"import",
"distutils",
".",
"version",
"#",
"# Update this whenever we need to depend on a newer TensorFlow release.",
"#",
"required_tensorflow_version",
"=",
"\"1.13\"",
"if",
"(",
"distutils",
".",
"version",
".",
"LooseVersion",
"(",
"tf",
".",
"__version__",
")",
"<",
"distutils",
".",
"version",
".",
"LooseVersion",
"(",
"required_tensorflow_version",
")",
")",
":",
"raise",
"ImportError",
"(",
"\"This version of TensorFlow Probability requires TensorFlow \"",
"\"version >= {required}; Detected an installation of version {present}. \"",
"\"Please upgrade TensorFlow to proceed.\"",
".",
"format",
"(",
"required",
"=",
"required_tensorflow_version",
",",
"present",
"=",
"tf",
".",
"__version__",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
logistic_regression
|
Bayesian logistic regression, which returns labels given features.
|
experimental/no_u_turn_sampler/logistic_regression.py
|
def logistic_regression(features):
"""Bayesian logistic regression, which returns labels given features."""
coeffs = ed.MultivariateNormalDiag(
loc=tf.zeros(features.shape[1]), name="coeffs")
labels = ed.Bernoulli(
logits=tf.tensordot(features, coeffs, [[1], [0]]), name="labels")
return labels
|
def logistic_regression(features):
"""Bayesian logistic regression, which returns labels given features."""
coeffs = ed.MultivariateNormalDiag(
loc=tf.zeros(features.shape[1]), name="coeffs")
labels = ed.Bernoulli(
logits=tf.tensordot(features, coeffs, [[1], [0]]), name="labels")
return labels
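
The same generative story in plain NumPy (hypothetical data sizes): coefficients drawn from a standard normal, then Bernoulli labels from the sigmoid of the linear predictor.

```python
import numpy as np

rng = np.random.default_rng(0)
features = rng.normal(size=(100, 3))

coeffs = rng.normal(size=3)              # like the MultivariateNormalDiag sample
logits = features @ coeffs               # like tf.tensordot(features, coeffs, [[1], [0]])
labels = rng.random(100) < 1.0 / (1.0 + np.exp(-logits))  # Bernoulli(sigmoid(logits))
assert labels.shape == (100,)
```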
|
[
"Bayesian",
"logistic",
"regression",
"which",
"returns",
"labels",
"given",
"features",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/no_u_turn_sampler/logistic_regression.py#L58-L64
|
[
"def",
"logistic_regression",
"(",
"features",
")",
":",
"coeffs",
"=",
"ed",
".",
"MultivariateNormalDiag",
"(",
"loc",
"=",
"tf",
".",
"zeros",
"(",
"features",
".",
"shape",
"[",
"1",
"]",
")",
",",
"name",
"=",
"\"coeffs\"",
")",
"labels",
"=",
"ed",
".",
"Bernoulli",
"(",
"logits",
"=",
"tf",
".",
"tensordot",
"(",
"features",
",",
"coeffs",
",",
"[",
"[",
"1",
"]",
",",
"[",
"0",
"]",
"]",
")",
",",
"name",
"=",
"\"labels\"",
")",
"return",
"labels"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
covertype
|
Builds the Covertype data set.
|
experimental/no_u_turn_sampler/logistic_regression.py
|
def covertype():
"""Builds the Covertype data set."""
import sklearn.datasets # pylint: disable=g-import-not-at-top
data = sklearn.datasets.covtype.fetch_covtype()
features = data.data
labels = data.target
# Normalize features and append a column of ones for the intercept.
features -= features.mean(0)
features /= features.std(0)
features = np.hstack([features, np.ones([features.shape[0], 1])])
features = tf.cast(features, dtype=tf.float32)
# Binarize outcomes on whether it is a specific category.
  classes, counts = np.unique(labels, return_counts=True)
  specific_category = classes[np.argmax(counts)]
labels = (labels == specific_category)
labels = tf.cast(labels, dtype=tf.int32)
return features, labels
|
def covertype():
"""Builds the Covertype data set."""
import sklearn.datasets # pylint: disable=g-import-not-at-top
data = sklearn.datasets.covtype.fetch_covtype()
features = data.data
labels = data.target
# Normalize features and append a column of ones for the intercept.
features -= features.mean(0)
features /= features.std(0)
features = np.hstack([features, np.ones([features.shape[0], 1])])
features = tf.cast(features, dtype=tf.float32)
# Binarize outcomes on whether it is a specific category.
  classes, counts = np.unique(labels, return_counts=True)
  specific_category = classes[np.argmax(counts)]
labels = (labels == specific_category)
labels = tf.cast(labels, dtype=tf.int32)
return features, labels
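
The label-binarization step in isolation, on toy labels (NumPy): `np.unique` returns the sorted class values alongside their counts, and the most frequent class value is flagged.

```python
import numpy as np

labels = np.array([1, 2, 2, 3, 2, 1])
classes, counts = np.unique(labels, return_counts=True)  # classes sorted: [1, 2, 3]
most_common = classes[np.argmax(counts)]                 # -> 2
print(labels == most_common)                             # [False  True  True False  True False]
```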
|
[
"Builds",
"the",
"Covertype",
"data",
"set",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/no_u_turn_sampler/logistic_regression.py#L67-L85
|
[
"def",
"covertype",
"(",
")",
":",
"import",
"sklearn",
".",
"datasets",
"# pylint: disable=g-import-not-at-top",
"data",
"=",
"sklearn",
".",
"datasets",
".",
"covtype",
".",
"fetch_covtype",
"(",
")",
"features",
"=",
"data",
".",
"data",
"labels",
"=",
"data",
".",
"target",
"# Normalize features and append a column of ones for the intercept.",
"features",
"-=",
"features",
".",
"mean",
"(",
"0",
")",
"features",
"/=",
"features",
".",
"std",
"(",
"0",
")",
"features",
"=",
"np",
".",
"hstack",
"(",
"[",
"features",
",",
"np",
".",
"ones",
"(",
"[",
"features",
".",
"shape",
"[",
"0",
"]",
",",
"1",
"]",
")",
"]",
")",
"features",
"=",
"tf",
".",
"cast",
"(",
"features",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"# Binarize outcomes on whether it is a specific category.",
"_",
",",
"counts",
"=",
"np",
".",
"unique",
"(",
"labels",
",",
"return_counts",
"=",
"True",
")",
"specific_category",
"=",
"np",
".",
"argmax",
"(",
"counts",
")",
"labels",
"=",
"(",
"labels",
"==",
"specific_category",
")",
"labels",
"=",
"tf",
".",
"cast",
"(",
"labels",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"return",
"features",
",",
"labels"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_kl_dirichlet_dirichlet
|
Batchwise KL divergence KL(d1 || d2) with d1 and d2 Dirichlet.
Args:
d1: instance of a Dirichlet distribution object.
d2: instance of a Dirichlet distribution object.
name: (optional) Name to use for created operations.
default is "kl_dirichlet_dirichlet".
Returns:
Batchwise KL(d1 || d2)
|
tensorflow_probability/python/distributions/dirichlet.py
|
def _kl_dirichlet_dirichlet(d1, d2, name=None):
"""Batchwise KL divergence KL(d1 || d2) with d1 and d2 Dirichlet.
Args:
d1: instance of a Dirichlet distribution object.
d2: instance of a Dirichlet distribution object.
name: (optional) Name to use for created operations.
default is "kl_dirichlet_dirichlet".
Returns:
Batchwise KL(d1 || d2)
"""
with tf.name_scope(name or "kl_dirichlet_dirichlet"):
# The KL between Dirichlet distributions can be derived as follows. We have
#
# Dir(x; a) = 1 / B(a) * prod_i[x[i]^(a[i] - 1)]
#
# where B(a) is the multivariate Beta function:
#
# B(a) = Gamma(a[1]) * ... * Gamma(a[n]) / Gamma(a[1] + ... + a[n])
#
# The KL is
#
# KL(Dir(x; a), Dir(x; b)) = E_Dir(x; a){log(Dir(x; a) / Dir(x; b))}
#
# so we'll need to know the log density of the Dirichlet. This is
#
# log(Dir(x; a)) = sum_i[(a[i] - 1) log(x[i])] - log B(a)
#
# The only term that matters for the expectations is the log(x[i]). To
# compute the expectation of this term over the Dirichlet density, we can
# use the following facts about the Dirichlet in exponential family form:
# 1. log(x[i]) is a sufficient statistic
# 2. expected sufficient statistics (of any exp family distribution) are
# equal to derivatives of the log normalizer with respect to
# corresponding natural parameters: E{T[i](x)} = dA/d(eta[i])
#
# To proceed, we can rewrite the Dirichlet density in exponential family
# form as follows:
#
# Dir(x; a) = exp{eta(a) . T(x) - A(a)}
#
# where '.' is the dot product of vectors eta and T, and A is a scalar:
#
# eta[i](a) = a[i] - 1
# T[i](x) = log(x[i])
# A(a) = log B(a)
#
# Now, we can use fact (2) above to write
#
# E_Dir(x; a)[log(x[i])]
# = dA(a) / da[i]
# = d/da[i] log B(a)
# = d/da[i] (sum_j lgamma(a[j])) - lgamma(sum_j a[j])
    #   = digamma(a[i]) - digamma(sum_j a[j])
#
# Putting it all together, we have
#
    #   KL[Dir(x; a) || Dir(x; b)]
    #   = E_Dir(x; a){log(Dir(x; a) / Dir(x; b))}
    #   = E_Dir(x; a){sum_i[(a[i] - b[i]) log(x[i])]} - (lbeta(a) - lbeta(b))
    #   = sum_i[(a[i] - b[i]) * E_Dir(x; a){log(x[i])}] - lbeta(a) + lbeta(b)
    #   = sum_i[(a[i] - b[i]) * (digamma(a[i]) - digamma(sum_j a[j]))]
    #     - lbeta(a) + lbeta(b)
digamma_sum_d1 = tf.math.digamma(
tf.reduce_sum(input_tensor=d1.concentration, axis=-1, keepdims=True))
digamma_diff = tf.math.digamma(d1.concentration) - digamma_sum_d1
concentration_diff = d1.concentration - d2.concentration
return (
tf.reduce_sum(input_tensor=concentration_diff * digamma_diff, axis=-1) -
tf.math.lbeta(d1.concentration) + tf.math.lbeta(d2.concentration))
|
def _kl_dirichlet_dirichlet(d1, d2, name=None):
"""Batchwise KL divergence KL(d1 || d2) with d1 and d2 Dirichlet.
Args:
d1: instance of a Dirichlet distribution object.
d2: instance of a Dirichlet distribution object.
name: (optional) Name to use for created operations.
default is "kl_dirichlet_dirichlet".
Returns:
Batchwise KL(d1 || d2)
"""
with tf.name_scope(name or "kl_dirichlet_dirichlet"):
# The KL between Dirichlet distributions can be derived as follows. We have
#
# Dir(x; a) = 1 / B(a) * prod_i[x[i]^(a[i] - 1)]
#
# where B(a) is the multivariate Beta function:
#
# B(a) = Gamma(a[1]) * ... * Gamma(a[n]) / Gamma(a[1] + ... + a[n])
#
# The KL is
#
# KL(Dir(x; a), Dir(x; b)) = E_Dir(x; a){log(Dir(x; a) / Dir(x; b))}
#
# so we'll need to know the log density of the Dirichlet. This is
#
# log(Dir(x; a)) = sum_i[(a[i] - 1) log(x[i])] - log B(a)
#
# The only term that matters for the expectations is the log(x[i]). To
# compute the expectation of this term over the Dirichlet density, we can
# use the following facts about the Dirichlet in exponential family form:
# 1. log(x[i]) is a sufficient statistic
# 2. expected sufficient statistics (of any exp family distribution) are
# equal to derivatives of the log normalizer with respect to
# corresponding natural parameters: E{T[i](x)} = dA/d(eta[i])
#
# To proceed, we can rewrite the Dirichlet density in exponential family
# form as follows:
#
# Dir(x; a) = exp{eta(a) . T(x) - A(a)}
#
# where '.' is the dot product of vectors eta and T, and A is a scalar:
#
# eta[i](a) = a[i] - 1
# T[i](x) = log(x[i])
# A(a) = log B(a)
#
# Now, we can use fact (2) above to write
#
# E_Dir(x; a)[log(x[i])]
# = dA(a) / da[i]
# = d/da[i] log B(a)
# = d/da[i] (sum_j lgamma(a[j])) - lgamma(sum_j a[j])
    #   = digamma(a[i]) - digamma(sum_j a[j])
#
# Putting it all together, we have
#
    #   KL[Dir(x; a) || Dir(x; b)]
    #   = E_Dir(x; a){log(Dir(x; a) / Dir(x; b))}
    #   = E_Dir(x; a){sum_i[(a[i] - b[i]) log(x[i])]} - (lbeta(a) - lbeta(b))
    #   = sum_i[(a[i] - b[i]) * E_Dir(x; a){log(x[i])}] - lbeta(a) + lbeta(b)
    #   = sum_i[(a[i] - b[i]) * (digamma(a[i]) - digamma(sum_j a[j]))]
    #     - lbeta(a) + lbeta(b)
digamma_sum_d1 = tf.math.digamma(
tf.reduce_sum(input_tensor=d1.concentration, axis=-1, keepdims=True))
digamma_diff = tf.math.digamma(d1.concentration) - digamma_sum_d1
concentration_diff = d1.concentration - d2.concentration
return (
tf.reduce_sum(input_tensor=concentration_diff * digamma_diff, axis=-1) -
tf.math.lbeta(d1.concentration) + tf.math.lbeta(d2.concentration))
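
A NumPy/SciPy sketch of the closed form derived above, sanity-checked via `KL(d, d) = 0` and nonnegativity (helper name hypothetical):

```python
import numpy as np
from scipy.special import digamma, gammaln

def kl_dirichlet(a, b):
    # lbeta(c) = sum_i lgamma(c_i) - lgamma(sum_i c_i)
    lbeta = lambda c: gammaln(c).sum(-1) - gammaln(c.sum(-1))
    digamma_diff = digamma(a) - digamma(a.sum(-1, keepdims=True))
    return ((a - b) * digamma_diff).sum(-1) - lbeta(a) + lbeta(b)

a = np.array([1.5, 2.0, 3.0])
b = np.array([1.0, 1.0, 1.0])
assert np.isclose(kl_dirichlet(a, a), 0.0)   # KL of a distribution with itself
assert kl_dirichlet(a, b) >= 0.0             # KL is nonnegative
print(kl_dirichlet(a, b))
```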
|
[
"Batchwise",
"KL",
"divergence",
"KL",
"(",
"d1",
"||",
"d2",
")",
"with",
"d1",
"and",
"d2",
"Dirichlet",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/dirichlet.py#L331-L403
|
[
"def",
"_kl_dirichlet_dirichlet",
"(",
"d1",
",",
"d2",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"or",
"\"kl_dirichlet_dirichlet\"",
")",
":",
"# The KL between Dirichlet distributions can be derived as follows. We have",
"#",
"# Dir(x; a) = 1 / B(a) * prod_i[x[i]^(a[i] - 1)]",
"#",
"# where B(a) is the multivariate Beta function:",
"#",
"# B(a) = Gamma(a[1]) * ... * Gamma(a[n]) / Gamma(a[1] + ... + a[n])",
"#",
"# The KL is",
"#",
"# KL(Dir(x; a), Dir(x; b)) = E_Dir(x; a){log(Dir(x; a) / Dir(x; b))}",
"#",
"# so we'll need to know the log density of the Dirichlet. This is",
"#",
"# log(Dir(x; a)) = sum_i[(a[i] - 1) log(x[i])] - log B(a)",
"#",
"# The only term that matters for the expectations is the log(x[i]). To",
"# compute the expectation of this term over the Dirichlet density, we can",
"# use the following facts about the Dirichlet in exponential family form:",
"# 1. log(x[i]) is a sufficient statistic",
"# 2. expected sufficient statistics (of any exp family distribution) are",
"# equal to derivatives of the log normalizer with respect to",
"# corresponding natural parameters: E{T[i](x)} = dA/d(eta[i])",
"#",
"# To proceed, we can rewrite the Dirichlet density in exponential family",
"# form as follows:",
"#",
"# Dir(x; a) = exp{eta(a) . T(x) - A(a)}",
"#",
"# where '.' is the dot product of vectors eta and T, and A is a scalar:",
"#",
"# eta[i](a) = a[i] - 1",
"# T[i](x) = log(x[i])",
"# A(a) = log B(a)",
"#",
"# Now, we can use fact (2) above to write",
"#",
"# E_Dir(x; a)[log(x[i])]",
"# = dA(a) / da[i]",
"# = d/da[i] log B(a)",
"# = d/da[i] (sum_j lgamma(a[j])) - lgamma(sum_j a[j])",
"# = digamma(a[i])) - digamma(sum_j a[j])",
"#",
"# Putting it all together, we have",
"#",
"# KL[Dir(x; a) || Dir(x; b)]",
"# = E_Dir(x; a){log(Dir(x; a) / Dir(x; b)}",
"# = E_Dir(x; a){sum_i[(a[i] - b[i]) log(x[i])} - (lbeta(a) - lbeta(b))",
"# = sum_i[(a[i] - b[i]) * E_Dir(x; a){log(x[i])}] - lbeta(a) + lbeta(b)",
"# = sum_i[(a[i] - b[i]) * (digamma(a[i]) - digamma(sum_j a[j]))]",
"# - lbeta(a) + lbeta(b))",
"digamma_sum_d1",
"=",
"tf",
".",
"math",
".",
"digamma",
"(",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"d1",
".",
"concentration",
",",
"axis",
"=",
"-",
"1",
",",
"keepdims",
"=",
"True",
")",
")",
"digamma_diff",
"=",
"tf",
".",
"math",
".",
"digamma",
"(",
"d1",
".",
"concentration",
")",
"-",
"digamma_sum_d1",
"concentration_diff",
"=",
"d1",
".",
"concentration",
"-",
"d2",
".",
"concentration",
"return",
"(",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"concentration_diff",
"*",
"digamma_diff",
",",
"axis",
"=",
"-",
"1",
")",
"-",
"tf",
".",
"math",
".",
"lbeta",
"(",
"d1",
".",
"concentration",
")",
"+",
"tf",
".",
"math",
".",
"lbeta",
"(",
"d2",
".",
"concentration",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
Dirichlet._maybe_assert_valid_concentration
|
Checks the validity of the concentration parameter.
|
tensorflow_probability/python/distributions/dirichlet.py
|
def _maybe_assert_valid_concentration(self, concentration, validate_args):
"""Checks the validity of the concentration parameter."""
if not validate_args:
return concentration
return distribution_util.with_dependencies([
assert_util.assert_positive(
concentration, message="Concentration parameter must be positive."),
assert_util.assert_rank_at_least(
concentration,
1,
message="Concentration parameter must have >=1 dimensions."),
assert_util.assert_less(
1,
tf.shape(input=concentration)[-1],
message="Concentration parameter must have event_size >= 2."),
], concentration)
|
def _maybe_assert_valid_concentration(self, concentration, validate_args):
"""Checks the validity of the concentration parameter."""
if not validate_args:
return concentration
return distribution_util.with_dependencies([
assert_util.assert_positive(
concentration, message="Concentration parameter must be positive."),
assert_util.assert_rank_at_least(
concentration,
1,
message="Concentration parameter must have >=1 dimensions."),
assert_util.assert_less(
1,
tf.shape(input=concentration)[-1],
message="Concentration parameter must have event_size >= 2."),
], concentration)
|
[
"Checks",
"the",
"validity",
"of",
"the",
"concentration",
"parameter",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/dirichlet.py#L300-L315
|
[
"def",
"_maybe_assert_valid_concentration",
"(",
"self",
",",
"concentration",
",",
"validate_args",
")",
":",
"if",
"not",
"validate_args",
":",
"return",
"concentration",
"return",
"distribution_util",
".",
"with_dependencies",
"(",
"[",
"assert_util",
".",
"assert_positive",
"(",
"concentration",
",",
"message",
"=",
"\"Concentration parameter must be positive.\"",
")",
",",
"assert_util",
".",
"assert_rank_at_least",
"(",
"concentration",
",",
"1",
",",
"message",
"=",
"\"Concentration parameter must have >=1 dimensions.\"",
")",
",",
"assert_util",
".",
"assert_less",
"(",
"1",
",",
"tf",
".",
"shape",
"(",
"input",
"=",
"concentration",
")",
"[",
"-",
"1",
"]",
",",
"message",
"=",
"\"Concentration parameter must have event_size >= 2.\"",
")",
",",
"]",
",",
"concentration",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
Dirichlet._maybe_assert_valid_sample
|
Checks the validity of a sample.
|
tensorflow_probability/python/distributions/dirichlet.py
|
def _maybe_assert_valid_sample(self, x):
"""Checks the validity of a sample."""
if not self.validate_args:
return x
return distribution_util.with_dependencies([
assert_util.assert_positive(x, message="samples must be positive"),
assert_util.assert_near(
tf.ones([], dtype=self.dtype),
tf.reduce_sum(input_tensor=x, axis=-1),
message="sample last-dimension must sum to `1`"),
], x)
|
def _maybe_assert_valid_sample(self, x):
"""Checks the validity of a sample."""
if not self.validate_args:
return x
return distribution_util.with_dependencies([
assert_util.assert_positive(x, message="samples must be positive"),
assert_util.assert_near(
tf.ones([], dtype=self.dtype),
tf.reduce_sum(input_tensor=x, axis=-1),
message="sample last-dimension must sum to `1`"),
], x)
|
[
"Checks",
"the",
"validity",
"of",
"a",
"sample",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/dirichlet.py#L317-L327
|
[
"def",
"_maybe_assert_valid_sample",
"(",
"self",
",",
"x",
")",
":",
"if",
"not",
"self",
".",
"validate_args",
":",
"return",
"x",
"return",
"distribution_util",
".",
"with_dependencies",
"(",
"[",
"assert_util",
".",
"assert_positive",
"(",
"x",
",",
"message",
"=",
"\"samples must be positive\"",
")",
",",
"assert_util",
".",
"assert_near",
"(",
"tf",
".",
"ones",
"(",
"[",
"]",
",",
"dtype",
"=",
"self",
".",
"dtype",
")",
",",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"x",
",",
"axis",
"=",
"-",
"1",
")",
",",
"message",
"=",
"\"sample last-dimension must sum to `1`\"",
")",
",",
"]",
",",
"x",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
auto_correlation
|
Auto correlation along one axis.
Given a `1-D` wide sense stationary (WSS) sequence `X`, the auto correlation
`RXX` may be defined as (with `E` expectation and `Conj` complex conjugate)
```
RXX[m] := E{ W[m] Conj(W[0]) } = E{ W[0] Conj(W[-m]) },
W[n] := (X[n] - MU) / S,
MU := E{ X[0] },
S**2 := E{ (X[0] - MU) Conj(X[0] - MU) }.
```
This function takes the viewpoint that `x` is (along one axis) a finite
sub-sequence of a realization of (WSS) `X`, and then uses `x` to produce an
estimate of `RXX[m]` as follows:
After extending `x` from length `L` to `inf` by zero padding, the auto
correlation estimate `rxx[m]` is computed for `m = 0, 1, ..., max_lags` as
```
rxx[m] := (L - m)**-1 sum_n w[n + m] Conj(w[n]),
w[n] := (x[n] - mu) / s,
mu := L**-1 sum_n x[n],
s**2 := L**-1 sum_n (x[n] - mu) Conj(x[n] - mu)
```
The error in this estimate is proportional to `1 / sqrt(len(x) - m)`, so users
often set `max_lags` small enough so that the entire output is meaningful.
Note that since `mu` is an imperfect estimate of `E{ X[0] }`, and we divide by
`len(x) - m` rather than `len(x) - m - 1`, our estimate of auto correlation
contains a slight bias, which goes to zero as `len(x) - m --> infinity`.
Args:
x: `float32` or `complex64` `Tensor`.
axis: Python `int`. The axis number along which to compute correlation.
Other dimensions index different batch members.
max_lags: Positive `int` tensor. The maximum value of `m` to consider (in
equation above). If `max_lags >= x.shape[axis]`, we effectively re-set
`max_lags` to `x.shape[axis] - 1`.
center: Python `bool`. If `False`, do not subtract the mean estimate `mu`
from `x[n]` when forming `w[n]`.
normalize: Python `bool`. If `False`, do not divide by the variance
estimate `s**2` when forming `w[n]`.
name: `String` name to prepend to created ops.
Returns:
`rxx`: `Tensor` of same `dtype` as `x`. `rxx.shape[i] = x.shape[i]` for
`i != axis`, and `rxx.shape[axis] = max_lags + 1`.
Raises:
TypeError: If `x` is not a supported type.
|
tensorflow_probability/python/stats/sample_stats.py
|
def auto_correlation(x,
axis=-1,
max_lags=None,
center=True,
normalize=True,
name='auto_correlation'):
"""Auto correlation along one axis.
Given a `1-D` wide sense stationary (WSS) sequence `X`, the auto correlation
`RXX` may be defined as (with `E` expectation and `Conj` complex conjugate)
```
RXX[m] := E{ W[m] Conj(W[0]) } = E{ W[0] Conj(W[-m]) },
W[n] := (X[n] - MU) / S,
MU := E{ X[0] },
S**2 := E{ (X[0] - MU) Conj(X[0] - MU) }.
```
This function takes the viewpoint that `x` is (along one axis) a finite
sub-sequence of a realization of (WSS) `X`, and then uses `x` to produce an
estimate of `RXX[m]` as follows:
After extending `x` from length `L` to `inf` by zero padding, the auto
correlation estimate `rxx[m]` is computed for `m = 0, 1, ..., max_lags` as
```
rxx[m] := (L - m)**-1 sum_n w[n + m] Conj(w[n]),
w[n] := (x[n] - mu) / s,
mu := L**-1 sum_n x[n],
s**2 := L**-1 sum_n (x[n] - mu) Conj(x[n] - mu)
```
The error in this estimate is proportional to `1 / sqrt(len(x) - m)`, so users
often set `max_lags` small enough so that the entire output is meaningful.
Note that since `mu` is an imperfect estimate of `E{ X[0] }`, and we divide by
`len(x) - m` rather than `len(x) - m - 1`, our estimate of auto correlation
contains a slight bias, which goes to zero as `len(x) - m --> infinity`.
Args:
x: `float32` or `complex64` `Tensor`.
axis: Python `int`. The axis number along which to compute correlation.
Other dimensions index different batch members.
max_lags: Positive `int` tensor. The maximum value of `m` to consider (in
equation above). If `max_lags >= x.shape[axis]`, we effectively re-set
`max_lags` to `x.shape[axis] - 1`.
center: Python `bool`. If `False`, do not subtract the mean estimate `mu`
from `x[n]` when forming `w[n]`.
normalize: Python `bool`. If `False`, do not divide by the variance
estimate `s**2` when forming `w[n]`.
name: `String` name to prepend to created ops.
Returns:
`rxx`: `Tensor` of same `dtype` as `x`. `rxx.shape[i] = x.shape[i]` for
`i != axis`, and `rxx.shape[axis] = max_lags + 1`.
Raises:
TypeError: If `x` is not a supported type.
"""
# Implementation details:
# Extend length N / 2 1-D array x to length N by zero padding onto the end.
# Then, set
# F[x]_k := sum_n x_n exp{-i 2 pi k n / N }.
# It is not hard to see that
# F[x]_k Conj(F[x]_k) = F[R]_k, where
# R_m := sum_n x_n Conj(x_{(n - m) mod N}).
# One can also check that R_m / (N / 2 - m) is an unbiased estimate of RXX[m].
# Since F[x] is the DFT of x, this leads us to a zero-padding and FFT/IFFT
# based version of estimating RXX.
# Note that this is a special case of the Wiener-Khinchin Theorem.
with tf.compat.v1.name_scope(name, values=[x]):
x = tf.convert_to_tensor(value=x, name='x')
# Rotate dimensions of x in order to put axis at the rightmost dim.
# FFT op requires this.
rank = util.prefer_static_rank(x)
if axis < 0:
axis = rank + axis
shift = rank - 1 - axis
# Suppose x.shape[axis] = T, so there are T 'time' steps.
# ==> x_rotated.shape = B + [T],
# where B is x_rotated's batch shape.
x_rotated = util.rotate_transpose(x, shift)
if center:
x_rotated -= tf.reduce_mean(
input_tensor=x_rotated, axis=-1, keepdims=True)
# x_len = N / 2 from above explanation. The length of x along axis.
# Get a value for x_len that works in all cases.
x_len = util.prefer_static_shape(x_rotated)[-1]
# TODO(langmore) Investigate whether this zero padding helps or hurts. At
    # the moment it is necessary so that all FFT implementations work.
# Zero pad to the next power of 2 greater than 2 * x_len, which equals
# 2**(ceil(Log_2(2 * x_len))). Note: Log_2(X) = Log_e(X) / Log_e(2).
x_len_float64 = tf.cast(x_len, np.float64)
target_length = tf.pow(
np.float64(2.), tf.math.ceil(
tf.math.log(x_len_float64 * 2) / np.log(2.)))
pad_length = tf.cast(target_length - x_len_float64, np.int32)
# We should have:
# x_rotated_pad.shape = x_rotated.shape[:-1] + [T + pad_length]
# = B + [T + pad_length]
x_rotated_pad = util.pad(x_rotated, axis=-1, back=True, count=pad_length)
dtype = x.dtype
if not dtype.is_complex:
if not dtype.is_floating:
raise TypeError('Argument x must have either float or complex dtype'
' found: {}'.format(dtype))
x_rotated_pad = tf.complex(x_rotated_pad,
dtype.real_dtype.as_numpy_dtype(0.))
# Autocorrelation is IFFT of power-spectral density (up to some scaling).
fft_x_rotated_pad = tf.signal.fft(x_rotated_pad)
spectral_density = fft_x_rotated_pad * tf.math.conj(fft_x_rotated_pad)
# shifted_product is R[m] from above detailed explanation.
# It is the inner product sum_n X[n] * Conj(X[n - m]).
shifted_product = tf.signal.ifft(spectral_density)
# Cast back to real-valued if x was real to begin with.
shifted_product = tf.cast(shifted_product, dtype)
# Figure out if we can deduce the final static shape, and set max_lags.
# Use x_rotated as a reference, because it has the time dimension in the far
# right, and was created before we performed all sorts of crazy shape
# manipulations.
know_static_shape = True
if not x_rotated.shape.is_fully_defined():
know_static_shape = False
if max_lags is None:
max_lags = x_len - 1
else:
max_lags = tf.convert_to_tensor(value=max_lags, name='max_lags')
max_lags_ = tf.get_static_value(max_lags)
if max_lags_ is None or not know_static_shape:
know_static_shape = False
max_lags = tf.minimum(x_len - 1, max_lags)
else:
max_lags = min(x_len - 1, max_lags_)
# Chop off the padding.
# We allow users to provide a huge max_lags, but cut it off here.
    # shifted_product_chopped.shape = x_rotated.shape[:-1] + [max_lags + 1]
shifted_product_chopped = shifted_product[..., :max_lags + 1]
# If possible, set shape.
if know_static_shape:
chopped_shape = x_rotated.shape.as_list()
chopped_shape[-1] = min(x_len, max_lags + 1)
shifted_product_chopped.set_shape(chopped_shape)
# Recall R[m] is a sum of N / 2 - m nonzero terms x[n] Conj(x[n - m]). The
# other terms were zeros arising only due to zero padding.
# `denominator = (N / 2 - m)` (defined below) is the proper term to
# divide by to make this an unbiased estimate of the expectation
# E[X[n] Conj(X[n - m])].
x_len = tf.cast(x_len, dtype.real_dtype)
max_lags = tf.cast(max_lags, dtype.real_dtype)
denominator = x_len - tf.range(0., max_lags + 1.)
denominator = tf.cast(denominator, dtype)
shifted_product_rotated = shifted_product_chopped / denominator
if normalize:
shifted_product_rotated /= shifted_product_rotated[..., :1]
# Transpose dimensions back to those of x.
return util.rotate_transpose(shifted_product_rotated, -shift)
|
def auto_correlation(x,
axis=-1,
max_lags=None,
center=True,
normalize=True,
name='auto_correlation'):
"""Auto correlation along one axis.
Given a `1-D` wide sense stationary (WSS) sequence `X`, the auto correlation
`RXX` may be defined as (with `E` expectation and `Conj` complex conjugate)
```
RXX[m] := E{ W[m] Conj(W[0]) } = E{ W[0] Conj(W[-m]) },
W[n] := (X[n] - MU) / S,
MU := E{ X[0] },
S**2 := E{ (X[0] - MU) Conj(X[0] - MU) }.
```
This function takes the viewpoint that `x` is (along one axis) a finite
sub-sequence of a realization of (WSS) `X`, and then uses `x` to produce an
estimate of `RXX[m]` as follows:
After extending `x` from length `L` to `inf` by zero padding, the auto
correlation estimate `rxx[m]` is computed for `m = 0, 1, ..., max_lags` as
```
rxx[m] := (L - m)**-1 sum_n w[n + m] Conj(w[n]),
w[n] := (x[n] - mu) / s,
mu := L**-1 sum_n x[n],
s**2 := L**-1 sum_n (x[n] - mu) Conj(x[n] - mu)
```
The error in this estimate is proportional to `1 / sqrt(len(x) - m)`, so users
often set `max_lags` small enough so that the entire output is meaningful.
Note that since `mu` is an imperfect estimate of `E{ X[0] }`, and we divide by
`len(x) - m` rather than `len(x) - m - 1`, our estimate of auto correlation
contains a slight bias, which goes to zero as `len(x) - m --> infinity`.
Args:
x: `float32` or `complex64` `Tensor`.
axis: Python `int`. The axis number along which to compute correlation.
Other dimensions index different batch members.
max_lags: Positive `int` tensor. The maximum value of `m` to consider (in
equation above). If `max_lags >= x.shape[axis]`, we effectively re-set
`max_lags` to `x.shape[axis] - 1`.
center: Python `bool`. If `False`, do not subtract the mean estimate `mu`
from `x[n]` when forming `w[n]`.
normalize: Python `bool`. If `False`, do not divide by the variance
estimate `s**2` when forming `w[n]`.
name: `String` name to prepend to created ops.
Returns:
`rxx`: `Tensor` of same `dtype` as `x`. `rxx.shape[i] = x.shape[i]` for
`i != axis`, and `rxx.shape[axis] = max_lags + 1`.
Raises:
TypeError: If `x` is not a supported type.
"""
# Implementation details:
# Extend length N / 2 1-D array x to length N by zero padding onto the end.
# Then, set
# F[x]_k := sum_n x_n exp{-i 2 pi k n / N }.
# It is not hard to see that
# F[x]_k Conj(F[x]_k) = F[R]_k, where
# R_m := sum_n x_n Conj(x_{(n - m) mod N}).
# One can also check that R_m / (N / 2 - m) is an unbiased estimate of RXX[m].
# Since F[x] is the DFT of x, this leads us to a zero-padding and FFT/IFFT
# based version of estimating RXX.
# Note that this is a special case of the Wiener-Khinchin Theorem.
with tf.compat.v1.name_scope(name, values=[x]):
x = tf.convert_to_tensor(value=x, name='x')
# Rotate dimensions of x in order to put axis at the rightmost dim.
# FFT op requires this.
rank = util.prefer_static_rank(x)
if axis < 0:
axis = rank + axis
shift = rank - 1 - axis
# Suppose x.shape[axis] = T, so there are T 'time' steps.
# ==> x_rotated.shape = B + [T],
# where B is x_rotated's batch shape.
x_rotated = util.rotate_transpose(x, shift)
if center:
x_rotated -= tf.reduce_mean(
input_tensor=x_rotated, axis=-1, keepdims=True)
# x_len = N / 2 from above explanation. The length of x along axis.
# Get a value for x_len that works in all cases.
x_len = util.prefer_static_shape(x_rotated)[-1]
# TODO(langmore) Investigate whether this zero padding helps or hurts. At
    # the moment it is necessary so that all FFT implementations work.
# Zero pad to the next power of 2 greater than 2 * x_len, which equals
# 2**(ceil(Log_2(2 * x_len))). Note: Log_2(X) = Log_e(X) / Log_e(2).
x_len_float64 = tf.cast(x_len, np.float64)
target_length = tf.pow(
np.float64(2.), tf.math.ceil(
tf.math.log(x_len_float64 * 2) / np.log(2.)))
pad_length = tf.cast(target_length - x_len_float64, np.int32)
# We should have:
# x_rotated_pad.shape = x_rotated.shape[:-1] + [T + pad_length]
# = B + [T + pad_length]
x_rotated_pad = util.pad(x_rotated, axis=-1, back=True, count=pad_length)
dtype = x.dtype
if not dtype.is_complex:
if not dtype.is_floating:
raise TypeError('Argument x must have either float or complex dtype'
' found: {}'.format(dtype))
x_rotated_pad = tf.complex(x_rotated_pad,
dtype.real_dtype.as_numpy_dtype(0.))
# Autocorrelation is IFFT of power-spectral density (up to some scaling).
fft_x_rotated_pad = tf.signal.fft(x_rotated_pad)
spectral_density = fft_x_rotated_pad * tf.math.conj(fft_x_rotated_pad)
# shifted_product is R[m] from above detailed explanation.
# It is the inner product sum_n X[n] * Conj(X[n - m]).
shifted_product = tf.signal.ifft(spectral_density)
# Cast back to real-valued if x was real to begin with.
shifted_product = tf.cast(shifted_product, dtype)
# Figure out if we can deduce the final static shape, and set max_lags.
# Use x_rotated as a reference, because it has the time dimension in the far
# right, and was created before we performed all sorts of crazy shape
# manipulations.
know_static_shape = True
if not x_rotated.shape.is_fully_defined():
know_static_shape = False
if max_lags is None:
max_lags = x_len - 1
else:
max_lags = tf.convert_to_tensor(value=max_lags, name='max_lags')
max_lags_ = tf.get_static_value(max_lags)
if max_lags_ is None or not know_static_shape:
know_static_shape = False
max_lags = tf.minimum(x_len - 1, max_lags)
else:
max_lags = min(x_len - 1, max_lags_)
# Chop off the padding.
# We allow users to provide a huge max_lags, but cut it off here.
    # shifted_product_chopped.shape = x_rotated.shape[:-1] + [max_lags + 1]
shifted_product_chopped = shifted_product[..., :max_lags + 1]
# If possible, set shape.
if know_static_shape:
chopped_shape = x_rotated.shape.as_list()
chopped_shape[-1] = min(x_len, max_lags + 1)
shifted_product_chopped.set_shape(chopped_shape)
# Recall R[m] is a sum of N / 2 - m nonzero terms x[n] Conj(x[n - m]). The
# other terms were zeros arising only due to zero padding.
# `denominator = (N / 2 - m)` (defined below) is the proper term to
# divide by to make this an unbiased estimate of the expectation
# E[X[n] Conj(X[n - m])].
x_len = tf.cast(x_len, dtype.real_dtype)
max_lags = tf.cast(max_lags, dtype.real_dtype)
denominator = x_len - tf.range(0., max_lags + 1.)
denominator = tf.cast(denominator, dtype)
shifted_product_rotated = shifted_product_chopped / denominator
if normalize:
shifted_product_rotated /= shifted_product_rotated[..., :1]
# Transpose dimensions back to those of x.
return util.rotate_transpose(shifted_product_rotated, -shift)
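
A compact NumPy sketch of the same FFT-based estimator for a single 1-D series (hypothetical helper; it centers, zero-pads past `2 * n` to avoid circular wraparound, divides by `L - m` for the unbiased estimate, and normalizes so `rxx[0] == 1`):

```python
import numpy as np

def auto_correlation_1d(x, max_lags):
    x = np.asarray(x, dtype=np.float64)
    n = len(x)
    w = x - x.mean()                              # center
    # Zero pad to a power of 2 >= 2n so circular correlation == linear correlation.
    nfft = int(2 ** np.ceil(np.log2(2 * n)))
    f = np.fft.fft(w, nfft)
    r = np.fft.ifft(f * np.conj(f)).real[:max_lags + 1]  # R[m], Wiener-Khinchin
    r /= (n - np.arange(max_lags + 1))            # unbiased: divide by (L - m)
    return r / r[0]                               # normalize so rxx[0] == 1

x = np.sin(np.linspace(0.0, 20.0, 200))
rxx = auto_correlation_1d(x, max_lags=10)
assert np.isclose(rxx[0], 1.0)
```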
|
[
"Auto",
"correlation",
"along",
"one",
"axis",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/sample_stats.py#L39-L209
|
[
"def",
"auto_correlation",
"(",
"x",
",",
"axis",
"=",
"-",
"1",
",",
"max_lags",
"=",
"None",
",",
"center",
"=",
"True",
",",
"normalize",
"=",
"True",
",",
"name",
"=",
"'auto_correlation'",
")",
":",
"# Implementation details:",
"# Extend length N / 2 1-D array x to length N by zero padding onto the end.",
"# Then, set",
"# F[x]_k := sum_n x_n exp{-i 2 pi k n / N }.",
"# It is not hard to see that",
"# F[x]_k Conj(F[x]_k) = F[R]_k, where",
"# R_m := sum_n x_n Conj(x_{(n - m) mod N}).",
"# One can also check that R_m / (N / 2 - m) is an unbiased estimate of RXX[m].",
"# Since F[x] is the DFT of x, this leads us to a zero-padding and FFT/IFFT",
"# based version of estimating RXX.",
"# Note that this is a special case of the Wiener-Khinchin Theorem.",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"values",
"=",
"[",
"x",
"]",
")",
":",
"x",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
",",
"name",
"=",
"'x'",
")",
"# Rotate dimensions of x in order to put axis at the rightmost dim.",
"# FFT op requires this.",
"rank",
"=",
"util",
".",
"prefer_static_rank",
"(",
"x",
")",
"if",
"axis",
"<",
"0",
":",
"axis",
"=",
"rank",
"+",
"axis",
"shift",
"=",
"rank",
"-",
"1",
"-",
"axis",
"# Suppose x.shape[axis] = T, so there are T 'time' steps.",
"# ==> x_rotated.shape = B + [T],",
"# where B is x_rotated's batch shape.",
"x_rotated",
"=",
"util",
".",
"rotate_transpose",
"(",
"x",
",",
"shift",
")",
"if",
"center",
":",
"x_rotated",
"-=",
"tf",
".",
"reduce_mean",
"(",
"input_tensor",
"=",
"x_rotated",
",",
"axis",
"=",
"-",
"1",
",",
"keepdims",
"=",
"True",
")",
"# x_len = N / 2 from above explanation. The length of x along axis.",
"# Get a value for x_len that works in all cases.",
"x_len",
"=",
"util",
".",
"prefer_static_shape",
"(",
"x_rotated",
")",
"[",
"-",
"1",
"]",
"# TODO(langmore) Investigate whether this zero padding helps or hurts. At",
"# the moment is necessary so that all FFT implementations work.",
"# Zero pad to the next power of 2 greater than 2 * x_len, which equals",
"# 2**(ceil(Log_2(2 * x_len))). Note: Log_2(X) = Log_e(X) / Log_e(2).",
"x_len_float64",
"=",
"tf",
".",
"cast",
"(",
"x_len",
",",
"np",
".",
"float64",
")",
"target_length",
"=",
"tf",
".",
"pow",
"(",
"np",
".",
"float64",
"(",
"2.",
")",
",",
"tf",
".",
"math",
".",
"ceil",
"(",
"tf",
".",
"math",
".",
"log",
"(",
"x_len_float64",
"*",
"2",
")",
"/",
"np",
".",
"log",
"(",
"2.",
")",
")",
")",
"pad_length",
"=",
"tf",
".",
"cast",
"(",
"target_length",
"-",
"x_len_float64",
",",
"np",
".",
"int32",
")",
"# We should have:",
"# x_rotated_pad.shape = x_rotated.shape[:-1] + [T + pad_length]",
"# = B + [T + pad_length]",
"x_rotated_pad",
"=",
"util",
".",
"pad",
"(",
"x_rotated",
",",
"axis",
"=",
"-",
"1",
",",
"back",
"=",
"True",
",",
"count",
"=",
"pad_length",
")",
"dtype",
"=",
"x",
".",
"dtype",
"if",
"not",
"dtype",
".",
"is_complex",
":",
"if",
"not",
"dtype",
".",
"is_floating",
":",
"raise",
"TypeError",
"(",
"'Argument x must have either float or complex dtype'",
"' found: {}'",
".",
"format",
"(",
"dtype",
")",
")",
"x_rotated_pad",
"=",
"tf",
".",
"complex",
"(",
"x_rotated_pad",
",",
"dtype",
".",
"real_dtype",
".",
"as_numpy_dtype",
"(",
"0.",
")",
")",
"# Autocorrelation is IFFT of power-spectral density (up to some scaling).",
"fft_x_rotated_pad",
"=",
"tf",
".",
"signal",
".",
"fft",
"(",
"x_rotated_pad",
")",
"spectral_density",
"=",
"fft_x_rotated_pad",
"*",
"tf",
".",
"math",
".",
"conj",
"(",
"fft_x_rotated_pad",
")",
"# shifted_product is R[m] from above detailed explanation.",
"# It is the inner product sum_n X[n] * Conj(X[n - m]).",
"shifted_product",
"=",
"tf",
".",
"signal",
".",
"ifft",
"(",
"spectral_density",
")",
"# Cast back to real-valued if x was real to begin with.",
"shifted_product",
"=",
"tf",
".",
"cast",
"(",
"shifted_product",
",",
"dtype",
")",
"# Figure out if we can deduce the final static shape, and set max_lags.",
"# Use x_rotated as a reference, because it has the time dimension in the far",
"# right, and was created before we performed all sorts of crazy shape",
"# manipulations.",
"know_static_shape",
"=",
"True",
"if",
"not",
"x_rotated",
".",
"shape",
".",
"is_fully_defined",
"(",
")",
":",
"know_static_shape",
"=",
"False",
"if",
"max_lags",
"is",
"None",
":",
"max_lags",
"=",
"x_len",
"-",
"1",
"else",
":",
"max_lags",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"max_lags",
",",
"name",
"=",
"'max_lags'",
")",
"max_lags_",
"=",
"tf",
".",
"get_static_value",
"(",
"max_lags",
")",
"if",
"max_lags_",
"is",
"None",
"or",
"not",
"know_static_shape",
":",
"know_static_shape",
"=",
"False",
"max_lags",
"=",
"tf",
".",
"minimum",
"(",
"x_len",
"-",
"1",
",",
"max_lags",
")",
"else",
":",
"max_lags",
"=",
"min",
"(",
"x_len",
"-",
"1",
",",
"max_lags_",
")",
"# Chop off the padding.",
"# We allow users to provide a huge max_lags, but cut it off here.",
"# shifted_product_chopped.shape = x_rotated.shape[:-1] + [max_lags]",
"shifted_product_chopped",
"=",
"shifted_product",
"[",
"...",
",",
":",
"max_lags",
"+",
"1",
"]",
"# If possible, set shape.",
"if",
"know_static_shape",
":",
"chopped_shape",
"=",
"x_rotated",
".",
"shape",
".",
"as_list",
"(",
")",
"chopped_shape",
"[",
"-",
"1",
"]",
"=",
"min",
"(",
"x_len",
",",
"max_lags",
"+",
"1",
")",
"shifted_product_chopped",
".",
"set_shape",
"(",
"chopped_shape",
")",
"# Recall R[m] is a sum of N / 2 - m nonzero terms x[n] Conj(x[n - m]). The",
"# other terms were zeros arising only due to zero padding.",
"# `denominator = (N / 2 - m)` (defined below) is the proper term to",
"# divide by to make this an unbiased estimate of the expectation",
"# E[X[n] Conj(X[n - m])].",
"x_len",
"=",
"tf",
".",
"cast",
"(",
"x_len",
",",
"dtype",
".",
"real_dtype",
")",
"max_lags",
"=",
"tf",
".",
"cast",
"(",
"max_lags",
",",
"dtype",
".",
"real_dtype",
")",
"denominator",
"=",
"x_len",
"-",
"tf",
".",
"range",
"(",
"0.",
",",
"max_lags",
"+",
"1.",
")",
"denominator",
"=",
"tf",
".",
"cast",
"(",
"denominator",
",",
"dtype",
")",
"shifted_product_rotated",
"=",
"shifted_product_chopped",
"/",
"denominator",
"if",
"normalize",
":",
"shifted_product_rotated",
"/=",
"shifted_product_rotated",
"[",
"...",
",",
":",
"1",
"]",
"# Transpose dimensions back to those of x.",
"return",
"util",
".",
"rotate_transpose",
"(",
"shifted_product_rotated",
",",
"-",
"shift",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
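The `auto_correlation` tokens above spell out the FFT route: rotate the time axis to the rightmost position, optionally center, zero-pad to the next power of two, take the `fft`, form the power spectral density, and `ifft` back to recover the shifted products, rescaling each lag by its number of nonzero terms. As a minimal self-contained NumPy sketch of the same idea (my own illustration; the shape handling and `max_lags` logic are simplified relative to the tokens above):

```python
import numpy as np

def fft_autocorrelation(x, max_lags=None, center=True, normalize=True):
    """FFT-based sample autocorrelation along the last axis (illustrative)."""
    x = np.asarray(x, dtype=np.float64)
    n = x.shape[-1]
    if center:
        x = x - x.mean(axis=-1, keepdims=True)
    # Zero pad to the next power of 2 at least 2 * n, so the circular
    # correlation computed via the FFT equals the linear correlation.
    target = int(2 ** np.ceil(np.log2(2 * n)))
    xf = np.fft.fft(x, n=target, axis=-1)
    # Autocorrelation is the inverse FFT of the power spectral density.
    r = np.fft.ifft(xf * np.conj(xf), axis=-1).real
    max_lags = n - 1 if max_lags is None else min(max_lags, n - 1)
    r = r[..., :max_lags + 1]
    # Lag m sums n - m nonzero products; rescale for an unbiased estimate.
    r = r / (n - np.arange(max_lags + 1))
    if normalize:
        r = r / r[..., :1]
    return r

print(fft_autocorrelation(np.sin(np.linspace(0., 20., 128)), max_lags=4))
```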
test
|
cholesky_covariance
|
Cholesky factor of the covariance matrix of vector-variate random samples.
This function can be used to fit a multivariate normal to data.
```python
tf.enable_eager_execution()
import tensorflow_probability as tfp
tfd = tfp.distributions
# Assume data.shape = (1000, 2). 1000 samples of a random variable in R^2.
observed_data = read_data_samples(...)
# The mean is easy
mu = tf.reduce_mean(observed_data, axis=0)
# Get the scale matrix
L = tfp.stats.cholesky_covariance(observed_data)
# Make the best-fit multivariate normal (i.e., the maximum likelihood fit).
mvn = tfd.MultivariateNormalTriL(loc=mu, scale_tril=L)
# Plot contours of the pdf.
xs, ys = tf.meshgrid(
tf.linspace(-5., 5., 50), tf.linspace(-5., 5., 50), indexing='ij')
xy = tf.stack((tf.reshape(xs, [-1]), tf.reshape(ys, [-1])), axis=-1)
pdf = tf.reshape(mvn.prob(xy), (50, 50))
CS = plt.contour(xs, ys, pdf, 10)
plt.clabel(CS, inline=1, fontsize=10)
```
Why does this work?
Given vector-variate random variables `X = (X1, ..., Xd)`, one may obtain the
sample covariance matrix in `R^{d x d}` (see `tfp.stats.covariance`).
The [Cholesky factor](https://en.wikipedia.org/wiki/Cholesky_decomposition)
of this matrix is analogous to standard deviation for scalar random variables:
Suppose `X` has covariance matrix `C`, with Cholesky factorization `C = L L^T`.
Then multiplying a vector of iid random variables with unit variance by
`L` produces a vector with covariance `L L^T`, i.e., the same covariance as `X`.
```python
observed_data = read_data_samples(...)
L = tfp.stats.cholesky_covariance(observed_data, sample_axis=0)
# Make fake_data with the same covariance as observed_data.
uncorrelated_normal = tf.random_normal(shape=(500, 10))
fake_data = tf.linalg.matvec(L, uncorrelated_normal)
```
Args:
x: Numeric `Tensor`. The rightmost dimension of `x` indexes events. E.g.
dimensions of a random vector.
sample_axis: Scalar or vector `Tensor` designating axis holding samples.
Default value: `0` (leftmost dimension). Cannot be the rightmost dimension
(since this indexes events).
keepdims: Boolean. Whether to keep the sample axis as singletons.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., `'cholesky_covariance'`).
Returns:
chol: `Tensor` of same `dtype` as `x`. The last two dimensions hold
lower triangular matrices (the Cholesky factors).
|
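Since `read_data_samples(...)` in the examples above is a placeholder, here is a self-contained NumPy check of the identity the docstring relies on; the data-generating matrix and sample sizes are arbitrary illustrative choices, not part of the TFP API:

```python
import numpy as np

rng = np.random.default_rng(0)
# Hypothetical "observed" data with a known correlation structure.
A = np.array([[1.0, 0.0], [0.8, 0.5]])
observed_data = rng.normal(size=(1000, 2)) @ A.T   # covariance is A @ A.T

# Sample covariance with the divide-by-N convention used in the docstring.
centered = observed_data - observed_data.mean(axis=0)
cov = centered.T @ centered / observed_data.shape[0]
L = np.linalg.cholesky(cov)

# Scaling iid unit-variance samples by L reproduces that covariance.
fake_data = rng.normal(size=(100_000, 2)) @ L.T
print(np.round(np.cov(fake_data, rowvar=False, bias=True), 2))
print(np.round(cov, 2))
```

The two printed matrices agree up to sampling noise, which is exactly the sense in which `L` plays the role of a vector-valued standard deviation.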
tensorflow_probability/python/stats/sample_stats.py
|
def cholesky_covariance(x, sample_axis=0, keepdims=False, name=None):
"""Cholesky factor of the covariance matrix of vector-variate random samples.
This function can be used to fit a multivariate normal to data.
```python
tf.enable_eager_execution()
import tensorflow_probability as tfp
tfd = tfp.distributions
# Assume data.shape = (1000, 2). 1000 samples of a random variable in R^2.
observed_data = read_data_samples(...)
# The mean is easy
mu = tf.reduce_mean(observed_data, axis=0)
# Get the scale matrix
L = tfp.stats.cholesky_covariance(observed_data)
# Make the best-fit multivariate normal (i.e., the maximum likelihood fit).
mvn = tfd.MultivariateNormalTriL(loc=mu, scale_tril=L)
# Plot contours of the pdf.
xs, ys = tf.meshgrid(
tf.linspace(-5., 5., 50), tf.linspace(-5., 5., 50), indexing='ij')
xy = tf.stack((tf.reshape(xs, [-1]), tf.reshape(ys, [-1])), axis=-1)
pdf = tf.reshape(mvn.prob(xy), (50, 50))
CS = plt.contour(xs, ys, pdf, 10)
plt.clabel(CS, inline=1, fontsize=10)
```
Why does this work?
Given vector-variate random variables `X = (X1, ..., Xd)`, one may obtain the
sample covariance matrix in `R^{d x d}` (see `tfp.stats.covariance`).
The [Cholesky factor](https://en.wikipedia.org/wiki/Cholesky_decomposition)
of this matrix is analogous to standard deviation for scalar random variables:
Suppose `X` has covariance matrix `C`, with Cholesky factorization `C = L L^T`.
Then multiplying a vector of iid random variables with unit variance by
`L` produces a vector with covariance `L L^T`, i.e., the same covariance as `X`.
```python
observed_data = read_data_samples(...)
L = tfp.stats.cholesky_covariance(observed_data, sample_axis=0)
# Make fake_data with the same covariance as observed_data.
uncorrelated_normal = tf.random_normal(shape=(500, 10))
fake_data = tf.linalg.matvec(L, uncorrelated_normal)
```
Args:
x: Numeric `Tensor`. The rightmost dimension of `x` indexes events. E.g.
dimensions of a random vector.
sample_axis: Scalar or vector `Tensor` designating axis holding samples.
Default value: `0` (leftmost dimension). Cannot be the rightmost dimension
(since this indexes events).
keepdims: Boolean. Whether to keep the sample axis as singletons.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., `'cholesky_covariance'`).
Returns:
chol: `Tensor` of same `dtype` as `x`. The last two dimensions hold
lower triangular matrices (the Cholesky factors).
"""
with tf.compat.v1.name_scope(
name, 'cholesky_covariance', values=[x, sample_axis]):
sample_axis = tf.convert_to_tensor(value=sample_axis, dtype=tf.int32)
cov = covariance(
x, sample_axis=sample_axis, event_axis=-1, keepdims=keepdims)
return tf.linalg.cholesky(cov)
|
def cholesky_covariance(x, sample_axis=0, keepdims=False, name=None):
"""Cholesky factor of the covariance matrix of vector-variate random samples.
This function can be used to fit a multivariate normal to data.
```python
tf.enable_eager_execution()
import tensorflow_probability as tfp
tfd = tfp.distributions
# Assume data.shape = (1000, 2). 1000 samples of a random variable in R^2.
observed_data = read_data_samples(...)
# The mean is easy
mu = tf.reduce_mean(observed_data, axis=0)
# Get the scale matrix
L = tfp.stats.cholesky_covariance(observed_data)
# Make the best-fit multivariate normal (i.e., the maximum likelihood fit).
mvn = tfd.MultivariateNormalTriL(loc=mu, scale_tril=L)
# Plot contours of the pdf.
xs, ys = tf.meshgrid(
tf.linspace(-5., 5., 50), tf.linspace(-5., 5., 50), indexing='ij')
xy = tf.stack((tf.reshape(xs, [-1]), tf.reshape(ys, [-1])), axis=-1)
pdf = tf.reshape(mvn.prob(xy), (50, 50))
CS = plt.contour(xs, ys, pdf, 10)
plt.clabel(CS, inline=1, fontsize=10)
```
Why does this work?
Given vector-variate random variables `X = (X1, ..., Xd)`, one may obtain the
sample covariance matrix in `R^{d x d}` (see `tfp.stats.covariance`).
The [Cholesky factor](https://en.wikipedia.org/wiki/Cholesky_decomposition)
of this matrix is analogous to standard deviation for scalar random variables:
Suppose `X` has covariance matrix `C`, with Cholesky factorization `C = L L^T`.
Then multiplying a vector of iid random variables with unit variance by
`L` produces a vector with covariance `L L^T`, i.e., the same covariance as `X`.
```python
observed_data = read_data_samples(...)
L = tfp.stats.cholesky_covariance(observed_data, sample_axis=0)
# Make fake_data with the same covariance as observed_data.
uncorrelated_normal = tf.random_normal(shape=(500, 10))
fake_data = tf.linalg.matvec(L, uncorrelated_normal)
```
Args:
x: Numeric `Tensor`. The rightmost dimension of `x` indexes events. E.g.
dimensions of a random vector.
sample_axis: Scalar or vector `Tensor` designating axis holding samples.
Default value: `0` (leftmost dimension). Cannot be the rightmost dimension
(since this indexes events).
keepdims: Boolean. Whether to keep the sample axis as singletons.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., `'cholesky_covariance'`).
Returns:
chol: `Tensor` of same `dtype` as `x`. The last two dimensions hold
lower triangular matrices (the Cholesky factors).
"""
with tf.compat.v1.name_scope(
name, 'cholesky_covariance', values=[x, sample_axis]):
sample_axis = tf.convert_to_tensor(value=sample_axis, dtype=tf.int32)
cov = covariance(
x, sample_axis=sample_axis, event_axis=-1, keepdims=keepdims)
return tf.linalg.cholesky(cov)
|
[
"Cholesky",
"factor",
"of",
"the",
"covariance",
"matrix",
"of",
"vector",
"-",
"variate",
"random",
"samples",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/sample_stats.py#L212-L281
|
[
"def",
"cholesky_covariance",
"(",
"x",
",",
"sample_axis",
"=",
"0",
",",
"keepdims",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'cholesky_covariance'",
",",
"values",
"=",
"[",
"x",
",",
"sample_axis",
"]",
")",
":",
"sample_axis",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"sample_axis",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"cov",
"=",
"covariance",
"(",
"x",
",",
"sample_axis",
"=",
"sample_axis",
",",
"event_axis",
"=",
"-",
"1",
",",
"keepdims",
"=",
"keepdims",
")",
"return",
"tf",
".",
"linalg",
".",
"cholesky",
"(",
"cov",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
covariance
|
Sample covariance between observations indexed by `event_axis`.
Given `N` samples of scalar random variables `X` and `Y`, covariance may be
estimated as
```none
Cov[X, Y] := N^{-1} sum_{n=1}^N (X_n - Xbar) Conj{(Y_n - Ybar)}
Xbar := N^{-1} sum_{n=1}^N X_n
Ybar := N^{-1} sum_{n=1}^N Y_n
```
For vector-variate random variables `X = (X1, ..., Xd)`, `Y = (Y1, ..., Yd)`,
one is often interested in the covariance matrix, `C_{ij} := Cov[Xi, Yj]`.
```python
x = tf.random_normal(shape=(100, 2, 3))
y = tf.random_normal(shape=(100, 2, 3))
# cov[i, j] is the sample covariance between x[:, i, j] and y[:, i, j].
cov = tfp.stats.covariance(x, y, sample_axis=0, event_axis=None)
# cov_matrix[i, m, n] is the sample covariance of x[:, i, m] and y[:, i, n]
cov_matrix = tfp.stats.covariance(x, y, sample_axis=0, event_axis=-1)
```
Notice we divide by `N` (the numpy default), which does not create `NaN`
when `N = 1`, but is slightly biased.
Args:
x: A numeric `Tensor` holding samples.
y: Optional `Tensor` with same `dtype` and `shape` as `x`.
Default value: `None` (`y` is effectively set to `x`).
sample_axis: Scalar or vector `Tensor` designating axis holding samples, or
`None` (meaning all axes hold samples).
Default value: `0` (leftmost dimension).
event_axis: Scalar or vector `Tensor`, or `None` (scalar events).
Axis indexing random events, whose covariance we are interested in.
If a vector, entries must form a contiguous block of dims. `sample_axis`
and `event_axis` should not intersect.
Default value: `-1` (rightmost axis holds events).
keepdims: Boolean. Whether to keep the sample axis as singletons.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., `'covariance'`).
Returns:
cov: A `Tensor` of same `dtype` as the `x`, and rank equal to
`rank(x) - len(sample_axis) + 2 * len(event_axis)`.
Raises:
AssertionError: If `x` and `y` are found to have different shape.
ValueError: If `sample_axis` and `event_axis` are found to overlap.
ValueError: If `event_axis` is found to not be contiguous.
|
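The estimator in the docstring reduces to an elementwise product for `event_axis=None` and to an averaged outer product for `event_axis=-1`. A hedged NumPy sketch of both cases (this mirrors the math only, not the axis-permutation machinery in the TFP code below):

```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(100, 2, 3))
y = rng.normal(size=(100, 2, 3))

xc = x - x.mean(axis=0)
yc = y - y.mean(axis=0)

# event_axis=None: elementwise Cov[X, Y] = N^{-1} sum_n xc * conj(yc).
cov = (xc * np.conj(yc)).mean(axis=0)

# event_axis=-1: cov_matrix[i, m, n] averages outer products over samples.
cov_matrix = np.einsum('nim,nik->imk', xc, yc) / x.shape[0]

print(cov.shape)         # (2, 3)
print(cov_matrix.shape)  # (2, 3, 3)
```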
tensorflow_probability/python/stats/sample_stats.py
|
def covariance(x,
y=None,
sample_axis=0,
event_axis=-1,
keepdims=False,
name=None):
"""Sample covariance between observations indexed by `event_axis`.
Given `N` samples of scalar random variables `X` and `Y`, covariance may be
estimated as
```none
Cov[X, Y] := N^{-1} sum_{n=1}^N (X_n - Xbar) Conj{(Y_n - Ybar)}
Xbar := N^{-1} sum_{n=1}^N X_n
Ybar := N^{-1} sum_{n=1}^N Y_n
```
For vector-variate random variables `X = (X1, ..., Xd)`, `Y = (Y1, ..., Yd)`,
one is often interested in the covariance matrix, `C_{ij} := Cov[Xi, Yj]`.
```python
x = tf.random_normal(shape=(100, 2, 3))
y = tf.random_normal(shape=(100, 2, 3))
# cov[i, j] is the sample covariance between x[:, i, j] and y[:, i, j].
cov = tfp.stats.covariance(x, y, sample_axis=0, event_axis=None)
# cov_matrix[i, m, n] is the sample covariance of x[:, i, m] and y[:, i, n]
cov_matrix = tfp.stats.covariance(x, y, sample_axis=0, event_axis=-1)
```
Notice we divide by `N` (the numpy default), which does not create `NaN`
when `N = 1`, but is slightly biased.
Args:
x: A numeric `Tensor` holding samples.
y: Optional `Tensor` with same `dtype` and `shape` as `x`.
Default value: `None` (`y` is effectively set to `x`).
sample_axis: Scalar or vector `Tensor` designating axis holding samples, or
`None` (meaning all axes hold samples).
Default value: `0` (leftmost dimension).
event_axis: Scalar or vector `Tensor`, or `None` (scalar events).
Axis indexing random events, whose covariance we are interested in.
If a vector, entries must form a contiguous block of dims. `sample_axis`
and `event_axis` should not intersect.
Default value: `-1` (rightmost axis holds events).
keepdims: Boolean. Whether to keep the sample axis as singletons.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., `'covariance'`).
Returns:
cov: A `Tensor` of same `dtype` as the `x`, and rank equal to
`rank(x) - len(sample_axis) + 2 * len(event_axis)`.
Raises:
AssertionError: If `x` and `y` are found to have different shape.
ValueError: If `sample_axis` and `event_axis` are found to overlap.
ValueError: If `event_axis` is found to not be contiguous.
"""
with tf.compat.v1.name_scope(
name, 'covariance', values=[x, y, event_axis, sample_axis]):
x = tf.convert_to_tensor(value=x, name='x')
# Covariance *only* uses the centered versions of x (and y).
x -= tf.reduce_mean(input_tensor=x, axis=sample_axis, keepdims=True)
if y is None:
y = x
else:
y = tf.convert_to_tensor(value=y, name='y', dtype=x.dtype)
# If x and y have different shape, sample_axis and event_axis will likely
# be wrong for one of them!
x.shape.assert_is_compatible_with(y.shape)
y -= tf.reduce_mean(input_tensor=y, axis=sample_axis, keepdims=True)
if event_axis is None:
return tf.reduce_mean(
input_tensor=x * tf.math.conj(y), axis=sample_axis, keepdims=keepdims)
if sample_axis is None:
raise ValueError(
'sample_axis was None, which means all axis hold events, and this '
'overlaps with event_axis ({})'.format(event_axis))
event_axis = _make_positive_axis(event_axis, tf.rank(x))
sample_axis = _make_positive_axis(sample_axis, tf.rank(x))
# If we get lucky and axis is statically defined, we can do some checks.
if _is_list_like(event_axis) and _is_list_like(sample_axis):
if set(event_axis).intersection(sample_axis):
raise ValueError(
'sample_axis ({}) and event_axis ({}) overlapped'.format(
sample_axis, event_axis))
if (np.diff(sorted(event_axis)) > 1).any():
raise ValueError(
'event_axis must be contiguous. Found: {}'.format(event_axis))
batch_axis = list(
sorted(
set(range(x.shape.ndims)).difference(sample_axis + event_axis)))
else:
batch_axis, _ = tf.compat.v1.setdiff1d(
tf.range(0, tf.rank(x)), tf.concat((sample_axis, event_axis), 0))
event_axis = tf.convert_to_tensor(
value=event_axis, name='event_axis', dtype=tf.int32)
sample_axis = tf.convert_to_tensor(
value=sample_axis, name='sample_axis', dtype=tf.int32)
batch_axis = tf.convert_to_tensor(
value=batch_axis, name='batch_axis', dtype=tf.int32)
# Permute x/y until shape = B + E + S
perm_for_xy = tf.concat((batch_axis, event_axis, sample_axis), 0)
x_permed = tf.transpose(a=x, perm=perm_for_xy)
y_permed = tf.transpose(a=y, perm=perm_for_xy)
batch_ndims = tf.size(input=batch_axis)
batch_shape = tf.shape(input=x_permed)[:batch_ndims]
event_ndims = tf.size(input=event_axis)
event_shape = tf.shape(input=x_permed)[batch_ndims:batch_ndims +
event_ndims]
sample_shape = tf.shape(input=x_permed)[batch_ndims + event_ndims:]
sample_ndims = tf.size(input=sample_shape)
n_samples = tf.reduce_prod(input_tensor=sample_shape)
n_events = tf.reduce_prod(input_tensor=event_shape)
# Flatten sample_axis into one long dim.
x_permed_flat = tf.reshape(
x_permed, tf.concat((batch_shape, event_shape, [n_samples]), 0))
y_permed_flat = tf.reshape(
y_permed, tf.concat((batch_shape, event_shape, [n_samples]), 0))
# Do the same for event_axis.
x_permed_flat = tf.reshape(
x_permed, tf.concat((batch_shape, [n_events], [n_samples]), 0))
y_permed_flat = tf.reshape(
y_permed, tf.concat((batch_shape, [n_events], [n_samples]), 0))
# After matmul, cov.shape = batch_shape + [n_events, n_events]
cov = tf.matmul(
x_permed_flat, y_permed_flat, adjoint_b=True) / tf.cast(
n_samples, x.dtype)
# Insert some singletons to make
# cov.shape = batch_shape + event_shape**2 + [1,...,1]
# This is just like x_permed.shape, except the sample_axis is all 1's, and
# the [n_events] became event_shape**2.
cov = tf.reshape(
cov,
tf.concat(
(
batch_shape,
# event_shape**2 used here because it is the same length as
# event_shape, and has the same number of elements as one
# batch of covariance.
event_shape**2,
tf.ones([sample_ndims], tf.int32)),
0))
# Permuting by the argsort inverts the permutation, making
# cov.shape have ones in the position where there were samples, and
# [n_events * n_events] in the event position.
cov = tf.transpose(a=cov, perm=tf.math.invert_permutation(perm_for_xy))
# Now expand event_shape**2 into event_shape + event_shape.
# We here use (for the first time) the fact that we require event_axis to be
# contiguous.
e_start = event_axis[0]
e_len = 1 + event_axis[-1] - event_axis[0]
cov = tf.reshape(
cov,
tf.concat((tf.shape(input=cov)[:e_start], event_shape, event_shape,
tf.shape(input=cov)[e_start + e_len:]), 0))
# tf.squeeze requires python ints for axis, not Tensor. This is enough to
# require our axis args to be constants.
if not keepdims:
squeeze_axis = tf.where(sample_axis < e_start, sample_axis,
sample_axis + e_len)
cov = _squeeze(cov, axis=squeeze_axis)
return cov
|
def covariance(x,
y=None,
sample_axis=0,
event_axis=-1,
keepdims=False,
name=None):
"""Sample covariance between observations indexed by `event_axis`.
Given `N` samples of scalar random variables `X` and `Y`, covariance may be
estimated as
```none
Cov[X, Y] := N^{-1} sum_{n=1}^N (X_n - Xbar) Conj{(Y_n - Ybar)}
Xbar := N^{-1} sum_{n=1}^N X_n
Ybar := N^{-1} sum_{n=1}^N Y_n
```
For vector-variate random variables `X = (X1, ..., Xd)`, `Y = (Y1, ..., Yd)`,
one is often interested in the covariance matrix, `C_{ij} := Cov[Xi, Yj]`.
```python
x = tf.random_normal(shape=(100, 2, 3))
y = tf.random_normal(shape=(100, 2, 3))
# cov[i, j] is the sample covariance between x[:, i, j] and y[:, i, j].
cov = tfp.stats.covariance(x, y, sample_axis=0, event_axis=None)
# cov_matrix[i, m, n] is the sample covariance of x[:, i, m] and y[:, i, n]
cov_matrix = tfp.stats.covariance(x, y, sample_axis=0, event_axis=-1)
```
Notice we divide by `N` (the numpy default), which does not create `NaN`
when `N = 1`, but is slightly biased.
Args:
x: A numeric `Tensor` holding samples.
y: Optional `Tensor` with same `dtype` and `shape` as `x`.
Default value: `None` (`y` is effectively set to `x`).
sample_axis: Scalar or vector `Tensor` designating axis holding samples, or
`None` (meaning all axes hold samples).
Default value: `0` (leftmost dimension).
event_axis: Scalar or vector `Tensor`, or `None` (scalar events).
Axis indexing random events, whose covariance we are interested in.
If a vector, entries must form a contiguous block of dims. `sample_axis`
and `event_axis` should not intersect.
Default value: `-1` (rightmost axis holds events).
keepdims: Boolean. Whether to keep the sample axis as singletons.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., `'covariance'`).
Returns:
cov: A `Tensor` of same `dtype` as the `x`, and rank equal to
`rank(x) - len(sample_axis) + 2 * len(event_axis)`.
Raises:
AssertionError: If `x` and `y` are found to have different shape.
ValueError: If `sample_axis` and `event_axis` are found to overlap.
ValueError: If `event_axis` is found to not be contiguous.
"""
with tf.compat.v1.name_scope(
name, 'covariance', values=[x, y, event_axis, sample_axis]):
x = tf.convert_to_tensor(value=x, name='x')
# Covariance *only* uses the centered versions of x (and y).
x -= tf.reduce_mean(input_tensor=x, axis=sample_axis, keepdims=True)
if y is None:
y = x
else:
y = tf.convert_to_tensor(value=y, name='y', dtype=x.dtype)
# If x and y have different shape, sample_axis and event_axis will likely
# be wrong for one of them!
x.shape.assert_is_compatible_with(y.shape)
y -= tf.reduce_mean(input_tensor=y, axis=sample_axis, keepdims=True)
if event_axis is None:
return tf.reduce_mean(
input_tensor=x * tf.math.conj(y), axis=sample_axis, keepdims=keepdims)
if sample_axis is None:
raise ValueError(
'sample_axis was None, which means all axis hold events, and this '
'overlaps with event_axis ({})'.format(event_axis))
event_axis = _make_positive_axis(event_axis, tf.rank(x))
sample_axis = _make_positive_axis(sample_axis, tf.rank(x))
# If we get lucky and axis is statically defined, we can do some checks.
if _is_list_like(event_axis) and _is_list_like(sample_axis):
if set(event_axis).intersection(sample_axis):
raise ValueError(
'sample_axis ({}) and event_axis ({}) overlapped'.format(
sample_axis, event_axis))
if (np.diff(sorted(event_axis)) > 1).any():
raise ValueError(
'event_axis must be contiguous. Found: {}'.format(event_axis))
batch_axis = list(
sorted(
set(range(x.shape.ndims)).difference(sample_axis + event_axis)))
else:
batch_axis, _ = tf.compat.v1.setdiff1d(
tf.range(0, tf.rank(x)), tf.concat((sample_axis, event_axis), 0))
event_axis = tf.convert_to_tensor(
value=event_axis, name='event_axis', dtype=tf.int32)
sample_axis = tf.convert_to_tensor(
value=sample_axis, name='sample_axis', dtype=tf.int32)
batch_axis = tf.convert_to_tensor(
value=batch_axis, name='batch_axis', dtype=tf.int32)
# Permute x/y until shape = B + E + S
perm_for_xy = tf.concat((batch_axis, event_axis, sample_axis), 0)
x_permed = tf.transpose(a=x, perm=perm_for_xy)
y_permed = tf.transpose(a=y, perm=perm_for_xy)
batch_ndims = tf.size(input=batch_axis)
batch_shape = tf.shape(input=x_permed)[:batch_ndims]
event_ndims = tf.size(input=event_axis)
event_shape = tf.shape(input=x_permed)[batch_ndims:batch_ndims +
event_ndims]
sample_shape = tf.shape(input=x_permed)[batch_ndims + event_ndims:]
sample_ndims = tf.size(input=sample_shape)
n_samples = tf.reduce_prod(input_tensor=sample_shape)
n_events = tf.reduce_prod(input_tensor=event_shape)
# Flatten sample_axis into one long dim.
x_permed_flat = tf.reshape(
x_permed, tf.concat((batch_shape, event_shape, [n_samples]), 0))
y_permed_flat = tf.reshape(
y_permed, tf.concat((batch_shape, event_shape, [n_samples]), 0))
# Do the same for event_axis.
x_permed_flat = tf.reshape(
x_permed, tf.concat((batch_shape, [n_events], [n_samples]), 0))
y_permed_flat = tf.reshape(
y_permed, tf.concat((batch_shape, [n_events], [n_samples]), 0))
# After matmul, cov.shape = batch_shape + [n_events, n_events]
cov = tf.matmul(
x_permed_flat, y_permed_flat, adjoint_b=True) / tf.cast(
n_samples, x.dtype)
# Insert some singletons to make
# cov.shape = batch_shape + event_shape**2 + [1,...,1]
# This is just like x_permed.shape, except the sample_axis is all 1's, and
# the [n_events] became event_shape**2.
cov = tf.reshape(
cov,
tf.concat(
(
batch_shape,
# event_shape**2 used here because it is the same length as
# event_shape, and has the same number of elements as one
# batch of covariance.
event_shape**2,
tf.ones([sample_ndims], tf.int32)),
0))
# Permuting by the argsort inverts the permutation, making
# cov.shape have ones in the position where there were samples, and
# [n_events * n_events] in the event position.
cov = tf.transpose(a=cov, perm=tf.math.invert_permutation(perm_for_xy))
# Now expand event_shape**2 into event_shape + event_shape.
# We here use (for the first time) the fact that we require event_axis to be
# contiguous.
e_start = event_axis[0]
e_len = 1 + event_axis[-1] - event_axis[0]
cov = tf.reshape(
cov,
tf.concat((tf.shape(input=cov)[:e_start], event_shape, event_shape,
tf.shape(input=cov)[e_start + e_len:]), 0))
# tf.squeeze requires python ints for axis, not Tensor. This is enough to
# require our axis args to be constants.
if not keepdims:
squeeze_axis = tf.where(sample_axis < e_start, sample_axis,
sample_axis + e_len)
cov = _squeeze(cov, axis=squeeze_axis)
return cov
|
[
"Sample",
"covariance",
"between",
"observations",
"indexed",
"by",
"event_axis",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/sample_stats.py#L284-L462
|
[
"def",
"covariance",
"(",
"x",
",",
"y",
"=",
"None",
",",
"sample_axis",
"=",
"0",
",",
"event_axis",
"=",
"-",
"1",
",",
"keepdims",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'covariance'",
",",
"values",
"=",
"[",
"x",
",",
"y",
",",
"event_axis",
",",
"sample_axis",
"]",
")",
":",
"x",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
",",
"name",
"=",
"'x'",
")",
"# Covariance *only* uses the centered versions of x (and y).",
"x",
"-=",
"tf",
".",
"reduce_mean",
"(",
"input_tensor",
"=",
"x",
",",
"axis",
"=",
"sample_axis",
",",
"keepdims",
"=",
"True",
")",
"if",
"y",
"is",
"None",
":",
"y",
"=",
"x",
"else",
":",
"y",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"y",
",",
"name",
"=",
"'y'",
",",
"dtype",
"=",
"x",
".",
"dtype",
")",
"# If x and y have different shape, sample_axis and event_axis will likely",
"# be wrong for one of them!",
"x",
".",
"shape",
".",
"assert_is_compatible_with",
"(",
"y",
".",
"shape",
")",
"y",
"-=",
"tf",
".",
"reduce_mean",
"(",
"input_tensor",
"=",
"y",
",",
"axis",
"=",
"sample_axis",
",",
"keepdims",
"=",
"True",
")",
"if",
"event_axis",
"is",
"None",
":",
"return",
"tf",
".",
"reduce_mean",
"(",
"input_tensor",
"=",
"x",
"*",
"tf",
".",
"math",
".",
"conj",
"(",
"y",
")",
",",
"axis",
"=",
"sample_axis",
",",
"keepdims",
"=",
"keepdims",
")",
"if",
"sample_axis",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'sample_axis was None, which means all axis hold events, and this '",
"'overlaps with event_axis ({})'",
".",
"format",
"(",
"event_axis",
")",
")",
"event_axis",
"=",
"_make_positive_axis",
"(",
"event_axis",
",",
"tf",
".",
"rank",
"(",
"x",
")",
")",
"sample_axis",
"=",
"_make_positive_axis",
"(",
"sample_axis",
",",
"tf",
".",
"rank",
"(",
"x",
")",
")",
"# If we get lucky and axis is statically defined, we can do some checks.",
"if",
"_is_list_like",
"(",
"event_axis",
")",
"and",
"_is_list_like",
"(",
"sample_axis",
")",
":",
"if",
"set",
"(",
"event_axis",
")",
".",
"intersection",
"(",
"sample_axis",
")",
":",
"raise",
"ValueError",
"(",
"'sample_axis ({}) and event_axis ({}) overlapped'",
".",
"format",
"(",
"sample_axis",
",",
"event_axis",
")",
")",
"if",
"(",
"np",
".",
"diff",
"(",
"sorted",
"(",
"event_axis",
")",
")",
">",
"1",
")",
".",
"any",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'event_axis must be contiguous. Found: {}'",
".",
"format",
"(",
"event_axis",
")",
")",
"batch_axis",
"=",
"list",
"(",
"sorted",
"(",
"set",
"(",
"range",
"(",
"x",
".",
"shape",
".",
"ndims",
")",
")",
".",
"difference",
"(",
"sample_axis",
"+",
"event_axis",
")",
")",
")",
"else",
":",
"batch_axis",
",",
"_",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"setdiff1d",
"(",
"tf",
".",
"range",
"(",
"0",
",",
"tf",
".",
"rank",
"(",
"x",
")",
")",
",",
"tf",
".",
"concat",
"(",
"(",
"sample_axis",
",",
"event_axis",
")",
",",
"0",
")",
")",
"event_axis",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"event_axis",
",",
"name",
"=",
"'event_axis'",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"sample_axis",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"sample_axis",
",",
"name",
"=",
"'sample_axis'",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"batch_axis",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"batch_axis",
",",
"name",
"=",
"'batch_axis'",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"# Permute x/y until shape = B + E + S",
"perm_for_xy",
"=",
"tf",
".",
"concat",
"(",
"(",
"batch_axis",
",",
"event_axis",
",",
"sample_axis",
")",
",",
"0",
")",
"x_permed",
"=",
"tf",
".",
"transpose",
"(",
"a",
"=",
"x",
",",
"perm",
"=",
"perm_for_xy",
")",
"y_permed",
"=",
"tf",
".",
"transpose",
"(",
"a",
"=",
"y",
",",
"perm",
"=",
"perm_for_xy",
")",
"batch_ndims",
"=",
"tf",
".",
"size",
"(",
"input",
"=",
"batch_axis",
")",
"batch_shape",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"x_permed",
")",
"[",
":",
"batch_ndims",
"]",
"event_ndims",
"=",
"tf",
".",
"size",
"(",
"input",
"=",
"event_axis",
")",
"event_shape",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"x_permed",
")",
"[",
"batch_ndims",
":",
"batch_ndims",
"+",
"event_ndims",
"]",
"sample_shape",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"x_permed",
")",
"[",
"batch_ndims",
"+",
"event_ndims",
":",
"]",
"sample_ndims",
"=",
"tf",
".",
"size",
"(",
"input",
"=",
"sample_shape",
")",
"n_samples",
"=",
"tf",
".",
"reduce_prod",
"(",
"input_tensor",
"=",
"sample_shape",
")",
"n_events",
"=",
"tf",
".",
"reduce_prod",
"(",
"input_tensor",
"=",
"event_shape",
")",
"# Flatten sample_axis into one long dim.",
"x_permed_flat",
"=",
"tf",
".",
"reshape",
"(",
"x_permed",
",",
"tf",
".",
"concat",
"(",
"(",
"batch_shape",
",",
"event_shape",
",",
"[",
"n_samples",
"]",
")",
",",
"0",
")",
")",
"y_permed_flat",
"=",
"tf",
".",
"reshape",
"(",
"y_permed",
",",
"tf",
".",
"concat",
"(",
"(",
"batch_shape",
",",
"event_shape",
",",
"[",
"n_samples",
"]",
")",
",",
"0",
")",
")",
"# Do the same for event_axis.",
"x_permed_flat",
"=",
"tf",
".",
"reshape",
"(",
"x_permed",
",",
"tf",
".",
"concat",
"(",
"(",
"batch_shape",
",",
"[",
"n_events",
"]",
",",
"[",
"n_samples",
"]",
")",
",",
"0",
")",
")",
"y_permed_flat",
"=",
"tf",
".",
"reshape",
"(",
"y_permed",
",",
"tf",
".",
"concat",
"(",
"(",
"batch_shape",
",",
"[",
"n_events",
"]",
",",
"[",
"n_samples",
"]",
")",
",",
"0",
")",
")",
"# After matmul, cov.shape = batch_shape + [n_events, n_events]",
"cov",
"=",
"tf",
".",
"matmul",
"(",
"x_permed_flat",
",",
"y_permed_flat",
",",
"adjoint_b",
"=",
"True",
")",
"/",
"tf",
".",
"cast",
"(",
"n_samples",
",",
"x",
".",
"dtype",
")",
"# Insert some singletons to make",
"# cov.shape = batch_shape + event_shape**2 + [1,...,1]",
"# This is just like x_permed.shape, except the sample_axis is all 1's, and",
"# the [n_events] became event_shape**2.",
"cov",
"=",
"tf",
".",
"reshape",
"(",
"cov",
",",
"tf",
".",
"concat",
"(",
"(",
"batch_shape",
",",
"# event_shape**2 used here because it is the same length as",
"# event_shape, and has the same number of elements as one",
"# batch of covariance.",
"event_shape",
"**",
"2",
",",
"tf",
".",
"ones",
"(",
"[",
"sample_ndims",
"]",
",",
"tf",
".",
"int32",
")",
")",
",",
"0",
")",
")",
"# Permuting by the argsort inverts the permutation, making",
"# cov.shape have ones in the position where there were samples, and",
"# [n_events * n_events] in the event position.",
"cov",
"=",
"tf",
".",
"transpose",
"(",
"a",
"=",
"cov",
",",
"perm",
"=",
"tf",
".",
"math",
".",
"invert_permutation",
"(",
"perm_for_xy",
")",
")",
"# Now expand event_shape**2 into event_shape + event_shape.",
"# We here use (for the first time) the fact that we require event_axis to be",
"# contiguous.",
"e_start",
"=",
"event_axis",
"[",
"0",
"]",
"e_len",
"=",
"1",
"+",
"event_axis",
"[",
"-",
"1",
"]",
"-",
"event_axis",
"[",
"0",
"]",
"cov",
"=",
"tf",
".",
"reshape",
"(",
"cov",
",",
"tf",
".",
"concat",
"(",
"(",
"tf",
".",
"shape",
"(",
"input",
"=",
"cov",
")",
"[",
":",
"e_start",
"]",
",",
"event_shape",
",",
"event_shape",
",",
"tf",
".",
"shape",
"(",
"input",
"=",
"cov",
")",
"[",
"e_start",
"+",
"e_len",
":",
"]",
")",
",",
"0",
")",
")",
"# tf.squeeze requires python ints for axis, not Tensor. This is enough to",
"# require our axis args to be constants.",
"if",
"not",
"keepdims",
":",
"squeeze_axis",
"=",
"tf",
".",
"where",
"(",
"sample_axis",
"<",
"e_start",
",",
"sample_axis",
",",
"sample_axis",
"+",
"e_len",
")",
"cov",
"=",
"_squeeze",
"(",
"cov",
",",
"axis",
"=",
"squeeze_axis",
")",
"return",
"cov"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
correlation
|
Sample correlation (Pearson) between observations indexed by `event_axis`.
Given `N` samples of scalar random variables `X` and `Y`, correlation may be
estimated as
```none
Corr[X, Y] := Cov[X, Y] / Sqrt(Cov[X, X] * Cov[Y, Y]),
where
Cov[X, Y] := N^{-1} sum_{n=1}^N (X_n - Xbar) Conj{(Y_n - Ybar)}
Xbar := N^{-1} sum_{n=1}^N X_n
Ybar := N^{-1} sum_{n=1}^N Y_n
```
Correlation is always in the interval `[-1, 1]`, and `Corr[X, X] == 1`.
For vector-variate random variables `X = (X1, ..., Xd)`, `Y = (Y1, ..., Yd)`,
one is often interested in the correlation matrix, `C_{ij} := Corr[Xi, Yj]`.
```python
x = tf.random_normal(shape=(100, 2, 3))
y = tf.random_normal(shape=(100, 2, 3))
# corr[i, j] is the sample correlation between x[:, i, j] and y[:, i, j].
corr = tfp.stats.correlation(x, y, sample_axis=0, event_axis=None)
# corr_matrix[i, m, n] is the sample correlation of x[:, i, m] and y[:, i, n]
corr_matrix = tfp.stats.correlation(x, y, sample_axis=0, event_axis=-1)
```
Notice we divide by `N` (the numpy default), which does not create `NaN`
when `N = 1`, but is slightly biased.
Args:
x: A numeric `Tensor` holding samples.
y: Optional `Tensor` with same `dtype` and `shape` as `x`.
Default value: `None` (`y` is effectively set to `x`).
sample_axis: Scalar or vector `Tensor` designating axis holding samples, or
`None` (meaning all axes hold samples).
Default value: `0` (leftmost dimension).
event_axis: Scalar or vector `Tensor`, or `None` (scalar events).
Axis indexing random events, whose correlation we are interested in.
If a vector, entries must form a contiguous block of dims. `sample_axis`
and `event_axis` should not intersect.
Default value: `-1` (rightmost axis holds events).
keepdims: Boolean. Whether to keep the sample axis as singletons.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., `'correlation'`).
Returns:
corr: A `Tensor` of same `dtype` as the `x`, and rank equal to
`rank(x) - len(sample_axis) + 2 * len(event_axis)`.
Raises:
AssertionError: If `x` and `y` are found to have different shape.
ValueError: If `sample_axis` and `event_axis` are found to overlap.
ValueError: If `event_axis` is found to not be contiguous.
|
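The code comments below note that the implementation divides by the standard deviation first and only then calls `covariance`. A small NumPy sketch of that same order of operations (illustrative only; the factor `0.5` and the shapes are arbitrary):

```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(100, 3))
y = 0.5 * x + rng.normal(size=(100, 3))   # arbitrary dependent data

def standardize(t):
    """Center and divide by the biased (divide-by-N) standard deviation."""
    tc = t - t.mean(axis=0)
    return tc / np.sqrt((tc ** 2).mean(axis=0))

# Corr[X, Y] = Cov[X / Stddev[X], Y / Stddev[Y]]: divide first, then covariance.
xs, ys = standardize(x), standardize(y)
corr_matrix = xs.T @ ys / x.shape[0]
print(np.round(np.diag(corr_matrix), 2))  # each near 0.5 / sqrt(1.25) ~ 0.45
```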
tensorflow_probability/python/stats/sample_stats.py
|
def correlation(x,
y=None,
sample_axis=0,
event_axis=-1,
keepdims=False,
name=None):
"""Sample correlation (Pearson) between observations indexed by `event_axis`.
Given `N` samples of scalar random variables `X` and `Y`, correlation may be
estimated as
```none
Corr[X, Y] := Cov[X, Y] / Sqrt(Cov[X, X] * Cov[Y, Y]),
where
Cov[X, Y] := N^{-1} sum_{n=1}^N (X_n - Xbar) Conj{(Y_n - Ybar)}
Xbar := N^{-1} sum_{n=1}^N X_n
Ybar := N^{-1} sum_{n=1}^N Y_n
```
Correlation is always in the interval `[-1, 1]`, and `Corr[X, X] == 1`.
For vector-variate random variables `X = (X1, ..., Xd)`, `Y = (Y1, ..., Yd)`,
one is often interested in the correlation matrix, `C_{ij} := Corr[Xi, Yj]`.
```python
x = tf.random_normal(shape=(100, 2, 3))
y = tf.random_normal(shape=(100, 2, 3))
# corr[i, j] is the sample correlation between x[:, i, j] and y[:, i, j].
corr = tfp.stats.correlation(x, y, sample_axis=0, event_axis=None)
# corr_matrix[i, m, n] is the sample correlation of x[:, i, m] and y[:, i, n]
corr_matrix = tfp.stats.correlation(x, y, sample_axis=0, event_axis=-1)
```
Notice we divide by `N` (the numpy default), which does not create `NaN`
when `N = 1`, but is slightly biased.
Args:
x: A numeric `Tensor` holding samples.
y: Optional `Tensor` with same `dtype` and `shape` as `x`.
Default value: `None` (`y` is effectively set to `x`).
sample_axis: Scalar or vector `Tensor` designating axis holding samples, or
`None` (meaning all axes hold samples).
Default value: `0` (leftmost dimension).
event_axis: Scalar or vector `Tensor`, or `None` (scalar events).
Axis indexing random events, whose correlation we are interested in.
If a vector, entries must form a contiguous block of dims. `sample_axis`
and `event_axis` should not intersect.
Default value: `-1` (rightmost axis holds events).
keepdims: Boolean. Whether to keep the sample axis as singletons.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., `'correlation'`).
Returns:
corr: A `Tensor` of same `dtype` as the `x`, and rank equal to
`rank(x) - len(sample_axis) + 2 * len(event_axis)`.
Raises:
AssertionError: If `x` and `y` are found to have different shape.
ValueError: If `sample_axis` and `event_axis` are found to overlap.
ValueError: If `event_axis` is found to not be contiguous.
"""
with tf.compat.v1.name_scope(
name, 'correlation', values=[x, y, event_axis, sample_axis]):
# Corr[X, Y] = Cov[X, Y] / (Stddev[X] * Stddev[Y])
# = Cov[X / Stddev[X], Y / Stddev[Y]]
# So we could compute covariance first then divide by stddev, or
# divide by stddev and compute covariance.
# Dividing by stddev then computing covariance is potentially more stable.
# But... computing covariance first then dividing involves 2 fewer large
# broadcasts. We choose to divide first, largely because it avoids
# difficulties with the various options for sample/event axis kwargs.
x /= stddev(x, sample_axis=sample_axis, keepdims=True)
if y is not None:
y /= stddev(y, sample_axis=sample_axis, keepdims=True)
return covariance(
x=x,
y=y,
event_axis=event_axis,
sample_axis=sample_axis,
keepdims=keepdims)
|
def correlation(x,
y=None,
sample_axis=0,
event_axis=-1,
keepdims=False,
name=None):
"""Sample correlation (Pearson) between observations indexed by `event_axis`.
Given `N` samples of scalar random variables `X` and `Y`, correlation may be
estimated as
```none
Corr[X, Y] := Cov[X, Y] / Sqrt(Cov[X, X] * Cov[Y, Y]),
where
Cov[X, Y] := N^{-1} sum_{n=1}^N (X_n - Xbar) Conj{(Y_n - Ybar)}
Xbar := N^{-1} sum_{n=1}^N X_n
Ybar := N^{-1} sum_{n=1}^N Y_n
```
Correlation is always in the interval `[-1, 1]`, and `Corr[X, X] == 1`.
For vector-variate random variables `X = (X1, ..., Xd)`, `Y = (Y1, ..., Yd)`,
one is often interested in the correlation matrix, `C_{ij} := Corr[Xi, Yj]`.
```python
x = tf.random_normal(shape=(100, 2, 3))
y = tf.random_normal(shape=(100, 2, 3))
# corr[i, j] is the sample correlation between x[:, i, j] and y[:, i, j].
corr = tfp.stats.correlation(x, y, sample_axis=0, event_axis=None)
# corr_matrix[i, m, n] is the sample correlation of x[:, i, m] and y[:, i, n]
corr_matrix = tfp.stats.correlation(x, y, sample_axis=0, event_axis=-1)
```
Notice we divide by `N` (the numpy default), which does not create `NaN`
when `N = 1`, but is slightly biased.
Args:
x: A numeric `Tensor` holding samples.
y: Optional `Tensor` with same `dtype` and `shape` as `x`.
Default value: `None` (`y` is effectively set to `x`).
sample_axis: Scalar or vector `Tensor` designating axis holding samples, or
`None` (meaning all axes hold samples).
Default value: `0` (leftmost dimension).
event_axis: Scalar or vector `Tensor`, or `None` (scalar events).
Axis indexing random events, whose correlation we are interested in.
If a vector, entries must form a contiguous block of dims. `sample_axis`
and `event_axis` should not intersect.
Default value: `-1` (rightmost axis holds events).
keepdims: Boolean. Whether to keep the sample axis as singletons.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., `'correlation'`).
Returns:
corr: A `Tensor` of same `dtype` as the `x`, and rank equal to
`rank(x) - len(sample_axis) + 2 * len(event_axis)`.
Raises:
AssertionError: If `x` and `y` are found to have different shape.
ValueError: If `sample_axis` and `event_axis` are found to overlap.
ValueError: If `event_axis` is found to not be contiguous.
"""
with tf.compat.v1.name_scope(
name, 'correlation', values=[x, y, event_axis, sample_axis]):
# Corr[X, Y] = Cov[X, Y] / (Stddev[X] * Stddev[Y])
# = Cov[X / Stddev[X], Y / Stddev[Y]]
# So we could compute covariance first then divide by stddev, or
# divide by stddev and compute covariance.
# Dividing by stddev then computing covariance is potentially more stable.
# But... computing covariance first then dividing involves 2 fewer large
# broadcasts. We choose to divide first, largely because it avoids
# difficulties with the various options for sample/event axis kwargs.
x /= stddev(x, sample_axis=sample_axis, keepdims=True)
if y is not None:
y /= stddev(y, sample_axis=sample_axis, keepdims=True)
return covariance(
x=x,
y=y,
event_axis=event_axis,
sample_axis=sample_axis,
keepdims=keepdims)
|
[
"Sample",
"correlation",
"(",
"Pearson",
")",
"between",
"observations",
"indexed",
"by",
"event_axis",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/sample_stats.py#L465-L549
|
[
"def",
"correlation",
"(",
"x",
",",
"y",
"=",
"None",
",",
"sample_axis",
"=",
"0",
",",
"event_axis",
"=",
"-",
"1",
",",
"keepdims",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'correlation'",
",",
"values",
"=",
"[",
"x",
",",
"y",
",",
"event_axis",
",",
"sample_axis",
"]",
")",
":",
"# Corr[X, Y] = Cov[X, Y] / (Stddev[X] * Stddev[Y])",
"# = Cov[X / Stddev[X], Y / Stddev[Y]]",
"# So we could compute covariance first then divide by stddev, or",
"# divide by stddev and compute covariance.",
"# Dividing by stddev then computing covariance is potentially more stable.",
"# But... computing covariance first then dividing involves 2 fewer large",
"# broadcasts. We choose to divide first, largely because it avoids",
"# difficulties with the various options for sample/event axis kwargs.",
"x",
"/=",
"stddev",
"(",
"x",
",",
"sample_axis",
"=",
"sample_axis",
",",
"keepdims",
"=",
"True",
")",
"if",
"y",
"is",
"not",
"None",
":",
"y",
"/=",
"stddev",
"(",
"y",
",",
"sample_axis",
"=",
"sample_axis",
",",
"keepdims",
"=",
"True",
")",
"return",
"covariance",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"event_axis",
"=",
"event_axis",
",",
"sample_axis",
"=",
"sample_axis",
",",
"keepdims",
"=",
"keepdims",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
stddev
|
Estimate standard deviation using samples.
Given `N` samples of a scalar-valued random variable `X`, the standard deviation may
be estimated as
```none
Stddev[X] := Sqrt[Var[X]],
Var[X] := N^{-1} sum_{n=1}^N (X_n - Xbar) Conj{(X_n - Xbar)},
Xbar := N^{-1} sum_{n=1}^N X_n
```
```python
x = tf.random_normal(shape=(100, 2, 3))
# stddev[i, j] is the sample standard deviation of the (i, j) batch member.
stddev = tfp.stats.stddev(x, sample_axis=0)
```
Scaling a unit normal by a standard deviation produces normal samples
with that standard deviation.
```python
observed_data = read_data_samples(...)
stddev = tfp.stats.stddev(observed_data)
# Make fake_data with the same standard deviation as observed_data.
fake_data = stddev * tf.random_normal(shape=(100,))
```
Notice we divide by `N` (the numpy default), which does not create `NaN`
when `N = 1`, but is slightly biased.
Args:
x: A numeric `Tensor` holding samples.
sample_axis: Scalar or vector `Tensor` designating axis holding samples, or
`None` (meaning all axes hold samples).
Default value: `0` (leftmost dimension).
keepdims: Boolean. Whether to keep the sample axis as singletons.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., `'stddev'`).
Returns:
stddev: A `Tensor` of same `dtype` as the `x`, and rank equal to
`rank(x) - len(sample_axis)`
|
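As a quick NumPy analogue of the divide-by-`N` convention the docstring describes (illustrative only; the TFP code path below instead delegates to `variance` and then `tf.sqrt`):

```python
import numpy as np

x = np.random.default_rng(0).normal(scale=3.0, size=(100, 2, 3))

# Stddev[X] = sqrt(Var[X]), with the biased divide-by-N variance.
xc = x - x.mean(axis=0)
stddev = np.sqrt((xc * np.conj(xc)).real.mean(axis=0))
print(stddev.shape)  # (2, 3); entries scatter around 3.0
```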
tensorflow_probability/python/stats/sample_stats.py
|
def stddev(x, sample_axis=0, keepdims=False, name=None):
"""Estimate standard deviation using samples.
Given `N` samples of a scalar-valued random variable `X`, the standard deviation may
be estimated as
```none
Stddev[X] := Sqrt[Var[X]],
Var[X] := N^{-1} sum_{n=1}^N (X_n - Xbar) Conj{(X_n - Xbar)},
Xbar := N^{-1} sum_{n=1}^N X_n
```
```python
x = tf.random_normal(shape=(100, 2, 3))
# stddev[i, j] is the sample standard deviation of the (i, j) batch member.
stddev = tfp.stats.stddev(x, sample_axis=0)
```
Scaling a unit normal by a standard deviation produces normal samples
with that standard deviation.
```python
observed_data = read_data_samples(...)
stddev = tfp.stats.stddev(observed_data)
# Make fake_data with the same standard deviation as observed_data.
fake_data = stddev * tf.random_normal(shape=(100,))
```
Notice we divide by `N` (the numpy default), which does not create `NaN`
when `N = 1`, but is slightly biased.
Args:
x: A numeric `Tensor` holding samples.
sample_axis: Scalar or vector `Tensor` designating axis holding samples, or
`None` (meaning all axes hold samples).
Default value: `0` (leftmost dimension).
keepdims: Boolean. Whether to keep the sample axis as singletons.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., `'stddev'`).
Returns:
stddev: A `Tensor` of same `dtype` as the `x`, and rank equal to
`rank(x) - len(sample_axis)`
"""
with tf.compat.v1.name_scope(name, 'stddev', values=[x, sample_axis]):
return tf.sqrt(variance(x, sample_axis=sample_axis, keepdims=keepdims))
|
def stddev(x, sample_axis=0, keepdims=False, name=None):
"""Estimate standard deviation using samples.
Given `N` samples of a scalar-valued random variable `X`, the standard deviation may
be estimated as
```none
Stddev[X] := Sqrt[Var[X]],
Var[X] := N^{-1} sum_{n=1}^N (X_n - Xbar) Conj{(X_n - Xbar)},
Xbar := N^{-1} sum_{n=1}^N X_n
```
```python
x = tf.random_normal(shape=(100, 2, 3))
# stddev[i, j] is the sample standard deviation of the (i, j) batch member.
stddev = tfp.stats.stddev(x, sample_axis=0)
```
Scaling a unit normal by a standard deviation produces normal samples
with that standard deviation.
```python
observed_data = read_data_samples(...)
stddev = tfp.stats.stddev(observed_data)
# Make fake_data with the same standard deviation as observed_data.
fake_data = stddev * tf.random_normal(shape=(100,))
```
Notice we divide by `N` (the numpy default), which does not create `NaN`
when `N = 1`, but is slightly biased.
Args:
x: A numeric `Tensor` holding samples.
sample_axis: Scalar or vector `Tensor` designating axis holding samples, or
`None` (meaning all axes hold samples).
Default value: `0` (leftmost dimension).
keepdims: Boolean. Whether to keep the sample axis as singletons.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., `'stddev'`).
Returns:
stddev: A `Tensor` of same `dtype` as the `x`, and rank equal to
`rank(x) - len(sample_axis)`
"""
with tf.compat.v1.name_scope(name, 'stddev', values=[x, sample_axis]):
return tf.sqrt(variance(x, sample_axis=sample_axis, keepdims=keepdims))
|
[
"Estimate",
"standard",
"deviation",
"using",
"samples",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/sample_stats.py#L552-L599
|
[
"def",
"stddev",
"(",
"x",
",",
"sample_axis",
"=",
"0",
",",
"keepdims",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'stddev'",
",",
"values",
"=",
"[",
"x",
",",
"sample_axis",
"]",
")",
":",
"return",
"tf",
".",
"sqrt",
"(",
"variance",
"(",
"x",
",",
"sample_axis",
"=",
"sample_axis",
",",
"keepdims",
"=",
"keepdims",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
variance
|
Estimate variance using samples.
Given `N` samples of a scalar-valued random variable `X`, the variance may
be estimated as
```none
Var[X] := N^{-1} sum_{n=1}^N (X_n - Xbar) Conj{(X_n - Xbar)}
Xbar := N^{-1} sum_{n=1}^N X_n
```
```python
x = tf.random_normal(shape=(100, 2, 3))
# var[i, j] is the sample variance of the (i, j) batch member of x.
var = tfp.stats.variance(x, sample_axis=0)
```
Notice we divide by `N` (the numpy default), which does not create `NaN`
when `N = 1`, but is slightly biased.
Args:
x: A numeric `Tensor` holding samples.
sample_axis: Scalar or vector `Tensor` designating axis holding samples, or
`None` (meaning all axes hold samples).
Default value: `0` (leftmost dimension).
keepdims: Boolean. Whether to keep the sample axis as singletons.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., `'variance'`).
Returns:
var: A `Tensor` of same `dtype` as the `x`, and rank equal to
`rank(x) - len(sample_axis)`
|
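Since the docstring's divide-by-`N` estimator matches NumPy's default (`ddof=0`), a one-line sanity check is possible; this is my own illustration, not part of the dataset's source:

```python
import numpy as np

x = np.random.default_rng(0).normal(size=(100, 2, 3))

# The docstring's estimator is exactly np.var's default (ddof=0) behavior.
xc = x - x.mean(axis=0)
var = (xc * np.conj(xc)).real.mean(axis=0)
print(np.allclose(var, np.var(x, axis=0)))  # True
```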
tensorflow_probability/python/stats/sample_stats.py
|
def variance(x, sample_axis=0, keepdims=False, name=None):
"""Estimate variance using samples.
Given `N` samples of a scalar-valued random variable `X`, the variance may
be estimated as
```none
Var[X] := N^{-1} sum_{n=1}^N (X_n - Xbar) Conj{(X_n - Xbar)}
Xbar := N^{-1} sum_{n=1}^N X_n
```
```python
x = tf.random_normal(shape=(100, 2, 3))
# var[i, j] is the sample variance of the (i, j) batch member of x.
var = tfp.stats.variance(x, sample_axis=0)
```
Notice we divide by `N` (the numpy default), which does not create `NaN`
when `N = 1`, but is slightly biased.
Args:
x: A numeric `Tensor` holding samples.
sample_axis: Scalar or vector `Tensor` designating axis holding samples, or
`None` (meaning all axes hold samples).
Default value: `0` (leftmost dimension).
keepdims: Boolean. Whether to keep the sample axis as singletons.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., `'variance'`).
Returns:
var: A `Tensor` of same `dtype` as the `x`, and rank equal to
`rank(x) - len(sample_axis)`
"""
with tf.compat.v1.name_scope(name, 'variance', values=[x, sample_axis]):
return covariance(
x, y=None, sample_axis=sample_axis, event_axis=None, keepdims=keepdims)
|
def variance(x, sample_axis=0, keepdims=False, name=None):
"""Estimate variance using samples.
Given `N` samples of a scalar-valued random variable `X`, the variance may
be estimated as
```none
Var[X] := N^{-1} sum_{n=1}^N (X_n - Xbar) Conj{(X_n - Xbar)}
Xbar := N^{-1} sum_{n=1}^N X_n
```
```python
x = tf.random_normal(shape=(100, 2, 3))
# var[i, j] is the sample variance of the (i, j) batch member of x.
var = tfp.stats.variance(x, sample_axis=0)
```
Notice we divide by `N` (the numpy default), which does not create `NaN`
when `N = 1`, but is slightly biased.
Args:
x: A numeric `Tensor` holding samples.
sample_axis: Scalar or vector `Tensor` designating axis holding samples, or
`None` (meaning all axes hold samples).
Default value: `0` (leftmost dimension).
keepdims: Boolean. Whether to keep the sample axis as singletons.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., `'variance'`).
Returns:
var: A `Tensor` of same `dtype` as the `x`, and rank equal to
`rank(x) - len(sample_axis)`
"""
with tf.compat.v1.name_scope(name, 'variance', values=[x, sample_axis]):
return covariance(
x, y=None, sample_axis=sample_axis, event_axis=None, keepdims=keepdims)
|
[
"Estimate",
"variance",
"using",
"samples",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/sample_stats.py#L602-L638
|
[
"def",
"variance",
"(",
"x",
",",
"sample_axis",
"=",
"0",
",",
"keepdims",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'variance'",
",",
"values",
"=",
"[",
"x",
",",
"sample_axis",
"]",
")",
":",
"return",
"covariance",
"(",
"x",
",",
"y",
"=",
"None",
",",
"sample_axis",
"=",
"sample_axis",
",",
"event_axis",
"=",
"None",
",",
"keepdims",
"=",
"keepdims",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_make_list_or_1d_tensor
|
Return a list (preferred) or 1d Tensor from values, if values.ndims < 2.
|
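The "cheap way to bring to at least 1d" in the code below is broadcasting against `zeros([1])`. A NumPy sketch of why that works (the TF dynamic branch behaves analogously):

```python
import numpy as np

# Adding zeros([1]) broadcasts a scalar up to shape (1,) and leaves
# anything already >= 1-d unchanged in content.
print(np.int32(3) + np.zeros([1], dtype=np.int32))        # [3]
print(np.array([3, 4]) + np.zeros([1], dtype=np.int64))   # [3 4]
```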
tensorflow_probability/python/stats/sample_stats.py
|
def _make_list_or_1d_tensor(values):
"""Return a list (preferred) or 1d Tensor from values, if values.ndims < 2."""
values = tf.convert_to_tensor(value=values, name='values')
values_ = tf.get_static_value(values)
# Static didn't work.
if values_ is None:
# Cheap way to bring to at least 1d.
return values + tf.zeros([1], dtype=values.dtype)
# Static worked!
if values_.ndim > 1:
raise ValueError('values had > 1 dim: {}'.format(values_.shape))
# Cheap way to bring to at least 1d.
values_ = values_ + np.zeros([1], dtype=values_.dtype)
return list(values_)
|
def _make_list_or_1d_tensor(values):
"""Return a list (preferred) or 1d Tensor from values, if values.ndims < 2."""
values = tf.convert_to_tensor(value=values, name='values')
values_ = tf.get_static_value(values)
# Static didn't work.
if values_ is None:
# Cheap way to bring to at least 1d.
return values + tf.zeros([1], dtype=values.dtype)
# Static worked!
if values_.ndim > 1:
raise ValueError('values had > 1 dim: {}'.format(values_.shape))
# Cheap way to bring to at least 1d.
values_ = values_ + np.zeros([1], dtype=values_.dtype)
return list(values_)
|
[
"Return",
"a",
"list",
"(",
"preferred",
")",
"or",
"1d",
"Tensor",
"from",
"values",
"if",
"values",
".",
"ndims",
"<",
"2",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/sample_stats.py#L646-L661
|
[
"def",
"_make_list_or_1d_tensor",
"(",
"values",
")",
":",
"values",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"values",
",",
"name",
"=",
"'values'",
")",
"values_",
"=",
"tf",
".",
"get_static_value",
"(",
"values",
")",
"# Static didn't work.",
"if",
"values_",
"is",
"None",
":",
"# Cheap way to bring to at least 1d.",
"return",
"values",
"+",
"tf",
".",
"zeros",
"(",
"[",
"1",
"]",
",",
"dtype",
"=",
"values",
".",
"dtype",
")",
"# Static worked!",
"if",
"values_",
".",
"ndim",
">",
"1",
":",
"raise",
"ValueError",
"(",
"'values had > 1 dim: {}'",
".",
"format",
"(",
"values_",
".",
"shape",
")",
")",
"# Cheap way to bring to at least 1d.",
"values_",
"=",
"values_",
"+",
"np",
".",
"zeros",
"(",
"[",
"1",
"]",
",",
"dtype",
"=",
"values_",
".",
"dtype",
")",
"return",
"list",
"(",
"values_",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
_make_positive_axis
|
Rectify possibly negative axis. Prefer to return a Python list.
|
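The static branch below is ordinary negative-index rectification: a negative axis is shifted up by `ndims`. A plain-Python sketch of just that branch (tensor conversion and the dynamic `tf.where` branch omitted):

```python
def make_positive_axis(axis, ndims):
    """Static-shape branch only: shift negative axes up by ndims."""
    axes = axis if isinstance(axis, (list, tuple)) else [axis]
    return [a + ndims if a < 0 else a for a in axes]

print(make_positive_axis(-1, 4))       # [3]
print(make_positive_axis([0, -2], 4))  # [0, 2]
```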
tensorflow_probability/python/stats/sample_stats.py
|
def _make_positive_axis(axis, ndims):
"""Rectify possibly negatively axis. Prefer return Python list."""
axis = _make_list_or_1d_tensor(axis)
ndims = tf.convert_to_tensor(value=ndims, name='ndims', dtype=tf.int32)
ndims_ = tf.get_static_value(ndims)
if _is_list_like(axis) and ndims_ is not None:
# Static case
positive_axis = []
for a in axis:
if a < 0:
a = ndims_ + a
positive_axis.append(a)
else:
# Dynamic case
axis = tf.convert_to_tensor(value=axis, name='axis', dtype=tf.int32)
positive_axis = tf.where(axis >= 0, axis, axis + ndims)
return positive_axis
|
def _make_positive_axis(axis, ndims):
"""Rectify possibly negatively axis. Prefer return Python list."""
axis = _make_list_or_1d_tensor(axis)
ndims = tf.convert_to_tensor(value=ndims, name='ndims', dtype=tf.int32)
ndims_ = tf.get_static_value(ndims)
if _is_list_like(axis) and ndims_ is not None:
# Static case
positive_axis = []
for a in axis:
if a < 0:
a = ndims_ + a
positive_axis.append(a)
else:
# Dynamic case
axis = tf.convert_to_tensor(value=axis, name='axis', dtype=tf.int32)
positive_axis = tf.where(axis >= 0, axis, axis + ndims)
return positive_axis
|
[
"Rectify",
"possibly",
"negatively",
"axis",
".",
"Prefer",
"return",
"Python",
"list",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/sample_stats.py#L664-L683
|
[
"def",
"_make_positive_axis",
"(",
"axis",
",",
"ndims",
")",
":",
"axis",
"=",
"_make_list_or_1d_tensor",
"(",
"axis",
")",
"ndims",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"ndims",
",",
"name",
"=",
"'ndims'",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"ndims_",
"=",
"tf",
".",
"get_static_value",
"(",
"ndims",
")",
"if",
"_is_list_like",
"(",
"axis",
")",
"and",
"ndims_",
"is",
"not",
"None",
":",
"# Static case",
"positive_axis",
"=",
"[",
"]",
"for",
"a",
"in",
"axis",
":",
"if",
"a",
"<",
"0",
":",
"a",
"=",
"ndims_",
"+",
"a",
"positive_axis",
".",
"append",
"(",
"a",
")",
"else",
":",
"# Dynamic case",
"axis",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"axis",
",",
"name",
"=",
"'axis'",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"positive_axis",
"=",
"tf",
".",
"where",
"(",
"axis",
">=",
"0",
",",
"axis",
",",
"axis",
"+",
"ndims",
")",
"return",
"positive_axis"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
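When `axis` and `ndims` are both statically known, the rectification is plain integer arithmetic and only the dynamic branch needs `tf.where`. A pure-Python sketch of the static branch (the function name is illustrative):

```python
def make_positive_axis_static(axis, ndims):
    # Static-case equivalent of the function above: a negative axis `a`
    # refers to dimension `ndims + a`, so -1 becomes the last dimension.
    return [a + ndims if a < 0 else a for a in axis]

print(make_positive_axis_static([0, -1, -2], ndims=4))  # [0, 3, 2]
```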
test
|
_squeeze
|
A version of squeeze that works with dynamic axis.
|
tensorflow_probability/python/stats/sample_stats.py
|
def _squeeze(x, axis):
"""A version of squeeze that works with dynamic axis."""
x = tf.convert_to_tensor(value=x, name='x')
if axis is None:
return tf.squeeze(x, axis=None)
axis = tf.convert_to_tensor(value=axis, name='axis', dtype=tf.int32)
axis += tf.zeros([1], dtype=axis.dtype) # Make axis at least 1d.
keep_axis, _ = tf.compat.v1.setdiff1d(tf.range(0, tf.rank(x)), axis)
return tf.reshape(x, tf.gather(tf.shape(input=x), keep_axis))
|
def _squeeze(x, axis):
"""A version of squeeze that works with dynamic axis."""
x = tf.convert_to_tensor(value=x, name='x')
if axis is None:
return tf.squeeze(x, axis=None)
axis = tf.convert_to_tensor(value=axis, name='axis', dtype=tf.int32)
axis += tf.zeros([1], dtype=axis.dtype) # Make axis at least 1d.
keep_axis, _ = tf.compat.v1.setdiff1d(tf.range(0, tf.rank(x)), axis)
return tf.reshape(x, tf.gather(tf.shape(input=x), keep_axis))
|
[
"A",
"version",
"of",
"squeeze",
"that",
"works",
"with",
"dynamic",
"axis",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/sample_stats.py#L686-L694
|
[
"def",
"_squeeze",
"(",
"x",
",",
"axis",
")",
":",
"x",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
",",
"name",
"=",
"'x'",
")",
"if",
"axis",
"is",
"None",
":",
"return",
"tf",
".",
"squeeze",
"(",
"x",
",",
"axis",
"=",
"None",
")",
"axis",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"axis",
",",
"name",
"=",
"'axis'",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"axis",
"+=",
"tf",
".",
"zeros",
"(",
"[",
"1",
"]",
",",
"dtype",
"=",
"axis",
".",
"dtype",
")",
"# Make axis at least 1d.",
"keep_axis",
",",
"_",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"setdiff1d",
"(",
"tf",
".",
"range",
"(",
"0",
",",
"tf",
".",
"rank",
"(",
"x",
")",
")",
",",
"axis",
")",
"return",
"tf",
".",
"reshape",
"(",
"x",
",",
"tf",
".",
"gather",
"(",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
")",
",",
"keep_axis",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
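The kept-axis computation above can be sketched in NumPy, where `np.setdiff1d` stands in for `tf.compat.v1.setdiff1d`; a minimal analogue (illustrative only):

```python
import numpy as np

def squeeze_dynamic_style(x, axis):
    # Mirror of the graph-mode trick above: the axes to keep are the set
    # difference range(rank) \ axis; reshaping to the kept sizes drops the
    # squeezed dimensions.
    x = np.asarray(x)
    axis = np.atleast_1d(axis)
    keep_axis = np.setdiff1d(np.arange(x.ndim), axis)
    return x.reshape([x.shape[i] for i in keep_axis])

x = np.zeros([2, 1, 3, 1])
print(squeeze_dynamic_style(x, axis=[1, 3]).shape)  # (2, 3)
```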
test
|
_kl_normal_normal
|
Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal.
Args:
n_a: instance of a Normal distribution object.
n_b: instance of a Normal distribution object.
name: (optional) Name to use for created operations.
default is "kl_normal_normal".
Returns:
Batchwise KL(n_a || n_b)
|
tensorflow_probability/python/distributions/normal.py
|
def _kl_normal_normal(n_a, n_b, name=None):
"""Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal.
Args:
n_a: instance of a Normal distribution object.
n_b: instance of a Normal distribution object.
name: (optional) Name to use for created operations.
default is "kl_normal_normal".
Returns:
Batchwise KL(n_a || n_b)
"""
with tf.name_scope(name or "kl_normal_normal"):
one = tf.constant(1, dtype=n_a.dtype)
two = tf.constant(2, dtype=n_a.dtype)
half = tf.constant(0.5, dtype=n_a.dtype)
s_a_squared = tf.square(n_a.scale)
s_b_squared = tf.square(n_b.scale)
ratio = s_a_squared / s_b_squared
return (tf.square(n_a.loc - n_b.loc) / (two * s_b_squared) + half *
(ratio - one - tf.math.log(ratio)))
|
def _kl_normal_normal(n_a, n_b, name=None):
"""Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal.
Args:
n_a: instance of a Normal distribution object.
n_b: instance of a Normal distribution object.
name: (optional) Name to use for created operations.
default is "kl_normal_normal".
Returns:
Batchwise KL(n_a || n_b)
"""
with tf.name_scope(name or "kl_normal_normal"):
one = tf.constant(1, dtype=n_a.dtype)
two = tf.constant(2, dtype=n_a.dtype)
half = tf.constant(0.5, dtype=n_a.dtype)
s_a_squared = tf.square(n_a.scale)
s_b_squared = tf.square(n_b.scale)
ratio = s_a_squared / s_b_squared
return (tf.square(n_a.loc - n_b.loc) / (two * s_b_squared) + half *
(ratio - one - tf.math.log(ratio)))
|
[
"Calculate",
"the",
"batched",
"KL",
"divergence",
"KL",
"(",
"n_a",
"||",
"n_b",
")",
"with",
"n_a",
"and",
"n_b",
"Normal",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/normal.py#L241-L261
|
[
"def",
"_kl_normal_normal",
"(",
"n_a",
",",
"n_b",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"or",
"\"kl_normal_normal\"",
")",
":",
"one",
"=",
"tf",
".",
"constant",
"(",
"1",
",",
"dtype",
"=",
"n_a",
".",
"dtype",
")",
"two",
"=",
"tf",
".",
"constant",
"(",
"2",
",",
"dtype",
"=",
"n_a",
".",
"dtype",
")",
"half",
"=",
"tf",
".",
"constant",
"(",
"0.5",
",",
"dtype",
"=",
"n_a",
".",
"dtype",
")",
"s_a_squared",
"=",
"tf",
".",
"square",
"(",
"n_a",
".",
"scale",
")",
"s_b_squared",
"=",
"tf",
".",
"square",
"(",
"n_b",
".",
"scale",
")",
"ratio",
"=",
"s_a_squared",
"/",
"s_b_squared",
"return",
"(",
"tf",
".",
"square",
"(",
"n_a",
".",
"loc",
"-",
"n_b",
".",
"loc",
")",
"/",
"(",
"two",
"*",
"s_b_squared",
")",
"+",
"half",
"*",
"(",
"ratio",
"-",
"one",
"-",
"tf",
".",
"math",
".",
"log",
"(",
"ratio",
")",
")",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
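The closed form implemented above is KL(n_a || n_b) = (loc_a - loc_b)^2 / (2 * scale_b^2) + (r - 1 - log r) / 2 with r = scale_a^2 / scale_b^2. A quick NumPy sanity check against a Monte Carlo estimate of E_{x~a}[log p_a(x) - log p_b(x)], with arbitrary parameter values:

```python
import numpy as np

rng = np.random.default_rng(0)
mu_a, s_a, mu_b, s_b = 0.3, 1.2, -0.5, 0.8

ratio = s_a**2 / s_b**2
closed_form = (mu_a - mu_b)**2 / (2 * s_b**2) + 0.5 * (ratio - 1 - np.log(ratio))

# Monte Carlo: KL(a || b) = E_{x ~ a}[log p_a(x) - log p_b(x)].
x = rng.normal(mu_a, s_a, size=1_000_000)
log_pa = -0.5 * ((x - mu_a) / s_a)**2 - np.log(s_a * np.sqrt(2 * np.pi))
log_pb = -0.5 * ((x - mu_b) / s_b)**2 - np.log(s_b * np.sqrt(2 * np.pi))
print(closed_form, np.mean(log_pa - log_pb))  # agree to roughly 3 decimals
```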
test
|
Normal._z
|
Standardize input `x` to a unit normal.
|
tensorflow_probability/python/distributions/normal.py
|
def _z(self, x):
"""Standardize input `x` to a unit normal."""
with tf.name_scope("standardize"):
return (x - self.loc) / self.scale
|
def _z(self, x):
"""Standardize input `x` to a unit normal."""
with tf.name_scope("standardize"):
return (x - self.loc) / self.scale
|
[
"Standardize",
"input",
"x",
"to",
"a",
"unit",
"normal",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/normal.py#L229-L232
|
[
"def",
"_z",
"(",
"self",
",",
"x",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"\"standardize\"",
")",
":",
"return",
"(",
"x",
"-",
"self",
".",
"loc",
")",
"/",
"self",
".",
"scale"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
test
|
Normal._inv_z
|
Reconstruct input `x` from a its normalized version.
|
tensorflow_probability/python/distributions/normal.py
|
def _inv_z(self, z):
"""Reconstruct input `x` from a its normalized version."""
with tf.name_scope("reconstruct"):
return z * self.scale + self.loc
|
def _inv_z(self, z):
"""Reconstruct input `x` from a its normalized version."""
with tf.name_scope("reconstruct"):
return z * self.scale + self.loc
|
[
"Reconstruct",
"input",
"x",
"from",
"a",
"its",
"normalized",
"version",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/normal.py#L234-L237
|
[
"def",
"_inv_z",
"(",
"self",
",",
"z",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"\"reconstruct\"",
")",
":",
"return",
"z",
"*",
"self",
".",
"scale",
"+",
"self",
".",
"loc"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
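`_z` and `_inv_z` above are exact inverses whenever `scale` is nonzero; a one-line round-trip check in NumPy (illustrative values):

```python
import numpy as np

loc, scale = 2.0, 3.0
x = np.array([-1.0, 0.0, 5.0])
z = (x - loc) / scale          # _z: standardize
x_back = z * scale + loc       # _inv_z: reconstruct
print(np.allclose(x, x_back))  # True
```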
test
|
semilocal_linear_trend_transition_matrix
|
Build the transition matrix for a semi-local linear trend model.
|
tensorflow_probability/python/sts/semilocal_linear_trend.py
|
def semilocal_linear_trend_transition_matrix(autoregressive_coef):
"""Build the transition matrix for a semi-local linear trend model."""
# We want to write the following 2 x 2 matrix:
# [[1., 1., ], # level(t+1) = level(t) + slope(t)
# [0., ar_coef], # slope(t+1) = ar_coef * slope(t)
# but it's slightly tricky to properly incorporate the batch shape of
# autoregressive_coef. E.g., if autoregressive_coef has shape [4,6], we want
# to return shape [4, 6, 2, 2]. We do this by breaking the matrix into its
# fixed entries, written explicitly, and then the autoregressive_coef part
# which we add in after using a mask to broadcast to the correct matrix shape.
fixed_entries = tf.constant(
[[1., 1.],
[0., 0.]],
dtype=autoregressive_coef.dtype)
autoregressive_coef_mask = tf.constant([[0., 0.],
[0., 1.]],
dtype=autoregressive_coef.dtype)
bottom_right_entry = (autoregressive_coef[..., tf.newaxis, tf.newaxis] *
autoregressive_coef_mask)
return tf.linalg.LinearOperatorFullMatrix(
fixed_entries + bottom_right_entry)
|
def semilocal_linear_trend_transition_matrix(autoregressive_coef):
"""Build the transition matrix for a semi-local linear trend model."""
# We want to write the following 2 x 2 matrix:
# [[1., 1., ], # level(t+1) = level(t) + slope(t)
# [0., ar_coef], # slope(t+1) = ar_coef * slope(t)
# but it's slightly tricky to properly incorporate the batch shape of
# autoregressive_coef. E.g., if autoregressive_coef has shape [4,6], we want
# to return shape [4, 6, 2, 2]. We do this by breaking the matrix into its
# fixed entries, written explicitly, and then the autoregressive_coef part
# which we add in after using a mask to broadcast to the correct matrix shape.
fixed_entries = tf.constant(
[[1., 1.],
[0., 0.]],
dtype=autoregressive_coef.dtype)
autoregressive_coef_mask = tf.constant([[0., 0.],
[0., 1.]],
dtype=autoregressive_coef.dtype)
bottom_right_entry = (autoregressive_coef[..., tf.newaxis, tf.newaxis] *
autoregressive_coef_mask)
return tf.linalg.LinearOperatorFullMatrix(
fixed_entries + bottom_right_entry)
|
[
"Build",
"the",
"transition",
"matrix",
"for",
"a",
"semi",
"-",
"local",
"linear",
"trend",
"model",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/semilocal_linear_trend.py#L241-L263
|
[
"def",
"semilocal_linear_trend_transition_matrix",
"(",
"autoregressive_coef",
")",
":",
"# We want to write the following 2 x 2 matrix:",
"# [[1., 1., ], # level(t+1) = level(t) + slope(t)",
"# [0., ar_coef], # slope(t+1) = ar_coef * slope(t)",
"# but it's slightly tricky to properly incorporate the batch shape of",
"# autoregressive_coef. E.g., if autoregressive_coef has shape [4,6], we want",
"# to return shape [4, 6, 2, 2]. We do this by breaking the matrix into its",
"# fixed entries, written explicitly, and then the autoregressive_coef part",
"# which we add in after using a mask to broadcast to the correct matrix shape.",
"fixed_entries",
"=",
"tf",
".",
"constant",
"(",
"[",
"[",
"1.",
",",
"1.",
"]",
",",
"[",
"0.",
",",
"0.",
"]",
"]",
",",
"dtype",
"=",
"autoregressive_coef",
".",
"dtype",
")",
"autoregressive_coef_mask",
"=",
"tf",
".",
"constant",
"(",
"[",
"[",
"0.",
",",
"0.",
"]",
",",
"[",
"0.",
",",
"1.",
"]",
"]",
",",
"dtype",
"=",
"autoregressive_coef",
".",
"dtype",
")",
"bottom_right_entry",
"=",
"(",
"autoregressive_coef",
"[",
"...",
",",
"tf",
".",
"newaxis",
",",
"tf",
".",
"newaxis",
"]",
"*",
"autoregressive_coef_mask",
")",
"return",
"tf",
".",
"linalg",
".",
"LinearOperatorFullMatrix",
"(",
"fixed_entries",
"+",
"bottom_right_entry",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
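The fixed-entries-plus-mask construction above replays directly in NumPy: broadcasting the coefficient against a one-hot mask inserts it at entry [1, 1] while preserving any batch shape. A sketch with a made-up batch shape of [4, 6]:

```python
import numpy as np

autoregressive_coef = np.full([4, 6], 0.9)  # batch shape [4, 6]
fixed_entries = np.array([[1., 1.],
                          [0., 0.]])
mask = np.array([[0., 0.],
                 [0., 1.]])
transition = fixed_entries + autoregressive_coef[..., None, None] * mask
print(transition.shape)   # (4, 6, 2, 2)
print(transition[0, 0])   # [[1. 1.], [0. 0.9]]
```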
test
|
semilocal_linear_trend_transition_noise
|
Build the transition noise model for a semi-local linear trend model.
|
tensorflow_probability/python/sts/semilocal_linear_trend.py
|
def semilocal_linear_trend_transition_noise(level_scale,
slope_mean,
slope_scale,
autoregressive_coef):
"""Build the transition noise model for a semi-local linear trend model."""
# At each timestep, the stochasticity of `level` and `slope` are given
# by `level_scale` and `slope_scale` respectively.
broadcast_batch_shape = dist_util.get_broadcast_shape(
level_scale, slope_mean, slope_scale, autoregressive_coef)
broadcast_ones = tf.ones(broadcast_batch_shape, dtype=level_scale.dtype)
scale_diag = tf.stack([level_scale * broadcast_ones,
slope_scale * broadcast_ones],
axis=-1)
# We additionally fold in a bias term implementing the nonzero `slope_mean`.
# The overall `slope` update is (from `SemiLocalLinearTrend` docstring)
# slope[t] = (slope_mean +
# autoregressive_coef * (slope[t-1] - slope_mean) +
# Normal(0., slope_scale))
# which we rewrite as
# slope[t] = (
# autoregressive_coef * slope[t-1] + # linear transition
# Normal(loc=slope_mean - autoregressive_coef * slope_mean, # noise bias
# scale=slope_scale)) # noise scale
bias = tf.stack([tf.zeros_like(broadcast_ones),
slope_mean * (1 - autoregressive_coef) * broadcast_ones],
axis=-1)
return tfd.MultivariateNormalDiag(
loc=bias,
scale_diag=scale_diag)
|
def semilocal_linear_trend_transition_noise(level_scale,
slope_mean,
slope_scale,
autoregressive_coef):
"""Build the transition noise model for a semi-local linear trend model."""
# At each timestep, the stochasticity of `level` and `slope` are given
# by `level_scale` and `slope_scale` respectively.
broadcast_batch_shape = dist_util.get_broadcast_shape(
level_scale, slope_mean, slope_scale, autoregressive_coef)
broadcast_ones = tf.ones(broadcast_batch_shape, dtype=level_scale.dtype)
scale_diag = tf.stack([level_scale * broadcast_ones,
slope_scale * broadcast_ones],
axis=-1)
# We additionally fold in a bias term implementing the nonzero `slope_mean`.
# The overall `slope` update is (from `SemiLocalLinearTrend` docstring)
# slope[t] = (slope_mean +
# autoregressive_coef * (slope[t-1] - slope_mean) +
# Normal(0., slope_scale))
# which we rewrite as
# slope[t] = (
# autoregressive_coef * slope[t-1] + # linear transition
# Normal(loc=slope_mean - autoregressive_coef * slope_mean, # noise bias
# scale=slope_scale)) # noise scale
bias = tf.stack([tf.zeros_like(broadcast_ones),
slope_mean * (1 - autoregressive_coef) * broadcast_ones],
axis=-1)
return tfd.MultivariateNormalDiag(
loc=bias,
scale_diag=scale_diag)
|
[
"Build",
"the",
"transition",
"noise",
"model",
"for",
"a",
"semi",
"-",
"local",
"linear",
"trend",
"model",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/semilocal_linear_trend.py#L266-L296
|
[
"def",
"semilocal_linear_trend_transition_noise",
"(",
"level_scale",
",",
"slope_mean",
",",
"slope_scale",
",",
"autoregressive_coef",
")",
":",
"# At each timestep, the stochasticity of `level` and `slope` are given",
"# by `level_scale` and `slope_scale` respectively.",
"broadcast_batch_shape",
"=",
"dist_util",
".",
"get_broadcast_shape",
"(",
"level_scale",
",",
"slope_mean",
",",
"slope_scale",
",",
"autoregressive_coef",
")",
"broadcast_ones",
"=",
"tf",
".",
"ones",
"(",
"broadcast_batch_shape",
",",
"dtype",
"=",
"level_scale",
".",
"dtype",
")",
"scale_diag",
"=",
"tf",
".",
"stack",
"(",
"[",
"level_scale",
"*",
"broadcast_ones",
",",
"slope_scale",
"*",
"broadcast_ones",
"]",
",",
"axis",
"=",
"-",
"1",
")",
"# We additionally fold in a bias term implementing the nonzero `slope_mean`.",
"# The overall `slope` update is (from `SemiLocalLinearTrend` docstring)",
"# slope[t] = (slope_mean +",
"# autoregressive_coef * (slope[t-1] - slope_mean) +",
"# Normal(0., slope_scale))",
"# which we rewrite as",
"# slope[t] = (",
"# autoregressive_coef * slope[t-1] + # linear transition",
"# Normal(loc=slope_mean - autoregressive_coef * slope_mean, # noise bias",
"# scale=slope_scale)) # noise scale",
"bias",
"=",
"tf",
".",
"stack",
"(",
"[",
"tf",
".",
"zeros_like",
"(",
"broadcast_ones",
")",
",",
"slope_mean",
"*",
"(",
"1",
"-",
"autoregressive_coef",
")",
"*",
"broadcast_ones",
"]",
",",
"axis",
"=",
"-",
"1",
")",
"return",
"tfd",
".",
"MultivariateNormalDiag",
"(",
"loc",
"=",
"bias",
",",
"scale_diag",
"=",
"scale_diag",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
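The comment block above rewrites the mean-reverting slope update into linear-transition-plus-bias form; the identity slope_mean + rho * (s - slope_mean) = rho * s + slope_mean * (1 - rho) is easy to verify numerically (arbitrary values):

```python
slope_mean, rho, s = 0.4, 0.7, 1.3
lhs = slope_mean + rho * (s - slope_mean)  # form in the docstring
rhs = rho * s + slope_mean * (1 - rho)     # linear transition + noise bias
print(abs(lhs - rhs) < 1e-12)              # True
```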
test
|
sample_halton_sequence
|
r"""Returns a sample from the `dim` dimensional Halton sequence.
Warning: The sequence elements take values only between 0 and 1. Care must be
taken to appropriately transform the domain of a function if it differs from
the unit cube before evaluating integrals using Halton samples. It is also
important to remember that quasi-random numbers without randomization are not
a replacement for pseudo-random numbers in every context. Quasi random numbers
are completely deterministic and typically have significant negative
autocorrelation unless randomization is used.
Computes the members of the low discrepancy Halton sequence in dimension
`dim`. The `dim`-dimensional sequence takes values in the unit hypercube in
`dim` dimensions. Currently, only dimensions up to 1000 are supported. The
prime base for the k-th axes is the k-th prime starting from 2. For example,
if `dim` = 3, then the bases will be [2, 3, 5] respectively and the first
element of the non-randomized sequence will be: [0.5, 0.333, 0.2]. For a more
complete description of the Halton sequences see
[here](https://en.wikipedia.org/wiki/Halton_sequence). For low discrepancy
sequences and their applications see
[here](https://en.wikipedia.org/wiki/Low-discrepancy_sequence).
If `randomized` is true, this function produces a scrambled version of the
Halton sequence introduced by [Owen (2017)][1]. For the advantages of
randomization of low discrepancy sequences see [here](
https://en.wikipedia.org/wiki/Quasi-Monte_Carlo_method#Randomization_of_quasi-Monte_Carlo).
The number of samples produced is controlled by the `num_results` and
`sequence_indices` parameters. The user must supply either `num_results` or
`sequence_indices` but not both.
The former is the number of samples to produce starting from the first
element. If `sequence_indices` is given instead, the specified elements of
the sequence are generated. For example, sequence_indices=tf.range(10) is
equivalent to specifying n=10.
#### Examples
```python
import tensorflow as tf
import tensorflow_probability as tfp
# Produce the first 1000 members of the Halton sequence in 3 dimensions.
num_results = 1000
dim = 3
sample = tfp.mcmc.sample_halton_sequence(
dim,
num_results=num_results,
seed=127)
# Evaluate the integral of x_1 * x_2^2 * x_3^3 over the three dimensional
# hypercube.
powers = tf.range(1.0, limit=dim + 1)
integral = tf.reduce_mean(tf.reduce_prod(sample ** powers, axis=-1))
true_value = 1.0 / tf.reduce_prod(powers + 1.0)
with tf.Session() as session:
values = session.run((integral, true_value))
# Produces a relative absolute error of 1.7%.
print ("Estimated: %f, True Value: %f" % values)
# Now skip the first 1000 samples and recompute the integral with the next
# thousand samples. The sequence_indices argument can be used to do this.
sequence_indices = tf.range(start=1000, limit=1000 + num_results,
dtype=tf.int32)
sample_leaped = tfp.mcmc.sample_halton_sequence(
dim,
sequence_indices=sequence_indices,
seed=111217)
integral_leaped = tf.reduce_mean(tf.reduce_prod(sample_leaped ** powers,
axis=-1))
with tf.Session() as session:
values = session.run((integral_leaped, true_value))
# Now produces a relative absolute error of 0.05%.
print ("Leaped Estimated: %f, True Value: %f" % values)
```
Args:
dim: Positive Python `int` representing each sample's `event_size.` Must
not be greater than 1000.
num_results: (Optional) Positive scalar `Tensor` of dtype int32. The number
of samples to generate. Either this parameter or sequence_indices must
be specified but not both. If this parameter is None, then the behaviour
is determined by the `sequence_indices`.
Default value: `None`.
sequence_indices: (Optional) `Tensor` of dtype int32 and rank 1. The
elements of the sequence to compute specified by their position in the
sequence. The entries index into the Halton sequence starting with 0 and
hence, must be whole numbers. For example, sequence_indices=[0, 5, 6] will
produce the first, sixth and seventh elements of the sequence. If this
parameter is None, then the `num_results` parameter must be specified
which gives the number of desired samples starting from the first sample.
Default value: `None`.
dtype: (Optional) The dtype of the sample. One of: `float16`, `float32` or
`float64`.
Default value: `tf.float32`.
randomized: (Optional) bool indicating whether to produce a randomized
Halton sequence. If True, applies the randomization described in
[Owen (2017)][1].
Default value: `True`.
seed: (Optional) Python integer to seed the random number generator. Only
used if `randomized` is True. If not supplied and `randomized` is True,
no seed is set.
Default value: `None`.
name: (Optional) Python `str` describing ops managed by this function. If
not supplied the name of this function is used.
Default value: "sample_halton_sequence".
Returns:
halton_elements: Elements of the Halton sequence. `Tensor` of supplied dtype
and `shape` `[num_results, dim]` if `num_results` was specified or shape
`[s, dim]` where s is the size of `sequence_indices` if `sequence_indices`
were specified.
Raises:
ValueError: if both `sequence_indices` and `num_results` were specified or
if dimension `dim` is less than 1 or greater than 1000.
#### References
[1]: Art B. Owen. A randomized Halton algorithm in R. _arXiv preprint
arXiv:1706.02808_, 2017. https://arxiv.org/abs/1706.02808
|
tensorflow_probability/python/mcmc/sample_halton_sequence.py
|
def sample_halton_sequence(dim,
num_results=None,
sequence_indices=None,
dtype=tf.float32,
randomized=True,
seed=None,
name=None):
r"""Returns a sample from the `dim` dimensional Halton sequence.
Warning: The sequence elements take values only between 0 and 1. Care must be
taken to appropriately transform the domain of a function if it differs from
the unit cube before evaluating integrals using Halton samples. It is also
important to remember that quasi-random numbers without randomization are not
a replacement for pseudo-random numbers in every context. Quasi random numbers
are completely deterministic and typically have significant negative
autocorrelation unless randomization is used.
Computes the members of the low discrepancy Halton sequence in dimension
`dim`. The `dim`-dimensional sequence takes values in the unit hypercube in
`dim` dimensions. Currently, only dimensions up to 1000 are supported. The
prime base for the k-th axes is the k-th prime starting from 2. For example,
if `dim` = 3, then the bases will be [2, 3, 5] respectively and the first
element of the non-randomized sequence will be: [0.5, 0.333, 0.2]. For a more
complete description of the Halton sequences see
[here](https://en.wikipedia.org/wiki/Halton_sequence). For low discrepancy
sequences and their applications see
[here](https://en.wikipedia.org/wiki/Low-discrepancy_sequence).
If `randomized` is true, this function produces a scrambled version of the
Halton sequence introduced by [Owen (2017)][1]. For the advantages of
randomization of low discrepancy sequences see [here](
https://en.wikipedia.org/wiki/Quasi-Monte_Carlo_method#Randomization_of_quasi-Monte_Carlo).
The number of samples produced is controlled by the `num_results` and
`sequence_indices` parameters. The user must supply either `num_results` or
`sequence_indices` but not both.
The former is the number of samples to produce starting from the first
element. If `sequence_indices` is given instead, the specified elements of
the sequence are generated. For example, sequence_indices=tf.range(10) is
equivalent to specifying n=10.
#### Examples
```python
import tensorflow as tf
import tensorflow_probability as tfp
# Produce the first 1000 members of the Halton sequence in 3 dimensions.
num_results = 1000
dim = 3
sample = tfp.mcmc.sample_halton_sequence(
dim,
num_results=num_results,
seed=127)
# Evaluate the integral of x_1 * x_2^2 * x_3^3 over the three dimensional
# hypercube.
powers = tf.range(1.0, limit=dim + 1)
integral = tf.reduce_mean(tf.reduce_prod(sample ** powers, axis=-1))
true_value = 1.0 / tf.reduce_prod(powers + 1.0)
with tf.Session() as session:
values = session.run((integral, true_value))
# Produces a relative absolute error of 1.7%.
print ("Estimated: %f, True Value: %f" % values)
# Now skip the first 1000 samples and recompute the integral with the next
# thousand samples. The sequence_indices argument can be used to do this.
sequence_indices = tf.range(start=1000, limit=1000 + num_results,
dtype=tf.int32)
sample_leaped = tfp.mcmc.sample_halton_sequence(
dim,
sequence_indices=sequence_indices,
seed=111217)
integral_leaped = tf.reduce_mean(tf.reduce_prod(sample_leaped ** powers,
axis=-1))
with tf.Session() as session:
values = session.run((integral_leaped, true_value))
# Now produces a relative absolute error of 0.05%.
print ("Leaped Estimated: %f, True Value: %f" % values)
```
Args:
dim: Positive Python `int` representing each sample's `event_size.` Must
not be greater than 1000.
num_results: (Optional) Positive scalar `Tensor` of dtype int32. The number
of samples to generate. Either this parameter or sequence_indices must
be specified but not both. If this parameter is None, then the behaviour
is determined by the `sequence_indices`.
Default value: `None`.
sequence_indices: (Optional) `Tensor` of dtype int32 and rank 1. The
elements of the sequence to compute specified by their position in the
sequence. The entries index into the Halton sequence starting with 0 and
hence, must be whole numbers. For example, sequence_indices=[0, 5, 6] will
produce the first, sixth and seventh elements of the sequence. If this
parameter is None, then the `num_results` parameter must be specified
which gives the number of desired samples starting from the first sample.
Default value: `None`.
dtype: (Optional) The dtype of the sample. One of: `float16`, `float32` or
`float64`.
Default value: `tf.float32`.
randomized: (Optional) bool indicating whether to produce a randomized
Halton sequence. If True, applies the randomization described in
[Owen (2017)][1].
Default value: `True`.
seed: (Optional) Python integer to seed the random number generator. Only
used if `randomized` is True. If not supplied and `randomized` is True,
no seed is set.
Default value: `None`.
name: (Optional) Python `str` describing ops managed by this function. If
not supplied the name of this function is used.
Default value: "sample_halton_sequence".
Returns:
halton_elements: Elements of the Halton sequence. `Tensor` of supplied dtype
and `shape` `[num_results, dim]` if `num_results` was specified or shape
`[s, dim]` where s is the size of `sequence_indices` if `sequence_indices`
were specified.
Raises:
ValueError: if both `sequence_indices` and `num_results` were specified or
if dimension `dim` is less than 1 or greater than 1000.
#### References
[1]: Art B. Owen. A randomized Halton algorithm in R. _arXiv preprint
arXiv:1706.02808_, 2017. https://arxiv.org/abs/1706.02808
"""
if dim < 1 or dim > _MAX_DIMENSION:
raise ValueError(
'Dimension must be between 1 and {}. Supplied {}'.format(_MAX_DIMENSION,
dim))
if (num_results is None) == (sequence_indices is None):
raise ValueError('Either `num_results` or `sequence_indices` must be'
' specified but not both.')
if not dtype.is_floating:
raise ValueError('dtype must be of `float`-type')
with tf.compat.v1.name_scope(
name, 'sample', values=[num_results, sequence_indices]):
# Here and in the following, the shape layout is as follows:
# [sample dimension, event dimension, coefficient dimension].
# The coefficient dimension is an intermediate axes which will hold the
# weights of the starting integer when expressed in the (prime) base for
# an event dimension.
if num_results is not None:
num_results = tf.convert_to_tensor(value=num_results)
if sequence_indices is not None:
sequence_indices = tf.convert_to_tensor(value=sequence_indices)
indices = _get_indices(num_results, sequence_indices, dtype)
radixes = tf.constant(_PRIMES[0:dim], dtype=dtype, shape=[dim, 1])
max_sizes_by_axes = _base_expansion_size(
tf.reduce_max(input_tensor=indices), radixes)
max_size = tf.reduce_max(input_tensor=max_sizes_by_axes)
# The powers of the radixes that we will need. Note that there is a bit
# of an excess here. Suppose we need the place value coefficients of 7
# in base 2 and 3. For 2, we will have 3 digits but we only need 2 digits
# for base 3. However, we can only create rectangular tensors so we
# store both expansions in a [2, 3] tensor. This leads to the problem that
# we might end up attempting to raise large numbers to large powers. For
# example, base 2 expansion of 1024 has 10 digits. If we were in 10
# dimensions, then the 10th prime (29) we will end up computing 29^10 even
# though we don't need it. We avoid this by setting the exponents for each
# axes to 0 beyond the maximum value needed for that dimension.
exponents_by_axes = tf.tile([tf.range(max_size)], [dim, 1])
# The mask is true for those coefficients that are irrelevant.
weight_mask = exponents_by_axes >= max_sizes_by_axes
capped_exponents = tf.where(
weight_mask,
tf.zeros_like(exponents_by_axes),
exponents_by_axes)
weights = radixes ** capped_exponents
# The following computes the base b expansion of the indices. Suppose,
# x = a0 + a1*b + a2*b^2 + ... Then, performing a floor div of x with
# the vector (1, b, b^2, b^3, ...) will produce
# (a0 + s1 * b, a1 + s2 * b, ...) where s_i are coefficients we don't care
# about. Noting that all a_i < b by definition of place value expansion,
# we see that taking the elements mod b of the above vector produces the
# place value expansion coefficients.
coeffs = tf.math.floordiv(indices, weights)
coeffs *= 1. - tf.cast(weight_mask, dtype)
coeffs %= radixes
if not randomized:
coeffs /= radixes
return tf.reduce_sum(input_tensor=coeffs / weights, axis=-1)
stream = distributions.SeedStream(seed, salt='MCMCSampleHaltonSequence')
coeffs = _randomize(coeffs, radixes, seed=stream())
# Remove the contribution from randomizing the trailing zero for the
# axes where max_size_by_axes < max_size. This will be accounted
# for separately below (using zero_correction).
coeffs *= 1. - tf.cast(weight_mask, dtype)
coeffs /= radixes
base_values = tf.reduce_sum(input_tensor=coeffs / weights, axis=-1)
# The randomization used in Owen (2017) does not leave 0 invariant. While
# we have accounted for the randomization of the first `max_size_by_axes`
# coefficients, we still need to correct for the trailing zeros. Luckily,
# this is equivalent to adding a uniform random value scaled so the first
# `max_size_by_axes` coefficients are zero. The following statements perform
# this correction.
zero_correction = tf.random.uniform([dim, 1], seed=stream(), dtype=dtype)
zero_correction /= radixes ** max_sizes_by_axes
return base_values + tf.reshape(zero_correction, [-1])
|
def sample_halton_sequence(dim,
num_results=None,
sequence_indices=None,
dtype=tf.float32,
randomized=True,
seed=None,
name=None):
r"""Returns a sample from the `dim` dimensional Halton sequence.
Warning: The sequence elements take values only between 0 and 1. Care must be
taken to appropriately transform the domain of a function if it differs from
the unit cube before evaluating integrals using Halton samples. It is also
important to remember that quasi-random numbers without randomization are not
a replacement for pseudo-random numbers in every context. Quasi random numbers
are completely deterministic and typically have significant negative
autocorrelation unless randomization is used.
Computes the members of the low discrepancy Halton sequence in dimension
`dim`. The `dim`-dimensional sequence takes values in the unit hypercube in
`dim` dimensions. Currently, only dimensions up to 1000 are supported. The
prime base for the k-th axes is the k-th prime starting from 2. For example,
if `dim` = 3, then the bases will be [2, 3, 5] respectively and the first
element of the non-randomized sequence will be: [0.5, 0.333, 0.2]. For a more
complete description of the Halton sequences see
[here](https://en.wikipedia.org/wiki/Halton_sequence). For low discrepancy
sequences and their applications see
[here](https://en.wikipedia.org/wiki/Low-discrepancy_sequence).
If `randomized` is true, this function produces a scrambled version of the
Halton sequence introduced by [Owen (2017)][1]. For the advantages of
randomization of low discrepancy sequences see [here](
https://en.wikipedia.org/wiki/Quasi-Monte_Carlo_method#Randomization_of_quasi-Monte_Carlo).
The number of samples produced is controlled by the `num_results` and
`sequence_indices` parameters. The user must supply either `num_results` or
`sequence_indices` but not both.
The former is the number of samples to produce starting from the first
element. If `sequence_indices` is given instead, the specified elements of
the sequence are generated. For example, sequence_indices=tf.range(10) is
equivalent to specifying n=10.
#### Examples
```python
import tensorflow as tf
import tensorflow_probability as tfp
# Produce the first 1000 members of the Halton sequence in 3 dimensions.
num_results = 1000
dim = 3
sample = tfp.mcmc.sample_halton_sequence(
dim,
num_results=num_results,
seed=127)
# Evaluate the integral of x_1 * x_2^2 * x_3^3 over the three dimensional
# hypercube.
powers = tf.range(1.0, limit=dim + 1)
integral = tf.reduce_mean(tf.reduce_prod(sample ** powers, axis=-1))
true_value = 1.0 / tf.reduce_prod(powers + 1.0)
with tf.Session() as session:
values = session.run((integral, true_value))
# Produces a relative absolute error of 1.7%.
print ("Estimated: %f, True Value: %f" % values)
# Now skip the first 1000 samples and recompute the integral with the next
# thousand samples. The sequence_indices argument can be used to do this.
sequence_indices = tf.range(start=1000, limit=1000 + num_results,
dtype=tf.int32)
sample_leaped = tfp.mcmc.sample_halton_sequence(
dim,
sequence_indices=sequence_indices,
seed=111217)
integral_leaped = tf.reduce_mean(tf.reduce_prod(sample_leaped ** powers,
axis=-1))
with tf.Session() as session:
values = session.run((integral_leaped, true_value))
# Now produces a relative absolute error of 0.05%.
print ("Leaped Estimated: %f, True Value: %f" % values)
```
Args:
dim: Positive Python `int` representing each sample's `event_size.` Must
not be greater than 1000.
num_results: (Optional) Positive scalar `Tensor` of dtype int32. The number
of samples to generate. Either this parameter or sequence_indices must
be specified but not both. If this parameter is None, then the behaviour
is determined by the `sequence_indices`.
Default value: `None`.
sequence_indices: (Optional) `Tensor` of dtype int32 and rank 1. The
elements of the sequence to compute specified by their position in the
sequence. The entries index into the Halton sequence starting with 0 and
hence, must be whole numbers. For example, sequence_indices=[0, 5, 6] will
produce the first, sixth and seventh elements of the sequence. If this
parameter is None, then the `num_results` parameter must be specified
which gives the number of desired samples starting from the first sample.
Default value: `None`.
dtype: (Optional) The dtype of the sample. One of: `float16`, `float32` or
`float64`.
Default value: `tf.float32`.
randomized: (Optional) bool indicating whether to produce a randomized
Halton sequence. If True, applies the randomization described in
[Owen (2017)][1].
Default value: `True`.
seed: (Optional) Python integer to seed the random number generator. Only
used if `randomized` is True. If not supplied and `randomized` is True,
no seed is set.
Default value: `None`.
name: (Optional) Python `str` describing ops managed by this function. If
not supplied the name of this function is used.
Default value: "sample_halton_sequence".
Returns:
halton_elements: Elements of the Halton sequence. `Tensor` of supplied dtype
and `shape` `[num_results, dim]` if `num_results` was specified or shape
`[s, dim]` where s is the size of `sequence_indices` if `sequence_indices`
were specified.
Raises:
ValueError: if both `sequence_indices` and `num_results` were specified or
if dimension `dim` is less than 1 or greater than 1000.
#### References
[1]: Art B. Owen. A randomized Halton algorithm in R. _arXiv preprint
arXiv:1706.02808_, 2017. https://arxiv.org/abs/1706.02808
"""
if dim < 1 or dim > _MAX_DIMENSION:
raise ValueError(
'Dimension must be between 1 and {}. Supplied {}'.format(_MAX_DIMENSION,
dim))
if (num_results is None) == (sequence_indices is None):
raise ValueError('Either `num_results` or `sequence_indices` must be'
' specified but not both.')
if not dtype.is_floating:
raise ValueError('dtype must be of `float`-type')
with tf.compat.v1.name_scope(
name, 'sample', values=[num_results, sequence_indices]):
# Here and in the following, the shape layout is as follows:
# [sample dimension, event dimension, coefficient dimension].
# The coefficient dimension is an intermediate axes which will hold the
# weights of the starting integer when expressed in the (prime) base for
# an event dimension.
if num_results is not None:
num_results = tf.convert_to_tensor(value=num_results)
if sequence_indices is not None:
sequence_indices = tf.convert_to_tensor(value=sequence_indices)
indices = _get_indices(num_results, sequence_indices, dtype)
radixes = tf.constant(_PRIMES[0:dim], dtype=dtype, shape=[dim, 1])
max_sizes_by_axes = _base_expansion_size(
tf.reduce_max(input_tensor=indices), radixes)
max_size = tf.reduce_max(input_tensor=max_sizes_by_axes)
# The powers of the radixes that we will need. Note that there is a bit
# of an excess here. Suppose we need the place value coefficients of 7
# in base 2 and 3. For 2, we will have 3 digits but we only need 2 digits
# for base 3. However, we can only create rectangular tensors so we
# store both expansions in a [2, 3] tensor. This leads to the problem that
# we might end up attempting to raise large numbers to large powers. For
# example, base 2 expansion of 1024 has 10 digits. If we were in 10
# dimensions, then the 10th prime (29) we will end up computing 29^10 even
# though we don't need it. We avoid this by setting the exponents for each
# axes to 0 beyond the maximum value needed for that dimension.
exponents_by_axes = tf.tile([tf.range(max_size)], [dim, 1])
# The mask is true for those coefficients that are irrelevant.
weight_mask = exponents_by_axes >= max_sizes_by_axes
capped_exponents = tf.where(
weight_mask,
tf.zeros_like(exponents_by_axes),
exponents_by_axes)
weights = radixes ** capped_exponents
# The following computes the base b expansion of the indices. Suppose,
# x = a0 + a1*b + a2*b^2 + ... Then, performing a floor div of x with
# the vector (1, b, b^2, b^3, ...) will produce
# (a0 + s1 * b, a1 + s2 * b, ...) where s_i are coefficients we don't care
# about. Noting that all a_i < b by definition of place value expansion,
# we see that taking the elements mod b of the above vector produces the
# place value expansion coefficients.
coeffs = tf.math.floordiv(indices, weights)
coeffs *= 1. - tf.cast(weight_mask, dtype)
coeffs %= radixes
if not randomized:
coeffs /= radixes
return tf.reduce_sum(input_tensor=coeffs / weights, axis=-1)
stream = distributions.SeedStream(seed, salt='MCMCSampleHaltonSequence')
coeffs = _randomize(coeffs, radixes, seed=stream())
# Remove the contribution from randomizing the trailing zero for the
# axes where max_size_by_axes < max_size. This will be accounted
# for separately below (using zero_correction).
coeffs *= 1. - tf.cast(weight_mask, dtype)
coeffs /= radixes
base_values = tf.reduce_sum(input_tensor=coeffs / weights, axis=-1)
# The randomization used in Owen (2017) does not leave 0 invariant. While
# we have accounted for the randomization of the first `max_size_by_axes`
# coefficients, we still need to correct for the trailing zeros. Luckily,
# this is equivalent to adding a uniform random value scaled so the first
# `max_size_by_axes` coefficients are zero. The following statements perform
# this correction.
zero_correction = tf.random.uniform([dim, 1], seed=stream(), dtype=dtype)
zero_correction /= radixes ** max_sizes_by_axes
return base_values + tf.reshape(zero_correction, [-1])
|
[
"r",
"Returns",
"a",
"sample",
"from",
"the",
"dim",
"dimensional",
"Halton",
"sequence",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/sample_halton_sequence.py#L39-L249
|
[
"def",
"sample_halton_sequence",
"(",
"dim",
",",
"num_results",
"=",
"None",
",",
"sequence_indices",
"=",
"None",
",",
"dtype",
"=",
"tf",
".",
"float32",
",",
"randomized",
"=",
"True",
",",
"seed",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"if",
"dim",
"<",
"1",
"or",
"dim",
">",
"_MAX_DIMENSION",
":",
"raise",
"ValueError",
"(",
"'Dimension must be between 1 and {}. Supplied {}'",
".",
"format",
"(",
"_MAX_DIMENSION",
",",
"dim",
")",
")",
"if",
"(",
"num_results",
"is",
"None",
")",
"==",
"(",
"sequence_indices",
"is",
"None",
")",
":",
"raise",
"ValueError",
"(",
"'Either `num_results` or `sequence_indices` must be'",
"' specified but not both.'",
")",
"if",
"not",
"dtype",
".",
"is_floating",
":",
"raise",
"ValueError",
"(",
"'dtype must be of `float`-type'",
")",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'sample'",
",",
"values",
"=",
"[",
"num_results",
",",
"sequence_indices",
"]",
")",
":",
"# Here and in the following, the shape layout is as follows:",
"# [sample dimension, event dimension, coefficient dimension].",
"# The coefficient dimension is an intermediate axes which will hold the",
"# weights of the starting integer when expressed in the (prime) base for",
"# an event dimension.",
"if",
"num_results",
"is",
"not",
"None",
":",
"num_results",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"num_results",
")",
"if",
"sequence_indices",
"is",
"not",
"None",
":",
"sequence_indices",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"sequence_indices",
")",
"indices",
"=",
"_get_indices",
"(",
"num_results",
",",
"sequence_indices",
",",
"dtype",
")",
"radixes",
"=",
"tf",
".",
"constant",
"(",
"_PRIMES",
"[",
"0",
":",
"dim",
"]",
",",
"dtype",
"=",
"dtype",
",",
"shape",
"=",
"[",
"dim",
",",
"1",
"]",
")",
"max_sizes_by_axes",
"=",
"_base_expansion_size",
"(",
"tf",
".",
"reduce_max",
"(",
"input_tensor",
"=",
"indices",
")",
",",
"radixes",
")",
"max_size",
"=",
"tf",
".",
"reduce_max",
"(",
"input_tensor",
"=",
"max_sizes_by_axes",
")",
"# The powers of the radixes that we will need. Note that there is a bit",
"# of an excess here. Suppose we need the place value coefficients of 7",
"# in base 2 and 3. For 2, we will have 3 digits but we only need 2 digits",
"# for base 3. However, we can only create rectangular tensors so we",
"# store both expansions in a [2, 3] tensor. This leads to the problem that",
"# we might end up attempting to raise large numbers to large powers. For",
"# example, base 2 expansion of 1024 has 10 digits. If we were in 10",
"# dimensions, then the 10th prime (29) we will end up computing 29^10 even",
"# though we don't need it. We avoid this by setting the exponents for each",
"# axes to 0 beyond the maximum value needed for that dimension.",
"exponents_by_axes",
"=",
"tf",
".",
"tile",
"(",
"[",
"tf",
".",
"range",
"(",
"max_size",
")",
"]",
",",
"[",
"dim",
",",
"1",
"]",
")",
"# The mask is true for those coefficients that are irrelevant.",
"weight_mask",
"=",
"exponents_by_axes",
">=",
"max_sizes_by_axes",
"capped_exponents",
"=",
"tf",
".",
"where",
"(",
"weight_mask",
",",
"tf",
".",
"zeros_like",
"(",
"exponents_by_axes",
")",
",",
"exponents_by_axes",
")",
"weights",
"=",
"radixes",
"**",
"capped_exponents",
"# The following computes the base b expansion of the indices. Suppose,",
"# x = a0 + a1*b + a2*b^2 + ... Then, performing a floor div of x with",
"# the vector (1, b, b^2, b^3, ...) will produce",
"# (a0 + s1 * b, a1 + s2 * b, ...) where s_i are coefficients we don't care",
"# about. Noting that all a_i < b by definition of place value expansion,",
"# we see that taking the elements mod b of the above vector produces the",
"# place value expansion coefficients.",
"coeffs",
"=",
"tf",
".",
"math",
".",
"floordiv",
"(",
"indices",
",",
"weights",
")",
"coeffs",
"*=",
"1.",
"-",
"tf",
".",
"cast",
"(",
"weight_mask",
",",
"dtype",
")",
"coeffs",
"%=",
"radixes",
"if",
"not",
"randomized",
":",
"coeffs",
"/=",
"radixes",
"return",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"coeffs",
"/",
"weights",
",",
"axis",
"=",
"-",
"1",
")",
"stream",
"=",
"distributions",
".",
"SeedStream",
"(",
"seed",
",",
"salt",
"=",
"'MCMCSampleHaltonSequence'",
")",
"coeffs",
"=",
"_randomize",
"(",
"coeffs",
",",
"radixes",
",",
"seed",
"=",
"stream",
"(",
")",
")",
"# Remove the contribution from randomizing the trailing zero for the",
"# axes where max_size_by_axes < max_size. This will be accounted",
"# for separately below (using zero_correction).",
"coeffs",
"*=",
"1.",
"-",
"tf",
".",
"cast",
"(",
"weight_mask",
",",
"dtype",
")",
"coeffs",
"/=",
"radixes",
"base_values",
"=",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"coeffs",
"/",
"weights",
",",
"axis",
"=",
"-",
"1",
")",
"# The randomization used in Owen (2017) does not leave 0 invariant. While",
"# we have accounted for the randomization of the first `max_size_by_axes`",
"# coefficients, we still need to correct for the trailing zeros. Luckily,",
"# this is equivalent to adding a uniform random value scaled so the first",
"# `max_size_by_axes` coefficients are zero. The following statements perform",
"# this correction.",
"zero_correction",
"=",
"tf",
".",
"random",
".",
"uniform",
"(",
"[",
"dim",
",",
"1",
"]",
",",
"seed",
"=",
"stream",
"(",
")",
",",
"dtype",
"=",
"dtype",
")",
"zero_correction",
"/=",
"radixes",
"**",
"max_sizes_by_axes",
"return",
"base_values",
"+",
"tf",
".",
"reshape",
"(",
"zero_correction",
",",
"[",
"-",
"1",
"]",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
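For intuition, the non-randomized sequence is cheap to reproduce in pure Python: coordinate k of element n is the base-p_k radical inverse of n + 1 (indexing from 1 so the first element matches the [0.5, 0.333, 0.2] quoted in the docstring). A minimal sketch, not the library implementation:

```python
def radical_inverse(n, base):
    # Reverse the base-`base` digits of n across the radix point:
    # e.g. n=1, base=2 -> 0.5; n=2 -> 0.25; n=3 -> 0.75.
    inv, denom = 0.0, 1.0
    while n > 0:
        n, digit = divmod(n, base)
        denom *= base
        inv += digit / denom
    return inv

def halton(n, primes=(2, 3, 5)):
    # First `n` elements of the non-randomized Halton sequence in
    # len(primes) dimensions, starting the index at 1.
    return [[radical_inverse(i, p) for p in primes] for i in range(1, n + 1)]

print(halton(1))  # [[0.5, 0.3333..., 0.2]]
```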
test
|
_randomize
|
Applies the Owen (2017) randomization to the coefficients.
|
tensorflow_probability/python/mcmc/sample_halton_sequence.py
|
def _randomize(coeffs, radixes, seed=None):
"""Applies the Owen (2017) randomization to the coefficients."""
given_dtype = coeffs.dtype
coeffs = tf.cast(coeffs, dtype=tf.int32)
num_coeffs = tf.shape(input=coeffs)[-1]
radixes = tf.reshape(tf.cast(radixes, dtype=tf.int32), shape=[-1])
stream = distributions.SeedStream(seed, salt='MCMCSampleHaltonSequence2')
perms = _get_permutations(num_coeffs, radixes, seed=stream())
perms = tf.reshape(perms, shape=[-1])
radix_sum = tf.reduce_sum(input_tensor=radixes)
radix_offsets = tf.reshape(tf.cumsum(radixes, exclusive=True),
shape=[-1, 1])
offsets = radix_offsets + tf.range(num_coeffs) * radix_sum
permuted_coeffs = tf.gather(perms, coeffs + offsets)
return tf.cast(permuted_coeffs, dtype=given_dtype)
|
def _randomize(coeffs, radixes, seed=None):
"""Applies the Owen (2017) randomization to the coefficients."""
given_dtype = coeffs.dtype
coeffs = tf.cast(coeffs, dtype=tf.int32)
num_coeffs = tf.shape(input=coeffs)[-1]
radixes = tf.reshape(tf.cast(radixes, dtype=tf.int32), shape=[-1])
stream = distributions.SeedStream(seed, salt='MCMCSampleHaltonSequence2')
perms = _get_permutations(num_coeffs, radixes, seed=stream())
perms = tf.reshape(perms, shape=[-1])
radix_sum = tf.reduce_sum(input_tensor=radixes)
radix_offsets = tf.reshape(tf.cumsum(radixes, exclusive=True),
shape=[-1, 1])
offsets = radix_offsets + tf.range(num_coeffs) * radix_sum
permuted_coeffs = tf.gather(perms, coeffs + offsets)
return tf.cast(permuted_coeffs, dtype=given_dtype)
|
[
"Applies",
"the",
"Owen",
"(",
"2017",
")",
"randomization",
"to",
"the",
"coefficients",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/sample_halton_sequence.py#L252-L266
|
[
"def",
"_randomize",
"(",
"coeffs",
",",
"radixes",
",",
"seed",
"=",
"None",
")",
":",
"given_dtype",
"=",
"coeffs",
".",
"dtype",
"coeffs",
"=",
"tf",
".",
"cast",
"(",
"coeffs",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"num_coeffs",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"coeffs",
")",
"[",
"-",
"1",
"]",
"radixes",
"=",
"tf",
".",
"reshape",
"(",
"tf",
".",
"cast",
"(",
"radixes",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"shape",
"=",
"[",
"-",
"1",
"]",
")",
"stream",
"=",
"distributions",
".",
"SeedStream",
"(",
"seed",
",",
"salt",
"=",
"'MCMCSampleHaltonSequence2'",
")",
"perms",
"=",
"_get_permutations",
"(",
"num_coeffs",
",",
"radixes",
",",
"seed",
"=",
"stream",
"(",
")",
")",
"perms",
"=",
"tf",
".",
"reshape",
"(",
"perms",
",",
"shape",
"=",
"[",
"-",
"1",
"]",
")",
"radix_sum",
"=",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"radixes",
")",
"radix_offsets",
"=",
"tf",
".",
"reshape",
"(",
"tf",
".",
"cumsum",
"(",
"radixes",
",",
"exclusive",
"=",
"True",
")",
",",
"shape",
"=",
"[",
"-",
"1",
",",
"1",
"]",
")",
"offsets",
"=",
"radix_offsets",
"+",
"tf",
".",
"range",
"(",
"num_coeffs",
")",
"*",
"radix_sum",
"permuted_coeffs",
"=",
"tf",
".",
"gather",
"(",
"perms",
",",
"coeffs",
"+",
"offsets",
")",
"return",
"tf",
".",
"cast",
"(",
"permuted_coeffs",
",",
"dtype",
"=",
"given_dtype",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
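The gather above relies on a flat layout: all per-position, per-base permutation tables are concatenated, and `radix_offsets + position * radix_sum` locates the right table for each digit. A NumPy sketch of that indexing, with made-up bases and digits, assuming the row-major packing produced by `_get_permutations`:

```python
import numpy as np

rng = np.random.default_rng(1)
radixes = np.array([2, 3])  # one base per event dimension
num_coeffs = 4              # digit positions per index

# One permutation of range(b) per (digit position, base), packed flat in
# the same row-major layout the gather above assumes.
perms = np.concatenate(
    [np.concatenate([rng.permutation(b) for b in radixes])
     for _ in range(num_coeffs)])

radix_sum = radixes.sum()  # 5
radix_offsets = np.concatenate([[0], np.cumsum(radixes)[:-1]])[:, None]
offsets = radix_offsets + np.arange(num_coeffs) * radix_sum  # shape [2, 4]

coeffs = np.array([[1, 0, 1, 1],   # base-2 digits, one per position
                   [2, 1, 0, 2]])  # base-3 digits
print(perms[coeffs + offsets])     # scrambled digits, shape [2, 4]
```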
test
|
_get_permutations
|
Uniform iid sample from the space of permutations.
Draws a sample of size `num_results` from the group of permutations of degrees
specified by the `dims` tensor. These are packed together into one tensor
such that each row is one sample from each of the dimensions in `dims`. For
example, if dims = [2,3] and num_results = 2, the result is a tensor of shape
[2, 2 + 3] and the first row of the result might look like:
[1, 0, 2, 0, 1]. The first two elements are a permutation over 2 elements
while the next three are a permutation over 3 elements.
Args:
num_results: A positive scalar `Tensor` of integral type. The number of
draws from the discrete uniform distribution over the permutation groups.
dims: A 1D `Tensor` of the same dtype as `num_results`. The degree of the
permutation groups from which to sample.
seed: (Optional) Python integer to seed the random number generator.
Returns:
permutations: A `Tensor` of shape `[num_results, sum(dims)]` and the same
dtype as `dims`.
|
tensorflow_probability/python/mcmc/sample_halton_sequence.py
|
def _get_permutations(num_results, dims, seed=None):
"""Uniform iid sample from the space of permutations.
Draws a sample of size `num_results` from the group of permutations of degrees
specified by the `dims` tensor. These are packed together into one tensor
such that each row is one sample from each of the dimensions in `dims`. For
example, if dims = [2,3] and num_results = 2, the result is a tensor of shape
[2, 2 + 3] and the first row of the result might look like:
[1, 0, 2, 0, 1]. The first two elements are a permutation over 2 elements
while the next three are a permutation over 3 elements.
Args:
num_results: A positive scalar `Tensor` of integral type. The number of
draws from the discrete uniform distribution over the permutation groups.
dims: A 1D `Tensor` of the same dtype as `num_results`. The degree of the
permutation groups from which to sample.
seed: (Optional) Python integer to seed the random number generator.
Returns:
permutations: A `Tensor` of shape `[num_results, sum(dims)]` and the same
dtype as `dims`.
"""
sample_range = tf.range(num_results)
stream = distributions.SeedStream(seed, salt='MCMCSampleHaltonSequence3')
def generate_one(d):
seed = stream()
fn = lambda _: tf.random.shuffle(tf.range(d), seed=seed)
return tf.map_fn(
fn,
sample_range,
parallel_iterations=1 if seed is not None else 10)
return tf.concat([generate_one(d) for d in tf.unstack(dims)],
axis=-1)
|
def _get_permutations(num_results, dims, seed=None):
"""Uniform iid sample from the space of permutations.
Draws a sample of size `num_results` from the group of permutations of degrees
specified by the `dims` tensor. These are packed together into one tensor
such that each row is one sample from each of the dimensions in `dims`. For
example, if dims = [2,3] and num_results = 2, the result is a tensor of shape
[2, 2 + 3] and the first row of the result might look like:
[1, 0, 2, 0, 1]. The first two elements are a permutation over 2 elements
while the next three are a permutation over 3 elements.
Args:
num_results: A positive scalar `Tensor` of integral type. The number of
draws from the discrete uniform distribution over the permutation groups.
dims: A 1D `Tensor` of the same dtype as `num_results`. The degree of the
permutation groups from which to sample.
seed: (Optional) Python integer to seed the random number generator.
Returns:
permutations: A `Tensor` of shape `[num_results, sum(dims)]` and the same
dtype as `dims`.
"""
sample_range = tf.range(num_results)
stream = distributions.SeedStream(seed, salt='MCMCSampleHaltonSequence3')
def generate_one(d):
seed = stream()
fn = lambda _: tf.random.shuffle(tf.range(d), seed=seed)
return tf.map_fn(
fn,
sample_range,
parallel_iterations=1 if seed is not None else 10)
return tf.concat([generate_one(d) for d in tf.unstack(dims)],
axis=-1)
|
[
"Uniform",
"iid",
"sample",
"from",
"the",
"space",
"of",
"permutations",
"."
] |
tensorflow/probability
|
python
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/sample_halton_sequence.py#L269-L301
|
[
"def",
"_get_permutations",
"(",
"num_results",
",",
"dims",
",",
"seed",
"=",
"None",
")",
":",
"sample_range",
"=",
"tf",
".",
"range",
"(",
"num_results",
")",
"stream",
"=",
"distributions",
".",
"SeedStream",
"(",
"seed",
",",
"salt",
"=",
"'MCMCSampleHaltonSequence3'",
")",
"def",
"generate_one",
"(",
"d",
")",
":",
"seed",
"=",
"stream",
"(",
")",
"fn",
"=",
"lambda",
"_",
":",
"tf",
".",
"random",
".",
"shuffle",
"(",
"tf",
".",
"range",
"(",
"d",
")",
",",
"seed",
"=",
"seed",
")",
"return",
"tf",
".",
"map_fn",
"(",
"fn",
",",
"sample_range",
",",
"parallel_iterations",
"=",
"1",
"if",
"seed",
"is",
"not",
"None",
"else",
"10",
")",
"return",
"tf",
".",
"concat",
"(",
"[",
"generate_one",
"(",
"d",
")",
"for",
"d",
"in",
"tf",
".",
"unstack",
"(",
"dims",
")",
"]",
",",
"axis",
"=",
"-",
"1",
")"
] |
e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5
|
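A NumPy analogue of the packing described in the docstring: one independent permutation per degree in `dims` and per row, concatenated along the last axis (the function name mirrors the one above but is a standalone sketch):

```python
import numpy as np

def get_permutations(num_results, dims, seed=0):
    # One independent permutation of range(d) per (row, d in dims),
    # concatenated along the last axis -> shape [num_results, sum(dims)].
    rng = np.random.default_rng(seed)
    return np.concatenate(
        [np.stack([rng.permutation(d) for _ in range(num_results)])
         for d in dims], axis=-1)

print(get_permutations(2, [2, 3]).shape)  # (2, 5)
```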