| partition (stringclasses, 3 values) | func_name (stringlengths, 1–134) | docstring (stringlengths, 1–46.9k) | path (stringlengths, 4–223) | original_string (stringlengths, 75–104k) | code (stringlengths, 75–104k) | docstring_tokens (listlengths, 1–1.97k) | repo (stringlengths, 7–55) | language (stringclasses, 1 value) | url (stringlengths, 87–315) | code_tokens (listlengths, 19–28.4k) | sha (stringlengths, 40–40) |
|---|---|---|---|---|---|---|---|---|---|---|---|
test
|
H2OFrame.substring
|
For each string, return a new string that is a substring of the original string.
If end_index is not specified, then the substring extends to the end of the original string. If the start_index
is longer than the length of the string, or is greater than or equal to the end_index, an empty string is
returned. Negative start_index is coerced to 0.
:param int start_index: The index of the original string at which to start the substring, inclusive.
:param int end_index: The index of the original string at which to end the substring, exclusive.
:returns: An H2OFrame containing the specified substrings.
|
h2o-py/h2o/frame.py
|
def substring(self, start_index, end_index=None):
"""
For each string, return a new string that is a substring of the original string.
If end_index is not specified, then the substring extends to the end of the original string. If the start_index
is longer than the length of the string, or is greater than or equal to the end_index, an empty string is
returned. Negative start_index is coerced to 0.
:param int start_index: The index of the original string at which to start the substring, inclusive.
:param int end_index: The index of the original string at which to end the substring, exclusive.
:returns: An H2OFrame containing the specified substrings.
"""
fr = H2OFrame._expr(expr=ExprNode("substring", self, start_index, end_index))
fr._ex._cache.nrows = self.nrow
fr._ex._cache.ncol = self.ncol
return fr
|
def substring(self, start_index, end_index=None):
"""
For each string, return a new string that is a substring of the original string.
If end_index is not specified, then the substring extends to the end of the original string. If the start_index
is longer than the length of the string, or is greater than or equal to the end_index, an empty string is
returned. Negative start_index is coerced to 0.
:param int start_index: The index of the original string at which to start the substring, inclusive.
:param int end_index: The index of the original string at which to end the substring, exclusive.
:returns: An H2OFrame containing the specified substrings.
"""
fr = H2OFrame._expr(expr=ExprNode("substring", self, start_index, end_index))
fr._ex._cache.nrows = self.nrow
fr._ex._cache.ncol = self.ncol
return fr
|
[
"For",
"each",
"string",
"return",
"a",
"new",
"string",
"that",
"is",
"a",
"substring",
"of",
"the",
"original",
"string",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L2391-L2406
|
[
"def",
"substring",
"(",
"self",
",",
"start_index",
",",
"end_index",
"=",
"None",
")",
":",
"fr",
"=",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"substring\"",
",",
"self",
",",
"start_index",
",",
"end_index",
")",
")",
"fr",
".",
"_ex",
".",
"_cache",
".",
"nrows",
"=",
"self",
".",
"nrow",
"fr",
".",
"_ex",
".",
"_cache",
".",
"ncol",
"=",
"self",
".",
"ncol",
"return",
"fr"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
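A minimal usage sketch for ``H2OFrame.substring``, assuming a running H2O cluster; the column name and string values below are illustrative:

import h2o
h2o.init()
fr = h2o.H2OFrame({"s": ["alphabet", "zebra"]}, column_types=["string"])
print(fr["s"].substring(0, 3))   # keeps characters 0-2 -> "alp", "zeb"
print(fr["s"].substring(3))      # no end_index: substring runs to the end -> "habet", "ra"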
test
|
H2OFrame.lstrip
|
Return a copy of the column with leading characters removed.
The set argument is a string specifying the set of characters to be removed.
If omitted, the set argument defaults to removing whitespace.
:param character set: The set of characters to lstrip from strings in column.
:returns: a new H2OFrame with the same shape as the original frame and having all its values
trimmed from the left (equivalent of Python's ``str.lstrip()``).
|
h2o-py/h2o/frame.py
|
def lstrip(self, set=" "):
"""
Return a copy of the column with leading characters removed.
The set argument is a string specifying the set of characters to be removed.
If omitted, the set argument defaults to removing whitespace.
:param character set: The set of characters to lstrip from strings in column.
:returns: a new H2OFrame with the same shape as the original frame and having all its values
trimmed from the left (equivalent of Python's ``str.lstrip()``).
"""
# work w/ None; parity with python lstrip
if set is None: set = " "
fr = H2OFrame._expr(expr=ExprNode("lstrip", self, set))
fr._ex._cache.nrows = self.nrow
fr._ex._cache.ncol = self.ncol
return fr
|
def lstrip(self, set=" "):
"""
Return a copy of the column with leading characters removed.
The set argument is a string specifying the set of characters to be removed.
If omitted, the set argument defaults to removing whitespace.
:param character set: The set of characters to lstrip from strings in column.
:returns: a new H2OFrame with the same shape as the original frame and having all its values
trimmed from the left (equivalent of Python's ``str.lstrip()``).
"""
# work w/ None; parity with python lstrip
if set is None: set = " "
fr = H2OFrame._expr(expr=ExprNode("lstrip", self, set))
fr._ex._cache.nrows = self.nrow
fr._ex._cache.ncol = self.ncol
return fr
|
[
"Return",
"a",
"copy",
"of",
"the",
"column",
"with",
"leading",
"characters",
"removed",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L2409-L2426
|
[
"def",
"lstrip",
"(",
"self",
",",
"set",
"=",
"\" \"",
")",
":",
"# work w/ None; parity with python lstrip",
"if",
"set",
"is",
"None",
":",
"set",
"=",
"\" \"",
"fr",
"=",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"lstrip\"",
",",
"self",
",",
"set",
")",
")",
"fr",
".",
"_ex",
".",
"_cache",
".",
"nrows",
"=",
"self",
".",
"nrow",
"fr",
".",
"_ex",
".",
"_cache",
".",
"ncol",
"=",
"self",
".",
"ncol",
"return",
"fr"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
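A similar hedged sketch for ``H2OFrame.lstrip`` under the same assumptions (running cluster, illustrative column values):

import h2o
h2o.init()
fr = h2o.H2OFrame({"s": ["   padded", "xxvalue"]}, column_types=["string"])
print(fr["s"].lstrip())      # default set: strip leading whitespace
print(fr["s"].lstrip("x"))   # strip leading "x" characters instead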
test
|
H2OFrame.entropy
|
For each string compute its Shannon entropy, if the string is empty the entropy is 0.
:returns: an H2OFrame of Shannon entropies.
|
h2o-py/h2o/frame.py
|
def entropy(self):
"""
For each string compute its Shannon entropy, if the string is empty the entropy is 0.
:returns: an H2OFrame of Shannon entropies.
"""
fr = H2OFrame._expr(expr=ExprNode("entropy", self))
fr._ex._cache.nrows = self.nrow
fr._ex._cache.ncol = self.ncol
return fr
|
def entropy(self):
"""
For each string compute its Shannon entropy, if the string is empty the entropy is 0.
:returns: an H2OFrame of Shannon entropies.
"""
fr = H2OFrame._expr(expr=ExprNode("entropy", self))
fr._ex._cache.nrows = self.nrow
fr._ex._cache.ncol = self.ncol
return fr
|
[
"For",
"each",
"string",
"compute",
"its",
"Shannon",
"entropy",
"if",
"the",
"string",
"is",
"empty",
"the",
"entropy",
"is",
"0",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L2449-L2458
|
[
"def",
"entropy",
"(",
"self",
")",
":",
"fr",
"=",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"entropy\"",
",",
"self",
")",
")",
"fr",
".",
"_ex",
".",
"_cache",
".",
"nrows",
"=",
"self",
".",
"nrow",
"fr",
".",
"_ex",
".",
"_cache",
".",
"ncol",
"=",
"self",
".",
"ncol",
"return",
"fr"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
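A small illustrative call to ``H2OFrame.entropy`` (hypothetical values; a repeated-character or empty string yields entropy 0):

import h2o
h2o.init()
fr = h2o.H2OFrame({"s": ["aaaa", "abcd", ""]}, column_types=["string"])
print(fr["s"].entropy())   # one Shannon entropy value per row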
test
|
H2OFrame.num_valid_substrings
|
For each string, find the count of all possible substrings with 2 characters or more that are contained in
the line-separated text file whose path is given.
:param str path_to_words: Path to file that contains a line-separated list of strings considered valid.
:returns: An H2OFrame with the number of substrings that are contained in the given word list.
|
h2o-py/h2o/frame.py
|
def num_valid_substrings(self, path_to_words):
"""
For each string, find the count of all possible substrings with 2 characters or more that are contained in
the line-separated text file whose path is given.
:param str path_to_words: Path to file that contains a line-separated list of strings considered valid.
:returns: An H2OFrame with the number of substrings that are contained in the given word list.
"""
assert_is_type(path_to_words, str)
fr = H2OFrame._expr(expr=ExprNode("num_valid_substrings", self, path_to_words))
fr._ex._cache.nrows = self.nrow
fr._ex._cache.ncol = self.ncol
return fr
|
def num_valid_substrings(self, path_to_words):
"""
For each string, find the count of all possible substrings with 2 characters or more that are contained in
the line-separated text file whose path is given.
:param str path_to_words: Path to file that contains a line-separated list of strings considered valid.
:returns: An H2OFrame with the number of substrings that are contained in the given word list.
"""
assert_is_type(path_to_words, str)
fr = H2OFrame._expr(expr=ExprNode("num_valid_substrings", self, path_to_words))
fr._ex._cache.nrows = self.nrow
fr._ex._cache.ncol = self.ncol
return fr
|
[
"For",
"each",
"string",
"find",
"the",
"count",
"of",
"all",
"possible",
"substrings",
"with",
"2",
"characters",
"or",
"more",
"that",
"are",
"contained",
"in",
"the",
"line",
"-",
"separated",
"text",
"file",
"whose",
"path",
"is",
"given",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L2461-L2473
|
[
"def",
"num_valid_substrings",
"(",
"self",
",",
"path_to_words",
")",
":",
"assert_is_type",
"(",
"path_to_words",
",",
"str",
")",
"fr",
"=",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"num_valid_substrings\"",
",",
"self",
",",
"path_to_words",
")",
")",
"fr",
".",
"_ex",
".",
"_cache",
".",
"nrows",
"=",
"self",
".",
"nrow",
"fr",
".",
"_ex",
".",
"_cache",
".",
"ncol",
"=",
"self",
".",
"ncol",
"return",
"fr"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
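A usage sketch for ``H2OFrame.num_valid_substrings``; ``words.txt`` is a hypothetical line-separated word list reachable from the H2O cluster:

import h2o
h2o.init()
fr = h2o.H2OFrame({"s": ["catdog"]}, column_types=["string"])
# counts substrings of length >= 2 (e.g. "cat", "dog") that appear in words.txt
print(fr["s"].num_valid_substrings("words.txt"))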
test
|
H2OFrame.table
|
Compute the counts of values appearing in a column, or co-occurence counts between two columns.
:param H2OFrame data2: An optional single column to aggregate counts by.
:param bool dense: If True (default) then use dense representation, which lists only non-zero counts,
1 combination per row. Set to False to expand counts across all combinations.
:returns: H2OFrame of the counts at each combination of factor levels
|
h2o-py/h2o/frame.py
|
def table(self, data2=None, dense=True):
"""
Compute the counts of values appearing in a column, or co-occurence counts between two columns.
:param H2OFrame data2: An optional single column to aggregate counts by.
:param bool dense: If True (default) then use dense representation, which lists only non-zero counts,
1 combination per row. Set to False to expand counts across all combinations.
:returns: H2OFrame of the counts at each combination of factor levels
"""
return H2OFrame._expr(expr=ExprNode("table", self, data2, dense)) if data2 is not None else H2OFrame._expr(
expr=ExprNode("table", self, dense))
|
def table(self, data2=None, dense=True):
"""
Compute the counts of values appearing in a column, or co-occurence counts between two columns.
:param H2OFrame data2: An optional single column to aggregate counts by.
:param bool dense: If True (default) then use dense representation, which lists only non-zero counts,
1 combination per row. Set to False to expand counts across all combinations.
:returns: H2OFrame of the counts at each combination of factor levels
"""
return H2OFrame._expr(expr=ExprNode("table", self, data2, dense)) if data2 is not None else H2OFrame._expr(
expr=ExprNode("table", self, dense))
|
[
"Compute",
"the",
"counts",
"of",
"values",
"appearing",
"in",
"a",
"column",
"or",
"co",
"-",
"occurence",
"counts",
"between",
"two",
"columns",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L2485-L2496
|
[
"def",
"table",
"(",
"self",
",",
"data2",
"=",
"None",
",",
"dense",
"=",
"True",
")",
":",
"return",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"table\"",
",",
"self",
",",
"data2",
",",
"dense",
")",
")",
"if",
"data2",
"is",
"not",
"None",
"else",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"table\"",
",",
"self",
",",
"dense",
")",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
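An illustrative use of ``H2OFrame.table`` for one- and two-column counts (column names and values are hypothetical):

import h2o
h2o.init()
fr = h2o.H2OFrame({"color": ["red", "red", "blue"], "size": ["S", "L", "L"]})
print(fr["color"].table())             # counts per level of a single column
print(fr["color"].table(fr["size"]))   # co-occurrence counts across two columns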
test
|
H2OFrame.hist
|
Compute a histogram over a numeric column.
:param breaks: Can be one of ``"sturges"``, ``"rice"``, ``"sqrt"``, ``"doane"``, ``"fd"``, ``"scott"``;
or a single number for the number of breaks; or a list containing the split points, e.g:
``[-50, 213.2123, 9324834]``. If breaks is "fd", the MAD is used over the IQR in computing bin width.
:param bool plot: If True (default), then a plot will be generated using ``matplotlib``.
:returns: If ``plot`` is False, return H2OFrame with these columns: breaks, counts, mids_true,
mids, and density; otherwise this method draws a plot and returns nothing.
|
h2o-py/h2o/frame.py
|
def hist(self, breaks="sturges", plot=True, **kwargs):
"""
Compute a histogram over a numeric column.
:param breaks: Can be one of ``"sturges"``, ``"rice"``, ``"sqrt"``, ``"doane"``, ``"fd"``, ``"scott"``;
or a single number for the number of breaks; or a list containing the split points, e.g:
``[-50, 213.2123, 9324834]``. If breaks is "fd", the MAD is used over the IQR in computing bin width.
:param bool plot: If True (default), then a plot will be generated using ``matplotlib``.
:returns: If ``plot`` is False, return H2OFrame with these columns: breaks, counts, mids_true,
mids, and density; otherwise this method draws a plot and returns nothing.
"""
server = kwargs.pop("server") if "server" in kwargs else False
assert_is_type(breaks, int, [numeric], Enum("sturges", "rice", "sqrt", "doane", "fd", "scott"))
assert_is_type(plot, bool)
assert_is_type(server, bool)
if kwargs:
raise H2OValueError("Unknown parameters to hist(): %r" % kwargs)
hist = H2OFrame._expr(expr=ExprNode("hist", self, breaks))._frame()
if plot:
try:
import matplotlib
if server:
matplotlib.use("Agg", warn=False)
import matplotlib.pyplot as plt
except ImportError:
print("ERROR: matplotlib is required to make the histogram plot. "
"Set `plot` to False, if a plot is not desired.")
return
hist["widths"] = hist["breaks"].difflag1()
# [2:] because we're removing the title and the first row (which consists of NaNs)
lefts = [float(c[0]) for c in h2o.as_list(hist["breaks"], use_pandas=False)[2:]]
widths = [float(c[0]) for c in h2o.as_list(hist["widths"], use_pandas=False)[2:]]
counts = [float(c[0]) for c in h2o.as_list(hist["counts"], use_pandas=False)[2:]]
plt.xlabel(self.names[0])
plt.ylabel("Frequency")
plt.title("Histogram of %s" % self.names[0])
plt.bar(left=lefts, width=widths, height=counts, bottom=0)
if not server:
plt.show()
else:
hist["density"] = hist["counts"] / (hist["breaks"].difflag1() * hist["counts"].sum())
return hist
|
def hist(self, breaks="sturges", plot=True, **kwargs):
"""
Compute a histogram over a numeric column.
:param breaks: Can be one of ``"sturges"``, ``"rice"``, ``"sqrt"``, ``"doane"``, ``"fd"``, ``"scott"``;
or a single number for the number of breaks; or a list containing the split points, e.g:
``[-50, 213.2123, 9324834]``. If breaks is "fd", the MAD is used over the IQR in computing bin width.
:param bool plot: If True (default), then a plot will be generated using ``matplotlib``.
:returns: If ``plot`` is False, return H2OFrame with these columns: breaks, counts, mids_true,
mids, and density; otherwise this method draws a plot and returns nothing.
"""
server = kwargs.pop("server") if "server" in kwargs else False
assert_is_type(breaks, int, [numeric], Enum("sturges", "rice", "sqrt", "doane", "fd", "scott"))
assert_is_type(plot, bool)
assert_is_type(server, bool)
if kwargs:
raise H2OValueError("Unknown parameters to hist(): %r" % kwargs)
hist = H2OFrame._expr(expr=ExprNode("hist", self, breaks))._frame()
if plot:
try:
import matplotlib
if server:
matplotlib.use("Agg", warn=False)
import matplotlib.pyplot as plt
except ImportError:
print("ERROR: matplotlib is required to make the histogram plot. "
"Set `plot` to False, if a plot is not desired.")
return
hist["widths"] = hist["breaks"].difflag1()
# [2:] because we're removing the title and the first row (which consists of NaNs)
lefts = [float(c[0]) for c in h2o.as_list(hist["breaks"], use_pandas=False)[2:]]
widths = [float(c[0]) for c in h2o.as_list(hist["widths"], use_pandas=False)[2:]]
counts = [float(c[0]) for c in h2o.as_list(hist["counts"], use_pandas=False)[2:]]
plt.xlabel(self.names[0])
plt.ylabel("Frequency")
plt.title("Histogram of %s" % self.names[0])
plt.bar(left=lefts, width=widths, height=counts, bottom=0)
if not server:
plt.show()
else:
hist["density"] = hist["counts"] / (hist["breaks"].difflag1() * hist["counts"].sum())
return hist
|
[
"Compute",
"a",
"histogram",
"over",
"a",
"numeric",
"column",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L2499-L2544
|
[
"def",
"hist",
"(",
"self",
",",
"breaks",
"=",
"\"sturges\"",
",",
"plot",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"server",
"=",
"kwargs",
".",
"pop",
"(",
"\"server\"",
")",
"if",
"\"server\"",
"in",
"kwargs",
"else",
"False",
"assert_is_type",
"(",
"breaks",
",",
"int",
",",
"[",
"numeric",
"]",
",",
"Enum",
"(",
"\"sturges\"",
",",
"\"rice\"",
",",
"\"sqrt\"",
",",
"\"doane\"",
",",
"\"fd\"",
",",
"\"scott\"",
")",
")",
"assert_is_type",
"(",
"plot",
",",
"bool",
")",
"assert_is_type",
"(",
"server",
",",
"bool",
")",
"if",
"kwargs",
":",
"raise",
"H2OValueError",
"(",
"\"Unknown parameters to hist(): %r\"",
"%",
"kwargs",
")",
"hist",
"=",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"hist\"",
",",
"self",
",",
"breaks",
")",
")",
".",
"_frame",
"(",
")",
"if",
"plot",
":",
"try",
":",
"import",
"matplotlib",
"if",
"server",
":",
"matplotlib",
".",
"use",
"(",
"\"Agg\"",
",",
"warn",
"=",
"False",
")",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"except",
"ImportError",
":",
"print",
"(",
"\"ERROR: matplotlib is required to make the histogram plot. \"",
"\"Set `plot` to False, if a plot is not desired.\"",
")",
"return",
"hist",
"[",
"\"widths\"",
"]",
"=",
"hist",
"[",
"\"breaks\"",
"]",
".",
"difflag1",
"(",
")",
"# [2:] because we're removing the title and the first row (which consists of NaNs)",
"lefts",
"=",
"[",
"float",
"(",
"c",
"[",
"0",
"]",
")",
"for",
"c",
"in",
"h2o",
".",
"as_list",
"(",
"hist",
"[",
"\"breaks\"",
"]",
",",
"use_pandas",
"=",
"False",
")",
"[",
"2",
":",
"]",
"]",
"widths",
"=",
"[",
"float",
"(",
"c",
"[",
"0",
"]",
")",
"for",
"c",
"in",
"h2o",
".",
"as_list",
"(",
"hist",
"[",
"\"widths\"",
"]",
",",
"use_pandas",
"=",
"False",
")",
"[",
"2",
":",
"]",
"]",
"counts",
"=",
"[",
"float",
"(",
"c",
"[",
"0",
"]",
")",
"for",
"c",
"in",
"h2o",
".",
"as_list",
"(",
"hist",
"[",
"\"counts\"",
"]",
",",
"use_pandas",
"=",
"False",
")",
"[",
"2",
":",
"]",
"]",
"plt",
".",
"xlabel",
"(",
"self",
".",
"names",
"[",
"0",
"]",
")",
"plt",
".",
"ylabel",
"(",
"\"Frequency\"",
")",
"plt",
".",
"title",
"(",
"\"Histogram of %s\"",
"%",
"self",
".",
"names",
"[",
"0",
"]",
")",
"plt",
".",
"bar",
"(",
"left",
"=",
"lefts",
",",
"width",
"=",
"widths",
",",
"height",
"=",
"counts",
",",
"bottom",
"=",
"0",
")",
"if",
"not",
"server",
":",
"plt",
".",
"show",
"(",
")",
"else",
":",
"hist",
"[",
"\"density\"",
"]",
"=",
"hist",
"[",
"\"counts\"",
"]",
"/",
"(",
"hist",
"[",
"\"breaks\"",
"]",
".",
"difflag1",
"(",
")",
"*",
"hist",
"[",
"\"counts\"",
"]",
".",
"sum",
"(",
")",
")",
"return",
"hist"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
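A minimal sketch of ``H2OFrame.hist`` with ``plot=False``, which sidesteps the matplotlib dependency and returns the breaks/counts frame instead of drawing (data is hypothetical):

import h2o
h2o.init()
fr = h2o.H2OFrame({"x": [1, 2, 2, 3, 3, 3, 10]})
h = fr["x"].hist(breaks=3, plot=False)   # returns breaks, counts, mids, density
print(h)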
test
|
H2OFrame.isax
|
Compute the iSAX index for DataFrame which is assumed to be numeric time series data.
References:
- http://www.cs.ucr.edu/~eamonn/SAX.pdf
- http://www.cs.ucr.edu/~eamonn/iSAX_2.0.pdf
:param int num_words: Number of iSAX words for the timeseries, i.e. granularity along the time series
:param int max_cardinality: Maximum cardinality of the iSAX word. Each word can have less than the max
:param bool optimized_card: An optimization flag that will find the max cardinality regardless of what is
passed in for ``max_cardinality``.
:returns: An H2OFrame with the name of time series, string representation of iSAX word, followed by
binary representation.
|
h2o-py/h2o/frame.py
|
def isax(self, num_words, max_cardinality, optimize_card=False, **kwargs):
"""
Compute the iSAX index for DataFrame which is assumed to be numeric time series data.
References:
- http://www.cs.ucr.edu/~eamonn/SAX.pdf
- http://www.cs.ucr.edu/~eamonn/iSAX_2.0.pdf
:param int num_words: Number of iSAX words for the timeseries, i.e. granularity along the time series
:param int max_cardinality: Maximum cardinality of the iSAX word. Each word can have less than the max
:param bool optimized_card: An optimization flag that will find the max cardinality regardless of what is
passed in for ``max_cardinality``.
:returns: An H2OFrame with the name of time series, string representation of iSAX word, followed by
binary representation.
"""
if num_words <= 0: raise H2OValueError("num_words must be greater than 0")
if max_cardinality <= 0: raise H2OValueError("max_cardinality must be greater than 0")
return H2OFrame._expr(expr=ExprNode("isax", self, num_words, max_cardinality, optimize_card))
|
def isax(self, num_words, max_cardinality, optimize_card=False, **kwargs):
"""
Compute the iSAX index for DataFrame which is assumed to be numeric time series data.
References:
- http://www.cs.ucr.edu/~eamonn/SAX.pdf
- http://www.cs.ucr.edu/~eamonn/iSAX_2.0.pdf
:param int num_words: Number of iSAX words for the timeseries, i.e. granularity along the time series
:param int max_cardinality: Maximum cardinality of the iSAX word. Each word can have less than the max
:param bool optimized_card: An optimization flag that will find the max cardinality regardless of what is
passed in for ``max_cardinality``.
:returns: An H2OFrame with the name of time series, string representation of iSAX word, followed by
binary representation.
"""
if num_words <= 0: raise H2OValueError("num_words must be greater than 0")
if max_cardinality <= 0: raise H2OValueError("max_cardinality must be greater than 0")
return H2OFrame._expr(expr=ExprNode("isax", self, num_words, max_cardinality, optimize_card))
|
[
"Compute",
"the",
"iSAX",
"index",
"for",
"DataFrame",
"which",
"is",
"assumed",
"to",
"be",
"numeric",
"time",
"series",
"data",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L2547-L2566
|
[
"def",
"isax",
"(",
"self",
",",
"num_words",
",",
"max_cardinality",
",",
"optimize_card",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"num_words",
"<=",
"0",
":",
"raise",
"H2OValueError",
"(",
"\"num_words must be greater than 0\"",
")",
"if",
"max_cardinality",
"<=",
"0",
":",
"raise",
"H2OValueError",
"(",
"\"max_cardinality must be greater than 0\"",
")",
"return",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"isax\"",
",",
"self",
",",
"num_words",
",",
"max_cardinality",
",",
"optimize_card",
")",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
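A hedged example of ``H2OFrame.isax`` on a small synthetic time-series frame (one series per row, sixteen numeric time steps; the sizes and word/cardinality settings are illustrative):

import h2o
import numpy as np
h2o.init()
ts = h2o.H2OFrame(np.random.randn(5, 16).tolist())
print(ts.isax(num_words=4, max_cardinality=8))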
test
|
H2OFrame.convert_H2OFrame_2_DMatrix
|
This method requires that you import the following toolboxes: xgboost, pandas, numpy and scipy.sparse.
This method will convert an H2OFrame to a DMatrix that can be used by native XGBoost. The H2OFrame contains
numerical and enum columns alone. Note that H2O one-hot-encoding introduces a missing(NA)
column. There can be NAs in any columns.
Follow the steps below to compare H2OXGBoost and native XGBoost:
1. Train the H2OXGBoost model with H2OFrame trainFile and generate a prediction:
h2oModelD = H2OXGBoostEstimator(**h2oParamsD) # parameters specified as a dict()
h2oModelD.train(x=myX, y=y, training_frame=trainFile) # train with H2OFrame trainFile
h2oPredict = h2oPredictD = h2oModelD.predict(trainFile)
2. Derive the DMatrix from H2OFrame:
nativeDMatrix = trainFile.convert_H2OFrame_2_DMatrix(myX, y, h2oModelD)
3. Derive the parameters for native XGBoost:
nativeParams = h2oModelD.convert_H2OXGBoostParams_2_XGBoostParams()
4. Train your native XGBoost model and generate a prediction:
nativeModel = xgb.train(params=nativeParams[0], dtrain=nativeDMatrix, num_boost_round=nativeParams[1])
nativePredict = nativeModel.predict(data=nativeDMatrix, ntree_limit=nativeParams[1].
5. Compare the predictions h2oPredict from H2OXGBoost, nativePredict from native XGBoost.
:param h2oFrame: H2OFrame to be converted to DMatrix for native XGBoost
:param predictors: List of predictor columns, can be column names or indices
:param yresp: response column, can be column index or name
:param h2oXGBoostModel: H2OXGboost model that are built with the same H2OFrame as input earlier
:return: DMatrix that can be an input to a native XGBoost model
|
h2o-py/h2o/frame.py
|
def convert_H2OFrame_2_DMatrix(self, predictors, yresp, h2oXGBoostModel):
'''
This method requires that you import the following toolboxes: xgboost, pandas, numpy and scipy.sparse.
This method will convert an H2OFrame to a DMatrix that can be used by native XGBoost. The H2OFrame contains
numerical and enum columns alone. Note that H2O one-hot-encoding introduces a missing(NA)
column. There can be NAs in any columns.
Follow the steps below to compare H2OXGBoost and native XGBoost:
1. Train the H2OXGBoost model with H2OFrame trainFile and generate a prediction:
h2oModelD = H2OXGBoostEstimator(**h2oParamsD) # parameters specified as a dict()
h2oModelD.train(x=myX, y=y, training_frame=trainFile) # train with H2OFrame trainFile
h2oPredict = h2oPredictD = h2oModelD.predict(trainFile)
2. Derive the DMatrix from H2OFrame:
nativeDMatrix = trainFile.convert_H2OFrame_2_DMatrix(myX, y, h2oModelD)
3. Derive the parameters for native XGBoost:
nativeParams = h2oModelD.convert_H2OXGBoostParams_2_XGBoostParams()
4. Train your native XGBoost model and generate a prediction:
nativeModel = xgb.train(params=nativeParams[0], dtrain=nativeDMatrix, num_boost_round=nativeParams[1])
nativePredict = nativeModel.predict(data=nativeDMatrix, ntree_limit=nativeParams[1].
5. Compare the predictions h2oPredict from H2OXGBoost, nativePredict from native XGBoost.
:param h2oFrame: H2OFrame to be converted to DMatrix for native XGBoost
:param predictors: List of predictor columns, can be column names or indices
:param yresp: response column, can be column index or name
:param h2oXGBoostModel: H2OXGboost model that are built with the same H2OFrame as input earlier
:return: DMatrix that can be an input to a native XGBoost model
'''
import xgboost as xgb
import pandas as pd
import numpy as np
from scipy.sparse import csr_matrix
assert isinstance(predictors, list) or isinstance(predictors, tuple)
assert h2oXGBoostModel._model_json['algo'] == 'xgboost', \
"convert_H2OFrame_2_DMatrix is used for H2OXGBoost model only."
tempFrame = self[predictors].cbind(self[yresp])
colnames = tempFrame.names
if type(predictors[0])==type(1): # convert integer indices to column names
temp = []
for colInd in predictors:
temp.append(colnames[colInd])
predictors = temp
if (type(yresp) == type(1)):
tempy = colnames[yresp]
yresp = tempy # column name of response column
enumCols = [] # extract enum columns out to process them
enumColsIndices = [] # store enum column indices
typeDict = self.types
for predName in predictors:
if str(typeDict[predName])=='enum':
enumCols.append(predName)
enumColsIndices.append(colnames.index(predName))
pandaFtrain = tempFrame.as_data_frame(use_pandas=True, header=True)
nrows = tempFrame.nrow
# convert H2OFrame to DMatrix starts here
if len(enumCols) > 0: # enumCols contain all enum column names
allDomain = tempFrame.levels() # list all domain levels with column indices
domainLen = []
for enumIndex in enumColsIndices:
if len(allDomain[enumIndex])>0:
domainLen.append(len(allDomain[enumIndex])*-1)
incLevel = np.argsort(domainLen) # indices of enum column indices with decreasing domain length
# need to move enum columns to the front, highest level first
c2 = tempFrame[enumCols[incLevel[0]]]
tempFrame = tempFrame.drop(enumCols[incLevel[0]])
for index in range(1, len(incLevel)):
c2 = c2.cbind(tempFrame[enumCols[incLevel[index]]])
tempFrame = tempFrame.drop(enumCols[incLevel[index]])
enumCols = c2.names
tempFrame = c2.cbind(tempFrame)
pandaFtrain = tempFrame.as_data_frame(use_pandas=True, header=True) # redo translation from H2O to panda
pandaTrainPart = generatePandaEnumCols(pandaFtrain, enumCols[0], nrows, tempFrame[enumCols[0]].categories())
pandaFtrain.drop([enumCols[0]], axis=1, inplace=True)
for colInd in range(1, len(enumCols)):
cname=enumCols[colInd]
ctemp = generatePandaEnumCols(pandaFtrain, cname, nrows, tempFrame[enumCols[colInd]].categories())
pandaTrainPart=pd.concat([pandaTrainPart, ctemp], axis=1)
pandaFtrain.drop([cname], axis=1, inplace=True)
pandaFtrain = pd.concat([pandaTrainPart, pandaFtrain], axis=1)
c0= tempFrame[yresp].asnumeric().as_data_frame(use_pandas=True, header=True)
pandaFtrain.drop([yresp], axis=1, inplace=True)
pandaF = pd.concat([c0, pandaFtrain], axis=1)
pandaF.rename(columns={c0.columns[0]:yresp}, inplace=True)
newX = list(pandaFtrain.columns.values)
data = pandaF.as_matrix(newX)
label = pandaF.as_matrix([yresp])
return xgb.DMatrix(data=csr_matrix(data), label=label) \
if h2oXGBoostModel._model_json['output']['sparse'] else xgb.DMatrix(data=data, label=label)
|
def convert_H2OFrame_2_DMatrix(self, predictors, yresp, h2oXGBoostModel):
'''
This method requires that you import the following toolboxes: xgboost, pandas, numpy and scipy.sparse.
This method will convert an H2OFrame to a DMatrix that can be used by native XGBoost. The H2OFrame contains
numerical and enum columns alone. Note that H2O one-hot-encoding introduces a missing(NA)
column. There can be NAs in any columns.
Follow the steps below to compare H2OXGBoost and native XGBoost:
1. Train the H2OXGBoost model with H2OFrame trainFile and generate a prediction:
h2oModelD = H2OXGBoostEstimator(**h2oParamsD) # parameters specified as a dict()
h2oModelD.train(x=myX, y=y, training_frame=trainFile) # train with H2OFrame trainFile
h2oPredict = h2oPredictD = h2oModelD.predict(trainFile)
2. Derive the DMatrix from H2OFrame:
nativeDMatrix = trainFile.convert_H2OFrame_2_DMatrix(myX, y, h2oModelD)
3. Derive the parameters for native XGBoost:
nativeParams = h2oModelD.convert_H2OXGBoostParams_2_XGBoostParams()
4. Train your native XGBoost model and generate a prediction:
nativeModel = xgb.train(params=nativeParams[0], dtrain=nativeDMatrix, num_boost_round=nativeParams[1])
nativePredict = nativeModel.predict(data=nativeDMatrix, ntree_limit=nativeParams[1].
5. Compare the predictions h2oPredict from H2OXGBoost, nativePredict from native XGBoost.
:param h2oFrame: H2OFrame to be converted to DMatrix for native XGBoost
:param predictors: List of predictor columns, can be column names or indices
:param yresp: response column, can be column index or name
:param h2oXGBoostModel: H2OXGboost model that are built with the same H2OFrame as input earlier
:return: DMatrix that can be an input to a native XGBoost model
'''
import xgboost as xgb
import pandas as pd
import numpy as np
from scipy.sparse import csr_matrix
assert isinstance(predictors, list) or isinstance(predictors, tuple)
assert h2oXGBoostModel._model_json['algo'] == 'xgboost', \
"convert_H2OFrame_2_DMatrix is used for H2OXGBoost model only."
tempFrame = self[predictors].cbind(self[yresp])
colnames = tempFrame.names
if type(predictors[0])==type(1): # convert integer indices to column names
temp = []
for colInd in predictors:
temp.append(colnames[colInd])
predictors = temp
if (type(yresp) == type(1)):
tempy = colnames[yresp]
yresp = tempy # column name of response column
enumCols = [] # extract enum columns out to process them
enumColsIndices = [] # store enum column indices
typeDict = self.types
for predName in predictors:
if str(typeDict[predName])=='enum':
enumCols.append(predName)
enumColsIndices.append(colnames.index(predName))
pandaFtrain = tempFrame.as_data_frame(use_pandas=True, header=True)
nrows = tempFrame.nrow
# convert H2OFrame to DMatrix starts here
if len(enumCols) > 0: # enumCols contain all enum column names
allDomain = tempFrame.levels() # list all domain levels with column indices
domainLen = []
for enumIndex in enumColsIndices:
if len(allDomain[enumIndex])>0:
domainLen.append(len(allDomain[enumIndex])*-1)
incLevel = np.argsort(domainLen) # indices of enum column indices with decreasing domain length
# need to move enum columns to the front, highest level first
c2 = tempFrame[enumCols[incLevel[0]]]
tempFrame = tempFrame.drop(enumCols[incLevel[0]])
for index in range(1, len(incLevel)):
c2 = c2.cbind(tempFrame[enumCols[incLevel[index]]])
tempFrame = tempFrame.drop(enumCols[incLevel[index]])
enumCols = c2.names
tempFrame = c2.cbind(tempFrame)
pandaFtrain = tempFrame.as_data_frame(use_pandas=True, header=True) # redo translation from H2O to panda
pandaTrainPart = generatePandaEnumCols(pandaFtrain, enumCols[0], nrows, tempFrame[enumCols[0]].categories())
pandaFtrain.drop([enumCols[0]], axis=1, inplace=True)
for colInd in range(1, len(enumCols)):
cname=enumCols[colInd]
ctemp = generatePandaEnumCols(pandaFtrain, cname, nrows, tempFrame[enumCols[colInd]].categories())
pandaTrainPart=pd.concat([pandaTrainPart, ctemp], axis=1)
pandaFtrain.drop([cname], axis=1, inplace=True)
pandaFtrain = pd.concat([pandaTrainPart, pandaFtrain], axis=1)
c0= tempFrame[yresp].asnumeric().as_data_frame(use_pandas=True, header=True)
pandaFtrain.drop([yresp], axis=1, inplace=True)
pandaF = pd.concat([c0, pandaFtrain], axis=1)
pandaF.rename(columns={c0.columns[0]:yresp}, inplace=True)
newX = list(pandaFtrain.columns.values)
data = pandaF.as_matrix(newX)
label = pandaF.as_matrix([yresp])
return xgb.DMatrix(data=csr_matrix(data), label=label) \
if h2oXGBoostModel._model_json['output']['sparse'] else xgb.DMatrix(data=data, label=label)
|
[
"This",
"method",
"requires",
"that",
"you",
"import",
"the",
"following",
"toolboxes",
":",
"xgboost",
"pandas",
"numpy",
"and",
"scipy",
".",
"sparse",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L2568-L2673
|
[
"def",
"convert_H2OFrame_2_DMatrix",
"(",
"self",
",",
"predictors",
",",
"yresp",
",",
"h2oXGBoostModel",
")",
":",
"import",
"xgboost",
"as",
"xgb",
"import",
"pandas",
"as",
"pd",
"import",
"numpy",
"as",
"np",
"from",
"scipy",
".",
"sparse",
"import",
"csr_matrix",
"assert",
"isinstance",
"(",
"predictors",
",",
"list",
")",
"or",
"isinstance",
"(",
"predictors",
",",
"tuple",
")",
"assert",
"h2oXGBoostModel",
".",
"_model_json",
"[",
"'algo'",
"]",
"==",
"'xgboost'",
",",
"\"convert_H2OFrame_2_DMatrix is used for H2OXGBoost model only.\"",
"tempFrame",
"=",
"self",
"[",
"predictors",
"]",
".",
"cbind",
"(",
"self",
"[",
"yresp",
"]",
")",
"colnames",
"=",
"tempFrame",
".",
"names",
"if",
"type",
"(",
"predictors",
"[",
"0",
"]",
")",
"==",
"type",
"(",
"1",
")",
":",
"# convert integer indices to column names",
"temp",
"=",
"[",
"]",
"for",
"colInd",
"in",
"predictors",
":",
"temp",
".",
"append",
"(",
"colnames",
"[",
"colInd",
"]",
")",
"predictors",
"=",
"temp",
"if",
"(",
"type",
"(",
"yresp",
")",
"==",
"type",
"(",
"1",
")",
")",
":",
"tempy",
"=",
"colnames",
"[",
"yresp",
"]",
"yresp",
"=",
"tempy",
"# column name of response column",
"enumCols",
"=",
"[",
"]",
"# extract enum columns out to process them",
"enumColsIndices",
"=",
"[",
"]",
"# store enum column indices",
"typeDict",
"=",
"self",
".",
"types",
"for",
"predName",
"in",
"predictors",
":",
"if",
"str",
"(",
"typeDict",
"[",
"predName",
"]",
")",
"==",
"'enum'",
":",
"enumCols",
".",
"append",
"(",
"predName",
")",
"enumColsIndices",
".",
"append",
"(",
"colnames",
".",
"index",
"(",
"predName",
")",
")",
"pandaFtrain",
"=",
"tempFrame",
".",
"as_data_frame",
"(",
"use_pandas",
"=",
"True",
",",
"header",
"=",
"True",
")",
"nrows",
"=",
"tempFrame",
".",
"nrow",
"# convert H2OFrame to DMatrix starts here",
"if",
"len",
"(",
"enumCols",
")",
">",
"0",
":",
"# enumCols contain all enum column names",
"allDomain",
"=",
"tempFrame",
".",
"levels",
"(",
")",
"# list all domain levels with column indices",
"domainLen",
"=",
"[",
"]",
"for",
"enumIndex",
"in",
"enumColsIndices",
":",
"if",
"len",
"(",
"allDomain",
"[",
"enumIndex",
"]",
")",
">",
"0",
":",
"domainLen",
".",
"append",
"(",
"len",
"(",
"allDomain",
"[",
"enumIndex",
"]",
")",
"*",
"-",
"1",
")",
"incLevel",
"=",
"np",
".",
"argsort",
"(",
"domainLen",
")",
"# indices of enum column indices with decreasing domain length",
"# need to move enum columns to the front, highest level first",
"c2",
"=",
"tempFrame",
"[",
"enumCols",
"[",
"incLevel",
"[",
"0",
"]",
"]",
"]",
"tempFrame",
"=",
"tempFrame",
".",
"drop",
"(",
"enumCols",
"[",
"incLevel",
"[",
"0",
"]",
"]",
")",
"for",
"index",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"incLevel",
")",
")",
":",
"c2",
"=",
"c2",
".",
"cbind",
"(",
"tempFrame",
"[",
"enumCols",
"[",
"incLevel",
"[",
"index",
"]",
"]",
"]",
")",
"tempFrame",
"=",
"tempFrame",
".",
"drop",
"(",
"enumCols",
"[",
"incLevel",
"[",
"index",
"]",
"]",
")",
"enumCols",
"=",
"c2",
".",
"names",
"tempFrame",
"=",
"c2",
".",
"cbind",
"(",
"tempFrame",
")",
"pandaFtrain",
"=",
"tempFrame",
".",
"as_data_frame",
"(",
"use_pandas",
"=",
"True",
",",
"header",
"=",
"True",
")",
"# redo translation from H2O to panda",
"pandaTrainPart",
"=",
"generatePandaEnumCols",
"(",
"pandaFtrain",
",",
"enumCols",
"[",
"0",
"]",
",",
"nrows",
",",
"tempFrame",
"[",
"enumCols",
"[",
"0",
"]",
"]",
".",
"categories",
"(",
")",
")",
"pandaFtrain",
".",
"drop",
"(",
"[",
"enumCols",
"[",
"0",
"]",
"]",
",",
"axis",
"=",
"1",
",",
"inplace",
"=",
"True",
")",
"for",
"colInd",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"enumCols",
")",
")",
":",
"cname",
"=",
"enumCols",
"[",
"colInd",
"]",
"ctemp",
"=",
"generatePandaEnumCols",
"(",
"pandaFtrain",
",",
"cname",
",",
"nrows",
",",
"tempFrame",
"[",
"enumCols",
"[",
"colInd",
"]",
"]",
".",
"categories",
"(",
")",
")",
"pandaTrainPart",
"=",
"pd",
".",
"concat",
"(",
"[",
"pandaTrainPart",
",",
"ctemp",
"]",
",",
"axis",
"=",
"1",
")",
"pandaFtrain",
".",
"drop",
"(",
"[",
"cname",
"]",
",",
"axis",
"=",
"1",
",",
"inplace",
"=",
"True",
")",
"pandaFtrain",
"=",
"pd",
".",
"concat",
"(",
"[",
"pandaTrainPart",
",",
"pandaFtrain",
"]",
",",
"axis",
"=",
"1",
")",
"c0",
"=",
"tempFrame",
"[",
"yresp",
"]",
".",
"asnumeric",
"(",
")",
".",
"as_data_frame",
"(",
"use_pandas",
"=",
"True",
",",
"header",
"=",
"True",
")",
"pandaFtrain",
".",
"drop",
"(",
"[",
"yresp",
"]",
",",
"axis",
"=",
"1",
",",
"inplace",
"=",
"True",
")",
"pandaF",
"=",
"pd",
".",
"concat",
"(",
"[",
"c0",
",",
"pandaFtrain",
"]",
",",
"axis",
"=",
"1",
")",
"pandaF",
".",
"rename",
"(",
"columns",
"=",
"{",
"c0",
".",
"columns",
"[",
"0",
"]",
":",
"yresp",
"}",
",",
"inplace",
"=",
"True",
")",
"newX",
"=",
"list",
"(",
"pandaFtrain",
".",
"columns",
".",
"values",
")",
"data",
"=",
"pandaF",
".",
"as_matrix",
"(",
"newX",
")",
"label",
"=",
"pandaF",
".",
"as_matrix",
"(",
"[",
"yresp",
"]",
")",
"return",
"xgb",
".",
"DMatrix",
"(",
"data",
"=",
"csr_matrix",
"(",
"data",
")",
",",
"label",
"=",
"label",
")",
"if",
"h2oXGBoostModel",
".",
"_model_json",
"[",
"'output'",
"]",
"[",
"'sparse'",
"]",
"else",
"xgb",
".",
"DMatrix",
"(",
"data",
"=",
"data",
",",
"label",
"=",
"label",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
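A condensed sketch of the H2OXGBoost-versus-native-XGBoost comparison workflow described in the docstring above; the file name, predictor list, response column, and parameter dict are hypothetical:

import h2o
import xgboost as xgb
from h2o.estimators.xgboost import H2OXGBoostEstimator
h2o.init()
trainFile = h2o.import_file("train.csv")    # hypothetical training file
myX, y = ["x1", "x2", "x3"], "response"     # hypothetical predictors and response
h2oParamsD = {"ntrees": 10, "seed": 1}
h2oModelD = H2OXGBoostEstimator(**h2oParamsD)
h2oModelD.train(x=myX, y=y, training_frame=trainFile)
h2oPredict = h2oModelD.predict(trainFile)
# derive the DMatrix and native parameters, then train and predict with native XGBoost
nativeDMatrix = trainFile.convert_H2OFrame_2_DMatrix(myX, y, h2oModelD)
nativeParams = h2oModelD.convert_H2OXGBoostParams_2_XGBoostParams()
nativeModel = xgb.train(params=nativeParams[0], dtrain=nativeDMatrix,
                        num_boost_round=nativeParams[1])
nativePredict = nativeModel.predict(data=nativeDMatrix, ntree_limit=nativeParams[1])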
test
|
H2OFrame.pivot
|
Pivot the frame designated by the three columns: index, column, and value. Index and column should be
of type enum, int, or time.
For cases of multiple indexes for a column label, the aggregation method is to pick the first occurrence in the data frame
:param index: Index is a column that will be the row label
:param column: The labels for the columns in the pivoted Frame
:param value: The column of values for the given index and column label
:returns:
|
h2o-py/h2o/frame.py
|
def pivot(self, index, column, value):
"""
Pivot the frame designated by the three columns: index, column, and value. Index and column should be
of type enum, int, or time.
For cases of multiple indexes for a column label, the aggregation method is to pick the first occurrence in the data frame
:param index: Index is a column that will be the row label
:param column: The labels for the columns in the pivoted Frame
:param value: The column of values for the given index and column label
:returns:
"""
assert_is_type(index, str)
assert_is_type(column, str)
assert_is_type(value, str)
col_names = self.names
if index not in col_names:
raise H2OValueError("Index not in H2OFrame")
if column not in col_names:
raise H2OValueError("Column not in H2OFrame")
if value not in col_names:
raise H2OValueError("Value column not in H2OFrame")
if self.type(column) not in ["enum","time","int"]:
raise H2OValueError("'column' argument is not type enum, time or int")
if self.type(index) not in ["enum","time","int"]:
raise H2OValueError("'index' argument is not type enum, time or int")
return H2OFrame._expr(expr=ExprNode("pivot",self,index,column,value))
|
def pivot(self, index, column, value):
"""
Pivot the frame designated by the three columns: index, column, and value. Index and column should be
of type enum, int, or time.
For cases of multiple indexes for a column label, the aggregation method is to pick the first occurrence in the data frame
:param index: Index is a column that will be the row label
:param column: The labels for the columns in the pivoted Frame
:param value: The column of values for the given index and column label
:returns:
"""
assert_is_type(index, str)
assert_is_type(column, str)
assert_is_type(value, str)
col_names = self.names
if index not in col_names:
raise H2OValueError("Index not in H2OFrame")
if column not in col_names:
raise H2OValueError("Column not in H2OFrame")
if value not in col_names:
raise H2OValueError("Value column not in H2OFrame")
if self.type(column) not in ["enum","time","int"]:
raise H2OValueError("'column' argument is not type enum, time or int")
if self.type(index) not in ["enum","time","int"]:
raise H2OValueError("'index' argument is not type enum, time or int")
return H2OFrame._expr(expr=ExprNode("pivot",self,index,column,value))
|
[
"Pivot",
"the",
"frame",
"designated",
"by",
"the",
"three",
"columns",
":",
"index",
"column",
"and",
"value",
".",
"Index",
"and",
"column",
"should",
"be",
"of",
"type",
"enum",
"int",
"or",
"time",
".",
"For",
"cases",
"of",
"multiple",
"indexes",
"for",
"a",
"column",
"label",
"the",
"aggregation",
"method",
"is",
"to",
"pick",
"the",
"first",
"occurrence",
"in",
"the",
"data",
"frame"
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L2675-L2700
|
[
"def",
"pivot",
"(",
"self",
",",
"index",
",",
"column",
",",
"value",
")",
":",
"assert_is_type",
"(",
"index",
",",
"str",
")",
"assert_is_type",
"(",
"column",
",",
"str",
")",
"assert_is_type",
"(",
"value",
",",
"str",
")",
"col_names",
"=",
"self",
".",
"names",
"if",
"index",
"not",
"in",
"col_names",
":",
"raise",
"H2OValueError",
"(",
"\"Index not in H2OFrame\"",
")",
"if",
"column",
"not",
"in",
"col_names",
":",
"raise",
"H2OValueError",
"(",
"\"Column not in H2OFrame\"",
")",
"if",
"value",
"not",
"in",
"col_names",
":",
"raise",
"H2OValueError",
"(",
"\"Value column not in H2OFrame\"",
")",
"if",
"self",
".",
"type",
"(",
"column",
")",
"not",
"in",
"[",
"\"enum\"",
",",
"\"time\"",
",",
"\"int\"",
"]",
":",
"raise",
"H2OValueError",
"(",
"\"'column' argument is not type enum, time or int\"",
")",
"if",
"self",
".",
"type",
"(",
"index",
")",
"not",
"in",
"[",
"\"enum\"",
",",
"\"time\"",
",",
"\"int\"",
"]",
":",
"raise",
"H2OValueError",
"(",
"\"'index' argument is not type enum, time or int\"",
")",
"return",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"pivot\"",
",",
"self",
",",
"index",
",",
"column",
",",
"value",
")",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
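A small illustrative pivot from long to wide format with ``H2OFrame.pivot``; the column names and values are hypothetical, with ``date`` as the row label and ``metric`` supplying the new column labels:

import h2o
h2o.init()
fr = h2o.H2OFrame({"date": [1, 1, 2, 2],
                   "metric": ["clicks", "views", "clicks", "views"],
                   "value": [10, 100, 12, 130]})
fr["metric"] = fr["metric"].asfactor()   # column labels must be enum, int, or time
wide = fr.pivot(index="date", column="metric", value="value")
print(wide)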
test
|
H2OFrame.rank_within_group_by
|
This function will add a new column rank where the ranking is produced as follows:
1. sorts the H2OFrame by columns sorted in by columns specified in group_by_cols and sort_cols in the directions
specified by the ascending for the sort_cols. The sort directions for the group_by_cols are ascending only.
2. A new rank column is added to the frame which will contain a rank assignment performed next. The user can
choose to assign a name to this new column. The default name is New_Rank_column.
3. For each groupby groups, a rank is assigned to the row starting from 1, 2, ... to the end of that group.
4. If sort_cols_sorted is TRUE, a final sort on the frame will be performed frame according to the sort_cols and
the sort directions in ascending. If sort_cols_sorted is FALSE (by default), the frame from step 3 will be
returned as is with no extra sort. This may provide a small speedup if desired.
:param group_by_cols: The columns to group on (either a single column name/index, or a list of column names
or column indices
:param sort_cols: The columns to sort on (either a single column name/index, or a list of column names or
column indices
:param ascending: Optional Boolean array to denote sorting direction for each sorting column. True for
ascending, False for descending. Default is ascending sort. Sort direction for enums will be ignored.
:param new_col_name: Optional String to denote the new column names. Default to New_Rank_column.
:param sort_cols_sorted: Optional Boolean to denote if the returned frame should be sorted according to sort_cols
and sort directions specified in ascending. Default is False.
:return: a new Frame with new rank (sorted by columns in sort_cols) column within the grouping specified
by the group_by_cols.
The following example is generated by Nidhi Mehta.
If the input frame is train:
ID Group_by_column num data Column_to_arrange_by num_1 fdata
12 1 2941.552 1 3 -3177.9077 1
12 1 2941.552 1 5 -13311.8247 1
12 2 -22722.174 1 3 -3177.9077 1
12 2 -22722.174 1 5 -13311.8247 1
13 3 -12776.884 1 5 -18421.6171 0
13 3 -12776.884 1 4 28080.1607 0
13 1 -6049.830 1 5 -18421.6171 0
13 1 -6049.830 1 4 28080.1607 0
15 3 -16995.346 1 1 -9781.6373 0
16 1 -10003.593 0 3 -61284.6900 0
16 3 26052.495 1 3 -61284.6900 0
16 3 -22905.288 0 3 -61284.6900 0
17 2 -13465.496 1 2 12094.4851 1
17 2 -13465.496 1 3 -11772.1338 1
17 2 -13465.496 1 3 -415.1114 0
17 2 -3329.619 1 2 12094.4851 1
17 2 -3329.619 1 3 -11772.1338 1
17 2 -3329.619 1 3 -415.1114 0
If the following commands are issued:
rankedF1 = h2o.rank_within_group_by(train, ["Group_by_column"], ["Column_to_arrange_by"], [TRUE])
rankedF1.summary()
The returned frame rankedF1 will look like this:
ID Group_by_column num fdata Column_to_arrange_by num_1 fdata.1 New_Rank_column
12 1 2941.552 1 3 -3177.9077 1 1
16 1 -10003.593 0 3 -61284.6900 0 2
13 1 -6049.830 0 4 28080.1607 0 3
12 1 2941.552 1 5 -13311.8247 1 4
13 1 -6049.830 0 5 -18421.6171 0 5
17 2 -13465.496 0 2 12094.4851 1 1
17 2 -3329.619 0 2 12094.4851 1 2
12 2 -22722.174 1 3 -3177.9077 1 3
17 2 -13465.496 0 3 -11772.1338 1 4
17 2 -13465.496 0 3 -415.1114 0 5
17 2 -3329.619 0 3 -11772.1338 1 6
17 2 -3329.619 0 3 -415.1114 0 7
12 2 -22722.174 1 5 -13311.8247 1 8
15 3 -16995.346 1 1 -9781.6373 0 1
16 3 26052.495 0 3 -61284.6900 0 2
16 3 -22905.288 1 3 -61284.6900 0 3
13 3 -12776.884 1 4 28080.1607 0 4
13 3 -12776.884 1 5 -18421.6171 0 5
If the following commands are issued:
rankedF1 = h2o.rank_within_group_by(train, ["Group_by_column"], ["Column_to_arrange_by"], [TRUE], sort_cols_sorted=True)
h2o.summary(rankedF1)
The returned frame will be sorted according to sort_cols and hence look like this instead:
ID Group_by_column num fdata Column_to_arrange_by num_1 fdata.1 New_Rank_column
15 3 -16995.346 1 1 -9781.6373 0 1
17 2 -13465.496 0 2 12094.4851 1 1
17 2 -3329.619 0 2 12094.4851 1 2
12 1 2941.552 1 3 -3177.9077 1 1
12 2 -22722.174 1 3 -3177.9077 1 3
16 1 -10003.593 0 3 -61284.6900 0 2
16 3 26052.495 0 3 -61284.6900 0 2
16 3 -22905.288 1 3 -61284.6900 0 3
17 2 -13465.496 0 3 -11772.1338 1 4
17 2 -13465.496 0 3 -415.1114 0 5
17 2 -3329.619 0 3 -11772.1338 1 6
17 2 -3329.619 0 3 -415.1114 0 7
13 3 -12776.884 1 4 28080.1607 0 4
13 1 -6049.830 0 4 28080.1607 0 3
12 1 2941.552 1 5 -13311.8247 1 4
12 2 -22722.174 1 5 -13311.8247 1 8
13 3 -12776.884 1 5 -18421.6171 0 5
13 1 -6049.830 0 5 -18421.6171 0 5
|
h2o-py/h2o/frame.py
|
def rank_within_group_by(self, group_by_cols, sort_cols, ascending=[], new_col_name="New_Rank_column", sort_cols_sorted=False):
"""
This function will add a new column rank where the ranking is produced as follows:
1. sorts the H2OFrame by columns sorted in by columns specified in group_by_cols and sort_cols in the directions
specified by the ascending for the sort_cols. The sort directions for the group_by_cols are ascending only.
2. A new rank column is added to the frame which will contain a rank assignment performed next. The user can
choose to assign a name to this new column. The default name is New_Rank_column.
3. For each groupby groups, a rank is assigned to the row starting from 1, 2, ... to the end of that group.
4. If sort_cols_sorted is TRUE, a final sort on the frame will be performed frame according to the sort_cols and
the sort directions in ascending. If sort_cols_sorted is FALSE (by default), the frame from step 3 will be
returned as is with no extra sort. This may provide a small speedup if desired.
:param group_by_cols: The columns to group on (either a single column name/index, or a list of column names
or column indices
:param sort_cols: The columns to sort on (either a single column name/index, or a list of column names or
column indices
:param ascending: Optional Boolean array to denote sorting direction for each sorting column. True for
ascending, False for descending. Default is ascending sort. Sort direction for enums will be ignored.
:param new_col_name: Optional String to denote the new column names. Default to New_Rank_column.
:param sort_cols_sorted: Optional Boolean to denote if the returned frame should be sorted according to sort_cols
and sort directions specified in ascending. Default is False.
:return: a new Frame with new rank (sorted by columns in sort_cols) column within the grouping specified
by the group_by_cols.
The following example is generated by Nidhi Mehta.
If the input frame is train:
ID Group_by_column num data Column_to_arrange_by num_1 fdata
12 1 2941.552 1 3 -3177.9077 1
12 1 2941.552 1 5 -13311.8247 1
12 2 -22722.174 1 3 -3177.9077 1
12 2 -22722.174 1 5 -13311.8247 1
13 3 -12776.884 1 5 -18421.6171 0
13 3 -12776.884 1 4 28080.1607 0
13 1 -6049.830 1 5 -18421.6171 0
13 1 -6049.830 1 4 28080.1607 0
15 3 -16995.346 1 1 -9781.6373 0
16 1 -10003.593 0 3 -61284.6900 0
16 3 26052.495 1 3 -61284.6900 0
16 3 -22905.288 0 3 -61284.6900 0
17 2 -13465.496 1 2 12094.4851 1
17 2 -13465.496 1 3 -11772.1338 1
17 2 -13465.496 1 3 -415.1114 0
17 2 -3329.619 1 2 12094.4851 1
17 2 -3329.619 1 3 -11772.1338 1
17 2 -3329.619 1 3 -415.1114 0
If the following commands are issued:
rankedF1 = h2o.rank_within_group_by(train, ["Group_by_column"], ["Column_to_arrange_by"], [TRUE])
rankedF1.summary()
The returned frame rankedF1 will look like this:
ID Group_by_column num fdata Column_to_arrange_by num_1 fdata.1 New_Rank_column
12 1 2941.552 1 3 -3177.9077 1 1
16 1 -10003.593 0 3 -61284.6900 0 2
13 1 -6049.830 0 4 28080.1607 0 3
12 1 2941.552 1 5 -13311.8247 1 4
13 1 -6049.830 0 5 -18421.6171 0 5
17 2 -13465.496 0 2 12094.4851 1 1
17 2 -3329.619 0 2 12094.4851 1 2
12 2 -22722.174 1 3 -3177.9077 1 3
17 2 -13465.496 0 3 -11772.1338 1 4
17 2 -13465.496 0 3 -415.1114 0 5
17 2 -3329.619 0 3 -11772.1338 1 6
17 2 -3329.619 0 3 -415.1114 0 7
12 2 -22722.174 1 5 -13311.8247 1 8
15 3 -16995.346 1 1 -9781.6373 0 1
16 3 26052.495 0 3 -61284.6900 0 2
16 3 -22905.288 1 3 -61284.6900 0 3
13 3 -12776.884 1 4 28080.1607 0 4
13 3 -12776.884 1 5 -18421.6171 0 5
If the following commands are issued:
rankedF1 = h2o.rank_within_group_by(train, ["Group_by_column"], ["Column_to_arrange_by"], [TRUE], sort_cols_sorted=True)
h2o.summary(rankedF1)
The returned frame will be sorted according to sort_cols and hence look like this instead:
ID Group_by_column num fdata Column_to_arrange_by num_1 fdata.1 New_Rank_column
15 3 -16995.346 1 1 -9781.6373 0 1
17 2 -13465.496 0 2 12094.4851 1 1
17 2 -3329.619 0 2 12094.4851 1 2
12 1 2941.552 1 3 -3177.9077 1 1
12 2 -22722.174 1 3 -3177.9077 1 3
16 1 -10003.593 0 3 -61284.6900 0 2
16 3 26052.495 0 3 -61284.6900 0 2
16 3 -22905.288 1 3 -61284.6900 0 3
17 2 -13465.496 0 3 -11772.1338 1 4
17 2 -13465.496 0 3 -415.1114 0 5
17 2 -3329.619 0 3 -11772.1338 1 6
17 2 -3329.619 0 3 -415.1114 0 7
13 3 -12776.884 1 4 28080.1607 0 4
13 1 -6049.830 0 4 28080.1607 0 3
12 1 2941.552 1 5 -13311.8247 1 4
12 2 -22722.174 1 5 -13311.8247 1 8
13 3 -12776.884 1 5 -18421.6171 0 5
13 1 -6049.830 0 5 -18421.6171 0 5
"""
assert_is_type(group_by_cols, str, int, [str, int])
if type(group_by_cols) != list: group_by_cols = [group_by_cols]
if type(sort_cols) != list: sort_cols = [sort_cols]
if type(ascending) != list: ascending = [ascending] # convert to list
ascendingI=[1]*len(sort_cols) # intitalize sorting direction to ascending by default
for c in sort_cols:
if self.type(c) not in ["enum","time","int","real"]:
raise H2OValueError("Sort by column: " + str(c) + " not of enum, time, int or real type")
for c in group_by_cols:
if self.type(c) not in ["enum","time","int","real"]:
raise H2OValueError("Group by column: " + str(c) + " not of enum, time, int or real type")
if len(ascending)>0: # user specify sort direction, assume all columns ascending
assert len(ascending)==len(sort_cols), "Sorting direction must be specified for each sorted column."
for index in range(len(sort_cols)):
ascendingI[index]=1 if ascending[index] else -1
finalSortedOrder=0
if (sort_cols_sorted):
finalSortedOrder=1
return H2OFrame._expr(expr=ExprNode("rank_within_groupby",self,group_by_cols,sort_cols,ascendingI,new_col_name, finalSortedOrder))
|
def rank_within_group_by(self, group_by_cols, sort_cols, ascending=[], new_col_name="New_Rank_column", sort_cols_sorted=False):
"""
This function will add a new column rank where the ranking is produced as follows:
1. sorts the H2OFrame by columns sorted in by columns specified in group_by_cols and sort_cols in the directions
specified by the ascending for the sort_cols. The sort directions for the group_by_cols are ascending only.
2. A new rank column is added to the frame which will contain a rank assignment performed next. The user can
choose to assign a name to this new column. The default name is New_Rank_column.
3. For each groupby groups, a rank is assigned to the row starting from 1, 2, ... to the end of that group.
4. If sort_cols_sorted is TRUE, a final sort on the frame will be performed frame according to the sort_cols and
the sort directions in ascending. If sort_cols_sorted is FALSE (by default), the frame from step 3 will be
returned as is with no extra sort. This may provide a small speedup if desired.
:param group_by_cols: The columns to group on (either a single column name/index, or a list of column names
or column indices
:param sort_cols: The columns to sort on (either a single column name/index, or a list of column names or
column indices
:param ascending: Optional Boolean array to denote sorting direction for each sorting column. True for
ascending, False for descending. Default is ascending sort. Sort direction for enums will be ignored.
:param new_col_name: Optional String to denote the new column names. Default to New_Rank_column.
:param sort_cols_sorted: Optional Boolean to denote if the returned frame should be sorted according to sort_cols
and sort directions specified in ascending. Default is False.
:return: a new Frame with new rank (sorted by columns in sort_cols) column within the grouping specified
by the group_by_cols.
The following example is generated by Nidhi Mehta.
If the input frame is train:
ID Group_by_column num data Column_to_arrange_by num_1 fdata
12 1 2941.552 1 3 -3177.9077 1
12 1 2941.552 1 5 -13311.8247 1
12 2 -22722.174 1 3 -3177.9077 1
12 2 -22722.174 1 5 -13311.8247 1
13 3 -12776.884 1 5 -18421.6171 0
13 3 -12776.884 1 4 28080.1607 0
13 1 -6049.830 1 5 -18421.6171 0
13 1 -6049.830 1 4 28080.1607 0
15 3 -16995.346 1 1 -9781.6373 0
16 1 -10003.593 0 3 -61284.6900 0
16 3 26052.495 1 3 -61284.6900 0
16 3 -22905.288 0 3 -61284.6900 0
17 2 -13465.496 1 2 12094.4851 1
17 2 -13465.496 1 3 -11772.1338 1
17 2 -13465.496 1 3 -415.1114 0
17 2 -3329.619 1 2 12094.4851 1
17 2 -3329.619 1 3 -11772.1338 1
17 2 -3329.619 1 3 -415.1114 0
If the following commands are issued:
rankedF1 = h2o.rank_within_group_by(train, ["Group_by_column"], ["Column_to_arrange_by"], [TRUE])
rankedF1.summary()
The returned frame rankedF1 will look like this:
ID Group_by_column num fdata Column_to_arrange_by num_1 fdata.1 New_Rank_column
12 1 2941.552 1 3 -3177.9077 1 1
16 1 -10003.593 0 3 -61284.6900 0 2
13 1 -6049.830 0 4 28080.1607 0 3
12 1 2941.552 1 5 -13311.8247 1 4
13 1 -6049.830 0 5 -18421.6171 0 5
17 2 -13465.496 0 2 12094.4851 1 1
17 2 -3329.619 0 2 12094.4851 1 2
12 2 -22722.174 1 3 -3177.9077 1 3
17 2 -13465.496 0 3 -11772.1338 1 4
17 2 -13465.496 0 3 -415.1114 0 5
17 2 -3329.619 0 3 -11772.1338 1 6
17 2 -3329.619 0 3 -415.1114 0 7
12 2 -22722.174 1 5 -13311.8247 1 8
15 3 -16995.346 1 1 -9781.6373 0 1
16 3 26052.495 0 3 -61284.6900 0 2
16 3 -22905.288 1 3 -61284.6900 0 3
13 3 -12776.884 1 4 28080.1607 0 4
13 3 -12776.884 1 5 -18421.6171 0 5
If the following commands are issued:
        rankedF1 = train.rank_within_group_by(["Group_by_column"], ["Column_to_arrange_by"], [True], sort_cols_sorted=True)
        rankedF1.summary()
The returned frame will be sorted according to sort_cols and hence look like this instead:
ID Group_by_column num fdata Column_to_arrange_by num_1 fdata.1 New_Rank_column
15 3 -16995.346 1 1 -9781.6373 0 1
17 2 -13465.496 0 2 12094.4851 1 1
17 2 -3329.619 0 2 12094.4851 1 2
12 1 2941.552 1 3 -3177.9077 1 1
12 2 -22722.174 1 3 -3177.9077 1 3
16 1 -10003.593 0 3 -61284.6900 0 2
16 3 26052.495 0 3 -61284.6900 0 2
16 3 -22905.288 1 3 -61284.6900 0 3
17 2 -13465.496 0 3 -11772.1338 1 4
17 2 -13465.496 0 3 -415.1114 0 5
17 2 -3329.619 0 3 -11772.1338 1 6
17 2 -3329.619 0 3 -415.1114 0 7
13 3 -12776.884 1 4 28080.1607 0 4
13 1 -6049.830 0 4 28080.1607 0 3
12 1 2941.552 1 5 -13311.8247 1 4
12 2 -22722.174 1 5 -13311.8247 1 8
13 3 -12776.884 1 5 -18421.6171 0 5
13 1 -6049.830 0 5 -18421.6171 0 5
"""
assert_is_type(group_by_cols, str, int, [str, int])
if type(group_by_cols) != list: group_by_cols = [group_by_cols]
if type(sort_cols) != list: sort_cols = [sort_cols]
if type(ascending) != list: ascending = [ascending] # convert to list
        ascendingI=[1]*len(sort_cols) # initialize sorting direction to ascending by default
for c in sort_cols:
if self.type(c) not in ["enum","time","int","real"]:
raise H2OValueError("Sort by column: " + str(c) + " not of enum, time, int or real type")
for c in group_by_cols:
if self.type(c) not in ["enum","time","int","real"]:
raise H2OValueError("Group by column: " + str(c) + " not of enum, time, int or real type")
        if len(ascending)>0: # user specified sort direction, assume all columns ascending
assert len(ascending)==len(sort_cols), "Sorting direction must be specified for each sorted column."
for index in range(len(sort_cols)):
ascendingI[index]=1 if ascending[index] else -1
finalSortedOrder=0
if (sort_cols_sorted):
finalSortedOrder=1
return H2OFrame._expr(expr=ExprNode("rank_within_groupby",self,group_by_cols,sort_cols,ascendingI,new_col_name, finalSortedOrder))
|
[
"This",
"function",
"will",
"add",
"a",
"new",
"column",
"rank",
"where",
"the",
"ranking",
"is",
"produced",
"as",
"follows",
":",
"1",
".",
"sorts",
"the",
"H2OFrame",
"by",
"columns",
"sorted",
"in",
"by",
"columns",
"specified",
"in",
"group_by_cols",
"and",
"sort_cols",
"in",
"the",
"directions",
"specified",
"by",
"the",
"ascending",
"for",
"the",
"sort_cols",
".",
"The",
"sort",
"directions",
"for",
"the",
"group_by_cols",
"are",
"ascending",
"only",
".",
"2",
".",
"A",
"new",
"rank",
"column",
"is",
"added",
"to",
"the",
"frame",
"which",
"will",
"contain",
"a",
"rank",
"assignment",
"performed",
"next",
".",
"The",
"user",
"can",
"choose",
"to",
"assign",
"a",
"name",
"to",
"this",
"new",
"column",
".",
"The",
"default",
"name",
"is",
"New_Rank_column",
".",
"3",
".",
"For",
"each",
"groupby",
"groups",
"a",
"rank",
"is",
"assigned",
"to",
"the",
"row",
"starting",
"from",
"1",
"2",
"...",
"to",
"the",
"end",
"of",
"that",
"group",
".",
"4",
".",
"If",
"sort_cols_sorted",
"is",
"TRUE",
"a",
"final",
"sort",
"on",
"the",
"frame",
"will",
"be",
"performed",
"frame",
"according",
"to",
"the",
"sort_cols",
"and",
"the",
"sort",
"directions",
"in",
"ascending",
".",
"If",
"sort_cols_sorted",
"is",
"FALSE",
"(",
"by",
"default",
")",
"the",
"frame",
"from",
"step",
"3",
"will",
"be",
"returned",
"as",
"is",
"with",
"no",
"extra",
"sort",
".",
"This",
"may",
"provide",
"a",
"small",
"speedup",
"if",
"desired",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L2702-L2822
|
[
"def",
"rank_within_group_by",
"(",
"self",
",",
"group_by_cols",
",",
"sort_cols",
",",
"ascending",
"=",
"[",
"]",
",",
"new_col_name",
"=",
"\"New_Rank_column\"",
",",
"sort_cols_sorted",
"=",
"False",
")",
":",
"assert_is_type",
"(",
"group_by_cols",
",",
"str",
",",
"int",
",",
"[",
"str",
",",
"int",
"]",
")",
"if",
"type",
"(",
"group_by_cols",
")",
"!=",
"list",
":",
"group_by_cols",
"=",
"[",
"group_by_cols",
"]",
"if",
"type",
"(",
"sort_cols",
")",
"!=",
"list",
":",
"sort_cols",
"=",
"[",
"sort_cols",
"]",
"if",
"type",
"(",
"ascending",
")",
"!=",
"list",
":",
"ascending",
"=",
"[",
"ascending",
"]",
"# convert to list",
"ascendingI",
"=",
"[",
"1",
"]",
"*",
"len",
"(",
"sort_cols",
")",
"# intitalize sorting direction to ascending by default",
"for",
"c",
"in",
"sort_cols",
":",
"if",
"self",
".",
"type",
"(",
"c",
")",
"not",
"in",
"[",
"\"enum\"",
",",
"\"time\"",
",",
"\"int\"",
",",
"\"real\"",
"]",
":",
"raise",
"H2OValueError",
"(",
"\"Sort by column: \"",
"+",
"str",
"(",
"c",
")",
"+",
"\" not of enum, time, int or real type\"",
")",
"for",
"c",
"in",
"group_by_cols",
":",
"if",
"self",
".",
"type",
"(",
"c",
")",
"not",
"in",
"[",
"\"enum\"",
",",
"\"time\"",
",",
"\"int\"",
",",
"\"real\"",
"]",
":",
"raise",
"H2OValueError",
"(",
"\"Group by column: \"",
"+",
"str",
"(",
"c",
")",
"+",
"\" not of enum, time, int or real type\"",
")",
"if",
"len",
"(",
"ascending",
")",
">",
"0",
":",
"# user specify sort direction, assume all columns ascending",
"assert",
"len",
"(",
"ascending",
")",
"==",
"len",
"(",
"sort_cols",
")",
",",
"\"Sorting direction must be specified for each sorted column.\"",
"for",
"index",
"in",
"range",
"(",
"len",
"(",
"sort_cols",
")",
")",
":",
"ascendingI",
"[",
"index",
"]",
"=",
"1",
"if",
"ascending",
"[",
"index",
"]",
"else",
"-",
"1",
"finalSortedOrder",
"=",
"0",
"if",
"(",
"sort_cols_sorted",
")",
":",
"finalSortedOrder",
"=",
"1",
"return",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"rank_within_groupby\"",
",",
"self",
",",
"group_by_cols",
",",
"sort_cols",
",",
"ascendingI",
",",
"new_col_name",
",",
"finalSortedOrder",
")",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OFrame.topNBottomN
|
Given a column name or one column index and a percent N, this function will return the top or bottom N% of the
values of the column of a frame. The column must be a numerical column.
:param column: a string for column name or an integer index
:param nPercent: a top or bottom percentage of the column values to return
:param grabTopN: -1 to grab bottom N percent and 1 to grab top N percent
:returns: an H2OFrame containing two columns. The first column contains the original row indices where
the top/bottom values are extracted from. The second column contains the values.
|
h2o-py/h2o/frame.py
|
def topNBottomN(self, column=0, nPercent=10, grabTopN=-1):
"""
        Given a column name or one column index and a percent N, this function will return the top or bottom N% of the
values of the column of a frame. The column must be a numerical column.
:param column: a string for column name or an integer index
:param nPercent: a top or bottom percentage of the column values to return
:param grabTopN: -1 to grab bottom N percent and 1 to grab top N percent
        :returns: an H2OFrame containing two columns. The first column contains the original row indices where
the top/bottom values are extracted from. The second column contains the values.
"""
assert (nPercent >= 0) and (nPercent<=100.0), "nPercent must be between 0.0 and 100.0"
assert round(nPercent*0.01*self.nrows)>0, "Increase nPercent. Current value will result in top 0 row."
if isinstance(column, int):
if (column < 0) or (column>=self.ncols):
raise H2OValueError("Invalid column index H2OFrame")
else:
colIndex = column
else: # column is a column name
col_names = self.names
if column not in col_names:
raise H2OValueError("Column name not found H2OFrame")
else:
colIndex = col_names.index(column)
if not(self[colIndex].isnumeric()):
raise H2OValueError("Wrong column type! Selected column must be numeric.")
return H2OFrame._expr(expr=ExprNode("topn", self, colIndex, nPercent, grabTopN))
|
def topNBottomN(self, column=0, nPercent=10, grabTopN=-1):
"""
        Given a column name or one column index and a percent N, this function will return the top or bottom N% of the
values of the column of a frame. The column must be a numerical column.
:param column: a string for column name or an integer index
:param nPercent: a top or bottom percentage of the column values to return
:param grabTopN: -1 to grab bottom N percent and 1 to grab top N percent
        :returns: an H2OFrame containing two columns. The first column contains the original row indices where
the top/bottom values are extracted from. The second column contains the values.
"""
assert (nPercent >= 0) and (nPercent<=100.0), "nPercent must be between 0.0 and 100.0"
assert round(nPercent*0.01*self.nrows)>0, "Increase nPercent. Current value will result in top 0 row."
if isinstance(column, int):
if (column < 0) or (column>=self.ncols):
raise H2OValueError("Invalid column index H2OFrame")
else:
colIndex = column
else: # column is a column name
col_names = self.names
if column not in col_names:
raise H2OValueError("Column name not found H2OFrame")
else:
colIndex = col_names.index(column)
if not(self[colIndex].isnumeric()):
raise H2OValueError("Wrong column type! Selected column must be numeric.")
return H2OFrame._expr(expr=ExprNode("topn", self, colIndex, nPercent, grabTopN))
|
[
"Given",
"a",
"column",
"name",
"or",
"one",
"column",
"index",
"a",
"percent",
"N",
"this",
"function",
"will",
"return",
"the",
"top",
"or",
"bottom",
"N%",
"of",
"the",
"values",
"of",
"the",
"column",
"of",
"a",
"frame",
".",
"The",
"column",
"must",
"be",
"a",
"numerical",
"column",
".",
":",
"param",
"column",
":",
"a",
"string",
"for",
"column",
"name",
"or",
"an",
"integer",
"index",
":",
"param",
"nPercent",
":",
"a",
"top",
"or",
"bottom",
"percentage",
"of",
"the",
"column",
"values",
"to",
"return",
":",
"param",
"grabTopN",
":",
"-",
"1",
"to",
"grab",
"bottom",
"N",
"percent",
"and",
"1",
"to",
"grab",
"top",
"N",
"percent",
":",
"returns",
":",
"a",
"H2OFrame",
"containing",
"two",
"columns",
".",
"The",
"first",
"column",
"contains",
"the",
"original",
"row",
"indices",
"where",
"the",
"top",
"/",
"bottom",
"values",
"are",
"extracted",
"from",
".",
"The",
"second",
"column",
"contains",
"the",
"values",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L2824-L2853
|
[
"def",
"topNBottomN",
"(",
"self",
",",
"column",
"=",
"0",
",",
"nPercent",
"=",
"10",
",",
"grabTopN",
"=",
"-",
"1",
")",
":",
"assert",
"(",
"nPercent",
">=",
"0",
")",
"and",
"(",
"nPercent",
"<=",
"100.0",
")",
",",
"\"nPercent must be between 0.0 and 100.0\"",
"assert",
"round",
"(",
"nPercent",
"*",
"0.01",
"*",
"self",
".",
"nrows",
")",
">",
"0",
",",
"\"Increase nPercent. Current value will result in top 0 row.\"",
"if",
"isinstance",
"(",
"column",
",",
"int",
")",
":",
"if",
"(",
"column",
"<",
"0",
")",
"or",
"(",
"column",
">=",
"self",
".",
"ncols",
")",
":",
"raise",
"H2OValueError",
"(",
"\"Invalid column index H2OFrame\"",
")",
"else",
":",
"colIndex",
"=",
"column",
"else",
":",
"# column is a column name",
"col_names",
"=",
"self",
".",
"names",
"if",
"column",
"not",
"in",
"col_names",
":",
"raise",
"H2OValueError",
"(",
"\"Column name not found H2OFrame\"",
")",
"else",
":",
"colIndex",
"=",
"col_names",
".",
"index",
"(",
"column",
")",
"if",
"not",
"(",
"self",
"[",
"colIndex",
"]",
".",
"isnumeric",
"(",
")",
")",
":",
"raise",
"H2OValueError",
"(",
"\"Wrong column type! Selected column must be numeric.\"",
")",
"return",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"topn\"",
",",
"self",
",",
"colIndex",
",",
"nPercent",
",",
"grabTopN",
")",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OFrame.sub
|
Substitute the first occurrence of pattern in a string with replacement.
:param str pattern: A regular expression.
:param str replacement: A replacement string.
:param bool ignore_case: If True then pattern will match case-insensitively.
:returns: an H2OFrame with all values matching ``pattern`` replaced with ``replacement``.
|
h2o-py/h2o/frame.py
|
def sub(self, pattern, replacement, ignore_case=False):
"""
Substitute the first occurrence of pattern in a string with replacement.
:param str pattern: A regular expression.
:param str replacement: A replacement string.
:param bool ignore_case: If True then pattern will match case-insensitively.
:returns: an H2OFrame with all values matching ``pattern`` replaced with ``replacement``.
"""
return H2OFrame._expr(expr=ExprNode("replacefirst", self, pattern, replacement, ignore_case))
|
def sub(self, pattern, replacement, ignore_case=False):
"""
Substitute the first occurrence of pattern in a string with replacement.
:param str pattern: A regular expression.
:param str replacement: A replacement string.
:param bool ignore_case: If True then pattern will match case-insensitively.
:returns: an H2OFrame with all values matching ``pattern`` replaced with ``replacement``.
"""
return H2OFrame._expr(expr=ExprNode("replacefirst", self, pattern, replacement, ignore_case))
|
[
"Substitute",
"the",
"first",
"occurrence",
"of",
"pattern",
"in",
"a",
"string",
"with",
"replacement",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L2879-L2888
|
[
"def",
"sub",
"(",
"self",
",",
"pattern",
",",
"replacement",
",",
"ignore_case",
"=",
"False",
")",
":",
"return",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"replacefirst\"",
",",
"self",
",",
"pattern",
",",
"replacement",
",",
"ignore_case",
")",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OFrame.interaction
|
Categorical Interaction Feature Creation in H2O.
Creates a frame in H2O with n-th order interaction features between categorical columns, as specified by
the user.
:param factors: list of factor columns (either indices or column names).
:param bool pairwise: Whether to create pairwise interactions between factors (otherwise create one
higher-order interaction). Only applicable if there are 3 or more factors.
:param int max_factors: Max. number of factor levels in pair-wise interaction terms (if enforced, one extra
catch-all factor will be made).
:param int min_occurrence: Min. occurrence threshold for factor levels in pair-wise interaction terms.
:param str destination_frame: (internal) string indicating the key for the frame created.
:returns: an H2OFrame
|
h2o-py/h2o/frame.py
|
def interaction(self, factors, pairwise, max_factors, min_occurrence, destination_frame=None):
"""
Categorical Interaction Feature Creation in H2O.
Creates a frame in H2O with n-th order interaction features between categorical columns, as specified by
the user.
:param factors: list of factor columns (either indices or column names).
:param bool pairwise: Whether to create pairwise interactions between factors (otherwise create one
higher-order interaction). Only applicable if there are 3 or more factors.
:param int max_factors: Max. number of factor levels in pair-wise interaction terms (if enforced, one extra
catch-all factor will be made).
:param int min_occurrence: Min. occurrence threshold for factor levels in pair-wise interaction terms.
:param str destination_frame: (internal) string indicating the key for the frame created.
:returns: an H2OFrame
"""
return h2o.interaction(data=self, factors=factors, pairwise=pairwise, max_factors=max_factors,
min_occurrence=min_occurrence, destination_frame=destination_frame)
|
def interaction(self, factors, pairwise, max_factors, min_occurrence, destination_frame=None):
"""
Categorical Interaction Feature Creation in H2O.
Creates a frame in H2O with n-th order interaction features between categorical columns, as specified by
the user.
:param factors: list of factor columns (either indices or column names).
:param bool pairwise: Whether to create pairwise interactions between factors (otherwise create one
higher-order interaction). Only applicable if there are 3 or more factors.
:param int max_factors: Max. number of factor levels in pair-wise interaction terms (if enforced, one extra
catch-all factor will be made).
:param int min_occurrence: Min. occurrence threshold for factor levels in pair-wise interaction terms.
:param str destination_frame: (internal) string indicating the key for the frame created.
:returns: an H2OFrame
"""
return h2o.interaction(data=self, factors=factors, pairwise=pairwise, max_factors=max_factors,
min_occurrence=min_occurrence, destination_frame=destination_frame)
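A minimal usage sketch for interaction (not from the original source), assuming a running H2O cluster; the two categorical columns are made up for illustration:

import h2o
h2o.init()
df = h2o.H2OFrame({"color": ["red", "blue", "red", "green"],
                   "shape": ["box", "box", "ball", "ball"]})
# one higher-order interaction column combining color and shape
inter = df.interaction(factors=["color", "shape"], pairwise=False,
                       max_factors=100, min_occurrence=1)
inter.head()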
|
[
"Categorical",
"Interaction",
"Feature",
"Creation",
"in",
"H2O",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L2903-L2921
|
[
"def",
"interaction",
"(",
"self",
",",
"factors",
",",
"pairwise",
",",
"max_factors",
",",
"min_occurrence",
",",
"destination_frame",
"=",
"None",
")",
":",
"return",
"h2o",
".",
"interaction",
"(",
"data",
"=",
"self",
",",
"factors",
"=",
"factors",
",",
"pairwise",
"=",
"pairwise",
",",
"max_factors",
"=",
"max_factors",
",",
"min_occurrence",
"=",
"min_occurrence",
",",
"destination_frame",
"=",
"destination_frame",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OFrame.toupper
|
Translate characters from lower to upper case for a particular column.
:returns: new H2OFrame with all strings in the current frame converted to uppercase.
|
h2o-py/h2o/frame.py
|
def toupper(self):
"""
Translate characters from lower to upper case for a particular column.
        :returns: new H2OFrame with all strings in the current frame converted to uppercase.
"""
return H2OFrame._expr(expr=ExprNode("toupper", self), cache=self._ex._cache)
|
def toupper(self):
"""
Translate characters from lower to upper case for a particular column.
        :returns: new H2OFrame with all strings in the current frame converted to uppercase.
"""
return H2OFrame._expr(expr=ExprNode("toupper", self), cache=self._ex._cache)
|
[
"Translate",
"characters",
"from",
"lower",
"to",
"upper",
"case",
"for",
"a",
"particular",
"column",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L2924-L2930
|
[
"def",
"toupper",
"(",
"self",
")",
":",
"return",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"toupper\"",
",",
"self",
")",
",",
"cache",
"=",
"self",
".",
"_ex",
".",
"_cache",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OFrame.grep
|
Searches for matches to argument `pattern` within each element
of a string column.
Default behavior is to return indices of the elements matching the pattern. Parameter
`output_logical` can be used to return a logical vector indicating if the element matches
the pattern (1) or not (0).
:param str pattern: A character string containing a regular expression.
:param bool ignore_case: If True, then case is ignored during matching.
:param bool invert: If True, then identify elements that do not match the pattern.
:param bool output_logical: If True, then return logical vector of indicators instead of list of matching positions
:return: H2OFrame holding the matching positions or a logical list if `output_logical` is enabled.
|
h2o-py/h2o/frame.py
|
def grep(self, pattern, ignore_case=False, invert=False, output_logical=False):
"""
Searches for matches to argument `pattern` within each element
of a string column.
Default behavior is to return indices of the elements matching the pattern. Parameter
`output_logical` can be used to return a logical vector indicating if the element matches
the pattern (1) or not (0).
:param str pattern: A character string containing a regular expression.
:param bool ignore_case: If True, then case is ignored during matching.
:param bool invert: If True, then identify elements that do not match the pattern.
:param bool output_logical: If True, then return logical vector of indicators instead of list of matching positions
:return: H2OFrame holding the matching positions or a logical list if `output_logical` is enabled.
"""
return H2OFrame._expr(expr=ExprNode("grep", self, pattern, ignore_case, invert, output_logical))
|
def grep(self, pattern, ignore_case=False, invert=False, output_logical=False):
"""
Searches for matches to argument `pattern` within each element
of a string column.
Default behavior is to return indices of the elements matching the pattern. Parameter
`output_logical` can be used to return a logical vector indicating if the element matches
the pattern (1) or not (0).
:param str pattern: A character string containing a regular expression.
:param bool ignore_case: If True, then case is ignored during matching.
:param bool invert: If True, then identify elements that do not match the pattern.
:param bool output_logical: If True, then return logical vector of indicators instead of list of matching positions
:return: H2OFrame holding the matching positions or a logical list if `output_logical` is enabled.
"""
return H2OFrame._expr(expr=ExprNode("grep", self, pattern, ignore_case, invert, output_logical))
|
[
"Searches",
"for",
"matches",
"to",
"argument",
"pattern",
"within",
"each",
"element",
"of",
"a",
"string",
"column",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L2932-L2947
|
[
"def",
"grep",
"(",
"self",
",",
"pattern",
",",
"ignore_case",
"=",
"False",
",",
"invert",
"=",
"False",
",",
"output_logical",
"=",
"False",
")",
":",
"return",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"grep\"",
",",
"self",
",",
"pattern",
",",
"ignore_case",
",",
"invert",
",",
"output_logical",
")",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OFrame.scale
|
Center and/or scale the columns of the current frame.
:param center: If True, then demean the data. If False, no shifting is done. If ``center`` is a list of
numbers then shift each column by the corresponding amount.
:param scale: If True, then scale the data by each column's standard deviation. If False, no scaling
is done. If ``scale`` is a list of numbers, then scale each column by the requested amount.
:returns: an H2OFrame with scaled values from the current frame.
|
h2o-py/h2o/frame.py
|
def scale(self, center=True, scale=True):
"""
Center and/or scale the columns of the current frame.
:param center: If True, then demean the data. If False, no shifting is done. If ``center`` is a list of
numbers then shift each column by the corresponding amount.
:param scale: If True, then scale the data by each column's standard deviation. If False, no scaling
is done. If ``scale`` is a list of numbers, then scale each column by the requested amount.
:returns: an H2OFrame with scaled values from the current frame.
"""
return H2OFrame._expr(expr=ExprNode("scale", self, center, scale), cache=self._ex._cache)
|
def scale(self, center=True, scale=True):
"""
Center and/or scale the columns of the current frame.
:param center: If True, then demean the data. If False, no shifting is done. If ``center`` is a list of
numbers then shift each column by the corresponding amount.
:param scale: If True, then scale the data by each column's standard deviation. If False, no scaling
is done. If ``scale`` is a list of numbers, then scale each column by the requested amount.
:returns: an H2OFrame with scaled values from the current frame.
"""
return H2OFrame._expr(expr=ExprNode("scale", self, center, scale), cache=self._ex._cache)
|
[
"Center",
"and",
"/",
"or",
"scale",
"the",
"columns",
"of",
"the",
"current",
"frame",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L2975-L2985
|
[
"def",
"scale",
"(",
"self",
",",
"center",
"=",
"True",
",",
"scale",
"=",
"True",
")",
":",
"return",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"scale\"",
",",
"self",
",",
"center",
",",
"scale",
")",
",",
"cache",
"=",
"self",
".",
"_ex",
".",
"_cache",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OFrame.signif
|
Round doubles/floats to the given number of significant digits.
:param int digits: Number of significant digits to retain.
:returns: new H2OFrame with rounded values from the original frame.
|
h2o-py/h2o/frame.py
|
def signif(self, digits=6):
"""
Round doubles/floats to the given number of significant digits.
:param int digits: Number of significant digits to retain.
:returns: new H2OFrame with rounded values from the original frame.
"""
return H2OFrame._expr(expr=ExprNode("signif", self, digits), cache=self._ex._cache)
|
def signif(self, digits=6):
"""
Round doubles/floats to the given number of significant digits.
:param int digits: Number of significant digits to retain.
:returns: new H2OFrame with rounded values from the original frame.
"""
return H2OFrame._expr(expr=ExprNode("signif", self, digits), cache=self._ex._cache)
|
[
"Round",
"doubles",
"/",
"floats",
"to",
"the",
"given",
"number",
"of",
"significant",
"digits",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L2988-L2995
|
[
"def",
"signif",
"(",
"self",
",",
"digits",
"=",
"6",
")",
":",
"return",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"signif\"",
",",
"self",
",",
"digits",
")",
",",
"cache",
"=",
"self",
".",
"_ex",
".",
"_cache",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OFrame.na_omit
|
Remove rows with NAs from the H2OFrame.
:returns: new H2OFrame with all rows from the original frame containing any NAs removed.
|
h2o-py/h2o/frame.py
|
def na_omit(self):
"""
Remove rows with NAs from the H2OFrame.
:returns: new H2OFrame with all rows from the original frame containing any NAs removed.
"""
fr = H2OFrame._expr(expr=ExprNode("na.omit", self), cache=self._ex._cache)
fr._ex._cache.nrows = -1
return fr
|
def na_omit(self):
"""
Remove rows with NAs from the H2OFrame.
:returns: new H2OFrame with all rows from the original frame containing any NAs removed.
"""
fr = H2OFrame._expr(expr=ExprNode("na.omit", self), cache=self._ex._cache)
fr._ex._cache.nrows = -1
return fr
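A minimal usage sketch for na_omit (not from the original source), assuming a running H2O cluster; the frame with missing values is made up (Python None parses as NA):

import h2o
h2o.init()
df = h2o.H2OFrame({"x": [1, None, 3], "y": [4, 5, None]})
clean = df.na_omit()   # keeps only the rows that contain no NAs
clean.head()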
|
[
"Remove",
"rows",
"with",
"NAs",
"from",
"the",
"H2OFrame",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L3030-L3038
|
[
"def",
"na_omit",
"(",
"self",
")",
":",
"fr",
"=",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"na.omit\"",
",",
"self",
")",
",",
"cache",
"=",
"self",
".",
"_ex",
".",
"_cache",
")",
"fr",
".",
"_ex",
".",
"_cache",
".",
"nrows",
"=",
"-",
"1",
"return",
"fr"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OFrame.difflag1
|
Conduct a diff-1 transform on a numeric frame column.
:returns: an H2OFrame where each element is equal to the corresponding element in the source
frame minus the previous-row element in the same frame.
|
h2o-py/h2o/frame.py
|
def difflag1(self):
"""
Conduct a diff-1 transform on a numeric frame column.
:returns: an H2OFrame where each element is equal to the corresponding element in the source
frame minus the previous-row element in the same frame.
"""
if self.ncols > 1:
raise H2OValueError("Only single-column frames supported")
if self.types[self.columns[0]] not in {"real", "int", "bool"}:
raise H2OValueError("Numeric column expected")
fr = H2OFrame._expr(expr=ExprNode("difflag1", self), cache=self._ex._cache)
return fr
|
def difflag1(self):
"""
Conduct a diff-1 transform on a numeric frame column.
:returns: an H2OFrame where each element is equal to the corresponding element in the source
frame minus the previous-row element in the same frame.
"""
if self.ncols > 1:
raise H2OValueError("Only single-column frames supported")
if self.types[self.columns[0]] not in {"real", "int", "bool"}:
raise H2OValueError("Numeric column expected")
fr = H2OFrame._expr(expr=ExprNode("difflag1", self), cache=self._ex._cache)
return fr
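A minimal usage sketch for difflag1 (not from the original source), assuming a running H2O cluster; the single numeric column is made up for illustration:

import h2o
h2o.init()
df = h2o.H2OFrame({"x": [1.0, 4.0, 9.0, 16.0]})
diffs = df["x"].difflag1()   # each element minus the element in the previous row
diffs.head()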
|
[
"Conduct",
"a",
"diff",
"-",
"1",
"transform",
"on",
"a",
"numeric",
"frame",
"column",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L3041-L3053
|
[
"def",
"difflag1",
"(",
"self",
")",
":",
"if",
"self",
".",
"ncols",
">",
"1",
":",
"raise",
"H2OValueError",
"(",
"\"Only single-column frames supported\"",
")",
"if",
"self",
".",
"types",
"[",
"self",
".",
"columns",
"[",
"0",
"]",
"]",
"not",
"in",
"{",
"\"real\"",
",",
"\"int\"",
",",
"\"bool\"",
"}",
":",
"raise",
"H2OValueError",
"(",
"\"Numeric column expected\"",
")",
"fr",
"=",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"difflag1\"",
",",
"self",
")",
",",
"cache",
"=",
"self",
".",
"_ex",
".",
"_cache",
")",
"return",
"fr"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OFrame.isna
|
For each element in an H2OFrame, determine if it is NA or not.
:returns: an H2OFrame of 1s and 0s, where 1s mean the values were NAs.
|
h2o-py/h2o/frame.py
|
def isna(self):
"""
For each element in an H2OFrame, determine if it is NA or not.
:returns: an H2OFrame of 1s and 0s, where 1s mean the values were NAs.
"""
fr = H2OFrame._expr(expr=ExprNode("is.na", self))
fr._ex._cache.nrows = self._ex._cache.nrows
fr._ex._cache.ncols = self._ex._cache.ncols
if self._ex._cache.names:
fr._ex._cache.names = ["isNA(%s)" % n for n in self._ex._cache.names]
fr._ex._cache.types = {"isNA(%s)" % n: "int" for n in self._ex._cache.names}
return fr
|
def isna(self):
"""
For each element in an H2OFrame, determine if it is NA or not.
:returns: an H2OFrame of 1s and 0s, where 1s mean the values were NAs.
"""
fr = H2OFrame._expr(expr=ExprNode("is.na", self))
fr._ex._cache.nrows = self._ex._cache.nrows
fr._ex._cache.ncols = self._ex._cache.ncols
if self._ex._cache.names:
fr._ex._cache.names = ["isNA(%s)" % n for n in self._ex._cache.names]
fr._ex._cache.types = {"isNA(%s)" % n: "int" for n in self._ex._cache.names}
return fr
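A minimal usage sketch for isna (not from the original source), assuming a running H2O cluster; the frame with a missing value is made up for illustration:

import h2o
h2o.init()
df = h2o.H2OFrame({"x": [1, None, 3]})
mask = df.isna()   # 1 where the value is NA, 0 otherwise
mask.head()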
|
[
"For",
"each",
"element",
"in",
"an",
"H2OFrame",
"determine",
"if",
"it",
"is",
"NA",
"or",
"not",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L3056-L3068
|
[
"def",
"isna",
"(",
"self",
")",
":",
"fr",
"=",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"is.na\"",
",",
"self",
")",
")",
"fr",
".",
"_ex",
".",
"_cache",
".",
"nrows",
"=",
"self",
".",
"_ex",
".",
"_cache",
".",
"nrows",
"fr",
".",
"_ex",
".",
"_cache",
".",
"ncols",
"=",
"self",
".",
"_ex",
".",
"_cache",
".",
"ncols",
"if",
"self",
".",
"_ex",
".",
"_cache",
".",
"names",
":",
"fr",
".",
"_ex",
".",
"_cache",
".",
"names",
"=",
"[",
"\"isNA(%s)\"",
"%",
"n",
"for",
"n",
"in",
"self",
".",
"_ex",
".",
"_cache",
".",
"names",
"]",
"fr",
".",
"_ex",
".",
"_cache",
".",
"types",
"=",
"{",
"\"isNA(%s)\"",
"%",
"n",
":",
"\"int\"",
"for",
"n",
"in",
"self",
".",
"_ex",
".",
"_cache",
".",
"names",
"}",
"return",
"fr"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OFrame.minute
|
Extract the "minute" part from a date column.
:returns: a single-column H2OFrame containing the "minute" part from the source frame.
|
h2o-py/h2o/frame.py
|
def minute(self):
"""
Extract the "minute" part from a date column.
:returns: a single-column H2OFrame containing the "minute" part from the source frame.
"""
fr = H2OFrame._expr(expr=ExprNode("minute", self), cache=self._ex._cache)
if fr._ex._cache.types_valid():
fr._ex._cache.types = {k: "int" for k in self._ex._cache.types.keys()}
return fr
|
def minute(self):
"""
Extract the "minute" part from a date column.
:returns: a single-column H2OFrame containing the "minute" part from the source frame.
"""
fr = H2OFrame._expr(expr=ExprNode("minute", self), cache=self._ex._cache)
if fr._ex._cache.types_valid():
fr._ex._cache.types = {k: "int" for k in self._ex._cache.types.keys()}
return fr
|
[
"Extract",
"the",
"minute",
"part",
"from",
"a",
"date",
"column",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L3143-L3152
|
[
"def",
"minute",
"(",
"self",
")",
":",
"fr",
"=",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"minute\"",
",",
"self",
")",
",",
"cache",
"=",
"self",
".",
"_ex",
".",
"_cache",
")",
"if",
"fr",
".",
"_ex",
".",
"_cache",
".",
"types_valid",
"(",
")",
":",
"fr",
".",
"_ex",
".",
"_cache",
".",
"types",
"=",
"{",
"k",
":",
"\"int\"",
"for",
"k",
"in",
"self",
".",
"_ex",
".",
"_cache",
".",
"types",
".",
"keys",
"(",
")",
"}",
"return",
"fr"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OFrame.runif
|
Generate a column of random numbers drawn from a uniform distribution [0,1) and
having the same data layout as the source frame.
:param int seed: seed for the random number generator.
:returns: Single-column H2OFrame filled with doubles sampled uniformly from [0,1).
|
h2o-py/h2o/frame.py
|
def runif(self, seed=None):
"""
Generate a column of random numbers drawn from a uniform distribution [0,1) and
having the same data layout as the source frame.
:param int seed: seed for the random number generator.
:returns: Single-column H2OFrame filled with doubles sampled uniformly from [0,1).
"""
fr = H2OFrame._expr(expr=ExprNode("h2o.runif", self, -1 if seed is None else seed))
fr._ex._cache.ncols = 1
fr._ex._cache.nrows = self.nrow
return fr
|
def runif(self, seed=None):
"""
Generate a column of random numbers drawn from a uniform distribution [0,1) and
having the same data layout as the source frame.
:param int seed: seed for the random number generator.
:returns: Single-column H2OFrame filled with doubles sampled uniformly from [0,1).
"""
fr = H2OFrame._expr(expr=ExprNode("h2o.runif", self, -1 if seed is None else seed))
fr._ex._cache.ncols = 1
fr._ex._cache.nrows = self.nrow
return fr
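A minimal usage sketch for runif (not from the original source), assuming a running H2O cluster; the frame is made up, and the random column is used for a simple train/test split:

import h2o
h2o.init()
df = h2o.H2OFrame({"x": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]})
r = df.runif(seed=42)     # one uniform [0,1) value per row
train = df[r < 0.8]
test = df[r >= 0.8]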
|
[
"Generate",
"a",
"column",
"of",
"random",
"numbers",
"drawn",
"from",
"a",
"uniform",
"distribution",
"[",
"0",
"1",
")",
"and",
"having",
"the",
"same",
"data",
"layout",
"as",
"the",
"source",
"frame",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L3167-L3179
|
[
"def",
"runif",
"(",
"self",
",",
"seed",
"=",
"None",
")",
":",
"fr",
"=",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"h2o.runif\"",
",",
"self",
",",
"-",
"1",
"if",
"seed",
"is",
"None",
"else",
"seed",
")",
")",
"fr",
".",
"_ex",
".",
"_cache",
".",
"ncols",
"=",
"1",
"fr",
".",
"_ex",
".",
"_cache",
".",
"nrows",
"=",
"self",
".",
"nrow",
"return",
"fr"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OFrame.stratified_split
|
Construct a column that can be used to perform a random stratified split.
:param float test_frac: The fraction of rows that will belong to the "test".
:param int seed: The seed for the random number generator.
:returns: an H2OFrame having single categorical column with two levels: ``"train"`` and ``"test"``.
:examples:
>>> stratsplit = df["y"].stratified_split(test_frac=0.3, seed=12349453)
>>> train = df[stratsplit=="train"]
>>> test = df[stratsplit=="test"]
>>>
>>> # check that the distributions among the initial frame, and the
>>> # train/test frames match
>>> df["y"].table()["Count"] / df["y"].table()["Count"].sum()
>>> train["y"].table()["Count"] / train["y"].table()["Count"].sum()
>>> test["y"].table()["Count"] / test["y"].table()["Count"].sum()
|
h2o-py/h2o/frame.py
|
def stratified_split(self, test_frac=0.2, seed=-1):
"""
Construct a column that can be used to perform a random stratified split.
:param float test_frac: The fraction of rows that will belong to the "test".
:param int seed: The seed for the random number generator.
:returns: an H2OFrame having single categorical column with two levels: ``"train"`` and ``"test"``.
:examples:
>>> stratsplit = df["y"].stratified_split(test_frac=0.3, seed=12349453)
>>> train = df[stratsplit=="train"]
>>> test = df[stratsplit=="test"]
>>>
>>> # check that the distributions among the initial frame, and the
>>> # train/test frames match
>>> df["y"].table()["Count"] / df["y"].table()["Count"].sum()
>>> train["y"].table()["Count"] / train["y"].table()["Count"].sum()
>>> test["y"].table()["Count"] / test["y"].table()["Count"].sum()
"""
return H2OFrame._expr(expr=ExprNode('h2o.random_stratified_split', self, test_frac, seed))
|
def stratified_split(self, test_frac=0.2, seed=-1):
"""
Construct a column that can be used to perform a random stratified split.
:param float test_frac: The fraction of rows that will belong to the "test".
:param int seed: The seed for the random number generator.
:returns: an H2OFrame having single categorical column with two levels: ``"train"`` and ``"test"``.
:examples:
>>> stratsplit = df["y"].stratified_split(test_frac=0.3, seed=12349453)
>>> train = df[stratsplit=="train"]
>>> test = df[stratsplit=="test"]
>>>
>>> # check that the distributions among the initial frame, and the
>>> # train/test frames match
>>> df["y"].table()["Count"] / df["y"].table()["Count"].sum()
>>> train["y"].table()["Count"] / train["y"].table()["Count"].sum()
>>> test["y"].table()["Count"] / test["y"].table()["Count"].sum()
"""
return H2OFrame._expr(expr=ExprNode('h2o.random_stratified_split', self, test_frac, seed))
|
[
"Construct",
"a",
"column",
"that",
"can",
"be",
"used",
"to",
"perform",
"a",
"random",
"stratified",
"split",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L3182-L3202
|
[
"def",
"stratified_split",
"(",
"self",
",",
"test_frac",
"=",
"0.2",
",",
"seed",
"=",
"-",
"1",
")",
":",
"return",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"'h2o.random_stratified_split'",
",",
"self",
",",
"test_frac",
",",
"seed",
")",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OFrame.match
|
Make a vector of the positions of (first) matches of its first argument in its second.
Only applicable to single-column categorical/string frames.
:param List table: the list of items to match against
:param int nomatch: value that should be returned when there is no match.
:returns: a new H2OFrame containing, for each cell of the source frame, the position at which that cell's value
 first occurs in ``table``, or ``nomatch`` if the value is not found in ``table``.
|
h2o-py/h2o/frame.py
|
def match(self, table, nomatch=0):
"""
Make a vector of the positions of (first) matches of its first argument in its second.
Only applicable to single-column categorical/string frames.
:param List table: the list of items to match against
:param int nomatch: value that should be returned when there is no match.
        :returns: a new H2OFrame containing, for each cell of the source frame, the position at which that cell's value
         first occurs in ``table``, or ``nomatch`` if the value is not found in ``table``.
"""
return H2OFrame._expr(expr=ExprNode("match", self, table, nomatch, None))
|
def match(self, table, nomatch=0):
"""
Make a vector of the positions of (first) matches of its first argument in its second.
Only applicable to single-column categorical/string frames.
:param List table: the list of items to match against
:param int nomatch: value that should be returned when there is no match.
        :returns: a new H2OFrame containing, for each cell of the source frame, the position at which that cell's value
         first occurs in ``table``, or ``nomatch`` if the value is not found in ``table``.
"""
return H2OFrame._expr(expr=ExprNode("match", self, table, nomatch, None))
|
[
"Make",
"a",
"vector",
"of",
"the",
"positions",
"of",
"(",
"first",
")",
"matches",
"of",
"its",
"first",
"argument",
"in",
"its",
"second",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L3205-L3216
|
[
"def",
"match",
"(",
"self",
",",
"table",
",",
"nomatch",
"=",
"0",
")",
":",
"return",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"match\"",
",",
"self",
",",
"table",
",",
"nomatch",
",",
"None",
")",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OFrame.cut
|
Cut a numeric vector into categorical "buckets".
This method is only applicable to a single-column numeric frame.
:param List[float] breaks: The cut points in the numeric vector.
:param List[str] labels: Labels for categorical levels produced. Defaults to set notation of
intervals defined by the breaks.
:param bool include_lowest: By default, cuts are defined as intervals ``(lo, hi]``. If this parameter
is True, then the interval becomes ``[lo, hi]``.
:param bool right: Include the high value: ``(lo, hi]``. If False, get ``(lo, hi)``.
:param int dig_lab: Number of digits following the decimal point to consider.
:returns: Single-column H2OFrame of categorical data.
|
h2o-py/h2o/frame.py
|
def cut(self, breaks, labels=None, include_lowest=False, right=True, dig_lab=3):
"""
Cut a numeric vector into categorical "buckets".
This method is only applicable to a single-column numeric frame.
:param List[float] breaks: The cut points in the numeric vector.
:param List[str] labels: Labels for categorical levels produced. Defaults to set notation of
intervals defined by the breaks.
:param bool include_lowest: By default, cuts are defined as intervals ``(lo, hi]``. If this parameter
is True, then the interval becomes ``[lo, hi]``.
:param bool right: Include the high value: ``(lo, hi]``. If False, get ``(lo, hi)``.
:param int dig_lab: Number of digits following the decimal point to consider.
:returns: Single-column H2OFrame of categorical data.
"""
assert_is_type(breaks, [numeric])
if self.ncols != 1: raise H2OValueError("Single-column frame is expected")
if self.types[self.names[0]] not in {"int", "real"}: raise H2OValueError("A numeric column is expected")
fr = H2OFrame._expr(expr=ExprNode("cut", self, breaks, labels, include_lowest, right, dig_lab),
cache=self._ex._cache)
fr._ex._cache.types = {k: "enum" for k in self.names}
return fr
|
def cut(self, breaks, labels=None, include_lowest=False, right=True, dig_lab=3):
"""
Cut a numeric vector into categorical "buckets".
This method is only applicable to a single-column numeric frame.
:param List[float] breaks: The cut points in the numeric vector.
:param List[str] labels: Labels for categorical levels produced. Defaults to set notation of
intervals defined by the breaks.
:param bool include_lowest: By default, cuts are defined as intervals ``(lo, hi]``. If this parameter
is True, then the interval becomes ``[lo, hi]``.
:param bool right: Include the high value: ``(lo, hi]``. If False, get ``(lo, hi)``.
:param int dig_lab: Number of digits following the decimal point to consider.
:returns: Single-column H2OFrame of categorical data.
"""
assert_is_type(breaks, [numeric])
if self.ncols != 1: raise H2OValueError("Single-column frame is expected")
if self.types[self.names[0]] not in {"int", "real"}: raise H2OValueError("A numeric column is expected")
fr = H2OFrame._expr(expr=ExprNode("cut", self, breaks, labels, include_lowest, right, dig_lab),
cache=self._ex._cache)
fr._ex._cache.types = {k: "enum" for k in self.names}
return fr
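A minimal usage sketch for cut (not from the original source), assuming a running H2O cluster; the numeric column, break points, and labels are made up for illustration:

import h2o
h2o.init()
df = h2o.H2OFrame({"x": [0.5, 1.5, 2.5, 3.5]})
# four labels, one for each of the four intervals defined by the five breaks
binned = df["x"].cut(breaks=[0, 1, 2, 3, 4],
                     labels=["low", "mid_low", "mid_high", "high"])
binned.head()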
|
[
"Cut",
"a",
"numeric",
"vector",
"into",
"categorical",
"buckets",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L3219-L3241
|
[
"def",
"cut",
"(",
"self",
",",
"breaks",
",",
"labels",
"=",
"None",
",",
"include_lowest",
"=",
"False",
",",
"right",
"=",
"True",
",",
"dig_lab",
"=",
"3",
")",
":",
"assert_is_type",
"(",
"breaks",
",",
"[",
"numeric",
"]",
")",
"if",
"self",
".",
"ncols",
"!=",
"1",
":",
"raise",
"H2OValueError",
"(",
"\"Single-column frame is expected\"",
")",
"if",
"self",
".",
"types",
"[",
"self",
".",
"names",
"[",
"0",
"]",
"]",
"not",
"in",
"{",
"\"int\"",
",",
"\"real\"",
"}",
":",
"raise",
"H2OValueError",
"(",
"\"A numeric column is expected\"",
")",
"fr",
"=",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"cut\"",
",",
"self",
",",
"breaks",
",",
"labels",
",",
"include_lowest",
",",
"right",
",",
"dig_lab",
")",
",",
"cache",
"=",
"self",
".",
"_ex",
".",
"_cache",
")",
"fr",
".",
"_ex",
".",
"_cache",
".",
"types",
"=",
"{",
"k",
":",
"\"enum\"",
"for",
"k",
"in",
"self",
".",
"names",
"}",
"return",
"fr"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OFrame.idxmax
|
Get the index of the max value in a column or row
:param bool skipna: If True (default), then NAs are ignored during the search. Otherwise presence
of NAs renders the entire result NA.
:param int axis: Direction of finding the max index. If 0 (default), then the max index is searched columnwise, and the
result is a frame with 1 row and number of columns as in the original frame. If 1, then the max index is searched
rowwise and the result is a frame with 1 column, and number of rows equal to the number of rows in the original frame.
:returns: either a list of max index values per-column or an H2OFrame containing max index values
per-row from the original frame.
|
h2o-py/h2o/frame.py
|
def idxmax(self, skipna=True, axis=0):
"""
Get the index of the max value in a column or row
:param bool skipna: If True (default), then NAs are ignored during the search. Otherwise presence
of NAs renders the entire result NA.
:param int axis: Direction of finding the max index. If 0 (default), then the max index is searched columnwise, and the
result is a frame with 1 row and number of columns as in the original frame. If 1, then the max index is searched
rowwise and the result is a frame with 1 column, and number of rows equal to the number of rows in the original frame.
:returns: either a list of max index values per-column or an H2OFrame containing max index values
per-row from the original frame.
"""
return H2OFrame._expr(expr=ExprNode("which.max", self, skipna, axis))
|
def idxmax(self, skipna=True, axis=0):
"""
Get the index of the max value in a column or row
:param bool skipna: If True (default), then NAs are ignored during the search. Otherwise presence
of NAs renders the entire result NA.
:param int axis: Direction of finding the max index. If 0 (default), then the max index is searched columnwise, and the
result is a frame with 1 row and number of columns as in the original frame. If 1, then the max index is searched
rowwise and the result is a frame with 1 column, and number of rows equal to the number of rows in the original frame.
:returns: either a list of max index values per-column or an H2OFrame containing max index values
per-row from the original frame.
"""
return H2OFrame._expr(expr=ExprNode("which.max", self, skipna, axis))
|
[
"Get",
"the",
"index",
"of",
"the",
"max",
"value",
"in",
"a",
"column",
"or",
"row"
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L3256-L3268
|
[
"def",
"idxmax",
"(",
"self",
",",
"skipna",
"=",
"True",
",",
"axis",
"=",
"0",
")",
":",
"return",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"which.max\"",
",",
"self",
",",
"skipna",
",",
"axis",
")",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OFrame.ifelse
|
Equivalent to ``[y if t else n for t,y,n in zip(self,yes,no)]``.
Based on the booleans in the test vector, the output has the values of the
yes and no vectors interleaved (or merged together). All Frames must have
the same row count. Single column frames are broadened to match wider
Frames. Scalars are allowed, and are also broadened to match wider frames.
:param yes: Frame to use if ``test`` is true; may be a scalar or single column
:param no: Frame to use if ``test`` is false; may be a scalar or single column
:returns: an H2OFrame of the merged yes/no frames/scalars according to the test input frame.
|
h2o-py/h2o/frame.py
|
def ifelse(self, yes, no):
"""
Equivalent to ``[y if t else n for t,y,n in zip(self,yes,no)]``.
Based on the booleans in the test vector, the output has the values of the
yes and no vectors interleaved (or merged together). All Frames must have
the same row count. Single column frames are broadened to match wider
Frames. Scalars are allowed, and are also broadened to match wider frames.
:param yes: Frame to use if ``test`` is true; may be a scalar or single column
:param no: Frame to use if ``test`` is false; may be a scalar or single column
:returns: an H2OFrame of the merged yes/no frames/scalars according to the test input frame.
"""
return H2OFrame._expr(expr=ExprNode("ifelse", self, yes, no))
|
def ifelse(self, yes, no):
"""
Equivalent to ``[y if t else n for t,y,n in zip(self,yes,no)]``.
Based on the booleans in the test vector, the output has the values of the
yes and no vectors interleaved (or merged together). All Frames must have
the same row count. Single column frames are broadened to match wider
Frames. Scalars are allowed, and are also broadened to match wider frames.
:param yes: Frame to use if ``test`` is true; may be a scalar or single column
:param no: Frame to use if ``test`` is false; may be a scalar or single column
:returns: an H2OFrame of the merged yes/no frames/scalars according to the test input frame.
"""
return H2OFrame._expr(expr=ExprNode("ifelse", self, yes, no))
|
[
"Equivalent",
"to",
"[",
"y",
"if",
"t",
"else",
"n",
"for",
"t",
"y",
"n",
"in",
"zip",
"(",
"self",
"yes",
"no",
")",
"]",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L3285-L3299
|
[
"def",
"ifelse",
"(",
"self",
",",
"yes",
",",
"no",
")",
":",
"return",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"ifelse\"",
",",
"self",
",",
"yes",
",",
"no",
")",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OFrame.apply
|
Apply a lambda expression to an H2OFrame.
:param fun: a lambda expression to be applied per row or per column.
:param axis: 0 = apply to each column; 1 = apply to each row
:returns: a new H2OFrame with the results of applying ``fun`` to the current frame.
|
h2o-py/h2o/frame.py
|
def apply(self, fun=None, axis=0):
"""
Apply a lambda expression to an H2OFrame.
:param fun: a lambda expression to be applied per row or per column.
:param axis: 0 = apply to each column; 1 = apply to each row
:returns: a new H2OFrame with the results of applying ``fun`` to the current frame.
"""
from .astfun import lambda_to_expr
assert_is_type(axis, 0, 1)
assert_is_type(fun, FunctionType)
assert_satisfies(fun, fun.__name__ == "<lambda>")
res = lambda_to_expr(fun)
return H2OFrame._expr(expr=ExprNode("apply", self, 1 + (axis == 0), *res))
|
def apply(self, fun=None, axis=0):
"""
Apply a lambda expression to an H2OFrame.
:param fun: a lambda expression to be applied per row or per column.
:param axis: 0 = apply to each column; 1 = apply to each row
:returns: a new H2OFrame with the results of applying ``fun`` to the current frame.
"""
from .astfun import lambda_to_expr
assert_is_type(axis, 0, 1)
assert_is_type(fun, FunctionType)
assert_satisfies(fun, fun.__name__ == "<lambda>")
res = lambda_to_expr(fun)
return H2OFrame._expr(expr=ExprNode("apply", self, 1 + (axis == 0), *res))
|
[
"Apply",
"a",
"lambda",
"expression",
"to",
"an",
"H2OFrame",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L3302-L3315
|
[
"def",
"apply",
"(",
"self",
",",
"fun",
"=",
"None",
",",
"axis",
"=",
"0",
")",
":",
"from",
".",
"astfun",
"import",
"lambda_to_expr",
"assert_is_type",
"(",
"axis",
",",
"0",
",",
"1",
")",
"assert_is_type",
"(",
"fun",
",",
"FunctionType",
")",
"assert_satisfies",
"(",
"fun",
",",
"fun",
".",
"__name__",
"==",
"\"<lambda>\"",
")",
"res",
"=",
"lambda_to_expr",
"(",
"fun",
")",
"return",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"apply\"",
",",
"self",
",",
"1",
"+",
"(",
"axis",
"==",
"0",
")",
",",
"*",
"res",
")",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OFrame.mktime
|
Deprecated, use :func:`moment` instead.
This function was left for backward-compatibility purposes only. It is
not very stable, and counterintuitively uses 0-based months and days,
so "January 4th, 2001" should be entered as ``mktime(2001, 0, 3)``.
|
h2o-py/h2o/frame.py
|
def mktime(year=1970, month=0, day=0, hour=0, minute=0, second=0, msec=0):
"""
Deprecated, use :func:`moment` instead.
This function was left for backward-compatibility purposes only. It is
not very stable, and counterintuitively uses 0-based months and days,
so "January 4th, 2001" should be entered as ``mktime(2001, 0, 3)``.
"""
return H2OFrame._expr(ExprNode("mktime", year, month, day, hour, minute, second, msec))
|
def mktime(year=1970, month=0, day=0, hour=0, minute=0, second=0, msec=0):
"""
Deprecated, use :func:`moment` instead.
This function was left for backward-compatibility purposes only. It is
not very stable, and counterintuitively uses 0-based months and days,
so "January 4th, 2001" should be entered as ``mktime(2001, 0, 3)``.
"""
return H2OFrame._expr(ExprNode("mktime", year, month, day, hour, minute, second, msec))
|
[
"Deprecated",
"use",
":",
"func",
":",
"moment",
"instead",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L3326-L3334
|
[
"def",
"mktime",
"(",
"year",
"=",
"1970",
",",
"month",
"=",
"0",
",",
"day",
"=",
"0",
",",
"hour",
"=",
"0",
",",
"minute",
"=",
"0",
",",
"second",
"=",
"0",
",",
"msec",
"=",
"0",
")",
":",
"return",
"H2OFrame",
".",
"_expr",
"(",
"ExprNode",
"(",
"\"mktime\"",
",",
"year",
",",
"month",
",",
"day",
",",
"hour",
",",
"minute",
",",
"second",
",",
"msec",
")",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OFrame.from_python
|
[DEPRECATED] Use constructor ``H2OFrame()`` instead.
|
h2o-py/h2o/frame.py
|
def from_python(python_obj, destination_frame=None, header=0, separator=",", column_names=None,
column_types=None, na_strings=None):
"""[DEPRECATED] Use constructor ``H2OFrame()`` instead."""
return H2OFrame(python_obj, destination_frame, header, separator, column_names, column_types,
na_strings)
|
def from_python(python_obj, destination_frame=None, header=0, separator=",", column_names=None,
column_types=None, na_strings=None):
"""[DEPRECATED] Use constructor ``H2OFrame()`` instead."""
return H2OFrame(python_obj, destination_frame, header, separator, column_names, column_types,
na_strings)
|
[
"[",
"DEPRECATED",
"]",
"Use",
"constructor",
"H2OFrame",
"()",
"instead",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L3383-L3387
|
[
"def",
"from_python",
"(",
"python_obj",
",",
"destination_frame",
"=",
"None",
",",
"header",
"=",
"0",
",",
"separator",
"=",
"\",\"",
",",
"column_names",
"=",
"None",
",",
"column_types",
"=",
"None",
",",
"na_strings",
"=",
"None",
")",
":",
"return",
"H2OFrame",
"(",
"python_obj",
",",
"destination_frame",
",",
"header",
",",
"separator",
",",
"column_names",
",",
"column_types",
",",
"na_strings",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
parse_text
|
Parse code from a string of text.
|
h2o-bindings/bin/pyparser.py
|
def parse_text(text):
"""Parse code from a string of text."""
assert isinstance(text, _str_type), "`text` parameter should be a string, got %r" % type(text)
gen = iter(text.splitlines(True)) # True = keep newlines
readline = gen.next if hasattr(gen, "next") else gen.__next__
return Code(_tokenize(readline))
|
def parse_text(text):
"""Parse code from a string of text."""
assert isinstance(text, _str_type), "`text` parameter should be a string, got %r" % type(text)
gen = iter(text.splitlines(True)) # True = keep newlines
readline = gen.next if hasattr(gen, "next") else gen.__next__
return Code(_tokenize(readline))
|
[
"Parse",
"code",
"from",
"a",
"string",
"of",
"text",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-bindings/bin/pyparser.py#L42-L47
|
[
"def",
"parse_text",
"(",
"text",
")",
":",
"assert",
"isinstance",
"(",
"text",
",",
"_str_type",
")",
",",
"\"`text` parameter should be a string, got %r\"",
"%",
"type",
"(",
"text",
")",
"gen",
"=",
"iter",
"(",
"text",
".",
"splitlines",
"(",
"True",
")",
")",
"# True = keep newlines",
"readline",
"=",
"gen",
".",
"next",
"if",
"hasattr",
"(",
"gen",
",",
"\"next\"",
")",
"else",
"gen",
".",
"__next__",
"return",
"Code",
"(",
"_tokenize",
"(",
"readline",
")",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
parse_file
|
Parse the provided file, and return Code object.
|
h2o-bindings/bin/pyparser.py
|
def parse_file(filename):
"""Parse the provided file, and return Code object."""
assert isinstance(filename, _str_type), "`filename` parameter should be a string, got %r" % type(filename)
with open(filename, "rt", encoding="utf-8") as f:
return Code(_tokenize(f.readline))
|
def parse_file(filename):
"""Parse the provided file, and return Code object."""
assert isinstance(filename, _str_type), "`filename` parameter should be a string, got %r" % type(filename)
with open(filename, "rt", encoding="utf-8") as f:
return Code(_tokenize(f.readline))
|
[
"Parse",
"the",
"provided",
"file",
"and",
"return",
"Code",
"object",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-bindings/bin/pyparser.py#L50-L54
|
[
"def",
"parse_file",
"(",
"filename",
")",
":",
"assert",
"isinstance",
"(",
"filename",
",",
"_str_type",
")",
",",
"\"`filename` parameter should be a string, got %r\"",
"%",
"type",
"(",
"filename",
")",
"with",
"open",
"(",
"filename",
",",
"\"rt\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"f",
":",
"return",
"Code",
"(",
"_tokenize",
"(",
"f",
".",
"readline",
")",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
_tokenize
|
Parse any object accessible through a readline interface into a list of :class:`Token`s.
This function is very similar to :func:`tokenize.generate_tokens`, with few differences. First, the returned list
is a list of :class:`Token` objects instead of 5-tuples. This may be slightly less efficient, but far more
convenient for subsequent parsing. Second, the list of tokens is **normalized** to better match the way humans
understand the code, not what is more suitable for the compiler.
To better understand the normalization process, consider the following code example::
def test():
pass
# funny function
def test_funny():
# not so funny
pass
Normally, Python will parse it as the following sequence of tokens:
['def', 'test', '(', ')', ':', NEWLINE, INDENT, 'pass', NEWLINE, NL, '# funny function', NL, DEDENT,
'def', 'test_funny', '(', ')', ':', NEWLINE, '# not so funny', NL, INDENT, 'pass', NEWLINE, DEDENT, END]
The problem here is that the DEDENT token is generated not after the first 'pass' but after the comment, which
means that if we treat INDENTs / DEDENTs as block boundaries, then the comment "belongs" to the first function.
This is contrary to how most people would understand this code. Similarly, the second comment visually goes
after the INDENT, not before. Consequently, after "normalization" this function will return the following list
of tokens:
['def', 'test', '(', ')', ':', NEWLINE, INDENT, 'pass', NEWLINE, DEDENT, NL, '# funny function', NL,
'def', 'test_funny', '(', ')', ':', NEWLINE, INDENT, '# not so funny', NL, 'pass', NEWLINE, DEDENT, END]
:param readline: a function that allows access to the code being parsed in a line-by-line fashion.
:returns: a list of :class:`Token`s.
|
h2o-bindings/bin/pyparser.py
|
def _tokenize(readline):
"""
Parse any object accessible through a readline interface into a list of :class:`Token`s.
This function is very similar to :func:`tokenize.generate_tokens`, with few differences. First, the returned list
is a list of :class:`Token` objects instead of 5-tuples. This may be slightly less efficient, but far more
convenient for subsequent parsing. Second, the list of tokens is **normalized** to better match the way humans
understand the code, not what is more suitable for the compiler.
To better understand the normalization process, consider the following code example::
def test():
pass
# funny function
def test_funny():
# not so funny
pass
Normally, Python will parse it as the following sequence of tokens:
['def', 'test', '(', ')', ':', NEWLINE, INDENT, 'pass', NEWLINE, NL, '# funny function', NL, DEDENT,
'def', 'test_funny', '(', ')', ':', NEWLINE, '# not so funny', NL, INDENT, 'pass', NEWLINE, DEDENT, END]
The problem here is that the DEDENT token is generated not after the first 'pass' but after the comment, which
means that if we treat INDENTs / DEDENTs as block boundaries, then the comment "belongs" to the first function.
This is contrary to how most people would understand this code. Similarly, the second comment visually goes
after the INDENT, not before. Consequently, after "normalization" this function will return the following list
of tokens:
['def', 'test', '(', ')', ':', NEWLINE, INDENT, 'pass', NEWLINE, DEDENT, NL, '# funny function', NL,
'def', 'test_funny', '(', ')', ':', NEWLINE, INDENT, '# not so funny', NL, 'pass', NEWLINE, DEDENT, END]
:param readline: a function that allows access to the code being parsed in a line-by-line fashion.
:returns: a list of :class:`Token`s.
"""
assert callable(readline), "`readline` should be a function"
# Generate the initial list of tokens.
tokens = [Token(tok) for tok in tokenize.generate_tokens(readline)]
# Determine the levels of all indents / dedents.
indents_stack = [0] # Stack of all indent levels up to the current parsing point
for tok in tokens:
if tok.op == INDENT:
tok.pre_indent = indents_stack[-1]
indents_stack.append(tok.end_col)
tok.post_indent = tok.end_col
elif tok.op == DEDENT:
tok.pre_indent = indents_stack.pop()
tok.post_indent = indents_stack[-1]
elif tok.op == COMMENT:
tok.pre_indent = tok.post_indent = indents_stack[-1]
# Iterate through tokens backwards and see whether it's necessary to swap any of them.
i = len(tokens) - 1
while i >= 2:
pptok, ptok, tok = tokens[i - 2:i + 1]
if tok.op == INDENT:
if ptok.op == NL and pptok.op == COMMENT:
# Comment preceding an INDENT token
indent, nl, comment = tok, ptok, pptok
assert nl.start_col == comment.end_col
underindent = indent.post_indent - comment.start_col
if underindent > 0:
_warn("Comment '%s' is under-indented. Fixing..." % comment.str)
comment.move(0, underindent)
nl.move(0, underindent)
indent.move(-1, 0)
tokens[i - 2:i + 1] = indent, comment, nl
comment.pre_indent = comment.post_indent = indent.post_indent
assert indent.end_row == comment.start_row and indent.end_col <= comment.start_col
elif ptok.op == NL and ptok.start_col == 0:
# Empty line before an INDENT
indent, nl = tok, ptok
indent.move(-1, 0)
tokens[i - 1:i + 1] = indent, nl
elif tok.op == DEDENT and ptok.op == NL:
if pptok.op == COMMENT:
# Comment preceding a DEDENT. Switch only if comment is not at the level of the previous block!
dedent, nl, comment = tok, ptok, pptok
if comment.start_col <= dedent.post_indent:
rel_indent = comment.start_col - dedent.start_col
if rel_indent < 0:
_warn("Comment '%s' has wrong indentation" % comment.str)
ptok.move(0, -rel_indent)
comment.move(0, -rel_indent)
dedent.move(-1)
tokens[i - 2:i + 1] = dedent, comment, nl
comment.pre_indent = comment.post_indent = dedent.post_indent
i += 1
continue
elif ptok.start_col == 0:
# Empty line before a DEDENT
dedent, nl = tok, ptok
dedent.move(-1, -dedent.start_col)
tokens[i - 1:i + 1] = dedent, nl
i += 1
continue
else:
assert False, "Unexpected sequence of tokens: %r %r %r" % (pptok, ptok, tok)
elif tok.op == COMMENT:
if tok.start_col < tok.pre_indent:
_warn("Comment '%s' is under-indented relative to the surrounding block" % tok.str)
i -= 1
return tokens
|
def _tokenize(readline):
"""
Parse any object accessible through a readline interface into a list of :class:`Token`s.
This function is very similar to :func:`tokenize.generate_tokens`, with few differences. First, the returned list
is a list of :class:`Token` objects instead of 5-tuples. This may be slightly less efficient, but far more
convenient for subsequent parsing. Second, the list of tokens is **normalized** to better match the way humans
understand the code, not what is more suitable for the compiler.
To better understand the normalization process, consider the following code example::
def test():
pass
# funny function
def test_funny():
# not so funny
pass
Normally, Python will parse it as the following sequence of tokens:
['def', 'test', '(', ')', ':', NEWLINE, INDENT, 'pass', NEWLINE, NL, '# funny function', NL, DEDENT,
'def', 'test_funny', '(', ')', ':', NEWLINE, '# not so funny', NL, INDENT, 'pass', NEWLINE, DEDENT, END]
The problem here is that the DEDENT token is generated not after the first 'pass' but after the comment, which
means that if we treat INDENTs / DEDENTs as block boundaries, then the comment "belongs" to the first function.
This is contrary to how most people would understand this code. Similarly, the second comment visually goes
after the INDENT, not before. Consequently, after "normalization" this function will return the following list
of tokens:
['def', 'test', '(', ')', ':', NEWLINE, INDENT, 'pass', NEWLINE, DEDENT, NL, '# funny function', NL,
'def', 'test_funny', '(', ')', ':', NEWLINE, INDENT, '# not so funny', NL, 'pass', NEWLINE, DEDENT, END]
:param readline: a function that allows access to the code being parsed in a line-by-line fashion.
:returns: a list of :class:`Token`s.
"""
assert callable(readline), "`readline` should be a function"
# Generate the initial list of tokens.
tokens = [Token(tok) for tok in tokenize.generate_tokens(readline)]
# Determine the levels of all indents / dedents.
indents_stack = [0] # Stack of all indent levels up to the current parsing point
for tok in tokens:
if tok.op == INDENT:
tok.pre_indent = indents_stack[-1]
indents_stack.append(tok.end_col)
tok.post_indent = tok.end_col
elif tok.op == DEDENT:
tok.pre_indent = indents_stack.pop()
tok.post_indent = indents_stack[-1]
elif tok.op == COMMENT:
tok.pre_indent = tok.post_indent = indents_stack[-1]
# Iterate through tokens backwards and see whether it's necessary to swap any of them.
i = len(tokens) - 1
while i >= 2:
pptok, ptok, tok = tokens[i - 2:i + 1]
if tok.op == INDENT:
if ptok.op == NL and pptok.op == COMMENT:
# Comment preceding an INDENT token
indent, nl, comment = tok, ptok, pptok
assert nl.start_col == comment.end_col
underindent = indent.post_indent - comment.start_col
if underindent > 0:
_warn("Comment '%s' is under-indented. Fixing..." % comment.str)
comment.move(0, underindent)
nl.move(0, underindent)
indent.move(-1, 0)
tokens[i - 2:i + 1] = indent, comment, nl
comment.pre_indent = comment.post_indent = indent.post_indent
assert indent.end_row == comment.start_row and indent.end_col <= comment.start_col
elif ptok.op == NL and ptok.start_col == 0:
# Empty line before an INDENT
indent, nl = tok, ptok
indent.move(-1, 0)
tokens[i - 1:i + 1] = indent, nl
elif tok.op == DEDENT and ptok.op == NL:
if pptok.op == COMMENT:
# Comment preceding a DEDENT. Switch only if comment is not at the level of the previous block!
dedent, nl, comment = tok, ptok, pptok
if comment.start_col <= dedent.post_indent:
rel_indent = comment.start_col - dedent.start_col
if rel_indent < 0:
_warn("Comment '%s' has wrong indentation" % comment.str)
ptok.move(0, -rel_indent)
comment.move(0, -rel_indent)
dedent.move(-1)
tokens[i - 2:i + 1] = dedent, comment, nl
comment.pre_indent = comment.post_indent = dedent.post_indent
i += 1
continue
elif ptok.start_col == 0:
# Empty line before a DEDENT
dedent, nl = tok, ptok
dedent.move(-1, -dedent.start_col)
tokens[i - 1:i + 1] = dedent, nl
i += 1
continue
else:
assert False, "Unexpected sequence of tokens: %r %r %r" % (pptok, ptok, tok)
elif tok.op == COMMENT:
if tok.start_col < tok.pre_indent:
_warn("Comment '%s' is under-indented relative to the surrounding block" % tok.str)
i -= 1
return tokens
|
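To see the normalization on the docstring's own example, the private helper can be driven directly with a readline function (a sketch; the exact Token repr is not specified here):

import io
import pyparser  # hypothetical import name, as above

src = (
    "def test():\n"
    "    pass\n"
    "# funny function\n"
    "def test_funny():\n"
    "    # not so funny\n"
    "    pass\n"
)
for tok in pyparser._tokenize(io.StringIO(src).readline):
    print(tok)  # the DEDENT should now appear right after the first 'pass', before the comment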
[
"Parse",
"any",
"object",
"accessible",
"through",
"a",
"readline",
"interface",
"into",
"a",
"list",
"of",
":",
"class",
":",
"Token",
"s",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-bindings/bin/pyparser.py#L62-L169
|
[
"def",
"_tokenize",
"(",
"readline",
")",
":",
"assert",
"callable",
"(",
"readline",
")",
",",
"\"`readline` should be a function\"",
"# Generate the initial list of tokens.",
"tokens",
"=",
"[",
"Token",
"(",
"tok",
")",
"for",
"tok",
"in",
"tokenize",
".",
"generate_tokens",
"(",
"readline",
")",
"]",
"# Determine the levels of all indents / dedents.",
"indents_stack",
"=",
"[",
"0",
"]",
"# Stack of all indent levels up to the current parsing point",
"for",
"tok",
"in",
"tokens",
":",
"if",
"tok",
".",
"op",
"==",
"INDENT",
":",
"tok",
".",
"pre_indent",
"=",
"indents_stack",
"[",
"-",
"1",
"]",
"indents_stack",
".",
"append",
"(",
"tok",
".",
"end_col",
")",
"tok",
".",
"post_indent",
"=",
"tok",
".",
"end_col",
"elif",
"tok",
".",
"op",
"==",
"DEDENT",
":",
"tok",
".",
"pre_indent",
"=",
"indents_stack",
".",
"pop",
"(",
")",
"tok",
".",
"post_indent",
"=",
"indents_stack",
"[",
"-",
"1",
"]",
"elif",
"tok",
".",
"op",
"==",
"COMMENT",
":",
"tok",
".",
"pre_indent",
"=",
"tok",
".",
"post_indent",
"=",
"indents_stack",
"[",
"-",
"1",
"]",
"# Iterate through tokens backwards and see whether it's necessary to swap any of them.",
"i",
"=",
"len",
"(",
"tokens",
")",
"-",
"1",
"while",
"i",
">=",
"2",
":",
"pptok",
",",
"ptok",
",",
"tok",
"=",
"tokens",
"[",
"i",
"-",
"2",
":",
"i",
"+",
"1",
"]",
"if",
"tok",
".",
"op",
"==",
"INDENT",
":",
"if",
"ptok",
".",
"op",
"==",
"NL",
"and",
"pptok",
".",
"op",
"==",
"COMMENT",
":",
"# Comment preceding an INDENT token",
"indent",
",",
"nl",
",",
"comment",
"=",
"tok",
",",
"ptok",
",",
"pptok",
"assert",
"nl",
".",
"start_col",
"==",
"comment",
".",
"end_col",
"underindent",
"=",
"indent",
".",
"post_indent",
"-",
"comment",
".",
"start_col",
"if",
"underindent",
">",
"0",
":",
"_warn",
"(",
"\"Comment '%s' is under-indented. Fixing...\"",
"%",
"comment",
".",
"str",
")",
"comment",
".",
"move",
"(",
"0",
",",
"underindent",
")",
"nl",
".",
"move",
"(",
"0",
",",
"underindent",
")",
"indent",
".",
"move",
"(",
"-",
"1",
",",
"0",
")",
"tokens",
"[",
"i",
"-",
"2",
":",
"i",
"+",
"1",
"]",
"=",
"indent",
",",
"comment",
",",
"nl",
"comment",
".",
"pre_indent",
"=",
"comment",
".",
"post_indent",
"=",
"indent",
".",
"post_indent",
"assert",
"indent",
".",
"end_row",
"==",
"comment",
".",
"start_row",
"and",
"indent",
".",
"end_col",
"<=",
"comment",
".",
"start_col",
"elif",
"ptok",
".",
"op",
"==",
"NL",
"and",
"ptok",
".",
"start_col",
"==",
"0",
":",
"# Empty line before an INDENT",
"indent",
",",
"nl",
"=",
"tok",
",",
"ptok",
"indent",
".",
"move",
"(",
"-",
"1",
",",
"0",
")",
"tokens",
"[",
"i",
"-",
"1",
":",
"i",
"+",
"1",
"]",
"=",
"indent",
",",
"nl",
"elif",
"tok",
".",
"op",
"==",
"DEDENT",
"and",
"ptok",
".",
"op",
"==",
"NL",
":",
"if",
"pptok",
".",
"op",
"==",
"COMMENT",
":",
"# Comment preceding a DEDENT. Switch only if comment is not at the level of the previous block!",
"dedent",
",",
"nl",
",",
"comment",
"=",
"tok",
",",
"ptok",
",",
"pptok",
"if",
"comment",
".",
"start_col",
"<=",
"dedent",
".",
"post_indent",
":",
"rel_indent",
"=",
"comment",
".",
"start_col",
"-",
"dedent",
".",
"start_col",
"if",
"rel_indent",
"<",
"0",
":",
"_warn",
"(",
"\"Comment '%s' has wrong indentation\"",
"%",
"comment",
".",
"str",
")",
"ptok",
".",
"move",
"(",
"0",
",",
"-",
"rel_indent",
")",
"comment",
".",
"move",
"(",
"0",
",",
"-",
"rel_indent",
")",
"dedent",
".",
"move",
"(",
"-",
"1",
")",
"tokens",
"[",
"i",
"-",
"2",
":",
"i",
"+",
"1",
"]",
"=",
"dedent",
",",
"comment",
",",
"nl",
"comment",
".",
"pre_indent",
"=",
"comment",
".",
"post_indent",
"=",
"dedent",
".",
"post_indent",
"i",
"+=",
"1",
"continue",
"elif",
"ptok",
".",
"start_col",
"==",
"0",
":",
"# Empty line before a DEDENT",
"dedent",
",",
"nl",
"=",
"tok",
",",
"ptok",
"dedent",
".",
"move",
"(",
"-",
"1",
",",
"-",
"dedent",
".",
"start_col",
")",
"tokens",
"[",
"i",
"-",
"1",
":",
"i",
"+",
"1",
"]",
"=",
"dedent",
",",
"nl",
"i",
"+=",
"1",
"continue",
"else",
":",
"assert",
"False",
",",
"\"Unexpected sequence of tokens: %r %r %r\"",
"%",
"(",
"pptok",
",",
"ptok",
",",
"tok",
")",
"elif",
"tok",
".",
"op",
"==",
"COMMENT",
":",
"if",
"tok",
".",
"start_col",
"<",
"tok",
".",
"pre_indent",
":",
"_warn",
"(",
"\"Comment '%s' is under-indented relative to the surrounding block\"",
"%",
"tok",
".",
"str",
")",
"i",
"-=",
"1",
"return",
"tokens"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
Token.move
|
Move the token by `drow` rows and `dcol` columns.
|
h2o-bindings/bin/pyparser.py
|
def move(self, drow, dcol=0):
"""Move the token by `drow` rows and `dcol` columns."""
self._start_row += drow
self._start_col += dcol
self._end_row += drow
self._end_col += dcol
|
def move(self, drow, dcol=0):
"""Move the token by `drow` rows and `dcol` columns."""
self._start_row += drow
self._start_col += dcol
self._end_row += drow
self._end_col += dcol
|
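A tiny sketch of the coordinate shift, assuming Token wraps a standard tokenize 5-tuple (as it does inside _tokenize above) and exposes start_row/start_col properties:

import io
import tokenize
import pyparser  # hypothetical import name, as above

raw = next(tokenize.generate_tokens(io.StringIO("x = 1\n").readline))
tok = pyparser.Token(raw)          # wrap the NAME token for "x"
before = (tok.start_row, tok.start_col)
tok.move(2, 4)                     # 2 rows down, 4 columns to the right
print(before, "->", (tok.start_row, tok.start_col))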
[
"Move",
"the",
"token",
"by",
"drow",
"rows",
"and",
"dcol",
"columns",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-bindings/bin/pyparser.py#L246-L251
|
[
"def",
"move",
"(",
"self",
",",
"drow",
",",
"dcol",
"=",
"0",
")",
":",
"self",
".",
"_start_row",
"+=",
"drow",
"self",
".",
"_start_col",
"+=",
"dcol",
"self",
".",
"_end_row",
"+=",
"drow",
"self",
".",
"_end_col",
"+=",
"dcol"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
ParsedBase.unparse
|
Convert the parsed representation back into the source code.
|
h2o-bindings/bin/pyparser.py
|
def unparse(self):
"""Convert the parsed representation back into the source code."""
ut = Untokenizer(start_row=self._tokens[0].start_row)
self._unparse(ut)
return ut.result()
|
def unparse(self):
"""Convert the parsed representation back into the source code."""
ut = Untokenizer(start_row=self._tokens[0].start_row)
self._unparse(ut)
return ut.result()
|
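A round-trip sketch, assuming Code derives from ParsedBase like the other fragment classes:

import pyparser  # hypothetical import name, as above

src = (
    "import sys\n"
    "\n"
    "def f():\n"
    "    return sys.maxsize\n"
)
code = pyparser.parse_text(src)
print(code.unparse() == src)  # should be True if the token stream round-trips losslessly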
[
"Convert",
"the",
"parsed",
"representation",
"back",
"into",
"the",
"source",
"code",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-bindings/bin/pyparser.py#L365-L369
|
[
"def",
"unparse",
"(",
"self",
")",
":",
"ut",
"=",
"Untokenizer",
"(",
"start_row",
"=",
"self",
".",
"_tokens",
"[",
"0",
"]",
".",
"start_row",
")",
"self",
".",
"_unparse",
"(",
"ut",
")",
"return",
"ut",
".",
"result",
"(",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
Code._parse1
|
First stage of parsing the code (stored as a raw stream of tokens).
This method will do the initial pass of the ``self._tokens`` list of tokens, and mark different sections as
belonging to one of the categories: comment, whitespace, docstring, import, code, decorator, def, class, end.
These sections will be returned as a list of tuples ``(fragment_type, start, end)``, where ``start`` and ``end``
are indices in the list of raw tokens.
|
h2o-bindings/bin/pyparser.py
|
def _parse1(self):
"""
First stage of parsing the code (stored as a raw stream of tokens).
This method will do the initial pass of the ``self._tokens`` list of tokens, and mark different sections as
belonging to one of the categories: comment, whitespace, docstring, import, code, decorator, def, class, end.
These sections will be returned as a list of tuples ``(fragment_type, start, end)``, where ``start`` and ``end``
are indices in the list of raw tokens.
"""
fragments = []
tokens = self._tokens
def advance_after_newline(i0):
"""Return the index of the first token after the end of the current (logical) line."""
for i in range(i0, len(tokens)):
if tokens[i].op == NEWLINE:
break
return i + 1
i = 0
while i < len(tokens):
# Assume that we always start at the beginning of a new block
i0 = i
tok = tokens[i]
fragment_type = "???" # to be determined in the switch clause below
if tok.op == ENDMARKER:
fragment_type = "end"
i += 1
assert i == len(tokens), "ENDMARKER token encountered before the end of the stream"
elif tok.op == NL:
fragment_type = "whitespace"
# If there are multiple whitespaces, gobble them all
while tokens[i].op == NL:
i += 1
elif tok.op == COMMENT:
fragment_type = "comment"
# Collapse multiple comment lines into a single comment fragment; but only if they are at the same
# level of indentation.
is_banner = False
while i < len(tokens) and tokens[i].op == COMMENT and tokens[i].start_col == tok.start_col:
assert tokens[i + 1].op == NL, "Unexpected token after a comment: %r" % tokens[i + 1]
s = tokens[i].str
if re.match(r"^#\s?[#*=-]{10,}$", s) or re.match(r"^#\s?[#*=-]{4,}.*?[#*=-]{4,}$", s):
is_banner = True
i += 2
if is_banner:
fragment_type = "banner-comment"
elif (tok.op == STRING and tokens[i + 1].op == NEWLINE and
all(frag[0] == "whitespace" or frag[0] == "comment" for frag in fragments)):
i += 2
fragment_type = "docstring"
elif tok.op == OP and tok.str == "@" and tokens[i + 1].op == NAME:
while tokens[i].op == OP and tokens[i].str == "@" and tokens[i + 1].op == NAME:
i = advance_after_newline(i)
fragment_type = "decorator"
elif tok.op == NAME and tok.str in {"from", "import"}:
while tokens[i].op == NAME and tokens[i].str in {"from", "import"}:
i = advance_after_newline(i)
fragment_type = "import"
elif tok.op in {INDENT, DEDENT, NEWLINE}:
assert False, "Unexpected token %d: %r" % (i, tok)
else:
i = advance_after_newline(i)
if i < len(tokens) and tokens[i].op == INDENT:
level = 1
while level > 0:
i += 1
level += tokens[i].indent()
assert tokens[i].op == DEDENT
i += 1 # consume the last DEDENT
while i < len(tokens) and tokens[i].op == COMMENT and tokens[i].start_col > tok.start_col:
assert tokens[i + 1].op == NL
i += 2
if tok.op == NAME and tok.str in {"def", "class"}:
fragment_type = tok.str
else:
fragment_type = "code"
assert i > i0, "Stuck at i = %d" % i
fragments.append((fragment_type, i0, i))
return fragments
|
def _parse1(self):
"""
First stage of parsing the code (stored as a raw stream of tokens).
This method will do the initial pass of the ``self._tokens`` list of tokens, and mark different sections as
belonging to one of the categories: comment, whitespace, docstring, import, code, decorator, def, class, end.
These sections will be returned as a list of tuples ``(fragment_type, start, end)``, where ``start`` and ``end``
are indices in the list of raw tokens.
"""
fragments = []
tokens = self._tokens
def advance_after_newline(i0):
"""Return the index of the first token after the end of the current (logical) line."""
for i in range(i0, len(tokens)):
if tokens[i].op == NEWLINE:
break
return i + 1
i = 0
while i < len(tokens):
# Assume that we always start at the beginning of a new block
i0 = i
tok = tokens[i]
fragment_type = "???" # to be determined in the switch clause below
if tok.op == ENDMARKER:
fragment_type = "end"
i += 1
assert i == len(tokens), "ENDMARKER token encountered before the end of the stream"
elif tok.op == NL:
fragment_type = "whitespace"
# If there are multiple whitespaces, gobble them all
while tokens[i].op == NL:
i += 1
elif tok.op == COMMENT:
fragment_type = "comment"
# Collapse multiple comment lines into a single comment fragment; but only if they are at the same
# level of indentation.
is_banner = False
while i < len(tokens) and tokens[i].op == COMMENT and tokens[i].start_col == tok.start_col:
assert tokens[i + 1].op == NL, "Unexpected token after a comment: %r" % tokens[i + 1]
s = tokens[i].str
if re.match(r"^#\s?[#*=-]{10,}$", s) or re.match(r"^#\s?[#*=-]{4,}.*?[#*=-]{4,}$", s):
is_banner = True
i += 2
if is_banner:
fragment_type = "banner-comment"
elif (tok.op == STRING and tokens[i + 1].op == NEWLINE and
all(frag[0] == "whitespace" or frag[0] == "comment" for frag in fragments)):
i += 2
fragment_type = "docstring"
elif tok.op == OP and tok.str == "@" and tokens[i + 1].op == NAME:
while tokens[i].op == OP and tokens[i].str == "@" and tokens[i + 1].op == NAME:
i = advance_after_newline(i)
fragment_type = "decorator"
elif tok.op == NAME and tok.str in {"from", "import"}:
while tokens[i].op == NAME and tokens[i].str in {"from", "import"}:
i = advance_after_newline(i)
fragment_type = "import"
elif tok.op in {INDENT, DEDENT, NEWLINE}:
assert False, "Unexpected token %d: %r" % (i, tok)
else:
i = advance_after_newline(i)
if i < len(tokens) and tokens[i].op == INDENT:
level = 1
while level > 0:
i += 1
level += tokens[i].indent()
assert tokens[i].op == DEDENT
i += 1 # consume the last DEDENT
while i < len(tokens) and tokens[i].op == COMMENT and tokens[i].start_col > tok.start_col:
assert tokens[i + 1].op == NL
i += 2
if tok.op == NAME and tok.str in {"def", "class"}:
fragment_type = tok.str
else:
fragment_type = "code"
assert i > i0, "Stuck at i = %d" % i
fragments.append((fragment_type, i0, i))
return fragments
|
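The fragment classification can be inspected on a small snippet. _parse1 only reads self._tokens, so re-invoking it after construction is harmless per the code above (still, this is a private method and the sketch is illustrative):

import pyparser  # hypothetical import name, as above

code = pyparser.parse_text(
    "import sys\n"
    "\n"
    "# helper\n"
    "def f():\n"
    "    return sys.maxsize\n"
)
for ftype, start, end in code._parse1():
    print(ftype, start, end)
# expected fragment types, in order: import, whitespace, comment, def, end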
[
"First",
"stage",
"of",
"parsing",
"the",
"code",
"(",
"stored",
"as",
"a",
"raw",
"stream",
"of",
"tokens",
")",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-bindings/bin/pyparser.py#L436-L523
|
[
"def",
"_parse1",
"(",
"self",
")",
":",
"fragments",
"=",
"[",
"]",
"tokens",
"=",
"self",
".",
"_tokens",
"def",
"advance_after_newline",
"(",
"i0",
")",
":",
"\"\"\"Return the index of the first token after the end of the current (logical) line.\"\"\"",
"for",
"i",
"in",
"range",
"(",
"i0",
",",
"len",
"(",
"tokens",
")",
")",
":",
"if",
"tokens",
"[",
"i",
"]",
".",
"op",
"==",
"NEWLINE",
":",
"break",
"return",
"i",
"+",
"1",
"i",
"=",
"0",
"while",
"i",
"<",
"len",
"(",
"tokens",
")",
":",
"# Assume that we always start at the beginning of a new block",
"i0",
"=",
"i",
"tok",
"=",
"tokens",
"[",
"i",
"]",
"fragment_type",
"=",
"\"???\"",
"# to be determined in the switch clause below",
"if",
"tok",
".",
"op",
"==",
"ENDMARKER",
":",
"fragment_type",
"=",
"\"end\"",
"i",
"+=",
"1",
"assert",
"i",
"==",
"len",
"(",
"tokens",
")",
",",
"\"ENDMARKER token encountered before the end of the stream\"",
"elif",
"tok",
".",
"op",
"==",
"NL",
":",
"fragment_type",
"=",
"\"whitespace\"",
"# If there are multiple whitespaces, gobble them all",
"while",
"tokens",
"[",
"i",
"]",
".",
"op",
"==",
"NL",
":",
"i",
"+=",
"1",
"elif",
"tok",
".",
"op",
"==",
"COMMENT",
":",
"fragment_type",
"=",
"\"comment\"",
"# Collapse multiple comment lines into a single comment fragment; but only if they are at the same",
"# level of indentation.",
"is_banner",
"=",
"False",
"while",
"i",
"<",
"len",
"(",
"tokens",
")",
"and",
"tokens",
"[",
"i",
"]",
".",
"op",
"==",
"COMMENT",
"and",
"tokens",
"[",
"i",
"]",
".",
"start_col",
"==",
"tok",
".",
"start_col",
":",
"assert",
"tokens",
"[",
"i",
"+",
"1",
"]",
".",
"op",
"==",
"NL",
",",
"\"Unexpected token after a comment: %r\"",
"%",
"tokens",
"[",
"i",
"+",
"1",
"]",
"s",
"=",
"tokens",
"[",
"i",
"]",
".",
"str",
"if",
"re",
".",
"match",
"(",
"r\"^#\\s?[#*=-]{10,}$\"",
",",
"s",
")",
"or",
"re",
".",
"match",
"(",
"r\"^#\\s?[#*=-]{4,}.*?[#*=-]{4,}$\"",
",",
"s",
")",
":",
"is_banner",
"=",
"True",
"i",
"+=",
"2",
"if",
"is_banner",
":",
"fragment_type",
"=",
"\"banner-comment\"",
"elif",
"(",
"tok",
".",
"op",
"==",
"STRING",
"and",
"tokens",
"[",
"i",
"+",
"1",
"]",
".",
"op",
"==",
"NEWLINE",
"and",
"all",
"(",
"frag",
"[",
"0",
"]",
"==",
"\"whitespace\"",
"or",
"frag",
"[",
"0",
"]",
"==",
"\"comment\"",
"for",
"frag",
"in",
"fragments",
")",
")",
":",
"i",
"+=",
"2",
"fragment_type",
"=",
"\"docstring\"",
"elif",
"tok",
".",
"op",
"==",
"OP",
"and",
"tok",
".",
"str",
"==",
"\"@\"",
"and",
"tokens",
"[",
"i",
"+",
"1",
"]",
".",
"op",
"==",
"NAME",
":",
"while",
"tokens",
"[",
"i",
"]",
".",
"op",
"==",
"OP",
"and",
"tokens",
"[",
"i",
"]",
".",
"str",
"==",
"\"@\"",
"and",
"tokens",
"[",
"i",
"+",
"1",
"]",
".",
"op",
"==",
"NAME",
":",
"i",
"=",
"advance_after_newline",
"(",
"i",
")",
"fragment_type",
"=",
"\"decorator\"",
"elif",
"tok",
".",
"op",
"==",
"NAME",
"and",
"tok",
".",
"str",
"in",
"{",
"\"from\"",
",",
"\"import\"",
"}",
":",
"while",
"tokens",
"[",
"i",
"]",
".",
"op",
"==",
"NAME",
"and",
"tokens",
"[",
"i",
"]",
".",
"str",
"in",
"{",
"\"from\"",
",",
"\"import\"",
"}",
":",
"i",
"=",
"advance_after_newline",
"(",
"i",
")",
"fragment_type",
"=",
"\"import\"",
"elif",
"tok",
".",
"op",
"in",
"{",
"INDENT",
",",
"DEDENT",
",",
"NEWLINE",
"}",
":",
"assert",
"False",
",",
"\"Unexpected token %d: %r\"",
"%",
"(",
"i",
",",
"tok",
")",
"else",
":",
"i",
"=",
"advance_after_newline",
"(",
"i",
")",
"if",
"i",
"<",
"len",
"(",
"tokens",
")",
"and",
"tokens",
"[",
"i",
"]",
".",
"op",
"==",
"INDENT",
":",
"level",
"=",
"1",
"while",
"level",
">",
"0",
":",
"i",
"+=",
"1",
"level",
"+=",
"tokens",
"[",
"i",
"]",
".",
"indent",
"(",
")",
"assert",
"tokens",
"[",
"i",
"]",
".",
"op",
"==",
"DEDENT",
"i",
"+=",
"1",
"# consume the last DEDENT",
"while",
"i",
"<",
"len",
"(",
"tokens",
")",
"and",
"tokens",
"[",
"i",
"]",
".",
"op",
"==",
"COMMENT",
"and",
"tokens",
"[",
"i",
"]",
".",
"start_col",
">",
"tok",
".",
"start_col",
":",
"assert",
"tokens",
"[",
"i",
"+",
"1",
"]",
".",
"op",
"==",
"NL",
"i",
"+=",
"2",
"if",
"tok",
".",
"op",
"==",
"NAME",
"and",
"tok",
".",
"str",
"in",
"{",
"\"def\"",
",",
"\"class\"",
"}",
":",
"fragment_type",
"=",
"tok",
".",
"str",
"else",
":",
"fragment_type",
"=",
"\"code\"",
"assert",
"i",
">",
"i0",
",",
"\"Stuck at i = %d\"",
"%",
"i",
"fragments",
".",
"append",
"(",
"(",
"fragment_type",
",",
"i0",
",",
"i",
")",
")",
"return",
"fragments"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
Code._parse2
|
Second stage of parsing: convert ``fragments`` into the list of code objects.
This method in fact does more than simple conversion of fragments into objects. It also attempts to group
certain fragments into one, if they in fact seem like a single piece. For example, decorators are grouped
together with the objects they decorate, comments that explain certain objects or statements are attached to
those as well.
|
h2o-bindings/bin/pyparser.py
|
def _parse2(self, fragments):
"""
Second stage of parsing: convert ``fragments`` into the list of code objects.
This method in fact does more than simple conversion of fragments into objects. It also attempts to group
certain fragments into one, if they in fact seem like a single piece. For example, decorators are grouped
together with the objects they decorate, comments that explain certain objects or statements are attached to
those as well.
"""
out = []
tokens = self._tokens
i = 0
saved_start = None
while i < len(fragments):
ftype, start, end = fragments[i]
assert start == (0 if i == 0 else fragments[i - 1][2]), "Discontinuity in `fragments` at i = %d" % i
if ftype == "whitespace" or ftype == "end":
assert saved_start is None
obj = Whitespace(tokens[start:end])
elif ftype == "docstring":
assert saved_start is None
obj = Docstring(tokens[start:end])
elif ftype == "comment":
assert saved_start is None
next_frag = fragments[i + 1][0] if i + 1 < len(fragments) else "end"
if next_frag in {"docstring", "end", "whitespace", "comment", "banner-comment"}:
# Possibly merge with the previous Comment instance
# if (len(out) >= 2 and isinstance(out[-1], Whitespace) and isinstance(out[-2], Comment) and
# out[-2].type != "banner"):
# obj = Comment(out[-2].tokens + out[-1].tokens + tokens[start:end])
# del out[-2:]
# else:
obj = Comment(tokens[start:end])
elif next_frag in {"decorator", "import", "def", "class", "code"}:
# save this comment for later
saved_start = start
i += 1
continue
else:
raise RuntimeError("Unknown token type %s" % next_frag)
elif ftype == "banner-comment":
assert saved_start is None
obj = Comment(tokens[start:end])
obj.type = "banner"
elif ftype == "decorator":
if saved_start is None:
saved_start = start
i += 1
continue
elif ftype == "import":
real_start = start if saved_start is None else saved_start
saved_start = None
obj = ImportBlock(tokens[real_start:end])
elif ftype in {"class", "def"}:
real_start = start if saved_start is None else saved_start
saved_start = None
obj = Callable(tokens[real_start:end])
obj.type = ftype
elif ftype == "code":
real_start = start if saved_start is None else saved_start
saved_start = None
obj = Expression(tokens[real_start:end])
else:
assert False, "Unknown fragment type %s" % ftype
out.append(obj)
i += 1
return out
|
def _parse2(self, fragments):
"""
Second stage of parsing: convert ``fragments`` into the list of code objects.
This method in fact does more than simple conversion of fragments into objects. It also attempts to group
certain fragments into one, if they in fact seem like a single piece. For example, decorators are grouped
together with the objects they decorate, comments that explain certain objects or statements are attached to
those as well.
"""
out = []
tokens = self._tokens
i = 0
saved_start = None
while i < len(fragments):
ftype, start, end = fragments[i]
assert start == (0 if i == 0 else fragments[i - 1][2]), "Discontinuity in `fragments` at i = %d" % i
if ftype == "whitespace" or ftype == "end":
assert saved_start is None
obj = Whitespace(tokens[start:end])
elif ftype == "docstring":
assert saved_start is None
obj = Docstring(tokens[start:end])
elif ftype == "comment":
assert saved_start is None
next_frag = fragments[i + 1][0] if i + 1 < len(fragments) else "end"
if next_frag in {"docstring", "end", "whitespace", "comment", "banner-comment"}:
# Possibly merge with the previous Comment instance
# if (len(out) >= 2 and isinstance(out[-1], Whitespace) and isinstance(out[-2], Comment) and
# out[-2].type != "banner"):
# obj = Comment(out[-2].tokens + out[-1].tokens + tokens[start:end])
# del out[-2:]
# else:
obj = Comment(tokens[start:end])
elif next_frag in {"decorator", "import", "def", "class", "code"}:
# save this comment for later
saved_start = start
i += 1
continue
else:
raise RuntimeError("Unknown token type %s" % next_frag)
elif ftype == "banner-comment":
assert saved_start is None
obj = Comment(tokens[start:end])
obj.type = "banner"
elif ftype == "decorator":
if saved_start is None:
saved_start = start
i += 1
continue
elif ftype == "import":
real_start = start if saved_start is None else saved_start
saved_start = None
obj = ImportBlock(tokens[real_start:end])
elif ftype in {"class", "def"}:
real_start = start if saved_start is None else saved_start
saved_start = None
obj = Callable(tokens[real_start:end])
obj.type = ftype
elif ftype == "code":
real_start = start if saved_start is None else saved_start
saved_start = None
obj = Expression(tokens[real_start:end])
else:
assert False, "Unknown fragment type %s" % ftype
out.append(obj)
i += 1
return out
|
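The grouping behaviour (a leading comment and decorator being folded into the def that follows) can be seen by feeding the first-pass fragments into the second pass; same assumptions as the _parse1 sketch above:

import pyparser  # hypothetical import name, as above

code = pyparser.parse_text(
    "# explains f\n"
    "@staticmethod\n"
    "def f():\n"
    "    pass\n"
)
frags = code._parse1()
objs = code._parse2(frags)
print([f[0] for f in frags])             # expected: ['comment', 'decorator', 'def', 'end']
print([type(o).__name__ for o in objs])  # expected: ['Callable', 'Whitespace'] - comment and decorator merged into the Callable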
[
"Second",
"stage",
"of",
"parsing",
":",
"convert",
"fragments",
"into",
"the",
"list",
"of",
"code",
"objects",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-bindings/bin/pyparser.py#L526-L592
|
[
"def",
"_parse2",
"(",
"self",
",",
"fragments",
")",
":",
"out",
"=",
"[",
"]",
"tokens",
"=",
"self",
".",
"_tokens",
"i",
"=",
"0",
"saved_start",
"=",
"None",
"while",
"i",
"<",
"len",
"(",
"fragments",
")",
":",
"ftype",
",",
"start",
",",
"end",
"=",
"fragments",
"[",
"i",
"]",
"assert",
"start",
"==",
"(",
"0",
"if",
"i",
"==",
"0",
"else",
"fragments",
"[",
"i",
"-",
"1",
"]",
"[",
"2",
"]",
")",
",",
"\"Discontinuity in `fragments` at i = %d\"",
"%",
"i",
"if",
"ftype",
"==",
"\"whitespace\"",
"or",
"ftype",
"==",
"\"end\"",
":",
"assert",
"saved_start",
"is",
"None",
"obj",
"=",
"Whitespace",
"(",
"tokens",
"[",
"start",
":",
"end",
"]",
")",
"elif",
"ftype",
"==",
"\"docstring\"",
":",
"assert",
"saved_start",
"is",
"None",
"obj",
"=",
"Docstring",
"(",
"tokens",
"[",
"start",
":",
"end",
"]",
")",
"elif",
"ftype",
"==",
"\"comment\"",
":",
"assert",
"saved_start",
"is",
"None",
"next_frag",
"=",
"fragments",
"[",
"i",
"+",
"1",
"]",
"[",
"0",
"]",
"if",
"i",
"+",
"1",
"<",
"len",
"(",
"fragments",
")",
"else",
"\"end\"",
"if",
"next_frag",
"in",
"{",
"\"docstring\"",
",",
"\"end\"",
",",
"\"whitespace\"",
",",
"\"comment\"",
",",
"\"banner-comment\"",
"}",
":",
"# Possibly merge with the previous Comment instance",
"# if (len(out) >= 2 and isinstance(out[-1], Whitespace) and isinstance(out[-2], Comment) and",
"# out[-2].type != \"banner\"):",
"# obj = Comment(out[-2].tokens + out[-1].tokens + tokens[start:end])",
"# del out[-2:]",
"# else:",
"obj",
"=",
"Comment",
"(",
"tokens",
"[",
"start",
":",
"end",
"]",
")",
"elif",
"next_frag",
"in",
"{",
"\"decorator\"",
",",
"\"import\"",
",",
"\"def\"",
",",
"\"class\"",
",",
"\"code\"",
"}",
":",
"# save this comment for later",
"saved_start",
"=",
"start",
"i",
"+=",
"1",
"continue",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Unknown token type %s\"",
"%",
"next_frag",
")",
"elif",
"ftype",
"==",
"\"banner-comment\"",
":",
"assert",
"saved_start",
"is",
"None",
"obj",
"=",
"Comment",
"(",
"tokens",
"[",
"start",
":",
"end",
"]",
")",
"obj",
".",
"type",
"=",
"\"banner\"",
"elif",
"ftype",
"==",
"\"decorator\"",
":",
"if",
"saved_start",
"is",
"None",
":",
"saved_start",
"=",
"start",
"i",
"+=",
"1",
"continue",
"elif",
"ftype",
"==",
"\"import\"",
":",
"real_start",
"=",
"start",
"if",
"saved_start",
"is",
"None",
"else",
"saved_start",
"saved_start",
"=",
"None",
"obj",
"=",
"ImportBlock",
"(",
"tokens",
"[",
"real_start",
":",
"end",
"]",
")",
"elif",
"ftype",
"in",
"{",
"\"class\"",
",",
"\"def\"",
"}",
":",
"real_start",
"=",
"start",
"if",
"saved_start",
"is",
"None",
"else",
"saved_start",
"saved_start",
"=",
"None",
"obj",
"=",
"Callable",
"(",
"tokens",
"[",
"real_start",
":",
"end",
"]",
")",
"obj",
".",
"type",
"=",
"ftype",
"elif",
"ftype",
"==",
"\"code\"",
":",
"real_start",
"=",
"start",
"if",
"saved_start",
"is",
"None",
"else",
"saved_start",
"saved_start",
"=",
"None",
"obj",
"=",
"Expression",
"(",
"tokens",
"[",
"real_start",
":",
"end",
"]",
")",
"else",
":",
"assert",
"False",
",",
"\"Unknown fragment type %s\"",
"%",
"ftype",
"out",
".",
"append",
"(",
"obj",
")",
"i",
"+=",
"1",
"return",
"out"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OClusteringModel.size
|
Get the sizes of each cluster.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where
the keys are "train", "valid", and "xval".
:param bool train: If True, return the cluster sizes for the training data.
:param bool valid: If True, return the cluster sizes for the validation data.
:param bool xval: If True, return the cluster sizes for each of the cross-validated splits.
:returns: The cluster sizes for the specified key(s).
|
h2o-py/h2o/model/clustering.py
|
def size(self, train=False, valid=False, xval=False):
"""
Get the sizes of each cluster.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where
the keys are "train", "valid", and "xval".
:param bool train: If True, return the cluster sizes for the training data.
:param bool valid: If True, return the cluster sizes for the validation data.
:param bool xval: If True, return the cluster sizes for each of the cross-validated splits.
:returns: The cluster sizes for the specified key(s).
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k, v in tm.items():
m[k] = None if v is None else [v[2] for v in v._metric_json["centroid_stats"].cell_values]
return list(m.values())[0] if len(m) == 1 else m
|
def size(self, train=False, valid=False, xval=False):
"""
Get the sizes of each cluster.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where
the keys are "train", "valid", and "xval".
:param bool train: If True, return the cluster sizes for the training data.
:param bool valid: If True, return the cluster sizes for the validation data.
:param bool xval: If True, return the cluster sizes for each of the cross-validated splits.
:returns: The cluster sizes for the specified key(s).
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k, v in tm.items():
m[k] = None if v is None else [v[2] for v in v._metric_json["centroid_stats"].cell_values]
return list(m.values())[0] if len(m) == 1 else m
|
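A usage sketch with a K-means model (the file path and column choice are illustrative):

import h2o
from h2o.estimators import H2OKMeansEstimator

h2o.init()
iris = h2o.import_file("iris.csv")             # illustrative path
km = H2OKMeansEstimator(k=3)
km.train(x=iris.names[:4], training_frame=iris)

print(km.size(train=True))   # one entry per cluster
print(km.centers())          # cluster centers, one row per cluster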
[
"Get",
"the",
"sizes",
"of",
"each",
"cluster",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/model/clustering.py#L9-L27
|
[
"def",
"size",
"(",
"self",
",",
"train",
"=",
"False",
",",
"valid",
"=",
"False",
",",
"xval",
"=",
"False",
")",
":",
"tm",
"=",
"ModelBase",
".",
"_get_metrics",
"(",
"self",
",",
"train",
",",
"valid",
",",
"xval",
")",
"m",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"tm",
".",
"items",
"(",
")",
":",
"m",
"[",
"k",
"]",
"=",
"None",
"if",
"v",
"is",
"None",
"else",
"[",
"v",
"[",
"2",
"]",
"for",
"v",
"in",
"v",
".",
"_metric_json",
"[",
"\"centroid_stats\"",
"]",
".",
"cell_values",
"]",
"return",
"list",
"(",
"m",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
"if",
"len",
"(",
"m",
")",
"==",
"1",
"else",
"m"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OClusteringModel.centers
|
The centers for the KMeans model.
|
h2o-py/h2o/model/clustering.py
|
def centers(self):
"""The centers for the KMeans model."""
o = self._model_json["output"]
cvals = o["centers"].cell_values
centers = [list(cval[1:]) for cval in cvals]
return centers
|
def centers(self):
"""The centers for the KMeans model."""
o = self._model_json["output"]
cvals = o["centers"].cell_values
centers = [list(cval[1:]) for cval in cvals]
return centers
|
[
"The",
"centers",
"for",
"the",
"KMeans",
"model",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/model/clustering.py#L143-L148
|
[
"def",
"centers",
"(",
"self",
")",
":",
"o",
"=",
"self",
".",
"_model_json",
"[",
"\"output\"",
"]",
"cvals",
"=",
"o",
"[",
"\"centers\"",
"]",
".",
"cell_values",
"centers",
"=",
"[",
"list",
"(",
"cval",
"[",
"1",
":",
"]",
")",
"for",
"cval",
"in",
"cvals",
"]",
"return",
"centers"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OClusteringModel.centers_std
|
The standardized centers for the kmeans model.
|
h2o-py/h2o/model/clustering.py
|
def centers_std(self):
"""The standardized centers for the kmeans model."""
o = self._model_json["output"]
cvals = o["centers_std"].cell_values
centers_std = [list(cval[1:]) for cval in cvals]
centers_std = [list(x) for x in zip(*centers_std)]
return centers_std
|
def centers_std(self):
"""The standardized centers for the kmeans model."""
o = self._model_json["output"]
cvals = o["centers_std"].cell_values
centers_std = [list(cval[1:]) for cval in cvals]
centers_std = [list(x) for x in zip(*centers_std)]
return centers_std
|
[
"The",
"standardized",
"centers",
"for",
"the",
"kmeans",
"model",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/model/clustering.py#L151-L157
|
[
"def",
"centers_std",
"(",
"self",
")",
":",
"o",
"=",
"self",
".",
"_model_json",
"[",
"\"output\"",
"]",
"cvals",
"=",
"o",
"[",
"\"centers_std\"",
"]",
".",
"cell_values",
"centers_std",
"=",
"[",
"list",
"(",
"cval",
"[",
"1",
":",
"]",
")",
"for",
"cval",
"in",
"cvals",
"]",
"centers_std",
"=",
"[",
"list",
"(",
"x",
")",
"for",
"x",
"in",
"zip",
"(",
"*",
"centers_std",
")",
"]",
"return",
"centers_std"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
connect
|
Connect to an existing H2O server, remote or local.
There are two ways to connect to a server: either pass a `server` parameter containing an instance of
an H2OLocalServer, or specify `ip` and `port` of the server that you want to connect to.
:param server: An H2OLocalServer instance to connect to (optional).
:param url: Full URL of the server to connect to (can be used instead of `ip` + `port` + `https`).
:param ip: The ip address (or host name) of the server where H2O is running.
:param port: Port number that H2O service is listening to.
:param https: Set to True to connect via https:// instead of http://.
:param verify_ssl_certificates: When using https, setting this to False will disable SSL certificates verification.
:param auth: Either a (username, password) pair for basic authentication, an instance of h2o.auth.SpnegoAuth
or one of the requests.auth authenticator objects.
:param proxy: Proxy server address.
:param cookies: Cookie (or list of) to add to request
:param verbose: Set to False to disable printing connection status messages.
:param config: Connection configuration object encapsulating connection parameters.
:returns: the new :class:`H2OConnection` object.
|
h2o-py/h2o/h2o.py
|
def connect(server=None, url=None, ip=None, port=None, https=None, verify_ssl_certificates=None, auth=None,
proxy=None, cookies=None, verbose=True, config=None):
"""
Connect to an existing H2O server, remote or local.
There are two ways to connect to a server: either pass a `server` parameter containing an instance of
an H2OLocalServer, or specify `ip` and `port` of the server that you want to connect to.
:param server: An H2OLocalServer instance to connect to (optional).
:param url: Full URL of the server to connect to (can be used instead of `ip` + `port` + `https`).
:param ip: The ip address (or host name) of the server where H2O is running.
:param port: Port number that H2O service is listening to.
:param https: Set to True to connect via https:// instead of http://.
:param verify_ssl_certificates: When using https, setting this to False will disable SSL certificates verification.
:param auth: Either a (username, password) pair for basic authentication, an instance of h2o.auth.SpnegoAuth
or one of the requests.auth authenticator objects.
:param proxy: Proxy server address.
:param cookies: Cookie (or list of) to add to request
:param verbose: Set to False to disable printing connection status messages.
:param config: Connection configuration object encapsulating connection parameters.
:returns: the new :class:`H2OConnection` object.
"""
global h2oconn
if config:
if "connect_params" in config:
h2oconn = _connect_with_conf(config["connect_params"])
else:
h2oconn = _connect_with_conf(config)
else:
h2oconn = H2OConnection.open(server=server, url=url, ip=ip, port=port, https=https,
auth=auth, verify_ssl_certificates=verify_ssl_certificates,
proxy=proxy, cookies=cookies,
verbose=verbose)
if verbose:
h2oconn.cluster.show_status()
return h2oconn
|
def connect(server=None, url=None, ip=None, port=None, https=None, verify_ssl_certificates=None, auth=None,
proxy=None, cookies=None, verbose=True, config=None):
"""
Connect to an existing H2O server, remote or local.
There are two ways to connect to a server: either pass a `server` parameter containing an instance of
an H2OLocalServer, or specify `ip` and `port` of the server that you want to connect to.
:param server: An H2OLocalServer instance to connect to (optional).
:param url: Full URL of the server to connect to (can be used instead of `ip` + `port` + `https`).
:param ip: The ip address (or host name) of the server where H2O is running.
:param port: Port number that H2O service is listening to.
:param https: Set to True to connect via https:// instead of http://.
:param verify_ssl_certificates: When using https, setting this to False will disable SSL certificates verification.
:param auth: Either a (username, password) pair for basic authentication, an instance of h2o.auth.SpnegoAuth
or one of the requests.auth authenticator objects.
:param proxy: Proxy server address.
:param cookies: Cookie (or list of) to add to request
:param verbose: Set to False to disable printing connection status messages.
:param config: Connection configuration object encapsulating connection parameters.
:returns: the new :class:`H2OConnection` object.
"""
global h2oconn
if config:
if "connect_params" in config:
h2oconn = _connect_with_conf(config["connect_params"])
else:
h2oconn = _connect_with_conf(config)
else:
h2oconn = H2OConnection.open(server=server, url=url, ip=ip, port=port, https=https,
auth=auth, verify_ssl_certificates=verify_ssl_certificates,
proxy=proxy, cookies=cookies,
verbose=verbose)
if verbose:
h2oconn.cluster.show_status()
return h2oconn
|
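A sketch of the two connection styles described in the docstring (host, port and credentials are placeholders):

import h2o
from h2o.backend import H2OLocalServer

# 1. connect by URL, with basic authentication
conn = h2o.connect(url="https://h2o.example.com:54321",
                   auth=("username", "password"),
                   verify_ssl_certificates=False)

# 2. connect to a freshly started local server object
server = H2OLocalServer.start()
conn = h2o.connect(server=server)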
[
"Connect",
"to",
"an",
"existing",
"H2O",
"server",
"remote",
"or",
"local",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L59-L94
|
[
"def",
"connect",
"(",
"server",
"=",
"None",
",",
"url",
"=",
"None",
",",
"ip",
"=",
"None",
",",
"port",
"=",
"None",
",",
"https",
"=",
"None",
",",
"verify_ssl_certificates",
"=",
"None",
",",
"auth",
"=",
"None",
",",
"proxy",
"=",
"None",
",",
"cookies",
"=",
"None",
",",
"verbose",
"=",
"True",
",",
"config",
"=",
"None",
")",
":",
"global",
"h2oconn",
"if",
"config",
":",
"if",
"\"connect_params\"",
"in",
"config",
":",
"h2oconn",
"=",
"_connect_with_conf",
"(",
"config",
"[",
"\"connect_params\"",
"]",
")",
"else",
":",
"h2oconn",
"=",
"_connect_with_conf",
"(",
"config",
")",
"else",
":",
"h2oconn",
"=",
"H2OConnection",
".",
"open",
"(",
"server",
"=",
"server",
",",
"url",
"=",
"url",
",",
"ip",
"=",
"ip",
",",
"port",
"=",
"port",
",",
"https",
"=",
"https",
",",
"auth",
"=",
"auth",
",",
"verify_ssl_certificates",
"=",
"verify_ssl_certificates",
",",
"proxy",
"=",
"proxy",
",",
"cookies",
"=",
"cookies",
",",
"verbose",
"=",
"verbose",
")",
"if",
"verbose",
":",
"h2oconn",
".",
"cluster",
".",
"show_status",
"(",
")",
"return",
"h2oconn"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
api
|
Perform a REST API request to a previously connected server.
This function is mostly for internal purposes, but may occasionally be useful for direct access to
the backend H2O server. It has the same parameters as :meth:`H2OConnection.request <h2o.backend.H2OConnection.request>`.
|
h2o-py/h2o/h2o.py
|
def api(endpoint, data=None, json=None, filename=None, save_to=None):
"""
Perform a REST API request to a previously connected server.
This function is mostly for internal purposes, but may occasionally be useful for direct access to
the backend H2O server. It has the same parameters as :meth:`H2OConnection.request <h2o.backend.H2OConnection.request>`.
"""
# type checks are performed in H2OConnection class
_check_connection()
return h2oconn.request(endpoint, data=data, json=json, filename=filename, save_to=save_to)
|
def api(endpoint, data=None, json=None, filename=None, save_to=None):
"""
Perform a REST API request to a previously connected server.
This function is mostly for internal purposes, but may occasionally be useful for direct access to
the backend H2O server. It has the same parameters as :meth:`H2OConnection.request <h2o.backend.H2OConnection.request>`.
"""
# type checks are performed in H2OConnection class
_check_connection()
return h2oconn.request(endpoint, data=data, json=json, filename=filename, save_to=save_to)
|
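For example, the cloud-status endpoint can be queried directly; the endpoint string follows the usual "VERB /route" form used by the backend:

import h2o

h2o.connect()                    # or h2o.init()
cloud = h2o.api("GET /3/Cloud")  # the response behaves like a dict
print(cloud["version"])          # "version" is one of the cloud-status fields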
[
"Perform",
"a",
"REST",
"API",
"request",
"to",
"a",
"previously",
"connected",
"server",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L97-L106
|
[
"def",
"api",
"(",
"endpoint",
",",
"data",
"=",
"None",
",",
"json",
"=",
"None",
",",
"filename",
"=",
"None",
",",
"save_to",
"=",
"None",
")",
":",
"# type checks are performed in H2OConnection class",
"_check_connection",
"(",
")",
"return",
"h2oconn",
".",
"request",
"(",
"endpoint",
",",
"data",
"=",
"data",
",",
"json",
"=",
"json",
",",
"filename",
"=",
"filename",
",",
"save_to",
"=",
"save_to",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
version_check
|
Used to verify that h2o-python module and the H2O server are compatible with each other.
|
h2o-py/h2o/h2o.py
|
def version_check():
"""Used to verify that h2o-python module and the H2O server are compatible with each other."""
from .__init__ import __version__ as ver_pkg
ci = h2oconn.cluster
if not ci:
raise H2OConnectionError("Connection not initialized. Did you run h2o.connect()?")
ver_h2o = ci.version
if ver_pkg == "SUBST_PROJECT_VERSION": ver_pkg = "UNKNOWN"
if str(ver_h2o) != str(ver_pkg):
branch_name_h2o = ci.branch_name
build_number_h2o = ci.build_number
if build_number_h2o is None or build_number_h2o == "unknown":
raise H2OConnectionError(
"Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"Upgrade H2O and h2o-Python to latest stable version - "
"http://h2o-release.s3.amazonaws.com/h2o/latest_stable.html"
"".format(ver_h2o, ver_pkg))
elif build_number_h2o == "99999":
raise H2OConnectionError(
"Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"This is a developer build, please contact your developer."
"".format(ver_h2o, ver_pkg))
else:
raise H2OConnectionError(
"Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"Install the matching h2o-Python version from - "
"http://h2o-release.s3.amazonaws.com/h2o/{2}/{3}/index.html."
"".format(ver_h2o, ver_pkg, branch_name_h2o, build_number_h2o))
# Check age of the install
if ci.build_too_old:
print("Warning: Your H2O cluster version is too old ({})! Please download and install the latest "
"version from http://h2o.ai/download/".format(ci.build_age))
|
def version_check():
"""Used to verify that h2o-python module and the H2O server are compatible with each other."""
from .__init__ import __version__ as ver_pkg
ci = h2oconn.cluster
if not ci:
raise H2OConnectionError("Connection not initialized. Did you run h2o.connect()?")
ver_h2o = ci.version
if ver_pkg == "SUBST_PROJECT_VERSION": ver_pkg = "UNKNOWN"
if str(ver_h2o) != str(ver_pkg):
branch_name_h2o = ci.branch_name
build_number_h2o = ci.build_number
if build_number_h2o is None or build_number_h2o == "unknown":
raise H2OConnectionError(
"Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"Upgrade H2O and h2o-Python to latest stable version - "
"http://h2o-release.s3.amazonaws.com/h2o/latest_stable.html"
"".format(ver_h2o, ver_pkg))
elif build_number_h2o == "99999":
raise H2OConnectionError(
"Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"This is a developer build, please contact your developer."
"".format(ver_h2o, ver_pkg))
else:
raise H2OConnectionError(
"Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"Install the matching h2o-Python version from - "
"http://h2o-release.s3.amazonaws.com/h2o/{2}/{3}/index.html."
"".format(ver_h2o, ver_pkg, branch_name_h2o, build_number_h2o))
# Check age of the install
if ci.build_too_old:
print("Warning: Your H2O cluster version is too old ({})! Please download and install the latest "
"version from http://h2o.ai/download/".format(ci.build_age))
|
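A small sketch of calling it explicitly after connecting (assuming it is re-exported at package level like connect and api; otherwise it is reachable via h2o.h2o.version_check):

import h2o
from h2o.exceptions import H2OConnectionError

h2o.connect()
try:
    h2o.version_check()
except H2OConnectionError as e:
    print("client/server version mismatch:", e)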
[
"Used",
"to",
"verify",
"that",
"h2o",
"-",
"python",
"module",
"and",
"the",
"H2O",
"server",
"are",
"compatible",
"with",
"each",
"other",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L115-L146
|
[
"def",
"version_check",
"(",
")",
":",
"from",
".",
"__init__",
"import",
"__version__",
"as",
"ver_pkg",
"ci",
"=",
"h2oconn",
".",
"cluster",
"if",
"not",
"ci",
":",
"raise",
"H2OConnectionError",
"(",
"\"Connection not initialized. Did you run h2o.connect()?\"",
")",
"ver_h2o",
"=",
"ci",
".",
"version",
"if",
"ver_pkg",
"==",
"\"SUBST_PROJECT_VERSION\"",
":",
"ver_pkg",
"=",
"\"UNKNOWN\"",
"if",
"str",
"(",
"ver_h2o",
")",
"!=",
"str",
"(",
"ver_pkg",
")",
":",
"branch_name_h2o",
"=",
"ci",
".",
"branch_name",
"build_number_h2o",
"=",
"ci",
".",
"build_number",
"if",
"build_number_h2o",
"is",
"None",
"or",
"build_number_h2o",
"==",
"\"unknown\"",
":",
"raise",
"H2OConnectionError",
"(",
"\"Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. \"",
"\"Upgrade H2O and h2o-Python to latest stable version - \"",
"\"http://h2o-release.s3.amazonaws.com/h2o/latest_stable.html\"",
"\"\"",
".",
"format",
"(",
"ver_h2o",
",",
"ver_pkg",
")",
")",
"elif",
"build_number_h2o",
"==",
"\"99999\"",
":",
"raise",
"H2OConnectionError",
"(",
"\"Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. \"",
"\"This is a developer build, please contact your developer.\"",
"\"\"",
".",
"format",
"(",
"ver_h2o",
",",
"ver_pkg",
")",
")",
"else",
":",
"raise",
"H2OConnectionError",
"(",
"\"Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. \"",
"\"Install the matching h2o-Python version from - \"",
"\"http://h2o-release.s3.amazonaws.com/h2o/{2}/{3}/index.html.\"",
"\"\"",
".",
"format",
"(",
"ver_h2o",
",",
"ver_pkg",
",",
"branch_name_h2o",
",",
"build_number_h2o",
")",
")",
"# Check age of the install",
"if",
"ci",
".",
"build_too_old",
":",
"print",
"(",
"\"Warning: Your H2O cluster version is too old ({})! Please download and install the latest \"",
"\"version from http://h2o.ai/download/\"",
".",
"format",
"(",
"ci",
".",
"build_age",
")",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
init
|
Attempt to connect to a local server, or if not successful start a new server and connect to it.
:param url: Full URL of the server to connect to (can be used instead of `ip` + `port` + `https`).
:param ip: The ip address (or host name) of the server where H2O is running.
:param port: Port number that H2O service is listening to.
:param name: Cluster name. If None while connecting to an existing cluster it will not check the cluster name.
If set, the client will connect only if the target cluster name matches. If no instance is found and a local one is started,
this will be used as the cluster name, or a random one will be generated if set to None.
:param https: Set to True to connect via https:// instead of http://.
:param insecure: When using https, setting this to True will disable SSL certificates verification.
:param username: Username for basic authentication.
:param password: Password for basic authentication.
:param cookies: Cookie (or list of) to add to each request.
:param proxy: Proxy server address.
:param start_h2o: If False, do not attempt to start an h2o server when connection to an existing one failed.
:param nthreads: "Number of threads" option when launching a new h2o server.
:param ice_root: Directory for temporary files for the new h2o server.
:param log_dir: Directory for H2O logs to be stored if a new instance is started. Ignored if connecting to an existing node.
:param log_level: The logger level for H2O if a new instance is started. One of TRACE,DEBUG,INFO,WARN,ERRR,FATA. Default is INFO. Ignored if connecting to an existing node.
:param enable_assertions: Enable assertions in Java for the new h2o server.
:param max_mem_size: Maximum memory to use for the new h2o server. Integer input will be evaluated as gigabytes. Other units can be specified by passing in a string (e.g. "160M" for 160 megabytes).
:param min_mem_size: Minimum memory to use for the new h2o server. Integer input will be evaluated as gigabytes. Other units can be specified by passing in a string (e.g. "160M" for 160 megabytes).
:param strict_version_check: If True, an error will be raised if the client and server versions don't match.
:param ignore_config: Indicates whether processing of a .h2oconfig file should be conducted or not. Default value is False.
:param extra_classpath: List of paths to libraries that should be included on the Java classpath when starting H2O from Python.
:param kwargs: (all other deprecated attributes)
:param jvm_custom_args: Custom, user-defined arguments for the JVM in which H2O is instantiated. Ignored if there is an instance of H2O already running and the client connects to it.
|
h2o-py/h2o/h2o.py
|
def init(url=None, ip=None, port=None, name=None, https=None, insecure=None, username=None, password=None,
cookies=None, proxy=None, start_h2o=True, nthreads=-1, ice_root=None, log_dir=None, log_level=None,
enable_assertions=True, max_mem_size=None, min_mem_size=None, strict_version_check=None, ignore_config=False,
extra_classpath=None, jvm_custom_args=None, bind_to_localhost=True, **kwargs):
"""
Attempt to connect to a local server, or if not successful start a new server and connect to it.
:param url: Full URL of the server to connect to (can be used instead of `ip` + `port` + `https`).
:param ip: The ip address (or host name) of the server where H2O is running.
:param port: Port number that H2O service is listening to.
:param name: Cluster name. If None while connecting to an existing cluster it will not check the cluster name.
If set, the client will connect only if the target cluster name matches. If no instance is found and a local one is started,
this will be used as the cluster name, or a random one will be generated if set to None.
:param https: Set to True to connect via https:// instead of http://.
:param insecure: When using https, setting this to True will disable SSL certificates verification.
:param username: Username for basic authentication.
:param password: Password for basic authentication.
:param cookies: Cookie (or list of) to add to each request.
:param proxy: Proxy server address.
:param start_h2o: If False, do not attempt to start an h2o server when connection to an existing one failed.
:param nthreads: "Number of threads" option when launching a new h2o server.
:param ice_root: Directory for temporary files for the new h2o server.
:param log_dir: Directory for H2O logs to be stored if a new instance is started. Ignored if connecting to an existing node.
:param log_level: The logger level for H2O if a new instance is started. One of TRACE,DEBUG,INFO,WARN,ERRR,FATA. Default is INFO. Ignored if connecting to an existing node.
:param enable_assertions: Enable assertions in Java for the new h2o server.
:param max_mem_size: Maximum memory to use for the new h2o server. Integer input will be evaluated as gigabytes. Other units can be specified by passing in a string (e.g. "160M" for 160 megabytes).
:param min_mem_size: Minimum memory to use for the new h2o server. Integer input will be evaluated as gigabytes. Other units can be specified by passing in a string (e.g. "160M" for 160 megabytes).
:param strict_version_check: If True, an error will be raised if the client and server versions don't match.
:param ignore_config: Indicates whether processing of a .h2oconfig file should be conducted or not. Default value is False.
:param extra_classpath: List of paths to libraries that should be included on the Java classpath when starting H2O from Python.
:param kwargs: (all other deprecated attributes)
:param jvm_custom_args: Custom, user-defined arguments for the JVM in which H2O is instantiated. Ignored if there is an instance of H2O already running and the client connects to it.
"""
global h2oconn
assert_is_type(url, str, None)
assert_is_type(ip, str, None)
assert_is_type(port, int, str, None)
assert_is_type(name, str, None)
assert_is_type(https, bool, None)
assert_is_type(insecure, bool, None)
assert_is_type(username, str, None)
assert_is_type(password, str, None)
assert_is_type(cookies, str, [str], None)
assert_is_type(proxy, {str: str}, None)
assert_is_type(start_h2o, bool, None)
assert_is_type(nthreads, int)
assert_is_type(ice_root, str, None)
assert_is_type(log_dir, str, None)
assert_is_type(log_level, str, None)
assert_satisfies(log_level, log_level in [None, "TRACE", "DEBUG", "INFO", "WARN", "ERRR", "FATA"])
assert_is_type(enable_assertions, bool)
assert_is_type(max_mem_size, int, str, None)
assert_is_type(min_mem_size, int, str, None)
assert_is_type(strict_version_check, bool, None)
assert_is_type(extra_classpath, [str], None)
assert_is_type(jvm_custom_args, [str], None)
assert_is_type(bind_to_localhost, bool)
assert_is_type(kwargs, {"proxies": {str: str}, "max_mem_size_GB": int, "min_mem_size_GB": int,
"force_connect": bool, "as_port": bool})
def get_mem_size(mmint, mmgb):
if not mmint: # treat 0 and "" as if they were None
if mmgb is None: return None
return mmgb << 30
if is_type(mmint, int):
# If the user gives some small number just assume it's in Gigabytes...
if mmint < 1000: return mmint << 30
return mmint
if is_type(mmint, str):
last = mmint[-1].upper()
num = mmint[:-1]
if not (num.isdigit() and last in "MGT"):
raise H2OValueError("Wrong format for a *_memory_size argument: %s (should be a number followed by "
"a suffix 'M', 'G' or 'T')" % mmint)
if last == "T": return int(num) << 40
if last == "G": return int(num) << 30
if last == "M": return int(num) << 20
scheme = "https" if https else "http"
proxy = proxy[scheme] if proxy is not None and scheme in proxy else \
kwargs["proxies"][scheme] if "proxies" in kwargs and scheme in kwargs["proxies"] else None
mmax = get_mem_size(max_mem_size, kwargs.get("max_mem_size_GB"))
mmin = get_mem_size(min_mem_size, kwargs.get("min_mem_size_GB"))
auth = (username, password) if username and password else None
check_version = True
verify_ssl_certificates = True
# Apply the config file if ignore_config=False
if not ignore_config:
config = H2OConfigReader.get_config()
if url is None and ip is None and port is None and https is None and "init.url" in config:
url = config["init.url"]
if proxy is None and "init.proxy" in config:
proxy = config["init.proxy"]
if cookies is None and "init.cookies" in config:
cookies = config["init.cookies"].split(";")
if auth is None and "init.username" in config and "init.password" in config:
auth = (config["init.username"], config["init.password"])
if strict_version_check is None:
if "init.check_version" in config:
check_version = config["init.check_version"].lower() != "false"
elif os.environ.get("H2O_DISABLE_STRICT_VERSION_CHECK"):
check_version = False
else:
check_version = strict_version_check
if insecure is None:
if "init.verify_ssl_certificates" in config:
verify_ssl_certificates = config["init.verify_ssl_certificates"].lower() != "false"
else:
verify_ssl_certificates = not insecure
if not start_h2o:
print("Warning: if you don't want to start local H2O server, then use of `h2o.connect()` is preferred.")
try:
h2oconn = H2OConnection.open(url=url, ip=ip, port=port, name=name, https=https,
verify_ssl_certificates=verify_ssl_certificates,
auth=auth, proxy=proxy,cookies=cookies, verbose=True,
_msgs=("Checking whether there is an H2O instance running at {url} ",
"connected.", "not found."))
except H2OConnectionError:
# Backward compatibility: in init() port parameter really meant "baseport" when starting a local server...
if port and not str(port).endswith("+") and not kwargs.get("as_port", False):
port = str(port) + "+"
if not start_h2o: raise
if ip and not (ip == "localhost" or ip == "127.0.0.1"):
raise H2OConnectionError('Can only start H2O launcher if IP address is localhost.')
hs = H2OLocalServer.start(nthreads=nthreads, enable_assertions=enable_assertions, max_mem_size=mmax,
min_mem_size=mmin, ice_root=ice_root, log_dir=log_dir, log_level=log_level,
port=port, name=name,
extra_classpath=extra_classpath, jvm_custom_args=jvm_custom_args,
bind_to_localhost=bind_to_localhost)
h2oconn = H2OConnection.open(server=hs, https=https, verify_ssl_certificates=not insecure,
auth=auth, proxy=proxy,cookies=cookies, verbose=True)
if check_version:
version_check()
h2oconn.cluster.timezone = "UTC"
h2oconn.cluster.show_status()
|
def init(url=None, ip=None, port=None, name=None, https=None, insecure=None, username=None, password=None,
cookies=None, proxy=None, start_h2o=True, nthreads=-1, ice_root=None, log_dir=None, log_level=None,
enable_assertions=True, max_mem_size=None, min_mem_size=None, strict_version_check=None, ignore_config=False,
extra_classpath=None, jvm_custom_args=None, bind_to_localhost=True, **kwargs):
"""
Attempt to connect to a local server, or if not successful start a new server and connect to it.
:param url: Full URL of the server to connect to (can be used instead of `ip` + `port` + `https`).
:param ip: The ip address (or host name) of the server where H2O is running.
:param port: Port number that H2O service is listening to.
:param name: Cluster name. If None while connecting to an existing cluster it will not check the cluster name.
If set, the client will connect only if the target cluster name matches. If no instance is found and a local one is started,
this will be used as the cluster name, or a random one will be generated if set to None.
:param https: Set to True to connect via https:// instead of http://.
:param insecure: When using https, setting this to True will disable SSL certificates verification.
:param username: Username for basic authentication.
:param password: Password for basic authentication.
:param cookies: Cookie (or list of) to add to each request.
:param proxy: Proxy server address.
:param start_h2o: If False, do not attempt to start an h2o server when connection to an existing one failed.
:param nthreads: "Number of threads" option when launching a new h2o server.
:param ice_root: Directory for temporary files for the new h2o server.
:param log_dir: Directory for H2O logs to be stored if a new instance is started. Ignored if connecting to an existing node.
:param log_level: The logger level for H2O if a new instance is started. One of TRACE,DEBUG,INFO,WARN,ERRR,FATA. Default is INFO. Ignored if connecting to an existing node.
:param enable_assertions: Enable assertions in Java for the new h2o server.
:param max_mem_size: Maximum memory to use for the new h2o server. Integer input will be evaluated as gigabytes. Other units can be specified by passing in a string (e.g. "160M" for 160 megabytes).
:param min_mem_size: Minimum memory to use for the new h2o server. Integer input will be evaluated as gigabytes. Other units can be specified by passing in a string (e.g. "160M" for 160 megabytes).
:param strict_version_check: If True, an error will be raised if the client and server versions don't match.
:param ignore_config: Indicates whether processing of a .h2oconfig file should be conducted or not. Default value is False.
:param extra_classpath: List of paths to libraries that should be included on the Java classpath when starting H2O from Python.
:param kwargs: (all other deprecated attributes)
:param jvm_custom_args: Custom, user-defined arguments for the JVM in which H2O is instantiated. Ignored if there is an instance of H2O already running and the client connects to it.
"""
global h2oconn
assert_is_type(url, str, None)
assert_is_type(ip, str, None)
assert_is_type(port, int, str, None)
assert_is_type(name, str, None)
assert_is_type(https, bool, None)
assert_is_type(insecure, bool, None)
assert_is_type(username, str, None)
assert_is_type(password, str, None)
assert_is_type(cookies, str, [str], None)
assert_is_type(proxy, {str: str}, None)
assert_is_type(start_h2o, bool, None)
assert_is_type(nthreads, int)
assert_is_type(ice_root, str, None)
assert_is_type(log_dir, str, None)
assert_is_type(log_level, str, None)
assert_satisfies(log_level, log_level in [None, "TRACE", "DEBUG", "INFO", "WARN", "ERRR", "FATA"])
assert_is_type(enable_assertions, bool)
assert_is_type(max_mem_size, int, str, None)
assert_is_type(min_mem_size, int, str, None)
assert_is_type(strict_version_check, bool, None)
assert_is_type(extra_classpath, [str], None)
assert_is_type(jvm_custom_args, [str], None)
assert_is_type(bind_to_localhost, bool)
assert_is_type(kwargs, {"proxies": {str: str}, "max_mem_size_GB": int, "min_mem_size_GB": int,
"force_connect": bool, "as_port": bool})
def get_mem_size(mmint, mmgb):
if not mmint: # treat 0 and "" as if they were None
if mmgb is None: return None
return mmgb << 30
if is_type(mmint, int):
# If the user gives some small number just assume it's in Gigabytes...
if mmint < 1000: return mmint << 30
return mmint
if is_type(mmint, str):
last = mmint[-1].upper()
num = mmint[:-1]
if not (num.isdigit() and last in "MGT"):
raise H2OValueError("Wrong format for a *_memory_size argument: %s (should be a number followed by "
"a suffix 'M', 'G' or 'T')" % mmint)
if last == "T": return int(num) << 40
if last == "G": return int(num) << 30
if last == "M": return int(num) << 20
scheme = "https" if https else "http"
proxy = proxy[scheme] if proxy is not None and scheme in proxy else \
kwargs["proxies"][scheme] if "proxies" in kwargs and scheme in kwargs["proxies"] else None
mmax = get_mem_size(max_mem_size, kwargs.get("max_mem_size_GB"))
mmin = get_mem_size(min_mem_size, kwargs.get("min_mem_size_GB"))
auth = (username, password) if username and password else None
check_version = True
verify_ssl_certificates = True
# Apply the config file if ignore_config=False
if not ignore_config:
config = H2OConfigReader.get_config()
if url is None and ip is None and port is None and https is None and "init.url" in config:
url = config["init.url"]
if proxy is None and "init.proxy" in config:
proxy = config["init.proxy"]
if cookies is None and "init.cookies" in config:
cookies = config["init.cookies"].split(";")
if auth is None and "init.username" in config and "init.password" in config:
auth = (config["init.username"], config["init.password"])
if strict_version_check is None:
if "init.check_version" in config:
check_version = config["init.check_version"].lower() != "false"
elif os.environ.get("H2O_DISABLE_STRICT_VERSION_CHECK"):
check_version = False
else:
check_version = strict_version_check
if insecure is None:
if "init.verify_ssl_certificates" in config:
verify_ssl_certificates = config["init.verify_ssl_certificates"].lower() != "false"
else:
verify_ssl_certificates = not insecure
if not start_h2o:
print("Warning: if you don't want to start local H2O server, then use of `h2o.connect()` is preferred.")
try:
h2oconn = H2OConnection.open(url=url, ip=ip, port=port, name=name, https=https,
verify_ssl_certificates=verify_ssl_certificates,
auth=auth, proxy=proxy,cookies=cookies, verbose=True,
_msgs=("Checking whether there is an H2O instance running at {url} ",
"connected.", "not found."))
except H2OConnectionError:
# Backward compatibility: in init() port parameter really meant "baseport" when starting a local server...
if port and not str(port).endswith("+") and not kwargs.get("as_port", False):
port = str(port) + "+"
if not start_h2o: raise
if ip and not (ip == "localhost" or ip == "127.0.0.1"):
raise H2OConnectionError('Can only start H2O launcher if IP address is localhost.')
hs = H2OLocalServer.start(nthreads=nthreads, enable_assertions=enable_assertions, max_mem_size=mmax,
min_mem_size=mmin, ice_root=ice_root, log_dir=log_dir, log_level=log_level,
port=port, name=name,
extra_classpath=extra_classpath, jvm_custom_args=jvm_custom_args,
bind_to_localhost=bind_to_localhost)
h2oconn = H2OConnection.open(server=hs, https=https, verify_ssl_certificates=not insecure,
auth=auth, proxy=proxy,cookies=cookies, verbose=True)
if check_version:
version_check()
h2oconn.cluster.timezone = "UTC"
h2oconn.cluster.show_status()
|
[
"Attempt",
"to",
"connect",
"to",
"a",
"local",
"server",
"or",
"if",
"not",
"successful",
"start",
"a",
"new",
"server",
"and",
"connect",
"to",
"it",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L149-L285
|
[
"def",
"init",
"(",
"url",
"=",
"None",
",",
"ip",
"=",
"None",
",",
"port",
"=",
"None",
",",
"name",
"=",
"None",
",",
"https",
"=",
"None",
",",
"insecure",
"=",
"None",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
",",
"cookies",
"=",
"None",
",",
"proxy",
"=",
"None",
",",
"start_h2o",
"=",
"True",
",",
"nthreads",
"=",
"-",
"1",
",",
"ice_root",
"=",
"None",
",",
"log_dir",
"=",
"None",
",",
"log_level",
"=",
"None",
",",
"enable_assertions",
"=",
"True",
",",
"max_mem_size",
"=",
"None",
",",
"min_mem_size",
"=",
"None",
",",
"strict_version_check",
"=",
"None",
",",
"ignore_config",
"=",
"False",
",",
"extra_classpath",
"=",
"None",
",",
"jvm_custom_args",
"=",
"None",
",",
"bind_to_localhost",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"global",
"h2oconn",
"assert_is_type",
"(",
"url",
",",
"str",
",",
"None",
")",
"assert_is_type",
"(",
"ip",
",",
"str",
",",
"None",
")",
"assert_is_type",
"(",
"port",
",",
"int",
",",
"str",
",",
"None",
")",
"assert_is_type",
"(",
"name",
",",
"str",
",",
"None",
")",
"assert_is_type",
"(",
"https",
",",
"bool",
",",
"None",
")",
"assert_is_type",
"(",
"insecure",
",",
"bool",
",",
"None",
")",
"assert_is_type",
"(",
"username",
",",
"str",
",",
"None",
")",
"assert_is_type",
"(",
"password",
",",
"str",
",",
"None",
")",
"assert_is_type",
"(",
"cookies",
",",
"str",
",",
"[",
"str",
"]",
",",
"None",
")",
"assert_is_type",
"(",
"proxy",
",",
"{",
"str",
":",
"str",
"}",
",",
"None",
")",
"assert_is_type",
"(",
"start_h2o",
",",
"bool",
",",
"None",
")",
"assert_is_type",
"(",
"nthreads",
",",
"int",
")",
"assert_is_type",
"(",
"ice_root",
",",
"str",
",",
"None",
")",
"assert_is_type",
"(",
"log_dir",
",",
"str",
",",
"None",
")",
"assert_is_type",
"(",
"log_level",
",",
"str",
",",
"None",
")",
"assert_satisfies",
"(",
"log_level",
",",
"log_level",
"in",
"[",
"None",
",",
"\"TRACE\"",
",",
"\"DEBUG\"",
",",
"\"INFO\"",
",",
"\"WARN\"",
",",
"\"ERRR\"",
",",
"\"FATA\"",
"]",
")",
"assert_is_type",
"(",
"enable_assertions",
",",
"bool",
")",
"assert_is_type",
"(",
"max_mem_size",
",",
"int",
",",
"str",
",",
"None",
")",
"assert_is_type",
"(",
"min_mem_size",
",",
"int",
",",
"str",
",",
"None",
")",
"assert_is_type",
"(",
"strict_version_check",
",",
"bool",
",",
"None",
")",
"assert_is_type",
"(",
"extra_classpath",
",",
"[",
"str",
"]",
",",
"None",
")",
"assert_is_type",
"(",
"jvm_custom_args",
",",
"[",
"str",
"]",
",",
"None",
")",
"assert_is_type",
"(",
"bind_to_localhost",
",",
"bool",
")",
"assert_is_type",
"(",
"kwargs",
",",
"{",
"\"proxies\"",
":",
"{",
"str",
":",
"str",
"}",
",",
"\"max_mem_size_GB\"",
":",
"int",
",",
"\"min_mem_size_GB\"",
":",
"int",
",",
"\"force_connect\"",
":",
"bool",
",",
"\"as_port\"",
":",
"bool",
"}",
")",
"def",
"get_mem_size",
"(",
"mmint",
",",
"mmgb",
")",
":",
"if",
"not",
"mmint",
":",
"# treat 0 and \"\" as if they were None",
"if",
"mmgb",
"is",
"None",
":",
"return",
"None",
"return",
"mmgb",
"<<",
"30",
"if",
"is_type",
"(",
"mmint",
",",
"int",
")",
":",
"# If the user gives some small number just assume it's in Gigabytes...",
"if",
"mmint",
"<",
"1000",
":",
"return",
"mmint",
"<<",
"30",
"return",
"mmint",
"if",
"is_type",
"(",
"mmint",
",",
"str",
")",
":",
"last",
"=",
"mmint",
"[",
"-",
"1",
"]",
".",
"upper",
"(",
")",
"num",
"=",
"mmint",
"[",
":",
"-",
"1",
"]",
"if",
"not",
"(",
"num",
".",
"isdigit",
"(",
")",
"and",
"last",
"in",
"\"MGT\"",
")",
":",
"raise",
"H2OValueError",
"(",
"\"Wrong format for a *_memory_size argument: %s (should be a number followed by \"",
"\"a suffix 'M', 'G' or 'T')\"",
"%",
"mmint",
")",
"if",
"last",
"==",
"\"T\"",
":",
"return",
"int",
"(",
"num",
")",
"<<",
"40",
"if",
"last",
"==",
"\"G\"",
":",
"return",
"int",
"(",
"num",
")",
"<<",
"30",
"if",
"last",
"==",
"\"M\"",
":",
"return",
"int",
"(",
"num",
")",
"<<",
"20",
"scheme",
"=",
"\"https\"",
"if",
"https",
"else",
"\"http\"",
"proxy",
"=",
"proxy",
"[",
"scheme",
"]",
"if",
"proxy",
"is",
"not",
"None",
"and",
"scheme",
"in",
"proxy",
"else",
"kwargs",
"[",
"\"proxies\"",
"]",
"[",
"scheme",
"]",
"if",
"\"proxies\"",
"in",
"kwargs",
"and",
"scheme",
"in",
"kwargs",
"[",
"\"proxies\"",
"]",
"else",
"None",
"mmax",
"=",
"get_mem_size",
"(",
"max_mem_size",
",",
"kwargs",
".",
"get",
"(",
"\"max_mem_size_GB\"",
")",
")",
"mmin",
"=",
"get_mem_size",
"(",
"min_mem_size",
",",
"kwargs",
".",
"get",
"(",
"\"min_mem_size_GB\"",
")",
")",
"auth",
"=",
"(",
"username",
",",
"password",
")",
"if",
"username",
"and",
"password",
"else",
"None",
"check_version",
"=",
"True",
"verify_ssl_certificates",
"=",
"True",
"# Apply the config file if ignore_config=False",
"if",
"not",
"ignore_config",
":",
"config",
"=",
"H2OConfigReader",
".",
"get_config",
"(",
")",
"if",
"url",
"is",
"None",
"and",
"ip",
"is",
"None",
"and",
"port",
"is",
"None",
"and",
"https",
"is",
"None",
"and",
"\"init.url\"",
"in",
"config",
":",
"url",
"=",
"config",
"[",
"\"init.url\"",
"]",
"if",
"proxy",
"is",
"None",
"and",
"\"init.proxy\"",
"in",
"config",
":",
"proxy",
"=",
"config",
"[",
"\"init.proxy\"",
"]",
"if",
"cookies",
"is",
"None",
"and",
"\"init.cookies\"",
"in",
"config",
":",
"cookies",
"=",
"config",
"[",
"\"init.cookies\"",
"]",
".",
"split",
"(",
"\";\"",
")",
"if",
"auth",
"is",
"None",
"and",
"\"init.username\"",
"in",
"config",
"and",
"\"init.password\"",
"in",
"config",
":",
"auth",
"=",
"(",
"config",
"[",
"\"init.username\"",
"]",
",",
"config",
"[",
"\"init.password\"",
"]",
")",
"if",
"strict_version_check",
"is",
"None",
":",
"if",
"\"init.check_version\"",
"in",
"config",
":",
"check_version",
"=",
"config",
"[",
"\"init.check_version\"",
"]",
".",
"lower",
"(",
")",
"!=",
"\"false\"",
"elif",
"os",
".",
"environ",
".",
"get",
"(",
"\"H2O_DISABLE_STRICT_VERSION_CHECK\"",
")",
":",
"check_version",
"=",
"False",
"else",
":",
"check_version",
"=",
"strict_version_check",
"if",
"insecure",
"is",
"None",
":",
"if",
"\"init.verify_ssl_certificates\"",
"in",
"config",
":",
"verify_ssl_certificates",
"=",
"config",
"[",
"\"init.verify_ssl_certificates\"",
"]",
".",
"lower",
"(",
")",
"!=",
"\"false\"",
"else",
":",
"verify_ssl_certificates",
"=",
"not",
"insecure",
"if",
"not",
"start_h2o",
":",
"print",
"(",
"\"Warning: if you don't want to start local H2O server, then use of `h2o.connect()` is preferred.\"",
")",
"try",
":",
"h2oconn",
"=",
"H2OConnection",
".",
"open",
"(",
"url",
"=",
"url",
",",
"ip",
"=",
"ip",
",",
"port",
"=",
"port",
",",
"name",
"=",
"name",
",",
"https",
"=",
"https",
",",
"verify_ssl_certificates",
"=",
"verify_ssl_certificates",
",",
"auth",
"=",
"auth",
",",
"proxy",
"=",
"proxy",
",",
"cookies",
"=",
"cookies",
",",
"verbose",
"=",
"True",
",",
"_msgs",
"=",
"(",
"\"Checking whether there is an H2O instance running at {url} \"",
",",
"\"connected.\"",
",",
"\"not found.\"",
")",
")",
"except",
"H2OConnectionError",
":",
"# Backward compatibility: in init() port parameter really meant \"baseport\" when starting a local server...",
"if",
"port",
"and",
"not",
"str",
"(",
"port",
")",
".",
"endswith",
"(",
"\"+\"",
")",
"and",
"not",
"kwargs",
".",
"get",
"(",
"\"as_port\"",
",",
"False",
")",
":",
"port",
"=",
"str",
"(",
"port",
")",
"+",
"\"+\"",
"if",
"not",
"start_h2o",
":",
"raise",
"if",
"ip",
"and",
"not",
"(",
"ip",
"==",
"\"localhost\"",
"or",
"ip",
"==",
"\"127.0.0.1\"",
")",
":",
"raise",
"H2OConnectionError",
"(",
"'Can only start H2O launcher if IP address is localhost.'",
")",
"hs",
"=",
"H2OLocalServer",
".",
"start",
"(",
"nthreads",
"=",
"nthreads",
",",
"enable_assertions",
"=",
"enable_assertions",
",",
"max_mem_size",
"=",
"mmax",
",",
"min_mem_size",
"=",
"mmin",
",",
"ice_root",
"=",
"ice_root",
",",
"log_dir",
"=",
"log_dir",
",",
"log_level",
"=",
"log_level",
",",
"port",
"=",
"port",
",",
"name",
"=",
"name",
",",
"extra_classpath",
"=",
"extra_classpath",
",",
"jvm_custom_args",
"=",
"jvm_custom_args",
",",
"bind_to_localhost",
"=",
"bind_to_localhost",
")",
"h2oconn",
"=",
"H2OConnection",
".",
"open",
"(",
"server",
"=",
"hs",
",",
"https",
"=",
"https",
",",
"verify_ssl_certificates",
"=",
"not",
"insecure",
",",
"auth",
"=",
"auth",
",",
"proxy",
"=",
"proxy",
",",
"cookies",
"=",
"cookies",
",",
"verbose",
"=",
"True",
")",
"if",
"check_version",
":",
"version_check",
"(",
")",
"h2oconn",
".",
"cluster",
".",
"timezone",
"=",
"\"UTC\"",
"h2oconn",
".",
"cluster",
".",
"show_status",
"(",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
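A brief, hedged usage sketch for h2o.init() as documented in the row above; the thread count, memory cap, and port are illustrative assumptions, and only parameters described above are used:

import h2o

# Start (or attach to) a local H2O server with 4 threads and a 2 GB heap cap;
# an integer max_mem_size is read as gigabytes, strings such as "512M" select other units.
h2o.init(nthreads=4, max_mem_size=2, port=54321, log_level="INFO")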
test
|
lazy_import
|
Import a single file or collection of files.
:param path: A path to a data file (remote or local).
:param pattern: Character string containing a regular expression to match file(s) in the folder.
:returns: either a :class:`H2OFrame` with the content of the provided file, or a list of such frames if
importing multiple files.
|
h2o-py/h2o/h2o.py
|
def lazy_import(path, pattern=None):
"""
Import a single file or collection of files.
:param path: A path to a data file (remote or local).
:param pattern: Character string containing a regular expression to match file(s) in the folder.
:returns: either a :class:`H2OFrame` with the content of the provided file, or a list of such frames if
importing multiple files.
"""
assert_is_type(path, str, [str])
assert_is_type(pattern, str, None)
paths = [path] if is_type(path, str) else path
return _import_multi(paths, pattern)
|
def lazy_import(path, pattern=None):
"""
Import a single file or collection of files.
:param path: A path to a data file (remote or local).
:param pattern: Character string containing a regular expression to match file(s) in the folder.
:returns: either a :class:`H2OFrame` with the content of the provided file, or a list of such frames if
importing multiple files.
"""
assert_is_type(path, str, [str])
assert_is_type(pattern, str, None)
paths = [path] if is_type(path, str) else path
return _import_multi(paths, pattern)
|
[
"Import",
"a",
"single",
"file",
"or",
"collection",
"of",
"files",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L287-L299
|
[
"def",
"lazy_import",
"(",
"path",
",",
"pattern",
"=",
"None",
")",
":",
"assert_is_type",
"(",
"path",
",",
"str",
",",
"[",
"str",
"]",
")",
"assert_is_type",
"(",
"pattern",
",",
"str",
",",
"None",
")",
"paths",
"=",
"[",
"path",
"]",
"if",
"is_type",
"(",
"path",
",",
"str",
")",
"else",
"path",
"return",
"_import_multi",
"(",
"paths",
",",
"pattern",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
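A short, hedged sketch for lazy_import from the row above; the file path, folder, and regex pattern are hypothetical:

import h2o

# Register a single server-visible file without parsing it yet.
single = h2o.lazy_import("/data/iris/iris_train.csv")

# Register every file in a folder whose name matches the regex.
batch = h2o.lazy_import("/data/iris", pattern=r"iris_.*\.csv")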
test
|
upload_file
|
Upload a dataset from the provided local path to the H2O cluster.
Does a single-threaded push to H2O. Also see :meth:`import_file`.
:param path: A path specifying the location of the data to upload.
:param destination_frame: The unique hex key assigned to the imported file. If none is given, a key will
be automatically generated.
:param header: -1 means the first line is data, 0 means guess, 1 means first line is header.
:param sep: The field separator character. Values on each line of the file are separated by
this character. If not provided, the parser will automatically detect the separator.
:param col_names: A list of column names for the file.
:param col_types: A list of types or a dictionary of column names to types to specify whether columns
should be forced to a certain type upon import parsing. If a list, the types for elements that are
None will be guessed. The possible types a column may have are:
- "unknown" - this will force the column to be parsed as all NA
- "uuid" - the values in the column must be true UUID or will be parsed as NA
- "string" - force the column to be parsed as a string
- "numeric" - force the column to be parsed as numeric. H2O will handle the compression of the numeric
data in the optimal manner.
- "enum" - force the column to be parsed as a categorical column.
- "time" - force the column to be parsed as a time column. H2O will attempt to parse the following
list of date time formats: (date) "yyyy-MM-dd", "yyyy MM dd", "dd-MMM-yy", "dd MMM yy", (time)
"HH:mm:ss", "HH:mm:ss:SSS", "HH:mm:ss:SSSnnnnnn", "HH.mm.ss" "HH.mm.ss.SSS", "HH.mm.ss.SSSnnnnnn".
Times can also contain "AM" or "PM".
:param na_strings: A list of strings, or a list of lists of strings (one list per column), or a dictionary
of column names to strings which are to be interpreted as missing values.
:param skipped_columns: an integer list of column indices to skip and not parse into the final frame from the import file.
:returns: a new :class:`H2OFrame` instance.
:examples:
>>> frame = h2o.upload_file("/path/to/local/data")
|
h2o-py/h2o/h2o.py
|
def upload_file(path, destination_frame=None, header=0, sep=None, col_names=None, col_types=None,
na_strings=None, skipped_columns=None):
"""
Upload a dataset from the provided local path to the H2O cluster.
Does a single-threaded push to H2O. Also see :meth:`import_file`.
:param path: A path specifying the location of the data to upload.
:param destination_frame: The unique hex key assigned to the imported file. If none is given, a key will
be automatically generated.
:param header: -1 means the first line is data, 0 means guess, 1 means first line is header.
:param sep: The field separator character. Values on each line of the file are separated by
this character. If not provided, the parser will automatically detect the separator.
:param col_names: A list of column names for the file.
:param col_types: A list of types or a dictionary of column names to types to specify whether columns
should be forced to a certain type upon import parsing. If a list, the types for elements that are
None will be guessed. The possible types a column may have are:
- "unknown" - this will force the column to be parsed as all NA
- "uuid" - the values in the column must be true UUID or will be parsed as NA
- "string" - force the column to be parsed as a string
- "numeric" - force the column to be parsed as numeric. H2O will handle the compression of the numeric
data in the optimal manner.
- "enum" - force the column to be parsed as a categorical column.
- "time" - force the column to be parsed as a time column. H2O will attempt to parse the following
list of date time formats: (date) "yyyy-MM-dd", "yyyy MM dd", "dd-MMM-yy", "dd MMM yy", (time)
"HH:mm:ss", "HH:mm:ss:SSS", "HH:mm:ss:SSSnnnnnn", "HH.mm.ss" "HH.mm.ss.SSS", "HH.mm.ss.SSSnnnnnn".
Times can also contain "AM" or "PM".
:param na_strings: A list of strings, or a list of lists of strings (one list per column), or a dictionary
of column names to strings which are to be interpreted as missing values.
:param skipped_columns: an integer list of column indices to skip and not parse into the final frame from the import file.
:returns: a new :class:`H2OFrame` instance.
:examples:
>>> frame = h2o.upload_file("/path/to/local/data")
"""
coltype = U(None, "unknown", "uuid", "string", "float", "real", "double", "int", "numeric",
"categorical", "factor", "enum", "time")
natype = U(str, [str])
assert_is_type(path, str)
assert_is_type(destination_frame, str, None)
assert_is_type(header, -1, 0, 1)
assert_is_type(sep, None, I(str, lambda s: len(s) == 1))
assert_is_type(col_names, [str], None)
assert_is_type(col_types, [coltype], {str: coltype}, None)
assert_is_type(na_strings, [natype], {str: natype}, None)
    assert (skipped_columns==None) or isinstance(skipped_columns, list), \
        "The skipped_columns should be a list of column indices!"
check_frame_id(destination_frame)
if path.startswith("~"):
path = os.path.expanduser(path)
return H2OFrame()._upload_parse(path, destination_frame, header, sep, col_names, col_types, na_strings, skipped_columns)
|
def upload_file(path, destination_frame=None, header=0, sep=None, col_names=None, col_types=None,
na_strings=None, skipped_columns=None):
"""
Upload a dataset from the provided local path to the H2O cluster.
Does a single-threaded push to H2O. Also see :meth:`import_file`.
:param path: A path specifying the location of the data to upload.
:param destination_frame: The unique hex key assigned to the imported file. If none is given, a key will
be automatically generated.
:param header: -1 means the first line is data, 0 means guess, 1 means first line is header.
:param sep: The field separator character. Values on each line of the file are separated by
this character. If not provided, the parser will automatically detect the separator.
:param col_names: A list of column names for the file.
:param col_types: A list of types or a dictionary of column names to types to specify whether columns
should be forced to a certain type upon import parsing. If a list, the types for elements that are
None will be guessed. The possible types a column may have are:
- "unknown" - this will force the column to be parsed as all NA
- "uuid" - the values in the column must be true UUID or will be parsed as NA
- "string" - force the column to be parsed as a string
- "numeric" - force the column to be parsed as numeric. H2O will handle the compression of the numeric
data in the optimal manner.
- "enum" - force the column to be parsed as a categorical column.
- "time" - force the column to be parsed as a time column. H2O will attempt to parse the following
list of date time formats: (date) "yyyy-MM-dd", "yyyy MM dd", "dd-MMM-yy", "dd MMM yy", (time)
"HH:mm:ss", "HH:mm:ss:SSS", "HH:mm:ss:SSSnnnnnn", "HH.mm.ss" "HH.mm.ss.SSS", "HH.mm.ss.SSSnnnnnn".
Times can also contain "AM" or "PM".
:param na_strings: A list of strings, or a list of lists of strings (one list per column), or a dictionary
of column names to strings which are to be interpreted as missing values.
:param skipped_columns: an integer list of column indices to skip and not parse into the final frame from the import file.
:returns: a new :class:`H2OFrame` instance.
:examples:
>>> frame = h2o.upload_file("/path/to/local/data")
"""
coltype = U(None, "unknown", "uuid", "string", "float", "real", "double", "int", "numeric",
"categorical", "factor", "enum", "time")
natype = U(str, [str])
assert_is_type(path, str)
assert_is_type(destination_frame, str, None)
assert_is_type(header, -1, 0, 1)
assert_is_type(sep, None, I(str, lambda s: len(s) == 1))
assert_is_type(col_names, [str], None)
assert_is_type(col_types, [coltype], {str: coltype}, None)
assert_is_type(na_strings, [natype], {str: natype}, None)
    assert (skipped_columns==None) or isinstance(skipped_columns, list), \
        "The skipped_columns should be a list of column indices!"
check_frame_id(destination_frame)
if path.startswith("~"):
path = os.path.expanduser(path)
return H2OFrame()._upload_parse(path, destination_frame, header, sep, col_names, col_types, na_strings, skipped_columns)
|
[
"Upload",
"a",
"dataset",
"from",
"the",
"provided",
"local",
"path",
"to",
"the",
"H2O",
"cluster",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L310-L363
|
[
"def",
"upload_file",
"(",
"path",
",",
"destination_frame",
"=",
"None",
",",
"header",
"=",
"0",
",",
"sep",
"=",
"None",
",",
"col_names",
"=",
"None",
",",
"col_types",
"=",
"None",
",",
"na_strings",
"=",
"None",
",",
"skipped_columns",
"=",
"None",
")",
":",
"coltype",
"=",
"U",
"(",
"None",
",",
"\"unknown\"",
",",
"\"uuid\"",
",",
"\"string\"",
",",
"\"float\"",
",",
"\"real\"",
",",
"\"double\"",
",",
"\"int\"",
",",
"\"numeric\"",
",",
"\"categorical\"",
",",
"\"factor\"",
",",
"\"enum\"",
",",
"\"time\"",
")",
"natype",
"=",
"U",
"(",
"str",
",",
"[",
"str",
"]",
")",
"assert_is_type",
"(",
"path",
",",
"str",
")",
"assert_is_type",
"(",
"destination_frame",
",",
"str",
",",
"None",
")",
"assert_is_type",
"(",
"header",
",",
"-",
"1",
",",
"0",
",",
"1",
")",
"assert_is_type",
"(",
"sep",
",",
"None",
",",
"I",
"(",
"str",
",",
"lambda",
"s",
":",
"len",
"(",
"s",
")",
"==",
"1",
")",
")",
"assert_is_type",
"(",
"col_names",
",",
"[",
"str",
"]",
",",
"None",
")",
"assert_is_type",
"(",
"col_types",
",",
"[",
"coltype",
"]",
",",
"{",
"str",
":",
"coltype",
"}",
",",
"None",
")",
"assert_is_type",
"(",
"na_strings",
",",
"[",
"natype",
"]",
",",
"{",
"str",
":",
"natype",
"}",
",",
"None",
")",
"assert",
"(",
"skipped_columns",
"==",
"None",
")",
"or",
"isinstance",
"(",
"skipped_columns",
",",
"list",
")",
",",
"\"The skipped_columns should be an list of column names!\"",
"check_frame_id",
"(",
"destination_frame",
")",
"if",
"path",
".",
"startswith",
"(",
"\"~\"",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"path",
")",
"return",
"H2OFrame",
"(",
")",
".",
"_upload_parse",
"(",
"path",
",",
"destination_frame",
",",
"header",
",",
"sep",
",",
"col_names",
",",
"col_types",
",",
"na_strings",
",",
"skipped_columns",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
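A hedged sketch for upload_file from the row above; the local path, destination key, column types, and NA strings are assumptions made for illustration:

import h2o

# Single-threaded push of a local CSV to the cluster, forcing column types
# and treating two tokens as missing values.
frame = h2o.upload_file(
    "/tmp/measurements.csv",
    destination_frame="measurements",
    header=1,  # first line is the header
    col_types={"sensor": "enum", "value": "numeric"},
    na_strings=["NA", "missing"],
)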
test
|
import_file
|
Import a dataset that is already on the cluster.
The path to the data must be a valid path for each node in the H2O cluster. If some node in the H2O cluster
cannot see the file, then an exception will be thrown by the H2O cluster. Does a parallel/distributed
multi-threaded pull of the data. The main difference between this method and :func:`upload_file` is that
the latter works with local files, whereas this method imports remote files (i.e. files local to the server).
If you are running the H2O server on your own machine, then both methods behave the same.
:param path: path(s) specifying the location of the data to import or a path to a directory of files to import
:param destination_frame: The unique hex key assigned to the imported file. If none is given, a key will be
automatically generated.
:param parse: If True, the file should be parsed after import. If False, then a list is returned containing the file path.
:param header: -1 means the first line is data, 0 means guess, 1 means first line is header.
:param sep: The field separator character. Values on each line of the file are separated by
this character. If not provided, the parser will automatically detect the separator.
:param col_names: A list of column names for the file.
:param col_types: A list of types or a dictionary of column names to types to specify whether columns
should be forced to a certain type upon import parsing. If a list, the types for elements that are
None will be guessed. The possible types a column may have are:
- "unknown" - this will force the column to be parsed as all NA
- "uuid" - the values in the column must be true UUID or will be parsed as NA
- "string" - force the column to be parsed as a string
- "numeric" - force the column to be parsed as numeric. H2O will handle the compression of the numeric
data in the optimal manner.
- "enum" - force the column to be parsed as a categorical column.
- "time" - force the column to be parsed as a time column. H2O will attempt to parse the following
list of date time formats: (date) "yyyy-MM-dd", "yyyy MM dd", "dd-MMM-yy", "dd MMM yy", (time)
"HH:mm:ss", "HH:mm:ss:SSS", "HH:mm:ss:SSSnnnnnn", "HH.mm.ss" "HH.mm.ss.SSS", "HH.mm.ss.SSSnnnnnn".
Times can also contain "AM" or "PM".
:param na_strings: A list of strings, or a list of lists of strings (one list per column), or a dictionary
of column names to strings which are to be interpreted as missing values.
:param pattern: Character string containing a regular expression to match file(s) in the folder if `path` is a
directory.
:param skipped_columns: an integer list of column indices to skip and not parse into the final frame from the import file.
:param custom_non_data_line_markers: If a line in the imported file starts with any character in the given string, it will NOT be imported. An empty string means all lines are imported; None means the default behaviour for the given format will be used.
:returns: a new :class:`H2OFrame` instance.
:examples:
>>> # Single file import
>>> iris = import_file("h2o-3/smalldata/iris.csv")
>>> # Return all files in the folder iris/ matching the regex r"iris_.*\.csv"
>>> iris_pattern = h2o.import_file(path = "h2o-3/smalldata/iris",
... pattern = "iris_.*\.csv")
|
h2o-py/h2o/h2o.py
|
def import_file(path=None, destination_frame=None, parse=True, header=0, sep=None, col_names=None, col_types=None,
na_strings=None, pattern=None, skipped_columns=None, custom_non_data_line_markers = None):
"""
Import a dataset that is already on the cluster.
The path to the data must be a valid path for each node in the H2O cluster. If some node in the H2O cluster
cannot see the file, then an exception will be thrown by the H2O cluster. Does a parallel/distributed
multi-threaded pull of the data. The main difference between this method and :func:`upload_file` is that
the latter works with local files, whereas this method imports remote files (i.e. files local to the server).
If you are running the H2O server on your own machine, then both methods behave the same.
:param path: path(s) specifying the location of the data to import or a path to a directory of files to import
:param destination_frame: The unique hex key assigned to the imported file. If none is given, a key will be
automatically generated.
:param parse: If True, the file should be parsed after import. If False, then a list is returned containing the file path.
:param header: -1 means the first line is data, 0 means guess, 1 means first line is header.
:param sep: The field separator character. Values on each line of the file are separated by
this character. If not provided, the parser will automatically detect the separator.
:param col_names: A list of column names for the file.
:param col_types: A list of types or a dictionary of column names to types to specify whether columns
should be forced to a certain type upon import parsing. If a list, the types for elements that are
None will be guessed. The possible types a column may have are:
- "unknown" - this will force the column to be parsed as all NA
- "uuid" - the values in the column must be true UUID or will be parsed as NA
- "string" - force the column to be parsed as a string
- "numeric" - force the column to be parsed as numeric. H2O will handle the compression of the numeric
data in the optimal manner.
- "enum" - force the column to be parsed as a categorical column.
- "time" - force the column to be parsed as a time column. H2O will attempt to parse the following
list of date time formats: (date) "yyyy-MM-dd", "yyyy MM dd", "dd-MMM-yy", "dd MMM yy", (time)
"HH:mm:ss", "HH:mm:ss:SSS", "HH:mm:ss:SSSnnnnnn", "HH.mm.ss" "HH.mm.ss.SSS", "HH.mm.ss.SSSnnnnnn".
Times can also contain "AM" or "PM".
:param na_strings: A list of strings, or a list of lists of strings (one list per column), or a dictionary
of column names to strings which are to be interpreted as missing values.
:param pattern: Character string containing a regular expression to match file(s) in the folder if `path` is a
directory.
:param skipped_columns: an integer list of column indices to skip and not parse into the final frame from the import file.
:param custom_non_data_line_markers: If a line in the imported file starts with any character in the given string, it will NOT be imported. An empty string means all lines are imported; None means the default behaviour for the given format will be used.
:returns: a new :class:`H2OFrame` instance.
:examples:
>>> # Single file import
>>> iris = import_file("h2o-3/smalldata/iris.csv")
>>> # Return all files in the folder iris/ matching the regex r"iris_.*\.csv"
>>> iris_pattern = h2o.import_file(path = "h2o-3/smalldata/iris",
... pattern = "iris_.*\.csv")
"""
coltype = U(None, "unknown", "uuid", "string", "float", "real", "double", "int", "numeric",
"categorical", "factor", "enum", "time")
natype = U(str, [str])
assert_is_type(path, str, [str])
assert_is_type(pattern, str, None)
assert_is_type(destination_frame, str, None)
assert_is_type(parse, bool)
assert_is_type(header, -1, 0, 1)
assert_is_type(sep, None, I(str, lambda s: len(s) == 1))
assert_is_type(col_names, [str], None)
assert_is_type(col_types, [coltype], {str: coltype}, None)
assert_is_type(na_strings, [natype], {str: natype}, None)
    assert isinstance(skipped_columns, (type(None), list)), "The skipped_columns should be a list of column indices!"
check_frame_id(destination_frame)
patharr = path if isinstance(path, list) else [path]
if any(os.path.split(p)[0] == "~" for p in patharr):
raise H2OValueError("Paths relative to a current user (~) are not valid in the server environment. "
"Please use absolute paths if possible.")
if not parse:
return lazy_import(path, pattern)
else:
return H2OFrame()._import_parse(path, pattern, destination_frame, header, sep, col_names, col_types, na_strings,
skipped_columns, custom_non_data_line_markers)
|
def import_file(path=None, destination_frame=None, parse=True, header=0, sep=None, col_names=None, col_types=None,
na_strings=None, pattern=None, skipped_columns=None, custom_non_data_line_markers = None):
"""
Import a dataset that is already on the cluster.
The path to the data must be a valid path for each node in the H2O cluster. If some node in the H2O cluster
cannot see the file, then an exception will be thrown by the H2O cluster. Does a parallel/distributed
multi-threaded pull of the data. The main difference between this method and :func:`upload_file` is that
the latter works with local files, whereas this method imports remote files (i.e. files local to the server).
If you are running the H2O server on your own machine, then both methods behave the same.
:param path: path(s) specifying the location of the data to import or a path to a directory of files to import
:param destination_frame: The unique hex key assigned to the imported file. If none is given, a key will be
automatically generated.
:param parse: If True, the file should be parsed after import. If False, then a list is returned containing the file path.
:param header: -1 means the first line is data, 0 means guess, 1 means first line is header.
:param sep: The field separator character. Values on each line of the file are separated by
this character. If not provided, the parser will automatically detect the separator.
:param col_names: A list of column names for the file.
:param col_types: A list of types or a dictionary of column names to types to specify whether columns
should be forced to a certain type upon import parsing. If a list, the types for elements that are
None will be guessed. The possible types a column may have are:
- "unknown" - this will force the column to be parsed as all NA
- "uuid" - the values in the column must be true UUID or will be parsed as NA
- "string" - force the column to be parsed as a string
- "numeric" - force the column to be parsed as numeric. H2O will handle the compression of the numeric
data in the optimal manner.
- "enum" - force the column to be parsed as a categorical column.
- "time" - force the column to be parsed as a time column. H2O will attempt to parse the following
list of date time formats: (date) "yyyy-MM-dd", "yyyy MM dd", "dd-MMM-yy", "dd MMM yy", (time)
"HH:mm:ss", "HH:mm:ss:SSS", "HH:mm:ss:SSSnnnnnn", "HH.mm.ss" "HH.mm.ss.SSS", "HH.mm.ss.SSSnnnnnn".
Times can also contain "AM" or "PM".
:param na_strings: A list of strings, or a list of lists of strings (one list per column), or a dictionary
of column names to strings which are to be interpreted as missing values.
:param pattern: Character string containing a regular expression to match file(s) in the folder if `path` is a
directory.
:param skipped_columns: an integer list of column indices to skip and not parse into the final frame from the import file.
:param custom_non_data_line_markers: If a line in the imported file starts with any character in the given string, it will NOT be imported. An empty string means all lines are imported; None means the default behaviour for the given format will be used.
:returns: a new :class:`H2OFrame` instance.
:examples:
>>> # Single file import
>>> iris = import_file("h2o-3/smalldata/iris.csv")
>>> # Return all files in the folder iris/ matching the regex r"iris_.*\.csv"
>>> iris_pattern = h2o.import_file(path = "h2o-3/smalldata/iris",
... pattern = "iris_.*\.csv")
"""
coltype = U(None, "unknown", "uuid", "string", "float", "real", "double", "int", "numeric",
"categorical", "factor", "enum", "time")
natype = U(str, [str])
assert_is_type(path, str, [str])
assert_is_type(pattern, str, None)
assert_is_type(destination_frame, str, None)
assert_is_type(parse, bool)
assert_is_type(header, -1, 0, 1)
assert_is_type(sep, None, I(str, lambda s: len(s) == 1))
assert_is_type(col_names, [str], None)
assert_is_type(col_types, [coltype], {str: coltype}, None)
assert_is_type(na_strings, [natype], {str: natype}, None)
    assert isinstance(skipped_columns, (type(None), list)), "The skipped_columns should be a list of column indices!"
check_frame_id(destination_frame)
patharr = path if isinstance(path, list) else [path]
if any(os.path.split(p)[0] == "~" for p in patharr):
raise H2OValueError("Paths relative to a current user (~) are not valid in the server environment. "
"Please use absolute paths if possible.")
if not parse:
return lazy_import(path, pattern)
else:
return H2OFrame()._import_parse(path, pattern, destination_frame, header, sep, col_names, col_types, na_strings,
skipped_columns, custom_non_data_line_markers)
|
[
"Import",
"a",
"dataset",
"that",
"is",
"already",
"on",
"the",
"cluster",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L366-L437
|
[
"def",
"import_file",
"(",
"path",
"=",
"None",
",",
"destination_frame",
"=",
"None",
",",
"parse",
"=",
"True",
",",
"header",
"=",
"0",
",",
"sep",
"=",
"None",
",",
"col_names",
"=",
"None",
",",
"col_types",
"=",
"None",
",",
"na_strings",
"=",
"None",
",",
"pattern",
"=",
"None",
",",
"skipped_columns",
"=",
"None",
",",
"custom_non_data_line_markers",
"=",
"None",
")",
":",
"coltype",
"=",
"U",
"(",
"None",
",",
"\"unknown\"",
",",
"\"uuid\"",
",",
"\"string\"",
",",
"\"float\"",
",",
"\"real\"",
",",
"\"double\"",
",",
"\"int\"",
",",
"\"numeric\"",
",",
"\"categorical\"",
",",
"\"factor\"",
",",
"\"enum\"",
",",
"\"time\"",
")",
"natype",
"=",
"U",
"(",
"str",
",",
"[",
"str",
"]",
")",
"assert_is_type",
"(",
"path",
",",
"str",
",",
"[",
"str",
"]",
")",
"assert_is_type",
"(",
"pattern",
",",
"str",
",",
"None",
")",
"assert_is_type",
"(",
"destination_frame",
",",
"str",
",",
"None",
")",
"assert_is_type",
"(",
"parse",
",",
"bool",
")",
"assert_is_type",
"(",
"header",
",",
"-",
"1",
",",
"0",
",",
"1",
")",
"assert_is_type",
"(",
"sep",
",",
"None",
",",
"I",
"(",
"str",
",",
"lambda",
"s",
":",
"len",
"(",
"s",
")",
"==",
"1",
")",
")",
"assert_is_type",
"(",
"col_names",
",",
"[",
"str",
"]",
",",
"None",
")",
"assert_is_type",
"(",
"col_types",
",",
"[",
"coltype",
"]",
",",
"{",
"str",
":",
"coltype",
"}",
",",
"None",
")",
"assert_is_type",
"(",
"na_strings",
",",
"[",
"natype",
"]",
",",
"{",
"str",
":",
"natype",
"}",
",",
"None",
")",
"assert",
"isinstance",
"(",
"skipped_columns",
",",
"(",
"type",
"(",
"None",
")",
",",
"list",
")",
")",
",",
"\"The skipped_columns should be an list of column names!\"",
"check_frame_id",
"(",
"destination_frame",
")",
"patharr",
"=",
"path",
"if",
"isinstance",
"(",
"path",
",",
"list",
")",
"else",
"[",
"path",
"]",
"if",
"any",
"(",
"os",
".",
"path",
".",
"split",
"(",
"p",
")",
"[",
"0",
"]",
"==",
"\"~\"",
"for",
"p",
"in",
"patharr",
")",
":",
"raise",
"H2OValueError",
"(",
"\"Paths relative to a current user (~) are not valid in the server environment. \"",
"\"Please use absolute paths if possible.\"",
")",
"if",
"not",
"parse",
":",
"return",
"lazy_import",
"(",
"path",
",",
"pattern",
")",
"else",
":",
"return",
"H2OFrame",
"(",
")",
".",
"_import_parse",
"(",
"path",
",",
"pattern",
",",
"destination_frame",
",",
"header",
",",
"sep",
",",
"col_names",
",",
"col_types",
",",
"na_strings",
",",
"skipped_columns",
",",
"custom_non_data_line_markers",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
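A hedged sketch for import_file from the row above; the folder, regex, and skipped column index are illustrative only:

import h2o

# Parallel, multi-threaded parse of every matching CSV visible to the cluster,
# skipping the first column and letting the parser guess the header row.
trips = h2o.import_file(path="/data/citibike", pattern=r".*\.csv",
                        header=0, skipped_columns=[0])

# With parse=False the files are only registered (lazy import) and their paths are returned.
raw_paths = h2o.import_file(path="/data/citibike", parse=False)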
test
|
import_hive_table
|
Import Hive table to H2OFrame in memory.
Make sure to start H2O with Hive on classpath. Uses hive-site.xml on classpath to connect to Hive.
:param database: Name of Hive database (default database will be used by default)
:param table: name of Hive table to import
:param partitions: a list of lists of strings - partition key column values of partitions you want to import.
:param allow_multi_format: enable import of partitioned tables with different storage formats used. WARNING:
this may fail on out-of-memory for tables with a large number of small partitions.
:returns: an :class:`H2OFrame` containing data of the specified Hive table.
:examples:
>>> my_citibike_data = h2o.import_hive_table("default", "table", [["2017", "01"], ["2017", "02"]])
|
h2o-py/h2o/h2o.py
|
def import_hive_table(database=None, table=None, partitions=None, allow_multi_format=False):
"""
Import Hive table to H2OFrame in memory.
Make sure to start H2O with Hive on classpath. Uses hive-site.xml on classpath to connect to Hive.
:param database: Name of Hive database (default database will be used by default)
:param table: name of Hive table to import
:param partitions: a list of lists of strings - partition key column values of partitions you want to import.
:param allow_multi_format: enable import of partitioned tables with different storage formats used. WARNING:
this may fail on out-of-memory for tables with a large number of small partitions.
:returns: an :class:`H2OFrame` containing data of the specified Hive table.
:examples:
>>> my_citibike_data = h2o.import_hive_table("default", "table", [["2017", "01"], ["2017", "02"]])
"""
assert_is_type(database, str, None)
assert_is_type(table, str)
assert_is_type(partitions, [[str]], None)
p = { "database": database, "table": table, "partitions": partitions, "allow_multi_format": allow_multi_format }
j = H2OJob(api("POST /3/ImportHiveTable", data=p), "Import Hive Table").poll()
return get_frame(j.dest_key)
|
def import_hive_table(database=None, table=None, partitions=None, allow_multi_format=False):
"""
Import Hive table to H2OFrame in memory.
Make sure to start H2O with Hive on classpath. Uses hive-site.xml on classpath to connect to Hive.
:param database: Name of Hive database (default database will be used by default)
:param table: name of Hive table to import
:param partitions: a list of lists of strings - partition key column values of partitions you want to import.
:param allow_multi_format: enable import of partitioned tables with different storage formats used. WARNING:
this may fail on out-of-memory for tables with a large number of small partitions.
:returns: an :class:`H2OFrame` containing data of the specified Hive table.
:examples:
>>> my_citibike_data = h2o.import_hive_table("default", "table", [["2017", "01"], ["2017", "02"]])
"""
assert_is_type(database, str, None)
assert_is_type(table, str)
assert_is_type(partitions, [[str]], None)
p = { "database": database, "table": table, "partitions": partitions, "allow_multi_format": allow_multi_format }
j = H2OJob(api("POST /3/ImportHiveTable", data=p), "Import Hive Table").poll()
return get_frame(j.dest_key)
|
[
"Import",
"Hive",
"table",
"to",
"H2OFrame",
"in",
"memory",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L440-L462
|
[
"def",
"import_hive_table",
"(",
"database",
"=",
"None",
",",
"table",
"=",
"None",
",",
"partitions",
"=",
"None",
",",
"allow_multi_format",
"=",
"False",
")",
":",
"assert_is_type",
"(",
"database",
",",
"str",
",",
"None",
")",
"assert_is_type",
"(",
"table",
",",
"str",
")",
"assert_is_type",
"(",
"partitions",
",",
"[",
"[",
"str",
"]",
"]",
",",
"None",
")",
"p",
"=",
"{",
"\"database\"",
":",
"database",
",",
"\"table\"",
":",
"table",
",",
"\"partitions\"",
":",
"partitions",
",",
"\"allow_multi_format\"",
":",
"allow_multi_format",
"}",
"j",
"=",
"H2OJob",
"(",
"api",
"(",
"\"POST /3/ImportHiveTable\"",
",",
"data",
"=",
"p",
")",
",",
"\"Import Hive Table\"",
")",
".",
"poll",
"(",
")",
"return",
"get_frame",
"(",
"j",
".",
"dest_key",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
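
A minimal usage sketch for import_hive_table, assuming a running H2O cluster started with the Hive dependencies on its classpath; the database and table names below are hypothetical:

import h2o

h2o.init()  # connect to a cluster that has Hive support on its classpath
# import the whole table from the default database
airlines = h2o.import_hive_table(database="default", table="airlines")
# or import only two partitions of a partitioned table
airlines_2017 = h2o.import_hive_table("default", "airlines",
                                      partitions=[["2017", "01"], ["2017", "02"]])
print(airlines_2017.dim)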
|
test
|
import_sql_table
|
Import SQL table to H2OFrame in memory.
Assumes that the SQL table is not being updated and is stable.
Runs multiple SELECT SQL queries concurrently for parallel ingestion.
Be sure to start the h2o.jar in the terminal with your downloaded JDBC driver in the classpath::
java -cp <path_to_h2o_jar>:<path_to_jdbc_driver_jar> water.H2OApp
Also see :func:`import_sql_select`.
Currently supported SQL databases are MySQL, PostgreSQL, MariaDB, Hive, Oracle and Microsoft SQL.
:param connection_url: URL of the SQL database connection as specified by the Java Database Connectivity (JDBC)
Driver. For example, "jdbc:mysql://localhost:3306/menagerie?&useSSL=false"
:param table: name of SQL table
:param columns: a list of column names to import from SQL table. Default is to import all columns.
:param username: username for SQL server
:param password: password for SQL server
:param optimize: DEPRECATED. Ignored - use fetch_mode instead. Optimize import of SQL table for faster imports.
:param fetch_mode: Set to DISTRIBUTED to enable distributed import. Set to SINGLE to force a sequential read by a single node
from the database.
:returns: an :class:`H2OFrame` containing data of the specified SQL table.
:examples:
>>> conn_url = "jdbc:mysql://172.16.2.178:3306/ingestSQL?&useSSL=false"
>>> table = "citibike20k"
>>> username = "root"
>>> password = "abc123"
>>> my_citibike_data = h2o.import_sql_table(conn_url, table, username, password)
|
h2o-py/h2o/h2o.py
|
def import_sql_table(connection_url, table, username, password, columns=None, optimize=True, fetch_mode=None):
"""
Import SQL table to H2OFrame in memory.
Assumes that the SQL table is not being updated and is stable.
Runs multiple SELECT SQL queries concurrently for parallel ingestion.
Be sure to start the h2o.jar in the terminal with your downloaded JDBC driver in the classpath::
java -cp <path_to_h2o_jar>:<path_to_jdbc_driver_jar> water.H2OApp
Also see :func:`import_sql_select`.
Currently supported SQL databases are MySQL, PostgreSQL, MariaDB, Hive, Oracle and Microsoft SQL.
:param connection_url: URL of the SQL database connection as specified by the Java Database Connectivity (JDBC)
Driver. For example, "jdbc:mysql://localhost:3306/menagerie?&useSSL=false"
:param table: name of SQL table
:param columns: a list of column names to import from SQL table. Default is to import all columns.
:param username: username for SQL server
:param password: password for SQL server
:param optimize: DEPRECATED. Ignored - use fetch_mode instead. Optimize import of SQL table for faster imports.
:param fetch_mode: Set to DISTRIBUTED to enable distributed import. Set to SINGLE to force a sequential read by a single node
from the database.
:returns: an :class:`H2OFrame` containing data of the specified SQL table.
:examples:
>>> conn_url = "jdbc:mysql://172.16.2.178:3306/ingestSQL?&useSSL=false"
>>> table = "citibike20k"
>>> username = "root"
>>> password = "abc123"
>>> my_citibike_data = h2o.import_sql_table(conn_url, table, username, password)
"""
assert_is_type(connection_url, str)
assert_is_type(table, str)
assert_is_type(username, str)
assert_is_type(password, str)
assert_is_type(columns, [str], None)
assert_is_type(optimize, bool)
assert_is_type(fetch_mode, str, None)
p = {"connection_url": connection_url, "table": table, "username": username, "password": password,
"fetch_mode": fetch_mode}
if columns:
p["columns"] = ", ".join(columns)
j = H2OJob(api("POST /99/ImportSQLTable", data=p), "Import SQL Table").poll()
return get_frame(j.dest_key)
|
def import_sql_table(connection_url, table, username, password, columns=None, optimize=True, fetch_mode=None):
"""
Import SQL table to H2OFrame in memory.
Assumes that the SQL table is not being updated and is stable.
Runs multiple SELECT SQL queries concurrently for parallel ingestion.
Be sure to start the h2o.jar in the terminal with your downloaded JDBC driver in the classpath::
java -cp <path_to_h2o_jar>:<path_to_jdbc_driver_jar> water.H2OApp
Also see :func:`import_sql_select`.
Currently supported SQL databases are MySQL, PostgreSQL, MariaDB, Hive, Oracle and Microsoft SQL.
:param connection_url: URL of the SQL database connection as specified by the Java Database Connectivity (JDBC)
Driver. For example, "jdbc:mysql://localhost:3306/menagerie?&useSSL=false"
:param table: name of SQL table
:param columns: a list of column names to import from SQL table. Default is to import all columns.
:param username: username for SQL server
:param password: password for SQL server
:param optimize: DEPRECATED. Ignored - use fetch_mode instead. Optimize import of SQL table for faster imports.
:param fetch_mode: Set to DISTRIBUTED to enable distributed import. Set to SINGLE to force a sequential read by a single node
from the database.
:returns: an :class:`H2OFrame` containing data of the specified SQL table.
:examples:
>>> conn_url = "jdbc:mysql://172.16.2.178:3306/ingestSQL?&useSSL=false"
>>> table = "citibike20k"
>>> username = "root"
>>> password = "abc123"
>>> my_citibike_data = h2o.import_sql_table(conn_url, table, username, password)
"""
assert_is_type(connection_url, str)
assert_is_type(table, str)
assert_is_type(username, str)
assert_is_type(password, str)
assert_is_type(columns, [str], None)
assert_is_type(optimize, bool)
assert_is_type(fetch_mode, str, None)
p = {"connection_url": connection_url, "table": table, "username": username, "password": password,
"fetch_mode": fetch_mode}
if columns:
p["columns"] = ", ".join(columns)
j = H2OJob(api("POST /99/ImportSQLTable", data=p), "Import SQL Table").poll()
return get_frame(j.dest_key)
|
[
"Import",
"SQL",
"table",
"to",
"H2OFrame",
"in",
"memory",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L464-L508
|
[
"def",
"import_sql_table",
"(",
"connection_url",
",",
"table",
",",
"username",
",",
"password",
",",
"columns",
"=",
"None",
",",
"optimize",
"=",
"True",
",",
"fetch_mode",
"=",
"None",
")",
":",
"assert_is_type",
"(",
"connection_url",
",",
"str",
")",
"assert_is_type",
"(",
"table",
",",
"str",
")",
"assert_is_type",
"(",
"username",
",",
"str",
")",
"assert_is_type",
"(",
"password",
",",
"str",
")",
"assert_is_type",
"(",
"columns",
",",
"[",
"str",
"]",
",",
"None",
")",
"assert_is_type",
"(",
"optimize",
",",
"bool",
")",
"assert_is_type",
"(",
"fetch_mode",
",",
"str",
",",
"None",
")",
"p",
"=",
"{",
"\"connection_url\"",
":",
"connection_url",
",",
"\"table\"",
":",
"table",
",",
"\"username\"",
":",
"username",
",",
"\"password\"",
":",
"password",
",",
"\"fetch_mode\"",
":",
"fetch_mode",
"}",
"if",
"columns",
":",
"p",
"[",
"\"columns\"",
"]",
"=",
"\", \"",
".",
"join",
"(",
"columns",
")",
"j",
"=",
"H2OJob",
"(",
"api",
"(",
"\"POST /99/ImportSQLTable\"",
",",
"data",
"=",
"p",
")",
",",
"\"Import SQL Table\"",
")",
".",
"poll",
"(",
")",
"return",
"get_frame",
"(",
"j",
".",
"dest_key",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
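
A hedged sketch of import_sql_table; the connection URL, credentials, table and column names are placeholders, and it assumes h2o.jar was started with the JDBC driver on the classpath as described above:

import h2o

h2o.init()
conn_url = "jdbc:mysql://localhost:3306/menagerie?&useSSL=false"  # placeholder URL
# import two columns only, forcing a sequential read from a single node
pets = h2o.import_sql_table(conn_url, table="pet",
                            username="root", password="abc123",
                            columns=["name", "species"], fetch_mode="SINGLE")
print(pets.head())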
|
test
|
import_sql_select
|
Import the SQL table that is the result of the specified SQL query to H2OFrame in memory.
Creates a temporary SQL table from the specified sql_query.
Runs multiple SELECT SQL queries on the temporary table concurrently for parallel ingestion, then drops the table.
Be sure to start the h2o.jar in the terminal with your downloaded JDBC driver in the classpath::
java -cp <path_to_h2o_jar>:<path_to_jdbc_driver_jar> water.H2OApp
Also see h2o.import_sql_table. Currently supported SQL databases are MySQL, PostgreSQL, MariaDB, Hive, Oracle
and Microsoft SQL Server.
:param connection_url: URL of the SQL database connection as specified by the Java Database Connectivity (JDBC)
Driver. For example, "jdbc:mysql://localhost:3306/menagerie?&useSSL=false"
:param select_query: SQL query starting with `SELECT` that returns rows from one or more database tables.
:param username: username for SQL server
:param password: password for SQL server
:param optimize: DEPRECATED. Ignored - use fetch_mode instead. Optimize import of SQL table for faster imports.
:param use_temp_table: whether a temporary table should be created from select_query
:param temp_table_name: name of temporary table to be created from select_query
:param fetch_mode: Set to DISTRIBUTED to enable distributed import. Set to SINGLE to force a sequential read by a single node
from the database.
:returns: an :class:`H2OFrame` containing data of the specified SQL query.
:examples:
>>> conn_url = "jdbc:mysql://172.16.2.178:3306/ingestSQL?&useSSL=false"
>>> select_query = "SELECT bikeid from citibike20k"
>>> username = "root"
>>> password = "abc123"
>>> my_citibike_data = h2o.import_sql_select(conn_url, select_query,
...                                          username, password, fetch_mode="SINGLE")
|
h2o-py/h2o/h2o.py
|
def import_sql_select(connection_url, select_query, username, password, optimize=True,
use_temp_table=None, temp_table_name=None, fetch_mode=None):
"""
Import the SQL table that is the result of the specified SQL query to H2OFrame in memory.
Creates a temporary SQL table from the specified sql_query.
Runs multiple SELECT SQL queries on the temporary table concurrently for parallel ingestion, then drops the table.
Be sure to start the h2o.jar in the terminal with your downloaded JDBC driver in the classpath::
java -cp <path_to_h2o_jar>:<path_to_jdbc_driver_jar> water.H2OApp
Also see h2o.import_sql_table. Currently supported SQL databases are MySQL, PostgreSQL, MariaDB, Hive, Oracle
and Microsoft SQL Server.
:param connection_url: URL of the SQL database connection as specified by the Java Database Connectivity (JDBC)
Driver. For example, "jdbc:mysql://localhost:3306/menagerie?&useSSL=false"
:param select_query: SQL query starting with `SELECT` that returns rows from one or more database tables.
:param username: username for SQL server
:param password: password for SQL server
:param optimize: DEPRECATED. Ignored - use fetch_mode instead. Optimize import of SQL table for faster imports.
:param use_temp_table: whether a temporary table should be created from select_query
:param temp_table_name: name of temporary table to be created from select_query
:param fetch_mode: Set to DISTRIBUTED to enable distributed import. Set to SINGLE to force a sequential read by a single node
from the database.
:returns: an :class:`H2OFrame` containing data of the specified SQL query.
:examples:
>>> conn_url = "jdbc:mysql://172.16.2.178:3306/ingestSQL?&useSSL=false"
>>> select_query = "SELECT bikeid from citibike20k"
>>> username = "root"
>>> password = "abc123"
>>> my_citibike_data = h2o.import_sql_select(conn_url, select_query,
...                                          username, password, fetch_mode="SINGLE")
"""
assert_is_type(connection_url, str)
assert_is_type(select_query, str)
assert_is_type(username, str)
assert_is_type(password, str)
assert_is_type(optimize, bool)
assert_is_type(use_temp_table, bool, None)
assert_is_type(temp_table_name, str, None)
assert_is_type(fetch_mode, str, None)
p = {"connection_url": connection_url, "select_query": select_query, "username": username, "password": password,
"use_temp_table": use_temp_table, "temp_table_name": temp_table_name, "fetch_mode": fetch_mode}
j = H2OJob(api("POST /99/ImportSQLTable", data=p), "Import SQL Table").poll()
return get_frame(j.dest_key)
|
def import_sql_select(connection_url, select_query, username, password, optimize=True,
use_temp_table=None, temp_table_name=None, fetch_mode=None):
"""
Import the SQL table that is the result of the specified SQL query to H2OFrame in memory.
Creates a temporary SQL table from the specified sql_query.
Runs multiple SELECT SQL queries on the temporary table concurrently for parallel ingestion, then drops the table.
Be sure to start the h2o.jar in the terminal with your downloaded JDBC driver in the classpath::
java -cp <path_to_h2o_jar>:<path_to_jdbc_driver_jar> water.H2OApp
Also see h2o.import_sql_table. Currently supported SQL databases are MySQL, PostgreSQL, MariaDB, Hive, Oracle
and Microsoft SQL Server.
:param connection_url: URL of the SQL database connection as specified by the Java Database Connectivity (JDBC)
Driver. For example, "jdbc:mysql://localhost:3306/menagerie?&useSSL=false"
:param select_query: SQL query starting with `SELECT` that returns rows from one or more database tables.
:param username: username for SQL server
:param password: password for SQL server
:param optimize: DEPRECATED. Ignored - use fetch_mode instead. Optimize import of SQL table for faster imports.
:param use_temp_table: whether a temporary table should be created from select_query
:param temp_table_name: name of temporary table to be created from select_query
:param fetch_mode: Set to DISTRIBUTED to enable distributed import. Set to SINGLE to force a sequential read by a single node
from the database.
:returns: an :class:`H2OFrame` containing data of the specified SQL query.
:examples:
>>> conn_url = "jdbc:mysql://172.16.2.178:3306/ingestSQL?&useSSL=false"
>>> select_query = "SELECT bikeid from citibike20k"
>>> username = "root"
>>> password = "abc123"
>>> my_citibike_data = h2o.import_sql_select(conn_url, select_query,
...                                          username, password, fetch_mode="SINGLE")
"""
assert_is_type(connection_url, str)
assert_is_type(select_query, str)
assert_is_type(username, str)
assert_is_type(password, str)
assert_is_type(optimize, bool)
assert_is_type(use_temp_table, bool, None)
assert_is_type(temp_table_name, str, None)
assert_is_type(fetch_mode, str, None)
p = {"connection_url": connection_url, "select_query": select_query, "username": username, "password": password,
"use_temp_table": use_temp_table, "temp_table_name": temp_table_name, "fetch_mode": fetch_mode}
j = H2OJob(api("POST /99/ImportSQLTable", data=p), "Import SQL Table").poll()
return get_frame(j.dest_key)
|
[
"Import",
"the",
"SQL",
"table",
"that",
"is",
"the",
"result",
"of",
"the",
"specified",
"SQL",
"query",
"to",
"H2OFrame",
"in",
"memory",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L511-L557
|
[
"def",
"import_sql_select",
"(",
"connection_url",
",",
"select_query",
",",
"username",
",",
"password",
",",
"optimize",
"=",
"True",
",",
"use_temp_table",
"=",
"None",
",",
"temp_table_name",
"=",
"None",
",",
"fetch_mode",
"=",
"None",
")",
":",
"assert_is_type",
"(",
"connection_url",
",",
"str",
")",
"assert_is_type",
"(",
"select_query",
",",
"str",
")",
"assert_is_type",
"(",
"username",
",",
"str",
")",
"assert_is_type",
"(",
"password",
",",
"str",
")",
"assert_is_type",
"(",
"optimize",
",",
"bool",
")",
"assert_is_type",
"(",
"use_temp_table",
",",
"bool",
",",
"None",
")",
"assert_is_type",
"(",
"temp_table_name",
",",
"str",
",",
"None",
")",
"assert_is_type",
"(",
"fetch_mode",
",",
"str",
",",
"None",
")",
"p",
"=",
"{",
"\"connection_url\"",
":",
"connection_url",
",",
"\"select_query\"",
":",
"select_query",
",",
"\"username\"",
":",
"username",
",",
"\"password\"",
":",
"password",
",",
"\"use_temp_table\"",
":",
"use_temp_table",
",",
"\"temp_table_name\"",
":",
"temp_table_name",
",",
"\"fetch_mode\"",
":",
"fetch_mode",
"}",
"j",
"=",
"H2OJob",
"(",
"api",
"(",
"\"POST /99/ImportSQLTable\"",
",",
"data",
"=",
"p",
")",
",",
"\"Import SQL Table\"",
")",
".",
"poll",
"(",
")",
"return",
"get_frame",
"(",
"j",
".",
"dest_key",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
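
A short sketch of import_sql_select with placeholder connection details; the query, URL and credentials are hypothetical, and the distributed fetch mode assumes the database can serve concurrent reads:

import h2o

h2o.init()
conn_url = "jdbc:mysql://localhost:3306/ingestSQL?&useSSL=false"  # placeholder URL
query = "SELECT bikeid, tripduration FROM citibike20k WHERE tripduration > 600"
rides = h2o.import_sql_select(conn_url, query,
                              username="root", password="abc123",
                              fetch_mode="DISTRIBUTED")
print(rides.nrow)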
|
test
|
parse_setup
|
Retrieve H2O's best guess as to what the structure of the data file is.
During parse setup, the H2O cluster will make several guesses about the attributes of
the data. This method allows a user to perform corrective measures by updating the
dictionary returned by this method. This dictionary is then fed into `parse_raw` to
produce the H2OFrame instance.
:param raw_frames: a collection of imported file frames
:param destination_frame: The unique hex key assigned to the imported file. If none is given, a key will
automatically be generated.
:param header: -1 means the first line is data, 0 means guess, 1 means first line is header.
:param separator: The field separator character. Values on each line of the file are separated by
this character. If not provided, the parser will automatically detect the separator.
:param column_names: A list of column names for the file. If skipped_columns are specified, only list column names
of columns that are not skipped.
:param column_types: A list of types or a dictionary of column names to types to specify whether columns
should be forced to a certain type upon import parsing. If a list, the types for elements that are
None will be guessed. If skipped_columns are specified, only list column types of columns that are not skipped.
The possible types a column may have are:
- "unknown" - this will force the column to be parsed as all NA
- "uuid" - the values in the column must be true UUID or will be parsed as NA
- "string" - force the column to be parsed as a string
- "numeric" - force the column to be parsed as numeric. H2O will handle the compression of the numeric
data in the optimal manner.
- "enum" - force the column to be parsed as a categorical column.
- "time" - force the column to be parsed as a time column. H2O will attempt to parse the following
list of date time formats: (date) "yyyy-MM-dd", "yyyy MM dd", "dd-MMM-yy", "dd MMM yy", (time)
"HH:mm:ss", "HH:mm:ss:SSS", "HH:mm:ss:SSSnnnnnn", "HH.mm.ss" "HH.mm.ss.SSS", "HH.mm.ss.SSSnnnnnn".
Times can also contain "AM" or "PM".
:param na_strings: A list of strings, or a list of lists of strings (one list per column), or a dictionary
of column names to strings which are to be interpreted as missing values.
:param skipped_columns: a list of integer column indices to skip and not parse into the final frame from the imported file.
:param custom_non_data_line_markers: If a line in the imported file starts with any character in the given string it will NOT be imported. An empty string means all lines are imported; None means the default behaviour for the given format is used.
:returns: a dictionary containing parse parameters guessed by the H2O backend.
|
h2o-py/h2o/h2o.py
|
def parse_setup(raw_frames, destination_frame=None, header=0, separator=None, column_names=None,
column_types=None, na_strings=None, skipped_columns=None, custom_non_data_line_markers=None):
"""
Retrieve H2O's best guess as to what the structure of the data file is.
During parse setup, the H2O cluster will make several guesses about the attributes of
the data. This method allows a user to perform corrective measures by updating the
dictionary returned by this method. This dictionary is then fed into `parse_raw` to
produce the H2OFrame instance.
:param raw_frames: a collection of imported file frames
:param destination_frame: The unique hex key assigned to the imported file. If none is given, a key will
automatically be generated.
:param header: -1 means the first line is data, 0 means guess, 1 means first line is header.
:param separator: The field separator character. Values on each line of the file are separated by
this character. If not provided, the parser will automatically detect the separator.
:param column_names: A list of column names for the file. If skipped_columns are specified, only list column names
of columns that are not skipped.
:param column_types: A list of types or a dictionary of column names to types to specify whether columns
should be forced to a certain type upon import parsing. If a list, the types for elements that are
None will be guessed. If skipped_columns are specified, only list column types of columns that are not skipped.
The possible types a column may have are:
- "unknown" - this will force the column to be parsed as all NA
- "uuid" - the values in the column must be true UUID or will be parsed as NA
- "string" - force the column to be parsed as a string
- "numeric" - force the column to be parsed as numeric. H2O will handle the compression of the numeric
data in the optimal manner.
- "enum" - force the column to be parsed as a categorical column.
- "time" - force the column to be parsed as a time column. H2O will attempt to parse the following
list of date time formats: (date) "yyyy-MM-dd", "yyyy MM dd", "dd-MMM-yy", "dd MMM yy", (time)
"HH:mm:ss", "HH:mm:ss:SSS", "HH:mm:ss:SSSnnnnnn", "HH.mm.ss" "HH.mm.ss.SSS", "HH.mm.ss.SSSnnnnnn".
Times can also contain "AM" or "PM".
:param na_strings: A list of strings, or a list of lists of strings (one list per column), or a dictionary
of column names to strings which are to be interpreted as missing values.
:param skipped_columns: a list of integer column indices to skip and not parse into the final frame from the imported file.
:param custom_non_data_line_markers: If a line in the imported file starts with any character in the given string it will NOT be imported. An empty string means all lines are imported; None means the default behaviour for the given format is used.
:returns: a dictionary containing parse parameters guessed by the H2O backend.
"""
coltype = U(None, "unknown", "uuid", "string", "float", "real", "double", "int", "numeric",
"categorical", "factor", "enum", "time")
natype = U(str, [str])
assert_is_type(raw_frames, str, [str])
assert_is_type(destination_frame, None, str)
assert_is_type(header, -1, 0, 1)
assert_is_type(separator, None, I(str, lambda s: len(s) == 1))
assert_is_type(column_names, [str], None)
assert_is_type(column_types, [coltype], {str: coltype}, None)
assert_is_type(na_strings, [natype], {str: natype}, None)
check_frame_id(destination_frame)
# The H2O backend only accepts things that are quoted
if is_type(raw_frames, str): raw_frames = [raw_frames]
# temporary dictionary just to pass the following information to the parser: header, separator
kwargs = {"check_header": header, "source_frames": [quoted(frame_id) for frame_id in raw_frames]}
if separator:
kwargs["separator"] = ord(separator)
if custom_non_data_line_markers is not None:
kwargs["custom_non_data_line_markers"] = custom_non_data_line_markers;
j = api("POST /3/ParseSetup", data=kwargs)
if "warnings" in j and j["warnings"]:
for w in j["warnings"]:
warnings.warn(w)
# TODO: really should be url encoding...
if destination_frame:
j["destination_frame"] = destination_frame
parse_column_len = len(j["column_types"]) if skipped_columns is None else (len(j["column_types"])-len(skipped_columns))
tempColumnNames = j["column_names"] if j["column_names"] is not None else gen_header(j["number_columns"])
useType = [True]*len(tempColumnNames)
if skipped_columns is not None:
useType = [True]*len(tempColumnNames)
for ind in range(len(tempColumnNames)):
if ind in skipped_columns:
useType[ind]=False
if column_names is not None:
if not isinstance(column_names, list): raise ValueError("col_names should be a list")
if (skipped_columns is not None) and len(skipped_columns)>0:
if (len(column_names)) != parse_column_len:
raise ValueError(
"length of col_names should be equal to the number of columns parsed: %d vs %d"
% (len(column_names), parse_column_len))
else:
if len(column_names) != len(j["column_types"]): raise ValueError(
"length of col_names should be equal to the number of columns: %d vs %d"
% (len(column_names), len(j["column_types"])))
j["column_names"] = column_names
counter = 0
for ind in range(len(tempColumnNames)):
if useType[ind]:
tempColumnNames[ind]=column_names[counter]
counter=counter+1
if (column_types is not None): # keep the column types to include all columns
if isinstance(column_types, dict):
# overwrite dictionary to ordered list of column types. if user didn't specify column type for all names,
# use type provided by backend
if j["column_names"] is None: # no colnames discovered! (C1, C2, ...)
j["column_names"] = gen_header(j["number_columns"])
if not set(column_types.keys()).issubset(set(j["column_names"])): raise ValueError(
"names specified in col_types is not a subset of the column names")
idx = 0
column_types_list = []
for name in tempColumnNames: # column_names may have already been changed
if name in column_types:
column_types_list.append(column_types[name])
else:
column_types_list.append(j["column_types"][idx])
idx += 1
column_types = column_types_list
elif isinstance(column_types, list):
if len(column_types) != parse_column_len: raise ValueError(
"length of col_types should be equal to the number of parsed columns")
# need to expand it out to all columns, not just the parsed ones
column_types_list = j["column_types"]
counter = 0
for ind in range(len(j["column_types"])):
if useType[ind] and (column_types[counter]!=None):
column_types_list[ind]=column_types[counter]
counter=counter+1
column_types = column_types_list
else: # not dictionary or list
raise ValueError("col_types should be a list of types or a dictionary of column names to types")
j["column_types"] = column_types
if na_strings is not None:
if isinstance(na_strings, dict):
# overwrite dictionary to ordered list of lists of na_strings
if not j["column_names"]: raise ValueError("column names should be specified")
if not set(na_strings.keys()).issubset(set(j["column_names"])): raise ValueError(
"names specified in na_strings is not a subset of the column names")
j["na_strings"] = [[] for _ in range(len(j["column_names"]))]
for name, na in na_strings.items():
idx = j["column_names"].index(name)
if is_type(na, str): na = [na]
for n in na: j["na_strings"][idx].append(quoted(n))
elif is_type(na_strings, [[str]]):
if len(na_strings) != len(j["column_types"]):
raise ValueError("length of na_strings should be equal to the number of columns")
j["na_strings"] = [[quoted(na) for na in col] if col is not None else [] for col in na_strings]
elif isinstance(na_strings, list):
j["na_strings"] = [[quoted(na) for na in na_strings]] * len(j["column_types"])
else: # not a dictionary or list
raise ValueError(
"na_strings should be a list, a list of lists (one list per column), or a dictionary of column "
"names to strings which are to be interpreted as missing values")
if skipped_columns is not None:
if isinstance(skipped_columns, list):
j["skipped_columns"] = []
for colidx in skipped_columns:
if (colidx < 0): raise ValueError("skipped column index cannot be negative")
j["skipped_columns"].append(colidx)
# quote column names and column types also when not specified by user
if j["column_names"]: j["column_names"] = list(map(quoted, j["column_names"]))
j["column_types"] = list(map(quoted, j["column_types"]))
return j
|
def parse_setup(raw_frames, destination_frame=None, header=0, separator=None, column_names=None,
column_types=None, na_strings=None, skipped_columns=None, custom_non_data_line_markers=None):
"""
Retrieve H2O's best guess as to what the structure of the data file is.
During parse setup, the H2O cluster will make several guesses about the attributes of
the data. This method allows a user to perform corrective measures by updating the
dictionary returned by this method. This dictionary is then fed into `parse_raw` to
produce the H2OFrame instance.
:param raw_frames: a collection of imported file frames
:param destination_frame: The unique hex key assigned to the imported file. If none is given, a key will
automatically be generated.
:param header: -1 means the first line is data, 0 means guess, 1 means first line is header.
:param separator: The field separator character. Values on each line of the file are separated by
this character. If not provided, the parser will automatically detect the separator.
:param column_names: A list of column names for the file. If skipped_columns are specified, only list column names
of columns that are not skipped.
:param column_types: A list of types or a dictionary of column names to types to specify whether columns
should be forced to a certain type upon import parsing. If a list, the types for elements that are
None will be guessed. If skipped_columns are specified, only list column types of columns that are not skipped.
The possible types a column may have are:
- "unknown" - this will force the column to be parsed as all NA
- "uuid" - the values in the column must be true UUID or will be parsed as NA
- "string" - force the column to be parsed as a string
- "numeric" - force the column to be parsed as numeric. H2O will handle the compression of the numeric
data in the optimal manner.
- "enum" - force the column to be parsed as a categorical column.
- "time" - force the column to be parsed as a time column. H2O will attempt to parse the following
list of date time formats: (date) "yyyy-MM-dd", "yyyy MM dd", "dd-MMM-yy", "dd MMM yy", (time)
"HH:mm:ss", "HH:mm:ss:SSS", "HH:mm:ss:SSSnnnnnn", "HH.mm.ss" "HH.mm.ss.SSS", "HH.mm.ss.SSSnnnnnn".
Times can also contain "AM" or "PM".
:param na_strings: A list of strings, or a list of lists of strings (one list per column), or a dictionary
of column names to strings which are to be interpreted as missing values.
:param skipped_columns: a list of integer column indices to skip and not parse into the final frame from the imported file.
:param custom_non_data_line_markers: If a line in the imported file starts with any character in the given string it will NOT be imported. An empty string means all lines are imported; None means the default behaviour for the given format is used.
:returns: a dictionary containing parse parameters guessed by the H2O backend.
"""
coltype = U(None, "unknown", "uuid", "string", "float", "real", "double", "int", "numeric",
"categorical", "factor", "enum", "time")
natype = U(str, [str])
assert_is_type(raw_frames, str, [str])
assert_is_type(destination_frame, None, str)
assert_is_type(header, -1, 0, 1)
assert_is_type(separator, None, I(str, lambda s: len(s) == 1))
assert_is_type(column_names, [str], None)
assert_is_type(column_types, [coltype], {str: coltype}, None)
assert_is_type(na_strings, [natype], {str: natype}, None)
check_frame_id(destination_frame)
# The H2O backend only accepts things that are quoted
if is_type(raw_frames, str): raw_frames = [raw_frames]
# temporary dictionary just to pass the following information to the parser: header, separator
kwargs = {"check_header": header, "source_frames": [quoted(frame_id) for frame_id in raw_frames]}
if separator:
kwargs["separator"] = ord(separator)
if custom_non_data_line_markers is not None:
kwargs["custom_non_data_line_markers"] = custom_non_data_line_markers;
j = api("POST /3/ParseSetup", data=kwargs)
if "warnings" in j and j["warnings"]:
for w in j["warnings"]:
warnings.warn(w)
# TODO: really should be url encoding...
if destination_frame:
j["destination_frame"] = destination_frame
parse_column_len = len(j["column_types"]) if skipped_columns is None else (len(j["column_types"])-len(skipped_columns))
tempColumnNames = j["column_names"] if j["column_names"] is not None else gen_header(j["number_columns"])
useType = [True]*len(tempColumnNames)
if skipped_columns is not None:
useType = [True]*len(tempColumnNames)
for ind in range(len(tempColumnNames)):
if ind in skipped_columns:
useType[ind]=False
if column_names is not None:
if not isinstance(column_names, list): raise ValueError("col_names should be a list")
if (skipped_columns is not None) and len(skipped_columns)>0:
if (len(column_names)) != parse_column_len:
raise ValueError(
"length of col_names should be equal to the number of columns parsed: %d vs %d"
% (len(column_names), parse_column_len))
else:
if len(column_names) != len(j["column_types"]): raise ValueError(
"length of col_names should be equal to the number of columns: %d vs %d"
% (len(column_names), len(j["column_types"])))
j["column_names"] = column_names
counter = 0
for ind in range(len(tempColumnNames)):
if useType[ind]:
tempColumnNames[ind]=column_names[counter]
counter=counter+1
if (column_types is not None): # keep the column types to include all columns
if isinstance(column_types, dict):
# overwrite dictionary to ordered list of column types. if user didn't specify column type for all names,
# use type provided by backend
if j["column_names"] is None: # no colnames discovered! (C1, C2, ...)
j["column_names"] = gen_header(j["number_columns"])
if not set(column_types.keys()).issubset(set(j["column_names"])): raise ValueError(
"names specified in col_types is not a subset of the column names")
idx = 0
column_types_list = []
for name in tempColumnNames: # column_names may have already been changed
if name in column_types:
column_types_list.append(column_types[name])
else:
column_types_list.append(j["column_types"][idx])
idx += 1
column_types = column_types_list
elif isinstance(column_types, list):
if len(column_types) != parse_column_len: raise ValueError(
"length of col_types should be equal to the number of parsed columns")
# need to expand it out to all columns, not just the parsed ones
column_types_list = j["column_types"]
counter = 0
for ind in range(len(j["column_types"])):
if useType[ind] and (column_types[counter]!=None):
column_types_list[ind]=column_types[counter]
counter=counter+1
column_types = column_types_list
else: # not dictionary or list
raise ValueError("col_types should be a list of types or a dictionary of column names to types")
j["column_types"] = column_types
if na_strings is not None:
if isinstance(na_strings, dict):
# overwrite dictionary to ordered list of lists of na_strings
if not j["column_names"]: raise ValueError("column names should be specified")
if not set(na_strings.keys()).issubset(set(j["column_names"])): raise ValueError(
"names specified in na_strings is not a subset of the column names")
j["na_strings"] = [[] for _ in range(len(j["column_names"]))]
for name, na in na_strings.items():
idx = j["column_names"].index(name)
if is_type(na, str): na = [na]
for n in na: j["na_strings"][idx].append(quoted(n))
elif is_type(na_strings, [[str]]):
if len(na_strings) != len(j["column_types"]):
raise ValueError("length of na_strings should be equal to the number of columns")
j["na_strings"] = [[quoted(na) for na in col] if col is not None else [] for col in na_strings]
elif isinstance(na_strings, list):
j["na_strings"] = [[quoted(na) for na in na_strings]] * len(j["column_types"])
else: # not a dictionary or list
raise ValueError(
"na_strings should be a list, a list of lists (one list per column), or a dictionary of column "
"names to strings which are to be interpreted as missing values")
if skipped_columns is not None:
if isinstance(skipped_columns, list):
j["skipped_columns"] = []
for colidx in skipped_columns:
if (colidx < 0): raise ValueError("skipped column index cannot be negative")
j["skipped_columns"].append(colidx)
# quote column names and column types also when not specified by user
if j["column_names"]: j["column_names"] = list(map(quoted, j["column_names"]))
j["column_types"] = list(map(quoted, j["column_types"]))
return j
|
[
"Retrieve",
"H2O",
"s",
"best",
"guess",
"as",
"to",
"what",
"the",
"structure",
"of",
"the",
"data",
"file",
"is",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L560-L728
|
[
"def",
"parse_setup",
"(",
"raw_frames",
",",
"destination_frame",
"=",
"None",
",",
"header",
"=",
"0",
",",
"separator",
"=",
"None",
",",
"column_names",
"=",
"None",
",",
"column_types",
"=",
"None",
",",
"na_strings",
"=",
"None",
",",
"skipped_columns",
"=",
"None",
",",
"custom_non_data_line_markers",
"=",
"None",
")",
":",
"coltype",
"=",
"U",
"(",
"None",
",",
"\"unknown\"",
",",
"\"uuid\"",
",",
"\"string\"",
",",
"\"float\"",
",",
"\"real\"",
",",
"\"double\"",
",",
"\"int\"",
",",
"\"numeric\"",
",",
"\"categorical\"",
",",
"\"factor\"",
",",
"\"enum\"",
",",
"\"time\"",
")",
"natype",
"=",
"U",
"(",
"str",
",",
"[",
"str",
"]",
")",
"assert_is_type",
"(",
"raw_frames",
",",
"str",
",",
"[",
"str",
"]",
")",
"assert_is_type",
"(",
"destination_frame",
",",
"None",
",",
"str",
")",
"assert_is_type",
"(",
"header",
",",
"-",
"1",
",",
"0",
",",
"1",
")",
"assert_is_type",
"(",
"separator",
",",
"None",
",",
"I",
"(",
"str",
",",
"lambda",
"s",
":",
"len",
"(",
"s",
")",
"==",
"1",
")",
")",
"assert_is_type",
"(",
"column_names",
",",
"[",
"str",
"]",
",",
"None",
")",
"assert_is_type",
"(",
"column_types",
",",
"[",
"coltype",
"]",
",",
"{",
"str",
":",
"coltype",
"}",
",",
"None",
")",
"assert_is_type",
"(",
"na_strings",
",",
"[",
"natype",
"]",
",",
"{",
"str",
":",
"natype",
"}",
",",
"None",
")",
"check_frame_id",
"(",
"destination_frame",
")",
"# The H2O backend only accepts things that are quoted",
"if",
"is_type",
"(",
"raw_frames",
",",
"str",
")",
":",
"raw_frames",
"=",
"[",
"raw_frames",
"]",
"# temporary dictionary just to pass the following information to the parser: header, separator",
"kwargs",
"=",
"{",
"\"check_header\"",
":",
"header",
",",
"\"source_frames\"",
":",
"[",
"quoted",
"(",
"frame_id",
")",
"for",
"frame_id",
"in",
"raw_frames",
"]",
"}",
"if",
"separator",
":",
"kwargs",
"[",
"\"separator\"",
"]",
"=",
"ord",
"(",
"separator",
")",
"if",
"custom_non_data_line_markers",
"is",
"not",
"None",
":",
"kwargs",
"[",
"\"custom_non_data_line_markers\"",
"]",
"=",
"custom_non_data_line_markers",
"j",
"=",
"api",
"(",
"\"POST /3/ParseSetup\"",
",",
"data",
"=",
"kwargs",
")",
"if",
"\"warnings\"",
"in",
"j",
"and",
"j",
"[",
"\"warnings\"",
"]",
":",
"for",
"w",
"in",
"j",
"[",
"\"warnings\"",
"]",
":",
"warnings",
".",
"warn",
"(",
"w",
")",
"# TODO: really should be url encoding...",
"if",
"destination_frame",
":",
"j",
"[",
"\"destination_frame\"",
"]",
"=",
"destination_frame",
"parse_column_len",
"=",
"len",
"(",
"j",
"[",
"\"column_types\"",
"]",
")",
"if",
"skipped_columns",
"is",
"None",
"else",
"(",
"len",
"(",
"j",
"[",
"\"column_types\"",
"]",
")",
"-",
"len",
"(",
"skipped_columns",
")",
")",
"tempColumnNames",
"=",
"j",
"[",
"\"column_names\"",
"]",
"if",
"j",
"[",
"\"column_names\"",
"]",
"is",
"not",
"None",
"else",
"gen_header",
"(",
"j",
"[",
"\"number_columns\"",
"]",
")",
"useType",
"=",
"[",
"True",
"]",
"*",
"len",
"(",
"tempColumnNames",
")",
"if",
"skipped_columns",
"is",
"not",
"None",
":",
"useType",
"=",
"[",
"True",
"]",
"*",
"len",
"(",
"tempColumnNames",
")",
"for",
"ind",
"in",
"range",
"(",
"len",
"(",
"tempColumnNames",
")",
")",
":",
"if",
"ind",
"in",
"skipped_columns",
":",
"useType",
"[",
"ind",
"]",
"=",
"False",
"if",
"column_names",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"column_names",
",",
"list",
")",
":",
"raise",
"ValueError",
"(",
"\"col_names should be a list\"",
")",
"if",
"(",
"skipped_columns",
"is",
"not",
"None",
")",
"and",
"len",
"(",
"skipped_columns",
")",
">",
"0",
":",
"if",
"(",
"len",
"(",
"column_names",
")",
")",
"!=",
"parse_column_len",
":",
"raise",
"ValueError",
"(",
"\"length of col_names should be equal to the number of columns parsed: %d vs %d\"",
"%",
"(",
"len",
"(",
"column_names",
")",
",",
"parse_column_len",
")",
")",
"else",
":",
"if",
"len",
"(",
"column_names",
")",
"!=",
"len",
"(",
"j",
"[",
"\"column_types\"",
"]",
")",
":",
"raise",
"ValueError",
"(",
"\"length of col_names should be equal to the number of columns: %d vs %d\"",
"%",
"(",
"len",
"(",
"column_names",
")",
",",
"len",
"(",
"j",
"[",
"\"column_types\"",
"]",
")",
")",
")",
"j",
"[",
"\"column_names\"",
"]",
"=",
"column_names",
"counter",
"=",
"0",
"for",
"ind",
"in",
"range",
"(",
"len",
"(",
"tempColumnNames",
")",
")",
":",
"if",
"useType",
"[",
"ind",
"]",
":",
"tempColumnNames",
"[",
"ind",
"]",
"=",
"column_names",
"[",
"counter",
"]",
"counter",
"=",
"counter",
"+",
"1",
"if",
"(",
"column_types",
"is",
"not",
"None",
")",
":",
"# keep the column types to include all columns",
"if",
"isinstance",
"(",
"column_types",
",",
"dict",
")",
":",
"# overwrite dictionary to ordered list of column types. if user didn't specify column type for all names,",
"# use type provided by backend",
"if",
"j",
"[",
"\"column_names\"",
"]",
"is",
"None",
":",
"# no colnames discovered! (C1, C2, ...)",
"j",
"[",
"\"column_names\"",
"]",
"=",
"gen_header",
"(",
"j",
"[",
"\"number_columns\"",
"]",
")",
"if",
"not",
"set",
"(",
"column_types",
".",
"keys",
"(",
")",
")",
".",
"issubset",
"(",
"set",
"(",
"j",
"[",
"\"column_names\"",
"]",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"names specified in col_types is not a subset of the column names\"",
")",
"idx",
"=",
"0",
"column_types_list",
"=",
"[",
"]",
"for",
"name",
"in",
"tempColumnNames",
":",
"# column_names may have already been changed",
"if",
"name",
"in",
"column_types",
":",
"column_types_list",
".",
"append",
"(",
"column_types",
"[",
"name",
"]",
")",
"else",
":",
"column_types_list",
".",
"append",
"(",
"j",
"[",
"\"column_types\"",
"]",
"[",
"idx",
"]",
")",
"idx",
"+=",
"1",
"column_types",
"=",
"column_types_list",
"elif",
"isinstance",
"(",
"column_types",
",",
"list",
")",
":",
"if",
"len",
"(",
"column_types",
")",
"!=",
"parse_column_len",
":",
"raise",
"ValueError",
"(",
"\"length of col_types should be equal to the number of parsed columns\"",
")",
"# need to expand it out to all columns, not just the parsed ones",
"column_types_list",
"=",
"j",
"[",
"\"column_types\"",
"]",
"counter",
"=",
"0",
"for",
"ind",
"in",
"range",
"(",
"len",
"(",
"j",
"[",
"\"column_types\"",
"]",
")",
")",
":",
"if",
"useType",
"[",
"ind",
"]",
"and",
"(",
"column_types",
"[",
"counter",
"]",
"!=",
"None",
")",
":",
"column_types_list",
"[",
"ind",
"]",
"=",
"column_types",
"[",
"counter",
"]",
"counter",
"=",
"counter",
"+",
"1",
"column_types",
"=",
"column_types_list",
"else",
":",
"# not dictionary or list",
"raise",
"ValueError",
"(",
"\"col_types should be a list of types or a dictionary of column names to types\"",
")",
"j",
"[",
"\"column_types\"",
"]",
"=",
"column_types",
"if",
"na_strings",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"na_strings",
",",
"dict",
")",
":",
"# overwrite dictionary to ordered list of lists of na_strings",
"if",
"not",
"j",
"[",
"\"column_names\"",
"]",
":",
"raise",
"ValueError",
"(",
"\"column names should be specified\"",
")",
"if",
"not",
"set",
"(",
"na_strings",
".",
"keys",
"(",
")",
")",
".",
"issubset",
"(",
"set",
"(",
"j",
"[",
"\"column_names\"",
"]",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"names specified in na_strings is not a subset of the column names\"",
")",
"j",
"[",
"\"na_strings\"",
"]",
"=",
"[",
"[",
"]",
"for",
"_",
"in",
"range",
"(",
"len",
"(",
"j",
"[",
"\"column_names\"",
"]",
")",
")",
"]",
"for",
"name",
",",
"na",
"in",
"na_strings",
".",
"items",
"(",
")",
":",
"idx",
"=",
"j",
"[",
"\"column_names\"",
"]",
".",
"index",
"(",
"name",
")",
"if",
"is_type",
"(",
"na",
",",
"str",
")",
":",
"na",
"=",
"[",
"na",
"]",
"for",
"n",
"in",
"na",
":",
"j",
"[",
"\"na_strings\"",
"]",
"[",
"idx",
"]",
".",
"append",
"(",
"quoted",
"(",
"n",
")",
")",
"elif",
"is_type",
"(",
"na_strings",
",",
"[",
"[",
"str",
"]",
"]",
")",
":",
"if",
"len",
"(",
"na_strings",
")",
"!=",
"len",
"(",
"j",
"[",
"\"column_types\"",
"]",
")",
":",
"raise",
"ValueError",
"(",
"\"length of na_strings should be equal to the number of columns\"",
")",
"j",
"[",
"\"na_strings\"",
"]",
"=",
"[",
"[",
"quoted",
"(",
"na",
")",
"for",
"na",
"in",
"col",
"]",
"if",
"col",
"is",
"not",
"None",
"else",
"[",
"]",
"for",
"col",
"in",
"na_strings",
"]",
"elif",
"isinstance",
"(",
"na_strings",
",",
"list",
")",
":",
"j",
"[",
"\"na_strings\"",
"]",
"=",
"[",
"[",
"quoted",
"(",
"na",
")",
"for",
"na",
"in",
"na_strings",
"]",
"]",
"*",
"len",
"(",
"j",
"[",
"\"column_types\"",
"]",
")",
"else",
":",
"# not a dictionary or list",
"raise",
"ValueError",
"(",
"\"na_strings should be a list, a list of lists (one list per column), or a dictionary of column \"",
"\"names to strings which are to be interpreted as missing values\"",
")",
"if",
"skipped_columns",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"skipped_columns",
",",
"list",
")",
":",
"j",
"[",
"\"skipped_columns\"",
"]",
"=",
"[",
"]",
"for",
"colidx",
"in",
"skipped_columns",
":",
"if",
"(",
"colidx",
"<",
"0",
")",
":",
"raise",
"ValueError",
"(",
"\"skipped column index cannot be negative\"",
")",
"j",
"[",
"\"skipped_columns\"",
"]",
".",
"append",
"(",
"colidx",
")",
"# quote column names and column types also when not specified by user",
"if",
"j",
"[",
"\"column_names\"",
"]",
":",
"j",
"[",
"\"column_names\"",
"]",
"=",
"list",
"(",
"map",
"(",
"quoted",
",",
"j",
"[",
"\"column_names\"",
"]",
")",
")",
"j",
"[",
"\"column_types\"",
"]",
"=",
"list",
"(",
"map",
"(",
"quoted",
",",
"j",
"[",
"\"column_types\"",
"]",
")",
")",
"return",
"j"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
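
A sketch of the two-step flow parse_setup is meant for, using a hypothetical local CSV path and hypothetical column types: the raw file is imported without parsing, the guessed setup is adjusted, and the corrected dictionary is handed to parse_raw:

import h2o

h2o.init()
raw = h2o.lazy_import("/tmp/iris.csv")                   # hypothetical file; returns raw frame keys
setup = h2o.parse_setup(raw, column_types=["numeric"] * 4 + ["enum"])
setup["destination_frame"] = "iris_frame"                # corrective measure on the returned dict
frame = h2o.parse_raw(setup)
print(frame.types)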
|
test
|
parse_raw
|
Parse dataset using the parse setup structure.
:param setup: Result of ``h2o.parse_setup()``
:param id: an id for the frame.
:param first_line_is_header: -1, 0, 1 if the first line is to be used as the header
:returns: an :class:`H2OFrame` object.
|
h2o-py/h2o/h2o.py
|
def parse_raw(setup, id=None, first_line_is_header=0):
"""
Parse dataset using the parse setup structure.
:param setup: Result of ``h2o.parse_setup()``
:param id: an id for the frame.
:param first_line_is_header: -1, 0, 1 if the first line is to be used as the header
:returns: an :class:`H2OFrame` object.
"""
assert_is_type(setup, dict)
assert_is_type(id, str, None)
assert_is_type(first_line_is_header, -1, 0, 1)
check_frame_id(id)
if id:
setup["destination_frame"] = id
if first_line_is_header != (-1, 0, 1):
if first_line_is_header not in (-1, 0, 1): raise ValueError("first_line_is_header should be -1, 0, or 1")
setup["check_header"] = first_line_is_header
fr = H2OFrame()
fr._parse_raw(setup)
return fr
|
def parse_raw(setup, id=None, first_line_is_header=0):
"""
Parse dataset using the parse setup structure.
:param setup: Result of ``h2o.parse_setup()``
:param id: an id for the frame.
:param first_line_is_header: -1, 0, 1 if the first line is to be used as the header
:returns: an :class:`H2OFrame` object.
"""
assert_is_type(setup, dict)
assert_is_type(id, str, None)
assert_is_type(first_line_is_header, -1, 0, 1)
check_frame_id(id)
if id:
setup["destination_frame"] = id
if first_line_is_header != (-1, 0, 1):
if first_line_is_header not in (-1, 0, 1): raise ValueError("first_line_is_header should be -1, 0, or 1")
setup["check_header"] = first_line_is_header
fr = H2OFrame()
fr._parse_raw(setup)
return fr
|
[
"Parse",
"dataset",
"using",
"the",
"parse",
"setup",
"structure",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L731-L752
|
[
"def",
"parse_raw",
"(",
"setup",
",",
"id",
"=",
"None",
",",
"first_line_is_header",
"=",
"0",
")",
":",
"assert_is_type",
"(",
"setup",
",",
"dict",
")",
"assert_is_type",
"(",
"id",
",",
"str",
",",
"None",
")",
"assert_is_type",
"(",
"first_line_is_header",
",",
"-",
"1",
",",
"0",
",",
"1",
")",
"check_frame_id",
"(",
"id",
")",
"if",
"id",
":",
"setup",
"[",
"\"destination_frame\"",
"]",
"=",
"id",
"if",
"first_line_is_header",
"!=",
"(",
"-",
"1",
",",
"0",
",",
"1",
")",
":",
"if",
"first_line_is_header",
"not",
"in",
"(",
"-",
"1",
",",
"0",
",",
"1",
")",
":",
"raise",
"ValueError",
"(",
"\"first_line_is_header should be -1, 0, or 1\"",
")",
"setup",
"[",
"\"check_header\"",
"]",
"=",
"first_line_is_header",
"fr",
"=",
"H2OFrame",
"(",
")",
"fr",
".",
"_parse_raw",
"(",
"setup",
")",
"return",
"fr"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
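
A companion sketch for parse_raw (the file path is hypothetical), showing how the header guess can be overridden at parse time instead of in the setup dictionary:

import h2o

h2o.init()
raw = h2o.lazy_import("/tmp/measurements.csv")   # hypothetical file
setup = h2o.parse_setup(raw)                     # accept H2O's guesses
frame = h2o.parse_raw(setup, id="measurements_frame", first_line_is_header=1)
print(frame.names)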
|
test
|
assign
|
(internal) Assign new id to the frame.
:param data: an H2OFrame whose id should be changed
:param xid: new id for the frame.
:returns: the passed frame.
|
h2o-py/h2o/h2o.py
|
def assign(data, xid):
"""
(internal) Assign new id to the frame.
:param data: an H2OFrame whose id should be changed
:param xid: new id for the frame.
:returns: the passed frame.
"""
assert_is_type(data, H2OFrame)
assert_is_type(xid, str)
assert_satisfies(xid, xid != data.frame_id)
check_frame_id(xid)
data._ex = ExprNode("assign", xid, data)._eval_driver(False)
data._ex._cache._id = xid
data._ex._children = None
return data
|
def assign(data, xid):
"""
(internal) Assign new id to the frame.
:param data: an H2OFrame whose id should be changed
:param xid: new id for the frame.
:returns: the passed frame.
"""
assert_is_type(data, H2OFrame)
assert_is_type(xid, str)
assert_satisfies(xid, xid != data.frame_id)
check_frame_id(xid)
data._ex = ExprNode("assign", xid, data)._eval_driver(False)
data._ex._cache._id = xid
data._ex._children = None
return data
|
[
"(",
"internal",
")",
"Assign",
"new",
"id",
"to",
"the",
"frame",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L755-L770
|
[
"def",
"assign",
"(",
"data",
",",
"xid",
")",
":",
"assert_is_type",
"(",
"data",
",",
"H2OFrame",
")",
"assert_is_type",
"(",
"xid",
",",
"str",
")",
"assert_satisfies",
"(",
"xid",
",",
"xid",
"!=",
"data",
".",
"frame_id",
")",
"check_frame_id",
"(",
"xid",
")",
"data",
".",
"_ex",
"=",
"ExprNode",
"(",
"\"assign\"",
",",
"xid",
",",
"data",
")",
".",
"_eval_driver",
"(",
"False",
")",
"data",
".",
"_ex",
".",
"_cache",
".",
"_id",
"=",
"xid",
"data",
".",
"_ex",
".",
"_children",
"=",
"None",
"return",
"data"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
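
Although the docstring marks assign as internal, it is callable from the h2o module; a minimal sketch with a hypothetical frame and id:

import h2o
from h2o.frame import H2OFrame

h2o.init()
df = H2OFrame({"a": [1, 2, 3]})
df = h2o.assign(df, "renamed_frame")   # the frame is now registered under the new id
print(df.frame_id)                     # "renamed_frame"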
|
test
|
deep_copy
|
Create a deep clone of the frame ``data``.
:param data: an H2OFrame to be cloned
:param xid: (internal) id to be assigned to the new frame.
:returns: new :class:`H2OFrame` which is the clone of the passed frame.
|
h2o-py/h2o/h2o.py
|
def deep_copy(data, xid):
"""
Create a deep clone of the frame ``data``.
:param data: an H2OFrame to be cloned
:param xid: (internal) id to be assigned to the new frame.
:returns: new :class:`H2OFrame` which is the clone of the passed frame.
"""
assert_is_type(data, H2OFrame)
assert_is_type(xid, str)
assert_satisfies(xid, xid != data.frame_id)
check_frame_id(xid)
duplicate = data.apply(lambda x: x)
duplicate._ex = ExprNode("assign", xid, duplicate)._eval_driver(False)
duplicate._ex._cache._id = xid
duplicate._ex._children = None
return duplicate
|
def deep_copy(data, xid):
"""
Create a deep clone of the frame ``data``.
:param data: an H2OFrame to be cloned
:param xid: (internal) id to be assigned to the new frame.
:returns: new :class:`H2OFrame` which is the clone of the passed frame.
"""
assert_is_type(data, H2OFrame)
assert_is_type(xid, str)
assert_satisfies(xid, xid != data.frame_id)
check_frame_id(xid)
duplicate = data.apply(lambda x: x)
duplicate._ex = ExprNode("assign", xid, duplicate)._eval_driver(False)
duplicate._ex._cache._id = xid
duplicate._ex._children = None
return duplicate
|
[
"Create",
"a",
"deep",
"clone",
"of",
"the",
"frame",
"data",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L773-L789
|
[
"def",
"deep_copy",
"(",
"data",
",",
"xid",
")",
":",
"assert_is_type",
"(",
"data",
",",
"H2OFrame",
")",
"assert_is_type",
"(",
"xid",
",",
"str",
")",
"assert_satisfies",
"(",
"xid",
",",
"xid",
"!=",
"data",
".",
"frame_id",
")",
"check_frame_id",
"(",
"xid",
")",
"duplicate",
"=",
"data",
".",
"apply",
"(",
"lambda",
"x",
":",
"x",
")",
"duplicate",
".",
"_ex",
"=",
"ExprNode",
"(",
"\"assign\"",
",",
"xid",
",",
"duplicate",
")",
".",
"_eval_driver",
"(",
"False",
")",
"duplicate",
".",
"_ex",
".",
"_cache",
".",
"_id",
"=",
"xid",
"duplicate",
".",
"_ex",
".",
"_children",
"=",
"None",
"return",
"duplicate"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
get_model
|
Load a model from the server.
:param model_id: The model identification in H2O
:returns: Model object, a subclass of H2OEstimator
|
h2o-py/h2o/h2o.py
|
def get_model(model_id):
"""
Load a model from the server.
:param model_id: The model identification in H2O
:returns: Model object, a subclass of H2OEstimator
"""
assert_is_type(model_id, str)
model_json = api("GET /3/Models/%s" % model_id)["models"][0]
algo = model_json["algo"]
if algo == "svd": m = H2OSVD()
elif algo == "pca": m = H2OPrincipalComponentAnalysisEstimator()
elif algo == "drf": m = H2ORandomForestEstimator()
elif algo == "naivebayes": m = H2ONaiveBayesEstimator()
elif algo == "kmeans": m = H2OKMeansEstimator()
elif algo == "glrm": m = H2OGeneralizedLowRankEstimator()
elif algo == "glm": m = H2OGeneralizedLinearEstimator()
elif algo == "gbm": m = H2OGradientBoostingEstimator()
elif algo == "deepwater": m = H2ODeepWaterEstimator()
elif algo == "xgboost": m = H2OXGBoostEstimator()
elif algo == "word2vec": m = H2OWord2vecEstimator()
elif algo == "generic": m = H2OGenericEstimator()
elif algo == "deeplearning":
if model_json["output"]["model_category"] == "AutoEncoder":
m = H2OAutoEncoderEstimator()
else:
m = H2ODeepLearningEstimator()
elif algo == "stackedensemble": m = H2OStackedEnsembleEstimator()
elif algo == "isolationforest": m = H2OIsolationForestEstimator()
else:
raise ValueError("Unknown algo type: " + algo)
m._resolve_model(model_id, model_json)
return m
|
def get_model(model_id):
"""
Load a model from the server.
:param model_id: The model identification in H2O
:returns: Model object, a subclass of H2OEstimator
"""
assert_is_type(model_id, str)
model_json = api("GET /3/Models/%s" % model_id)["models"][0]
algo = model_json["algo"]
if algo == "svd": m = H2OSVD()
elif algo == "pca": m = H2OPrincipalComponentAnalysisEstimator()
elif algo == "drf": m = H2ORandomForestEstimator()
elif algo == "naivebayes": m = H2ONaiveBayesEstimator()
elif algo == "kmeans": m = H2OKMeansEstimator()
elif algo == "glrm": m = H2OGeneralizedLowRankEstimator()
elif algo == "glm": m = H2OGeneralizedLinearEstimator()
elif algo == "gbm": m = H2OGradientBoostingEstimator()
elif algo == "deepwater": m = H2ODeepWaterEstimator()
elif algo == "xgboost": m = H2OXGBoostEstimator()
elif algo == "word2vec": m = H2OWord2vecEstimator()
elif algo == "generic": m = H2OGenericEstimator()
elif algo == "deeplearning":
if model_json["output"]["model_category"] == "AutoEncoder":
m = H2OAutoEncoderEstimator()
else:
m = H2ODeepLearningEstimator()
elif algo == "stackedensemble": m = H2OStackedEnsembleEstimator()
elif algo == "isolationforest": m = H2OIsolationForestEstimator()
else:
raise ValueError("Unknown algo type: " + algo)
m._resolve_model(model_id, model_json)
return m
|
[
"Load",
"a",
"model",
"from",
"the",
"server",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L792-L825
|
[
"def",
"get_model",
"(",
"model_id",
")",
":",
"assert_is_type",
"(",
"model_id",
",",
"str",
")",
"model_json",
"=",
"api",
"(",
"\"GET /3/Models/%s\"",
"%",
"model_id",
")",
"[",
"\"models\"",
"]",
"[",
"0",
"]",
"algo",
"=",
"model_json",
"[",
"\"algo\"",
"]",
"if",
"algo",
"==",
"\"svd\"",
":",
"m",
"=",
"H2OSVD",
"(",
")",
"elif",
"algo",
"==",
"\"pca\"",
":",
"m",
"=",
"H2OPrincipalComponentAnalysisEstimator",
"(",
")",
"elif",
"algo",
"==",
"\"drf\"",
":",
"m",
"=",
"H2ORandomForestEstimator",
"(",
")",
"elif",
"algo",
"==",
"\"naivebayes\"",
":",
"m",
"=",
"H2ONaiveBayesEstimator",
"(",
")",
"elif",
"algo",
"==",
"\"kmeans\"",
":",
"m",
"=",
"H2OKMeansEstimator",
"(",
")",
"elif",
"algo",
"==",
"\"glrm\"",
":",
"m",
"=",
"H2OGeneralizedLowRankEstimator",
"(",
")",
"elif",
"algo",
"==",
"\"glm\"",
":",
"m",
"=",
"H2OGeneralizedLinearEstimator",
"(",
")",
"elif",
"algo",
"==",
"\"gbm\"",
":",
"m",
"=",
"H2OGradientBoostingEstimator",
"(",
")",
"elif",
"algo",
"==",
"\"deepwater\"",
":",
"m",
"=",
"H2ODeepWaterEstimator",
"(",
")",
"elif",
"algo",
"==",
"\"xgboost\"",
":",
"m",
"=",
"H2OXGBoostEstimator",
"(",
")",
"elif",
"algo",
"==",
"\"word2vec\"",
":",
"m",
"=",
"H2OWord2vecEstimator",
"(",
")",
"elif",
"algo",
"==",
"\"generic\"",
":",
"m",
"=",
"H2OGenericEstimator",
"(",
")",
"elif",
"algo",
"==",
"\"deeplearning\"",
":",
"if",
"model_json",
"[",
"\"output\"",
"]",
"[",
"\"model_category\"",
"]",
"==",
"\"AutoEncoder\"",
":",
"m",
"=",
"H2OAutoEncoderEstimator",
"(",
")",
"else",
":",
"m",
"=",
"H2ODeepLearningEstimator",
"(",
")",
"elif",
"algo",
"==",
"\"stackedensemble\"",
":",
"m",
"=",
"H2OStackedEnsembleEstimator",
"(",
")",
"elif",
"algo",
"==",
"\"isolationforest\"",
":",
"m",
"=",
"H2OIsolationForestEstimator",
"(",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown algo type: \"",
"+",
"algo",
")",
"m",
".",
"_resolve_model",
"(",
"model_id",
",",
"model_json",
")",
"return",
"m"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
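A minimal usage sketch for get_model, assuming a running H2O cluster; the CSV path and the "response" column name below are placeholders, not part of the original source.

import h2o
from h2o.estimators.gbm import H2OGradientBoostingEstimator

h2o.init()
train = h2o.import_file("train.csv")               # hypothetical path
gbm = H2OGradientBoostingEstimator(ntrees=10)
gbm.train(y="response", training_frame=train)      # "response" is a placeholder column
# Fetch a fresh handle to the same server-side model by its id.
model = h2o.get_model(gbm.model_id)
assert model.model_id == gbm.model_id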
test
|
get_grid
|
Return the specified grid.
:param grid_id: The grid identification in h2o
:returns: an :class:`H2OGridSearch` instance.
|
h2o-py/h2o/h2o.py
|
def get_grid(grid_id):
"""
Return the specified grid.
:param grid_id: The grid identification in h2o
:returns: an :class:`H2OGridSearch` instance.
"""
assert_is_type(grid_id, str)
grid_json = api("GET /99/Grids/%s" % grid_id)
models = [get_model(key["name"]) for key in grid_json["model_ids"]]
# get first model returned in list of models from grid search to get model class (binomial, multinomial, etc)
first_model_json = api("GET /3/Models/%s" % grid_json["model_ids"][0]["name"])["models"][0]
gs = H2OGridSearch(None, {}, grid_id)
gs._resolve_grid(grid_id, grid_json, first_model_json)
gs.models = models
hyper_params = {param: set() for param in gs.hyper_names}
for param in gs.hyper_names:
for model in models:
if isinstance(model.full_parameters[param]["actual_value"], list):
hyper_params[param].add(model.full_parameters[param]["actual_value"][0])
else:
hyper_params[param].add(model.full_parameters[param]["actual_value"])
hyper_params = {str(param): list(vals) for param, vals in hyper_params.items()}
gs.hyper_params = hyper_params
gs.model = model.__class__()
return gs
|
def get_grid(grid_id):
"""
Return the specified grid.
:param grid_id: The grid identification in h2o
:returns: an :class:`H2OGridSearch` instance.
"""
assert_is_type(grid_id, str)
grid_json = api("GET /99/Grids/%s" % grid_id)
models = [get_model(key["name"]) for key in grid_json["model_ids"]]
# get first model returned in list of models from grid search to get model class (binomial, multinomial, etc)
first_model_json = api("GET /3/Models/%s" % grid_json["model_ids"][0]["name"])["models"][0]
gs = H2OGridSearch(None, {}, grid_id)
gs._resolve_grid(grid_id, grid_json, first_model_json)
gs.models = models
hyper_params = {param: set() for param in gs.hyper_names}
for param in gs.hyper_names:
for model in models:
if isinstance(model.full_parameters[param]["actual_value"], list):
hyper_params[param].add(model.full_parameters[param]["actual_value"][0])
else:
hyper_params[param].add(model.full_parameters[param]["actual_value"])
hyper_params = {str(param): list(vals) for param, vals in hyper_params.items()}
gs.hyper_params = hyper_params
gs.model = model.__class__()
return gs
|
[
"Return",
"the",
"specified",
"grid",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L828-L855
|
[
"def",
"get_grid",
"(",
"grid_id",
")",
":",
"assert_is_type",
"(",
"grid_id",
",",
"str",
")",
"grid_json",
"=",
"api",
"(",
"\"GET /99/Grids/%s\"",
"%",
"grid_id",
")",
"models",
"=",
"[",
"get_model",
"(",
"key",
"[",
"\"name\"",
"]",
")",
"for",
"key",
"in",
"grid_json",
"[",
"\"model_ids\"",
"]",
"]",
"# get first model returned in list of models from grid search to get model class (binomial, multinomial, etc)",
"first_model_json",
"=",
"api",
"(",
"\"GET /3/Models/%s\"",
"%",
"grid_json",
"[",
"\"model_ids\"",
"]",
"[",
"0",
"]",
"[",
"\"name\"",
"]",
")",
"[",
"\"models\"",
"]",
"[",
"0",
"]",
"gs",
"=",
"H2OGridSearch",
"(",
"None",
",",
"{",
"}",
",",
"grid_id",
")",
"gs",
".",
"_resolve_grid",
"(",
"grid_id",
",",
"grid_json",
",",
"first_model_json",
")",
"gs",
".",
"models",
"=",
"models",
"hyper_params",
"=",
"{",
"param",
":",
"set",
"(",
")",
"for",
"param",
"in",
"gs",
".",
"hyper_names",
"}",
"for",
"param",
"in",
"gs",
".",
"hyper_names",
":",
"for",
"model",
"in",
"models",
":",
"if",
"isinstance",
"(",
"model",
".",
"full_parameters",
"[",
"param",
"]",
"[",
"\"actual_value\"",
"]",
",",
"list",
")",
":",
"hyper_params",
"[",
"param",
"]",
".",
"add",
"(",
"model",
".",
"full_parameters",
"[",
"param",
"]",
"[",
"\"actual_value\"",
"]",
"[",
"0",
"]",
")",
"else",
":",
"hyper_params",
"[",
"param",
"]",
".",
"add",
"(",
"model",
".",
"full_parameters",
"[",
"param",
"]",
"[",
"\"actual_value\"",
"]",
")",
"hyper_params",
"=",
"{",
"str",
"(",
"param",
")",
":",
"list",
"(",
"vals",
")",
"for",
"param",
",",
"vals",
"in",
"hyper_params",
".",
"items",
"(",
")",
"}",
"gs",
".",
"hyper_params",
"=",
"hyper_params",
"gs",
".",
"model",
"=",
"model",
".",
"__class__",
"(",
")",
"return",
"gs"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
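A small sketch of how get_grid might be used, assuming a running cluster; the data path, column name, and grid id are illustrative placeholders.

import h2o
from h2o.grid.grid_search import H2OGridSearch
from h2o.estimators.gbm import H2OGradientBoostingEstimator

h2o.init()
train = h2o.import_file("train.csv")                       # hypothetical path
grid = H2OGridSearch(model=H2OGradientBoostingEstimator,
                     hyper_params={"ntrees": [5, 10]},
                     grid_id="gbm_grid")
grid.train(y="response", training_frame=train)
# Rebuild a full H2OGridSearch handle (models and hyper_params) from the server.
same_grid = h2o.get_grid("gbm_grid")
print(len(same_grid.models))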
test
|
get_frame
|
Obtain a handle to the frame in H2O with the frame_id key.
:param str frame_id: id of the frame to retrieve.
:returns: an :class:`H2OFrame` object
|
h2o-py/h2o/h2o.py
|
def get_frame(frame_id, **kwargs):
"""
Obtain a handle to the frame in H2O with the frame_id key.
:param str frame_id: id of the frame to retrieve.
:returns: an :class:`H2OFrame` object
"""
assert_is_type(frame_id, str)
return H2OFrame.get_frame(frame_id, **kwargs)
|
def get_frame(frame_id, **kwargs):
"""
Obtain a handle to the frame in H2O with the frame_id key.
:param str frame_id: id of the frame to retrieve.
:returns: an :class:`H2OFrame` object
"""
assert_is_type(frame_id, str)
return H2OFrame.get_frame(frame_id, **kwargs)
|
[
"Obtain",
"a",
"handle",
"to",
"the",
"frame",
"in",
"H2O",
"with",
"the",
"frame_id",
"key",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L858-L866
|
[
"def",
"get_frame",
"(",
"frame_id",
",",
"*",
"*",
"kwargs",
")",
":",
"assert_is_type",
"(",
"frame_id",
",",
"str",
")",
"return",
"H2OFrame",
".",
"get_frame",
"(",
"frame_id",
",",
"*",
"*",
"kwargs",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
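A brief sketch for get_frame, assuming a running cluster and a hypothetical file path.

import h2o

h2o.init()
iris = h2o.import_file("iris.csv")          # hypothetical path
# A frame can be re-attached later by its key, e.g. from another script
# connected to the same cluster.
same_frame = h2o.get_frame(iris.frame_id)
print(same_frame.nrow, same_frame.ncol)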
test
|
remove
|
Remove object(s) from H2O.
:param x: H2OFrame, H2OEstimator, or string, or a list of those things: the object(s) or unique id(s)
pointing to the object(s) to be removed.
|
h2o-py/h2o/h2o.py
|
def remove(x):
"""
Remove object(s) from H2O.
:param x: H2OFrame, H2OEstimator, or string, or a list of those things: the object(s) or unique id(s)
pointing to the object(s) to be removed.
"""
item_type = U(str, H2OFrame, H2OEstimator)
assert_is_type(x, item_type, [item_type])
if not isinstance(x, list): x = [x]
for xi in x:
if isinstance(xi, H2OFrame):
xi_id = xi._ex._cache._id # String or None
if xi_id is None: return # Lazy frame, never evaluated, nothing in cluster
rapids("(rm {})".format(xi_id))
xi._ex = None
elif isinstance(xi, H2OEstimator):
api("DELETE /3/DKV/%s" % xi.model_id)
xi._id = None
else:
# string may be a Frame key name part of a rapids session... need to call rm thru rapids here
try:
rapids("(rm {})".format(xi))
except:
api("DELETE /3/DKV/%s" % xi)
|
def remove(x):
"""
Remove object(s) from H2O.
:param x: H2OFrame, H2OEstimator, or string, or a list of those things: the object(s) or unique id(s)
pointing to the object(s) to be removed.
"""
item_type = U(str, H2OFrame, H2OEstimator)
assert_is_type(x, item_type, [item_type])
if not isinstance(x, list): x = [x]
for xi in x:
if isinstance(xi, H2OFrame):
xi_id = xi._ex._cache._id # String or None
if xi_id is None: return # Lazy frame, never evaluated, nothing in cluster
rapids("(rm {})".format(xi_id))
xi._ex = None
elif isinstance(xi, H2OEstimator):
api("DELETE /3/DKV/%s" % xi.model_id)
xi._id = None
else:
# string may be a Frame key name part of a rapids session... need to call rm thru rapids here
try:
rapids("(rm {})".format(xi))
except:
api("DELETE /3/DKV/%s" % xi)
|
[
"Remove",
"object",
"(",
"s",
")",
"from",
"H2O",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L908-L932
|
[
"def",
"remove",
"(",
"x",
")",
":",
"item_type",
"=",
"U",
"(",
"str",
",",
"H2OFrame",
",",
"H2OEstimator",
")",
"assert_is_type",
"(",
"x",
",",
"item_type",
",",
"[",
"item_type",
"]",
")",
"if",
"not",
"isinstance",
"(",
"x",
",",
"list",
")",
":",
"x",
"=",
"[",
"x",
"]",
"for",
"xi",
"in",
"x",
":",
"if",
"isinstance",
"(",
"xi",
",",
"H2OFrame",
")",
":",
"xi_id",
"=",
"xi",
".",
"_ex",
".",
"_cache",
".",
"_id",
"# String or None",
"if",
"xi_id",
"is",
"None",
":",
"return",
"# Lazy frame, never evaluated, nothing in cluster",
"rapids",
"(",
"\"(rm {})\"",
".",
"format",
"(",
"xi_id",
")",
")",
"xi",
".",
"_ex",
"=",
"None",
"elif",
"isinstance",
"(",
"xi",
",",
"H2OEstimator",
")",
":",
"api",
"(",
"\"DELETE /3/DKV/%s\"",
"%",
"xi",
".",
"model_id",
")",
"xi",
".",
"_id",
"=",
"None",
"else",
":",
"# string may be a Frame key name part of a rapids session... need to call rm thru rapids here",
"try",
":",
"rapids",
"(",
"\"(rm {})\"",
".",
"format",
"(",
"xi",
")",
")",
"except",
":",
"api",
"(",
"\"DELETE /3/DKV/%s\"",
"%",
"xi",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
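An illustrative sketch for remove, assuming a running cluster; the file path is a placeholder. It shows removal by object, with the by-key and list forms left as comments.

import h2o

h2o.init()
df = h2o.import_file("data.csv")        # hypothetical path
key = df.frame_id
h2o.remove(df)                          # remove by object; the local handle becomes unusable
# h2o.remove(key)                       # equivalently, remove by key string
# h2o.remove([frame_a, model_b])        # or remove several objects at once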
test
|
download_pojo
|
Download the POJO for this model to the directory specified by path; if path is "", then dump to screen.
:param model: the model whose scoring POJO should be retrieved.
:param path: an absolute path to the directory where POJO should be saved.
:param get_jar: retrieve the h2o-genmodel.jar also (will be saved to the same folder ``path``).
:param jar_name: Custom name of genmodel jar.
:returns: location of the downloaded POJO file.
|
h2o-py/h2o/h2o.py
|
def download_pojo(model, path="", get_jar=True, jar_name=""):
"""
Download the POJO for this model to the directory specified by path; if path is "", then dump to screen.
:param model: the model whose scoring POJO should be retrieved.
:param path: an absolute path to the directory where POJO should be saved.
:param get_jar: retrieve the h2o-genmodel.jar also (will be saved to the same folder ``path``).
:param jar_name: Custom name of genmodel jar.
:returns: location of the downloaded POJO file.
"""
assert_is_type(model, ModelBase)
assert_is_type(path, str)
assert_is_type(get_jar, bool)
if not model.have_pojo:
raise H2OValueError("Export to POJO not supported")
if path == "":
java_code = api("GET /3/Models.java/%s" % model.model_id)
print(java_code)
return None
else:
filename = api("GET /3/Models.java/%s" % model.model_id, save_to=path)
if get_jar:
if jar_name == "":
api("GET /3/h2o-genmodel.jar", save_to=os.path.join(path, "h2o-genmodel.jar"))
else:
api("GET /3/h2o-genmodel.jar", save_to=os.path.join(path, jar_name))
return filename
|
def download_pojo(model, path="", get_jar=True, jar_name=""):
"""
Download the POJO for this model to the directory specified by path; if path is "", then dump to screen.
:param model: the model whose scoring POJO should be retrieved.
:param path: an absolute path to the directory where POJO should be saved.
:param get_jar: retrieve the h2o-genmodel.jar also (will be saved to the same folder ``path``).
:param jar_name: Custom name of genmodel jar.
:returns: location of the downloaded POJO file.
"""
assert_is_type(model, ModelBase)
assert_is_type(path, str)
assert_is_type(get_jar, bool)
if not model.have_pojo:
raise H2OValueError("Export to POJO not supported")
if path == "":
java_code = api("GET /3/Models.java/%s" % model.model_id)
print(java_code)
return None
else:
filename = api("GET /3/Models.java/%s" % model.model_id, save_to=path)
if get_jar:
if jar_name == "":
api("GET /3/h2o-genmodel.jar", save_to=os.path.join(path, "h2o-genmodel.jar"))
else:
api("GET /3/h2o-genmodel.jar", save_to=os.path.join(path, jar_name))
return filename
|
[
"Download",
"the",
"POJO",
"for",
"this",
"model",
"to",
"the",
"directory",
"specified",
"by",
"path",
";",
"if",
"path",
"is",
"then",
"dump",
"to",
"screen",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L978-L1006
|
[
"def",
"download_pojo",
"(",
"model",
",",
"path",
"=",
"\"\"",
",",
"get_jar",
"=",
"True",
",",
"jar_name",
"=",
"\"\"",
")",
":",
"assert_is_type",
"(",
"model",
",",
"ModelBase",
")",
"assert_is_type",
"(",
"path",
",",
"str",
")",
"assert_is_type",
"(",
"get_jar",
",",
"bool",
")",
"if",
"not",
"model",
".",
"have_pojo",
":",
"raise",
"H2OValueError",
"(",
"\"Export to POJO not supported\"",
")",
"if",
"path",
"==",
"\"\"",
":",
"java_code",
"=",
"api",
"(",
"\"GET /3/Models.java/%s\"",
"%",
"model",
".",
"model_id",
")",
"print",
"(",
"java_code",
")",
"return",
"None",
"else",
":",
"filename",
"=",
"api",
"(",
"\"GET /3/Models.java/%s\"",
"%",
"model",
".",
"model_id",
",",
"save_to",
"=",
"path",
")",
"if",
"get_jar",
":",
"if",
"jar_name",
"==",
"\"\"",
":",
"api",
"(",
"\"GET /3/h2o-genmodel.jar\"",
",",
"save_to",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"\"h2o-genmodel.jar\"",
")",
")",
"else",
":",
"api",
"(",
"\"GET /3/h2o-genmodel.jar\"",
",",
"save_to",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"jar_name",
")",
")",
"return",
"filename"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
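A usage sketch for download_pojo, assuming a running cluster, a model type that supports POJO export, and a target directory that already exists; paths and column names are placeholders.

import h2o
from h2o.estimators.gbm import H2OGradientBoostingEstimator

h2o.init()
train = h2o.import_file("train.csv")                 # hypothetical path
gbm = H2OGradientBoostingEstimator(ntrees=10)
gbm.train(y="response", training_frame=train)
h2o.download_pojo(gbm)                               # empty path: print POJO source to the screen
path = h2o.download_pojo(gbm, path="/tmp/pojo")      # save POJO plus h2o-genmodel.jar to /tmp/pojo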
test
|
download_csv
|
Download an H2O data set to a CSV file on the local disk.
Warning: Files located on the H2O server may be very large! Make sure you have enough
hard drive space to accommodate the entire file.
:param data: an H2OFrame object to be downloaded.
:param filename: name for the CSV file where the data should be saved to.
|
h2o-py/h2o/h2o.py
|
def download_csv(data, filename):
"""
Download an H2O data set to a CSV file on the local disk.
Warning: Files located on the H2O server may be very large! Make sure you have enough
hard drive space to accommodate the entire file.
:param data: an H2OFrame object to be downloaded.
:param filename: name for the CSV file where the data should be saved to.
"""
assert_is_type(data, H2OFrame)
assert_is_type(filename, str)
url = h2oconn.make_url("DownloadDataset", 3) + "?frame_id={}&hex_string=false".format(data.frame_id)
with open(filename, "wb") as f:
f.write(urlopen()(url).read())
|
def download_csv(data, filename):
"""
Download an H2O data set to a CSV file on the local disk.
Warning: Files located on the H2O server may be very large! Make sure you have enough
hard drive space to accommodate the entire file.
:param data: an H2OFrame object to be downloaded.
:param filename: name for the CSV file where the data should be saved to.
"""
assert_is_type(data, H2OFrame)
assert_is_type(filename, str)
url = h2oconn.make_url("DownloadDataset", 3) + "?frame_id={}&hex_string=false".format(data.frame_id)
with open(filename, "wb") as f:
f.write(urlopen()(url).read())
|
[
"Download",
"an",
"H2O",
"data",
"set",
"to",
"a",
"CSV",
"file",
"on",
"the",
"local",
"disk",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L1009-L1023
|
[
"def",
"download_csv",
"(",
"data",
",",
"filename",
")",
":",
"assert_is_type",
"(",
"data",
",",
"H2OFrame",
")",
"assert_is_type",
"(",
"filename",
",",
"str",
")",
"url",
"=",
"h2oconn",
".",
"make_url",
"(",
"\"DownloadDataset\"",
",",
"3",
")",
"+",
"\"?frame_id={}&hex_string=false\"",
".",
"format",
"(",
"data",
".",
"frame_id",
")",
"with",
"open",
"(",
"filename",
",",
"\"wb\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"urlopen",
"(",
")",
"(",
"url",
")",
".",
"read",
"(",
")",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
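A minimal sketch for download_csv, assuming a running cluster; the file names are placeholders.

import h2o

h2o.init()
df = h2o.import_file("data.csv")            # hypothetical path
# Pull the whole frame from the cluster and write it as a local CSV file.
h2o.download_csv(df, "data_local.csv")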
test
|
download_all_logs
|
Download H2O log files to disk.
:param dirname: a character string indicating the directory that the log file should be saved in.
:param filename: a string indicating the name to give the downloaded log archive. Note that the saved format is .zip, so the file name must include the .zip extension.
:returns: path of logs written in a zip file.
:examples: The following code will save the zip file `'autoh2o_log.zip'` into a directory called `your_directory_name`, one level below your current working directory. (Note that `your_directory_name` should be replaced with the name of a directory that already exists.)
>>> h2o.download_all_logs(dirname='./your_directory_name/', filename = 'autoh2o_log.zip')
|
h2o-py/h2o/h2o.py
|
def download_all_logs(dirname=".", filename=None):
"""
Download H2O log files to disk.
:param dirname: a character string indicating the directory that the log file should be saved in.
    :param filename: a string indicating the name to give the downloaded log archive. Note that the saved format is .zip, so the file name must include the .zip extension.
    :returns: path of logs written in a zip file.
    :examples: The following code will save the zip file `'autoh2o_log.zip'` into a directory called `your_directory_name`, one level below your current working directory. (Note that `your_directory_name` should be replaced with the name of a directory that already exists.)
>>> h2o.download_all_logs(dirname='./your_directory_name/', filename = 'autoh2o_log.zip')
"""
assert_is_type(dirname, str)
assert_is_type(filename, str, None)
url = "%s/3/Logs/download" % h2oconn.base_url
opener = urlopen()
response = opener(url)
if not os.path.exists(dirname): os.mkdir(dirname)
if filename is None:
if PY3:
headers = [h[1] for h in response.headers._headers]
else:
headers = response.headers.headers
for h in headers:
if "filename=" in h:
filename = h.split("filename=")[1].strip()
break
path = os.path.join(dirname, filename)
response = opener(url).read()
print("Writing H2O logs to " + path)
with open(path, "wb") as f:
f.write(response)
return path
|
def download_all_logs(dirname=".", filename=None):
"""
Download H2O log files to disk.
:param dirname: a character string indicating the directory that the log file should be saved in.
    :param filename: a string indicating the name to give the downloaded log archive. Note that the saved format is .zip, so the file name must include the .zip extension.
    :returns: path of logs written in a zip file.
    :examples: The following code will save the zip file `'autoh2o_log.zip'` into a directory called `your_directory_name`, one level below your current working directory. (Note that `your_directory_name` should be replaced with the name of a directory that already exists.)
>>> h2o.download_all_logs(dirname='./your_directory_name/', filename = 'autoh2o_log.zip')
"""
assert_is_type(dirname, str)
assert_is_type(filename, str, None)
url = "%s/3/Logs/download" % h2oconn.base_url
opener = urlopen()
response = opener(url)
if not os.path.exists(dirname): os.mkdir(dirname)
if filename is None:
if PY3:
headers = [h[1] for h in response.headers._headers]
else:
headers = response.headers.headers
for h in headers:
if "filename=" in h:
filename = h.split("filename=")[1].strip()
break
path = os.path.join(dirname, filename)
response = opener(url).read()
print("Writing H2O logs to " + path)
with open(path, "wb") as f:
f.write(response)
return path
|
[
"Download",
"H2O",
"log",
"files",
"to",
"disk",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L1026-L1062
|
[
"def",
"download_all_logs",
"(",
"dirname",
"=",
"\".\"",
",",
"filename",
"=",
"None",
")",
":",
"assert_is_type",
"(",
"dirname",
",",
"str",
")",
"assert_is_type",
"(",
"filename",
",",
"str",
",",
"None",
")",
"url",
"=",
"\"%s/3/Logs/download\"",
"%",
"h2oconn",
".",
"base_url",
"opener",
"=",
"urlopen",
"(",
")",
"response",
"=",
"opener",
"(",
"url",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dirname",
")",
":",
"os",
".",
"mkdir",
"(",
"dirname",
")",
"if",
"filename",
"is",
"None",
":",
"if",
"PY3",
":",
"headers",
"=",
"[",
"h",
"[",
"1",
"]",
"for",
"h",
"in",
"response",
".",
"headers",
".",
"_headers",
"]",
"else",
":",
"headers",
"=",
"response",
".",
"headers",
".",
"headers",
"for",
"h",
"in",
"headers",
":",
"if",
"\"filename=\"",
"in",
"h",
":",
"filename",
"=",
"h",
".",
"split",
"(",
"\"filename=\"",
")",
"[",
"1",
"]",
".",
"strip",
"(",
")",
"break",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"filename",
")",
"response",
"=",
"opener",
"(",
"url",
")",
".",
"read",
"(",
")",
"print",
"(",
"\"Writing H2O logs to \"",
"+",
"path",
")",
"with",
"open",
"(",
"path",
",",
"\"wb\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"response",
")",
"return",
"path"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
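A short sketch for download_all_logs, assuming a running cluster; the directory and file names are placeholders.

import h2o

h2o.init()
# Let H2O pick the archive name inside ./h2o_logs (the directory is created if missing).
path = h2o.download_all_logs(dirname="./h2o_logs")
# Or choose the name explicitly; the archive is always a zip file.
path = h2o.download_all_logs(dirname="./h2o_logs", filename="cluster_logs.zip")
print(path)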
test
|
save_model
|
Save an H2O Model object to disk. (Note that ensemble binary models can now be saved using this method.)
:param model: The model object to save.
:param path: a path to save the model at (hdfs, s3, local)
:param force: if True overwrite destination directory in case it exists, or throw exception if set to False.
:returns: the path of the saved model
:examples:
>>> path = h2o.save_model(my_model, dir=my_path)
|
h2o-py/h2o/h2o.py
|
def save_model(model, path="", force=False):
"""
Save an H2O Model object to disk. (Note that ensemble binary models can now be saved using this method.)
:param model: The model object to save.
:param path: a path to save the model at (hdfs, s3, local)
:param force: if True overwrite destination directory in case it exists, or throw exception if set to False.
:returns: the path of the saved model
:examples:
>>> path = h2o.save_model(my_model, dir=my_path)
"""
assert_is_type(model, ModelBase)
assert_is_type(path, str)
assert_is_type(force, bool)
path = os.path.join(os.getcwd() if path == "" else path, model.model_id)
return api("GET /99/Models.bin/%s" % model.model_id, data={"dir": path, "force": force})["dir"]
|
def save_model(model, path="", force=False):
"""
Save an H2O Model object to disk. (Note that ensemble binary models can now be saved using this method.)
:param model: The model object to save.
:param path: a path to save the model at (hdfs, s3, local)
:param force: if True overwrite destination directory in case it exists, or throw exception if set to False.
:returns: the path of the saved model
:examples:
>>> path = h2o.save_model(my_model, dir=my_path)
"""
assert_is_type(model, ModelBase)
assert_is_type(path, str)
assert_is_type(force, bool)
path = os.path.join(os.getcwd() if path == "" else path, model.model_id)
return api("GET /99/Models.bin/%s" % model.model_id, data={"dir": path, "force": force})["dir"]
|
[
"Save",
"an",
"H2O",
"Model",
"object",
"to",
"disk",
".",
"(",
"Note",
"that",
"ensemble",
"binary",
"models",
"can",
"now",
"be",
"saved",
"using",
"this",
"method",
".",
")"
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L1065-L1082
|
[
"def",
"save_model",
"(",
"model",
",",
"path",
"=",
"\"\"",
",",
"force",
"=",
"False",
")",
":",
"assert_is_type",
"(",
"model",
",",
"ModelBase",
")",
"assert_is_type",
"(",
"path",
",",
"str",
")",
"assert_is_type",
"(",
"force",
",",
"bool",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
"if",
"path",
"==",
"\"\"",
"else",
"path",
",",
"model",
".",
"model_id",
")",
"return",
"api",
"(",
"\"GET /99/Models.bin/%s\"",
"%",
"model",
".",
"model_id",
",",
"data",
"=",
"{",
"\"dir\"",
":",
"path",
",",
"\"force\"",
":",
"force",
"}",
")",
"[",
"\"dir\"",
"]"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
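A usage sketch for save_model, assuming a running cluster; the data path, column name, and output directory are placeholders.

import h2o
from h2o.estimators.gbm import H2OGradientBoostingEstimator

h2o.init()
train = h2o.import_file("train.csv")                     # hypothetical path
gbm = H2OGradientBoostingEstimator(ntrees=10)
gbm.train(y="response", training_frame=train)
# The binary model is written to <path>/<model_id>; force=True overwrites an existing file.
saved_path = h2o.save_model(gbm, path="/tmp/h2o_models", force=True)
print(saved_path)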
test
|
load_model
|
Load a saved H2O model from disk. (Note that ensemble binary models can now be loaded using this method.)
:param path: the full path of the H2O Model to be imported.
:returns: an :class:`H2OEstimator` object
:examples:
>>> path = h2o.save_model(my_model, dir=my_path)
>>> h2o.load_model(path)
|
h2o-py/h2o/h2o.py
|
def load_model(path):
"""
Load a saved H2O model from disk. (Note that ensemble binary models can now be loaded using this method.)
:param path: the full path of the H2O Model to be imported.
:returns: an :class:`H2OEstimator` object
:examples:
>>> path = h2o.save_model(my_model, dir=my_path)
>>> h2o.load_model(path)
"""
assert_is_type(path, str)
res = api("POST /99/Models.bin/%s" % "", data={"dir": path})
return get_model(res["models"][0]["model_id"]["name"])
|
def load_model(path):
"""
Load a saved H2O model from disk. (Note that ensemble binary models can now be loaded using this method.)
:param path: the full path of the H2O Model to be imported.
:returns: an :class:`H2OEstimator` object
:examples:
>>> path = h2o.save_model(my_model, dir=my_path)
>>> h2o.load_model(path)
"""
assert_is_type(path, str)
res = api("POST /99/Models.bin/%s" % "", data={"dir": path})
return get_model(res["models"][0]["model_id"]["name"])
|
[
"Load",
"a",
"saved",
"H2O",
"model",
"from",
"disk",
".",
"(",
"Note",
"that",
"ensemble",
"binary",
"models",
"can",
"now",
"be",
"loaded",
"using",
"this",
"method",
".",
")"
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L1085-L1099
|
[
"def",
"load_model",
"(",
"path",
")",
":",
"assert_is_type",
"(",
"path",
",",
"str",
")",
"res",
"=",
"api",
"(",
"\"POST /99/Models.bin/%s\"",
"%",
"\"\"",
",",
"data",
"=",
"{",
"\"dir\"",
":",
"path",
"}",
")",
"return",
"get_model",
"(",
"res",
"[",
"\"models\"",
"]",
"[",
"0",
"]",
"[",
"\"model_id\"",
"]",
"[",
"\"name\"",
"]",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
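A sketch pairing load_model with save_model, assuming a running cluster; paths and the column name are placeholders.

import h2o
from h2o.estimators.glm import H2OGeneralizedLinearEstimator

h2o.init()
train = h2o.import_file("train.csv")                     # hypothetical path
glm = H2OGeneralizedLinearEstimator()
glm.train(y="response", training_frame=train)
saved_path = h2o.save_model(glm, path="/tmp/h2o_models", force=True)
# Later, possibly from a new Python session attached to the same machine:
reloaded = h2o.load_model(saved_path)
print(reloaded.model_id)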
test
|
export_file
|
Export a given H2OFrame to a path on the machine this python session is currently connected to.
:param frame: the Frame to save to disk.
:param path: the path to the save point on disk.
:param force: if True, overwrite any preexisting file with the same path
:param parts: enables export to multiple 'part' files instead of just a single file.
Convenient for large datasets that take too long to store in a single file.
Use parts=-1 to instruct H2O to determine the optimal number of part files or
specify your desired maximum number of part files. Path needs to be a directory
        when exporting to multiple files, and that directory must be empty.
Default is ``parts = 1``, which is to export to a single file.
|
h2o-py/h2o/h2o.py
|
def export_file(frame, path, force=False, parts=1):
"""
Export a given H2OFrame to a path on the machine this python session is currently connected to.
:param frame: the Frame to save to disk.
:param path: the path to the save point on disk.
:param force: if True, overwrite any preexisting file with the same path
:param parts: enables export to multiple 'part' files instead of just a single file.
Convenient for large datasets that take too long to store in a single file.
Use parts=-1 to instruct H2O to determine the optimal number of part files or
specify your desired maximum number of part files. Path needs to be a directory
        when exporting to multiple files, and that directory must be empty.
Default is ``parts = 1``, which is to export to a single file.
"""
assert_is_type(frame, H2OFrame)
assert_is_type(path, str)
assert_is_type(force, bool)
assert_is_type(parts, int)
H2OJob(api("POST /3/Frames/%s/export" % (frame.frame_id), data={"path": path, "num_parts": parts, "force": force}),
"Export File").poll()
|
def export_file(frame, path, force=False, parts=1):
"""
Export a given H2OFrame to a path on the machine this python session is currently connected to.
:param frame: the Frame to save to disk.
:param path: the path to the save point on disk.
:param force: if True, overwrite any preexisting file with the same path
:param parts: enables export to multiple 'part' files instead of just a single file.
Convenient for large datasets that take too long to store in a single file.
Use parts=-1 to instruct H2O to determine the optimal number of part files or
specify your desired maximum number of part files. Path needs to be a directory
        when exporting to multiple files, and that directory must be empty.
Default is ``parts = 1``, which is to export to a single file.
"""
assert_is_type(frame, H2OFrame)
assert_is_type(path, str)
assert_is_type(force, bool)
assert_is_type(parts, int)
H2OJob(api("POST /3/Frames/%s/export" % (frame.frame_id), data={"path": path, "num_parts": parts, "force": force}),
"Export File").poll()
|
[
"Export",
"a",
"given",
"H2OFrame",
"to",
"a",
"path",
"on",
"the",
"machine",
"this",
"python",
"session",
"is",
"currently",
"connected",
"to",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L1102-L1121
|
[
"def",
"export_file",
"(",
"frame",
",",
"path",
",",
"force",
"=",
"False",
",",
"parts",
"=",
"1",
")",
":",
"assert_is_type",
"(",
"frame",
",",
"H2OFrame",
")",
"assert_is_type",
"(",
"path",
",",
"str",
")",
"assert_is_type",
"(",
"force",
",",
"bool",
")",
"assert_is_type",
"(",
"parts",
",",
"int",
")",
"H2OJob",
"(",
"api",
"(",
"\"POST /3/Frames/%s/export\"",
"%",
"(",
"frame",
".",
"frame_id",
")",
",",
"data",
"=",
"{",
"\"path\"",
":",
"path",
",",
"\"num_parts\"",
":",
"parts",
",",
"\"force\"",
":",
"force",
"}",
")",
",",
"\"Export File\"",
")",
".",
"poll",
"(",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
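A sketch for export_file, assuming a running cluster; file and directory paths are placeholders. Note the export happens on the machine the H2O server runs on, not on the Python client.

import h2o

h2o.init()
df = h2o.import_file("data.csv")                         # hypothetical path
# Write a single CSV file on the server-side machine.
h2o.export_file(df, path="/tmp/data_export.csv", force=True)
# Or split a large frame into part files; the path must then be an (empty) directory.
h2o.export_file(df, path="/tmp/data_export_dir", parts=-1)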
test
|
create_frame
|
Create a new frame with random data.
Creates a data frame in H2O with real-valued, categorical, integer, and binary columns specified by the user.
:param frame_id: the destination key. If empty, this will be auto-generated.
:param rows: the number of rows of data to generate.
:param cols: the number of columns of data to generate. Excludes the response column if has_response is True.
:param randomize: If True, data values will be randomly generated. This must be True if either
categorical_fraction or integer_fraction is non-zero.
:param value: if randomize is False, then all real-valued entries will be set to this value.
:param real_range: the range of randomly generated real values.
:param real_fraction: the fraction of columns that are real-valued.
:param categorical_fraction: the fraction of total columns that are categorical.
:param factors: the number of (unique) factor levels in each categorical column.
:param integer_fraction: the fraction of total columns that are integer-valued.
:param integer_range: the range of randomly generated integer values.
:param binary_fraction: the fraction of total columns that are binary-valued.
:param binary_ones_fraction: the fraction of values in a binary column that are set to 1.
:param time_fraction: the fraction of randomly created date/time columns.
:param string_fraction: the fraction of randomly created string columns.
:param missing_fraction: the fraction of total entries in the data frame that are set to NA.
:param has_response: A logical value indicating whether an additional response column should be prepended to the
final H2O data frame. If set to True, the total number of columns will be ``cols + 1``.
:param response_factors: if has_response is True, then this variable controls the type of the "response" column:
        setting response_factors to 1 will generate a real-valued response; any value greater than or equal to 2 will
        create a categorical response with that many categories.
:param positive_response: when the response variable is present and of real type, this controls whether it
contains positive values only, or both positive and negative.
:param seed: a seed used to generate random values when ``randomize`` is True.
:param seed_for_column_types: a seed used to generate random column types when ``randomize`` is True.
:returns: an :class:`H2OFrame` object
|
h2o-py/h2o/h2o.py
|
def create_frame(frame_id=None, rows=10000, cols=10, randomize=True,
real_fraction=None, categorical_fraction=None, integer_fraction=None,
binary_fraction=None, time_fraction=None, string_fraction=None,
value=0, real_range=100, factors=100, integer_range=100,
binary_ones_fraction=0.02, missing_fraction=0.01,
has_response=False, response_factors=2, positive_response=False,
seed=None, seed_for_column_types=None):
"""
Create a new frame with random data.
Creates a data frame in H2O with real-valued, categorical, integer, and binary columns specified by the user.
:param frame_id: the destination key. If empty, this will be auto-generated.
:param rows: the number of rows of data to generate.
:param cols: the number of columns of data to generate. Excludes the response column if has_response is True.
:param randomize: If True, data values will be randomly generated. This must be True if either
categorical_fraction or integer_fraction is non-zero.
:param value: if randomize is False, then all real-valued entries will be set to this value.
:param real_range: the range of randomly generated real values.
:param real_fraction: the fraction of columns that are real-valued.
:param categorical_fraction: the fraction of total columns that are categorical.
:param factors: the number of (unique) factor levels in each categorical column.
:param integer_fraction: the fraction of total columns that are integer-valued.
:param integer_range: the range of randomly generated integer values.
:param binary_fraction: the fraction of total columns that are binary-valued.
:param binary_ones_fraction: the fraction of values in a binary column that are set to 1.
:param time_fraction: the fraction of randomly created date/time columns.
:param string_fraction: the fraction of randomly created string columns.
:param missing_fraction: the fraction of total entries in the data frame that are set to NA.
:param has_response: A logical value indicating whether an additional response column should be prepended to the
final H2O data frame. If set to True, the total number of columns will be ``cols + 1``.
:param response_factors: if has_response is True, then this variable controls the type of the "response" column:
        setting response_factors to 1 will generate a real-valued response; any value greater than or equal to 2 will
        create a categorical response with that many categories.
    :param positive_response: when the response variable is present and of real type, this controls whether it
contains positive values only, or both positive and negative.
:param seed: a seed used to generate random values when ``randomize`` is True.
:param seed_for_column_types: a seed used to generate random column types when ``randomize`` is True.
:returns: an :class:`H2OFrame` object
"""
t_fraction = U(None, BoundNumeric(0, 1))
assert_is_type(frame_id, str, None)
assert_is_type(rows, BoundInt(1))
assert_is_type(cols, BoundInt(1))
assert_is_type(randomize, bool)
assert_is_type(value, numeric)
assert_is_type(real_range, BoundNumeric(0))
assert_is_type(real_fraction, t_fraction)
assert_is_type(categorical_fraction, t_fraction)
assert_is_type(integer_fraction, t_fraction)
assert_is_type(binary_fraction, t_fraction)
assert_is_type(time_fraction, t_fraction)
assert_is_type(string_fraction, t_fraction)
assert_is_type(missing_fraction, t_fraction)
assert_is_type(binary_ones_fraction, t_fraction)
assert_is_type(factors, BoundInt(1))
assert_is_type(integer_range, BoundInt(1))
assert_is_type(has_response, bool)
assert_is_type(response_factors, None, BoundInt(1))
assert_is_type(positive_response, bool)
assert_is_type(seed, int, None)
assert_is_type(seed_for_column_types, int, None)
check_frame_id(frame_id)
if randomize and value:
raise H2OValueError("Cannot set data to a `value` if `randomize` is true")
if (categorical_fraction or integer_fraction) and not randomize:
raise H2OValueError("`randomize` should be True when either categorical or integer columns are used.")
# The total column fraction that the user has specified explicitly. This sum should not exceed 1. We will respect
# all explicitly set fractions, and will auto-select the remaining fractions.
frcs = [real_fraction, categorical_fraction, integer_fraction, binary_fraction, time_fraction, string_fraction]
wgts = [0.5, 0.2, 0.2, 0.1, 0.0, 0.0]
sum_explicit_fractions = sum(0 if f is None else f for f in frcs)
count_explicit_fractions = sum(0 if f is None else 1 for f in frcs)
remainder = 1 - sum_explicit_fractions
if sum_explicit_fractions >= 1 + 1e-10:
raise H2OValueError("Fractions of binary, integer, categorical, time and string columns should add up "
"to a number less than 1.")
elif sum_explicit_fractions >= 1 - 1e-10:
# The fractions already add up to almost 1. No need to do anything (the server will absorb the tiny
# remainder into the real_fraction column).
pass
else:
# sum_explicit_fractions < 1 => distribute the remainder among the columns that were not set explicitly
if count_explicit_fractions == 6:
raise H2OValueError("Fraction of binary, integer, categorical, time and string columns add up to a "
"number less than 1.")
# Each column type receives a certain part (proportional to column's "weight") of the remaining fraction.
sum_implicit_weights = sum(wgts[i] if frcs[i] is None else 0 for i in range(6))
for i, f in enumerate(frcs):
if frcs[i] is not None: continue
if sum_implicit_weights == 0:
frcs[i] = remainder
else:
frcs[i] = remainder * wgts[i] / sum_implicit_weights
remainder -= frcs[i]
sum_implicit_weights -= wgts[i]
for i, f in enumerate(frcs):
if f is None:
frcs[i] = 0
real_fraction, categorical_fraction, integer_fraction, binary_fraction, time_fraction, string_fraction = frcs
parms = {"dest": frame_id if frame_id else py_tmp_key(append=h2oconn.session_id),
"rows": rows,
"cols": cols,
"randomize": randomize,
"categorical_fraction": categorical_fraction,
"integer_fraction": integer_fraction,
"binary_fraction": binary_fraction,
"time_fraction": time_fraction,
"string_fraction": string_fraction,
# "real_fraction" is not provided, the backend computes it as 1 - sum(5 other fractions)
"value": value,
"real_range": real_range,
"factors": factors,
"integer_range": integer_range,
"binary_ones_fraction": binary_ones_fraction,
"missing_fraction": missing_fraction,
"has_response": has_response,
"response_factors": response_factors,
"positive_response": positive_response,
"seed": -1 if seed is None else seed,
"seed_for_column_types": -1 if seed_for_column_types is None else seed_for_column_types,
}
H2OJob(api("POST /3/CreateFrame", data=parms), "Create Frame").poll()
return get_frame(parms["dest"])
|
def create_frame(frame_id=None, rows=10000, cols=10, randomize=True,
real_fraction=None, categorical_fraction=None, integer_fraction=None,
binary_fraction=None, time_fraction=None, string_fraction=None,
value=0, real_range=100, factors=100, integer_range=100,
binary_ones_fraction=0.02, missing_fraction=0.01,
has_response=False, response_factors=2, positive_response=False,
seed=None, seed_for_column_types=None):
"""
Create a new frame with random data.
Creates a data frame in H2O with real-valued, categorical, integer, and binary columns specified by the user.
:param frame_id: the destination key. If empty, this will be auto-generated.
:param rows: the number of rows of data to generate.
:param cols: the number of columns of data to generate. Excludes the response column if has_response is True.
:param randomize: If True, data values will be randomly generated. This must be True if either
categorical_fraction or integer_fraction is non-zero.
:param value: if randomize is False, then all real-valued entries will be set to this value.
:param real_range: the range of randomly generated real values.
:param real_fraction: the fraction of columns that are real-valued.
:param categorical_fraction: the fraction of total columns that are categorical.
:param factors: the number of (unique) factor levels in each categorical column.
:param integer_fraction: the fraction of total columns that are integer-valued.
:param integer_range: the range of randomly generated integer values.
:param binary_fraction: the fraction of total columns that are binary-valued.
:param binary_ones_fraction: the fraction of values in a binary column that are set to 1.
:param time_fraction: the fraction of randomly created date/time columns.
:param string_fraction: the fraction of randomly created string columns.
:param missing_fraction: the fraction of total entries in the data frame that are set to NA.
:param has_response: A logical value indicating whether an additional response column should be prepended to the
final H2O data frame. If set to True, the total number of columns will be ``cols + 1``.
:param response_factors: if has_response is True, then this variable controls the type of the "response" column:
        setting response_factors to 1 will generate a real-valued response; any value greater than or equal to 2 will
        create a categorical response with that many categories.
    :param positive_response: when the response variable is present and of real type, this controls whether it
contains positive values only, or both positive and negative.
:param seed: a seed used to generate random values when ``randomize`` is True.
:param seed_for_column_types: a seed used to generate random column types when ``randomize`` is True.
:returns: an :class:`H2OFrame` object
"""
t_fraction = U(None, BoundNumeric(0, 1))
assert_is_type(frame_id, str, None)
assert_is_type(rows, BoundInt(1))
assert_is_type(cols, BoundInt(1))
assert_is_type(randomize, bool)
assert_is_type(value, numeric)
assert_is_type(real_range, BoundNumeric(0))
assert_is_type(real_fraction, t_fraction)
assert_is_type(categorical_fraction, t_fraction)
assert_is_type(integer_fraction, t_fraction)
assert_is_type(binary_fraction, t_fraction)
assert_is_type(time_fraction, t_fraction)
assert_is_type(string_fraction, t_fraction)
assert_is_type(missing_fraction, t_fraction)
assert_is_type(binary_ones_fraction, t_fraction)
assert_is_type(factors, BoundInt(1))
assert_is_type(integer_range, BoundInt(1))
assert_is_type(has_response, bool)
assert_is_type(response_factors, None, BoundInt(1))
assert_is_type(positive_response, bool)
assert_is_type(seed, int, None)
assert_is_type(seed_for_column_types, int, None)
check_frame_id(frame_id)
if randomize and value:
raise H2OValueError("Cannot set data to a `value` if `randomize` is true")
if (categorical_fraction or integer_fraction) and not randomize:
raise H2OValueError("`randomize` should be True when either categorical or integer columns are used.")
# The total column fraction that the user has specified explicitly. This sum should not exceed 1. We will respect
# all explicitly set fractions, and will auto-select the remaining fractions.
frcs = [real_fraction, categorical_fraction, integer_fraction, binary_fraction, time_fraction, string_fraction]
wgts = [0.5, 0.2, 0.2, 0.1, 0.0, 0.0]
sum_explicit_fractions = sum(0 if f is None else f for f in frcs)
count_explicit_fractions = sum(0 if f is None else 1 for f in frcs)
remainder = 1 - sum_explicit_fractions
if sum_explicit_fractions >= 1 + 1e-10:
raise H2OValueError("Fractions of binary, integer, categorical, time and string columns should add up "
"to a number less than 1.")
elif sum_explicit_fractions >= 1 - 1e-10:
# The fractions already add up to almost 1. No need to do anything (the server will absorb the tiny
# remainder into the real_fraction column).
pass
else:
# sum_explicit_fractions < 1 => distribute the remainder among the columns that were not set explicitly
if count_explicit_fractions == 6:
raise H2OValueError("Fraction of binary, integer, categorical, time and string columns add up to a "
"number less than 1.")
# Each column type receives a certain part (proportional to column's "weight") of the remaining fraction.
sum_implicit_weights = sum(wgts[i] if frcs[i] is None else 0 for i in range(6))
for i, f in enumerate(frcs):
if frcs[i] is not None: continue
if sum_implicit_weights == 0:
frcs[i] = remainder
else:
frcs[i] = remainder * wgts[i] / sum_implicit_weights
remainder -= frcs[i]
sum_implicit_weights -= wgts[i]
for i, f in enumerate(frcs):
if f is None:
frcs[i] = 0
real_fraction, categorical_fraction, integer_fraction, binary_fraction, time_fraction, string_fraction = frcs
parms = {"dest": frame_id if frame_id else py_tmp_key(append=h2oconn.session_id),
"rows": rows,
"cols": cols,
"randomize": randomize,
"categorical_fraction": categorical_fraction,
"integer_fraction": integer_fraction,
"binary_fraction": binary_fraction,
"time_fraction": time_fraction,
"string_fraction": string_fraction,
# "real_fraction" is not provided, the backend computes it as 1 - sum(5 other fractions)
"value": value,
"real_range": real_range,
"factors": factors,
"integer_range": integer_range,
"binary_ones_fraction": binary_ones_fraction,
"missing_fraction": missing_fraction,
"has_response": has_response,
"response_factors": response_factors,
"positive_response": positive_response,
"seed": -1 if seed is None else seed,
"seed_for_column_types": -1 if seed_for_column_types is None else seed_for_column_types,
}
H2OJob(api("POST /3/CreateFrame", data=parms), "Create Frame").poll()
return get_frame(parms["dest"])
|
[
"Create",
"a",
"new",
"frame",
"with",
"random",
"data",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L1130-L1258
|
[
"def",
"create_frame",
"(",
"frame_id",
"=",
"None",
",",
"rows",
"=",
"10000",
",",
"cols",
"=",
"10",
",",
"randomize",
"=",
"True",
",",
"real_fraction",
"=",
"None",
",",
"categorical_fraction",
"=",
"None",
",",
"integer_fraction",
"=",
"None",
",",
"binary_fraction",
"=",
"None",
",",
"time_fraction",
"=",
"None",
",",
"string_fraction",
"=",
"None",
",",
"value",
"=",
"0",
",",
"real_range",
"=",
"100",
",",
"factors",
"=",
"100",
",",
"integer_range",
"=",
"100",
",",
"binary_ones_fraction",
"=",
"0.02",
",",
"missing_fraction",
"=",
"0.01",
",",
"has_response",
"=",
"False",
",",
"response_factors",
"=",
"2",
",",
"positive_response",
"=",
"False",
",",
"seed",
"=",
"None",
",",
"seed_for_column_types",
"=",
"None",
")",
":",
"t_fraction",
"=",
"U",
"(",
"None",
",",
"BoundNumeric",
"(",
"0",
",",
"1",
")",
")",
"assert_is_type",
"(",
"frame_id",
",",
"str",
",",
"None",
")",
"assert_is_type",
"(",
"rows",
",",
"BoundInt",
"(",
"1",
")",
")",
"assert_is_type",
"(",
"cols",
",",
"BoundInt",
"(",
"1",
")",
")",
"assert_is_type",
"(",
"randomize",
",",
"bool",
")",
"assert_is_type",
"(",
"value",
",",
"numeric",
")",
"assert_is_type",
"(",
"real_range",
",",
"BoundNumeric",
"(",
"0",
")",
")",
"assert_is_type",
"(",
"real_fraction",
",",
"t_fraction",
")",
"assert_is_type",
"(",
"categorical_fraction",
",",
"t_fraction",
")",
"assert_is_type",
"(",
"integer_fraction",
",",
"t_fraction",
")",
"assert_is_type",
"(",
"binary_fraction",
",",
"t_fraction",
")",
"assert_is_type",
"(",
"time_fraction",
",",
"t_fraction",
")",
"assert_is_type",
"(",
"string_fraction",
",",
"t_fraction",
")",
"assert_is_type",
"(",
"missing_fraction",
",",
"t_fraction",
")",
"assert_is_type",
"(",
"binary_ones_fraction",
",",
"t_fraction",
")",
"assert_is_type",
"(",
"factors",
",",
"BoundInt",
"(",
"1",
")",
")",
"assert_is_type",
"(",
"integer_range",
",",
"BoundInt",
"(",
"1",
")",
")",
"assert_is_type",
"(",
"has_response",
",",
"bool",
")",
"assert_is_type",
"(",
"response_factors",
",",
"None",
",",
"BoundInt",
"(",
"1",
")",
")",
"assert_is_type",
"(",
"positive_response",
",",
"bool",
")",
"assert_is_type",
"(",
"seed",
",",
"int",
",",
"None",
")",
"assert_is_type",
"(",
"seed_for_column_types",
",",
"int",
",",
"None",
")",
"check_frame_id",
"(",
"frame_id",
")",
"if",
"randomize",
"and",
"value",
":",
"raise",
"H2OValueError",
"(",
"\"Cannot set data to a `value` if `randomize` is true\"",
")",
"if",
"(",
"categorical_fraction",
"or",
"integer_fraction",
")",
"and",
"not",
"randomize",
":",
"raise",
"H2OValueError",
"(",
"\"`randomize` should be True when either categorical or integer columns are used.\"",
")",
"# The total column fraction that the user has specified explicitly. This sum should not exceed 1. We will respect",
"# all explicitly set fractions, and will auto-select the remaining fractions.",
"frcs",
"=",
"[",
"real_fraction",
",",
"categorical_fraction",
",",
"integer_fraction",
",",
"binary_fraction",
",",
"time_fraction",
",",
"string_fraction",
"]",
"wgts",
"=",
"[",
"0.5",
",",
"0.2",
",",
"0.2",
",",
"0.1",
",",
"0.0",
",",
"0.0",
"]",
"sum_explicit_fractions",
"=",
"sum",
"(",
"0",
"if",
"f",
"is",
"None",
"else",
"f",
"for",
"f",
"in",
"frcs",
")",
"count_explicit_fractions",
"=",
"sum",
"(",
"0",
"if",
"f",
"is",
"None",
"else",
"1",
"for",
"f",
"in",
"frcs",
")",
"remainder",
"=",
"1",
"-",
"sum_explicit_fractions",
"if",
"sum_explicit_fractions",
">=",
"1",
"+",
"1e-10",
":",
"raise",
"H2OValueError",
"(",
"\"Fractions of binary, integer, categorical, time and string columns should add up \"",
"\"to a number less than 1.\"",
")",
"elif",
"sum_explicit_fractions",
">=",
"1",
"-",
"1e-10",
":",
"# The fractions already add up to almost 1. No need to do anything (the server will absorb the tiny",
"# remainder into the real_fraction column).",
"pass",
"else",
":",
"# sum_explicit_fractions < 1 => distribute the remainder among the columns that were not set explicitly",
"if",
"count_explicit_fractions",
"==",
"6",
":",
"raise",
"H2OValueError",
"(",
"\"Fraction of binary, integer, categorical, time and string columns add up to a \"",
"\"number less than 1.\"",
")",
"# Each column type receives a certain part (proportional to column's \"weight\") of the remaining fraction.",
"sum_implicit_weights",
"=",
"sum",
"(",
"wgts",
"[",
"i",
"]",
"if",
"frcs",
"[",
"i",
"]",
"is",
"None",
"else",
"0",
"for",
"i",
"in",
"range",
"(",
"6",
")",
")",
"for",
"i",
",",
"f",
"in",
"enumerate",
"(",
"frcs",
")",
":",
"if",
"frcs",
"[",
"i",
"]",
"is",
"not",
"None",
":",
"continue",
"if",
"sum_implicit_weights",
"==",
"0",
":",
"frcs",
"[",
"i",
"]",
"=",
"remainder",
"else",
":",
"frcs",
"[",
"i",
"]",
"=",
"remainder",
"*",
"wgts",
"[",
"i",
"]",
"/",
"sum_implicit_weights",
"remainder",
"-=",
"frcs",
"[",
"i",
"]",
"sum_implicit_weights",
"-=",
"wgts",
"[",
"i",
"]",
"for",
"i",
",",
"f",
"in",
"enumerate",
"(",
"frcs",
")",
":",
"if",
"f",
"is",
"None",
":",
"frcs",
"[",
"i",
"]",
"=",
"0",
"real_fraction",
",",
"categorical_fraction",
",",
"integer_fraction",
",",
"binary_fraction",
",",
"time_fraction",
",",
"string_fraction",
"=",
"frcs",
"parms",
"=",
"{",
"\"dest\"",
":",
"frame_id",
"if",
"frame_id",
"else",
"py_tmp_key",
"(",
"append",
"=",
"h2oconn",
".",
"session_id",
")",
",",
"\"rows\"",
":",
"rows",
",",
"\"cols\"",
":",
"cols",
",",
"\"randomize\"",
":",
"randomize",
",",
"\"categorical_fraction\"",
":",
"categorical_fraction",
",",
"\"integer_fraction\"",
":",
"integer_fraction",
",",
"\"binary_fraction\"",
":",
"binary_fraction",
",",
"\"time_fraction\"",
":",
"time_fraction",
",",
"\"string_fraction\"",
":",
"string_fraction",
",",
"# \"real_fraction\" is not provided, the backend computes it as 1 - sum(5 other fractions)",
"\"value\"",
":",
"value",
",",
"\"real_range\"",
":",
"real_range",
",",
"\"factors\"",
":",
"factors",
",",
"\"integer_range\"",
":",
"integer_range",
",",
"\"binary_ones_fraction\"",
":",
"binary_ones_fraction",
",",
"\"missing_fraction\"",
":",
"missing_fraction",
",",
"\"has_response\"",
":",
"has_response",
",",
"\"response_factors\"",
":",
"response_factors",
",",
"\"positive_response\"",
":",
"positive_response",
",",
"\"seed\"",
":",
"-",
"1",
"if",
"seed",
"is",
"None",
"else",
"seed",
",",
"\"seed_for_column_types\"",
":",
"-",
"1",
"if",
"seed_for_column_types",
"is",
"None",
"else",
"seed_for_column_types",
",",
"}",
"H2OJob",
"(",
"api",
"(",
"\"POST /3/CreateFrame\"",
",",
"data",
"=",
"parms",
")",
",",
"\"Create Frame\"",
")",
".",
"poll",
"(",
")",
"return",
"get_frame",
"(",
"parms",
"[",
"\"dest\"",
"]",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
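A sketch for create_frame, assuming a running cluster; the chosen sizes, fractions, and seed are arbitrary illustrations.

import h2o

h2o.init()
df = h2o.create_frame(rows=1000, cols=10,
                      categorical_fraction=0.2, factors=5,
                      integer_fraction=0.2, integer_range=50,
                      missing_fraction=0.05,
                      has_response=True, response_factors=2,
                      seed=1234)
# The unspecified real/binary/time/string fractions are filled in automatically.
print(df.dim)        # expected [1000, 11]: the response column is prepended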
test
|
interaction
|
Categorical Interaction Feature Creation in H2O.
Creates a frame in H2O with n-th order interaction features between categorical columns, as specified by
the user.
:param data: the H2OFrame that holds the target categorical columns.
:param factors: factor columns (either indices or column names).
:param pairwise: If True, create pairwise interactions between factors (otherwise create one
higher-order interaction). Only applicable if there are 3 or more factors.
:param max_factors: Max. number of factor levels in pair-wise interaction terms (if enforced, one extra
catch-all factor will be made).
:param min_occurrence: Min. occurrence threshold for factor levels in pair-wise interaction terms
:param destination_frame: a string indicating the destination key. If empty, this will be auto-generated by H2O.
:returns: :class:`H2OFrame`
|
h2o-py/h2o/h2o.py
|
def interaction(data, factors, pairwise, max_factors, min_occurrence, destination_frame=None):
"""
Categorical Interaction Feature Creation in H2O.
Creates a frame in H2O with n-th order interaction features between categorical columns, as specified by
the user.
:param data: the H2OFrame that holds the target categorical columns.
:param factors: factor columns (either indices or column names).
:param pairwise: If True, create pairwise interactions between factors (otherwise create one
higher-order interaction). Only applicable if there are 3 or more factors.
:param max_factors: Max. number of factor levels in pair-wise interaction terms (if enforced, one extra
catch-all factor will be made).
:param min_occurrence: Min. occurrence threshold for factor levels in pair-wise interaction terms
:param destination_frame: a string indicating the destination key. If empty, this will be auto-generated by H2O.
:returns: :class:`H2OFrame`
"""
assert_is_type(data, H2OFrame)
assert_is_type(factors, [str, int])
assert_is_type(pairwise, bool)
assert_is_type(max_factors, int)
assert_is_type(min_occurrence, int)
assert_is_type(destination_frame, str, None)
factors = [data.names[n] if is_type(n, int) else n for n in factors]
parms = {"dest": py_tmp_key(append=h2oconn.session_id) if destination_frame is None else destination_frame,
"source_frame": data.frame_id,
"factor_columns": [quoted(f) for f in factors],
"pairwise": pairwise,
"max_factors": max_factors,
"min_occurrence": min_occurrence,
}
H2OJob(api("POST /3/Interaction", data=parms), "Interactions").poll()
return get_frame(parms["dest"])
|
def interaction(data, factors, pairwise, max_factors, min_occurrence, destination_frame=None):
"""
Categorical Interaction Feature Creation in H2O.
Creates a frame in H2O with n-th order interaction features between categorical columns, as specified by
the user.
:param data: the H2OFrame that holds the target categorical columns.
:param factors: factor columns (either indices or column names).
:param pairwise: If True, create pairwise interactions between factors (otherwise create one
higher-order interaction). Only applicable if there are 3 or more factors.
:param max_factors: Max. number of factor levels in pair-wise interaction terms (if enforced, one extra
catch-all factor will be made).
:param min_occurrence: Min. occurrence threshold for factor levels in pair-wise interaction terms
:param destination_frame: a string indicating the destination key. If empty, this will be auto-generated by H2O.
:returns: :class:`H2OFrame`
"""
assert_is_type(data, H2OFrame)
assert_is_type(factors, [str, int])
assert_is_type(pairwise, bool)
assert_is_type(max_factors, int)
assert_is_type(min_occurrence, int)
assert_is_type(destination_frame, str, None)
factors = [data.names[n] if is_type(n, int) else n for n in factors]
parms = {"dest": py_tmp_key(append=h2oconn.session_id) if destination_frame is None else destination_frame,
"source_frame": data.frame_id,
"factor_columns": [quoted(f) for f in factors],
"pairwise": pairwise,
"max_factors": max_factors,
"min_occurrence": min_occurrence,
}
H2OJob(api("POST /3/Interaction", data=parms), "Interactions").poll()
return get_frame(parms["dest"])
|
[
"Categorical",
"Interaction",
"Feature",
"Creation",
"in",
"H2O",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L1261-L1294
|
[
"def",
"interaction",
"(",
"data",
",",
"factors",
",",
"pairwise",
",",
"max_factors",
",",
"min_occurrence",
",",
"destination_frame",
"=",
"None",
")",
":",
"assert_is_type",
"(",
"data",
",",
"H2OFrame",
")",
"assert_is_type",
"(",
"factors",
",",
"[",
"str",
",",
"int",
"]",
")",
"assert_is_type",
"(",
"pairwise",
",",
"bool",
")",
"assert_is_type",
"(",
"max_factors",
",",
"int",
")",
"assert_is_type",
"(",
"min_occurrence",
",",
"int",
")",
"assert_is_type",
"(",
"destination_frame",
",",
"str",
",",
"None",
")",
"factors",
"=",
"[",
"data",
".",
"names",
"[",
"n",
"]",
"if",
"is_type",
"(",
"n",
",",
"int",
")",
"else",
"n",
"for",
"n",
"in",
"factors",
"]",
"parms",
"=",
"{",
"\"dest\"",
":",
"py_tmp_key",
"(",
"append",
"=",
"h2oconn",
".",
"session_id",
")",
"if",
"destination_frame",
"is",
"None",
"else",
"destination_frame",
",",
"\"source_frame\"",
":",
"data",
".",
"frame_id",
",",
"\"factor_columns\"",
":",
"[",
"quoted",
"(",
"f",
")",
"for",
"f",
"in",
"factors",
"]",
",",
"\"pairwise\"",
":",
"pairwise",
",",
"\"max_factors\"",
":",
"max_factors",
",",
"\"min_occurrence\"",
":",
"min_occurrence",
",",
"}",
"H2OJob",
"(",
"api",
"(",
"\"POST /3/Interaction\"",
",",
"data",
"=",
"parms",
")",
",",
"\"Interactions\"",
")",
".",
"poll",
"(",
")",
"return",
"get_frame",
"(",
"parms",
"[",
"\"dest\"",
"]",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
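A minimal usage sketch for the interaction() call documented above. The file path and the three factor column names are illustrative assumptions, not values taken from this record:

import h2o

h2o.init()
# Any frame with categorical columns works; the airlines columns below are assumed.
airlines = h2o.import_file("allyears2k_headers.zip")  # placeholder path
pairs = h2o.interaction(airlines,
                        factors=["Origin", "Dest", "UniqueCarrier"],  # assumed names
                        pairwise=True, max_factors=100, min_occurrence=1)
pairs.describe()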
test
|
as_list
|
Convert an H2O data object into a python-specific object.
WARNING! This will pull all data local!
If Pandas is available (and use_pandas is True), then pandas will be used to parse the
data frame. Otherwise, a list-of-lists populated by character data will be returned (so
the types of data will all be str).
:param data: an H2O data object.
:param use_pandas: If True, try to use pandas for reading in the data.
:param header: If True, return column names as first element in list
:returns: List of lists (Rows x Columns).
|
h2o-py/h2o/h2o.py
|
def as_list(data, use_pandas=True, header=True):
"""
Convert an H2O data object into a python-specific object.
WARNING! This will pull all data local!
If Pandas is available (and use_pandas is True), then pandas will be used to parse the
data frame. Otherwise, a list-of-lists populated by character data will be returned (so
the types of data will all be str).
:param data: an H2O data object.
:param use_pandas: If True, try to use pandas for reading in the data.
:param header: If True, return column names as first element in list
:returns: List of lists (Rows x Columns).
"""
assert_is_type(data, H2OFrame)
assert_is_type(use_pandas, bool)
assert_is_type(header, bool)
return H2OFrame.as_data_frame(data, use_pandas=use_pandas, header=header)
|
def as_list(data, use_pandas=True, header=True):
"""
Convert an H2O data object into a python-specific object.
WARNING! This will pull all data local!
If Pandas is available (and use_pandas is True), then pandas will be used to parse the
data frame. Otherwise, a list-of-lists populated by character data will be returned (so
the types of data will all be str).
:param data: an H2O data object.
:param use_pandas: If True, try to use pandas for reading in the data.
:param header: If True, return column names as first element in list
:returns: List of lists (Rows x Columns).
"""
assert_is_type(data, H2OFrame)
assert_is_type(use_pandas, bool)
assert_is_type(header, bool)
return H2OFrame.as_data_frame(data, use_pandas=use_pandas, header=header)
|
[
"Convert",
"an",
"H2O",
"data",
"object",
"into",
"a",
"python",
"-",
"specific",
"object",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L1297-L1316
|
[
"def",
"as_list",
"(",
"data",
",",
"use_pandas",
"=",
"True",
",",
"header",
"=",
"True",
")",
":",
"assert_is_type",
"(",
"data",
",",
"H2OFrame",
")",
"assert_is_type",
"(",
"use_pandas",
",",
"bool",
")",
"assert_is_type",
"(",
"header",
",",
"bool",
")",
"return",
"H2OFrame",
".",
"as_data_frame",
"(",
"data",
",",
"use_pandas",
"=",
"use_pandas",
",",
"header",
"=",
"header",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
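A small sketch of as_list() pulling a client-built frame back as plain Python lists (the literal values are made up for illustration):

import h2o

h2o.init()
fr = h2o.H2OFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
rows = h2o.as_list(fr, use_pandas=False, header=True)
# rows[0] holds the column names; the remaining rows contain string values,
# since the non-pandas path returns character data only.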
test
|
demo
|
H2O built-in demo facility.
:param funcname: A string that identifies the h2o python function to demonstrate.
:param interactive: If True, the user will be prompted to continue the demonstration after every segment.
:param echo: If True, the python commands that are executed will be displayed.
:param test: If True, `h2o.init()` will not be called (used for pyunit testing).
:example:
>>> import h2o
>>> h2o.demo("gbm")
|
h2o-py/h2o/h2o.py
|
def demo(funcname, interactive=True, echo=True, test=False):
"""
H2O built-in demo facility.
:param funcname: A string that identifies the h2o python function to demonstrate.
:param interactive: If True, the user will be prompted to continue the demonstration after every segment.
:param echo: If True, the python commands that are executed will be displayed.
:param test: If True, `h2o.init()` will not be called (used for pyunit testing).
:example:
>>> import h2o
>>> h2o.demo("gbm")
"""
import h2o.demos as h2odemo
assert_is_type(funcname, str)
assert_is_type(interactive, bool)
assert_is_type(echo, bool)
assert_is_type(test, bool)
demo_function = getattr(h2odemo, funcname, None)
if demo_function and type(demo_function) is type(demo):
demo_function(interactive, echo, test)
else:
print("Demo for %s is not available." % funcname)
|
def demo(funcname, interactive=True, echo=True, test=False):
"""
H2O built-in demo facility.
:param funcname: A string that identifies the h2o python function to demonstrate.
:param interactive: If True, the user will be prompted to continue the demonstration after every segment.
:param echo: If True, the python commands that are executed will be displayed.
:param test: If True, `h2o.init()` will not be called (used for pyunit testing).
:example:
>>> import h2o
>>> h2o.demo("gbm")
"""
import h2o.demos as h2odemo
assert_is_type(funcname, str)
assert_is_type(interactive, bool)
assert_is_type(echo, bool)
assert_is_type(test, bool)
demo_function = getattr(h2odemo, funcname, None)
if demo_function and type(demo_function) is type(demo):
demo_function(interactive, echo, test)
else:
print("Demo for %s is not available." % funcname)
|
[
"H2O",
"built",
"-",
"in",
"demo",
"facility",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L1319-L1342
|
[
"def",
"demo",
"(",
"funcname",
",",
"interactive",
"=",
"True",
",",
"echo",
"=",
"True",
",",
"test",
"=",
"False",
")",
":",
"import",
"h2o",
".",
"demos",
"as",
"h2odemo",
"assert_is_type",
"(",
"funcname",
",",
"str",
")",
"assert_is_type",
"(",
"interactive",
",",
"bool",
")",
"assert_is_type",
"(",
"echo",
",",
"bool",
")",
"assert_is_type",
"(",
"test",
",",
"bool",
")",
"demo_function",
"=",
"getattr",
"(",
"h2odemo",
",",
"funcname",
",",
"None",
")",
"if",
"demo_function",
"and",
"type",
"(",
"demo_function",
")",
"is",
"type",
"(",
"demo",
")",
":",
"demo_function",
"(",
"interactive",
",",
"echo",
",",
"test",
")",
"else",
":",
"print",
"(",
"\"Demo for %s is not available.\"",
"%",
"funcname",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
load_dataset
|
Imports a data file within the 'h2o_data' folder.
|
h2o-py/h2o/h2o.py
|
def load_dataset(relative_path):
"""Imports a data file within the 'h2o_data' folder."""
assert_is_type(relative_path, str)
h2o_dir = os.path.split(__file__)[0]
for possible_file in [os.path.join(h2o_dir, relative_path),
os.path.join(h2o_dir, "h2o_data", relative_path),
os.path.join(h2o_dir, "h2o_data", relative_path + ".csv")]:
if os.path.exists(possible_file):
return upload_file(possible_file)
# File not found -- raise an error!
raise H2OValueError("Data file %s cannot be found" % relative_path)
|
def load_dataset(relative_path):
"""Imports a data file within the 'h2o_data' folder."""
assert_is_type(relative_path, str)
h2o_dir = os.path.split(__file__)[0]
for possible_file in [os.path.join(h2o_dir, relative_path),
os.path.join(h2o_dir, "h2o_data", relative_path),
os.path.join(h2o_dir, "h2o_data", relative_path + ".csv")]:
if os.path.exists(possible_file):
return upload_file(possible_file)
# File not found -- raise an error!
raise H2OValueError("Data file %s cannot be found" % relative_path)
|
[
"Imports",
"a",
"data",
"file",
"within",
"the",
"h2o_data",
"folder",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L1345-L1355
|
[
"def",
"load_dataset",
"(",
"relative_path",
")",
":",
"assert_is_type",
"(",
"relative_path",
",",
"str",
")",
"h2o_dir",
"=",
"os",
".",
"path",
".",
"split",
"(",
"__file__",
")",
"[",
"0",
"]",
"for",
"possible_file",
"in",
"[",
"os",
".",
"path",
".",
"join",
"(",
"h2o_dir",
",",
"relative_path",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"h2o_dir",
",",
"\"h2o_data\"",
",",
"relative_path",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"h2o_dir",
",",
"\"h2o_data\"",
",",
"relative_path",
"+",
"\".csv\"",
")",
"]",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"possible_file",
")",
":",
"return",
"upload_file",
"(",
"possible_file",
")",
"# File not found -- raise an error!",
"raise",
"H2OValueError",
"(",
"\"Data file %s cannot be found\"",
"%",
"relative_path",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
make_metrics
|
Create Model Metrics from predicted and actual values in H2O.
:param H2OFrame predicted: an H2OFrame containing predictions.
:param H2OFrame actual: an H2OFrame containing actual values.
:param domain: list of response factors for classification.
:param distribution: distribution for regression.
|
h2o-py/h2o/h2o.py
|
def make_metrics(predicted, actual, domain=None, distribution=None):
"""
Create Model Metrics from predicted and actual values in H2O.
:param H2OFrame predicted: an H2OFrame containing predictions.
:param H2OFrame actual: an H2OFrame containing actual values.
:param domain: list of response factors for classification.
:param distribution: distribution for regression.
"""
assert_is_type(predicted, H2OFrame)
assert_is_type(actual, H2OFrame)
# assert predicted.ncol == 1, "`predicted` frame should have exactly 1 column"
assert actual.ncol == 1, "`actual` frame should have exactly 1 column"
assert_is_type(distribution, str, None)
assert_satisfies(actual.ncol, actual.ncol == 1)
if domain is None and any(actual.isfactor()):
domain = actual.levels()[0]
res = api("POST /3/ModelMetrics/predictions_frame/%s/actuals_frame/%s" % (predicted.frame_id, actual.frame_id),
data={"domain": domain, "distribution": distribution})
return res["model_metrics"]
|
def make_metrics(predicted, actual, domain=None, distribution=None):
"""
Create Model Metrics from predicted and actual values in H2O.
:param H2OFrame predicted: an H2OFrame containing predictions.
:param H2OFrame actual: an H2OFrame containing actual values.
:param domain: list of response factors for classification.
:param distribution: distribution for regression.
"""
assert_is_type(predicted, H2OFrame)
assert_is_type(actual, H2OFrame)
# assert predicted.ncol == 1, "`predicted` frame should have exactly 1 column"
assert actual.ncol == 1, "`actual` frame should have exactly 1 column"
assert_is_type(distribution, str, None)
assert_satisfies(actual.ncol, actual.ncol == 1)
if domain is None and any(actual.isfactor()):
domain = actual.levels()[0]
res = api("POST /3/ModelMetrics/predictions_frame/%s/actuals_frame/%s" % (predicted.frame_id, actual.frame_id),
data={"domain": domain, "distribution": distribution})
return res["model_metrics"]
|
[
"Create",
"Model",
"Metrics",
"from",
"predicted",
"and",
"actual",
"values",
"in",
"H2O",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L1358-L1377
|
[
"def",
"make_metrics",
"(",
"predicted",
",",
"actual",
",",
"domain",
"=",
"None",
",",
"distribution",
"=",
"None",
")",
":",
"assert_is_type",
"(",
"predicted",
",",
"H2OFrame",
")",
"assert_is_type",
"(",
"actual",
",",
"H2OFrame",
")",
"# assert predicted.ncol == 1, \"`predicted` frame should have exactly 1 column\"",
"assert",
"actual",
".",
"ncol",
"==",
"1",
",",
"\"`actual` frame should have exactly 1 column\"",
"assert_is_type",
"(",
"distribution",
",",
"str",
",",
"None",
")",
"assert_satisfies",
"(",
"actual",
".",
"ncol",
",",
"actual",
".",
"ncol",
"==",
"1",
")",
"if",
"domain",
"is",
"None",
"and",
"any",
"(",
"actual",
".",
"isfactor",
"(",
")",
")",
":",
"domain",
"=",
"actual",
".",
"levels",
"(",
")",
"[",
"0",
"]",
"res",
"=",
"api",
"(",
"\"POST /3/ModelMetrics/predictions_frame/%s/actuals_frame/%s\"",
"%",
"(",
"predicted",
".",
"frame_id",
",",
"actual",
".",
"frame_id",
")",
",",
"data",
"=",
"{",
"\"domain\"",
":",
"domain",
",",
"\"distribution\"",
":",
"distribution",
"}",
")",
"return",
"res",
"[",
"\"model_metrics\"",
"]"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
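A hedged sketch of make_metrics() for a regression setup; `model`, `test`, and the response name "y" are placeholders standing in for whatever was trained earlier, not part of this record:

# Assumes an already-trained regression `model` and a `test` H2OFrame with response "y".
preds = model.predict(test)
mm = h2o.make_metrics(preds["predict"], test["y"])
print(mm)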
test
|
_put_key
|
Upload given file into DKV and save it under given key as raw object.
:param dest_key: name of destination key in DKV
:param file_path: path to file to upload
:return: key name if object was uploaded successfully
|
h2o-py/h2o/h2o.py
|
def _put_key(file_path, dest_key=None, overwrite=True):
"""
Upload given file into DKV and save it under given key as raw object.
:param dest_key: name of destination key in DKV
:param file_path: path to file to upload
:return: key name if object was uploaded successfully
"""
ret = api("POST /3/PutKey?destination_key={}&overwrite={}".format(dest_key if dest_key else '', overwrite),
filename=file_path)
return ret["destination_key"]
|
def _put_key(file_path, dest_key=None, overwrite=True):
"""
Upload given file into DKV and save it under given key as raw object.
:param dest_key: name of destination key in DKV
:param file_path: path to file to upload
:return: key name if object was uploaded successfully
"""
ret = api("POST /3/PutKey?destination_key={}&overwrite={}".format(dest_key if dest_key else '', overwrite),
filename=file_path)
return ret["destination_key"]
|
[
"Upload",
"given",
"file",
"into",
"DKV",
"and",
"save",
"it",
"under",
"give",
"key",
"as",
"raw",
"object",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L1388-L1398
|
[
"def",
"_put_key",
"(",
"file_path",
",",
"dest_key",
"=",
"None",
",",
"overwrite",
"=",
"True",
")",
":",
"ret",
"=",
"api",
"(",
"\"POST /3/PutKey?destination_key={}&overwrite={}\"",
".",
"format",
"(",
"dest_key",
"if",
"dest_key",
"else",
"''",
",",
"overwrite",
")",
",",
"filename",
"=",
"file_path",
")",
"return",
"ret",
"[",
"\"destination_key\"",
"]"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
upload_custom_metric
|
Upload given metrics function into H2O cluster.
The metrics can have different representation:
- class: needs to implement map(pred, act, weight, offset, model), reduce(l, r) and metric(l) methods
- string: the same as in class case, but the class is given as a string
:param func: metric representation: string, class
:param func_file: internal name of file to save given metrics representation
:param func_name: name for h2o key under which the given metric is saved
:param class_name: name of class wrapping the metrics function (when supplied as string)
:param source_provider: a function which provides a source code for given function
:return: reference to uploaded metrics function
:examples:
>>> class CustomMaeFunc:
>>> def map(self, pred, act, w, o, model):
>>> return [abs(act[0] - pred[0]), 1]
>>>
>>> def reduce(self, l, r):
>>> return [l[0] + r[0], l[1] + r[1]]
>>>
>>> def metric(self, l):
>>> return l[0] / l[1]
>>>
>>>
>>> h2o.upload_custom_metric(CustomMaeFunc, func_name="mae")
>>>
>>> custom_func_str = '''class CustomMaeFunc:
>>> def map(self, pred, act, w, o, model):
>>> return [abs(act[0] - pred[0]), 1]
>>>
>>> def reduce(self, l, r):
>>> return [l[0] + r[0], l[1] + r[1]]
>>>
>>> def metric(self, l):
>>> return l[0] / l[1]'''
>>>
>>>
>>> h2o.upload_custom_metric(custom_func_str, class_name="CustomMaeFunc", func_name="mae")
|
h2o-py/h2o/h2o.py
|
def upload_custom_metric(func, func_file="metrics.py", func_name=None, class_name=None, source_provider=None):
"""
Upload given metrics function into H2O cluster.
The metrics can have different representation:
- class: needs to implement map(pred, act, weight, offset, model), reduce(l, r) and metric(l) methods
- string: the same as in class case, but the class is given as a string
:param func: metric representation: string, class
:param func_file: internal name of file to save given metrics representation
:param func_name: name for h2o key under which the given metric is saved
:param class_name: name of class wrapping the metrics function (when supplied as string)
:param source_provider: a function which provides a source code for given function
:return: reference to uploaded metrics function
:examples:
>>> class CustomMaeFunc:
>>> def map(self, pred, act, w, o, model):
>>> return [abs(act[0] - pred[0]), 1]
>>>
>>> def reduce(self, l, r):
>>> return [l[0] + r[0], l[1] + r[1]]
>>>
>>> def metric(self, l):
>>> return l[0] / l[1]
>>>
>>>
>>> h2o.upload_custom_metric(CustomMaeFunc, func_name="mae")
>>>
>>> custom_func_str = '''class CustomMaeFunc:
>>> def map(self, pred, act, w, o, model):
>>> return [abs(act[0] - pred[0]), 1]
>>>
>>> def reduce(self, l, r):
>>> return [l[0] + r[0], l[1] + r[1]]
>>>
>>> def metric(self, l):
>>> return l[0] / l[1]'''
>>>
>>>
>>> h2o.upload_custom_metric(custom_func_str, class_name="CustomMaeFunc", func_name="mae")
"""
import tempfile
import inspect
# Use default source provider
if not source_provider:
source_provider = _default_source_provider
# The template wraps given metrics representation
_CFUNC_CODE_TEMPLATE = """# Generated code
import water.udf.CMetricFunc as MetricFunc
# User given metric function as a class implementing
# 3 methods defined by interface CMetricFunc
{}
# Generated user metric which satisfies the interface
# of Java MetricFunc
class {}Wrapper({}, MetricFunc, object):
pass
"""
assert_satisfies(func, inspect.isclass(func) or isinstance(func, str),
"The argument func needs to be string or class !")
assert_satisfies(func_file, func_file is not None,
"The argument func_file is missing!")
assert_satisfies(func_file, func_file.endswith('.py'),
"The argument func_file needs to end with '.py'")
code = None
derived_func_name = None
module_name = func_file[:-3]
if isinstance(func, str):
assert_satisfies(class_name, class_name is not None,
"The argument class_name is missing! " +
"It needs to reference the class in given string!")
code = _CFUNC_CODE_TEMPLATE.format(func, class_name, class_name)
derived_func_name = "metrics_{}".format(class_name)
class_name = "{}.{}Wrapper".format(module_name, class_name)
else:
assert_satisfies(func, inspect.isclass(func), "The parameter `func` should be str or class")
for method in ['map', 'reduce', 'metric']:
assert_satisfies(func, method in func.__dict__, "The class `func` needs to define method `{}`".format(method))
assert_satisfies(class_name, class_name is None,
"If class is specified then class_name parameter needs to be None")
class_name = "{}.{}Wrapper".format(module_name, func.__name__)
derived_func_name = "metrics_{}".format(func.__name__)
code = _CFUNC_CODE_TEMPLATE.format(source_provider(func), func.__name__, func.__name__)
# If the func name is not given, use whatever we can derive from the given definition
if not func_name:
func_name = derived_func_name
# Saved into jar file
tmpdir = tempfile.mkdtemp(prefix="h2o-func")
func_arch_file = _create_zip_file("{}/func.jar".format(tmpdir), (func_file, code))
# Upload into K/V
dest_key = _put_key(func_arch_file, dest_key=func_name)
# Reference
return "python:{}={}".format(dest_key, class_name)
|
def upload_custom_metric(func, func_file="metrics.py", func_name=None, class_name=None, source_provider=None):
"""
Upload given metrics function into H2O cluster.
The metrics can have different representation:
- class: needs to implement map(pred, act, weight, offset, model), reduce(l, r) and metric(l) methods
- string: the same as in class case, but the class is given as a string
:param func: metric representation: string, class
:param func_file: internal name of file to save given metrics representation
:param func_name: name for h2o key under which the given metric is saved
:param class_name: name of class wrapping the metrics function (when supplied as string)
:param source_provider: a function which provides a source code for given function
:return: reference to uploaded metrics function
:examples:
>>> class CustomMaeFunc:
>>> def map(self, pred, act, w, o, model):
>>> return [abs(act[0] - pred[0]), 1]
>>>
>>> def reduce(self, l, r):
>>> return [l[0] + r[0], l[1] + r[1]]
>>>
>>> def metric(self, l):
>>> return l[0] / l[1]
>>>
>>>
>>> h2o.upload_custom_metric(CustomMaeFunc, func_name="mae")
>>>
>>> custom_func_str = '''class CustomMaeFunc:
>>> def map(self, pred, act, w, o, model):
>>> return [abs(act[0] - pred[0]), 1]
>>>
>>> def reduce(self, l, r):
>>> return [l[0] + r[0], l[1] + r[1]]
>>>
>>> def metric(self, l):
>>> return l[0] / l[1]'''
>>>
>>>
>>> h2o.upload_custom_metric(custom_func_str, class_name="CustomMaeFunc", func_name="mae")
"""
import tempfile
import inspect
# Use default source provider
if not source_provider:
source_provider = _default_source_provider
# The template wraps given metrics representation
_CFUNC_CODE_TEMPLATE = """# Generated code
import water.udf.CMetricFunc as MetricFunc
# User given metric function as a class implementing
# 3 methods defined by interface CMetricFunc
{}
# Generated user metric which satisfies the interface
# of Java MetricFunc
class {}Wrapper({}, MetricFunc, object):
pass
"""
assert_satisfies(func, inspect.isclass(func) or isinstance(func, str),
"The argument func needs to be string or class !")
assert_satisfies(func_file, func_file is not None,
"The argument func_file is missing!")
assert_satisfies(func_file, func_file.endswith('.py'),
"The argument func_file needs to end with '.py'")
code = None
derived_func_name = None
module_name = func_file[:-3]
if isinstance(func, str):
assert_satisfies(class_name, class_name is not None,
"The argument class_name is missing! " +
"It needs to reference the class in given string!")
code = _CFUNC_CODE_TEMPLATE.format(func, class_name, class_name)
derived_func_name = "metrics_{}".format(class_name)
class_name = "{}.{}Wrapper".format(module_name, class_name)
else:
assert_satisfies(func, inspect.isclass(func), "The parameter `func` should be str or class")
for method in ['map', 'reduce', 'metric']:
assert_satisfies(func, method in func.__dict__, "The class `func` needs to define method `{}`".format(method))
assert_satisfies(class_name, class_name is None,
"If class is specified then class_name parameter needs to be None")
class_name = "{}.{}Wrapper".format(module_name, func.__name__)
derived_func_name = "metrics_{}".format(func.__name__)
code = _CFUNC_CODE_TEMPLATE.format(source_provider(func), func.__name__, func.__name__)
# If the func name is not given, use whatever we can derive from the given definition
if not func_name:
func_name = derived_func_name
# Saved into jar file
tmpdir = tempfile.mkdtemp(prefix="h2o-func")
func_arch_file = _create_zip_file("{}/func.jar".format(tmpdir), (func_file, code))
# Upload into K/V
dest_key = _put_key(func_arch_file, dest_key=func_name)
# Reference
return "python:{}={}".format(dest_key, class_name)
|
[
"Upload",
"given",
"metrics",
"function",
"into",
"H2O",
"cluster",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L1430-L1531
|
[
"def",
"upload_custom_metric",
"(",
"func",
",",
"func_file",
"=",
"\"metrics.py\"",
",",
"func_name",
"=",
"None",
",",
"class_name",
"=",
"None",
",",
"source_provider",
"=",
"None",
")",
":",
"import",
"tempfile",
"import",
"inspect",
"# Use default source provider",
"if",
"not",
"source_provider",
":",
"source_provider",
"=",
"_default_source_provider",
"# The template wraps given metrics representation",
"_CFUNC_CODE_TEMPLATE",
"=",
"\"\"\"# Generated code\nimport water.udf.CMetricFunc as MetricFunc\n\n# User given metric function as a class implementing\n# 3 methods defined by interface CMetricFunc\n{}\n\n# Generated user metric which satisfies the interface\n# of Java MetricFunc\nclass {}Wrapper({}, MetricFunc, object):\n pass\n\n\"\"\"",
"assert_satisfies",
"(",
"func",
",",
"inspect",
".",
"isclass",
"(",
"func",
")",
"or",
"isinstance",
"(",
"func",
",",
"str",
")",
",",
"\"The argument func needs to be string or class !\"",
")",
"assert_satisfies",
"(",
"func_file",
",",
"func_file",
"is",
"not",
"None",
",",
"\"The argument func_file is missing!\"",
")",
"assert_satisfies",
"(",
"func_file",
",",
"func_file",
".",
"endswith",
"(",
"'.py'",
")",
",",
"\"The argument func_file needs to end with '.py'\"",
")",
"code",
"=",
"None",
"derived_func_name",
"=",
"None",
"module_name",
"=",
"func_file",
"[",
":",
"-",
"3",
"]",
"if",
"isinstance",
"(",
"func",
",",
"str",
")",
":",
"assert_satisfies",
"(",
"class_name",
",",
"class_name",
"is",
"not",
"None",
",",
"\"The argument class_name is missing! \"",
"+",
"\"It needs to reference the class in given string!\"",
")",
"code",
"=",
"_CFUNC_CODE_TEMPLATE",
".",
"format",
"(",
"func",
",",
"class_name",
",",
"class_name",
")",
"derived_func_name",
"=",
"\"metrics_{}\"",
".",
"format",
"(",
"class_name",
")",
"class_name",
"=",
"\"{}.{}Wrapper\"",
".",
"format",
"(",
"module_name",
",",
"class_name",
")",
"else",
":",
"assert_satisfies",
"(",
"func",
",",
"inspect",
".",
"isclass",
"(",
"func",
")",
",",
"\"The parameter `func` should be str or class\"",
")",
"for",
"method",
"in",
"[",
"'map'",
",",
"'reduce'",
",",
"'metric'",
"]",
":",
"assert_satisfies",
"(",
"func",
",",
"method",
"in",
"func",
".",
"__dict__",
",",
"\"The class `func` needs to define method `{}`\"",
".",
"format",
"(",
"method",
")",
")",
"assert_satisfies",
"(",
"class_name",
",",
"class_name",
"is",
"None",
",",
"\"If class is specified then class_name parameter needs to be None\"",
")",
"class_name",
"=",
"\"{}.{}Wrapper\"",
".",
"format",
"(",
"module_name",
",",
"func",
".",
"__name__",
")",
"derived_func_name",
"=",
"\"metrics_{}\"",
".",
"format",
"(",
"func",
".",
"__name__",
")",
"code",
"=",
"_CFUNC_CODE_TEMPLATE",
".",
"format",
"(",
"source_provider",
"(",
"func",
")",
",",
"func",
".",
"__name__",
",",
"func",
".",
"__name__",
")",
"# If the func name is not given, use whatever we can derived from given definition",
"if",
"not",
"func_name",
":",
"func_name",
"=",
"derived_func_name",
"# Saved into jar file",
"tmpdir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
"prefix",
"=",
"\"h2o-func\"",
")",
"func_arch_file",
"=",
"_create_zip_file",
"(",
"\"{}/func.jar\"",
".",
"format",
"(",
"tmpdir",
")",
",",
"(",
"func_file",
",",
"code",
")",
")",
"# Upload into K/V",
"dest_key",
"=",
"_put_key",
"(",
"func_arch_file",
",",
"dest_key",
"=",
"func_name",
")",
"# Reference",
"return",
"\"python:{}={}\"",
".",
"format",
"(",
"dest_key",
",",
"class_name",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
main
|
Main program.
@return: none
|
scripts/grabGLRMJenkinRunResults.py
|
def main(argv):
"""
Main program.
@return: none
"""
global g_log_base_dir
global g_airline_java
global g_milsongs_java
global g_airline_python
global g_milsongs_python
if len(argv) < 2:
print "python grabGLRMrunLogs logsBaseDirectory\n"
sys.exit(1)
else: # we may be in business
g_log_base_dir = argv[1]
if (os.path.isdir(g_log_base_dir)): # open directory and start to process logs in each one
airline_java_dict = init_java_dict()
milsongs_java_dict = init_java_dict()
airline_py_dict = init_python_dict()
milsongs_py_dict = init_python_dict()
allBuilds = os.listdir(g_log_base_dir)
for dirName in allBuilds:
airline_java_dict = grab_java_results(dirName, g_airline_java, airline_java_dict)
milsongs_java_dict = grab_java_results(dirName, g_milsongs_java, milsongs_java_dict)
airline_py_dict = grab_py_results(dirName, g_airline_python, airline_py_dict)
milsongs_py_dict = grab_py_results(dirName, g_milsongs_python, milsongs_py_dict)
airline_py_dict = transform_time_python(airline_py_dict) # calculate time taken per iteration
milsongs_py_dict = transform_time_python(milsongs_py_dict)
print("Airline Java log results: \n {0}".format(airline_java_dict))
print("Airline Python log results: \n {0}".format(airline_py_dict))
print("Milsongs Java log results: \n {0}".format(milsongs_java_dict))
print("Milsongs Python log results: \n {0}".format(milsongs_py_dict))
# dump dictionary into json files for later analysis
with open(os.path.join(g_log_base_dir, "airline_java_dict"),'wb') as test_file:
json.dump(airline_java_dict, test_file)
with open(os.path.join(g_log_base_dir, "airline_py_dict"),'wb') as test_file:
json.dump(airline_py_dict, test_file)
with open(os.path.join(g_log_base_dir, "milsongs_java_dict"),'wb') as test_file:
json.dump(milsongs_java_dict, test_file)
with open(os.path.join(g_log_base_dir, "milsongs_py_dict"),'wb') as test_file:
json.dump(milsongs_py_dict, test_file)
# dump analysis results into json format that octave can understand and process
generate_octave_java_ascii(airline_java_dict, "airline_java_octave")
generate_octave_java_ascii(milsongs_java_dict, "milsongs_java_octave")
generate_octave_py_ascii(airline_py_dict, "airline_py_octave")
generate_octave_py_ascii(milsongs_py_dict, "milsongs_py_octave")
|
def main(argv):
"""
Main program.
@return: none
"""
global g_log_base_dir
global g_airline_java
global g_milsongs_java
global g_airline_python
global g_milsongs_python
if len(argv) < 2:
print "python grabGLRMrunLogs logsBaseDirectory\n"
sys.exit(1)
else: # we may be in business
g_log_base_dir = argv[1]
if (os.path.isdir(g_log_base_dir)): # open directory and start to process logs in each one
airline_java_dict = init_java_dict()
milsongs_java_dict = init_java_dict()
airline_py_dict = init_python_dict()
milsongs_py_dict = init_python_dict()
allBuilds = os.listdir(g_log_base_dir)
for dirName in allBuilds:
airline_java_dict = grab_java_results(dirName, g_airline_java, airline_java_dict)
milsongs_java_dict = grab_java_results(dirName, g_milsongs_java, milsongs_java_dict)
airline_py_dict = grab_py_results(dirName, g_airline_python, airline_py_dict)
milsongs_py_dict = grab_py_results(dirName, g_milsongs_python, milsongs_py_dict)
airline_py_dict = transform_time_python(airline_py_dict) # calculate time taken per iteration
milsongs_py_dict = transform_time_python(milsongs_py_dict)
print("Airline Java log results: \n {0}".format(airline_java_dict))
print("Airline Python log results: \n {0}".format(airline_py_dict))
print("Milsongs Java log results: \n {0}".format(milsongs_java_dict))
print("Milsongs Python log results: \n {0}".format(milsongs_py_dict))
# dump dictionary into json files for later analysis
with open(os.path.join(g_log_base_dir, "airline_java_dict"),'wb') as test_file:
json.dump(airline_java_dict, test_file)
with open(os.path.join(g_log_base_dir, "airline_py_dict"),'wb') as test_file:
json.dump(airline_py_dict, test_file)
with open(os.path.join(g_log_base_dir, "milsongs_java_dict"),'wb') as test_file:
json.dump(milsongs_java_dict, test_file)
with open(os.path.join(g_log_base_dir, "milsongs_py_dict"),'wb') as test_file:
json.dump(milsongs_py_dict, test_file)
# dump analysis results into json format that octave can understand and process
generate_octave_java_ascii(airline_java_dict, "airline_java_octave")
generate_octave_java_ascii(milsongs_java_dict, "milsongs_java_octave")
generate_octave_py_ascii(airline_py_dict, "airline_py_octave")
generate_octave_py_ascii(milsongs_py_dict, "milsongs_py_octave")
|
[
"Main",
"program",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/scripts/grabGLRMJenkinRunResults.py#L199-L255
|
[
"def",
"main",
"(",
"argv",
")",
":",
"global",
"g_log_base_dir",
"global",
"g_airline_java",
"global",
"g_milsongs_java",
"global",
"g_airline_python",
"global",
"g_milsongs_python",
"if",
"len",
"(",
"argv",
")",
"<",
"2",
":",
"print",
"\"python grabGLRMrunLogs logsBaseDirectory\\n\"",
"sys",
".",
"exit",
"(",
"1",
")",
"else",
":",
"# we may be in business",
"g_log_base_dir",
"=",
"argv",
"[",
"1",
"]",
"if",
"(",
"os",
".",
"path",
".",
"isdir",
"(",
"g_log_base_dir",
")",
")",
":",
"# open directory and start to process logs in each one",
"airline_java_dict",
"=",
"init_java_dict",
"(",
")",
"milsongs_java_dict",
"=",
"init_java_dict",
"(",
")",
"airline_py_dict",
"=",
"init_python_dict",
"(",
")",
"milsongs_py_dict",
"=",
"init_python_dict",
"(",
")",
"allBuilds",
"=",
"os",
".",
"listdir",
"(",
"g_log_base_dir",
")",
"for",
"dirName",
"in",
"allBuilds",
":",
"airline_java_dict",
"=",
"grab_java_results",
"(",
"dirName",
",",
"g_airline_java",
",",
"airline_java_dict",
")",
"milsongs_java_dict",
"=",
"grab_java_results",
"(",
"dirName",
",",
"g_milsongs_java",
",",
"milsongs_java_dict",
")",
"airline_py_dict",
"=",
"grab_py_results",
"(",
"dirName",
",",
"g_airline_python",
",",
"airline_py_dict",
")",
"milsongs_py_dict",
"=",
"grab_py_results",
"(",
"dirName",
",",
"g_milsongs_python",
",",
"milsongs_py_dict",
")",
"airline_py_dict",
"=",
"transform_time_python",
"(",
"airline_py_dict",
")",
"# calculate time taken per iteration",
"milsongs_py_dict",
"=",
"transform_time_python",
"(",
"milsongs_py_dict",
")",
"print",
"(",
"\"Airline Java log results: \\n {0}\"",
".",
"format",
"(",
"airline_java_dict",
")",
")",
"print",
"(",
"\"Airline Python log results: \\n {0}\"",
".",
"format",
"(",
"airline_py_dict",
")",
")",
"print",
"(",
"\"Milsongs Java log results: \\n {0}\"",
".",
"format",
"(",
"milsongs_java_dict",
")",
")",
"print",
"(",
"\"Milsongs Python log results: \\n {0}\"",
".",
"format",
"(",
"milsongs_py_dict",
")",
")",
"# dump dictionary into json files for later analysis",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"g_log_base_dir",
",",
"\"airline_java_dict\"",
")",
",",
"'wb'",
")",
"as",
"test_file",
":",
"json",
".",
"dump",
"(",
"airline_java_dict",
",",
"test_file",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"g_log_base_dir",
",",
"\"airline_py_dict\"",
")",
",",
"'wb'",
")",
"as",
"test_file",
":",
"json",
".",
"dump",
"(",
"airline_py_dict",
",",
"test_file",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"g_log_base_dir",
",",
"\"milsongs_java_dict\"",
")",
",",
"'wb'",
")",
"as",
"test_file",
":",
"json",
".",
"dump",
"(",
"milsongs_java_dict",
",",
"test_file",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"g_log_base_dir",
",",
"\"milsongs_py_dict\"",
")",
",",
"'wb'",
")",
"as",
"test_file",
":",
"json",
".",
"dump",
"(",
"milsongs_py_dict",
",",
"test_file",
")",
"# dump analysis results into json format that octave can understand and process",
"generate_octave_java_ascii",
"(",
"airline_java_dict",
",",
"\"airline_java_octave\"",
")",
"generate_octave_java_ascii",
"(",
"milsongs_java_dict",
",",
"\"milsongs_java_octave\"",
")",
"generate_octave_py_ascii",
"(",
"airline_py_dict",
",",
"\"airline_py_octave\"",
")",
"generate_octave_py_ascii",
"(",
"milsongs_py_dict",
",",
"\"milsongs_py_octave\"",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
main
|
Main program.
@return: none
|
h2o-r/scripts/build_minicran.py
|
def main(argv):
"""
Main program.
@return: none
"""
global g_script_name
global g_tmp_dir
g_script_name = os.path.basename(argv[0])
# Override any defaults with the user's choices.
parse_args(argv)
# Create tmp dir and clean up on exit with a callback.
g_tmp_dir = tempfile.mkdtemp(suffix=".tmp_minicran")
print "Created tmp directory: " + g_tmp_dir
atexit.register(remove_tmp_dir)
# Do the work.
try:
b = MinicranBuilder(g_print_only, g_output_dir, g_tmp_dir, g_platform, g_rversion, g_branch, g_buildnum)
b.build()
except KeyboardInterrupt:
print("")
pass
|
def main(argv):
"""
Main program.
@return: none
"""
global g_script_name
global g_tmp_dir
g_script_name = os.path.basename(argv[0])
# Override any defaults with the user's choices.
parse_args(argv)
# Create tmp dir and clean up on exit with a callback.
g_tmp_dir = tempfile.mkdtemp(suffix=".tmp_minicran")
print "Created tmp directory: " + g_tmp_dir
atexit.register(remove_tmp_dir)
# Do the work.
try:
b = MinicranBuilder(g_print_only, g_output_dir, g_tmp_dir, g_platform, g_rversion, g_branch, g_buildnum)
b.build()
except KeyboardInterrupt:
print("")
pass
|
[
"Main",
"program",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-r/scripts/build_minicran.py#L400-L425
|
[
"def",
"main",
"(",
"argv",
")",
":",
"global",
"g_script_name",
"global",
"g_tmp_dir",
"g_script_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"argv",
"[",
"0",
"]",
")",
"# Override any defaults with the user's choices.",
"parse_args",
"(",
"argv",
")",
"# Create tmp dir and clean up on exit with a callback.",
"g_tmp_dir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
"suffix",
"=",
"\".tmp_minicran\"",
")",
"print",
"\"Created tmp directory: \"",
"+",
"g_tmp_dir",
"atexit",
".",
"register",
"(",
"remove_tmp_dir",
")",
"# Do the work.",
"try",
":",
"b",
"=",
"MinicranBuilder",
"(",
"g_print_only",
",",
"g_output_dir",
",",
"g_tmp_dir",
",",
"g_platform",
",",
"g_rversion",
",",
"g_branch",
",",
"g_buildnum",
")",
"b",
".",
"build",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"print",
"(",
"\"\"",
")",
"pass"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
check_frame_id
|
Check that the provided frame id is valid in Rapids language.
|
h2o-py/h2o/utils/shared_utils.py
|
def check_frame_id(frame_id):
"""Check that the provided frame id is valid in Rapids language."""
if frame_id is None:
return
if frame_id.strip() == "":
raise H2OValueError("Frame id cannot be an empty string: %r" % frame_id)
for i, ch in enumerate(frame_id):
# '$' character has special meaning at the beginning of the string; and prohibited anywhere else
if ch == "$" and i == 0: continue
if ch not in _id_allowed_characters:
raise H2OValueError("Character '%s' is illegal in frame id: %s" % (ch, frame_id))
if re.match(r"-?[0-9]", frame_id):
raise H2OValueError("Frame id cannot start with a number: %s" % frame_id)
|
def check_frame_id(frame_id):
"""Check that the provided frame id is valid in Rapids language."""
if frame_id is None:
return
if frame_id.strip() == "":
raise H2OValueError("Frame id cannot be an empty string: %r" % frame_id)
for i, ch in enumerate(frame_id):
# '$' character has special meaning at the beginning of the string; and prohibited anywhere else
if ch == "$" and i == 0: continue
if ch not in _id_allowed_characters:
raise H2OValueError("Character '%s' is illegal in frame id: %s" % (ch, frame_id))
if re.match(r"-?[0-9]", frame_id):
raise H2OValueError("Frame id cannot start with a number: %s" % frame_id)
|
[
"Check",
"that",
"the",
"provided",
"frame",
"id",
"is",
"valid",
"in",
"Rapids",
"language",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/utils/shared_utils.py#L46-L58
|
[
"def",
"check_frame_id",
"(",
"frame_id",
")",
":",
"if",
"frame_id",
"is",
"None",
":",
"return",
"if",
"frame_id",
".",
"strip",
"(",
")",
"==",
"\"\"",
":",
"raise",
"H2OValueError",
"(",
"\"Frame id cannot be an empty string: %r\"",
"%",
"frame_id",
")",
"for",
"i",
",",
"ch",
"in",
"enumerate",
"(",
"frame_id",
")",
":",
"# '$' character has special meaning at the beginning of the string; and prohibited anywhere else",
"if",
"ch",
"==",
"\"$\"",
"and",
"i",
"==",
"0",
":",
"continue",
"if",
"ch",
"not",
"in",
"_id_allowed_characters",
":",
"raise",
"H2OValueError",
"(",
"\"Character '%s' is illegal in frame id: %s\"",
"%",
"(",
"ch",
",",
"frame_id",
")",
")",
"if",
"re",
".",
"match",
"(",
"r\"-?[0-9]\"",
",",
"frame_id",
")",
":",
"raise",
"H2OValueError",
"(",
"\"Frame id cannot start with a number: %s\"",
"%",
"frame_id",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
_locate
|
Search for a relative path and turn it into an absolute path.
This is handy when hunting for data files to be passed into h2o and used by import file.
Note: This function is for unit testing purposes only.
Parameters
----------
path : str
Path to search for
:return: Absolute path if it is found; otherwise a ValueError is raised.
|
h2o-py/h2o/utils/shared_utils.py
|
def _locate(path):
"""Search for a relative path and turn it into an absolute path.
This is handy when hunting for data files to be passed into h2o and used by import file.
Note: This function is for unit testing purposes only.
Parameters
----------
path : str
Path to search for
:return: Absolute path if it is found; otherwise a ValueError is raised.
"""
tmp_dir = os.path.realpath(os.getcwd())
possible_result = os.path.join(tmp_dir, path)
while True:
if os.path.exists(possible_result):
return possible_result
next_tmp_dir = os.path.dirname(tmp_dir)
if next_tmp_dir == tmp_dir:
raise ValueError("File not found: " + path)
tmp_dir = next_tmp_dir
possible_result = os.path.join(tmp_dir, path)
|
def _locate(path):
"""Search for a relative path and turn it into an absolute path.
This is handy when hunting for data files to be passed into h2o and used by import file.
Note: This function is for unit testing purposes only.
Parameters
----------
path : str
Path to search for
:return: Absolute path if it is found; otherwise a ValueError is raised.
"""
tmp_dir = os.path.realpath(os.getcwd())
possible_result = os.path.join(tmp_dir, path)
while True:
if os.path.exists(possible_result):
return possible_result
next_tmp_dir = os.path.dirname(tmp_dir)
if next_tmp_dir == tmp_dir:
raise ValueError("File not found: " + path)
tmp_dir = next_tmp_dir
possible_result = os.path.join(tmp_dir, path)
|
[
"Search",
"for",
"a",
"relative",
"path",
"and",
"turn",
"it",
"into",
"an",
"absolute",
"path",
".",
"This",
"is",
"handy",
"when",
"hunting",
"for",
"data",
"files",
"to",
"be",
"passed",
"into",
"h2o",
"and",
"used",
"by",
"import",
"file",
".",
"Note",
":",
"This",
"function",
"is",
"for",
"unit",
"testing",
"purposes",
"only",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/utils/shared_utils.py#L220-L244
|
[
"def",
"_locate",
"(",
"path",
")",
":",
"tmp_dir",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"os",
".",
"getcwd",
"(",
")",
")",
"possible_result",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"path",
")",
"while",
"True",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"possible_result",
")",
":",
"return",
"possible_result",
"next_tmp_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"tmp_dir",
")",
"if",
"next_tmp_dir",
"==",
"tmp_dir",
":",
"raise",
"ValueError",
"(",
"\"File not found: \"",
"+",
"path",
")",
"tmp_dir",
"=",
"next_tmp_dir",
"possible_result",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"path",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
get_human_readable_bytes
|
Convert given number of bytes into a human readable representation, i.e. add prefix such as kb, Mb, Gb,
etc. The `size` argument must be a non-negative integer.
:param size: integer representing byte size of something
:return: string representation of the size, in human-readable form
|
h2o-py/h2o/utils/shared_utils.py
|
def get_human_readable_bytes(size):
"""
Convert given number of bytes into a human readable representation, i.e. add prefix such as kb, Mb, Gb,
etc. The `size` argument must be a non-negative integer.
:param size: integer representing byte size of something
:return: string representation of the size, in human-readable form
"""
if size == 0: return "0"
if size is None: return ""
assert_is_type(size, int)
assert size >= 0, "`size` cannot be negative, got %d" % size
suffixes = "PTGMk"
maxl = len(suffixes)
for i in range(maxl + 1):
shift = (maxl - i) * 10
if size >> shift == 0: continue
ndigits = 0
for nd in [3, 2, 1]:
if size >> (shift + 12 - nd * 3) == 0:
ndigits = nd
break
if ndigits == 0 or size == (size >> shift) << shift:
rounded_val = str(size >> shift)
else:
rounded_val = "%.*f" % (ndigits, size / (1 << shift))
return "%s %sb" % (rounded_val, suffixes[i] if i < maxl else "")
|
def get_human_readable_bytes(size):
"""
Convert given number of bytes into a human readable representation, i.e. add prefix such as kb, Mb, Gb,
etc. The `size` argument must be a non-negative integer.
:param size: integer representing byte size of something
:return: string representation of the size, in human-readable form
"""
if size == 0: return "0"
if size is None: return ""
assert_is_type(size, int)
assert size >= 0, "`size` cannot be negative, got %d" % size
suffixes = "PTGMk"
maxl = len(suffixes)
for i in range(maxl + 1):
shift = (maxl - i) * 10
if size >> shift == 0: continue
ndigits = 0
for nd in [3, 2, 1]:
if size >> (shift + 12 - nd * 3) == 0:
ndigits = nd
break
if ndigits == 0 or size == (size >> shift) << shift:
rounded_val = str(size >> shift)
else:
rounded_val = "%.*f" % (ndigits, size / (1 << shift))
return "%s %sb" % (rounded_val, suffixes[i] if i < maxl else "")
|
[
"Convert",
"given",
"number",
"of",
"bytes",
"into",
"a",
"human",
"readable",
"representation",
"i",
".",
"e",
".",
"add",
"prefix",
"such",
"as",
"kb",
"Mb",
"Gb",
"etc",
".",
"The",
"size",
"argument",
"must",
"be",
"a",
"non",
"-",
"negative",
"integer",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/utils/shared_utils.py#L253-L279
|
[
"def",
"get_human_readable_bytes",
"(",
"size",
")",
":",
"if",
"size",
"==",
"0",
":",
"return",
"\"0\"",
"if",
"size",
"is",
"None",
":",
"return",
"\"\"",
"assert_is_type",
"(",
"size",
",",
"int",
")",
"assert",
"size",
">=",
"0",
",",
"\"`size` cannot be negative, got %d\"",
"%",
"size",
"suffixes",
"=",
"\"PTGMk\"",
"maxl",
"=",
"len",
"(",
"suffixes",
")",
"for",
"i",
"in",
"range",
"(",
"maxl",
"+",
"1",
")",
":",
"shift",
"=",
"(",
"maxl",
"-",
"i",
")",
"*",
"10",
"if",
"size",
">>",
"shift",
"==",
"0",
":",
"continue",
"ndigits",
"=",
"0",
"for",
"nd",
"in",
"[",
"3",
",",
"2",
",",
"1",
"]",
":",
"if",
"size",
">>",
"(",
"shift",
"+",
"12",
"-",
"nd",
"*",
"3",
")",
"==",
"0",
":",
"ndigits",
"=",
"nd",
"break",
"if",
"ndigits",
"==",
"0",
"or",
"size",
"==",
"(",
"size",
">>",
"shift",
")",
"<<",
"shift",
":",
"rounded_val",
"=",
"str",
"(",
"size",
">>",
"shift",
")",
"else",
":",
"rounded_val",
"=",
"\"%.*f\"",
"%",
"(",
"ndigits",
",",
"size",
"/",
"(",
"1",
"<<",
"shift",
")",
")",
"return",
"\"%s %sb\"",
"%",
"(",
"rounded_val",
",",
"suffixes",
"[",
"i",
"]",
"if",
"i",
"<",
"maxl",
"else",
"\"\"",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
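Illustrative calls to get_human_readable_bytes(), using exact powers of two so no fractional rounding is involved; the import path is assumed from the shared_utils module shown above:

from h2o.utils.shared_utils import get_human_readable_bytes

print(get_human_readable_bytes(0))            # "0"
print(get_human_readable_bytes(2048))         # "2 kb"
print(get_human_readable_bytes(3 * 1024**2))  # "3 Mb"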
test
|
get_human_readable_time
|
Convert given duration in milliseconds into a human-readable representation, i.e. hours, minutes, seconds,
etc. More specifically, the returned string may look like following:
1 day 3 hours 12 mins
3 days 0 hours 0 mins
8 hours 12 mins
34 mins 02 secs
13 secs
541 ms
In particular, the following rules are applied:
* milliseconds are printed only if the duration is less than a second;
* seconds are printed only if the duration is less than an hour;
* for durations greater than 1 hour we print days, hours and minutes keeping zeros in the middle (i.e. we
return "4 days 0 hours 12 mins" instead of "4 days 12 mins").
:param time_ms: duration, as a number of elapsed milliseconds.
:return: human-readable string representation of the provided duration.
|
h2o-py/h2o/utils/shared_utils.py
|
def get_human_readable_time(time_ms):
"""
Convert given duration in milliseconds into a human-readable representation, i.e. hours, minutes, seconds,
etc. More specifically, the returned string may look like following:
1 day 3 hours 12 mins
3 days 0 hours 0 mins
8 hours 12 mins
34 mins 02 secs
13 secs
541 ms
In particular, the following rules are applied:
* milliseconds are printed only if the duration is less than a second;
* seconds are printed only if the duration is less than an hour;
* for durations greater than 1 hour we print days, hours and minutes keeping zeros in the middle (i.e. we
return "4 days 0 hours 12 mins" instead of "4 days 12 mins").
:param time_ms: duration, as a number of elapsed milliseconds.
:return: human-readable string representation of the provided duration.
"""
millis = time_ms % 1000
secs = (time_ms // 1000) % 60
mins = (time_ms // 60000) % 60
hours = (time_ms // 3600000) % 24
days = (time_ms // 86400000)
res = ""
if days > 1:
res += "%d days" % days
elif days == 1:
res += "1 day"
if hours > 1 or (hours == 0 and res):
res += " %d hours" % hours
elif hours == 1:
res += " 1 hour"
if mins > 1 or (mins == 0 and res):
res += " %d mins" % mins
elif mins == 1:
res += " 1 min"
if days == 0 and hours == 0:
res += " %02d secs" % secs
if not res:
res = " %d ms" % millis
return res.strip()
|
def get_human_readable_time(time_ms):
"""
Convert given duration in milliseconds into a human-readable representation, i.e. hours, minutes, seconds,
etc. More specifically, the returned string may look like following:
1 day 3 hours 12 mins
3 days 0 hours 0 mins
8 hours 12 mins
34 mins 02 secs
13 secs
541 ms
In particular, the following rules are applied:
* milliseconds are printed only if the duration is less than a second;
* seconds are printed only if the duration is less than an hour;
* for durations greater than 1 hour we print days, hours and minutes keeping zeros in the middle (i.e. we
return "4 days 0 hours 12 mins" instead of "4 days 12 mins").
:param time_ms: duration, as a number of elapsed milliseconds.
:return: human-readable string representation of the provided duration.
"""
millis = time_ms % 1000
secs = (time_ms // 1000) % 60
mins = (time_ms // 60000) % 60
hours = (time_ms // 3600000) % 24
days = (time_ms // 86400000)
res = ""
if days > 1:
res += "%d days" % days
elif days == 1:
res += "1 day"
if hours > 1 or (hours == 0 and res):
res += " %d hours" % hours
elif hours == 1:
res += " 1 hour"
if mins > 1 or (mins == 0 and res):
res += " %d mins" % mins
elif mins == 1:
res += " 1 min"
if days == 0 and hours == 0:
res += " %02d secs" % secs
if not res:
res = " %d ms" % millis
return res.strip()
|
[
"Convert",
"given",
"duration",
"in",
"milliseconds",
"into",
"a",
"human",
"-",
"readable",
"representation",
"i",
".",
"e",
".",
"hours",
"minutes",
"seconds",
"etc",
".",
"More",
"specifically",
"the",
"returned",
"string",
"may",
"look",
"like",
"following",
":",
"1",
"day",
"3",
"hours",
"12",
"mins",
"3",
"days",
"0",
"hours",
"0",
"mins",
"8",
"hours",
"12",
"mins",
"34",
"mins",
"02",
"secs",
"13",
"secs",
"541",
"ms",
"In",
"particular",
"the",
"following",
"rules",
"are",
"applied",
":",
"*",
"milliseconds",
"are",
"printed",
"only",
"if",
"the",
"duration",
"is",
"less",
"than",
"a",
"second",
";",
"*",
"seconds",
"are",
"printed",
"only",
"if",
"the",
"duration",
"is",
"less",
"than",
"an",
"hour",
";",
"*",
"for",
"durations",
"greater",
"than",
"1",
"hour",
"we",
"print",
"days",
"hours",
"and",
"minutes",
"keeping",
"zeros",
"in",
"the",
"middle",
"(",
"i",
".",
"e",
".",
"we",
"return",
"4",
"days",
"0",
"hours",
"12",
"mins",
"instead",
"of",
"4",
"days",
"12",
"mins",
")",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/utils/shared_utils.py#L282-L328
|
[
"def",
"get_human_readable_time",
"(",
"time_ms",
")",
":",
"millis",
"=",
"time_ms",
"%",
"1000",
"secs",
"=",
"(",
"time_ms",
"//",
"1000",
")",
"%",
"60",
"mins",
"=",
"(",
"time_ms",
"//",
"60000",
")",
"%",
"60",
"hours",
"=",
"(",
"time_ms",
"//",
"3600000",
")",
"%",
"24",
"days",
"=",
"(",
"time_ms",
"//",
"86400000",
")",
"res",
"=",
"\"\"",
"if",
"days",
">",
"1",
":",
"res",
"+=",
"\"%d days\"",
"%",
"days",
"elif",
"days",
"==",
"1",
":",
"res",
"+=",
"\"1 day\"",
"if",
"hours",
">",
"1",
"or",
"(",
"hours",
"==",
"0",
"and",
"res",
")",
":",
"res",
"+=",
"\" %d hours\"",
"%",
"hours",
"elif",
"hours",
"==",
"1",
":",
"res",
"+=",
"\" 1 hour\"",
"if",
"mins",
">",
"1",
"or",
"(",
"mins",
"==",
"0",
"and",
"res",
")",
":",
"res",
"+=",
"\" %d mins\"",
"%",
"mins",
"elif",
"mins",
"==",
"1",
":",
"res",
"+=",
"\" 1 min\"",
"if",
"days",
"==",
"0",
"and",
"hours",
"==",
"0",
":",
"res",
"+=",
"\" %02d secs\"",
"%",
"secs",
"if",
"not",
"res",
":",
"res",
"=",
"\" %d ms\"",
"%",
"millis",
"return",
"res",
".",
"strip",
"(",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
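Two illustrative durations for get_human_readable_time(), matching the formatting rules listed in its docstring; the import path is assumed from the same shared_utils module:

from h2o.utils.shared_utils import get_human_readable_time

print(get_human_readable_time(45 * 1000))                 # "45 secs"
print(get_human_readable_time(3 * 3600000 + 12 * 60000))  # "3 hours 12 mins"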
test
|
print2
|
This function exists here ONLY because Sphinx.ext.autodoc gets into a bad state when seeing the print()
function. When in that state, autodoc doesn't display any errors or warnings, but instead completely
ignores the "bysource" member-order option.
|
h2o-py/h2o/utils/shared_utils.py
|
def print2(msg, flush=False, end="\n"):
"""
This function exists here ONLY because Sphinx.ext.autodoc gets into a bad state when seeing the print()
function. When in that state, autodoc doesn't display any errors or warnings, but instead completely
ignores the "bysource" member-order option.
"""
print(msg, end=end)
if flush: sys.stdout.flush()
|
def print2(msg, flush=False, end="\n"):
"""
This function exists here ONLY because Sphinx.ext.autodoc gets into a bad state when seeing the print()
function. When in that state, autodoc doesn't display any errors or warnings, but instead completely
ignores the "bysource" member-order option.
"""
print(msg, end=end)
if flush: sys.stdout.flush()
|
[
"This",
"function",
"exists",
"here",
"ONLY",
"because",
"Sphinx",
".",
"ext",
".",
"autodoc",
"gets",
"into",
"a",
"bad",
"state",
"when",
"seeing",
"the",
"print",
"()",
"function",
".",
"When",
"in",
"that",
"state",
"autodoc",
"doesn",
"t",
"display",
"any",
"errors",
"or",
"warnings",
"but",
"instead",
"completely",
"ignores",
"the",
"bysource",
"member",
"-",
"order",
"option",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/utils/shared_utils.py#L331-L338
|
[
"def",
"print2",
"(",
"msg",
",",
"flush",
"=",
"False",
",",
"end",
"=",
"\"\\n\"",
")",
":",
"print",
"(",
"msg",
",",
"end",
"=",
"end",
")",
"if",
"flush",
":",
"sys",
".",
"stdout",
".",
"flush",
"(",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
normalize_slice
|
Return a "canonical" version of slice ``s``.
:param slice s: the original slice expression
:param int total: total number of elements in the collection sliced by ``s``
:return slice: a slice equivalent to ``s`` but not containing any negative indices or Nones.
|
h2o-py/h2o/utils/shared_utils.py
|
def normalize_slice(s, total):
"""
Return a "canonical" version of slice ``s``.
:param slice s: the original slice expression
:param int total: total number of elements in the collection sliced by ``s``
:return slice: a slice equivalent to ``s`` but not containing any negative indices or Nones.
"""
newstart = 0 if s.start is None else max(0, s.start + total) if s.start < 0 else min(s.start, total)
newstop = total if s.stop is None else max(0, s.stop + total) if s.stop < 0 else min(s.stop, total)
newstep = 1 if s.step is None else s.step
return slice(newstart, newstop, newstep)
|
def normalize_slice(s, total):
"""
Return a "canonical" version of slice ``s``.
:param slice s: the original slice expression
:param int total: total number of elements in the collection sliced by ``s``
:return slice: a slice equivalent to ``s`` but not containing any negative indices or Nones.
"""
newstart = 0 if s.start is None else max(0, s.start + total) if s.start < 0 else min(s.start, total)
newstop = total if s.stop is None else max(0, s.stop + total) if s.stop < 0 else min(s.stop, total)
newstep = 1 if s.step is None else s.step
return slice(newstart, newstop, newstep)
|
[
"Return",
"a",
"canonical",
"version",
"of",
"slice",
"s",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/utils/shared_utils.py#L341-L352
|
[
"def",
"normalize_slice",
"(",
"s",
",",
"total",
")",
":",
"newstart",
"=",
"0",
"if",
"s",
".",
"start",
"is",
"None",
"else",
"max",
"(",
"0",
",",
"s",
".",
"start",
"+",
"total",
")",
"if",
"s",
".",
"start",
"<",
"0",
"else",
"min",
"(",
"s",
".",
"start",
",",
"total",
")",
"newstop",
"=",
"total",
"if",
"s",
".",
"stop",
"is",
"None",
"else",
"max",
"(",
"0",
",",
"s",
".",
"stop",
"+",
"total",
")",
"if",
"s",
".",
"stop",
"<",
"0",
"else",
"min",
"(",
"s",
".",
"stop",
",",
"total",
")",
"newstep",
"=",
"1",
"if",
"s",
".",
"step",
"is",
"None",
"else",
"s",
".",
"step",
"return",
"slice",
"(",
"newstart",
",",
"newstop",
",",
"newstep",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
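A short sketch of normalize_slice() applied to a collection of length 10; the import path is assumed, and the expected results follow directly from the logic above:

from h2o.utils.shared_utils import normalize_slice

print(normalize_slice(slice(None, None, None), 10))  # slice(0, 10, 1)
print(normalize_slice(slice(-3, None), 10))          # slice(7, 10, 1)
print(normalize_slice(slice(2, 100), 10))            # slice(2, 10, 1)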
test
|
slice_is_normalized
|
Return True if slice ``s`` is in "normalized" form.
|
h2o-py/h2o/utils/shared_utils.py
|
def slice_is_normalized(s):
"""Return True if slice ``s`` is in "normalized" form."""
return (s.start is not None and s.stop is not None and s.step is not None and s.start <= s.stop)
|
def slice_is_normalized(s):
"""Return True if slice ``s`` is in "normalized" form."""
return (s.start is not None and s.stop is not None and s.step is not None and s.start <= s.stop)
|
[
"Return",
"True",
"if",
"slice",
"s",
"in",
"normalized",
"form",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/utils/shared_utils.py#L355-L357
|
[
"def",
"slice_is_normalized",
"(",
"s",
")",
":",
"return",
"(",
"s",
".",
"start",
"is",
"not",
"None",
"and",
"s",
".",
"stop",
"is",
"not",
"None",
"and",
"s",
".",
"step",
"is",
"not",
"None",
"and",
"s",
".",
"start",
"<=",
"s",
".",
"stop",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
mojo_predict_pandas
|
MOJO scoring function to take a Pandas frame and use MOJO model as zip file to score.
:param dataframe: Pandas frame to score.
:param mojo_zip_path: Path to MOJO zip downloaded from H2O.
:param genmodel_jar_path: Optional, path to genmodel jar file. If None (default) then the h2o-genmodel.jar in the same
folder as the MOJO zip will be used.
:param classpath: Optional, specifies custom user defined classpath which will be used when scoring. If None
(default) then the default classpath for this MOJO model will be used.
:param java_options: Optional, custom user defined options for Java. By default ``-Xmx4g`` is used.
:param verbose: Optional, if True, then additional debug information will be printed. False by default.
:return: Pandas frame with predictions
|
h2o-py/h2o/utils/shared_utils.py
|
def mojo_predict_pandas(dataframe, mojo_zip_path, genmodel_jar_path=None, classpath=None, java_options=None, verbose=False):
"""
MOJO scoring function to take a Pandas frame and use MOJO model as zip file to score.
:param dataframe: Pandas frame to score.
:param mojo_zip_path: Path to MOJO zip downloaded from H2O.
:param genmodel_jar_path: Optional, path to genmodel jar file. If None (default) then the h2o-genmodel.jar in the same
folder as the MOJO zip will be used.
:param classpath: Optional, specifies custom user defined classpath which will be used when scoring. If None
(default) then the default classpath for this MOJO model will be used.
:param java_options: Optional, custom user defined options for Java. By default ``-Xmx4g`` is used.
:param verbose: Optional, if True, then additional debug information will be printed. False by default.
:return: Pandas frame with predictions
"""
tmp_dir = tempfile.mkdtemp()
try:
if not can_use_pandas():
raise RuntimeError('Cannot import pandas')
import pandas
assert_is_type(dataframe, pandas.DataFrame)
input_csv_path = os.path.join(tmp_dir, 'input.csv')
prediction_csv_path = os.path.join(tmp_dir, 'prediction.csv')
dataframe.to_csv(input_csv_path)
mojo_predict_csv(input_csv_path=input_csv_path, mojo_zip_path=mojo_zip_path,
output_csv_path=prediction_csv_path, genmodel_jar_path=genmodel_jar_path,
classpath=classpath, java_options=java_options, verbose=verbose)
return pandas.read_csv(prediction_csv_path)
finally:
shutil.rmtree(tmp_dir)
|
def mojo_predict_pandas(dataframe, mojo_zip_path, genmodel_jar_path=None, classpath=None, java_options=None, verbose=False):
"""
MOJO scoring function to take a Pandas frame and use MOJO model as zip file to score.
:param dataframe: Pandas frame to score.
:param mojo_zip_path: Path to MOJO zip downloaded from H2O.
:param genmodel_jar_path: Optional, path to genmodel jar file. If None (default) then the h2o-genmodel.jar in the same
folder as the MOJO zip will be used.
:param classpath: Optional, specifies custom user defined classpath which will be used when scoring. If None
(default) then the default classpath for this MOJO model will be used.
:param java_options: Optional, custom user defined options for Java. By default ``-Xmx4g`` is used.
:param verbose: Optional, if True, then additional debug information will be printed. False by default.
:return: Pandas frame with predictions
"""
tmp_dir = tempfile.mkdtemp()
try:
if not can_use_pandas():
raise RuntimeError('Cannot import pandas')
import pandas
assert_is_type(dataframe, pandas.DataFrame)
input_csv_path = os.path.join(tmp_dir, 'input.csv')
prediction_csv_path = os.path.join(tmp_dir, 'prediction.csv')
dataframe.to_csv(input_csv_path)
mojo_predict_csv(input_csv_path=input_csv_path, mojo_zip_path=mojo_zip_path,
output_csv_path=prediction_csv_path, genmodel_jar_path=genmodel_jar_path,
classpath=classpath, java_options=java_options, verbose=verbose)
return pandas.read_csv(prediction_csv_path)
finally:
shutil.rmtree(tmp_dir)
|
[
"MOJO",
"scoring",
"function",
"to",
"take",
"a",
"Pandas",
"frame",
"and",
"use",
"MOJO",
"model",
"as",
"zip",
"file",
"to",
"score",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/utils/shared_utils.py#L379-L407
|
[
"def",
"mojo_predict_pandas",
"(",
"dataframe",
",",
"mojo_zip_path",
",",
"genmodel_jar_path",
"=",
"None",
",",
"classpath",
"=",
"None",
",",
"java_options",
"=",
"None",
",",
"verbose",
"=",
"False",
")",
":",
"tmp_dir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"try",
":",
"if",
"not",
"can_use_pandas",
"(",
")",
":",
"raise",
"RuntimeException",
"(",
"'Cannot import pandas'",
")",
"import",
"pandas",
"assert_is_type",
"(",
"dataframe",
",",
"pandas",
".",
"DataFrame",
")",
"input_csv_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"'input.csv'",
")",
"prediction_csv_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"'prediction.csv'",
")",
"dataframe",
".",
"to_csv",
"(",
"input_csv_path",
")",
"mojo_predict_csv",
"(",
"input_csv_path",
"=",
"input_csv_path",
",",
"mojo_zip_path",
"=",
"mojo_zip_path",
",",
"output_csv_path",
"=",
"prediction_csv_path",
",",
"genmodel_jar_path",
"=",
"genmodel_jar_path",
",",
"classpath",
"=",
"classpath",
",",
"java_options",
"=",
"java_options",
",",
"verbose",
"=",
"verbose",
")",
"return",
"pandas",
".",
"read_csv",
"(",
"prediction_csv_path",
")",
"finally",
":",
"shutil",
".",
"rmtree",
"(",
"tmp_dir",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
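A minimal usage sketch for the mojo_predict_pandas record above. The frame columns, the MOJO path, and the output are placeholders (a real run needs a MOJO zip exported from H2O plus a local Java installation); the import path follows the module path shown in the record.

# Hypothetical example -- file paths and column names are illustrative only.
import pandas as pd
from h2o.utils.shared_utils import mojo_predict_pandas

test_rows = pd.DataFrame({
    "sepal_len": [5.1, 6.7],   # placeholder feature columns
    "sepal_wid": [3.5, 3.0],
})

predictions = mojo_predict_pandas(
    dataframe=test_rows,
    mojo_zip_path="/tmp/my_model.zip",   # e.g. obtained via model.download_mojo()
    verbose=True,                        # print the java command that gets invoked
)
print(predictions.head())                # one prediction row per input row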
test
|
mojo_predict_csv
|
MOJO scoring function to take a CSV file and use MOJO model as zip file to score.
:param input_csv_path: Path to input CSV file.
:param mojo_zip_path: Path to MOJO zip downloaded from H2O.
:param output_csv_path: Optional, name of the output CSV file with computed predictions. If None (default), then
predictions will be saved as prediction.csv in the same folder as the MOJO zip.
:param genmodel_jar_path: Optional, path to genmodel jar file. If None (default) then the h2o-genmodel.jar in the same
folder as the MOJO zip will be used.
:param classpath: Optional, specifies custom user defined classpath which will be used when scoring. If None
(default) then the default classpath for this MOJO model will be used.
:param java_options: Optional, custom user defined options for Java. By default ``-Xmx4g -XX:ReservedCodeCacheSize=256m`` is used.
:param verbose: Optional, if True, then additional debug information will be printed. False by default.
:return: List of computed predictions
|
h2o-py/h2o/utils/shared_utils.py
|
def mojo_predict_csv(input_csv_path, mojo_zip_path, output_csv_path=None, genmodel_jar_path=None, classpath=None, java_options=None, verbose=False):
"""
MOJO scoring function to take a CSV file and use MOJO model as zip file to score.
:param input_csv_path: Path to input CSV file.
:param mojo_zip_path: Path to MOJO zip downloaded from H2O.
:param output_csv_path: Optional, name of the output CSV file with computed predictions. If None (default), then
predictions will be saved as prediction.csv in the same folder as the MOJO zip.
:param genmodel_jar_path: Optional, path to genmodel jar file. If None (default) then the h2o-genmodel.jar in the same
folder as the MOJO zip will be used.
:param classpath: Optional, specifies custom user defined classpath which will be used when scoring. If None
(default) then the default classpath for this MOJO model will be used.
:param java_options: Optional, custom user defined options for Java. By default ``-Xmx4g -XX:ReservedCodeCacheSize=256m`` is used.
:param verbose: Optional, if True, then additional debug information will be printed. False by default.
:return: List of computed predictions
"""
default_java_options = '-Xmx4g -XX:ReservedCodeCacheSize=256m'
prediction_output_file = 'prediction.csv'
# Checking java
java = H2OLocalServer._find_java()
H2OLocalServer._check_java(java=java, verbose=verbose)
# Ensure input_csv exists
if verbose:
print("input_csv:\t%s" % input_csv_path)
if not os.path.isfile(input_csv_path):
raise RuntimeError("Input csv cannot be found at %s" % input_csv_path)
# Ensure mojo_zip exists
mojo_zip_path = os.path.abspath(mojo_zip_path)
if verbose:
print("mojo_zip:\t%s" % mojo_zip_path)
if not os.path.isfile(mojo_zip_path):
raise RuntimeError("MOJO zip cannot be found at %s" % mojo_zip_path)
parent_dir = os.path.dirname(mojo_zip_path)
# Set output_csv if necessary
if output_csv_path is None:
output_csv_path = os.path.join(parent_dir, prediction_output_file)
# Set path to h2o-genmodel.jar if necessary and check it's valid
if genmodel_jar_path is None:
genmodel_jar_path = os.path.join(parent_dir, gen_model_file_name)
if verbose:
print("genmodel_jar:\t%s" % genmodel_jar_path)
if not os.path.isfile(genmodel_jar_path):
raise RuntimeError("Genmodel jar cannot be found at %s" % genmodel_jar_path)
if verbose and output_csv_path is not None:
print("output_csv:\t%s" % output_csv_path)
# Set classpath if necessary
if classpath is None:
classpath = genmodel_jar_path
if verbose:
print("classpath:\t%s" % classpath)
# Set java_options if necessary
if java_options is None:
java_options = default_java_options
if verbose:
print("java_options:\t%s" % java_options)
# Construct command to invoke java
cmd = [java]
for option in java_options.split(' '):
cmd += [option]
cmd += ["-cp", classpath, h2o_predictor_class, "--mojo", mojo_zip_path, "--input", input_csv_path,
'--output', output_csv_path, '--decimal']
if verbose:
cmd_str = " ".join(cmd)
print("java cmd:\t%s" % cmd_str)
# invoke the command
subprocess.check_call(cmd, shell=False)
# load predictions in form of a dict
with open(output_csv_path) as csv_file:
result = list(csv.DictReader(csv_file))
return result
|
def mojo_predict_csv(input_csv_path, mojo_zip_path, output_csv_path=None, genmodel_jar_path=None, classpath=None, java_options=None, verbose=False):
"""
MOJO scoring function to take a CSV file and use MOJO model as zip file to score.
:param input_csv_path: Path to input CSV file.
:param mojo_zip_path: Path to MOJO zip downloaded from H2O.
:param output_csv_path: Optional, name of the output CSV file with computed predictions. If None (default), then
predictions will be saved as prediction.csv in the same folder as the MOJO zip.
:param genmodel_jar_path: Optional, path to genmodel jar file. If None (default) then the h2o-genmodel.jar in the same
folder as the MOJO zip will be used.
:param classpath: Optional, specifies custom user defined classpath which will be used when scoring. If None
(default) then the default classpath for this MOJO model will be used.
:param java_options: Optional, custom user defined options for Java. By default ``-Xmx4g -XX:ReservedCodeCacheSize=256m`` is used.
:param verbose: Optional, if True, then additional debug information will be printed. False by default.
:return: List of computed predictions
"""
default_java_options = '-Xmx4g -XX:ReservedCodeCacheSize=256m'
prediction_output_file = 'prediction.csv'
# Checking java
java = H2OLocalServer._find_java()
H2OLocalServer._check_java(java=java, verbose=verbose)
# Ensure input_csv exists
if verbose:
print("input_csv:\t%s" % input_csv_path)
if not os.path.isfile(input_csv_path):
raise RuntimeError("Input csv cannot be found at %s" % input_csv_path)
# Ensure mojo_zip exists
mojo_zip_path = os.path.abspath(mojo_zip_path)
if verbose:
print("mojo_zip:\t%s" % mojo_zip_path)
if not os.path.isfile(mojo_zip_path):
raise RuntimeError("MOJO zip cannot be found at %s" % mojo_zip_path)
parent_dir = os.path.dirname(mojo_zip_path)
# Set output_csv if necessary
if output_csv_path is None:
output_csv_path = os.path.join(parent_dir, prediction_output_file)
# Set path to h2o-genmodel.jar if necessary and check it's valid
if genmodel_jar_path is None:
genmodel_jar_path = os.path.join(parent_dir, gen_model_file_name)
if verbose:
print("genmodel_jar:\t%s" % genmodel_jar_path)
if not os.path.isfile(genmodel_jar_path):
raise RuntimeError("Genmodel jar cannot be found at %s" % genmodel_jar_path)
if verbose and output_csv_path is not None:
print("output_csv:\t%s" % output_csv_path)
# Set classpath if necessary
if classpath is None:
classpath = genmodel_jar_path
if verbose:
print("classpath:\t%s" % classpath)
# Set java_options if necessary
if java_options is None:
java_options = default_java_options
if verbose:
print("java_options:\t%s" % java_options)
# Construct command to invoke java
cmd = [java]
for option in java_options.split(' '):
cmd += [option]
cmd += ["-cp", classpath, h2o_predictor_class, "--mojo", mojo_zip_path, "--input", input_csv_path,
'--output', output_csv_path, '--decimal']
if verbose:
cmd_str = " ".join(cmd)
print("java cmd:\t%s" % cmd_str)
# invoke the command
subprocess.check_call(cmd, shell=False)
# load predictions in form of a dict
with open(output_csv_path) as csv_file:
result = list(csv.DictReader(csv_file))
return result
|
[
"MOJO",
"scoring",
"function",
"to",
"take",
"a",
"CSV",
"file",
"and",
"use",
"MOJO",
"model",
"as",
"zip",
"file",
"to",
"score",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/utils/shared_utils.py#L410-L491
|
[
"def",
"mojo_predict_csv",
"(",
"input_csv_path",
",",
"mojo_zip_path",
",",
"output_csv_path",
"=",
"None",
",",
"genmodel_jar_path",
"=",
"None",
",",
"classpath",
"=",
"None",
",",
"java_options",
"=",
"None",
",",
"verbose",
"=",
"False",
")",
":",
"default_java_options",
"=",
"'-Xmx4g -XX:ReservedCodeCacheSize=256m'",
"prediction_output_file",
"=",
"'prediction.csv'",
"# Checking java",
"java",
"=",
"H2OLocalServer",
".",
"_find_java",
"(",
")",
"H2OLocalServer",
".",
"_check_java",
"(",
"java",
"=",
"java",
",",
"verbose",
"=",
"verbose",
")",
"# Ensure input_csv exists",
"if",
"verbose",
":",
"print",
"(",
"\"input_csv:\\t%s\"",
"%",
"input_csv_path",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"input_csv_path",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Input csv cannot be found at %s\"",
"%",
"input_csv_path",
")",
"# Ensure mojo_zip exists",
"mojo_zip_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"mojo_zip_path",
")",
"if",
"verbose",
":",
"print",
"(",
"\"mojo_zip:\\t%s\"",
"%",
"mojo_zip_path",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"mojo_zip_path",
")",
":",
"raise",
"RuntimeError",
"(",
"\"MOJO zip cannot be found at %s\"",
"%",
"mojo_zip_path",
")",
"parent_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"mojo_zip_path",
")",
"# Set output_csv if necessary",
"if",
"output_csv_path",
"is",
"None",
":",
"output_csv_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"parent_dir",
",",
"prediction_output_file",
")",
"# Set path to h2o-genmodel.jar if necessary and check it's valid",
"if",
"genmodel_jar_path",
"is",
"None",
":",
"genmodel_jar_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"parent_dir",
",",
"gen_model_file_name",
")",
"if",
"verbose",
":",
"print",
"(",
"\"genmodel_jar:\\t%s\"",
"%",
"genmodel_jar_path",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"genmodel_jar_path",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Genmodel jar cannot be found at %s\"",
"%",
"genmodel_jar_path",
")",
"if",
"verbose",
"and",
"output_csv_path",
"is",
"not",
"None",
":",
"print",
"(",
"\"output_csv:\\t%s\"",
"%",
"output_csv_path",
")",
"# Set classpath if necessary",
"if",
"classpath",
"is",
"None",
":",
"classpath",
"=",
"genmodel_jar_path",
"if",
"verbose",
":",
"print",
"(",
"\"classpath:\\t%s\"",
"%",
"classpath",
")",
"# Set java_options if necessary",
"if",
"java_options",
"is",
"None",
":",
"java_options",
"=",
"default_java_options",
"if",
"verbose",
":",
"print",
"(",
"\"java_options:\\t%s\"",
"%",
"java_options",
")",
"# Construct command to invoke java",
"cmd",
"=",
"[",
"java",
"]",
"for",
"option",
"in",
"java_options",
".",
"split",
"(",
"' '",
")",
":",
"cmd",
"+=",
"[",
"option",
"]",
"cmd",
"+=",
"[",
"\"-cp\"",
",",
"classpath",
",",
"h2o_predictor_class",
",",
"\"--mojo\"",
",",
"mojo_zip_path",
",",
"\"--input\"",
",",
"input_csv_path",
",",
"'--output'",
",",
"output_csv_path",
",",
"'--decimal'",
"]",
"if",
"verbose",
":",
"cmd_str",
"=",
"\" \"",
".",
"join",
"(",
"cmd",
")",
"print",
"(",
"\"java cmd:\\t%s\"",
"%",
"cmd_str",
")",
"# invoke the command",
"subprocess",
".",
"check_call",
"(",
"cmd",
",",
"shell",
"=",
"False",
")",
"# load predictions in form of a dict",
"with",
"open",
"(",
"output_csv_path",
")",
"as",
"csv_file",
":",
"result",
"=",
"list",
"(",
"csv",
".",
"DictReader",
"(",
"csv_file",
")",
")",
"return",
"result"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
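The CSV variant above follows the same pattern but works on files and returns the scored rows as a list of dicts (one per row, straight from csv.DictReader). A sketch with placeholder paths:

# Hypothetical example -- all paths below are placeholders.
from h2o.utils.shared_utils import mojo_predict_csv

rows = mojo_predict_csv(
    input_csv_path="/tmp/test_rows.csv",   # data to score
    mojo_zip_path="/tmp/my_model.zip",     # MOJO archive downloaded from H2O
    output_csv_path="/tmp/scored.csv",     # where the genmodel predictor writes its output
    java_options="-Xmx2g",                 # overrides the -Xmx4g -XX:ReservedCodeCacheSize=256m default
)
for row in rows[:3]:
    print(row)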
test
|
deprecated
|
The decorator to mark deprecated functions.
|
h2o-py/h2o/utils/shared_utils.py
|
def deprecated(message):
"""The decorator to mark deprecated functions."""
from traceback import extract_stack
assert message, "`message` argument in @deprecated is required."
def deprecated_decorator(fun):
def decorator_invisible(*args, **kwargs):
stack = extract_stack()
assert len(stack) >= 2 and stack[-1][2] == "decorator_invisible", "Got confusing stack... %r" % stack
print("[WARNING] in %s line %d:" % (stack[-2][0], stack[-2][1]))
print(" >>> %s" % (stack[-2][3] or "????"))
print(" ^^^^ %s" % message)
return fun(*args, **kwargs)
decorator_invisible.__doc__ = message
decorator_invisible.__name__ = fun.__name__
decorator_invisible.__module__ = fun.__module__
decorator_invisible.__deprecated__ = True
return decorator_invisible
return deprecated_decorator
|
def deprecated(message):
"""The decorator to mark deprecated functions."""
from traceback import extract_stack
assert message, "`message` argument in @deprecated is required."
def deprecated_decorator(fun):
def decorator_invisible(*args, **kwargs):
stack = extract_stack()
assert len(stack) >= 2 and stack[-1][2] == "decorator_invisible", "Got confusing stack... %r" % stack
print("[WARNING] in %s line %d:" % (stack[-2][0], stack[-2][1]))
print(" >>> %s" % (stack[-2][3] or "????"))
print(" ^^^^ %s" % message)
return fun(*args, **kwargs)
decorator_invisible.__doc__ = message
decorator_invisible.__name__ = fun.__name__
decorator_invisible.__module__ = fun.__module__
decorator_invisible.__deprecated__ = True
return decorator_invisible
return deprecated_decorator
|
[
"The",
"decorator",
"to",
"mark",
"deprecated",
"functions",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/utils/shared_utils.py#L494-L514
|
[
"def",
"deprecated",
"(",
"message",
")",
":",
"from",
"traceback",
"import",
"extract_stack",
"assert",
"message",
",",
"\"`message` argument in @deprecated is required.\"",
"def",
"deprecated_decorator",
"(",
"fun",
")",
":",
"def",
"decorator_invisible",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"stack",
"=",
"extract_stack",
"(",
")",
"assert",
"len",
"(",
"stack",
")",
">=",
"2",
"and",
"stack",
"[",
"-",
"1",
"]",
"[",
"2",
"]",
"==",
"\"decorator_invisible\"",
",",
"\"Got confusing stack... %r\"",
"%",
"stack",
"print",
"(",
"\"[WARNING] in %s line %d:\"",
"%",
"(",
"stack",
"[",
"-",
"2",
"]",
"[",
"0",
"]",
",",
"stack",
"[",
"-",
"2",
"]",
"[",
"1",
"]",
")",
")",
"print",
"(",
"\" >>> %s\"",
"%",
"(",
"stack",
"[",
"-",
"2",
"]",
"[",
"3",
"]",
"or",
"\"????\"",
")",
")",
"print",
"(",
"\" ^^^^ %s\"",
"%",
"message",
")",
"return",
"fun",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"decorator_invisible",
".",
"__doc__",
"=",
"message",
"decorator_invisible",
".",
"__name__",
"=",
"fun",
".",
"__name__",
"decorator_invisible",
".",
"__module__",
"=",
"fun",
".",
"__module__",
"decorator_invisible",
".",
"__deprecated__",
"=",
"True",
"return",
"decorator_invisible",
"return",
"deprecated_decorator"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
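The deprecated decorator above only wraps the function and prints a warning that points back at the call site; the function names in the sketch below are made up purely for illustration.

# Hypothetical usage of the @deprecated decorator from the record above.
from h2o.utils.shared_utils import deprecated

@deprecated("use new_sum() instead")
def old_sum(a, b):
    return a + b

result = old_sum(1, 2)                     # prints a [WARNING] block naming this file and line
assert result == 3                         # the wrapped function still runs normally
assert old_sum.__deprecated__ is True      # flag set by the decorator
assert old_sum.__doc__ == "use new_sum() instead"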
test
|
H2OGridSearch.join
|
Wait until grid finishes computing.
|
h2o-py/h2o/grid/grid_search.py
|
def join(self):
"""Wait until grid finishes computing."""
self._future = False
self._job.poll()
self._job = None
|
def join(self):
"""Wait until grid finishes computing."""
self._future = False
self._job.poll()
self._job = None
|
[
"Wait",
"until",
"grid",
"finishes",
"computing",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/grid/grid_search.py#L147-L151
|
[
"def",
"join",
"(",
"self",
")",
":",
"self",
".",
"_future",
"=",
"False",
"self",
".",
"_job",
".",
"poll",
"(",
")",
"self",
".",
"_job",
"=",
"None"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OGridSearch.train
|
Train the model synchronously (i.e. do not return until the model finishes training).
To train asynchronously call :meth:`start`.
:param x: A list of column names or indices indicating the predictor columns.
:param y: An index or a column name indicating the response column.
:param training_frame: The H2OFrame having the columns indicated by x and y (as well as any
additional columns specified by fold, offset, and weights).
:param offset_column: The name or index of the column in training_frame that holds the offsets.
:param fold_column: The name or index of the column in training_frame that holds the per-row fold
assignments.
:param weights_column: The name or index of the column in training_frame that holds the per-row weights.
:param validation_frame: H2OFrame with validation data to be scored on while training.
|
h2o-py/h2o/grid/grid_search.py
|
def train(self, x=None, y=None, training_frame=None, offset_column=None, fold_column=None, weights_column=None,
validation_frame=None, **params):
"""
Train the model synchronously (i.e. do not return until the model finishes training).
To train asynchronously call :meth:`start`.
:param x: A list of column names or indices indicating the predictor columns.
:param y: An index or a column name indicating the response column.
:param training_frame: The H2OFrame having the columns indicated by x and y (as well as any
additional columns specified by fold, offset, and weights).
:param offset_column: The name or index of the column in training_frame that holds the offsets.
:param fold_column: The name or index of the column in training_frame that holds the per-row fold
assignments.
:param weights_column: The name or index of the column in training_frame that holds the per-row weights.
:param validation_frame: H2OFrame with validation data to be scored on while training.
"""
algo_params = locals()
parms = self._parms.copy()
parms.update({k: v for k, v in algo_params.items() if k not in ["self", "params", "algo_params", "parms"]})
# dictionaries have special handling in grid search, avoid the implicit conversion
parms["search_criteria"] = None if self.search_criteria is None else str(self.search_criteria)
parms["hyper_parameters"] = None if self.hyper_params is None else str(self.hyper_params) # unique to grid search
parms.update({k: v for k, v in list(self.model._parms.items()) if v is not None}) # unique to grid search
parms.update(params)
if '__class__' in parms:  # FIXME: hack for PY3
del parms['__class__']
y = algo_params["y"]
tframe = algo_params["training_frame"]
if tframe is None: raise ValueError("Missing training_frame")
if y is not None:
if is_type(y, list, tuple):
if len(y) == 1:
parms["y"] = y[0]
else:
raise ValueError('y must be a single column reference')
if x is None:
if(isinstance(y, int)):
xset = set(range(training_frame.ncols)) - {y}
else:
xset = set(training_frame.names) - {y}
else:
xset = set()
if is_type(x, int, str): x = [x]
for xi in x:
if is_type(xi, int):
if not (-training_frame.ncols <= xi < training_frame.ncols):
raise H2OValueError("Column %d does not exist in the training frame" % xi)
xset.add(training_frame.names[xi])
else:
if xi not in training_frame.names:
raise H2OValueError("Column %s not in the training frame" % xi)
xset.add(xi)
x = list(xset)
parms["x"] = x
self.build_model(parms)
|
def train(self, x=None, y=None, training_frame=None, offset_column=None, fold_column=None, weights_column=None,
validation_frame=None, **params):
"""
Train the model synchronously (i.e. do not return until the model finishes training).
To train asynchronously call :meth:`start`.
:param x: A list of column names or indices indicating the predictor columns.
:param y: An index or a column name indicating the response column.
:param training_frame: The H2OFrame having the columns indicated by x and y (as well as any
additional columns specified by fold, offset, and weights).
:param offset_column: The name or index of the column in training_frame that holds the offsets.
:param fold_column: The name or index of the column in training_frame that holds the per-row fold
assignments.
:param weights_column: The name or index of the column in training_frame that holds the per-row weights.
:param validation_frame: H2OFrame with validation data to be scored on while training.
"""
algo_params = locals()
parms = self._parms.copy()
parms.update({k: v for k, v in algo_params.items() if k not in ["self", "params", "algo_params", "parms"]})
# dictionaries have special handling in grid search, avoid the implicit conversion
parms["search_criteria"] = None if self.search_criteria is None else str(self.search_criteria)
parms["hyper_parameters"] = None if self.hyper_params is None else str(self.hyper_params) # unique to grid search
parms.update({k: v for k, v in list(self.model._parms.items()) if v is not None}) # unique to grid search
parms.update(params)
if '__class__' in parms:  # FIXME: hack for PY3
del parms['__class__']
y = algo_params["y"]
tframe = algo_params["training_frame"]
if tframe is None: raise ValueError("Missing training_frame")
if y is not None:
if is_type(y, list, tuple):
if len(y) == 1:
parms["y"] = y[0]
else:
raise ValueError('y must be a single column reference')
if x is None:
if(isinstance(y, int)):
xset = set(range(training_frame.ncols)) - {y}
else:
xset = set(training_frame.names) - {y}
else:
xset = set()
if is_type(x, int, str): x = [x]
for xi in x:
if is_type(xi, int):
if not (-training_frame.ncols <= xi < training_frame.ncols):
raise H2OValueError("Column %d does not exist in the training frame" % xi)
xset.add(training_frame.names[xi])
else:
if xi not in training_frame.names:
raise H2OValueError("Column %s not in the training frame" % xi)
xset.add(xi)
x = list(xset)
parms["x"] = x
self.build_model(parms)
|
[
"Train",
"the",
"model",
"synchronously",
"(",
"i",
".",
"e",
".",
"do",
"not",
"return",
"until",
"the",
"model",
"finishes",
"training",
")",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/grid/grid_search.py#L154-L209
|
[
"def",
"train",
"(",
"self",
",",
"x",
"=",
"None",
",",
"y",
"=",
"None",
",",
"training_frame",
"=",
"None",
",",
"offset_column",
"=",
"None",
",",
"fold_column",
"=",
"None",
",",
"weights_column",
"=",
"None",
",",
"validation_frame",
"=",
"None",
",",
"*",
"*",
"params",
")",
":",
"algo_params",
"=",
"locals",
"(",
")",
"parms",
"=",
"self",
".",
"_parms",
".",
"copy",
"(",
")",
"parms",
".",
"update",
"(",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"algo_params",
".",
"items",
"(",
")",
"if",
"k",
"not",
"in",
"[",
"\"self\"",
",",
"\"params\"",
",",
"\"algo_params\"",
",",
"\"parms\"",
"]",
"}",
")",
"# dictionaries have special handling in grid search, avoid the implicit conversion",
"parms",
"[",
"\"search_criteria\"",
"]",
"=",
"None",
"if",
"self",
".",
"search_criteria",
"is",
"None",
"else",
"str",
"(",
"self",
".",
"search_criteria",
")",
"parms",
"[",
"\"hyper_parameters\"",
"]",
"=",
"None",
"if",
"self",
".",
"hyper_params",
"is",
"None",
"else",
"str",
"(",
"self",
".",
"hyper_params",
")",
"# unique to grid search",
"parms",
".",
"update",
"(",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"list",
"(",
"self",
".",
"model",
".",
"_parms",
".",
"items",
"(",
")",
")",
"if",
"v",
"is",
"not",
"None",
"}",
")",
"# unique to grid search",
"parms",
".",
"update",
"(",
"params",
")",
"if",
"'__class__'",
"in",
"parms",
":",
"# FIXME: hackt for PY3",
"del",
"parms",
"[",
"'__class__'",
"]",
"y",
"=",
"algo_params",
"[",
"\"y\"",
"]",
"tframe",
"=",
"algo_params",
"[",
"\"training_frame\"",
"]",
"if",
"tframe",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Missing training_frame\"",
")",
"if",
"y",
"is",
"not",
"None",
":",
"if",
"is_type",
"(",
"y",
",",
"list",
",",
"tuple",
")",
":",
"if",
"len",
"(",
"y",
")",
"==",
"1",
":",
"parms",
"[",
"\"y\"",
"]",
"=",
"y",
"[",
"0",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'y must be a single column reference'",
")",
"if",
"x",
"is",
"None",
":",
"if",
"(",
"isinstance",
"(",
"y",
",",
"int",
")",
")",
":",
"xset",
"=",
"set",
"(",
"range",
"(",
"training_frame",
".",
"ncols",
")",
")",
"-",
"{",
"y",
"}",
"else",
":",
"xset",
"=",
"set",
"(",
"training_frame",
".",
"names",
")",
"-",
"{",
"y",
"}",
"else",
":",
"xset",
"=",
"set",
"(",
")",
"if",
"is_type",
"(",
"x",
",",
"int",
",",
"str",
")",
":",
"x",
"=",
"[",
"x",
"]",
"for",
"xi",
"in",
"x",
":",
"if",
"is_type",
"(",
"xi",
",",
"int",
")",
":",
"if",
"not",
"(",
"-",
"training_frame",
".",
"ncols",
"<=",
"xi",
"<",
"training_frame",
".",
"ncols",
")",
":",
"raise",
"H2OValueError",
"(",
"\"Column %d does not exist in the training frame\"",
"%",
"xi",
")",
"xset",
".",
"add",
"(",
"training_frame",
".",
"names",
"[",
"xi",
"]",
")",
"else",
":",
"if",
"xi",
"not",
"in",
"training_frame",
".",
"names",
":",
"raise",
"H2OValueError",
"(",
"\"Column %s not in the training frame\"",
"%",
"xi",
")",
"xset",
".",
"add",
"(",
"xi",
")",
"x",
"=",
"list",
"(",
"xset",
")",
"parms",
"[",
"\"x\"",
"]",
"=",
"x",
"self",
".",
"build_model",
"(",
"parms",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
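To put the train() record above in context, a minimal end-to-end grid search might look like the sketch below; the dataset path, the response column, and the hyper-parameter values are illustrative assumptions, and the snippet presumes h2o.init() can reach a running cluster.

# Hypothetical grid-search run -- dataset path and hyper-parameter values are placeholders.
import h2o
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from h2o.grid.grid_search import H2OGridSearch

h2o.init()
train_frame = h2o.import_file("/tmp/prostate.csv")          # placeholder training data
train_frame["CAPSULE"] = train_frame["CAPSULE"].asfactor()  # binary response

hyper_params = {"max_depth": [3, 5], "learn_rate": [0.05, 0.1]}
grid = H2OGridSearch(H2OGradientBoostingEstimator(ntrees=50), hyper_params)

# x is omitted, so train() uses every column except the response as a predictor.
grid.train(y="CAPSULE", training_frame=train_frame)
grid.show()                                                  # sorted metric table of the 4 models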
test
|
H2OGridSearch.build_model
|
(internal)
|
h2o-py/h2o/grid/grid_search.py
|
def build_model(self, algo_params):
"""(internal)"""
if algo_params["training_frame"] is None: raise ValueError("Missing training_frame")
x = algo_params.pop("x")
y = algo_params.pop("y", None)
training_frame = algo_params.pop("training_frame")
validation_frame = algo_params.pop("validation_frame", None)
is_auto_encoder = (algo_params is not None) and ("autoencoder" in algo_params and algo_params["autoencoder"])
algo = self.model._compute_algo() # unique to grid search
is_unsupervised = is_auto_encoder or algo == "pca" or algo == "svd" or algo == "kmeans" or algo == "glrm"
if is_auto_encoder and y is not None: raise ValueError("y should not be specified for autoencoder.")
if not is_unsupervised and y is None: raise ValueError("Missing response")
if not is_unsupervised:
y = y if y in training_frame.names else training_frame.names[y]
self.model._estimator_type = "classifier" if training_frame.types[y] == "enum" else "regressor"
self._model_build(x, y, training_frame, validation_frame, algo_params)
|
def build_model(self, algo_params):
"""(internal)"""
if algo_params["training_frame"] is None: raise ValueError("Missing training_frame")
x = algo_params.pop("x")
y = algo_params.pop("y", None)
training_frame = algo_params.pop("training_frame")
validation_frame = algo_params.pop("validation_frame", None)
is_auto_encoder = (algo_params is not None) and ("autoencoder" in algo_params and algo_params["autoencoder"])
algo = self.model._compute_algo() # unique to grid search
is_unsupervised = is_auto_encoder or algo == "pca" or algo == "svd" or algo == "kmeans" or algo == "glrm"
if is_auto_encoder and y is not None: raise ValueError("y should not be specified for autoencoder.")
if not is_unsupervised and y is None: raise ValueError("Missing response")
if not is_unsupervised:
y = y if y in training_frame.names else training_frame.names[y]
self.model._estimator_type = "classifier" if training_frame.types[y] == "enum" else "regressor"
self._model_build(x, y, training_frame, validation_frame, algo_params)
|
[
"(",
"internal",
")"
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/grid/grid_search.py#L212-L227
|
[
"def",
"build_model",
"(",
"self",
",",
"algo_params",
")",
":",
"if",
"algo_params",
"[",
"\"training_frame\"",
"]",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Missing training_frame\"",
")",
"x",
"=",
"algo_params",
".",
"pop",
"(",
"\"x\"",
")",
"y",
"=",
"algo_params",
".",
"pop",
"(",
"\"y\"",
",",
"None",
")",
"training_frame",
"=",
"algo_params",
".",
"pop",
"(",
"\"training_frame\"",
")",
"validation_frame",
"=",
"algo_params",
".",
"pop",
"(",
"\"validation_frame\"",
",",
"None",
")",
"is_auto_encoder",
"=",
"(",
"algo_params",
"is",
"not",
"None",
")",
"and",
"(",
"\"autoencoder\"",
"in",
"algo_params",
"and",
"algo_params",
"[",
"\"autoencoder\"",
"]",
")",
"algo",
"=",
"self",
".",
"model",
".",
"_compute_algo",
"(",
")",
"# unique to grid search",
"is_unsupervised",
"=",
"is_auto_encoder",
"or",
"algo",
"==",
"\"pca\"",
"or",
"algo",
"==",
"\"svd\"",
"or",
"algo",
"==",
"\"kmeans\"",
"or",
"algo",
"==",
"\"glrm\"",
"if",
"is_auto_encoder",
"and",
"y",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"y should not be specified for autoencoder.\"",
")",
"if",
"not",
"is_unsupervised",
"and",
"y",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Missing response\"",
")",
"if",
"not",
"is_unsupervised",
":",
"y",
"=",
"y",
"if",
"y",
"in",
"training_frame",
".",
"names",
"else",
"training_frame",
".",
"names",
"[",
"y",
"]",
"self",
".",
"model",
".",
"_estimator_type",
"=",
"\"classifier\"",
"if",
"training_frame",
".",
"types",
"[",
"y",
"]",
"==",
"\"enum\"",
"else",
"\"regressor\"",
"self",
".",
"_model_build",
"(",
"x",
",",
"y",
",",
"training_frame",
",",
"validation_frame",
",",
"algo_params",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OGridSearch.predict
|
Predict on a dataset.
:param H2OFrame test_data: Data to be predicted on.
:returns: H2OFrame filled with predictions.
|
h2o-py/h2o/grid/grid_search.py
|
def predict(self, test_data):
"""
Predict on a dataset.
:param H2OFrame test_data: Data to be predicted on.
:returns: H2OFrame filled with predictions.
"""
return {model.model_id: model.predict(test_data) for model in self.models}
|
def predict(self, test_data):
"""
Predict on a dataset.
:param H2OFrame test_data: Data to be predicted on.
:returns: H2OFrame filled with predictions.
"""
return {model.model_id: model.predict(test_data) for model in self.models}
|
[
"Predict",
"on",
"a",
"dataset",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/grid/grid_search.py#L322-L329
|
[
"def",
"predict",
"(",
"self",
",",
"test_data",
")",
":",
"return",
"{",
"model",
".",
"model_id",
":",
"model",
".",
"predict",
"(",
"test_data",
")",
"for",
"model",
"in",
"self",
".",
"models",
"}"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OGridSearch.get_xval_models
|
Return a Model object.
:param str key: If None, return all cross-validated models; otherwise return the model
specified by the key.
:returns: A model or a list of models.
|
h2o-py/h2o/grid/grid_search.py
|
def get_xval_models(self, key=None):
"""
Return a Model object.
:param str key: If None, return all cross-validated models; otherwise return the model
specified by the key.
:returns: A model or a list of models.
"""
return {model.model_id: model.get_xval_models(key) for model in self.models}
|
def get_xval_models(self, key=None):
"""
Return a Model object.
:param str key: If None, return all cross-validated models; otherwise return the model
specified by the key.
:returns: A model or a list of models.
"""
return {model.model_id: model.get_xval_models(key) for model in self.models}
|
[
"Return",
"a",
"Model",
"object",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/grid/grid_search.py#L342-L350
|
[
"def",
"get_xval_models",
"(",
"self",
",",
"key",
"=",
"None",
")",
":",
"return",
"{",
"model",
".",
"model_id",
":",
"model",
".",
"get_xval_models",
"(",
"key",
")",
"for",
"model",
"in",
"self",
".",
"models",
"}"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OGridSearch.deepfeatures
|
Obtain a hidden layer's details on a dataset.
:param test_data: Data to create a feature space on.
:param int layer: Index of the hidden layer.
:returns: A dictionary of hidden layer details for each model.
|
h2o-py/h2o/grid/grid_search.py
|
def deepfeatures(self, test_data, layer):
"""
Obtain a hidden layer's details on a dataset.
:param test_data: Data to create a feature space on.
:param int layer: Index of the hidden layer.
:returns: A dictionary of hidden layer details for each model.
"""
return {model.model_id: model.deepfeatures(test_data, layer) for model in self.models}
|
def deepfeatures(self, test_data, layer):
"""
Obtain a hidden layer's details on a dataset.
:param test_data: Data to create a feature space on.
:param int layer: Index of the hidden layer.
:returns: A dictionary of hidden layer details for each model.
"""
return {model.model_id: model.deepfeatures(test_data, layer) for model in self.models}
|
[
"Obtain",
"a",
"hidden",
"layer",
"s",
"details",
"on",
"a",
"dataset",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/grid/grid_search.py#L358-L366
|
[
"def",
"deepfeatures",
"(",
"self",
",",
"test_data",
",",
"layer",
")",
":",
"return",
"{",
"model",
".",
"model_id",
":",
"model",
".",
"deepfeatures",
"(",
"test_data",
",",
"layer",
")",
"for",
"model",
"in",
"self",
".",
"models",
"}"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OGridSearch.weights
|
Return the frame for the respective weight matrix.
:param matrix_id: an integer, ranging from 0 to number of layers, that specifies the weight matrix to return.
:returns: an H2OFrame which represents the weight matrix identified by matrix_id
|
h2o-py/h2o/grid/grid_search.py
|
def weights(self, matrix_id=0):
"""
Return the frame for the respective weight matrix.
:param matrix_id: an integer, ranging from 0 to number of layers, that specifies the weight matrix to return.
:returns: an H2OFrame which represents the weight matrix identified by matrix_id
"""
return {model.model_id: model.weights(matrix_id) for model in self.models}
|
def weights(self, matrix_id=0):
"""
Return the frame for the respective weight matrix.
:param matrix_id: an integer, ranging from 0 to number of layers, that specifies the weight matrix to return.
:returns: an H2OFrame which represents the weight matrix identified by matrix_id
"""
return {model.model_id: model.weights(matrix_id) for model in self.models}
|
[
"Return",
"the",
"frame",
"for",
"the",
"respective",
"weight",
"matrix",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/grid/grid_search.py#L369-L376
|
[
"def",
"weights",
"(",
"self",
",",
"matrix_id",
"=",
"0",
")",
":",
"return",
"{",
"model",
".",
"model_id",
":",
"model",
".",
"weights",
"(",
"matrix_id",
")",
"for",
"model",
"in",
"self",
".",
"models",
"}"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OGridSearch.biases
|
Return the frame for the respective bias vector.
:param vector_id: an integer, ranging from 0 to number of layers, that specifies the bias vector to return.
:returns: an H2OFrame which represents the bias vector identified by vector_id
|
h2o-py/h2o/grid/grid_search.py
|
def biases(self, vector_id=0):
"""
Return the frame for the respective bias vector.
:param vector_id: an integer, ranging from 0 to number of layers, that specifies the bias vector to return.
:returns: an H2OFrame which represents the bias vector identified by vector_id
"""
return {model.model_id: model.biases(vector_id) for model in self.models}
|
def biases(self, vector_id=0):
"""
Return the frame for the respective bias vector.
:param vector_id: an integer, ranging from 0 to number of layers, that specifies the bias vector to return.
:returns: an H2OFrame which represents the bias vector identified by vector_id
"""
return {model.model_id: model.biases(vector_id) for model in self.models}
|
[
"Return",
"the",
"frame",
"for",
"the",
"respective",
"bias",
"vector",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/grid/grid_search.py#L379-L386
|
[
"def",
"biases",
"(",
"self",
",",
"vector_id",
"=",
"0",
")",
":",
"return",
"{",
"model",
".",
"model_id",
":",
"model",
".",
"biases",
"(",
"vector_id",
")",
"for",
"model",
"in",
"self",
".",
"models",
"}"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OGridSearch.model_performance
|
Generate model metrics for this model on test_data.
:param test_data: Data set for which model metrics shall be computed against. All three of train, valid
and xval arguments are ignored if test_data is not None.
:param train: Report the training metrics for the model.
:param valid: Report the validation metrics for the model.
:param xval: Report the cross-validation metrics for the model.
:return: An object of class H2OModelMetrics.
|
h2o-py/h2o/grid/grid_search.py
|
def model_performance(self, test_data=None, train=False, valid=False, xval=False):
"""
Generate model metrics for this model on test_data.
:param test_data: Data set for which model metrics shall be computed against. All three of train, valid
and xval arguments are ignored if test_data is not None.
:param train: Report the training metrics for the model.
:param valid: Report the validation metrics for the model.
:param xval: Report the cross-validation metrics for the model.
:return: An object of class H2OModelMetrics.
"""
return {model.model_id: model.model_performance(test_data, train, valid, xval) for model in self.models}
|
def model_performance(self, test_data=None, train=False, valid=False, xval=False):
"""
Generate model metrics for this model on test_data.
:param test_data: Data set for which model metrics shall be computed against. All three of train, valid
and xval arguments are ignored if test_data is not None.
:param train: Report the training metrics for the model.
:param valid: Report the validation metrics for the model.
:param xval: Report the cross-validation metrics for the model.
:return: An object of class H2OModelMetrics.
"""
return {model.model_id: model.model_performance(test_data, train, valid, xval) for model in self.models}
|
[
"Generate",
"model",
"metrics",
"for",
"this",
"model",
"on",
"test_data",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/grid/grid_search.py#L416-L427
|
[
"def",
"model_performance",
"(",
"self",
",",
"test_data",
"=",
"None",
",",
"train",
"=",
"False",
",",
"valid",
"=",
"False",
",",
"xval",
"=",
"False",
")",
":",
"return",
"{",
"model",
".",
"model_id",
":",
"model",
".",
"model_performance",
"(",
"test_data",
",",
"train",
",",
"valid",
",",
"xval",
")",
"for",
"model",
"in",
"self",
".",
"models",
"}"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OGridSearch.summary
|
Print a detailed summary of the explored models.
|
h2o-py/h2o/grid/grid_search.py
|
def summary(self, header=True):
"""Print a detailed summary of the explored models."""
table = []
for model in self.models:
model_summary = model._model_json["output"]["model_summary"]
r_values = list(model_summary.cell_values[0])
r_values[0] = model.model_id
table.append(r_values)
# if h2o.can_use_pandas():
# import pandas
# pandas.options.display.max_rows = 20
# print pandas.DataFrame(table,columns=self.col_header)
# return
print()
if header:
print('Grid Summary:')
print()
H2ODisplay(table, ['Model Id'] + model_summary.col_header[1:], numalign="left", stralign="left")
|
def summary(self, header=True):
"""Print a detailed summary of the explored models."""
table = []
for model in self.models:
model_summary = model._model_json["output"]["model_summary"]
r_values = list(model_summary.cell_values[0])
r_values[0] = model.model_id
table.append(r_values)
# if h2o.can_use_pandas():
# import pandas
# pandas.options.display.max_rows = 20
# print pandas.DataFrame(table,columns=self.col_header)
# return
print()
if header:
print('Grid Summary:')
print()
H2ODisplay(table, ['Model Id'] + model_summary.col_header[1:], numalign="left", stralign="left")
|
[
"Print",
"a",
"detailed",
"summary",
"of",
"the",
"explored",
"models",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/grid/grid_search.py#L439-L457
|
[
"def",
"summary",
"(",
"self",
",",
"header",
"=",
"True",
")",
":",
"table",
"=",
"[",
"]",
"for",
"model",
"in",
"self",
".",
"models",
":",
"model_summary",
"=",
"model",
".",
"_model_json",
"[",
"\"output\"",
"]",
"[",
"\"model_summary\"",
"]",
"r_values",
"=",
"list",
"(",
"model_summary",
".",
"cell_values",
"[",
"0",
"]",
")",
"r_values",
"[",
"0",
"]",
"=",
"model",
".",
"model_id",
"table",
".",
"append",
"(",
"r_values",
")",
"# if h2o.can_use_pandas():",
"# import pandas",
"# pandas.options.display.max_rows = 20",
"# print pandas.DataFrame(table,columns=self.col_header)",
"# return",
"print",
"(",
")",
"if",
"header",
":",
"print",
"(",
"'Grid Summary:'",
")",
"print",
"(",
")",
"H2ODisplay",
"(",
"table",
",",
"[",
"'Model Id'",
"]",
"+",
"model_summary",
".",
"col_header",
"[",
"1",
":",
"]",
",",
"numalign",
"=",
"\"left\"",
",",
"stralign",
"=",
"\"left\"",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OGridSearch.show
|
Print models sorted by metric.
|
h2o-py/h2o/grid/grid_search.py
|
def show(self):
"""Print models sorted by metric."""
hyper_combos = itertools.product(*list(self.hyper_params.values()))
if not self.models:
c_values = [[idx + 1, list(val)] for idx, val in enumerate(hyper_combos)]
print(H2OTwoDimTable(
col_header=['Model', 'Hyperparameters: [' + ', '.join(list(self.hyper_params.keys())) + ']'],
table_header='Grid Search of Model ' + self.model.__class__.__name__, cell_values=c_values))
else:
print(self.sorted_metric_table())
|
def show(self):
"""Print models sorted by metric."""
hyper_combos = itertools.product(*list(self.hyper_params.values()))
if not self.models:
c_values = [[idx + 1, list(val)] for idx, val in enumerate(hyper_combos)]
print(H2OTwoDimTable(
col_header=['Model', 'Hyperparameters: [' + ', '.join(list(self.hyper_params.keys())) + ']'],
table_header='Grid Search of Model ' + self.model.__class__.__name__, cell_values=c_values))
else:
print(self.sorted_metric_table())
|
[
"Print",
"models",
"sorted",
"by",
"metric",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/grid/grid_search.py#L460-L469
|
[
"def",
"show",
"(",
"self",
")",
":",
"hyper_combos",
"=",
"itertools",
".",
"product",
"(",
"*",
"list",
"(",
"self",
".",
"hyper_params",
".",
"values",
"(",
")",
")",
")",
"if",
"not",
"self",
".",
"models",
":",
"c_values",
"=",
"[",
"[",
"idx",
"+",
"1",
",",
"list",
"(",
"val",
")",
"]",
"for",
"idx",
",",
"val",
"in",
"enumerate",
"(",
"hyper_combos",
")",
"]",
"print",
"(",
"H2OTwoDimTable",
"(",
"col_header",
"=",
"[",
"'Model'",
",",
"'Hyperparameters: ['",
"+",
"', '",
".",
"join",
"(",
"list",
"(",
"self",
".",
"hyper_params",
".",
"keys",
"(",
")",
")",
")",
"+",
"']'",
"]",
",",
"table_header",
"=",
"'Grid Search of Model '",
"+",
"self",
".",
"model",
".",
"__class__",
".",
"__name__",
",",
"cell_values",
"=",
"c_values",
")",
")",
"else",
":",
"print",
"(",
"self",
".",
"sorted_metric_table",
"(",
")",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OGridSearch.varimp
|
Pretty print the variable importances, or return them in a list/pandas DataFrame.
:param bool use_pandas: If True, then the variable importances will be returned as a pandas data frame.
:returns: A dictionary of lists or Pandas DataFrame instances.
|
h2o-py/h2o/grid/grid_search.py
|
def varimp(self, use_pandas=False):
"""
Pretty print the variable importances, or return them in a list/pandas DataFrame.
:param bool use_pandas: If True, then the variable importances will be returned as a pandas data frame.
:returns: A dictionary of lists or Pandas DataFrame instances.
"""
return {model.model_id: model.varimp(use_pandas) for model in self.models}
|
def varimp(self, use_pandas=False):
"""
Pretty print the variable importances, or return them in a list/pandas DataFrame.
:param bool use_pandas: If True, then the variable importances will be returned as a pandas data frame.
:returns: A dictionary of lists or Pandas DataFrame instances.
"""
return {model.model_id: model.varimp(use_pandas) for model in self.models}
|
[
"Pretty",
"print",
"the",
"variable",
"importances",
"or",
"return",
"them",
"in",
"a",
"list",
"/",
"pandas",
"DataFrame",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/grid/grid_search.py#L472-L480
|
[
"def",
"varimp",
"(",
"self",
",",
"use_pandas",
"=",
"False",
")",
":",
"return",
"{",
"model",
".",
"model_id",
":",
"model",
".",
"varimp",
"(",
"use_pandas",
")",
"for",
"model",
"in",
"self",
".",
"models",
"}"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OGridSearch.pprint_coef
|
Pretty print the coefficients table (includes normalized coefficients).
|
h2o-py/h2o/grid/grid_search.py
|
def pprint_coef(self):
"""Pretty print the coefficents table (includes normalized coefficients)."""
for i, model in enumerate(self.models):
print('Model', i)
model.pprint_coef()
print()
|
def pprint_coef(self):
"""Pretty print the coefficents table (includes normalized coefficients)."""
for i, model in enumerate(self.models):
print('Model', i)
model.pprint_coef()
print()
|
[
"Pretty",
"print",
"the",
"coefficents",
"table",
"(",
"includes",
"normalized",
"coefficients",
")",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/grid/grid_search.py#L543-L548
|
[
"def",
"pprint_coef",
"(",
"self",
")",
":",
"for",
"i",
",",
"model",
"in",
"enumerate",
"(",
"self",
".",
"models",
")",
":",
"print",
"(",
"'Model'",
",",
"i",
")",
"model",
".",
"pprint_coef",
"(",
")",
"print",
"(",
")"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OGridSearch.get_hyperparams
|
Get the hyperparameters of a model explored by grid search.
:param str id: The model id of the model with hyperparameters of interest.
:param bool display: Flag to indicate whether to display the hyperparameter names.
:returns: A list of the hyperparameters for the specified model.
|
h2o-py/h2o/grid/grid_search.py
|
def get_hyperparams(self, id, display=True):
"""
Get the hyperparameters of a model explored by grid search.
:param str id: The model id of the model with hyperparameters of interest.
:param bool display: Flag to indicate whether to display the hyperparameter names.
:returns: A list of the hyperparameters for the specified model.
"""
idx = id if is_type(id, int) else self.model_ids.index(id)
model = self[idx]
# if cross-validation is turned on, parameters in one of the fold model actually contains the max_runtime_secs
# parameter and not the main model that is returned.
if model._is_xvalidated:
model = h2o.get_model(model._xval_keys[0])
res = [model.params[h]['actual'][0] if isinstance(model.params[h]['actual'], list)
else model.params[h]['actual']
for h in self.hyper_params]
if display: print('Hyperparameters: [' + ', '.join(list(self.hyper_params.keys())) + ']')
return res
|
def get_hyperparams(self, id, display=True):
"""
Get the hyperparameters of a model explored by grid search.
:param str id: The model id of the model with hyperparameters of interest.
:param bool display: Flag to indicate whether to display the hyperparameter names.
:returns: A list of the hyperparameters for the specified model.
"""
idx = id if is_type(id, int) else self.model_ids.index(id)
model = self[idx]
# if cross-validation is turned on, parameters in one of the fold model actually contains the max_runtime_secs
# parameter and not the main model that is returned.
if model._is_xvalidated:
model = h2o.get_model(model._xval_keys[0])
res = [model.params[h]['actual'][0] if isinstance(model.params[h]['actual'], list)
else model.params[h]['actual']
for h in self.hyper_params]
if display: print('Hyperparameters: [' + ', '.join(list(self.hyper_params.keys())) + ']')
return res
|
[
"Get",
"the",
"hyperparameters",
"of",
"a",
"model",
"explored",
"by",
"grid",
"search",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/grid/grid_search.py#L686-L707
|
[
"def",
"get_hyperparams",
"(",
"self",
",",
"id",
",",
"display",
"=",
"True",
")",
":",
"idx",
"=",
"id",
"if",
"is_type",
"(",
"id",
",",
"int",
")",
"else",
"self",
".",
"model_ids",
".",
"index",
"(",
"id",
")",
"model",
"=",
"self",
"[",
"idx",
"]",
"# if cross-validation is turned on, parameters in one of the fold model actuall contains the max_runtime_secs",
"# parameter and not the main model that is returned.",
"if",
"model",
".",
"_is_xvalidated",
":",
"model",
"=",
"h2o",
".",
"get_model",
"(",
"model",
".",
"_xval_keys",
"[",
"0",
"]",
")",
"res",
"=",
"[",
"model",
".",
"params",
"[",
"h",
"]",
"[",
"'actual'",
"]",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"model",
".",
"params",
"[",
"h",
"]",
"[",
"'actual'",
"]",
",",
"list",
")",
"else",
"model",
".",
"params",
"[",
"h",
"]",
"[",
"'actual'",
"]",
"for",
"h",
"in",
"self",
".",
"hyper_params",
"]",
"if",
"display",
":",
"print",
"(",
"'Hyperparameters: ['",
"+",
"', '",
".",
"join",
"(",
"list",
"(",
"self",
".",
"hyper_params",
".",
"keys",
"(",
")",
")",
")",
"+",
"']'",
")",
"return",
"res"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OGridSearch.get_hyperparams_dict
|
Derive and return the model parameters used to train the particular grid search model.
:param str id: The model id of the model with hyperparameters of interest.
:param bool display: Flag to indicate whether to display the hyperparameter names.
:returns: A dict of model parameters derived from the hyper-parameters used to train this particular model.
|
h2o-py/h2o/grid/grid_search.py
|
def get_hyperparams_dict(self, id, display=True):
"""
Derive and return the model parameters used to train the particular grid search model.
:param str id: The model id of the model with hyperparameters of interest.
:param bool display: Flag to indicate whether to display the hyperparameter names.
:returns: A dict of model parameters derived from the hyper-parameters used to train this particular model.
"""
idx = id if is_type(id, int) else self.model_ids.index(id)
model = self[idx]
model_params = dict()
# if cross-validation is turned on, parameters in one of the fold model actually contains the max_runtime_secs
# parameter and not the main model that is returned.
if model._is_xvalidated:
model = h2o.get_model(model._xval_keys[0])
for param_name in self.hyper_names:
model_params[param_name] = model.params[param_name]['actual'][0] if \
isinstance(model.params[param_name]['actual'], list) else model.params[param_name]['actual']
if display: print('Hyperparameters: [' + ', '.join(list(self.hyper_params.keys())) + ']')
return model_params
|
def get_hyperparams_dict(self, id, display=True):
"""
Derive and return the model parameters used to train the particular grid search model.
:param str id: The model id of the model with hyperparameters of interest.
:param bool display: Flag to indicate whether to display the hyperparameter names.
:returns: A dict of model parameters derived from the hyper-parameters used to train this particular model.
"""
idx = id if is_type(id, int) else self.model_ids.index(id)
model = self[idx]
model_params = dict()
# if cross-validation is turned on, parameters in one of the fold model actually contains the max_runtime_secs
# parameter and not the main model that is returned.
if model._is_xvalidated:
model = h2o.get_model(model._xval_keys[0])
for param_name in self.hyper_names:
model_params[param_name] = model.params[param_name]['actual'][0] if \
isinstance(model.params[param_name]['actual'], list) else model.params[param_name]['actual']
if display: print('Hyperparameters: [' + ', '.join(list(self.hyper_params.keys())) + ']')
return model_params
|
[
"Derived",
"and",
"returned",
"the",
"model",
"parameters",
"used",
"to",
"train",
"the",
"particular",
"grid",
"search",
"model",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/grid/grid_search.py#L710-L734
|
[
"def",
"get_hyperparams_dict",
"(",
"self",
",",
"id",
",",
"display",
"=",
"True",
")",
":",
"idx",
"=",
"id",
"if",
"is_type",
"(",
"id",
",",
"int",
")",
"else",
"self",
".",
"model_ids",
".",
"index",
"(",
"id",
")",
"model",
"=",
"self",
"[",
"idx",
"]",
"model_params",
"=",
"dict",
"(",
")",
"# if cross-validation is turned on, parameters in one of the fold model actual contains the max_runtime_secs",
"# parameter and not the main model that is returned.",
"if",
"model",
".",
"_is_xvalidated",
":",
"model",
"=",
"h2o",
".",
"get_model",
"(",
"model",
".",
"_xval_keys",
"[",
"0",
"]",
")",
"for",
"param_name",
"in",
"self",
".",
"hyper_names",
":",
"model_params",
"[",
"param_name",
"]",
"=",
"model",
".",
"params",
"[",
"param_name",
"]",
"[",
"'actual'",
"]",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"model",
".",
"params",
"[",
"param_name",
"]",
"[",
"'actual'",
"]",
",",
"list",
")",
"else",
"model",
".",
"params",
"[",
"param_name",
"]",
"[",
"'actual'",
"]",
"if",
"display",
":",
"print",
"(",
"'Hyperparameters: ['",
"+",
"', '",
".",
"join",
"(",
"list",
"(",
"self",
".",
"hyper_params",
".",
"keys",
"(",
")",
")",
")",
"+",
"']'",
")",
"return",
"model_params"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|
test
|
H2OGridSearch.get_grid
|
Retrieve an H2OGridSearch instance.
Optionally specify a metric by which to sort models and a sort order.
Note that if neither cross-validation nor a validation frame is used in the grid search, then the
training metrics will display in the "get grid" output. If a validation frame is passed to the grid, and
``nfolds = 0``, then the validation metrics will display. However, if ``nfolds`` > 1, then cross-validation
metrics will display even if a validation frame is provided.
:param str sort_by: A metric by which to sort the models in the grid space. Choices are: ``"logloss"``,
``"residual_deviance"``, ``"mse"``, ``"auc"``, ``"r2"``, ``"accuracy"``, ``"precision"``, ``"recall"``,
``"f1"``, etc.
:param bool decreasing: Sort the models in decreasing order of metric if true, otherwise sort in increasing
order (default).
:returns: A new H2OGridSearch instance optionally sorted on the specified metric.
|
h2o-py/h2o/grid/grid_search.py
|
def get_grid(self, sort_by=None, decreasing=None):
"""
Retrieve an H2OGridSearch instance.
Optionally specify a metric by which to sort models and a sort order.
Note that if neither cross-validation nor a validation frame is used in the grid search, then the
training metrics will display in the "get grid" output. If a validation frame is passed to the grid, and
``nfolds = 0``, then the validation metrics will display. However, if ``nfolds`` > 1, then cross-validation
metrics will display even if a validation frame is provided.
:param str sort_by: A metric by which to sort the models in the grid space. Choices are: ``"logloss"``,
``"residual_deviance"``, ``"mse"``, ``"auc"``, ``"r2"``, ``"accuracy"``, ``"precision"``, ``"recall"``,
``"f1"``, etc.
:param bool decreasing: Sort the models in decreasing order of metric if true, otherwise sort in increasing
order (default).
:returns: A new H2OGridSearch instance optionally sorted on the specified metric.
"""
if sort_by is None and decreasing is None: return self
grid_json = h2o.api("GET /99/Grids/%s" % self._id, data={"sort_by": sort_by, "decreasing": decreasing})
grid = H2OGridSearch(self.model, self.hyper_params, self._id)
grid.models = [h2o.get_model(key['name']) for key in grid_json['model_ids']] # reordered
first_model_json = h2o.api("GET /99/Models/%s" % grid_json['model_ids'][0]['name'])['models'][0]
model_class = H2OGridSearch._metrics_class(first_model_json)
m = model_class()
m._id = self._id
m._grid_json = grid_json
# m._metrics_class = metrics_class
m._parms = grid._parms
H2OEstimator.mixin(grid, model_class)
grid.__dict__.update(m.__dict__.copy())
return grid
|
def get_grid(self, sort_by=None, decreasing=None):
"""
Retrieve an H2OGridSearch instance.
Optionally specify a metric by which to sort models and a sort order.
Note that if neither cross-validation nor a validation frame is used in the grid search, then the
training metrics will display in the "get grid" output. If a validation frame is passed to the grid, and
``nfolds = 0``, then the validation metrics will display. However, if ``nfolds`` > 1, then cross-validation
metrics will display even if a validation frame is provided.
:param str sort_by: A metric by which to sort the models in the grid space. Choices are: ``"logloss"``,
``"residual_deviance"``, ``"mse"``, ``"auc"``, ``"r2"``, ``"accuracy"``, ``"precision"``, ``"recall"``,
``"f1"``, etc.
:param bool decreasing: Sort the models in decreasing order of metric if true, otherwise sort in increasing
order (default).
:returns: A new H2OGridSearch instance optionally sorted on the specified metric.
"""
if sort_by is None and decreasing is None: return self
grid_json = h2o.api("GET /99/Grids/%s" % self._id, data={"sort_by": sort_by, "decreasing": decreasing})
grid = H2OGridSearch(self.model, self.hyper_params, self._id)
grid.models = [h2o.get_model(key['name']) for key in grid_json['model_ids']] # reordered
first_model_json = h2o.api("GET /99/Models/%s" % grid_json['model_ids'][0]['name'])['models'][0]
model_class = H2OGridSearch._metrics_class(first_model_json)
m = model_class()
m._id = self._id
m._grid_json = grid_json
# m._metrics_class = metrics_class
m._parms = grid._parms
H2OEstimator.mixin(grid, model_class)
grid.__dict__.update(m.__dict__.copy())
return grid
|
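A minimal sketch of re-sorting a finished grid with ``get_grid`` (not part of the source record above); it continues the illustrative ``grid`` object from the previous sketch and assumes a binomial model so that "auc" is a valid sort metric.

# Re-sort the finished grid by a metric; "auc" assumes a binomial model.
sorted_grid = grid.get_grid(sort_by="auc", decreasing=True)

# A new H2OGridSearch is returned with the models reordered by the metric;
# the original grid object is left unchanged.
best_id = sorted_grid.model_ids[0]        # id of the best model by AUC
best_model = h2o.get_model(best_id)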
[
"Retrieve",
"an",
"H2OGridSearch",
"instance",
"."
] |
h2oai/h2o-3
|
python
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/grid/grid_search.py#L770-L802
|
[
"def",
"get_grid",
"(",
"self",
",",
"sort_by",
"=",
"None",
",",
"decreasing",
"=",
"None",
")",
":",
"if",
"sort_by",
"is",
"None",
"and",
"decreasing",
"is",
"None",
":",
"return",
"self",
"grid_json",
"=",
"h2o",
".",
"api",
"(",
"\"GET /99/Grids/%s\"",
"%",
"self",
".",
"_id",
",",
"data",
"=",
"{",
"\"sort_by\"",
":",
"sort_by",
",",
"\"decreasing\"",
":",
"decreasing",
"}",
")",
"grid",
"=",
"H2OGridSearch",
"(",
"self",
".",
"model",
",",
"self",
".",
"hyper_params",
",",
"self",
".",
"_id",
")",
"grid",
".",
"models",
"=",
"[",
"h2o",
".",
"get_model",
"(",
"key",
"[",
"'name'",
"]",
")",
"for",
"key",
"in",
"grid_json",
"[",
"'model_ids'",
"]",
"]",
"# reordered",
"first_model_json",
"=",
"h2o",
".",
"api",
"(",
"\"GET /99/Models/%s\"",
"%",
"grid_json",
"[",
"'model_ids'",
"]",
"[",
"0",
"]",
"[",
"'name'",
"]",
")",
"[",
"'models'",
"]",
"[",
"0",
"]",
"model_class",
"=",
"H2OGridSearch",
".",
"_metrics_class",
"(",
"first_model_json",
")",
"m",
"=",
"model_class",
"(",
")",
"m",
".",
"_id",
"=",
"self",
".",
"_id",
"m",
".",
"_grid_json",
"=",
"grid_json",
"# m._metrics_class = metrics_class",
"m",
".",
"_parms",
"=",
"grid",
".",
"_parms",
"H2OEstimator",
".",
"mixin",
"(",
"grid",
",",
"model_class",
")",
"grid",
".",
"__dict__",
".",
"update",
"(",
"m",
".",
"__dict__",
".",
"copy",
"(",
")",
")",
"return",
"grid"
] |
dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8
|