Deep Learning model demo.
def deeplearning(interactive=True, echo=True, testing=False): """Deep Learning model demo.""" def demo_body(go): """ Demo of H2O's Deep Learning model. This demo uploads a dataset to h2o, parses it, and shows a description. Then it divides the dataset into training and test sets, builds a GLM from the training set, and makes predictions for the test set. Finally, default performance metrics are displayed. """ go() # Connect to H2O h2o.init() go() # Upload the prostate dataset that comes included in the h2o python package prostate = h2o.load_dataset("prostate") go() # Print a description of the prostate data prostate.describe() go() # Randomly split the dataset into ~70/30, training/test sets train, test = prostate.split_frame(ratios=[0.70]) go() # Convert the response columns to factors (for binary classification problems) train["CAPSULE"] = train["CAPSULE"].asfactor() test["CAPSULE"] = test["CAPSULE"].asfactor() go() # Build a (classification) GLM from h2o.estimators import H2ODeepLearningEstimator prostate_dl = H2ODeepLearningEstimator(activation="Tanh", hidden=[10, 10, 10], epochs=10000) prostate_dl.train(x=list(set(prostate.col_names) - {"ID", "CAPSULE"}), y="CAPSULE", training_frame=train) go() # Show the model prostate_dl.show() go() # Predict on the test set and show the first ten predictions predictions = prostate_dl.predict(test) predictions.show() go() # Show default performance metrics performance = prostate_dl.model_performance(test) performance.show() # Execute: _run_demo(demo_body, interactive, echo, testing)
GLM model demo.
def glm(interactive=True, echo=True, testing=False): """GLM model demo.""" def demo_body(go): """ Demo of H2O's Generalized Linear Estimator. This demo uploads a dataset to h2o, parses it, and shows a description. Then it divides the dataset into training and test sets, builds a GLM from the training set, and makes predictions for the test set. Finally, default performance metrics are displayed. """ go() # Connect to H2O h2o.init() go() # Upload the prostate dataset that comes included in the h2o python package prostate = h2o.load_dataset("prostate") go() # Print a description of the prostate data prostate.describe() go() # Randomly split the dataset into ~70/30, training/test sets train, test = prostate.split_frame(ratios=[0.70]) go() # Convert the response columns to factors (for binary classification problems) train["CAPSULE"] = train["CAPSULE"].asfactor() test["CAPSULE"] = test["CAPSULE"].asfactor() go() # Build a (classification) GLM from h2o.estimators import H2OGeneralizedLinearEstimator prostate_glm = H2OGeneralizedLinearEstimator(family="binomial", alpha=[0.5]) prostate_glm.train(x=["AGE", "RACE", "PSA", "VOL", "GLEASON"], y="CAPSULE", training_frame=train) go() # Show the model prostate_glm.show() go() # Predict on the test set and show the first ten predictions predictions = prostate_glm.predict(test) predictions.show() go() # Show default performance metrics performance = prostate_glm.model_performance(test) performance.show() # Execute: _run_demo(demo_body, interactive, echo, testing)
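Both demo functions above share the same signature, so a non-interactive run is just a matter of flipping the flags. A minimal sketch (the import path is an assumption; in the h2o-3 tree these functions live in the h2o.demos module):
# Assumption: the demo functions are importable from h2o.demos; adjust the path for your install.
from h2o.demos import glm as glm_demo

# Run the GLM demo end-to-end without pausing for keypresses or echoing the commands.
glm_demo(interactive=False, echo=False, testing=False)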
Execute the demo echoing commands and pausing for user input.
def _run_demo(body_fn, interactive, echo, testing): """ Execute the demo, echoing commands and pausing for user input. :param body_fn: function that contains the sequence of demo's commands. :param interactive: If True, the user will be prompted to continue the demonstration after every segment. :param echo: If True, the python commands that are executed will be displayed. :param testing: Used for pyunit testing. h2o.init() will not be called if set to True. :type body_fn: function """ import colorama from colorama import Style, Fore colorama.init() class StopExecution(Exception): """Helper class for cancelling the demo.""" assert_is_type(body_fn, type(_run_demo)) # Reformat description by removing extra spaces; then print it. if body_fn.__doc__: desc_lines = body_fn.__doc__.split("\n") while desc_lines[0].strip() == "": desc_lines = desc_lines[1:] while desc_lines[-1].strip() == "": desc_lines = desc_lines[:-1] strip_spaces = min(len(line) - len(line.lstrip(" ")) for line in desc_lines[1:] if line.strip() != "") maxlen = max(len(line) for line in desc_lines) print(Fore.CYAN) print("-" * maxlen) for line in desc_lines: print(line[strip_spaces:].rstrip()) print("-" * maxlen) print(Style.RESET_ALL, end="") # Prepare the executor function def controller(): """Print to console the next block of commands, and wait for keypress.""" try: raise RuntimeError("Catch me!") except RuntimeError: print() # Extract and print lines that will be executed next if echo: tb = sys.exc_info()[2] fr = tb.tb_frame.f_back filename = fr.f_code.co_filename linecache.checkcache(filename) line = linecache.getline(filename, fr.f_lineno, fr.f_globals).rstrip() indent_len = len(line) - len(line.lstrip(" ")) assert line[indent_len:] == "go()" i = fr.f_lineno output_lines = [] n_blank_lines = 0 while True: i += 1 line = linecache.getline(filename, i, fr.f_globals).rstrip() # Detect dedent if line[:indent_len].strip() != "": break line = line[indent_len:] if line == "go()": break style = Fore.LIGHTBLACK_EX if line.lstrip().startswith("#") else Style.BRIGHT prompt = "... " if line.startswith(" ") else ">>> " output_lines.append(Fore.CYAN + prompt + Fore.RESET + style + line + Style.RESET_ALL) del style # Otherwise exception print-outs may get messed-up... if line.strip() == "": n_blank_lines += 1 if n_blank_lines > 5: break # Just in case we hit file end or something else: n_blank_lines = 0 for line in output_lines[:-n_blank_lines]: print(line) # Prompt for user input if interactive: print("\n" + Style.DIM + "(press any key)" + Style.RESET_ALL, end="") key = _wait_for_keypress() print("\r \r", end="") if key.lower() == "q": raise StopExecution() # Replace h2o.init() with a stub when running in "test" mode _h2o_init = h2o.init if testing: h2o.init = lambda *args, **kwargs: None # Run the test try: body_fn(controller) print("\n" + Fore.CYAN + "---- End of Demo ----" + Style.RESET_ALL) except (StopExecution, KeyboardInterrupt): print("\n" + Fore.RED + "---- Demo aborted ----" + Style.RESET_ALL) # Clean-up if testing: h2o.init = _h2o_init print() colorama.deinit()
Wait for a key press on the console and return it.
def _wait_for_keypress(): """ Wait for a key press on the console and return it. Borrowed from http://stackoverflow.com/questions/983354/how-do-i-make-python-to-wait-for-a-pressed-key """ result = None if os.name == "nt": # noinspection PyUnresolvedReferences import msvcrt result = msvcrt.getch() else: import termios fd = sys.stdin.fileno() oldterm = termios.tcgetattr(fd) newattr = termios.tcgetattr(fd) newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO termios.tcsetattr(fd, termios.TCSANOW, newattr) try: result = sys.stdin.read(1) except IOError: pass finally: termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm) return result
Create new H2OTwoDimTable object from list of (key, value) tuples which are a pre-cursor to JSON dict.
def make(keyvals):
    """
    Create new H2OTwoDimTable object from list of (key, value) tuples which are a pre-cursor to JSON dict.

    :param keyvals: list of (key, value) tuples
    :return: new H2OTwoDimTable object
    """
    kwargs = {}
    for key, value in keyvals:
        if key == "columns":
            kwargs["col_formats"] = [c["format"] for c in value]
            kwargs["col_types"] = [c["type"] for c in value]
            kwargs["col_header"] = [c["name"] for c in value]
            kwargs["row_header"] = len(value)
        if key == "name":
            kwargs["table_header"] = value
        if key == "description":
            kwargs["table_description"] = value
        if key == "data":
            kwargs["raw_cell_values"] = value
    return H2OTwoDimTable(**kwargs)
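A quick sketch of the (key, value) payload that make() expects. The field names mirror the keys handled above; the column metadata and cell values are illustrative only:
# Hypothetical payload; "data" carries the raw cell values as they appear in the JSON response.
keyvals = [
    ("name", "Example table"),
    ("description", "A tiny two-column table"),
    ("columns", [{"name": "id", "type": "int", "format": "%d"},
                 {"name": "val", "type": "double", "format": "%.2f"}]),
    ("data", [[1, 2, 3], [0.5, 1.5, 2.5]]),
]
tbl = make(keyvals)
tbl.show()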
Convert to a python data frame.
def as_data_frame(self):
    """Convert to a python 'data frame'."""
    if can_use_pandas():
        import pandas
        pandas.options.display.max_colwidth = 70
        return pandas.DataFrame(self._cell_values, columns=self._col_header)
    return self
Print the contents of this table.
def show(self, header=True): """Print the contents of this table.""" # if h2o.can_use_pandas(): # import pandas # pandas.options.display.max_rows = 20 # print pandas.DataFrame(self._cell_values,columns=self._col_header) # return if header and self._table_header: print(self._table_header + ":", end=' ') if self._table_description: print(self._table_description) print() table = copy.deepcopy(self._cell_values) nr = 0 if _is_list_of_lists(table): nr = len( table) # only set if we truly have multiple rows... not just one long row :) if nr > 20: # create a truncated view of the table, first/last 5 rows trunc_table = [] trunc_table += [v for v in table[:5]] trunc_table.append(["---"] * len(table[0])) trunc_table += [v for v in table[(nr - 5):]] table = trunc_table H2ODisplay(table, self._col_header, numalign="left", stralign="left") if nr > 20 and can_use_pandas(): print('\nSee the whole table with table.as_data_frame()')
CSV reader yielding lists of unicode strings (PY3: str).
def reader(stream, dialect=DIALECT, encoding=False, **fmtparams): r"""CSV reader yielding lists of ``unicode`` strings (PY3: ``str``). Args: stream: Iterable of text (``unicode``, PY3: ``str``) lines. If an ``encoding`` is given, iterable of encoded (``str``, PY3: ``bytes``) lines in the given (8-bit clean) ``encoding``. dialect: Dialect argument for the underlying :func:`py:csv.reader`. encoding: If not ``False`` (default): name of the encoding needed to decode the encoded (``str``, PY3: ``bytes``) lines from ``stream``. \**fmtparams: Keyword arguments (formatting parameters) for the underlying :func:`py:csv.reader`. Returns: A Python 3 :func:`py3:csv.reader` stand-in yielding a list of ``unicode`` strings (PY3: ``str``) for each row. >>> import io >>> text = u'Spam!,Spam!,Spam!\r\nSpam!,Lovely Spam!,Lovely Spam!\r\n' >>> with io.StringIO(text, newline='') as f: ... for row in reader(f): ... print(', '.join(row)) Spam!, Spam!, Spam! Spam!, Lovely Spam!, Lovely Spam! Raises: NotImplementedError: If ``encoding`` is not 8-bit clean. """ if encoding is False: return UnicodeTextReader(stream, dialect, **fmtparams) if encoding is None: encoding = none_encoding() if not is_8bit_clean(encoding): raise NotImplementedError return UnicodeBytesReader(stream, dialect, encoding, **fmtparams)
Start new H2O server on the local machine.
def start(jar_path=None, nthreads=-1, enable_assertions=True, max_mem_size=None, min_mem_size=None,
          ice_root=None, log_dir=None, log_level=None, port="54321+", name=None, extra_classpath=None,
          verbose=True, jvm_custom_args=None, bind_to_localhost=True):
    """
    Start new H2O server on the local machine.

    :param jar_path: Path to the h2o.jar executable. If not given, then we will search for h2o.jar in the
        locations returned by `._jar_paths()`.
    :param nthreads: Number of threads in the thread pool. This should be related to the number of CPUs used.
        -1 means use all CPUs on the host. A positive integer specifies the number of CPUs directly.
    :param enable_assertions: If True, pass `-ea` option to the JVM.
    :param max_mem_size: Maximum heap size (jvm option Xmx), in bytes.
    :param min_mem_size: Minimum heap size (jvm option Xms), in bytes.
    :param log_dir: Directory for H2O logs to be stored if a new instance is started. Default directory is
        determined by H2O internally.
    :param log_level: The logger level for H2O if a new instance is started.
    :param ice_root: A directory where H2O stores its temporary files. Default location is determined by
        tempfile.mkdtemp().
    :param port: Port where to start the new server. This could be either an integer, or a string of the form
        "DDDDD+", indicating that the server should start looking for an open port starting from DDDDD and up.
    :param name: name of the h2o cluster to be started
    :param extra_classpath: List of paths to libraries that should be included on the Java classpath.
    :param verbose: If True, then connection info will be printed to the stdout.
    :param jvm_custom_args: Custom, user-defined arguments for the JVM H2O is instantiated in.
    :param bind_to_localhost: A flag indicating whether access to the H2O instance should be restricted to the
        local machine (default) or if it can be reached from other computers on the network. Only applicable
        when H2O is started from the Python client.

    :returns: a new H2OLocalServer instance
    """
    assert_is_type(jar_path, None, str)
    assert_is_type(port, None, int, str)
    assert_is_type(name, None, str)
    assert_is_type(nthreads, -1, BoundInt(1, 4096))
    assert_is_type(enable_assertions, bool)
    assert_is_type(min_mem_size, None, int)
    assert_is_type(max_mem_size, None, BoundInt(1 << 25))
    assert_is_type(log_dir, str, None)
    assert_is_type(log_level, str, None)
    assert_satisfies(log_level, log_level in [None, "TRACE", "DEBUG", "INFO", "WARN", "ERRR", "FATA"])
    assert_is_type(ice_root, None, I(str, os.path.isdir))
    assert_is_type(extra_classpath, None, [str])
    assert_is_type(jvm_custom_args, list, None)
    assert_is_type(bind_to_localhost, bool)
    if jar_path:
        assert_satisfies(jar_path, jar_path.endswith("h2o.jar"))

    if min_mem_size is not None and max_mem_size is not None and min_mem_size > max_mem_size:
        raise H2OValueError("`min_mem_size`=%d is larger than the `max_mem_size`=%d" % (min_mem_size, max_mem_size))
    if port is None: port = "54321+"
    baseport = None
    # TODO: get rid of this port gimmick and have 2 separate parameters.
    if is_type(port, str):
        if port.isdigit():
            port = int(port)
        else:
            if not (port[-1] == "+" and port[:-1].isdigit()):
                raise H2OValueError("`port` should be of the form 'DDDD+', where D is a digit. Got: %s" % port)
            baseport = int(port[:-1])
            port = 0

    hs = H2OLocalServer()
    hs._verbose = bool(verbose)
    hs._jar_path = hs._find_jar(jar_path)
    hs._extra_classpath = extra_classpath
    hs._ice_root = ice_root
    hs._name = name
    if not ice_root:
        hs._ice_root = tempfile.mkdtemp()
        hs._tempdir = hs._ice_root
    if verbose: print("Attempting to start a local H2O server...")
    hs._launch_server(port=port, baseport=baseport, nthreads=int(nthreads), ea=enable_assertions,
                      mmax=max_mem_size, mmin=min_mem_size, jvm_custom_args=jvm_custom_args,
                      bind_to_localhost=bind_to_localhost, log_dir=log_dir, log_level=log_level)
    if verbose: print(" Server is running at %s://%s:%d" % (hs.scheme, hs.ip, hs.port))
    atexit.register(lambda: hs.shutdown())
    return hs
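For reference, a minimal launch-and-shutdown sketch built on the signature above (the import location is an assumption; H2OLocalServer is typically exposed from h2o.backend):
# Assumption: H2OLocalServer is importable from h2o.backend in your h2o version.
from h2o.backend import H2OLocalServer

hs = H2OLocalServer.start(nthreads=2, max_mem_size=2 << 30, port="54321+", verbose=True)
print(hs.scheme, hs.ip, hs.port)   # e.g. http 127.0.0.1 54321
hs.shutdown()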
Shut down the server by trying to terminate/kill its process.
def shutdown(self):
    """
    Shut down the server by trying to terminate/kill its process.

    First we attempt to terminate the server process gracefully (sending SIGTERM signal). However after
    _TIME_TO_KILL seconds if the process didn't shutdown, we forcefully kill it with a SIGKILL signal.
    """
    if not self._process: return
    try:
        kill_time = time.time() + self._TIME_TO_KILL
        while self._process.poll() is None and time.time() < kill_time:
            self._process.terminate()
            time.sleep(0.2)
        if self._process.poll() is None:
            self._process.kill()
            time.sleep(0.2)
        if self._verbose:
            print("Local H2O server %s:%s stopped." % (self.ip, self.port))
    except:
        pass
    self._process = None
Return the location of an h2o.jar executable.
def _find_jar(self, path0=None): """ Return the location of an h2o.jar executable. :param path0: Explicitly given h2o.jar path. If provided, then we will simply check whether the file is there, otherwise we will search for an executable in locations returned by ._jar_paths(). :raises H2OStartupError: if no h2o.jar executable can be found. """ jar_paths = [path0] if path0 else self._jar_paths() searched_paths = [] for jp in jar_paths: searched_paths.append(jp) if os.path.exists(jp): return jp raise H2OStartupError("Cannot start local server: h2o.jar not found. Paths searched:\n" + "".join(" %s\n" % s for s in searched_paths))
Produce potential paths for an h2o.jar executable.
def _jar_paths():
    """Produce potential paths for an h2o.jar executable."""
    # PUBDEV-3534 hook to use arbitrary h2o.jar
    own_jar = os.getenv("H2O_JAR_PATH", "")
    if own_jar != "":
        if not os.path.isfile(own_jar):
            raise H2OStartupError("Environment variable H2O_JAR_PATH is set to '%s' but file does not exist, "
                                  "unset environment variable or provide valid path to h2o.jar file." % own_jar)
        yield own_jar
    # Check if running from an h2o-3 src folder (or any subfolder), in which case use the freshly-built h2o.jar
    cwd_chunks = os.path.abspath(".").split(os.path.sep)
    for i in range(len(cwd_chunks), 0, -1):
        if cwd_chunks[i - 1] == "h2o-3":
            yield os.path.sep.join(cwd_chunks[:i] + ["build", "h2o.jar"])
    # Then check the backend/bin folder:
    # (the following works assuming this code is located in h2o/backend/server.py file)
    backend_dir = os.path.split(os.path.realpath(__file__))[0]
    yield os.path.join(backend_dir, "bin", "h2o.jar")
    # Then try several old locations where h2o.jar might have been installed
    prefix1 = prefix2 = sys.prefix
    # On Unix-like systems Python typically gets installed into /Library/... or /System/Library/... If one of
    # those paths is sys.prefix, then we also build its counterpart.
    if prefix1.startswith(os.path.sep + "Library"):
        prefix2 = os.path.join("", "System", prefix1)
    elif prefix1.startswith(os.path.sep + "System"):
        prefix2 = prefix1[len(os.path.join("", "System")):]
    yield os.path.join(prefix1, "h2o_jar", "h2o.jar")
    yield os.path.join(os.path.abspath(os.sep), "usr", "local", "h2o_jar", "h2o.jar")
    yield os.path.join(prefix1, "local", "h2o_jar", "h2o.jar")
    yield os.path.join(get_config_var("userbase"), "h2o_jar", "h2o.jar")
    yield os.path.join(prefix2, "h2o_jar", "h2o.jar")
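Because the H2O_JAR_PATH environment variable is checked first, setting it is the simplest way to force a specific build to be used. A small sketch (the path is illustrative):
import os

# Illustrative path: point the launcher at a specific h2o.jar before starting the server.
os.environ["H2O_JAR_PATH"] = "/opt/h2o/h2o.jar"
# Any subsequent server start will pick up this jar first; a missing file raises H2OStartupError.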
Actually start the h2o.jar executable (helper method for .start()).
def _launch_server(self, port, baseport, mmax, mmin, ea, nthreads, jvm_custom_args, bind_to_localhost, log_dir=None, log_level=None): """Actually start the h2o.jar executable (helper method for `.start()`).""" self._ip = "127.0.0.1" # Find Java and check version. (Note that subprocess.check_output returns the output as a bytes object) java = self._find_java() self._check_java(java, self._verbose) if self._verbose: print(" Starting server from " + self._jar_path) print(" Ice root: " + self._ice_root) # Combine jar path with the optional extra classpath classpath = [self._jar_path] if self._extra_classpath is None else [self._jar_path] + self._extra_classpath # Construct java command to launch the process cmd = [java] # ...add JVM options cmd += ["-ea"] if ea else [] for (mq, num) in [("-Xms", mmin), ("-Xmx", mmax)]: if num is None: continue numstr = "%dG" % (num >> 30) if num == (num >> 30) << 30 else \ "%dM" % (num >> 20) if num == (num >> 20) << 20 else \ str(num) cmd += [mq + numstr] if jvm_custom_args is not None: for arg in jvm_custom_args: assert type(arg) is str cmd += [arg] cmd += ["-cp", os.pathsep.join(classpath), "water.H2OApp"] # This should be the last JVM option # ...add H2O options cmd += ["-ip", self._ip] if bind_to_localhost: cmd += ["-web_ip", self._ip] cmd += ["-port", str(port)] if port else [] cmd += ["-baseport", str(baseport)] if baseport else [] cmd += ["-ice_root", self._ice_root] cmd += ["-nthreads", str(nthreads)] if nthreads > 0 else [] if log_dir: cmd += ["-log_dir", log_dir] if log_level: cmd += ["-log_level", log_level] if not self._name: self._name = "H2O_from_python_%s" % self._tmp_file("salt") cmd += ["-name", self._name] # Warning: do not change to any higher log-level, otherwise we won't be able to know which port the # server is listening to. cmd += ["-log_level", "INFO"] # Create stdout and stderr files self._stdout = self._tmp_file("stdout") self._stderr = self._tmp_file("stderr") cwd = os.path.abspath(os.getcwd()) out = open(self._stdout, "w") err = open(self._stderr, "w") if self._verbose: print(" JVM stdout: " + out.name) print(" JVM stderr: " + err.name) # Launch the process win32 = sys.platform == "win32" flags = getattr(subprocess, "CREATE_NEW_PROCESS_GROUP", 0) if win32 else 0 prex = os.setsid if not win32 else None try: proc = subprocess.Popen(args=cmd, stdout=out, stderr=err, cwd=cwd, creationflags=flags, preexec_fn=prex) except OSError as e: traceback = getattr(e, "child_traceback", None) raise H2OServerError("Unable to start server: %s" % e, traceback) # Wait until the server is up-and-running giveup_time = time.time() + self._TIME_TO_START while True: if proc.poll() is not None: raise H2OServerError("Server process terminated with error code %d" % proc.returncode) ret = self._get_server_info_from_logs() if ret: self._scheme = ret[0] self._ip = ret[1] self._port = ret[2] self._process = proc break if time.time() > giveup_time: elapsed_time = time.time() - (giveup_time - self._TIME_TO_START) raise H2OServerError("Server wasn't able to start in %f seconds." % elapsed_time) time.sleep(0.2)
Find location of the java executable (helper for ._launch_server()).
def _find_java(): """ Find location of the java executable (helper for `._launch_server()`). This method is not particularly robust, and may require additional tweaking for different platforms... :return: Path to the java executable. :raises H2OStartupError: if java cannot be found. """ # is java callable directly (doesn't work on windows it seems)? java = "java.exe" if sys.platform == "win32" else "java" if os.access(java, os.X_OK): return java # Can Java be found on the PATH? for path in os.getenv("PATH").split(os.pathsep): # not same as os.path.sep! full_path = os.path.join(path, java) if os.access(full_path, os.X_OK): return full_path # check if JAVA_HOME is set (for Windows) if os.getenv("JAVA_HOME"): full_path = os.path.join(os.getenv("JAVA_HOME"), "bin", java) if os.path.exists(full_path): return full_path # check "/Program Files" and "/Program Files (x86)" on Windows if sys.platform == "win32": # On Windows, backslash on the drive letter is necessary, otherwise os.path.join produces an invalid path program_folders = [os.path.join("C:\\", "Program Files", "Java"), os.path.join("C:\\", "Program Files (x86)", "Java"), os.path.join("C:\\", "ProgramData", "Oracle", "Java")] for folder in program_folders: for dirpath, dirnames, filenames in os.walk(folder): if java in filenames: return os.path.join(dirpath, java) # not found... raise H2OStartupError("Cannot find Java. Please install the latest JRE from\n" "http://www.oracle.com/technetwork/java/javase/downloads/index.html")
Generate names for temporary files (helper method for ._launch_server()).
def _tmp_file(self, kind): """ Generate names for temporary files (helper method for `._launch_server()`). :param kind: one of "stdout", "stderr" or "salt". The "salt" kind is used for process name, not for a file, so it doesn't contain a path. All generated names are based on the user name of the currently logged-in user. """ if sys.platform == "win32": username = os.getenv("USERNAME") else: username = os.getenv("USER") if not username: username = "unknownUser" usr = "".join(ch if ch.isalnum() else "_" for ch in username) if kind == "salt": return usr + "_" + "".join(choice("0123456789abcdefghijklmnopqrstuvwxyz") for _ in range(6)) else: if not self._tempdir: self._tempdir = tempfile.mkdtemp() return os.path.join(self._tempdir, "h2o_%s_started_from_python.%s" % (usr, kind[3:]))
Check server's output log, and determine its scheme / IP / port (helper method for ._launch_server()).
def _get_server_info_from_logs(self):
    """
    Check server's output log, and determine its scheme / IP / port (helper method for `._launch_server()`).

    This method is polled during process startup. It looks at the server output log and checks for a presence
    of a particular string ("INFO: Open H2O Flow in your web browser:") which indicates that the server is
    up-and-running. If the method detects this string, it extracts the server's scheme, ip and port and
    returns them; otherwise it returns None.

    :returns: (scheme, ip, port) tuple if the server has already started, None otherwise.
    """
    searchstr = "INFO: Open H2O Flow in your web browser:"
    with open(self._stdout, "rt") as f:
        for line in f:
            if searchstr in line:
                url = line[line.index(searchstr) + len(searchstr):].strip().rstrip("/")
                parts = url.split(":")
                assert len(parts) == 3 and (parts[0] == "http" or parts[0] == "https") and parts[2].isdigit(), \
                    "Unexpected URL: %s" % url
                return parts[0], parts[1][2:], int(parts[2])
    return None
Returns a confusion matrix based on H2O's default prediction threshold for a dataset.
def confusion_matrix(self, data):
    """
    Returns a confusion matrix based on H2O's default prediction threshold for a dataset.

    :param H2OFrame data: the frame with the prediction results for which the confusion matrix should be extracted.
    """
    assert_is_type(data, H2OFrame)
    j = h2o.api("POST /3/Predictions/models/%s/frames/%s" % (self._id, data.frame_id))
    return j["model_metrics"][0]["cm"]["table"]
Retrieve the Hit Ratios.
def hit_ratio_table(self, train=False, valid=False, xval=False):
    """
    Retrieve the Hit Ratios.

    If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
    "valid", and "xval".

    :param train: If train is True, then return the hit ratio value for the training data.
    :param valid: If valid is True, then return the hit ratio value for the validation data.
    :param xval: If xval is True, then return the hit ratio value for the cross validation data.

    :return: The hit ratio for this regression model.
    """
    tm = ModelBase._get_metrics(self, train, valid, xval)
    m = {}
    for k, v in zip(list(tm.keys()), list(tm.values())):
        m[k] = None if v is None else v.hit_ratio_table()
    return list(m.values())[0] if len(m) == 1 else m
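A usage sketch, assuming `model` is an already-trained H2O multiclass model that was given a validation frame:
# Only one flag set: a single hit-ratio table is returned.
hrt_train = model.hit_ratio_table(train=True)

# More than one flag set: a dictionary keyed by "train" and "valid" is returned.
hrt_both = model.hit_ratio_table(train=True, valid=True)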
Equivalent of csv.DictWriter, but allows delimiter to be a unicode string on Py2.
def csv_dict_writer(f, fieldnames, **kwargs):
    """Equivalent of csv.DictWriter, but allows `delimiter` to be a unicode string on Py2."""
    import csv
    if "delimiter" in kwargs:
        kwargs["delimiter"] = str(kwargs["delimiter"])
    return csv.DictWriter(f, fieldnames, **kwargs)
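The wrapper otherwise behaves exactly like csv.DictWriter; a small Python 3 sketch:
import io

buf = io.StringIO()
writer = csv_dict_writer(buf, fieldnames=["name", "count"], delimiter=u";")
writer.writeheader()
writer.writerow({"name": "spam", "count": 3})
print(buf.getvalue())   # "name;count" and "spam;3" on separate lines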
Given a string, return an iterator over this string's bytes (as ints).
def bytes_iterator(s):
    """Given a string, return an iterator over this string's bytes (as ints)."""
    if s is None: return
    if PY2 or PY3 and isinstance(s, str):
        for ch in s:
            yield ord(ch)
    elif PY3 and isinstance(s, bytes):
        for ch in s:
            yield ch
    else:
        raise TypeError("String argument expected, got %s" % type(s))
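On Python 3 (where the PY2/PY3 flags come from the module's compatibility helpers) the behaviour can be checked directly:
assert list(bytes_iterator("abc")) == [97, 98, 99]    # text string
assert list(bytes_iterator(b"abc")) == [97, 98, 99]   # byte string
assert list(bytes_iterator(None)) == []               # None yields nothing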
Analogous to repr(), but will suppress 'u' prefix when repr-ing a unicode string.
def repr2(x):
    """Analogous to repr(), but will suppress 'u' prefix when repr-ing a unicode string."""
    s = repr(x)
    if len(s) >= 2 and s[0] == "u" and (s[1] == "'" or s[1] == '"'):
        s = s[1:]
    return s
Get second token in line
def _get_object_name(self, line):
    ''' Get second token in line
    >>> docwriter = ApiDocWriter('sphinx')
    >>> docwriter._get_object_name(" def func(): ")
    'func'
    >>> docwriter._get_object_name(" class Klass(object): ")
    'Klass'
    >>> docwriter._get_object_name(" class Klass: ")
    'Klass'
    '''
    name = line.split()[1].split('(')[0].strip()
    # in case we have classes which are not derived from object
    # ie. old style classes
    return name.rstrip(':')
Convert uri to absolute filepath
def _uri2path(self, uri): ''' Convert uri to absolute filepath Parameters ---------- uri : string URI of python module to return path for Returns ------- path : None or string Returns None if there is no valid path for this URI Otherwise returns absolute file system path for URI Examples -------- >>> docwriter = ApiDocWriter('sphinx') >>> import sphinx >>> modpath = sphinx.__path__[0] >>> res = docwriter._uri2path('sphinx.builder') >>> res == os.path.join(modpath, 'builder.py') True >>> res = docwriter._uri2path('sphinx') >>> res == os.path.join(modpath, '__init__.py') True >>> docwriter._uri2path('sphinx.does_not_exist') ''' if uri == self.package_name: return os.path.join(self.root_path, '__init__.py') path = uri.replace('.', os.path.sep) path = path.replace(self.package_name + os.path.sep, '') path = os.path.join(self.root_path, path) # XXX maybe check for extensions as well? if os.path.exists(path + '.py'): # file path += '.py' elif os.path.exists(os.path.join(path, '__init__.py')): path = os.path.join(path, '__init__.py') else: return None return path
Convert directory path to uri
def _path2uri(self, dirpath):
    ''' Convert directory path to uri '''
    relpath = dirpath.replace(self.root_path, self.package_name)
    if relpath.startswith(os.path.sep):
        relpath = relpath[1:]
    return relpath.replace(os.path.sep, '.')
Parse module defined in *uri*
def _parse_module(self, uri):
    ''' Parse module defined in *uri* '''
    filename = self._uri2path(uri)
    if filename is None:
        # nothing that we could handle here.
        return ([], [])
    f = open(filename, 'rt')
    functions, classes = self._parse_lines(f)
    f.close()
    return functions, classes
Parse lines of text for functions and classes
def _parse_lines(self, linesource): ''' Parse lines of text for functions and classes ''' functions = [] classes = [] for line in linesource: if line.startswith('def ') and line.count('('): # exclude private stuff name = self._get_object_name(line) if not name.startswith('_'): functions.append(name) elif line.startswith('class '): # exclude private stuff name = self._get_object_name(line) if not name.startswith('_'): classes.append(name) else: pass functions.sort() classes.sort() return functions, classes
Make autodoc documentation template string for a module
def generate_api_doc(self, uri): '''Make autodoc documentation template string for a module Parameters ---------- uri : string python location of module - e.g 'sphinx.builder' Returns ------- S : string Contents of API doc ''' # get the names of all classes and functions functions, classes = self._parse_module(uri) if not len(functions) and not len(classes): print 'WARNING: Empty -',uri # dbg return '' # Make a shorter version of the uri that omits the package name for # titles uri_short = re.sub(r'^%s\.' % self.package_name,'',uri) ad = '.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n' chap_title = uri_short ad += (chap_title+'\n'+ self.rst_section_levels[1] * len(chap_title) + '\n\n') # Set the chapter title to read 'module' for all modules except for the # main packages if '.' in uri: title = 'Module: :mod:`' + uri_short + '`' else: title = ':mod:`' + uri_short + '`' ad += title + '\n' + self.rst_section_levels[2] * len(title) if len(classes): ad += '\nInheritance diagram for ``%s``:\n\n' % uri ad += '.. inheritance-diagram:: %s \n' % uri ad += ' :parts: 3\n' ad += '\n.. automodule:: ' + uri + '\n' ad += '\n.. currentmodule:: ' + uri + '\n' multi_class = len(classes) > 1 multi_fx = len(functions) > 1 if multi_class: ad += '\n' + 'Classes' + '\n' + \ self.rst_section_levels[2] * 7 + '\n' elif len(classes) and multi_fx: ad += '\n' + 'Class' + '\n' + \ self.rst_section_levels[2] * 5 + '\n' for c in classes: ad += '\n:class:`' + c + '`\n' \ + self.rst_section_levels[multi_class + 2 ] * \ (len(c)+9) + '\n\n' ad += '\n.. autoclass:: ' + c + '\n' # must NOT exclude from index to keep cross-refs working ad += ' :members:\n' \ ' :undoc-members:\n' \ ' :show-inheritance:\n' \ ' :inherited-members:\n' \ '\n' \ ' .. automethod:: __init__\n' if multi_fx: ad += '\n' + 'Functions' + '\n' + \ self.rst_section_levels[2] * 9 + '\n\n' elif len(functions) and multi_class: ad += '\n' + 'Function' + '\n' + \ self.rst_section_levels[2] * 8 + '\n\n' for f in functions: # must NOT exclude from index to keep cross-refs working ad += '\n.. autofunction:: ' + uri + '.' + f + '\n\n' return ad
Returns True if *matchstr* does not match patterns
def _survives_exclude(self, matchstr, match_type): ''' Returns True if *matchstr* does not match patterns ``self.package_name`` removed from front of string if present Examples -------- >>> dw = ApiDocWriter('sphinx') >>> dw._survives_exclude('sphinx.okpkg', 'package') True >>> dw.package_skip_patterns.append('^\\.badpkg$') >>> dw._survives_exclude('sphinx.badpkg', 'package') False >>> dw._survives_exclude('sphinx.badpkg', 'module') True >>> dw._survives_exclude('sphinx.badmod', 'module') True >>> dw.module_skip_patterns.append('^\\.badmod$') >>> dw._survives_exclude('sphinx.badmod', 'module') False ''' if match_type == 'module': patterns = self.module_skip_patterns elif match_type == 'package': patterns = self.package_skip_patterns else: raise ValueError('Cannot interpret match type "%s"' % match_type) # Match to URI without package name L = len(self.package_name) if matchstr[:L] == self.package_name: matchstr = matchstr[L:] for pat in patterns: try: pat.search except AttributeError: pat = re.compile(pat) if pat.search(matchstr): return False return True
Return module sequence discovered from ``self.package_name``
def discover_modules(self): ''' Return module sequence discovered from ``self.package_name`` Parameters ---------- None Returns ------- mods : sequence Sequence of module names within ``self.package_name`` Examples -------- >>> dw = ApiDocWriter('sphinx') >>> mods = dw.discover_modules() >>> 'sphinx.util' in mods True >>> dw.package_skip_patterns.append('\.util$') >>> 'sphinx.util' in dw.discover_modules() False >>> ''' modules = [self.package_name] # raw directory parsing for dirpath, dirnames, filenames in os.walk(self.root_path): # Check directory names for packages root_uri = self._path2uri(os.path.join(self.root_path, dirpath)) for dirname in dirnames[:]: # copy list - we modify inplace package_uri = '.'.join((root_uri, dirname)) if (self._uri2path(package_uri) and self._survives_exclude(package_uri, 'package')): modules.append(package_uri) else: dirnames.remove(dirname) # Check filenames for modules for filename in filenames: module_name = filename[:-3] module_uri = '.'.join((root_uri, module_name)) if (self._uri2path(module_uri) and self._survives_exclude(module_uri, 'module')): modules.append(module_uri) return sorted(modules)
Generate API reST files.
def write_api_docs(self, outdir): """Generate API reST files. Parameters ---------- outdir : string Directory name in which to store files We create automatic filenames for each module Returns ------- None Notes ----- Sets self.written_modules to list of written modules """ if not os.path.exists(outdir): os.mkdir(outdir) # compose list of modules modules = self.discover_modules() self.write_modules_api(modules,outdir)
Make a reST API index file from written files
def write_index(self, outdir, froot='gen', relative_to=None): """Make a reST API index file from written files Parameters ---------- path : string Filename to write index to outdir : string Directory to which to write generated index file froot : string, optional root (filename without extension) of filename to write to Defaults to 'gen'. We add ``self.rst_extension``. relative_to : string path to which written filenames are relative. This component of the written file path will be removed from outdir, in the generated index. Default is None, meaning, leave path as it is. """ if self.written_modules is None: raise ValueError('No modules written') # Get full filename path path = os.path.join(outdir, froot+self.rst_extension) # Path written into index is relative to rootpath if relative_to is not None: relpath = outdir.replace(relative_to + os.path.sep, '') else: relpath = outdir idx = open(path,'wt') w = idx.write w('.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n') w('.. toctree::\n\n') for f in self.written_modules: w(' %s\n' % os.path.join(relpath,f)) idx.close()
Main program.
def main(argv): """ Main program. @return: none """ global g_script_name g_script_name = os.path.basename(argv[0]) parse_config_file() parse_args(argv) url = 'https://0xdata.atlassian.net/rest/api/2/search?jql=sprint="' + urllib.quote(g_sprint) + '"&maxResults=1000' r = requests.get(url, auth=(g_user, g_pass)) if (r.status_code != 200): print("ERROR: status code is " + str(r.status_code)) sys.exit(1) j = r.json() issues = j[u'issues'] pm = PeopleManager() for issue in issues: pm.add(issue) pm.emit() print("")
Convert this confusion matrix into a 2x2 plain list of values.
def to_list(self):
    """Convert this confusion matrix into a 2x2 plain list of values."""
    return [[int(self.table.cell_values[0][1]), int(self.table.cell_values[0][2])],
            [int(self.table.cell_values[1][1]), int(self.table.cell_values[1][2])]]
Read confusion matrices from the list of sources (?).
def read_cms(cms=None, domains=None):
    """Read confusion matrices from the list of sources (?)."""
    assert_is_type(cms, [list])
    return [ConfusionMatrix(cm, domains) for cm in cms]
Load the pickle file of java messages that can be ignored into a dict structure g_ok_java_messages.
def load_dict():
    """
    Load the pickle file of java messages that can be ignored into a dict structure g_ok_java_messages.

    :return: none
    """
    global g_load_java_message_filename
    global g_ok_java_messages

    if os.path.isfile(g_load_java_message_filename):
        # only load dict from file if it exists.
        with open(g_load_java_message_filename, 'rb') as ofile:
            g_ok_java_messages = pickle.load(ofile)
    else:
        # no previous java messages to be excluded are found
        g_ok_java_messages["general"] = []
Add new java messages to ignore from a user text file. It first reads in the new java ignored messages from the user text file and generates a dict structure out of the new java ignored messages. This is achieved by function extract_message_to_dict. Next, new java messages will be added to the original ignored java messages dict g_ok_java_messages. Again, this is achieved by function update_message_dict.
def add_new_message(): """ Add new java messages to ignore from user text file. It first reads in the new java ignored messages from the user text file and generate a dict structure to out of the new java ignored messages. This is achieved by function extract_message_to_dict. Next, new java messages will be added to the original ignored java messages dict g_ok_java_messages. Again, this is achieved by function update_message_dict. :return: none """ global g_new_messages_to_exclude # filename containing text file from user containing new java ignored messages global g_dict_changed # True if new ignored java messages are added. new_message_dict = extract_message_to_dict(g_new_messages_to_exclude) if new_message_dict: g_dict_changed = True update_message_dict(new_message_dict,1)
Remove java messages from the ignored list if users desire it. It first reads in the java ignored messages from the user stored in g_old_messages_to_remove and builds a dict structure (old_message_dict) out of it. Next, it removes the java messages contained in old_message_dict from g_ok_java_messages.
def remove_old_message(): """ Remove java messages from ignored list if users desired it. It first reads in the java ignored messages from user stored in g_old_messages_to_remove and build a dict structure (old_message_dict) out of it. Next, it removes the java messages contained in old_message_dict from g_ok_java_messages. :return: none """ global g_old_messages_to_remove global g_dict_changed # extract old java ignored messages to be removed in old_message_dict old_message_dict = extract_message_to_dict(g_old_messages_to_remove) if old_message_dict: g_dict_changed = True update_message_dict(old_message_dict,2)
Update the g_ok_java_messages dict structure by 1. adding the new java ignored messages stored in message_dict if action == 1, or 2. removing the java ignored messages stored in message_dict if action == 2.
def update_message_dict(message_dict,action): """ Update the g_ok_java_messages dict structure by 1. add the new java ignored messages stored in message_dict if action == 1 2. remove the java ignored messages stired in message_dict if action == 2. Parameters ---------- message_dict : Python dict key: unit test name or "general" value: list of java messages that are to be ignored if they are found when running the test stored as the key. If the key is "general", the list of java messages are to be ignored when running all tests. action : int if 1: add java ignored messages stored in message_dict to g_ok_java_messages dict; if 2: remove java ignored messages stored in message_dict from g_ok_java_messages dict. :return: none """ global g_ok_java_messages allKeys = g_ok_java_messages.keys() for key in message_dict.keys(): if key in allKeys: # key already exists, just add to it for message in message_dict[key]: if action == 1: if message not in g_ok_java_messages[key]: g_ok_java_messages[key].append(message) if action == 2: if message in g_ok_java_messages[key]: g_ok_java_messages[key].remove(message) else: # new key here. Can only add and cannot remove if action == 1: g_ok_java_messages[key] = message_dict[key]
Read in a text file of java messages to be ignored and generate a dictionary structure out of it with key and value pairs. The keys are test names and the values are lists of java message strings associated with that test name, where we are either going to add to the existing java messages to ignore or remove them from g_ok_java_messages.
def extract_message_to_dict(filename): """ Read in a text file that java messages to be ignored and generate a dictionary structure out of it with key and value pairs. The keys are test names and the values are lists of java message strings associated with that test name where we are either going to add to the existing java messages to ignore or remove them from g_ok_java_messages. Parameters ---------- filename : Str filename that contains ignored java messages. The text file shall contain something like this: keyName = general Message = nfolds: nfolds cannot be larger than the number of rows (406). KeyName = pyunit_cv_cars_gbm.py Message = Caught exception: Illegal argument(s) for GBM model: GBM_model_python_1452503348770_2586. \ Details: ERRR on field: _nfolds: nfolds must be either 0 or >1. ... :return: message_dict : dict contains java message to be ignored with key as unit test name or "general" and values as list of ignored java messages. """ message_dict = {} if os.path.isfile(filename): # open file to read in new exclude messages if it exists with open(filename,'r') as wfile: key = "" val = "" startMess = False while 1: each_line = wfile.readline() if not each_line: # reached EOF if startMess: add_to_dict(val.strip(),key,message_dict) break # found a test name or general with values to follow if "keyname" in each_line.lower(): # name of test file or the word "general" temp_strings = each_line.strip().split('=') if (len(temp_strings) > 1): # make sure the line is formatted sort of correctly if startMess: # this is the start of a new key/value pair add_to_dict(val.strip(),key,message_dict) val = "" key = temp_strings[1].strip() startMess = False if (len(each_line) > 1) and startMess: val += each_line if "ignoredmessage" in each_line.lower(): startMess = True # start of a Java message. temp_mess = each_line.split('=') if (len(temp_mess) > 1): val = temp_mess[1] return message_dict
Add new key, val (ignored java message) to dict message_dict.
def add_to_dict(val,key,message_dict): """ Add new key, val (ignored java message) to dict message_dict. Parameters ---------- val : Str contains ignored java messages. key : Str key for the ignored java messages. It can be "general" or any R or Python unit test names message_dict : dict stored ignored java message for key ("general" or any R or Python unit test names) :return: none """ allKeys = message_dict.keys() if (len(val) > 0): # got a valid message here if (key in allKeys) and (val not in message_dict[key]): message_dict[key].append(val) # only include this message if it has not been added before else: message_dict[key] = [val]
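A short sketch of how add_to_dict() accumulates messages, reusing the example messages from the file format described above:
message_dict = {}
add_to_dict("nfolds: nfolds cannot be larger than the number of rows (406).", "general", message_dict)
add_to_dict("Caught exception: Illegal argument(s) for GBM model.", "pyunit_cv_cars_gbm.py", message_dict)
# message_dict now maps each key ("general" or a test name) to a list of ignored java messages.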
Save the ignored java message dict stored in g_ok_java_messages into a pickle file for future use.
def save_dict():
    """
    Save the ignored java message dict stored in g_ok_java_messages into a pickle file for future use.

    :return: none
    """
    global g_ok_java_messages
    global g_save_java_message_filename
    global g_dict_changed

    if g_dict_changed:
        with open(g_save_java_message_filename, 'wb') as ofile:
            pickle.dump(g_ok_java_messages, ofile)
Write the java ignored messages in g_ok_java_messages into a text file for humans to read.
def print_dict(): """ Write the java ignored messages in g_ok_java_messages into a text file for humans to read. :return: none """ global g_ok_java_messages global g_java_messages_to_ignore_text_filename allKeys = sorted(g_ok_java_messages.keys()) with open(g_java_messages_to_ignore_text_filename,'w') as ofile: for key in allKeys: for mess in g_ok_java_messages[key]: ofile.write('KeyName: '+key+'\n') ofile.write('IgnoredMessage: '+mess+'\n') print('KeyName: ',key) print('IgnoredMessage: ',g_ok_java_messages[key]) print('\n')
Parse user inputs and set the corresponding global variables to perform the necessary tasks.
def parse_args(argv): """ Parse user inputs and set the corresponing global variables to perform the necessary tasks. Parameters ---------- argv : string array contains flags and input options from users :return: """ global g_new_messages_to_exclude global g_old_messages_to_remove global g_load_java_message_filename global g_save_java_message_filename global g_print_java_messages if len(argv) < 2: # print out help menu if user did not enter any arguments. usage() i = 1 while (i < len(argv)): s = argv[i] if (s == "--inputfileadd"): # input text file where new java messages are stored i += 1 if (i > len(argv)): usage() g_new_messages_to_exclude = argv[i] elif (s == "--inputfilerm"): # input text file containing java messages to be removed from the ignored list i += 1 if (i > len(argv)): usage() g_old_messages_to_remove = argv[i] elif (s == "--loadjavamessage"): # load previously saved java message pickle file from file other than i += 1 # the default one before performing update if i > len(argv): usage() g_load_java_message_filename = argv[i] elif (s == "--savejavamessage"): # save updated java message in this file instead of default file i += 1 if (i > len(argv)): usage() g_save_java_message_filename = argv[i] elif (s == '--printjavamessage'): # will print java message out to console and save in a text file i += 1 g_print_java_messages = True g_load_java_message_filename = argv[i] elif (s == '--help'): # print help menu and exit usage() else: unknown_arg(s) i += 1
Illustrate what the various input flags are and what the options should be.
def usage(): """ Illustrate what the various input flags are and the options should be. :return: none """ global g_script_name # name of the script being run. print("") print("Usage: " + g_script_name + " [...options...]") print("") print(" --help print out this help menu and show all the valid flags and inputs.") print("") print(" --inputfileadd filename where the new java messages to ignore are stored in.") print("") print(" --inputfilerm filename where the java messages are removed from the ignored list.") print("") print(" --loadjavamessage filename pickle file that stores the dict structure containing java messages to include.") print("") print(" --savejavamessage filename pickle file that saves the final dict structure after update.") print("") print(" --printjavamessage filename print java ignored java messages stored in pickle file filenam onto console and save into a text file.") print("") sys.exit(1)
Main program.
def main(argv): """ Main program. @return: none """ global g_script_name global g_test_root_dir global g_new_messages_to_exclude global g_old_messages_to_remove global g_load_java_message_filename global g_save_java_message_filename global g_print_java_messages global g_java_messages_to_ignore_text_filename g_script_name = os.path.basename(argv[0]) # get name of script being run. # Override any defaults with the user's choices. parse_args(argv) g_load_java_message_filename = os.path.join(g_test_root_dir,g_load_java_message_filename) load_dict() # load previously stored java messages to g_ok_java_messages if len(g_new_messages_to_exclude) > 0: g_new_messages_to_exclude = os.path.join(g_test_root_dir,g_new_messages_to_exclude) add_new_message() # add new java messages to exclude to dictionary if len(g_old_messages_to_remove) > 0: g_old_messages_to_remove = os.path.join(g_test_root_dir,g_old_messages_to_remove) remove_old_message() # remove java messages from ignored list if users desired it g_save_java_message_filename = os.path.join(g_test_root_dir,g_save_java_message_filename) save_dict() # save the updated dict g_ok_java_messages to pickle file if g_print_java_messages: # print java ignored messages to console and text file g_java_messages_to_ignore_text_filename = os.path.join(g_test_root_dir,g_java_messages_to_ignore_text_filename) print_dict()
Find all python files in the given directory and all subfolders.
def locate_files(root_dir):
    """Find all python files in the given directory and all subfolders."""
    all_files = []
    root_dir = os.path.abspath(root_dir)
    for dir_name, subdirs, files in os.walk(root_dir):
        for f in files:
            if f.endswith(".py"):
                all_files.append(os.path.join(dir_name, f))
    return all_files
Search the file for any magic incantations.
def find_magic_in_file(filename):
    """
    Search the file for any magic incantations.

    :param filename: file to search
    :returns: a tuple containing the spell and then maybe some extra words (or None if no magic present)
    """
    with open(filename, "rt", encoding="utf-8") as f:
        for line in f:
            if line.startswith("#"):
                comment = line[1:].strip()
                if comment.startswith("~~~~* ") or comment.startswith("----* ") or comment.startswith("====* "):
                    spell = comment[5:].strip()
                    return tuple(spell.split())
            else:
                break
    return None
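A small self-contained sketch of the comment format the function recognizes (the spell words are made up):
import os, tempfile

# Illustrative "magic" first-line comment; any of the "~~~~* ", "----* " or "====* " prefixes works.
src = "# ----* mymagic extra-word\nprint('hello')\n"
with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as tmp:
    tmp.write(src)
print(find_magic_in_file(tmp.name))   # ('mymagic', 'extra-word')
os.unlink(tmp.name)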
Parse file into chunks / objects.
def parse_python_file(filename):
    """Parse file into chunks / objects."""
    with open(filename, "rt", encoding="utf-8") as f:
        tokens = list(tokenize.generate_tokens(f.readline))
        tokens = normalize_tokens(tokens)
        module = ChunkCode(tokens, 0, len(tokens))
        module.parse()
        print(module)
Executed when script is run as-is.
def main():
    """Executed when script is run as-is."""
    # magic_files = {}
    for filename in locate_files(ROOT_DIR):
        print("Processing %s" % filename)
        with open(filename, "rt") as f:
            tokens = list(tokenize.generate_tokens(f.readline))
            text1 = tokenize.untokenize(tokens)
            ntokens = normalize_tokens(tokens)
            text2 = tokenize.untokenize(ntokens)
            assert text1 == text2
Returns H2OPCA object which implements fit and transform methods to be used in sklearn.Pipeline properly. All parameters defined in self.__params should be input parameters in the H2OPCA.__init__ method.
def init_for_pipeline(self):
    """
    Returns H2OPCA object which implements fit and transform methods to be used in sklearn.Pipeline properly.

    All parameters defined in self.__params should be input parameters in the H2OPCA.__init__ method.

    :returns: H2OPCA object
    """
    import inspect
    from h2o.transforms.decomposition import H2OPCA
    # check which parameters can be passed to H2OPCA init
    var_names = list(dict(inspect.getmembers(H2OPCA.__init__.__code__))['co_varnames'])
    parameters = {k: v for k, v in self._parms.items() if k in var_names}
    return H2OPCA(**parameters)
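A hedged sketch of where this fits: per the docstring, the returned H2OPCA can serve as a transform step in a scikit-learn Pipeline. Every name below other than init_for_pipeline() is an assumption (a model object `pca_model`, a downstream estimator `estimator`, and training data `X`, `y`):
from sklearn.pipeline import Pipeline

pipe = Pipeline([
    ("pca", pca_model.init_for_pipeline()),   # pca_model: model object exposing init_for_pipeline()
    ("estimator", estimator),                 # estimator: a fit/predict-compatible estimator
])
pipe.fit(X, y)   # X, y: assumed training data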
Transform H2OFrame using a MOJO Pipeline.
def transform(self, data, allow_timestamps=False): """ Transform H2OFrame using a MOJO Pipeline. :param data: Frame to be transformed. :param allow_timestamps: Allows datetime columns to be used directly with MOJO pipelines. It is recommended to parse your datetime columns as Strings when using pipelines because pipelines can interpret certain datetime formats in a different way. If your H2OFrame is parsed from a binary file format (eg. Parquet) instead of CSV it is safe to turn this option on and use datetime columns directly. :returns: A new H2OFrame. """ assert_is_type(data, H2OFrame) assert_is_type(allow_timestamps, bool) return H2OFrame._expr(ExprNode("mojo.pipeline.transform", self.pipeline_id[0], data, allow_timestamps))
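A usage sketch, assuming `mojo_pipeline` is an already-constructed MOJO pipeline object exposing the transform() shown above; the file path is illustrative:
import h2o

h2o.init()
frame = h2o.import_file("input.csv")                        # illustrative path
scored = mojo_pipeline.transform(frame, allow_timestamps=False)
scored.head()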
This function will look at the local directory and pick out files that have the correct start name and summarize the results into one giant dict.
def summarizeFailedRuns(): """ This function will look at the local directory and pick out files that have the correct start name and summarize the results into one giant dict. :return: None """ global g_summary_dict_all onlyFiles = [x for x in listdir(g_test_root_dir) if isfile(join(g_test_root_dir, x))] # grab files for f in onlyFiles: for fileStart in g_file_start: if (fileStart in f) and (os.path.getsize(f) > 10): # found the file containing failed tests fFullPath = os.path.join(g_test_root_dir, f) try: temp_dict = json.load(open(fFullPath,'r')) # scrape through temp_dict and see if we need to add the test to intermittents for ind in range(len(temp_dict["TestName"])): addFailedTests(g_summary_dict_all, temp_dict, ind) except: continue break
This function will print out the intermittents onto the screen for casual viewing. It will also print out where the giant summary dictionary is going to be stored.
def extractPrintSaveIntermittens(): """ This function will print out the intermittents onto the screen for casual viewing. It will also print out where the giant summary dictionary is going to be stored. :return: None """ # extract intermittents from collected failed tests global g_summary_dict_intermittents localtz = time.tzname[0] for ind in range(len(g_summary_dict_all["TestName"])): if g_summary_dict_all["TestInfo"][ind]["FailureCount"] >= g_threshold_failure: addFailedTests(g_summary_dict_intermittents, g_summary_dict_all, ind) # save dict in file if len(g_summary_dict_intermittents["TestName"]) > 0: json.dump(g_summary_dict_intermittents, open(g_summary_dict_name, 'w')) with open(g_summary_csv_filename, 'w') as summaryFile: for ind in range(len(g_summary_dict_intermittents["TestName"])): testName = g_summary_dict_intermittents["TestName"][ind] numberFailure = g_summary_dict_intermittents["TestInfo"][ind]["FailureCount"] firstFailedTS = parser.parse(time.ctime(min(g_summary_dict_intermittents["TestInfo"][ind]["Timestamp"]))+ ' '+localtz) firstFailedStr = firstFailedTS.strftime("%a %b %d %H:%M:%S %Y %Z") recentFail = parser.parse(time.ctime(max(g_summary_dict_intermittents["TestInfo"][ind]["Timestamp"]))+ ' '+localtz) recentFailStr = recentFail.strftime("%a %b %d %H:%M:%S %Y %Z") eachTest = "{0}, {1}, {2}, {3}\n".format(testName, recentFailStr, numberFailure, g_summary_dict_intermittents["TestInfo"][ind]["TestCategory"][0]) summaryFile.write(eachTest) print("Intermittent: {0}, Last failed: {1}, Failed {2} times since " "{3}".format(testName, recentFailStr, numberFailure, firstFailedStr))
Main program. Expect script name plus inputs in the following order: this script name; 1. threshold: integer that will denote when a failed test will be declared an intermittent; 2. string that denotes the filename where our final dict structure will be stored; 3. string that denotes the beginning of a file containing failed tests info; 4. optional strings that denote the beginning of additional files containing failed tests info.
def main(argv): """ Main program. Expect script name plus inputs in the following order: - This script name 1. threshold: integer that will denote when a failed test will be declared an intermittent 2. string denote filename of where our final dict structure will be stored. 3. string that denote the beginning of a file containing failed tests info. 4. Optional strings that denote the beginning of a file containing failed tests info. @return: none """ global g_script_name global g_test_root_dir global g_threshold_failure global g_file_start global g_summary_dict_name global g_summary_dict_all global g_summary_dict_intermittents global g_summary_csv_filename if len(argv) < 5: print "Wrong call. Not enough arguments.\n" usage() sys.exit(1) else: # we may be in business g_threshold_failure = int(argv[1]) g_summary_dict_name = os.path.join(g_test_root_dir, argv[2]) g_summary_csv_filename = g_summary_dict_name+".csv" for ind in range(3, len(argv)): g_file_start.append(argv[ind]) init_intermittents_dict(g_summary_dict_all) init_intermittents_dict(g_summary_dict_intermittents) summarizeFailedRuns() extractPrintSaveIntermittens()
Display a short summary of the metrics.
def show(self): """Display a short summary of the metrics.""" if self._metric_json==None: print("WARNING: Model metrics cannot be calculated and metric_json is empty due to the absence of the response column in your dataset.") return metric_type = self._metric_json['__meta']['schema_type'] types_w_glm = ['ModelMetricsRegressionGLM', 'ModelMetricsBinomialGLM'] types_w_clustering = ['ModelMetricsClustering'] types_w_mult = ['ModelMetricsMultinomial'] types_w_ord = ['ModelMetricsOrdinal'] types_w_bin = ['ModelMetricsBinomial', 'ModelMetricsBinomialGLM'] types_w_r2 = ['ModelMetricsRegressionGLM'] types_w_mean_residual_deviance = ['ModelMetricsRegressionGLM', 'ModelMetricsRegression'] types_w_mean_absolute_error = ['ModelMetricsRegressionGLM', 'ModelMetricsRegression'] types_w_logloss = types_w_bin + types_w_mult+types_w_ord types_w_dim = ["ModelMetricsGLRM"] types_w_anomaly = ['ModelMetricsAnomaly'] print() print(metric_type + ": " + self._algo) reported_on = "** Reported on {} data. **" if self._on_train: print(reported_on.format("train")) elif self._on_valid: print(reported_on.format("validation")) elif self._on_xval: print(reported_on.format("cross-validation")) else: print(reported_on.format("test")) print() if metric_type not in types_w_anomaly: print("MSE: " + str(self.mse())) print("RMSE: " + str(self.rmse())) if metric_type in types_w_mean_absolute_error: print("MAE: " + str(self.mae())) print("RMSLE: " + str(self.rmsle())) if metric_type in types_w_r2: print("R^2: " + str(self.r2())) if metric_type in types_w_mean_residual_deviance: print("Mean Residual Deviance: " + str(self.mean_residual_deviance())) if metric_type in types_w_logloss: print("LogLoss: " + str(self.logloss())) if metric_type == 'ModelMetricsBinomial': # second element for first threshold is the actual mean per class error print("Mean Per-Class Error: %s" % self.mean_per_class_error()[0][1]) if metric_type == 'ModelMetricsMultinomial' or metric_type == 'ModelMetricsOrdinal': print("Mean Per-Class Error: " + str(self.mean_per_class_error())) if metric_type in types_w_glm: print("Null degrees of freedom: " + str(self.null_degrees_of_freedom())) print("Residual degrees of freedom: " + str(self.residual_degrees_of_freedom())) print("Null deviance: " + str(self.null_deviance())) print("Residual deviance: " + str(self.residual_deviance())) print("AIC: " + str(self.aic())) if metric_type in types_w_bin: print("AUC: " + str(self.auc())) print("pr_auc: " + str(self.pr_auc())) print("Gini: " + str(self.gini())) self.confusion_matrix().show() self._metric_json["max_criteria_and_metric_scores"].show() if self.gains_lift(): print(self.gains_lift()) if metric_type in types_w_anomaly: print("Anomaly Score: " + str(self.mean_score())) print("Normalized Anomaly Score: " + str(self.mean_normalized_score())) if (metric_type in types_w_mult) or (metric_type in types_w_ord): self.confusion_matrix().show() self.hit_ratio_table().show() if metric_type in types_w_clustering: print("Total Within Cluster Sum of Square Error: " + str(self.tot_withinss())) print("Total Sum of Square Error to Grand Mean: " + str(self.totss())) print("Between Cluster Sum of Square Error: " + str(self.betweenss())) self._metric_json['centroid_stats'].show() if metric_type in types_w_dim: print("Sum of Squared Error (Numeric): " + str(self.num_err())) print("Misclassification Error (Categorical): " + str(self.cat_err())) if self.custom_metric_name(): print("{}: {}".format(self.custom_metric_name(), self.custom_metric_value()))
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used. :returns: mean per class error.
def mean_per_class_error(self, thresholds=None): """ :param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used. :returns: mean per class error. """ return [[x[0], 1 - x[1]] for x in self.metric("mean_per_class_accuracy", thresholds=thresholds)]
:param str metric: The desired metric. :param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used. :returns: The set of metrics for the list of thresholds.
def metric(self, metric, thresholds=None): """ :param str metric: The desired metric. :param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used. :returns: The set of metrics for the list of thresholds. """ assert_is_type(thresholds, None, [numeric]) if not thresholds: thresholds = [self.find_threshold_by_max_metric(metric)] thresh2d = self._metric_json['thresholds_and_metric_scores'] metrics = [] for t in thresholds: idx = self.find_idx_by_threshold(t) metrics.append([t, thresh2d[metric][idx]]) return metrics
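A minimal usage sketch (hedged: `model` and `test` are hypothetical names for an already-trained binomial model and a test frame; `perf` is the H2OBinomialModelMetrics object this method is bound to):

perf = model.model_performance(test)
# Metric values at explicitly chosen thresholds -- returns a list of [threshold, value] pairs.
print(perf.metric("f1", thresholds=[0.3, 0.5]))
# With thresholds=None, the single threshold that maximizes the metric is used.
print(perf.metric("f1"))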
Produce the desired metric plot.
def plot(self, type="roc", server=False): """ Produce the desired metric plot. :param type: the type of metric plot (currently, only ROC supported). :param server: if True, generate plot inline using matplotlib's "Agg" backend. :returns: None """ # TODO: add more types (i.e. cutoffs) assert_is_type(type, "roc") # check for matplotlib. exit if absent. try: imp.find_module('matplotlib') import matplotlib if server: matplotlib.use('Agg', warn=False) import matplotlib.pyplot as plt except ImportError: print("matplotlib is required for this function!") return if type == "roc": plt.xlabel('False Positive Rate (FPR)') plt.ylabel('True Positive Rate (TPR)') plt.title('ROC Curve') plt.text(0.5, 0.5, r'AUC={0:.4f}'.format(self._metric_json["AUC"])) plt.plot(self.fprs, self.tprs, 'b--') plt.axis([0, 1, 0, 1]) if not server: plt.show()
Get the confusion matrix for the specified metric
def confusion_matrix(self, metrics=None, thresholds=None): """ Get the confusion matrix for the specified metric :param metrics: A string (or list of strings) among metrics listed in :const:`max_metrics`. Defaults to 'f1'. :param thresholds: A value (or list of values) between 0 and 1. :returns: a list of ConfusionMatrix objects (if there are more than one to return), or a single ConfusionMatrix (if there is only one). """ # make lists out of metrics and thresholds arguments if metrics is None and thresholds is None: metrics = ['f1'] if isinstance(metrics, list): metrics_list = metrics elif metrics is None: metrics_list = [] else: metrics_list = [metrics] if isinstance(thresholds, list): thresholds_list = thresholds elif thresholds is None: thresholds_list = [] else: thresholds_list = [thresholds] # error check the metrics_list and thresholds_list assert_is_type(thresholds_list, [numeric]) assert_satisfies(thresholds_list, all(0 <= t <= 1 for t in thresholds_list)) if not all(m.lower() in H2OBinomialModelMetrics.max_metrics for m in metrics_list): raise ValueError("The only allowable metrics are {}", ', '.join(H2OBinomialModelMetrics.max_metrics)) # make one big list that combines the thresholds and metric-thresholds metrics_thresholds = [self.find_threshold_by_max_metric(m) for m in metrics_list] for mt in metrics_thresholds: thresholds_list.append(mt) first_metrics_thresholds_offset = len(thresholds_list) - len(metrics_thresholds) thresh2d = self._metric_json['thresholds_and_metric_scores'] actual_thresholds = [float(e[0]) for i, e in enumerate(thresh2d.cell_values)] cms = [] for i, t in enumerate(thresholds_list): idx = self.find_idx_by_threshold(t) row = thresh2d.cell_values[idx] tns = row[11] fns = row[12] fps = row[13] tps = row[14] p = tps + fns n = tns + fps c0 = n - fps c1 = p - tps if t in metrics_thresholds: m = metrics_list[i - first_metrics_thresholds_offset] table_header = "Confusion Matrix (Act/Pred) for max {} @ threshold = {}".format(m, actual_thresholds[idx]) else: table_header = "Confusion Matrix (Act/Pred) @ threshold = {}".format(actual_thresholds[idx]) cms.append(ConfusionMatrix(cm=[[c0, fps], [c1, tps]], domains=self._metric_json['domain'], table_header=table_header)) if len(cms) == 1: return cms[0] else: return cms
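A brief usage sketch under the same assumptions as above (`perf` is a binomial metrics object; the metric names must appear in `H2OBinomialModelMetrics.max_metrics`):

# Confusion matrix at the threshold that maximizes F1 (the default).
perf.confusion_matrix().show()

# Ask for several at once: a list of ConfusionMatrix objects is returned,
# one per requested metric and one per explicit threshold.
for cm in perf.confusion_matrix(metrics=["f1", "f2"], thresholds=[0.4]):
    cm.show()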
:param metric: A string among the metrics listed in :const:`max_metrics`. :returns: the threshold at which the given metric is maximal.
def find_threshold_by_max_metric(self, metric): """ :param metric: A string among the metrics listed in :const:`max_metrics`. :returns: the threshold at which the given metric is maximal. """ crit2d = self._metric_json['max_criteria_and_metric_scores'] for e in crit2d.cell_values: if e[0] == "max " + metric.lower(): return e[1] raise ValueError("No metric " + str(metric.lower()))
Retrieve the index in this metric's threshold list at which the given threshold is located.
def find_idx_by_threshold(self, threshold): """ Retrieve the index in this metric's threshold list at which the given threshold is located. :param threshold: Find the index of this input threshold. :returns: the index :raises ValueError: if no such index can be found. """ assert_is_type(threshold, numeric) thresh2d = self._metric_json['thresholds_and_metric_scores'] for i, e in enumerate(thresh2d.cell_values): t = float(e[0]) if abs(t - threshold) < 1e-8 * max(t, threshold): return i if 0 <= threshold <= 1: thresholds = [float(e[0]) for i, e in enumerate(thresh2d.cell_values)] threshold_diffs = [abs(t - threshold) for t in thresholds] closest_idx = threshold_diffs.index(min(threshold_diffs)) closest_threshold = thresholds[closest_idx] print("Could not find exact threshold {0}; using closest threshold found {1}." .format(threshold, closest_threshold)) return closest_idx raise ValueError("Threshold must be between 0 and 1, but got {0} ".format(threshold))
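The two helpers above are typically used together; a small sketch (again assuming `perf` is a binomial metrics object):

best_t = perf.find_threshold_by_max_metric("f1")   # threshold at which F1 peaks
idx = perf.find_idx_by_threshold(best_t)           # row index of that threshold in thresholds_and_metric_scores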
Generate C# declaration file for a schema.
def generate_schema(class_name, schema): """ Generate C# declaration file for a schema. """ has_map = False for field in schema["fields"]: if field["type"].startswith("Map"): has_map = True superclass = schema["superclass"] if superclass == "Iced": superclass = "Object" yield "/**" yield " * This file is auto-generated by h2o-3/h2o-bindings/bin/gen_csharp.py" yield " * Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)" yield " */" yield "namespace ai.h2o" yield "{" yield " using System;" yield " using System.Collections.Generic;" if has_map else None yield "" yield " public class {name}: {super} {{".format(name=class_name, super=superclass) for field in schema["fields"]: if field["name"] == "__meta": continue csharp_type = translate_type(field["type"], field["schema_name"]) yield " /// <summary>" yield bi.wrap(field["help"], " /// ") yield " /// </summary>" yield " public {type} {name} {{ get; set; }}".format(type=csharp_type, name=field["name"]) yield "" yield " }" yield "}"
Returns True if a deep water model can be built, or False otherwise.
def available(): """Returns True if a deep water model can be built, or False otherwise.""" builder_json = h2o.api("GET /3/ModelBuilders", data={"algo": "deepwater"}) visibility = builder_json["model_builders"]["deepwater"]["visibility"] if visibility == "Experimental": print("Cannot build a Deep Water model - no backend found.") return False else: return True
Grab the console output from Jenkins and save the content into a temp file (g_temp_filename). From the saved text file, we can grab the names of failed tests.
def get_console_out(url_string): """ Grab the console output from Jenkins and save the content into a temp file (g_temp_filename). From the saved text file, we can grab the names of failed tests. Parameters ---------- url_string : str contains information on the jenkins job whose console output we are interested in. It is in the context of resource_url/job/job_name/build_id/testReport/ :return: none """ full_command = 'curl ' + '"'+ url_string +'"'+ ' --user '+'"admin:admin"'+' > ' + g_temp_filename subprocess.call(full_command,shell=True)
This method will remove data from the summary text file and the dictionary file for tests that occur before the number of months specified by monthToKeep.
def trim_data_back_to(monthToKeep): """ This method will remove data from the summary text file and the dictionary file for tests that occurs before the number of months specified by monthToKeep. :param monthToKeep: :return: """ global g_failed_tests_info_dict current_time = time.time() # unit in seconds oldest_time_allowed = current_time - monthToKeep*30*24*3600 # in seconds clean_up_failed_test_dict(oldest_time_allowed) clean_up_summary_text(oldest_time_allowed)
Main program. Expect script name plus 11 inputs in the following order: - This script name 1. timestamp: time in s 2. jenkins_job_name (JOB_NAME) 3. build_id (BUILD_ID) 4. git hash (GIT_COMMIT) 5. node name (NODE_NAME) 6. unit test category (JUnit, PyUnit, RUnit, Hadoop) 7. Jenkins URL (JENKINS_URL) 8. Text file name where failure summaries are stored 9. Filename that stores all failed test info as a dictionary 10. duration (month) to keep data: data older than this input will be removed 11. Filename where daily failure data is stored in csv format
def main(argv): """ Main program. Expect script name plus 11 inputs in the following order: - This script name 1. timestamp: time in s 2. jenkins_job_name (JOB_NAME) 3. build_id (BUILD_ID) 4. git hash (GIT_COMMIT) 5. node name (NODE_NAME) 6. unit test category (JUnit, PyUnit, RUnit, Hadoop) 7. Jenkins URL (JENKINS_URL) 8. Text file name where failure summaries are stored 9. Filename that stores all failed test info as a dictionary 10. duration (month) to keep data: data older than this input will be removed 11. Filename where daily failure data is stored in csv format @return: none """ global g_script_name global g_test_root_dir global g_timestamp global g_job_name global g_build_id global g_git_hash global g_node_name global g_unit_test_type global g_jenkins_url global g_temp_filename global g_summary_text_filename # store failed test info in csv format global g_failed_tests_dict # store failed test info as a dictionary global g_resource_url global g_timestring global g_daily_failure_csv if len(argv) < 12: print("Wrong call. Not enough arguments.\n") usage() sys.exit(1) else: # we may be in business g_script_name = os.path.basename(argv[0]) # get name of script being run. g_timestamp = float(argv[1]) g_job_name = argv[2] g_build_id = argv[3] g_git_hash = argv[4] g_node_name = argv[5] g_unit_test_type = argv[6] g_jenkins_url = argv[7] localtz = time.tzname[0] dt = parser.parse(time.ctime(g_timestamp) + ' ' + localtz) g_timestring = dt.strftime("%a %b %d %H:%M:%S %Y %Z") g_temp_filename = os.path.join(g_test_root_dir, 'tempText') g_summary_text_filename = os.path.join(g_test_root_dir, argv[8]) g_failed_tests_dict = os.path.join(g_test_root_dir, argv[9]) monthToKeep = float(argv[10]) g_daily_failure_csv = os.path.join(g_test_root_dir, argv[11]) g_resource_url = '/'.join([g_jenkins_url, "job", g_job_name, g_build_id]) get_console_out(g_resource_url + "/#showFailuresLink/") # save remote console output in local directory extract_failed_tests_info() # grab the console text and store the failed tests/paths save_failed_tests_info() # save new failed test info into a file if monthToKeep > 0: trim_data_back_to(monthToKeep)
Entry point for the bindings module. It parses the command line arguments and verifies their correctness. :param language -- name of the target language (used to show the command-line description). :param output_dir -- folder where the bindings files will be generated. If the folder does not exist, it will be created. This folder is relative to ../src-gen/main/. The user may specify a different output dir through the commandline argument. :param clear_dir -- if True (default), the target folder will be cleared before any new files created in it.
def init(language, output_dir, clear_dir=True): """ Entry point for the bindings module. It parses the command line arguments and verifies their correctness. :param language -- name of the target language (used to show the command-line description). :param output_dir -- folder where the bindings files will be generated. If the folder does not exist, it will be created. This folder is relative to ../src-gen/main/. The user may specify a different output dir through the commandline argument. :param clear_dir -- if True (default), the target folder will be cleared before any new files created in it. """ if config["start_time"]: done() config["start_time"] = time.time() print("Generating %s bindings... " % language, end="") sys.stdout.flush() this_module_dir = os.path.dirname(os.path.realpath(__file__)) default_output_dir = os.path.abspath(this_module_dir + "/../src-gen/main/" + output_dir) # Parse command-line options parser = argparse.ArgumentParser( description=""" Generate %s REST API bindings (with docs) and write them to the filesystem. Must attach to a running H2O instance to query the interface.""" % language, ) parser.add_argument("-v", "--verbose", help="Verbose output", action="store_true") parser.add_argument("--usecloud", metavar="IP:PORT", default="localhost:54321", help="Address of an H2O server (defaults to http://localhost:54321/)") # Note: Output folder should be in build directory, however, Idea has problems to recognize them parser.add_argument("--dest", metavar="DIR", default=default_output_dir, help="Destination directory for generated bindings") args = parser.parse_args() # Post-process the options base_url = args.usecloud if not(base_url.startswith("http://") or base_url.startswith("https://")): base_url = "http://" + base_url if not(base_url.endswith("/")): base_url += "/" config["baseurl"] = base_url config["verbose"] = args.verbose config["destdir"] = os.path.abspath(args.dest) vprint("\n\n") # Attempt to create the output directory try: vprint("Output directory = " + config["destdir"]) os.makedirs(config["destdir"]) except OSError as e: if e.errno != errno.EEXIST: print("Cannot create directory " + config["destdir"]) print("Error %d: %s" % (e.errno, e.strerror)) sys.exit(6) # Clear the content of the output directory. Note: deleting the directory and then recreating it may be # faster, but it creates side-effects that we want to avoid (i.e. clears permissions on the folder). if clear_dir: filepath = "?" try: vprint("Deleting contents of the output directory...") for filename in os.listdir(config["destdir"]): filepath = os.path.join(config["destdir"], filename) if os.path.isdir(filepath): shutil.rmtree(filepath) else: os.unlink(filepath) except Exception as e: print("Unable to remove file %s: %r" % (filepath, e)) sys.exit(9) # Check that the provided server is accessible; then print its status (if in --verbose mode). json = _request_or_exit("/3/About") l1 = max(len(e["name"]) for e in json["entries"]) l2 = max(len(e["value"]) for e in json["entries"]) ll = max(29 + len(config["baseurl"]), l1 + l2 + 2) vprint("-" * ll) vprint("Connected to an H2O instance " + config["baseurl"] + "\n") for e in json["entries"]: vprint(e["name"] + ":" + " " * (1 + l1 - len(e["name"])) + e["value"]) vprint("-" * ll)
Print the provided string {msg}, but only when the --verbose option is on. :param msg String to print. :param pretty If on, then pprint() will be used instead of the regular print function.
def vprint(msg, pretty=False): """ Print the provided string {msg}, but only when the --verbose option is on. :param msg String to print. :param pretty If on, then pprint() will be used instead of the regular print function. """ if not config["verbose"]: return if pretty: pp(msg) else: print(msg)
Helper function that wraps msg to 120-chars page width. All lines (except maybe 1st) will be prefixed with string {indent}. First line is prefixed only if {indent_first} is True. :param msg: string to indent :param indent: string that will be used for indentation :param indent_first: if True then the first line will be indented as well, otherwise not
def wrap(msg, indent, indent_first=True): """ Helper function that wraps msg to 120-chars page width. All lines (except maybe 1st) will be prefixed with string {indent}. First line is prefixed only if {indent_first} is True. :param msg: string to indent :param indent: string that will be used for indentation :param indent_first: if True then the first line will be indented as well, otherwise not """ wrapper.width = 120 wrapper.initial_indent = indent wrapper.subsequent_indent = indent msg = wrapper.fill(msg) return msg if indent_first else msg[len(indent):]
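This is exactly how generate_schema() above formats help strings into C# doc comments. A tiny sketch (it relies on the module-level textwrap wrapper that wrap() mutates, so it only works inside the bindings module; the help text is made up):

help_text = "Number of folds for K-fold cross-validation (0 to disable, or >= 2)."
print(wrap(help_text, indent="    /// "))
# ->     /// Number of folds for K-fold cross-validation (0 to disable, or >= 2).
print(wrap(help_text, indent="    /// ", indent_first=False))
# -> same text, but the first (and here only) line carries no prefix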
Return the list of REST API endpoints. The data is enriched with the following fields: class_name: which back-end class handles this endpoint (the class is derived from the URL); ischema: input schema object (input_schema is the schema's name) oschema: output schema object (output_schema is the schema's name) algo: for special-cased calls (ModelBuilders/train and Grid/train) -- name of the ML algo requested input_params: list of all input parameters (first path parameters, then all the others). The parameters are given as objects, not just names. There is a flag "is_path_param" on each field. Additionally certain buggy/deprecated endpoints are removed. For Grid/train and ModelBuilders/train endpoints we fix the method name and parameters info (there is some mangling of those on the server side).
def endpoints(raw=False):
    """
    Return the list of REST API endpoints. The data is enriched with the following fields:
      class_name: which back-end class handles this endpoint (the class is derived from the URL);
      ischema: input schema object (input_schema is the schema's name)
      oschema: output schema object (output_schema is the schema's name)
      algo: for special-cased calls (ModelBuilders/train and Grid/train) -- name of the ML algo requested
      input_params: list of all input parameters (first path parameters, then all the others). The parameters are
          given as objects, not just names. There is a flag "is_path_param" on each field.
    Additionally certain buggy/deprecated endpoints are removed.
    For Grid/train and ModelBuilders/train endpoints we fix the method name and parameters info (there is some
    mangling of those on the server side).

    :param raw: if True, then the complete untouched response to .../endpoints is returned (including the metadata)
    """
    json = _request_or_exit("/3/Metadata/endpoints")
    if raw: return json

    schmap = schemas_map()
    apinames = {}  # Used for checking for api name duplicates
    assert "routes" in json, "Unexpected result from /3/Metadata/endpoints call"
    re_api_name = re.compile(r"^\w+$")

    def gen_rich_route():
        for e in json["routes"]:
            path = e["url_pattern"]
            method = e["handler_method"]
            apiname = e["api_name"]
            assert apiname not in apinames, "Duplicate api name %s (for %s and %s)" % (apiname, apinames[apiname], path)
            assert re_api_name.match(apiname), "Bad api name %s" % apiname
            apinames[apiname] = path

            # These redundant paths cause conflicts, remove them
            if path == "/3/NodePersistentStorage/categories/{category}/exists": continue
            if path == "/3/ModelMetrics/frames/{frame}/models/{model}": continue
            if path == "/3/ModelMetrics/frames/{frame}": continue
            if path == "/3/ModelMetrics/models/{model}": continue
            if path == "/3/ModelMetrics": continue
            if "AutoML" in path: continue  # Generation code doesn't know how to deal with defaults for complex objects yet
            if apiname.endswith("_deprecated"): continue

            # Resolve one name conflict
            if path == "/3/DKV": e["handler_method"] = "removeAll"

            # Find the class_name (first part of the URL after the version: "/3/About" => "About")
            mm = classname_pattern.match(path)
            assert mm, "Cannot determine class name in URL " + path
            e["class_name"] = mm.group(1)
            if e["class_name"].islower(): e["class_name"] = e["class_name"].capitalize()

            # Resolve input/output schemas into actual objects
            assert e["input_schema"] in schmap, "Encountered unknown schema %s in %s" % (e["input_schema"], path)
            assert e["output_schema"] in schmap, "Encountered unknown schema %s in %s" % (e["output_schema"], path)
            e["ischema"] = schmap[e["input_schema"]]
            e["oschema"] = schmap[e["output_schema"]]

            # For these special cases, the actual input schema is not the one reported by the endpoint, but the schema
            # of the 'parameters' field (which is fake).
            if (e["class_name"], method) in {("Grid", "train"), ("ModelBuilders", "train"),
                                             ("ModelBuilders", "validate_parameters")}:
                pieces = path.split("/")
                assert len(pieces) >= 4, "Expected to see algo name in the path: " + path
                e["algo"] = pieces[3]
                method = method + e["algo"].capitalize()  # e.g. trainGlm()
                e["handler_method"] = method
                for field in e["ischema"]["fields"]:
                    if field["name"] == "parameters":
                        e["input_schema"] = field["schema_name"]
                        e["ischema"] = schmap[e["input_schema"]]
                        break

            # Create the list of input_params (as objects, not just names)
            e["input_params"] = []
            for parm in e["path_params"]:
                # find the metadata for the field from the input schema:
                fields = [field for field in e["ischema"]["fields"] if field["name"] == parm]
                assert len(fields) == 1, \
                    "Failed to find parameter: %s for endpoint: %s in the input schema %s" \
                    % (parm, e["url_pattern"], e["ischema"]["name"])
                field = fields[0].copy()
                schema = field["schema_name"] or ""  # {schema} is null for primitive types
                ftype = field["type"]
                assert ftype == "string" or ftype == "int" or schema.endswith("KeyV3") or schema == "ColSpecifierV3", \
                    "Unexpected param %s of type %s (schema %s)" % (field["name"], ftype, schema)
                assert field["direction"] != "OUTPUT", "A path param %s cannot be of type OUTPUT" % field["name"]
                field["is_path_param"] = True
                field["required"] = True
                e["input_params"].append(field)
            for parm in e["ischema"]["fields"]:
                if parm["direction"] == "OUTPUT" or parm["name"] in e["path_params"]: continue
                field = parm.copy()
                field["is_path_param"] = False
                e["input_params"].append(field)
            yield e

    return list(gen_rich_route())
Return endpoints grouped by the class which handles them.
def endpoint_groups(): """Return endpoints, grouped by the class which handles them.""" groups = defaultdict(list) for e in endpoints(): groups[e["class_name"]].append(e) return groups
Return the list of H₂O schemas.
def schemas(raw=False): """ Return the list of H₂O schemas. :param raw: if True, then the complete response to .../schemas is returned (including the metadata) """ json = _request_or_exit("/3/Metadata/schemas") if raw: return json assert "schemas" in json, "Unexpected result from /3/Metadata/schemas call" # Simplify names of some horribly sounding enums pattern0 = re.compile(r"^\w+(V\d+)\D\w+$") pattern1 = re.compile(r"^(\w{3,})(\1)Model\1Parameters(\w+)$", re.IGNORECASE) pattern2 = re.compile(r"^(\w{3,})(\1)(\w+)$", re.IGNORECASE) def translate_name(name): if name is None: return if name.startswith("Apischemas3"): name = name[len("Apischemas3"):] if name.startswith("Apischemas4input"): name = name[len("Apischemas4input"):] if name.startswith("Apischemas4output"): name = name[len("Apischemas4output"):] if "V3" in name: name = name.replace("V3", "") + "V3" # force multiple "V3"s at the end if "V4" in name: name = name.replace("V4", "") + "V4" # force multiple "V4"s at the end if name == "CreateframerecipesSimpleCreateFrameRecipeResponseType": return "SimpleRecipeResponseType" assert not pattern0.match(name), "Bad schema name %s (version number in the middle)" % name mm = pattern1.match(name) or pattern2.match(name) if mm: return mm.group(2) + mm.group(3) return name for schema in json["schemas"]: schema["name"] = translate_name(schema["name"]) for field in schema["fields"]: field["schema_name"] = translate_name(field["schema_name"]) return json["schemas"]
Returns a dictionary of H₂O schemas indexed by their name.
def schemas_map(add_generics=False): """ Returns a dictionary of H₂O schemas, indexed by their name. """ m = {} for schema in schemas(): if schema["name"].startswith('AutoML'): continue # Generation code doesn't know how to deal with defaults for complex objects yet if schema["name"].startswith('UserFeedback'): continue # UserFeedback schema contains an AutoMLKeyV3 m[schema["name"]] = schema def find_field(fields, field_name): """Finds a field with the given `field_name` among the list of fields.""" for f in fields: if f["is_inherited"] and f["name"] == field_name: return f raise RuntimeError("Unable to find field %s" % (field_name)) # Add information about the generics. This is rather hacky at the moment. if add_generics: for base, generics in [ # Note: derived classes must come before base classes here ("SharedTreeModelV3", [("P", "ModelParametersSchemaV3"), ("O", "ModelOutputSchemaV3")]), ("ModelSchemaV3", [("P", "ModelParametersSchemaV3"), ("O", "ModelOutputSchemaV3")]), ("SharedTreeV3", [("P", "ModelParametersSchemaV3")]), ("ModelBuilderSchema", [("P", "ModelParametersSchemaV3")]), ]: # Write the generic information about the base class schema = m[base] schema["generics"] = generics generic_map = {long_type: gen_type for gen_type, long_type in generics} generic_index = {geninfo[0]: i for i, geninfo in enumerate(generics)} mapped_fields = {} for field in schema["fields"]: ftype = field["schema_name"] if ftype in generic_map: gen_type = generic_map[ftype] field["schema_name"] = gen_type mapped_fields[field["name"]] = generic_index[gen_type] assert len(mapped_fields) == len(generics), ( "Unable to find generic types %r in base class %s. Schema: %r" % (generic_map, base, {f["name"]: f["schema_name"] for f in schema["fields"]})) # Find all the derived classes, and fill in their derived information for schema_name, schema in m.items(): if schema["superclass"] == base: base_generics = [None] * len(generics) for mapped_field_name, generic_index in mapped_fields.items(): field = find_field(schema["fields"], mapped_field_name) base_generics[generic_index] = field["schema_name"] assert None not in base_generics, ( "Unable to find mapped super types in schema %s: base = %r, map = %r" % (schema_name, base_generics, mapped_fields)) schema["super_generics"] = base_generics return m
Return the dictionary of H₂O enums, retrieved from data in schemas(). For each entry in the dictionary its key is the name of the enum, and the value is the set of all enum values.
def enums(): """ Return the dictionary of H₂O enums, retrieved from data in schemas(). For each entry in the dictionary its key is the name of the enum, and the value is the set of all enum values. """ enumset = defaultdict(set) for schema in schemas(): for field in schema["fields"]: if field["type"] == "enum": enumset[field["schema_name"]].update(field["values"]) return enumset
Writes content to the given file. The file's directory will be created if needed. :param filename: name of the output file, relative to the "destination folder" provided by the user :param content: iterable (line-by-line) that should be written to the file. Either a list or a generator. Each line will be appended with a "\n". Lines containing None will be skipped.
def write_to_file(filename, content): """ Writes content to the given file. The file's directory will be created if needed. :param filename: name of the output file, relative to the "destination folder" provided by the user :param content: iterable (line-by-line) that should be written to the file. Either a list or a generator. Each line will be appended with a "\n". Lines containing None will be skipped. """ if not config["destdir"]: print("{destdir} config variable not present. Did you forget to run init()?") sys.exit(8) abs_filename = os.path.abspath(config["destdir"] + "/" + filename) abs_filepath = os.path.dirname(abs_filename) if not os.path.exists(abs_filepath): try: os.makedirs(abs_filepath) except OSError as e: print("Cannot create directory " + abs_filepath) print("Error %d: %s" % (e.errno, e.strerror)) sys.exit(6) with codecs.open(abs_filename, "w", "utf-8") as out: if isinstance(content, str): content = [content] for line in content: if line is not None: out.write(line) out.write("\n")
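Putting the pieces together: a generator such as generate_schema() above pairs naturally with write_to_file(), which skips the None lines the generator may yield. A sketch of how a C# bindings generator might drive the module (the output file layout and the `bi` alias for this bindings module are assumptions):

bi.init("C#", "csharp")                 # parse args, connect to H2O, prepare the output dir
for name, schema in bi.schemas_map().items():
    bi.write_to_file("ai/h2o/{}.cs".format(name), generate_schema(name, schema))
bi.vprint("All schemas written.")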
Internal function: retrieve and return json data from the provided endpoint or die with an error message if the URL cannot be retrieved.
def _request_or_exit(endpoint): """ Internal function: retrieve and return json data from the provided endpoint, or die with an error message if the URL cannot be retrieved. """ if endpoint[0] == "/": endpoint = endpoint[1:] if endpoint in requests_memo: return requests_memo[endpoint] if not config["baseurl"]: print("Configuration not present. Did you forget to run init()?") sys.exit(8) url = config["baseurl"] + endpoint try: resp = requests.get(url) except requests.exceptions.InvalidURL: print("Invalid url address of an H2O server: " + config["baseurl"]) sys.exit(2) except requests.ConnectionError: print("Cannot connect to the server " + config["baseurl"]) print("Please check that you have an H2O instance running, and its address is passed in " + "the --usecloud argument.") sys.exit(3) except requests.Timeout: print("Request timeout when fetching " + url + ". Check your internet connection and try again.") sys.exit(4) if resp.status_code == 200: try: json = resp.json() except ValueError: print("Invalid JSON response from " + url + " :\n") print(resp.text) sys.exit(5) if "__meta" not in json or "schema_type" not in json["__meta"]: print("Unexpected JSON returned from " + url + ":") pp(json) sys.exit(6) if json["__meta"]["schema_type"] == "H2OError": print("Server returned an error message for %s:" % url) print(json["msg"]) pp(json) sys.exit(7) requests_memo[endpoint] = json return json else: print("[HTTP %d] Cannot retrieve %s" % (resp.status_code, url)) sys.exit(1)
Creates a new Amazon S3 client internally with specified credentials. There are no validations done to the credentials. Incorrect credentials are thus revealed with first S3 import call. secretKeyId Amazon S3 Secret Key ID (provided by Amazon) secretAccessKey Amazon S3 Secret Access Key (provided by Amazon)
def set_s3_credentials(secret_key_id, secret_access_key): """Creates a new Amazon S3 client internally with specified credentials. There are no validations done to the credentials. Incorrect credentials are thus revealed with first S3 import call. secretKeyId Amazon S3 Secret Key ID (provided by Amazon) secretAccessKey Amazon S3 Secret Access Key (provided by Amazon) """ if secret_key_id is None: raise H2OValueError("Secret key ID must be specified") if secret_access_key is None: raise H2OValueError("Secret access key must be specified") if not secret_key_id: raise H2OValueError("Secret key ID must not be empty") if not secret_access_key: raise H2OValueError("Secret access key must not be empty") params = {"secret_key_id": secret_key_id, "secret_access_key": secret_access_key} h2o.api(endpoint="POST /3/PersistS3", data=params) print("Credentials successfully set.")
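A hedged usage sketch (the key strings and the bucket path are placeholders, not real values):

import h2o

h2o.init()
# The call only registers the credentials; a bad key pair will surface on the first S3 import.
set_s3_credentials("MY_SECRET_KEY_ID", "MY_SECRET_ACCESS_KEY")
frame = h2o.import_file("s3://my-bucket/path/to/data.csv")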
Return the resulting H2OFrame containing the result(s) of aggregation(s) of the group by.
def get_frame(self): """ Return the resulting H2OFrame containing the result(s) of aggregation(s) of the group by. The number of rows denote the number of groups generated by the group by operation. The number of columns depend on the number of aggregations performed, the number of columns specified in the col parameter. Generally, expect the number of columns to be (len(col) of aggregation 0 + len(col) of aggregation 1 +...+ len(col) of aggregation n) x (number of groups of the GroupBy object) +1 (for group-by group names). Note: - the count aggregation only generates one column; - if col is a str or int, len(col) = 1. """ if self._res is None: aggs = [] cols_operated = [] for k in self._aggs: aggs += (self._aggs[k]) col_used = self._aggs[k][1] if col_used not in cols_operated: cols_operated.append(col_used) for cind in cols_operated: if cind not in self._by: self._check_string_columns(cind) self._res = h2o.H2OFrame._expr(expr=ExprNode("GB", self._fr, self._by, *aggs)) return self._res
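A short sketch of the intended call pattern (column names are illustrative; it assumes `df` is an H2OFrame and that GroupBy aggregation methods return the GroupBy object so calls can chain, as in the usual H2O API):

grouped = df.group_by(by=["RACE"])        # returns a GroupBy object
grouped.count().mean(["AGE", "PSA"])      # queue two aggregations
result = grouped.get_frame()              # one row per RACE group; roughly: RACE, nrow, mean_AGE, mean_PSA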
This is a helper function to order all schemas according to their usage. For example, if schema A uses schemas B and C, then they should be reordered as {B, C, A}. :param schema: schema object that we are processing right now :param ordered_schemas: an OrderedDict of schemas that were already encountered. This is also the "output" variable -- all schemas/enums that are needed will be recorded here in the correct order of their supposed declaration. :param schemas_map: dictionary (schemaname => schemaobject)
def add_schema_to_dependency_array(schema, ordered_schemas, schemas_map): """ This is a helper function to order all schemas according to their usage. For example, if schema A uses schemas B and C, then they should be reordered as {B, C, A}. :param schema: schema object that we are processing right now :param ordered_schemas: an OrderedDict of schemas that were already encountered. This is also the "output" variable -- all schemas/enums that are needed will be recorded here in the correct order of their supposed declaration. :param schemas_map: dictionary(schemaname => schemaobject) """ ordered_schemas[schema["name"]] = schema for field in schema["fields"]: field_schema_name = field["schema_name"] if field_schema_name is None: continue if field_schema_name in ordered_schemas: continue if field["type"].startswith("enum"): ordered_schemas[field_schema_name] = field["values"] else: field_schema = schemas_map[field_schema_name] if field_schema["name"] not in ordered_schemas: add_schema_to_dependency_array(field_schema, ordered_schemas, schemas_map)
Set site domain and name.
def update_site_forward(apps, schema_editor): """Set site domain and name.""" Site = apps.get_model("sites", "Site") Site.objects.update_or_create( id=settings.SITE_ID, defaults={ "domain": "{{cookiecutter.domain_name}}", "name": "{{cookiecutter.project_name}}", }, )
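In a Django data migration this forward function is normally wired up through migrations.RunPython; a minimal sketch (update_site_backward and the dependency name are illustrative, not taken from this document):

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [("sites", "0002_alter_domain_unique")]

    operations = [
        # update_site_backward would undo the change when migrating backwards.
        migrations.RunPython(update_site_forward, update_site_backward),
    ]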
Example: opting out for 50 symbol-long, [a-z][A-Z][0-9] string would yield log_2((26+26+50)^50) ~= 334 bit strength.
def generate_random_string( length, using_digits=False, using_ascii_letters=False, using_punctuation=False ): """ Example: opting out for 50 symbol-long, [a-z][A-Z][0-9] string would yield log_2((26+26+50)^50) ~= 334 bit strength. """ if not using_sysrandom: return None symbols = [] if using_digits: symbols += string.digits if using_ascii_letters: symbols += string.ascii_letters if using_punctuation: all_punctuation = set(string.punctuation) # These symbols can cause issues in environment variables unsuitable = {"'", '"', "\\", "$"} suitable = all_punctuation.difference(unsuitable) symbols += "".join(suitable) return "".join([random.choice(symbols) for _ in range(length)])
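Each of the `length` characters is drawn independently from a pool of N symbols, so the strength of the result is log2(N^length) = length * log2(N) bits. A sketch of a call and the matching arithmetic for the digits-plus-letters pool (note the function returns None when the system RNG is unavailable):

import math

secret = generate_random_string(50, using_digits=True, using_ascii_letters=True)
# pool size N = 10 digits + 52 letters = 62, so strength ~= 50 * log2(62) ~= 297.6 bits
print(50 * math.log2(62))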
:param self: bot :param text: text of message :param user_ids: list of user_ids for creating group or one user_id for send to one person :param thread_id: thread_id
def send_message(self, text, user_ids, thread_id=None): """ :param self: bot :param text: text of message :param user_ids: list of user_ids for creating group or one user_id for send to one person :param thread_id: thread_id """ user_ids = _get_user_ids(self, user_ids) if not isinstance(text, str) and isinstance(user_ids, (list, str)): self.logger.error('Text must be an string, user_ids must be an list or string') return False if self.reached_limit('messages'): self.logger.info("Out of messages for today.") return False self.delay('message') urls = self.extract_urls(text) item_type = 'link' if urls else 'text' if self.api.send_direct_item( item_type, user_ids, text=text, thread=thread_id, urls=urls ): self.total['messages'] += 1 return True self.logger.info("Message to {user_ids} wasn't sent".format(user_ids=user_ids)) return False
:param media_id: :param self: bot :param text: text of message :param user_ids: list of user_ids for creating group or one user_id for send to one person :param thread_id: thread_id
def send_media(self, media_id, user_ids, text='', thread_id=None): """ :param media_id: :param self: bot :param text: text of message :param user_ids: list of user_ids for creating group or one user_id for send to one person :param thread_id: thread_id """ user_ids = _get_user_ids(self, user_ids) if not isinstance(text, str) and not isinstance(user_ids, (list, str)): self.logger.error('Text must be an string, user_ids must be an list or string') return False if self.reached_limit('messages'): self.logger.info("Out of messages for today.") return False media = self.get_media_info(media_id) media = media[0] if isinstance(media, list) else media self.delay('message') if self.api.send_direct_item( 'media_share', user_ids, text=text, thread=thread_id, media_type=media.get('media_type'), media_id=media.get('id') ): self.total['messages'] += 1 return True self.logger.info("Message to {user_ids} wasn't sent".format(user_ids=user_ids)) return False
:param hashtag: hashtag :param self: bot :param text: text of message :param user_ids: list of user_ids for creating group or one user_id for send to one person :param thread_id: thread_id
def send_hashtag(self, hashtag, user_ids, text='', thread_id=None): """ :param hashtag: hashtag :param self: bot :param text: text of message :param user_ids: list of user_ids for creating group or one user_id for send to one person :param thread_id: thread_id """ user_ids = _get_user_ids(self, user_ids) if not isinstance(text, str) and not isinstance(user_ids, (list, str)): self.logger.error('Text must be an string, user_ids must be an list or string') return False if self.reached_limit('messages'): self.logger.info("Out of messages for today.") return False self.delay('message') if self.api.send_direct_item( 'hashtag', user_ids, text=text, thread=thread_id, hashtag=hashtag ): self.total['messages'] += 1 return True self.logger.info("Message to {user_ids} wasn't sent".format(user_ids=user_ids)) return False
:param profile_user_id: profile_id :param self: bot :param text: text of message :param user_ids: list of user_ids for creating group or one user_id for send to one person :param thread_id: thread_id
def send_profile(self, profile_user_id, user_ids, text='', thread_id=None): """ :param profile_user_id: profile_id :param self: bot :param text: text of message :param user_ids: list of user_ids for creating group or one user_id for send to one person :param thread_id: thread_id """ profile_id = self.convert_to_user_id(profile_user_id) user_ids = _get_user_ids(self, user_ids) if not isinstance(text, str) and not isinstance(user_ids, (list, str)): self.logger.error('Text must be an string, user_ids must be an list or string') return False if self.reached_limit('messages'): self.logger.info("Out of messages for today.") return False self.delay('message') if self.api.send_direct_item( 'profile', user_ids, text=text, thread=thread_id, profile_user_id=profile_id ): self.total['messages'] += 1 return True self.logger.info("Message to {user_ids} wasn't sent".format(user_ids=user_ids)) return False
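All four helpers above share the same shape: validate inputs and daily limits, wait via self.delay('message'), then hand off to self.api.send_direct_item with a different item_type. A hedged usage sketch (bot is assumed to be a logged-in instance of the class these methods are bound to; user names and the media id are placeholders):

bot.send_message("hello there", ["user_a", "user_b"])          # two ids -> a group thread
bot.send_media(some_media_id, ["user_a"], text="look at this")  # some_media_id obtained elsewhere
bot.send_hashtag("python", ["user_a"])
bot.send_profile("some_profile", ["user_a"])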
Adds the default_data to data and dumps it to a json.
def json_data(self, data=None): """Adds the default_data to data and dumps it to a json.""" if data is None: data = {} data.update(self.default_data) return json.dumps(data)
Input: user_ids - a list of user_id Output: dictionary: user_id - stories data. Basically, for each user output the same as after self.get_user_reel
def get_users_reel(self, user_ids): """ Input: user_ids - a list of user_id Output: dictionary: user_id - stories data. Basically, for each user output the same as after self.get_user_reel """ url = 'feed/reels_media/' res = self.send_request( url, post=self.json_data({ 'user_ids': [str(x) for x in user_ids] }) ) if res: if "reels" in self.last_json: return self.last_json["reels"] return [] return []
Input - the list of reels jsons. They can be acquired by using get_users_reel() or get_user_reel() methods
def see_reels(self, reels): """ Input - the list of reels jsons They can be aquired by using get_users_reel() or get_user_reel() methods """ if not isinstance(reels, list): reels = [reels] story_seen = {} now = int(time.time()) for i, story in enumerate(sorted(reels, key=lambda m: m['taken_at'], reverse=True)): story_seen_at = now - min(i + 1 + random.randint(0, 2), max(0, now - story['taken_at'])) story_seen[ '{0!s}_{1!s}'.format(story['id'], story['user']['pk']) ] = [ '{0!s}_{1!s}'.format(story['taken_at'], story_seen_at) ] data = self.json_data({ 'reels': story_seen, '_csrftoken': self.token, '_uuid': self.uuid, '_uid': self.user_id }) data = self.generate_signature(data) return self.session.post('https://i.instagram.com/api/v2/' + 'media/seen/', data=data).ok
Comments last user_id's medias
def comment_user(self, user_id, amount=None): """ Comments last user_id's medias """ if not self.check_user(user_id, filter_closed_acc=True): return False self.logger.info("Going to comment user_%s's feed:" % user_id) user_id = self.convert_to_user_id(user_id) medias = self.get_user_medias(user_id, is_comment=True) if not medias: self.logger.info( "None medias received: account is closed or medias have been filtered.") return False return self.comment_medias(medias[:amount])
Sleep only if elapsed time since self.last[key] is less than self.delays[key].
def delay(self, key): """Sleep only if elapsed time since `self.last[key]` is less than `self.delays[key]`.""" last_action, target_delay = self.last[key], self.delays[key] elapsed_time = time.time() - last_action if elapsed_time < target_delay: t_remaining = target_delay - elapsed_time time.sleep(t_remaining * random.uniform(0.25, 1.25)) self.last[key] = time.time()
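The method assumes two dicts on the instance: self.delays maps an action key to the minimum number of seconds between actions, and self.last records when each action last ran. A self-contained sketch of the same throttling pattern outside the bot class:

import random
import time


class Throttle(object):
    def __init__(self, delays):
        self.delays = delays                        # e.g. {"message": 60}
        self.last = {key: 0 for key in delays}      # 0 means "never ran"

    def delay(self, key):
        elapsed = time.time() - self.last[key]
        if elapsed < self.delays[key]:
            remaining = self.delays[key] - elapsed
            time.sleep(remaining * random.uniform(0.25, 1.25))
        self.last[key] = time.time()


throttle = Throttle({"message": 60})
throttle.delay("message")   # first call returns immediately (elapsed time is huge)
throttle.delay("message")   # second call sleeps roughly a minute, with jitter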
Returns login and password stored in `secret.txt`.
def get_credentials(username=None): """Returns login and password stored in `secret.txt`.""" while not check_secret(): pass while True: try: with open(SECRET_FILE, "r") as f: lines = [line.strip().split(":", 2) for line in f.readlines()] except ValueError: msg = 'Problem with opening `{}`, will remove the file.' raise Exception(msg.format(SECRET_FILE)) if username is not None: for login, password in lines: if login == username.strip(): return login, password print("Which account do you want to use? (Type number)") for ind, (login, password) in enumerate(lines): print("%d: %s" % (ind + 1, login)) print("%d: %s" % (0, "add another account.")) print("%d: %s" % (-1, "delete all accounts.")) try: ind = int(sys.stdin.readline()) if ind == 0: add_credentials() continue elif ind == -1: delete_credentials() check_secret() continue elif 0 <= ind - 1 < len(lines): return lines[ind - 1] except Exception: print("Wrong input, enter the number of the account to use.")
Likes last user_id's medias
def like_user(self, user_id, amount=None, filtration=True): """ Likes last user_id's medias """ if filtration: if not self.check_user(user_id): return False self.logger.info("Liking user_%s's feed:" % user_id) user_id = self.convert_to_user_id(user_id) medias = self.get_user_medias(user_id, filtration=filtration) if not medias: self.logger.info( "None medias received: account is closed or medias have been filtered.") return False return self.like_medias(medias[:amount])
Likes last medias from hashtag
def like_hashtag(self, hashtag, amount=None): """ Likes last medias from hashtag """ self.logger.info("Going to like media with hashtag #%s." % hashtag) medias = self.get_total_hashtag_medias(hashtag, amount) return self.like_medias(medias)
Filter bot from real users.
def check_not_bot(self, user_id): """ Filter bot from real users. """ self.small_delay() user_id = self.convert_to_user_id(user_id) if not user_id: return False if user_id in self.whitelist: return True if user_id in self.blacklist: return False user_info = self.get_user_info(user_id) if not user_info: return True # closed acc skipped = self.skipped_file if "following_count" in user_info and user_info["following_count"] > self.max_following_to_block: msg = 'following_count > bot.max_following_to_block, skipping!' self.console_print(msg, 'red') skipped.append(user_id) return False # massfollower if search_stop_words_in_user(self, user_info): msg = '`bot.search_stop_words_in_user` found in user, skipping!' skipped.append(user_id) return False return True
Reads list from file. One line - one item. Returns the list of file items.
def read_list_from_file(file_path, quiet=False): """ Reads list from file. One line - one item. Returns the list of file items. """ try: if not check_if_file_exists(file_path, quiet=quiet): return [] with codecs.open(file_path, "r", encoding="utf-8") as f: content = f.readlines() if sys.version_info[0] < 3: content = [str(item.encode('utf8')) for item in content] content = [item.strip() for item in content] return [i for i in content if i] except Exception as exception: print(str(exception)) return []
Gets tweets for a given user via the Twitter frontend API.
def get_tweets(user, pages=25): """Gets tweets for a given user, via the Twitter frontend API.""" url = f'https://twitter.com/i/profiles/show/{user}/timeline/tweets?include_available_features=1&include_entities=1&include_new_items_bar=true' headers = { 'Accept': 'application/json, text/javascript, */*; q=0.01', 'Referer': f'https://twitter.com/{user}', 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8', 'X-Twitter-Active-User': 'yes', 'X-Requested-With': 'XMLHttpRequest', 'Accept-Language': 'en-US' } def gen_tweets(pages): r = session.get(url, headers=headers) while pages > 0: try: html = HTML(html=r.json()['items_html'], url='bunk', default_encoding='utf-8') except KeyError: raise ValueError( f'Oops! Either "{user}" does not exist or is private.') comma = "," dot = "." tweets = [] for tweet in html.find('html > .stream-item'): # 10~11 html elements have `.stream-item` class and also their `data-item-type` is `tweet` # but their content doesn't look like a tweet's content try: text = tweet.find('.tweet-text')[0].full_text except IndexError: # issue #50 continue tweet_id = tweet.find('.js-permalink')[0].attrs['data-conversation-id'] time = datetime.fromtimestamp(int(tweet.find('._timestamp')[0].attrs['data-time-ms']) / 1000.0) interactions = [ x.text for x in tweet.find('.ProfileTweet-actionCount') ] replies = int( interactions[0].split(' ')[0].replace(comma, '').replace(dot, '') or interactions[3] ) retweets = int( interactions[1].split(' ')[0].replace(comma, '').replace(dot, '') or interactions[4] or interactions[5] ) likes = int( interactions[2].split(' ')[0].replace(comma, '').replace(dot, '') or interactions[6] or interactions[7] ) hashtags = [ hashtag_node.full_text for hashtag_node in tweet.find('.twitter-hashtag') ] urls = [ url_node.attrs['data-expanded-url'] for url_node in tweet.find('a.twitter-timeline-link:not(.u-hidden)') ] photos = [ photo_node.attrs['data-image-url'] for photo_node in tweet.find('.AdaptiveMedia-photoContainer') ] videos = [] video_nodes = tweet.find(".PlayableMedia-player") for node in video_nodes: styles = node.attrs['style'].split() for style in styles: if style.startswith('background'): tmp = style.split('/')[-1] video_id = tmp[:tmp.index('.jpg')] videos.append({'id': video_id}) tweets.append({ 'tweetId': tweet_id, 'time': time, 'text': text, 'replies': replies, 'retweets': retweets, 'likes': likes, 'entries': { 'hashtags': hashtags, 'urls': urls, 'photos': photos, 'videos': videos } }) last_tweet = html.find('.stream-item')[-1].attrs['data-item-id'] for tweet in tweets: if tweet: tweet['text'] = re.sub('http', ' http', tweet['text'], 1) yield tweet r = session.get(url, params={'max_position': last_tweet}, headers=headers) pages += -1 yield from gen_tweets(pages)
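A hedged usage sketch (the username is a placeholder, and the frontend endpoint this scraper depends on may change or stop working at any time):

for tweet in get_tweets("some_user", pages=1):
    print(tweet["time"], tweet["likes"], tweet["text"][:80])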
Add a specific enqueue time to the message.
def schedule(self, schedule_time): """Add a specific enqueue time to the message. :param schedule_time: The scheduled time to enqueue the message. :type schedule_time: ~datetime.datetime """ if not self.properties.message_id: self.properties.message_id = str(uuid.uuid4()) if not self.message.annotations: self.message.annotations = {} self.message.annotations[types.AMQPSymbol(self._x_OPT_SCHEDULED_ENQUEUE_TIME)] = schedule_time
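A brief sketch of scheduling a message for later delivery (msg is assumed to be a Service Bus Message exposing this method, and sender is a hypothetical sender object obtained from a queue client elsewhere):

import datetime

enqueue_at = datetime.datetime.utcnow() + datetime.timedelta(minutes=10)
msg.schedule(enqueue_at)    # stamps the scheduled-enqueue-time annotation on the message
sender.send(msg)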
Complete the message.
def complete(self): """Complete the message. This removes the message from the queue. :raises: ~azure.servicebus.common.errors.MessageAlreadySettled if the message has been settled. :raises: ~azure.servicebus.common.errors.MessageLockExpired if message lock has already expired. :raises: ~azure.servicebus.common.errors.SessionLockExpired if session lock has already expired. :raises: ~azure.servicebus.common.errors.MessageSettleFailed if message settle operation fails. """ self._is_live('complete') try: self.message.accept() except Exception as e: raise MessageSettleFailed("complete", e)
Move the message to the Dead Letter queue.
def dead_letter(self, description=None): """Move the message to the Dead Letter queue. The Dead Letter queue is a sub-queue that can be used to store messages that failed to process correctly, or otherwise require further inspection or processing. The queue can also be configured to send expired messages to the Dead Letter queue. To receive dead-lettered messages, use `QueueClient.get_deadletter_receiver()` or `SubscriptionClient.get_deadletter_receiver()`. :param description: The reason for dead-lettering the message. :type description: str :raises: ~azure.servicebus.common.errors.MessageAlreadySettled if the message has been settled. :raises: ~azure.servicebus.common.errors.MessageLockExpired if message lock has already expired. :raises: ~azure.servicebus.common.errors.SessionLockExpired if session lock has already expired. :raises: ~azure.servicebus.common.errors.MessageSettleFailed if message settle operation fails. """ self._is_live('reject') try: self.message.reject(condition=DEADLETTERNAME, description=description) except Exception as e: raise MessageSettleFailed("reject", e)
Abandon the message.
def abandon(self): """Abandon the message. This message will be returned to the queue to be reprocessed. :raises: ~azure.servicebus.common.errors.MessageAlreadySettled if the message has been settled. :raises: ~azure.servicebus.common.errors.MessageLockExpired if message lock has already expired. :raises: ~azure.servicebus.common.errors.SessionLockExpired if session lock has already expired. :raises: ~azure.servicebus.common.errors.MessageSettleFailed if message settle operation fails. """ self._is_live('abandon') try: self.message.modify(True, False) except Exception as e: raise MessageSettleFailed("abandon", e)