Columns: index (int64), repo_id (string), file_path (string), content (string)
0
java-sources/ai/databand/azkaban/az-core/3.90.0/azkaban
java-sources/ai/databand/azkaban/az-core/3.90.0/azkaban/utils/HashNotMatchException.java
/* * Copyright 2019 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.utils; /** * Typically used when validation of a file checksum after downloading fails due to a checksum mismatch. */ public class HashNotMatchException extends Exception { public HashNotMatchException() { super(); } public HashNotMatchException(final String s) { super(s); } public HashNotMatchException(final String s, final Exception e) { super(s, e); } }
0
java-sources/ai/databand/azkaban/az-core/3.90.0/azkaban
java-sources/ai/databand/azkaban/az-core/3.90.0/azkaban/utils/HashUtils.java
/* * Copyright 2019 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.utils; import java.io.BufferedInputStream; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.security.DigestInputStream; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.util.Arrays; import org.apache.commons.codec.DecoderException; import org.apache.commons.codec.binary.Hex; import static java.nio.charset.StandardCharsets.*; /** * Helper class that can find a hash for a file or string. */ public enum HashUtils { MD5("MD5"), SHA1("SHA1"); private final String type; private static final int BYTE_BUFFER_SIZE = 1024; // Length of an MD5 or SHA1 hash when hex-encoded, in characters (not raw digest bytes) private static final int MD5_SIZE_BYTES = 32; private static final int SHA1_SIZE_BYTES = 40; HashUtils(final String type) { this.type = type; } public String getName() { return type; } private MessageDigest getDigest() { MessageDigest digest; try { digest = MessageDigest.getInstance(getName()); } catch (final NoSuchAlgorithmException e) { // Should never get here. throw new RuntimeException(e); } return digest; } public String getHashStr(final String str) { return bytesHashToString(getHashBytes(str)).toLowerCase(); } public byte[] getHashBytes(final String str) { final MessageDigest digest = getDigest(); digest.update(str.getBytes(UTF_8)); return digest.digest(); } public String getHashStr(final File file) throws IOException { return bytesHashToString(getHashBytes(file)).toLowerCase(); } public byte[] getHashBytes(final File file) throws IOException { final MessageDigest digest = getDigest(); // try-with-resources ensures the streams are closed even if read() throws try (final DigestInputStream blobStream = new DigestInputStream( new BufferedInputStream(new FileInputStream(file)), digest)) { final byte[] buffer = new byte[BYTE_BUFFER_SIZE]; while (blobStream.read(buffer) > 0) { // reading advances the digest as a side effect } } return digest.digest(); } /** * Validates and sanitizes a hash string. Ensures the hash does not include any non-alphanumeric characters * and ensures it is the correct length for its type. If the hash is valid, a lowercase version is returned. * * @param raw raw hash string * @return lowercase raw hash string * @throws InvalidHashException if the hash is invalid for any of the reasons described above. */ public String sanitizeHashStr(final String raw) throws InvalidHashException { if (this == HashUtils.MD5 && raw.length() != MD5_SIZE_BYTES) { throw new InvalidHashException( String.format("MD5 hash %s has incorrect length %d, expected %d", raw, raw.length(), MD5_SIZE_BYTES)); } else if (this == HashUtils.SHA1 && raw.length() != SHA1_SIZE_BYTES) { throw new InvalidHashException( String.format("SHA1 hash %s has incorrect length %d, expected %d", raw, raw.length(), SHA1_SIZE_BYTES)); } else if (!raw.matches("^[a-zA-Z0-9]*$")) { throw new InvalidHashException( String.format("Hash %s has invalid characters. Should be only alphanumeric.", raw)); } return raw.toLowerCase(); } public static boolean isSameHash(final String a, final byte[] b) throws DecoderException { return isSameHash(stringHashToBytes(a), b); } public static boolean isSameHash(final byte[] a, final byte[] b) { return Arrays.equals(a, b); } public static byte[] stringHashToBytes(final String a) throws DecoderException { return Hex.decodeHex(a.toCharArray()); } public static String bytesHashToString(final byte[] a) { return String.valueOf(Hex.encodeHex(a)).toLowerCase(); } }
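A minimal usage sketch for the HashUtils class above, assuming az-core and Apache commons-codec are on the classpath; the file path is hypothetical.

    import azkaban.utils.HashUtils;
    import java.io.File;
    import java.io.IOException;
    import org.apache.commons.codec.DecoderException;

    public class HashUtilsDemo {
        public static void main(String[] args) throws IOException, DecoderException {
            // Hash a string: the result is a lowercase hex digest, 32 chars for MD5.
            System.out.println(HashUtils.MD5.getHashStr("hello"));

            // Hash a file (hypothetical path) and compare the raw digest
            // against its hex string form via isSameHash.
            File file = new File("/tmp/example.txt");
            byte[] digest = HashUtils.SHA1.getHashBytes(file);
            String hex = HashUtils.SHA1.getHashStr(file);
            System.out.println("match: " + HashUtils.isSameHash(hex, digest));
        }
    }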
0
java-sources/ai/databand/azkaban/az-core/3.90.0/azkaban
java-sources/ai/databand/azkaban/az-core/3.90.0/azkaban/utils/InvalidHashException.java
/* * Copyright 2019 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.utils; /** * Indicates that a hex-encoded hash string (MD5 or SHA1) is invalid (wrong length, invalid characters, etc.) */ public class InvalidHashException extends Exception { public InvalidHashException() { super(); } public InvalidHashException(final String s) { super(s); } public InvalidHashException(final String s, final Exception e) { super(s, e); } }
0
java-sources/ai/databand/azkaban/az-core/3.90.0/azkaban
java-sources/ai/databand/azkaban/az-core/3.90.0/azkaban/utils/JSONUtils.java
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.utils; import java.io.BufferedOutputStream; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStream; import java.io.Reader; import java.io.Writer; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import org.codehaus.jackson.JsonFactory; import org.codehaus.jackson.JsonNode; import org.codehaus.jackson.JsonParser; import org.codehaus.jackson.map.ObjectMapper; import org.codehaus.jackson.map.ObjectWriter; public class JSONUtils { /** * The constructor. Cannot construct this class. */ private JSONUtils() { } public static String toJSON(final Object obj) { return toJSON(obj, false); } public static String toJSON(final Object obj, final boolean prettyPrint) { final ObjectMapper mapper = new ObjectMapper(); try { if (prettyPrint) { final ObjectWriter writer = mapper.writerWithDefaultPrettyPrinter(); return writer.writeValueAsString(obj); } return mapper.writeValueAsString(obj); } catch (final Exception e) { throw new RuntimeException(e); } } public static void toJSON(final Object obj, final OutputStream stream) { toJSON(obj, stream, false); } public static void toJSON(final Object obj, final OutputStream stream, final boolean prettyPrint) { final ObjectMapper mapper = new ObjectMapper(); try { if (prettyPrint) { final ObjectWriter writer = mapper.writerWithDefaultPrettyPrinter(); writer.writeValue(stream, obj); return; } mapper.writeValue(stream, obj); } catch (final Exception e) { throw new RuntimeException(e); } } public static void toJSON(final Object obj, final File file) throws IOException { toJSON(obj, file, false); } public static void toJSON(final Object obj, final File file, final boolean prettyPrint) throws IOException { final BufferedOutputStream stream = new BufferedOutputStream(new FileOutputStream(file)); try { toJSON(obj, stream, prettyPrint); } finally { stream.close(); } } public static Object parseJSONFromStringQuiet(final String json) { try { return parseJSONFromString(json); } catch (final IOException e) { e.printStackTrace(); return null; } } public static Object parseJSONFromString(final String json) throws IOException { final ObjectMapper mapper = new ObjectMapper(); final JsonFactory factory = new JsonFactory(); final JsonParser parser = factory.createJsonParser(json); final JsonNode node = mapper.readTree(parser); return toObjectFromJSONNode(node); } public static Object parseJSONFromFile(final File file) throws IOException { final ObjectMapper mapper = new ObjectMapper(); final JsonFactory factory = new JsonFactory(); final JsonParser parser = factory.createJsonParser(file); final JsonNode node = mapper.readTree(parser); return toObjectFromJSONNode(node); } public static Object parseJSONFromReader(final Reader reader) throws IOException { final ObjectMapper mapper = new ObjectMapper(); final JsonFactory factory = new JsonFactory(); final JsonParser parser = 
factory.createJsonParser(reader); final JsonNode node = mapper.readTree(parser); return toObjectFromJSONNode(node); } private static Object toObjectFromJSONNode(final JsonNode node) { if (node.isObject()) { final HashMap<String, Object> obj = new HashMap<>(); final Iterator<String> iter = node.getFieldNames(); while (iter.hasNext()) { final String fieldName = iter.next(); final JsonNode subNode = node.get(fieldName); final Object subObj = toObjectFromJSONNode(subNode); obj.put(fieldName, subObj); } return obj; } else if (node.isArray()) { final ArrayList<Object> array = new ArrayList<>(); final Iterator<JsonNode> iter = node.getElements(); while (iter.hasNext()) { final JsonNode element = iter.next(); final Object subObject = toObjectFromJSONNode(element); array.add(subObject); } return array; } else if (node.isTextual()) { return node.asText(); } else if (node.isNumber()) { if (node.isInt()) { return node.asInt(); } else if (node.isLong()) { return node.asLong(); } else if (node.isDouble()) { return node.asDouble(); } else { System.err.println("ERROR What is this!? " + node.getNumberType()); return null; } } else if (node.isBoolean()) { return node.asBoolean(); } else { return null; } } public static long getLongFromObject(final Object obj) { if (obj instanceof Integer) { return Long.valueOf((Integer) obj); } return (Long) obj; } /* * Writes json to a stream without using any external dependencies. * * This is useful for plugins or extensions that want to write properties to a * writer without having to import the jackson, or json libraries. The * properties are expected to be a map of String keys and String values. * * The other json writing methods are more robust and will handle more cases. */ public static void writePropsNoJarDependency(final Map<String, String> properties, final Writer writer) throws IOException { writer.write("{\n"); int size = properties.size(); for (final Map.Entry<String, String> entry : properties.entrySet()) { // tab the space writer.write('\t'); // Write key writer.write(quoteAndClean(entry.getKey())); writer.write(':'); writer.write(quoteAndClean(entry.getValue())); size -= 1; // Add comma only if it's not the last one if (size > 0) { writer.write(','); } writer.write('\n'); } writer.write("}"); } private static String quoteAndClean(final String str) { if (str == null || str.isEmpty()) { return "\"\""; } final StringBuffer buffer = new StringBuffer(str.length()); buffer.append('"'); for (int i = 0; i < str.length(); ++i) { final char ch = str.charAt(i); switch (ch) { case '\b': buffer.append("\\b"); break; case '\t': buffer.append("\\t"); break; case '\n': buffer.append("\\n"); break; case '\f': buffer.append("\\f"); break; case '\r': buffer.append("\\r"); break; case '"': case '\\': case '/': buffer.append('\\'); buffer.append(ch); break; default: if (isCharSpecialUnicode(ch)) { buffer.append("\\u"); final String hexCode = Integer.toHexString(ch); final int lengthHexCode = hexCode.length(); if (lengthHexCode < 4) { buffer.append("0000".substring(0, 4 - lengthHexCode)); } buffer.append(hexCode); } else { buffer.append(ch); } } } buffer.append('"'); return buffer.toString(); } private static boolean isCharSpecialUnicode(final char ch) { if (ch < ' ') { return true; } else if (ch >= '\u0080' && ch < '\u00a0') { return true; } else if (ch >= '\u2000' && ch < '\u2100') { return true; } return false; } }
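A short sketch of round-tripping a map through the JSONUtils class above, assuming az-core and Jackson 1.x (org.codehaus.jackson) are on the classpath.

    import azkaban.utils.JSONUtils;
    import java.io.IOException;
    import java.io.StringWriter;
    import java.util.HashMap;
    import java.util.Map;

    public class JSONUtilsDemo {
        public static void main(String[] args) throws IOException {
            Map<String, String> props = new HashMap<>();
            props.put("type", "command");
            props.put("command", "echo hello");

            // Round-trip through the Jackson-backed serializer;
            // parseJSONFromString returns a HashMap for JSON objects.
            String json = JSONUtils.toJSON(props, true);
            Object parsed = JSONUtils.parseJSONFromString(json);
            System.out.println(parsed);

            // Dependency-free writer for plain String-to-String maps.
            StringWriter writer = new StringWriter();
            JSONUtils.writePropsNoJarDependency(props, writer);
            System.out.println(writer);
        }
    }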
0
java-sources/ai/databand/azkaban/az-core/3.90.0/azkaban
java-sources/ai/databand/azkaban/az-core/3.90.0/azkaban/utils/MemConfValue.java
package azkaban.utils; import azkaban.Constants; import com.google.common.base.Preconditions; import org.apache.commons.lang.StringUtils; public class MemConfValue { private final String string; private final long size; public static MemConfValue parseMaxXms(final Props props) { return parse(props, Constants.JobProperties.JOB_MAX_XMS, Constants.JobProperties.MAX_XMS_DEFAULT); } public static MemConfValue parseMaxXmx(final Props props) { return parse(props, Constants.JobProperties.JOB_MAX_XMX, Constants.JobProperties.MAX_XMX_DEFAULT); } private static MemConfValue parse(final Props props, final String key, final String defaultValue) { final String stringValue = props.getString(key, defaultValue); Preconditions.checkArgument(!StringUtils.isBlank(stringValue), String.format("%s must not have an empty value. " + "Remove the property to use default or specify a valid value.", key)); final long size = Utils.parseMemString(stringValue); return new MemConfValue(stringValue, size); } private MemConfValue(final String string, final long size) { this.string = string; this.size = size; } public String getString() { return this.string; } public long getSize() { return this.size; } }
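A hedged usage sketch for MemConfValue. The key "job.max.Xmx" stands in for Constants.JobProperties.JOB_MAX_XMX, whose actual value is defined in Constants (not part of this row), and the unit of getSize() depends on Utils.parseMemString, also not shown here.

    import azkaban.utils.MemConfValue;
    import azkaban.utils.Props;

    public class MemConfValueDemo {
        public static void main(String[] args) {
            // "job.max.Xmx" is an assumed key; the real constant lives in Constants.JobProperties.
            Props props = Props.of("job.max.Xmx", "2G");
            MemConfValue xmx = MemConfValue.parseMaxXmx(props);
            // getString() keeps the raw setting; getSize() is the parsed numeric size.
            System.out.println(xmx.getString() + " -> " + xmx.getSize());
        }
    }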
0
java-sources/ai/databand/azkaban/az-core/3.90.0/azkaban
java-sources/ai/databand/azkaban/az-core/3.90.0/azkaban/utils/PluginUtils.java
/* * Copyright 2019 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.utils; import java.io.File; import java.net.MalformedURLException; import java.net.URL; import java.net.URLClassLoader; import java.util.ArrayList; import java.util.List; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class PluginUtils { private static final Logger logger = LoggerFactory.getLogger(PluginUtils.class); private static String LIBRARY_FOLDER_NAME = "lib"; /** * Private constructor. */ private PluginUtils() { } /** * Convert a list of files to a list of files' URLs * * @param files list of file handles * @return an arrayList of the corresponding files' URLs */ private static ArrayList<URL> getUrls(File[] files) { final ArrayList<URL> urls = new ArrayList<>(); for (File file : files) { try { final URL url = file.toURI().toURL(); urls.add(url); } catch (final MalformedURLException e) { logger.error("File is not convertible to URL.", e); } } return urls; } /** * Get URLClassLoader */ public static URLClassLoader getURLClassLoader(final File pluginDir, List<String> extLibClassPaths, ClassLoader parentLoader) { final File libDir = new File(pluginDir, LIBRARY_FOLDER_NAME); if (libDir.exists() && libDir.isDirectory()) { final File[] files = libDir.listFiles(); final ArrayList<URL> urls = getUrls(files); if (extLibClassPaths != null) { for (final String extLibClassPath : extLibClassPaths) { try { final File extLibFile = new File(pluginDir, extLibClassPath); if (extLibFile.exists()) { if (extLibFile.isDirectory()) { // extLibFile is a directory; load all the files in the // directory. final File[] extLibFiles = extLibFile.listFiles(); urls.addAll(getUrls(extLibFiles)); } else { final URL url = extLibFile.toURI().toURL(); urls.add(url); } } else { logger.error( "External library path not found. path = " + extLibFile.getAbsolutePath() ); continue; } } catch (final MalformedURLException e) { logger.error( "Invalid External library path. path = " + extLibClassPath + " dir = " + pluginDir, e ); } } } return new URLClassLoader(urls.toArray(new URL[urls.size()]), parentLoader); } else { logger.error("Library path not found. 
path = " + libDir); return null; } } /** * Get Plugin Class * * @param pluginClass plugin class name * @param pluginDir plugin root directory * @param extLibClassPaths external Library Class Paths * @param parentClassLoader parent class loader * @return Plugin class or Null */ public static Class<?> getPluginClass(final String pluginClass, final File pluginDir, final List<String> extLibClassPaths, ClassLoader parentClassLoader) { URLClassLoader urlClassLoader = getURLClassLoader(pluginDir, extLibClassPaths, parentClassLoader); return getPluginClass(pluginClass, urlClassLoader); } /** * Get Plugin Class * * @param pluginClass plugin class name * @param urlClassLoader url class loader * @return Plugin class or Null */ public static Class<?> getPluginClass(final String pluginClass, URLClassLoader urlClassLoader) { if (urlClassLoader == null) { return null; } try { return urlClassLoader.loadClass(pluginClass); } catch (final ClassNotFoundException e) { logger.error("Class not found. class = " + pluginClass); return null; } } }
0
java-sources/ai/databand/azkaban/az-core/3.90.0/azkaban
java-sources/ai/databand/azkaban/az-core/3.90.0/azkaban/utils/Props.java
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.utils; import java.io.BufferedInputStream; import java.io.BufferedOutputStream; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.TreeMap; import org.apache.log4j.Logger; /** * HashMap implementation of hierarchical properties with helpful converter functions and * exception throwing. This class is not thread-safe. */ public class Props { private final Map<String, String> _current; private Props _parent; private String source = null; /** * Constructor for empty props with empty parent. */ public Props() { this(null); } /** * Constructor for empty Props with parent override. */ public Props(final Props parent) { this._current = new HashMap<>(); this._parent = parent; } /** * Load props from a file. */ public Props(final Props parent, final String filepath) throws IOException { this(parent, new File(filepath)); } /** * Load props from a file. */ public Props(final Props parent, final File file) throws IOException { this(parent); if (file.exists()) { setSource(file.getPath()); final InputStream input = new BufferedInputStream(new FileInputStream(file)); try { loadFrom(input); } finally { input.close(); } } } /** * Create props from property input streams */ public Props(final Props parent, final InputStream inputStream) throws IOException { this(parent); loadFrom(inputStream); } /** * Create properties from maps of properties */ public Props(final Props parent, final Map<String, String>... props) { this(parent); for (int i = props.length - 1; i >= 0; i--) { this.putAll(props[i]); } } /** * Create properties from Properties objects */ public Props(final Props parent, final Properties... properties) { this(parent); for (int i = properties.length - 1; i >= 0; i--) { this.put(properties[i]); } } /** * Create a Props object with the contents set to that of props. */ public Props(final Props parent, final Props props) { this(parent); if (props != null) { putAll(props); } } /** * Create a Props with a null parent from a list of key/value pairs, i.e. [key1, value1, key2, * value2 ...] */ public static Props of(final String... args) { return of((Props) null, args); } /** * Create a Props from a list of key/value pairs, i.e. [key1, value1, key2, value2 ...] */ public static Props of(final Props parent, final String... args) { if (args.length % 2 != 0) { throw new IllegalArgumentException( "Must have an equal number of keys and values."); } final Map<String, String> vals = new HashMap<>(args.length / 2); for (int i = 0; i < args.length; i += 2) { vals.put(args[i], args[i + 1]); } return new Props(parent, vals); }
/** * Clones the Props p object and all of its parents. */ public static Props clone(final Props p) { return copyNext(p); } /** * Recursive Clone function of Props * * @param source the source Props object * @return the cloned Props object */ private static Props copyNext(final Props source) { Props priorNodeCopy = null; if (source.getParent() != null) { priorNodeCopy = copyNext(source.getParent()); } final Props dest = new Props(priorNodeCopy); for (final String key : source.localKeySet()) { dest.put(key, source.get(key)); } return dest; } /** * Create a new Props instance * * @param parent parent props * @param current current props * @param source source value * @return new Prop Instance */ public static Props getInstance(Props parent, Props current, String source) { Props props = new Props(parent, current); props.setSource(source); return props; } /** * Load this Props object from a java.util.Properties-formatted InputStream * * @param inputStream inputStream for loading Properties Object * @throws IOException read exception */ private void loadFrom(final InputStream inputStream) throws IOException { final Properties properties = new Properties(); properties.load(inputStream); this.put(properties); } /** * Get the Root Props Object * * @return the root Props Object or this Props itself */ public Props getEarliestAncestor() { if (this._parent == null) { return this; } return this._parent.getEarliestAncestor(); } /** * Set the Props Object as the root of this Props Object * * @param parent the earliest ancestor Props Object */ public void setEarliestAncestor(final Props parent) { final Props props = getEarliestAncestor(); props.setParent(parent); } /** * Clear the current Props, but leaves the parent untouched. */ public void clearLocal() { this._current.clear(); } /** * Check key in current Props then search in parent */ public boolean containsKey(final Object k) { return this._current.containsKey(k) || (this._parent != null && this._parent.containsKey(k)); } /** * Check value in current Props then search in parent */ public boolean containsValue(final Object value) { return this._current.containsValue(value) || (this._parent != null && this._parent.containsValue(value)); } /** * Return value if available in current Props otherwise return from parent */ public String get(final Object key) { if (this._current.containsKey(key)) { return this._current.get(key); } else if (this._parent != null) { return this._parent.get(key); } else { return null; } } /** * Get the key set from the current Props */ public Set<String> localKeySet() { return this._current.keySet(); } /** * Get parent Props */ public Props getParent() { return this._parent; } public void setParent(final Props prop) { this._parent = prop; } /** * Put the given string value for the string key. This method performs any variable substitution * in the value replacing any occurrence of ${name} with the value of get("name"). * * @param key The key to put the value to * @param value The value to do substitution on and store * @throws IllegalArgumentException If the variable given for substitution is not a valid key in * this Props. */ public String put(final String key, final String value) { return this._current.put(key, value); }
/** * Put the given Properties into the Props. This method performs any variable substitution in the * value replacing any occurrence of ${name} with the value of get("name"). get() is called first * on the Props and next on the Properties object. * * @param properties The properties to put * @throws IllegalArgumentException If the variable given for substitution is not a valid key in * this Props. */ public void put(final Properties properties) { for (final String propName : properties.stringPropertyNames()) { this._current.put(propName, properties.getProperty(propName)); } } /** * Put integer */ public String put(final String key, final Integer value) { return this._current.put(key, value.toString()); } /** * Put Long. Stores as String. */ public String put(final String key, final Long value) { return this._current.put(key, value.toString()); } /** * Put Double. Stores as String. */ public String put(final String key, final Double value) { return this._current.put(key, value.toString()); } /** * Put everything in the map into the props. */ public void putAll(final Map<? extends String, ? extends String> m) { if (m == null) { return; } for (final Map.Entry<? extends String, ? extends String> entry : m.entrySet()) { this.put(entry.getKey(), entry.getValue()); } } /** * Put all properties in the props into the current props. Will handle null p. */ public void putAll(final Props p) { if (p == null) { return; } for (final String key : p.getKeySet()) { this.put(key, p.get(key)); } } /** * Puts only the local props from p into the current properties */ public void putLocal(final Props p) { for (final String key : p.localKeySet()) { this.put(key, p.get(key)); } } /** * Remove only the local value of key s, and not the parents. */ public String removeLocal(final Object s) { return this._current.remove(s); } /** * The number of unique keys defined by this Props and all parent Props */ public int size() { return getKeySet().size(); } /** * The number of unique keys defined by this Props (keys defined only in parent Props are not * counted) */ public int localSize() { return this._current.size(); } /** * Attempts to return the Class that corresponds to the Props value. If the class doesn't exist, an * IllegalArgumentException will be thrown. */ public Class<?> getClass(final String key) { try { if (containsKey(key)) { return Class.forName(get(key)); } else { throw new UndefinedPropertyException("Missing required property '" + key + "'"); } } catch (final ClassNotFoundException e) { throw new IllegalArgumentException(e); } } public Class<?> getClass(final String key, final boolean initialize, final ClassLoader cl) { try { if (containsKey(key)) { return Class.forName(get(key), initialize, cl); } else { throw new UndefinedPropertyException("Missing required property '" + key + "'"); } } catch (final ClassNotFoundException e) { throw new IllegalArgumentException(e); } } /** * Gets the class from the Props. If it doesn't exist, it will return the defaultClass */ public Class<?> getClass(final String key, final Class<?> defaultClass) { if (containsKey(key)) { return getClass(key); } else { return defaultClass; } } /** * Gets the string from the Props. If it doesn't exist, it will return the defaultValue */ public String getString(final String key, final String defaultValue) { if (containsKey(key)) { return get(key); } else { return defaultValue; } } /** * Gets the string from the Props. If it doesn't exist, throws an UndefinedPropertyException */ public String getString(final String key) { if (containsKey(key)) { return get(key); } else { throw new UndefinedPropertyException("Missing required property '" + key + "'"); } }
/** * Returns a list of strings with the comma as the separator of the value */ public List<String> getStringList(final String key) { return getStringList(key, "\\s*,\\s*"); } /** * Returns a list of clusters with the semicolon as the separator of the value * e.g., for input string: "thrift://hcat1:port,thrift://hcat2:port;thrift://hcat3:port,thrift://hcat4:port;" * we will get ["thrift://hcat1:port,thrift://hcat2:port", "thrift://hcat3:port,thrift://hcat4:port"] * as output */ public List<String> getStringListFromCluster(final String key) { // Copy into a mutable list: getStringList may return a fixed-size list that rejects remove(). final List<String> curlist = new ArrayList<>(getStringList(key, "\\s*;\\s*")); // remove empty elements in the array for (final Iterator<String> iter = curlist.listIterator(); iter.hasNext(); ) { final String a = iter.next(); if (a.length() == 0) { iter.remove(); } } return curlist; } /** * Returns a list of strings with the sep as the separator of the value */ public List<String> getStringList(final String key, final String sep) { final String val = get(key); if (val == null || val.trim().length() == 0) { return Collections.emptyList(); } if (containsKey(key)) { return Arrays.asList(val.split(sep)); } else { throw new UndefinedPropertyException("Missing required property '" + key + "'"); } } /** * Returns a list of strings with the comma as the separator of the value. If the value is null, * it'll return the defaultValue. */ public List<String> getStringList(final String key, final List<String> defaultValue) { if (containsKey(key)) { return getStringList(key); } else { return defaultValue; } } /** * Returns a list of strings with the sep as the separator of the value. If the value is null, * it'll return the defaultValue. */ public List<String> getStringList(final String key, final List<String> defaultValue, final String sep) { if (containsKey(key)) { return getStringList(key, sep); } else { return defaultValue; } } /** * Returns true if the value equals "true". If the value is null, then the default value is * returned. */ public boolean getBoolean(final String key, final boolean defaultValue) { if (containsKey(key)) { return "true".equalsIgnoreCase(get(key).trim()); } else { return defaultValue; } } /** * Returns true if the value equals "true". If the value is null, then an * UndefinedPropertyException is thrown. */ public boolean getBoolean(final String key) { if (containsKey(key)) { return "true".equalsIgnoreCase(get(key)); } else { throw new UndefinedPropertyException("Missing required property '" + key + "'"); } } /** * Returns the long representation of the value. If the value is null, then the default value is * returned. If the value isn't a long, then a parse exception will be thrown. */ public long getLong(final String name, final long defaultValue) { if (containsKey(name)) { return Long.parseLong(get(name)); } else { return defaultValue; } } /** * Returns the long representation of the value. If the value is null, then a * UndefinedPropertyException will be thrown. If the value isn't a long, then a parse exception * will be thrown. */ //todo burgerkingeater: it might be better to return null instead of throwing exception to // avoid repetitive exception handling public long getLong(final String name) { if (containsKey(name)) { return Long.parseLong(get(name)); } else { throw new UndefinedPropertyException("Missing required property '" + name + "'"); } }
/** * Returns the int representation of the value. If the value is null, then the default value is * returned. If the value isn't an int, then a parse exception will be thrown. */ public int getInt(final String name, final int defaultValue) { if (containsKey(name)) { return Integer.parseInt(get(name).trim()); } else { return defaultValue; } } /** * Returns the int representation of the value. If the value is null, then a * UndefinedPropertyException will be thrown. If the value isn't an int, then a parse exception * will be thrown. */ public int getInt(final String name) { if (containsKey(name)) { return Integer.parseInt(get(name).trim()); } else { throw new UndefinedPropertyException("Missing required property '" + name + "'"); } } /** * Returns the double representation of the value. If the value is null, then the default value is * returned. If the value isn't a double, then a parse exception will be thrown. */ public double getDouble(final String name, final double defaultValue) { if (containsKey(name)) { return Double.parseDouble(get(name).trim()); } else { return defaultValue; } } /** * Returns the double representation of the value. If the value is null, then a * UndefinedPropertyException will be thrown. If the value isn't a double, then a parse exception * will be thrown. */ public double getDouble(final String name) { if (containsKey(name)) { return Double.parseDouble(get(name).trim()); } else { throw new UndefinedPropertyException("Missing required property '" + name + "'"); } } /** * Returns the URI representation of the value. If the value is null, then an * UndefinedPropertyException will be thrown. If the value isn't a URI, then an * IllegalArgumentException will be thrown. * * If addTrailingSlash is true and the value isn't null, a trailing forward slash will be added * to the URI. */ public URI getUri(final String name) { return getUri(name, false); } public URI getUri(final String name, final Boolean addTrailingSlash) { if (containsKey(name)) { try { String rawValue = get(name); if (rawValue == null) return null; String finalValue = !addTrailingSlash || rawValue.endsWith("/") ? rawValue : rawValue + "/"; return new URI(finalValue); } catch (final URISyntaxException e) { throw new IllegalArgumentException(e.getMessage()); } } else { throw new UndefinedPropertyException("Missing required property '" + name + "'"); } } /** * Returns the URI representation of the value. If the value is null, then the default value is * returned. If the value isn't a URI, then an IllegalArgumentException will be thrown. * * If addTrailingSlash is true and the value isn't null, a trailing forward slash will be added * to the URI. */ public URI getUri(final String name, final URI defaultValue) { return getUri(name, defaultValue, false); }
public URI getUri(final String name, final URI defaultValue, final Boolean addTrailingSlash) { if (containsKey(name)) { return getUri(name, addTrailingSlash); } else { return defaultValue; } } /** * Convert a URI-formatted string value to URI object */ public URI getUri(final String name, final String defaultValue) { try { return getUri(name, new URI(defaultValue)); } catch (final URISyntaxException e) { throw new IllegalArgumentException(e.getMessage()); } } /** * Store only those properties defined at this local level * * @param file The file to write to * @throws IOException If the file can't be found or there is an io error */ public void storeLocal(final File file) throws IOException { final BufferedOutputStream out = new BufferedOutputStream(new FileOutputStream(file)); try { storeLocal(out); } finally { out.close(); } } /** * Returns a copy of only the local values of this props */ public Props local() { return new Props(null, this._current); } /** * Store only those properties defined at this local level * * @param out The output stream to write to * @throws IOException If the file can't be found or there is an io error */ public void storeLocal(final OutputStream out) throws IOException { final Properties p = new Properties(); for (final String key : this._current.keySet()) { p.setProperty(key, get(key)); } p.store(out, null); } /** * Returns a java.util.Properties file populated with the current Properties in here. * Note: if you want to import parent properties (e.g., database credentials), please use * toAllProperties */ public Properties toProperties() { final Properties p = new Properties(); for (final String key : this._current.keySet()) { p.setProperty(key, get(key)); } return p; } /** * Returns a java.util.Properties file populated with both current and parent properties. */ public Properties toAllProperties() { final Properties allProp = new Properties(); // import local properties allProp.putAll(toProperties()); // import parent properties if (this._parent != null) { allProp.putAll(this._parent.toProperties()); } return allProp; } /** * Store all properties, those local and also those in parent props * * @param file The file to store to * @throws IOException If there is an error writing */ public void storeFlattened(final File file) throws IOException { final BufferedOutputStream out = new BufferedOutputStream(new FileOutputStream(file)); try { storeFlattened(out); } finally { out.close(); } } /** * Store all properties, those local and also those in parent props * * @param out The stream to write to * @throws IOException If there is an error writing */ public void storeFlattened(final OutputStream out) throws IOException { final Properties p = new Properties(); for (Props curr = this; curr != null; curr = curr.getParent()) { for (final String key : curr.localKeySet()) { if (!p.containsKey(key)) { p.setProperty(key, get(key)); } } } p.store(out, null); } /** * Returns a new constructed map of all the flattened properties, the item in the returned * map is sorted alphabetically by the key value. * * @return a new constructed TreeMap (sorted map) of all properties (including parents' * properties) */ public Map<String, String> getFlattened() { final TreeMap<String, String> returnVal = new TreeMap<>(); returnVal.putAll(getMapByPrefix("")); return returnVal; }
/** * Get a new de-duplicated map of all the flattened properties by given prefix. The prefix will * be removed in the return map's keySet. * * @param prefix the prefix string * @return a new constructed de-duplicated HashMap of all properties (including parents' * properties) with the given prefix */ public Map<String, String> getMapByPrefix(final String prefix) { final Map<String, String> values = (this._parent == null) ? new HashMap<>() : this._parent.getMapByPrefix(prefix); // when there is a conflict, value from the child takes the priority. if (prefix == null) { // when prefix is null, return an empty map return values; } for (final String key : this.localKeySet()) { if (key != null && key.length() >= prefix.length()) { if (key.startsWith(prefix)) { values.put(key.substring(prefix.length()), get(key)); } } } return values; } /** * Returns a set of all keys, including the parents */ public Set<String> getKeySet() { final HashSet<String> keySet = new HashSet<>(); keySet.addAll(localKeySet()); if (this._parent != null) { keySet.addAll(this._parent.getKeySet()); } return keySet; } /** * Logs the properties to the given logger */ public void logProperties(final Logger logger, final String comment) { logger.info(comment); for (final String key : getKeySet()) { logger.info(" key=" + key + " value=" + get(key)); } } /** * override Object's default equals function */ @Override public boolean equals(final Object o) { if (o == this) { return true; } else if (o == null) { return false; } else if (o.getClass() != Props.class) { return false; } final Props p = (Props) o; return this._current.equals(p._current) && Utils.equals(this._parent, p._parent); } /** * override Object's default hash code function */ @Override public int hashCode() { int code = this._current.hashCode(); if (this._parent != null) { code += this._parent.hashCode(); } return code; } /** * override Object's default toString function */ @Override public String toString() { final StringBuilder builder = new StringBuilder("{"); for (final Map.Entry<String, String> entry : this._current.entrySet()) { builder.append(entry.getKey()); builder.append(": "); builder.append(entry.getValue()); builder.append(", "); } if (this._parent != null) { builder.append(" parent = "); builder.append(this._parent.toString()); } builder.append("}"); return builder.toString(); } /** * Get Source information */ public String getSource() { return this.source; } /** * Set Source information */ public Props setSource(final String source) { this.source = source; return this; } }
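A minimal sketch of the hierarchical lookup in the Props class above: child values shadow the parent, and typed getters fall back to defaults when a key is absent.

    import azkaban.utils.Props;

    public class PropsDemo {
        public static void main(String[] args) {
            // Parent holds defaults; the child overrides and adds keys.
            Props defaults = Props.of("db.host", "localhost", "db.port", "3306");
            Props job = new Props(defaults);
            job.put("db.port", "5432");

            System.out.println(job.getString("db.host"));       // falls through to the parent
            System.out.println(job.getInt("db.port"));          // child value wins: 5432
            System.out.println(job.getBoolean("db.ssl", false)); // default when the key is absent
        }
    }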
0
java-sources/ai/databand/azkaban/az-core/3.90.0/azkaban
java-sources/ai/databand/azkaban/az-core/3.90.0/azkaban/utils/PropsUtils.java
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.utils; import com.google.common.collect.MapDifference; import com.google.common.collect.Maps; import java.io.File; import java.io.IOException; import java.util.Arrays; import java.util.HashMap; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.apache.commons.jexl2.Expression; import org.apache.commons.jexl2.JexlEngine; import org.apache.commons.jexl2.JexlException; import org.apache.commons.jexl2.MapContext; import org.apache.commons.lang.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Utility Functions related to Prop Operations */ public class PropsUtils { private static final Logger LOGGER = LoggerFactory.getLogger(PropsUtils.class); private static final Pattern VARIABLE_REPLACEMENT_PATTERN = Pattern .compile("\\$\\{([a-zA-Z_.0-9]+)\\}"); /** * Private constructor. */ private PropsUtils() { } /** * Load job schedules from the given directories * * @param dir The directory to look in * @param suffixes File suffixes to load * @return The loaded set of schedules */ public static Props loadPropsInDir(final File dir, final String... suffixes) { return loadPropsInDir(null, dir, suffixes); } /** * Load job schedules from the given directories * * @param parent The parent properties for these properties * @param dir The directory to look in * @param suffixes File suffixes to load * @return The loaded set of schedules */ public static Props loadPropsInDir(final Props parent, final File dir, final String... suffixes) { try { final Props props = new Props(parent); final File[] files = dir.listFiles(); // listFiles() returns null on I/O error, so only sort and iterate when files exist if (files != null) { Arrays.sort(files); for (final File f : files) { if (f.isFile() && endsWith(f, suffixes)) { props.putAll(new Props(null, f.getAbsolutePath())); } } } return props; } catch (final IOException e) { throw new RuntimeException("Error loading properties.", e); } } /** * Load Props * * @param parent parent prop * @param propFiles prop files * @return constructed new Prop */ public static Props loadProps(final Props parent, final File... propFiles) { try { Props props = new Props(parent); for (final File f : propFiles) { if (f.isFile()) { props = new Props(props, f); } } return props; } catch (final IOException e) { throw new RuntimeException("Error loading properties.", e); } } /** * Load plugin properties * * @param pluginDir plugin's Base Directory * @return The properties */ public static Props loadPluginProps(final File pluginDir) { if (!pluginDir.exists()) { LOGGER.error("Error! 
Plugin path " + pluginDir.getPath() + " doesn't exist."); return null; } if (!pluginDir.isDirectory()) { LOGGER.error("The plugin path " + pluginDir + " is not a directory."); return null; } final File propertiesDir = new File(pluginDir, "conf"); if (propertiesDir.exists() && propertiesDir.isDirectory()) { final File propertiesFile = new File(propertiesDir, "plugin.properties"); final File propertiesOverrideFile = new File(propertiesDir, "override.properties"); if (propertiesFile.exists()) { if (propertiesOverrideFile.exists()) { return loadProps(null, propertiesFile, propertiesOverrideFile); } else { return loadProps(null, propertiesFile); } } else { LOGGER.error("Plugin conf file " + propertiesFile + " not found."); return null; } } else { LOGGER.error("Plugin conf path " + propertiesDir + " not found."); return null; } } /** * Load job schedules from the given directories * * @param dirs The directories to check for properties * @param suffixes The suffixes to load * @return The properties */ public static Props loadPropsInDirs(final List<File> dirs, final String... suffixes) { final Props props = new Props(); for (final File dir : dirs) { props.putLocal(loadPropsInDir(dir, suffixes)); } return props; } /** * Load properties from the given path * * @param jobPath The path to load from * @param props The parent properties for loaded properties * @param suffixes The suffixes of files to load */ public static void loadPropsBySuffix(final File jobPath, final Props props, final String... suffixes) { try { if (jobPath.isDirectory()) { final File[] files = jobPath.listFiles(); if (files != null) { for (final File file : files) { loadPropsBySuffix(file, props, suffixes); } } } else if (endsWith(jobPath, suffixes)) { props.putAll(new Props(null, jobPath.getAbsolutePath())); } } catch (final IOException e) { throw new RuntimeException("Error loading schedule properties.", e); } } private static boolean endsWith(final File file, final String... suffixes) { for (final String suffix : suffixes) { if (file.getName().endsWith(suffix)) { return true; } } return false; } /** * Check if the prop value is a variable replacement pattern */ public static boolean isVariableReplacementPattern(final String value) { final Matcher matcher = VARIABLE_REPLACEMENT_PATTERN.matcher(value); return matcher.matches(); } /** * Resolve Props * * @param props props * @return resolved props */ public static Props resolveProps(final Props props) { if (props == null) { return null; } final Props resolvedProps = new Props(); final LinkedHashSet<String> visitedVariables = new LinkedHashSet<>(); for (final String key : props.getKeySet()) { String value = props.get(key); if (value == null) { LOGGER.warn("Null value in props for key '" + key + "'. Replacing with empty string."); value = ""; } visitedVariables.add(key); final String replacedValue = resolveVariableReplacement(value, props, visitedVariables); visitedVariables.clear(); resolvedProps.put(key, replacedValue); } for (final String key : resolvedProps.getKeySet()) { final String value = resolvedProps.get(key); final String expressedValue = resolveVariableExpression(value); resolvedProps.put(key, expressedValue); } return resolvedProps; } /** * new Props based on default Props and expand it from external prop file * * @param parentProps parent Props * @param filePath filePath * @return combined props * @throws IOException */ public static Props newProps(final Props parentProps, final String filePath) throws IOException { return (filePath == null) ? (parentProps == null ? 
null : new Props(parentProps)) : newProps(parentProps, new File(filePath)); } /** * new Props based on default Props and expand it from external prop file * * @param parentProps parent Props * @param file prop file * @return combined props * @throws IOException */ public static Props newProps(final Props parentProps, final File file) throws IOException { if (file.exists()) { LOGGER.info("Prop file " + file + " found. Attempting to load it."); return new Props(parentProps, file); } else { LOGGER.info("Prop file " + file + " not found. Using the default props only."); return (parentProps == null ? null : new Props(parentProps)); } } private static String resolveVariableReplacement(final String value, final Props props, final LinkedHashSet<String> visitedVariables) { final StringBuffer buffer = new StringBuffer(); int startIndex = 0; final Matcher matcher = VARIABLE_REPLACEMENT_PATTERN.matcher(value); while (matcher.find(startIndex)) { if (startIndex < matcher.start()) { // Copy everything up front to the buffer buffer.append(value.substring(startIndex, matcher.start())); } final String subVariable = matcher.group(1); // Detected a cycle if (visitedVariables.contains(subVariable)) { throw new IllegalArgumentException(String.format( "Circular variable substitution found: [%s] -> [%s]", StringUtils.join(visitedVariables, "->"), subVariable)); } else { // Add substitute variable and recurse. final String replacement = props.get(subVariable); visitedVariables.add(subVariable); if (replacement == null) { throw new UndefinedPropertyException(String.format( "Could not find variable substitution for variable(s) [%s]", StringUtils.join(visitedVariables, "->"))); } buffer.append(resolveVariableReplacement(replacement, props, visitedVariables)); visitedVariables.remove(subVariable); } startIndex = matcher.end(); } if (startIndex < value.length()) { buffer.append(value.substring(startIndex)); } return buffer.toString(); } private static String resolveVariableExpression(final String value) { final JexlEngine jexl = new JexlEngine(); return resolveVariableExpression(value, value.length(), jexl); } /** * Function that looks for expressions to parse. It parses backwards to capture embedded * expressions */ private static String resolveVariableExpression(final String value, final int last, final JexlEngine jexl) { final int lastIndex = value.lastIndexOf("$(", last); if (lastIndex == -1) { return value; } // Want to check that everything is well formed, and that // we properly capture $( ...(...)...). int bracketCount = 0; int nextClosed = lastIndex + 2; for (; nextClosed < value.length(); ++nextClosed) { if (value.charAt(nextClosed) == '(') { bracketCount++; } else if (value.charAt(nextClosed) == ')') { bracketCount--; if (bracketCount == -1) { break; } } } if (nextClosed == value.length()) { throw new IllegalArgumentException("Expression " + value + " not well formed."); } final String innerExpression = value.substring(lastIndex + 2, nextClosed); Object result = null; try { final Expression e = jexl.createExpression(innerExpression); result = e.evaluate(new MapContext()); } catch (final JexlException e) { throw new IllegalArgumentException("Expression " + value + " not well formed. 
" + e.getMessage(), e); } if (result == null) { // for backward compatibility it is best to return value return value; } final String newValue = value.substring(0, lastIndex) + result.toString() + value.substring(nextClosed + 1); return resolveVariableExpression(newValue, lastIndex, jexl); } /** * Convert props to json string * * @param props props * @param localOnly include local prop sets only or not * @return json string format of props */ public static String toJSONString(final Props props, final boolean localOnly) { final Map<String, String> map = toStringMap(props, localOnly); return JSONUtils.toJSON(map); } /** * Convert props to Map * * @param props props * @param localOnly include local prop sets only or not * @return String Map of props */ public static Map<String, String> toStringMap(final Props props, final boolean localOnly) { final HashMap<String, String> map = new HashMap<>(); final Set<String> keyset = localOnly ? props.localKeySet() : props.getKeySet(); for (final String key : keyset) { final String value = props.get(key); map.put(key, value); } return map; } /** * Convert json String to Prop Object * * @param json json formatted string * @return a new constructed Prop Object * @throws IOException exception on parsing json string to prop object */ public static Props fromJSONString(final String json) throws IOException { final Map<String, String> obj = (Map<String, String>) JSONUtils.parseJSONFromString(json); final Props props = new Props(null, obj); return props; } /** * Convert a hierarchical Map to Prop Object * * @param propsMap a hierarchical Map * @return a new constructed Props Object */ public static Props fromHierarchicalMap(final Map<String, Object> propsMap) { if (propsMap == null) { return null; } final String source = (String) propsMap.get("source"); final Map<String, String> propsParams = (Map<String, String>) propsMap.get("props"); final Map<String, Object> parent = (Map<String, Object>) propsMap.get("parent"); final Props parentProps = fromHierarchicalMap(parent); final Props props = new Props(parentProps, propsParams); props.setSource(source); return props; } /** * Convert a Props object to a hierarchical Map * * @param props props object * @return a hierarchical Map presented Props object */ public static Map<String, Object> toHierarchicalMap(final Props props) { final Map<String, Object> propsMap = new HashMap<>(); propsMap.put("source", props.getSource()); propsMap.put("props", toStringMap(props, true)); if (props.getParent() != null) { propsMap.put("parent", toHierarchicalMap(props.getParent())); } return propsMap; } /** * The difference between old and new Props * * @param oldProps old Props * @param newProps new Props * @return string formatted difference */ public static String getPropertyDiff(Props oldProps, Props newProps) { final StringBuilder builder = new StringBuilder(""); // oldProps can not be null during the below comparison process. 
if (oldProps == null) { oldProps = new Props(); } if (newProps == null) { newProps = new Props(); } final MapDifference<String, String> md = Maps.difference(toStringMap(oldProps, false), toStringMap(newProps, false)); final Map<String, String> newlyCreatedProperty = md.entriesOnlyOnRight(); if (newlyCreatedProperty != null && newlyCreatedProperty.size() > 0) { builder.append("Newly created Properties: "); newlyCreatedProperty.forEach((k, v) -> { builder.append("[ " + k + ", " + v + "], "); }); builder.append("\n"); } final Map<String, String> deletedProperty = md.entriesOnlyOnLeft(); if (deletedProperty != null && deletedProperty.size() > 0) { builder.append("Deleted Properties: "); deletedProperty.forEach((k, v) -> { builder.append("[ " + k + ", " + v + "], "); }); builder.append("\n"); } final Map<String, MapDifference.ValueDifference<String>> diffProperties = md.entriesDiffering(); if (diffProperties != null && diffProperties.size() > 0) { builder.append("Modified Properties: "); diffProperties.forEach((k, v) -> { builder.append("[ " + k + ", " + v.leftValue() + "-->" + v.rightValue() + "], "); }); } return builder.toString(); } }
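A small sketch of PropsUtils.resolveProps from the row above, which first expands ${...} references and then evaluates $( ... ) JEXL expressions; the keys and values here are illustrative only.

    import azkaban.utils.Props;
    import azkaban.utils.PropsUtils;

    public class ResolveDemo {
        public static void main(String[] args) {
            Props props = Props.of(
                    "base.dir", "/data",
                    "input.dir", "${base.dir}/input", // variable reference
                    "file.count", "$(1 + 2)");        // JEXL expression
            Props resolved = PropsUtils.resolveProps(props);
            System.out.println(resolved.get("input.dir"));  // /data/input
            System.out.println(resolved.get("file.count")); // 3
        }
    }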
0
java-sources/ai/databand/azkaban/az-core/3.90.0/azkaban
java-sources/ai/databand/azkaban/az-core/3.90.0/azkaban/utils/TimeUtils.java
/* * Copyright 2019 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.utils; import java.time.Instant; import java.time.LocalDateTime; import java.time.ZoneId; import java.time.ZoneOffset; import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; import org.joda.time.Days; import org.joda.time.DurationFieldType; import org.joda.time.Hours; import org.joda.time.Minutes; import org.joda.time.Months; import org.joda.time.ReadablePeriod; import org.joda.time.Seconds; import org.joda.time.Weeks; import org.joda.time.Years; /** * Utilities for time operations. */ public class TimeUtils { private static final String DATE_TIME_ZONE_PATTERN = "yyyy/MM/dd HH:mm:ss z"; private static final String DATE_TIME_PATTERN = "yyyy-MM-dd HH:mm:ss"; // Number of seconds in one day. private static final int ONE_DAY = 86400; /** * Formats the given millisecond instant into a string using the pattern "yyyy/MM/dd HH:mm:ss z" */ public static String formatDateTimeZone(final long timestampMs) { return format(timestampMs, DATE_TIME_ZONE_PATTERN); } /** * Formats the given millisecond instant into a string using the pattern "yyyy-MM-dd HH:mm:ss" */ public static String formatDateTime(final long timestampMs) { return format(timestampMs, DATE_TIME_PATTERN); } private static String format(final long timestampMs, final String pattern) { if (timestampMs < 0) { return "-"; } final DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern); final ZonedDateTime zonedDateTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(timestampMs), ZoneId.systemDefault()); return formatter.format(zonedDateTime); } /** * Takes a date string formatted as "yyyy-MM-dd HH:mm:ss" and converts it into milliseconds * since the Epoch in UTC */ public static long convertDateTimeToUTCMillis(final String dateTime) { final DateTimeFormatter formatter = DateTimeFormatter.ofPattern(DATE_TIME_PATTERN); final LocalDateTime parsedDate = LocalDateTime.parse(dateTime, formatter); return parsedDate.atZone(ZoneOffset.UTC).toInstant().toEpochMilli(); } /** * Formats a start/end timestamp pair into a human-readable duration string * * @param startTime start time in milliseconds; -1 yields "-" * @param endTime end time in milliseconds; -1 means still running, so the duration is measured against the current time * @return Duration String */ public static String formatDuration(final long startTime, final long endTime) { if (startTime == -1) { return "-"; } final long durationMS; if (endTime == -1) { durationMS = System.currentTimeMillis() - startTime; } else { durationMS = endTime - startTime; } long seconds = durationMS / 1000; if (seconds < 60) { return seconds + " sec"; } long minutes = seconds / 60; seconds %= 60; if (minutes < 60) { return minutes + "m " + seconds + "s"; } long hours = minutes / 60; minutes %= 60; if (hours < 24) { return hours + "h " + minutes + "m " + seconds + "s"; } final long days = hours / 24; hours %= 24; return days + "d " + hours + "h " + minutes + "m"; } /** * Format ReadablePeriod object to string * * @param period readable period object * @return String representation of the ReadablePeriod object */ public static String formatPeriod(final ReadablePeriod period) { String periodStr = "null"; if (period == null) { return periodStr; } if (period.get(DurationFieldType.years()) > 0) { final int years = period.get(DurationFieldType.years()); periodStr = years + " year(s)"; } else if (period.get(DurationFieldType.months()) > 0) { final int months = period.get(DurationFieldType.months()); periodStr = months + " month(s)"; } else if (period.get(DurationFieldType.weeks()) > 0) { final int weeks = period.get(DurationFieldType.weeks()); periodStr = weeks + " week(s)"; } else if (period.get(DurationFieldType.days()) > 0) { final int days = period.get(DurationFieldType.days()); periodStr = days + " day(s)"; } else if (period.get(DurationFieldType.hours()) > 0) { final int hours = period.get(DurationFieldType.hours()); periodStr = hours + " hour(s)"; } else if (period.get(DurationFieldType.minutes()) > 0) { final int minutes = period.get(DurationFieldType.minutes()); periodStr = minutes + " minute(s)"; } else if (period.get(DurationFieldType.seconds()) > 0) { final int seconds = period.get(DurationFieldType.seconds()); periodStr = seconds + " second(s)"; } return periodStr; } /** * Parse Period String to a ReadablePeriod Object * * @param periodStr string formatted period * @return ReadablePeriod Object */ public static ReadablePeriod parsePeriodString(final String periodStr) { final ReadablePeriod period; final char periodUnit = periodStr.charAt(periodStr.length() - 1); if (periodStr.equals("null") || periodUnit == 'n') { return null; } final int periodInt = Integer.parseInt(periodStr.substring(0, periodStr.length() - 1)); switch (periodUnit) { case 'y': period = Years.years(periodInt); break; case 'M': period = Months.months(periodInt); break; case 'w': period = Weeks.weeks(periodInt); break; case 'd': period = Days.days(periodInt); break; case 'h': period = Hours.hours(periodInt); break; case 'm': period = Minutes.minutes(periodInt); break; case 's': period = Seconds.seconds(periodInt); break; default: throw new IllegalArgumentException("Invalid schedule period unit '" + periodUnit + "'"); } return period; } /** * Convert ReadablePeriod Object to string * * @param period ReadablePeriod Object * @return string formatted ReadablePeriod Object */ public static String createPeriodString(final ReadablePeriod period) { String periodStr = "null"; if (period == null) { return periodStr; } if (period.get(DurationFieldType.years()) > 0) { final int years = period.get(DurationFieldType.years()); periodStr = years + "y"; } else if (period.get(DurationFieldType.months()) > 0) { final int months = period.get(DurationFieldType.months()); periodStr = months + "M"; } else if (period.get(DurationFieldType.weeks()) > 0) { final int weeks = period.get(DurationFieldType.weeks()); periodStr = weeks + "w"; } else if (period.get(DurationFieldType.days()) > 0) { final int days = period.get(DurationFieldType.days()); periodStr = days + "d"; } else if (period.get(DurationFieldType.hours()) > 0) { final int hours = period.get(DurationFieldType.hours()); periodStr = hours + "h"; } else if (period.get(DurationFieldType.minutes()) > 0) { final int minutes = period.get(DurationFieldType.minutes()); periodStr = minutes + "m"; } else if (period.get(DurationFieldType.seconds()) > 0) { final int seconds = period.get(DurationFieldType.seconds()); periodStr = seconds + "s"; } return periodStr; } /** * Check whether more than n seconds have elapsed since the reference time * * @param referenceTime reference time * @param second number of seconds * @return true when more than n seconds have elapsed since the reference time */ public static boolean timeEscapedOver(long referenceTime, int second) { return ((System.currentTimeMillis() - referenceTime) / 1000F) > (second * 1.0); } /** * Count how many whole days have elapsed since the reference time * @param referenceTime reference time * @return number of days */ public static int daysEscapedOver(long referenceTime) { return Math.round(((System.currentTimeMillis() - referenceTime) / 1000f) / (ONE_DAY * 1.0f) - 0.5f); } }
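A minimal usage sketch for the helpers above (illustrative only, not part of the Azkaban sources; it assumes the demo class imports TimeUtils from azkaban.utils, and the expected outputs in the comments follow from the code as written):

import azkaban.utils.TimeUtils;
import org.joda.time.ReadablePeriod;

public class TimeUtilsDemo {
  public static void main(String[] args) {
    // Round-trip a schedule period through its compact form; "2d" means two days.
    final ReadablePeriod period = TimeUtils.parsePeriodString("2d");
    System.out.println(TimeUtils.formatPeriod(period));       // 2 day(s)
    System.out.println(TimeUtils.createPeriodString(period)); // 2d

    // An end time of -1 means "still running", so the duration is measured against now.
    final long startedNinetySecondsAgo = System.currentTimeMillis() - 90_000L;
    System.out.println(TimeUtils.formatDuration(startedNinetySecondsAgo, -1)); // 1m 30s
  }
}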
0
java-sources/ai/databand/azkaban/az-core/3.90.0/azkaban
java-sources/ai/databand/azkaban/az-core/3.90.0/azkaban/utils/UndefinedPropertyException.java
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.utils; /** * Indicates that a required property is missing from the Props */ public class UndefinedPropertyException extends RuntimeException { private static final long serialVersionUID = 1; public UndefinedPropertyException(final String message) { super(message); } }
0
java-sources/ai/databand/azkaban/az-core/3.90.0/azkaban
java-sources/ai/databand/azkaban/az-core/3.90.0/azkaban/utils/Utils.java
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.utils; import java.io.BufferedInputStream; import java.io.BufferedOutputStream; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.OutputStream; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.nio.charset.StandardCharsets; import java.text.ParseException; import java.util.ArrayList; import java.util.Collection; import java.util.Date; import java.util.Enumeration; import java.util.List; import java.util.Random; import java.util.TimeZone; import java.util.zip.ZipEntry; import java.util.zip.ZipFile; import java.util.zip.ZipOutputStream; import org.apache.commons.io.IOUtils; import org.apache.log4j.Logger; import org.joda.time.DateTimeZone; import org.quartz.CronExpression; /** * A util helper class full of static methods that are commonly used. */ public class Utils { private static final Random RANDOM = new Random(); private static final Logger logger = Logger.getLogger(Utils.class); /** * Private constructor. */ private Utils() { } /** * Equivalent to Object.equals except that it handles nulls. If a and b are both null, true is * returned. */ public static boolean equals(final Object a, final Object b) { if (a == null || b == null) { return a == b; } return a.equals(b); } /** * Return the object if it is non-null, otherwise throw an exception * * @param <T> The type of the object * @param t The object * @return The object if it is not null * @throws IllegalArgumentException if the object is null */ public static <T> T nonNull(final T t) { if (t == null) { throw new IllegalArgumentException("Null value not allowed."); } else { return t; } } public static File findFilefromDir(final File dir, final String fn) { if (dir.isDirectory()) { for (final File f : dir.listFiles()) { if (f.getName().equals(fn)) { return f; } } } return null; } /** * Return the value itself if it is non-null, otherwise return the default value * * @param value The object * @param defaultValue default value if object == null * @param <T> The type of the object * @return The object itself or default value when it is null */ public static <T> T ifNull(final T value, final T defaultValue) { return (value == null) ? 
defaultValue : value; } /** * Print the message and then exit with the given exit code * * @param message The message to print * @param exitCode The exit code */ public static void croak(final String message, final int exitCode) { System.err.println(message); System.exit(exitCode); } /** * Tests whether a port is valid or not * * @return true, if port is valid */ public static boolean isValidPort(final int port) { if (port >= 1 && port <= 65535) { return true; } return false; } public static File createTempDir() { return createTempDir(new File(System.getProperty("java.io.tmpdir"))); } public static File createTempDir(final File parent) { final File temp = new File(parent, Integer.toString(Math.abs(RANDOM.nextInt()) % 100000000)); temp.delete(); temp.mkdir(); temp.deleteOnExit(); return temp; } public static void zip(final File input, final File output) throws IOException { final FileOutputStream out = new FileOutputStream(output); final ZipOutputStream zOut = new ZipOutputStream(out); try { zipFile("", input, zOut); } finally { zOut.close(); } } public static void zipFolderContent(final File folder, final File output) throws IOException { final FileOutputStream out = new FileOutputStream(output); final ZipOutputStream zOut = new ZipOutputStream(out); try { final File[] files = folder.listFiles(); if (files != null) { for (final File f : files) { zipFile("", f, zOut); } } } finally { zOut.close(); } } private static void zipFile(final String path, final File input, final ZipOutputStream zOut) throws IOException { if (input.isDirectory()) { final File[] files = input.listFiles(); if (files != null) { for (final File f : files) { final String childPath = path + input.getName() + (f.isDirectory() ? File.separator : ""); zipFile(childPath, f, zOut); } } } else { final String childPath = path + (path.length() > 0 ? 
"/" : "") + input.getName(); final ZipEntry entry = new ZipEntry(childPath); zOut.putNextEntry(entry); final InputStream fileInputStream = new BufferedInputStream(new FileInputStream(input)); try { IOUtils.copy(fileInputStream, zOut); } finally { fileInputStream.close(); } } } public static void unzip(final ZipFile source, final File dest) throws IOException { final Enumeration<?> entries = source.entries(); while (entries.hasMoreElements()) { final ZipEntry entry = (ZipEntry) entries.nextElement(); final File newFile = new File(dest, entry.getName()); if (!newFile.getCanonicalPath().startsWith(dest.getCanonicalPath())) { throw new IOException( "Extracting zip entry would have resulted in a file outside the specified destination" + " directory."); } if (entry.isDirectory()) { newFile.mkdirs(); } else { newFile.getParentFile().mkdirs(); final InputStream src = source.getInputStream(entry); try { final OutputStream output = new BufferedOutputStream(new FileOutputStream(newFile)); try { IOUtils.copy(src, output); } finally { output.close(); } } finally { src.close(); } } } } public static String flattenToString(final Collection<?> collection, final String delimiter) { final StringBuffer buffer = new StringBuffer(); for (final Object obj : collection) { buffer.append(obj.toString()); buffer.append(delimiter); } if (buffer.length() > 0) { buffer.setLength(buffer.length() - 1); } return buffer.toString(); } public static Double convertToDouble(final Object obj) { if (obj instanceof String) { return Double.parseDouble((String) obj); } return (Double) obj; } /** * Get the root cause of the Exception * * @param e The Exception * @return The root cause of the Exception */ private static RuntimeException getCause(final InvocationTargetException e) { final Throwable cause = e.getCause(); if (cause instanceof RuntimeException) { throw (RuntimeException) cause; } else { throw new IllegalStateException(e.getCause()); } } /** * Construct a class object with the given arguments * * @param cls The class * @param args The arguments * @return Constructed Object */ public static Object callConstructor(final Class<?> cls, final Object... args) { return callConstructor(cls, getTypes(args), args); } /** * Get the Class of all the objects * * @param args The objects to get the Classes from * @return The classes as an array */ private static Class<?>[] getTypes(final Object... args) { final Class<?>[] argTypes = new Class<?>[args.length]; for (int i = 0; i < argTypes.length; i++) { argTypes[i] = args[i].getClass(); } return argTypes; } /** * Call the class constructor with the given arguments * * @param cls The class * @param args The arguments * @return The constructed object */ private static Object callConstructor(final Class<?> cls, final Class<?>[] argTypes, final Object[] args) { try { final Constructor<?> cons = cls.getConstructor(argTypes); return cons.newInstance(args); } catch (final InvocationTargetException e) { throw getCause(e); } catch (final IllegalAccessException | NoSuchMethodException | InstantiationException e) { throw new IllegalStateException(e); } } public static Object invokeStaticMethod(final ClassLoader loader, final String className, final String methodName, final Object... 
args) throws ClassNotFoundException, SecurityException, NoSuchMethodException, IllegalArgumentException, IllegalAccessException, InvocationTargetException { final Class<?> clazz = loader.loadClass(className); final Class<?>[] argTypes = new Class[args.length]; for (int i = 0; i < args.length; ++i) { argTypes[i] = args[i].getClass(); } final Method method = clazz.getDeclaredMethod(methodName, argTypes); return method.invoke(null, args); } public static void copyStream(final InputStream input, final OutputStream output) throws IOException { final byte[] buffer = new byte[1024]; int bytesRead; while ((bytesRead = input.read(buffer)) != -1) { output.write(buffer, 0, bytesRead); } } /** * @param strMemSize : memory string in the format such as 1G, 500M, 3000K, 5000 * @return : long value of memory amount in kb */ public static long parseMemString(final String strMemSize) { if (strMemSize == null) { return 0L; } final long size; if (strMemSize.endsWith("g") || strMemSize.endsWith("G") || strMemSize.endsWith("m") || strMemSize.endsWith("M") || strMemSize.endsWith("k") || strMemSize.endsWith("K")) { final String strSize = strMemSize.substring(0, strMemSize.length() - 1); size = Long.parseLong(strSize); } else { size = Long.parseLong(strMemSize); } final long sizeInKb; if (strMemSize.endsWith("g") || strMemSize.endsWith("G")) { sizeInKb = size * 1024L * 1024L; } else if (strMemSize.endsWith("m") || strMemSize.endsWith("M")) { sizeInKb = size * 1024L; } else if (strMemSize.endsWith("k") || strMemSize.endsWith("K")) { sizeInKb = size; } else { sizeInKb = size / 1024L; } return sizeInKb; } /** * @param cronExpression: a whitespace-separated Quartz cron expression string * @return : org.quartz.CronExpression object. * * TODO: Currently, we have to transform the Joda timezone to a Java timezone due to CronExpression. * Since Java 8 enhanced the time functionality, we may migrate all Joda-Time usage to Java Time in * the future. */ public static CronExpression parseCronExpression(final String cronExpression, final DateTimeZone timezone) { if (cronExpression != null) { try { final CronExpression ce = new CronExpression(cronExpression); ce.setTimeZone(TimeZone.getTimeZone(timezone.getID())); return ce; } catch (final ParseException pe) { logger.error("this cron expression {" + cronExpression + "} cannot be parsed. " + "Please check Quartz cron syntax."); } return null; } else { return null; } } /** * @return if the cronExpression is valid or not. */ public static boolean isCronExpressionValid(final String cronExpression, final DateTimeZone timezone) { if (!CronExpression.isValidExpression(cronExpression)) { return false; } /* * The below code is aimed at checking some cases that the above code cannot identify, * e.g. <0 0 3 ? * * 22> OR <0 0 3 ? * 8>. Under these cases, the below code is able to tell. */ final CronExpression cronExecutionTime = parseCronExpression(cronExpression, timezone); return (!(cronExecutionTime == null || cronExecutionTime.getNextValidTimeAfter(new Date()) == null)); } /** * Run a sequence of commands * * @param commands sequence of commands * @return list of output result */ public static ArrayList<String> runProcess(String...
commands) throws InterruptedException, IOException { final ProcessBuilder processBuilder = new ProcessBuilder(commands); final ArrayList<String> output = new ArrayList<>(); final Process process = processBuilder.start(); // Read stdout before waiting for the process to exit: calling waitFor() first can deadlock // once the child process fills the OS pipe buffer. final InputStream inputStream = process.getInputStream(); try { final java.io.BufferedReader reader = new java.io.BufferedReader( new InputStreamReader(inputStream, StandardCharsets.UTF_8)); String line; while ((line = reader.readLine()) != null) { output.add(line); } } finally { inputStream.close(); } process.waitFor(); return output; } /** * Merge the absolute paths of source paths into the list of destination paths * * @param destinationPaths the path list which the source paths will be merged into * @param sourcePaths source paths * @param rootPath defined root path for source paths when they are not absolute path */ public static void mergeTypeClassPaths( List<String> destinationPaths, final List<String> sourcePaths, final String rootPath) { if (sourcePaths != null) { for (String jar : sourcePaths) { File file = new File(jar); if (!file.isAbsolute()) { file = new File(rootPath + File.separatorChar + jar); } String path = file.getAbsolutePath(); if (!destinationPaths.contains(path)) { destinationPaths.add(path); } } } } /** * Merge elements in Source List into the Destination List * * @param destinationList the list which the source elements will be merged into * @param sourceList source List */ public static void mergeStringList( final List<String> destinationList, final List<String> sourceList) { if (sourceList != null) { for (String item : sourceList) { if (!destinationList.contains(item)) { destinationList.add(item); } } } } }
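A short, hypothetical demo of a few of the helpers above (the demo class itself is not part of the sources; the printed values in the comments follow directly from the code as written):

import azkaban.utils.Utils;
import java.util.Arrays;

public class UtilsDemo {
  public static void main(String[] args) {
    // Memory strings are normalized to KB; a bare number is treated as bytes and divided by 1024.
    System.out.println(Utils.parseMemString("2G"));   // 2097152
    System.out.println(Utils.parseMemString("500M")); // 512000
    System.out.println(Utils.parseMemString("4096")); // 4

    // Join a collection with a delimiter, with no trailing separator.
    System.out.println(Utils.flattenToString(Arrays.asList("a", "b", "c"), ",")); // a,b,c

    System.out.println(Utils.isValidPort(8081)); // true
  }
}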
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/AzkabanCommonModule.java
/* * Copyright 2017 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. * */ package azkaban; import azkaban.Constants.ConfigurationKeys; import azkaban.db.AzkabanDataSource; import azkaban.db.H2FileDataSource; import azkaban.db.MySQLDataSource; import azkaban.executor.ExecutorLoader; import azkaban.executor.JdbcExecutorLoader; import azkaban.project.JdbcProjectImpl; import azkaban.project.ProjectLoader; import azkaban.spi.Storage; import azkaban.spi.StorageException; import azkaban.storage.StorageImplementationType; import azkaban.trigger.JdbcTriggerImpl; import azkaban.trigger.TriggerLoader; import azkaban.utils.OsCpuUtil; import azkaban.utils.Props; import com.google.inject.AbstractModule; import com.google.inject.Provides; import org.apache.commons.dbutils.QueryRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * This Guice module is currently a one place container for all bindings in the current module. This * is intended to help during the migration process to Guice. Once this class starts growing we can * move towards more modular structuring of Guice components. */ public class AzkabanCommonModule extends AbstractModule { private static final Logger log = LoggerFactory.getLogger(AzkabanCommonModule.class); private final Props props; private final AzkabanCommonModuleConfig config; public AzkabanCommonModule(final Props props) { this.props = props; this.config = new AzkabanCommonModuleConfig(props); } @Override protected void configure() { install(new AzkabanCoreModule(this.props)); bind(Storage.class).to(resolveStorageClassType()); bind(AzkabanDataSource.class).to(resolveDataSourceType()); bind(TriggerLoader.class).to(JdbcTriggerImpl.class); bind(ProjectLoader.class).to(JdbcProjectImpl.class); bind(ExecutorLoader.class).to(JdbcExecutorLoader.class); bind(OsCpuUtil.class).toProvider(() -> { final int cpuLoadPeriodSec = this.props .getInt(ConfigurationKeys.AZKABAN_POLLING_CRITERIA_CPU_LOAD_PERIOD_SEC, Constants.DEFAULT_AZKABAN_POLLING_CRITERIA_CPU_LOAD_PERIOD_SEC); final int pollingIntervalMs = this.props .getInt(ConfigurationKeys.AZKABAN_POLLING_INTERVAL_MS, Constants.DEFAULT_AZKABAN_POLLING_INTERVAL_MS); return new OsCpuUtil(Math.max(1, (cpuLoadPeriodSec * 1000) / pollingIntervalMs)); }); } public Class<? extends Storage> resolveStorageClassType() { final StorageImplementationType type = StorageImplementationType .from(this.config.getStorageImplementation()); if (type == StorageImplementationType.HDFS || type == StorageImplementationType.LOCAL_HADOOP) { install(new HadoopModule(this.props)); } if (type != null) { return type.getImplementationClass(); } else { return loadCustomStorageClass(this.config.getStorageImplementation()); } } private Class<? extends Storage> loadCustomStorageClass(final String storageImplementation) { try { return (Class<? extends Storage>) Class.forName(storageImplementation); } catch (final ClassNotFoundException e) { throw new StorageException(e); } } private Class<? 
extends AzkabanDataSource> resolveDataSourceType() { final String databaseType = this.props.getString("database.type"); if (databaseType.equals("h2")) { return H2FileDataSource.class; } else { return MySQLDataSource.class; } } @Provides public QueryRunner createQueryRunner(final AzkabanDataSource dataSource) { return new QueryRunner(dataSource); } }
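A hypothetical bootstrap sketch for this module. The "database.type" key is read by resolveDataSourceType() above; the Props no-arg constructor and the put(String, String) call are assumptions about the Props API made for illustration:

import azkaban.AzkabanCommonModule;
import azkaban.utils.Props;
import com.google.inject.Guice;
import com.google.inject.Injector;

public class CommonModuleBootstrapSketch {
  public static void main(String[] args) {
    final Props props = new Props();  // assumed no-arg constructor
    props.put("database.type", "h2"); // resolveDataSourceType() branches on this key
    final Injector injector = Guice.createInjector(new AzkabanCommonModule(props));
    // Bindings such as ProjectLoader, ExecutorLoader or Storage can now be fetched from the injector.
  }
}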
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/AzkabanCommonModuleConfig.java
/* * Copyright 2017 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. * */ package azkaban; import static azkaban.Constants.ConfigurationKeys.*; import static azkaban.storage.StorageImplementationType.DATABASE; import azkaban.storage.StorageImplementationType; import azkaban.utils.Props; import javax.inject.Inject; import java.net.URI; import org.apache.log4j.Logger; public class AzkabanCommonModuleConfig { private static final Logger log = Logger.getLogger(AzkabanCommonModuleConfig.class); private final Props props; private final URI hdfsProjectRootUri; private final URI cacheDependencyRootUri; private final URI originDependencyRootUri; private final boolean dependencyCachingEnabled; /** * Storage Implementation. This can be any of the {@link StorageImplementationType} values, in which * case {@link StorageFactory} will create the appropriate storage instance. Or one can feed in a * custom implementation class using the fully qualified class name required by a classloader. * * examples: LOCAL, DATABASE, azkaban.storage.MyFavStorage */ private String storageImplementation = DATABASE.name(); private String localStorageBaseDirPath = "./local/storage"; @Inject public AzkabanCommonModuleConfig(final Props props) { this.props = props; this.storageImplementation = props.getString(AZKABAN_STORAGE_TYPE, this.storageImplementation); this.localStorageBaseDirPath = props.getString(AZKABAN_STORAGE_LOCAL_BASEDIR, this.localStorageBaseDirPath); this.hdfsProjectRootUri = props.getUri(AZKABAN_STORAGE_HDFS_PROJECT_ROOT_URI, null, true); this.cacheDependencyRootUri = props.getUri(AZKABAN_STORAGE_CACHE_DEPENDENCY_ROOT_URI, null, true); this.originDependencyRootUri = props.getUri(AZKABAN_STORAGE_ORIGIN_DEPENDENCY_ROOT_URI, null, true); this.dependencyCachingEnabled = props.getBoolean(AZKABAN_STORAGE_CACHE_DEPENDENCY_ENABLED, true); } public Props getProps() { return this.props; } public String getStorageImplementation() { return this.storageImplementation; } public String getLocalStorageBaseDirPath() { return this.localStorageBaseDirPath; } public URI getHdfsProjectRootUri() { return this.hdfsProjectRootUri; } public URI getCacheDependencyRootUri() { return this.cacheDependencyRootUri; } public URI getOriginDependencyRootUri() { return this.originDependencyRootUri; } public boolean getDependencyCachingEnabled() { return this.dependencyCachingEnabled; } }
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/HadoopModule.java
/* * Copyright 2017 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. * */ package azkaban; import static azkaban.Constants.ConfigurationKeys.HADOOP_CONF_DIR_PATH; import static com.google.common.base.Preconditions.checkArgument; import static java.util.Objects.requireNonNull; import azkaban.cachedhttpfilesystem.CachedHttpFileSystem; import azkaban.spi.AzkabanException; import azkaban.storage.HdfsAuth; import azkaban.utils.Props; import com.google.inject.AbstractModule; import com.google.inject.Provides; import java.io.File; import java.io.IOException; import java.net.URI; import javax.annotation.Nullable; import javax.inject.Inject; import javax.inject.Named; import javax.inject.Singleton; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Place Hadoop dependencies in this module. Since Hadoop is not included in the Azkaban Runtime * dependency, we only install this module when Hadoop related injection (e.g., HDFS storage) is * needed. */ public class HadoopModule extends AbstractModule { private static final String CHTTP_SCHEME = "chttp"; private static final String LOCAL_SCHEME = "file"; private static final String HDFS_SCHEME = "hdfs"; private static final Logger log = LoggerFactory.getLogger(HadoopModule.class); private final Props props; HadoopModule(final Props props) { this.props = props; } @Inject @Provides @Singleton @Named("hdfsConf") public Configuration createHDFSConfiguration() { final String hadoopConfDirPath = requireNonNull(this.props.get(HADOOP_CONF_DIR_PATH)); final File hadoopConfDir = new File(requireNonNull(hadoopConfDirPath)); checkArgument(hadoopConfDir.exists() && hadoopConfDir.isDirectory()); final Configuration conf = new Configuration(false); conf.addResource(new org.apache.hadoop.fs.Path(hadoopConfDirPath, "core-site.xml")); conf.addResource(new org.apache.hadoop.fs.Path(hadoopConfDirPath, "hdfs-site.xml")); conf.set("fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class.getName()); return conf; } @Inject @Provides @Singleton @Named("httpConf") public Configuration createHTTPConfiguration(final AzkabanCommonModuleConfig azConfig) { // NOTE (for the future): If we want to permanently remove the caching layer and simply pull dependencies // directly from the HTTP origin, swap out CachedHttpFileSystem for Hadoop's native HttpFileSystem // by editing this configuration here. In addition it will no longer be necessary to have two // separate createHDFSCachedHttpFileSystem and createLocalCachedHttpFileSystem methods but we can // just have one method createHttpFileSystem that creates one uncached HttpFileSystem. LocalHadoopStorage // and HdfsStorage constructors can be updated to inject the same HttpFileSystem instead of different versions // like we do right now: the locally cached version for LocalHadoopStorage and the hdfs cached version // for HdfsStorage. 
final Configuration conf = new Configuration(false); conf.set("fs.chttp.impl", azkaban.cachedhttpfilesystem.CachedHttpFileSystem.class.getName()); final boolean cachingEnabled = azConfig.getDependencyCachingEnabled(); if (cachingEnabled) { // If caching is enabled BUT the cache dependency root URI is not specified, return null // for this configuration (indicating this configuration cannot be generated - thin archives // should be disabled) if (azConfig.getCacheDependencyRootUri() == null) { return null; } // If caching is enabled, tell the CachedHttpFileSystem where to cache its files conf.set(CachedHttpFileSystem.CACHE_ROOT_URI, azConfig.getCacheDependencyRootUri().toString()); } else { // If caching is disabled, tell the CachedHttpFileSystem to disable caching conf.set(CachedHttpFileSystem.CACHE_ENABLED_FLAG, "false"); } return conf; } @Inject @Provides @Singleton @Named("localConf") public Configuration createLocalConfiguration() { final Configuration conf = new Configuration(false); conf.set("fs.file.impl", org.apache.hadoop.fs.LocalFileSystem.class.getName()); return conf; } @Inject @Provides @Singleton @Named("hdfsFS") public FileSystem createHDFSFileSystem(@Named("hdfsConf") final Configuration hdfsConf, final HdfsAuth auth) { try { auth.authorize(); return FileSystem.get(hdfsConf); } catch (final IOException e) { log.error("Unable to initialize HDFS FileSystem.", e); throw new AzkabanException(e); } } @Inject @Provides @Singleton @Named("hdfs_cached_httpFS") public FileSystem createHDFSCachedHttpFileSystem(@Named("hdfsConf") final Configuration hdfsConf, @Named("httpConf") @Nullable final Configuration httpConf, final HdfsAuth auth, final AzkabanCommonModuleConfig azConfig) { if (httpConf == null) { return null; } // If caching is enabled, ensure the URI where cached files will be stored has the correct scheme // and has an authority. if (azConfig.getDependencyCachingEnabled()) { validateURI(azConfig.getCacheDependencyRootUri(), HDFS_SCHEME, true); } final Configuration finalConf = new Configuration(false); finalConf.addResource(hdfsConf); finalConf.addResource(httpConf); auth.authorize(); return getCachedHttpFileSystem(finalConf, azConfig); } @Inject @Provides @Singleton @Named("local_cached_httpFS") public FileSystem createLocalCachedHttpFileSystem(@Named("localConf") final Configuration localConf, @Named("httpConf") @Nullable final Configuration httpConf, final AzkabanCommonModuleConfig azConfig) { if (httpConf == null) { return null; } // If caching is enabled, ensure the URI where cached files will be stored has the correct scheme. if (azConfig.getDependencyCachingEnabled()) { validateURI(azConfig.getCacheDependencyRootUri(), LOCAL_SCHEME, false); } final Configuration finalConf = new Configuration(false); finalConf.addResource(localConf); finalConf.addResource(httpConf); return getCachedHttpFileSystem(finalConf, azConfig); } private static FileSystem getCachedHttpFileSystem(final Configuration conf, final AzkabanCommonModuleConfig azConfig) { // If the props necessary to enable CachedHttpFileSystem are not specified, return null. if (azConfig.getOriginDependencyRootUri() == null) { return null; } // Ensure the origin URI has the correct scheme and has an authority.
validateURI(azConfig.getOriginDependencyRootUri(), CHTTP_SCHEME, true); try { return FileSystem.get(azConfig.getOriginDependencyRootUri(), conf); } catch (final IOException e) { log.error("Unable to initialize CachedHttpFileSystem.", e); throw new AzkabanException(e); } } /** * Ensure a URI is valid for a given scheme and contains an authority (if required). */ private static void validateURI(final URI uri, final String scheme, final boolean mustHaveAuthority) { if (mustHaveAuthority) { requireNonNull(uri.getAuthority(), "URI must have host:port mentioned."); } checkArgument(scheme.equals(uri.getScheme())); } @Override protected void configure() { } }
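Since validateURI is private, here is a standalone restatement of the same check for illustration; the URIs are made-up examples, not values Azkaban ships with:

import static com.google.common.base.Preconditions.checkArgument;
import static java.util.Objects.requireNonNull;
import java.net.URI;

public final class UriCheckSketch {

  // Mirrors HadoopModule.validateURI: optional authority check, then scheme match.
  static void validate(final URI uri, final String scheme, final boolean mustHaveAuthority) {
    if (mustHaveAuthority) {
      requireNonNull(uri.getAuthority(), "URI must have host:port mentioned.");
    }
    checkArgument(scheme.equals(uri.getScheme()));
  }

  public static void main(String[] args) {
    validate(URI.create("chttp://deps.example.com:8080/artifacts"), "chttp", true); // passes
    validate(URI.create("file:///tmp/dependency-cache"), "file", false);            // passes; no authority needed
  }
}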
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/ServiceProvider.java
/* * Copyright 2017 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. * */ package azkaban; import static com.google.common.base.Preconditions.checkState; import static java.util.Objects.requireNonNull; import com.google.inject.Injector; /** * The {@link ServiceProvider} class is an interface to fetch any external dependency. Under the * hood it simply maintains a Guice {@link Injector} which is used to fetch the required service * type. The current direction of utilization of Guice is to gradually move classes into the Guice * scope so that Guice can automatically resolve dependencies and provide the required services * directly. */ @SuppressWarnings("ImmutableEnumChecker") public enum ServiceProvider { SERVICE_PROVIDER; private Injector injector = null; /** * Ensure that injector is set only once! * * @param injector Guice injector is itself used for providing services. */ public synchronized void setInjector(final Injector injector) { checkState(this.injector == null, "Injector is already set"); this.injector = requireNonNull(injector, "arg injector is null"); } public synchronized void unsetInjector() { this.injector = null; } public <T> T getInstance(final Class<T> clazz) { return requireNonNull(this.injector).getInstance(clazz); } }
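A usage sketch of the singleton (assuming an injector built elsewhere with the application's modules, e.g. AzkabanCommonModule above; ProjectLoader is just one example of a bound service):

import azkaban.ServiceProvider;
import azkaban.project.ProjectLoader;
import com.google.inject.Injector;

public class ServiceProviderSketch {
  static void bootstrap(final Injector injector) {
    // Publish the injector exactly once at startup...
    ServiceProvider.SERVICE_PROVIDER.setInjector(injector);
    // ...then fetch services anywhere without threading the injector through call sites.
    final ProjectLoader loader = ServiceProvider.SERVICE_PROVIDER.getInstance(ProjectLoader.class);
  }
}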
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/alert/Alerter.java
/* * Copyright 2014 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.alert; import azkaban.executor.ExecutableFlow; import azkaban.executor.Executor; import azkaban.executor.ExecutorManagerException; import azkaban.sla.SlaOption; import java.util.List; public interface Alerter { void alertOnSuccess(ExecutableFlow exflow) throws Exception; void alertOnError(ExecutableFlow exflow, String... extraReasons) throws Exception; void alertOnFirstError(ExecutableFlow exflow) throws Exception; void alertOnSla(SlaOption slaOption, String slaMessage) throws Exception; void alertOnFailedUpdate(Executor executor, List<ExecutableFlow> executions, ExecutorManagerException e); void alertOnFailedExecutorHealthCheck(Executor executor, List<ExecutableFlow> executions, ExecutorManagerException e, List<String> alertEmails); }
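A minimal, hypothetical implementation sketch that only logs (not shipped with Azkaban; shown to make the contract concrete):

import azkaban.alert.Alerter;
import azkaban.executor.ExecutableFlow;
import azkaban.executor.Executor;
import azkaban.executor.ExecutorManagerException;
import azkaban.sla.SlaOption;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingAlerter implements Alerter {

  private static final Logger log = LoggerFactory.getLogger(LoggingAlerter.class);

  @Override
  public void alertOnSuccess(final ExecutableFlow exflow) {
    log.info("Flow succeeded: {}", exflow);
  }

  @Override
  public void alertOnError(final ExecutableFlow exflow, final String... extraReasons) {
    log.error("Flow failed: {}; reasons: {}", exflow, String.join("; ", extraReasons));
  }

  @Override
  public void alertOnFirstError(final ExecutableFlow exflow) {
    log.error("First error in flow: {}", exflow);
  }

  @Override
  public void alertOnSla(final SlaOption slaOption, final String slaMessage) {
    log.warn("SLA alert: {}", slaMessage);
  }

  @Override
  public void alertOnFailedUpdate(final Executor executor, final List<ExecutableFlow> executions,
      final ExecutorManagerException e) {
    log.error("Failed to update executor {} for {} executions", executor, executions.size(), e);
  }

  @Override
  public void alertOnFailedExecutorHealthCheck(final Executor executor,
      final List<ExecutableFlow> executions, final ExecutorManagerException e,
      final List<String> alertEmails) {
    log.error("Health check failed for executor {}; would notify {}", executor, alertEmails, e);
  }
}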
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/database/AzkabanDataSource.java
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.database; import org.apache.commons.dbcp2.BasicDataSource; public abstract class AzkabanDataSource extends BasicDataSource { public abstract boolean allowsOnDuplicateKey(); public abstract String getDBType(); }
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/database/AzkabanDatabaseSetup.java
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.database; import azkaban.database.DataSourceUtils.PropertyType; import azkaban.utils.FileIOUtils; import azkaban.utils.Props; import java.io.BufferedInputStream; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import org.apache.commons.dbutils.DbUtils; import org.apache.commons.dbutils.QueryRunner; import org.apache.commons.dbutils.ResultSetHandler; import org.apache.commons.io.IOUtils; import org.apache.log4j.Logger; /** * @deprecated in favor of {@link azkaban.db.DatabaseSetup}. */ @Deprecated public class AzkabanDatabaseSetup { public static final String DATABASE_CHECK_VERSION = "database.check.version"; public static final String DATABASE_AUTO_UPDATE_TABLES = "database.auto.update.tables"; public static final String DATABASE_SQL_SCRIPT_DIR = "database.sql.scripts.dir"; private static final Logger logger = Logger .getLogger(AzkabanDatabaseSetup.class); private static final String DEFAULT_SCRIPT_PATH = "sql"; private static final String CREATE_SCRIPT_PREFIX = "create."; private static final String UPDATE_SCRIPT_PREFIX = "update."; private static final String SQL_SCRIPT_SUFFIX = ".sql"; private static final String FETCH_PROPERTY_BY_TYPE = "SELECT name, value FROM properties WHERE type=?"; private static final String INSERT_DB_PROPERTY = "INSERT INTO properties (name, type, value, modified_time) values (?,?,?,?)"; private static final String UPDATE_DB_PROPERTY = "UPDATE properties SET value=?,modified_time=? WHERE name=? AND type=?"; private final AzkabanDataSource dataSource; private Map<String, String> tables; private Map<String, String> installedVersions; private Set<String> missingTables; private Map<String, List<String>> upgradeList; private String version; private boolean needsUpdating; private String scriptPath = null; public AzkabanDatabaseSetup(final Props props) { this(DataSourceUtils.getDataSource(props)); this.scriptPath = props.getString(DATABASE_SQL_SCRIPT_DIR, DEFAULT_SCRIPT_PATH); } public AzkabanDatabaseSetup(final AzkabanDataSource ds) { this.dataSource = ds; if (this.scriptPath == null) { this.scriptPath = DEFAULT_SCRIPT_PATH; } } // TODO kunkun-tang: Refactor this class. 
loadTableInfo method should sit inside constructor public AzkabanDatabaseSetup(final AzkabanDataSource ds, final Props props) { this.dataSource = ds; this.scriptPath = props.getString(DATABASE_SQL_SCRIPT_DIR, DEFAULT_SCRIPT_PATH); } public void loadTableInfo() throws IOException, SQLException { this.tables = new HashMap<>(); this.installedVersions = new HashMap<>(); this.missingTables = new HashSet<>(); this.upgradeList = new HashMap<>(); final Props dbProps = loadDBProps(); this.version = dbProps.getString("version"); loadInstalledTables(); loadTableVersion(); findMissingTables(); findOutOfDateTables(); this.needsUpdating = !this.upgradeList.isEmpty() || !this.missingTables.isEmpty(); } public boolean needsUpdating() { if (this.version == null) { throw new RuntimeException("Uninitialized. Call loadTableInfo first."); } return this.needsUpdating; } public void printUpgradePlan() { if (!this.tables.isEmpty()) { logger.info("The following are installed tables"); for (final Map.Entry<String, String> installedTable : this.tables.entrySet()) { logger.info(" " + installedTable.getKey() + " version:" + installedTable.getValue()); } } else { logger.info("No installed tables found."); } if (!this.missingTables.isEmpty()) { logger.info("The following are missing tables that need to be installed"); for (final String table : this.missingTables) { logger.info(" " + table); } } else { logger.info("There are no missing tables."); } if (!this.upgradeList.isEmpty()) { logger.info("The following tables need to be updated."); for (final Map.Entry<String, List<String>> upgradeTable : this.upgradeList .entrySet()) { String tableInfo = " " + upgradeTable.getKey() + " versions:"; for (final String upVersion : upgradeTable.getValue()) { tableInfo += upVersion + ","; } logger.info(tableInfo); } } else { logger.info("No tables need to be updated."); } } public void updateDatabase(final boolean createTable, final boolean updateTable) throws SQLException, IOException { // We call this because it has an uninitialized check.
if (!needsUpdating()) { logger.info("Nothing to be done."); return; } if (createTable && !this.missingTables.isEmpty()) { createNewTables(); } if (updateTable && !this.upgradeList.isEmpty()) { updateTables(); } } private Props loadDBProps() throws IOException { final File dbPropsFile = new File(this.scriptPath, "database.properties"); if (!dbPropsFile.exists()) { throw new IOException( "Cannot find 'database.properties' file in " + dbPropsFile.getAbsolutePath()); } return new Props(null, dbPropsFile); } private void loadTableVersion() throws SQLException { logger.info("Searching for table versions in the properties table"); if (this.tables.containsKey("properties")) { // Load version from settings final QueryRunner runner = new QueryRunner(this.dataSource); final Map<String, String> map = runner.query(FETCH_PROPERTY_BY_TYPE, new PropertiesHandler(), PropertyType.DB.getNumVal()); for (final String key : map.keySet()) { final String value = map.get(key); if (key.endsWith(".version")) { final String tableName = key.substring(0, key.length() - ".version".length()); this.installedVersions.put(tableName, value); if (this.tables.containsKey(tableName)) { this.tables.put(tableName, value); } } } } else { logger.info("Properties table doesn't exist."); } } private void loadInstalledTables() throws SQLException { logger.info("Searching for installed tables"); Connection conn = null; try { conn = this.dataSource.getConnection(); final ResultSet rs = conn.getMetaData().getTables(conn.getCatalog(), null, null, new String[]{"TABLE"}); while (rs.next()) { this.tables.put(rs.getString("TABLE_NAME").toLowerCase(), "2.1"); } } finally { DbUtils.commitAndCloseQuietly(conn); } } private void findMissingTables() { final File directory = new File(this.scriptPath); final File[] createScripts = directory.listFiles(new FileIOUtils.PrefixSuffixFileFilter( CREATE_SCRIPT_PREFIX, SQL_SCRIPT_SUFFIX)); if (createScripts != null) { for (final File script : createScripts) { final String name = script.getName(); final String[] nameSplit = name.split("\\."); final String tableName = nameSplit[1]; // TODO temporary fix for Issue #1569: // "Startup fails: missing tables that need to be installed: quartz-tables-all" // this doesn't work because the file actually contains multiple tables and the file name // pattern doesn't match any of those. Until this file was added, the convention had been that // each file contains a single table and the file name matches the table name.
if ("quartz-tables-all".equals(tableName)) { continue; } if (!this.tables.containsKey(tableName)) { this.missingTables.add(tableName); } } } } private void findOutOfDateTables() { for (final String key : this.tables.keySet()) { final String version = this.tables.get(key); final List<String> upgradeVersions = findOutOfDateTable(key, version); if (upgradeVersions != null && !upgradeVersions.isEmpty()) { this.upgradeList.put(key, upgradeVersions); } } for (final String key : this.missingTables) { final List<String> upgradeVersions = findOutOfDateTable(key, ""); if (upgradeVersions != null && !upgradeVersions.isEmpty()) { this.upgradeList.put(key, upgradeVersions); } } } private List<String> findOutOfDateTable(final String table, final String currentVersion) { final File directory = new File(this.scriptPath); final ArrayList<String> versions = new ArrayList<>(); final File[] createScripts = directory.listFiles(new FileIOUtils.PrefixSuffixFileFilter( UPDATE_SCRIPT_PREFIX + table, SQL_SCRIPT_SUFFIX)); if (createScripts == null || createScripts.length == 0) { return null; } final String updateFileNameVersion = UPDATE_SCRIPT_PREFIX + table + "." + currentVersion; for (final File file : createScripts) { final String fileName = file.getName(); if (fileName.compareTo(updateFileNameVersion) > 0) { final String[] split = fileName.split("\\."); String updateScriptVersion = ""; for (int i = 2; i < split.length - 1; ++i) { try { Integer.parseInt(split[i]); updateScriptVersion += split[i] + "."; } catch (final NumberFormatException e) { break; } } if (updateScriptVersion.endsWith(".")) { updateScriptVersion = updateScriptVersion.substring(0, updateScriptVersion.length() - 1); // add to update list if updateScript will update above current // version and upto targetVersion in database.properties if (updateScriptVersion.compareTo(currentVersion) > 0 && updateScriptVersion.compareTo(this.version) <= 0) { versions.add(updateScriptVersion); } } } } Collections.sort(versions); return versions; } private void createNewTables() throws SQLException, IOException { final Connection conn = this.dataSource.getConnection(); conn.setAutoCommit(false); try { // Make sure that properties table is created first. if (this.missingTables.contains("properties")) { runTableScripts(conn, "properties", this.version, this.dataSource.getDBType(), false); } for (final String table : this.missingTables) { if (!table.equals("properties")) { runTableScripts(conn, table, this.version, this.dataSource.getDBType(), false); // update version as we have create a new table this.installedVersions.put(table, this.version); } } } finally { conn.close(); } } private void updateTables() throws SQLException, IOException { final Connection conn = this.dataSource.getConnection(); conn.setAutoCommit(false); try { // Make sure that properties table is created first. if (this.upgradeList.containsKey("properties")) { for (final String version : this.upgradeList.get("properties")) { runTableScripts(conn, "properties", version, this.dataSource.getDBType(), true); } } for (final String table : this.upgradeList.keySet()) { if (!table.equals("properties")) { for (final String version : this.upgradeList.get(table)) { runTableScripts(conn, table, version, this.dataSource.getDBType(), true); } } } } finally { conn.close(); } } private void runTableScripts(final Connection conn, final String table, final String version, final String dbType, final boolean update) throws IOException, SQLException { String scriptName = ""; if (update) { scriptName = "update." 
+ table + "." + version; logger.info("Update table " + table + " to version " + version); } else { scriptName = "create." + table; logger.info("Creating new table " + table + " version " + version); } final String dbSpecificScript = scriptName + "." + dbType + ".sql"; File script = new File(this.scriptPath, dbSpecificScript); if (!script.exists()) { final String dbScript = scriptName + ".sql"; script = new File(this.scriptPath, dbScript); if (!script.exists()) { throw new IOException("Creation files do not exist for table " + table); } } BufferedInputStream buff = null; try { buff = new BufferedInputStream(new FileInputStream(script)); final String queryStr = IOUtils.toString(buff); final String[] splitQuery = queryStr.split(";\\s*\n"); final QueryRunner runner = new QueryRunner(); for (final String query : splitQuery) { runner.update(conn, query); } // If it's properties, then we want to commit the table before we update // it if (table.equals("properties")) { conn.commit(); } final String propertyName = table + ".version"; if (!this.installedVersions.containsKey(table)) { runner.update(conn, INSERT_DB_PROPERTY, propertyName, DataSourceUtils.PropertyType.DB.getNumVal(), version, System.currentTimeMillis()); } else { runner.update(conn, UPDATE_DB_PROPERTY, version, System.currentTimeMillis(), propertyName, DataSourceUtils.PropertyType.DB.getNumVal()); } conn.commit(); } finally { IOUtils.closeQuietly(buff); } } public static class PropertiesHandler implements ResultSetHandler<Map<String, String>> { @Override public Map<String, String> handle(final ResultSet rs) throws SQLException { final Map<String, String> results = new HashMap<>(); while (rs.next()) { final String key = rs.getString(1); final String value = rs.getString(2); results.put(key, value); } return results; } } }
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/database/AzkabanDatabaseUpdater.java
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.database; import azkaban.server.AzkabanServer; import azkaban.utils.Props; import java.io.IOException; import java.sql.SQLException; import java.util.Arrays; import joptsimple.OptionParser; import joptsimple.OptionSet; import joptsimple.OptionSpec; import org.apache.log4j.Logger; public class AzkabanDatabaseUpdater { private static final Logger logger = Logger .getLogger(AzkabanDatabaseUpdater.class); public static void main(final String[] args) throws Exception { final OptionParser parser = new OptionParser(); final OptionSpec<String> scriptDirectory = parser .acceptsAll(Arrays.asList("s", "script"), "Directory of update scripts.").withRequiredArg() .describedAs("script").ofType(String.class); final OptionSpec<Void> updateOption = parser.acceptsAll(Arrays.asList("u", "update"), "Will update if necessary"); final Props props = AzkabanServer.loadProps(args, parser); if (props == null) { logger.error("Properties not found. Need it to connect to the db."); logger.error("Exiting..."); return; } final OptionSet options = parser.parse(args); boolean updateDB = false; if (options.has(updateOption)) { updateDB = true; } else { logger.info("Running DatabaseUpdater in test mode"); } String scriptDir = "sql"; if (options.has(scriptDirectory)) { scriptDir = options.valueOf(scriptDirectory); } runDatabaseUpdater(props, scriptDir, updateDB); } public static void runDatabaseUpdater(final Props props, final String sqlDir, final boolean updateDB) throws IOException, SQLException { logger.info("Use scripting directory " + sqlDir); if (updateDB) { logger.info("Will auto update any changes."); } else { logger.info("Running DatabaseUpdater in test mode. Use -u to update"); } final AzkabanDatabaseSetup setup = new AzkabanDatabaseSetup(props); setup.loadTableInfo(); if (!setup.needsUpdating()) { logger.info("Everything looks up to date."); return; } logger.info("Need to update the db."); setup.printUpgradePlan(); if (updateDB) { logger.info("Updating DB"); setup.updateDatabase(true, true); } } }
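Invocation sketch (classpath and properties location are deployment-specific assumptions): java azkaban.database.AzkabanDatabaseUpdater -s <script-dir> -u. Per the option parser above, -s/--script points at the SQL script directory (defaulting to "sql") and -u/--update actually applies changes; without -u the tool runs in test mode and only reports what it would do.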
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/database/DataSourceUtils.java
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.database; import azkaban.utils.Props; import java.nio.file.Path; import java.nio.file.Paths; import org.apache.log4j.Logger; public class DataSourceUtils { private static final Logger logger = Logger.getLogger(DataSourceUtils.class); /** * Private constructor; this is a static utility class. */ private DataSourceUtils() { } /** * Create Datasource from parameters in the properties */ public static AzkabanDataSource getDataSource(final Props props) { final String databaseType = props.getString("database.type"); AzkabanDataSource dataSource = null; if (databaseType.equals("mysql")) { final int port = props.getInt("mysql.port"); final String host = props.getString("mysql.host"); final String database = props.getString("mysql.database"); final String user = props.getString("mysql.user"); final String password = props.getString("mysql.password"); final int numConnections = props.getInt("mysql.numconnections"); dataSource = getMySQLDataSource(host, port, database, user, password, numConnections); } else if (databaseType.equals("h2")) { final String path = props.getString("h2.path"); final Path h2DbPath = Paths.get(path).toAbsolutePath(); logger.info("h2 DB path: " + h2DbPath); dataSource = getH2DataSource(h2DbPath); } return dataSource; } /** * Create a MySQL DataSource */ public static AzkabanDataSource getMySQLDataSource(final String host, final Integer port, final String dbName, final String user, final String password, final Integer numConnections) { return new MySQLBasicDataSource(host, port, dbName, user, password, numConnections); } /** * Create H2 DataSource */ public static AzkabanDataSource getH2DataSource(final Path file) { return new EmbeddedH2BasicDataSource(file); } /** * Property types */ public enum PropertyType { DB(1); private final int numVal; PropertyType(final int numVal) { this.numVal = numVal; } public static PropertyType fromInteger(final int x) { switch (x) { case 1: return DB; default: return DB; } } public int getNumVal() { return this.numVal; } } /** * MySQL data source based on AzkabanDataSource */ public static class MySQLBasicDataSource extends AzkabanDataSource { private final String url; private MySQLBasicDataSource(final String host, final int port, final String dbName, final String user, final String password, final int numConnections) { super(); this.url = "jdbc:mysql://" + (host + ":" + port + "/" + dbName); addConnectionProperty("useUnicode", "yes"); addConnectionProperty("characterEncoding", "UTF-8"); setDriverClassName("com.mysql.jdbc.Driver"); setUsername(user); setPassword(password); setUrl(this.url); setMaxTotal(numConnections); setValidationQuery("/* ping */ select 1"); setTestOnBorrow(true); } @Override public boolean allowsOnDuplicateKey() { return true; } @Override public String getDBType() { return "mysql"; } } /** * H2 Datasource */ public static class EmbeddedH2BasicDataSource extends AzkabanDataSource { private EmbeddedH2BasicDataSource(final Path filePath) { super(); final String url =
"jdbc:h2:file:" + filePath + ";IGNORECASE=TRUE"; setDriverClassName("org.h2.Driver"); setUrl(url); } @Override public boolean allowsOnDuplicateKey() { return false; } @Override public String getDBType() { return "h2"; } } }
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/event/Event.java
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.event; import azkaban.spi.EventType; import com.google.common.base.Preconditions; public class Event { private final Object runner; private final EventType type; private final EventData eventData; private final long time; private Event(final Object runner, final EventType type, final EventData eventData) { this.runner = runner; this.type = type; this.eventData = eventData; this.time = System.currentTimeMillis(); } /** * Creates a new event. * * @param runner runner. * @param type type. * @param eventData EventData, null is not allowed. * @return New Event instance. * @throws NullPointerException if EventData is null. */ public static Event create(final Object runner, final EventType type, final EventData eventData) throws NullPointerException { Preconditions.checkNotNull(eventData, "EventData was null"); return new Event(runner, type, eventData); } public Object getRunner() { return this.runner; } public EventType getType() { return this.type; } public long getTime() { return this.time; } public EventData getData() { return this.eventData; } }
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/event/EventData.java
package azkaban.event; import azkaban.executor.ExecutableNode; import azkaban.executor.Status; /** * Carries an immutable snapshot of the status data, suitable for asynchronous message passing. */ public class EventData { private final Status status; private final String nestedId; /** * Creates a new EventData instance. * * @param status node status. * @param nestedId node id, corresponds to {@link ExecutableNode#getNestedId()}. */ public EventData(final Status status, final String nestedId) { this.status = status; this.nestedId = nestedId; } /** * Creates a new EventData instance. * * @param node node. */ public EventData(final ExecutableNode node) { this(node.getStatus(), node.getNestedId()); } public Status getStatus() { return this.status; } public String getNestedId() { return this.nestedId; } }
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/event/EventHandler.java
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.event; import java.util.ArrayList; import java.util.HashSet; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class EventHandler { private final HashSet<EventListener> listeners = new HashSet<>(); private static final Logger logger = LoggerFactory.getLogger(EventHandler.class); public EventHandler() { } public EventHandler addListener(final EventListener listener) { this.listeners.add(listener); return this; } public EventHandler addListeners(final EventListener... listeners) { for (final EventListener listener : listeners) { this.listeners.add(listener); } return this; } public void fireEventListeners(final Event event) { final ArrayList<EventListener> listeners = new ArrayList<>(this.listeners); for (final EventListener listener : listeners) { try { listener.handleEvent(event); } catch (final RuntimeException e) { logger.warn("Error while calling handleEvent for: " + listener.getClass()); logger.warn(e.getMessage(), e); } } } public void removeListener(final EventListener listener) { this.listeners.remove(listener); } }
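A short usage sketch for the handler above: EventListener (declared below) has a single handleEvent method, so a lambda is enough, and fireEventListeners iterates over a copy of the listener set, so a listener may safely deregister itself while handling an event. EventType.FLOW_STARTED is assumed to be one of the azkaban.spi.EventType constants.

import azkaban.event.Event;
import azkaban.event.EventData;
import azkaban.event.EventHandler;
import azkaban.event.EventListener;
import azkaban.executor.Status;
import azkaban.spi.EventType;

public class EventHandlerExample {
  public static void main(final String[] args) {
    final EventHandler handler = new EventHandler();
    // Single-method interface, so a lambda works as a listener.
    final EventListener listener = event ->
        System.out.println(event.getType() + " -> " + event.getData().getNestedId());
    handler.addListener(listener);
    handler.fireEventListeners(
        Event.create(handler, EventType.FLOW_STARTED, new EventData(Status.RUNNING, "jobA")));
    handler.removeListener(listener);
  }
}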
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/event/EventListener.java
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.event; public interface EventListener { public void handleEvent(Event event); }
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ActiveExecutingFlowsDao.java
/* * Copyright 2017 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.db.DatabaseOperator; import java.sql.SQLException; import javax.inject.Inject; import javax.inject.Singleton; //TODO jamiesjc: This class is deprecated as we no longer read from the active_executing_flows table, // so it should be removed in a future release. @Deprecated @Singleton public class ActiveExecutingFlowsDao { private final DatabaseOperator dbOperator; @Inject ActiveExecutingFlowsDao(final DatabaseOperator dbOperator) { this.dbOperator = dbOperator; } void addActiveExecutableReference(final ExecutionReference reference) throws ExecutorManagerException { final String INSERT = "INSERT INTO active_executing_flows " + "(exec_id, update_time) values (?,?)"; try { this.dbOperator.update(INSERT, reference.getExecId(), reference.getUpdateTime()); } catch (final SQLException e) { throw new ExecutorManagerException( "Error updating active flow reference " + reference.getExecId(), e); } } void removeActiveExecutableReference(final int execId) throws ExecutorManagerException { final String DELETE = "DELETE FROM active_executing_flows WHERE exec_id=?"; try { this.dbOperator.update(DELETE, execId); } catch (final SQLException e) { throw new ExecutorManagerException( "Error deleting active flow reference " + execId, e); } } boolean updateExecutableReference(final int execId, final long updateTime) throws ExecutorManagerException { final String UPDATE = "UPDATE active_executing_flows SET update_time=? WHERE exec_id=?"; try { // Affects exactly one row when the execution exists. return this.dbOperator.update(UPDATE, updateTime, execId) > 0; } catch (final SQLException e) { throw new ExecutorManagerException( "Error updating active flow reference " + execId, e); } } }
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ActiveExecutors.java
/* * Copyright 2018 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import com.google.common.collect.ImmutableSet; import java.util.Collection; import javax.inject.Inject; import javax.inject.Singleton; import org.apache.log4j.Logger; /** * Loads & provides executors. */ @Singleton public class ActiveExecutors { private static final Logger logger = Logger.getLogger(ActiveExecutors.class); private volatile ImmutableSet<Executor> activeExecutors; private final ExecutorLoader executorLoader; @Inject public ActiveExecutors(final ExecutorLoader executorLoader) { this.executorLoader = executorLoader; } /** * Loads executors. Can also be used to reload executors if there have been changes in the DB. * * @throws ExecutorManagerException if no active executors are found or if loading executors * fails. */ public void setupExecutors() throws ExecutorManagerException { final ImmutableSet<Executor> newExecutors = loadExecutors(); if (newExecutors.isEmpty()) { final String error = "No active executors found"; logger.error(error); throw new ExecutorManagerException(error); } else { this.activeExecutors = newExecutors; } } /** * Returns all executors. The result is cached. To reload, call {@link #setupExecutors()}. * * @return all executors */ public Collection<Executor> getAll() { return this.activeExecutors; } private ImmutableSet<Executor> loadExecutors() throws ExecutorManagerException { logger.info("Initializing executors from database."); return ImmutableSet.copyOf(this.executorLoader.fetchActiveExecutors()); } }
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/AlerterHolder.java
/* * Copyright 2017 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.alert.Alerter; import azkaban.utils.Emailer; import azkaban.utils.FileIOUtils; import azkaban.utils.PluginUtils; import azkaban.utils.Props; import azkaban.utils.PropsUtils; import javax.inject.Inject; import javax.inject.Singleton; import java.io.File; import java.lang.reflect.Constructor; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.log4j.Logger; @Singleton public class AlerterHolder { private static final Logger logger = Logger.getLogger(AlerterHolder.class); private Map<String, Alerter> alerters; @Inject public AlerterHolder(final Props props, final Emailer mailAlerter) { try { this.alerters = loadAlerters(props, mailAlerter); } catch (final Exception ex) { logger.error(ex); this.alerters = new HashMap<>(); } } private Map<String, Alerter> loadAlerters(final Props props, final Emailer mailAlerter) { final Map<String, Alerter> allAlerters = new HashMap<>(); // load built-in alerters allAlerters.put("email", mailAlerter); // load all plugin alerters final String pluginDir = props.getString("alerter.plugin.dir", "plugins/alerter"); allAlerters.putAll(loadPluginAlerters(pluginDir)); return allAlerters; } private Map<String, Alerter> loadPluginAlerters(final String pluginPath) { final File alerterPluginPath = new File(pluginPath); if (!alerterPluginPath.exists()) { return Collections.<String, Alerter>emptyMap(); } final Map<String, Alerter> installedAlerterPlugins = new HashMap<>(); final ClassLoader parentLoader = getClass().getClassLoader(); final File[] pluginDirs = alerterPluginPath.listFiles(); final ArrayList<String> jarPaths = new ArrayList<>(); for (final File pluginDir : pluginDirs) { // load plugin properties final Props pluginProps = PropsUtils.loadPluginProps(pluginDir); if (pluginProps == null) { continue; } final String pluginName = pluginProps.getString("alerter.name"); final List<String> extLibClassPaths = pluginProps.getStringList("alerter.external.classpaths", (List<String>) null); final String pluginClass = pluginProps.getString("alerter.class"); if (pluginClass == null) { logger.error("Alerter class is not set."); continue; } else { logger.info("Plugin class " + pluginClass); } Class<?> alerterClass = PluginUtils.getPluginClass(pluginClass, pluginDir, extLibClassPaths, parentLoader); if (alerterClass == null) { continue; } final String source = FileIOUtils.getSourcePathFromClass(alerterClass); logger.info("Source jar " + source); jarPaths.add("jar:file:" + source); Constructor<?> constructor = null; try { constructor = alerterClass.getConstructor(Props.class); } catch (final NoSuchMethodException e) { logger.error("Constructor not found in " + pluginClass); continue; } Object obj = null; try { obj = constructor.newInstance(pluginProps); } catch (final Exception e) { logger.error(e); } if (!(obj instanceof Alerter)) { logger.error("The object is not an Alerter"); 
continue; } final Alerter plugin = (Alerter) obj; installedAlerterPlugins.put(pluginName, plugin); } return installedAlerterPlugins; } public Alerter get(final String alerterType) { return this.alerters.get(alerterType); } }
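A hedged sketch of the plugin contract implied by loadPluginAlerters above; the directory layout, property values and SlackAlerter class are hypothetical, and the exact properties file location is whatever PropsUtils.loadPluginProps resolves for a plugin directory.

// Hypothetical plugin layout under alerter.plugin.dir (default plugins/alerter):
//
//   plugins/alerter/slack/  with properties read by PropsUtils.loadPluginProps:
//     alerter.name=slack
//     alerter.class=com.example.SlackAlerter
//     alerter.external.classpaths=lib/*
//
// The plugin class must expose a public Props-only constructor, since the
// holder instantiates it via alerterClass.getConstructor(Props.class):
//
//   public class SlackAlerter implements Alerter {
//     public SlackAlerter(final Props props) { /* read webhook url, etc. */ }
//     // Alerter callbacks omitted; the interface is not shown in this file.
//   }
//
// Lookup afterwards is by the configured name:
//   final Alerter slack = alerterHolder.get("slack");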
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/AssignExecutorDao.java
/* * Copyright 2017 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.db.DatabaseOperator; import java.sql.SQLException; import javax.inject.Inject; import javax.inject.Singleton; @Singleton public class AssignExecutorDao { private final ExecutorDao executorDao; private final DatabaseOperator dbOperator; @Inject public AssignExecutorDao(final DatabaseOperator dbOperator, final ExecutorDao executorDao) { this.dbOperator = dbOperator; this.executorDao = executorDao; } public void assignExecutor(final int executorId, final int executionId) throws ExecutorManagerException { final String UPDATE = "UPDATE execution_flows SET executor_id=? where exec_id=?"; try { if (this.executorDao.fetchExecutor(executorId) == null) { throw new ExecutorManagerException(String.format( "Failed to assign non-existent executor Id: %d to execution : %d ", executorId, executionId)); } if (this.dbOperator.update(UPDATE, executorId, executionId) == 0) { throw new ExecutorManagerException(String.format( "Failed to assign executor Id: %d to non-existent execution : %d ", executorId, executionId)); } } catch (final SQLException e) { throw new ExecutorManagerException("Error updating executor id " + executorId, e); } } void unassignExecutor(final int executionId) throws ExecutorManagerException { final String UPDATE = "UPDATE execution_flows SET executor_id=NULL where exec_id=?"; try { final int rows = this.dbOperator.update(UPDATE, executionId); if (rows == 0) { throw new ExecutorManagerException(String.format( "Failed to unassign executor for execution : %d ", executionId)); } } catch (final SQLException e) { throw new ExecutorManagerException("Error updating execution id " + executionId, e); } } }
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/BaseRefreshableMap.java
/* * Copyright 2019 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import java.util.HashMap; import java.util.HashSet; import java.util.Set; /** * Template base class for maps whose items can be refreshed from a source object. */ public class BaseRefreshableMap<K, V extends IRefreshable> extends HashMap<K, V> implements IRefreshable<BaseRefreshableMap<K, V>> { public BaseRefreshableMap<K, V> add(K key, V ramp) { this.put(key, ramp); return this; } public BaseRefreshableMap<K, V> delete(K id) { this.remove(id); return this; } @Override public BaseRefreshableMap<K, V> refresh(BaseRefreshableMap<K, V> source) { final Set<K> mergedKeys = new HashSet<>(); mergedKeys.addAll(this.keySet()); mergedKeys.addAll(source.keySet()); mergedKeys.forEach(key -> { if (this.containsKey(key)) { if (source.containsKey(key)) { this.get(key).refresh(source.get(key)); } else { this.remove(key); } } else { this.add(key, source.get(key)); } }); return this; } @Override public BaseRefreshableMap<K, V> clone() { return (BaseRefreshableMap<K, V>) super.clone(); } @Override public int elementCount() { return this.values().stream().mapToInt(elem -> elem.elementCount()).sum(); } }
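A sketch of the merge semantics of refresh(source) above, using a hypothetical Counter element; this assumes IRefreshable declares the refresh, clone and elementCount members that BaseRefreshableMap overrides.

import azkaban.executor.BaseRefreshableMap;
import azkaban.executor.IRefreshable;

public class RefreshableMapExample {

  // Hypothetical element type, just enough to satisfy IRefreshable.
  static class Counter implements IRefreshable<Counter> {
    int value;
    Counter(final int value) { this.value = value; }
    @Override public Counter refresh(final Counter source) { this.value = source.value; return this; }
    @Override public Counter clone() { return new Counter(this.value); }
    @Override public int elementCount() { return 1; }
  }

  public static void main(final String[] args) {
    final BaseRefreshableMap<String, Counter> current = new BaseRefreshableMap<>();
    current.add("a", new Counter(1)).add("b", new Counter(2));

    final BaseRefreshableMap<String, Counter> latest = new BaseRefreshableMap<>();
    latest.add("b", new Counter(20)).add("c", new Counter(3));

    // "a" is dropped, "b" is refreshed in place to 20, "c" is added.
    current.refresh(latest);
    System.out.println(current.elementCount()); // 2
  }
}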
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ConnectorParams.java
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; public interface ConnectorParams { public static final String EXECUTOR_ID_PARAM = "executorId"; public static final String ACTION_PARAM = "action"; public static final String EXECID_PARAM = "execid"; public static final String SHAREDTOKEN_PARAM = "token"; public static final String USER_PARAM = "user"; public static final String UPDATE_ACTION = "update"; public static final String STATUS_ACTION = "status"; public static final String EXECUTE_ACTION = "execute"; public static final String CANCEL_ACTION = "cancel"; public static final String PAUSE_ACTION = "pause"; public static final String RESUME_ACTION = "resume"; public static final String PING_ACTION = "ping"; public static final String LOG_ACTION = "log"; public static final String ATTACHMENTS_ACTION = "attachments"; public static final String METADATA_ACTION = "metadata"; public static final String RELOAD_JOBTYPE_PLUGINS_ACTION = "reloadJobTypePlugins"; public static final String ACTIVATE = "activate"; public static final String DEACTIVATE = "deactivate"; public static final String GET_STATUS = "getStatus"; public static final String SHUTDOWN = "shutdown"; public static final String MODIFY_EXECUTION_ACTION = "modifyExecution"; public static final String MODIFY_EXECUTION_ACTION_TYPE = "modifyType"; public static final String MODIFY_RETRY_FAILURES = "retryFailures"; public static final String MODIFY_JOBS_LIST = "jobIds"; public static final String START_PARAM = "start"; public static final String END_PARAM = "end"; public static final String STATUS_PARAM = "status"; public static final String NODES_PARAM = "nodes"; public static final String EXECPATH_PARAM = "execpath"; public static final String RESPONSE_NOTFOUND = "notfound"; public static final String RESPONSE_ERROR = "error"; public static final String RESPONSE_SUCCESS = "success"; public static final String RESPONSE_ALIVE = "alive"; public static final String RESPONSE_UPDATETIME = "lasttime"; public static final String RESPONSE_UPDATED_FLOWS = "updated"; public static final int NODE_NAME_INDEX = 0; public static final int NODE_STATUS_INDEX = 1; public static final int NODE_START_INDEX = 2; public static final int NODE_END_INDEX = 3; public static final String UPDATE_TIME_LIST_PARAM = "updatetime"; public static final String EXEC_ID_LIST_PARAM = "executionId"; public static final String FORCED_FAILED_MARKER = ".failed"; public static final String UPDATE_MAP_EXEC_ID = "executionId"; public static final String UPDATE_MAP_JOBID = "jobId"; public static final String UPDATE_MAP_UPDATE_TIME = "updateTime"; public static final String UPDATE_MAP_STATUS = "status"; public static final String UPDATE_MAP_START_TIME = "startTime"; public static final String UPDATE_MAP_END_TIME = "endTime"; public static final String UPDATE_MAP_NODES = "nodes"; public static final String JMX_GET_MBEANS = "getMBeans"; public static final String JMX_GET_MBEAN_INFO = "getMBeanInfo"; public static final String 
JMX_GET_MBEAN_ATTRIBUTE = "getAttribute"; public static final String JMX_GET_ALL_MBEAN_ATTRIBUTES = "getAllMBeanAttributes"; public static final String JMX_ATTRIBUTE = "attribute"; public static final String JMX_MBEAN = "mBean"; public static final String JMX_GET_ALL_EXECUTOR_ATTRIBUTES = "getAllExecutorAttributes"; public static final String JMX_HOSTPORT = "hostPort"; public static final String STATS_GET_ALLMETRICSNAME = "getAllMetricNames"; public static final String STATS_GET_METRICHISTORY = "getMetricHistory"; public static final String STATS_SET_REPORTINGINTERVAL = "changeMetricInterval"; public static final String STATS_SET_CLEANINGINTERVAL = "changeCleaningInterval"; public static final String STATS_SET_MAXREPORTERPOINTS = "changeEmitterPoints"; public static final String STATS_SET_ENABLEMETRICS = "enableMetrics"; public static final String STATS_SET_DISABLEMETRICS = "disableMetrics"; public static final String STATS_MAP_METRICNAMEPARAM = "metricName"; /** * useStats param is used to filter datapoints on /stats graph by using standard deviation and * means By default, we consider only top/bottom 5% datapoints */ public static final String STATS_MAP_METRICRETRIEVALMODE = "useStats"; public static final String STATS_MAP_STARTDATE = "from"; public static final String STATS_MAP_ENDDATE = "to"; public static final String STATS_MAP_REPORTINGINTERVAL = "interval"; public static final String STATS_MAP_CLEANINGINTERVAL = "interval"; public static final String STATS_MAP_EMITTERNUMINSTANCES = "numInstances"; }
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/DisabledJob.java
/* * Copyright 2019 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import java.util.List; import java.util.Map; import java.util.stream.Collectors; /** * Disabled job, or disabled sub-jobs for an embedded flow. */ public class DisabledJob { static final private String SUBFLOW_ID_KEY = "id"; static final private String SUBFLOW_CHILDREN_KEY = "children"; final private String name; // name of the disabled job, or embedded flow final private ImmutableList<DisabledJob> children; // disabled sub-jobs for an embedded flow /** * Constructor. * * @param name the name of the disabled job or embedded flow. * @param children if an embedded flow, the disabled jobs for the embedded flow. null if it is * a job. */ public DisabledJob(String name, List<DisabledJob> children) { this.name = Preconditions.checkNotNull(name, "name is null"); if (children == null) { this.children = null; } else { this.children = ImmutableList.copyOf(children); } } /** * Constructor. * * @param name the name of the disabled job. */ public DisabledJob(String name) { this(name, null); } public String getName() { return this.name; } public List<DisabledJob> getChildren() { return this.children; } /** @return True if this is an embedded flow, false if it is a single job. */ public boolean isEmbeddedFlow() { return (this.children != null); } /** @return The original Object/JSON format, for {@link azkaban.sla.SlaOptionDeprecated}. */ public Object toDeprecatedObject() { if (this.children != null) { Object childrenObj = this.children .stream().map(DisabledJob::toDeprecatedObject).collect(Collectors.toList()); return ImmutableMap.of(SUBFLOW_ID_KEY, name, SUBFLOW_CHILDREN_KEY, childrenObj); } else { return this.name; } } /** * Convert a list of DisabledJobs to the original Object/JSON format, for * {@link azkaban.sla.SlaOptionDeprecated}. * * @param disabledJobs list of disabled jobs to convert. * @return List of original Object/JSON format for the jobs. */ static public List<Object> toDeprecatedObjectList(List<DisabledJob> disabledJobs) { return disabledJobs.stream().map( x -> { if (x == null) { return null; } else { return x.toDeprecatedObject(); } } ).collect(Collectors.toList()); } /** * Create a DisabledJob from the original Object/JSON format, for * {@link azkaban.sla.SlaOptionDeprecated}. * * @param obj the Object/JSON (in {@link azkaban.sla.SlaOptionDeprecated} format) value. * @return the disabled job. 
*/ static public DisabledJob fromDeprecatedObject(Object obj) { if (obj == null) { return null; } if (obj instanceof String) { return new DisabledJob((String)obj); } else if (obj instanceof Map) { Map<String, Object> map = (Map<String, Object>)obj; String name = (String)map.get(SUBFLOW_ID_KEY); List<DisabledJob> childJobs = fromDeprecatedObjectList((List<Object>)map.get(SUBFLOW_CHILDREN_KEY)); if (name != null && childJobs != null) { return new DisabledJob(name, childJobs); } } return null; } /** * Construct a list of disabled jobs from a list of original Object/JSON formats, for * {@link azkaban.sla.SlaOptionDeprecated}. * * @param objList the list of original Object/JSON formats representing the disabled jobs. * @return the list of disabled jobs. */ static public List<DisabledJob> fromDeprecatedObjectList(List<Object> objList) { if (objList == null) { return null; } return objList.stream().map(x -> { if (x == null) { return null; } else { return fromDeprecatedObject(x); } }).collect(Collectors.toList()); } }
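A short round-trip sketch of the deprecated Object/JSON shape handled above: a bare String stands for a single disabled job, and a Map with "id" and "children" entries stands for an embedded flow. The job and flow names are made up.

import azkaban.executor.DisabledJob;
import com.google.common.collect.ImmutableMap;
import java.util.Arrays;
import java.util.List;

public class DisabledJobExample {
  public static void main(final String[] args) {
    final List<Object> raw = Arrays.<Object>asList(
        "jobA",
        ImmutableMap.<String, Object>of("id", "subflow1", "children", Arrays.asList("jobB", "jobC")));

    final List<DisabledJob> disabled = DisabledJob.fromDeprecatedObjectList(raw);
    System.out.println(disabled.get(1).isEmbeddedFlow()); // true

    // Converting back reproduces the same String / Map structure.
    System.out.println(DisabledJob.toDeprecatedObjectList(disabled));
  }
}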
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutableFlow.java
/* * Copyright 2013 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.flow.Flow; import azkaban.project.Project; import azkaban.sla.SlaOption; import azkaban.utils.Props; import azkaban.utils.TypedMapWrapper; import com.sun.istack.NotNull; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; public class ExecutableFlow extends ExecutableFlowBase { public static final String EXECUTIONID_PARAM = "executionId"; public static final String EXECUTIONPATH_PARAM = "executionPath"; public static final String EXECUTIONOPTIONS_PARAM = "executionOptions"; public static final String PROJECTID_PARAM = "projectId"; public static final String SCHEDULEID_PARAM = "scheduleId"; public static final String SUBMITUSER_PARAM = "submitUser"; public static final String SUBMITTIME_PARAM = "submitTime"; public static final String VERSION_PARAM = "version"; public static final String PROXYUSERS_PARAM = "proxyUsers"; public static final String PROJECTNAME_PARAM = "projectName"; public static final String LASTMODIFIEDTIME_PARAM = "lastModfiedTime"; public static final String LASTMODIFIEDUSER_PARAM = "lastModifiedUser"; public static final String SLAOPTIONS_PARAM = "slaOptions"; public static final String AZKABANFLOWVERSION_PARAM = "azkabanFlowVersion"; public static final String IS_LOCKED_PARAM = "isLocked"; public static final String FLOW_LOCK_ERROR_MESSAGE_PARAM = "flowLockErrorMessage"; private final HashSet<String> proxyUsers = new HashSet<>(); private int executionId = -1; private int scheduleId = -1; private int projectId; private String projectName; private String lastModifiedUser; private int version; private long submitTime = -1; private long lastModifiedTimestamp; private String submitUser; private String executionPath; private ExecutionOptions executionOptions; private double azkabanFlowVersion; private boolean isLocked; private ExecutableFlowRampMetadata executableFlowRampMetadata; private String flowLockErrorMessage; public ExecutableFlow(final Project project, final Flow flow) { this.projectId = project.getId(); this.projectName = project.getName(); this.version = project.getVersion(); this.scheduleId = -1; this.lastModifiedTimestamp = project.getLastModifiedTimestamp(); this.lastModifiedUser = project.getLastModifiedUser(); setAzkabanFlowVersion(flow.getAzkabanFlowVersion()); setLocked(flow.isLocked()); setFlowLockErrorMessage(flow.getFlowLockErrorMessage()); this.setFlow(project, flow); } public ExecutableFlow() { } public static ExecutableFlow createExecutableFlow(final Object obj, final Status status) { final ExecutableFlow exFlow = new ExecutableFlow(); final HashMap<String, Object> flowObj = (HashMap<String, Object>) obj; exFlow.fillExecutableFromMapObject(flowObj); // overwrite status from the flow data blob as that one should NOT be used exFlow.setStatus(status); return 
exFlow; } @Override public String getId() { return getFlowId(); } @Override public ExecutableFlow getExecutableFlow() { return this; } public void addAllProxyUsers(final Collection<String> proxyUsers) { this.proxyUsers.addAll(proxyUsers); } public Set<String> getProxyUsers() { return new HashSet<>(this.proxyUsers); } public ExecutionOptions getExecutionOptions() { return this.executionOptions; } public void setExecutionOptions(final ExecutionOptions options) { this.executionOptions = options; } @Override protected void setFlow(final Project project, final Flow flow) { super.setFlow(project, flow); this.executionOptions = new ExecutionOptions(); this.executionOptions.setMailCreator(flow.getMailCreator()); if (flow.getSuccessEmails() != null) { this.executionOptions.setSuccessEmails(flow.getSuccessEmails()); } if (flow.getFailureEmails() != null) { this.executionOptions.setFailureEmails(flow.getFailureEmails()); } } @Override public int getExecutionId() { return this.executionId; } public void setExecutionId(final int executionId) { this.executionId = executionId; } @Override public long getLastModifiedTimestamp() { return this.lastModifiedTimestamp; } public void setLastModifiedTimestamp(final long lastModifiedTimestamp) { this.lastModifiedTimestamp = lastModifiedTimestamp; } @Override public String getLastModifiedByUser() { return this.lastModifiedUser; } public void setLastModifiedByUser(final String lastModifiedUser) { this.lastModifiedUser = lastModifiedUser; } @Override public int getProjectId() { return this.projectId; } public void setProjectId(final int projectId) { this.projectId = projectId; } @Override public String getProjectName() { return this.projectName; } public int getScheduleId() { return this.scheduleId; } public void setScheduleId(final int scheduleId) { this.scheduleId = scheduleId; } public String getExecutionPath() { return this.executionPath; } public void setExecutionPath(final String executionPath) { this.executionPath = executionPath; } public String getSubmitUser() { return this.submitUser; } public void setSubmitUser(final String submitUser) { this.submitUser = submitUser; } @Override public int getVersion() { return this.version; } public void setVersion(final int version) { this.version = version; } public long getSubmitTime() { return this.submitTime; } public void setSubmitTime(final long submitTime) { this.submitTime = submitTime; } public double getAzkabanFlowVersion() { return this.azkabanFlowVersion; } public void setAzkabanFlowVersion(final double azkabanFlowVersion) { this.azkabanFlowVersion = azkabanFlowVersion; } public boolean isLocked() { return this.isLocked; } public void setLocked(boolean locked) { this.isLocked = locked; } public String getFlowLockErrorMessage() { return this.flowLockErrorMessage; } public void setFlowLockErrorMessage(final String flowLockErrorMessage) { this.flowLockErrorMessage = flowLockErrorMessage; } @Override public Map<String, Object> toObject() { final HashMap<String, Object> flowObj = new HashMap<>(); fillMapFromExecutable(flowObj); flowObj.put(EXECUTIONID_PARAM, this.executionId); flowObj.put(EXECUTIONPATH_PARAM, this.executionPath); flowObj.put(PROJECTID_PARAM, this.projectId); flowObj.put(PROJECTNAME_PARAM, this.projectName); if (this.scheduleId >= 0) { flowObj.put(SCHEDULEID_PARAM, this.scheduleId); } flowObj.put(SUBMITUSER_PARAM, this.submitUser); flowObj.put(VERSION_PARAM, this.version); flowObj.put(LASTMODIFIEDTIME_PARAM, this.lastModifiedTimestamp); flowObj.put(LASTMODIFIEDUSER_PARAM, this.lastModifiedUser); 
flowObj.put(AZKABANFLOWVERSION_PARAM, this.azkabanFlowVersion); flowObj.put(EXECUTIONOPTIONS_PARAM, this.executionOptions.toObject()); final ArrayList<String> proxyUserList = new ArrayList<>(this.proxyUsers); flowObj.put(PROXYUSERS_PARAM, proxyUserList); flowObj.put(SUBMITTIME_PARAM, this.submitTime); final List<Map<String, Object>> slaOptions = new ArrayList<>(); List<SlaOption> slaOptionList = this.executionOptions.getSlaOptions(); if (slaOptionList != null) { for (SlaOption slaOption : slaOptionList) { slaOptions.add(slaOption.toObject()); } } flowObj.put(SLAOPTIONS_PARAM, slaOptions); flowObj.put(IS_LOCKED_PARAM, this.isLocked); flowObj.put(FLOW_LOCK_ERROR_MESSAGE_PARAM, this.flowLockErrorMessage); return flowObj; } @Override public void fillExecutableFromMapObject( final TypedMapWrapper<String, Object> flowObj) { super.fillExecutableFromMapObject(flowObj); this.executionId = flowObj.getInt(EXECUTIONID_PARAM); this.executionPath = flowObj.getString(EXECUTIONPATH_PARAM); this.projectId = flowObj.getInt(PROJECTID_PARAM); this.projectName = flowObj.getString(PROJECTNAME_PARAM); this.scheduleId = flowObj.getInt(SCHEDULEID_PARAM); this.submitUser = flowObj.getString(SUBMITUSER_PARAM); this.version = flowObj.getInt(VERSION_PARAM); this.lastModifiedTimestamp = flowObj.getLong(LASTMODIFIEDTIME_PARAM); this.lastModifiedUser = flowObj.getString(LASTMODIFIEDUSER_PARAM); this.submitTime = flowObj.getLong(SUBMITTIME_PARAM); this.azkabanFlowVersion = flowObj.getDouble(AZKABANFLOWVERSION_PARAM); if (flowObj.containsKey(EXECUTIONOPTIONS_PARAM)) { this.executionOptions = ExecutionOptions.createFromObject(flowObj .getObject(EXECUTIONOPTIONS_PARAM)); } else { // for backwards compatibility should remove in a few versions. this.executionOptions = ExecutionOptions.createFromObject(flowObj); } if (flowObj.containsKey(PROXYUSERS_PARAM)) { final List<String> proxyUserList = flowObj.<String>getList(PROXYUSERS_PARAM); this.addAllProxyUsers(proxyUserList); } if (flowObj.containsKey(SLAOPTIONS_PARAM)) { final List<SlaOption> slaOptions = flowObj.getList(SLAOPTIONS_PARAM).stream().map(SlaOption::fromObject) .collect(Collectors.toList()); this.executionOptions.setSlaOptions(slaOptions); } this.setLocked(flowObj.getBool(IS_LOCKED_PARAM, false)); this.setFlowLockErrorMessage(flowObj.getString(FLOW_LOCK_ERROR_MESSAGE_PARAM, null)); } @Override public Map<String, Object> toUpdateObject(final long lastUpdateTime) { final Map<String, Object> updateData = super.toUpdateObject(lastUpdateTime); updateData.put(EXECUTIONID_PARAM, this.executionId); return updateData; } @Override public void resetForRetry() { super.resetForRetry(); this.setStatus(Status.RUNNING); } public ExecutableFlowRampMetadata getExecutableFlowRampMetadata() { return executableFlowRampMetadata; } public void setExecutableFlowRampMetadata(ExecutableFlowRampMetadata executableFlowRampMetadata) { this.executableFlowRampMetadata = executableFlowRampMetadata; } /** * Get the Relative Flow Directory against project directory */ public String getDirectory() { return String.valueOf(getProjectId()) + "." + String.valueOf(getVersion()); } /** * Get Ramp Props For Job * @param jobId job Id * @param jobType jobType aka job plugin type * @return ramp Props */ synchronized public Props getRampPropsForJob(@NotNull final String jobId, @NotNull final String jobType) { return Optional.ofNullable(executableFlowRampMetadata) .map(metadata -> metadata.selectRampPropsForJob(jobId, jobType)) .orElse(null); } }
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutableFlowBase.java
/* * Copyright 2012 LinkedIn, Inc * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.flow.Edge; import azkaban.flow.Flow; import azkaban.flow.FlowProps; import azkaban.flow.Node; import azkaban.flow.SpecialJobTypes; import azkaban.project.Project; import azkaban.utils.TypedMapWrapper; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class ExecutableFlowBase extends ExecutableNode { public static final String FLOW_ID_PARAM = "flowId"; public static final String NODES_PARAM = "nodes"; public static final String PROPERTIES_PARAM = "properties"; public static final String SOURCE_PARAM = "source"; public static final String INHERITED_PARAM = "inherited"; private static final String FLOW_ID_FORMAT_PATTERN = "%s.%s"; private static final Logger logger = LoggerFactory.getLogger(ExecutableFlowBase.class); private final HashMap<String, ExecutableNode> executableNodes = new HashMap<>(); private final HashMap<String, FlowProps> flowProps = new HashMap<>(); private ArrayList<String> startNodes; private ArrayList<String> endNodes; private String flowId; public ExecutableFlowBase(final Project project, final Node node, final Flow flow, final ExecutableFlowBase parent) { super(node, parent); setFlow(project, flow); } public ExecutableFlowBase() { } public int getExecutionId() { if (this.getParentFlow() != null) { return this.getParentFlow().getExecutionId(); } return -1; } public int getProjectId() { if (this.getParentFlow() != null) { return this.getParentFlow().getProjectId(); } return -1; } public String getProjectName() { if (this.getParentFlow() != null) { return this.getParentFlow().getProjectName(); } return null; } public int getVersion() { if (this.getParentFlow() != null) { return this.getParentFlow().getVersion(); } return -1; } public String getLastModifiedByUser() { if (this.getParentFlow() != null) { return this.getParentFlow().getLastModifiedByUser(); } return null; } public long getLastModifiedTimestamp() { if (this.getParentFlow() != null) { return this.getParentFlow().getLastModifiedTimestamp(); } return -1; } public Collection<FlowProps> getFlowProps() { return this.flowProps.values(); } public String getFlowId() { return this.flowId; } public String getFlowName() { return String.format(FLOW_ID_FORMAT_PATTERN, this.getProjectName(), this.getFlowId()); } public int getRampPercentageId() { return Math.abs(getFlowName().hashCode() % 100); } protected void setFlow(final Project project, final Flow flow) { this.flowId = flow.getId(); this.flowProps.putAll(flow.getAllFlowProps()); for (final Node node : flow.getNodes()) { final String id = node.getId(); if (node.getType().equals(SpecialJobTypes.EMBEDDED_FLOW_TYPE)) { final String embeddedFlowId = node.getEmbeddedFlowId(); final Flow subFlow = project.getFlow(embeddedFlowId); final ExecutableFlowBase embeddedFlow = new ExecutableFlowBase(project, node, subFlow, this); 
this.executableNodes.put(id, embeddedFlow); } else { final ExecutableNode exNode = new ExecutableNode(node, this); this.executableNodes.put(id, exNode); } } for (final Edge edge : flow.getEdges()) { final ExecutableNode sourceNode = this.executableNodes.get(edge.getSourceId()); final ExecutableNode targetNode = this.executableNodes.get(edge.getTargetId()); if (sourceNode == null || targetNode == null) { logger.warn("Edge " + edge.getSourceId() + " -> " + edge.getTargetId() + " references a node that doesn't exist; skipping it"); continue; } sourceNode.addOutNode(edge.getTargetId()); targetNode.addInNode(edge.getSourceId()); } } public List<ExecutableNode> getExecutableNodes() { return new ArrayList<>(this.executableNodes.values()); } public ExecutableNode getExecutableNode(final String id) { return this.executableNodes.get(id); } public ExecutableNode getExecutableNodePath(final String ids) { final String[] split = ids.split(":"); return getExecutableNodePath(split); } public ExecutableNode getExecutableNodePath(final String... ids) { return getExecutableNodePath(this, ids, 0); } private ExecutableNode getExecutableNodePath(final ExecutableFlowBase flow, final String[] ids, int currentIdIdx) { final ExecutableNode node = flow.getExecutableNode(ids[currentIdIdx]); currentIdIdx++; if (node == null) { return null; } if (ids.length == currentIdIdx) { return node; } else if (node instanceof ExecutableFlowBase) { return getExecutableNodePath((ExecutableFlowBase) node, ids, currentIdIdx); } else { return null; } } public List<String> getStartNodes() { if (this.startNodes == null) { this.startNodes = new ArrayList<>(); for (final ExecutableNode node : this.executableNodes.values()) { if (node.getInNodes().isEmpty()) { this.startNodes.add(node.getId()); } } } return this.startNodes; } public List<String> getEndNodes() { if (this.endNodes == null) { this.endNodes = new ArrayList<>(); for (final ExecutableNode node : this.executableNodes.values()) { if (node.getOutNodes().isEmpty()) { this.endNodes.add(node.getId()); } } } return this.endNodes; } @Override public Map<String, Object> toObject() { final Map<String, Object> mapObj = new HashMap<>(); fillMapFromExecutable(mapObj); return mapObj; } @Override protected void fillMapFromExecutable(final Map<String, Object> flowObjMap) { super.fillMapFromExecutable(flowObjMap); flowObjMap.put(FLOW_ID_PARAM, this.flowId); final ArrayList<Object> nodes = new ArrayList<>(); for (final ExecutableNode node : this.executableNodes.values()) { nodes.add(node.toObject()); } flowObjMap.put(NODES_PARAM, nodes); // Flow properties final ArrayList<Object> props = new ArrayList<>(); for (final FlowProps fprop : this.flowProps.values()) { final HashMap<String, Object> propObj = new HashMap<>(); final String source = fprop.getSource(); final String inheritedSource = fprop.getInheritedSource(); propObj.put(SOURCE_PARAM, source); if (inheritedSource != null) { propObj.put(INHERITED_PARAM, inheritedSource); } props.add(propObj); } flowObjMap.put(PROPERTIES_PARAM, props); } @Override public void fillExecutableFromMapObject( final TypedMapWrapper<String, Object> flowObjMap) { super.fillExecutableFromMapObject(flowObjMap); this.flowId = flowObjMap.getString(FLOW_ID_PARAM); final List<Object> nodes = flowObjMap.<Object>getList(NODES_PARAM); if (nodes != null) { for (final Object nodeObj : nodes) { final Map<String, Object> nodeObjMap = (Map<String, Object>) nodeObj; final TypedMapWrapper<String, Object> wrapper = new TypedMapWrapper<>(nodeObjMap); final String type = wrapper.getString(TYPE_PARAM); if (type != null && type.equals(SpecialJobTypes.EMBEDDED_FLOW_TYPE)) {
final ExecutableFlowBase exFlow = new ExecutableFlowBase(); exFlow.fillExecutableFromMapObject(wrapper); exFlow.setParentFlow(this); this.executableNodes.put(exFlow.getId(), exFlow); } else { final ExecutableNode exJob = new ExecutableNode(); exJob.fillExecutableFromMapObject(nodeObjMap); exJob.setParentFlow(this); this.executableNodes.put(exJob.getId(), exJob); } } } final List<Object> properties = flowObjMap.<Object>getList(PROPERTIES_PARAM); if (properties != null) { for (final Object propNode : properties) { final HashMap<String, Object> fprop = (HashMap<String, Object>) propNode; final String source = (String) fprop.get(SOURCE_PARAM); final String inheritedSource = (String) fprop.get(INHERITED_PARAM); final FlowProps flowProps = new FlowProps(inheritedSource, source); this.flowProps.put(source, flowProps); } } } public Map<String, Object> toUpdateObject(final long lastUpdateTime) { final Map<String, Object> updateData = super.toUpdateObject(); final List<Map<String, Object>> updatedNodes = new ArrayList<>(); for (final ExecutableNode node : this.executableNodes.values()) { if (node instanceof ExecutableFlowBase) { final Map<String, Object> updatedNodeMap = ((ExecutableFlowBase) node).toUpdateObject(lastUpdateTime); // We only add flows to the list that either have a recent-enough update time // or have updated descendants. if (node.getUpdateTime() > lastUpdateTime || updatedNodeMap.containsKey(NODES_PARAM)) { updatedNodes.add(updatedNodeMap); } } else { if (node.getUpdateTime() > lastUpdateTime) { final Map<String, Object> updatedNodeMap = node.toUpdateObject(); updatedNodes.add(updatedNodeMap); } } } // If there are no updated nodes, we just won't add the list. This is good since, // if this is a nested flow, the parent is given the option to include or discard // these subflows. if (!updatedNodes.isEmpty()) { updateData.put(NODES_PARAM, updatedNodes); } return updateData; } public void applyUpdateObject(final TypedMapWrapper<String, Object> updateData, final List<ExecutableNode> updatedNodes) { super.applyUpdateObject(updateData); if (updatedNodes != null) { updatedNodes.add(this); } final List<Map<String, Object>> nodes = (List<Map<String, Object>>) updateData .<Map<String, Object>>getList(NODES_PARAM); if (nodes != null) { for (final Map<String, Object> node : nodes) { final TypedMapWrapper<String, Object> nodeWrapper = new TypedMapWrapper<>(node); String id = nodeWrapper.getString(ID_PARAM); if (id == null) { // Legacy case id = nodeWrapper.getString("jobId"); } final ExecutableNode exNode = this.executableNodes.get(id); if (updatedNodes != null) { updatedNodes.add(exNode); } if (exNode instanceof ExecutableFlowBase) { ((ExecutableFlowBase) exNode).applyUpdateObject(nodeWrapper, updatedNodes); } else { exNode.applyUpdateObject(nodeWrapper); } } } } public void applyUpdateObject(final Map<String, Object> updateData, final List<ExecutableNode> updatedNodes) { final TypedMapWrapper<String, Object> typedMapWrapper = new TypedMapWrapper<>(updateData); applyUpdateObject(typedMapWrapper, updatedNodes); } @Override public void applyUpdateObject(final Map<String, Object> updateData) { final TypedMapWrapper<String, Object> typedMapWrapper = new TypedMapWrapper<>(updateData); applyUpdateObject(typedMapWrapper, null); } public String getFlowPath() { if (this.getParentFlow() == null) { return this.getFlowId(); } else { return this.getParentFlow().getFlowPath() + "," + this.getId() + ":" + this.getFlowId(); } } }
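A brief fragment (not runnable on its own) illustrating the colon-delimited node addressing implemented by getExecutableNodePath above; it assumes flow is an already-constructed ExecutableFlowBase, and "subflow1" and "jobA" are hypothetical node ids.

// Each ":"-separated segment descends one embedded-flow level.
final ExecutableNode viaPath = flow.getExecutableNodePath("subflow1:jobA");
// Equivalent varargs form:
final ExecutableNode viaVarargs = flow.getExecutableNodePath("subflow1", "jobA");
// Both return null if a segment is missing or a plain job is descended into.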
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutableFlowPriorityComparator.java
/* * Copyright 2014 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.utils.Pair; import java.util.Comparator; import org.apache.log4j.Logger; /** * Comparator implicitly used in priority queue for QueuedExecutions. */ public final class ExecutableFlowPriorityComparator implements Comparator<Pair<ExecutionReference, ExecutableFlow>> { private static final Logger logger = Logger .getLogger(ExecutableFlowPriorityComparator.class); /** * <pre> * Sorting order is determined by:- * 1. descending order of priority * 2. if same priority, ascending order of update time * 3. if same priority and updateTime, ascending order of execution id * </pre> * * {@inheritDoc} * * @see java.util.Comparator#compare(java.lang.Object, java.lang.Object) */ @Override public int compare(final Pair<ExecutionReference, ExecutableFlow> pair1, final Pair<ExecutionReference, ExecutableFlow> pair2) { ExecutableFlow exflow1 = null, exflow2 = null; if (pair1 != null && pair1.getSecond() != null) { exflow1 = pair1.getSecond(); } if (pair2 != null && pair2.getSecond() != null) { exflow2 = pair2.getSecond(); } if (exflow1 == null && exflow2 == null) { return 0; } else if (exflow1 == null) { return -1; } else if (exflow2 == null) { return 1; } else { // descending order of priority int diff = getPriority(exflow2) - getPriority(exflow1); if (diff == 0) { // ascending order of update time, if same priority diff = Long.compare(exflow1.getUpdateTime(), exflow2.getUpdateTime()); } if (diff == 0) { // ascending order of execution id, if same priority and updateTime diff = exflow1.getExecutionId() - exflow2.getExecutionId(); } return diff; } } /* Helper method to fetch flow priority from flow props */ private int getPriority(final ExecutableFlow exflow) { final ExecutionOptions options = exflow.getExecutionOptions(); int priority = ExecutionOptions.DEFAULT_FLOW_PRIORITY; if (options != null && options.getFlowParameters() != null && options.getFlowParameters() .containsKey(ExecutionOptions.FLOW_PRIORITY)) { try { priority = Integer.valueOf(options.getFlowParameters().get( ExecutionOptions.FLOW_PRIORITY)); } catch (final NumberFormatException ex) { priority = ExecutionOptions.DEFAULT_FLOW_PRIORITY; logger.error( "Failed to parse flow priority for exec_id = " + exflow.getExecutionId(), ex); } } return priority; } }
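A fragment sketching the comparator's intended use: a min-ordered priority queue over queued executions, where the flow carrying the higher ExecutionOptions.FLOW_PRIORITY flow parameter is polled first. pairLow and pairHigh are placeholders for real queued executions.

import azkaban.utils.Pair;
import java.util.PriorityQueue;

// ...
final PriorityQueue<Pair<ExecutionReference, ExecutableFlow>> queue =
    new PriorityQueue<>(new ExecutableFlowPriorityComparator());
queue.add(pairLow);
queue.add(pairHigh);
// poll() yields the higher-priority flow first; ties fall back to the older
// update time, then to the smaller execution id.
final Pair<ExecutionReference, ExecutableFlow> next = queue.poll();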
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutableFlowRampMetadata.java
/* * Copyright 2019 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.utils.Props; import com.sun.istack.NotNull; import java.util.HashMap; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; /** * Executable Ramp Metadata, attached to the Flow's Executable Node. * The Ramp Metadata mainly includes the following information: * 1. ramp props: a map of {dependency, dependencyPropValue}, for example: * {"dali", { * this: [{"jar:dali-data-pig", "...data-pig-9.2.10.jar"}, {"jar:dali-data-spark", "...data-spark-9.2.10.jar"}] * parent: [{"jar:dali-data-pig", "...data-pig-9.2.1.jar"}, {"jar:dali-data-spark", "...data-spark-9.2.1.jar"}] * }} * 2. ramp dependency map: maps each dependency to its associated plugin types, for example: * [ * {"jar:dali-data-pig", [pigLi, pig]}, * {"jar:dali-data-spark", [spark]} * ] * 3. exceptional ramp items map: hosts the exceptional items based on job name. It looks like * {jobName_in_flow, List_of_Exceptional_Items_based_on_dependency} */ public class ExecutableFlowRampMetadata { private static final String RAMP_PROP_KEY_PREFIX = "azkaban.ramp."; private Map<String, Props> rampPropsMap = new HashMap<>(); private ExecutableRampDependencyMap executableRampDependencyMap = null; private Map<String, ExecutableRampExceptionalItems> exceptionalJobTreatments = new HashMap<>(); private ExecutableFlowRampMetadata() { } public static ExecutableFlowRampMetadata createInstance( ExecutableRampDependencyMap executableRampDependencyMap, Map<String, ExecutableRampExceptionalItems> exceptionalJobTreatments) { ExecutableFlowRampMetadata executableFlowRampMetadata = new ExecutableFlowRampMetadata(); executableFlowRampMetadata.executableRampDependencyMap = executableRampDependencyMap.clone(); Map<String, ExecutableRampExceptionalItems> clonedExceptionalJobTreatments = new HashMap<>(); clonedExceptionalJobTreatments.putAll( exceptionalJobTreatments.entrySet().stream().collect(Collectors.toMap( item -> item.getKey(), item -> item.getValue().clone() ))); executableFlowRampMetadata.exceptionalJobTreatments = clonedExceptionalJobTreatments; return executableFlowRampMetadata; } public ExecutableFlowRampMetadata setRampProps(String ramp, Props props) { this.rampPropsMap.put(ramp, props); return this; } public String getRampItemValue(final String ramp, final String key) { return this.rampPropsMap.get(ramp).get(key); } public ExecutableFlowRampMetadata setExecutableRampDependencyMap( ExecutableRampDependencyMap executableRampDependencyMap) { this.executableRampDependencyMap = executableRampDependencyMap; return this; } public Map<String, ExecutableRampExceptionalItems> getExceptionalJobTreatments() { return exceptionalJobTreatments; } public ExecutableFlowRampMetadata setExceptionalJobTreatments( Map<String, ExecutableRampExceptionalItems> exceptionalJobTreatments) { this.exceptionalJobTreatments = exceptionalJobTreatments; return this; } public
Set<String> getActiveRamps() { return this.rampPropsMap.entrySet() .stream() .filter(item -> ExecutableRampStatus.SELECTED.name() .equalsIgnoreCase(item.getValue().getSource())) .map(item -> item.getKey()).collect(Collectors.toSet()); } /** * Select Ramp Props For Job based on jobId and jobType * @param jobId job_id which is the job name against flow * @param jobType job type * @return rampable dependency */ synchronized public Props selectRampPropsForJob(@NotNull final String jobId, @NotNull final String jobType) { Props selectedProps = new Props(); for(Map.Entry<String, Props> rampItem : rampPropsMap.entrySet()) { String rampId = rampItem.getKey(); Props rampValue = rampItem.getValue(); // 1. For each ramp, we need to check if there is any whitelist/blacklist treatment based on job_id, boolean isExceptionalJob = isExceptionalJob(rampId, jobId, rampValue.getSource()); Props filteredProps = generateFilteredProps(rampValue, isExceptionalJob); // 2. Continue Filter By jobType Set<String> filteredDependencies = filteredProps .getKeySet() .stream() .filter(dependency -> executableRampDependencyMap.isValidJobType(dependency, jobType)) .collect(Collectors.toSet()); // 3. Append the dependency into the final consolidated props. for(String dependency : filteredDependencies) { selectedProps.put( RAMP_PROP_KEY_PREFIX + dependency, Optional.ofNullable(filteredProps.get(dependency)) .orElse(executableRampDependencyMap.getDefaultValue(dependency)) ); } } return selectedProps; } synchronized private boolean isExceptionalJob(final String rampId, final String jobId, final String source) { ExecutableRampExceptionalItems exceptionalJobs = exceptionalJobTreatments.get(rampId); if (exceptionalJobs == null) { return false; } switch (exceptionalJobs.getStatus(jobId)) { case WHITELISTED: return ExecutableRampStatus.of(source).equals(ExecutableRampStatus.UNSELECTED); case BLACKLISTED: return ExecutableRampStatus.of(source).equals(ExecutableRampStatus.SELECTED); default: // by default it means no exceptional ramp treatment for this job return false; } } synchronized private Props generateFilteredProps(Props props, boolean isExceptionalJob) { Props filteredProps = new Props(); Set<String> keySet = props.getKeySet().stream().collect(Collectors.toSet()); for (String key : keySet) { if (!props.get(key).isEmpty()) { String currentValue = props.get(key); String parentValue = Optional.ofNullable(props.getParent()).map(prop -> prop.get(key)).orElse(""); filteredProps.put(key, (isExceptionalJob && !parentValue.isEmpty()) ? parentValue : currentValue); } } return filteredProps; } }
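A small fragment showing what selectRampPropsForJob above is expected to return: every dependency that survives the whitelist/blacklist and job-type filters arrives under the "azkaban.ramp." key prefix. It assumes metadata is a populated ExecutableFlowRampMetadata; the job and dependency ids echo the hypothetical dali example from the class javadoc.

final Props rampProps = metadata.selectRampPropsForJob("jobA", "pig");
// e.g. the selected dali jar for a pig job, or null if not rampable for it:
final String daliJar = rampProps.get("azkaban.ramp.jar:dali-data-pig");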
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutableJobInfo.java
/*
 * Copyright 2012 LinkedIn Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package azkaban.executor;

import azkaban.utils.Pair;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ExecutableJobInfo {

  private final int execId;
  private final int projectId;
  private final int version;
  private final String flowId;
  private final String jobId;
  private final long startTime;
  private final long endTime;
  private final Status status;
  private final int attempt;

  private ArrayList<Pair<String, String>> jobPath;
  private String immediateFlowId;

  public ExecutableJobInfo(final int execId, final int projectId, final int version,
      final String flowId, final String jobId, final long startTime, final long endTime,
      final Status status, final int attempt) {
    this.execId = execId;
    this.projectId = projectId;
    this.startTime = startTime;
    this.endTime = endTime;
    this.status = status;
    this.version = version;
    this.flowId = flowId;
    this.jobId = jobId;
    this.attempt = attempt;

    parseFlowId();
  }

  public int getProjectId() {
    return this.projectId;
  }

  public int getExecId() {
    return this.execId;
  }

  public int getVersion() {
    return this.version;
  }

  public String getFlowId() {
    return this.flowId;
  }

  public String getImmediateFlowId() {
    return this.immediateFlowId;
  }

  public String getJobId() {
    return this.jobId;
  }

  public long getStartTime() {
    return this.startTime;
  }

  public long getEndTime() {
    return this.endTime;
  }

  public Status getStatus() {
    return this.status;
  }

  public int getAttempt() {
    return this.attempt;
  }

  public List<Pair<String, String>> getParsedFlowId() {
    return this.jobPath;
  }

  private void parseFlowId() {
    this.jobPath = new ArrayList<>();

    // parsing pattern: flowRootName[,embeddedFlowName:embeddedFlowPath]*
    final String[] flowPairs = this.flowId.split(",");

    for (final String flowPair : flowPairs) {
      // Split each embeddedFlowName:embeddedFlowPath pair on the first occurrence of ':'
      // only, because embeddedFlowPath also uses ':' as its delimiter.
      // Ex: "embeddedFlow3:rootFlow:embeddedFlow1:embeddedFlow2:embeddedFlow3" will result in
      // ["embeddedFlow3", "rootFlow:embeddedFlow1:embeddedFlow2:embeddedFlow3"]
      final String[] pairSplit = flowPair.split(":", 2);
      final Pair<String, String> pair;
      if (pairSplit.length == 1) {
        pair = new Pair<>(pairSplit[0], pairSplit[0]);
      } else {
        pair = new Pair<>(pairSplit[0], pairSplit[1]);
      }
      this.jobPath.add(pair);
    }
    this.immediateFlowId = this.jobPath.get(this.jobPath.size() - 1).getSecond();
  }

  public String getJobIdPath() {
    // Skip the first pair because it's always just the root.
    final StringBuilder path = new StringBuilder();
    for (int i = 1; i < this.jobPath.size(); ++i) {
      path.append(this.jobPath.get(i).getFirst()).append(':');
    }
    path.append(this.jobId);
    return path.toString();
  }

  public Map<String, Object> toObject() {
    final HashMap<String, Object> map = new HashMap<>();
    map.put("execId", this.execId);
    map.put("version", this.version);
    map.put("flowId", this.flowId);
    map.put("jobId", this.jobId);
    map.put("startTime", this.startTime);
    map.put("endTime", this.endTime);
    map.put("status", this.status.toString());
    map.put("attempt", this.attempt);
    return map;
  }
}
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutableNode.java
/*
 * Copyright 2013 LinkedIn Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package azkaban.executor;

import azkaban.flow.ConditionOnJobStatus;
import azkaban.flow.Node;
import azkaban.utils.Props;
import azkaban.utils.PropsUtils;
import azkaban.utils.TypedMapWrapper;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * Base executable that nodes and flows are built on.
 */
public class ExecutableNode {

  public static final String ID_PARAM = "id";
  public static final String STATUS_PARAM = "status";
  public static final String STARTTIME_PARAM = "startTime";
  public static final String ENDTIME_PARAM = "endTime";
  public static final String UPDATETIME_PARAM = "updateTime";
  public static final String INNODES_PARAM = "inNodes";
  public static final String OUTNODES_PARAM = "outNodes";
  public static final String TYPE_PARAM = "type";
  public static final String CONDITION_PARAM = "condition";
  public static final String CONDITION_ON_JOB_STATUS_PARAM = "conditionOnJobStatus";
  public static final String PROPS_SOURCE_PARAM = "propSource";
  public static final String JOB_SOURCE_PARAM = "jobSource";
  public static final String OUTPUT_PROPS_PARAM = "outputProps";
  public static final String ATTEMPT_PARAM = "attempt";
  public static final String PASTATTEMPTS_PARAM = "pastAttempts";

  private final AtomicInteger attempt = new AtomicInteger(0);
  private String id;
  private String type = null;
  private volatile Status status = Status.READY;
  private volatile long startTime = -1;
  private volatile long endTime = -1;
  private long updateTime = -1;
  private volatile boolean killedBySLA = false;

  // Path to the job file
  private String jobSource;
  // Path to the top level props file
  private String propsSource;
  private Set<String> inNodes = new HashSet<>();
  private Set<String> outNodes = new HashSet<>();
  private Props inputProps;
  private Props outputProps;
  private Props rampProps;
  private long delayExecution = 0;
  private ArrayList<ExecutionAttempt> pastAttempts = null;
  private String condition;
  private ConditionOnJobStatus conditionOnJobStatus = ConditionOnJobStatus.ALL_SUCCESS;

  // Transient. These values aren't saved, but rediscovered.
  private ExecutableFlowBase parentFlow;

  public ExecutableNode(final Node node) {
    this.id = node.getId();
    this.jobSource = node.getJobSource();
    this.propsSource = node.getPropsSource();
  }

  public ExecutableNode(final Node node, final ExecutableFlowBase parent) {
    this(node.getId(), node.getType(), node.getCondition(), node.getConditionOnJobStatus(),
        node.getJobSource(), node.getPropsSource(), parent);
  }

  public ExecutableNode(final String id, final String type, final String condition,
      final ConditionOnJobStatus conditionOnJobStatus, final String jobSource,
      final String propsSource, final ExecutableFlowBase parent) {
    this.id = id;
    this.jobSource = jobSource;
    this.propsSource = propsSource;
    this.type = type;
    this.condition = condition;
    this.conditionOnJobStatus = conditionOnJobStatus;
    setParentFlow(parent);
  }

  public ExecutableNode() {
  }

  public ExecutableFlow getExecutableFlow() {
    if (this.parentFlow == null) {
      return null;
    }
    return this.parentFlow.getExecutableFlow();
  }

  public ExecutableFlowBase getParentFlow() {
    return this.parentFlow;
  }

  public void setParentFlow(final ExecutableFlowBase flow) {
    this.parentFlow = flow;
  }

  public String getId() {
    return this.id;
  }

  public void setId(final String id) {
    this.id = id;
  }

  public Status getStatus() {
    return this.status;
  }

  public void setStatus(final Status status) {
    this.status = status;
  }

  public String getType() {
    return this.type;
  }

  public void setType(final String type) {
    this.type = type;
  }

  public long getStartTime() {
    return this.startTime;
  }

  public void setStartTime(final long startTime) {
    this.startTime = startTime;
  }

  public long getEndTime() {
    return this.endTime;
  }

  public void setEndTime(final long endTime) {
    this.endTime = endTime;
  }

  public long getUpdateTime() {
    return this.updateTime;
  }

  public void setUpdateTime(final long updateTime) {
    this.updateTime = updateTime;
  }

  public boolean isKilledBySLA() {
    return this.killedBySLA;
  }

  public void setKilledBySLA(final boolean killedBySLA) {
    this.killedBySLA = killedBySLA;
  }

  public void addOutNode(final String exNode) {
    this.outNodes.add(exNode);
  }

  public void addInNode(final String exNode) {
    this.inNodes.add(exNode);
  }

  public Set<String> getOutNodes() {
    return this.outNodes;
  }

  public Set<String> getInNodes() {
    return this.inNodes;
  }

  public boolean hasJobSource() {
    return this.jobSource != null;
  }

  public boolean hasPropsSource() {
    return this.propsSource != null;
  }

  public String getJobSource() {
    return this.jobSource;
  }

  public String getPropsSource() {
    return this.propsSource;
  }

  public Props getInputProps() {
    return this.inputProps;
  }

  public void setInputProps(final Props input) {
    this.inputProps = input;
  }

  public Props getOutputProps() {
    return this.outputProps;
  }

  public void setOutputProps(final Props output) {
    this.outputProps = output;
  }

  public long getDelayedExecution() {
    return this.delayExecution;
  }

  public void setDelayedExecution(final long delayMs) {
    this.delayExecution = delayMs;
  }

  public List<ExecutionAttempt> getPastAttemptList() {
    return this.pastAttempts;
  }

  public int getAttempt() {
    return this.attempt.get();
  }

  public void resetForRetry() {
    final ExecutionAttempt pastAttempt = new ExecutionAttempt(this.attempt.get(), this);
    this.attempt.incrementAndGet();

    synchronized (this) {
      if (this.pastAttempts == null) {
        this.pastAttempts = new ArrayList<>();
      }
      this.pastAttempts.add(pastAttempt);
    }

    this.setStartTime(-1);
    this.setEndTime(-1);
    this.setUpdateTime(System.currentTimeMillis());
    this.setStatus(Status.READY);
    this.setKilledBySLA(false);
  }

  public List<Object> getAttemptObjects() {
    final ArrayList<Object> array = new ArrayList<>();
    // Guard against NPE when the node has never been retried.
    if (this.pastAttempts == null) {
      return array;
    }
    for (final ExecutionAttempt attempt : this.pastAttempts) {
      array.add(attempt.toObject());
    }
    return array;
  }

  public String getNestedId() {
    return getPrintableId(":");
  }

  public String getPrintableId(final String delimiter) {
    if (this.getParentFlow() == null || this.getParentFlow() instanceof ExecutableFlow) {
      return getId();
    }
    return getParentFlow().getPrintableId(delimiter) + delimiter + getId();
  }

  public Map<String, Object> toObject() {
    final Map<String, Object> mapObj = new HashMap<>();
    fillMapFromExecutable(mapObj);
    return mapObj;
  }

  protected void fillMapFromExecutable(final Map<String, Object> objMap) {
    objMap.put(ID_PARAM, this.id);
    objMap.put(STATUS_PARAM, this.status.toString());
    objMap.put(STARTTIME_PARAM, this.startTime);
    objMap.put(ENDTIME_PARAM, this.endTime);
    objMap.put(UPDATETIME_PARAM, this.updateTime);
    objMap.put(TYPE_PARAM, this.type);
    objMap.put(CONDITION_PARAM, this.condition);
    if (this.conditionOnJobStatus != null) {
      objMap.put(CONDITION_ON_JOB_STATUS_PARAM, this.conditionOnJobStatus.toString());
    }
    // Store the primitive value, not the AtomicInteger wrapper, so serialization is stable.
    objMap.put(ATTEMPT_PARAM, this.attempt.get());

    if (this.inNodes != null && !this.inNodes.isEmpty()) {
      objMap.put(INNODES_PARAM, this.inNodes);
    }
    if (this.outNodes != null && !this.outNodes.isEmpty()) {
      objMap.put(OUTNODES_PARAM, this.outNodes);
    }

    if (hasPropsSource()) {
      objMap.put(PROPS_SOURCE_PARAM, this.propsSource);
    }
    if (hasJobSource()) {
      objMap.put(JOB_SOURCE_PARAM, this.jobSource);
    }

    if (this.outputProps != null && this.outputProps.size() > 0) {
      objMap.put(OUTPUT_PROPS_PARAM, PropsUtils.toStringMap(this.outputProps, true));
    }

    if (this.pastAttempts != null) {
      final ArrayList<Object> attemptsList = new ArrayList<>(this.pastAttempts.size());
      for (final ExecutionAttempt attempts : this.pastAttempts) {
        attemptsList.add(attempts.toObject());
      }
      objMap.put(PASTATTEMPTS_PARAM, attemptsList);
    }
  }

  public void fillExecutableFromMapObject(
      final TypedMapWrapper<String, Object> wrappedMap) {
    this.id = wrappedMap.getString(ID_PARAM);
    this.type = wrappedMap.getString(TYPE_PARAM);
    this.condition = wrappedMap.getString(CONDITION_PARAM);
    this.conditionOnJobStatus = ConditionOnJobStatus.fromString(
        wrappedMap.getString(CONDITION_ON_JOB_STATUS_PARAM));
    this.status = Status.valueOf(wrappedMap.getString(STATUS_PARAM));
    this.startTime = wrappedMap.getLong(STARTTIME_PARAM);
    this.endTime = wrappedMap.getLong(ENDTIME_PARAM);
    this.updateTime = wrappedMap.getLong(UPDATETIME_PARAM);
    this.attempt.set(wrappedMap.getInt(ATTEMPT_PARAM, 0));

    this.inNodes = new HashSet<>();
    this.inNodes.addAll(wrappedMap.getStringCollection(INNODES_PARAM,
        Collections.<String>emptySet()));

    this.outNodes = new HashSet<>();
    this.outNodes.addAll(wrappedMap.getStringCollection(OUTNODES_PARAM,
        Collections.<String>emptySet()));

    this.propsSource = wrappedMap.getString(PROPS_SOURCE_PARAM);
    this.jobSource = wrappedMap.getString(JOB_SOURCE_PARAM);

    final Map<String, String> outputProps =
        wrappedMap.<String, String>getMap(OUTPUT_PROPS_PARAM);
    if (outputProps != null) {
      this.outputProps = new Props(null, outputProps);
    }

    final Collection<Object> pastAttempts =
        wrappedMap.<Object>getCollection(PASTATTEMPTS_PARAM);
    if (pastAttempts != null) {
      final ArrayList<ExecutionAttempt> attempts = new ArrayList<>();
      for (final Object attemptObj : pastAttempts) {
        final ExecutionAttempt attempt = ExecutionAttempt.fromObject(attemptObj);
        attempts.add(attempt);
      }
      this.pastAttempts = attempts;
    }
  }

  public void fillExecutableFromMapObject(final Map<String, Object> objMap) {
    final TypedMapWrapper<String, Object> wrapper = new TypedMapWrapper<>(objMap);
    fillExecutableFromMapObject(wrapper);
  }

  public Map<String, Object> toUpdateObject() {
    final Map<String, Object> updatedNodeMap = new HashMap<>();
    updatedNodeMap.put(ID_PARAM, getId());
    updatedNodeMap.put(STATUS_PARAM, getStatus().getNumVal());
    updatedNodeMap.put(STARTTIME_PARAM, getStartTime());
    updatedNodeMap.put(ENDTIME_PARAM, getEndTime());
    updatedNodeMap.put(UPDATETIME_PARAM, getUpdateTime());
    updatedNodeMap.put(ATTEMPT_PARAM, getAttempt());

    if (getAttempt() > 0) {
      final ArrayList<Map<String, Object>> pastAttempts = new ArrayList<>();
      for (final ExecutionAttempt attempt : getPastAttemptList()) {
        pastAttempts.add(attempt.toObject());
      }
      updatedNodeMap.put(PASTATTEMPTS_PARAM, pastAttempts);
    }
    return updatedNodeMap;
  }

  public void applyUpdateObject(final TypedMapWrapper<String, Object> updateData) {
    this.status = Status.fromInteger(updateData.getInt(STATUS_PARAM, this.status.getNumVal()));
    this.startTime = updateData.getLong(STARTTIME_PARAM);
    this.updateTime = updateData.getLong(UPDATETIME_PARAM);
    this.endTime = updateData.getLong(ENDTIME_PARAM);

    if (updateData.containsKey(ATTEMPT_PARAM)) {
      this.attempt.set(updateData.getInt(ATTEMPT_PARAM));
      if (this.attempt.get() > 0) {
        updatePastAttempts(updateData.<Object>getList(PASTATTEMPTS_PARAM,
            Collections.<Object>emptyList()));
      }
    }
  }

  public void applyUpdateObject(final Map<String, Object> updateData) {
    final TypedMapWrapper<String, Object> wrapper = new TypedMapWrapper<>(updateData);
    applyUpdateObject(wrapper);
  }

  public void cancelNode(final long cancelTime) {
    if (this.status == Status.DISABLED) {
      skipNode(cancelTime);
    } else {
      this.setStatus(Status.CANCELLED);
      this.setStartTime(cancelTime);
      this.setEndTime(cancelTime);
      this.setUpdateTime(cancelTime);
    }
  }

  public void skipNode(final long skipTime) {
    this.setStatus(Status.SKIPPED);
    this.setStartTime(skipTime);
    this.setEndTime(skipTime);
    this.setUpdateTime(skipTime);
  }

  private void updatePastAttempts(final List<Object> pastAttemptsList) {
    if (pastAttemptsList == null) {
      return;
    }

    synchronized (this) {
      if (this.pastAttempts == null) {
        this.pastAttempts = new ArrayList<>();
      }

      // We just check the size because past attempts don't change.
      if (pastAttemptsList.size() <= this.pastAttempts.size()) {
        return;
      }

      final Object[] pastAttemptArray = pastAttemptsList.toArray();
      for (int i = this.pastAttempts.size(); i < pastAttemptArray.length; ++i) {
        final ExecutionAttempt attempt = ExecutionAttempt.fromObject(pastAttemptArray[i]);
        this.pastAttempts.add(attempt);
      }
    }
  }

  public int getRetries() {
    return this.inputProps.getInt("retries", 0);
  }

  public long getRetryBackoff() {
    return this.inputProps.getLong("retry.backoff", 0);
  }

  public String getCondition() {
    return this.condition;
  }

  public void setCondition(final String condition) {
    this.condition = condition;
  }

  public ConditionOnJobStatus getConditionOnJobStatus() {
    return this.conditionOnJobStatus == null ? ConditionOnJobStatus.ALL_SUCCESS
        : this.conditionOnJobStatus;
  }

  public void setConditionOnJobStatus(final ConditionOnJobStatus conditionOnJobStatus) {
    this.conditionOnJobStatus = conditionOnJobStatus;
  }

  public Props getRampProps() {
    return rampProps;
  }

  public void setRampProps(Props rampProps) {
    this.rampProps = rampProps;
  }
}
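// Editor's note: illustrative addition, not in the original source. It shows the plain-map
// round trip provided by toObject()/fillExecutableFromMapObject(); values are hypothetical.
import azkaban.executor.ExecutableNode;
import azkaban.executor.Status;
import java.util.Map;

class ExecutableNodeSketch {
  public static void main(String[] args) {
    ExecutableNode node = new ExecutableNode();
    node.setId("jobA");
    node.setStatus(Status.READY);

    // Serialize to a plain map, then restore into a fresh instance.
    Map<String, Object> serialized = node.toObject();
    ExecutableNode restored = new ExecutableNode();
    restored.fillExecutableFromMapObject(serialized);
    System.out.println(restored.getId() + " " + restored.getStatus()); // jobA READY
  }
}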
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutableRamp.java
/*
 * Copyright 2019 LinkedIn Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package azkaban.executor;

import azkaban.utils.TimeUtils;
import com.sun.istack.NotNull;
import java.util.concurrent.locks.ReentrantLock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Executable Ramp Object keeps the run-time status of the executing ramp item.
 * It is a data model used to manage the current status of an ongoing ramp item.
 * The main purpose of this object is to control the ON/OFF status of the ramp in the
 * execution engine.
 *
 * The status of a ramp includes the following two parts:
 * 1. the active flag of the ramp, which is set manually by an administrator via a web Ajax call
 * 2. the pause flag of the ramp, which is set automatically based on the number of failed
 *    workflows during the ramp. It exists to guard against massive failures caused by the ramp.
 */
public final class ExecutableRamp implements IRefreshable<ExecutableRamp> {

  private static final int ONE_DAY = 60 * 60 * 24;
  private static final Logger LOGGER = LoggerFactory.getLogger(ExecutableRamp.class);

  private final ReentrantLock lock = new ReentrantLock();

  public enum Action {
    IGNORED, SUCCEEDED, FAILED
  }

  public enum CountType {
    TRAIL, SUCCESS, FAILURE, IGNORED
  }

  /**
   * Sub data model of ExecutableRamp that hosts all status-related data.
   */
  private static class State implements IRefreshable<State> {

    private volatile boolean isSynchronized = true;
    private volatile long startTime = 0;
    private volatile long endTime = 0;
    private volatile long lastUpdatedTime = 0;
    private volatile int numOfTrail = 0;
    private volatile int numOfSuccess = 0;
    private volatile int numOfFailure = 0;
    private volatile int numOfIgnored = 0;
    private volatile boolean isPaused = false;
    private volatile int rampStage = 0;
    // The last time the ramping process was automatically ramped down
    private volatile long lastRampDownTime = 0;
    private volatile int cachedNumOfTrail = 0;
    private volatile int cachedNumOfSuccess = 0;
    private volatile int cachedNumOfFailure = 0;
    private volatile int cachedNumOfIgnored = 0;
    private volatile boolean isActive = true;

    private State() {
    }

    private State(long startTime, long endTime, long lastUpdatedTime,
        int numTrail, int numSuccess, int numFailure, int numIgnored,
        boolean isPaused, int rampStage, boolean isActive) {
      this.startTime = startTime;
      this.endTime = endTime;
      this.lastUpdatedTime = lastUpdatedTime;
      this.numOfTrail = numTrail;
      this.numOfSuccess = numSuccess;
      this.numOfFailure = numFailure;
      this.numOfIgnored = numIgnored;
      this.isPaused = isPaused;
      this.rampStage = rampStage;
      this.isActive = isActive;
    }

    private State(int rampStage, boolean isActive, boolean isPaused, boolean isSynchronized,
        long startTime, long endTime, long lastUpdatedTime, long lastRampDownTime,
        int numTrail, int numSuccess, int numFailure, int numIgnored,
        int cachedNumOfTrail, int cachedNumOfSuccess, int cachedNumOfFailure,
        int cachedNumOfIgnored) {
      this.rampStage = rampStage;
      this.isActive = isActive;
      this.isPaused = isPaused;
      this.isSynchronized = isSynchronized;
      this.startTime = startTime;
      this.endTime = endTime;
      this.lastUpdatedTime = lastUpdatedTime;
      this.lastRampDownTime = lastRampDownTime;
      this.numOfTrail = numTrail;
      this.numOfSuccess = numSuccess;
      this.numOfFailure = numFailure;
      this.numOfIgnored = numIgnored;
      this.cachedNumOfTrail = cachedNumOfTrail;
      this.cachedNumOfSuccess = cachedNumOfSuccess;
      this.cachedNumOfFailure = cachedNumOfFailure;
      this.cachedNumOfIgnored = cachedNumOfIgnored;
    }

    public static State createInstance(long startTime, long endTime, long lastUpdatedTime,
        int numTrail, int numSuccess, int numFailure, int numIgnored,
        boolean isPaused, int rampStage, boolean isActive) {
      return new State(startTime, endTime, lastUpdatedTime,
          numTrail, numSuccess, numFailure, numIgnored, isPaused, rampStage, isActive);
    }

    @Override
    public State refresh(State source) {
      this.startTime = source.startTime;
      this.endTime = source.endTime;
      this.lastUpdatedTime = source.lastUpdatedTime;
      this.numOfTrail = source.numOfTrail;
      this.numOfSuccess = source.numOfSuccess;
      this.numOfFailure = source.numOfFailure;
      this.numOfIgnored = source.numOfIgnored;
      this.isPaused = source.isPaused;
      // Note: When the rampStage is set by the DB again and a ramp-up is triggered,
      // the lastRampDownTime is reset so that the ramp can be automatically ramped down
      // again when more failures are detected during the ramp-up stage.
      if (source.rampStage > this.rampStage) {
        this.lastRampDownTime = 0;
      }
      this.rampStage = source.rampStage;
      this.isActive = source.isActive;
      this.isSynchronized = (this.cachedNumOfFailure == 0)
          && (this.cachedNumOfIgnored == 0)
          && (this.cachedNumOfSuccess == 0)
          && (this.cachedNumOfTrail == 0);
      return this;
    }

    @Override
    public State clone() {
      return new State(this.rampStage, this.isActive, this.isPaused, this.isSynchronized,
          this.startTime, this.endTime, this.lastUpdatedTime, this.lastRampDownTime,
          this.numOfTrail, this.numOfSuccess, this.numOfFailure, this.numOfIgnored,
          this.cachedNumOfTrail, this.cachedNumOfSuccess, this.cachedNumOfFailure,
          this.cachedNumOfIgnored);
    }

    @Override
    public int elementCount() {
      return 1;
    }
  }

  /**
   * Sub data model of ExecutableRamp that hosts the threshold settings used to determine
   * whether the ramp should be paused or automatically ramped down when massive failures
   * are detected at run-time.
   */
  private static class Metadata implements IRefreshable<Metadata> {

    private volatile int maxFailureToPause = 0;
    private volatile int maxFailureToRampDown = 0;
    private volatile boolean isPercentageScaleForMaxFailure = false;

    private Metadata() {
    }

    private Metadata(int maxFailureToRampPause, int maxFailureToRampDown,
        boolean isPercentageScaleForMaxFailure) {
      this.maxFailureToPause = maxFailureToRampPause;
      this.maxFailureToRampDown = maxFailureToRampDown;
      this.isPercentageScaleForMaxFailure = isPercentageScaleForMaxFailure;
    }

    public static Metadata createInstance(int maxFailureToRampPause, int maxFailureToRampDown,
        boolean isPercentageScaleForMaxFailure) {
      return new Metadata(maxFailureToRampPause, maxFailureToRampDown,
          isPercentageScaleForMaxFailure);
    }

    @Override
    public Metadata refresh(Metadata source) {
      this.maxFailureToPause = source.maxFailureToPause;
      this.maxFailureToRampDown = source.maxFailureToRampDown;
      this.isPercentageScaleForMaxFailure = source.isPercentageScaleForMaxFailure;
      return this;
    }

    @Override
    public Metadata clone() {
      return new Metadata(this.maxFailureToPause, this.maxFailureToRampDown,
          this.isPercentageScaleForMaxFailure);
    }

    @Override
    public int elementCount() {
      return 1; // Always 1, since this is not a list.
    }
  }

  private volatile String id;
  private volatile String policy;
  private volatile Metadata metadata;
  private volatile State state;

  private ExecutableRamp() {
  }

  private ExecutableRamp(@NotNull final String id, @NotNull final String policy,
      @NotNull ExecutableRamp.Metadata metadata, @NotNull ExecutableRamp.State state) {
    this.id = id;
    this.policy = policy;
    this.metadata = metadata;
    this.state = state;
  }

  public static ExecutableRamp createInstance(@NotNull final String id,
      @NotNull final String policy,
      int maxFailureToRampPause, int maxFailureToRampDown, boolean isPercentageScaleForMaxFailure,
      long startTime, long endTime, long lastUpdatedTime,
      int numTrail, int numSuccess, int numFailure, int numIgnored,
      boolean isPaused, int rampStage, boolean isActive) {
    return new ExecutableRamp(id, policy,
        ExecutableRamp.Metadata.createInstance(
            maxFailureToRampPause, maxFailureToRampDown, isPercentageScaleForMaxFailure),
        ExecutableRamp.State.createInstance(
            startTime, endTime, lastUpdatedTime,
            numTrail, numSuccess, numFailure, numIgnored,
            isPaused, rampStage, isActive)
    );
  }

  public String getId() {
    return id;
  }

  public String getPolicy() {
    return policy;
  }

  public boolean isActive() {
    long timeDiff = this.state.startTime - System.currentTimeMillis();
    boolean isActive = this.state.isActive && (!this.state.isPaused) && (timeDiff <= 0);
    if (!isActive) {
      LOGGER.info("[Ramp Is Isolated] (isActive = {}, isPaused = {}, timeDiff = {})",
          this.state.isActive, this.state.isPaused, timeDiff);
    }
    return isActive;
  }

  public boolean isChanged() {
    return !this.state.isSynchronized;
  }

  public boolean isPaused() {
    return this.state.isPaused;
  }

  public boolean isNotTestable() {
    return (!this.state.isActive || this.state.isPaused || (this.state.rampStage <= 0));
  }

  public int getStage() {
    return this.state.rampStage;
  }

  public long getStartTime() {
    return this.state.startTime;
  }

  public long getEndTime() {
    return this.state.endTime;
  }

  public long getLastUpdatedTime() {
    return this.state.lastUpdatedTime;
  }

  public int getMaxFailureToRampDown() {
    return this.metadata.maxFailureToRampDown;
  }

  public int getMaxFailureToPause() {
    return this.metadata.maxFailureToPause;
  }

  public boolean isPercentageScaleForMaxFailure() {
    return this.metadata.isPercentageScaleForMaxFailure;
  }

  public int getCount(@NotNull CountType countType) {
    return getCount(countType, false);
  }

  public int getCachedCount(@NotNull CountType countType) {
    return getCount(countType, true);
  }

  private int getCount(@NotNull CountType countType, boolean isCached) {
    int value = 0;
    switch (countType) {
      case TRAIL:
        value = isCached ? this.state.cachedNumOfTrail : this.state.numOfTrail;
        break;
      case SUCCESS:
        value = isCached ? this.state.cachedNumOfSuccess : this.state.numOfSuccess;
        break;
      case FAILURE:
        value = isCached ? this.state.cachedNumOfFailure : this.state.numOfFailure;
        break;
      default:
        value = isCached ? this.state.cachedNumOfIgnored : this.state.numOfIgnored;
        break;
    }
    return value;
  }

  public synchronized void rampUp(@NotNull final int maxStage) {
    lock.lock();
    try {
      int currentRampStage = this.state.rampStage;
      this.state.rampStage = currentRampStage + 1;
      this.state.lastUpdatedTime = System.currentTimeMillis();
      this.state.lastRampDownTime = 0;
      if (this.state.rampStage >= maxStage) {
        this.state.endTime = this.state.lastUpdatedTime;
      }
      this.state.isSynchronized = false;
      LOGGER.info("[Ramp Up] Synchronized flag of ramp (id = {}) is set to false "
              + "after ramping up from stage {} to stage {} at {}.",
          this.id, currentRampStage, this.state.rampStage, this.state.lastUpdatedTime);
    } finally {
      lock.unlock();
    }
  }

  public synchronized void rampDown() {
    lock.lock();
    try {
      int currentStage = this.state.rampStage;
      this.state.rampStage = currentStage - 1;
      this.state.lastRampDownTime = System.currentTimeMillis();
      this.state.isSynchronized = false;
      LOGGER.info("[Ramp Down] Synchronized flag of ramp (id = {}) is set to false "
              + "after ramping down from stage {} to stage {} at {}.",
          this.id, currentStage, this.state.rampStage, this.state.lastRampDownTime);
    } finally {
      lock.unlock();
    }
  }

  public synchronized void cacheResult(Action action) {
    lock.lock();
    try {
      this.state.cachedNumOfTrail++;
      switch (action) {
        case SUCCEEDED:
          this.state.cachedNumOfSuccess++;
          break;
        case FAILED:
          this.state.cachedNumOfFailure++;
          break;
        default:
          this.state.cachedNumOfIgnored++;
          break;
      }
      this.state.lastUpdatedTime = System.currentTimeMillis();

      // verify the failure threshold
      int trails = this.state.numOfTrail + this.state.cachedNumOfTrail;
      int fails = this.state.numOfFailure + this.state.cachedNumOfFailure;
      int failure = this.metadata.isPercentageScaleForMaxFailure
          ? (trails == 0) ? 100 : (int) ((fails * 100.0) / (trails * 1.0))
          : fails;

      LOGGER.info(
          "[Ramp Cached Result] (id = {}, action: {}, {} failure: {}, numOfTrail ({}, {}), "
              + "numOfSuccess: ({}, {}), numOfFailure: ({}, {}), numOfIgnore: ({}, {}))",
          this.id, action.name(),
          this.metadata.isPercentageScaleForMaxFailure ? "Percentage" : " ", failure,
          this.state.numOfTrail, this.state.cachedNumOfTrail,
          this.state.numOfSuccess, this.state.cachedNumOfSuccess,
          this.state.numOfFailure, this.state.cachedNumOfFailure,
          this.state.numOfIgnored, this.state.cachedNumOfIgnored);

      if (this.metadata.maxFailureToRampDown != 0) {
        if (failure > this.metadata.maxFailureToRampDown) {
          if (this.state.rampStage > 0) {
            if (TimeUtils.timeEscapedOver(this.state.lastRampDownTime, ONE_DAY)) {
              int currentStage = this.state.rampStage;
              this.rampDown();
              LOGGER.warn(
                  "[RAMP DOWN] (rampId = {}, failure = {}, threshold = {}, from stage {} to stage {}.)",
                  this.getId(), failure, this.metadata.maxFailureToRampDown,
                  currentStage, this.state.rampStage);
            }
          }
        }
      }

      if (this.metadata.maxFailureToPause != 0) {
        if (failure > this.metadata.maxFailureToPause) {
          this.state.isPaused = true;
          LOGGER.info("[RAMP STOP] (rampId = {}, failure = {}, threshold = {}, timestamp = {})",
              this.getId(), failure, this.metadata.maxFailureToPause, System.currentTimeMillis());
        }
      }

      this.state.isSynchronized = false;
      LOGGER.info("[Ramping] Synchronized flag of ramp (id = {}) is set to false at {} on stage {}.",
          this.id, this.state.lastUpdatedTime, this.state.rampStage);
    } finally {
      lock.unlock();
    }
  }

  public synchronized void cacheSaved() {
    lock.lock();
    try {
      int ttlTrail = this.state.numOfTrail + this.state.cachedNumOfTrail;
      int ttlSuccess = this.state.numOfSuccess + this.state.cachedNumOfSuccess;
      int ttlFailure = this.state.numOfFailure + this.state.cachedNumOfFailure;
      int ttlIgnored = this.state.numOfIgnored + this.state.cachedNumOfIgnored;
      this.state.numOfTrail = ttlTrail;
      this.state.numOfSuccess = ttlSuccess;
      this.state.numOfFailure = ttlFailure;
      this.state.numOfIgnored = ttlIgnored;
      this.state.cachedNumOfTrail = 0;
      this.state.cachedNumOfSuccess = 0;
      this.state.cachedNumOfFailure = 0;
      this.state.cachedNumOfIgnored = 0;
      this.state.isSynchronized = true;
    } finally {
      lock.unlock();
    }
  }

  @Override
  public ExecutableRamp refresh(ExecutableRamp source) {
    lock.lock();
    try {
      if (source.getId().equalsIgnoreCase(this.id)) {
        this.policy = source.policy;
        this.state.refresh(source.state);
        this.metadata.refresh(source.metadata);
      }
    } finally {
      lock.unlock();
    }
    return this;
  }

  @Override
  public ExecutableRamp clone() {
    return new ExecutableRamp(this.id, this.policy, this.metadata.clone(), this.state.clone());
  }

  @Override
  public int elementCount() {
    return 1;
  }
}
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutableRampDependency.java
/*
 * Copyright 2019 LinkedIn Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package azkaban.executor;

import azkaban.utils.StringUtils;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

/**
 * Object of Executable Ramp Dependency
 */
public final class ExecutableRampDependency implements IRefreshable<ExecutableRampDependency> {

  private static final String DELIMITER = ",";

  private volatile String defaultValue = null;
  private volatile Set<String> associatedJobTypes = null;

  private ExecutableRampDependency() {
  }

  public static ExecutableRampDependency createInstance() {
    return new ExecutableRampDependency();
  }

  public String getDefaultValue() {
    return defaultValue;
  }

  public ExecutableRampDependency setDefaultValue(final String defaultValue) {
    this.defaultValue = StringUtils.isEmpty(defaultValue) ? null : defaultValue.trim();
    return this;
  }

  public Set<String> getAssociatedJobTypes() {
    return associatedJobTypes;
  }

  public ExecutableRampDependency setAssociatedJobTypes(Set<String> associatedJobTypes) {
    this.associatedJobTypes = associatedJobTypes;
    return this;
  }

  public ExecutableRampDependency setAssociatedJobTypes(final String jobTypes) {
    this.associatedJobTypes = StringUtils.isEmpty(jobTypes)
        ? null
        : new HashSet<>(Arrays.asList(jobTypes.split(DELIMITER)));
    return this;
  }

  @Override
  public ExecutableRampDependency refresh(ExecutableRampDependency source) {
    this.defaultValue = source.getDefaultValue();
    this.associatedJobTypes = source.getAssociatedJobTypes();
    return this;
  }

  @Override
  public ExecutableRampDependency clone() {
    // Guard against NPE: associatedJobTypes may legitimately be null,
    // meaning the dependency applies to all job types.
    Set<String> clonedAssociatedJobTypes = (this.associatedJobTypes == null)
        ? null
        : new HashSet<>(this.associatedJobTypes);
    return ExecutableRampDependency
        .createInstance()
        .setDefaultValue(this.getDefaultValue())
        .setAssociatedJobTypes(clonedAssociatedJobTypes);
  }

  @Override
  public int elementCount() {
    return 1;
  }
}
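// Editor's note: illustrative addition, not in the original source; all values are hypothetical.
import azkaban.executor.ExecutableRampDependency;

class ExecutableRampDependencySketch {
  public static void main(String[] args) {
    ExecutableRampDependency dep = ExecutableRampDependency.createInstance()
        .setDefaultValue("  ...data-pig-default.jar  ") // trimmed on set
        .setAssociatedJobTypes("pig,pigLi");            // comma-delimited string form
    System.out.println(dep.getDefaultValue());          // ...data-pig-default.jar
    System.out.println(dep.getAssociatedJobTypes());    // e.g. [pigLi, pig] (set order unspecified)
  }
}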
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutableRampDependencyMap.java
/*
 * Copyright 2019 LinkedIn Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package azkaban.executor;

import com.sun.istack.NotNull;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;

/**
 * Map Object of Executable Ramp Dependency, Map.key = dependencyId
 */
public final class ExecutableRampDependencyMap
    extends BaseRefreshableMap<String, ExecutableRampDependency> {

  private ExecutableRampDependencyMap() {
    super();
  }

  public static ExecutableRampDependencyMap createInstance() {
    return new ExecutableRampDependencyMap();
  }

  /**
   * Add a new dependency's default setting.
   *
   * @param dependency dependency
   * @param defaultValue default dependency value
   * @param jobTypes job types
   * @return this
   */
  public synchronized ExecutableRampDependencyMap add(@NotNull final String dependency,
      final String defaultValue, final String jobTypes) {
    this.add(
        dependency,
        ExecutableRampDependency
            .createInstance()
            .setDefaultValue(defaultValue)
            .setAssociatedJobTypes(jobTypes)
    );
    return this;
  }

  /**
   * Get the default value.
   *
   * @param dependency dependency name
   * @return default dependency value
   */
  public String getDefaultValue(@NotNull final String dependency) {
    return Optional.ofNullable(this.get(dependency))
        .map(ExecutableRampDependency::getDefaultValue)
        .orElse(null);
  }

  /**
   * Get the map of default values for the given set of dependencies.
   *
   * @param dependencies dependencies
   * @return map of default values
   */
  public Map<String, String> getDefaultValues(@NotNull final Set<String> dependencies) {
    return dependencies.stream().collect(Collectors.toMap(
        dependency -> dependency,
        dependency -> getDefaultValue(dependency)
    ));
  }

  /**
   * Check whether the dependency is associated with the particular job type.
   *
   * @param dependency dependency name
   * @param jobType job type name
   * @return true/false
   */
  public synchronized boolean isValidJobType(@NotNull final String dependency,
      @NotNull final String jobType) {
    // If no job types are associated, the ramp is valid for all job types.
    return Optional.ofNullable(this.get(dependency))
        .map(dp -> Optional.ofNullable(dp.getAssociatedJobTypes())
            .map(set -> set.contains(jobType))
            .orElse(true))
        .orElse(false);
  }

  @Override
  public ExecutableRampDependencyMap clone() {
    return (ExecutableRampDependencyMap) super.clone();
  }
}
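// Editor's note: illustrative addition, not in the original source; ids are hypothetical.
// A dependency with no associated job types is treated as valid for all job types.
import azkaban.executor.ExecutableRampDependencyMap;

class ExecutableRampDependencyMapSketch {
  public static void main(String[] args) {
    ExecutableRampDependencyMap map = ExecutableRampDependencyMap.createInstance()
        .add("jar:dali-data-pig", "...pig-default.jar", "pig")
        .add("jar:dali-data-all", "...all-default.jar", "");
    System.out.println(map.isValidJobType("jar:dali-data-pig", "spark")); // false
    System.out.println(map.isValidJobType("jar:dali-data-all", "spark")); // true
    System.out.println(map.isValidJobType("jar:unknown", "spark"));       // false
  }
}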
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutableRampExceptionalFlowItemsMap.java
/*
 * Copyright 2019 LinkedIn Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package azkaban.executor;

import com.sun.istack.NotNull;
import java.util.Optional;

/**
 * Map of Executable Ramp Exceptional Items at Flow Level, Map.key = rampId
 */
public final class ExecutableRampExceptionalFlowItemsMap
    extends BaseRefreshableMap<String, ExecutableRampExceptionalItems> {

  private ExecutableRampExceptionalFlowItemsMap() {
    super();
  }

  public static ExecutableRampExceptionalFlowItemsMap createInstance() {
    return new ExecutableRampExceptionalFlowItemsMap();
  }

  public ExecutableRampExceptionalFlowItemsMap add(@NotNull final String rampId,
      @NotNull final String flowId, @NotNull final ExecutableRampStatus treatment,
      final long timeStamp) {
    return add(rampId, flowId, treatment, timeStamp, false);
  }

  public ExecutableRampExceptionalFlowItemsMap add(@NotNull final String rampId,
      @NotNull final String flowId, @NotNull final ExecutableRampStatus treatment,
      final long timeStamp, boolean isCacheOnly) {
    if (this.containsKey(rampId)) {
      this.get(rampId).add(flowId, treatment, timeStamp, isCacheOnly);
    } else {
      this.put(rampId,
          ExecutableRampExceptionalItems.createInstance()
              .add(flowId, treatment, timeStamp, isCacheOnly));
    }
    return this;
  }

  public ExecutableRampExceptionalItems.RampRecord get(@NotNull final String rampId,
      @NotNull final String flowId) {
    return Optional.ofNullable(this.get(rampId))
        .map(items -> items.getItems().get(flowId))
        .orElse(null);
  }

  public boolean exists(@NotNull final String rampId, @NotNull final String flowId) {
    return Optional.ofNullable(this.get(rampId))
        .map(items -> items.exists(flowId))
        .orElse(false);
  }

  public ExecutableRampStatus check(@NotNull final String rampId, @NotNull final String flowId) {
    return Optional.ofNullable(this.get(rampId))
        .map(table -> table.get(flowId))
        .map(ExecutableRampExceptionalItems.RampRecord::getStatus)
        .orElse(ExecutableRampStatus.UNDETERMINED);
  }

  @Override
  public ExecutableRampExceptionalFlowItemsMap clone() {
    return (ExecutableRampExceptionalFlowItemsMap) super.clone();
  }
}
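// Editor's note: illustrative addition, not in the original source; ids are hypothetical.
import azkaban.executor.ExecutableRampExceptionalFlowItemsMap;
import azkaban.executor.ExecutableRampStatus;

class ExceptionalFlowItemsMapSketch {
  public static void main(String[] args) {
    ExecutableRampExceptionalFlowItemsMap map =
        ExecutableRampExceptionalFlowItemsMap.createInstance();
    map.add("dali", "projectX.flowY", ExecutableRampStatus.WHITELISTED,
        System.currentTimeMillis());
    System.out.println(map.check("dali", "projectX.flowY")); // WHITELISTED
    System.out.println(map.check("dali", "projectX.flowZ")); // UNDETERMINED (no record)
  }
}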
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutableRampExceptionalItems.java
/*
 * Copyright 2019 LinkedIn Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package azkaban.executor;

import com.sun.istack.NotNull;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;

/**
 * Object of Executable Ramp Exceptional Items
 */
public final class ExecutableRampExceptionalItems
    implements IRefreshable<ExecutableRampExceptionalItems> {

  private volatile Hashtable<String, RampRecord> items = new Hashtable<>();

  private ExecutableRampExceptionalItems() {
  }

  public static ExecutableRampExceptionalItems createInstance() {
    return new ExecutableRampExceptionalItems();
  }

  public Hashtable<String, RampRecord> getItems() {
    return items;
  }

  public ExecutableRampExceptionalItems setItems(Hashtable<String, RampRecord> items) {
    this.items = items;
    return this;
  }

  public RampRecord get(final String key) {
    return this.items.get(key);
  }

  public ExecutableRampStatus getStatus(final String key) {
    return Optional.ofNullable(get(key))
        .map(RampRecord::getStatus)
        .orElse(ExecutableRampStatus.UNDETERMINED);
  }

  public boolean exists(final String key) {
    return this.items.containsKey(key);
  }

  public List<Map.Entry<String, RampRecord>> getCachedItems() {
    return this.getItems()
        .entrySet()
        .stream()
        .filter(item -> item.getValue().isCachedOnly())
        .collect(Collectors.toList());
  }

  public void resetCacheFlag() {
    this.getCachedItems().forEach(item -> item.getValue().resetCachedOnly());
  }

  public ExecutableRampExceptionalItems add(
      @NotNull final String key, @NotNull final ExecutableRampStatus treatment,
      final long timeStamp) {
    return add(key, treatment, timeStamp, false);
  }

  public ExecutableRampExceptionalItems add(
      @NotNull final String key, @NotNull final ExecutableRampStatus treatment,
      final long timeStamp, boolean isCacheOnly) {
    this.items.put(key, RampRecord.createInstance(treatment, timeStamp, isCacheOnly));
    return this;
  }

  @Override
  public ExecutableRampExceptionalItems refresh(ExecutableRampExceptionalItems source) {
    Set<String> mergedKeys = new HashSet<>();
    mergedKeys.addAll(this.items.keySet());
    mergedKeys.addAll(source.items.keySet());
    mergedKeys.forEach(key -> {
      if (this.items.containsKey(key)) {
        if (source.items.containsKey(key)) {
          this.items.put(key, source.items.get(key));
        } else {
          this.items.remove(key);
        }
      } else {
        this.items.put(key, source.items.get(key));
      }
    });
    return this;
  }

  @Override
  public ExecutableRampExceptionalItems clone() {
    Hashtable<String, RampRecord> clonedItems = new Hashtable<>();
    clonedItems.putAll(this.getItems());
    return ExecutableRampExceptionalItems
        .createInstance()
        .setItems(clonedItems);
  }

  @Override
  public int elementCount() {
    return this.items.size();
  }

  public static class RampRecord {

    private final ExecutableRampStatus status;
    private final long timeStamp;
    private boolean isCachedOnly = false;

    public RampRecord(ExecutableRampStatus status, long timeStamp, boolean isCachedOnly) {
      this.status = status;
      this.timeStamp = timeStamp;
      this.isCachedOnly = isCachedOnly;
    }

    public static RampRecord createInstance(ExecutableRampStatus status, long timeStamp) {
      return new RampRecord(status, timeStamp, false);
    }

    public static RampRecord createInstance(ExecutableRampStatus status, long timeStamp,
        boolean isCachedOnly) {
      return new RampRecord(status, timeStamp, isCachedOnly);
    }

    public RampRecord setCachedOnly() {
      this.isCachedOnly = true;
      return this;
    }

    public RampRecord resetCachedOnly() {
      this.isCachedOnly = false;
      return this;
    }

    public boolean isCachedOnly() {
      return this.isCachedOnly;
    }

    public ExecutableRampStatus getStatus() {
      return this.status;
    }

    public long getTimeStamp() {
      return this.timeStamp;
    }
  }
}
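// Editor's note: illustrative addition, not in the original source; keys are hypothetical.
// It shows the merge semantics of refresh(): the local copy is made to mirror the source.
import azkaban.executor.ExecutableRampExceptionalItems;
import azkaban.executor.ExecutableRampStatus;

class ExceptionalItemsRefreshSketch {
  public static void main(String[] args) {
    ExecutableRampExceptionalItems local = ExecutableRampExceptionalItems.createInstance()
        .add("jobA", ExecutableRampStatus.BLACKLISTED, 1L)
        .add("jobB", ExecutableRampStatus.WHITELISTED, 2L);
    ExecutableRampExceptionalItems source = ExecutableRampExceptionalItems.createInstance()
        .add("jobB", ExecutableRampStatus.EXCLUDED, 3L);

    local.refresh(source);                       // jobA is dropped, jobB is overwritten
    System.out.println(local.exists("jobA"));    // false
    System.out.println(local.getStatus("jobB")); // EXCLUDED
  }
}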
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutableRampExceptionalJobItemsMap.java
/*
 * Copyright 2019 LinkedIn Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package azkaban.executor;

import azkaban.utils.Pair;
import com.sun.istack.NotNull;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;

/**
 * Map of Executable Ramp Exceptional Items at Job Level, Map.key = Pair(rampId, flowId)
 */
public final class ExecutableRampExceptionalJobItemsMap
    extends BaseRefreshableMap<Pair<String, String>, ExecutableRampExceptionalItems> {

  private ExecutableRampExceptionalJobItemsMap() {
    super();
  }

  public static ExecutableRampExceptionalJobItemsMap createInstance() {
    return new ExecutableRampExceptionalJobItemsMap();
  }

  public void add(@NotNull final String rampId, @NotNull final String flowId,
      @NotNull final String jobId, @NotNull final ExecutableRampStatus treatment,
      final long timeStamp) {
    Pair<String, String> key = new Pair<>(rampId, flowId);
    if (this.containsKey(key)) {
      this.get(key).add(jobId, treatment, timeStamp);
    } else {
      this.put(key, ExecutableRampExceptionalItems.createInstance().add(jobId, treatment, timeStamp));
    }
  }

  public ExecutableRampExceptionalItems get(@NotNull final String rampId,
      @NotNull final String flowId) {
    return this.get(new Pair<>(rampId, flowId));
  }

  public ExecutableRampExceptionalItems.RampRecord get(@NotNull final String rampId,
      @NotNull final String flowId, @NotNull final String jobId) {
    return Optional.ofNullable(this.get(rampId, flowId))
        .map(items -> items.getItems().get(jobId))
        .orElse(null);
  }

  public boolean exists(@NotNull final String rampId, @NotNull final String flowId,
      @NotNull final String jobId) {
    return Optional.ofNullable(this.get(rampId, flowId))
        .map(items -> items.exists(jobId))
        .orElse(false);
  }

  public ExecutableRampStatus check(@NotNull final String rampId, @NotNull final String flowId,
      @NotNull final String jobId) {
    return Optional.ofNullable(this.get(rampId, flowId))
        .map(exceptionalItems -> exceptionalItems.getStatus(jobId))
        .orElse(ExecutableRampStatus.UNDETERMINED);
  }

  public Map<String, ExecutableRampExceptionalItems> getExceptionalJobItemsByFlow(
      @NotNull final String flowId) {
    return this.entrySet().stream()
        .filter(entitySet -> entitySet.getKey().getSecond().equalsIgnoreCase(flowId))
        .collect(Collectors.toMap(
            items -> items.getKey().getFirst(),
            items -> items.getValue()
        ));
  }

  @Override
  public ExecutableRampExceptionalJobItemsMap clone() {
    return (ExecutableRampExceptionalJobItemsMap) super.clone();
  }
}
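// Editor's note: illustrative addition, not in the original source; ids are hypothetical.
import azkaban.executor.ExecutableRampExceptionalJobItemsMap;
import azkaban.executor.ExecutableRampStatus;

class ExceptionalJobItemsMapSketch {
  public static void main(String[] args) {
    ExecutableRampExceptionalJobItemsMap map =
        ExecutableRampExceptionalJobItemsMap.createInstance();
    map.add("dali", "flowY", "jobA", ExecutableRampStatus.BLACKLISTED,
        System.currentTimeMillis());
    System.out.println(map.check("dali", "flowY", "jobA")); // BLACKLISTED
    // Lookup keyed by flow alone returns the per-ramp items for that flow.
    System.out.println(map.getExceptionalJobItemsByFlow("flowY").keySet()); // [dali]
  }
}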
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutableRampItems.java
/*
 * Copyright 2019 LinkedIn Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package azkaban.executor;

import azkaban.utils.Props;
import com.sun.istack.NotNull;
import java.util.Optional;
import java.util.Set;

/**
 * Object of Executable Ramp Items
 */
public final class ExecutableRampItems implements IRefreshable<ExecutableRampItems> {

  public static final String RAMP_SOURCE_NAME = "ramp";

  private volatile Props rampItems;

  private ExecutableRampItems() {
  }

  public static ExecutableRampItems createInstance() {
    return new ExecutableRampItems()
        .setRampItems(new Props().setSource(RAMP_SOURCE_NAME));
  }

  private ExecutableRampItems setRampItems(Props rampItems) {
    this.rampItems = rampItems;
    return this;
  }

  public Props getRampItems() {
    return rampItems;
  }

  public Set<String> getDependencies() {
    return this.rampItems.getKeySet();
  }

  public ExecutableRampItems addRampItem(@NotNull final String dependency,
      @NotNull final String rampValue) {
    this.rampItems.put(dependency, rampValue);
    return this;
  }

  @Override
  public ExecutableRampItems refresh(ExecutableRampItems source) {
    rampItems = source.rampItems;
    return this;
  }

  @Override
  public ExecutableRampItems clone() {
    return ExecutableRampItems
        .createInstance()
        .setRampItems(Props.clone(this.rampItems));
  }

  @Override
  public int elementCount() {
    return Optional.ofNullable(this.rampItems)
        .map(Props::size)
        .orElse(0);
  }
}
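// Editor's note: illustrative addition, not in the original source; values are hypothetical.
import azkaban.executor.ExecutableRampItems;

class ExecutableRampItemsSketch {
  public static void main(String[] args) {
    ExecutableRampItems items = ExecutableRampItems.createInstance()
        .addRampItem("jar:dali-data-pig", "...data-pig-9.2.10.jar");
    // The backing Props is tagged with the "ramp" source so downstream code can
    // distinguish ramp-injected values.
    System.out.println(items.getRampItems().getSource()); // ramp
    System.out.println(items.getDependencies());          // [jar:dali-data-pig]
  }
}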
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutableRampItemsMap.java
/*
 * Copyright 2019 LinkedIn Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package azkaban.executor;

import azkaban.utils.Props;
import com.sun.istack.NotNull;
import java.util.Collections;
import java.util.Optional;
import java.util.Set;

/**
 * Map of Executable Ramp Items, Map.key = rampId
 */
public final class ExecutableRampItemsMap extends BaseRefreshableMap<String, ExecutableRampItems> {

  private ExecutableRampItemsMap() {
    super();
  }

  public static ExecutableRampItemsMap createInstance() {
    return new ExecutableRampItemsMap();
  }

  public ExecutableRampItemsMap add(@NotNull final String rampId,
      @NotNull final String dependency, @NotNull final String rampValue) {
    ExecutableRampItems executableRampItems = this.getOrDefault(rampId, null);
    if (executableRampItems == null) {
      executableRampItems = ExecutableRampItems.createInstance();
      this.put(rampId, executableRampItems);
    }
    executableRampItems.addRampItem(dependency, rampValue);
    return this;
  }

  public Props getRampItems(@NotNull final String rampId) {
    return Optional.ofNullable(this.get(rampId))
        .map(ExecutableRampItems::getRampItems)
        .orElse(new Props());
  }

  public Set<String> getDependencies(@NotNull final String rampId) {
    return Optional.ofNullable(this.get(rampId))
        .map(ExecutableRampItems::getDependencies)
        .orElse(Collections.emptySet());
  }

  @Override
  public ExecutableRampItemsMap clone() {
    return (ExecutableRampItemsMap) super.clone();
  }
}
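// Editor's note: illustrative addition, not in the original source; ids are hypothetical.
import azkaban.executor.ExecutableRampItemsMap;

class ExecutableRampItemsMapSketch {
  public static void main(String[] args) {
    ExecutableRampItemsMap map = ExecutableRampItemsMap.createInstance()
        .add("dali", "jar:dali-data-pig", "...data-pig-9.2.10.jar");
    System.out.println(map.getDependencies("dali"));    // [jar:dali-data-pig]
    System.out.println(map.getDependencies("missing")); // [] (empty set, never null)
  }
}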
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutableRampMap.java
/*
 * Copyright 2019 LinkedIn Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package azkaban.executor;

import java.util.Collection;
import java.util.stream.Collectors;

/**
 * Map of ExecutableRamp, Map.key = RampId
 */
public class ExecutableRampMap extends BaseRefreshableMap<String, ExecutableRamp> {

  public static ExecutableRampMap createInstance() {
    return new ExecutableRampMap();
  }

  public Collection<ExecutableRamp> getActivatedAll() {
    return this.values().stream()
        .filter(ExecutableRamp::isActive)
        .collect(Collectors.toSet());
  }

  public Collection<ExecutableRamp> getAll() {
    return this.values().stream().collect(Collectors.toSet());
  }

  @Override
  public ExecutableRampMap clone() {
    return (ExecutableRampMap) super.clone();
  }
}
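// Editor's note: illustrative addition, not in the original source. It assumes
// BaseRefreshableMap exposes the standard Map put(); ids and the policy name are hypothetical.
import azkaban.executor.ExecutableRamp;
import azkaban.executor.ExecutableRampMap;

class ExecutableRampMapSketch {
  public static void main(String[] args) {
    ExecutableRampMap map = ExecutableRampMap.createInstance();
    map.put("dali", ExecutableRamp.createInstance(
        "dali", "somePolicy", 0, 0, false,
        0L, 0L, 0L, 0, 0, 0, 0, false, 1, true));
    // getActivatedAll() keeps only ramps whose isActive() gate passes
    // (active, not paused, and already past their start time).
    System.out.println(map.getActivatedAll().size()); // 1
  }
}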
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutableRampStatus.java
/*
 * Copyright 2019 LinkedIn Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package azkaban.executor;

/**
 * Object to hold the status of the current executable ramp.
 */
public enum ExecutableRampStatus {
  // Identifies an undetermined ramp status for the desired flow
  UNDETERMINED(""),

  // Identifies that the flow is qualified to ramp
  SELECTED("s"),

  // Identifies that the flow is not yet ready to ramp, per the ramp policy check
  UNSELECTED("u"),

  // A special exceptional status: the flow/job will never be ramped, regardless of the ramp policy check
  BLACKLISTED("b"),

  // A special exceptional status: the flow/job will always be ramped, regardless of the ramp policy check
  WHITELISTED("w"),

  // A special exceptional status: the flow/job is excluded from ramp management.
  // If a default global value has been set, that value is applied;
  // otherwise, the customized dependency in the workflow package is used.
  EXCLUDED("x");

  private final String key;

  ExecutableRampStatus(String key) {
    this.key = key;
  }

  public static ExecutableRampStatus of(String key) {
    if (key == null) {
      // Guard against NPE: a missing key maps to UNDETERMINED.
      return ExecutableRampStatus.UNDETERMINED;
    }
    if (key.equalsIgnoreCase(ExecutableRampStatus.SELECTED.getKey())) {
      return ExecutableRampStatus.SELECTED;
    } else if (key.equalsIgnoreCase(ExecutableRampStatus.UNSELECTED.getKey())) {
      return ExecutableRampStatus.UNSELECTED;
    } else if (key.equalsIgnoreCase(ExecutableRampStatus.BLACKLISTED.getKey())) {
      return ExecutableRampStatus.BLACKLISTED;
    } else if (key.equalsIgnoreCase(ExecutableRampStatus.WHITELISTED.getKey())) {
      return ExecutableRampStatus.WHITELISTED;
    } else if (key.equalsIgnoreCase(ExecutableRampStatus.EXCLUDED.getKey())) {
      return ExecutableRampStatus.EXCLUDED;
    }
    return ExecutableRampStatus.UNDETERMINED;
  }

  public String getKey() {
    return key;
  }
}
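// Editor's note: illustrative addition, not in the original source. It shows the
// case-insensitive mapping from the persisted one-letter key back to the enum.
import azkaban.executor.ExecutableRampStatus;

class ExecutableRampStatusSketch {
  public static void main(String[] args) {
    System.out.println(ExecutableRampStatus.of("S")); // SELECTED
    System.out.println(ExecutableRampStatus.of("w")); // WHITELISTED
    System.out.println(ExecutableRampStatus.of("?")); // UNDETERMINED (unknown key)
  }
}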
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutionAttempt.java
/*
 * Copyright 2014 LinkedIn Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package azkaban.executor;

import azkaban.utils.TypedMapWrapper;
import java.util.HashMap;
import java.util.Map;

public class ExecutionAttempt {

  public static final String ATTEMPT_PARAM = "attempt";
  public static final String STATUS_PARAM = "status";
  public static final String STARTTIME_PARAM = "startTime";
  public static final String ENDTIME_PARAM = "endTime";

  private final Status status;
  private int attempt = 0;
  private long startTime = -1;
  private long endTime = -1;

  public ExecutionAttempt(final int attempt, final ExecutableNode executable) {
    this.attempt = attempt;
    this.startTime = executable.getStartTime();
    this.endTime = executable.getEndTime();
    this.status = executable.getStatus();
  }

  public ExecutionAttempt(final int attempt, final long startTime, final long endTime,
      final Status status) {
    this.attempt = attempt;
    this.startTime = startTime;
    this.endTime = endTime;
    this.status = status;
  }

  public static ExecutionAttempt fromObject(final Object obj) {
    final Map<String, Object> map = (Map<String, Object>) obj;
    final TypedMapWrapper<String, Object> wrapper = new TypedMapWrapper<>(map);
    final int attempt = wrapper.getInt(ATTEMPT_PARAM);
    final long startTime = wrapper.getLong(STARTTIME_PARAM);
    final long endTime = wrapper.getLong(ENDTIME_PARAM);
    final Status status = Status.valueOf(wrapper.getString(STATUS_PARAM));
    return new ExecutionAttempt(attempt, startTime, endTime, status);
  }

  public long getStartTime() {
    return this.startTime;
  }

  public long getEndTime() {
    return this.endTime;
  }

  public Status getStatus() {
    return this.status;
  }

  public int getAttempt() {
    return this.attempt;
  }

  public Map<String, Object> toObject() {
    final HashMap<String, Object> attempts = new HashMap<>();
    attempts.put(ATTEMPT_PARAM, this.attempt);
    attempts.put(STARTTIME_PARAM, this.startTime);
    attempts.put(ENDTIME_PARAM, this.endTime);
    attempts.put(STATUS_PARAM, this.status.toString());
    return attempts;
  }
}
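// Editor's note: illustrative addition, not in the original source; values are hypothetical.
import azkaban.executor.ExecutionAttempt;
import azkaban.executor.Status;

class ExecutionAttemptSketch {
  public static void main(String[] args) {
    ExecutionAttempt attempt = new ExecutionAttempt(0, 100L, 200L, Status.READY);
    // toObject()/fromObject() round-trip through a plain map.
    ExecutionAttempt restored = ExecutionAttempt.fromObject(attempt.toObject());
    System.out.println(restored.getAttempt() + " " + restored.getStatus()); // 0 READY
  }
}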
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutionController.java
/*
 * Copyright 2018 LinkedIn Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package azkaban.executor;

import azkaban.Constants.ConfigurationKeys;
import azkaban.event.EventHandler;
import azkaban.flow.FlowUtils;
import azkaban.metrics.CommonMetrics;
import azkaban.project.Project;
import azkaban.project.ProjectWhitelist;
import azkaban.utils.FileIOUtils.LogData;
import azkaban.utils.Pair;
import azkaban.utils.Props;
import java.io.IOException;
import java.lang.Thread.State;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Controls flow executions on the web server. This module implements the polling model
 * in the new AZ dispatching design. It's injected only when azkaban.poll.model is configured to
 * true. It will ultimately replace ExecutorManager in the future.
 */
@Singleton
public class ExecutionController extends EventHandler implements ExecutorManagerAdapter {

  private static final Logger logger = LoggerFactory.getLogger(ExecutionController.class);
  private static final Duration RECENTLY_FINISHED_LIFETIME = Duration.ofMinutes(10);

  private final ExecutorLoader executorLoader;
  private final ExecutorApiGateway apiGateway;
  private final AlerterHolder alerterHolder;
  private final ExecutorHealthChecker executorHealthChecker;
  private final int maxConcurrentRunsOneFlow;
  private final Map<Pair<String, String>, Integer> maxConcurrentRunsPerFlowMap;
  private final CommonMetrics commonMetrics;
  private final Props azkProps;

  @Inject
  ExecutionController(final Props azkProps, final ExecutorLoader executorLoader,
      final CommonMetrics commonMetrics, final ExecutorApiGateway apiGateway,
      final AlerterHolder alerterHolder, final ExecutorHealthChecker executorHealthChecker) {
    this.azkProps = azkProps;
    this.executorLoader = executorLoader;
    this.commonMetrics = commonMetrics;
    this.apiGateway = apiGateway;
    this.alerterHolder = alerterHolder;
    this.executorHealthChecker = executorHealthChecker;
    this.maxConcurrentRunsOneFlow = ExecutorUtils.getMaxConcurrentRunsOneFlow(azkProps);
    this.maxConcurrentRunsPerFlowMap = ExecutorUtils.getMaxConcurentRunsPerFlowMap(azkProps);
  }

  @Override
  public void setupExecutors() throws ExecutorManagerException {
    // Todo: deprecate this method
  }

  @Override
  public void disableQueueProcessorThread() {
    // Todo: deprecate this method
  }

  @Override
  public void enableQueueProcessorThread() {
    // Todo: deprecate this method
  }

  @Override
  public State getExecutorManagerThreadState() {
    // Todo: deprecate this method
    return State.RUNNABLE;
  }

  @Override
  public boolean isExecutorManagerThreadActive() {
    // Todo: deprecate this method
    return true;
  }

  @Override
  public long getLastExecutorManagerThreadCheckTime() {
    // Todo: deprecate this method
    return 1L;
  }

  @Override
  public Collection<Executor> getAllActiveExecutors() {
    List<Executor> executors = new ArrayList<>();
    try {
      executors = this.executorLoader.fetchActiveExecutors();
    } catch (final ExecutorManagerException e) {
      logger.error("Failed to get all active executors.", e);
    }
    return executors;
  }

  @Override
  public Executor fetchExecutor(final int executorId) throws ExecutorManagerException {
    return this.executorLoader.fetchExecutor(executorId);
  }

  @Override
  public Set<String> getPrimaryServerHosts() {
    final HashSet<String> ports = new HashSet<>();
    try {
      for (final Executor executor : this.executorLoader.fetchActiveExecutors()) {
        ports.add(executor.getHost() + ":" + executor.getPort());
      }
    } catch (final ExecutorManagerException e) {
      logger.error("Failed to get primary server hosts.", e);
    }
    return ports;
  }

  @Override
  public Set<String> getAllActiveExecutorServerHosts() {
    final Set<String> ports = getPrimaryServerHosts();
    // Include executors which were initially active and still have flows running.
    try {
      for (final Pair<ExecutionReference, ExecutableFlow> running : this.executorLoader
          .fetchActiveFlows().values()) {
        final ExecutionReference ref = running.getFirst();
        if (ref.getExecutor().isPresent()) {
          final Executor executor = ref.getExecutor().get();
          ports.add(executor.getHost() + ":" + executor.getPort());
        }
      }
    } catch (final ExecutorManagerException e) {
      logger.error("Failed to get all active executor server hosts.", e);
    }
    return ports;
  }

  /**
   * Gets a list of all the unfinished (both dispatched and non-dispatched) executions for a
   * given project and flow {@inheritDoc}.
   *
   * @see azkaban.executor.ExecutorManagerAdapter#getRunningFlows(int, java.lang.String)
   */
  @Override
  public List<Integer> getRunningFlows(final int projectId, final String flowId) {
    final List<Integer> executionIds = new ArrayList<>();
    try {
      executionIds.addAll(getRunningFlowsHelper(projectId, flowId,
          this.executorLoader.fetchUnfinishedFlows().values()));
    } catch (final ExecutorManagerException e) {
      logger.error("Failed to get running flows for project " + projectId + ", flow "
          + flowId, e);
    }
    return executionIds;
  }

  /* Helper method for getRunningFlows */
  private List<Integer> getRunningFlowsHelper(final int projectId, final String flowId,
      final Collection<Pair<ExecutionReference, ExecutableFlow>> collection) {
    final List<Integer> executionIds = new ArrayList<>();
    for (final Pair<ExecutionReference, ExecutableFlow> ref : collection) {
      if (ref.getSecond().getFlowId().equals(flowId)
          && ref.getSecond().getProjectId() == projectId) {
        executionIds.add(ref.getFirst().getExecId());
      }
    }
    return executionIds;
  }

  @Override
  public List<Pair<ExecutableFlow, Optional<Executor>>> getActiveFlowsWithExecutor()
      throws IOException {
    final List<Pair<ExecutableFlow, Optional<Executor>>> flows = new ArrayList<>();
    try {
      getActiveFlowsWithExecutorHelper(flows, this.executorLoader.fetchUnfinishedFlows().values());
    } catch (final ExecutorManagerException e) {
      logger.error("Failed to get active flows with executor.", e);
    }
    return flows;
  }

  /* Helper method for getActiveFlowsWithExecutor */
  private void getActiveFlowsWithExecutorHelper(
      final List<Pair<ExecutableFlow, Optional<Executor>>> flows,
      final Collection<Pair<ExecutionReference, ExecutableFlow>> collection) {
    for (final Pair<ExecutionReference, ExecutableFlow> ref : collection) {
      flows.add(new Pair<>(ref.getSecond(), ref.getFirst().getExecutor()));
    }
  }

  /**
   * Checks whether the given flow has an active (running, non-dispatched) execution in the
   * database. {@inheritDoc}
   */
  @Override
  public boolean isFlowRunning(final int projectId, final String flowId) {
    boolean isRunning = false;
    try {
      isRunning = isFlowRunningHelper(projectId, flowId,
          this.executorLoader.fetchUnfinishedFlows().values());
    } catch (final ExecutorManagerException e) {
      logger.error(
          "Failed to check if the flow is running for project " + projectId + ", flow " + flowId,
          e);
    }
    return isRunning;
  }

  /* Search for a running flow in a collection */
  private boolean isFlowRunningHelper(final int projectId, final String flowId,
      final Collection<Pair<ExecutionReference, ExecutableFlow>> collection) {
    for (final Pair<ExecutionReference, ExecutableFlow> ref : collection) {
      if (ref.getSecond().getProjectId() == projectId
          && ref.getSecond().getFlowId().equals(flowId)) {
        return true;
      }
    }
    return false;
  }

  /**
   * Fetch the ExecutableFlow from the database. {@inheritDoc}
   */
  @Override
  public ExecutableFlow getExecutableFlow(final int execId) throws ExecutorManagerException {
    return this.executorLoader.fetchExecutableFlow(execId);
  }

  /**
   * Get all running (unfinished) flows from the database. {@inheritDoc}
   */
  @Override
  public List<ExecutableFlow> getRunningFlows() {
    final ArrayList<ExecutableFlow> flows = new ArrayList<>();
    try {
      getFlowsHelper(flows, this.executorLoader.fetchUnfinishedFlows().values());
    } catch (final ExecutorManagerException e) {
      logger.error("Failed to get running flows.", e);
    }
    return flows;
  }

  /**
   * Helper method to get all flows from a collection.
   */
  private void getFlowsHelper(final ArrayList<ExecutableFlow> flows,
      final Collection<Pair<ExecutionReference, ExecutableFlow>> collection) {
    collection.stream().forEach(ref -> flows.add(ref.getSecond()));
  }

  /**
   * Get execution ids of all running (unfinished) flows from the database.
   */
  public List<Integer> getRunningFlowIds() {
    final List<Integer> allIds = new ArrayList<>();
    try {
      getExecutionIdsHelper(allIds, this.executorLoader.fetchUnfinishedFlows().values());
    } catch (final ExecutorManagerException e) {
      logger.error("Failed to get running flow ids.", e);
    }
    return allIds;
  }

  /**
   * Get execution ids of all non-dispatched flows from the database.
   */
  public List<Integer> getQueuedFlowIds() {
    final List<Integer> allIds = new ArrayList<>();
    try {
      getExecutionIdsHelper(allIds, this.executorLoader.fetchQueuedFlows());
    } catch (final ExecutorManagerException e) {
      logger.error("Failed to get queued flow ids.", e);
    }
    return allIds;
  }

  /* Helper method to get all execution ids from a collection in sorted order. */
  private void getExecutionIdsHelper(final List<Integer> allIds,
      final Collection<Pair<ExecutionReference, ExecutableFlow>> collection) {
    collection.stream().forEach(ref -> allIds.add(ref.getSecond().getExecutionId()));
    Collections.sort(allIds);
  }

  /**
   * Get the number of non-dispatched flows from the database. {@inheritDoc}
   */
  @Override
  public long getQueuedFlowSize() {
    long size = 0L;
    try {
      size = this.executorLoader.fetchQueuedFlows().size();
    } catch (final ExecutorManagerException e) {
      logger.error("Failed to get queued flow size.", e);
    }
    return size;
  }

  @Override
  public List<ExecutableFlow> getRecentlyFinishedFlows() {
    List<ExecutableFlow> flows = new ArrayList<>();
    try {
      flows = this.executorLoader.fetchRecentlyFinishedFlows(RECENTLY_FINISHED_LIFETIME);
    } catch (final ExecutorManagerException e) {
      logger.error("Failed to fetch recently finished flows.", e);
    }
    return flows;
  }

  @Override
  public List<ExecutableFlow> getExecutableFlows(final int skip, final int size)
      throws ExecutorManagerException {
    final List<ExecutableFlow> flows = this.executorLoader.fetchFlowHistory(skip, size);
    return flows;
  }

  @Override
  public List<ExecutableFlow> getExecutableFlows(final String flowIdContains,
      final int skip, final int size) throws ExecutorManagerException {
    final List<ExecutableFlow> flows = this.executorLoader.fetchFlowHistory(null,
        '%' + flowIdContains + '%', null, 0, -1, -1, skip, size);
    return flows;
  }

  @Override
  public List<ExecutableFlow> getExecutableFlows(final String projContain,
      final String flowContain, final String userContain, final int status,
      final long begin, final long end, final int skip, final int size)
      throws ExecutorManagerException {
    final List<ExecutableFlow> flows = this.executorLoader.fetchFlowHistory(projContain,
        flowContain, userContain, status, begin, end, skip, size);
    return flows;
  }

  @Override
  public List<ExecutableJobInfo> getExecutableJobs(final Project project,
      final String jobId, final int skip, final int size) throws ExecutorManagerException {
    final List<ExecutableJobInfo> nodes = this.executorLoader.fetchJobHistory(project.getId(),
        jobId, skip, size);
    return nodes;
  }

  @Override
  public int getNumberOfJobExecutions(final Project project, final String jobId)
      throws ExecutorManagerException {
    return this.executorLoader.fetchNumExecutableNodes(project.getId(), jobId);
  }

  @Override
  public LogData getExecutableFlowLog(final ExecutableFlow exFlow, final int offset,
      final int length) throws ExecutorManagerException {
    final Pair<ExecutionReference, ExecutableFlow> pair = this.executorLoader
        .fetchActiveFlowByExecId(exFlow.getExecutionId());
    if (pair != null) {
      final Pair<String, String> typeParam = new Pair<>("type", "flow");
      final Pair<String, String> offsetParam = new Pair<>("offset", String.valueOf(offset));
      final Pair<String, String> lengthParam = new Pair<>("length", String.valueOf(length));
      @SuppressWarnings("unchecked") final Map<String, Object> result =
          this.apiGateway.callWithReference(pair.getFirst(), ConnectorParams.LOG_ACTION,
              typeParam, offsetParam, lengthParam);
      return LogData.createLogDataFromObject(result);
    } else {
      final LogData value = this.executorLoader.fetchLogs(exFlow.getExecutionId(), "", 0,
          offset, length);
      return value;
    }
  }

  @Override
  public LogData getExecutionJobLog(final ExecutableFlow exFlow, final String jobId,
      final int offset, final int length, final int attempt) throws ExecutorManagerException {
    final Pair<ExecutionReference, ExecutableFlow> pair = this.executorLoader
        .fetchActiveFlowByExecId(exFlow.getExecutionId());
    if (pair != null) {
      final Pair<String, String> typeParam = new Pair<>("type", "job");
      final Pair<String, String> jobIdParam = new Pair<>("jobId", jobId);
      final Pair<String, String> offsetParam = new Pair<>("offset", String.valueOf(offset));
      final Pair<String, String> lengthParam = new Pair<>("length", String.valueOf(length));
      final Pair<String, String> attemptParam = new Pair<>("attempt", String.valueOf(attempt));
      @SuppressWarnings("unchecked") final Map<String, Object> result =
          this.apiGateway.callWithReference(pair.getFirst(), ConnectorParams.LOG_ACTION,
              typeParam, jobIdParam, offsetParam, lengthParam, attemptParam);
      return LogData.createLogDataFromObject(result);
    } else {
      final LogData value = this.executorLoader.fetchLogs(exFlow.getExecutionId(), jobId,
          attempt, offset, length);
      return value;
    }
  }

  @Override
  public List<Object> getExecutionJobStats(final ExecutableFlow exFlow, final String jobId,
      final int attempt) throws ExecutorManagerException {
    final Pair<ExecutionReference, ExecutableFlow> pair =
        this.executorLoader.fetchActiveFlowByExecId(exFlow.getExecutionId());
    if (pair == null) {
      return this.executorLoader.fetchAttachments(exFlow.getExecutionId(), jobId, attempt);
    }

    final Pair<String, String> jobIdParam = new Pair<>("jobId", jobId);
    final Pair<String, String> attemptParam = new Pair<>("attempt", String.valueOf(attempt));
    @SuppressWarnings("unchecked") final Map<String, Object> result =
        this.apiGateway.callWithReference(pair.getFirst(), ConnectorParams.ATTACHMENTS_ACTION,
            jobIdParam, attemptParam);
    @SuppressWarnings("unchecked") final List<Object> jobStats =
        (List<Object>) result.get("attachments");
    return jobStats;
  }

  /**
   * If the Resource Manager and Job History server urls are configured, find all the
   * Hadoop/Spark application ids present in the Azkaban job's log and then construct the url to
   * job logs in the Hadoop/Spark server for each application id found. Application ids are
   * returned in the order they appear in the Azkaban job log.
   *
   * @param exFlow The executable flow.
   * @param jobId The job id.
   * @param attempt The job execution attempt.
   * @return The map of (application id, job log url)
   */
  @Override
  public Map<String, String> getExternalJobLogUrls(final ExecutableFlow exFlow,
      final String jobId, final int attempt) {
    final Map<String, String> jobLogUrlsByAppId = new LinkedHashMap<>();
    if (!this.azkProps.containsKey(ConfigurationKeys.RESOURCE_MANAGER_JOB_URL)
        || !this.azkProps.containsKey(ConfigurationKeys.HISTORY_SERVER_JOB_URL)
        || !this.azkProps.containsKey(ConfigurationKeys.SPARK_HISTORY_SERVER_JOB_URL)) {
      return jobLogUrlsByAppId;
    }
    final Set<String> applicationIds = getApplicationIds(exFlow, jobId, attempt);
    for (final String applicationId : applicationIds) {
      final String jobLogUrl = ExecutionControllerUtils
          .createJobLinkUrl(exFlow, jobId, applicationId, this.azkProps);
      if (jobLogUrl != null) {
        jobLogUrlsByAppId.put(applicationId, jobLogUrl);
      }
    }
    return jobLogUrlsByAppId;
  }

  /**
   * Find all the Hadoop/Spark application ids present in the Azkaban job log. When iterating
   * over the set returned by this method the application ids are in the same order they appear
   * in the log.
   *
   * @param exFlow The executable flow.
   * @param jobId The job id.
   * @param attempt The job execution attempt.
   * @return The application ids found.
   */
  Set<String> getApplicationIds(final ExecutableFlow exFlow, final String jobId,
      final int attempt) {
    final Set<String> applicationIds = new LinkedHashSet<>();
    int offset = 0;
    try {
      LogData data = getExecutionJobLog(exFlow, jobId, offset, 50000, attempt);
      while (data != null && data.getLength() > 0) {
        logger.info("Get application ID for execution " + exFlow.getExecutionId() + ", job "
            + jobId + ", attempt " + attempt + ", data offset " + offset);
        String logData = data.getData();
        final int indexOfLastSpace = logData.lastIndexOf(' ');
        final int indexOfLastTab = logData.lastIndexOf('\t');
        final int indexOfLastEoL = logData.lastIndexOf('\n');
        final int indexOfLastDelim = Math
            .max(indexOfLastEoL, Math.max(indexOfLastSpace, indexOfLastTab));
        if (indexOfLastDelim > -1) {
          // index + 1 to avoid looping forever if indexOfLastDelim is zero
          logData = logData.substring(0, indexOfLastDelim + 1);
        }
        applicationIds.addAll(ExecutionControllerUtils.findApplicationIdsFromLog(logData));
        offset = data.getOffset() + logData.length();
        data = getExecutionJobLog(exFlow, jobId, offset, 50000, attempt);
      }
    } catch (final ExecutorManagerException e) {
      logger.error("Failed to get application ID for execution " + exFlow.getExecutionId()
          + ", job " + jobId + ", attempt " + attempt + ", data offset " + offset, e);
    }
    return applicationIds;
  }

  /**
   * If a flow is already dispatched to an executor, cancel it by calling that executor. Else if
   * it's still queued in the DB, remove it from the DB queue and finalize it. {@inheritDoc}
   */
  @Override
  public void cancelFlow(final ExecutableFlow exFlow, final String userId)
      throws ExecutorManagerException {
    synchronized (exFlow) {
      final Map<Integer, Pair<ExecutionReference, ExecutableFlow>> unfinishedFlows =
          this.executorLoader.fetchUnfinishedFlows();
      if (unfinishedFlows.containsKey(exFlow.getExecutionId())) {
        final Pair<ExecutionReference, ExecutableFlow> pair =
            unfinishedFlows.get(exFlow.getExecutionId());
        if (pair.getFirst().getExecutor().isPresent()) {
          // Flow is already dispatched to an executor, so call that executor to cancel the flow.
          this.apiGateway
              .callWithReferenceByUser(pair.getFirst(), ConnectorParams.CANCEL_ACTION, userId);
        } else {
          // Flow is still queued, need to finalize it and update the status in the DB.
          ExecutionControllerUtils.finalizeFlow(this.executorLoader, this.alerterHolder, exFlow,
              "Cancelled before dispatching to executor", null);
        }
      } else {
        throw new ExecutorManagerException("Execution " + exFlow.getExecutionId() + " of flow "
            + exFlow.getFlowId() + " isn't running.");
      }
    }
  }

  @Override
  public void resumeFlow(final ExecutableFlow exFlow, final String userId)
      throws ExecutorManagerException {
    synchronized (exFlow) {
      final Pair<ExecutionReference, ExecutableFlow> pair =
          this.executorLoader.fetchActiveFlowByExecId(exFlow.getExecutionId());
      if (pair == null) {
        throw new ExecutorManagerException("Execution " + exFlow.getExecutionId() + " of flow "
            + exFlow.getFlowId() + " isn't running.");
      }
      this.apiGateway
          .callWithReferenceByUser(pair.getFirst(), ConnectorParams.RESUME_ACTION, userId);
    }
  }

  @Override
  public void pauseFlow(final ExecutableFlow exFlow, final String userId)
      throws ExecutorManagerException {
    synchronized (exFlow) {
      final Pair<ExecutionReference, ExecutableFlow> pair =
          this.executorLoader.fetchActiveFlowByExecId(exFlow.getExecutionId());
      if (pair == null) {
        throw new ExecutorManagerException("Execution " + exFlow.getExecutionId() + " of flow "
            + exFlow.getFlowId() + " isn't running.");
      }
      this.apiGateway
          .callWithReferenceByUser(pair.getFirst(), ConnectorParams.PAUSE_ACTION, userId);
    }
  }

  @Override
  public void retryFailures(final ExecutableFlow exFlow, final String userId)
      throws ExecutorManagerException {
    modifyExecutingJobs(exFlow, ConnectorParams.MODIFY_RETRY_FAILURES, userId);
  }

  @SuppressWarnings("unchecked")
  private Map<String, Object> modifyExecutingJobs(final ExecutableFlow exFlow,
      final String command, final String userId, final String... jobIds)
      throws ExecutorManagerException {
    synchronized (exFlow) {
      final Pair<ExecutionReference, ExecutableFlow> pair =
          this.executorLoader.fetchActiveFlowByExecId(exFlow.getExecutionId());
      if (pair == null) {
        throw new ExecutorManagerException("Execution " + exFlow.getExecutionId() + " of flow "
            + exFlow.getFlowId() + " isn't running.");
      }

      final Map<String, Object> response;
      if (jobIds != null && jobIds.length > 0) {
        for (final String jobId : jobIds) {
          if (!jobId.isEmpty()) {
            final ExecutableNode node = exFlow.getExecutableNode(jobId);
            if (node == null) {
              throw new ExecutorManagerException("Job " + jobId + " doesn't exist in execution "
                  + exFlow.getExecutionId() + ".");
            }
          }
        }
        final String ids = StringUtils.join(jobIds, ',');
        response = this.apiGateway.callWithReferenceByUser(pair.getFirst(),
            ConnectorParams.MODIFY_EXECUTION_ACTION, userId,
            new Pair<>(ConnectorParams.MODIFY_EXECUTION_ACTION_TYPE, command),
            new Pair<>(ConnectorParams.MODIFY_JOBS_LIST, ids));
      } else {
        response = this.apiGateway.callWithReferenceByUser(pair.getFirst(),
            ConnectorParams.MODIFY_EXECUTION_ACTION, userId,
            new Pair<>(ConnectorParams.MODIFY_EXECUTION_ACTION_TYPE, command));
      }
      return response;
    }
  }

  @Override
  public Map<String, String> doRampActions(List<Map<String, Object>> rampActions)
      throws ExecutorManagerException {
    return this.executorLoader.doRampActions(rampActions);
  }

  /**
   * When a flow is submitted, insert a new execution into the database queue. {@inheritDoc}
   */
  @Override
  public String submitExecutableFlow(final ExecutableFlow exflow, final String userId)
      throws ExecutorManagerException {
    if (exflow.isLocked()) {
      // Skip execution for locked flows.
      final String message = String.format("Flow %s for project %s is locked.", exflow.getId(),
          exflow.getProjectName());
      logger.info(message);
      return message;
    }

    final String exFlowKey = exflow.getProjectName() + "." + exflow.getId() + ".submitFlow";
    // Use the project and flow name to prevent a race condition when the same flow is submitted
    // by the API and the scheduler at the same time, which would otherwise cause two identical
    // submissions to enter this block.
    synchronized (exFlowKey.intern()) {
      final String flowId = exflow.getFlowId();
      logger.info("Submitting execution flow " + flowId + " by " + userId);
      String message = "";

      final int projectId = exflow.getProjectId();
      exflow.setSubmitUser(userId);
      exflow.setStatus(Status.PREPARING);
      exflow.setSubmitTime(System.currentTimeMillis());

      final List<Integer> running = getRunningFlows(projectId, flowId);

      ExecutionOptions options = exflow.getExecutionOptions();
      if (options == null) {
        options = new ExecutionOptions();
      }

      if (options.getDisabledJobs() != null) {
        FlowUtils.applyDisabledJobs(options.getDisabledJobs(), exflow);
      }

      if (!running.isEmpty()) {
        final int maxConcurrentRuns = ExecutorUtils.getMaxConcurrentRunsForFlow(
            exflow.getProjectName(), flowId, this.maxConcurrentRunsOneFlow,
            this.maxConcurrentRunsPerFlowMap);
        if (running.size() > maxConcurrentRuns) {
          this.commonMetrics.markSubmitFlowSkip();
          throw new ExecutorManagerException("Flow " + flowId + " has more than "
              + maxConcurrentRuns + " concurrent runs. Skipping",
              ExecutorManagerException.Reason.SkippedExecution);
        } else if (options.getConcurrentOption().equals(
            ExecutionOptions.CONCURRENT_OPTION_PIPELINE)) {
          Collections.sort(running);
          final Integer runningExecId = running.get(running.size() - 1);
          options.setPipelineExecutionId(runningExecId);
          message = "Flow " + flowId + " is already running with exec id " + runningExecId
              + ". Pipelining level " + options.getPipelineLevel() + ". \n";
        } else if (options.getConcurrentOption().equals(
            ExecutionOptions.CONCURRENT_OPTION_SKIP)) {
          this.commonMetrics.markSubmitFlowSkip();
          throw new ExecutorManagerException("Flow " + flowId
              + " is already running. Skipping execution.",
              ExecutorManagerException.Reason.SkippedExecution);
        } else {
          message = "Flow " + flowId + " is already running with exec id "
              + StringUtils.join(running, ",") + ". Will execute concurrently. \n";
        }
      }

      final boolean memoryCheck = !ProjectWhitelist.isProjectWhitelisted(exflow.getProjectId(),
          ProjectWhitelist.WhitelistType.MemoryCheck);
      options.setMemoryCheck(memoryCheck);

      // The exflow id is set by the loader, so it's unavailable until after this call.
      this.executorLoader.uploadExecutableFlow(exflow);
      this.commonMetrics.markSubmitFlowSuccess();
      message += "Execution queued successfully with exec id " + exflow.getExecutionId();
      return message;
    }
  }

  @Override
  public Map<String, Object> callExecutorStats(final int executorId, final String action,
      final Pair<String, String>... params) throws IOException, ExecutorManagerException {
    final Executor executor = fetchExecutor(executorId);
    final List<Pair<String, String>> paramList = new ArrayList<>();
    if (params != null) {
      paramList.addAll(Arrays.asList(params));
    }
    paramList.add(new Pair<>(ConnectorParams.ACTION_PARAM, action));
    return this.apiGateway.callForJsonObjectMap(executor.getHost(), executor.getPort(),
        "/stats", paramList);
  }

  @Override
  public Map<String, Object> callExecutorJMX(final String hostPort, final String action,
      final String mBean) throws IOException {
    final List<Pair<String, String>> paramList = new ArrayList<>();
    paramList.add(new Pair<>(action, ""));
    if (mBean != null) {
      paramList.add(new Pair<>(ConnectorParams.JMX_MBEAN, mBean));
    }
    final String[] hostPortSplit = hostPort.split(":");
    return this.apiGateway.callForJsonObjectMap(hostPortSplit[0],
        Integer.valueOf(hostPortSplit[1]), "/jmx", paramList);
  }

  @Override
  public void start() {
    this.executorHealthChecker.start();
  }

  @Override
  public void shutdown() {
    this.executorHealthChecker.shutdown();
  }

  @Override
  public int getExecutableFlows(final int projectId, final String flowId, final int from,
      final int length, final List<ExecutableFlow> outputList) throws ExecutorManagerException {
    final List<ExecutableFlow> flows = this.executorLoader.fetchFlowHistory(projectId, flowId,
        from, length);
    outputList.addAll(flows);
    return this.executorLoader.fetchNumExecutableFlows(projectId, flowId);
  }

  @Override
  public List<ExecutableFlow> getExecutableFlows(final int projectId, final String flowId,
      final int from, final int length, final Status status) throws ExecutorManagerException {
    return this.executorLoader.fetchFlowHistory(projectId, flowId, from, length, status);
  }
}
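A sketch of the per-flow locking idea submitExecutableFlow() relies on (illustrative, not part of the original file): synchronizing on the interned "project.flow" key serializes duplicate submissions of one flow without blocking submissions of other flows. The class and method below are hypothetical.

class PerFlowLockDemo {
  static void submit(final String projectName, final String flowId) {
    final String exFlowKey = projectName + "." + flowId + ".submitFlow";
    // intern() returns the canonical String instance, so every thread submitting
    // the same project/flow pair synchronizes on the same monitor object.
    synchronized (exFlowKey.intern()) {
      // ... check running executions, apply concurrency options, queue the new run ...
    }
  }
}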
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutionControllerUtils.java
/*
 * Copyright 2018 LinkedIn Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package azkaban.executor;

import static java.util.Objects.requireNonNull;

import azkaban.Constants.ConfigurationKeys;
import azkaban.alert.Alerter;
import azkaban.utils.AuthenticationUtils;
import azkaban.utils.Props;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.annotation.Nullable;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Utils for controlling executions.
 */
public class ExecutionControllerUtils {

  private static final Logger logger = LoggerFactory.getLogger(ExecutionControllerUtils.class);
  private static final String SPARK_JOB_TYPE = "spark";
  private static final String APPLICATION_ID = "${application.id}";
  // The regex to look for while fetching application IDs from the Hadoop/Spark job log
  private static final Pattern APPLICATION_ID_PATTERN = Pattern.compile("application_(\\d+_\\d+)");
  // The regexes to look for while validating the content from the RM job link
  private static final Pattern FAILED_TO_READ_APPLICATION_PATTERN = Pattern
      .compile("Failed to read the application");
  private static final Pattern INVALID_APPLICATION_ID_PATTERN = Pattern
      .compile("Invalid Application ID");

  /**
   * If the current status of the execution is not one of the finished statuses, mark the execution
   * as failed in the DB.
   *
   * @param executorLoader the executor loader
   * @param alerterHolder the alerter holder
   * @param flow the execution
   * @param reason reason for finalizing the execution
   * @param originalError the cause, if execution is being finalized because of an error
   */
  public static void finalizeFlow(final ExecutorLoader executorLoader,
      final AlerterHolder alerterHolder, final ExecutableFlow flow, final String reason,
      @Nullable final Throwable originalError) {
    boolean alertUser = true;

    // First check if the execution in the datastore is finished.
    try {
      final ExecutableFlow dsFlow;
      if (isFinished(flow)) {
        dsFlow = flow;
      } else {
        dsFlow = executorLoader.fetchExecutableFlow(flow.getExecutionId());

        // If it's marked finished, we're good. If not, we fail everything and then mark it
        // finished.
        if (!isFinished(dsFlow)) {
          failEverything(dsFlow);
          executorLoader.updateExecutableFlow(dsFlow);
        }
      }

      if (flow.getEndTime() == -1) {
        flow.setEndTime(System.currentTimeMillis());
        executorLoader.updateExecutableFlow(dsFlow);
      }
    } catch (final ExecutorManagerException e) {
      // If it failed due to an Azkaban internal error, do not alert the user.
      alertUser = false;
      logger.error("Failed to finalize flow " + flow.getExecutionId() + ", do not alert user.", e);
    }

    if (alertUser) {
      alertUserOnFlowFinished(flow, alerterHolder, getFinalizeFlowReasons(reason, originalError));
    }
  }

  /**
   * When a flow is finished, alert the user as is configured in the execution options.
   *
   * @param flow the execution
   * @param alerterHolder the alerter holder
   * @param extraReasons the extra reasons for alerting
   */
  public static void alertUserOnFlowFinished(final ExecutableFlow flow,
      final AlerterHolder alerterHolder, final String[] extraReasons) {
    final ExecutionOptions options = flow.getExecutionOptions();
    final Alerter mailAlerter = alerterHolder.get("email");
    if (flow.getStatus() != Status.SUCCEEDED) {
      if (options.getFailureEmails() != null && !options.getFailureEmails().isEmpty()) {
        try {
          mailAlerter.alertOnError(flow, extraReasons);
        } catch (final Exception e) {
          logger.error("Failed to alert on error for execution " + flow.getExecutionId(), e);
        }
      }
      if (options.getFlowParameters().containsKey("alert.type")) {
        final String alertType = options.getFlowParameters().get("alert.type");
        final Alerter alerter = alerterHolder.get(alertType);
        if (alerter != null) {
          try {
            alerter.alertOnError(flow, extraReasons);
          } catch (final Exception e) {
            logger.error("Failed to alert on error by " + alertType + " for execution "
                + flow.getExecutionId(), e);
          }
        } else {
          logger.error("Alerter type " + alertType + " doesn't exist. Failed to alert.");
        }
      }
    } else {
      if (options.getSuccessEmails() != null && !options.getSuccessEmails().isEmpty()) {
        try {
          mailAlerter.alertOnSuccess(flow);
        } catch (final Exception e) {
          logger.error("Failed to alert on success for execution " + flow.getExecutionId(), e);
        }
      }
      if (options.getFlowParameters().containsKey("alert.type")) {
        final String alertType = options.getFlowParameters().get("alert.type");
        final Alerter alerter = alerterHolder.get(alertType);
        if (alerter != null) {
          try {
            alerter.alertOnSuccess(flow);
          } catch (final Exception e) {
            logger.error("Failed to alert on success by " + alertType + " for execution "
                + flow.getExecutionId(), e);
          }
        } else {
          logger.error("Alerter type " + alertType + " doesn't exist. Failed to alert.");
        }
      }
    }
  }

  /**
   * Alert the user when the flow has encountered the first error.
   *
   * @param flow the execution
   * @param alerterHolder the alerter holder
   */
  public static void alertUserOnFirstError(final ExecutableFlow flow,
      final AlerterHolder alerterHolder) {
    final ExecutionOptions options = flow.getExecutionOptions();
    if (options.getNotifyOnFirstFailure()) {
      logger.info("Alert on first error of execution " + flow.getExecutionId());
      final Alerter mailAlerter = alerterHolder.get("email");
      try {
        mailAlerter.alertOnFirstError(flow);
      } catch (final Exception e) {
        logger.error("Failed to send first error email. " + e.getMessage(), e);
      }

      if (options.getFlowParameters().containsKey("alert.type")) {
        final String alertType = options.getFlowParameters().get("alert.type");
        final Alerter alerter = alerterHolder.get(alertType);
        if (alerter != null) {
          try {
            alerter.alertOnFirstError(flow);
          } catch (final Exception e) {
            logger.error("Failed to alert by " + alertType, e);
          }
        } else {
          logger.error("Alerter type " + alertType + " doesn't exist. Failed to alert.");
        }
      }
    }
  }

  /**
   * Get the reasons to finalize the flow.
   *
   * @param reason the reason
   * @param originalError the original error
   * @return the reasons to finalize the flow
   */
  public static String[] getFinalizeFlowReasons(final String reason,
      final Throwable originalError) {
    final List<String> reasons = new LinkedList<>();
    reasons.add(reason);
    if (originalError != null) {
      reasons.add(ExceptionUtils.getStackTrace(originalError));
    }
    return reasons.toArray(new String[reasons.size()]);
  }

  /**
   * Set the flow status to failed and fail every node inside the flow.
   *
   * @param exFlow the executable flow
   */
  public static void failEverything(final ExecutableFlow exFlow) {
    final long time = System.currentTimeMillis();
    for (final ExecutableNode node : exFlow.getExecutableNodes()) {
      switch (node.getStatus()) {
        case SUCCEEDED:
        case FAILED:
        case KILLED:
        case SKIPPED:
        case DISABLED:
          continue;
          // case UNKNOWN:
        case READY:
          node.setStatus(Status.KILLING);
          break;
        default:
          node.setStatus(Status.FAILED);
          break;
      }

      if (node.getStartTime() == -1) {
        node.setStartTime(time);
      }
      if (node.getEndTime() == -1) {
        node.setEndTime(time);
      }
    }

    if (exFlow.getEndTime() == -1) {
      exFlow.setEndTime(time);
    }
    exFlow.setStatus(Status.FAILED);
  }

  /**
   * Check if the flow status is finished.
   *
   * @param flow the executable flow
   * @return the boolean
   */
  public static boolean isFinished(final ExecutableFlow flow) {
    switch (flow.getStatus()) {
      case SUCCEEDED:
      case FAILED:
      case KILLED:
        return true;
      default:
        return false;
    }
  }

  /**
   * Dynamically create the job link url. Construct the job link url from the resource manager
   * url. If it's valid, just return the job link url. Otherwise, construct the job link url from
   * the Hadoop/Spark job history server.
   *
   * @param exFlow The executable flow.
   * @param jobId The job id.
   * @param applicationId The application id.
   * @param azkProps The azkaban props.
   * @return the job link url.
   */
  public static String createJobLinkUrl(final ExecutableFlow exFlow, final String jobId,
      final String applicationId, final Props azkProps) {
    if (applicationId == null) {
      return null;
    }

    final URL url;
    final String jobLinkUrl;
    boolean isRMJobLinkValid = true;

    try {
      url = new URL(azkProps.getString(ConfigurationKeys.RESOURCE_MANAGER_JOB_URL)
          .replace(APPLICATION_ID, applicationId));
      final String keytabPrincipal = requireNonNull(
          azkProps.getString(ConfigurationKeys.AZKABAN_KERBEROS_PRINCIPAL));
      final String keytabPath = requireNonNull(
          azkProps.getString(ConfigurationKeys.AZKABAN_KEYTAB_PATH));
      final HttpURLConnection connection = AuthenticationUtils.loginAuthenticatedURL(url,
          keytabPrincipal, keytabPath);

      try (final BufferedReader in = new BufferedReader(
          new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) {
        String inputLine;
        while ((inputLine = in.readLine()) != null) {
          if (FAILED_TO_READ_APPLICATION_PATTERN.matcher(inputLine).find()) {
            logger.info("RM job link has expired for application_" + applicationId);
            isRMJobLinkValid = false;
            break;
          }
          if (INVALID_APPLICATION_ID_PATTERN.matcher(inputLine).find()) {
            logger.info("Invalid application id application_" + applicationId);
            return null;
          }
        }
      }
    } catch (final Exception e) {
      logger.error("Failed to get job link for application_" + applicationId, e);
      return null;
    }

    if (isRMJobLinkValid) {
      jobLinkUrl = url.toString();
    } else {
      // If the RM job url has expired, build the url to the JHS or SHS instead.
      final ExecutableNode node = exFlow.getExecutableNodePath(jobId);
      if (node == null) {
        logger.error("Failed to create job url. Job " + jobId + " doesn't exist in "
            + exFlow.getExecutionId());
        return null;
      }
      if (node.getType().equals(SPARK_JOB_TYPE)) {
        jobLinkUrl = azkProps.get(ConfigurationKeys.SPARK_HISTORY_SERVER_JOB_URL)
            .replace(APPLICATION_ID, applicationId);
      } else {
        jobLinkUrl = azkProps.get(ConfigurationKeys.HISTORY_SERVER_JOB_URL)
            .replace(APPLICATION_ID, applicationId);
      }
    }
    logger.info("Job link url is " + jobLinkUrl + " for execution " + exFlow.getExecutionId()
        + ", job " + jobId);
    return jobLinkUrl;
  }

  /**
   * Find all the application ids the job log data contains by matching the "application_<id>"
   * pattern. Application ids are returned in the order they appear.
   *
   * @param logData The log data.
   * @return The set of application ids found.
   */
  public static Set<String> findApplicationIdsFromLog(final String logData) {
    final Set<String> applicationIds = new LinkedHashSet<>();
    final Matcher matcher = APPLICATION_ID_PATTERN.matcher(logData);

    while (matcher.find()) {
      final String appId = matcher.group(1);
      applicationIds.add(appId);
    }
    logger.info("Application Ids found: " + applicationIds.toString());
    return applicationIds;
  }
}
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutionFinalizer.java
/*
 * Copyright 2018 LinkedIn Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package azkaban.executor;

import javax.annotation.Nullable;
import javax.inject.Inject;
import org.apache.log4j.Logger;

/**
 * Handles removal of running executions (after they have been deemed to be done or orphaned).
 */
public class ExecutionFinalizer {

  private static final Logger logger = Logger.getLogger(ExecutionFinalizer.class);

  private final ExecutorLoader executorLoader;
  private final ExecutorManagerUpdaterStage updaterStage;
  private final AlerterHolder alerterHolder;
  private final RunningExecutions runningExecutions;

  @Inject
  public ExecutionFinalizer(final ExecutorLoader executorLoader,
      final ExecutorManagerUpdaterStage updaterStage, final AlerterHolder alerterHolder,
      final RunningExecutions runningExecutions) {
    this.executorLoader = executorLoader;
    this.updaterStage = updaterStage;
    this.alerterHolder = alerterHolder;
    this.runningExecutions = runningExecutions;
  }

  /**
   * If the current status of the execution is not one of the finished statuses, marks the
   * execution as failed in the DB. Removes the execution from the running executions cache.
   *
   * @param flow the execution
   * @param reason reason for finalizing the execution
   * @param originalError the cause, if execution is being finalized because of an error
   */
  public void finalizeFlow(final ExecutableFlow flow, final String reason,
      @Nullable final Throwable originalError) {
    final int execId = flow.getExecutionId();
    boolean alertUser = true;
    this.updaterStage.set("finalizing flow " + execId);
    // First we check if the execution in the datastore is complete
    try {
      final ExecutableFlow dsFlow;
      if (ExecutionControllerUtils.isFinished(flow)) {
        dsFlow = flow;
      } else {
        this.updaterStage.set("finalizing flow " + execId + " loading from db");
        dsFlow = this.executorLoader.fetchExecutableFlow(execId);

        // If it's marked finished, we're good. If not, we fail everything and
        // then mark it finished.
        if (!ExecutionControllerUtils.isFinished(dsFlow)) {
          this.updaterStage.set("finalizing flow " + execId + " failing the flow");
          ExecutionControllerUtils.failEverything(dsFlow);
          this.executorLoader.updateExecutableFlow(dsFlow);
        }
      }

      this.updaterStage.set("finalizing flow " + execId + " deleting active reference");

      // Delete the executing reference.
      if (flow.getEndTime() == -1) {
        flow.setEndTime(System.currentTimeMillis());
        this.executorLoader.updateExecutableFlow(dsFlow);
      }
      this.executorLoader.removeActiveExecutableReference(execId);

      this.updaterStage.set("finalizing flow " + execId + " cleaning from memory");
      this.runningExecutions.get().remove(execId);
    } catch (final ExecutorManagerException e) {
      // Failed due to an Azkaban internal error, so do not alert the user.
      alertUser = false;
      logger.error(e);
    }

    // TODO append to the flow log that we marked this flow as failed + the extraReasons
    this.updaterStage.set("finalizing flow " + execId + " alerting and emailing");
    if (alertUser) {
      ExecutionControllerUtils.alertUserOnFlowFinished(flow, this.alerterHolder,
          ExecutionControllerUtils.getFinalizeFlowReasons(reason, originalError));
    }
  }
}
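A call sketch (illustrative, not part of the original file): a caller that has decided an execution is orphaned hands it to the finalizer with a human-readable reason and the triggering error, if any. The finalizer instance is assumed to be injected as in the constructor above; the class and method names below are hypothetical.

class FinalizeOrphanDemo {
  static void onExecutorLost(final ExecutionFinalizer finalizer, final ExecutableFlow flow,
      final Throwable cause) {
    // Marks the flow failed in the DB (if not already finished), removes the active
    // reference, evicts it from the running-executions cache, and alerts the user.
    finalizer.finalizeFlow(flow, "Executor was lost", cause);
  }
}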
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutionFlowDao.java
/*
 * Copyright 2017 LinkedIn Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package azkaban.executor;

import azkaban.db.DatabaseOperator;
import azkaban.db.EncodingType;
import azkaban.db.SQLTransaction;
import azkaban.utils.GZIPUtils;
import azkaban.utils.JSONUtils;
import azkaban.utils.Pair;
import java.io.IOException;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.dbutils.ResultSetHandler;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.log4j.Logger;

@Singleton
public class ExecutionFlowDao {

  private static final Logger logger = Logger.getLogger(ExecutionFlowDao.class);
  private final DatabaseOperator dbOperator;
  private final MysqlNamedLock mysqlNamedLock;

  @Inject
  public ExecutionFlowDao(final DatabaseOperator dbOperator,
      final MysqlNamedLock mysqlNamedLock) {
    this.dbOperator = dbOperator;
    this.mysqlNamedLock = mysqlNamedLock;
  }

  public void uploadExecutableFlow(final ExecutableFlow flow)
      throws ExecutorManagerException {
    final String useExecutorParam =
        flow.getExecutionOptions().getFlowParameters().get(ExecutionOptions.USE_EXECUTOR);
    final String executorId = StringUtils.isNotEmpty(useExecutorParam) ? useExecutorParam : null;

    final String flowPriorityParam =
        flow.getExecutionOptions().getFlowParameters().get(ExecutionOptions.FLOW_PRIORITY);
    final int flowPriority = StringUtils.isNotEmpty(flowPriorityParam)
        ? Integer.parseInt(flowPriorityParam) : ExecutionOptions.DEFAULT_FLOW_PRIORITY;

    final String INSERT_EXECUTABLE_FLOW = "INSERT INTO execution_flows "
        + "(project_id, flow_id, version, status, submit_time, submit_user, update_time, "
        + "use_executor, flow_priority) values (?,?,?,?,?,?,?,?,?)";
    final long submitTime = flow.getSubmitTime();

    /**
     * Why do we need a transaction to get the last insert ID? Because
     * "SELECT LAST_INSERT_ID()" needs to use the same connection as the insert
     * of the new entry.
     * See https://dev.mysql.com/doc/refman/5.7/en/information-functions.html#function_last-insert-id
     */
    final SQLTransaction<Long> insertAndGetLastID = transOperator -> {
      transOperator.update(INSERT_EXECUTABLE_FLOW, flow.getProjectId(), flow.getFlowId(),
          flow.getVersion(), flow.getStatus().getNumVal(), submitTime, flow.getSubmitUser(),
          submitTime, executorId, flowPriority);
      transOperator.getConnection().commit();
      return transOperator.getLastInsertId();
    };

    try {
      final long id = this.dbOperator.transaction(insertAndGetLastID);
      logger.info("Flow " + flow.getFlowId() + " given id " + id);
      flow.setExecutionId((int) id);
      updateExecutableFlow(flow);
    } catch (final SQLException e) {
      throw new ExecutorManagerException("Error creating execution.", e);
    }
  }

  List<ExecutableFlow> fetchFlowHistory(final int skip, final int num)
      throws ExecutorManagerException {
    try {
      return this.dbOperator.query(FetchExecutableFlows.FETCH_ALL_EXECUTABLE_FLOW_HISTORY,
          new FetchExecutableFlows(), skip, num);
    } catch (final SQLException e) {
      throw new ExecutorManagerException("Error fetching flow history", e);
    }
  }

  List<ExecutableFlow> fetchFlowHistory(final int projectId, final String flowId,
      final int skip, final int num) throws ExecutorManagerException {
    try {
      return this.dbOperator.query(FetchExecutableFlows.FETCH_EXECUTABLE_FLOW_HISTORY,
          new FetchExecutableFlows(), projectId, flowId, skip, num);
    } catch (final SQLException e) {
      throw new ExecutorManagerException("Error fetching flow history", e);
    }
  }

  public List<Pair<ExecutionReference, ExecutableFlow>> fetchQueuedFlows()
      throws ExecutorManagerException {
    try {
      return this.dbOperator.query(FetchQueuedExecutableFlows.FETCH_QUEUED_EXECUTABLE_FLOW,
          new FetchQueuedExecutableFlows());
    } catch (final SQLException e) {
      throw new ExecutorManagerException("Error fetching queued flows", e);
    }
  }

  /**
   * Fetch flow execution history with the specified {@code projectId}, {@code flowId} and flow
   * start time >= {@code startTime}.
   *
   * @return the list of flows meeting the specified criteria
   */
  public List<ExecutableFlow> fetchFlowHistory(final int projectId, final String flowId,
      final long startTime) throws ExecutorManagerException {
    try {
      return this.dbOperator.query(FetchExecutableFlows.FETCH_EXECUTABLE_FLOW_BY_START_TIME,
          new FetchExecutableFlows(), projectId, flowId, startTime);
    } catch (final SQLException e) {
      throw new ExecutorManagerException("Error fetching historic flows", e);
    }
  }

  List<ExecutableFlow> fetchFlowHistory(final int projectId, final String flowId,
      final int skip, final int num, final Status status) throws ExecutorManagerException {
    try {
      return this.dbOperator.query(FetchExecutableFlows.FETCH_EXECUTABLE_FLOW_BY_STATUS,
          new FetchExecutableFlows(), projectId, flowId, status.getNumVal(), skip, num);
    } catch (final SQLException e) {
      throw new ExecutorManagerException("Error fetching flow history by status", e);
    }
  }

  List<ExecutableFlow> fetchRecentlyFinishedFlows(final Duration maxAge)
      throws ExecutorManagerException {
    try {
      return this.dbOperator.query(FetchRecentlyFinishedFlows.FETCH_RECENTLY_FINISHED_FLOW,
          new FetchRecentlyFinishedFlows(), System.currentTimeMillis() - maxAge.toMillis(),
          Status.SUCCEEDED.getNumVal(), Status.KILLED.getNumVal(), Status.FAILED.getNumVal());
    } catch (final SQLException e) {
      throw new ExecutorManagerException("Error fetching recently finished flows", e);
    }
  }

  List<ExecutableFlow> fetchFlowHistory(final String projectNameContains,
      final String flowNameContains, final String userNameContains, final int status,
      final long startTime, final long endTime, final int skip, final int num)
      throws ExecutorManagerException {
    String query = FetchExecutableFlows.FETCH_BASE_EXECUTABLE_FLOW_QUERY;
    final List<Object> params = new ArrayList<>();

    boolean first = true;
    if (projectNameContains != null && !projectNameContains.isEmpty()) {
      query += " JOIN projects p ON ef.project_id = p.id WHERE name LIKE ?";
      params.add('%' + projectNameContains + '%');
      first = false;
    }

    // TODO kunkun-tang: we don't need the complicated logic below. We should use a simpler way
    // to build the query.
    if (flowNameContains != null && !flowNameContains.isEmpty()) {
      if (first) {
        query += " WHERE ";
        first = false;
      } else {
        query += " AND ";
      }
      query += " flow_id LIKE ?";
      params.add('%' + flowNameContains + '%');
    }

    if (userNameContains != null && !userNameContains.isEmpty()) {
      if (first) {
        query += " WHERE ";
        first = false;
      } else {
        query += " AND ";
      }
      query += " submit_user LIKE ?";
      params.add('%' + userNameContains + '%');
    }

    if (status != 0) {
      if (first) {
        query += " WHERE ";
        first = false;
      } else {
        query += " AND ";
      }
      query += " status = ?";
      params.add(status);
    }

    if (startTime > 0) {
      if (first) {
        query += " WHERE ";
        first = false;
      } else {
        query += " AND ";
      }
      query += " start_time > ?";
      params.add(startTime);
    }

    if (endTime > 0) {
      if (first) {
        query += " WHERE ";
      } else {
        query += " AND ";
      }
      query += " end_time < ?";
      params.add(endTime);
    }

    if (skip > -1 && num > 0) {
      query += " ORDER BY exec_id DESC LIMIT ?, ?";
      params.add(skip);
      params.add(num);
    }

    try {
      return this.dbOperator.query(query, new FetchExecutableFlows(), params.toArray());
    } catch (final SQLException e) {
      throw new ExecutorManagerException("Error fetching flow history", e);
    }
  }

  void updateExecutableFlow(final ExecutableFlow flow) throws ExecutorManagerException {
    updateExecutableFlow(flow, EncodingType.GZIP);
  }

  private void updateExecutableFlow(final ExecutableFlow flow, final EncodingType encType)
      throws ExecutorManagerException {
    final String UPDATE_EXECUTABLE_FLOW_DATA = "UPDATE execution_flows "
        + "SET status=?,update_time=?,start_time=?,end_time=?,enc_type=?,flow_data=? "
        + "WHERE exec_id=?";

    byte[] data = null;
    try {
      // If this action fails, the execution must be failed.
      final String json = JSONUtils.toJSON(flow.toObject());
      final byte[] stringData = json.getBytes("UTF-8");
      data = stringData;
      // TODO kunkun-tang: use a common method to transform stringData to data.
      if (encType == EncodingType.GZIP) {
        data = GZIPUtils.gzipBytes(stringData);
      }
    } catch (final IOException e) {
      flow.setStatus(Status.FAILED);
      updateExecutableFlowStatusInDB(flow);
      throw new ExecutorManagerException("Error encoding the execution flow. Execution Id = "
          + flow.getExecutionId());
    } catch (final RuntimeException re) {
      flow.setStatus(Status.FAILED);
      // Likely due to a serialization error
      if (data == null && re instanceof NullPointerException) {
        logger.warn("Failed to serialize executable flow for " + flow.getExecutionId());
        logger.warn("NPE stacktrace: " + ExceptionUtils.getStackTrace(re));
      }
      updateExecutableFlowStatusInDB(flow);
      throw new ExecutorManagerException("Error encoding the execution flow due to "
          + "RuntimeException. Execution Id = " + flow.getExecutionId(), re);
    }

    try {
      this.dbOperator.update(UPDATE_EXECUTABLE_FLOW_DATA, flow.getStatus().getNumVal(),
          flow.getUpdateTime(), flow.getStartTime(), flow.getEndTime(), encType.getNumVal(),
          data, flow.getExecutionId());
    } catch (final SQLException e) {
      throw new ExecutorManagerException("Error updating flow.", e);
    }
  }

  private void updateExecutableFlowStatusInDB(final ExecutableFlow flow)
      throws ExecutorManagerException {
    final String UPDATE_FLOW_STATUS = "UPDATE execution_flows SET status = ?, update_time = ? "
        + "where exec_id = ?";
    try {
      this.dbOperator.update(UPDATE_FLOW_STATUS, flow.getStatus().getNumVal(),
          System.currentTimeMillis(), flow.getExecutionId());
    } catch (final SQLException e) {
      throw new ExecutorManagerException("Error updating flow.", e);
    }
  }

  public ExecutableFlow fetchExecutableFlow(final int execId) throws ExecutorManagerException {
    final FetchExecutableFlows flowHandler = new FetchExecutableFlows();
    try {
      final List<ExecutableFlow> properties = this.dbOperator
          .query(FetchExecutableFlows.FETCH_EXECUTABLE_FLOW, flowHandler, execId);
      if (properties.isEmpty()) {
        return null;
      } else {
        return properties.get(0);
      }
    } catch (final SQLException e) {
      throw new ExecutorManagerException("Error fetching flow id " + execId, e);
    }
  }

  /**
   * Set the executor id to null for the given execution id.
   */
  public void unsetExecutorIdForExecution(final int executionId) throws ExecutorManagerException {
    final String UNSET_EXECUTOR =
        "UPDATE execution_flows SET executor_id = null, update_time = ? where exec_id = ?";

    final SQLTransaction<Integer> unsetExecutor =
        transOperator -> transOperator.update(UNSET_EXECUTOR, System.currentTimeMillis(),
            executionId);

    try {
      this.dbOperator.transaction(unsetExecutor);
    } catch (final SQLException e) {
      throw new ExecutorManagerException("Error unsetting executor id for execution "
          + executionId, e);
    }
  }

  public int selectAndUpdateExecution(final int executorId, final boolean isActive)
      throws ExecutorManagerException {
    final String UPDATE_EXECUTION =
        "UPDATE execution_flows SET executor_id = ?, update_time = ? where exec_id = ?";
    final String selectExecutionForUpdate = isActive
        ? SelectFromExecutionFlows.SELECT_EXECUTION_FOR_UPDATE_ACTIVE
        : SelectFromExecutionFlows.SELECT_EXECUTION_FOR_UPDATE_INACTIVE;

    final SQLTransaction<Integer> selectAndUpdateExecution = transOperator -> {
      transOperator.getConnection().setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
      final List<Integer> execIds = transOperator.query(selectExecutionForUpdate,
          new SelectFromExecutionFlows(), executorId);

      int execId = -1;
      if (!execIds.isEmpty()) {
        execId = execIds.get(0);
        transOperator.update(UPDATE_EXECUTION, executorId, System.currentTimeMillis(), execId);
      }
      transOperator.getConnection().commit();
      return execId;
    };

    try {
      return this.dbOperator.transaction(selectAndUpdateExecution);
    } catch (final SQLException e) {
      throw new ExecutorManagerException("Error selecting and updating execution with executor "
          + executorId, e);
    }
  }

  public int selectAndUpdateExecutionWithLocking(final int executorId, final boolean isActive)
      throws ExecutorManagerException {
    final String UPDATE_EXECUTION =
        "UPDATE execution_flows SET executor_id = ?, update_time = ? where exec_id = ?";
    final String selectExecutionForUpdate = isActive
        ? SelectFromExecutionFlows.SELECT_EXECUTION_FOR_UPDATE_ACTIVE
        : SelectFromExecutionFlows.SELECT_EXECUTION_FOR_UPDATE_INACTIVE;

    final SQLTransaction<Integer> selectAndUpdateExecution = transOperator -> {
      final String POLLING_LOCK_NAME = "execution_flows_polling";
      final int GET_LOCK_TIMEOUT_IN_SECONDS = 5;
      int execId = -1;
      final boolean hasLocked = this.mysqlNamedLock.getLock(transOperator, POLLING_LOCK_NAME,
          GET_LOCK_TIMEOUT_IN_SECONDS);
      logger.info("ExecutionFlow polling lock value: " + hasLocked + " for executorId: "
          + executorId);
      if (hasLocked) {
        try {
          final List<Integer> execIds = transOperator.query(selectExecutionForUpdate,
              new SelectFromExecutionFlows(), executorId);
          if (CollectionUtils.isNotEmpty(execIds)) {
            execId = execIds.get(0);
            transOperator.update(UPDATE_EXECUTION, executorId, System.currentTimeMillis(),
                execId);
          }
        } finally {
          this.mysqlNamedLock.releaseLock(transOperator, POLLING_LOCK_NAME);
          logger.info("Released polling lock for executorId: " + executorId);
        }
      } else {
        logger.info("Could not acquire polling lock for executorId: " + executorId);
      }
      return execId;
    };

    try {
      return this.dbOperator.transaction(selectAndUpdateExecution);
    } catch (final SQLException e) {
      throw new ExecutorManagerException("Error selecting and updating execution with executor "
          + executorId, e);
    }
  }

  public static class SelectFromExecutionFlows implements ResultSetHandler<List<Integer>> {

    private static final String SELECT_EXECUTION_FOR_UPDATE_FORMAT =
        "SELECT exec_id from execution_flows WHERE exec_id = (SELECT exec_id from execution_flows"
            + " WHERE status = " + Status.PREPARING.getNumVal()
            + " and executor_id is NULL and flow_data is NOT NULL and %s"
            + " ORDER BY flow_priority DESC, update_time ASC, exec_id ASC LIMIT 1)"
            + " and executor_id is NULL FOR UPDATE";

    public static final String SELECT_EXECUTION_FOR_UPDATE_ACTIVE =
        String.format(SELECT_EXECUTION_FOR_UPDATE_FORMAT,
            "(use_executor is NULL or use_executor = ?)");

    public static final String SELECT_EXECUTION_FOR_UPDATE_INACTIVE =
        String.format(SELECT_EXECUTION_FOR_UPDATE_FORMAT, "use_executor = ?");

    @Override
    public List<Integer> handle(final ResultSet rs) throws SQLException {
      if (!rs.next()) {
        return Collections.emptyList();
      }
      final List<Integer> execIds = new ArrayList<>();
      do {
        final int execId = rs.getInt(1);
        execIds.add(execId);
      } while (rs.next());
      return execIds;
    }
  }

  public static class FetchExecutableFlows implements ResultSetHandler<List<ExecutableFlow>> {

    static String FETCH_EXECUTABLE_FLOW_BY_START_TIME =
        "SELECT ef.exec_id, ef.enc_type, ef.flow_data, ef.status FROM execution_flows ef WHERE "
            + "project_id=? AND flow_id=? AND start_time >= ? ORDER BY start_time DESC";
    static String FETCH_BASE_EXECUTABLE_FLOW_QUERY =
        "SELECT ef.exec_id, ef.enc_type, ef.flow_data, ef.status FROM execution_flows ef";
    static String FETCH_EXECUTABLE_FLOW =
        "SELECT exec_id, enc_type, flow_data, status FROM execution_flows "
            + "WHERE exec_id=?";
    static String FETCH_ALL_EXECUTABLE_FLOW_HISTORY =
        "SELECT exec_id, enc_type, flow_data, status FROM execution_flows "
            + "ORDER BY exec_id DESC LIMIT ?, ?";
    static String FETCH_EXECUTABLE_FLOW_HISTORY =
        "SELECT exec_id, enc_type, flow_data, status FROM execution_flows "
            + "WHERE project_id=? AND flow_id=? "
            + "ORDER BY exec_id DESC LIMIT ?, ?";
    static String FETCH_EXECUTABLE_FLOW_BY_STATUS =
        "SELECT exec_id, enc_type, flow_data, status FROM execution_flows "
            + "WHERE project_id=? AND flow_id=? AND status=? "
            + "ORDER BY exec_id DESC LIMIT ?, ?";

    @Override
    public List<ExecutableFlow> handle(final ResultSet rs) throws SQLException {
      if (!rs.next()) {
        return Collections.emptyList();
      }
      final List<ExecutableFlow> execFlows = new ArrayList<>();
      do {
        final int id = rs.getInt(1);
        final int encodingType = rs.getInt(2);
        final byte[] data = rs.getBytes(3);

        if (data != null) {
          final EncodingType encType = EncodingType.fromInteger(encodingType);
          final Status status = Status.fromInteger(rs.getInt(4));
          try {
            final ExecutableFlow exFlow = ExecutableFlow.createExecutableFlow(
                GZIPUtils.transformBytesToObject(data, encType), status);
            execFlows.add(exFlow);
          } catch (final IOException e) {
            throw new SQLException("Error retrieving flow data " + id, e);
          }
        }
      } while (rs.next());
      return execFlows;
    }
  }

  /**
   * JDBC ResultSetHandler to fetch queued executions
   */
  private static class FetchQueuedExecutableFlows
      implements ResultSetHandler<List<Pair<ExecutionReference, ExecutableFlow>>> {

    // Select queued, unassigned flows
    private static final String FETCH_QUEUED_EXECUTABLE_FLOW =
        "SELECT exec_id, enc_type, flow_data, status FROM execution_flows"
            + " WHERE executor_id is NULL AND status = " + Status.PREPARING.getNumVal();

    @Override
    public List<Pair<ExecutionReference, ExecutableFlow>> handle(final ResultSet rs)
        throws SQLException {
      if (!rs.next()) {
        return Collections.emptyList();
      }
      final List<Pair<ExecutionReference, ExecutableFlow>> execFlows = new ArrayList<>();
      do {
        final int id = rs.getInt(1);
        final int encodingType = rs.getInt(2);
        final byte[] data = rs.getBytes(3);

        if (data == null) {
          ExecutionFlowDao.logger.error("Found a flow with an empty data blob, exec_id: " + id);
        } else {
          final EncodingType encType = EncodingType.fromInteger(encodingType);
          final Status status = Status.fromInteger(rs.getInt(4));
          try {
            final ExecutableFlow exFlow = ExecutableFlow.createExecutableFlow(
                GZIPUtils.transformBytesToObject(data, encType), status);
            final ExecutionReference ref = new ExecutionReference(id);
            execFlows.add(new Pair<>(ref, exFlow));
          } catch (final IOException e) {
            throw new SQLException("Error retrieving flow data " + id, e);
          }
        }
      } while (rs.next());
      return execFlows;
    }
  }

  private static class FetchRecentlyFinishedFlows
      implements ResultSetHandler<List<ExecutableFlow>> {

    // The execution_flows table is already indexed by end_time
    private static final String FETCH_RECENTLY_FINISHED_FLOW =
        "SELECT exec_id, enc_type, flow_data, status FROM execution_flows "
            + "WHERE end_time > ? AND status IN (?, ?, ?)";

    @Override
    public List<ExecutableFlow> handle(final ResultSet rs) throws SQLException {
      if (!rs.next()) {
        return Collections.emptyList();
      }
      final List<ExecutableFlow> execFlows = new ArrayList<>();
      do {
        final int id = rs.getInt(1);
        final int encodingType = rs.getInt(2);
        final byte[] data = rs.getBytes(3);

        if (data != null) {
          final EncodingType encType = EncodingType.fromInteger(encodingType);
          final Status status = Status.fromInteger(rs.getInt(4));
          try {
            final ExecutableFlow exFlow = ExecutableFlow.createExecutableFlow(
                GZIPUtils.transformBytesToObject(data, encType), status);
            execFlows.add(exFlow);
          } catch (final IOException e) {
            throw new SQLException("Error retrieving flow data " + id, e);
          }
        }
      } while (rs.next());
      return execFlows;
    }
  }
}
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutionJobDao.java
/* * Copyright 2017 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.db.DatabaseOperator; import azkaban.utils.GZIPUtils; import azkaban.utils.JSONUtils; import azkaban.utils.Pair; import azkaban.utils.Props; import azkaban.utils.PropsUtils; import java.io.File; import java.io.IOException; import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Map; import javax.inject.Inject; import javax.inject.Singleton; import org.apache.commons.dbutils.ResultSetHandler; import org.apache.commons.io.FileUtils; import org.apache.log4j.Logger; @Singleton public class ExecutionJobDao { private static final Logger logger = Logger.getLogger(ExecutionJobDao.class); private final DatabaseOperator dbOperator; @Inject ExecutionJobDao(final DatabaseOperator databaseOperator) { this.dbOperator = databaseOperator; } public void uploadExecutableNode(final ExecutableNode node, final Props inputProps) throws ExecutorManagerException { final String INSERT_EXECUTION_NODE = "INSERT INTO execution_jobs " + "(exec_id, project_id, version, flow_id, job_id, start_time, " + "end_time, status, input_params, attempt) VALUES (?,?,?,?,?,?,?,?,?,?)"; byte[] inputParam = null; if (inputProps != null) { try { final String jsonString = JSONUtils.toJSON(PropsUtils.toHierarchicalMap(inputProps)); inputParam = GZIPUtils.gzipString(jsonString, "UTF-8"); } catch (final IOException e) { throw new ExecutorManagerException("Error encoding input params"); } } final ExecutableFlow flow = node.getExecutableFlow(); final String flowId = node.getParentFlow().getFlowPath(); logger.info("Uploading flowId " + flowId); try { this.dbOperator.update(INSERT_EXECUTION_NODE, flow.getExecutionId(), flow.getProjectId(), flow.getVersion(), flowId, node.getId(), node.getStartTime(), node.getEndTime(), node.getStatus().getNumVal(), inputParam, node.getAttempt()); } catch (final SQLException e) { throw new ExecutorManagerException("Error writing job " + node.getId(), e); } } public void updateExecutableNode(final ExecutableNode node) throws ExecutorManagerException { final String UPSERT_EXECUTION_NODE = "UPDATE execution_jobs " + "SET start_time=?, end_time=?, status=?, output_params=? " + "WHERE exec_id=? AND flow_id=? AND job_id=? 
AND attempt=?"; byte[] outputParam = null; final Props outputProps = node.getOutputProps(); if (outputProps != null) { try { final String jsonString = JSONUtils.toJSON(PropsUtils.toHierarchicalMap(outputProps)); outputParam = GZIPUtils.gzipString(jsonString, "UTF-8"); } catch (final IOException e) { throw new ExecutorManagerException("Error encoding input params"); } } try { this.dbOperator.update(UPSERT_EXECUTION_NODE, node.getStartTime(), node .getEndTime(), node.getStatus().getNumVal(), outputParam, node .getExecutableFlow().getExecutionId(), node.getParentFlow() .getFlowPath(), node.getId(), node.getAttempt()); } catch (final SQLException e) { throw new ExecutorManagerException("Error updating job " + node.getId(), e); } } public List<ExecutableJobInfo> fetchJobInfoAttempts(final int execId, final String jobId) throws ExecutorManagerException { try { final List<ExecutableJobInfo> info = this.dbOperator.query( FetchExecutableJobHandler.FETCH_EXECUTABLE_NODE_ATTEMPTS, new FetchExecutableJobHandler(), execId, jobId); if (info == null || info.isEmpty()) { return null; } else { return info; } } catch (final SQLException e) { throw new ExecutorManagerException("Error querying job info " + jobId, e); } } public ExecutableJobInfo fetchJobInfo(final int execId, final String jobId, final int attempts) throws ExecutorManagerException { try { final List<ExecutableJobInfo> info = this.dbOperator.query(FetchExecutableJobHandler.FETCH_EXECUTABLE_NODE, new FetchExecutableJobHandler(), execId, jobId, attempts); if (info == null || info.isEmpty()) { return null; } else { return info.get(0); } } catch (final SQLException e) { throw new ExecutorManagerException("Error querying job info " + jobId, e); } } public Props fetchExecutionJobInputProps(final int execId, final String jobId) throws ExecutorManagerException { try { final Pair<Props, Props> props = this.dbOperator.query( FetchExecutableJobPropsHandler.FETCH_INPUT_PARAM_EXECUTABLE_NODE, new FetchExecutableJobPropsHandler(), execId, jobId); return props.getFirst(); } catch (final SQLException e) { throw new ExecutorManagerException("Error querying job params " + execId + " " + jobId, e); } } public Props fetchExecutionJobOutputProps(final int execId, final String jobId) throws ExecutorManagerException { try { final Pair<Props, Props> props = this.dbOperator.query( FetchExecutableJobPropsHandler.FETCH_OUTPUT_PARAM_EXECUTABLE_NODE, new FetchExecutableJobPropsHandler(), execId, jobId); return props.getFirst(); } catch (final SQLException e) { throw new ExecutorManagerException("Error querying job params " + execId + " " + jobId, e); } } public Pair<Props, Props> fetchExecutionJobProps(final int execId, final String jobId) throws ExecutorManagerException { try { return this.dbOperator.query( FetchExecutableJobPropsHandler.FETCH_INPUT_OUTPUT_PARAM_EXECUTABLE_NODE, new FetchExecutableJobPropsHandler(), execId, jobId); } catch (final SQLException e) { throw new ExecutorManagerException("Error querying job params " + execId + " " + jobId, e); } } public List<ExecutableJobInfo> fetchJobHistory(final int projectId, final String jobId, final int skip, final int size) throws ExecutorManagerException { try { final List<ExecutableJobInfo> info = this.dbOperator.query(FetchExecutableJobHandler.FETCH_PROJECT_EXECUTABLE_NODE, new FetchExecutableJobHandler(), projectId, jobId, skip, size); if (info == null || info.isEmpty()) { return null; } else { return info; } } catch (final SQLException e) { throw new ExecutorManagerException("Error querying job info " + jobId, e); 
} } public List<Object> fetchAttachments(final int execId, final String jobId, final int attempt) throws ExecutorManagerException { try { final String attachments = this.dbOperator.query( FetchExecutableJobAttachmentsHandler.FETCH_ATTACHMENTS_EXECUTABLE_NODE, new FetchExecutableJobAttachmentsHandler(), execId, jobId); if (attachments == null) { return null; } else { return (List<Object>) JSONUtils.parseJSONFromString(attachments); } } catch (final IOException e) { throw new ExecutorManagerException( "Error converting job attachments to JSON " + jobId, e); } catch (final SQLException e) { throw new ExecutorManagerException( "Error query job attachments " + jobId, e); } } public void uploadAttachmentFile(final ExecutableNode node, final File file) throws ExecutorManagerException { final String UPDATE_EXECUTION_NODE_ATTACHMENTS = "UPDATE execution_jobs " + "SET attachments=? " + "WHERE exec_id=? AND flow_id=? AND job_id=? AND attempt=?"; try { final String jsonString = FileUtils.readFileToString(file); final byte[] attachments = GZIPUtils.gzipString(jsonString, "UTF-8"); this.dbOperator.update(UPDATE_EXECUTION_NODE_ATTACHMENTS, attachments, node.getExecutableFlow().getExecutionId(), node.getParentFlow() .getNestedId(), node.getId(), node.getAttempt()); } catch (final IOException | SQLException e) { throw new ExecutorManagerException("Error uploading attachments.", e); } } private static class FetchExecutableJobHandler implements ResultSetHandler<List<ExecutableJobInfo>> { private static final String FETCH_EXECUTABLE_NODE = "SELECT exec_id, project_id, version, flow_id, job_id, " + "start_time, end_time, status, attempt " + "FROM execution_jobs WHERE exec_id=? " + "AND job_id=? AND attempt=?"; private static final String FETCH_EXECUTABLE_NODE_ATTEMPTS = "SELECT exec_id, project_id, version, flow_id, job_id, " + "start_time, end_time, status, attempt FROM execution_jobs " + "WHERE exec_id=? AND job_id=?"; private static final String FETCH_PROJECT_EXECUTABLE_NODE = "SELECT exec_id, project_id, version, flow_id, job_id, " + "start_time, end_time, status, attempt FROM execution_jobs " + "WHERE project_id=? AND job_id=? " + "ORDER BY exec_id DESC LIMIT ?, ? "; @Override public List<ExecutableJobInfo> handle(final ResultSet rs) throws SQLException { if (!rs.next()) { return Collections.<ExecutableJobInfo>emptyList(); } final List<ExecutableJobInfo> execNodes = new ArrayList<>(); do { final int execId = rs.getInt(1); final int projectId = rs.getInt(2); final int version = rs.getInt(3); final String flowId = rs.getString(4); final String jobId = rs.getString(5); final long startTime = rs.getLong(6); final long endTime = rs.getLong(7); final Status status = Status.fromInteger(rs.getInt(8)); final int attempt = rs.getInt(9); final ExecutableJobInfo info = new ExecutableJobInfo(execId, projectId, version, flowId, jobId, startTime, endTime, status, attempt); execNodes.add(info); } while (rs.next()); return execNodes; } } private static class FetchExecutableJobPropsHandler implements ResultSetHandler<Pair<Props, Props>> { private static final String FETCH_OUTPUT_PARAM_EXECUTABLE_NODE = "SELECT output_params FROM execution_jobs WHERE exec_id=? AND job_id=?"; private static final String FETCH_INPUT_PARAM_EXECUTABLE_NODE = "SELECT input_params FROM execution_jobs WHERE exec_id=? AND job_id=?"; private static final String FETCH_INPUT_OUTPUT_PARAM_EXECUTABLE_NODE = "SELECT input_params, output_params " + "FROM execution_jobs WHERE exec_id=? 
AND job_id=?"; @Override public Pair<Props, Props> handle(final ResultSet rs) throws SQLException { if (!rs.next()) { return new Pair<>(null, null); } if (rs.getMetaData().getColumnCount() > 1) { final byte[] input = rs.getBytes(1); final byte[] output = rs.getBytes(2); Props inputProps = null; Props outputProps = null; try { if (input != null) { final String jsonInputString = GZIPUtils.unGzipString(input, "UTF-8"); inputProps = PropsUtils.fromHierarchicalMap((Map<String, Object>) JSONUtils .parseJSONFromString(jsonInputString)); } if (output != null) { final String jsonOutputString = GZIPUtils.unGzipString(output, "UTF-8"); outputProps = PropsUtils.fromHierarchicalMap((Map<String, Object>) JSONUtils .parseJSONFromString(jsonOutputString)); } } catch (final IOException e) { throw new SQLException("Error decoding param data", e); } return new Pair<>(inputProps, outputProps); } else { final byte[] params = rs.getBytes(1); Props props = null; try { if (params != null) { final String jsonProps = GZIPUtils.unGzipString(params, "UTF-8"); props = PropsUtils.fromHierarchicalMap((Map<String, Object>) JSONUtils .parseJSONFromString(jsonProps)); } } catch (final IOException e) { throw new SQLException("Error decoding param data", e); } return new Pair<>(props, null); } } } private static class FetchExecutableJobAttachmentsHandler implements ResultSetHandler<String> { private static final String FETCH_ATTACHMENTS_EXECUTABLE_NODE = "SELECT attachments FROM execution_jobs WHERE exec_id=? AND job_id=?"; @Override public String handle(final ResultSet rs) throws SQLException { String attachmentsJson = null; if (rs.next()) { try { final byte[] attachments = rs.getBytes(1); if (attachments != null) { attachmentsJson = GZIPUtils.unGzipString(attachments, "UTF-8"); } } catch (final IOException e) { throw new SQLException("Error decoding job attachments", e); } } return attachmentsJson; } } }
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutionLogsDao.java
/* * Copyright 2017 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.db.DatabaseOperator; import azkaban.db.DatabaseTransOperator; import azkaban.db.EncodingType; import azkaban.db.SQLTransaction; import azkaban.utils.FileIOUtils; import azkaban.utils.FileIOUtils.LogData; import azkaban.utils.GZIPUtils; import azkaban.utils.Pair; import java.io.BufferedInputStream; import java.io.ByteArrayOutputStream; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.sql.ResultSet; import java.sql.SQLException; import java.util.Arrays; import javax.inject.Inject; import javax.inject.Singleton; import org.apache.commons.dbutils.ResultSetHandler; import org.apache.commons.io.IOUtils; import org.apache.log4j.Logger; import org.joda.time.DateTime; @Singleton public class ExecutionLogsDao { private static final Logger logger = Logger.getLogger(ExecutionLogsDao.class); private final DatabaseOperator dbOperator; private final EncodingType defaultEncodingType = EncodingType.GZIP; @Inject ExecutionLogsDao(final DatabaseOperator dbOperator) { this.dbOperator = dbOperator; } // TODO kunkun-tang: the interface's parameter is called endByte, but actually is length. LogData fetchLogs(final int execId, final String name, final int attempt, final int startByte, final int length) throws ExecutorManagerException { final FetchLogsHandler handler = new FetchLogsHandler(startByte, length + startByte); try { return this.dbOperator.query(FetchLogsHandler.FETCH_LOGS, handler, execId, name, attempt, startByte, startByte + length); } catch (final SQLException e) { throw new ExecutorManagerException("Error fetching logs " + execId + " : " + name, e); } } public void uploadLogFile(final int execId, final String name, final int attempt, final File... files) throws ExecutorManagerException { final SQLTransaction<Integer> transaction = transOperator -> { uploadLogFile(transOperator, execId, name, attempt, files, this.defaultEncodingType); transOperator.getConnection().commit(); return 1; }; try { this.dbOperator.transaction(transaction); } catch (final SQLException e) { logger.error("uploadLogFile failed.", e); throw new ExecutorManagerException("uploadLogFile failed.", e); } } private void uploadLogFile(final DatabaseTransOperator transOperator, final int execId, final String name, final int attempt, final File[] files, final EncodingType encType) throws SQLException { // 50K buffer... if logs are greater than this, we chunk. // However, we better prevent large log files from being uploaded somehow final byte[] buffer = new byte[50 * 1024]; int pos = 0; int length = buffer.length; int startByte = 0; try { for (int i = 0; i < files.length; ++i) { final File file = files[i]; final BufferedInputStream bufferedStream = new BufferedInputStream(new FileInputStream(file)); try { int size = bufferedStream.read(buffer, pos, length); while (size >= 0) { if (pos + size == buffer.length) { // Flush here. 
uploadLogPart(transOperator, execId, name, attempt, startByte, startByte + buffer.length, encType, buffer, buffer.length); pos = 0; length = buffer.length; startByte += buffer.length; } else { // Usually end of file. pos += size; length = buffer.length - pos; } size = bufferedStream.read(buffer, pos, length); } } finally { IOUtils.closeQuietly(bufferedStream); } } // Final commit of buffer. if (pos > 0) { uploadLogPart(transOperator, execId, name, attempt, startByte, startByte + pos, encType, buffer, pos); } } catch (final SQLException e) { logger.error("Error writing log part.", e); throw new SQLException("Error writing log part", e); } catch (final IOException e) { logger.error("Error chunking.", e); throw new SQLException("Error chunking", e); } } int removeExecutionLogsByTime(final long millis, final int recordCleanupLimit) throws ExecutorManagerException { int totalRecordsRemoved = 0; int removedRecords; do { removedRecords = removeExecutionLogsBatch(millis, recordCleanupLimit); logger.debug("Removed batch of execution logs. Count of records removed in this batch: " + removedRecords); totalRecordsRemoved = totalRecordsRemoved + removedRecords; // Adding sleep of 1 second try { Thread.sleep(1000L); } catch (InterruptedException e) { logger.error("Execution logs cleanup thread's sleep was interrupted.", e); } } while (removedRecords == recordCleanupLimit); return totalRecordsRemoved; } int removeExecutionLogsBatch(final long millis, final int recordCleanupLimit) throws ExecutorManagerException { final String DELETE_BY_TIME = "DELETE FROM execution_logs WHERE upload_time < ? LIMIT ?"; try { return this.dbOperator.update(DELETE_BY_TIME, millis, recordCleanupLimit); } catch (final SQLException e) { logger.error("delete execution logs failed", e); throw new ExecutorManagerException( "Error deleting old execution_logs before " + millis, e); } } private void uploadLogPart(final DatabaseTransOperator transOperator, final int execId, final String name, final int attempt, final int startByte, final int endByte, final EncodingType encType, final byte[] buffer, final int length) throws SQLException, IOException { final String INSERT_EXECUTION_LOGS = "INSERT INTO execution_logs " + "(exec_id, name, attempt, enc_type, start_byte, end_byte, " + "log, upload_time) VALUES (?,?,?,?,?,?,?,?)"; byte[] buf = buffer; if (encType == EncodingType.GZIP) { buf = GZIPUtils.gzipBytes(buf, 0, length); } else if (length < buf.length) { buf = Arrays.copyOf(buffer, length); } transOperator.update(INSERT_EXECUTION_LOGS, execId, name, attempt, encType.getNumVal(), startByte, startByte + length, buf, DateTime.now() .getMillis()); } private static class FetchLogsHandler implements ResultSetHandler<LogData> { private static final String FETCH_LOGS = "SELECT exec_id, name, attempt, enc_type, start_byte, end_byte, log " + "FROM execution_logs " + "WHERE exec_id=? AND name=? AND attempt=? AND end_byte > ? " + "AND start_byte <= ? 
ORDER BY start_byte"; private final int startByte; private final int endByte; FetchLogsHandler(final int startByte, final int endByte) { this.startByte = startByte; this.endByte = endByte; } @Override public LogData handle(final ResultSet rs) throws SQLException { if (!rs.next()) { return null; } final ByteArrayOutputStream byteStream = new ByteArrayOutputStream(); do { // int execId = rs.getInt(1); // String name = rs.getString(2); final int attempt = rs.getInt(3); final EncodingType encType = EncodingType.fromInteger(rs.getInt(4)); final int startByte = rs.getInt(5); final int endByte = rs.getInt(6); final byte[] data = rs.getBytes(7); final int offset = this.startByte > startByte ? this.startByte - startByte : 0; final int length = this.endByte < endByte ? this.endByte - startByte - offset : endByte - startByte - offset; try { byte[] buffer = data; if (encType == EncodingType.GZIP) { buffer = GZIPUtils.unGzipBytes(data); } byteStream.write(buffer, offset, length); } catch (final IOException e) { throw new SQLException(e); } } while (rs.next()); final byte[] buffer = byteStream.toByteArray(); final Pair<Integer, Integer> result = FileIOUtils.getUtf8Range(buffer, 0, buffer.length); return new LogData(this.startByte + result.getFirst(), result.getSecond(), new String(buffer, result.getFirst(), result.getSecond(), StandardCharsets.UTF_8)); } } }
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutionOptions.java
/* * Copyright 2013 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.executor.mail.DefaultMailCreator; import azkaban.sla.SlaOption; import azkaban.utils.TypedMapWrapper; import com.google.gson.GsonBuilder; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; /** * Execution options for submitted flows and scheduled flows */ public class ExecutionOptions { public static final String CONCURRENT_OPTION_SKIP = "skip"; public static final String CONCURRENT_OPTION_PIPELINE = "pipeline"; public static final String CONCURRENT_OPTION_IGNORE = "ignore"; public static final String FLOW_PRIORITY = "flowPriority"; /* override dispatcher selection and use executor id specified */ public static final String USE_EXECUTOR = "useExecutor"; public static final int DEFAULT_FLOW_PRIORITY = 5; private static final String FLOW_PARAMETERS = "flowParameters"; private static final String NOTIFY_ON_FIRST_FAILURE = "notifyOnFirstFailure"; private static final String NOTIFY_ON_LAST_FAILURE = "notifyOnLastFailure"; private static final String SUCCESS_EMAILS = "successEmails"; private static final String FAILURE_EMAILS = "failureEmails"; private static final String FAILURE_ACTION = "failureAction"; private static final String PIPELINE_LEVEL = "pipelineLevel"; private static final String PIPELINE_EXECID = "pipelineExecId"; private static final String QUEUE_LEVEL = "queueLevel"; private static final String CONCURRENT_OPTION = "concurrentOption"; private static final String DISABLE = "disabled"; private static final String FAILURE_EMAILS_OVERRIDE = "failureEmailsOverride"; private static final String SUCCESS_EMAILS_OVERRIDE = "successEmailsOverride"; private static final String MAIL_CREATOR = "mailCreator"; private static final String MEMORY_CHECK = "memoryCheck"; private boolean notifyOnFirstFailure = true; private boolean notifyOnLastFailure = false; private boolean failureEmailsOverride = false; private boolean successEmailsOverride = false; private ArrayList<String> failureEmails = new ArrayList<>(); private ArrayList<String> successEmails = new ArrayList<>(); private Integer pipelineLevel = null; private Integer pipelineExecId = null; private Integer queueLevel = 0; private String concurrentOption = CONCURRENT_OPTION_IGNORE; private String mailCreator = DefaultMailCreator.DEFAULT_MAIL_CREATOR; private boolean memoryCheck = true; private Map<String, String> flowParameters = new HashMap<>(); private FailureAction failureAction = FailureAction.FINISH_CURRENTLY_RUNNING; private List<DisabledJob> initiallyDisabledJobs = new ArrayList<>(); private List<SlaOption> slaOptions = new ArrayList<>(); public static ExecutionOptions createFromObject(final Object obj) { if (obj == null || !(obj instanceof Map)) { return null; } final Map<String, Object> optionsMap = (Map<String, Object>) obj; final TypedMapWrapper<String, Object> wrapper = new TypedMapWrapper<>(optionsMap); final 
ExecutionOptions options = new ExecutionOptions(); if (optionsMap.containsKey(FLOW_PARAMETERS)) { options.flowParameters = new HashMap<>(); options.flowParameters.putAll(wrapper .<String, String>getMap(FLOW_PARAMETERS)); } // Failure notification options.notifyOnFirstFailure = wrapper.getBool(NOTIFY_ON_FIRST_FAILURE, options.notifyOnFirstFailure); options.notifyOnLastFailure = wrapper.getBool(NOTIFY_ON_LAST_FAILURE, options.notifyOnLastFailure); options.concurrentOption = wrapper.getString(CONCURRENT_OPTION, options.concurrentOption); if (wrapper.containsKey(DISABLE)) { options.initiallyDisabledJobs = DisabledJob.fromDeprecatedObjectList(wrapper .<Object>getList(DISABLE)); } if (optionsMap.containsKey(MAIL_CREATOR)) { options.mailCreator = (String) optionsMap.get(MAIL_CREATOR); } // Failure action options.failureAction = FailureAction.valueOf(wrapper.getString(FAILURE_ACTION, options.failureAction.toString())); options.pipelineLevel = wrapper.getInt(PIPELINE_LEVEL, options.pipelineLevel); options.pipelineExecId = wrapper.getInt(PIPELINE_EXECID, options.pipelineExecId); options.queueLevel = wrapper.getInt(QUEUE_LEVEL, options.queueLevel); // Success emails options.setSuccessEmails(wrapper.<String>getList(SUCCESS_EMAILS, Collections.<String>emptyList())); options.setFailureEmails(wrapper.<String>getList(FAILURE_EMAILS, Collections.<String>emptyList())); options.setSuccessEmailsOverridden(wrapper.getBool(SUCCESS_EMAILS_OVERRIDE, false)); options.setFailureEmailsOverridden(wrapper.getBool(FAILURE_EMAILS_OVERRIDE, false)); options.setMemoryCheck(wrapper.getBool(MEMORY_CHECK, true)); // Note: slaOptions was originally outside of execution options, so it is parsed and set // separately for the original JSON format. New formats should include slaOptions as // part of execution options. 
return options; } public void addAllFlowParameters(final Map<String, String> flowParam) { this.flowParameters.putAll(flowParam); } public Map<String, String> getFlowParameters() { return this.flowParameters; } public boolean isFailureEmailsOverridden() { return this.failureEmailsOverride; } public void setFailureEmailsOverridden(final boolean override) { this.failureEmailsOverride = override; } public boolean isSuccessEmailsOverridden() { return this.successEmailsOverride; } public void setSuccessEmailsOverridden(final boolean override) { this.successEmailsOverride = override; } public List<String> getFailureEmails() { return this.failureEmails; } public void setFailureEmails(final Collection<String> emails) { this.failureEmails = new ArrayList<>(emails); } public List<String> getSuccessEmails() { return this.successEmails; } public void setSuccessEmails(final Collection<String> emails) { this.successEmails = new ArrayList<>(emails); } public boolean getNotifyOnFirstFailure() { return this.notifyOnFirstFailure; } public void setNotifyOnFirstFailure(final boolean notify) { this.notifyOnFirstFailure = notify; } public boolean getNotifyOnLastFailure() { return this.notifyOnLastFailure; } public void setNotifyOnLastFailure(final boolean notify) { this.notifyOnLastFailure = notify; } public FailureAction getFailureAction() { return this.failureAction; } public void setFailureAction(final FailureAction action) { this.failureAction = action; } public String getConcurrentOption() { return this.concurrentOption; } public void setConcurrentOption(final String concurrentOption) { this.concurrentOption = concurrentOption; } public String getMailCreator() { return this.mailCreator; } public void setMailCreator(final String mailCreator) { this.mailCreator = mailCreator; } public Integer getPipelineLevel() { return this.pipelineLevel; } public void setPipelineLevel(final Integer level) { this.pipelineLevel = level; } public Integer getPipelineExecutionId() { return this.pipelineExecId; } public void setPipelineExecutionId(final Integer id) { this.pipelineExecId = id; } public Integer getQueueLevel() { return this.queueLevel; } public List<DisabledJob> getDisabledJobs() { return new ArrayList<>(this.initiallyDisabledJobs); } public void setDisabledJobs(final List<DisabledJob> disabledJobs) { this.initiallyDisabledJobs = disabledJobs; } public boolean getMemoryCheck() { return this.memoryCheck; } public void setMemoryCheck(final boolean memoryCheck) { this.memoryCheck = memoryCheck; } public List<SlaOption> getSlaOptions() { return slaOptions; } public void setSlaOptions(final List<SlaOption> slaOptions) { this.slaOptions = slaOptions; } public Map<String, Object> toObject() { final HashMap<String, Object> flowOptionObj = new HashMap<>(); flowOptionObj.put(FLOW_PARAMETERS, this.flowParameters); flowOptionObj.put(NOTIFY_ON_FIRST_FAILURE, this.notifyOnFirstFailure); flowOptionObj.put(NOTIFY_ON_LAST_FAILURE, this.notifyOnLastFailure); flowOptionObj.put(SUCCESS_EMAILS, this.successEmails); flowOptionObj.put(FAILURE_EMAILS, this.failureEmails); flowOptionObj.put(FAILURE_ACTION, this.failureAction.toString()); flowOptionObj.put(PIPELINE_LEVEL, this.pipelineLevel); flowOptionObj.put(PIPELINE_EXECID, this.pipelineExecId); flowOptionObj.put(QUEUE_LEVEL, this.queueLevel); flowOptionObj.put(CONCURRENT_OPTION, this.concurrentOption); flowOptionObj.put(DISABLE, DisabledJob.toDeprecatedObjectList(this.initiallyDisabledJobs)); flowOptionObj.put(FAILURE_EMAILS_OVERRIDE, this.failureEmailsOverride); 
flowOptionObj.put(SUCCESS_EMAILS_OVERRIDE, this.successEmailsOverride); flowOptionObj.put(MAIL_CREATOR, this.mailCreator); flowOptionObj.put(MEMORY_CHECK, this.memoryCheck); return flowOptionObj; } public String toJSON() { return new GsonBuilder().setPrettyPrinting().create().toJson(toObject()); } public enum FailureAction { FINISH_CURRENTLY_RUNNING, CANCEL_ALL, FINISH_ALL_POSSIBLE } }
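A short usage sketch, assuming azkaban-common is on the classpath: options built programmatically survive a toObject/createFromObject round trip, since toObject emits exactly the map shape createFromObject reads. The email address is a placeholder.

import azkaban.executor.ExecutionOptions;
import java.util.Arrays;
import java.util.Map;

// Usage sketch: populate options, serialize to the generic map form, and
// parse them back.
public class ExecutionOptionsSketch {
  public static void main(final String[] args) {
    final ExecutionOptions options = new ExecutionOptions();
    options.setFailureEmails(Arrays.asList("oncall@example.com"));
    options.setFailureAction(ExecutionOptions.FailureAction.CANCEL_ALL);
    options.setConcurrentOption(ExecutionOptions.CONCURRENT_OPTION_SKIP);

    final Map<String, Object> asMap = options.toObject();
    final ExecutionOptions restored = ExecutionOptions.createFromObject(asMap);
    System.out.println(restored.getFailureAction());    // CANCEL_ALL
    System.out.println(restored.getConcurrentOption()); // skip
  }
}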
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutionRampDao.java
/* * Copyright 2019 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.db.DatabaseOperator; import com.google.common.collect.ImmutableMap; import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.stream.Collectors; import javax.inject.Inject; import javax.inject.Singleton; import org.apache.commons.dbutils.ResultSetHandler; /** * The Hookup DB Operation for Flow Ramp */ @Singleton public class ExecutionRampDao { private final String FAILURE_RESULT_FORMATTER = "[FAILURE] {Reason = %s, Command = %s}"; private final String SUCCESS_RESULT_FORMATTER = "[SUCCESS] {Command = %s}"; private final DatabaseOperator dbOperator; @Inject public ExecutionRampDao(final DatabaseOperator dbOperator) { this.dbOperator = dbOperator; } /** * Fetch Executable Ramps */ private static class FetchExecutableRamps implements ResultSetHandler<ExecutableRampMap> { static String FETCH_EXECUTABLE_RAMPS = "SELECT r.rampId, r.rampPolicy, " + "r.maxFailureToPause, r.maxFailureToRampDown, r.isPercentageScaleForMaxFailure, " + "r.startTime, r.endTime, r.lastUpdatedTime, " + "r.numOfTrail, r.numOfSuccess, r.numOfFailure, r.numOfIgnored, " + "r.isPaused, r.rampStage, r.isActive " + "FROM ramp r "; @Override public ExecutableRampMap handle(final ResultSet resultSet) throws SQLException { final ExecutableRampMap executableRampMap = ExecutableRampMap.createInstance(); if (!resultSet.next()) { return executableRampMap; } do { executableRampMap.add( resultSet.getString(1), ExecutableRamp.createInstance( resultSet.getString(1), resultSet.getString(2), resultSet.getInt(3), resultSet.getInt(4), resultSet.getBoolean(5), resultSet.getLong(6), resultSet.getLong(7), resultSet.getLong(8), resultSet.getInt(9), resultSet.getInt(10), resultSet.getInt(11), resultSet.getInt(12), resultSet.getBoolean(13), resultSet.getInt(14), resultSet.getBoolean(15) ) ); } while (resultSet.next()); return executableRampMap; } } public ExecutableRampMap fetchExecutableRampMap() throws ExecutorManagerException { try { return this.dbOperator.query( FetchExecutableRamps.FETCH_EXECUTABLE_RAMPS, new FetchExecutableRamps() ); } catch (final SQLException e) { throw new ExecutorManagerException("Error on fetching all Ramps", e); } } /** * Fetch Executable Ramp Items */ private static class FetchExecutableRampItems implements ResultSetHandler<ExecutableRampItemsMap> { static String FETCH_EXECUTABLE_RAMP_ITEMS = "SELECT rampId, dependency, rampValue " + "FROM ramp_items "; @Override public ExecutableRampItemsMap handle(final ResultSet resultSet) throws SQLException { final ExecutableRampItemsMap executableRampItemsMap = ExecutableRampItemsMap.createInstance(); if (!resultSet.next()) { return executableRampItemsMap; } do { executableRampItemsMap.add( resultSet.getString(1), resultSet.getString(2), resultSet.getString(3) ); } while (resultSet.next()); return executableRampItemsMap; } } public 
ExecutableRampItemsMap fetchExecutableRampItemsMap() throws ExecutorManagerException { try { return this.dbOperator.query( FetchExecutableRampItems.FETCH_EXECUTABLE_RAMP_ITEMS, new FetchExecutableRampItems() ); } catch (final SQLException e) { throw new ExecutorManagerException("Error fetching active Ramp Items", e); } } /** * Fetch Rampable Dependency's default Value */ private static class FetchExecutableRampDependencies implements ResultSetHandler<ExecutableRampDependencyMap> { static String FETCH_EXECUTABLE_RAMP_DEPENDENCIES = "SELECT dependency, defaultValue, jobtypes " + "FROM ramp_dependency "; @Override public ExecutableRampDependencyMap handle(ResultSet resultSet) throws SQLException { final ExecutableRampDependencyMap executableRampDependencyMap = ExecutableRampDependencyMap.createInstance(); if (!resultSet.next()) { return executableRampDependencyMap; } do { executableRampDependencyMap .add( resultSet.getString(1), resultSet.getString(2), resultSet.getString(3) ); } while (resultSet.next()); return executableRampDependencyMap; } } public ExecutableRampDependencyMap fetchExecutableRampDependencyMap() throws ExecutorManagerException { try { return this.dbOperator.query( FetchExecutableRampDependencies.FETCH_EXECUTABLE_RAMP_DEPENDENCIES, new FetchExecutableRampDependencies() ); } catch (final SQLException e) { throw new ExecutorManagerException("Error fetching default value list of dependencies", e); } } /** * Fetch Executable Ramp's Exceptional Flow Items */ private static class FetchExecutableRampExceptionalFlowItems implements ResultSetHandler<ExecutableRampExceptionalFlowItemsMap> { static String FETCH_EXECUTABLE_RAMP_EXCEPTIONAL_FLOW_ITEMS = "SELECT rampId, flowId, treatment, timestamp " + "FROM ramp_exceptional_flow_items "; @Override public ExecutableRampExceptionalFlowItemsMap handle(ResultSet resultSet) throws SQLException { final ExecutableRampExceptionalFlowItemsMap executableRampExceptionalFlowItemsMap = ExecutableRampExceptionalFlowItemsMap.createInstance(); if (!resultSet.next()) { return executableRampExceptionalFlowItemsMap; } do { executableRampExceptionalFlowItemsMap .add( resultSet.getString(1), resultSet.getString(2), ExecutableRampStatus.of(resultSet.getString(3)), resultSet.getLong(4) ); } while (resultSet.next()); return executableRampExceptionalFlowItemsMap; } } public ExecutableRampExceptionalFlowItemsMap fetchExecutableRampExceptionalFlowItemsMap() throws ExecutorManagerException { try { return this.dbOperator.query( FetchExecutableRampExceptionalFlowItems.FETCH_EXECUTABLE_RAMP_EXCEPTIONAL_FLOW_ITEMS, new FetchExecutableRampExceptionalFlowItems() ); } catch (final SQLException e) { throw new ExecutorManagerException("Error fetching Executable Ramp Exceptional Flow Items", e); } } /** * Fetch Executable Ramp's Exceptional Job Items */ private static class FetchExecutableRampExceptionalJobItems implements ResultSetHandler<ExecutableRampExceptionalJobItemsMap> { static String FETCH_EXECUTABLE_RAMP_EXCEPTIONAL_JOB_ITEMS = "SELECT rampId, flowId, jobId, treatment, timestamp " + "FROM ramp_exceptional_job_items "; @Override public ExecutableRampExceptionalJobItemsMap handle(ResultSet resultSet) throws SQLException { final ExecutableRampExceptionalJobItemsMap executableRampExceptionalJobItemsMap = ExecutableRampExceptionalJobItemsMap.createInstance(); if (!resultSet.next()) { return executableRampExceptionalJobItemsMap; } do { executableRampExceptionalJobItemsMap.add( resultSet.getString(1), resultSet.getString(2), resultSet.getString(3), 
ExecutableRampStatus.of(resultSet.getString(4)), resultSet.getLong(5) ); } while (resultSet.next()); return executableRampExceptionalJobItemsMap; } } public ExecutableRampExceptionalJobItemsMap fetchExecutableRampExceptionalJobItemsMap() throws ExecutorManagerException { try { return this.dbOperator.query( FetchExecutableRampExceptionalJobItems.FETCH_EXECUTABLE_RAMP_EXCEPTIONAL_JOB_ITEMS, new FetchExecutableRampExceptionalJobItems() ); } catch (final SQLException e) { throw new ExecutorManagerException("Error fetching Executable Ramp Exceptional Flow Items", e); } } // ------------------------------------------------------------------ // Ramp DataSets Management Section // ------------------------------------------------------------------ /** * Generic Insert Action Function * @param tableName table name * @param actionData associated action data which include field name value pairs * @throws ExecutorManagerException */ public void insertAction(final String tableName, Map<String, Object> actionData) throws ExecutorManagerException { if (actionData.size() == 0) { throw new ExecutorManagerException( String.format("Error on inserting into %s WITHOUT ANY DATA", tableName) ); } try { if ("ramp".equalsIgnoreCase(tableName)) { actionData = adjustActionData( actionData, ImmutableMap.<String, Object>builder() .put("startTime", System.currentTimeMillis()) .build() ); } else if ("ramp_exceptional_flow_items".equalsIgnoreCase(tableName)) { actionData = adjustActionData( actionData, ImmutableMap.<String, Object>builder() .put("timestamp", System.currentTimeMillis()) .build() ); } else if ("ramp_exceptional_job_items".equalsIgnoreCase(tableName)) { actionData = adjustActionData( actionData, ImmutableMap.<String, Object>builder() .put("timestamp", System.currentTimeMillis()) .build() ); } String fieldListString = ""; String positionListString = ""; ArrayList<Object> values = new ArrayList<>(); for (Entry<String, Object> element : actionData.entrySet()) { fieldListString += "," + element.getKey(); positionListString += ",?"; values.add(element.getValue()); } String sqlCommand = String.format( "INSERT INTO %s (%s) VALUES(%s)", tableName, fieldListString.substring(1), positionListString.substring(1) ); int rows = this.dbOperator.update(sqlCommand, values.toArray()); if (rows <= 0) { throw new ExecutorManagerException( String.format("No record(s) is inserted into %s, with data %s", tableName, actionData) ); } } catch (final SQLException e) { throw new ExecutorManagerException( String.format("Error on inserting into %s, with data %s", tableName, actionData), e ); } } /** * Generic Delete Action Function * @param tableName table name * @param constraints associated constraints which include field name value pairs * @throws ExecutorManagerException */ public void deleteAction(final String tableName, Map<String, Object> constraints) throws ExecutorManagerException { if (constraints.size() == 0) { throw new ExecutorManagerException( String.format("Error on deleting from %s WITHOUT ANY CONDITIONS", tableName) ); } try { String conditionListString = ""; ArrayList<Object> values = new ArrayList<>(); for (Entry<String, Object> element : constraints.entrySet()) { conditionListString += " AND " + element.getKey() + "=?"; values.add(element.getValue()); } String sqlCommand = String.format( "DELETE FROM %s WHERE %s", tableName, conditionListString.substring(5) ); int rows = this.dbOperator.update(sqlCommand, values.toArray()); if (rows <= 0) { throw new ExecutorManagerException( String.format("Record(s) do(es) not exist 
in %s, with constraints %s", tableName, constraints) ); } } catch (final SQLException e) { throw new ExecutorManagerException( String.format("Error on deleting from %s, with data %s", tableName, constraints), e ); } } /** * Generic Update Action Function * @param tableName table name * @param actionData associated action data which include field name value pairs * @param constraints associated constraints which include field name value pairs * @throws ExecutorManagerException */ public void updateAction(final String tableName, Map<String, Object> actionData, Map<String, Object> constraints) throws ExecutorManagerException { if (actionData.size() == 0 || constraints.size() == 0) { throw new ExecutorManagerException( String.format("Error on updating %s WITHOUT ANY CONDITIONS OR ANY CHANGES", tableName) ); } try { if ("ramp".equalsIgnoreCase(tableName)) { actionData = adjustActionData( actionData, ImmutableMap.<String, Object>builder() .put("lastUpdatedTime", System.currentTimeMillis()) .build() ); } else if ("ramp_exceptional_flow_items".equalsIgnoreCase(tableName)) { actionData = adjustActionData( actionData, ImmutableMap.<String, Object>builder() .put("timestamp", System.currentTimeMillis()) .build() ); } else if ("ramp_exceptional_job_items".equalsIgnoreCase(tableName)) { actionData = adjustActionData( actionData, ImmutableMap.<String, Object>builder() .put("timestamp", System.currentTimeMillis()) .build() ); } ArrayList<Object> parameters = new ArrayList<>(); String valueListString = ""; for (Entry<String, Object> element : actionData.entrySet()) { valueListString += ", " + element.getKey() + "=?"; parameters.add(element.getValue()); } String conditionListString = ""; for (Entry<String, Object> element : constraints.entrySet()) { conditionListString += " AND " + element.getKey() + "=?"; parameters.add(element.getValue()); } String sqlCommand = String.format( "UPDATE %s SET %s WHERE %s", tableName, valueListString.substring(2), conditionListString.substring(5) ); int rows = this.dbOperator.update(sqlCommand, parameters.toArray()); if (rows <= 0) { throw new ExecutorManagerException( String.format("No record(s) is updated for %s, with data %s", tableName, actionData) ); } } catch (final SQLException e) { throw new ExecutorManagerException( String.format("Error on updating %s, with data %s", tableName, actionData), e ); } } /** * Generic data update action for Ramp DataSets * @param rampActionsMap list of ramp action map * @return result of each command */ public Map<String, String> doRampActions(List<Map<String, Object>> rampActionsMap) { Map<String, String> result = new HashMap<>(); for(int i = 0; i < rampActionsMap.size(); i++) { result.put(Integer.toString(i), doRampAction(rampActionsMap.get(i))); } return result; } private Map<String, Object> adjustActionData(Map<String, Object> actionData, Map<String, Object> defaultValues) { Map<String, Object> modifiedActionData = new HashMap<>(); actionData.entrySet().stream().forEach(entry -> modifiedActionData.put(entry.getKey(), entry.getValue())); for (Map.Entry<String, Object> defaultValue : defaultValues.entrySet()) { if (!modifiedActionData.containsKey(defaultValue.getKey())) { modifiedActionData.put(defaultValue.getKey(), defaultValue.getValue()); } } return modifiedActionData; } /** * Generic data update action for Ramp DataSets * @param actionDataMap ramp action map * @return result * @throws ExecutorManagerException */ private String doRampAction(Map<String, Object> actionDataMap) { String action = (String) actionDataMap.get("action"); 
String tableName = (String) actionDataMap.get("table"); Map<String, Object> conditions =(Map<String, Object>) actionDataMap.get("conditions"); Map<String, Object> values = (Map<String, Object>) actionDataMap.get("values"); try { if ("INSERT".equalsIgnoreCase(action)) { insertAction(tableName, values); } else if ("DELETE".equalsIgnoreCase(action)) { deleteAction(tableName, conditions); } else if ("UPDATE".equalsIgnoreCase(action)) { updateAction(tableName, values, conditions); } else { return String.format(FAILURE_RESULT_FORMATTER, "Invalid Action", actionDataMap.toString()); } return String.format(SUCCESS_RESULT_FORMATTER, actionDataMap.toString()); } catch (ExecutorManagerException e) { return String.format(FAILURE_RESULT_FORMATTER, e.toString(), actionDataMap.toString()); } } public void updateExecutableRamp(ExecutableRamp executableRamp) throws ExecutorManagerException { String sqlCommand = ""; try { // Save all cachedNumTrail, cachedNumSuccess, cachedNumFailure, cachedNumIgnored, // save isPaused, endTime when it is not zero, lastUpdatedTime when it is changed. String ramp = executableRamp.getId(); int cachedNumOfTrail = executableRamp.getCachedCount(ExecutableRamp.CountType.TRAIL); int cachedNumOfSuccess = executableRamp.getCachedCount(ExecutableRamp.CountType.SUCCESS); int cachedNumOfFailure = executableRamp.getCachedCount(ExecutableRamp.CountType.FAILURE); int cachedNumOfIgnored = executableRamp.getCachedCount(ExecutableRamp.CountType.IGNORED); int rampStage = executableRamp.getStage(); long endTime = executableRamp.getEndTime(); boolean isPaused = executableRamp.isPaused(); long lastUpdatedTime = executableRamp.getLastUpdatedTime(); StringBuilder sqlCommandStringBuilder = new StringBuilder(); sqlCommandStringBuilder.append("UPDATE ramp SET "); sqlCommandStringBuilder.append(String.format("numOfTrail = numOfTrail + %s, ", cachedNumOfTrail)); sqlCommandStringBuilder.append(String.format("numOfFailure = numOfFailure + %s, ", cachedNumOfFailure)); sqlCommandStringBuilder.append(String.format("numOfSuccess = numOfSuccess + %s, ", cachedNumOfSuccess)); sqlCommandStringBuilder.append(String.format("numOfIgnored = numOfIgnored + %s, ", cachedNumOfIgnored)); sqlCommandStringBuilder.append(String.format("rampStage = CASE WHEN rampStage > %s THEN rampStage ELSE %s END, ", rampStage, rampStage)); sqlCommandStringBuilder.append(String.format("endTime = CASE WHEN endTime > %s THEN endTime ELSE %s END, ", endTime, endTime)); sqlCommandStringBuilder.append(String.format("lastUpdatedTime = CASE WHEN lastUpdatedTime > %s THEN lastUpdatedTime ELSE %s END", lastUpdatedTime, lastUpdatedTime)); if (isPaused) { sqlCommandStringBuilder.append(", isPaused = true"); } sqlCommandStringBuilder.append(String.format(" WHERE rampId = '%s'", ramp)); sqlCommand = sqlCommandStringBuilder.toString(); int rows = this.dbOperator.update(sqlCommand); if (rows <= 0) { throw new ExecutorManagerException( String.format("No record(s) is updated into ramp, by command [%s]", sqlCommand) ); } } catch (final SQLException e) { throw new ExecutorManagerException( String.format("Error on update into ramp, by command [%s]", sqlCommand), e ); } } public void updateExecutedRampFlows(final String ramp, ExecutableRampExceptionalItems executableRampExceptionalItems) throws ExecutorManagerException { String sqlCommand = ""; try { Object[][] parameters = executableRampExceptionalItems.getCachedItems().stream() .map(item -> { ArrayList<Object> object = new ArrayList<>(); object.add(ramp); object.add(item.getKey()); 
object.add(item.getValue().getStatus().getKey()); object.add(item.getValue().getTimeStamp()); return object.toArray(); }) .collect(Collectors.toList()).toArray(new Object[0][]); if (parameters.length > 0) { sqlCommand = "INSERT INTO ramp_exceptional_flow_items (rampId, flowId, treatment, timestamp) VALUES(?,?,?,?)"; this.dbOperator.batch(sqlCommand, parameters); executableRampExceptionalItems.resetCacheFlag(); } } catch (final SQLException e) { throw new ExecutorManagerException( String.format("Error on update into ramp, by command [%s]", sqlCommand), e ); } } }
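insertAction, deleteAction, and updateAction above all build their SQL the same way: column names and "?" placeholders are joined from the map entries while the values travel as JDBC parameters. (By contrast, updateExecutableRamp inlines its counters with String.format; its inputs are internal state, but the parameterized style used elsewhere in this class would be the safer default.) A standalone sketch of the insertAction assembly, with hypothetical ramp values:

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.StringJoiner;

// Sketch of insertAction's SQL assembly: join column names and placeholders
// from the action-data map, collecting values as parameters. LinkedHashMap
// keeps the demo's column order deterministic.
public class RampInsertSqlSketch {

  static String buildInsert(final String table, final Map<String, Object> data,
      final List<Object> outParams) {
    final StringJoiner columns = new StringJoiner(",");
    final StringJoiner placeholders = new StringJoiner(",");
    for (final Map.Entry<String, Object> e : data.entrySet()) {
      columns.add(e.getKey());
      placeholders.add("?");
      outParams.add(e.getValue());
    }
    return String.format("INSERT INTO %s (%s) VALUES(%s)", table, columns, placeholders);
  }

  public static void main(final String[] args) {
    final Map<String, Object> data = new LinkedHashMap<>();
    data.put("rampId", "spark-upgrade");            // hypothetical ramp id
    data.put("rampPolicy", "SimpleAutoRampPolicy"); // hypothetical policy name
    data.put("startTime", System.currentTimeMillis());
    final List<Object> params = new ArrayList<>();
    System.out.println(buildInsert("ramp", data, params));
    // INSERT INTO ramp (rampId,rampPolicy,startTime) VALUES(?,?,?)
  }
}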
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutionReference.java
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import java.util.Optional; import javax.annotation.Nullable; public class ExecutionReference { private final int execId; private Executor executor; //Todo jamiesjc: deprecate updateTime in ExecutionReference class gradually. private long updateTime; private long nextCheckTime = -1; private int numErrors = 0; public ExecutionReference(final int execId) { this.execId = execId; } public ExecutionReference(final int execId, @Nullable final Executor executor) { this.execId = execId; this.executor = executor; } public long getUpdateTime() { return this.updateTime; } public void setUpdateTime(final long updateTime) { this.updateTime = updateTime; } public long getNextCheckTime() { return this.nextCheckTime; } public void setNextCheckTime(final long nextCheckTime) { this.nextCheckTime = nextCheckTime; } public int getExecId() { return this.execId; } public int getNumErrors() { return this.numErrors; } public void setNumErrors(final int numErrors) { this.numErrors = numErrors; } public Optional<Executor> getExecutor() { return Optional.ofNullable(this.executor); } public void setExecutor(final @Nullable Executor executor) { this.executor = executor; } }
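Because getExecutor() returns an Optional, callers can handle the not-yet-dispatched case without null checks. A tiny usage sketch, assuming azkaban-common on the classpath; the exec id is an example value.

import azkaban.executor.ExecutionReference;

// Usage sketch: a reference with no assigned executor reports Optional.empty(),
// so display code can fall back cleanly.
public class ExecutionReferenceSketch {
  public static void main(final String[] args) {
    final ExecutionReference ref = new ExecutionReference(101);
    final String where = ref.getExecutor()
        .map(e -> e.getHost() + ":" + e.getPort())
        .orElse("unassigned");
    System.out.println(where); // "unassigned" until setExecutor is called
  }
}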
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/Executor.java
/* * Copyright 2014 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.utils.Utils; import java.util.Date; /** * Class to represent an AzkabanExecutorServer details for ExecutorManager * * @author gaggarwa */ public class Executor implements Comparable<Executor> { private final int id; private final String host; private final int port; private boolean isActive; // cached copy of the latest statistics from the executor. private ExecutorInfo cachedExecutorStats; private Date lastStatsUpdatedTime; /** * <pre> * Construct an Executor Object * Note: port should be a within unsigned 2 byte * integer range * </pre> */ public Executor(final int id, final String host, final int port, final boolean isActive) { if (!Utils.isValidPort(port)) { throw new IllegalArgumentException(String.format( "Invalid port number %d for host %s, executor_id %d", port, host, id)); } this.id = id; this.host = host; this.port = port; this.isActive = isActive; } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + (this.isActive ? 1231 : 1237); result = prime * result + ((this.host == null) ? 0 : this.host.hashCode()); result = prime * result + this.id; result = prime * result + this.port; return result; } @Override public boolean equals(final Object obj) { if (this == obj) { return true; } if (obj == null) { return false; } if (!(obj instanceof Executor)) { return false; } final Executor other = (Executor) obj; if (this.isActive != other.isActive) { return false; } if (this.host == null) { if (other.host != null) { return false; } } else if (!this.host.equals(other.host)) { return false; } if (this.id != other.id) { return false; } if (this.port != other.port) { return false; } return true; } @Override public String toString() { return String.format("%s:%s (id: %s), active=%s", null == this.host || this.host.length() == 0 ? "(empty)" : this.host, this.port, this.id, this.isActive); } public String getHost() { return this.host; } public int getPort() { return this.port; } public boolean isActive() { return this.isActive; } public void setActive(final boolean isActive) { this.isActive = isActive; } public int getId() { return this.id; } public ExecutorInfo getExecutorInfo() { return this.cachedExecutorStats; } public void setExecutorInfo(final ExecutorInfo info) { this.cachedExecutorStats = info; this.lastStatsUpdatedTime = new Date(); } /** * Gets the timestamp when the executor info is last updated. * * @return date object represents the timestamp, null if the executor info of this specific * executor is never refreshed. */ public Date getLastStatsUpdatedTime() { return this.lastStatsUpdatedTime; } @Override public int compareTo(final Executor o) { return null == o ? 1 : this.hashCode() - o.hashCode(); } }
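One caveat worth flagging in the class above: compareTo orders executors by subtracting hash codes, which can overflow for hash values of opposite sign and yields an ordering inconsistent with equals. Sketched below is an overflow-safe alternative, expressed as an external Comparator on the stable id field rather than a change to the class itself; the hosts and port are example values.

import azkaban.executor.Executor;
import java.util.Comparator;

// Sketch: Integer-based comparison avoids the wraparound that
// "this.hashCode() - o.hashCode()" can hit; nulls sort first, matching the
// original's "non-null is greater than null" intent.
public class ExecutorOrderingSketch {
  static final Comparator<Executor> BY_ID =
      Comparator.nullsFirst(Comparator.comparingInt(Executor::getId));

  public static void main(final String[] args) {
    final Executor a = new Executor(1, "host-a.example.com", 12321, true);
    final Executor b = new Executor(2, "host-b.example.com", 12321, false);
    System.out.println(BY_ID.compare(a, b) < 0); // true: ordered by id
  }
}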
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutorApiClient.java
/* * Copyright 2015 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.utils.RestfulApiClient; import java.io.IOException; import javax.inject.Singleton; import org.apache.http.HttpResponse; import org.apache.http.StatusLine; import org.apache.http.client.HttpResponseException; import org.apache.http.util.EntityUtils; /** * Client class that will be used to handle all Restful API calls between Executor and the host * application. */ @Singleton public class ExecutorApiClient extends RestfulApiClient<String> { /** * Implementing the parseResponse function to return de-serialized Json object. * * @param response the returned response from the HttpClient. * @return de-serialized object from Json or null if the response doesn't have a body. */ @Override protected String parseResponse(final HttpResponse response) throws HttpResponseException, IOException { final StatusLine statusLine = response.getStatusLine(); final String responseBody = response.getEntity() != null ? EntityUtils.toString(response.getEntity()) : ""; if (statusLine.getStatusCode() >= 300) { logger.error(String.format("unable to parse response as the response status is %s", statusLine.getStatusCode())); throw new HttpResponseException(statusLine.getStatusCode(), responseBody); } return responseBody; } }
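parseResponse above treats any status of 300 or higher as an error and surfaces the response body through the HttpResponseException. A small sketch exercising that branch with a synthetic response (via a trivial subclass, since the method is protected); the response body is a made-up example.

import org.apache.http.HttpVersion;
import org.apache.http.client.HttpResponseException;
import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicStatusLine;

// Sketch: feed a synthetic 500 response through parseResponse and observe
// the body carried on the thrown HttpResponseException.
public class ExecutorApiClientSketch extends azkaban.executor.ExecutorApiClient {
  public static void main(final String[] args) throws Exception {
    final BasicHttpResponse response = new BasicHttpResponse(
        new BasicStatusLine(HttpVersion.HTTP_1_1, 500, "Server Error"));
    response.setEntity(new StringEntity("executor is shutting down"));
    try {
      new ExecutorApiClientSketch().parseResponse(response);
    } catch (final HttpResponseException e) {
      // Expected: status 500 with the body as the exception message.
      System.out.println(e.getStatusCode() + ": " + e.getMessage());
    }
  }
}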
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutorApiGateway.java
/* * Copyright 2017 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.utils.JSONUtils; import azkaban.utils.Pair; import com.google.inject.Inject; import java.io.IOException; import java.net.URI; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Map; import javax.inject.Singleton; import org.codehaus.jackson.map.ObjectMapper; @Singleton public class ExecutorApiGateway { private final ExecutorApiClient apiClient; @Inject public ExecutorApiGateway(final ExecutorApiClient apiClient) { this.apiClient = apiClient; } Map<String, Object> callWithExecutable(final ExecutableFlow exflow, final Executor executor, final String action) throws ExecutorManagerException { return callWithExecutionId(executor.getHost(), executor.getPort(), action, exflow.getExecutionId(), null, (Pair<String, String>[]) null); } Map<String, Object> callWithReference(final ExecutionReference ref, final String action, final Pair<String, String>... params) throws ExecutorManagerException { final Executor executor = ref.getExecutor().get(); return callWithExecutionId(executor.getHost(), executor.getPort(), action, ref.getExecId(), null, params); } Map<String, Object> callWithReferenceByUser(final ExecutionReference ref, final String action, final String user, final Pair<String, String>... params) throws ExecutorManagerException { final Executor executor = ref.getExecutor().get(); return callWithExecutionId(executor.getHost(), executor.getPort(), action, ref.getExecId(), user, params); } Map<String, Object> callWithExecutionId(final String host, final int port, final String action, final Integer executionId, final String user, final Pair<String, String>... params) throws ExecutorManagerException { try { final List<Pair<String, String>> paramList = new ArrayList<>(); if (params != null) { paramList.addAll(Arrays.asList(params)); } paramList .add(new Pair<>(ConnectorParams.ACTION_PARAM, action)); paramList.add(new Pair<>(ConnectorParams.EXECID_PARAM, String .valueOf(executionId))); paramList.add(new Pair<>(ConnectorParams.USER_PARAM, user)); return callForJsonObjectMap(host, port, "/executor", paramList); } catch (final IOException e) { throw new ExecutorManagerException(e.getMessage(), e); } } /** * Call executor and parse the JSON response as an instance of the class given as an argument. */ <T> T callForJsonType(final String host, final int port, final String path, final List<Pair<String, String>> paramList, final Class<T> valueType) throws IOException { final String responseString = callForJsonString(host, port, path, paramList); if (null == responseString || responseString.length() == 0) { return null; } return new ObjectMapper().readValue(responseString, valueType); } /* * Call executor and return json object map. 
*/ Map<String, Object> callForJsonObjectMap(final String host, final int port, final String path, final List<Pair<String, String>> paramList) throws IOException { final String responseString = callForJsonString(host, port, path, paramList); @SuppressWarnings("unchecked") final Map<String, Object> jsonResponse = (Map<String, Object>) JSONUtils.parseJSONFromString(responseString); final String error = (String) jsonResponse.get(ConnectorParams.RESPONSE_ERROR); if (error != null) { throw new IOException(error); } return jsonResponse; } /* * Call executor and return raw json string. */ private String callForJsonString(final String host, final int port, final String path, List<Pair<String, String>> paramList) throws IOException { if (paramList == null) { paramList = new ArrayList<>(); } @SuppressWarnings("unchecked") final URI uri = ExecutorApiClient.buildUri(host, port, path, true); return this.apiClient.httpPost(uri, paramList); } public Map<String, Object> updateExecutions(final Executor executor, final List<ExecutableFlow> executions) throws ExecutorManagerException { final List<Long> updateTimesList = new ArrayList<>(); final List<Integer> executionIdsList = new ArrayList<>(); // We pack the parameters of the same host together before query for (final ExecutableFlow flow : executions) { executionIdsList.add(flow.getExecutionId()); updateTimesList.add(flow.getUpdateTime()); } final Pair<String, String> updateTimes = new Pair<>( ConnectorParams.UPDATE_TIME_LIST_PARAM, JSONUtils.toJSON(updateTimesList)); final Pair<String, String> executionIds = new Pair<>( ConnectorParams.EXEC_ID_LIST_PARAM, JSONUtils.toJSON(executionIdsList)); return callWithExecutionId(executor.getHost(), executor.getPort(), ConnectorParams.UPDATE_ACTION, null, null, executionIds, updateTimes); } }
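A small sketch of the parameter list that callWithExecutionId() assembles before posting to the /executor endpoint. The literal keys below mirror what ConnectorParams.ACTION_PARAM, EXECID_PARAM and USER_PARAM are believed to resolve to elsewhere in Azkaban; treat them as assumptions for illustration.

// Sketch only: key strings are assumed values of the ConnectorParams constants.
import azkaban.utils.Pair;
import java.util.ArrayList;
import java.util.List;

public class ParamListSketch {
  public static void main(String[] args) {
    final List<Pair<String, String>> paramList = new ArrayList<>();
    paramList.add(new Pair<>("action", "ping"));
    paramList.add(new Pair<>("execid", String.valueOf(123)));
    paramList.add(new Pair<>("user", "azkaban-admin"));
    // The gateway posts these as form parameters in a single request.
    for (final Pair<String, String> param : paramList) {
      System.out.println(param.getFirst() + "=" + param.getSecond());
    }
  }
}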
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutorDao.java
/* * Copyright 2017 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.db.DatabaseOperator; import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; import java.util.Collections; import java.util.List; import javax.inject.Inject; import javax.inject.Singleton; import org.apache.commons.dbutils.ResultSetHandler; import org.apache.log4j.Logger; @Singleton public class ExecutorDao { private static final Logger logger = Logger.getLogger(ExecutorDao.class); private final DatabaseOperator dbOperator; @Inject public ExecutorDao(final DatabaseOperator dbOperator) { this.dbOperator = dbOperator; } List<Executor> fetchAllExecutors() throws ExecutorManagerException { try { return this.dbOperator .query(FetchExecutorHandler.FETCH_ALL_EXECUTORS, new FetchExecutorHandler()); } catch (final Exception e) { throw new ExecutorManagerException("Error fetching executors", e); } } List<Executor> fetchActiveExecutors() throws ExecutorManagerException { try { return this.dbOperator .query(FetchExecutorHandler.FETCH_ACTIVE_EXECUTORS, new FetchExecutorHandler()); } catch (final SQLException e) { throw new ExecutorManagerException("Error fetching active executors", e); } } public Executor fetchExecutor(final String host, final int port) throws ExecutorManagerException { try { final List<Executor> executors = this.dbOperator.query(FetchExecutorHandler.FETCH_EXECUTOR_BY_HOST_PORT, new FetchExecutorHandler(), host, port); if (executors.isEmpty()) { return null; } else { return executors.get(0); } } catch (final SQLException e) { throw new ExecutorManagerException(String.format( "Error fetching executor %s:%d", host, port), e); } } public Executor fetchExecutor(final int executorId) throws ExecutorManagerException { try { final List<Executor> executors = this.dbOperator .query(FetchExecutorHandler.FETCH_EXECUTOR_BY_ID, new FetchExecutorHandler(), executorId); if (executors.isEmpty()) { return null; } else { return executors.get(0); } } catch (final Exception e) { throw new ExecutorManagerException(String.format( "Error fetching executor with id: %d", executorId), e); } } Executor fetchExecutorByExecutionId(final int executionId) throws ExecutorManagerException { final FetchExecutorHandler executorHandler = new FetchExecutorHandler(); try { final List<Executor> executors = this.dbOperator .query(FetchExecutorHandler.FETCH_EXECUTION_EXECUTOR, executorHandler, executionId); if (executors.size() > 0) { return executors.get(0); } else { return null; } } catch (final SQLException e) { throw new ExecutorManagerException( "Error fetching executor for exec_id : " + executionId, e); } } Executor addExecutor(final String host, final int port) throws ExecutorManagerException { // verify, if executor already exists if (fetchExecutor(host, port) != null) { throw new ExecutorManagerException(String.format( "Executor %s:%d already exist", host, port)); } // add new executor addExecutorHelper(host, port); // fetch newly added executor return fetchExecutor(host, port); 
} private void addExecutorHelper(final String host, final int port) throws ExecutorManagerException { final String INSERT = "INSERT INTO executors (host, port) values (?,?)"; try { this.dbOperator.update(INSERT, host, port); } catch (final SQLException e) { throw new ExecutorManagerException(String.format("Error adding %s:%d ", host, port), e); } } public void updateExecutor(final Executor executor) throws ExecutorManagerException { final String UPDATE = "UPDATE executors SET host=?, port=?, active=? where id=?"; try { final int rows = this.dbOperator.update(UPDATE, executor.getHost(), executor.getPort(), executor.isActive(), executor.getId()); if (rows == 0) { throw new ExecutorManagerException("No executor with id: " + executor.getId()); } } catch (final SQLException e) { throw new ExecutorManagerException("Error updating executor " + executor.getId(), e); } } void removeExecutor(final String host, final int port) throws ExecutorManagerException { final String DELETE = "DELETE FROM executors WHERE host=? AND port=?"; try { final int rows = this.dbOperator.update(DELETE, host, port); if (rows == 0) { throw new ExecutorManagerException("No executor with host, port: " + "(" + host + "," + port + ")"); } } catch (final SQLException e) { throw new ExecutorManagerException("Error removing executor with host, port: " + "(" + host + "," + port + ")", e); } } /** * JDBC ResultSetHandler to fetch records from executors table */ public static class FetchExecutorHandler implements ResultSetHandler<List<Executor>> { static String FETCH_ALL_EXECUTORS = "SELECT id, host, port, active FROM executors"; static String FETCH_ACTIVE_EXECUTORS = "SELECT id, host, port, active FROM executors where active=true"; static String FETCH_EXECUTOR_BY_ID = "SELECT id, host, port, active FROM executors where id=?"; static String FETCH_EXECUTOR_BY_HOST_PORT = "SELECT id, host, port, active FROM executors where host=? AND port=?"; static String FETCH_EXECUTION_EXECUTOR = "SELECT ex.id, ex.host, ex.port, ex.active FROM " + " executors ex INNER JOIN execution_flows ef " + "on ex.id = ef.executor_id where exec_id=?"; @Override public List<Executor> handle(final ResultSet rs) throws SQLException { if (!rs.next()) { return Collections.emptyList(); } final List<Executor> executors = new ArrayList<>(); do { final int id = rs.getInt(1); final String host = rs.getString(2); final int port = rs.getInt(3); final boolean active = rs.getBoolean(4); final Executor executor = new Executor(id, host, port, active); executors.add(executor); } while (rs.next()); return executors; } } }
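A hedged sketch of how FetchExecutorHandler maps a JDBC ResultSet to Executor objects, shown against an in-memory database. The H2 driver and the minimal table layout here are assumptions for illustration only; in Azkaban the query runs through DatabaseOperator against the real executors table.

// Sketch only: assumes an H2 jar on the classpath for the in-memory database.
import azkaban.executor.Executor;
import azkaban.executor.ExecutorDao.FetchExecutorHandler;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.List;

public class FetchExecutorHandlerSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:az");
        Statement stmt = conn.createStatement()) {
      stmt.execute("CREATE TABLE executors "
          + "(id INT, host VARCHAR(64), port INT, active BOOLEAN)");
      stmt.execute("INSERT INTO executors VALUES (1, 'exec-host', 12321, TRUE)");
      try (ResultSet rs = stmt.executeQuery(
          "SELECT id, host, port, active FROM executors")) {
        // The handler walks the ResultSet and builds one Executor per row.
        final List<Executor> executors = new FetchExecutorHandler().handle(rs);
        System.out.println(executors); // [exec-host:12321 (id: 1), active=true]
      }
    }
  }
}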
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutorEventsDao.java
/* * Copyright 2017 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.db.DatabaseOperator; import azkaban.executor.ExecutorLogEvent.EventType; import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; import java.util.Collections; import java.util.Date; import java.util.List; import javax.inject.Inject; import javax.inject.Singleton; import org.apache.commons.dbutils.ResultSetHandler; @Singleton public class ExecutorEventsDao { private final DatabaseOperator dbOperator; @Inject public ExecutorEventsDao(final DatabaseOperator dbOperator) { this.dbOperator = dbOperator; } public void postExecutorEvent(final Executor executor, final EventType type, final String user, final String message) throws ExecutorManagerException { final String INSERT_PROJECT_EVENTS = "INSERT INTO executor_events (executor_id, event_type, event_time, username, message) values (?,?,?,?,?)"; try { this.dbOperator.update(INSERT_PROJECT_EVENTS, executor.getId(), type.getNumVal(), new Date(), user, message); } catch (final SQLException e) { throw new ExecutorManagerException("Failed to post executor event", e); } } public List<ExecutorLogEvent> getExecutorEvents(final Executor executor, final int num, final int offset) throws ExecutorManagerException { try { return this.dbOperator.query(ExecutorLogsResultHandler.SELECT_EXECUTOR_EVENTS_ORDER, new ExecutorLogsResultHandler(), executor.getId(), num, offset); } catch (final SQLException e) { throw new ExecutorManagerException( "Failed to fetch events for executor id : " + executor.getId(), e); } } /** * JDBC ResultSetHandler to fetch records from executor_events table */ private static class ExecutorLogsResultHandler implements ResultSetHandler<List<ExecutorLogEvent>> { private static final String SELECT_EXECUTOR_EVENTS_ORDER = "SELECT executor_id, event_type, event_time, username, message FROM executor_events " + " WHERE executor_id=? ORDER BY event_time LIMIT ? OFFSET ?"; @Override public List<ExecutorLogEvent> handle(final ResultSet rs) throws SQLException { if (!rs.next()) { return Collections.<ExecutorLogEvent>emptyList(); } final ArrayList<ExecutorLogEvent> events = new ArrayList<>(); do { final int executorId = rs.getInt(1); final int eventType = rs.getInt(2); final Date eventTime = rs.getDate(3); final String username = rs.getString(4); final String message = rs.getString(5); final ExecutorLogEvent event = new ExecutorLogEvent(executorId, username, eventTime, EventType.fromInteger(eventType), message); events.add(event); } while (rs.next()); return events; } } }
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutorHealthChecker.java
/* * Copyright 2019 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.Constants.ConfigurationKeys; import azkaban.utils.Pair; import azkaban.utils.Props; import com.google.common.annotations.VisibleForTesting; import java.time.Duration; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import javax.inject.Inject; import javax.inject.Singleton; import org.apache.commons.lang.exception.ExceptionUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Periodically checks the health of executors. Finalizes flows or sends alert emails when needed. */ @SuppressWarnings("FutureReturnValueIgnored") @Singleton public class ExecutorHealthChecker { private static final Logger logger = LoggerFactory.getLogger(ExecutorHealthChecker.class); // Max number of executor failures before sending out alert emails. private static final int DEFAULT_EXECUTOR_MAX_FAILURE_COUNT = 6; // Web server checks executor health every 5 min by default. private static final Duration DEFAULT_EXECUTOR_HEALTHCHECK_INTERVAL = Duration.ofMinutes(5); private final long healthCheckIntervalMin; private final int executorMaxFailureCount; private final List<String> alertEmails; private final ScheduledExecutorService scheduler; private final ExecutorLoader executorLoader; private final ExecutorApiGateway apiGateway; private final AlerterHolder alerterHolder; private final Map<Integer, Integer> executorFailureCount = new HashMap<>(); @Inject public ExecutorHealthChecker(final Props azkProps, final ExecutorLoader executorLoader, final ExecutorApiGateway apiGateway, final AlerterHolder alerterHolder) { this.healthCheckIntervalMin = azkProps .getLong(ConfigurationKeys.AZKABAN_EXECUTOR_HEALTHCHECK_INTERVAL_MIN, DEFAULT_EXECUTOR_HEALTHCHECK_INTERVAL.toMinutes()); this.executorMaxFailureCount = azkProps.getInt(ConfigurationKeys .AZKABAN_EXECUTOR_MAX_FAILURE_COUNT, DEFAULT_EXECUTOR_MAX_FAILURE_COUNT); this.alertEmails = azkProps.getStringList(ConfigurationKeys.AZKABAN_ADMIN_ALERT_EMAIL); this.scheduler = Executors.newSingleThreadScheduledExecutor(); this.executorLoader = executorLoader; this.apiGateway = apiGateway; this.alerterHolder = alerterHolder; } public void start() { logger.info("Starting executor health checker."); this.scheduler.scheduleAtFixedRate(this::checkExecutorHealthQuietly, 0L, this.healthCheckIntervalMin, TimeUnit.MINUTES); } public void shutdown() { logger.info("Shutting down executor health checker."); this.scheduler.shutdown(); try { if (!this.scheduler.awaitTermination(60, TimeUnit.SECONDS)) { this.scheduler.shutdownNow(); } } catch (final InterruptedException ex) { this.scheduler.shutdownNow(); Thread.currentThread().interrupt(); } } /** * Wrapper for capturing and logging any exceptions thrown during healthcheck.
* {@code ScheduledExecutorService} stops the scheduled invocations of a given method in * case it throws an exception. * Exceptions are not expected at this stage; however, in case any unchecked exceptions * do occur, we still don't want subsequent healthchecks to stop. */ public void checkExecutorHealthQuietly() { try { checkExecutorHealth(); } catch (final RuntimeException e) { logger.error("Unexpected error during executor healthcheck. Cause: " + ExceptionUtils.getStackTrace(e)); } } /** * Checks executor health. Finalizes the flow if its executor is already removed from DB or * sends alert emails if the executor isn't alive any more. */ @VisibleForTesting void checkExecutorHealth() { final Map<Optional<Executor>, List<ExecutableFlow>> exFlowMap = getFlowToExecutorMap(); for (final Map.Entry<Optional<Executor>, List<ExecutableFlow>> entry : exFlowMap.entrySet()) { final Optional<Executor> executorOption = entry.getKey(); if (!executorOption.isPresent()) { final String finalizeReason = "Executor id of this execution doesn't exist."; finalizeFlows(entry.getValue(), finalizeReason); continue; } final Executor executor = executorOption.get(); try { // Todo jamiesjc: add metrics to monitor the http call return time final Map<String, Object> results = this.apiGateway .callWithExecutionId(executor.getHost(), executor.getPort(), ConnectorParams.PING_ACTION, null, null); if (results == null || results.containsKey(ConnectorParams.RESPONSE_ERROR) || !results .containsKey(ConnectorParams.STATUS_PARAM) || !results.get(ConnectorParams.STATUS_PARAM) .equals(ConnectorParams.RESPONSE_ALIVE)) { throw new ExecutorManagerException("Status of executor " + executor.getId() + " is " + "not alive."); } else { // Executor is alive. Clear the failure count. if (this.executorFailureCount.containsKey(executor.getId())) { this.executorFailureCount.put(executor.getId(), 0); } } } catch (final ExecutorManagerException e) { handleExecutorNotAliveCase(executor, entry.getValue(), e); } } } /** * Finalize given flows with the provided reason. * * @param flows * @param finalizeReason */ @VisibleForTesting void finalizeFlows(List<ExecutableFlow> flows, String finalizeReason) { for (ExecutableFlow flow: flows) { logger.warn( String.format("Finalizing execution %s, %s", flow.getExecutionId(), finalizeReason)); try { ExecutionControllerUtils .finalizeFlow(this.executorLoader, this.alerterHolder, flow, finalizeReason, null); } catch (RuntimeException e) { logger.error(String.format("Unchecked exception while finalizing execution: %d. " + "Exception: %s", flow.getExecutionId(), ExceptionUtils.getStackTrace(e))); } } } /** * Groups Executable flow by Executors to reduce number of REST calls. * * @return executor to list of flows map */ private Map<Optional<Executor>, List<ExecutableFlow>> getFlowToExecutorMap() { final HashMap<Optional<Executor>, List<ExecutableFlow>> exFlowMap = new HashMap<>(); try { for (final Pair<ExecutionReference, ExecutableFlow> runningFlow : this .executorLoader.fetchActiveFlows().values()) { final Optional<Executor> executor = runningFlow.getFirst().getExecutor(); List<ExecutableFlow> flows = exFlowMap.get(executor); if (flows == null) { flows = new ArrayList<>(); exFlowMap.put(executor, flows); } flows.add(runningFlow.getSecond()); } } catch (final ExecutorManagerException e) { logger.error("Failed to get flow to executor map. Exception reported: " + ExceptionUtils .getStackTrace(e)); } return exFlowMap; } /** * Increments executor failure count.
If it reaches max failure count, sends alert emails to AZ * admin and executes any cleanup actions for flows on those executors. * * @param executor the executor * @param flows flows assigned to the executor * @param e Exception thrown when the executor is not alive */ private void handleExecutorNotAliveCase(final Executor executor, final List<ExecutableFlow> flows, final ExecutorManagerException e) { logger.error("Failed to get update from executor " + executor.getId(), e); this.executorFailureCount.put(executor.getId(), this.executorFailureCount.getOrDefault (executor.getId(), 0) + 1); if (this.executorFailureCount.get(executor.getId()) % this.executorMaxFailureCount == 0) { if (!this.alertEmails.isEmpty()) { logger.info(String.format("Executor failure count is %d. Sending alert emails to %s.", this.executorFailureCount.get(executor.getId()), this.alertEmails)); this.alerterHolder.get("email") .alertOnFailedExecutorHealthCheck(executor, flows, e, this.alertEmails); } this.cleanupForMissingExecutor(executor, flows); } } /** * Perform any cleanup required for an unreachable executor. * * Note that ideally we would like to disable the executor such that no further executions are * 'assigned' to it. However, with the pull/polling based model there is currently no direct way * of doing this other than hitting the corresponding ajax endpoint for the executor. * That endpoint is most likely not reachable (hence the repeated healthcheck failures). * Updating the active status or removing the executor from db will not have an impact on any * executor that is still alive and was unreachable temporarily. * For now we limit the action to finalizing any flows assigned to the unreachable executor. * * @param executor * @param executions */ private void cleanupForMissingExecutor(Executor executor, List<ExecutableFlow> executions) { String finalizeReason = String.format("Executor was unreachable, executor-id: %s, executor-host: %s, " + "executor-port: %d", executor.getId(), executor.getHost(), executor.getPort()); finalizeFlows(executions, finalizeReason); } }
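A standalone sketch of the "quiet wrapper" pattern that checkExecutorHealthQuietly() implements above: a task scheduled with scheduleAtFixedRate() is never rescheduled once it throws, so the periodic body is wrapped to log failures instead of propagating them. This is plain JDK code with made-up task names, not Azkaban source.

// Sketch only: demonstrates why periodic tasks swallow unchecked exceptions.
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class QuietSchedulerSketch {
  public static void main(String[] args) throws InterruptedException {
    final ScheduledExecutorService scheduler =
        Executors.newSingleThreadScheduledExecutor();
    scheduler.scheduleAtFixedRate(() -> {
      try {
        riskyCheck();
      } catch (final RuntimeException e) {
        // Without this catch, one failure would cancel all future runs.
        System.err.println("check failed: " + e.getMessage());
      }
    }, 0L, 1L, TimeUnit.SECONDS);
    Thread.sleep(3500L);
    scheduler.shutdownNow();
  }

  private static void riskyCheck() {
    if (Math.random() < 0.5) {
      throw new IllegalStateException("simulated healthcheck failure");
    }
    System.out.println("healthcheck ok");
  }
}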
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutorInfo.java
/* * Copyright 2015 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; /** * Class that exposes the statistics from the executor server. List of the statistics - * remainingMemoryPercent; remainingMemory; remainingFlowCapacity; numberOfAssignedFlows; * lastDispatchedTime; cpuUsage; */ public class ExecutorInfo implements java.io.Serializable { private static final long serialVersionUID = 3009746603773371263L; private double remainingMemoryPercent; private long remainingMemoryInMB; private int remainingFlowCapacity; private int numberOfAssignedFlows; private long lastDispatchedTime; private double cpuUsage; public ExecutorInfo() { } public ExecutorInfo(final double remainingMemoryPercent, final long remainingMemory, final int remainingFlowCapacity, final long lastDispatched, final double cpuUsage, final int numberOfAssignedFlows) { this.remainingMemoryInMB = remainingMemory; this.cpuUsage = cpuUsage; this.remainingFlowCapacity = remainingFlowCapacity; this.remainingMemoryPercent = remainingMemoryPercent; this.lastDispatchedTime = lastDispatched; this.numberOfAssignedFlows = numberOfAssignedFlows; } public double getCpuUsage() { return this.cpuUsage; } public void setCpuUsage(final double value) { this.cpuUsage = value; } public double getRemainingMemoryPercent() { return this.remainingMemoryPercent; } public void setRemainingMemoryPercent(final double value) { this.remainingMemoryPercent = value; } public long getRemainingMemoryInMB() { return this.remainingMemoryInMB; } public void setRemainingMemoryInMB(final long value) { this.remainingMemoryInMB = value; } public int getRemainingFlowCapacity() { return this.remainingFlowCapacity; } public void setRemainingFlowCapacity(final int value) { this.remainingFlowCapacity = value; } public long getLastDispatchedTime() { return this.lastDispatchedTime; } public void setLastDispatchedTime(final long value) { this.lastDispatchedTime = value; } public int getNumberOfAssignedFlows() { return this.numberOfAssignedFlows; } public void setNumberOfAssignedFlows(final int value) { this.numberOfAssignedFlows = value; } @Override public int hashCode() { int result; long temp; temp = Double.doubleToLongBits(this.remainingMemoryPercent); result = (int) (temp ^ (temp >>> 32)); result = 31 * result + (int) (this.remainingMemoryInMB ^ (this.remainingMemoryInMB >>> 32)); result = 31 * result + this.remainingFlowCapacity; result = 31 * result + this.numberOfAssignedFlows; result = 31 * result + (int) (this.lastDispatchedTime ^ (this.lastDispatchedTime >>> 32)); temp = Double.doubleToLongBits(this.cpuUsage); result = 31 * result + (int) (temp ^ (temp >>> 32)); return result; } @Override public boolean equals(final Object obj) { if (obj instanceof ExecutorInfo) { boolean result = true; final ExecutorInfo stat = (ExecutorInfo) obj; result &= this.remainingMemoryInMB == stat.remainingMemoryInMB; result &= this.cpuUsage == stat.cpuUsage; result &= this.remainingFlowCapacity == stat.remainingFlowCapacity; result &= this.remainingMemoryPercent
== stat.remainingMemoryPercent; result &= this.numberOfAssignedFlows == stat.numberOfAssignedFlows; result &= this.lastDispatchedTime == stat.lastDispatchedTime; return result; } return false; } @Override public String toString() { return "ExecutorInfo{" + "remainingMemoryPercent=" + this.remainingMemoryPercent + ", remainingMemoryInMB=" + this.remainingMemoryInMB + ", remainingFlowCapacity=" + this.remainingFlowCapacity + ", numberOfAssignedFlows=" + this.numberOfAssignedFlows + ", lastDispatchedTime=" + this.lastDispatchedTime + ", cpuUsage=" + this.cpuUsage + '}'; } }
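A round-trip sketch of what the web server does when it refreshes executor statistics: the /serverStatistics response body is de-serialized into ExecutorInfo with Jackson, as in the gateway's callForJsonType(). The JSON literal here is illustrative, not a captured response.

// Sketch only: assumes the codehaus Jackson jar used elsewhere in this dump.
import azkaban.executor.ExecutorInfo;
import java.io.IOException;
import org.codehaus.jackson.map.ObjectMapper;

public class ExecutorInfoSketch {
  public static void main(String[] args) throws IOException {
    final String body =
        "{\"remainingMemoryPercent\":42.0,\"remainingMemoryInMB\":2048,"
            + "\"remainingFlowCapacity\":10,\"numberOfAssignedFlows\":3,"
            + "\"lastDispatchedTime\":0}";
    // Jackson binds each JSON field through the matching setter.
    final ExecutorInfo info = new ObjectMapper().readValue(body, ExecutorInfo.class);
    System.out.println(info);
  }
}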
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutorLoader.java
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.executor.ExecutorLogEvent.EventType; import azkaban.utils.FileIOUtils.LogData; import azkaban.utils.Pair; import azkaban.utils.Props; import java.io.File; import java.time.Duration; import java.util.List; import java.util.Map; public interface ExecutorLoader { void uploadExecutableFlow(ExecutableFlow flow) throws ExecutorManagerException; ExecutableFlow fetchExecutableFlow(int execId) throws ExecutorManagerException; List<ExecutableFlow> fetchRecentlyFinishedFlows(Duration maxAge) throws ExecutorManagerException; Map<Integer, Pair<ExecutionReference, ExecutableFlow>> fetchActiveFlows() throws ExecutorManagerException; Map<Integer, Pair<ExecutionReference, ExecutableFlow>> fetchUnfinishedFlows() throws ExecutorManagerException; Map<Integer, Pair<ExecutionReference, ExecutableFlow>> fetchUnfinishedFlowsMetadata() throws ExecutorManagerException; Pair<ExecutionReference, ExecutableFlow> fetchActiveFlowByExecId(int execId) throws ExecutorManagerException; List<ExecutableFlow> fetchFlowHistory(int skip, int num) throws ExecutorManagerException; List<ExecutableFlow> fetchFlowHistory(int projectId, String flowId, int skip, int num) throws ExecutorManagerException; List<ExecutableFlow> fetchFlowHistory(int projectId, String flowId, int skip, int num, Status status) throws ExecutorManagerException; List<ExecutableFlow> fetchFlowHistory(String projContain, String flowContains, String userNameContains, int status, long startData, long endData, int skip, int num) throws ExecutorManagerException; List<ExecutableFlow> fetchFlowHistory(final int projectId, final String flowId, final long startTime) throws ExecutorManagerException; /** * <pre> * Fetch all executors from executors table * Note:- * 1 throws an Exception in case of a SQL issue * 2 returns an empty list in case of no executor * </pre> * * @return List<Executor> */ List<Executor> fetchAllExecutors() throws ExecutorManagerException; /** * <pre> * Fetch all executors from executors table with active = true * Note:- * 1 throws an Exception in case of a SQL issue * 2 returns an empty list in case of no active executor * </pre> * * @return List<Executor> */ List<Executor> fetchActiveExecutors() throws ExecutorManagerException; /** * <pre> * Fetch executor from executors with a given (host, port) * Note: * 1. throws an Exception in case of a SQL issue * 2. return null when no executor is found * with the given (host,port) * </pre> * * @return Executor */ Executor fetchExecutor(String host, int port) throws ExecutorManagerException; /** * <pre> * Fetch executor from executors with a given executorId * Note: * 1. throws an Exception in case of a SQL issue * 2. return null when no executor is found with the given executorId * </pre> * * @return Executor */ Executor fetchExecutor(int executorId) throws ExecutorManagerException; /** * <pre> * create an executor and insert in executors table. * Note:- * 1. 
throws an Exception in case of a SQL issue * 2. throws an Exception if an executor with (host, port) already exists * 3. returns the newly added executor * </pre> * * @return Executor */ Executor addExecutor(String host, int port) throws ExecutorManagerException; /** * <pre> * update an existing executor in the executors table. * Note:- * 1. throws an Exception in case of a SQL issue * 2. throws an Exception if there is no executor with the given id * </pre> */ void updateExecutor(Executor executor) throws ExecutorManagerException; /** * <pre> * Remove the executor from executors table. * Note:- * 1. throws an Exception in case of a SQL issue * 2. throws an Exception if there is no such executor in the table * </pre> */ void removeExecutor(String host, int port) throws ExecutorManagerException; /** * <pre> * Log an event in the executor_events audit table * Note:- * throws an Exception in case of a SQL issue * </pre> */ void postExecutorEvent(Executor executor, EventType type, String user, String message) throws ExecutorManagerException; /** * <pre> * Fetch events recorded in the executor_events audit table, inserted * by postExecutorEvent for a given executor, starting from the given offset * Note:- * 1. throws an Exception in case of a SQL issue * 2. Returns an empty list in case of no events * </pre> * * @return List<ExecutorLogEvent> */ List<ExecutorLogEvent> getExecutorEvents(Executor executor, int num, int offset) throws ExecutorManagerException; void addActiveExecutableReference(ExecutionReference ref) throws ExecutorManagerException; void removeActiveExecutableReference(int execId) throws ExecutorManagerException; /** * <pre> * Unset executor Id for an execution * Note:- * throws an Exception in case of a SQL issue * </pre> */ void unassignExecutor(int executionId) throws ExecutorManagerException; /** * <pre> * Set an executor Id to an execution * Note:- * 1. throws an Exception in case of a SQL issue * 2. throws an Exception in case executionId or executorId do not exist * </pre> */ void assignExecutor(int executorId, int execId) throws ExecutorManagerException; /** * <pre> * Fetches an executor corresponding to a given execution * Note:- * 1. throws an Exception in case of a SQL issue * 2. return null when no executor is found with the given executionId * </pre> * * @return fetched Executor */ Executor fetchExecutorByExecutionId(int executionId) throws ExecutorManagerException; /** * <pre> * Fetch queued flows which have not yet been dispatched * Note: * 1. throws an Exception in case of a SQL issue * 2. return empty list when no queued execution is found * </pre> * * @return List of queued flows and corresponding execution reference */ List<Pair<ExecutionReference, ExecutableFlow>> fetchQueuedFlows() throws ExecutorManagerException; boolean updateExecutableReference(int execId, long updateTime) throws ExecutorManagerException; LogData fetchLogs(int execId, String name, int attempt, int startByte, int endByte) throws ExecutorManagerException; List<Object> fetchAttachments(int execId, String name, int attempt) throws ExecutorManagerException; void uploadLogFile(int execId, String name, int attempt, File...
files) throws ExecutorManagerException; void uploadAttachmentFile(ExecutableNode node, File file) throws ExecutorManagerException; void updateExecutableFlow(ExecutableFlow flow) throws ExecutorManagerException; void uploadExecutableNode(ExecutableNode node, Props inputParams) throws ExecutorManagerException; List<ExecutableJobInfo> fetchJobInfoAttempts(int execId, String jobId) throws ExecutorManagerException; ExecutableJobInfo fetchJobInfo(int execId, String jobId, int attempt) throws ExecutorManagerException; List<ExecutableJobInfo> fetchJobHistory(int projectId, String jobId, int skip, int size) throws ExecutorManagerException; void updateExecutableNode(ExecutableNode node) throws ExecutorManagerException; int fetchNumExecutableFlows(int projectId, String flowId) throws ExecutorManagerException; int fetchNumExecutableFlows() throws ExecutorManagerException; int fetchNumExecutableNodes(int projectId, String jobId) throws ExecutorManagerException; Props fetchExecutionJobInputProps(int execId, String jobId) throws ExecutorManagerException; Props fetchExecutionJobOutputProps(int execId, String jobId) throws ExecutorManagerException; Pair<Props, Props> fetchExecutionJobProps(int execId, String jobId) throws ExecutorManagerException; int removeExecutionLogsByTime(long millis, int recordCleanupLimit) throws ExecutorManagerException; void unsetExecutorIdForExecution(final int executionId) throws ExecutorManagerException; int selectAndUpdateExecution(final int executorId, boolean isActive) throws ExecutorManagerException; int selectAndUpdateExecutionWithLocking(final int executorId, boolean isActive) throws ExecutorManagerException; ExecutableRampMap fetchExecutableRampMap() throws ExecutorManagerException; ExecutableRampItemsMap fetchExecutableRampItemsMap() throws ExecutorManagerException; ExecutableRampDependencyMap fetchExecutableRampDependencyMap() throws ExecutorManagerException; ExecutableRampExceptionalFlowItemsMap fetchExecutableRampExceptionalFlowItemsMap() throws ExecutorManagerException; void updateExecutedRampFlows(final String ramp, ExecutableRampExceptionalItems executableRampExceptionalItems) throws ExecutorManagerException; ExecutableRampExceptionalJobItemsMap fetchExecutableRampExceptionalJobItemsMap() throws ExecutorManagerException; Map<String, String> doRampActions(List<Map<String, Object>> rampActionsMap) throws ExecutorManagerException; void updateExecutableRamp(ExecutableRamp executableRamp) throws ExecutorManagerException; }
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutorLogEvent.java
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import java.util.Date; /** * Class to represent events on Azkaban executors * * @author gaggarwa */ public class ExecutorLogEvent { private final int executorId; private final String user; private final Date time; private final EventType type; private final String message; public ExecutorLogEvent(final int executorId, final String user, final Date time, final EventType type, final String message) { this.executorId = executorId; this.user = user; this.time = time; this.type = type; this.message = message; } public int getExecutorId() { return this.executorId; } public String getUser() { return this.user; } public Date getTime() { return this.time; } public EventType getType() { return this.type; } public String getMessage() { return this.message; } /** * Log event type messages. Do not change the numeric representation of each enum. Codes must * stay within the 0 to 255 range. */ public enum EventType { ERROR(128), HOST_UPDATE(1), PORT_UPDATE(2), ACTIVATION(3), INACTIVATION(4), CREATED(5); private final int numVal; EventType(final int numVal) { this.numVal = numVal; } public static EventType fromInteger(final int x) throws IllegalArgumentException { switch (x) { case 1: return HOST_UPDATE; case 2: return PORT_UPDATE; case 3: return ACTIVATION; case 4: return INACTIVATION; case 5: return CREATED; case 128: return ERROR; default: throw new IllegalArgumentException(String.format( "invalid status code %d", x)); } } public int getNumVal() { return this.numVal; } } }
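A round-trip sketch for the EventType codes above: getNumVal() and fromInteger() must stay inverses of each other, since the numeric codes are persisted in the executor_events table. Plain usage of the class above; nothing here is assumed beyond it.

// Sketch only: verifies the enum's numeric round trip.
import azkaban.executor.ExecutorLogEvent.EventType;

public class EventTypeSketch {
  public static void main(String[] args) {
    for (final EventType type : EventType.values()) {
      final EventType decoded = EventType.fromInteger(type.getNumVal());
      System.out.println(type + " -> " + type.getNumVal() + " -> " + decoded);
    }
    // Unknown codes are rejected rather than mapped to a default.
    try {
      EventType.fromInteger(42);
    } catch (final IllegalArgumentException expected) {
      System.out.println("rejected: " + expected.getMessage());
    }
  }
}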
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutorManager.java
/* * Copyright 2014 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.Constants; import azkaban.Constants.ConfigurationKeys; import azkaban.event.EventHandler; import azkaban.executor.selector.ExecutorComparator; import azkaban.executor.selector.ExecutorFilter; import azkaban.executor.selector.ExecutorSelector; import azkaban.flow.FlowUtils; import azkaban.metrics.CommonMetrics; import azkaban.project.Project; import azkaban.project.ProjectWhitelist; import azkaban.utils.FileIOUtils.LogData; import azkaban.utils.Pair; import azkaban.utils.Props; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Lists; import java.io.File; import java.io.IOException; import java.lang.Thread.State; import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.TreeMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import javax.inject.Inject; import javax.inject.Singleton; import org.apache.commons.lang.StringUtils; import org.apache.log4j.Logger; import org.joda.time.DateTime; /** * Executor manager used to manage the client side job. 
* * @deprecated replaced by {@link ExecutionController} */ @Singleton @Deprecated public class ExecutorManager extends EventHandler implements ExecutorManagerAdapter { // 12 weeks private static final long DEFAULT_EXECUTION_LOGS_RETENTION_MS = 3 * 4 * 7 * 24 * 60 * 60 * 1000L; private static final Duration RECENTLY_FINISHED_LIFETIME = Duration.ofMinutes(10); private static final Logger logger = Logger.getLogger(ExecutorManager.class); private final RunningExecutions runningExecutions; private final Props azkProps; private final CommonMetrics commonMetrics; private final ExecutorLoader executorLoader; private final RunningExecutionsUpdaterThread updaterThread; private final ExecutorApiGateway apiGateway; private final int maxConcurrentRunsOneFlow; private final Map<Pair<String, String>, Integer> maxConcurrentRunsPerFlowMap; private final ExecutorManagerUpdaterStage updaterStage; private final ExecutionFinalizer executionFinalizer; private final ActiveExecutors activeExecutors; private final ExecutorService executorInfoRefresherService; QueuedExecutions queuedFlows; File cacheDir; private QueueProcessorThread queueProcessor; private volatile Pair<ExecutionReference, ExecutableFlow> runningCandidate = null; private List<String> filterList; private Map<String, Integer> comparatorWeightsMap; private long lastSuccessfulExecutorInfoRefresh; private Duration sleepAfterDispatchFailure = Duration.ofSeconds(1L); private boolean initialized = false; @Inject public ExecutorManager(final Props azkProps, final ExecutorLoader executorLoader, final CommonMetrics commonMetrics, final ExecutorApiGateway apiGateway, final RunningExecutions runningExecutions, final ActiveExecutors activeExecutors, final ExecutorManagerUpdaterStage updaterStage, final ExecutionFinalizer executionFinalizer, final RunningExecutionsUpdaterThread updaterThread) throws ExecutorManagerException { this.azkProps = azkProps; this.commonMetrics = commonMetrics; this.executorLoader = executorLoader; this.apiGateway = apiGateway; this.runningExecutions = runningExecutions; this.activeExecutors = activeExecutors; this.updaterStage = updaterStage; this.executionFinalizer = executionFinalizer; this.updaterThread = updaterThread; this.maxConcurrentRunsOneFlow = ExecutorUtils.getMaxConcurrentRunsOneFlow(azkProps); this.maxConcurrentRunsPerFlowMap = ExecutorUtils.getMaxConcurentRunsPerFlowMap(azkProps); this.executorInfoRefresherService = createExecutorInfoRefresherService(); } void initialize() throws ExecutorManagerException { if (this.initialized) { return; } this.initialized = true; this.setupExecutors(); this.loadRunningExecutions(); this.queuedFlows = new QueuedExecutions( this.azkProps.getLong(ConfigurationKeys.WEBSERVER_QUEUE_SIZE, 100000)); this.loadQueuedFlows(); this.cacheDir = new File(this.azkProps.getString("cache.directory", "cache")); // TODO extract QueueProcessor as a separate class, move all of this into it setupExecutorComparatorWeightsMap(); setupExecutorFilterList(); this.queueProcessor = setupQueueProcessor(); } @Override public void start() throws ExecutorManagerException { initialize(); this.updaterThread.start(); this.queueProcessor.start(); } private QueueProcessorThread setupQueueProcessor() { return new QueueProcessorThread( this.azkProps.getBoolean(Constants.ConfigurationKeys.QUEUEPROCESSING_ENABLED, true), this.azkProps.getLong(Constants.ConfigurationKeys.ACTIVE_EXECUTOR_REFRESH_IN_MS, 50000), this.azkProps.getInt( Constants.ConfigurationKeys.ACTIVE_EXECUTOR_REFRESH_IN_NUM_FLOW, 5), this.azkProps.getInt(
Constants.ConfigurationKeys.MAX_DISPATCHING_ERRORS_PERMITTED, this.activeExecutors.getAll().size()), this.sleepAfterDispatchFailure); } private void setupExecutorComparatorWeightsMap() { // initialize comparator feature weights for executor selector from azkaban.properties final Map<String, String> compListStrings = this.azkProps .getMapByPrefix(ConfigurationKeys.EXECUTOR_SELECTOR_COMPARATOR_PREFIX); if (compListStrings != null) { this.comparatorWeightsMap = new TreeMap<>(); for (final Map.Entry<String, String> entry : compListStrings.entrySet()) { this.comparatorWeightsMap.put(entry.getKey(), Integer.valueOf(entry.getValue())); } } } private void setupExecutorFilterList() { // initialize hard filters for executor selector from azkaban.properties final String filters = this.azkProps .getString(ConfigurationKeys.EXECUTOR_SELECTOR_FILTERS, ""); if (filters != null) { this.filterList = Arrays.asList(StringUtils.split(filters, ",")); } } private ExecutorService createExecutorInfoRefresherService() { return Executors.newFixedThreadPool(this.azkProps.getInt( ConfigurationKeys.EXECUTORINFO_REFRESH_MAX_THREADS, 5)); } /** * {@inheritDoc} * * @see azkaban.executor.ExecutorManagerAdapter#setupExecutors() */ @Override public void setupExecutors() throws ExecutorManagerException { checkMultiExecutorMode(); this.activeExecutors.setupExecutors(); } // TODO Enforced for now to ensure that users migrate to multi-executor mode knowingly. // TODO Remove this once confident enough that all active users have already updated to some // version new enough to have this change - for example after 1 year has passed. // TODO Then also delete ConfigurationKeys.USE_MULTIPLE_EXECUTORS. @Deprecated private void checkMultiExecutorMode() { if (!this.azkProps.getBoolean(Constants.ConfigurationKeys.USE_MULTIPLE_EXECUTORS, false)) { throw new IllegalArgumentException( Constants.ConfigurationKeys.USE_MULTIPLE_EXECUTORS + " must be true.
Single executor mode is not supported any more."); } } /** * Refresh Executor stats for all the active executors in this executorManager */ private void refreshExecutors() { final List<Pair<Executor, Future<ExecutorInfo>>> futures = new ArrayList<>(); for (final Executor executor : this.activeExecutors.getAll()) { // execute each executorInfo refresh task to fetch final Future<ExecutorInfo> fetchExecutionInfo = this.executorInfoRefresherService.submit( () -> this.apiGateway.callForJsonType(executor.getHost(), executor.getPort(), "/serverStatistics", null, ExecutorInfo.class)); futures.add(new Pair<>(executor, fetchExecutionInfo)); } boolean wasSuccess = true; for (final Pair<Executor, Future<ExecutorInfo>> refreshPair : futures) { final Executor executor = refreshPair.getFirst(); executor.setExecutorInfo(null); // invalidate cached ExecutorInfo try { // max 5 secs final ExecutorInfo executorInfo = refreshPair.getSecond().get(5, TimeUnit.SECONDS); // executorInfo is null if the response was empty executor.setExecutorInfo(executorInfo); logger.info(String.format( "Successfully refreshed executor: %s with executor info : %s", executor, executorInfo)); } catch (final TimeoutException e) { wasSuccess = false; logger.error("Timed out while waiting for ExecutorInfo refresh for " + executor, e); } catch (final Exception e) { wasSuccess = false; logger.error("Failed to update ExecutorInfo for executor : " + executor, e); } } // update is successful for all executors if (wasSuccess) { this.lastSuccessfulExecutorInfoRefresh = System.currentTimeMillis(); } } /** * @see azkaban.executor.ExecutorManagerAdapter#disableQueueProcessorThread() */ @Override public void disableQueueProcessorThread() { this.queueProcessor.setActive(false); } /** * @see azkaban.executor.ExecutorManagerAdapter#enableQueueProcessorThread() */ @Override public void enableQueueProcessorThread() { this.queueProcessor.setActive(true); } public State getQueueProcessorThreadState() { return this.queueProcessor.getState(); } /** * Returns the state of the QueueProcessor: false if no flow is being dispatched, true if flows * are being dispatched as expected */ public boolean isQueueProcessorThreadActive() { return this.queueProcessor.isActive(); } /** * Returns the time of the last successful ExecutorInfo refresh across all active executors */ public long getLastSuccessfulExecutorInfoRefresh() { return this.lastSuccessfulExecutorInfoRefresh; } /** * Get currently supported Comparators available to use via azkaban.properties */ public Set<String> getAvailableExecutorComparatorNames() { return ExecutorComparator.getAvailableComparatorNames(); } /** * Get currently supported filters available to use via azkaban.properties */ public Set<String> getAvailableExecutorFilterNames() { return ExecutorFilter.getAvailableFilterNames(); } @Override public State getExecutorManagerThreadState() { return this.updaterThread.getState(); } public String getExecutorThreadStage() { return this.updaterStage.get(); } @Override public boolean isExecutorManagerThreadActive() { return this.updaterThread.isAlive(); } @Override public long getLastExecutorManagerThreadCheckTime() { return this.updaterThread.getLastThreadCheckTime(); } @Override public Collection<Executor> getAllActiveExecutors() { return Collections.unmodifiableCollection(this.activeExecutors.getAll()); } /** * {@inheritDoc} * * @see azkaban.executor.ExecutorManagerAdapter#fetchExecutor(int) */ @Override public Executor fetchExecutor(final int executorId) throws ExecutorManagerException { for (final Executor executor :
this.activeExecutors.getAll()) { if (executor.getId() == executorId) { return executor; } } return this.executorLoader.fetchExecutor(executorId); } @Override public Set<String> getPrimaryServerHosts() { // Only one for now. More probably later. final HashSet<String> ports = new HashSet<>(); for (final Executor executor : this.activeExecutors.getAll()) { ports.add(executor.getHost() + ":" + executor.getPort()); } return ports; } @Override public Set<String> getAllActiveExecutorServerHosts() { // Includes non-primary servers/hosts final HashSet<String> ports = new HashSet<>(); for (final Executor executor : this.activeExecutors.getAll()) { ports.add(executor.getHost() + ":" + executor.getPort()); } // include executors which were initially active and still have flows running for (final Pair<ExecutionReference, ExecutableFlow> running : this.runningExecutions.get() .values()) { final ExecutionReference ref = running.getFirst(); if (ref.getExecutor().isPresent()) { final Executor executor = ref.getExecutor().get(); ports.add(executor.getHost() + ":" + executor.getPort()); } } return ports; } private void loadRunningExecutions() throws ExecutorManagerException { logger.info("Loading running flows from database..."); final Map<Integer, Pair<ExecutionReference, ExecutableFlow>> activeFlows = this.executorLoader .fetchActiveFlows(); logger.info("Loaded " + activeFlows.size() + " running flows"); this.runningExecutions.get().putAll(activeFlows); } /* * load queued flows, i.e. flows with an active_execution_reference that are not assigned to * any executor */ private void loadQueuedFlows() throws ExecutorManagerException { final List<Pair<ExecutionReference, ExecutableFlow>> retrievedExecutions = this.executorLoader.fetchQueuedFlows(); if (retrievedExecutions != null) { for (final Pair<ExecutionReference, ExecutableFlow> pair : retrievedExecutions) { this.queuedFlows.enqueue(pair.getSecond(), pair.getFirst()); } } } /** * Gets a list of all the active (running flows and non-dispatched flows) executions for a given * project and flow {@inheritDoc}. Results must be sorted, as this is assumed when setting up the * pipelined execution id. * * @see azkaban.executor.ExecutorManagerAdapter#getRunningFlows(int, java.lang.String) */ @Override public List<Integer> getRunningFlows(final int projectId, final String flowId) { final List<Integer> executionIds = new ArrayList<>(); executionIds.addAll(getRunningFlowsHelper(projectId, flowId, this.queuedFlows.getAllEntries())); // it's possible an execution is the runningCandidate, i.e. in the dispatching state and present in neither queuedFlows nor runningFlows, // so check the runningCandidate as well.
if (this.runningCandidate != null) { executionIds .addAll( getRunningFlowsHelper(projectId, flowId, Lists.newArrayList(this.runningCandidate))); } executionIds.addAll(getRunningFlowsHelper(projectId, flowId, this.runningExecutions.get().values())); Collections.sort(executionIds); return executionIds; } /* Helper method for getRunningFlows */ private List<Integer> getRunningFlowsHelper(final int projectId, final String flowId, final Collection<Pair<ExecutionReference, ExecutableFlow>> collection) { final List<Integer> executionIds = new ArrayList<>(); for (final Pair<ExecutionReference, ExecutableFlow> ref : collection) { if (ref.getSecond().getFlowId().equals(flowId) && ref.getSecond().getProjectId() == projectId) { executionIds.add(ref.getFirst().getExecId()); } } return executionIds; } /** * {@inheritDoc} * * @see azkaban.executor.ExecutorManagerAdapter#getActiveFlowsWithExecutor() */ @Override public List<Pair<ExecutableFlow, Optional<Executor>>> getActiveFlowsWithExecutor() throws IOException { final List<Pair<ExecutableFlow, Optional<Executor>>> flows = new ArrayList<>(); getActiveFlowsWithExecutorHelper(flows, this.queuedFlows.getAllEntries()); getActiveFlowsWithExecutorHelper(flows, this.runningExecutions.get().values()); return flows; } /* Helper method for getActiveFlowsWithExecutor */ private void getActiveFlowsWithExecutorHelper( final List<Pair<ExecutableFlow, Optional<Executor>>> flows, final Collection<Pair<ExecutionReference, ExecutableFlow>> collection) { for (final Pair<ExecutionReference, ExecutableFlow> ref : collection) { flows.add(new Pair<>(ref.getSecond(), ref .getFirst().getExecutor())); } } /** * Checks whether the given flow has any active (running, non-dispatched) executions {@inheritDoc} * * @see azkaban.executor.ExecutorManagerAdapter#isFlowRunning(int, java.lang.String) */ @Override public boolean isFlowRunning(final int projectId, final String flowId) { boolean isRunning = false; isRunning = isRunning || isFlowRunningHelper(projectId, flowId, this.queuedFlows.getAllEntries()); isRunning = isRunning || isFlowRunningHelper(projectId, flowId, this.runningExecutions.get().values()); return isRunning; } /* Search for a running flow in a collection */ private boolean isFlowRunningHelper(final int projectId, final String flowId, final Collection<Pair<ExecutionReference, ExecutableFlow>> collection) { for (final Pair<ExecutionReference, ExecutableFlow> ref : collection) { if (ref.getSecond().getProjectId() == projectId && ref.getSecond().getFlowId().equals(flowId)) { return true; } } return false; } /** * Fetch ExecutableFlow from database {@inheritDoc} * * @see azkaban.executor.ExecutorManagerAdapter#getExecutableFlow(int) */ @Override public ExecutableFlow getExecutableFlow(final int execId) throws ExecutorManagerException { return this.executorLoader.fetchExecutableFlow(execId); } /** * Get all active (running, non-dispatched) flows * * {@inheritDoc} * * @see azkaban.executor.ExecutorManagerAdapter#getRunningFlows() */ @Override public List<ExecutableFlow> getRunningFlows() { final ArrayList<ExecutableFlow> flows = new ArrayList<>(); getActiveFlowHelper(flows, this.queuedFlows.getAllEntries()); getActiveFlowHelper(flows, this.runningExecutions.get().values()); return flows; } /* * Helper method to get all running flows from a Pair<ExecutionReference, * ExecutableFlow> collection */ private void getActiveFlowHelper(final ArrayList<ExecutableFlow> flows, final Collection<Pair<ExecutionReference,
ExecutableFlow> ref : collection) { flows.add(ref.getSecond()); } } /** * Get execution ids of all running (unfinished) flows */ public String getRunningFlowIds() { final List<Integer> allIds = new ArrayList<>(); getRunningFlowsIdsHelper(allIds, this.queuedFlows.getAllEntries()); getRunningFlowsIdsHelper(allIds, this.runningExecutions.get().values()); Collections.sort(allIds); return allIds.toString(); } /** * Get execution ids of all non-dispatched flows */ public String getQueuedFlowIds() { final List<Integer> allIds = new ArrayList<>(); getRunningFlowsIdsHelper(allIds, this.queuedFlows.getAllEntries()); Collections.sort(allIds); return allIds.toString(); } /** * Get the number of non-dispatched flows. {@inheritDoc} */ @Override public long getQueuedFlowSize() { return this.queuedFlows.size(); } /* Helper method to collect the flow ids of all running flows */ private void getRunningFlowsIdsHelper(final List<Integer> allIds, final Collection<Pair<ExecutionReference, ExecutableFlow>> collection) { for (final Pair<ExecutionReference, ExecutableFlow> ref : collection) { allIds.add(ref.getSecond().getExecutionId()); } } @Override public List<ExecutableFlow> getRecentlyFinishedFlows() { List<ExecutableFlow> flows = new ArrayList<>(); try { flows = this.executorLoader.fetchRecentlyFinishedFlows( RECENTLY_FINISHED_LIFETIME); } catch (final ExecutorManagerException e) { // TODO jamiesjc: fix error handling. logger.error("Failed to fetch recently finished flows.", e); } return flows; } @Override public List<ExecutableFlow> getExecutableFlows(final int skip, final int size) throws ExecutorManagerException { final List<ExecutableFlow> flows = this.executorLoader.fetchFlowHistory(skip, size); return flows; } @Override public List<ExecutableFlow> getExecutableFlows(final String flowIdContains, final int skip, final int size) throws ExecutorManagerException { final List<ExecutableFlow> flows = this.executorLoader.fetchFlowHistory(null, '%' + flowIdContains + '%', null, 0, -1, -1, skip, size); return flows; } @Override public List<ExecutableFlow> getExecutableFlows(final String projContain, final String flowContain, final String userContain, final int status, final long begin, final long end, final int skip, final int size) throws ExecutorManagerException { final List<ExecutableFlow> flows = this.executorLoader.fetchFlowHistory(projContain, flowContain, userContain, status, begin, end, skip, size); return flows; } @Override public List<ExecutableJobInfo> getExecutableJobs(final Project project, final String jobId, final int skip, final int size) throws ExecutorManagerException { final List<ExecutableJobInfo> nodes = this.executorLoader.fetchJobHistory(project.getId(), jobId, skip, size); return nodes; } @Override public int getNumberOfJobExecutions(final Project project, final String jobId) throws ExecutorManagerException { return this.executorLoader.fetchNumExecutableNodes(project.getId(), jobId); } @Override public LogData getExecutableFlowLog(final ExecutableFlow exFlow, final int offset, final int length) throws ExecutorManagerException { final Pair<ExecutionReference, ExecutableFlow> pair = this.runningExecutions.get().get(exFlow.getExecutionId()); if (pair != null) { final Pair<String, String> typeParam = new Pair<>("type", "flow"); final Pair<String, String> offsetParam = new Pair<>("offset", String.valueOf(offset)); final Pair<String, String> lengthParam = new Pair<>("length", String.valueOf(length)); @SuppressWarnings("unchecked") final Map<String, Object> result =
this.apiGateway.callWithReference(pair.getFirst(), ConnectorParams.LOG_ACTION, typeParam, offsetParam, lengthParam); return LogData.createLogDataFromObject(result); } else { final LogData value = this.executorLoader.fetchLogs(exFlow.getExecutionId(), "", 0, offset, length); return value; } } @Override public LogData getExecutionJobLog(final ExecutableFlow exFlow, final String jobId, final int offset, final int length, final int attempt) throws ExecutorManagerException { final Pair<ExecutionReference, ExecutableFlow> pair = this.runningExecutions.get().get(exFlow.getExecutionId()); if (pair != null) { final Pair<String, String> typeParam = new Pair<>("type", "job"); final Pair<String, String> jobIdParam = new Pair<>("jobId", jobId); final Pair<String, String> offsetParam = new Pair<>("offset", String.valueOf(offset)); final Pair<String, String> lengthParam = new Pair<>("length", String.valueOf(length)); final Pair<String, String> attemptParam = new Pair<>("attempt", String.valueOf(attempt)); @SuppressWarnings("unchecked") final Map<String, Object> result = this.apiGateway.callWithReference(pair.getFirst(), ConnectorParams.LOG_ACTION, typeParam, jobIdParam, offsetParam, lengthParam, attemptParam); return LogData.createLogDataFromObject(result); } else { final LogData value = this.executorLoader.fetchLogs(exFlow.getExecutionId(), jobId, attempt, offset, length); return value; } } @Override public List<Object> getExecutionJobStats(final ExecutableFlow exFlow, final String jobId, final int attempt) throws ExecutorManagerException { final Pair<ExecutionReference, ExecutableFlow> pair = this.runningExecutions.get().get(exFlow.getExecutionId()); if (pair == null) { return this.executorLoader.fetchAttachments(exFlow.getExecutionId(), jobId, attempt); } final Pair<String, String> jobIdParam = new Pair<>("jobId", jobId); final Pair<String, String> attemptParam = new Pair<>("attempt", String.valueOf(attempt)); @SuppressWarnings("unchecked") final Map<String, Object> result = this.apiGateway.callWithReference(pair.getFirst(), ConnectorParams.ATTACHMENTS_ACTION, jobIdParam, attemptParam); @SuppressWarnings("unchecked") final List<Object> jobStats = (List<Object>) result .get("attachments"); return jobStats; } /** * If the Resource Manager and Job History server urls are configured, find all the * Hadoop/Spark application ids present in the Azkaban job's log and then construct the url to * job logs in the Hadoop/Spark server for each application id found. Application ids are * returned in the order they appear in the Azkaban job log. * * @param exFlow The executable flow. * @param jobId The job id. * @param attempt The job execution attempt. 
* @return The map of (application id, job log url) */ @Override public Map<String, String> getExternalJobLogUrls(final ExecutableFlow exFlow, final String jobId, final int attempt) { final Map<String, String> jobLogUrlsByAppId = new LinkedHashMap<>(); if (!this.azkProps.containsKey(ConfigurationKeys.RESOURCE_MANAGER_JOB_URL) || !this.azkProps.containsKey(ConfigurationKeys.HISTORY_SERVER_JOB_URL) || !this.azkProps.containsKey(ConfigurationKeys.SPARK_HISTORY_SERVER_JOB_URL)) { return jobLogUrlsByAppId; } final Set<String> applicationIds = getApplicationIds(exFlow, jobId, attempt); for (final String applicationId : applicationIds) { final String jobLogUrl = ExecutionControllerUtils .createJobLinkUrl(exFlow, jobId, applicationId, this.azkProps); if (jobLogUrl != null) { jobLogUrlsByAppId.put(applicationId, jobLogUrl); } } return jobLogUrlsByAppId; } /** * Find all the Hadoop/Spark application ids present in the Azkaban job log. When iterating * over the set returned by this method, the application ids are in the same order they appear * in the log. * * @param exFlow The executable flow. * @param jobId The job id. * @param attempt The job execution attempt. * @return The application ids found. */ Set<String> getApplicationIds(final ExecutableFlow exFlow, final String jobId, final int attempt) { final Set<String> applicationIds = new LinkedHashSet<>(); int offset = 0; try { LogData data = getExecutionJobLog(exFlow, jobId, offset, 50000, attempt); while (data != null && data.getLength() > 0) { this.logger.info("Get application ID for execution " + exFlow.getExecutionId() + ", job" + " " + jobId + ", attempt " + attempt + ", data offset " + offset); String logData = data.getData(); final int indexOfLastSpace = logData.lastIndexOf(' '); final int indexOfLastTab = logData.lastIndexOf('\t'); final int indexOfLastEoL = logData.lastIndexOf('\n'); final int indexOfLastDelim = Math .max(indexOfLastEoL, Math.max(indexOfLastSpace, indexOfLastTab)); if (indexOfLastDelim > -1) { // index + 1 to avoid looping forever if indexOfLastDelim is zero logData = logData.substring(0, indexOfLastDelim + 1); } applicationIds.addAll(ExecutionControllerUtils.findApplicationIdsFromLog(logData)); offset = data.getOffset() + logData.length(); data = getExecutionJobLog(exFlow, jobId, offset, 50000, attempt); } } catch (final ExecutorManagerException e) { this.logger.error("Failed to get application ID for execution " + exFlow.getExecutionId() + ", job " + jobId + ", attempt " + attempt + ", data offset " + offset, e); } return applicationIds; } /** * If the flow was dispatched to an executor, cancel it by calling the executor; else if the flow is still in * the queue, remove it from the queue and finalize it {@inheritDoc} * * @see azkaban.executor.ExecutorManagerAdapter#cancelFlow(azkaban.executor.ExecutableFlow, * java.lang.String) */ @Override public void cancelFlow(final ExecutableFlow exFlow, final String userId) throws ExecutorManagerException { synchronized (exFlow) { if (this.runningExecutions.get().containsKey(exFlow.getExecutionId())) { final Pair<ExecutionReference, ExecutableFlow> pair = this.runningExecutions.get().get(exFlow.getExecutionId()); this.apiGateway.callWithReferenceByUser(pair.getFirst(), ConnectorParams.CANCEL_ACTION, userId); } else if (this.queuedFlows.hasExecution(exFlow.getExecutionId())) { this.queuedFlows.dequeue(exFlow.getExecutionId()); this.executionFinalizer .finalizeFlow(exFlow, "Cancelled before dispatching to executor", null); } else { throw new ExecutorManagerException("Execution " + exFlow.getExecutionId() + " of 
flow " + exFlow.getFlowId() + " isn't running."); } } } @Override public void resumeFlow(final ExecutableFlow exFlow, final String userId) throws ExecutorManagerException { synchronized (exFlow) { final Pair<ExecutionReference, ExecutableFlow> pair = this.runningExecutions.get().get(exFlow.getExecutionId()); if (pair == null) { throw new ExecutorManagerException("Execution " + exFlow.getExecutionId() + " of flow " + exFlow.getFlowId() + " isn't running."); } this.apiGateway .callWithReferenceByUser(pair.getFirst(), ConnectorParams.RESUME_ACTION, userId); } } @Override public void pauseFlow(final ExecutableFlow exFlow, final String userId) throws ExecutorManagerException { synchronized (exFlow) { final Pair<ExecutionReference, ExecutableFlow> pair = this.runningExecutions.get().get(exFlow.getExecutionId()); if (pair == null) { throw new ExecutorManagerException("Execution " + exFlow.getExecutionId() + " of flow " + exFlow.getFlowId() + " isn't running."); } this.apiGateway .callWithReferenceByUser(pair.getFirst(), ConnectorParams.PAUSE_ACTION, userId); } } @Override public void retryFailures(final ExecutableFlow exFlow, final String userId) throws ExecutorManagerException { modifyExecutingJobs(exFlow, ConnectorParams.MODIFY_RETRY_FAILURES, userId); } @SuppressWarnings("unchecked") private Map<String, Object> modifyExecutingJobs(final ExecutableFlow exFlow, final String command, final String userId, final String... jobIds) throws ExecutorManagerException { synchronized (exFlow) { final Pair<ExecutionReference, ExecutableFlow> pair = this.runningExecutions.get().get(exFlow.getExecutionId()); if (pair == null) { throw new ExecutorManagerException("Execution " + exFlow.getExecutionId() + " of flow " + exFlow.getFlowId() + " isn't running."); } final Map<String, Object> response; if (jobIds != null && jobIds.length > 0) { for (final String jobId : jobIds) { if (!jobId.isEmpty()) { final ExecutableNode node = exFlow.getExecutableNode(jobId); if (node == null) { throw new ExecutorManagerException("Job " + jobId + " doesn't exist in execution " + exFlow.getExecutionId() + "."); } } } final String ids = StringUtils.join(jobIds, ','); response = this.apiGateway.callWithReferenceByUser(pair.getFirst(), ConnectorParams.MODIFY_EXECUTION_ACTION, userId, new Pair<>( ConnectorParams.MODIFY_EXECUTION_ACTION_TYPE, command), new Pair<>(ConnectorParams.MODIFY_JOBS_LIST, ids)); } else { response = this.apiGateway.callWithReferenceByUser(pair.getFirst(), ConnectorParams.MODIFY_EXECUTION_ACTION, userId, new Pair<>( ConnectorParams.MODIFY_EXECUTION_ACTION_TYPE, command)); } return response; } } @Override public String submitExecutableFlow(final ExecutableFlow exflow, final String userId) throws ExecutorManagerException { if (exflow.isLocked()) { // Skip execution for locked flows. final String message = String.format("Flow %s for project %s is locked.", exflow.getId(), exflow.getProjectName()); logger.info(message); return message; } final String exFlowKey = exflow.getProjectName() + "." + exflow.getId() + ".submitFlow"; // using project and flow name to prevent a race condition when the same flow is submitted by the API and the scheduler at the same time, // causing two identical flow submissions to enter this code. synchronized (exFlowKey.intern()) { final String flowId = exflow.getFlowId(); logger.info("Submitting execution flow " + flowId + " by " + userId); String message = ""; if (this.queuedFlows.isFull()) { message = String .format( "Failed to submit %s for project %s. 
Azkaban has overrun its webserver queue capacity", flowId, exflow.getProjectName()); logger.error(message); this.commonMetrics.markSubmitFlowFail(); } else { final int projectId = exflow.getProjectId(); exflow.setSubmitUser(userId); exflow.setStatus(Status.PREPARING); exflow.setSubmitTime(System.currentTimeMillis()); // Get collection of running flows given a project and a specific flow name final List<Integer> running = getRunningFlows(projectId, flowId); ExecutionOptions options = exflow.getExecutionOptions(); if (options == null) { options = new ExecutionOptions(); } if (options.getDisabledJobs() != null) { FlowUtils.applyDisabledJobs(options.getDisabledJobs(), exflow); } if (!running.isEmpty()) { final int maxConcurrentRuns = ExecutorUtils.getMaxConcurrentRunsForFlow( exflow.getProjectName(), flowId, this.maxConcurrentRunsOneFlow, this.maxConcurrentRunsPerFlowMap); if (running.size() > maxConcurrentRuns) { this.commonMetrics.markSubmitFlowSkip(); throw new ExecutorManagerException("Flow " + flowId + " has more than " + maxConcurrentRuns + " concurrent runs. Skipping", ExecutorManagerException.Reason.SkippedExecution); } else if (options.getConcurrentOption().equals( ExecutionOptions.CONCURRENT_OPTION_PIPELINE)) { Collections.sort(running); final Integer runningExecId = running.get(running.size() - 1); options.setPipelineExecutionId(runningExecId); message = "Flow " + flowId + " is already running with exec id " + runningExecId + ". Pipelining level " + options.getPipelineLevel() + ". \n"; } else if (options.getConcurrentOption().equals( ExecutionOptions.CONCURRENT_OPTION_SKIP)) { this.commonMetrics.markSubmitFlowSkip(); throw new ExecutorManagerException("Flow " + flowId + " is already running. Skipping execution.", ExecutorManagerException.Reason.SkippedExecution); } else { // The setting is to run anyway. message = "Flow " + flowId + " is already running with exec id " + StringUtils.join(running, ",") + ". Will execute concurrently. \n"; } } final boolean memoryCheck = !ProjectWhitelist.isProjectWhitelisted(exflow.getProjectId(), ProjectWhitelist.WhitelistType.MemoryCheck); options.setMemoryCheck(memoryCheck); // The exflow id is set by the loader. So it's unavailable until after // this call. this.executorLoader.uploadExecutableFlow(exflow); // We create an active flow reference in the datastore. If the upload // fails, we remove the reference. final ExecutionReference reference = new ExecutionReference(exflow.getExecutionId()); this.executorLoader.addActiveExecutableReference(reference); this.queuedFlows.enqueue(exflow, reference); message += "Execution queued successfully with exec id " + exflow.getExecutionId(); this.commonMetrics.markSubmitFlowSuccess(); } return message; } } @Override public Map<String, String> doRampActions(List<Map<String, Object>> rampActions) throws ExecutorManagerException { return this.executorLoader.doRampActions(rampActions); } /** * Manage servlet call for stats servlet in Azkaban execution server {@inheritDoc} * * @see azkaban.executor.ExecutorManagerAdapter#callExecutorStats(int, java.lang.String, * azkaban.utils.Pair[]) */ @Override public Map<String, Object> callExecutorStats(final int executorId, final String action, final Pair<String, String>... 
params) throws IOException, ExecutorManagerException { final Executor executor = fetchExecutor(executorId); final List<Pair<String, String>> paramList = new ArrayList<>(); // params may be null if (params != null) { paramList.addAll(Arrays.asList(params)); } paramList .add(new Pair<>(ConnectorParams.ACTION_PARAM, action)); return this.apiGateway.callForJsonObjectMap(executor.getHost(), executor.getPort(), "/stats", paramList); } @Override public Map<String, Object> callExecutorJMX(final String hostPort, final String action, final String mBean) throws IOException { final List<Pair<String, String>> paramList = new ArrayList<>(); paramList.add(new Pair<>(action, "")); if (mBean != null) { paramList.add(new Pair<>(ConnectorParams.JMX_MBEAN, mBean)); } final String[] hostPortSplit = hostPort.split(":"); return this.apiGateway.callForJsonObjectMap(hostPortSplit[0], Integer.valueOf(hostPortSplit[1]), "/jmx", paramList); } @Override public void shutdown() { if (null != this.queueProcessor) { this.queueProcessor.shutdown(); } if (null != this.updaterThread) { this.updaterThread.shutdown(); } } @Override public int getExecutableFlows(final int projectId, final String flowId, final int from, final int length, final List<ExecutableFlow> outputList) throws ExecutorManagerException { final List<ExecutableFlow> flows = this.executorLoader.fetchFlowHistory(projectId, flowId, from, length); outputList.addAll(flows); return this.executorLoader.fetchNumExecutableFlows(projectId, flowId); } @Override public List<ExecutableFlow> getExecutableFlows(final int projectId, final String flowId, final int from, final int length, final Status status) throws ExecutorManagerException { return this.executorLoader.fetchFlowHistory(projectId, flowId, from, length, status); } /** * Calls the executor to dispatch the flow, and updates the db and the in-memory state of the * executableFlow to assign the executor. */ private void dispatch(final ExecutionReference reference, final ExecutableFlow exflow, final Executor choosenExecutor) throws ExecutorManagerException { exflow.setUpdateTime(System.currentTimeMillis()); this.executorLoader.assignExecutor(choosenExecutor.getId(), exflow.getExecutionId()); try { this.apiGateway.callWithExecutable(exflow, choosenExecutor, ConnectorParams.EXECUTE_ACTION); } catch (final ExecutorManagerException ex) { logger.error("Rolling back executor assignment for execution id:" + exflow.getExecutionId(), ex); this.executorLoader.unassignExecutor(exflow.getExecutionId()); throw new ExecutorManagerException(ex); } reference.setExecutor(choosenExecutor); // move the flow from the queue to the running flows this.runningExecutions.get().put(exflow.getExecutionId(), new Pair<>(reference, exflow)); synchronized (this.runningExecutions.get()) { // Wake up RunningExecutionsUpdaterThread from wait() so that it will immediately check status // from executor(s). Normally flows will run at least some time and can't be cleaned up // immediately, so there will be another wait round (or many, actually), but for unit tests // this is significant to let them run quickly. 
this.runningExecutions.get().notifyAll(); } synchronized (this) { // wake up all internal waiting threads, too this.notifyAll(); } logger.info(String.format( "Successfully dispatched exec %d with error count %d", exflow.getExecutionId(), reference.getNumErrors())); } @VisibleForTesting void setSleepAfterDispatchFailure(final Duration sleepAfterDispatchFailure) { this.sleepAfterDispatchFailure = sleepAfterDispatchFailure; } /* * This thread is responsible for processing queued flows using the dispatcher and * making REST API calls to the executor server */ private class QueueProcessorThread extends Thread { private static final long QUEUE_PROCESSOR_WAIT_IN_MS = 1000; private final int maxDispatchingErrors; private final long activeExecutorRefreshWindowInMillisec; private final int activeExecutorRefreshWindowInFlows; private final Duration sleepAfterDispatchFailure; private volatile boolean shutdown = false; private volatile boolean isActive = true; public QueueProcessorThread(final boolean isActive, final long activeExecutorRefreshWindowInTime, final int activeExecutorRefreshWindowInFlows, final int maxDispatchingErrors, final Duration sleepAfterDispatchFailure) { setActive(isActive); this.maxDispatchingErrors = maxDispatchingErrors; this.activeExecutorRefreshWindowInFlows = activeExecutorRefreshWindowInFlows; this.activeExecutorRefreshWindowInMillisec = activeExecutorRefreshWindowInTime; this.sleepAfterDispatchFailure = sleepAfterDispatchFailure; this.setName("AzkabanWebServer-QueueProcessor-Thread"); } public boolean isActive() { return this.isActive; } public void setActive(final boolean isActive) { this.isActive = isActive; ExecutorManager.logger.info("QueueProcessorThread active turned " + this.isActive); } public void shutdown() { this.shutdown = true; this.interrupt(); } @Override public void run() { // Loop until QueueProcessorThread is shut down while (!this.shutdown) { synchronized (this) { try { // start processing the queue if active, otherwise wait for some time if (this.isActive) { processQueuedFlows(this.activeExecutorRefreshWindowInMillisec, this.activeExecutorRefreshWindowInFlows); } wait(QUEUE_PROCESSOR_WAIT_IN_MS); } catch (final Exception e) { ExecutorManager.logger.error( "QueueProcessorThread interrupted. Probably shutting down.", e); } } } } /* Method responsible for processing the non-dispatched flows */ private void processQueuedFlows(final long activeExecutorsRefreshWindow, final int maxContinuousFlowProcessed) throws InterruptedException, ExecutorManagerException { long lastExecutorRefreshTime = 0; int currentContinuousFlowProcessed = 0; while (isActive() && (ExecutorManager.this.runningCandidate = ExecutorManager.this.queuedFlows .fetchHead()) != null) { final ExecutionReference reference = ExecutorManager.this.runningCandidate.getFirst(); final ExecutableFlow exflow = ExecutorManager.this.runningCandidate.getSecond(); final long currentTime = System.currentTimeMillis(); // if we have dispatched more than maxContinuousFlowProcessed flows, or // it has been more than activeExecutorsRefreshWindow milliseconds since we // refreshed if (currentTime - lastExecutorRefreshTime > activeExecutorsRefreshWindow || currentContinuousFlowProcessed >= maxContinuousFlowProcessed) { // Refresh executorInfo for all activeExecutors refreshExecutors(); lastExecutorRefreshTime = currentTime; currentContinuousFlowProcessed = 0; } /** * <pre> * TODO: Workaround until we improve Filters to have a notion of GlobalSystemState. 
* Currently we try each queued flow once to infer a global busy state * Possible improvements:- * 1. Move system-level filters into refreshExecutors and sleep if we have all executors busy after refresh * 2. Implement GlobalSystemState in selector or in a third place to manage system filters. Basically * taking out all the filters which do not depend on the flow but are still part of the Selector. * Assumptions:- * 1. no one else except QueueProcessor is updating ExecutableFlow update time * 2. re-attempting a flow (which has been tried before) is taken to mean that all executors are busy * </pre> */ if (exflow.getUpdateTime() > lastExecutorRefreshTime) { // put back in the queue ExecutorManager.this.queuedFlows.enqueue(exflow, reference); ExecutorManager.this.runningCandidate = null; final long sleepInterval = activeExecutorsRefreshWindow - (currentTime - lastExecutorRefreshTime); // wait until the next executor refresh Thread.sleep(sleepInterval); } else { exflow.setUpdateTime(currentTime); // process the flow with the current snapshot of activeExecutors selectExecutorAndDispatchFlow(reference, exflow); ExecutorManager.this.runningCandidate = null; } // do not count failed flow processing (flows still in the queue) if (ExecutorManager.this.queuedFlows.getFlow(exflow.getExecutionId()) == null) { currentContinuousFlowProcessed++; } } } /* process a flow with a snapshot of the available Executors */ private void selectExecutorAndDispatchFlow(final ExecutionReference reference, final ExecutableFlow exflow) throws ExecutorManagerException { final Set<Executor> remainingExecutors = new HashSet<>( ExecutorManager.this.activeExecutors.getAll()); Throwable lastError; synchronized (exflow) { do { final Executor selectedExecutor = selectExecutor(exflow, remainingExecutors); if (selectedExecutor == null) { ExecutorManager.this.commonMetrics.markDispatchFail(); handleNoExecutorSelectedCase(reference, exflow); // RE-QUEUED - exit return; } else { try { dispatch(reference, exflow, selectedExecutor); ExecutorManager.this.commonMetrics.markDispatchSuccess(); // SUCCESS - exit return; } catch (final ExecutorManagerException e) { lastError = e; logFailedDispatchAttempt(reference, exflow, selectedExecutor, e); ExecutorManager.this.commonMetrics.markDispatchFail(); reference.setNumErrors(reference.getNumErrors() + 1); // FAILED ATTEMPT - try other executors except selectedExecutor updateRemainingExecutorsAndSleep(remainingExecutors, selectedExecutor); } } } while (reference.getNumErrors() < this.maxDispatchingErrors); // GAVE UP DISPATCHING final String message = "Failed to dispatch queued execution " + exflow.getId() + " because " + "reached " + ConfigurationKeys.MAX_DISPATCHING_ERRORS_PERMITTED + " (tried " + reference.getNumErrors() + " executors)"; ExecutorManager.logger.error(message); ExecutorManager.this.executionFinalizer.finalizeFlow(exflow, message, lastError); } } private void updateRemainingExecutorsAndSleep(final Set<Executor> remainingExecutors, final Executor selectedExecutor) { remainingExecutors.remove(selectedExecutor); if (remainingExecutors.isEmpty()) { remainingExecutors.addAll(ExecutorManager.this.activeExecutors.getAll()); sleepAfterDispatchFailure(); } } private void sleepAfterDispatchFailure() { try { Thread.sleep(this.sleepAfterDispatchFailure.toMillis()); } catch (final InterruptedException e1) { ExecutorManager.logger.warn("Sleep after dispatch failure was interrupted - ignoring"); } } private void logFailedDispatchAttempt(final ExecutionReference reference, final ExecutableFlow exflow, final Executor 
selectedExecutor, final ExecutorManagerException e) { ExecutorManager.logger.warn(String.format( "Executor %s responded with exception for exec: %d", selectedExecutor, exflow.getExecutionId()), e); ExecutorManager.logger.info(String.format( "Failed dispatch attempt for exec %d with error count %d", exflow.getExecutionId(), reference.getNumErrors())); } /* Helper method to fetch the overriding Executor if the user has specified a valid one; otherwise return null */ private Executor getUserSpecifiedExecutor(final ExecutionOptions options, final int executionId) { Executor executor = null; if (options != null && options.getFlowParameters() != null && options.getFlowParameters().containsKey( ExecutionOptions.USE_EXECUTOR)) { try { final int executorId = Integer.valueOf(options.getFlowParameters().get( ExecutionOptions.USE_EXECUTOR)); executor = fetchExecutor(executorId); if (executor == null) { ExecutorManager.logger .warn(String .format( "User specified executor id: %d for execution id: %d is not active; looking up the db.", executorId, executionId)); executor = ExecutorManager.this.executorLoader.fetchExecutor(executorId); if (executor == null) { ExecutorManager.logger .warn(String .format( "User specified executor id: %d for execution id: %d is missing from the db. Defaulting to availableExecutors", executorId, executionId)); } } } catch (final ExecutorManagerException ex) { ExecutorManager.logger.error("Failed to fetch user specified executor for exec_id = " + executionId, ex); } } return executor; } /* Choose an Executor for exflow among the available executors */ private Executor selectExecutor(final ExecutableFlow exflow, final Set<Executor> availableExecutors) { Executor choosenExecutor = getUserSpecifiedExecutor(exflow.getExecutionOptions(), exflow.getExecutionId()); // If no executor was specified by the admin if (choosenExecutor == null) { ExecutorManager.logger.info("Using dispatcher for execution id: " + exflow.getExecutionId()); final ExecutorSelector selector = new ExecutorSelector(ExecutorManager.this.filterList, ExecutorManager.this.comparatorWeightsMap); choosenExecutor = selector.getBest(availableExecutors, exflow); } return choosenExecutor; } private void handleNoExecutorSelectedCase(final ExecutionReference reference, final ExecutableFlow exflow) throws ExecutorManagerException { ExecutorManager.logger .info(String .format( "Reached handleNoExecutorSelectedCase stage for exec %d with error count %d", exflow.getExecutionId(), reference.getNumErrors())); // TODO: handle scenario where a high priority flow failing to get // scheduled can starve all others ExecutorManager.this.queuedFlows.enqueue(exflow, reference); } } }
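/*
 * Illustrative usage sketch (not part of the original source): how a caller drives the
 * submit -> queue -> dispatch lifecycle implemented above. The manager, flow, and user
 * variables are assumed to exist.
 *
 *   final String msg = manager.submitExecutableFlow(flow, user); // enqueues into queuedFlows
 *   // The QueueProcessorThread later takes the flow off the queue, picks an executor via
 *   // ExecutorSelector, and dispatch() moves it into runningExecutions.
 *   manager.cancelFlow(flow, user); // cancels on the executor, or dequeues and finalizes
 */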
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutorManagerAdapter.java
/* * Copyright 2014 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.project.Project; import azkaban.utils.FileIOUtils.LogData; import azkaban.utils.Pair; import java.io.IOException; import java.lang.Thread.State; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; public interface ExecutorManagerAdapter { public boolean isFlowRunning(int projectId, String flowId); public ExecutableFlow getExecutableFlow(int execId) throws ExecutorManagerException; public List<Integer> getRunningFlows(int projectId, String flowId); public List<ExecutableFlow> getRunningFlows(); public long getQueuedFlowSize(); /** * <pre> * Returns all running flows with their executors, plus all queued flows * Note: returns an empty list if there are no running or queued flows * </pre> */ public List<Pair<ExecutableFlow, Optional<Executor>>> getActiveFlowsWithExecutor() throws IOException; public List<ExecutableFlow> getRecentlyFinishedFlows(); public List<ExecutableFlow> getExecutableFlows(int skip, int size) throws ExecutorManagerException; public List<ExecutableFlow> getExecutableFlows(String flowIdContains, int skip, int size) throws ExecutorManagerException; public List<ExecutableFlow> getExecutableFlows(String projContain, String flowContain, String userContain, int status, long begin, long end, int skip, int size) throws ExecutorManagerException; public int getExecutableFlows(int projectId, String flowId, int from, int length, List<ExecutableFlow> outputList) throws ExecutorManagerException; public List<ExecutableFlow> getExecutableFlows(int projectId, String flowId, int from, int length, Status status) throws ExecutorManagerException; public List<ExecutableJobInfo> getExecutableJobs(Project project, String jobId, int skip, int size) throws ExecutorManagerException; public int getNumberOfJobExecutions(Project project, String jobId) throws ExecutorManagerException; public LogData getExecutableFlowLog(ExecutableFlow exFlow, int offset, int length) throws ExecutorManagerException; public LogData getExecutionJobLog(ExecutableFlow exFlow, String jobId, int offset, int length, int attempt) throws ExecutorManagerException; public List<Object> getExecutionJobStats(ExecutableFlow exflow, String jobId, int attempt) throws ExecutorManagerException; public Map<String, String> getExternalJobLogUrls(ExecutableFlow exFlow, String jobId, int attempt); public void cancelFlow(ExecutableFlow exFlow, String userId) throws ExecutorManagerException; public void resumeFlow(ExecutableFlow exFlow, String userId) throws ExecutorManagerException; public void pauseFlow(ExecutableFlow exFlow, String userId) throws ExecutorManagerException; public void retryFailures(ExecutableFlow exFlow, String userId) throws ExecutorManagerException; public String submitExecutableFlow(ExecutableFlow exflow, String userId) throws ExecutorManagerException; public Map<String, String> doRampActions(List<Map<String, 
Object>> rampAction) throws ExecutorManagerException; /** * Manage servlet call for stats servlet in Azkaban execution server. Action can take any of the * following values: <ul> <li>{@link azkaban.executor.ConnectorParams#STATS_SET_REPORTINGINTERVAL}</li> * <li>{@link azkaban.executor.ConnectorParams#STATS_SET_CLEANINGINTERVAL}</li> <li>{@link * azkaban.executor.ConnectorParams#STATS_SET_MAXREPORTERPOINTS}</li> <li>{@link * azkaban.executor.ConnectorParams#STATS_GET_ALLMETRICSNAME}</li> <li>{@link * azkaban.executor.ConnectorParams#STATS_GET_METRICHISTORY}</li> <li>{@link * azkaban.executor.ConnectorParams#STATS_SET_ENABLEMETRICS}</li> <li>{@link * azkaban.executor.ConnectorParams#STATS_SET_DISABLEMETRICS}</li> </ul> */ public Map<String, Object> callExecutorStats(int executorId, String action, Pair<String, String>... param) throws IOException, ExecutorManagerException; public Map<String, Object> callExecutorJMX(String hostPort, String action, String mBean) throws IOException; public void start() throws ExecutorManagerException; public void shutdown(); public Set<String> getAllActiveExecutorServerHosts(); public State getExecutorManagerThreadState(); public boolean isExecutorManagerThreadActive(); public long getLastExecutorManagerThreadCheckTime(); public Set<? extends String> getPrimaryServerHosts(); /** * Returns a collection of all the active executors maintained in activeExecutors */ public Collection<Executor> getAllActiveExecutors(); /** * <pre> * Fetch executor from executors with a given executorId * Note: * 1. throws an Exception in case of a SQL issue * 2. returns null when no executor is found with the given executorId * </pre> */ public Executor fetchExecutor(int executorId) throws ExecutorManagerException; /** * <pre> * Setup activeExecutors using azkaban.properties and database executors * Note: * 1. If azkaban.use.multiple.executors is set true, this method will * load all active executors * 2. In local mode, if a local executor is specified and it is missing from the db, * this method adds the local executor as active in the DB * 3. In local mode, if a local executor is specified and it is marked inactive in the db, * this method will mark the local executor as active in the DB * </pre> */ public void setupExecutors() throws ExecutorManagerException; /** * Enable flow dispatching in QueueProcessor */ public void enableQueueProcessorThread() throws ExecutorManagerException; /** * Disable flow dispatching in QueueProcessor */ public void disableQueueProcessorThread() throws ExecutorManagerException; }
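/*
 * Illustrative sketch (not part of the original source): calling the stats servlet through
 * this adapter. The adapter instance and executor id are assumed; the metric-name parameter
 * key below is hypothetical.
 *
 *   final Map<String, Object> names = adapter.callExecutorStats(
 *       1, ConnectorParams.STATS_GET_ALLMETRICSNAME);
 *   final Map<String, Object> history = adapter.callExecutorStats(
 *       1, ConnectorParams.STATS_GET_METRICHISTORY, new Pair<>("metricName", "someMetric"));
 */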
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutorManagerException.java
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; public class ExecutorManagerException extends Exception { private static final long serialVersionUID = 1L; private ExecutableFlow flow = null; private Reason reason = null; public ExecutorManagerException(final Exception e) { super(e); } public ExecutorManagerException(final String message) { super(message); } public ExecutorManagerException(final String message, final ExecutableFlow flow) { super(message); this.flow = flow; } public ExecutorManagerException(final String message, final Reason reason) { super(message); this.reason = reason; } public ExecutorManagerException(final String message, final Throwable cause) { super(message, cause); } public ExecutableFlow getExecutableFlow() { return this.flow; } public Reason getReason() { return this.reason; } public enum Reason { SkippedExecution } }
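/*
 * Illustrative sketch (not part of the original source): the Reason enum lets callers tell a
 * deliberately skipped execution apart from a genuine failure.
 *
 *   try {
 *     manager.submitExecutableFlow(flow, user);
 *   } catch (final ExecutorManagerException e) {
 *     if (e.getReason() == ExecutorManagerException.Reason.SkippedExecution) {
 *       // concurrent-run limit hit or CONCURRENT_OPTION_SKIP; not a real error
 *     }
 *   }
 */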
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutorManagerUpdaterStage.java
/* * Copyright 2018 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; /** * Holds value of execution update state (for monitoring). */ public class ExecutorManagerUpdaterStage { private volatile String value = "not started"; /** * Get the current value. * * @return the current value. */ public String get() { return value; } /** * Set the value. * * @param value the new value to set. */ public void set(String value) { this.value = value; } }
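/*
 * Illustrative usage sketch (not part of the original source): the updater thread records its
 * progress so that monitoring code can read a snapshot at any time.
 *
 *   final ExecutorManagerUpdaterStage stage = new ExecutorManagerUpdaterStage();
 *   stage.set("polling executors for updates");   // from the updater thread
 *   System.out.println("updater stage: " + stage.get()); // from a monitoring endpoint
 */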
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/ExecutorUtils.java
/* * Copyright 2019 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.Constants; import azkaban.Constants.ConfigurationKeys; import azkaban.utils.Pair; import azkaban.utils.Props; import com.google.common.base.Preconditions; import java.util.HashMap; import java.util.Map; /** Executor utility functions */ public class ExecutorUtils { /** Private constructor. */ private ExecutorUtils() {} /** @return the maximum number of concurrent runs for one flow */ public static int getMaxConcurrentRunsOneFlow(final Props azkProps) { // The default threshold is set to 30 for now, in case some users are affected. We may // decrease this number in the future, to better prevent DDoS attacks. return azkProps.getInt(ConfigurationKeys.MAX_CONCURRENT_RUNS_ONEFLOW, Constants.DEFAULT_MAX_ONCURRENT_RUNS_ONEFLOW); } /** @return a map of (project name, flow name) to the max number of concurrent runs for the flow. */ public static Map<Pair<String, String>, Integer> getMaxConcurentRunsPerFlowMap( final Props azkProps) { Map<Pair<String, String>, Integer> map = new HashMap<>(); String perFlowSettings = azkProps.get(ConfigurationKeys.CONCURRENT_RUNS_ONEFLOW_WHITELIST); if (perFlowSettings != null) { // settings for flows are delimited by semicolon, so split on semicolon to get the list // of flows with custom max concurrent runs String[] flowSettings = perFlowSettings.split(";"); for (String flowSetting: flowSettings) { // fields for a flow are delimited by comma, so split on comma to get the list of fields: // project name, flow name, and max number of concurrent runs. String[] setting = flowSetting.split(","); Preconditions.checkState(setting.length == 3, "setting value must be specified as <project name>,<flow name>,<max concurrent runs>"); Pair<String, String> key = new Pair<>(setting[0], setting[1]); Integer maxRuns = Integer.parseInt(setting[2]); map.put(key, maxRuns); } } return map; } /** * Get the maximum number of concurrent runs for the specified flow, using the value in * azkaban.concurrent.runs.oneflow.whitelist if explicitly specified for the flow, and otherwise * azkaban.max.concurrent.runs.oneflow or the default. * * @param projectName project name * @param flowName flow name * @param defaultMaxConcurrentRuns default max number of concurrent runs for one flow, if not * explicitly specified for the flow. * @param maxConcurrentRunsFlowMap map of (project, flow) to the max number of concurrent runs for * flows for which the value is explicitly specified via the whitelist. * @return the maximum number of concurrent runs for the flow. */ public static int getMaxConcurrentRunsForFlow(String projectName, String flowName, int defaultMaxConcurrentRuns, Map<Pair<String, String>, Integer> maxConcurrentRunsFlowMap) { return maxConcurrentRunsFlowMap.getOrDefault(new Pair<>(projectName, flowName), defaultMaxConcurrentRuns); } }
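/*
 * Illustrative sketch (the property value below is made up for the example; the real key is
 * whatever string ConfigurationKeys.CONCURRENT_RUNS_ONEFLOW_WHITELIST holds):
 *
 *   // azkaban.properties: <whitelist key>=projA,flowX,5;projB,flowY,2
 *   final Map<Pair<String, String>, Integer> limits =
 *       ExecutorUtils.getMaxConcurentRunsPerFlowMap(azkProps);
 *   final int defaultMax = ExecutorUtils.getMaxConcurrentRunsOneFlow(azkProps);
 *   ExecutorUtils.getMaxConcurrentRunsForFlow("projA", "flowX", defaultMax, limits); // -> 5
 *   ExecutorUtils.getMaxConcurrentRunsForFlow("projC", "flowZ", defaultMax, limits); // -> defaultMax
 */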
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/FetchActiveFlowDao.java
/* * Copyright 2017 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.db.DatabaseOperator; import azkaban.db.EncodingType; import azkaban.flow.Flow; import azkaban.project.Project; import azkaban.utils.GZIPUtils; import azkaban.utils.Pair; import com.google.common.annotations.VisibleForTesting; import java.io.IOException; import java.sql.ResultSet; import java.sql.SQLException; import java.util.Collections; import java.util.HashMap; import java.util.Map; import javax.inject.Inject; import javax.inject.Singleton; import org.apache.commons.dbutils.ResultSetHandler; import org.apache.log4j.Logger; @Singleton public class FetchActiveFlowDao { private static final Logger logger = Logger.getLogger(FetchActiveFlowDao.class); private final DatabaseOperator dbOperator; @Inject public FetchActiveFlowDao(final DatabaseOperator dbOperator) { this.dbOperator = dbOperator; } private static Pair<ExecutionReference, ExecutableFlow> getExecutableFlowHelper( final ResultSet rs) throws SQLException { final int id = rs.getInt("exec_id"); final int encodingType = rs.getInt("enc_type"); final byte[] data = rs.getBytes("flow_data"); final int status = rs.getInt("status"); if (data == null) { logger.warn("Execution id " + id + " has flow_data = null. To clean up, update status to " + "FAILED manually, e.g. 
" + "SET status = " + Status.FAILED.getNumVal() + " WHERE id = " + id); } else { final EncodingType encType = EncodingType.fromInteger(encodingType); final ExecutableFlow exFlow; try { exFlow = ExecutableFlow.createExecutableFlow( GZIPUtils.transformBytesToObject(data, encType), Status.fromInteger(status)); } catch (final IOException e) { throw new SQLException("Error retrieving flow data " + id, e); } return getPairWithExecutorInfo(rs, exFlow); } return null; } private static Pair<ExecutionReference, ExecutableFlow> getPairWithExecutorInfo(final ResultSet rs, final ExecutableFlow exFlow) throws SQLException { final int executorId = rs.getInt("executorId"); final String host = rs.getString("host"); final int port = rs.getInt("port"); final Executor executor; if (host == null) { logger.warn("Executor id " + executorId + " (on execution " + exFlow.getExecutionId() + ") wasn't found"); executor = null; } else { final boolean executorStatus = rs.getBoolean("executorStatus"); executor = new Executor(executorId, host, port, executorStatus); } final ExecutionReference ref = new ExecutionReference(exFlow.getExecutionId(), executor); return new Pair<>(ref, exFlow); } private static Pair<ExecutionReference, ExecutableFlow> getExecutableFlowMetadataHelper( final ResultSet rs) throws SQLException { final Flow flow = new Flow(rs.getString("flow_id")); final Project project = new Project(rs.getInt("project_id"), null); project.setVersion(rs.getInt("version")); final ExecutableFlow exFlow = new ExecutableFlow(project, flow); exFlow.setExecutionId(rs.getInt("exec_id")); exFlow.setStatus(Status.fromInteger(rs.getInt("status"))); exFlow.setSubmitTime(rs.getLong("submit_time")); exFlow.setStartTime(rs.getLong("start_time")); exFlow.setEndTime(rs.getLong("end_time")); exFlow.setSubmitUser(rs.getString("submit_user")); return getPairWithExecutorInfo(rs, exFlow); } /** * Fetch flows that are not in finished status, including both dispatched and non-dispatched * flows. * * @return unfinished flows map * @throws ExecutorManagerException the executor manager exception */ Map<Integer, Pair<ExecutionReference, ExecutableFlow>> fetchUnfinishedFlows() throws ExecutorManagerException { try { return this.dbOperator.query(FetchActiveExecutableFlows.FETCH_UNFINISHED_EXECUTABLE_FLOWS, new FetchActiveExecutableFlows()); } catch (final SQLException e) { throw new ExecutorManagerException("Error fetching unfinished flows", e); } } /** * Fetch unfinished flows similar to {@link #fetchUnfinishedFlows}, excluding flow data. * * @return unfinished flows map * @throws ExecutorManagerException the executor manager exception */ public Map<Integer, Pair<ExecutionReference, ExecutableFlow>> fetchUnfinishedFlowsMetadata() throws ExecutorManagerException { try { return this.dbOperator.query(FetchUnfinishedFlowsMetadata.FETCH_UNFINISHED_FLOWS_METADATA, new FetchUnfinishedFlowsMetadata()); } catch (final SQLException e) { throw new ExecutorManagerException("Error fetching unfinished flows metadata", e); } } /** * Fetch flows that are dispatched and not yet finished. 
* * @return active flows map * @throws ExecutorManagerException the executor manager exception */ Map<Integer, Pair<ExecutionReference, ExecutableFlow>> fetchActiveFlows() throws ExecutorManagerException { try { return this.dbOperator.query(FetchActiveExecutableFlows.FETCH_ACTIVE_EXECUTABLE_FLOWS, new FetchActiveExecutableFlows()); } catch (final SQLException e) { throw new ExecutorManagerException("Error fetching active flows", e); } } /** * Fetch the flow that is dispatched and not yet finished by execution id. * * @return active flow pair * @throws ExecutorManagerException the executor manager exception */ Pair<ExecutionReference, ExecutableFlow> fetchActiveFlowByExecId(final int execId) throws ExecutorManagerException { try { return this.dbOperator.query(FetchActiveExecutableFlow .FETCH_ACTIVE_EXECUTABLE_FLOW_BY_EXEC_ID, new FetchActiveExecutableFlow(), execId); } catch (final SQLException e) { throw new ExecutorManagerException("Error fetching active flow by exec id" + execId, e); } } @VisibleForTesting static class FetchActiveExecutableFlows implements ResultSetHandler<Map<Integer, Pair<ExecutionReference, ExecutableFlow>>> { // Select flows that are not in finished status private static final String FETCH_UNFINISHED_EXECUTABLE_FLOWS = "SELECT ex.exec_id exec_id, ex.enc_type enc_type, ex.flow_data flow_data, ex.status status," + " et.host host, et.port port, ex.executor_id executorId, et.active executorStatus" + " FROM execution_flows ex" + " LEFT JOIN " + " executors et ON ex.executor_id = et.id" + " WHERE ex.status NOT IN (" + Status.SUCCEEDED.getNumVal() + ", " + Status.KILLED.getNumVal() + ", " + Status.FAILED.getNumVal() + ")"; // Select flows that are dispatched and not in finished status private static final String FETCH_ACTIVE_EXECUTABLE_FLOWS = "SELECT ex.exec_id exec_id, ex.enc_type enc_type, ex.flow_data flow_data, ex.status status," + " et.host host, et.port port, ex.executor_id executorId, et.active executorStatus" + " FROM execution_flows ex" + " LEFT JOIN " + " executors et ON ex.executor_id = et.id" + " WHERE ex.status NOT IN (" + Status.SUCCEEDED.getNumVal() + ", " + Status.KILLED.getNumVal() + ", " + Status.FAILED.getNumVal() + ")" // exclude queued flows that haven't been assigned yet -- this is the opposite of // the condition in ExecutionFlowDao#FETCH_QUEUED_EXECUTABLE_FLOW + " AND NOT (" + " ex.executor_id IS NULL" + " AND ex.status = " + Status.PREPARING.getNumVal() + " )"; @Override public Map<Integer, Pair<ExecutionReference, ExecutableFlow>> handle( final ResultSet rs) throws SQLException { if (!rs.next()) { return Collections.emptyMap(); } final Map<Integer, Pair<ExecutionReference, ExecutableFlow>> execFlows = new HashMap<>(); do { final Pair<ExecutionReference, ExecutableFlow> exFlow = getExecutableFlowHelper(rs); if (exFlow != null) { execFlows.put(rs.getInt("exec_id"), exFlow); } } while (rs.next()); return execFlows; } } @VisibleForTesting static class FetchUnfinishedFlowsMetadata implements ResultSetHandler<Map<Integer, Pair<ExecutionReference, ExecutableFlow>>> { // Select flows that are not in finished status private static final String FETCH_UNFINISHED_FLOWS_METADATA = "SELECT ex.exec_id exec_id, ex.project_id project_id, ex.version version, " + "ex.flow_id flow_id, et.host host, et.port port, ex.executor_id executorId, " + "ex.status status, ex.submit_time submit_time, ex.start_time start_time, " + "ex.end_time end_time, ex.submit_user submit_user, et.active executorStatus" + " FROM execution_flows ex" + " LEFT JOIN " + " executors et ON 
ex.executor_id = et.id" + " WHERE ex.status NOT IN (" + Status.SUCCEEDED.getNumVal() + ", " + Status.KILLED.getNumVal() + ", " + Status.FAILED.getNumVal() + ")"; @Override public Map<Integer, Pair<ExecutionReference, ExecutableFlow>> handle( final ResultSet rs) throws SQLException { if (!rs.next()) { return Collections.emptyMap(); } final Map<Integer, Pair<ExecutionReference, ExecutableFlow>> execFlows = new HashMap<>(); do { final Pair<ExecutionReference, ExecutableFlow> exFlow = getExecutableFlowMetadataHelper(rs); if (exFlow != null) { execFlows.put(rs.getInt("exec_id"), exFlow); } } while (rs.next()); return execFlows; } } private static class FetchActiveExecutableFlow implements ResultSetHandler<Pair<ExecutionReference, ExecutableFlow>> { // Select the flow that is dispatched and not in finished status by execution id private static final String FETCH_ACTIVE_EXECUTABLE_FLOW_BY_EXEC_ID = "SELECT ex.exec_id exec_id, ex.enc_type enc_type, ex.flow_data flow_data, ex.status status," + " et.host host, et.port port, ex.executor_id executorId, et.active executorStatus" + " FROM execution_flows ex" + " LEFT JOIN " + " executors et ON ex.executor_id = et.id" + " WHERE ex.exec_id = ? AND ex.status NOT IN (" + Status.SUCCEEDED.getNumVal() + ", " + Status.KILLED.getNumVal() + ", " + Status.FAILED.getNumVal() + ")" // exclude queued flows that haven't been assigned yet -- this is the opposite of // the condition in ExecutionFlowDao#FETCH_QUEUED_EXECUTABLE_FLOW + " AND NOT (" + " ex.executor_id IS NULL" + " AND ex.status = " + Status.PREPARING.getNumVal() + " )"; @Override public Pair<ExecutionReference, ExecutableFlow> handle( final ResultSet rs) throws SQLException { if (!rs.next()) { return null; } return getExecutableFlowHelper(rs); } } }
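/*
 * Illustrative sketch (not part of the original source): the difference between the two fetch
 * queries above. "Unfinished" includes queued flows with no executor assigned yet; "active"
 * excludes them via the NOT (executor_id IS NULL AND status = PREPARING) clause.
 *
 *   final Map<Integer, Pair<ExecutionReference, ExecutableFlow>> unfinished =
 *       dao.fetchUnfinishedFlows();  // dispatched flows plus still-queued flows
 *   final Map<Integer, Pair<ExecutionReference, ExecutableFlow>> active =
 *       dao.fetchActiveFlows();      // dispatched (executor-assigned) flows only
 */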
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/IRefreshable.java
/* * Copyright 2019 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; public interface IRefreshable<T> { /** * Refresh this object using the given source object. * @param source the source object * @return the refreshed object */ T refresh(T source); /** * Clone this object. * @return a new instance */ T clone(); /** * Number of elements in this object. * @return the number of elements */ int elementCount(); }
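/*
 * Minimal illustrative implementation (hypothetical class, not part of Azkaban) showing the
 * refresh/clone/elementCount contract:
 *
 *   class NameList implements IRefreshable<NameList> {
 *     private final java.util.List<String> names = new java.util.ArrayList<>();
 *     @Override public NameList refresh(final NameList source) {
 *       this.names.clear(); this.names.addAll(source.names); return this;
 *     }
 *     @Override public NameList clone() { return new NameList().refresh(this); }
 *     @Override public int elementCount() { return this.names.size(); }
 *   }
 */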
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/JdbcExecutorLoader.java
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.executor.ExecutorLogEvent.EventType; import azkaban.utils.FileIOUtils.LogData; import azkaban.utils.Pair; import azkaban.utils.Props; import java.io.File; import java.time.Duration; import java.util.List; import java.util.Map; import javax.inject.Inject; import javax.inject.Singleton; @Singleton public class JdbcExecutorLoader implements ExecutorLoader { private final ExecutionFlowDao executionFlowDao; private final ExecutorDao executorDao; private final ExecutionJobDao executionJobDao; private final ExecutionLogsDao executionLogsDao; private final ExecutorEventsDao executorEventsDao; private final ActiveExecutingFlowsDao activeExecutingFlowsDao; private final FetchActiveFlowDao fetchActiveFlowDao; private final AssignExecutorDao assignExecutorDao; private final NumExecutionsDao numExecutionsDao; private final ExecutionRampDao executionRampDao; @Inject public JdbcExecutorLoader(final ExecutionFlowDao executionFlowDao, final ExecutorDao executorDao, final ExecutionJobDao executionJobDao, final ExecutionLogsDao executionLogsDao, final ExecutorEventsDao executorEventsDao, final ActiveExecutingFlowsDao activeExecutingFlowsDao, final FetchActiveFlowDao fetchActiveFlowDao, final AssignExecutorDao assignExecutorDao, final NumExecutionsDao numExecutionsDao, final ExecutionRampDao executionRampDao) { this.executionFlowDao = executionFlowDao; this.executorDao = executorDao; this.executionJobDao = executionJobDao; this.executionLogsDao = executionLogsDao; this.executorEventsDao = executorEventsDao; this.activeExecutingFlowsDao = activeExecutingFlowsDao; this.fetchActiveFlowDao = fetchActiveFlowDao; this.numExecutionsDao = numExecutionsDao; this.assignExecutorDao = assignExecutorDao; this.executionRampDao = executionRampDao; } @Override public synchronized void uploadExecutableFlow(final ExecutableFlow flow) throws ExecutorManagerException { this.executionFlowDao.uploadExecutableFlow(flow); } @Override public void updateExecutableFlow(final ExecutableFlow flow) throws ExecutorManagerException { this.executionFlowDao.updateExecutableFlow(flow); } @Override public ExecutableFlow fetchExecutableFlow(final int id) throws ExecutorManagerException { return this.executionFlowDao.fetchExecutableFlow(id); } @Override public List<Pair<ExecutionReference, ExecutableFlow>> fetchQueuedFlows() throws ExecutorManagerException { return this.executionFlowDao.fetchQueuedFlows(); } /** * maxAge indicates how long finished flows are shown on the Recently Finished flows page. 
*/ @Override public List<ExecutableFlow> fetchRecentlyFinishedFlows(final Duration maxAge) throws ExecutorManagerException { return this.executionFlowDao.fetchRecentlyFinishedFlows(maxAge); } @Override public Map<Integer, Pair<ExecutionReference, ExecutableFlow>> fetchActiveFlows() throws ExecutorManagerException { return this.fetchActiveFlowDao.fetchActiveFlows(); } @Override public Map<Integer, Pair<ExecutionReference, ExecutableFlow>> fetchUnfinishedFlows() throws ExecutorManagerException { return this.fetchActiveFlowDao.fetchUnfinishedFlows(); } @Override public Map<Integer, Pair<ExecutionReference, ExecutableFlow>> fetchUnfinishedFlowsMetadata() throws ExecutorManagerException { return this.fetchActiveFlowDao.fetchUnfinishedFlowsMetadata(); } @Override public Pair<ExecutionReference, ExecutableFlow> fetchActiveFlowByExecId(final int execId) throws ExecutorManagerException { return this.fetchActiveFlowDao.fetchActiveFlowByExecId(execId); } @Override public int fetchNumExecutableFlows() throws ExecutorManagerException { return this.numExecutionsDao.fetchNumExecutableFlows(); } @Override public int fetchNumExecutableFlows(final int projectId, final String flowId) throws ExecutorManagerException { return this.numExecutionsDao.fetchNumExecutableFlows(projectId, flowId); } @Override public int fetchNumExecutableNodes(final int projectId, final String jobId) throws ExecutorManagerException { return this.numExecutionsDao.fetchNumExecutableNodes(projectId, jobId); } @Override public List<ExecutableFlow> fetchFlowHistory(final int projectId, final String flowId, final int skip, final int num) throws ExecutorManagerException { return this.executionFlowDao.fetchFlowHistory(projectId, flowId, skip, num); } @Override public List<ExecutableFlow> fetchFlowHistory(final int projectId, final String flowId, final long startTime) throws ExecutorManagerException { return this.executionFlowDao.fetchFlowHistory(projectId, flowId, startTime); } @Override public List<ExecutableFlow> fetchFlowHistory(final int projectId, final String flowId, final int skip, final int num, final Status status) throws ExecutorManagerException { return this.executionFlowDao.fetchFlowHistory(projectId, flowId, skip, num, status); } @Override public List<ExecutableFlow> fetchFlowHistory(final int skip, final int num) throws ExecutorManagerException { return this.executionFlowDao.fetchFlowHistory(skip, num); } @Override public List<ExecutableFlow> fetchFlowHistory(final String projContain, final String flowContains, final String userNameContains, final int status, final long startTime, final long endTime, final int skip, final int num) throws ExecutorManagerException { return this.executionFlowDao.fetchFlowHistory(projContain, flowContains, userNameContains, status, startTime, endTime, skip, num); } @Override public void addActiveExecutableReference(final ExecutionReference reference) throws ExecutorManagerException { this.activeExecutingFlowsDao.addActiveExecutableReference(reference); } @Override public void removeActiveExecutableReference(final int execid) throws ExecutorManagerException { this.activeExecutingFlowsDao.removeActiveExecutableReference(execid); } @Override public boolean updateExecutableReference(final int execId, final long updateTime) throws ExecutorManagerException { // Should be 1. 
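// (presumably the expected row-update count when refreshing the active executable reference;
// the boolean result indicates whether the reference row was found and updated)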
return this.activeExecutingFlowsDao.updateExecutableReference(execId, updateTime); } @Override public void uploadExecutableNode(final ExecutableNode node, final Props inputProps) throws ExecutorManagerException { this.executionJobDao.uploadExecutableNode(node, inputProps); } @Override public void updateExecutableNode(final ExecutableNode node) throws ExecutorManagerException { this.executionJobDao.updateExecutableNode(node); } @Override public List<ExecutableJobInfo> fetchJobInfoAttempts(final int execId, final String jobId) throws ExecutorManagerException { return this.executionJobDao.fetchJobInfoAttempts(execId, jobId); } @Override public ExecutableJobInfo fetchJobInfo(final int execId, final String jobId, final int attempts) throws ExecutorManagerException { return this.executionJobDao.fetchJobInfo(execId, jobId, attempts); } @Override public Props fetchExecutionJobInputProps(final int execId, final String jobId) throws ExecutorManagerException { return this.executionJobDao.fetchExecutionJobInputProps(execId, jobId); } @Override public Props fetchExecutionJobOutputProps(final int execId, final String jobId) throws ExecutorManagerException { return this.executionJobDao.fetchExecutionJobOutputProps(execId, jobId); } @Override public Pair<Props, Props> fetchExecutionJobProps(final int execId, final String jobId) throws ExecutorManagerException { return this.executionJobDao.fetchExecutionJobProps(execId, jobId); } @Override public List<ExecutableJobInfo> fetchJobHistory(final int projectId, final String jobId, final int skip, final int size) throws ExecutorManagerException { return this.executionJobDao.fetchJobHistory(projectId, jobId, skip, size); } @Override public LogData fetchLogs(final int execId, final String name, final int attempt, final int startByte, final int length) throws ExecutorManagerException { return this.executionLogsDao.fetchLogs(execId, name, attempt, startByte, length); } @Override public List<Object> fetchAttachments(final int execId, final String jobId, final int attempt) throws ExecutorManagerException { return this.executionJobDao.fetchAttachments(execId, jobId, attempt); } @Override public void uploadLogFile(final int execId, final String name, final int attempt, final File... 
files) throws ExecutorManagerException { this.executionLogsDao.uploadLogFile(execId, name, attempt, files); } @Override public void uploadAttachmentFile(final ExecutableNode node, final File file) throws ExecutorManagerException { this.executionJobDao.uploadAttachmentFile(node, file); } @Override public List<Executor> fetchAllExecutors() throws ExecutorManagerException { return this.executorDao.fetchAllExecutors(); } @Override public List<Executor> fetchActiveExecutors() throws ExecutorManagerException { return this.executorDao.fetchActiveExecutors(); } @Override public Executor fetchExecutor(final String host, final int port) throws ExecutorManagerException { return this.executorDao.fetchExecutor(host, port); } @Override public Executor fetchExecutor(final int executorId) throws ExecutorManagerException { return this.executorDao.fetchExecutor(executorId); } @Override public void updateExecutor(final Executor executor) throws ExecutorManagerException { this.executorDao.updateExecutor(executor); } @Override public Executor addExecutor(final String host, final int port) throws ExecutorManagerException { return this.executorDao.addExecutor(host, port); } @Override public void removeExecutor(final String host, final int port) throws ExecutorManagerException { this.executorDao.removeExecutor(host, port); } @Override public void postExecutorEvent(final Executor executor, final EventType type, final String user, final String message) throws ExecutorManagerException { this.executorEventsDao.postExecutorEvent(executor, type, user, message); } @Override public List<ExecutorLogEvent> getExecutorEvents(final Executor executor, final int num, final int offset) throws ExecutorManagerException { return this.executorEventsDao.getExecutorEvents(executor, num, offset); } @Override public void assignExecutor(final int executorId, final int executionId) throws ExecutorManagerException { this.assignExecutorDao.assignExecutor(executorId, executionId); } @Override public Executor fetchExecutorByExecutionId(final int executionId) throws ExecutorManagerException { return this.executorDao.fetchExecutorByExecutionId(executionId); } @Override public int removeExecutionLogsByTime(final long millis, final int recordCleanupLimit) throws ExecutorManagerException { return this.executionLogsDao.removeExecutionLogsByTime(millis, recordCleanupLimit); } @Override public void unassignExecutor(final int executionId) throws ExecutorManagerException { this.assignExecutorDao.unassignExecutor(executionId); } @Override public int selectAndUpdateExecution(final int executorId, final boolean isActive) throws ExecutorManagerException { return this.executionFlowDao.selectAndUpdateExecution(executorId, isActive); } @Override public int selectAndUpdateExecutionWithLocking(final int executorId, final boolean isActive) throws ExecutorManagerException { return this.executionFlowDao.selectAndUpdateExecutionWithLocking(executorId, isActive); } @Override public ExecutableRampMap fetchExecutableRampMap() throws ExecutorManagerException { return this.executionRampDao.fetchExecutableRampMap(); } @Override public ExecutableRampItemsMap fetchExecutableRampItemsMap() throws ExecutorManagerException { return this.executionRampDao.fetchExecutableRampItemsMap(); } @Override public ExecutableRampDependencyMap fetchExecutableRampDependencyMap() throws ExecutorManagerException { return this.executionRampDao.fetchExecutableRampDependencyMap(); } @Override public ExecutableRampExceptionalFlowItemsMap fetchExecutableRampExceptionalFlowItemsMap() throws 
ExecutorManagerException { return this.executionRampDao.fetchExecutableRampExceptionalFlowItemsMap(); } @Override public void updateExecutedRampFlows(final String ramp, ExecutableRampExceptionalItems executableRampExceptionalItems) throws ExecutorManagerException { this.executionRampDao.updateExecutedRampFlows(ramp, executableRampExceptionalItems); } @Override public ExecutableRampExceptionalJobItemsMap fetchExecutableRampExceptionalJobItemsMap() throws ExecutorManagerException { return this.executionRampDao.fetchExecutableRampExceptionalJobItemsMap(); } @Override public Map<String, String> doRampActions(List<Map<String, Object>> rampActionsMap) throws ExecutorManagerException { return this.executionRampDao.doRampActions(rampActionsMap); } @Override public void updateExecutableRamp(ExecutableRamp executableRamp) throws ExecutorManagerException { this.executionRampDao.updateExecutableRamp(executableRamp); } @Override public void unsetExecutorIdForExecution(final int executionId) throws ExecutorManagerException { this.executionFlowDao.unsetExecutorIdForExecution(executionId); } }
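// --- Illustrative sketch (added for clarity; not part of the original source). It shows how a
// caller might use the maxAge parameter documented above: only flows that finished within the
// given window are returned for the Recently Finished page. The two-minute window is a
// hypothetical value, and the loader instance is assumed to come from the application's injector.
class JdbcExecutorLoaderExample {
  java.util.List<ExecutableFlow> recentlyFinished(final ExecutorLoader loader)
      throws ExecutorManagerException {
    return loader.fetchRecentlyFinishedFlows(java.time.Duration.ofMinutes(2));
  }
}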
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/MysqlNamedLock.java
package azkaban.executor; import azkaban.db.DatabaseTransOperator; import azkaban.utils.StringUtils; import java.sql.ResultSet; import java.sql.SQLException; import javax.inject.Singleton; import org.apache.commons.dbutils.ResultSetHandler; /** * Utility class for advisory locking using MySQL named locks (GET_LOCK/RELEASE_LOCK). */ @Singleton public class MysqlNamedLock implements ResultSetHandler<Boolean> { private static final String GET_LOCK_TEMPLATE = "SELECT GET_LOCK('%s', %s)"; private static final String RELEASE_LOCK_TEMPLATE = "SELECT RELEASE_LOCK('%s')"; public boolean getLock(DatabaseTransOperator transOperator, String lockName, int lockTimeoutInSeconds) throws SQLException { if (StringUtils.isEmpty(lockName)) { throw new IllegalArgumentException("Lock name cannot be null or empty"); } String getLockStatement = String.format(GET_LOCK_TEMPLATE, lockName, lockTimeoutInSeconds); return transOperator.query(getLockStatement, this); } public boolean releaseLock(DatabaseTransOperator transOperator, String lockName) throws SQLException { if (StringUtils.isEmpty(lockName)) { throw new IllegalArgumentException("Lock name cannot be null or empty"); } String releaseLockStatement = String.format(RELEASE_LOCK_TEMPLATE, lockName); return transOperator.query(releaseLockStatement, this); } @Override public Boolean handle(final ResultSet rs) throws SQLException { if (!rs.next()) { return false; } return rs.getBoolean(1); } }
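// --- Illustrative usage sketch (added for clarity; not part of the original source). MySQL
// named locks are advisory, server-wide locks, so this class can serialize work across several
// Azkaban web server instances sharing one database. The lock name, the 5-second timeout, and
// the surrounding transaction callback are hypothetical.
class MysqlNamedLockExample {
  void runExclusively(final azkaban.db.DatabaseTransOperator transOperator)
      throws java.sql.SQLException {
    final MysqlNamedLock namedLock = new MysqlNamedLock();
    // GET_LOCK waits up to 5 seconds; false means another session still holds the lock.
    if (namedLock.getLock(transOperator, "flow_dispatch_lock", 5)) {
      try {
        // critical section: only one session at a time gets here
      } finally {
        namedLock.releaseLock(transOperator, "flow_dispatch_lock");
      }
    }
  }
}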
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/NumExecutionsDao.java
/* * Copyright 2017 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.db.DatabaseOperator; import java.sql.ResultSet; import java.sql.SQLException; import javax.inject.Inject; import javax.inject.Singleton; import org.apache.commons.dbutils.ResultSetHandler; import org.apache.log4j.Logger; @Singleton public class NumExecutionsDao { private static final Logger logger = Logger.getLogger(NumExecutionsDao.class); private final DatabaseOperator dbOperator; @Inject public NumExecutionsDao(final DatabaseOperator dbOperator) { this.dbOperator = dbOperator; } public int fetchNumExecutableFlows() throws ExecutorManagerException { try { return this.dbOperator.query(IntHandler.NUM_EXECUTIONS, new IntHandler()); } catch (final SQLException e) { throw new ExecutorManagerException("Error fetching num executions", e); } } public int fetchNumExecutableFlows(final int projectId, final String flowId) throws ExecutorManagerException { final IntHandler intHandler = new IntHandler(); try { return this.dbOperator.query(IntHandler.NUM_FLOW_EXECUTIONS, intHandler, projectId, flowId); } catch (final SQLException e) { throw new ExecutorManagerException("Error fetching num executions", e); } } public int fetchNumExecutableNodes(final int projectId, final String jobId) throws ExecutorManagerException { final IntHandler intHandler = new IntHandler(); try { return this.dbOperator.query(IntHandler.NUM_JOB_EXECUTIONS, intHandler, projectId, jobId); } catch (final SQLException e) { throw new ExecutorManagerException("Error fetching num executions", e); } } private static class IntHandler implements ResultSetHandler<Integer> { private static final String NUM_EXECUTIONS = "SELECT COUNT(1) FROM execution_flows"; private static final String NUM_FLOW_EXECUTIONS = "SELECT COUNT(1) FROM execution_flows WHERE project_id=? AND flow_id=?"; private static final String NUM_JOB_EXECUTIONS = "SELECT COUNT(1) FROM execution_jobs WHERE project_id=? AND job_id=?"; @Override public Integer handle(final ResultSet rs) throws SQLException { if (!rs.next()) { return 0; } return rs.getInt(1); } } }
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/QueuedExecutions.java
package azkaban.executor; import azkaban.utils.Pair; import java.util.Collection; import java.util.Collections; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.PriorityBlockingQueue; import org.apache.log4j.Logger; /** * <pre> * Composite data structure to represent non-dispatched flows in the web server. * This data structure wraps a blocking queue and a concurrent hashmap. * </pre> */ public class QueuedExecutions { private static final Logger logger = Logger.getLogger(QueuedExecutions.class); final long capacity; /* map to easily access queued flows */ private final ConcurrentHashMap<Integer, Pair<ExecutionReference, ExecutableFlow>> queuedFlowMap; /* actual queue */ private final BlockingQueue<Pair<ExecutionReference, ExecutableFlow>> queuedFlowList; public QueuedExecutions(final long capacity) { this.capacity = capacity; this.queuedFlowMap = new ConcurrentHashMap<>(); this.queuedFlowList = new PriorityBlockingQueue<>(10, new ExecutableFlowPriorityComparator()); } /** * Wraps the blocking queue's take() method so that the queuedFlowMap lookup table gets a * corresponding update */ public Pair<ExecutionReference, ExecutableFlow> fetchHead() throws InterruptedException { final Pair<ExecutionReference, ExecutableFlow> pair = this.queuedFlowList.take(); if (pair != null && pair.getFirst() != null) { this.queuedFlowMap.remove(pair.getFirst().getExecId()); } return pair; } /** * Helper method to have a single point of deletion in the queued flows */ public void dequeue(final int executionId) { if (this.queuedFlowMap.containsKey(executionId)) { this.queuedFlowList.remove(this.queuedFlowMap.get(executionId)); this.queuedFlowMap.remove(executionId); } } /** * <pre> * Helper method to have a single point of insertion in the queued flows * * @param exflow * flow to be enqueued * @param ref * reference to be enqueued * @throws ExecutorManagerException * case 1: if the blocking queue put method fails due to * InterruptedException * case 2: if there is already an element with the * same execution id * </pre> */ public void enqueue(final ExecutableFlow exflow, final ExecutionReference ref) throws ExecutorManagerException { if (hasExecution(exflow.getExecutionId())) { final String errMsg = "Flow already in queue " + exflow.getExecutionId(); throw new ExecutorManagerException(errMsg); } final Pair<ExecutionReference, ExecutableFlow> pair = new Pair<>(ref, exflow); try { this.queuedFlowMap.put(exflow.getExecutionId(), pair); this.queuedFlowList.put(pair); } catch (final InterruptedException e) { final String errMsg = "Failed to insert flow " + exflow.getExecutionId(); logger.error(errMsg, e); throw new ExecutorManagerException(errMsg); } } /** * <pre> * Enqueues all the elements of a collection * * @param collection * * @throws ExecutorManagerException * case 1: if the blocking queue put method fails due to * InterruptedException * case 2: if there is already an element with the * same execution id * </pre> */ public void enqueueAll( final Collection<Pair<ExecutionReference, ExecutableFlow>> collection) throws ExecutorManagerException { for (final Pair<ExecutionReference, ExecutableFlow> pair : collection) { enqueue(pair.getSecond(), pair.getFirst()); } } /** * Returns a read-only collection of all the queued (reference, flow) pairs */ public Collection<Pair<ExecutionReference, ExecutableFlow>> getAllEntries() { return Collections.unmodifiableCollection(this.queuedFlowMap.values()); } /** * Checks if an execution is queued or not */ public boolean hasExecution(final int executionId) { return this.queuedFlowMap.containsKey(executionId); } /** * Fetches the flow for an execution. Returns null if the execution is not in the queue */ public ExecutableFlow getFlow(final int executionId) { if (hasExecution(executionId)) { return this.queuedFlowMap.get(executionId).getSecond(); } return null; } /** * Fetches the ExecutionReference for an execution. Returns null if the execution is not in the queue */ public ExecutionReference getReference(final int executionId) { if (hasExecution(executionId)) { return this.queuedFlowMap.get(executionId).getFirst(); } return null; } /** * Size of the queue */ public long size() { return this.queuedFlowList.size(); } /** * Checks whether the queue is full, per the initialized capacity */ public boolean isFull() { return size() >= this.capacity; } /** * Checks whether the queue is empty or not */ public boolean isEmpty() { return this.queuedFlowList.isEmpty() && this.queuedFlowMap.isEmpty(); } /** * Empties the queue by dequeuing all the elements */ public void clear() { for (final Pair<ExecutionReference, ExecutableFlow> pair : this.queuedFlowMap.values()) { dequeue(pair.getFirst().getExecId()); } } }
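// --- Illustrative sketch (added for clarity; not part of the original source). It demonstrates
// the enqueue/fetchHead contract documented above: the hashmap gives O(1) lookup by execution id
// while the priority queue orders flows for dispatch, and both structures are kept in sync. The
// capacity and the flow/reference arguments are hypothetical.
class QueuedExecutionsExample {
  void roundTrip(final ExecutableFlow flow, final ExecutionReference ref)
      throws ExecutorManagerException, InterruptedException {
    final QueuedExecutions queue = new QueuedExecutions(1000);
    queue.enqueue(flow, ref); // inserts into both the map and the queue
    assert queue.hasExecution(flow.getExecutionId());
    queue.fetchHead(); // removes from both the map and the queue
    assert queue.isEmpty();
  }
}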
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/RunningExecutions.java
/* * Copyright 2018 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.utils.Pair; import java.util.concurrent.ConcurrentHashMap; import javax.inject.Singleton; /** * Provides access to running executions. */ @Singleton public class RunningExecutions { private final ConcurrentHashMap<Integer, Pair<ExecutionReference, ExecutableFlow>> runningExecutions = new ConcurrentHashMap<>(); /** * Get running executions. * * @return executions. */ public ConcurrentHashMap<Integer, Pair<ExecutionReference, ExecutableFlow>> get() { return this.runningExecutions; } }
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/RunningExecutionsUpdater.java
/* * Copyright 2018 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.alert.Alerter; import azkaban.metrics.CommonMetrics; import azkaban.utils.Pair; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Optional; import javax.inject.Inject; import org.apache.log4j.Logger; import org.joda.time.DateTime; /** * Updates running executions. */ public class RunningExecutionsUpdater { private static final Logger logger = Logger.getLogger(RunningExecutionsUpdater.class); // First email is sent after 1 minute of unresponsiveness final int numErrorsBeforeUnresponsiveEmail = 6; final long errorThreshold = 10000; // When we have an http error, for that flow, we'll check every 10 secs, 360 // times (3600 seconds = 1 hour) before we send an email about unresponsive executor. private final int numErrorsBetweenUnresponsiveEmail = 360; private final ExecutorManagerUpdaterStage updaterStage; private final AlerterHolder alerterHolder; private final CommonMetrics commonMetrics; private final ExecutorApiGateway apiGateway; private final RunningExecutions runningExecutions; private final ExecutionFinalizer executionFinalizer; private final ExecutorLoader executorLoader; @Inject public RunningExecutionsUpdater(final ExecutorManagerUpdaterStage updaterStage, final AlerterHolder alerterHolder, final CommonMetrics commonMetrics, final ExecutorApiGateway apiGateway, final RunningExecutions runningExecutions, final ExecutionFinalizer executionFinalizer, final ExecutorLoader executorLoader) { this.updaterStage = updaterStage; this.alerterHolder = alerterHolder; this.commonMetrics = commonMetrics; this.apiGateway = apiGateway; this.runningExecutions = runningExecutions; this.executionFinalizer = executionFinalizer; this.executorLoader = executorLoader; } /** * Updates running executions. */ @SuppressWarnings("unchecked") public void updateExecutions() { this.updaterStage.set("Starting update all flows."); final Map<Optional<Executor>, List<ExecutableFlow>> exFlowMap = getFlowToExecutorMap(); final ArrayList<ExecutableFlow> finalizeFlows = new ArrayList<>(); for (final Map.Entry<Optional<Executor>, List<ExecutableFlow>> entry : exFlowMap .entrySet()) { final Optional<Executor> executorOption = entry.getKey(); if (!executorOption.isPresent()) { for (final ExecutableFlow flow : entry.getValue()) { logger.warn("Finalizing execution " + flow.getExecutionId() + ". 
Executor id of this execution doesn't exist"); finalizeFlows.add(flow); } continue; } final Executor executor = executorOption.get(); this.updaterStage.set("Starting update flows on " + executor.getHost() + ":" + executor.getPort()); Map<String, Object> results = null; try { results = this.apiGateway.updateExecutions(executor, entry.getValue()); } catch (final ExecutorManagerException e) { handleException(entry, executor, e, finalizeFlows); } if (results != null) { final List<Map<String, Object>> executionUpdates = (List<Map<String, Object>>) results .get(ConnectorParams.RESPONSE_UPDATED_FLOWS); for (final Map<String, Object> updateMap : executionUpdates) { try { final ExecutableFlow flow = updateExecution(updateMap); this.updaterStage.set("Updated flow " + flow.getExecutionId()); if (ExecutionControllerUtils.isFinished(flow)) { finalizeFlows.add(flow); } } catch (final ExecutorManagerException e) { final ExecutableFlow flow = e.getExecutableFlow(); logger.error(e); if (flow != null) { logger.warn("Finalizing execution " + flow.getExecutionId()); finalizeFlows.add(flow); } } } } } this.updaterStage.set("Finalizing " + finalizeFlows.size() + " error flows."); for (final ExecutableFlow flow : finalizeFlows) { this.executionFinalizer .finalizeFlow(flow, "Not running on the assigned executor (any more)", null); } this.updaterStage.set("Updated all active flows. Waiting for next round."); } private void handleException(final Entry<Optional<Executor>, List<ExecutableFlow>> entry, final Executor executor, final ExecutorManagerException e, final ArrayList<ExecutableFlow> finalizeFlows) { logger.error("Failed to get update from executor " + executor.getHost(), e); boolean sendUnresponsiveEmail = false; final boolean executorRemoved = isExecutorRemoved(executor.getId()); for (final ExecutableFlow flow : entry.getValue()) { final Pair<ExecutionReference, ExecutableFlow> pair = this.runningExecutions.get().get(flow.getExecutionId()); this.updaterStage .set("Failed to get update for flow " + pair.getSecond().getExecutionId()); if (executorRemoved) { logger.warn("Finalizing execution " + flow.getExecutionId() + ". 
Executor is removed"); finalizeFlows.add(flow); } else { final ExecutionReference ref = pair.getFirst(); ref.setNextCheckTime(DateTime.now().getMillis() + this.errorThreshold); ref.setNumErrors(ref.getNumErrors() + 1); if (ref.getNumErrors() == this.numErrorsBeforeUnresponsiveEmail || ref.getNumErrors() % this.numErrorsBetweenUnresponsiveEmail == 0) { // if any of the executions has failed enough update attempts, alert sendUnresponsiveEmail = true; } } } if (sendUnresponsiveEmail) { final Alerter mailAlerter = this.alerterHolder.get("email"); mailAlerter.alertOnFailedUpdate(executor, entry.getValue(), e); } } private boolean isExecutorRemoved(final int id) { final Executor fetchedExecutor; try { fetchedExecutor = this.executorLoader.fetchExecutor(id); } catch (final ExecutorManagerException e) { logger.error("Couldn't check if executor exists", e); // don't know if removed or not -> default to false return false; } return fetchedExecutor == null; } /* Group executable flows by executor to reduce the number of REST calls */ private Map<Optional<Executor>, List<ExecutableFlow>> getFlowToExecutorMap() { final HashMap<Optional<Executor>, List<ExecutableFlow>> exFlowMap = new HashMap<>(); for (final Pair<ExecutionReference, ExecutableFlow> runningFlow : this.runningExecutions.get() .values()) { final ExecutionReference ref = runningFlow.getFirst(); final ExecutableFlow flow = runningFlow.getSecond(); final Optional<Executor> executor = ref.getExecutor(); // We can set the next check time to prevent the checking of certain // flows. if (ref.getNextCheckTime() >= DateTime.now().getMillis()) { continue; } List<ExecutableFlow> flows = exFlowMap.get(executor); if (flows == null) { flows = new ArrayList<>(); exFlowMap.put(executor, flows); } flows.add(flow); } return exFlowMap; } private ExecutableFlow updateExecution(final Map<String, Object> updateData) throws ExecutorManagerException { final Integer execId = (Integer) updateData.get(ConnectorParams.UPDATE_MAP_EXEC_ID); if (execId == null) { throw new ExecutorManagerException( "Response is malformed. Need exec id to update."); } final Pair<ExecutionReference, ExecutableFlow> refPair = this.runningExecutions.get().get(execId); if (refPair == null) { // this shouldn't ever happen in a real Azkaban runtime, // but it can easily happen in unit tests if there's some inconsistent mocking. throw new ExecutorManagerException( "No execution found in the map with the execution id any more. Removing " + execId); } final ExecutionReference ref = refPair.getFirst(); final ExecutableFlow flow = refPair.getSecond(); if (updateData.containsKey("error")) { // The flow should be finished here. throw new ExecutorManagerException((String) updateData.get("error"), flow); } // Reset errors. ref.setNextCheckTime(0); ref.setNumErrors(0); final Status oldStatus = flow.getStatus(); flow.applyUpdateObject(updateData); final Status newStatus = flow.getStatus(); if (oldStatus != newStatus && newStatus.equals(Status.FAILED_FINISHING)) { ExecutionControllerUtils.alertUserOnFirstError(flow, this.alerterHolder); } return flow; } }
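// --- Worked example of the alerting cadence above (added for clarity; not part of the original
// source). A failed update pushes the next check errorThreshold = 10,000 ms into the future, so
// consecutive failures are roughly 10 seconds apart:
//
//   first unresponsive email:  numErrorsBeforeUnresponsiveEmail  * 10 s =   6 * 10 s =    60 s
//   repeat emails:             numErrorsBetweenUnresponsiveEmail * 10 s = 360 * 10 s = 3,600 s
//
// i.e. the first alert fires after about one minute of unresponsiveness and then repeats hourly
// (whenever numErrors % 360 == 0), matching the field comments in this class.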
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/RunningExecutionsUpdaterThread.java
/* * Copyright 2018 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import javax.inject.Inject; import org.apache.log4j.Logger; /** * Updates running executions periodically. */ public class RunningExecutionsUpdaterThread extends Thread { private static final Logger logger = Logger.getLogger(RunningExecutionsUpdaterThread.class); volatile int waitTimeIdleMs = 2000; volatile int waitTimeMs = 500; private final RunningExecutionsUpdater updater; private final RunningExecutions runningExecutions; private long lastThreadCheckTime = -1; // volatile: written by shutdown() from another thread and read in the run() loop private volatile boolean shutdown = false; @Inject public RunningExecutionsUpdaterThread(final RunningExecutionsUpdater updater, final RunningExecutions runningExecutions) { this.updater = updater; this.runningExecutions = runningExecutions; this.setName("ExecutorManagerUpdaterThread"); } /** * Start the thread: updates running executions periodically. */ @Override public void run() { while (!this.shutdown) { try { this.lastThreadCheckTime = System.currentTimeMillis(); this.updater.updateExecutions(); // TODO not sure why it would be important to check the status immediately in case of _new_ // executions. This can only optimize finalizing executions that finish super-quickly after // being started. waitForNewExecutions(); } catch (final Exception e) { logger.error("Unexpected exception in updating executions", e); } } } private void waitForNewExecutions() { synchronized (this.runningExecutions) { try { final int waitTimeMillis = this.runningExecutions.get().size() > 0 ? this.waitTimeMs : this.waitTimeIdleMs; if (waitTimeMillis > 0) { this.runningExecutions.wait(waitTimeMillis); } } catch (final InterruptedException e) { // ignored: an interrupt or spurious wakeup just starts the next update round early } } } void shutdown() { this.shutdown = true; } public long getLastThreadCheckTime() { return this.lastThreadCheckTime; } }
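// --- Illustrative sketch (added for clarity; not part of the original source). Because
// waitForNewExecutions() waits on the RunningExecutions instance's monitor, a dispatcher could
// cut the idle wait short after submitting a new execution by notifying on the same object.
// This is an inferred usage pattern, not a contract documented in the original code.
class UpdaterWakeupExample {
  void wakeUpdater(final RunningExecutions runningExecutions) {
    synchronized (runningExecutions) {
      runningExecutions.notifyAll(); // ends the updater's wait(waitTimeMillis) early
    }
  }
}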
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/Status.java
/* * Copyright 2014 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import com.google.common.collect.ImmutableMap; import java.util.Arrays; public enum Status { READY(10), PREPARING(20), RUNNING(30), PAUSED(40), SUCCEEDED(50), KILLING(55), KILLED(60), FAILED(70), FAILED_FINISHING(80), SKIPPED(90), DISABLED(100), QUEUED(110), FAILED_SUCCEEDED(120), CANCELLED(125); // status is TINYINT in DB and the value ranges from -128 to 127 private static final ImmutableMap<Integer, Status> numValMap = Arrays.stream(Status.values()) .collect(ImmutableMap.toImmutableMap(status -> status.getNumVal(), status -> status)); private final int numVal; Status(final int numVal) { this.numVal = numVal; } public static Status fromInteger(final int x) { return numValMap.getOrDefault(x, READY); } public static boolean isStatusFinished(final Status status) { switch (status) { case FAILED: case KILLED: case SUCCEEDED: case SKIPPED: case FAILED_SUCCEEDED: case CANCELLED: return true; default: return false; } } public static boolean isStatusRunning(final Status status) { switch (status) { case RUNNING: case FAILED_FINISHING: case QUEUED: return true; default: return false; } } public static boolean isStatusFailed(final Status status) { switch (status) { case FAILED: case KILLED: case CANCELLED: return true; default: return false; } } public static boolean isStatusSucceeded(final Status status) { switch (status) { case SUCCEEDED: case FAILED_SUCCEEDED: case SKIPPED: return true; default: return false; } } public int getNumVal() { return this.numVal; } }
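// --- Illustrative sketch (added for clarity; not part of the original source). It shows the
// numeric round-trip used for DB persistence: getNumVal() fits the TINYINT column, and
// fromInteger() falls back to READY for unmapped values.
class StatusExample {
  void roundTrip() {
    assert Status.fromInteger(Status.KILLED.getNumVal()) == Status.KILLED;
    assert Status.fromInteger(-1) == Status.READY; // unmapped value -> default READY
    assert Status.isStatusFinished(Status.FAILED_SUCCEEDED); // terminal state
    assert !Status.isStatusFinished(Status.RUNNING);
  }
}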
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/mail/DefaultMailCreator.java
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor.mail; import azkaban.executor.ExecutableFlow; import azkaban.executor.ExecutableNode; import azkaban.executor.ExecutionOptions; import azkaban.executor.ExecutionOptions.FailureAction; import azkaban.executor.Executor; import azkaban.executor.ExecutorManagerException; import azkaban.executor.Status; import azkaban.utils.EmailMessage; import azkaban.utils.TimeUtils; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import org.apache.commons.lang.exception.ExceptionUtils; public class DefaultMailCreator implements MailCreator { public static final String DEFAULT_MAIL_CREATOR = "default"; private static final HashMap<String, MailCreator> registeredCreators = new HashMap<>(); private static final MailCreator defaultCreator; static { defaultCreator = new DefaultMailCreator(); registerCreator(DEFAULT_MAIL_CREATOR, defaultCreator); } public static void registerCreator(final String name, final MailCreator creator) { registeredCreators.put(name, creator); } public static MailCreator getCreator(final String name) { MailCreator creator = registeredCreators.get(name); if (creator == null) { creator = defaultCreator; } return creator; } private static List<String> findFailedJobs(final ExecutableFlow flow) { final ArrayList<String> failedJobs = new ArrayList<>(); for (final ExecutableNode node : flow.getExecutableNodes()) { if (node.getStatus() == Status.FAILED) { failedJobs.add(node.getId()); } } return failedJobs; } @Override public boolean createFirstErrorMessage(final ExecutableFlow flow, final EmailMessage message, final String azkabanName, final String scheme, final String clientHostname, final String clientPortNumber) { final ExecutionOptions option = flow.getExecutionOptions(); final List<String> emailList = option.getFailureEmails(); final int execId = flow.getExecutionId(); if (emailList != null && !emailList.isEmpty()) { message.addAllToAddress(emailList); message.setMimeType("text/html"); message.setSubject("Flow '" + flow.getFlowId() + "' has encountered a failure on " + azkabanName); message.println("<h2 style=\"color:#FF0000\"> Execution '" + flow.getExecutionId() + "' of flow '" + flow.getFlowId() + "' of project '" + flow.getProjectName() + "' has encountered a failure on " + azkabanName + "</h2>"); if (option.getFailureAction() == FailureAction.CANCEL_ALL) { message .println("This flow is set to cancel all currently running jobs."); } else if (option.getFailureAction() == FailureAction.FINISH_ALL_POSSIBLE) { message .println("This flow is set to complete all jobs that aren't blocked by the failure."); } else { message .println("This flow is set to complete all currently running jobs before stopping."); } message.println("<table>"); message.println("<tr><td>Start Time</td><td>" + TimeUtils.formatDateTimeZone(flow.getStartTime()) + "</td></tr>"); message.println("<tr><td>End Time</td><td>" + TimeUtils.formatDateTimeZone(flow.getEndTime()) + "</td></tr>"); 
message.println("<tr><td>Duration</td><td>" + TimeUtils.formatDuration(flow.getStartTime(), flow.getEndTime()) + "</td></tr>"); message.println("<tr><td>Status</td><td>" + flow.getStatus() + "</td></tr>"); message.println("</table>"); message.println(""); final String executionUrl = scheme + "://" + clientHostname + ":" + clientPortNumber + "/" + "executor?" + "execid=" + execId; message.println("<a href=\"" + executionUrl + "\">" + flow.getFlowId() + " Execution Link</a>"); message.println(""); message.println("<h3>Reason</h3>"); final List<String> failedJobs = findFailedJobs(flow); message.println("<ul>"); for (final String jobId : failedJobs) { message.println("<li><a href=\"" + executionUrl + "&job=" + jobId + "\">Failed job '" + jobId + "' Link</a></li>"); } message.println("</ul>"); return true; } return false; } @Override public boolean createErrorEmail(final ExecutableFlow flow, final List<ExecutableFlow> pastExecutions, final EmailMessage message, final String azkabanName, final String scheme, final String clientHostname, final String clientPortNumber, final String... reasons) { final ExecutionOptions option = flow.getExecutionOptions(); final List<String> emailList = option.getFailureEmails(); final int execId = flow.getExecutionId(); if (emailList != null && !emailList.isEmpty()) { message.addAllToAddress(emailList); message.setMimeType("text/html"); message.setSubject("Flow '" + flow.getFlowId() + "' has failed on " + azkabanName); message.println("<h2 style=\"color:#FF0000\"> Execution '" + execId + "' of flow '" + flow.getFlowId() + "' of project '" + flow.getProjectName() + "' has failed on " + azkabanName + "</h2>"); message.println("<table>"); message.println("<tr><td>Start Time</td><td>" + TimeUtils.formatDateTimeZone(flow.getStartTime()) + "</td></tr>"); message.println("<tr><td>End Time</td><td>" + TimeUtils.formatDateTimeZone(flow.getEndTime()) + "</td></tr>"); message.println("<tr><td>Duration</td><td>" + TimeUtils.formatDuration(flow.getStartTime(), flow.getEndTime()) + "</td></tr>"); message.println("<tr><td>Status</td><td>" + flow.getStatus() + "</td></tr>"); message.println("</table>"); message.println(""); final String executionUrl = scheme + "://" + clientHostname + ":" + clientPortNumber + "/" + "executor?" 
+ "execid=" + execId; message.println("<a href=\"" + executionUrl + "\">" + flow.getFlowId() + " Execution Link</a>"); message.println(""); message.println("<h3>Reason</h3>"); final List<String> failedJobs = findFailedJobs(flow); message.println("<ul>"); for (final String jobId : failedJobs) { message.println("<li><a href=\"" + executionUrl + "&job=" + jobId + "\">Failed job '" + jobId + "' Link</a></li>"); } for (final String reason : reasons) { message.println("<li>" + reason + "</li>"); } message.println("</ul>"); message.println(""); int failedCount = 0; for (final ExecutableFlow executableFlow : pastExecutions) { if (executableFlow.getStatus().equals(Status.FAILED)) { failedCount++; } } message.println(String.format("<h3>Executions from past 72 hours (%s out of %s) failed</h3>", failedCount, pastExecutions.size())); for (final ExecutableFlow executableFlow : pastExecutions) { message.println("<table>"); message.println( "<tr><td>Execution Id</td><td>" + (executableFlow.getExecutionId()) + "</td></tr>"); message.println("<tr><td>Start Time</td><td>" + TimeUtils.formatDateTimeZone(executableFlow.getStartTime()) + "</td></tr>"); message.println("<tr><td>End Time</td><td>" + TimeUtils.formatDateTimeZone(executableFlow.getEndTime()) + "</td></tr>"); message.println("<tr><td>Status</td><td>" + executableFlow.getStatus() + "</td></tr>"); message.println("</table>"); } return true; } return false; } @Override public boolean createSuccessEmail(final ExecutableFlow flow, final EmailMessage message, final String azkabanName, final String scheme, final String clientHostname, final String clientPortNumber) { final ExecutionOptions option = flow.getExecutionOptions(); final List<String> emailList = option.getSuccessEmails(); final int execId = flow.getExecutionId(); if (emailList != null && !emailList.isEmpty()) { message.addAllToAddress(emailList); message.setMimeType("text/html"); message.setSubject("Flow '" + flow.getFlowId() + "' has succeeded on " + azkabanName); message.println("<h2> Execution '" + flow.getExecutionId() + "' of flow '" + flow.getFlowId() + "' of project '" + flow.getProjectName() + "' has succeeded on " + azkabanName + "</h2>"); message.println("<table>"); message.println("<tr><td>Start Time</td><td>" + TimeUtils.formatDateTimeZone(flow.getStartTime()) + "</td></tr>"); message.println("<tr><td>End Time</td><td>" + TimeUtils.formatDateTimeZone(flow.getEndTime()) + "</td></tr>"); message.println("<tr><td>Duration</td><td>" + TimeUtils.formatDuration(flow.getStartTime(), flow.getEndTime()) + "</td></tr>"); message.println("<tr><td>Status</td><td>" + flow.getStatus() + "</td></tr>"); message.println("</table>"); message.println(""); final String executionUrl = scheme + "://" + clientHostname + ":" + clientPortNumber + "/" + "executor?"
+ "execid=" + execId; message.println("<a href=\"" + executionUrl + "\">" + flow.getFlowId() + " Execution Link</a>"); return true; } return false; } @Override public boolean createFailedUpdateMessage(final List<ExecutableFlow> flows, final Executor executor, final ExecutorManagerException updateException, final EmailMessage message, final String azkabanName, final String scheme, final String clientHostname, final String clientPortNumber) { final ExecutionOptions option = flows.get(0).getExecutionOptions(); final List<String> emailList = option.getFailureEmails(); if (emailList != null && !emailList.isEmpty()) { message.addAllToAddress(emailList); message.setMimeType("text/html"); message.setSubject( "Flow status could not be updated from " + executor.getHost() + " on " + azkabanName); message.println( "<h2 style=\"color:#FF0000\"> Flow status could not be updated from " + executor.getHost() + " on " + azkabanName + "</h2>"); message.println("The actual status of these executions is unknown, " + "because getting status update from azkaban executor is failing"); message.println(""); message.println("<h3>Error detail</h3>"); message.println("<pre>" + ExceptionUtils.getStackTrace(updateException) + "</pre>"); message.println(""); message.println("<h3>Affected executions</h3>"); message.println("<ul>"); appendFlowLinksToMessage(message, flows, scheme, clientHostname, clientPortNumber); message.println("</ul>"); return true; } return false; } @Override public boolean createFailedExecutorHealthCheckMessage(final List<ExecutableFlow> flows, final Executor executor, final ExecutorManagerException failureException, final EmailMessage message, final String azkabanName, final String scheme, final String clientHostname, final String clientPortNumber, final List<String> emailList) { if (emailList == null || emailList.isEmpty()) { return false; } message.addAllToAddress(emailList); message.setMimeType("text/html"); message.setSubject( "Alert: Executor is unreachable, " + executor.getHost() + " on " + azkabanName); message.println( "<h2 style=\"color:#FFA500\"> Executor is unreachable. Executor host - " + executor .getHost() + " on Cluster - " + azkabanName + "</h2>"); message.println("Remedial action will be attempted on affected executions - <br>"); message.println("Following flows were reported as running on the executor and will be " + "finalized."); message.println(""); message.println("<h3>Affected executions</h3>"); message.println("<ul>"); appendFlowLinksToMessage(message, flows, scheme, clientHostname, clientPortNumber); message.println("</ul>"); message.println(""); message.println("<h3>Error detail</h3>"); message.println(String.format("Following error was reported for executor-id: %s, " + "executor-host: %s, executor-port: %d", executor.getId(), executor.getHost(), executor.getPort())); message.println("<pre>" + ExceptionUtils.getStackTrace(failureException) + "</pre>"); return true; } private void appendFlowLinksToMessage(final EmailMessage message, final List<ExecutableFlow> flows, final String scheme, final String clientHostname, final String clientPortNumber) { for (final ExecutableFlow flow : flows) { final int execId = flow.getExecutionId(); final String executionUrl = scheme + "://" + clientHostname + ":" + clientPortNumber + "/" + "executor?" + "execid=" + execId; message.println("<li>Execution '" + flow.getExecutionId() + "' of flow '" + flow.getFlowId() + "' of project '" + flow.getProjectName() + "' - " + " <a href=\"" + executionUrl + "\">Execution Link</a></li>"); } } }
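// --- Illustrative sketch (added for clarity; not part of the original source). It shows the
// creator-registry pattern implemented above: a plugin registers its MailCreator under a name,
// and lookups for unknown names fall back to the default creator instead of returning null. The
// "my-team" name and the injected creator instance are hypothetical.
class MailCreatorRegistryExample {
  void register(final MailCreator customCreator) {
    DefaultMailCreator.registerCreator("my-team", customCreator);
    assert DefaultMailCreator.getCreator("my-team") == customCreator;
    assert DefaultMailCreator.getCreator("no-such-creator") != null; // default fallback
  }
}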
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/mail/MailCreator.java
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor.mail; import azkaban.executor.ExecutableFlow; import azkaban.executor.Executor; import azkaban.executor.ExecutorManagerException; import azkaban.utils.EmailMessage; import java.util.List; public interface MailCreator { boolean createFirstErrorMessage(ExecutableFlow flow, EmailMessage message, String azkabanName, String scheme, String clientHostname, String clientPortNumber); boolean createErrorEmail(ExecutableFlow flow, List<ExecutableFlow> pastExecutions, EmailMessage message, String azkabanName, String scheme, String clientHostname, String clientPortNumber, String... reasons); boolean createSuccessEmail(ExecutableFlow flow, EmailMessage message, String azkabanName, String scheme, String clientHostname, String clientPortNumber); boolean createFailedUpdateMessage(List<ExecutableFlow> flows, Executor executor, ExecutorManagerException updateException, EmailMessage message, String azkabanName, String scheme, String clientHostname, String clientPortNumber); default boolean createFailedExecutorHealthCheckMessage(List<ExecutableFlow> flows, Executor executor, ExecutorManagerException failureException, EmailMessage message, String azkabanName, String scheme, String clientHostname, String clientPortNumber, List<String> emailList) { return false; } }
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/selector/CandidateComparator.java
/* * Copyright 2015 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor.selector; import azkaban.utils.Pair; import java.util.Collection; import java.util.Comparator; import java.util.Map; import java.util.Objects; import java.util.concurrent.ConcurrentHashMap; import org.apache.log4j.Logger; /** * <pre> * Abstract class for a candidate comparator. * This class contains the implementation of most of the core logic. Implementing classes are * expected only to * register factor comparators using the provided register function. * </pre> */ public abstract class CandidateComparator<T> implements Comparator<T> { protected static Logger logger = Logger.getLogger(CandidateComparator.class); // internal repository of the registered comparators. private final Map<String, FactorComparator<T>> factorComparatorList = new ConcurrentHashMap<>(); /** * gets the name of the current implementation of the candidate comparator. * * @return the name of the comparator. */ public abstract String getName(); /** * Tie-break method which kicks in when the registered comparators produce an equal score for * both sides. The tie-breaker does its best to make sure a stable result is returned. */ protected boolean tieBreak(final T object1, final T object2) { if (null == object2) { return true; } if (null == object1) { return false; } return object1.hashCode() >= object2.hashCode(); } /** * function to register a factor comparator in the internal map for future reference. * * @param comparator the comparator object to be registered. */ protected void registerFactorComparator(final FactorComparator<T> comparator) { if (null == comparator || Integer.MAX_VALUE - this.getTotalWeight() < comparator.getWeight()) { throw new IllegalArgumentException("unable to register comparator." + " The passed comparator is null or has an invalid weight value."); } // add or replace the comparator. this.factorComparatorList.put(comparator.getFactorName(), comparator); logger.debug(String.format("Factor comparator added for '%s'. Weight = '%s'", comparator.getFactorName(), comparator.getWeight())); } /** * function returns the total weight of the registered comparators. * * @return the value of the total weight. */ public int getTotalWeight() { int totalWeight = 0; // save out a copy of the values as HashMap.values() takes O(n) to return the value. final Collection<FactorComparator<T>> allValues = this.factorComparatorList.values(); for (final FactorComparator<T> item : allValues) { if (item != null) { totalWeight += item.getWeight(); } } return totalWeight; } /** * <pre> * function to actually calculate the scores for the two objects that are being compared. * the comparison follows the following logic - * 1. if both objects are equal, return a 0 score for both. * 2. if one side is null, the other side gets all the score. * 3. if both sides are non-null, both values will be passed to all the registered * FactorComparators; * each factor comparator will generate a result based on its own logic, and the weight of the * comparator will be * added to the winning side; if equal, no value will be added to either side. * 4. the final result will be returned in a Pair container. * * </pre> * * @param object1 the first object (left side) to be compared. * @param object2 the second object (right side) to be compared. * @return a pair structure containing the scores for both sides. */ public Pair<Integer, Integer> getComparisonScore(final T object1, final T object2) { logger.debug(String.format("start comparing '%s' with '%s', total weight = %s ", object1 == null ? "(null)" : object1.toString(), object2 == null ? "(null)" : object2.toString(), this.getTotalWeight())); int result1 = 0; int result2 = 0; // shortcut if the objects are identical. if (object1 == object2) { logger.debug("[Comparator] same object."); } else // left side is null. if (object1 == null) { logger.debug("[Comparator] left side is null, right side gets total weight."); result2 = this.getTotalWeight(); } else // right side is null. if (object2 == null) { logger.debug("[Comparator] right side is null, left side gets total weight."); result1 = this.getTotalWeight(); } else // both sides are non-null, put them through the full loop { final Collection<FactorComparator<T>> comparatorList = this.factorComparatorList.values(); for (final FactorComparator<T> comparator : comparatorList) { final int result = comparator.compare(object1, object2); result1 = result1 + (result > 0 ? comparator.getWeight() : 0); result2 = result2 + (result < 0 ? comparator.getWeight() : 0); logger.debug(String.format("[Factor: %s] compare result : %s (current score %s vs %s)", comparator.getFactorName(), result, result1, result2)); } } // in case of same score, use tie-breaker to stabilize the result. if (result1 == result2) { final boolean result = this.tieBreak(object1, object2); logger.debug("[TieBreaker] TieBreaker chose " + (result ? String.format("left side (%s)", null == object1 ? "null" : object1.toString()) : String.format("right side (%s)", null == object2 ? "null" : object2.toString()))); if (result) { result1++; } else { result2++; } } logger.debug(String.format("Result : %s vs %s ", result1, result2)); return new Pair<>(result1, result2); } @Override public int compare(final T o1, final T o2) { final Pair<Integer, Integer> result = this.getComparisonScore(o1, o2); return Objects.equals(result.getFirst(), result.getSecond()) ? 0 : result.getFirst() > result.getSecond() ? 1 : -1; } }
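// --- Illustrative sketch (added for clarity; not part of the original source). It walks through
// the weighted scoring described above with two hypothetical factors: with weights 3 and 1, a
// candidate that wins the heavier factor scores 3 even while losing the lighter one, so
// getComparisonScore(5, 2) would yield the pair (3, 1) and the left side wins. The
// FactorComparator.create(name, weight, comparator) factory is assumed to follow the shape used
// elsewhere in this package.
class WeightedScoreExample extends CandidateComparator<Integer> {
  WeightedScoreExample() {
    // heavier factor: the larger value wins 3 points
    registerFactorComparator(FactorComparator.create("Magnitude", 3, Integer::compare));
    // lighter factor: the smaller value wins 1 point
    registerFactorComparator(
        FactorComparator.create("Smallness", 1, (a, b) -> Integer.compare(b, a)));
  }
  @Override
  public String getName() {
    return "WeightedScoreExample";
  }
}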
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/selector/CandidateFilter.java
/* * Copyright 2015 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor.selector; import java.util.Collection; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import org.apache.log4j.Logger; /** * Abstract class for a candidate filter. This class contains the implementation of most of the core * logic. Implementing classes are expected only to register filters using the provided register * function. */ public abstract class CandidateFilter<T, V> { protected static Logger logger = Logger.getLogger(CandidateFilter.class); // internal repository of the registered filters. private final Map<String, FactorFilter<T, V>> factorFilterList = new ConcurrentHashMap<>(); /** * gets the name of the current implementation of the candidate filter. * * @return the name of the filter. */ public abstract String getName(); /** * function to register a factor filter in the internal map for future reference. * * @param filter the filter object to be registered. */ protected void registerFactorFilter(final FactorFilter<T, V> filter) { if (null == filter) { throw new IllegalArgumentException("unable to register factor filter. " + "The passed filter is null."); } // add or replace the filter. this.factorFilterList.put(filter.getFactorName(), filter); logger.debug(String.format("Factor filter added for '%s'.", filter.getFactorName())); } /** * function to analyze the target item according to the reference object to decide whether the * item should be filtered. * * @param filteringTarget object to be checked. * @param referencingObject object which contains statistics based on which a decision is made * whether the object being checked needs to be filtered or not. * @return true if the check passed, false if the check failed, which means the item needs to be * filtered. */ public boolean filterTarget(final T filteringTarget, final V referencingObject) { logger.debug(String.format("start filtering '%s' with factor filter for '%s'", filteringTarget == null ? "(null)" : filteringTarget.toString(), this.getName())); final Collection<FactorFilter<T, V>> filterList = this.factorFilterList.values(); boolean result = true; for (final FactorFilter<T, V> filter : filterList) { result &= filter.filterTarget(filteringTarget, referencingObject); logger.debug(String.format("[Factor: %s] filter result : %s ", filter.getFactorName(), result)); if (!result) { break; } } logger.debug(String.format("Final filtering result : %s ", result)); return result; } }
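// --- Illustrative sketch (added for clarity; not part of the original source). It shows the
// veto semantics implemented above: every registered FactorFilter must pass, and the first
// failure short-circuits the loop. The capacity rule is hypothetical, and the
// FactorFilter.create(name, filter) factory is assumed to follow the shape used elsewhere in
// this package.
class MinimumCapacityFilter extends CandidateFilter<azkaban.executor.Executor, Integer> {
  MinimumCapacityFilter() {
    // veto executors whose remaining flow capacity is below the requested amount
    registerFactorFilter(FactorFilter.create("MinRemainingFlowCapacity",
        (executor, required) -> executor.getExecutorInfo() != null
            && executor.getExecutorInfo().getRemainingFlowCapacity() >= required));
  }
  @Override
  public String getName() {
    return "MinimumCapacityFilter";
  }
}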
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/selector/CandidateSelector.java
/* * Copyright 2015 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor.selector; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import org.apache.log4j.Logger; /** * Implementation of the CandidateSelector. * * @param <K> executor object type. * @param <V> dispatching object type. */ public class CandidateSelector<K extends Comparable<K>, V> implements Selector<K, V> { private static final Logger logger = Logger.getLogger(CandidateSelector.class); private final CandidateFilter<K, V> filter; private final CandidateComparator<K> comparator; /** * constructor of the class. * * @param filter CandidateFilter object to be used to perform the candidate filtering. * @param comparator CandidateComparator object to be used to find the best-suited candidate from * the filtered list. */ public CandidateSelector(final CandidateFilter<K, V> filter, final CandidateComparator<K> comparator) { this.filter = filter; this.comparator = comparator; } @Override public K getBest(final Collection<K> candidateList, final V dispatchingObject) { // shortcut if the candidateList is empty. if (null == candidateList || candidateList.size() == 0) { logger.error("failed to select a candidate as the passed candidateList is null or empty."); return null; } logger.debug("start candidate selection logic."); logger.debug(String.format("candidate count before filtering: %s", candidateList.size())); // to keep the input untouched, we will form up a new list based on the filtering result. Collection<K> filteredList = new ArrayList<>(); if (null != this.filter) { for (final K candidateInfo : candidateList) { if (this.filter.filterTarget(candidateInfo, dispatchingObject)) { filteredList.add(candidateInfo); } } } else { filteredList = candidateList; logger.debug("skipping the candidate filtering as the filter object is not specified."); } logger.debug(String.format("candidate count after filtering: %s", filteredList.size())); if (filteredList.size() == 0) { logger.debug("failed to select a candidate as the filtered candidate list is empty."); return null; } if (null == this.comparator) { logger.debug( "candidate comparator is not specified; the candidates' natural ordering will be used."); } // final work - find the best candidate from the filtered list. final K executor = Collections.max(filteredList, this.comparator); logger.debug(String.format("candidate selected %s", null == executor ? "(null)" : executor.toString())); return executor; } @Override public String getName() { return "CandidateSelector"; } }
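// --- Illustrative sketch (added for clarity; not part of the original source). It wires a
// filter and a comparator into a selector, as described above: candidates are filtered first,
// then the best survivor is chosen via Collections.max. The concrete filter and comparator
// instances are assumed to be supplied by the caller (for example, the executor-oriented
// implementations in this package).
class SelectorWiringExample {
  azkaban.executor.Executor pick(
      final CandidateFilter<azkaban.executor.Executor, azkaban.executor.ExecutableFlow> filter,
      final CandidateComparator<azkaban.executor.Executor> comparator,
      final java.util.Collection<azkaban.executor.Executor> executors,
      final azkaban.executor.ExecutableFlow flow) {
    final CandidateSelector<azkaban.executor.Executor, azkaban.executor.ExecutableFlow> selector =
        new CandidateSelector<>(filter, comparator);
    return selector.getBest(executors, flow); // null when nothing survives filtering
  }
}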
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/selector/ExecutorComparator.java
/* * Copyright 2015 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor.selector; import azkaban.executor.Executor; import azkaban.executor.ExecutorInfo; import java.util.Comparator; import java.util.HashMap; import java.util.Map; import java.util.Map.Entry; import java.util.Set; /** * De-normalized version of the CandidateComparator, which also contains the implementation of the * factor comparators. */ public class ExecutorComparator extends CandidateComparator<Executor> { // factor comparator names private static final String NUMOFASSIGNEDFLOW_COMPARATOR_NAME = "NumberOfAssignedFlowComparator"; private static final String MEMORY_COMPARATOR_NAME = "Memory"; private static final String LSTDISPATCHED_COMPARATOR_NAME = "LastDispatched"; private static final String CPUUSAGE_COMPARATOR_NAME = "CpuUsage"; private static Map<String, ComparatorCreator> comparatorCreatorRepository = null; /** * static initializer of the class. * We will build the comparator repository here. * When a new comparator is added, please do remember to register it here. * */ static { comparatorCreatorRepository = new HashMap<>(); // register the creator for number of assigned flow comparator. comparatorCreatorRepository.put(NUMOFASSIGNEDFLOW_COMPARATOR_NAME, ExecutorComparator::getNumberOfAssignedFlowComparator); // register the creator for memory comparator. comparatorCreatorRepository .put(MEMORY_COMPARATOR_NAME, ExecutorComparator::getMemoryComparator); // register the creator for last dispatched time comparator. comparatorCreatorRepository .put(LSTDISPATCHED_COMPARATOR_NAME, ExecutorComparator::getLstDispatchedTimeComparator); // register the creator for CPU Usage comparator. comparatorCreatorRepository .put(CPUUSAGE_COMPARATOR_NAME, ExecutorComparator::getCpuUsageComparator); } /** * constructor of the ExecutorComparator. * * @param comparatorList the list of comparators, plus their weight information, to be registered; the * parameter must be a not-empty and valid list object. */ public ExecutorComparator(final Map<String, Integer> comparatorList) { if (null == comparatorList || comparatorList.size() == 0) { throw new IllegalArgumentException("failed to initialize executor comparator " + "as the passed comparator list is invalid or empty."); } // register the comparators; we will not throw here if the weight is invalid, it is handled in the super. for (final Entry<String, Integer> entry : comparatorList.entrySet()) { if (comparatorCreatorRepository.containsKey(entry.getKey())) { this.registerFactorComparator(comparatorCreatorRepository. get(entry.getKey()). create(entry.getValue())); } else { throw new IllegalArgumentException( String.format("failed to initialize executor comparator " + "as the comparator implementation for requested factor '%s' doesn't exist.", entry.getKey())); } } } /** * Gets the name list of all available comparators. * * @return the list of the names. */ public static Set<String> getAvailableComparatorNames() { return comparatorCreatorRepository.keySet(); } /** * <pre> * helper function that does the null check on two statistics objects; comparators can leverage this * function to provide shortcuts if the statistics object is missing from one or both sides of the * executors. * </pre> * * @param statisticsObj1 the first statistics object to be checked. * @param statisticsObj2 the second statistics object to be checked. * @param caller the name of the calling function, for logging purposes. * @return true if the passed statistics are NOT both valid, so a shortcut can be made (caller can * consume the result); false otherwise. */ private static boolean statisticsObjectCheck(final ExecutorInfo statisticsObj1, final ExecutorInfo statisticsObj2, final String caller) { // both don't expose the info if (null == statisticsObj1 && null == statisticsObj2) { logger.debug(String.format("%s : neither of the executors exposed statistics info.", caller)); return true; } //right side doesn't expose the info. if (null == statisticsObj2) { logger.debug(String.format( "%s : choosing left side and the right side executor doesn't expose statistics info", caller)); return true; } //left side doesn't expose the info. if (null == statisticsObj1) { logger.debug(String.format( "%s : choosing right side and the left side executor doesn't expose statistics info", caller)); return true; } // both not null return false; } /** * function defines the number of assigned flow comparator. * * @param weight weight of the comparator. */ private static FactorComparator<Executor> getNumberOfAssignedFlowComparator(final int weight) { return FactorComparator .create(NUMOFASSIGNEDFLOW_COMPARATOR_NAME, weight, new Comparator<Executor>() { @Override public int compare(final Executor o1, final Executor o2) { final ExecutorInfo stat1 = o1.getExecutorInfo(); final ExecutorInfo stat2 = o2.getExecutorInfo(); final Integer result = 0; if (statisticsObjectCheck(stat1, stat2, NUMOFASSIGNEDFLOW_COMPARATOR_NAME)) { return result; } return ((Integer) stat1.getRemainingFlowCapacity()) .compareTo(stat2.getRemainingFlowCapacity()); } }); } /** * function defines the cpuUsage comparator. * * @param weight weight of the comparator. */ private static FactorComparator<Executor> getCpuUsageComparator(final int weight) { return FactorComparator.create(CPUUSAGE_COMPARATOR_NAME, weight, new Comparator<Executor>() { @Override public int compare(final Executor o1, final Executor o2) { final ExecutorInfo stat1 = o1.getExecutorInfo(); final ExecutorInfo stat2 = o2.getExecutorInfo(); final int result = 0; if (statisticsObjectCheck(stat1, stat2, CPUUSAGE_COMPARATOR_NAME)) { return result; } // CPU usage: the lesser the value, the better. return ((Double) stat2.getCpuUsage()).compareTo(stat1.getCpuUsage()); } }); } /** * function defines the last dispatched time comparator. * * @param weight weight of the comparator. */ private static FactorComparator<Executor> getLstDispatchedTimeComparator(final int weight) { return FactorComparator .create(LSTDISPATCHED_COMPARATOR_NAME, weight, new Comparator<Executor>() { @Override public int compare(final Executor o1, final Executor o2) { final ExecutorInfo stat1 = o1.getExecutorInfo(); final ExecutorInfo stat2 = o2.getExecutorInfo(); final int result = 0; if (statisticsObjectCheck(stat1, stat2, LSTDISPATCHED_COMPARATOR_NAME)) { return result; } // Note: an earlier date time indicates higher weight. return ((Long) stat2.getLastDispatchedTime()).compareTo(stat1.getLastDispatchedTime()); } }); } /** * <pre> * function defines the Memory comparator. * Note: the comparator first compares the absolute amount of remaining memory; if both sides have * the same value, it goes further to check the percentage of remaining memory. * </pre> * * @param weight weight of the comparator. */ private static FactorComparator<Executor> getMemoryComparator(final int weight) { return FactorComparator.create(MEMORY_COMPARATOR_NAME, weight, new Comparator<Executor>() { @Override public int compare(final Executor o1, final Executor o2) { final ExecutorInfo stat1 = o1.getExecutorInfo(); final ExecutorInfo stat2 = o2.getExecutorInfo(); final int result = 0; if (statisticsObjectCheck(stat1, stat2, MEMORY_COMPARATOR_NAME)) { return result; } if (stat1.getRemainingMemoryInMB() != stat2.getRemainingMemoryInMB()) { return stat1.getRemainingMemoryInMB() > stat2.getRemainingMemoryInMB() ? 1 : -1; } return Double.compare(stat1.getRemainingMemoryPercent(), stat2.getRemainingMemoryPercent()); } }); } @Override public String getName() { return "ExecutorComparator"; } private interface ComparatorCreator { FactorComparator<Executor> create(int weight); } }
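A construction sketch (illustrative). The factor names and the constructor contract come straight from the class above; the weight values are arbitrary.

import azkaban.executor.selector.ExecutorComparator;
import java.util.HashMap;
import java.util.Map;

public final class ExecutorComparatorDemo {
  public static void main(String[] args) {
    // weight map keyed by the registered factor names; an unknown name throws IllegalArgumentException.
    Map<String, Integer> weights = new HashMap<>();
    weights.put("Memory", 2);
    weights.put("CpuUsage", 1);
    weights.put("LastDispatched", 1);
    ExecutorComparator comparator = new ExecutorComparator(weights);
    // the four names the repository knows about:
    System.out.println(ExecutorComparator.getAvailableComparatorNames());
  }
}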
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/selector/ExecutorFilter.java
/* * Copyright 2015 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor.selector; import azkaban.executor.ExecutableFlow; import azkaban.executor.Executor; import azkaban.executor.ExecutorInfo; import java.util.Collection; import java.util.HashMap; import java.util.Map; import java.util.Set; /** * De-normalized version of the CandidateFilter, which also contains the implementation of the * factor filters. */ public final class ExecutorFilter extends CandidateFilter<Executor, ExecutableFlow> { // factor filter names. private static final String STATICREMAININGFLOWSIZE_FILTER_NAME = "StaticRemainingFlowSize"; private static final String MINIMUMFREEMEMORY_FILTER_NAME = "MinimumFreeMemory"; private static final String CPUSTATUS_FILTER_NAME = "CpuStatus"; private static Map<String, FactorFilter<Executor, ExecutableFlow>> filterRepository = null; /**<pre> * static initializer of the class. * We will build the filter repository here. * When a new filter is added, please do remember to register it here. * </pre> * */ static { filterRepository = new HashMap<>(); filterRepository.put(STATICREMAININGFLOWSIZE_FILTER_NAME, getStaticRemainingFlowSizeFilter()); filterRepository.put(MINIMUMFREEMEMORY_FILTER_NAME, getMinimumReservedMemoryFilter()); filterRepository.put(CPUSTATUS_FILTER_NAME, getCpuStatusFilter()); } /** * constructor of the ExecutorFilter. * * @param filterList the list of filters to be registered; the parameter must be a not-empty and * valid list object. */ public ExecutorFilter(final Collection<String> filterList) { // shortcut if the filter list is invalid. A little bit ugly to have to throw in constructor. if (null == filterList || filterList.size() == 0) { logger.error( "failed to initialize executor filter as the passed filter list is invalid or empty."); throw new IllegalArgumentException("filterList"); } // register the filters according to the list. for (final String filterName : filterList) { if (filterRepository.containsKey(filterName)) { this.registerFactorFilter(filterRepository.get(filterName)); } else { logger.error(String.format("failed to initialize executor filter " + "as the filter implementation for requested factor '%s' doesn't exist.", filterName)); throw new IllegalArgumentException("filterList"); } } } /** * Gets the name list of all available filters. * * @return the list of the names. */ public static Set<String> getAvailableFilterNames() { return filterRepository.keySet(); } /** * <pre> * function to register the static remaining flow size filter. * NOTE : this is a static filter, which means the filter will be filtering based on the system * standard, which does not come from the passed flow. * Ideally this filter will make sure only executors that haven't reached the max allowed number * of executing flows pass. * </pre> */ private static FactorFilter<Executor, ExecutableFlow> getStaticRemainingFlowSizeFilter() { return FactorFilter .create(STATICREMAININGFLOWSIZE_FILTER_NAME, (filteringTarget, referencingObject) -> { if (null == filteringTarget) { logger.debug(String.format("%s : filtering out the target as it is null.", STATICREMAININGFLOWSIZE_FILTER_NAME)); return false; } final ExecutorInfo stats = filteringTarget.getExecutorInfo(); if (null == stats) { logger.debug(String.format("%s : filtering out %s as its stats are unavailable.", STATICREMAININGFLOWSIZE_FILTER_NAME, filteringTarget.toString())); return false; } return stats.getRemainingFlowCapacity() > 0; }); } /** * <pre> * function to register the static Minimum Reserved Memory filter. * NOTE : this is a static filter, which means the filter will be filtering based on the system * standard, which does not come from the passed flow. * This filter will filter out any executors that have remaining memory below 6 GB. * </pre> */ private static FactorFilter<Executor, ExecutableFlow> getMinimumReservedMemoryFilter() { return FactorFilter .create(MINIMUMFREEMEMORY_FILTER_NAME, new FactorFilter.Filter<Executor, ExecutableFlow>() { private static final int MINIMUM_FREE_MEMORY = 6 * 1024; @Override public boolean filterTarget(final Executor filteringTarget, final ExecutableFlow referencingObject) { if (null == filteringTarget) { logger.debug(String.format("%s : filtering out the target as it is null.", MINIMUMFREEMEMORY_FILTER_NAME)); return false; } final ExecutorInfo stats = filteringTarget.getExecutorInfo(); if (null == stats) { logger.debug(String.format("%s : filtering out %s as its stats are unavailable.", MINIMUMFREEMEMORY_FILTER_NAME, filteringTarget.toString())); return false; } return stats.getRemainingMemoryInMB() > MINIMUM_FREE_MEMORY; } }); } /** * <pre> * function to register the static CPU status filter. * NOTE : this is a static filter, which means the filter will be filtering based on the system * standard, which does not come from the passed flow. * This filter will filter out any executors whose current CPU usage exceeds 95%. * </pre> */ private static FactorFilter<Executor, ExecutableFlow> getCpuStatusFilter() { return FactorFilter .create(CPUSTATUS_FILTER_NAME, new FactorFilter.Filter<Executor, ExecutableFlow>() { private static final int MAX_CPU_CURRENT_USAGE = 95; @Override public boolean filterTarget(final Executor filteringTarget, final ExecutableFlow referencingObject) { if (null == filteringTarget) { logger.debug(String .format("%s : filtering out the target as it is null.", CPUSTATUS_FILTER_NAME)); return false; } final ExecutorInfo stats = filteringTarget.getExecutorInfo(); if (null == stats) { logger.debug(String.format("%s : filtering out %s as its stats are unavailable.", CPUSTATUS_FILTER_NAME, filteringTarget.toString())); return false; } return stats.getCpuUsage() < MAX_CPU_CURRENT_USAGE; } }); } @Override public String getName() { return "ExecutorFilter"; } }
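A construction sketch (illustrative); the three filter names are the ones registered in the static initializer above, and any other name throws.

import azkaban.executor.selector.ExecutorFilter;
import java.util.Arrays;

public final class ExecutorFilterDemo {
  public static void main(String[] args) {
    ExecutorFilter filter = new ExecutorFilter(
        Arrays.asList("StaticRemainingFlowSize", "MinimumFreeMemory", "CpuStatus"));
    System.out.println(filter.getName()); // "ExecutorFilter"
    System.out.println(ExecutorFilter.getAvailableFilterNames());
  }
}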
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/selector/ExecutorSelector.java
/* * Copyright 2015 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor.selector; import azkaban.executor.ExecutableFlow; import azkaban.executor.Executor; import java.util.Collection; import java.util.Map; /** * <pre> * Executor selector class implementation. * NOTE: This class is a de-generalized version of the CandidateSelector, which provides a * clean and convenient constructor to take in filter and comparator name list and build * the instance from that. * </pre> */ public class ExecutorSelector extends CandidateSelector<Executor, ExecutableFlow> { /** * Constructor of the class. * * @param filterList name list of the filters to be registered, filter feature will be disabled if * a null value is passed. * @param comparatorList name/weight pair list of the comparators to be registered, again * comparator feature is disabled if a null value is passed. */ public ExecutorSelector(final Collection<String> filterList, final Map<String, Integer> comparatorList) { super(null == filterList || filterList.isEmpty() ? null : new ExecutorFilter(filterList), null == comparatorList || comparatorList.isEmpty() ? null : new ExecutorComparator(comparatorList)); } }
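A dispatch sketch (illustrative): the helper below assumes the caller already holds the live executor collection and the flow to dispatch; the filter and comparator names are the ones registered in the two classes above, and getBest is inherited from CandidateSelector.

import azkaban.executor.ExecutableFlow;
import azkaban.executor.Executor;
import azkaban.executor.selector.ExecutorSelector;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;

public final class DispatchHelper {
  // returns null when no executor survives filtering, mirroring getBest's contract.
  static Executor pickExecutor(Collection<Executor> executors, ExecutableFlow flow) {
    ExecutorSelector selector = new ExecutorSelector(
        Arrays.asList("StaticRemainingFlowSize", "CpuStatus"),
        Collections.singletonMap("Memory", 1));
    return selector.getBest(executors, flow);
  }
}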
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/selector/FactorComparator.java
/* * Copyright 2015 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor.selector; import java.util.Comparator; import org.apache.log4j.Logger; /** * Wrapper class for a factor comparator. * * @param T: the type of the objects to be compared. */ public final class FactorComparator<T> { private static final Logger logger = Logger.getLogger(FactorComparator.class); private final String factorName; private final Comparator<T> comparator; private int weight; /** * private constructor of the class. User will create the instance of the class by calling the * static method provided below. * * @param factorName : the factor name. * @param weight : the weight of the comparator. * @param comparator : function to be provided by user on how the comparison should be made. */ private FactorComparator(final String factorName, final int weight, final Comparator<T> comparator) { this.factorName = factorName; this.weight = weight; this.comparator = comparator; } /** * static function to generate an instance of the class. refer to the constructor for the param * definitions. */ public static <T> FactorComparator<T> create(final String factorName, final int weight, final Comparator<T> comparator) { if (null == factorName || factorName.length() == 0 || weight < 0 || null == comparator) { logger.error( "failed to create instance of FactorComparator, at least one of the input parameters is invalid"); return null; } return new FactorComparator<>(factorName, weight, comparator); } // function to return the factor name. public String getFactorName() { return this.factorName; } // function to return the weight value. public int getWeight() { return this.weight; } // function to update the weight value. public void updateWeight(final int value) { this.weight = value; } // the actual compare function, which will leverage the user-defined function. public int compare(final T object1, final T object2) { return this.comparator.compare(object1, object2); } }
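A creation sketch (illustrative): the static factory above wraps any Comparator with a name and weight, returning null (and logging) on invalid input.

import azkaban.executor.selector.FactorComparator;

public final class FactorComparatorDemo {
  public static void main(String[] args) {
    // wrap a plain integer comparison as a named, weighted factor comparator.
    FactorComparator<Integer> byValue = FactorComparator.create("ByValue", 1, Integer::compare);
    System.out.println(byValue.compare(3, 5)); // negative: 3 < 5
    System.out.println(byValue.getWeight());   // 1
  }
}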
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/selector/FactorFilter.java
/* * Copyright 2015 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor.selector; import org.apache.log4j.Logger; /** * Wrapper class for a factor filter. * * @param T: the type of the objects to be compared. * @param V: the type of the object to be used for filtering. */ public final class FactorFilter<T, V> { private static final Logger logger = Logger.getLogger(FactorFilter.class); private final String factorName; private final Filter<T, V> filter; /** * private constructor of the class. User will create the instance of the class by calling the * static method provided below. * * @param factorName : the factor name. * @param filter : user defined function specifying how the filtering should be implemented. */ private FactorFilter(final String factorName, final Filter<T, V> filter) { this.factorName = factorName; this.filter = filter; } /** * static function to generate an instance of the class. refer to the constructor for the param * definitions. */ public static <T, V> FactorFilter<T, V> create(final String factorName, final Filter<T, V> filter) { if (null == factorName || factorName.length() == 0 || null == filter) { logger.error( "failed to create instance of FactorFilter, at least one of the input parameters is invalid"); return null; } return new FactorFilter<>(factorName, filter); } // function to return the factor name. public String getFactorName() { return this.factorName; } // the actual check function, which will leverage the logic defined by the user. public boolean filterTarget(final T filteringTarget, final V referencingObject) { return this.filter.filterTarget(filteringTarget, referencingObject); } // interface of the filter. public interface Filter<T, V> { /** * function to analyze the target item according to the reference object to decide whether the * item should be filtered. * * @param filteringTarget object to be checked. * @param referencingObject object which contains statistics based on which a decision is made * whether the object being checked needs to be filtered or not. * @return true if the check passed, false if the check failed, which means the item needs to be * filtered. */ boolean filterTarget(T filteringTarget, V referencingObject); } }
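A creation sketch (illustrative): since Filter is a single-method interface, a lambda is enough; the "AtLeast" factor name and Integer types are arbitrary choices for the demo.

import azkaban.executor.selector.FactorFilter;

public final class FactorFilterDemo {
  public static void main(String[] args) {
    // the target passes only when it meets the threshold carried by the referencing object.
    FactorFilter<Integer, Integer> atLeast = FactorFilter.create("AtLeast",
        (target, threshold) -> target != null && target >= threshold);
    System.out.println(atLeast.filterTarget(10, 5)); // true
    System.out.println(atLeast.filterTarget(3, 5));  // false
  }
}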
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/executor/selector/Selector.java
/* * Copyright 2015 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor.selector; import java.util.Collection; /** * <pre> * Definition of the selector interface. * An implementation of the selector interface provides the functionality * to return a candidate from the candidateList that suits best for the dispatchingObject. * </pre> * * @param K : type of the candidate. * @param V : type of the dispatching object. */ public interface Selector<K extends Comparable<K>, V> { /** * Function returns the next best suit candidate from the candidateList for the dispatching * object. * * @param candidateList : List of the candidates to select from. * @param dispatchingObject : the object to be dispatched. * @return candidate from the candidate list that suits best for the dispatching object. */ public K getBest(Collection<K> candidateList, V dispatchingObject); /** * Function returns the name of the current Dispatcher. * * @return name of the dispatcher. */ public String getName(); }
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/flow/CommonJobProperties.java
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.flow; public class CommonJobProperties { /* * The following are Common properties that can be set in a job file */ /** * The type of job that will be executed. Examples: command, java, etc. */ public static final String JOB_TYPE = "type"; /** * Force a node to be a root node in a flow, even if there are other jobs dependent on it. */ public static final String ROOT_NODE = "root.node"; /** * Comma delimited list of job names which are dependencies */ public static final String DEPENDENCIES = "dependencies"; /** * The number of retries when this job has failed. */ public static final String RETRIES = "retries"; /** * The time in milliseconds to back off after every retry */ public static final String RETRY_BACKOFF = "retry.backoff"; /** * Comma delimited list of email addresses for both failure and success messages */ public static final String NOTIFY_EMAILS = "notify.emails"; /** * Comma delimited list of email addresses for success messages */ public static final String SUCCESS_EMAILS = "success.emails"; /** * Comma delimited list of email addresses for failure messages */ public static final String FAILURE_EMAILS = "failure.emails"; /* * The following are the common props that will be added to the job by azkaban */ /** * URL to access Azkaban on a given host */ public static final String AZKABAN_URL = "azkaban.url"; /** * The attempt number of the executing job. */ public static final String JOB_ATTEMPT = "azkaban.job.attempt"; /** * The job's metadata file name. */ public static final String JOB_METADATA_FILE = "azkaban.job.metadata.file"; /** * The job's attachment file absolute path. */ public static final String JOB_ATTACHMENT_FILE = "azkaban.job.attachment.file"; /** * The job's log file absolute path. */ public static final String JOB_LOG_FILE = "azkaban.job.log.file"; /** * The executing flow id */ public static final String FLOW_ID = "azkaban.flow.flowid"; /** * The nested flow id path */ public static final String NESTED_FLOW_PATH = "azkaban.flow.nested.path"; /** * The executing job id */ public static final String JOB_ID = "azkaban.job.id"; /** * The execution id. This should be unique per flow, but may not be due to restarts. */ public static final String EXEC_ID = "azkaban.flow.execid"; /** * The numerical project id. */ public static final String PROJECT_ID = "azkaban.flow.projectid"; /** * The project name. */ public static final String PROJECT_NAME = "azkaban.flow.projectname"; /** * The project last modified by user. */ public static final String PROJECT_LAST_CHANGED_BY = "azkaban.flow.projectlastchangedby"; /** * The project last modified on date. */ public static final String PROJECT_LAST_CHANGED_DATE = "azkaban.flow.projectlastchangeddate"; /** * The version of the project the flow is running. This may change if a forced hotspot occurs. 
*/ public static final String PROJECT_VERSION = "azkaban.flow.projectversion"; /** * Find out who is the submit user, in addition to the user.to.proxy (they may be different) */ public static final String SUBMIT_USER = "azkaban.flow.submituser"; /** * A uuid assigned to every execution */ public static final String FLOW_UUID = "azkaban.flow.uuid"; public static final String JOB_LINK = "azkaban.link.job.url"; public static final String WORKFLOW_LINK = "azkaban.link.workflow.url"; public static final String EXECUTION_LINK = "azkaban.link.execution.url"; public static final String JOBEXEC_LINK = "azkaban.link.jobexec.url"; public static final String ATTEMPT_LINK = "azkaban.link.attempt.url"; public static final String OUT_NODES = "azkaban.job.outnodes"; public static final String IN_NODES = "azkaban.job.innodes"; /** * Properties for passing the flow start time to the jobs. */ public static final String FLOW_START_TIMESTAMP = "azkaban.flow.start.timestamp"; public static final String FLOW_START_YEAR = "azkaban.flow.start.year"; public static final String FLOW_START_MONTH = "azkaban.flow.start.month"; public static final String FLOW_START_DAY = "azkaban.flow.start.day"; public static final String FLOW_START_HOUR = "azkaban.flow.start.hour"; public static final String FLOW_START_MINUTE = "azkaban.flow.start.minute"; public static final String FLOW_START_SECOND = "azkaban.flow.start.second"; public static final String FLOW_START_MILLISSECOND = "azkaban.flow.start.milliseconds"; public static final String FLOW_START_TIMEZONE = "azkaban.flow.start.timezone"; }
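A reading sketch (illustrative, not from the original source): a job handed its Props by Azkaban can look up the injected values through these constants. The JobContext class name is hypothetical, and Props.getString is assumed to exist on azkaban.utils.Props with this signature.

import azkaban.flow.CommonJobProperties;
import azkaban.utils.Props;

public final class JobContext {
  // pull a few of the flow-scoped values Azkaban injects into every job's props.
  static String describe(final Props jobProps) {
    final String flowId = jobProps.getString(CommonJobProperties.FLOW_ID);
    final String execId = jobProps.getString(CommonJobProperties.EXEC_ID);
    final String attempt = jobProps.getString(CommonJobProperties.JOB_ATTEMPT);
    return flowId + " #" + execId + " attempt " + attempt;
  }
}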
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/flow/ConditionOnJobStatus.java
/* * Copyright 2018 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.flow; public enum ConditionOnJobStatus { ALL_SUCCESS("all_success"), ALL_FAILED("all_failed"), ALL_DONE("all_done"), ONE_FAILED("one_failed"), ONE_SUCCESS("one_success"); private final String condition; ConditionOnJobStatus(final String condition) { this.condition = condition; } public static ConditionOnJobStatus fromString(final String condition) { for (final ConditionOnJobStatus conditionOnJobStatus : ConditionOnJobStatus.values()) { if (conditionOnJobStatus.condition.equalsIgnoreCase(condition)) { return conditionOnJobStatus; } } return null; } @Override public String toString() { return this.condition; } }
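A round-trip sketch (illustrative): fromString is case-insensitive and maps unknown strings to null rather than throwing.

import azkaban.flow.ConditionOnJobStatus;

public final class ConditionDemo {
  public static void main(String[] args) {
    System.out.println(ConditionOnJobStatus.fromString("ONE_SUCCESS")); // one_success
    System.out.println(ConditionOnJobStatus.fromString("bogus"));       // null
  }
}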
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/flow/Edge.java
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.flow; import java.awt.geom.Point2D; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; public class Edge { private final String sourceId; private final String targetId; private Node source; private Node target; private String error; // Useful in rendering. private String guideType; private List<Point2D> guideValues; public Edge(final String fromId, final String toId) { this.sourceId = fromId; this.targetId = toId; } public Edge(final Edge clone) { this.sourceId = clone.getSourceId(); this.targetId = clone.getTargetId(); this.error = clone.getError(); } public static Edge fromObject(final Object obj) { final HashMap<String, Object> edgeObj = (HashMap<String, Object>) obj; final String source = (String) edgeObj.get("source"); final String target = (String) edgeObj.get("target"); final String error = (String) edgeObj.get("error"); final Edge edge = new Edge(source, target); edge.setError(error); if (edgeObj.containsKey("guides")) { final Map<String, Object> guideMap = (Map<String, Object>) edgeObj.get("guides"); final List<Object> values = (List<Object>) guideMap.get("values"); final String type = (String) guideMap.get("type"); final ArrayList<Point2D> valuePoints = new ArrayList<>(); for (final Object pointObj : values) { final Map<String, Double> point = (Map<String, Double>) pointObj; final Double x = point.get("x"); final Double y = point.get("y"); valuePoints.add(new Point2D.Double(x, y)); } edge.setGuides(type, valuePoints); } return edge; } public String getId() { return getSourceId() + ">>" + getTargetId(); } public String getSourceId() { return this.sourceId; } public String getTargetId() { return this.targetId; } public String getError() { return this.error; } public void setError(final String error) { this.error = error; } public boolean hasError() { return this.error != null; } public Node getSource() { return this.source; } public void setSource(final Node source) { this.source = source; } public Node getTarget() { return this.target; } public void setTarget(final Node target) { this.target = target; } public String getGuideType() { return this.guideType; } public List<Point2D> getGuideValues() { return this.guideValues; } public void setGuides(final String type, final List<Point2D> values) { this.guideType = type; this.guideValues = values; } public Object toObject() { final HashMap<String, Object> obj = new HashMap<>(); obj.put("source", getSourceId()); obj.put("target", getTargetId()); if (hasError()) { obj.put("error", this.error); } if (this.guideValues != null) { final HashMap<String, Object> lineGuidesObj = new HashMap<>(); lineGuidesObj.put("type", this.guideType); final ArrayList<Object> guides = new ArrayList<>(); for (final Point2D point : this.guideValues) { final HashMap<String, Double> pointObj = new HashMap<>(); pointObj.put("x", point.getX()); pointObj.put("y", point.getY()); guides.add(pointObj); } lineGuidesObj.put("values", 
guides); obj.put("guides", lineGuidesObj); } return obj; } }
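A serialization round-trip sketch (illustrative; the job names are arbitrary). fromObject requires a HashMap and reads source, target, optional error, and optional guides; toObject emits the same shape back.

import azkaban.flow.Edge;
import java.util.HashMap;

public final class EdgeDemo {
  public static void main(String[] args) {
    final HashMap<String, Object> raw = new HashMap<>();
    raw.put("source", "jobA");
    raw.put("target", "jobB");
    final Edge edge = Edge.fromObject(raw);
    System.out.println(edge.getId());    // jobA>>jobB
    System.out.println(edge.toObject()); // contains source=jobA and target=jobB
  }
}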
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/flow/Flow.java
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.flow; import azkaban.Constants; import azkaban.executor.mail.DefaultMailCreator; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; public class Flow { private final String id; private final HashMap<String, Node> nodes = new HashMap<>(); private final HashMap<String, Edge> edges = new HashMap<>(); private final HashMap<String, Set<Edge>> outEdges = new HashMap<>(); private final HashMap<String, Set<Edge>> inEdges = new HashMap<>(); private final HashMap<String, FlowProps> flowProps = new HashMap<>(); private int projectId; private ArrayList<Node> startNodes = null; private ArrayList<Node> endNodes = null; private int numLevels = -1; private List<String> failureEmail = new ArrayList<>(); private List<String> successEmail = new ArrayList<>(); private String mailCreator = DefaultMailCreator.DEFAULT_MAIL_CREATOR; private ArrayList<String> errors; private int version = -1; private Map<String, Object> metadata = new HashMap<>(); private boolean isLayedOut = false; private boolean isEmbeddedFlow = false; private double azkabanFlowVersion = Constants.DEFAULT_AZKABAN_FLOW_VERSION; private String condition = null; private boolean isLocked = false; private String flowLockErrorMessage = null; public Flow(final String id) { this.id = id; } public static Flow flowFromObject(final Object object) { final Map<String, Object> flowObject = (Map<String, Object>) object; final String id = (String) flowObject.get("id"); final Boolean layedout = (Boolean) flowObject.get("layedout"); final Boolean isEmbeddedFlow = (Boolean) flowObject.get("embeddedFlow"); final Double azkabanFlowVersion = (Double) flowObject.get("azkabanFlowVersion"); final String condition = (String) flowObject.get("condition"); final Boolean isLocked = (Boolean) flowObject.get("isLocked"); final String flowLockErrorMessage = (String) flowObject.get("flowLockErrorMessage"); final Flow flow = new Flow(id); if (layedout != null) { flow.setLayedOut(layedout); } if (isEmbeddedFlow != null) { flow.setEmbeddedFlow(isEmbeddedFlow); } if (azkabanFlowVersion != null) { flow.setAzkabanFlowVersion(azkabanFlowVersion); } if (condition != null) { flow.setCondition(condition); } if (isLocked != null) { flow.setLocked(isLocked); } if (flowLockErrorMessage != null) { flow.setFlowLockErrorMessage(flowLockErrorMessage); } final int projId = (Integer) flowObject.get("project.id"); flow.setProjectId(projId); final int version = (Integer) flowObject.get("version"); flow.setVersion(version); // Loading properties final List<Object> propertiesList = (List<Object>) flowObject.get("props"); final Map<String, FlowProps> properties = loadPropertiesFromObject(propertiesList); flow.addAllFlowProperties(properties.values()); // Loading nodes final List<Object> nodeList = (List<Object>) flowObject.get("nodes"); final Map<String, Node> nodes = loadNodesFromObjects(nodeList); flow.addAllNodes(nodes.values()); // Loading edges final List<Object> edgeList = (List<Object>) flowObject.get("edges"); final List<Edge> edges = loadEdgeFromObjects(edgeList, nodes); flow.addAllEdges(edges); final Map<String, Object> metadata = (Map<String, Object>) flowObject.get("metadata"); if (metadata != null) { flow.setMetadata(metadata); } flow.failureEmail = (List<String>) flowObject.get("failure.email"); flow.successEmail = (List<String>) flowObject.get("success.email"); if (flowObject.containsKey("mailCreator")) { flow.mailCreator = flowObject.get("mailCreator").toString(); } return flow; } private static Map<String, Node> loadNodesFromObjects(final List<Object> nodeList) { final Map<String, Node> nodeMap = new HashMap<>(); for (final Object obj : nodeList) { final Node node = Node.fromObject(obj); nodeMap.put(node.getId(), node); } return nodeMap; } private static List<Edge> loadEdgeFromObjects(final List<Object> edgeList, final Map<String, Node> nodes) { final List<Edge> edgeResult = new ArrayList<>(); for (final Object obj : edgeList) { final Edge edge = Edge.fromObject(obj); edgeResult.add(edge); } return edgeResult; } private static Map<String, FlowProps> loadPropertiesFromObject( final List<Object> propertyObjectList) { final Map<String, FlowProps> properties = new HashMap<>(); for (final Object propObj : propertyObjectList) { final FlowProps prop = FlowProps.fromObject(propObj); properties.put(prop.getSource(), prop); } return properties; } public int getVersion() { return this.version; } public void setVersion(final int version) { this.version = version; } public void initialize() { if (this.startNodes == null) { this.startNodes = new ArrayList<>(); this.endNodes = new ArrayList<>(); for (final Node node : this.nodes.values()) { // If it doesn't have any incoming edges, it's a start node if (!this.inEdges.containsKey(node.getId())) { this.startNodes.add(node); } // If it doesn't contain any outgoing edges, it's an end node. 
if (!this.outEdges.containsKey(node.getId())) { this.endNodes.add(node); } } setLevelsAndEdgeNodes(new HashSet<>(this.startNodes), 0); } } private void setLevelsAndEdgeNodes(final Set<Node> levelNodes, final int level) { final Set<Node> nextLevelNodes = new HashSet<>(); for (final Node node : levelNodes) { node.setLevel(level); final Set<Edge> edges = this.outEdges.get(node.getId()); if (edges != null) { edges.forEach(edge -> { edge.setSource(node); edge.setTarget(this.nodes.get(edge.getTargetId())); nextLevelNodes.add(edge.getTarget()); }); } } this.numLevels = level; if (!nextLevelNodes.isEmpty()) { setLevelsAndEdgeNodes(nextLevelNodes, level + 1); } } public Node getNode(final String nodeId) { return this.nodes.get(nodeId); } public List<String> getSuccessEmails() { return this.successEmail; } public String getMailCreator() { return this.mailCreator; } public void setMailCreator(final String mailCreator) { this.mailCreator = mailCreator; } public List<String> getFailureEmails() { return this.failureEmail; } public void addSuccessEmails(final Collection<String> emails) { this.successEmail.addAll(emails); } public void addFailureEmails(final Collection<String> emails) { this.failureEmail.addAll(emails); } public int getNumLevels() { return this.numLevels; } public List<Node> getStartNodes() { return this.startNodes; } public List<Node> getEndNodes() { return this.endNodes; } public Set<Edge> getInEdges(final String id) { return this.inEdges.get(id); } public Set<Edge> getOutEdges(final String id) { return this.outEdges.get(id); } public void addAllNodes(final Collection<Node> nodes) { for (final Node node : nodes) { addNode(node); } } public void addNode(final Node node) { this.nodes.put(node.getId(), node); } public void addAllFlowProperties(final Collection<FlowProps> props) { for (final FlowProps prop : props) { this.flowProps.put(prop.getSource(), prop); } } public String getId() { return this.id; } public void addError(final String error) { if (this.errors == null) { this.errors = new ArrayList<>(); } this.errors.add(error); } public List<String> getErrors() { return this.errors; } public boolean hasErrors() { return this.errors != null && !this.errors.isEmpty(); } public Collection<Node> getNodes() { return this.nodes.values(); } public Collection<Edge> getEdges() { return this.edges.values(); } public void addAllEdges(final Collection<Edge> edges) { for (final Edge edge : edges) { addEdge(edge); } } public void addEdge(final Edge edge) { final String source = edge.getSourceId(); final String target = edge.getTargetId(); if (edge.hasError()) { addError("Error on " + edge.getId() + ". 
" + edge.getError()); } final Set<Edge> sourceSet = getEdgeSet(this.outEdges, source); sourceSet.add(edge); final Set<Edge> targetSet = getEdgeSet(this.inEdges, target); targetSet.add(edge); this.edges.put(edge.getId(), edge); } private Set<Edge> getEdgeSet(final HashMap<String, Set<Edge>> map, final String id) { Set<Edge> edges = map.get(id); if (edges == null) { edges = new HashSet<>(); map.put(id, edges); } return edges; } public Map<String, Object> toObject() { final HashMap<String, Object> flowObj = new HashMap<>(); flowObj.put("type", "flow"); flowObj.put("id", getId()); flowObj.put("project.id", this.projectId); flowObj.put("version", this.version); flowObj.put("props", objectizeProperties()); flowObj.put("nodes", objectizeNodes()); flowObj.put("edges", objectizeEdges()); flowObj.put("failure.email", this.failureEmail); flowObj.put("success.email", this.successEmail); flowObj.put("mailCreator", this.mailCreator); flowObj.put("layedout", this.isLayedOut); flowObj.put("embeddedFlow", this.isEmbeddedFlow); flowObj.put("azkabanFlowVersion", this.azkabanFlowVersion); flowObj.put("condition", this.condition); flowObj.put("isLocked", this.isLocked); flowObj.put("flowLockErrorMessage", this.flowLockErrorMessage); if (this.errors != null) { flowObj.put("errors", this.errors); } if (this.metadata != null) { flowObj.put("metadata", this.metadata); } return flowObj; } private List<Object> objectizeProperties() { final ArrayList<Object> result = new ArrayList<>(); for (final FlowProps props : this.flowProps.values()) { final Object objProps = props.toObject(); result.add(objProps); } return result; } private List<Object> objectizeNodes() { final ArrayList<Object> result = new ArrayList<>(); for (final Node node : getNodes()) { final Object nodeObj = node.toObject(); result.add(nodeObj); } return result; } private List<Object> objectizeEdges() { final ArrayList<Object> result = new ArrayList<>(); for (final Edge edge : getEdges()) { final Object edgeObj = edge.toObject(); result.add(edgeObj); } return result; } public boolean isLayedOut() { return this.isLayedOut; } public void setLayedOut(final boolean layedOut) { this.isLayedOut = layedOut; } public boolean isEmbeddedFlow() { return this.isEmbeddedFlow; } public void setEmbeddedFlow(final boolean embeddedFlow) { this.isEmbeddedFlow = embeddedFlow; } public double getAzkabanFlowVersion() { return this.azkabanFlowVersion; } public void setAzkabanFlowVersion(final double azkabanFlowVersion) { this.azkabanFlowVersion = azkabanFlowVersion; } public String getCondition() { return this.condition; } public void setCondition(final String condition) { this.condition = condition; } public Map<String, Object> getMetadata() { if (this.metadata == null) { this.metadata = new HashMap<>(); } return this.metadata; } public void setMetadata(final Map<String, Object> metadata) { this.metadata = metadata; } public Map<String, Node> getNodeMap() { return this.nodes; } public Map<String, Set<Edge>> getOutEdgeMap() { return this.outEdges; } public Map<String, Set<Edge>> getInEdgeMap() { return this.inEdges; } public FlowProps getFlowProps(final String propSource) { return this.flowProps.get(propSource); } public Map<String, FlowProps> getAllFlowProps() { return this.flowProps; } public int getProjectId() { return this.projectId; } public void setProjectId(final int projectId) { this.projectId = projectId; } public boolean isLocked() { return this.isLocked; } public void setLocked(boolean locked) { this.isLocked = locked; } public String getFlowLockErrorMessage() { 
return this.flowLockErrorMessage; } public void setFlowLockErrorMessage(final String flowLockErrorMessage) { this.flowLockErrorMessage = flowLockErrorMessage; } }
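A graph-building sketch (illustrative; the node ids are arbitrary). It shows how initialize() derives start nodes, end nodes, and levels from the in/out edge maps populated by addEdge.

import azkaban.flow.Edge;
import azkaban.flow.Flow;
import azkaban.flow.Node;
import java.util.Arrays;

public final class FlowDemo {
  public static void main(String[] args) {
    final Flow flow = new Flow("demo");
    flow.addAllNodes(Arrays.asList(new Node("a"), new Node("b"), new Node("c")));
    flow.addEdge(new Edge("a", "b"));
    flow.addEdge(new Edge("b", "c"));
    flow.initialize(); // computes start/end nodes and per-node levels
    System.out.println(flow.getStartNodes().get(0).getId()); // a (no incoming edges)
    System.out.println(flow.getEndNodes().get(0).getId());   // c (no outgoing edges)
    System.out.println(flow.getNumLevels());                 // 2
  }
}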
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/flow/FlowProps.java
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.flow; import azkaban.utils.Props; import java.util.HashMap; import java.util.Map; public class FlowProps { private String parentSource; private String propSource; private Props props = null; public FlowProps(final String parentSource, final String propSource) { /** * Use String interning so that just 1 copy of the string value exists in String Constant Pool * and the value is reused. Azkaban Heap dump analysis indicated a high percentage of heap * usage is coming from duplicate strings of FlowProps fields. * * Using intern() eliminates all the duplicate values, thereby significantly reducing heap * memory usage. */ if(parentSource != null) { this.parentSource = parentSource.intern(); } if (propSource != null) { this.propSource = propSource.intern(); } } public FlowProps(final Props props) { this.setProps(props); } public static FlowProps fromObject(final Object obj) { final Map<String, Object> flowMap = (Map<String, Object>) obj; final String source = (String) flowMap.get("source"); final String parentSource = (String) flowMap.get("inherits"); final FlowProps flowProps = new FlowProps(parentSource, source); return flowProps; } public Props getProps() { return this.props; } public void setProps(final Props props) { this.props = props; this.parentSource = props.getParent() == null ? null : props.getParent().getSource(); this.propSource = props.getSource(); } public String getSource() { return this.propSource; } public String getInheritedSource() { return this.parentSource; } public Object toObject() { final HashMap<String, Object> obj = new HashMap<>(); obj.put("source", this.propSource); if (this.parentSource != null) { obj.put("inherits", this.parentSource); } return obj; } }
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/flow/FlowUtils.java
/* * Copyright 2017 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.flow; import static java.util.Objects.requireNonNull; import azkaban.executor.DisabledJob; import azkaban.executor.ExecutableFlow; import azkaban.executor.ExecutableFlowBase; import azkaban.executor.ExecutableNode; import azkaban.executor.Status; import azkaban.project.Project; import azkaban.project.ProjectManager; import azkaban.utils.Props; import com.google.gson.Gson; import java.util.List; import java.util.Map; import java.util.UUID; import org.joda.time.DateTime; public class FlowUtils { public static Props addCommonFlowProperties(final Props parentProps, final ExecutableFlowBase flow) { final Props props = new Props(parentProps); props.put(CommonJobProperties.FLOW_ID, flow.getFlowId()); props.put(CommonJobProperties.EXEC_ID, flow.getExecutionId()); props.put(CommonJobProperties.PROJECT_ID, flow.getProjectId()); props.put(CommonJobProperties.PROJECT_NAME, flow.getProjectName()); props.put(CommonJobProperties.PROJECT_VERSION, flow.getVersion()); props.put(CommonJobProperties.FLOW_UUID, UUID.randomUUID().toString()); props.put(CommonJobProperties.PROJECT_LAST_CHANGED_BY, flow.getLastModifiedByUser()); props.put(CommonJobProperties.PROJECT_LAST_CHANGED_DATE, flow.getLastModifiedTimestamp()); props.put(CommonJobProperties.SUBMIT_USER, flow.getExecutableFlow().getSubmitUser()); final DateTime loadTime = new DateTime(); props.put(CommonJobProperties.FLOW_START_TIMESTAMP, loadTime.toString()); props.put(CommonJobProperties.FLOW_START_YEAR, loadTime.toString("yyyy")); props.put(CommonJobProperties.FLOW_START_MONTH, loadTime.toString("MM")); props.put(CommonJobProperties.FLOW_START_DAY, loadTime.toString("dd")); props.put(CommonJobProperties.FLOW_START_HOUR, loadTime.toString("HH")); props.put(CommonJobProperties.FLOW_START_MINUTE, loadTime.toString("mm")); props.put(CommonJobProperties.FLOW_START_SECOND, loadTime.toString("ss")); props.put(CommonJobProperties.FLOW_START_MILLISSECOND, loadTime.toString("SSS")); props.put(CommonJobProperties.FLOW_START_TIMEZONE, loadTime.toString("ZZZZ")); return props; } /** * Change job status to disabled in exflow if the job is in disabledJobs */ public static void applyDisabledJobs(final List<DisabledJob> disabledJobs, final ExecutableFlowBase exflow) { for (final DisabledJob disabled : disabledJobs) { if (disabled.isEmbeddedFlow()) { final ExecutableNode node = exflow.getExecutableNode(disabled.getName()); if (node != null && node instanceof ExecutableFlowBase) { applyDisabledJobs(disabled.getChildren(), (ExecutableFlowBase) node); } } else { // job final ExecutableNode node = exflow.getExecutableNode(disabled.getName()); if (node != null) { node.setStatus(Status.DISABLED); } } } } public static Project getProject(final ProjectManager projectManager, final int projectId) { final Project project = projectManager.getProject(projectId); if (project == null) { throw new RuntimeException("Error finding the project to execute " + projectId); } return project; } public static 
Flow getFlow(final Project project, final String flowName) { final Project nonNullProj = requireNonNull(project); final Flow flow = nonNullProj.getFlow(flowName); if (flow == null) { throw new RuntimeException("Error finding the flow to execute " + flowName); } return flow; } public static ExecutableFlow createExecutableFlow(final Project project, final Flow flow) { final ExecutableFlow exflow = new ExecutableFlow(project, flow); exflow.addAllProxyUsers(project.getProxyUsers()); return exflow; } public static String toJson(final Project proj) { final Gson gson = new Gson(); final String jsonStr = gson.toJson(proj); return jsonStr; } public static Project toProject(final String json) { final Gson gson = new Gson(); return gson.fromJson(json, Project.class); } }
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/flow/Node.java
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.flow; import azkaban.utils.Utils; import java.awt.geom.Point2D; import java.util.HashMap; import java.util.Map; public class Node { private final String id; private String jobSource; private String propsSource; private Point2D position = null; private int level; private int expectedRunTimeSec = 1; private String type; private String embeddedFlowId; private String condition = null; private ConditionOnJobStatus conditionOnJobStatus = ConditionOnJobStatus.ALL_SUCCESS; public Node(final String id) { this.id = id; } /** * Clones nodes */ public Node(final Node clone) { this.id = clone.id; this.propsSource = clone.propsSource; this.jobSource = clone.jobSource; } public static Node fromObject(final Object obj) { final Map<String, Object> mapObj = (Map<String, Object>) obj; final String id = (String) mapObj.get("id"); final Node node = new Node(id); final String jobSource = (String) mapObj.get("jobSource"); final String propSource = (String) mapObj.get("propSource"); final String jobType = (String) mapObj.get("jobType"); final String embeddedFlowId = (String) mapObj.get("embeddedFlowId"); final String condition = (String) mapObj.get("condition"); final ConditionOnJobStatus conditionOnJobStatus = ConditionOnJobStatus .fromString((String) mapObj.get("conditionOnJobStatus")); node.setJobSource(jobSource); node.setPropsSource(propSource); node.setType(jobType); node.setEmbeddedFlowId(embeddedFlowId); node.setCondition(condition); node.setConditionOnJobStatus(conditionOnJobStatus); final Integer expectedRuntime = (Integer) mapObj.get("expectedRuntime"); if (expectedRuntime != null) { node.setExpectedRuntimeSec(expectedRuntime); } final Map<String, Object> layoutInfo = (Map<String, Object>) mapObj.get("layout"); if (layoutInfo != null) { Double x = null; Double y = null; Integer level = null; try { x = Utils.convertToDouble(layoutInfo.get("x")); y = Utils.convertToDouble(layoutInfo.get("y")); level = (Integer) layoutInfo.get("level"); } catch (final ClassCastException e) { throw new RuntimeException("Error creating node " + id, e); } if (x != null && y != null) { node.setPosition(new Point2D.Double(x, y)); } if (level != null) { node.setLevel(level); } } return node; } public String getId() { return this.id; } public String getType() { return this.type; } public void setType(final String type) { this.type = type; } public Point2D getPosition() { return this.position; } public void setPosition(final Point2D position) { this.position = position; } public void setPosition(final double x, final double y) { this.position = new Point2D.Double(x, y); } public int getLevel() { return this.level; } public void setLevel(final int level) { this.level = level; } public String getJobSource() { return this.jobSource; } public void setJobSource(final String jobSource) { this.jobSource = jobSource; } public String getPropsSource() { return this.propsSource; } public void setPropsSource(final String propsSource) { this.propsSource = 
propsSource; } public int getExpectedRuntimeSec() { return this.expectedRunTimeSec; } public void setExpectedRuntimeSec(final int runtimeSec) { this.expectedRunTimeSec = runtimeSec; } public String getEmbeddedFlowId() { return this.embeddedFlowId; } public void setEmbeddedFlowId(final String flowId) { this.embeddedFlowId = flowId; } public Object toObject() { final HashMap<String, Object> objMap = new HashMap<>(); objMap.put("id", this.id); objMap.put("jobSource", this.jobSource); objMap.put("propSource", this.propsSource); objMap.put("jobType", this.type); if (this.embeddedFlowId != null) { objMap.put("embeddedFlowId", this.embeddedFlowId); } objMap.put("expectedRuntime", this.expectedRunTimeSec); final HashMap<String, Object> layoutInfo = new HashMap<>(); if (this.position != null) { layoutInfo.put("x", this.position.getX()); layoutInfo.put("y", this.position.getY()); } layoutInfo.put("level", this.level); objMap.put("layout", layoutInfo); objMap.put("condition", this.condition); objMap.put("conditionOnJobStatus", this.conditionOnJobStatus); return objMap; } public String getCondition() { return this.condition; } public void setCondition(final String condition) { this.condition = condition; } public ConditionOnJobStatus getConditionOnJobStatus() { return this.conditionOnJobStatus; } public void setConditionOnJobStatus(final ConditionOnJobStatus conditionOnJobStatus) { this.conditionOnJobStatus = conditionOnJobStatus; } }
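A deserialization sketch (illustrative; the key/value choices are arbitrary). fromObject reads the node's id, sources, and type from a map; optional keys such as expectedRuntime and layout are simply skipped when absent.

import azkaban.flow.Node;
import java.util.HashMap;
import java.util.Map;

public final class NodeDemo {
  public static void main(String[] args) {
    final Map<String, Object> raw = new HashMap<>();
    raw.put("id", "jobA");
    raw.put("jobType", "command");
    raw.put("jobSource", "jobA.job");
    raw.put("propSource", "common.properties");
    final Node node = Node.fromObject(raw);
    System.out.println(node.getId() + " " + node.getType()); // jobA command
  }
}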
0
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/flow/SpecialJobTypes.java
/* * Copyright 2014 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.flow; public class SpecialJobTypes { public static final String BRANCH_START_TYPE = "branch.start"; public static final String BRANCH_END_TYPE = "branch.end"; public static final String EMBEDDED_FLOW_TYPE = "flow"; public static final String FLOW_NAME = "flow.name"; }