| index (int64) | repo_id (string) | file_path (string) | content (string) |
|---|---|---|---|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/storage/DatabaseStorage.java
|
/*
* Copyright 2017 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
*/
package azkaban.storage;
import azkaban.project.ProjectFileHandler;
import azkaban.project.ProjectLoader;
import azkaban.spi.Dependency;
import azkaban.spi.Storage;
import azkaban.spi.ProjectStorageMetadata;
import javax.inject.Singleton;
import java.io.File;
import java.io.InputStream;
import javax.inject.Inject;
/**
* DatabaseStorage
*
* This class helps in storing projects in the DB itself. This is intended to be the default since
* it is the current behavior of Azkaban.
*/
@Singleton
public class DatabaseStorage implements Storage {
private final ProjectLoader projectLoader;
@Inject
public DatabaseStorage(final ProjectLoader projectLoader) {
this.projectLoader = projectLoader;
}
@Override
public InputStream getProject(final String key) {
throw new UnsupportedOperationException(
"Not implemented yet. Use get(projectId, version) instead");
}
public ProjectFileHandler getProject(final int projectId, final int version) {
return this.projectLoader.getUploadedFile(projectId, version);
}
@Override
public String putProject(final ProjectStorageMetadata metadata, final File localFile) {
this.projectLoader.uploadProjectFile(metadata.getProjectId(), metadata.getVersion(),
localFile, metadata.getUploader(), metadata.getUploaderIPAddr());
return null;
}
// DatabaseStorage does not support dependency fetching and thus does not support thin archives.
// Use LocalHadoopStorage or HdfsStorage instead for thin archive support.
@Override
public InputStream getDependency(final Dependency dep) {
throw new UnsupportedOperationException(
"Not implemented yet. Must use HdfsStorage or LocalStorage.");
}
@Override
public boolean dependencyFetchingEnabled() {
return false;
}
@Override
public String getDependencyRootPath() {
return null;
}
@Override
public boolean deleteProject(final String key) {
throw new UnsupportedOperationException("Delete is not supported");
}
}
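// --- Editor's note: a minimal usage sketch, not part of the original file. ---
// DatabaseStorage only supports the (projectId, version) lookup; the key-based
// Storage.getProject(String) deliberately throws. `loader` is an assumed,
// already-configured ProjectLoader, and the ids below are illustrative.
class DatabaseStorageUsageSketch {
ProjectFileHandler fetch(final ProjectLoader loader) {
final DatabaseStorage storage = new DatabaseStorage(loader);
// Delegates directly to ProjectLoader.getUploadedFile(projectId, version).
return storage.getProject(1, 3);
}
}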
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/storage/HdfsAuth.java
|
/*
* Copyright 2017 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
*/
package azkaban.storage;
import static azkaban.Constants.ConfigurationKeys.AZKABAN_KERBEROS_PRINCIPAL;
import static azkaban.Constants.ConfigurationKeys.AZKABAN_KEYTAB_PATH;
import static java.util.Objects.requireNonNull;
import azkaban.spi.AzkabanException;
import azkaban.utils.Props;
import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Singleton;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.log4j.Logger;
/**
* This class helps in HDFS authorization and is a wrapper over Hadoop's {@link
* UserGroupInformation} class.
*/
@Singleton
public class HdfsAuth {
private static final Logger log = Logger.getLogger(HdfsAuth.class);
private final boolean isSecurityEnabled;
private UserGroupInformation loggedInUser = null;
private String keytabPath = null;
private String keytabPrincipal = null;
@Inject
public HdfsAuth(final Props props, @Named("hdfsConf") final Configuration conf) {
UserGroupInformation.setConfiguration(conf);
this.isSecurityEnabled = UserGroupInformation.isSecurityEnabled();
if (this.isSecurityEnabled) {
log.info("The Hadoop cluster has enabled security");
this.keytabPath = requireNonNull(props.getString(AZKABAN_KEYTAB_PATH));
this.keytabPrincipal = requireNonNull(props.getString(AZKABAN_KERBEROS_PRINCIPAL));
}
}
/**
* API to authorize HDFS access. This logs in the configured user via the keytab. If the user is
* already logged in, it renews the TGT.
*/
public void authorize() {
if (this.isSecurityEnabled) {
try {
login(this.keytabPrincipal, this.keytabPath);
} catch (final IOException e) {
log.error(e);
throw new AzkabanException(String.format(
"Error: Unable to authorize to Hadoop. Principal: %s Keytab: %s", this.keytabPrincipal,
this.keytabPath));
}
}
}
private void login(final String keytabPrincipal, final String keytabPath) throws IOException {
if (this.loggedInUser == null) {
log.info(
String.format("Logging in using Principal: %s Keytab: %s", keytabPrincipal, keytabPath));
UserGroupInformation.loginUserFromKeytab(keytabPrincipal, keytabPath);
this.loggedInUser = UserGroupInformation.getLoginUser();
log.info(String.format("User %s logged in.", this.loggedInUser));
} else {
log.info(String.format("User %s already logged in. Refreshing TGT", this.loggedInUser));
this.loggedInUser.checkTGTAndReloginFromKeytab();
}
}
}
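// --- Editor's note: a minimal usage sketch, not part of the original file. ---
// Callers such as HdfsStorage invoke authorize() before every HDFS operation: the
// first call logs in from the keytab, subsequent calls renew the TGT if needed.
// `props` is assumed to carry AZKABAN_KEYTAB_PATH and AZKABAN_KERBEROS_PRINCIPAL
// whenever the Hadoop configuration has security enabled.
class HdfsAuthUsageSketch {
void beforeEachHdfsCall(final Props props, final Configuration conf) {
final HdfsAuth auth = new HdfsAuth(props, conf);
auth.authorize(); // no-op on insecure clusters; login or TGT renewal on secure ones
}
}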
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/storage/HdfsStorage.java
|
/*
* Copyright 2017 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
*/
package azkaban.storage;
import static java.util.Objects.requireNonNull;
import azkaban.AzkabanCommonModuleConfig;
import azkaban.utils.HashUtils;
import azkaban.utils.StorageUtils;
import azkaban.spi.Dependency;
import azkaban.spi.Storage;
import azkaban.spi.StorageException;
import azkaban.spi.ProjectStorageMetadata;
import javax.annotation.Nullable;
import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Singleton;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.log4j.Logger;
@Singleton
public class HdfsStorage implements Storage {
private static final String TMP_PROJECT_UPLOAD_FILENAME = "upload.tmp";
private static final Logger log = Logger.getLogger(HdfsStorage.class);
private final HdfsAuth hdfsAuth;
private final URI projectRootUri;
private final URI dependencyRootUri;
private final DistributedFileSystem hdfs;
private final FileSystem http;
@Inject
public HdfsStorage(final AzkabanCommonModuleConfig config, final HdfsAuth hdfsAuth, @Named("hdfsFS") final FileSystem hdfs,
@Named("hdfs_cached_httpFS") @Nullable final FileSystem http) {
this.hdfsAuth = requireNonNull(hdfsAuth);
// Usually we could interact with this object as a plain FileSystem; however, putProject() uses
// the rename() overload with the OVERWRITE option, which is protected in FileSystem but
// public in DistributedFileSystem, so we need the concrete type here.
this.hdfs = (DistributedFileSystem) requireNonNull(hdfs);
this.http = http; // May be null if thin archives is not enabled
this.projectRootUri = config.getHdfsProjectRootUri();
this.dependencyRootUri = config.getOriginDependencyRootUri();
}
@Override
public InputStream getProject(final String key) throws IOException {
this.hdfsAuth.authorize();
return this.hdfs.open(fullProjectPath(key));
}
@Override
public String putProject(final ProjectStorageMetadata metadata, final File localFile) {
this.hdfsAuth.authorize();
final Path projectsPath = new Path(this.projectRootUri.getPath(),
String.valueOf(metadata.getProjectId()));
try {
if (this.hdfs.mkdirs(projectsPath)) {
log.info("Created project dir: " + projectsPath);
}
final Path targetPath = new Path(projectsPath,
StorageUtils.getTargetProjectFilename(metadata.getProjectId(), metadata.getHash()));
final Path tmpPath = new Path(projectsPath, TMP_PROJECT_UPLOAD_FILENAME);
// Copy file to HDFS
log.info(String.format("Creating project artifact: meta: %s path: %s", metadata, targetPath));
this.hdfs.copyFromLocalFile(false, true, new Path(localFile.getAbsolutePath()), tmpPath);
// Rename the tmp file to the final file and overwrite the final file if it already exists
// (i.e. if the hash is the same).
this.hdfs.rename(tmpPath, targetPath, Options.Rename.OVERWRITE);
return getRelativeProjectPath(targetPath);
} catch (final IOException e) {
log.error("error in putProject(): Metadata: " + metadata);
throw new StorageException(e);
}
}
@Override
public InputStream getDependency(final Dependency dep) throws IOException {
if (!dependencyFetchingEnabled()) {
throw new UnsupportedOperationException("Dependency fetching is not enabled.");
}
// CachedHttpFileSystem will cache in HDFS, so it needs to be authenticated to access HDFS!
this.hdfsAuth.authorize();
return this.http.open(resolveAbsoluteDependencyURI(dep));
}
@Override
public boolean dependencyFetchingEnabled() {
return this.http != null;
}
@Override
public boolean deleteProject(final String key) {
this.hdfsAuth.authorize();
final Path path = fullProjectPath(key);
try {
return this.hdfs.delete(path, false);
} catch (final IOException e) {
log.error("HDFS project file delete failed on " + path, e);
return false;
}
}
private Path fullProjectPath(final String key) {
return new Path(this.projectRootUri.toString(), key);
}
private Path resolveAbsoluteDependencyURI(Dependency dep) {
return new Path(this.dependencyRootUri.toString(), StorageUtils.getTargetDependencyPath(dep));
}
@Override
public String getDependencyRootPath() {
return this.dependencyRootUri != null ? this.dependencyRootUri.toString() : null;
}
private String getRelativeProjectPath(final Path targetPath) {
return URI.create(this.projectRootUri.getPath()).relativize(targetPath.toUri()).getPath();
}
}
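// --- Editor's note: a minimal round-trip sketch, not part of the original file. ---
// putProject() returns a key relative to the configured project root URI, and
// getProject() resolves that same key back to an absolute HDFS path. `storage`,
// `metadata`, and `archive` are assumed, pre-built inputs; the key shown in the
// comment is only illustrative.
class HdfsStorageUsageSketch {
InputStream roundTrip(final HdfsStorage storage, final ProjectStorageMetadata metadata,
final File archive) throws IOException {
final String key = storage.putProject(metadata, archive); // e.g. "<projectId>/<file>"
return storage.getProject(key);
}
}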
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/storage/LocalHadoopStorage.java
|
package azkaban.storage;
import azkaban.AzkabanCommonModuleConfig;
import azkaban.spi.Dependency;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import javax.annotation.Nullable;
import javax.inject.Inject;
import javax.inject.Named;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.Logger;
import static azkaban.utils.StorageUtils.*;
/**
* LocalHadoopStorage is an extension of LocalStorage that adds support for dependency fetching. LocalHadoopStorage
* depends on hadoop-common and can only be injected when hadoop-common is on the classpath.
*/
public class LocalHadoopStorage extends LocalStorage {
private static final Logger log = Logger.getLogger(LocalHadoopStorage.class);
final FileSystem http;
final URI dependencyRootUri;
@Inject
public LocalHadoopStorage(final AzkabanCommonModuleConfig config,
@Named("local_cached_httpFS") @Nullable final FileSystem http) {
super(config);
this.http = http; // May be null if thin archives is not enabled
this.dependencyRootUri = config.getOriginDependencyRootUri();
}
@Override
public InputStream getDependency(final Dependency dep) throws IOException {
if (!dependencyFetchingEnabled()) {
throw new UnsupportedOperationException("Dependency fetching is not enabled.");
}
return this.http.open(resolveAbsoluteDependencyURI(dep));
}
@Override
public boolean dependencyFetchingEnabled() {
return this.http != null;
}
@Override
public String getDependencyRootPath() {
return dependencyFetchingEnabled() ? this.dependencyRootUri.toString() : null;
}
private Path resolveAbsoluteDependencyURI(Dependency dep) {
return new Path(this.dependencyRootUri.toString(), getTargetDependencyPath(dep));
}
}
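// --- Editor's note: a minimal usage sketch, not part of the original file. ---
// Dependency fetching is only available when a cached HTTP FileSystem was bound
// (i.e. thin archives are enabled); callers should check dependencyFetchingEnabled()
// first. `storage` and `dep` are assumed inputs.
class LocalHadoopStorageUsageSketch {
InputStream fetchIfEnabled(final LocalHadoopStorage storage, final Dependency dep)
throws IOException {
return storage.dependencyFetchingEnabled() ? storage.getDependency(dep) : null;
}
}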
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/storage/LocalStorage.java
|
/*
* Copyright 2017 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
*/
package azkaban.storage;
import static azkaban.utils.StorageUtils.*;
import static com.google.common.base.Preconditions.checkArgument;
import azkaban.AzkabanCommonModuleConfig;
import azkaban.spi.Dependency;
import azkaban.spi.Storage;
import azkaban.spi.StorageException;
import azkaban.spi.ProjectStorageMetadata;
import azkaban.utils.FileIOUtils;
import javax.inject.Inject;
import javax.inject.Singleton;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.commons.io.FileUtils;
import org.apache.log4j.Logger;
@Singleton
public class LocalStorage implements Storage {
private static final Logger log = Logger.getLogger(LocalStorage.class);
final File rootDirectory;
@Inject
public LocalStorage(final AzkabanCommonModuleConfig config) {
this.rootDirectory = validateDirectory(createIfDoesNotExist(config.getLocalStorageBaseDirPath()));
}
private static File createIfDoesNotExist(final File baseDirectory) {
if (!baseDirectory.exists()) {
baseDirectory.mkdirs();
log.info("Creating dir: " + baseDirectory.getAbsolutePath());
}
return baseDirectory;
}
private static File createIfDoesNotExist(final String baseDirectoryPath) {
return createIfDoesNotExist(new File(baseDirectoryPath));
}
private static File validateDirectory(final File baseDirectory) {
checkArgument(baseDirectory.isDirectory());
if (!FileIOUtils.isDirWritable(baseDirectory)) {
throw new IllegalArgumentException("Directory not writable: " + baseDirectory);
}
return baseDirectory;
}
private File getFileInRoot(final String key) {
return new File(this.rootDirectory, key);
}
/**
* @param key Relative path of the file from the baseDirectory
*/
@Override
public InputStream getProject(final String key) throws IOException {
return new FileInputStream(getFileInRoot(key));
}
@Override
public String putProject(final ProjectStorageMetadata metadata, final File localFile) {
final File projectDir = new File(this.rootDirectory, String.valueOf(metadata.getProjectId()));
if (projectDir.mkdir()) {
log.info("Created project dir: " + projectDir.getAbsolutePath());
}
final File targetFile = new File(projectDir,
getTargetProjectFilename(metadata.getProjectId(), metadata.getHash()));
if (targetFile.exists()) {
log.info(String.format("Duplicate found: meta: %s, targetFile: %s, ", metadata,
targetFile.getAbsolutePath()));
return getRelativePath(targetFile);
}
// Copy file to storage dir
try {
FileUtils.copyFile(localFile, targetFile);
} catch (final IOException e) {
log.error("LocalStorage error in putProject(): meta: " + metadata);
throw new StorageException(e);
}
return getRelativePath(targetFile);
}
// LocalStorage does not support dependency fetching and thus does not support thin archives.
// Use LocalHadoopStorage or HdfsStorage instead for thin archive support.
@Override
public InputStream getDependency(final Dependency dep) throws IOException {
throw new UnsupportedOperationException("Dependency fetching is not supported with LocalStorage.");
}
@Override
public boolean dependencyFetchingEnabled() {
return false;
}
@Override
public String getDependencyRootPath() {
return null;
}
@Override
public boolean deleteProject(final String key) {
final File file = getFileInRoot(key);
final boolean result = file.exists() && file.delete();
if (result) {
log.warn("Deleted project file: " + file.getAbsolutePath());
} else {
log.warn("Unable to delete project file: " + file.getAbsolutePath());
}
return result;
}
private String getRelativePath(final File targetFile) {
return this.rootDirectory.toURI().relativize(targetFile.toURI()).getPath();
}
}
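// --- Editor's note: a minimal sketch of the de-duplication behavior, not part of
// the original file. Because the target filename is derived from the project id and
// the archive hash, re-uploading an identical archive returns the existing relative
// path without copying again. `storage`, `metadata`, and `archive` are assumed.
class LocalStorageUsageSketch {
void uploadTwice(final LocalStorage storage, final ProjectStorageMetadata metadata,
final File archive) {
final String first = storage.putProject(metadata, archive); // copies the file
final String second = storage.putProject(metadata, archive); // duplicate: no copy
assert first.equals(second);
}
}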
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/storage/ProjectStorageManager.java
|
/*
* Copyright 2017 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
*/
package azkaban.storage;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkState;
import static java.util.Objects.requireNonNull;
import azkaban.project.Project;
import azkaban.project.ProjectFileHandler;
import azkaban.project.ProjectLoader;
import azkaban.spi.Storage;
import azkaban.spi.StorageException;
import azkaban.spi.ProjectStorageMetadata;
import azkaban.user.User;
import azkaban.utils.HashUtils;
import azkaban.utils.Props;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.List;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.commons.io.IOUtils;
import org.apache.log4j.Logger;
/**
* ProjectStorageManager manages and coordinates all project-related interactions with the
* Storage layer. This also includes bookkeeping such as updating the DB with the new version,
* etc.
*/
@Singleton
public class ProjectStorageManager {
private static final Logger log = Logger.getLogger(ProjectStorageManager.class);
private final StorageCleaner storageCleaner;
private final Storage storage;
private final ProjectLoader projectLoader;
private final File tempDir;
@Inject
public ProjectStorageManager(final Props props, final Storage storage,
final ProjectLoader projectLoader,
final StorageCleaner storageCleaner) {
this.tempDir = new File(props.getString("project.temp.dir", "temp"));
this.storage = requireNonNull(storage, "storage is null");
this.projectLoader = requireNonNull(projectLoader, "projectLoader is null");
this.storageCleaner = requireNonNull(storageCleaner, "storageCleanUp is null");
prepareTempDir();
}
private void prepareTempDir() {
if (!this.tempDir.exists()) {
this.tempDir.mkdirs();
}
checkArgument(this.tempDir.isDirectory());
}
/**
* API to upload a project file into Azkaban Storage.
*
* TODO clean up interface
*
* @param project project
* @param version The new version to be uploaded
* @param localFile local file containing the project archive
* @param startupDependencies startup dependencies file of the project
* @param uploader the user who uploaded
* @param uploaderIPAddr IP address of the uploader
*/
public void uploadProject(
final Project project,
final int version,
final File localFile,
final File startupDependencies,
final User uploader,
final String uploaderIPAddr) {
byte[] md5 = null;
if (!(this.storage instanceof DatabaseStorage)) {
md5 = computeHash(localFile);
}
final ProjectStorageMetadata metadata = new ProjectStorageMetadata(
project.getId(), version, uploader.getUserId(), md5, uploaderIPAddr);
log.info(String.format("Adding archive to storage. Meta:%s File: %s[%d bytes]",
metadata, localFile.getName(), localFile.length()));
/* upload to storage */
final String resourceId = this.storage.putProject(metadata, localFile);
/* Add metadata to db */
// TODO spyne: remove hack. Database storage should go through the same flow
if (!(this.storage instanceof DatabaseStorage)) {
this.projectLoader.addProjectVersion(
project.getId(),
version,
localFile,
startupDependencies,
uploader.getUserId(),
requireNonNull(md5),
requireNonNull(resourceId),
uploaderIPAddr
);
log.info(String.format("Added project metadata to DB. Meta:%s File: %s[%d bytes] URI: %s",
metadata, localFile.getName(), localFile.length(), resourceId));
}
}
/**
* Clean up project artifacts of a given project id, except those with the project versions
* provided.
*/
public void cleanupProjectArtifacts(final int projectId, final List<Integer> versionsToExclude) {
try {
this.storageCleaner.cleanupProjectArtifacts(projectId, versionsToExclude);
} catch (final Exception e) {
log.error("Error occured during cleanup. Ignoring and continuing...", e);
}
}
private byte[] computeHash(final File localFile) {
final byte[] md5;
try {
md5 = HashUtils.MD5.getHashBytes(localFile);
} catch (final IOException e) {
throw new StorageException(e);
}
return md5;
}
/**
* Fetch project file from storage.
*
* @param projectId required project ID
* @param version version to be fetched
* @return Handler object containing hooks to fetched project file
*/
public ProjectFileHandler getProjectFile(final int projectId, final int version) {
log.info(
String.format("Fetching project file. project ID: %d version: %d", projectId, version));
// TODO spyne: remove huge hack ! There should not be any special handling for Database Storage.
if (this.storage instanceof DatabaseStorage) {
return ((DatabaseStorage) this.storage).getProject(projectId, version);
}
/* Fetch meta data from db */
final ProjectFileHandler pfh = this.projectLoader.fetchProjectMetaData(projectId, version);
/* Fetch project file from storage and copy to local file */
final String resourceId = requireNonNull(pfh.getResourceId(),
String.format("URI is null. project ID: %d version: %d",
pfh.getProjectId(), pfh.getVersion()));
try (final InputStream is = this.storage.getProject(resourceId)) {
final File file = createTempOutputFile(pfh);
/* Copy from storage to output stream */
try (final FileOutputStream fos = new FileOutputStream(file)) {
IOUtils.copy(is, fos);
}
/* Validate checksum */
validateChecksum(file, pfh);
/* Attach file to handler */
pfh.setLocalFile(file);
return pfh;
} catch (final IOException e) {
throw new StorageException(e);
}
}
private void validateChecksum(final File file, final ProjectFileHandler pfh) throws IOException {
final byte[] hash = HashUtils.MD5.getHashBytes(file);
checkState(HashUtils.isSameHash(pfh.getMD5Hash(), hash),
String.format("MD5 HASH Failed. project ID: %d version: %d Expected: %s Actual: %s",
pfh.getProjectId(), pfh.getVersion(), HashUtils.bytesHashToString(pfh.getMD5Hash()),
HashUtils.bytesHashToString(hash))
);
}
private File createTempOutputFile(final ProjectFileHandler projectFileHandler)
throws IOException {
return File.createTempFile(
projectFileHandler.getFileName(),
String.valueOf(projectFileHandler.getVersion()), this.tempDir);
}
}
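// --- Editor's note: a minimal upload/fetch sketch, not part of the original file. ---
// uploadProject() writes the archive through the bound Storage and (except for
// DatabaseStorage) records the new version in the DB; getProjectFile() reverses the
// flow, copying the archive to a temp file and validating its MD5 checksum. All
// parameters here are assumed inputs.
class ProjectStorageManagerUsageSketch {
ProjectFileHandler uploadThenFetch(final ProjectStorageManager mgr, final Project project,
final int version, final File archive, final File startupDeps, final User uploader) {
mgr.uploadProject(project, version, archive, startupDeps, uploader, "127.0.0.1");
return mgr.getProjectFile(project.getId(), version);
}
}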
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/storage/StorageCleaner.java
|
/*
* Copyright 2017 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
*/
package azkaban.storage;
import static azkaban.Constants.ConfigurationKeys.AZKABAN_STORAGE_ARTIFACT_MAX_RETENTION;
import static com.google.common.base.Preconditions.checkArgument;
import azkaban.db.DatabaseOperator;
import azkaban.spi.Storage;
import azkaban.utils.Pair;
import azkaban.utils.Props;
import com.google.common.annotations.VisibleForTesting;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.log4j.Logger;
@Singleton
public class StorageCleaner {
// Delete records of all older versions
static final String SQL_DELETE_RESOURCE_ID = "DELETE FROM project_versions WHERE resource_id=?";
/**
* The query must sort the versions in reverse order for the cleanup operation to work correctly!
* TODO spyne: Refactor database storage cleanup to use this
*
* When using DatabaseStorage, resourceId is always NULL. Hence, those rows will currently never
* be cleaned up.
*/
static final String SQL_FETCH_PVR = "SELECT resource_id, version FROM project_versions WHERE "
+ "project_id=? AND resource_id IS NOT NULL ORDER BY version DESC";
private static final Logger log = Logger.getLogger(StorageCleaner.class);
private final DatabaseOperator databaseOperator;
private final int maxArtifactsPerProject;
private final Storage storage;
@Inject
public StorageCleaner(final Props props, final Storage storage,
final DatabaseOperator databaseOperator) {
this.storage = storage;
this.databaseOperator = databaseOperator;
this.maxArtifactsPerProject = props.getInt(AZKABAN_STORAGE_ARTIFACT_MAX_RETENTION, 0);
checkArgument(this.maxArtifactsPerProject >= 0,
String.format("Invalid value for %s : %d", AZKABAN_STORAGE_ARTIFACT_MAX_RETENTION,
this.maxArtifactsPerProject));
if (isCleanupPermitted()) {
log.info(String.format("%s Config: Max %d artifact(s) retained per project",
AZKABAN_STORAGE_ARTIFACT_MAX_RETENTION, this.maxArtifactsPerProject));
} else {
log.warn("Project cleanup disabled. All artifacts will be stored.");
}
}
@VisibleForTesting
boolean isCleanupPermitted() {
return this.maxArtifactsPerProject > 0;
}
/**
* Remove all but:
* - the last N artifacts, as configured by AZKABAN_STORAGE_ARTIFACT_MAX_RETENTION
* - artifacts whose versions are in the provided exclude list
*
* Since multiple versions can share the same resource id, the algorithm collects the resource
* ids that must be kept (the latest N plus any excluded versions) and deletes the rest through
* the respective storage.
*
* From the storage perspective, cleanup just needs the {@link Storage#deleteProject(String)} API
* to work.
*
* Failure cases:
* - If the storage cleanup fails, the cleanup will be attempted again on the next upload.
* - If the storage cleanup succeeds and the DB cleanup fails, the DB will be cleaned up in the
* next attempt.
*
* @param projectId project ID
*/
public void cleanupProjectArtifacts(final int projectId, final List<Integer> versionsToExclude) {
if (!isCleanupPermitted()) {
return;
}
final Set<String> allResourceIds = findResourceIdsToDelete(projectId, versionsToExclude);
if (allResourceIds.size() == 0) {
return;
}
log.warn(String.format("Deleting project artifacts [id: %d]: %s", projectId, allResourceIds));
allResourceIds.forEach(this::delete);
}
private Set<String> findResourceIdsToDelete(final int projectId,
final List<Integer> versionsToExclude) {
final List<Pair<String, Integer>> resourceIdOrderedList = fetchResourceIdOrderedList(projectId);
if (resourceIdOrderedList.size() <= this.maxArtifactsPerProject) {
return Collections.emptySet();
}
// Different project versions may have the same resource id, we can only delete those
// resource ids that are not used by the versions we must keep.
Set<String> resourceIdsToKeep = new HashSet<>();
for (int i = 0; i < resourceIdOrderedList.size(); i++) {
Pair<String, Integer> pair = resourceIdOrderedList.get(i);
if (i < this.maxArtifactsPerProject || versionsToExclude.contains(pair.getSecond())) {
resourceIdsToKeep.add(pair.getFirst());
}
}
Set<String> resourceIdsToDelete = new HashSet<>();
for (Pair<String, Integer> pair: resourceIdOrderedList) {
String id = pair.getFirst();
if (!resourceIdsToKeep.contains(id)) {
resourceIdsToDelete.add(id);
}
}
return resourceIdsToDelete;
}
/**
* Main Delete Utility.
*
* Delete from the storage first, then remove the metadata from the DB. Warning! This order
* cannot be reversed: if the metadata is lost first, there is no remaining reference to the
* storage blob.
*
* @param resourceId the storage key to be deleted.
* @return true if deletion was successful. false otherwise
*/
private boolean delete(final String resourceId) {
final boolean isDeleted = this.storage.deleteProject(resourceId) && removeDbEntry(resourceId);
if (!isDeleted) {
log.info("Failed to delete resourceId: " + resourceId);
}
return isDeleted;
}
private boolean removeDbEntry(final String resourceId) {
try {
final int nAffectedRows = this.databaseOperator.update(SQL_DELETE_RESOURCE_ID, resourceId);
return nAffectedRows > 0;
} catch (final SQLException e) {
log.error("Error while deleting DB metadata resource ID: " + resourceId, e);
}
return false;
}
private List<Pair<String, Integer>> fetchResourceIdOrderedList(final int projectId) {
try {
return this.databaseOperator.query(SQL_FETCH_PVR,
rs -> {
final List<Pair<String, Integer>> results = new ArrayList<>();
while (rs.next()) {
final Pair<String, Integer> pair = new Pair<>(rs.getString("resource_id"),
rs.getInt("version"));
results.add(pair);
}
return results;
}, projectId);
} catch (final SQLException e) {
log.error("Error performing cleanup of Project: " + projectId, e);
}
return Collections.emptyList();
}
}
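// --- Editor's note: a worked example of the retention logic above, not part of the
// original file. With maxArtifactsPerProject = 2, versionsToExclude = [3], and rows
// ordered by version DESC as [(r5,5), (r4,4), (r3,3), (r3,2), (r1,1)]:
// kept = {r5, r4} (the two newest) + {r3} (version 3 is excluded)
// deleted = {r1}
// r3 also survives for version 2, because versions 2 and 3 share the same resource id
// and a resource id is only deleted when no kept version references it.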
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/storage/StorageImplementationType.java
|
/*
* Copyright 2017 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
*/
package azkaban.storage;
import azkaban.spi.Storage;
public enum StorageImplementationType {
LOCAL(LocalStorage.class),
LOCAL_HADOOP(LocalHadoopStorage.class),
HDFS(HdfsStorage.class),
DATABASE(DatabaseStorage.class);
private final Class<? extends Storage> implementationClass;
StorageImplementationType(final Class<? extends Storage> implementationClass) {
this.implementationClass = implementationClass;
}
public static StorageImplementationType from(final String name) {
try {
return valueOf(name);
} catch (final IllegalArgumentException | NullPointerException e) {
// valueOf throws IllegalArgumentException for an unknown name and NullPointerException for
// a null name; treat both as "no such type".
return null;
}
}
public Class<? extends Storage> getImplementationClass() {
return this.implementationClass;
}
}
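// --- Editor's note: a minimal usage sketch, not part of the original file. ---
// from() maps a (case-sensitive) config value such as "HDFS" to its Storage
// implementation class, returning null for unrecognized or null names.
class StorageImplementationTypeUsageSketch {
Class<? extends Storage> resolve(final String configuredType) {
final StorageImplementationType type = StorageImplementationType.from(configuredType);
return type == null ? null : type.getImplementationClass();
}
}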
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/trigger/ActionTypeLoader.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.trigger;
import azkaban.utils.Props;
import azkaban.utils.Utils;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import org.apache.log4j.Logger;
public class ActionTypeLoader {
public static final String DEFAULT_TRIGGER_ACTION_PLUGIN_DIR =
"plugins/triggeractions";
private static final Logger logger = Logger.getLogger(ActionTypeLoader.class);
protected static Map<String, Class<? extends TriggerAction>> actionToClass =
new HashMap<>();
public static void registerBuiltinActions(
final Map<String, Class<? extends TriggerAction>> builtinActions) {
actionToClass.putAll(builtinActions);
for (final String type : builtinActions.keySet()) {
logger.info("Loaded " + type + " action.");
}
}
public void init(final Props props) throws TriggerException {
}
public synchronized void registerActionType(final String type,
final Class<? extends TriggerAction> actionClass) {
logger.info("Registering action " + type);
if (!actionToClass.containsKey(type)) {
actionToClass.put(type, actionClass);
}
}
public TriggerAction createActionFromJson(final String type, final Object obj)
throws Exception {
TriggerAction action = null;
final Class<? extends TriggerAction> actionClass = actionToClass.get(type);
if (actionClass == null) {
throw new Exception("Action Type " + type + " not supported!");
}
action =
(TriggerAction) Utils.invokeStaticMethod(actionClass.getClassLoader(),
actionClass.getName(), "createFromJson", obj);
return action;
}
public TriggerAction createAction(final String type, final Object... args) {
TriggerAction action = null;
final Class<? extends TriggerAction> actionClass = actionToClass.get(type);
action = (TriggerAction) Utils.callConstructor(actionClass, args);
return action;
}
public Set<String> getSupportedActions() {
return actionToClass.keySet();
}
}
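// --- Editor's note: a minimal registration sketch, not part of the original file. ---
// Action types are registered by name and later instantiated reflectively: from JSON
// via a static createFromJson(Object) factory on the action class, or by constructor
// through createAction(). `MyAction` is a hypothetical action used only for
// illustration; a concrete implementation would implement every TriggerAction method.
class ActionTypeLoaderUsageSketch {
abstract static class MyAction implements TriggerAction {
}

void register(final ActionTypeLoader loader) {
loader.registerActionType("myAction", MyAction.class);
}
}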
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/trigger/CheckerTypeLoader.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.trigger;
import azkaban.utils.Props;
import azkaban.utils.Utils;
import java.util.HashMap;
import java.util.Map;
import org.apache.log4j.Logger;
public class CheckerTypeLoader {
public static final String DEFAULT_CONDITION_CHECKER_PLUGIN_DIR =
"plugins/conditioncheckers";
private static final Logger logger = Logger.getLogger(CheckerTypeLoader.class);
protected static Map<String, Class<? extends ConditionChecker>> checkerToClass =
new HashMap<>();
public static void registerBuiltinCheckers(
final Map<String, Class<? extends ConditionChecker>> builtinCheckers) {
checkerToClass.putAll(builtinCheckers);
for (final String type : builtinCheckers.keySet()) {
logger.info("Loaded " + type + " checker.");
}
}
public void init(final Props props) throws TriggerException {
}
public synchronized void registerCheckerType(final String type,
final Class<? extends ConditionChecker> checkerClass) {
logger.info("Registering checker " + type);
if (!checkerToClass.containsKey(type)) {
checkerToClass.put(type, checkerClass);
}
}
public ConditionChecker createCheckerFromJson(final String type, final Object obj)
throws Exception {
ConditionChecker checker = null;
final Class<? extends ConditionChecker> checkerClass = checkerToClass.get(type);
if (checkerClass == null) {
throw new Exception("Checker type " + type + " not supported!");
}
checker =
(ConditionChecker) Utils.invokeStaticMethod(
checkerClass.getClassLoader(), checkerClass.getName(),
"createFromJson", obj);
return checker;
}
public ConditionChecker createChecker(final String type, final Object... args) {
ConditionChecker checker = null;
final Class<? extends ConditionChecker> checkerClass = checkerToClass.get(type);
checker = (ConditionChecker) Utils.callConstructor(checkerClass, args);
return checker;
}
public Map<String, Class<? extends ConditionChecker>> getSupportedCheckers() {
return checkerToClass;
}
}
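// --- Editor's note: a minimal registration sketch, not part of the original file;
// it mirrors the ActionTypeLoader example. `MyChecker` is hypothetical.
class CheckerTypeLoaderUsageSketch {
abstract static class MyChecker implements ConditionChecker {
}

void register(final CheckerTypeLoader loader) {
loader.registerCheckerType("myChecker", MyChecker.class);
}
}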
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/trigger/Condition.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.trigger;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.jexl2.Expression;
import org.apache.commons.jexl2.JexlEngine;
import org.apache.commons.jexl2.MapContext;
import org.apache.log4j.Logger;
import org.joda.time.DateTime;
public class Condition {
private static final Logger logger = Logger.getLogger(Condition.class);
private static final JexlEngine jexl = new JexlEngine();
private static CheckerTypeLoader checkerLoader = null;
private final MapContext context = new MapContext();
private Expression expression;
private Map<String, ConditionChecker> checkers =
new HashMap<>();
private Long nextCheckTime = -1L;
public Condition(final Map<String, ConditionChecker> checkers, final String expr) {
setCheckers(checkers);
this.expression = jexl.createExpression(expr);
updateNextCheckTime();
}
public Condition(final Map<String, ConditionChecker> checkers, final String expr,
final long nextCheckTime) {
this.nextCheckTime = nextCheckTime;
setCheckers(checkers);
this.expression = jexl.createExpression(expr);
}
public synchronized static void setCheckerLoader(final CheckerTypeLoader loader) {
Condition.checkerLoader = loader;
}
public static Condition fromJson(final Object obj) throws Exception {
if (checkerLoader == null) {
throw new Exception("Condition Checker loader not initialized!");
}
final Map<String, Object> jsonObj = (HashMap<String, Object>) obj;
Condition cond = null;
try {
final Map<String, ConditionChecker> checkers =
new HashMap<>();
final List<Object> checkersJson = (List<Object>) jsonObj.get("checkers");
for (final Object oneCheckerJson : checkersJson) {
final Map<String, Object> oneChecker =
(HashMap<String, Object>) oneCheckerJson;
final String type = (String) oneChecker.get("type");
final ConditionChecker ck =
checkerLoader.createCheckerFromJson(type,
oneChecker.get("checkerJson"));
checkers.put(ck.getId(), ck);
}
final String expr = (String) jsonObj.get("expression");
final Long nextCheckTime = Long.valueOf((String) jsonObj.get("nextCheckTime"));
cond = new Condition(checkers, expr, nextCheckTime);
} catch (final Exception e) {
e.printStackTrace();
logger.error("Failed to recreate condition from json.", e);
throw new Exception("Failed to recreate condition from json.", e);
}
return cond;
}
public long getNextCheckTime() {
return this.nextCheckTime;
}
public Map<String, ConditionChecker> getCheckers() {
return this.checkers;
}
private void setCheckers(final Map<String, ConditionChecker> checkers) {
this.checkers = checkers;
for (final ConditionChecker checker : checkers.values()) {
this.context.set(checker.getId(), checker);
}
updateNextCheckTime();
}
private void updateNextCheckTime() {
long time = Long.MAX_VALUE;
for (final ConditionChecker checker : this.checkers.values()) {
time = Math.min(time, checker.getNextCheckTime());
}
this.nextCheckTime = time;
}
public void resetCheckers() {
for (final ConditionChecker checker : this.checkers.values()) {
checker.reset();
}
updateNextCheckTime();
logger.info("Done resetting checkers. The next check time will be "
+ new DateTime(this.nextCheckTime));
}
public String getExpression() {
return this.expression.getExpression();
}
public void setExpression(final String expr) {
this.expression = jexl.createExpression(expr);
}
public boolean isMet() {
if (logger.isDebugEnabled()) {
logger.debug("Testing condition " + this.expression);
}
return this.expression.evaluate(this.context).equals(Boolean.TRUE);
}
public Object toJson() {
final Map<String, Object> jsonObj = new HashMap<>();
jsonObj.put("expression", this.expression.getExpression());
final List<Object> checkersJson = new ArrayList<>();
for (final ConditionChecker checker : this.checkers.values()) {
final Map<String, Object> oneChecker = new HashMap<>();
oneChecker.put("type", checker.getType());
oneChecker.put("checkerJson", checker.toJson());
checkersJson.add(oneChecker);
}
jsonObj.put("checkers", checkersJson);
jsonObj.put("nextCheckTime", String.valueOf(this.nextCheckTime));
return jsonObj;
}
}
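// --- Editor's note: a minimal evaluation sketch, not part of the original file. ---
// Each checker is bound into the JEXL context under its id, so the condition
// expression can invoke it by that id. `checker` is an assumed ConditionChecker
// whose getId() returns a valid JEXL identifier such as "myChecker".
class ConditionUsageSketch {
boolean evaluate(final ConditionChecker checker) {
final Map<String, ConditionChecker> checkers = new HashMap<>();
checkers.put(checker.getId(), checker);
// e.g. the expression "myChecker.eval()"; isMet() is true only when the
// expression evaluates to Boolean.TRUE.
final Condition cond = new Condition(checkers, checker.getId() + ".eval()");
return cond.isMet();
}
}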
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/trigger/ConditionChecker.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.trigger;
import java.util.Map;
public interface ConditionChecker {
Object eval();
Object getNum();
void reset();
String getId();
String getType();
ConditionChecker fromJson(Object obj) throws Exception;
Object toJson();
void stopChecker();
void setContext(Map<String, Object> context);
long getNextCheckTime();
}
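// --- Editor's note: a minimal, hypothetical implementation sketch, not part of the
// original file. It always evaluates to true and never requests another check; real
// checkers (e.g. time-based ones) return a meaningful getNextCheckTime() so the
// trigger scanner knows when to poll them again.
class AlwaysTrueChecker implements ConditionChecker {
private final String id;

AlwaysTrueChecker(final String id) {
this.id = id;
}

@Override
public Object eval() {
return Boolean.TRUE;
}

@Override
public Object getNum() {
return null;
}

@Override
public void reset() {
}

@Override
public String getId() {
return this.id;
}

@Override
public String getType() {
return "AlwaysTrueChecker";
}

@Override
public ConditionChecker fromJson(final Object obj) {
return this;
}

@Override
public Object toJson() {
return null;
}

@Override
public void stopChecker() {
}

@Override
public void setContext(final Map<String, Object> context) {
}

@Override
public long getNextCheckTime() {
// Long.MAX_VALUE means "no scheduled re-check needed".
return Long.MAX_VALUE;
}
}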
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/trigger/JdbcTriggerImpl.java
|
/*
* Copyright 2017 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.trigger;
import azkaban.db.EncodingType;
import azkaban.db.DatabaseOperator;
import azkaban.db.SQLTransaction;
import azkaban.utils.GZIPUtils;
import azkaban.utils.JSONUtils;
import javax.inject.Inject;
import javax.inject.Singleton;
import java.io.IOException;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.commons.dbutils.ResultSetHandler;
import org.apache.log4j.Logger;
import org.joda.time.DateTime;
@Singleton
public class JdbcTriggerImpl implements TriggerLoader {
private static final String TRIGGER_TABLE_NAME = "triggers";
private static final String GET_UPDATED_TRIGGERS =
"SELECT trigger_id, trigger_source, modify_time, enc_type, data FROM " + TRIGGER_TABLE_NAME
+ " WHERE modify_time>=?";
private static final String GET_ALL_TRIGGERS =
"SELECT trigger_id, trigger_source, modify_time, enc_type, data FROM " + TRIGGER_TABLE_NAME;
private static final String GET_TRIGGER =
"SELECT trigger_id, trigger_source, modify_time, enc_type, data FROM " + TRIGGER_TABLE_NAME
+ " WHERE trigger_id=?";
private static final String ADD_TRIGGER =
"INSERT INTO " + TRIGGER_TABLE_NAME + " ( modify_time) values (?)";
private static final String REMOVE_TRIGGER =
"DELETE FROM " + TRIGGER_TABLE_NAME + " WHERE trigger_id=?";
private static final String UPDATE_TRIGGER =
"UPDATE " + TRIGGER_TABLE_NAME
+ " SET trigger_source=?, modify_time=?, enc_type=?, data=? WHERE trigger_id=?";
private static final Logger logger = Logger.getLogger(JdbcTriggerImpl.class);
private final DatabaseOperator dbOperator;
private final EncodingType defaultEncodingType = EncodingType.GZIP;
@Inject
public JdbcTriggerImpl(final DatabaseOperator databaseOperator) {
this.dbOperator = databaseOperator;
}
@Override
public List<Trigger> getUpdatedTriggers(final long lastUpdateTime) throws TriggerLoaderException {
logger.info("Loading triggers changed since " + new DateTime(lastUpdateTime).toString());
final ResultSetHandler<List<Trigger>> handler = new TriggerResultHandler();
try {
final List<Trigger> triggers = this.dbOperator
.query(GET_UPDATED_TRIGGERS, handler, lastUpdateTime);
logger.info("Loaded " + triggers.size() + " triggers.");
return triggers;
} catch (final SQLException ex) {
throw new TriggerLoaderException("Loading triggers from db failed.", ex);
}
}
@Override
public List<Trigger> loadTriggers() throws TriggerLoaderException {
logger.info("Loading all triggers from db.");
final ResultSetHandler<List<Trigger>> handler = new TriggerResultHandler();
try {
final List<Trigger> triggers = this.dbOperator.query(GET_ALL_TRIGGERS, handler);
logger.info("Loaded " + triggers.size() + " triggers.");
return triggers;
} catch (final SQLException ex) {
throw new TriggerLoaderException("Loading triggers from db failed.", ex);
}
}
@Override
public void removeTrigger(final Trigger t) throws TriggerLoaderException {
logger.info("Removing trigger " + t.toString() + " from db.");
try {
final int removes = this.dbOperator.update(REMOVE_TRIGGER, t.getTriggerId());
if (removes == 0) {
throw new TriggerLoaderException("No trigger has been removed.");
}
} catch (final SQLException ex) {
throw new TriggerLoaderException("Remove trigger " + t.getTriggerId() + " from db failed. ",
ex);
}
}
/**
* TODO: Don't understand why we need synchronized here.
*/
@Override
public synchronized void addTrigger(final Trigger t) throws TriggerLoaderException {
logger.info("Inserting trigger " + t.toString() + " into db.");
final SQLTransaction<Long> insertAndGetLastID = transOperator -> {
transOperator.update(ADD_TRIGGER, DateTime.now().getMillis());
// This commit must be called in order to unlock the trigger table and obtain the last insert ID.
transOperator.getConnection().commit();
return transOperator.getLastInsertId();
};
try {
final long id = this.dbOperator.transaction(insertAndGetLastID);
t.setTriggerId((int) id);
updateTrigger(t);
logger.info("uploaded trigger " + t.getDescription());
} catch (final SQLException ex) {
logger.error("Adding Trigger " + t.getTriggerId() + " failed.");
throw new TriggerLoaderException("trigger id is not properly created.", ex);
}
}
@Override
public void updateTrigger(final Trigger t) throws TriggerLoaderException {
logger.info("Updating trigger " + t.getTriggerId() + " into db.");
t.setLastModifyTime(System.currentTimeMillis());
updateTrigger(t, this.defaultEncodingType);
}
private void updateTrigger(final Trigger t, final EncodingType encType)
throws TriggerLoaderException {
final String json = JSONUtils.toJSON(t.toJson());
byte[] data = null;
try {
final byte[] stringData = json.getBytes("UTF-8");
data = stringData;
if (encType == EncodingType.GZIP) {
data = GZIPUtils.gzipBytes(stringData);
}
logger.debug(
"NumChars: " + json.length() + " UTF-8:" + stringData.length + " Gzip:" + data.length);
} catch (final IOException e) {
logger.error("Trigger encoding fails", e);
throw new TriggerLoaderException("Error encoding the trigger " + t.toString(), e);
}
try {
final int updates = this.dbOperator
.update(UPDATE_TRIGGER, t.getSource(), t.getLastModifyTime(), encType.getNumVal(), data,
t.getTriggerId());
if (updates == 0) {
throw new TriggerLoaderException("No trigger has been updated.");
}
} catch (final SQLException ex) {
logger.error("Updating Trigger " + t.getTriggerId() + " failed.");
throw new TriggerLoaderException("DB Trigger update failed. ", ex);
}
}
@Override
public Trigger loadTrigger(final int triggerId) throws TriggerLoaderException {
logger.info("Loading trigger " + triggerId + " from db.");
final ResultSetHandler<List<Trigger>> handler = new TriggerResultHandler();
try {
final List<Trigger> triggers = this.dbOperator.query(GET_TRIGGER, handler, triggerId);
if (triggers.size() == 0) {
logger.error("Loaded 0 triggers. Failed to load trigger " + triggerId);
throw new TriggerLoaderException("Loaded 0 triggers. Failed to load trigger " + triggerId);
}
return triggers.get(0);
} catch (final SQLException ex) {
logger.error("Failed to load trigger " + triggerId);
throw new TriggerLoaderException("Load a specific trigger failed.", ex);
}
}
public static class TriggerResultHandler implements ResultSetHandler<List<Trigger>> {
@Override
public List<Trigger> handle(final ResultSet rs) throws SQLException {
if (!rs.next()) {
return Collections.<Trigger>emptyList();
}
final ArrayList<Trigger> triggers = new ArrayList<>();
do {
final int triggerId = rs.getInt(1);
final int encodingType = rs.getInt(4);
final byte[] data = rs.getBytes(5);
Object jsonObj = null;
if (data != null) {
final EncodingType encType = EncodingType.fromInteger(encodingType);
try {
// Convoluted way to inflate strings. Should find common package or
// helper function.
jsonObj = JSONUtils.parseJSONFromString(encType == EncodingType.GZIP ?
GZIPUtils.unGzipString(data, "UTF-8") : new String(data, "UTF-8"));
} catch (final IOException e) {
throw new SQLException("Error reconstructing trigger data ");
}
}
Trigger t = null;
try {
t = Trigger.fromJson(jsonObj);
triggers.add(t);
} catch (final Exception e) {
logger.error("Failed to load trigger " + triggerId, e);
}
} while (rs.next());
return triggers;
}
}
}
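// --- Editor's note: not part of the original file. addTrigger() is a two-step write:
// (1) insert a bare row to obtain the auto-increment trigger_id, then (2) set that id
// on the Trigger and call updateTrigger(), which serializes Trigger.toJson() to UTF-8
// JSON, gzips it (EncodingType.GZIP by default), and stores the bytes in the `data`
// column. loadTrigger() reverses this: it un-gzips the blob and rebuilds the Trigger
// via Trigger.fromJson().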
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/trigger/Trigger.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.trigger;
import static java.util.Objects.requireNonNull;
import azkaban.utils.JSONUtils;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.log4j.Logger;
import org.joda.time.DateTime;
public class Trigger {
private static final Logger logger = Logger.getLogger(Trigger.class);
private static ActionTypeLoader actionTypeLoader;
private final long submitTime;
private final String submitUser;
private final String source;
private final List<TriggerAction> actions;
private final List<TriggerAction> expireActions;
private Condition expireCondition;
private Condition triggerCondition;
private int triggerId = -1;
private long lastModifyTime;
private TriggerStatus status = TriggerStatus.READY;
private Map<String, Object> info = new HashMap<>();
private Map<String, Object> context = new HashMap<>();
private boolean resetOnTrigger = true;
private boolean resetOnExpire = true;
private long nextCheckTime = -1;
private Trigger() throws TriggerManagerException {
throw new TriggerManagerException("Triggers should always be specified");
}
private Trigger(final int triggerId, final long lastModifyTime, final long submitTime,
final String submitUser, final String source, final Condition triggerCondition,
final Condition expireCondition, final List<TriggerAction> actions,
final List<TriggerAction> expireActions, final Map<String, Object> info,
final Map<String, Object> context) {
requireNonNull(submitUser);
requireNonNull(source);
requireNonNull(triggerCondition);
requireNonNull(expireActions);
requireNonNull(info);
requireNonNull(context);
this.lastModifyTime = lastModifyTime;
this.submitTime = submitTime;
this.submitUser = submitUser;
this.source = source;
this.triggerCondition = triggerCondition;
this.expireCondition = expireCondition;
this.actions = actions;
this.triggerId = triggerId;
this.expireActions = expireActions;
this.info = info;
this.context = context;
}
public static ActionTypeLoader getActionTypeLoader() {
return actionTypeLoader;
}
public static synchronized void setActionTypeLoader(final ActionTypeLoader loader) {
Trigger.actionTypeLoader = loader;
}
public static Trigger fromJson(final Object obj) throws Exception {
if (actionTypeLoader == null) {
throw new Exception("Trigger Action Type loader not initialized.");
}
final Map<String, Object> jsonObj = (HashMap<String, Object>) obj;
Trigger trigger = null;
try {
logger.info("Decoding for " + JSONUtils.toJSON(obj));
final Condition triggerCond = Condition.fromJson(jsonObj.get("triggerCondition"));
final Condition expireCond = Condition.fromJson(jsonObj.get("expireCondition"));
final List<TriggerAction> actions = new ArrayList<>();
final List<Object> actionsJson = (List<Object>) jsonObj.get("actions");
for (final Object actObj : actionsJson) {
final Map<String, Object> oneActionJson = (HashMap<String, Object>) actObj;
final String type = (String) oneActionJson.get("type");
final TriggerAction act =
actionTypeLoader.createActionFromJson(type,
oneActionJson.get("actionJson"));
actions.add(act);
}
final List<TriggerAction> expireActions = new ArrayList<>();
final List<Object> expireActionsJson =
(List<Object>) jsonObj.get("expireActions");
for (final Object expireActObj : expireActionsJson) {
final Map<String, Object> oneExpireActionJson =
(HashMap<String, Object>) expireActObj;
final String type = (String) oneExpireActionJson.get("type");
final TriggerAction expireAct =
actionTypeLoader.createActionFromJson(type,
oneExpireActionJson.get("actionJson"));
expireActions.add(expireAct);
}
final boolean resetOnTrigger =
Boolean.valueOf((String) jsonObj.get("resetOnTrigger"));
final boolean resetOnExpire =
Boolean.valueOf((String) jsonObj.get("resetOnExpire"));
final String submitUser = (String) jsonObj.get("submitUser");
final String source = (String) jsonObj.get("source");
final long submitTime = Long.valueOf((String) jsonObj.get("submitTime"));
final long lastModifyTime =
Long.valueOf((String) jsonObj.get("lastModifyTime"));
final int triggerId = Integer.valueOf((String) jsonObj.get("triggerId"));
final TriggerStatus status =
TriggerStatus.valueOf((String) jsonObj.get("status"));
final Map<String, Object> info = (Map<String, Object>) jsonObj.get("info");
Map<String, Object> context =
(Map<String, Object>) jsonObj.get("context");
if (context == null) {
context = new HashMap<>();
}
for (final ConditionChecker checker : triggerCond.getCheckers().values()) {
checker.setContext(context);
}
for (final ConditionChecker checker : expireCond.getCheckers().values()) {
checker.setContext(context);
}
for (final TriggerAction action : actions) {
action.setContext(context);
}
for (final TriggerAction action : expireActions) {
action.setContext(context);
}
trigger = new Trigger.TriggerBuilder(submitUser,
source,
triggerCond,
expireCond,
actions)
.setId(triggerId)
.setLastModifyTime(lastModifyTime)
.setSubmitTime(submitTime)
.setExpireActions(expireActions)
.setInfo(info)
.setContext(context)
.build();
trigger.setResetOnExpire(resetOnExpire);
trigger.setResetOnTrigger(resetOnTrigger);
trigger.setStatus(status);
} catch (final Exception e) {
e.printStackTrace();
logger.error("Failed to decode the trigger.", e);
throw new Exception("Failed to decode the trigger.", e);
}
return trigger;
}
public void updateNextCheckTime() {
this.nextCheckTime = Math.min(this.triggerCondition.getNextCheckTime(),
this.expireCondition.getNextCheckTime());
}
public long getNextCheckTime() {
return this.nextCheckTime;
}
public void setNextCheckTime(final long nct) {
this.nextCheckTime = nct;
}
public long getSubmitTime() {
return this.submitTime;
}
public String getSubmitUser() {
return this.submitUser;
}
public TriggerStatus getStatus() {
return this.status;
}
public void setStatus(final TriggerStatus status) {
this.status = status;
}
public Condition getTriggerCondition() {
return this.triggerCondition;
}
public void setTriggerCondition(final Condition triggerCondition) {
this.triggerCondition = triggerCondition;
}
public Condition getExpireCondition() {
return this.expireCondition;
}
public void setExpireCondition(final Condition expireCondition) {
this.expireCondition = expireCondition;
}
public List<TriggerAction> getActions() {
return this.actions;
}
public List<TriggerAction> getExpireActions() {
return this.expireActions;
}
public Map<String, Object> getInfo() {
return this.info;
}
public void setInfo(final Map<String, Object> info) {
this.info = info;
}
public Map<String, Object> getContext() {
return this.context;
}
public void setContext(final Map<String, Object> context) {
this.context = context;
}
public boolean isResetOnTrigger() {
return this.resetOnTrigger;
}
public void setResetOnTrigger(final boolean resetOnTrigger) {
this.resetOnTrigger = resetOnTrigger;
}
public boolean isResetOnExpire() {
return this.resetOnExpire;
}
public void setResetOnExpire(final boolean resetOnExpire) {
this.resetOnExpire = resetOnExpire;
}
public long getLastModifyTime() {
return this.lastModifyTime;
}
public void setLastModifyTime(final long lastModifyTime) {
this.lastModifyTime = lastModifyTime;
}
public int getTriggerId() {
return this.triggerId;
}
public void setTriggerId(final int id) {
this.triggerId = id;
}
public boolean triggerConditionMet() {
return this.triggerCondition.isMet();
}
public boolean expireConditionMet() {
return this.expireCondition.isMet();
}
public void resetTriggerConditions() {
this.triggerCondition.resetCheckers();
updateNextCheckTime();
}
public void resetExpireCondition() {
this.expireCondition.resetCheckers();
updateNextCheckTime();
}
public List<TriggerAction> getTriggerActions() {
return this.actions;
}
public Map<String, Object> toJson() {
final Map<String, Object> jsonObj = new HashMap<>();
jsonObj.put("triggerCondition", this.triggerCondition.toJson());
jsonObj.put("expireCondition", this.expireCondition.toJson());
final List<Object> actionsJson = new ArrayList<>();
for (final TriggerAction action : this.actions) {
final Map<String, Object> oneActionJson = new HashMap<>();
oneActionJson.put("type", action.getType());
oneActionJson.put("actionJson", action.toJson());
actionsJson.add(oneActionJson);
}
jsonObj.put("actions", actionsJson);
final List<Object> expireActionsJson = new ArrayList<>();
for (final TriggerAction expireAction : this.expireActions) {
final Map<String, Object> oneExpireActionJson = new HashMap<>();
oneExpireActionJson.put("type", expireAction.getType());
oneExpireActionJson.put("actionJson", expireAction.toJson());
expireActionsJson.add(oneExpireActionJson);
}
jsonObj.put("expireActions", expireActionsJson);
jsonObj.put("resetOnTrigger", String.valueOf(this.resetOnTrigger));
jsonObj.put("resetOnExpire", String.valueOf(this.resetOnExpire));
jsonObj.put("submitUser", this.submitUser);
jsonObj.put("source", this.source);
jsonObj.put("submitTime", String.valueOf(this.submitTime));
jsonObj.put("lastModifyTime", String.valueOf(this.lastModifyTime));
jsonObj.put("triggerId", String.valueOf(this.triggerId));
jsonObj.put("status", this.status.toString());
jsonObj.put("info", this.info);
jsonObj.put("context", this.context);
return jsonObj;
}
public String getSource() {
return this.source;
}
public String getDescription() {
    final StringBuilder actionsString = new StringBuilder();
for (final TriggerAction act : this.actions) {
actionsString.append(", ");
actionsString.append(act.getDescription());
}
return "Trigger from " + getSource() + " with trigger condition of "
+ this.triggerCondition.getExpression() + " and expire condition of "
+ this.expireCondition.getExpression() + actionsString;
}
public void stopCheckers() {
for (final ConditionChecker checker : this.triggerCondition.getCheckers().values()) {
checker.stopChecker();
}
for (final ConditionChecker checker : this.expireCondition.getCheckers().values()) {
checker.stopChecker();
}
}
@Override
public String toString() {
return "Trigger Id: " + getTriggerId() + ", Description: " + getDescription();
}
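  /**
   * Fluent builder for {@link Trigger}. Illustrative usage, mirroring fromJson():
   *
   *   final Trigger t = new Trigger.TriggerBuilder(submitUser, source,
   *       triggerCondition, expireCondition, actions)
   *       .setId(triggerId)
   *       .setExpireActions(expireActions)
   *       .build();
   */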
public static class TriggerBuilder {
private final String submitUser;
private final String source;
private final TriggerStatus status = TriggerStatus.READY;
private final Condition triggerCondition;
private final List<TriggerAction> actions;
private final Condition expireCondition;
private int triggerId = -1;
private long lastModifyTime;
private long submitTime;
private List<TriggerAction> expireActions = new ArrayList<>();
private Map<String, Object> info = new HashMap<>();
private Map<String, Object> context = new HashMap<>();
public TriggerBuilder(final String submitUser,
final String source,
final Condition triggerCondition,
final Condition expireCondition,
final List<TriggerAction> actions) {
this.submitUser = submitUser;
this.source = source;
this.triggerCondition = triggerCondition;
this.actions = actions;
this.expireCondition = expireCondition;
final long now = DateTime.now().getMillis();
this.submitTime = now;
this.lastModifyTime = now;
}
public TriggerBuilder setId(final int id) {
this.triggerId = id;
return this;
}
public TriggerBuilder setSubmitTime(final long time) {
this.submitTime = time;
return this;
}
public TriggerBuilder setLastModifyTime(final long time) {
this.lastModifyTime = time;
return this;
}
public TriggerBuilder setExpireActions(final List<TriggerAction> actions) {
this.expireActions = actions;
return this;
}
public TriggerBuilder setInfo(final Map<String, Object> info) {
this.info = info;
return this;
}
public TriggerBuilder setContext(final Map<String, Object> context) {
this.context = context;
return this;
}
public Trigger build() {
return new Trigger(this.triggerId,
this.lastModifyTime,
this.submitTime,
this.submitUser,
this.source,
this.triggerCondition,
this.expireCondition,
this.actions,
this.expireActions,
this.info,
this.context);
}
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/trigger/TriggerAction.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.trigger;
import java.util.Map;
public interface TriggerAction {
String getId();
String getType();
TriggerAction fromJson(Object obj) throws Exception;
Object toJson();
void doAction() throws Exception;
void setContext(Map<String, Object> context);
String getDescription();
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/trigger/TriggerAgent.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.trigger;
import azkaban.utils.Props;
public interface TriggerAgent {
public void loadTriggerFromProps(Props props) throws Exception;
public String getTriggerSource();
public void start() throws Exception;
public void shutdown();
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/trigger/TriggerException.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.trigger;
public class TriggerException extends Exception {
private static final long serialVersionUID = 1L;
public TriggerException(final String message) {
super(message);
}
public TriggerException(final String message, final Throwable cause) {
super(message, cause);
}
public TriggerException(final Throwable e) {
super(e);
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/trigger/TriggerLoader.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.trigger;
import java.util.List;
public interface TriggerLoader {
public void addTrigger(Trigger t) throws TriggerLoaderException;
public void removeTrigger(Trigger s) throws TriggerLoaderException;
public void updateTrigger(Trigger t) throws TriggerLoaderException;
public List<Trigger> loadTriggers() throws TriggerLoaderException;
public Trigger loadTrigger(int triggerId) throws TriggerLoaderException;
public List<Trigger> getUpdatedTriggers(long lastUpdateTime)
throws TriggerLoaderException;
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/trigger/TriggerLoaderException.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.trigger;
public class TriggerLoaderException extends Exception {
private static final long serialVersionUID = 1L;
public TriggerLoaderException(final String message) {
super(message);
}
public TriggerLoaderException(final String message, final Throwable cause) {
super(message, cause);
}
public TriggerLoaderException(final Throwable e) {
super(e);
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/trigger/TriggerManager.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.trigger;
import static java.util.Objects.requireNonNull;
import azkaban.event.EventHandler;
import azkaban.executor.ExecutorManagerAdapter;
import azkaban.executor.ExecutorManagerException;
import azkaban.utils.Props;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.PriorityBlockingQueue;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.log4j.Logger;
@Singleton
public class TriggerManager extends EventHandler implements
TriggerManagerAdapter {
public static final long DEFAULT_SCANNER_INTERVAL_MS = 60000;
private static final Logger logger = Logger.getLogger(TriggerManager.class);
private static final Map<Integer, Trigger> triggerIdMap =
new ConcurrentHashMap<>();
private final TriggerScannerThread runnerThread;
private final Object syncObj = new Object();
private final CheckerTypeLoader checkerTypeLoader;
private final ActionTypeLoader actionTypeLoader;
private final TriggerLoader triggerLoader;
private final LocalTriggerJMX jmxStats = new LocalTriggerJMX();
private long lastRunnerThreadCheckTime = -1;
private long runnerThreadIdleTime = -1;
private String scannerStage = "";
@Inject
public TriggerManager(final Props props, final TriggerLoader triggerLoader,
final ExecutorManagerAdapter executorManagerAdapter) throws TriggerManagerException {
requireNonNull(props);
requireNonNull(executorManagerAdapter);
this.triggerLoader = requireNonNull(triggerLoader);
final long scannerInterval =
props.getLong("trigger.scan.interval", DEFAULT_SCANNER_INTERVAL_MS);
this.runnerThread = new TriggerScannerThread(scannerInterval);
this.checkerTypeLoader = new CheckerTypeLoader();
this.actionTypeLoader = new ActionTypeLoader();
try {
this.checkerTypeLoader.init(props);
this.actionTypeLoader.init(props);
} catch (final Exception e) {
throw new TriggerManagerException(e);
}
Condition.setCheckerLoader(this.checkerTypeLoader);
Trigger.setActionTypeLoader(this.actionTypeLoader);
logger.info("TriggerManager loaded.");
}
@Override
public void start() throws TriggerManagerException {
try {
// expect loader to return valid triggers
final List<Trigger> triggers = this.triggerLoader.loadTriggers();
for (final Trigger t : triggers) {
this.runnerThread.addTrigger(t);
triggerIdMap.put(t.getTriggerId(), t);
}
} catch (final Exception e) {
logger.error(e);
throw new TriggerManagerException(e);
}
this.runnerThread.start();
}
protected CheckerTypeLoader getCheckerLoader() {
return this.checkerTypeLoader;
}
protected ActionTypeLoader getActionLoader() {
return this.actionTypeLoader;
}
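  // Persists the trigger first; only if the loader succeeds is it scheduled with
  // the scanner thread and cached in the id map.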
public void insertTrigger(final Trigger t) throws TriggerManagerException {
logger.info("Inserting trigger " + t + " in TriggerManager");
synchronized (this.syncObj) {
try {
this.triggerLoader.addTrigger(t);
} catch (final TriggerLoaderException e) {
throw new TriggerManagerException(e);
}
this.runnerThread.addTrigger(t);
triggerIdMap.put(t.getTriggerId(), t);
}
}
public void removeTrigger(final int id) throws TriggerManagerException {
logger.info("Removing trigger with id: " + id + " from TriggerManager");
synchronized (this.syncObj) {
      final Trigger t = triggerIdMap.get(id);
      if (t != null) {
        removeTrigger(t);
      }
}
}
public void updateTrigger(final Trigger t) throws TriggerManagerException {
logger.info("Updating trigger " + t + " in TriggerManager");
synchronized (this.syncObj) {
this.runnerThread.deleteTrigger(triggerIdMap.get(t.getTriggerId()));
this.runnerThread.addTrigger(t);
triggerIdMap.put(t.getTriggerId(), t);
try {
this.triggerLoader.updateTrigger(t);
} catch (final TriggerLoaderException e) {
throw new TriggerManagerException(e);
}
}
}
public void removeTrigger(final Trigger t) throws TriggerManagerException {
logger.info("Removing trigger " + t + " from TriggerManager");
synchronized (this.syncObj) {
this.runnerThread.deleteTrigger(t);
triggerIdMap.remove(t.getTriggerId());
try {
t.stopCheckers();
this.triggerLoader.removeTrigger(t);
} catch (final TriggerLoaderException e) {
throw new TriggerManagerException(e);
}
}
}
public List<Trigger> getTriggers() {
return new ArrayList<>(triggerIdMap.values());
}
public Map<String, Class<? extends ConditionChecker>> getSupportedCheckers() {
return this.checkerTypeLoader.getSupportedCheckers();
}
public Trigger getTrigger(final int triggerId) {
synchronized (this.syncObj) {
return triggerIdMap.get(triggerId);
}
}
  public void expireTrigger(final int triggerId) {
    final Trigger t = getTrigger(triggerId);
    // guard against an unknown trigger id, which previously threw a NullPointerException
    if (t != null) {
      t.setStatus(TriggerStatus.EXPIRED);
    }
  }
@Override
public List<Trigger> getTriggers(final String triggerSource) {
final List<Trigger> triggers = new ArrayList<>();
for (final Trigger t : triggerIdMap.values()) {
if (t.getSource().equals(triggerSource)) {
triggers.add(t);
}
}
return triggers;
}
@Override
public List<Trigger> getTriggerUpdates(final String triggerSource,
final long lastUpdateTime) throws TriggerManagerException {
final List<Trigger> triggers = new ArrayList<>();
for (final Trigger t : triggerIdMap.values()) {
if (t.getSource().equals(triggerSource)
&& t.getLastModifyTime() > lastUpdateTime) {
triggers.add(t);
}
}
return triggers;
}
@Override
public List<Trigger> getAllTriggerUpdates(final long lastUpdateTime)
throws TriggerManagerException {
final List<Trigger> triggers = new ArrayList<>();
for (final Trigger t : triggerIdMap.values()) {
if (t.getLastModifyTime() > lastUpdateTime) {
triggers.add(t);
}
}
return triggers;
}
@Override
public void insertTrigger(final Trigger t, final String user)
throws TriggerManagerException {
insertTrigger(t);
}
@Override
public void removeTrigger(final int id, final String user) throws TriggerManagerException {
removeTrigger(id);
}
@Override
public void updateTrigger(final Trigger t, final String user)
throws TriggerManagerException {
updateTrigger(t);
}
@Override
public void shutdown() {
this.runnerThread.shutdown();
}
@Override
public TriggerJMX getJMX() {
return this.jmxStats;
}
@Override
public void registerCheckerType(final String name,
final Class<? extends ConditionChecker> checker) {
this.checkerTypeLoader.registerCheckerType(name, checker);
}
@Override
public void registerActionType(final String name,
final Class<? extends TriggerAction> action) {
this.actionTypeLoader.registerActionType(name, action);
}
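  // Background thread that keeps all triggers in a priority queue ordered by
  // next check time and evaluates them once per scan interval.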
private class TriggerScannerThread extends Thread {
private final long scannerInterval;
private final BlockingQueue<Trigger> triggers;
private boolean shutdown = false;
public TriggerScannerThread(final long scannerInterval) {
this.triggers = new PriorityBlockingQueue<>(1, new TriggerComparator());
this.setName("TriggerRunnerManager-Trigger-Scanner-Thread");
this.scannerInterval = scannerInterval;
}
public void shutdown() {
logger.error("Shutting down trigger manager thread " + this.getName());
this.shutdown = true;
this.interrupt();
}
public void addTrigger(final Trigger t) {
synchronized (TriggerManager.this.syncObj) {
t.updateNextCheckTime();
this.triggers.add(t);
}
}
public void deleteTrigger(final Trigger t) {
this.triggers.remove(t);
}
@Override
public void run() {
while (!this.shutdown) {
synchronized (TriggerManager.this.syncObj) {
try {
TriggerManager.this.lastRunnerThreadCheckTime = System.currentTimeMillis();
TriggerManager.this.scannerStage =
"Ready to start a new scan cycle at "
+ TriggerManager.this.lastRunnerThreadCheckTime;
            try {
              checkAllTriggers();
            } catch (final Throwable t) {
              // a single Throwable catch covers the former Exception branch too, and
              // passing the throwable keeps the full stack trace in the log
              logger.error("Error while checking triggers", t);
            }
TriggerManager.this.scannerStage = "Done flipping all triggers.";
TriggerManager.this.runnerThreadIdleTime =
this.scannerInterval
- (System.currentTimeMillis() - TriggerManager.this.lastRunnerThreadCheckTime);
if (TriggerManager.this.runnerThreadIdleTime < 0) {
logger.error("Trigger manager thread " + this.getName()
+ " is too busy!");
} else {
TriggerManager.this.syncObj.wait(TriggerManager.this.runnerThreadIdleTime);
}
} catch (final InterruptedException e) {
logger.info("Interrupted. Probably to shut down.");
}
}
}
}
private void checkAllTriggers() throws TriggerManagerException {
      // Sweep through every scheduled trigger; a failure in one must not block the rest.
for (final Trigger t : this.triggers) {
try {
TriggerManager.this.scannerStage = "Checking for trigger " + t.getTriggerId();
if (t.getStatus().equals(TriggerStatus.READY)) {
          /*
           * Historically the expiration condition was never evaluated, even though
           * related code existed here: ExpireCondition used the same BasicTimeChecker
           * as triggerCondition does, so the two kinds of ExpireCondition have to be
           * distinguished. The EndTimeChecker check below identifies the new-style
           * ExpireCondition introduced by this change.
           */
if (t.getExpireCondition().getExpression().contains("EndTimeChecker") && t
.expireConditionMet()) {
onTriggerPause(t);
} else if (t.triggerConditionMet()) {
onTriggerTrigger(t);
}
}
if (t.getStatus().equals(TriggerStatus.EXPIRED) && t.getSource().equals("azkaban")) {
removeTrigger(t);
} else {
t.updateNextCheckTime();
}
} catch (final Throwable th) {
//skip this trigger, moving on to the next one
logger.error("Failed to process trigger with id : " + t, th);
}
}
}
private void onTriggerTrigger(final Trigger t) throws TriggerManagerException {
final List<TriggerAction> actions = t.getTriggerActions();
for (final TriggerAction action : actions) {
try {
logger.info("Doing trigger actions " + action.getDescription() + " for " + t);
action.doAction();
} catch (final ExecutorManagerException e) {
if (e.getReason() == ExecutorManagerException.Reason.SkippedExecution) {
logger.info("Skipped action [" + action.getDescription() + "] for [" + t +
"] because: " + e.getMessage());
} else {
logger.error("Failed to do action [" + action.getDescription() + "] for [" + t + "]",
e);
}
} catch (final Throwable th) {
logger.error("Failed to do action [" + action.getDescription() + "] for [" + t + "]", th);
}
}
if (t.isResetOnTrigger()) {
t.resetTriggerConditions();
} else {
logger.info("NextCheckTime did not change. Setting status to expired for trigger"
+ t.getTriggerId());
t.setStatus(TriggerStatus.EXPIRED);
}
try {
TriggerManager.this.triggerLoader.updateTrigger(t);
} catch (final TriggerLoaderException e) {
throw new TriggerManagerException(e);
}
}
private void onTriggerPause(final Trigger t) throws TriggerManagerException {
final List<TriggerAction> expireActions = t.getExpireActions();
for (final TriggerAction action : expireActions) {
try {
logger.info("Doing expire actions for " + action.getDescription() + " for " + t);
action.doAction();
} catch (final Exception e) {
logger.error("Failed to do expire action " + action.getDescription() + " for " + t, e);
} catch (final Throwable th) {
logger.error("Failed to do expire action " + action.getDescription() + " for " + t, th);
}
}
logger.info("Pausing Trigger " + t.getDescription());
t.setStatus(TriggerStatus.PAUSED);
try {
TriggerManager.this.triggerLoader.updateTrigger(t);
} catch (final TriggerLoaderException e) {
throw new TriggerManagerException(e);
}
}
    private class TriggerComparator implements Comparator<Trigger> {

      @Override
      public int compare(final Trigger arg0, final Trigger arg1) {
        // Order triggers by ascending next check time so the scanner handles the
        // most urgent trigger first (equivalent to the original hand-rolled logic).
        return Long.compare(arg0.getNextCheckTime(), arg1.getNextCheckTime());
      }
    }
}
private class LocalTriggerJMX implements TriggerJMX {
@Override
public long getLastRunnerThreadCheckTime() {
return TriggerManager.this.lastRunnerThreadCheckTime;
}
@Override
public boolean isRunnerThreadActive() {
return TriggerManager.this.runnerThread.isAlive();
}
@Override
public String getPrimaryServerHost() {
return "local";
}
@Override
public int getNumTriggers() {
return triggerIdMap.size();
}
@Override
public String getTriggerSources() {
final Set<String> sources = new HashSet<>();
for (final Trigger t : triggerIdMap.values()) {
sources.add(t.getSource());
}
return sources.toString();
}
@Override
public String getTriggerIds() {
return triggerIdMap.keySet().toString();
}
@Override
public long getScannerIdleTime() {
return TriggerManager.this.runnerThreadIdleTime;
}
@Override
public Map<String, Object> getAllJMXMbeans() {
return new HashMap<>();
}
@Override
public String getScannerThreadStage() {
return TriggerManager.this.scannerStage;
}
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/trigger/TriggerManagerAdapter.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.trigger;
import java.util.List;
import java.util.Map;
public interface TriggerManagerAdapter {
public void insertTrigger(Trigger t, String user)
throws TriggerManagerException;
public void removeTrigger(int id, String user) throws TriggerManagerException;
public void updateTrigger(Trigger t, String user)
throws TriggerManagerException;
public List<Trigger> getAllTriggerUpdates(long lastUpdateTime)
throws TriggerManagerException;
public List<Trigger> getTriggerUpdates(String triggerSource,
long lastUpdateTime) throws TriggerManagerException;
  public List<Trigger> getTriggers(String triggerSource);
public void start() throws TriggerManagerException;
public void shutdown();
public void registerCheckerType(String name,
Class<? extends ConditionChecker> checker);
public void registerActionType(String name,
Class<? extends TriggerAction> action);
public TriggerJMX getJMX();
public interface TriggerJMX {
public long getLastRunnerThreadCheckTime();
public boolean isRunnerThreadActive();
public String getPrimaryServerHost();
public int getNumTriggers();
public String getTriggerSources();
public String getTriggerIds();
public long getScannerIdleTime();
public Map<String, Object> getAllJMXMbeans();
public String getScannerThreadStage();
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/trigger/TriggerManagerException.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.trigger;
public class TriggerManagerException extends Exception {
private static final long serialVersionUID = 1L;
public TriggerManagerException(final String message) {
super(message);
}
public TriggerManagerException(final String message, final Throwable cause) {
super(message, cause);
}
public TriggerManagerException(final Throwable e) {
super(e);
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/trigger/TriggerStatus.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.trigger;
public enum TriggerStatus {
READY(10), PAUSED(20), EXPIRED(30);
private final int numVal;
TriggerStatus(final int numVal) {
this.numVal = numVal;
}
public int getNumVal() {
return this.numVal;
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/trigger
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/trigger/builtin/BasicTimeChecker.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.trigger.builtin;
import azkaban.trigger.ConditionChecker;
import azkaban.utils.TimeUtils;
import azkaban.utils.Utils;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import org.joda.time.DateTime;
import org.joda.time.DateTimeUtils;
import org.joda.time.DateTimeZone;
import org.joda.time.ReadablePeriod;
import org.quartz.CronExpression;
public class BasicTimeChecker implements ConditionChecker {
public static final String type = "BasicTimeChecker";
private final String id;
private final long firstCheckTime;
private final DateTimeZone timezone;
private final ReadablePeriod period;
private final String cronExpression;
private final CronExpression cronExecutionTime;
private long nextCheckTime;
private boolean isRecurring = true;
private boolean skipPastChecks = true;
public BasicTimeChecker(final String id, final long firstCheckTime,
final DateTimeZone timezone, final boolean isRecurring, final boolean skipPastChecks,
final ReadablePeriod period, final String cronExpression) {
this.id = id;
this.firstCheckTime = firstCheckTime;
this.timezone = timezone;
this.isRecurring = isRecurring;
this.skipPastChecks = skipPastChecks;
this.period = period;
this.nextCheckTime = firstCheckTime;
this.cronExpression = cronExpression;
this.cronExecutionTime = Utils.parseCronExpression(cronExpression, timezone);
this.nextCheckTime = calculateNextCheckTime();
}
public BasicTimeChecker(final String id, final long firstCheckTime,
final DateTimeZone timezone, final long nextCheckTime, final boolean isRecurring,
final boolean skipPastChecks, final ReadablePeriod period, final String cronExpression) {
this.id = id;
this.firstCheckTime = firstCheckTime;
this.timezone = timezone;
this.nextCheckTime = nextCheckTime;
this.isRecurring = isRecurring;
this.skipPastChecks = skipPastChecks;
this.period = period;
this.cronExpression = cronExpression;
this.cronExecutionTime = Utils.parseCronExpression(cronExpression, timezone);
}
public static BasicTimeChecker createFromJson(final Object obj) throws Exception {
return createFromJson((HashMap<String, Object>) obj);
}
public static BasicTimeChecker createFromJson(final HashMap<String, Object> obj)
throws Exception {
    final Map<String, Object> jsonObj = obj;
if (!jsonObj.get("type").equals(type)) {
throw new Exception("Cannot create checker of " + type + " from "
+ jsonObj.get("type"));
}
final Long firstCheckTime = Long.valueOf((String) jsonObj.get("firstCheckTime"));
final String timezoneId = (String) jsonObj.get("timezone");
final long nextCheckTime = Long.valueOf((String) jsonObj.get("nextCheckTime"));
final DateTimeZone timezone = DateTimeZone.forID(timezoneId);
final boolean isRecurring = Boolean.valueOf((String) jsonObj.get("isRecurring"));
final boolean skipPastChecks =
Boolean.valueOf((String) jsonObj.get("skipPastChecks"));
final ReadablePeriod period =
TimeUtils.parsePeriodString((String) jsonObj.get("period"));
final String id = (String) jsonObj.get("id");
final String cronExpression = (String) jsonObj.get("cronExpression");
final BasicTimeChecker checker =
new BasicTimeChecker(id, firstCheckTime, timezone, nextCheckTime,
isRecurring, skipPastChecks, period, cronExpression);
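    // Fast-forward past check times that have already elapsed, e.g. while the
    // checker was persisted.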
if (skipPastChecks) {
checker.updateNextCheckTime();
}
return checker;
}
public long getFirstCheckTime() {
return this.firstCheckTime;
}
public DateTimeZone getTimeZone() {
return this.timezone;
}
public boolean isRecurring() {
return this.isRecurring;
}
public boolean isSkipPastChecks() {
return this.skipPastChecks;
}
public ReadablePeriod getPeriod() {
return this.period;
}
@Override
public long getNextCheckTime() {
return this.nextCheckTime;
}
public String getCronExpression() {
return this.cronExpression;
}
@Override
public Boolean eval() {
return this.nextCheckTime < DateTimeUtils.currentTimeMillis();
}
@Override
public void reset() {
this.nextCheckTime = calculateNextCheckTime();
}
@Override
public String getId() {
return this.id;
}
@Override
public String getType() {
return type;
}
@Override
public BasicTimeChecker fromJson(final Object obj) throws Exception {
return createFromJson(obj);
}
private void updateNextCheckTime() {
this.nextCheckTime = calculateNextCheckTime();
}
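  // Advances from the current nextCheckTime to the first occurrence after now,
  // preferring the cron expression when one is configured and falling back to the
  // fixed period otherwise; bails out after 100000 steps rather than spin on a
  // schedule that never reaches the present.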
private long calculateNextCheckTime() {
DateTime date = new DateTime(this.nextCheckTime).withZone(this.timezone);
int count = 0;
while (!date.isAfterNow()) {
if (count > 100000) {
throw new IllegalStateException(
"100000 increments of period did not get to present time.");
}
if (this.period == null && this.cronExpression == null) {
break;
} else if (this.cronExecutionTime != null) {
final Date nextDate = this.cronExecutionTime.getNextValidTimeAfter(date.toDate());
// Some Cron Expressions possibly do not have follow-up occurrences
if (nextDate != null) {
date = new DateTime(nextDate);
} else {
break;
}
} else {
date = date.plus(this.period);
}
count += 1;
}
return date.getMillis();
}
@Override
public Object getNum() {
return null;
}
@Override
public Object toJson() {
final Map<String, Object> jsonObj = new HashMap<>();
jsonObj.put("type", type);
jsonObj.put("firstCheckTime", String.valueOf(this.firstCheckTime));
jsonObj.put("timezone", this.timezone.getID());
jsonObj.put("nextCheckTime", String.valueOf(this.nextCheckTime));
jsonObj.put("isRecurring", String.valueOf(this.isRecurring));
jsonObj.put("skipPastChecks", String.valueOf(this.skipPastChecks));
jsonObj.put("period", TimeUtils.createPeriodString(this.period));
jsonObj.put("id", this.id);
jsonObj.put("cronExpression", this.cronExpression);
return jsonObj;
}
@Override
  public void stopChecker() {
    // nothing to stop for a purely time-based checker
  }
@Override
public void setContext(final Map<String, Object> context) {
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/trigger
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/trigger/builtin/CreateTriggerAction.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.trigger.builtin;
import azkaban.trigger.Trigger;
import azkaban.trigger.TriggerAction;
import azkaban.trigger.TriggerManager;
import java.util.HashMap;
import java.util.Map;
public class CreateTriggerAction implements TriggerAction {
public static final String type = "CreateTriggerAction";
private static TriggerManager triggerManager;
private final Trigger trigger;
private final String actionId;
private Map<String, Object> context;
public CreateTriggerAction(final String actionId, final Trigger trigger) {
this.actionId = actionId;
this.trigger = trigger;
}
public static void setTriggerManager(final TriggerManager trm) {
triggerManager = trm;
}
public static CreateTriggerAction createFromJson(final Object obj) throws Exception {
final Map<String, Object> jsonObj = (HashMap<String, Object>) obj;
if (!jsonObj.get("type").equals(type)) {
throw new Exception("Cannot create action of " + type + " from "
+ jsonObj.get("type"));
}
final String actionId = (String) jsonObj.get("actionId");
final Trigger trigger = Trigger.fromJson(jsonObj.get("trigger"));
return new CreateTriggerAction(actionId, trigger);
}
@Override
public String getType() {
return type;
}
@Override
public CreateTriggerAction fromJson(final Object obj) throws Exception {
return createFromJson(obj);
}
@Override
public Object toJson() {
final Map<String, Object> jsonObj = new HashMap<>();
jsonObj.put("actionId", this.actionId);
jsonObj.put("type", type);
jsonObj.put("trigger", this.trigger.toJson());
return jsonObj;
}
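  // Registers the wrapped trigger with the statically injected TriggerManager.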
@Override
public void doAction() throws Exception {
triggerManager.insertTrigger(this.trigger);
}
@Override
public String getDescription() {
return "create another: " + this.trigger.getDescription();
}
@Override
public String getId() {
return this.actionId;
}
@Override
public void setContext(final Map<String, Object> context) {
this.context = context;
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/trigger
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/trigger/builtin/ExecuteFlowAction.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.trigger.builtin;
import azkaban.executor.ExecutableFlow;
import azkaban.executor.ExecutionOptions;
import azkaban.executor.ExecutorManagerAdapter;
import azkaban.flow.Flow;
import azkaban.flow.FlowUtils;
import azkaban.project.Project;
import azkaban.project.ProjectManager;
import azkaban.sla.SlaOption;
import azkaban.trigger.TriggerAction;
import azkaban.trigger.TriggerManager;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.log4j.Logger;
public class ExecuteFlowAction implements TriggerAction {
public static final String type = "ExecuteFlowAction";
public static final String EXEC_ID = "ExecuteFlowAction.execid";
private static ExecutorManagerAdapter executorManagerAdapter;
private static TriggerManager triggerManager;
private static ProjectManager projectManager;
private static Logger logger = Logger.getLogger(ExecuteFlowAction.class);
private final String actionId;
private final String projectName;
private int projectId;
private String flowName;
private String submitUser;
private ExecutionOptions executionOptions = new ExecutionOptions();
public ExecuteFlowAction(final String actionId, final int projectId, final String projectName,
final String flowName, final String submitUser, final ExecutionOptions executionOptions) {
this.actionId = actionId;
this.projectId = projectId;
this.projectName = projectName;
this.flowName = flowName;
this.submitUser = submitUser;
this.executionOptions = executionOptions;
}
public static void setLogger(final Logger logger) {
ExecuteFlowAction.logger = logger;
}
public static ExecutorManagerAdapter getExecutorManager() {
return executorManagerAdapter;
}
public static void setExecutorManager(
final ExecutorManagerAdapter executorManagerAdapter) {
ExecuteFlowAction.executorManagerAdapter = executorManagerAdapter;
}
public static TriggerManager getTriggerManager() {
return triggerManager;
}
public static void setTriggerManager(final TriggerManager triggerManager) {
ExecuteFlowAction.triggerManager = triggerManager;
}
public static ProjectManager getProjectManager() {
return projectManager;
}
public static void setProjectManager(final ProjectManager projectManager) {
ExecuteFlowAction.projectManager = projectManager;
}
public static TriggerAction createFromJson(final HashMap<String, Object> obj) {
    final Map<String, Object> jsonObj = obj;
final String objType = (String) jsonObj.get("type");
if (!objType.equals(type)) {
throw new RuntimeException("Cannot create action of " + type + " from "
+ objType);
}
final String actionId = (String) jsonObj.get("actionId");
final int projectId = Integer.valueOf((String) jsonObj.get("projectId"));
final String projectName = (String) jsonObj.get("projectName");
final String flowName = (String) jsonObj.get("flowName");
final String submitUser = (String) jsonObj.get("submitUser");
    ExecutionOptions executionOptions = null;
    if (jsonObj.containsKey("executionOptions")) {
      executionOptions =
          ExecutionOptions.createFromObject(jsonObj.get("executionOptions"));
    }
    if (jsonObj.containsKey("slaOptions")) {
      // Guard against payloads that carry slaOptions without executionOptions,
      // which previously caused a NullPointerException below.
      if (executionOptions == null) {
        executionOptions = new ExecutionOptions();
      }
      final ArrayList<SlaOption> slaOptions = new ArrayList<>();
      final List<Object> slaOptionsObj = (List<Object>) jsonObj.get("slaOptions");
      for (final Object slaObj : slaOptionsObj) {
        slaOptions.add(SlaOption.fromObject(slaObj));
      }
      executionOptions.setSlaOptions(slaOptions);
    }
return new ExecuteFlowAction(actionId, projectId, projectName, flowName,
submitUser, executionOptions);
}
public String getProjectName() {
return this.projectName;
}
public int getProjectId() {
return this.projectId;
}
protected void setProjectId(final int projectId) {
this.projectId = projectId;
}
public String getFlowName() {
return this.flowName;
}
protected void setFlowName(final String flowName) {
this.flowName = flowName;
}
public String getSubmitUser() {
return this.submitUser;
}
protected void setSubmitUser(final String submitUser) {
this.submitUser = submitUser;
}
public ExecutionOptions getExecutionOptions() {
return this.executionOptions;
}
protected void setExecutionOptions(final ExecutionOptions executionOptions) {
this.executionOptions = executionOptions;
}
@Override
public String getType() {
return type;
}
@Override
public TriggerAction fromJson(final Object obj) {
return createFromJson((HashMap<String, Object>) obj);
}
@Override
public Object toJson() {
final Map<String, Object> jsonObj = new HashMap<>();
jsonObj.put("actionId", this.actionId);
jsonObj.put("type", type);
jsonObj.put("projectId", String.valueOf(this.projectId));
jsonObj.put("projectName", this.projectName);
jsonObj.put("flowName", this.flowName);
jsonObj.put("submitUser", this.submitUser);
    if (this.executionOptions != null) {
      jsonObj.put("executionOptions", this.executionOptions.toObject());
      // slaOptions lives on executionOptions, so only serialize it inside the
      // null check; the old code dereferenced executionOptions unconditionally
      final List<Object> slaOptionsObj =
          SlaOption.convertToObjects(this.executionOptions.getSlaOptions());
      if (slaOptionsObj != null) {
        jsonObj.put("slaOptions", slaOptionsObj);
      }
    }
return jsonObj;
}
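  // Resolves the project and flow, fills in default failure/success emails when
  // they are not overridden, and submits the executable flow as submitUser.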
@Override
public void doAction() throws Exception {
if (projectManager == null || executorManagerAdapter == null) {
throw new Exception("ExecuteFlowAction not properly initialized!");
}
final Project project = FlowUtils.getProject(projectManager, this.projectId);
final Flow flow = FlowUtils.getFlow(project, this.flowName);
final ExecutableFlow exflow = FlowUtils.createExecutableFlow(project, flow);
exflow.setSubmitUser(this.submitUser);
if (this.executionOptions == null) {
this.executionOptions = new ExecutionOptions();
}
if (!this.executionOptions.isFailureEmailsOverridden()) {
this.executionOptions.setFailureEmails(flow.getFailureEmails());
}
if (!this.executionOptions.isSuccessEmailsOverridden()) {
this.executionOptions.setSuccessEmails(flow.getSuccessEmails());
}
exflow.setExecutionOptions(this.executionOptions);
logger.info("Invoking flow " + project.getName() + "." + this.flowName);
executorManagerAdapter.submitExecutableFlow(exflow, this.submitUser);
logger.info("Invoked flow " + project.getName() + "." + this.flowName);
}
@Override
public String getDescription() {
return "Execute flow " + getFlowName() + " from project "
+ getProjectName();
}
@Override
public void setContext(final Map<String, Object> context) {
}
@Override
public String getId() {
return this.actionId;
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/trigger
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/trigger/builtin/ExecutionChecker.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.trigger.builtin;
import azkaban.executor.ExecutableFlow;
import azkaban.executor.ExecutableNode;
import azkaban.executor.ExecutorManagerAdapter;
import azkaban.executor.ExecutorManagerException;
import azkaban.executor.Status;
import azkaban.trigger.ConditionChecker;
import java.util.HashMap;
import java.util.Map;
import org.apache.log4j.Logger;
public class ExecutionChecker implements ConditionChecker {
public static final String type = "ExecutionChecker";
  public static ExecutorManagerAdapter executorManagerAdapter;
  private static final Logger logger = Logger.getLogger(ExecutionChecker.class);
private final String checkerId;
private final int execId;
private final String jobName;
private final Status wantedStatus;
public ExecutionChecker(final String checkerId, final int execId, final String jobName,
final Status wantedStatus) {
this.checkerId = checkerId;
this.execId = execId;
this.jobName = jobName;
this.wantedStatus = wantedStatus;
}
public static void setExecutorManager(final ExecutorManagerAdapter em) {
executorManagerAdapter = em;
}
public static ExecutionChecker createFromJson(final HashMap<String, Object> jsonObj)
throws Exception {
if (!jsonObj.get("type").equals(type)) {
throw new Exception("Cannot create checker of " + type + " from "
+ jsonObj.get("type"));
}
final int execId = Integer.valueOf((String) jsonObj.get("execId"));
String jobName = null;
if (jsonObj.containsKey("jobName")) {
jobName = (String) jsonObj.get("jobName");
}
final String checkerId = (String) jsonObj.get("checkerId");
final Status wantedStatus = Status.valueOf((String) jsonObj.get("wantedStatus"));
return new ExecutionChecker(checkerId, execId, jobName, wantedStatus);
}
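  // True when the referenced execution (or, if jobName is set, that job within it)
  // has reached the wanted status; false when the flow cannot be fetched.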
@Override
public Object eval() {
final ExecutableFlow exflow;
try {
exflow = executorManagerAdapter.getExecutableFlow(this.execId);
} catch (final ExecutorManagerException e) {
      // log the failure with its stack trace instead of printing to stderr
      logger.error("Failed to fetch executable flow " + this.execId, e);
return Boolean.FALSE;
}
if (this.jobName != null) {
final ExecutableNode job = exflow.getExecutableNode(this.jobName);
if (job != null) {
return job.getStatus().equals(this.wantedStatus);
} else {
return Boolean.FALSE;
}
} else {
return exflow.getStatus().equals(this.wantedStatus);
}
}
@Override
public Object getNum() {
return null;
}
@Override
public void reset() {
}
@Override
public String getId() {
return this.checkerId;
}
@Override
public String getType() {
return type;
}
@Override
public ConditionChecker fromJson(final Object obj) throws Exception {
return createFromJson((HashMap<String, Object>) obj);
}
@Override
public Object toJson() {
final Map<String, Object> jsonObj = new HashMap<>();
jsonObj.put("type", type);
jsonObj.put("execId", String.valueOf(this.execId));
if (this.jobName != null) {
jsonObj.put("jobName", this.jobName);
}
jsonObj.put("wantedStatus", this.wantedStatus.toString());
jsonObj.put("checkerId", this.checkerId);
return jsonObj;
}
@Override
public void stopChecker() {
}
@Override
public void setContext(final Map<String, Object> context) {
}
@Override
public long getNextCheckTime() {
return -1;
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/trigger
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/trigger/builtin/KillExecutionAction.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.trigger.builtin;
import azkaban.Constants;
import azkaban.executor.ExecutableFlow;
import azkaban.executor.ExecutorManagerAdapter;
import azkaban.executor.Status;
import azkaban.trigger.TriggerAction;
import java.util.HashMap;
import java.util.Map;
import org.apache.log4j.Logger;
/**
 * @deprecated Create a new KillExecutionAction using FlowRunnerManager instead of ExecutorManager
 * to kill flows. The old implementation is kept here for compatibility with existing SLA triggers
 * in the database and will be removed once all of those triggers have expired.
*/
@Deprecated
public class KillExecutionAction implements TriggerAction {
public static final String type = "KillExecutionAction";
private static final Logger logger = Logger
.getLogger(KillExecutionAction.class);
private static ExecutorManagerAdapter executorManagerAdapter;
private final String actionId;
private final int execId;
  //todo chengren311: move this class to the executor module when all existing triggers in db are expired
public KillExecutionAction(final String actionId, final int execId) {
this.execId = execId;
this.actionId = actionId;
}
public static void setExecutorManager(final ExecutorManagerAdapter em) {
executorManagerAdapter = em;
}
public static KillExecutionAction createFromJson(final Object obj) {
return createFromJson((HashMap<String, Object>) obj);
}
public static KillExecutionAction createFromJson(final HashMap<String, Object> obj) {
    final Map<String, Object> jsonObj = obj;
final String objType = (String) jsonObj.get("type");
if (!objType.equals(type)) {
throw new RuntimeException("Cannot create action of " + type + " from "
+ objType);
}
final String actionId = (String) jsonObj.get("actionId");
final int execId = Integer.valueOf((String) jsonObj.get("execId"));
return new KillExecutionAction(actionId, execId);
}
@Override
public String getId() {
return this.actionId;
}
@Override
public String getType() {
return type;
}
@Override
public KillExecutionAction fromJson(final Object obj) throws Exception {
return createFromJson((HashMap<String, Object>) obj);
}
@Override
public Object toJson() {
final Map<String, Object> jsonObj = new HashMap<>();
jsonObj.put("actionId", this.actionId);
jsonObj.put("type", type);
jsonObj.put("execId", String.valueOf(this.execId));
return jsonObj;
}
@Override
public void doAction() throws Exception {
final ExecutableFlow exFlow = executorManagerAdapter.getExecutableFlow(this.execId);
logger.info("ready to kill execution " + this.execId);
if (!Status.isStatusFinished(exFlow.getStatus())) {
logger.info("Killing execution " + this.execId);
executorManagerAdapter.cancelFlow(exFlow, Constants.AZKABAN_SLA_CHECKER_USERNAME);
}
}
@Override
public void setContext(final Map<String, Object> context) {
}
@Override
public String getDescription() {
return type + " for " + this.execId;
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/trigger
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/trigger/builtin/SlaAlertAction.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.trigger.builtin;
import azkaban.ServiceProvider;
import azkaban.alert.Alerter;
import azkaban.executor.AlerterHolder;
import azkaban.executor.ExecutableFlow;
import azkaban.executor.ExecutorLoader;
import azkaban.sla.SlaOption;
import azkaban.trigger.TriggerAction;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.log4j.Logger;
public class SlaAlertAction implements TriggerAction {
public static final String type = "AlertAction";
private static final Logger logger = Logger.getLogger(SlaAlertAction.class);
private final String actionId;
private final SlaOption slaOption;
private final int execId;
private final AlerterHolder alerters;
private final ExecutorLoader executorLoader;
//todo chengren311: move this class to executor module when all existing triggers in db are expired
public SlaAlertAction(final String id, final SlaOption slaOption, final int execId) {
this.actionId = id;
this.slaOption = slaOption;
this.execId = execId;
this.alerters = ServiceProvider.SERVICE_PROVIDER.getInstance(AlerterHolder.class);
this.executorLoader = ServiceProvider.SERVICE_PROVIDER.getInstance(ExecutorLoader.class);
}
public static SlaAlertAction createFromJson(final Object obj) throws Exception {
return createFromJson((HashMap<String, Object>) obj);
}
public static SlaAlertAction createFromJson(final HashMap<String, Object> obj)
throws Exception {
    final Map<String, Object> jsonObj = obj;
if (!jsonObj.get("type").equals(type)) {
throw new Exception("Cannot create action of " + type + " from "
+ jsonObj.get("type"));
}
final String actionId = (String) jsonObj.get("actionId");
    // TODO edlu: is this being written? Handle both old and new formats, when written in new
    // format
    final SlaOption slaOption = SlaOption.fromObject(jsonObj.get("slaOption"));
final int execId = Integer.valueOf((String) jsonObj.get("execId"));
return new SlaAlertAction(actionId, slaOption, execId);
}
@Override
public String getId() {
return this.actionId;
}
@Override
public String getType() {
return type;
}
@Override
public TriggerAction fromJson(final Object obj) throws Exception {
return createFromJson(obj);
}
@Override
public Object toJson() {
final Map<String, Object> jsonObj = new HashMap<>();
jsonObj.put("actionId", this.actionId);
jsonObj.put("type", type);
    // TODO edlu: keeping the old format for now, upgrade to new format.
    // Use the same key that createFromJson reads ("slaOption"); the old
    // "slaAction" key could never be deserialized back.
    jsonObj.put("slaOption", this.slaOption.toObject());
jsonObj.put("execId", String.valueOf(this.execId));
return jsonObj;
}
@Override
public void doAction() throws Exception {
logger.info("Alerting on sla failure.");
if (slaOption.hasAlert()) {
final Alerter alerter = this.alerters.get(SlaOption.ALERT_TYPE_EMAIL);
if (alerter != null) {
try {
final ExecutableFlow flow = this.executorLoader.fetchExecutableFlow(this.execId);
alerter.alertOnSla(this.slaOption, slaOption.createSlaMessage(flow));
        } catch (final Exception e) {
          // pass the exception so the stack trace ends up in the log
          logger.error("Failed to alert by " + SlaOption.ALERT_TYPE_EMAIL, e);
        }
}
} else {
logger.error("Alerter type " + SlaOption.ALERT_TYPE_EMAIL
+ " doesn't exist. Failed to alert.");
}
}
}
@Override
public void setContext(final Map<String, Object> context) {
}
@Override
public String getDescription() {
return type + " for " + this.execId + " with " + this.slaOption.toString();
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/trigger
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/trigger/builtin/SlaChecker.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.trigger.builtin;
import azkaban.ServiceProvider;
import azkaban.executor.ExecutableFlow;
import azkaban.executor.ExecutableNode;
import azkaban.executor.ExecutorLoader;
import azkaban.executor.ExecutorManagerException;
import azkaban.executor.Status;
import azkaban.sla.SlaOption;
import azkaban.sla.SlaType;
import azkaban.sla.SlaType.ComponentType;
import azkaban.sla.SlaType.StatusType;
import azkaban.trigger.ConditionChecker;
import java.util.HashMap;
import java.util.Map;
import org.apache.log4j.Logger;
import org.joda.time.DateTime;
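// Illustrative sketch (not part of the original source): a trigger framework would typically
// poll eval() until it returns true and then fire the associated SLA alert action. The id and
// exec id below are hypothetical.
//   final SlaChecker checker = new SlaChecker("sla-check-1", slaOption, 12345);
//   if ((Boolean) checker.eval()) {
//     // SLA missed (or flow lookup failed): fire the alert action.
//   }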
public class SlaChecker implements ConditionChecker {
public static final String type = "SlaChecker";
private static final Logger logger = Logger.getLogger(SlaChecker.class);
private final String id;
private final SlaOption slaOption;
private final int execId;
private final ExecutorLoader executorLoader;
private long checkTime = -1;
//todo chengren311: move this class to executor module when all existing triggers in db are expired
public SlaChecker(final String id, final SlaOption slaOption, final int execId) {
this.id = id;
this.slaOption = slaOption;
this.execId = execId;
this.executorLoader = ServiceProvider.SERVICE_PROVIDER.getInstance(ExecutorLoader.class);
}
public static SlaChecker createFromJson(final Object obj) throws Exception {
return createFromJson((HashMap<String, Object>) obj);
}
public static SlaChecker createFromJson(final HashMap<String, Object> obj)
throws Exception {
if (!obj.get("type").equals(type)) {
throw new Exception("Cannot create checker of " + type + " from "
+ obj.get("type"));
}
final String id = (String) obj.get("id");
final SlaOption slaOption = SlaOption.fromObject(obj.get("slaOption"));
final int execId = Integer.valueOf((String) obj.get("execId"));
return new SlaChecker(id, slaOption, execId);
}
private Boolean isSlaMissed(final ExecutableFlow flow) {
final SlaType type = slaOption.getType();
logger.info("SLA type for flow " + flow.getId() + " is " + type);
if (flow.getStartTime() < 0) {
logger.info("Start time is less than 0 for flow " + flow.getId());
return false;
}
Status status;
if (type.getComponent() == SlaType.ComponentType.FLOW) {
logger.info("SLA type is flow.");
if (this.checkTime < flow.getStartTime()) {
logger.info("checktime = " + this.checkTime);
logger.info("SLA duration = " + slaOption.getDuration().toMillis() + " ms");
this.checkTime = flow.getStartTime() + slaOption.getDuration().toMillis();
logger.info("checktime updated to " + this.checkTime);
}
status = flow.getStatus();
logger.info("Flow status = " + status.toString());
} else { // JOB
final ExecutableNode node = flow.getExecutableNode(slaOption.getJobName());
if (node.getStartTime() < 0) {
return false;
}
if (this.checkTime < node.getStartTime()) {
this.checkTime = node.getStartTime() + slaOption.getDuration().toMillis();
}
status = node.getStatus();
}
if (this.checkTime < DateTime.now().getMillis()) {
switch (slaOption.getType()) {
case FLOW_FINISH:
logger.info("isFlowFinished?");
return !isFlowFinished(status);
case FLOW_SUCCEED:
logger.info("isFlowSucceeded?");
return !isFlowSucceeded(status);
case JOB_FINISH:
return !isJobFinished(status);
case JOB_SUCCEED:
return !isJobSucceeded(status);
}
} else if (slaOption.getType().getStatus() == StatusType.SUCCEED) {
logger.info("slaOption.status = SUCCEED and status = " + status.toString());
return (status == Status.FAILED || status == Status.KILLED);
}
return false;
}
private Boolean isSlaGood(final ExecutableFlow flow) {
final SlaType type = this.slaOption.getType();
if (flow.getStartTime() < 0) {
return false;
}
Status status;
if (type.getComponent() == ComponentType.FLOW) {
if (this.checkTime < flow.getStartTime()) {
this.checkTime = flow.getStartTime() + this.slaOption.getDuration().toMillis();
}
status = flow.getStatus();
} else { // JOB
final String jobName = this.slaOption.getJobName();
final ExecutableNode node = flow.getExecutableNode(jobName);
if (node.getStartTime() < 0) {
return false;
}
if (this.checkTime < node.getStartTime()) {
this.checkTime = node.getStartTime() + slaOption.getDuration().toMillis();
}
status = node.getStatus();
}
switch (type) {
case FLOW_FINISH:
return isFlowFinished(status);
case FLOW_SUCCEED:
return isFlowSucceeded(status);
case JOB_FINISH:
return isJobFinished(status);
case JOB_SUCCEED:
return isJobSucceeded(status);
}
return false;
}
// return true to trigger sla action
@Override
public Object eval() {
logger.info("Checking sla for execution " + this.execId);
final ExecutableFlow flow;
try {
flow = this.executorLoader.fetchExecutableFlow(this.execId);
} catch (final ExecutorManagerException e) {
logger.error("Can't get executable flow.", e);
e.printStackTrace();
// something wrong, send out alerts
return true;
}
return isSlaMissed(flow);
}
public Object isSlaFailed() {
final ExecutableFlow flow;
try {
flow = this.executorLoader.fetchExecutableFlow(this.execId);
logger.info("Flow for execid " + this.execId + " is " + flow.getId());
} catch (final ExecutorManagerException e) {
logger.error("Can't get executable flow.", e);
// something wrong, send out alerts
return true;
}
return isSlaMissed(flow);
}
public Object isSlaPassed() {
final ExecutableFlow flow;
try {
flow = this.executorLoader.fetchExecutableFlow(this.execId);
} catch (final ExecutorManagerException e) {
logger.error("Can't get executable flow.", e);
// something wrong, send out alerts
return true;
}
return isSlaGood(flow);
}
@Override
public Object getNum() { return null; }
@Override
public void reset() { }
@Override
public String getId() { return id; }
@Override
public String getType() { return type; }
@Override
public ConditionChecker fromJson(final Object obj) throws Exception {
return createFromJson(obj);
}
@Override
public Object toJson() {
final Map<String, Object> jsonObj = new HashMap<>();
jsonObj.put("type", type);
jsonObj.put("id", id);
// TODO edlu: is this stored in db? Can we convert to the new format?
jsonObj.put("slaOption", this.slaOption.toObject());
jsonObj.put("execId", String.valueOf(this.execId));
return jsonObj;
}
@Override
public void stopChecker() { }
@Override
public void setContext(final Map<String, Object> context) { }
@Override
public long getNextCheckTime() { return this.checkTime; }
private boolean isFlowFinished(final Status status) {
return status.equals(Status.FAILED) || status.equals(Status.KILLED)
|| status.equals(Status.SUCCEEDED);
}
private boolean isFlowSucceeded(final Status status) {
return status.equals(Status.SUCCEEDED);
}
private boolean isJobFinished(final Status status) {
return status.equals(Status.FAILED) || status.equals(Status.KILLED)
|| status.equals(Status.SUCCEEDED);
}
private boolean isJobSucceeded(final Status status) {
return status.equals(Status.SUCCEEDED);
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/user/FileWatcher.java
|
/*
* Copyright 2019 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.user;
import com.sun.nio.file.SensitivityWatchEventModifier;
import java.io.IOException;
import java.nio.file.FileSystems;
import java.nio.file.Path;
import java.nio.file.StandardWatchEventKinds;
import java.nio.file.WatchEvent;
import java.nio.file.WatchKey;
import java.nio.file.WatchService;
import java.util.List;
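// Illustrative usage sketch (not part of the original source): register a directory and drain
// modification events in a loop. The path is hypothetical; Paths is java.nio.file.Paths, and
// the constructor/take() throw IOException/InterruptedException (handling omitted here).
//   final FileWatcher watcher = new FileWatcher();
//   watcher.register(Paths.get("/etc/azkaban"));
//   while (true) {
//     final WatchKey key = watcher.take(); // blocks; sleeps 1s to coalesce duplicate events
//     for (final WatchEvent<?> event : watcher.pollEvents(key)) {
//       // reload the modified file
//     }
//   }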
public class FileWatcher {
private final WatchService watchService;
private final SensitivityWatchEventModifier sensitivity;
public FileWatcher() throws IOException {
this(SensitivityWatchEventModifier.MEDIUM);
}
public FileWatcher(final SensitivityWatchEventModifier sensitivity) throws IOException {
this.sensitivity = sensitivity;
this.watchService = FileSystems.getDefault().newWatchService();
}
public WatchKey register(final Path dir) throws IOException {
return dir.register(this.watchService,
new WatchEvent.Kind[]{StandardWatchEventKinds.ENTRY_MODIFY},
this.sensitivity);
}
public void close() throws IOException {
this.watchService.close();
}
public WatchKey take() throws InterruptedException {
final WatchKey key = this.watchService.take();
// Wait for a second to ensure there is only one event for a modification.
// For a file update, WatchService creates two ENTRY_MODIFY events, 1 for content and 1
// for modification time.
// Adding the sleep consolidates both the events into one with a count of 2 which
// avoids multiple reloads of same file.
// One second seems excessive; however, these events happen infrequently and it is
// more important that the config reloads successfully than immediately.
// If there is any modification happening to file(s) in the meantime, it is all queued up
// in the watch service.
Thread.sleep(1000L);
return key;
}
public List<WatchEvent<?>> pollEvents(final WatchKey key) {
try {
return key.pollEvents();
} finally {
// continue listening – without reset() this key wouldn't be returned by take() any more
key.reset();
}
}
@FunctionalInterface
public interface FileWatcherFactory {
FileWatcher get() throws IOException;
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/user/ParseConfigFile.java
|
/*
* Copyright 2019 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.user;
/**
* Lambda interface for parsing user config file.
*/
public interface ParseConfigFile {
void parseConfigFile();
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/user/Permission.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.user;
import azkaban.utils.Utils;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
public class Permission {
private final Set<Type> permissions = new HashSet<>();
public Permission() {
}
public Permission(final int flags) {
setPermissions(flags);
}
public Permission(final Type... list) {
addPermission(list);
}
public void addPermissions(final Permission perm) {
this.permissions.addAll(perm.getTypes());
}
public void setPermission(final Type type, final boolean set) {
if (set) {
addPermission(type);
} else {
removePermissions(type);
}
}
public void setPermissions(final int flags) {
this.permissions.clear();
if ((flags & Type.ADMIN.getFlag()) != 0) {
addPermission(Type.ADMIN);
} else {
for (final Type type : Type.values()) {
if ((flags & type.getFlag()) != 0) {
addPermission(type);
}
}
}
}
public void addPermission(final Type... list) {
// Admin is all encompassing permission. No need to add other types
if (!this.permissions.contains(Type.ADMIN)) {
for (final Type perm : list) {
this.permissions.add(perm);
}
// We add everything, and if there's Admin left, we make sure that only
// Admin is remaining.
if (this.permissions.contains(Type.ADMIN)) {
this.permissions.clear();
this.permissions.add(Type.ADMIN);
}
}
}
public void addPermissionsByName(final String... list) {
for (final String perm : list) {
final Type type = Type.valueOf(perm);
if (type != null) {
addPermission(type);
}
}
}
public void addPermissions(final Collection<Type> list) {
for (final Type perm : list) {
addPermission(perm);
}
}
public void addPermissionsByName(final Collection<String> list) {
for (final String perm : list) {
final Type type = Type.valueOf(perm);
if (type != null) {
addPermission(type);
}
}
}
public Set<Type> getTypes() {
return this.permissions;
}
public void removePermissions(final Type... list) {
for (final Type perm : list) {
this.permissions.remove(perm);
}
}
public void removePermissionsByName(final String... list) {
for (final String perm : list) {
final Type type = Type.valueOf(perm);
if (type != null) {
this.permissions.remove(type);
}
}
}
public boolean isPermissionSet(final Type permission) {
return this.permissions.contains(permission);
}
public boolean isPermissionNameSet(final String permission) {
return this.permissions.contains(Type.valueOf(permission));
}
public String[] toStringArray() {
final ArrayList<String> list = new ArrayList<>();
for (final Type type : this.permissions) {
list.add(type.toString());
}
return list.toArray(new String[list.size()]);
}
@Override
public String toString() {
return Utils.flattenToString(this.permissions, ",");
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result =
prime * result + ((this.permissions == null) ? 0 : this.permissions.hashCode());
return result;
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
final Permission other = (Permission) obj;
if (this.permissions == null) {
if (other.permissions != null) {
return false;
}
} else if (!this.permissions.equals(other.permissions)) {
return false;
}
return true;
}
public int toFlags() {
int flag = 0;
for (final Type type : this.permissions) {
flag |= type.getFlag();
}
return flag;
}
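// Illustrative sketch (not part of the original source): permissions round-trip through the
// bitmask produced by toFlags(), using the flag constants defined on Type below.
//   final Permission perm = new Permission(Type.READ, Type.EXECUTE);
//   final int flags = perm.toFlags();              // 0x1 | 0x4 == 0x5
//   final Permission copy = new Permission(flags); // contains READ and EXECUTE again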
public enum Type {
READ(0x0000001),
WRITE(0x0000002),
EXECUTE(0x0000004),
SCHEDULE(0x0000008),
METRICS(0x0000010),
CREATEPROJECTS(0x40000000), // Only used for roles
// Users with this permission can upload projects when the property "lockdown.upload.projects"
// is turned on
UPLOADPROJECTS(0x0008000),
ADMIN(0x8000000);
private final int numVal;
Type(final int numVal) {
this.numVal = numVal;
}
public int getFlag() {
return this.numVal;
}
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/user/Role.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.user;
public class Role {
private final String name;
private final Permission globalPermission;
public Role(final String name, final Permission permission) {
this.name = name;
this.globalPermission = permission;
}
public Permission getPermission() {
return this.globalPermission;
}
public String getName() {
return this.name;
}
@Override
public String toString() {
return "Role " + this.name;
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/user/User.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.user;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
public class User {
private final String userid;
private final Set<String> roles = new HashSet<>();
private final Set<String> groups = new HashSet<>();
private final HashMap<String, String> properties = new HashMap<>();
private String email = "";
private UserPermissions userPermissions;
public User(final String userid) {
this.userid = userid;
}
public String getUserId() {
return this.userid;
}
public String getEmail() {
return this.email;
}
public void setEmail(final String email) {
this.email = email;
}
public UserPermissions getPermissions() {
return this.userPermissions;
}
public void setPermissions(final UserPermissions checker) {
this.userPermissions = checker;
}
public boolean hasPermission(final String permission) {
if (this.userPermissions == null) {
return false;
}
return this.userPermissions.hasPermission(permission);
}
public List<String> getGroups() {
return new ArrayList<>(this.groups);
}
public void clearGroup() {
this.groups.clear();
}
public void addGroup(final String name) {
this.groups.add(name);
}
public boolean isInGroup(final String group) {
return this.groups.contains(group);
}
public List<String> getRoles() {
return new ArrayList<>(this.roles);
}
public void addRole(final String role) {
this.roles.add(role);
}
public boolean hasRole(final String role) {
return this.roles.contains(role);
}
public String getProperty(final String name) {
return this.properties.get(name);
}
@Override
public String toString() {
return this.userid + ": [" + String.join(",", this.groups) + "]";
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((this.userid == null) ? 0 : this.userid.hashCode());
return result;
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
final User other = (User) obj;
if (this.userid == null) {
if (other.userid != null) {
return false;
}
} else if (!this.userid.equals(other.userid)) {
return false;
}
return true;
}
public interface UserPermissions {
boolean hasPermission(String permission);
void addPermission(String permission);
}
public static class DefaultUserPermission implements UserPermissions {
Set<String> permissions;
public DefaultUserPermission() {
this(new HashSet<>());
}
public DefaultUserPermission(final Set<String> permissions) {
this.permissions = permissions;
}
@Override
public boolean hasPermission(final String permission) {
return this.permissions.contains(permission);
}
@Override
public void addPermission(final String permission) {
this.permissions.add(permission);
}
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/user/UserManager.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.user;
/**
* Interface for the UserManager. Implementors will have to handle the retrieval of the User object
* given the username and password.
*
* The constructor will be called with an azkaban.utils.Props object passed as the only parameter.
* If such a constructor doesn't exist, then the UserManager instantiation may fail.
*/
public interface UserManager {
/**
* Retrieves the user given the username and password to authenticate against.
*
* @throws UserManagerException If the username/password combination doesn't exist.
*/
public User getUser(String username, String password)
throws UserManagerException;
/**
* Returns true if the user is valid. This is used when adding permissions for users
*/
public boolean validateUser(String username);
/**
* Returns true if the group is valid. This is used when adding permissions for groups.
*/
public boolean validateGroup(String group);
/**
* Returns the user role. This may return null.
*/
public Role getRole(String roleName);
public boolean validateProxyUser(String proxyUser, User realUser);
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/user/UserManagerException.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.user;
/**
* Exception for the UserManager to capture login errors.
*/
public class UserManagerException extends Exception {
private static final long serialVersionUID = 1L;
public UserManagerException(final String message) {
super(message);
}
public UserManagerException(final String message, final Throwable cause) {
super(message, cause);
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/user/UserUtils.java
|
package azkaban.user;
import com.google.common.base.Preconditions;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.WatchEvent;
import java.nio.file.WatchKey;
import java.util.HashMap;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public final class UserUtils {
private static final Logger log = LoggerFactory.getLogger(UserUtils.class);
private UserUtils() {
}
/**
* @return true if the given user is an ADMIN, or if the user has the required permission
* for the requested action.
*/
public static boolean hasPermissionforAction(final UserManager userManager, final User user,
final Permission.Type type) {
for (final String roleName : user.getRoles()) {
final Role role = userManager.getRole(roleName);
final Permission perm = role.getPermission();
if (perm.isPermissionSet(Permission.Type.ADMIN) || perm.isPermissionSet(type)) {
return true;
}
}
return false;
}
/**
* Creates a watch thread which listens to specified files' modification and reloads
* configurations
*/
public static void setupWatch(final Map<String, ParseConfigFile> configFileMap,
final FileWatcher watcher) {
Preconditions.checkNotNull(configFileMap);
Preconditions.checkArgument(configFileMap.size() > 0);
// Map to store WatchKey to Dir mapping
final Map<WatchKey, Path> keys = new HashMap<>();
// A directory to config files multimap
final Multimap<Path, String> dirToFilesMap = HashMultimap.create();
// Iterate over each file.
for (final Map.Entry<String, ParseConfigFile> entry : configFileMap.entrySet()) {
final String fileName = entry.getKey();
final ParseConfigFile parser = entry.getValue();
Preconditions.checkNotNull(fileName);
Preconditions.checkNotNull(parser);
final File file = new File(fileName);
if (!file.exists()) {
log.warn("Failed to setup watch service, user provided file " + fileName + " does not "
+ "exist.");
continue;
}
try {
final Path dir = Paths.get(fileName).getParent();
if (!dirToFilesMap.containsKey(dir)) {
// There is no entry for this directory, create a watchkey
final WatchKey watchKey = watcher.register(dir);
keys.put(watchKey, dir);
}
// Add the config file to dir map
dirToFilesMap.put(dir, fileName);
} catch (final IOException e) {
// Ignore the IOException
log.warn("IOException while setting up watch on conf " + fileName + ". ", e);
}
}
// Return if WatchService is not initialized
if (keys.isEmpty()) {
log.warn("WatchService was not set up for any config file(s).");
try {
watcher.close();
} catch (final IOException e) {
log.warn("IOException while closing watchService. ", e);
}
return;
}
final Runnable runnable = () -> {
// Watchservice is established, now listen for the events till eternity!
for (; ; ) {
final WatchKey watchKey;
try {
watchKey = watcher.take();
} catch (final InterruptedException ie) {
log.warn(ie.toString());
Thread.currentThread().interrupt();
return;
}
// Get the directory for which watch service event triggered.
final Path dir = keys.get(watchKey);
for (final WatchEvent<?> event : watcher.pollEvents(watchKey)) {
// Make sure the modification happened to user config file
@SuppressWarnings("unchecked") final Path name = ((WatchEvent<Path>) event).context();
final String filename = dir.resolve(name).toString();
// Lookup the file in dirToFilesMap
if (!dirToFilesMap.containsEntry(dir, filename)) {
continue;
}
// Match!
// Reparse the config file
log.info("Modification detected, reloading config file " + filename + ".");
try {
configFileMap.get(filename).parseConfigFile();
} catch (final Exception e) {
// If there is any exception while parsing the config file, log it and move on
log.warn("Reload failed for config file " + filename + " due to ", e);
}
}
}
};
final Thread thread = new Thread(runnable);
// allow JVM to terminate without waiting for this thread if the app is shutting down
thread.setDaemon(true);
log.info("Starting configuration watching thread.");
thread.start();
}
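// Illustrative sketch (not part of the original source): wire a config file to its reload
// callback and start the watch thread. The path and the reloadUsers() callback are
// hypothetical; FileWatcher() may throw IOException.
//   final Map<String, ParseConfigFile> files = new HashMap<>();
//   files.put("/etc/azkaban/azkaban-users.xml", () -> reloadUsers());
//   UserUtils.setupWatch(files, new FileWatcher());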
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/user/XmlUserManager.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.user;
import azkaban.user.FileWatcher.FileWatcherFactory;
import azkaban.user.User.UserPermissions;
import azkaban.utils.Props;
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.w3c.dom.NamedNodeMap;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.SAXException;
/**
* Xml implementation of the UserManager. Looks for the property user.manager.xml.file in the
* azkaban properties.
*
* The xml should be in the following form: <azkaban-users> <user username="username"
* password="azkaban" roles="admin" groups="azkaban"/> </azkaban-users>
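*
* Role and group tags are parsed here as well; an illustrative (hypothetical) example covering
* all tags handled by this class:
* <azkaban-users>
*   <user username="alice" password="secret" roles="admin" groups="ops"/>
*   <role name="admin" permissions="ADMIN"/>
*   <group name="ops" roles="readonly"/>
* </azkaban-users>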
*/
public class XmlUserManager implements UserManager {
public static final String XML_FILE_PARAM = "user.manager.xml.file";
public static final String AZKABAN_USERS_TAG = "azkaban-users";
public static final String USER_TAG = "user";
public static final String ROLE_TAG = "role";
public static final String GROUP_TAG = "group";
public static final String ROLENAME_ATTR = "name";
public static final String ROLEPERMISSIONS_ATTR = "permissions";
public static final String USERNAME_ATTR = "username";
public static final String PASSWORD_ATTR = "password";
public static final String EMAIL_ATTR = "email";
public static final String ROLES_ATTR = "roles";
public static final String PROXY_ATTR = "proxy";
public static final String GROUPS_ATTR = "groups";
public static final String GROUPNAME_ATTR = "name";
private static final Logger logger = LoggerFactory.getLogger(XmlUserManager.class);
private final String xmlPath;
private HashMap<String, User> users;
private HashMap<String, String> userPassword;
private HashMap<String, Role> roles;
private HashMap<String, Set<String>> groupRoles;
private HashMap<String, Set<String>> proxyUserMap;
/**
* The mandatory UserManager(Props) constructor, which is called via reflection.
*/
public XmlUserManager(final Props props) {
this(props, FileWatcher::new);
}
XmlUserManager(final Props props, final FileWatcherFactory fileWatcherFactory) {
this.xmlPath = props.getString(XML_FILE_PARAM);
parseXMLFile();
// Create a thread which listens to any change in user config file and
// reloads it.
final Map<String, ParseConfigFile> parseConfigFileMap = new HashMap<>();
parseConfigFileMap.put(this.xmlPath, this::parseXMLFile);
try {
UserUtils.setupWatch(parseConfigFileMap, fileWatcherFactory.get());
} catch (final IOException e) {
logger.warn("Failed to create WatchService", e);
}
}
private void parseXMLFile() {
final File file = new File(this.xmlPath);
if (!file.exists()) {
throw new IllegalArgumentException("User xml file " + this.xmlPath
+ " doesn't exist.");
}
final HashMap<String, User> users = new HashMap<>();
final HashMap<String, String> userPassword = new HashMap<>();
final HashMap<String, Role> roles = new HashMap<>();
final HashMap<String, Set<String>> groupRoles =
new HashMap<>();
final HashMap<String, Set<String>> proxyUserMap =
new HashMap<>();
// Creating the document builder to parse xml.
final DocumentBuilderFactory docBuilderFactory =
DocumentBuilderFactory.newInstance();
DocumentBuilder builder = null;
try {
builder = docBuilderFactory.newDocumentBuilder();
} catch (final ParserConfigurationException e) {
throw new IllegalArgumentException(
"Exception while parsing user xml. Document builder not created.", e);
}
Document doc = null;
try {
doc = builder.parse(file);
} catch (final SAXException e) {
throw new IllegalArgumentException("Exception while parsing " + this.xmlPath
+ ". Invalid XML.", e);
} catch (final IOException e) {
throw new IllegalArgumentException("Exception while parsing " + this.xmlPath
+ ". Error reading file.", e);
}
// Only look at first item, because we should only be seeing
// azkaban-users tag.
final NodeList tagList = doc.getChildNodes();
final Node azkabanUsers = tagList.item(0);
final NodeList azkabanUsersList = azkabanUsers.getChildNodes();
for (int i = 0; i < azkabanUsersList.getLength(); ++i) {
final Node node = azkabanUsersList.item(i);
if (node.getNodeType() == Node.ELEMENT_NODE) {
if (node.getNodeName().equals(USER_TAG)) {
parseUserTag(node, users, userPassword, proxyUserMap);
} else if (node.getNodeName().equals(ROLE_TAG)) {
parseRoleTag(node, roles);
} else if (node.getNodeName().equals(GROUP_TAG)) {
parseGroupRoleTag(node, groupRoles);
}
}
}
// Synchronize the swap. Similarly, the gets are synchronized to this.
synchronized (this) {
this.users = users;
this.userPassword = userPassword;
this.roles = roles;
this.proxyUserMap = proxyUserMap;
this.groupRoles = groupRoles;
}
}
private void parseUserTag(final Node node, final HashMap<String, User> users,
final HashMap<String, String> userPassword,
final HashMap<String, Set<String>> proxyUserMap) {
final NamedNodeMap userAttrMap = node.getAttributes();
final Node userNameAttr = userAttrMap.getNamedItem(USERNAME_ATTR);
if (userNameAttr == null) {
throw new RuntimeException("Error loading user. The '" + USERNAME_ATTR
+ "' attribute doesn't exist");
}
final Node passwordAttr = userAttrMap.getNamedItem(PASSWORD_ATTR);
if (passwordAttr == null) {
throw new RuntimeException("Error loading user. The '" + PASSWORD_ATTR
+ "' attribute doesn't exist");
}
// Add user to the user/password map
final String username = userNameAttr.getNodeValue();
final String password = passwordAttr.getNodeValue();
userPassword.put(username, password);
// Add the user to the node
final User user = new User(userNameAttr.getNodeValue());
users.put(username, user);
logger.info("Loading user " + user.getUserId());
final Node roles = userAttrMap.getNamedItem(ROLES_ATTR);
if (roles != null) {
final String value = roles.getNodeValue();
final String[] roleSplit = value.split("\\s*,\\s*");
for (final String role : roleSplit) {
user.addRole(role);
}
}
final Node proxy = userAttrMap.getNamedItem(PROXY_ATTR);
if (proxy != null) {
final String value = proxy.getNodeValue();
final String[] proxySplit = value.split("\\s*,\\s*");
for (final String proxyUser : proxySplit) {
Set<String> proxySet = proxyUserMap.get(username);
if (proxySet == null) {
proxySet = new HashSet<>();
proxyUserMap.put(username, proxySet);
}
proxySet.add(proxyUser);
}
}
final Node groups = userAttrMap.getNamedItem(GROUPS_ATTR);
if (groups != null) {
final String value = groups.getNodeValue();
final String[] groupSplit = value.split("\\s*,\\s*");
for (final String group : groupSplit) {
user.addGroup(group);
}
}
final Node emailAttr = userAttrMap.getNamedItem(EMAIL_ATTR);
if (emailAttr != null) {
user.setEmail(emailAttr.getNodeValue());
}
}
private void parseRoleTag(final Node node, final HashMap<String, Role> roles) {
final NamedNodeMap roleAttrMap = node.getAttributes();
final Node roleNameAttr = roleAttrMap.getNamedItem(ROLENAME_ATTR);
if (roleNameAttr == null) {
throw new RuntimeException(
"Error loading role. The role 'name' attribute doesn't exist");
}
final Node permissionAttr = roleAttrMap.getNamedItem(ROLEPERMISSIONS_ATTR);
if (permissionAttr == null) {
throw new RuntimeException(
"Error loading role. The role 'permissions' attribute doesn't exist");
}
final String roleName = roleNameAttr.getNodeValue();
final String permissions = permissionAttr.getNodeValue();
final String[] permissionSplit = permissions.split("\\s*,\\s*");
final Permission perm = new Permission();
for (final String permString : permissionSplit) {
try {
final Permission.Type type = Permission.Type.valueOf(permString);
perm.addPermission(type);
} catch (final IllegalArgumentException e) {
logger.error("Error adding type " + permString
+ ". Permission doesn't exist.", e);
}
}
final Role role = new Role(roleName, perm);
roles.put(roleName, role);
}
@Override
public User getUser(final String username, final String password)
throws UserManagerException {
if (username == null || username.trim().isEmpty()) {
throw new UserManagerException("Username is empty.");
} else if (password == null || password.trim().isEmpty()) {
throw new UserManagerException("Password is empty.");
}
// Minimize the synchronization of the get. Shouldn't matter if it
// doesn't exist.
String foundPassword = null;
User user = null;
synchronized (this) {
foundPassword = this.userPassword.get(username);
if (foundPassword != null) {
user = this.users.get(username);
}
}
if (foundPassword == null || !foundPassword.equals(password)) {
throw new UserManagerException("Username/Password not found.");
}
// Once it gets to this point, no exception has been thrown. User shouldn't be
// null, but adding this check in case the user and user/password hash tables
// go out of sync.
if (user == null) {
throw new UserManagerException("Internal error: User not found.");
}
// Add all the roles the group has to the user
resolveGroupRoles(user);
user.setPermissions(new UserPermissions() {
@Override
public boolean hasPermission(final String permission) {
return true;
}
@Override
public void addPermission(final String permission) {
}
});
return user;
}
private void resolveGroupRoles(final User user) {
for (final String group : user.getGroups()) {
final Set<String> groupRoleSet = this.groupRoles.get(group);
if (groupRoleSet != null) {
for (final String role : groupRoleSet) {
user.addRole(role);
}
}
}
}
private void parseGroupRoleTag(final Node node,
final HashMap<String, Set<String>> groupRoles) {
final NamedNodeMap groupAttrMap = node.getAttributes();
final Node groupNameAttr = groupAttrMap.getNamedItem(GROUPNAME_ATTR);
if (groupNameAttr == null) {
throw new RuntimeException(
"Error loading group. The group 'name' attribute doesn't exist");
}
final String groupName = groupNameAttr.getNodeValue();
final Set<String> roleSet = new HashSet<>();
final Node roles = groupAttrMap.getNamedItem(ROLES_ATTR);
if (roles != null) {
final String value = roles.getNodeValue();
final String[] roleSplit = value.split("\\s*,\\s*");
for (final String role : roleSplit) {
roleSet.add(role);
}
}
groupRoles.put(groupName, roleSet);
logger.info("Group roles " + groupName + " added.");
}
@Override
public boolean validateUser(final String username) {
return this.users.containsKey(username);
}
@Override
public Role getRole(final String roleName) {
return this.roles.get(roleName);
}
@Override
public boolean validateGroup(final String group) {
// Return true. Validation should be added when groups are added to the xml.
return true;
}
@Override
public boolean validateProxyUser(final String proxyUser, final User realUser) {
return this.proxyUserMap.containsKey(realUser.getUserId())
&& this.proxyUserMap.get(realUser.getUserId()).contains(proxyUser);
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/AbstractMailer.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.utils;
import java.util.Collection;
public class AbstractMailer {
private static final int MB_IN_BYTES = 1048576;
protected final EmailMessageCreator messageCreator;
private final String azkabanName;
private final long attachmentMaxSizeInByte;
public AbstractMailer(final Props props, final EmailMessageCreator messageCreator) {
this.azkabanName = props.getString("azkaban.name", "azkaban");
this.messageCreator = messageCreator;
final long maxAttachmentSizeInMB =
props.getInt("mail.max.attachment.size.mb", 100);
this.attachmentMaxSizeInByte = maxAttachmentSizeInMB * MB_IN_BYTES;
}
public EmailMessage createEmailMessage(final String subject, final String mimetype,
final Collection<String> emailList) {
final EmailMessage message = this.messageCreator.createMessage();
message.addAllToAddress(emailList);
message.setMimeType(mimetype);
message.setSubject(subject);
return message;
}
public String getAzkabanName() {
return this.azkabanName;
}
/**
* Attachment maximum size in bytes
*/
long getAttachmentMaxSize() {
return this.attachmentMaxSizeInByte;
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/AuthenticationUtils.java
|
/*
* Copyright 2018 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.utils;
import java.net.HttpURLConnection;
import java.net.URL;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL.Token;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* The util class for hadoop authentication.
*/
public class AuthenticationUtils {
private static final Logger logger = LoggerFactory.getLogger(AuthenticationUtils.class);
public static HttpURLConnection loginAuthenticatedURL(final URL url, final String keytabPrincipal,
final String keytabPath) throws Exception {
logger.info(
"Logging in URL: " + url.toString() + " using Principal: " + keytabPrincipal + ", Keytab: "
+ keytabPath);
UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
if (loginUser == null) {
UserGroupInformation.loginUserFromKeytab(keytabPrincipal, keytabPath);
loginUser = UserGroupInformation.getLoginUser();
logger.info("Logged in with user " + loginUser);
} else {
logger.info("Login user (" + loginUser + ") already created, refreshing tgt.");
loginUser.checkTGTAndReloginFromKeytab();
}
final HttpURLConnection connection = loginUser.doAs(
(PrivilegedExceptionAction<HttpURLConnection>) () -> {
final Token token = new Token();
return new AuthenticatedURL().openConnection(url, token);
});
return connection;
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/CaseInsensitiveConcurrentHashMap.java
|
/*
* Copyright 2018 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.utils;
import java.util.concurrent.ConcurrentHashMap;
/**
* A concurrent hash map with a case-insensitive string key.
*
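* Illustrative usage (hypothetical values):
* <pre>{@code
* CaseInsensitiveConcurrentHashMap<Integer> map = new CaseInsensitiveConcurrentHashMap<>();
* map.put("Alpha", 1);
* map.get("ALPHA"); // returns 1; keys are lower-cased internally
* }</pre>
*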
* @param <V> the value type
*/
public class CaseInsensitiveConcurrentHashMap<V> {
private final ConcurrentHashMap<String, V> map = new ConcurrentHashMap<>();
public V put(final String key, final V value) {
return this.map.put(key.toLowerCase(), value);
}
public V get(final String key) {
return this.map.get(key.toLowerCase());
}
public boolean containsKey(final String key) {
return this.map.containsKey(key.toLowerCase());
}
public V remove(final String key) {
return this.map.remove(key.toLowerCase());
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/CircularBuffer.java
|
/*
* Copyright 2010 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.utils;
import com.google.common.base.Joiner;
import com.google.common.collect.Iterators;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
/**
* A circular buffer of items of a given length. It will grow up to the give size as items are
* appended, then it will begin to overwrite older items.
*
* @param <T> The type of the item contained.
*/
public class CircularBuffer<T> implements Iterable<T> {
private final List<T> lines;
private final int size;
private int start;
public CircularBuffer(final int size) {
this.lines = new ArrayList<>();
this.size = size;
this.start = 0;
}
public void append(final T line) {
if (this.lines.size() < this.size) {
this.lines.add(line);
} else {
this.lines.set(this.start, line);
this.start = (this.start + 1) % this.size;
}
}
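// Illustrative sketch (not part of the original source): once full, the oldest element is
// overwritten, and iteration starts from the oldest surviving element.
//   final CircularBuffer<String> buf = new CircularBuffer<>(3);
//   buf.append("a"); buf.append("b"); buf.append("c"); buf.append("d");
//   // iteration order is now: "b", "c", "d"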
@Override
public String toString() {
return "[" + Joiner.on(", ").join(this.lines) + "]";
}
@Override
public Iterator<T> iterator() {
if (this.start == 0) {
return this.lines.iterator();
} else {
return Iterators.concat(this.lines.subList(this.start, this.lines.size()).iterator(),
this.lines.subList(0, this.start).iterator());
}
}
public int getMaxSize() {
return this.size;
}
public int getSize() {
return this.lines.size();
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/DIUtils.java
|
/*
* Copyright 2019 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.utils;
import java.io.File;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.ArrayList;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Dependency Injection Utilities
*/
public class DIUtils {
private static final Logger LOGGER = LoggerFactory.getLogger(DIUtils.class);
/**
* Get ClassLoader From Resources
*
* @param resources list of resource
* @param parent parent class loader
* @return class loader
*/
public static ClassLoader getClassLoader(List<URL> resources, ClassLoader parent) {
return new URLClassLoader(resources.toArray(new URL[resources.size()]), parent);
}
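// Illustrative sketch (not part of the original source): load plugin jars and build a class
// loader from them. The directory and the class-path lists are hypothetical.
//   final List<URL> resources = DIUtils.loadResources(
//       new File("/plugins/jobtypes/foo"), globalClassPaths, classPaths, libDirs);
//   final ClassLoader loader = DIUtils.getClassLoader(resources, DIUtils.class.getClassLoader());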
/**
* Load Setting to Resources
*
* @param pluginDir plugin Directory
* @param globalClassPaths list of global class path
* @param classPaths list of local class path
* @param libDirectories list of lib directories
* @return list of resource URLs
*/
public static List<URL> loadResources(
final File pluginDir,
final List<String> globalClassPaths,
final List<String> classPaths,
final List<String> libDirectories
) throws MalformedURLException {
final List<URL> resources = new ArrayList<>();
LOGGER.info("Adding global resources");
loadResources(globalClassPaths, resources);
LOGGER.info("Adding type resources");
loadResources(classPaths, resources);
LOGGER.info("Adding lib resources");
if (libDirectories != null) {
for (final String libDir : libDirectories) {
loadResources(new File(libDir), resources);
}
}
LOGGER.info("Adding type override resources");
loadResources(pluginDir, resources);
return resources;
}
/**
* Load Setting from a list of class paths to Resources
*
* @param classPaths list of class paths
* @param resources list of resource
* @throws MalformedURLException
*/
private static void loadResources(final List<String> classPaths, List<URL> resources) throws MalformedURLException {
if (classPaths != null) {
for (final String jar : classPaths) {
final URL cpItem = new File(jar).toURI().toURL();
if (!resources.contains(cpItem)) {
LOGGER.info("adding to classpath " + cpItem);
resources.add(cpItem);
}
}
}
}
/**
* Load Setting from file directory to Resources
*
* @param directory file directory for .jar files
* @param resources list of resource
* @throws MalformedURLException
*/
private static void loadResources(final File directory, List<URL> resources) throws MalformedURLException {
final File[] files = directory.listFiles();
if (files == null) {
// Guard against a missing or unreadable plugin directory instead of throwing an NPE.
LOGGER.warn("Directory " + directory + " does not exist or is not readable.");
return;
}
for (final File file : files) {
if (file.getName().endsWith(".jar")) {
resources.add(file.toURI().toURL());
LOGGER.info("adding to classpath " + file.toURI().toURL());
}
}
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/DependencyTransferException.java
|
/*
* Copyright 2019 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.utils;
/**
* Indicates that a dependency failed to successfully upload or download.
*/
public class DependencyTransferException extends RuntimeException {
public DependencyTransferException() {
super();
}
public DependencyTransferException(final String s) {
super(s);
}
public DependencyTransferException(final String s, final Throwable e) {
super(s, e);
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/DependencyTransferManager.java
|
/*
* Copyright 2019 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.utils;
import azkaban.Constants;
import azkaban.spi.DependencyFile;
import azkaban.spi.Storage;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.commons.io.IOUtils;
import static azkaban.utils.ThinArchiveUtils.*;
/**
* Handles downloading of dependencies. Used during the thin archive upload process, and upon starting the execution
* of a flow defined in a thin archive (to download necessary dependencies). Provides a thin layer of retry logic,
* checksum validation and parallelism on top of the base Storage::getDependency().
*/
@Singleton
public class DependencyTransferManager {
private static final int NUM_THREADS = 8;
public final int dependencyMaxDownloadTries;
private final Storage storage;
@Inject
public DependencyTransferManager(final Props props, final Storage storage) {
this.storage = storage;
this.dependencyMaxDownloadTries =
props.getInt(Constants.ConfigurationKeys.AZKABAN_DEPENDENCY_MAX_DOWNLOAD_TRIES, 2);
}
/**
* Returns whether the DependencyTransferManager will be able to download dependencies from the
* current Storage instance.
* @return true if this class is enabled and can download dependencies.
*/
public boolean isEnabled() { return this.storage.dependencyFetchingEnabled(); }
private void ensureIsEnabled() {
if (!isEnabled()) {
throw new UnsupportedOperationException("Thin archive support is not enabled!");
}
}
/**
* Downloads a set of dependencies from an origin. Each downloaded dependency is stored in the file
* returned by DependencyFile::getFile.
*
* @param deps set of DependencyFile to download
*/
public void downloadAllDependencies(final Set<DependencyFile> deps) {
if (deps.isEmpty()) {
// Nothing for us to do!
return;
}
ensureIsEnabled();
ExecutorService threadPool = Executors.newFixedThreadPool(NUM_THREADS);
CompletableFuture[] taskFutures = deps
.stream()
.map(f -> CompletableFuture.runAsync(() -> downloadDependency(f), threadPool))
.toArray(CompletableFuture[]::new);
try {
waitForAllToSucceedOrOneToFail(taskFutures);
// All downloads completed; release the worker threads.
threadPool.shutdown();
} catch (InterruptedException e) {
// No point in continuing, let's stop any future downloads and try to interrupt currently running ones.
threadPool.shutdownNow();
throw new DependencyTransferException("Download interrupted.", e);
} catch (ExecutionException e) {
// ^^^ see above comment ^^^
threadPool.shutdownNow();
if (e.getCause() instanceof DependencyTransferException) {
throw (DependencyTransferException) e.getCause();
}
throw new RuntimeException(e.getCause());
}
}
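// Illustrative sketch (not part of the original source): download a set of dependencies,
// failing fast if any single download fails. The props/storage instances and the
// dependencyFiles set are hypothetical.
//   final DependencyTransferManager manager = new DependencyTransferManager(props, storage);
//   if (manager.isEnabled()) {
//     manager.downloadAllDependencies(dependencyFiles); // throws DependencyTransferException on failure
//   }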
private void downloadDependency(final DependencyFile f) {
try {
downloadDependency(f, 0);
} catch (IOException e) {
throw new DependencyTransferException("Error while downloading dependency " + f.getFileName(), e);
} catch (HashNotMatchException e) {
throw new DependencyTransferException("Checksum did not match when downloading dependency " + f.getFileName(), e);
}
}
private void downloadDependency(final DependencyFile f, final int retries)
throws HashNotMatchException, IOException {
try {
// Make any necessary directories
f.getFile().getParentFile().mkdirs();
// Close both streams even on failure; otherwise they leak on exceptions.
try (final InputStream in = this.storage.getDependency(f);
final FileOutputStream fos = new FileOutputStream(f.getFile())) {
IOUtils.copy(in, fos);
}
} catch (IOException e) {
if (retries + 1 < dependencyMaxDownloadTries) {
// downloadDependency will overwrite our destination file if attempted again
exponentialBackoffDelay(retries);
downloadDependency(f, retries + 1);
return;
}
throw e;
}
try {
validateDependencyHash(f);
} catch (HashNotMatchException e) {
if (retries + 1 < dependencyMaxDownloadTries) {
// downloadDependency will overwrite our destination file if attempted again
exponentialBackoffDelay(retries);
downloadDependency(f, retries + 1);
return;
}
throw e;
}
}
private static void exponentialBackoffDelay(final int retries) {
try {
// Will wait for 1, 2, 4, 8... seconds
Thread.sleep((long) (Math.pow(2, retries) * 1000));
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
private static void waitForAllToSucceedOrOneToFail(final CompletableFuture<?>[] futures)
throws InterruptedException, ExecutionException {
CompletableFuture<?> failure = new CompletableFuture<>();
for (CompletableFuture<?> f : futures) {
// f = f is redundant, but bug checker throws error if we don't do it because it doesn't like us ignoring a
// returned future...but we're still going to ignore it.
f = f.exceptionally(ex -> {
failure.completeExceptionally(ex);
return null;
});
}
// Wait for either the failure future to complete, or all of the actual futures to complete.
CompletableFuture.anyOf(failure, CompletableFuture.allOf(futures)).get();
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/EmailMessage.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.utils;
import java.io.File;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Date;
import java.util.List;
import java.util.Properties;
import javax.activation.DataHandler;
import javax.activation.DataSource;
import javax.activation.FileDataSource;
import javax.mail.BodyPart;
import javax.mail.Message;
import javax.mail.MessagingException;
import javax.mail.internet.InternetAddress;
import javax.mail.internet.MimeBodyPart;
import javax.mail.internet.MimeMultipart;
import org.apache.log4j.Logger;
public class EmailMessage {
private static final int MAX_EMAIL_RETRY_COUNT = 5;
private static int _mailTimeout = 10000;
private static int _connectionTimeout = 10000;
  private static long _totalAttachmentMaxSizeInByte = 1024 * 1024 * 1024; // 1 GB
private final Logger logger = Logger.getLogger(EmailMessage.class);
private final List<String> _toAddress = new ArrayList<>();
private final int _mailPort;
private final ArrayList<BodyPart> _attachments = new ArrayList<>();
private final String _mailHost;
private final String _mailUser;
private final String _mailPassword;
private final EmailMessageCreator creator;
private String _subject;
private String _fromAddress;
private String _mimeType = "text/plain";
private String _tls;
private long _totalAttachmentSizeSoFar;
private boolean _usesAuth = true;
private boolean _enableAttachementEmbedment = true;
private StringBuffer _body = new StringBuffer();
public EmailMessage(final String host, final int port, final String user, final String password,
final EmailMessageCreator creator) {
this._mailUser = user;
this._mailHost = host;
this._mailPort = port;
this._mailPassword = password;
this.creator = creator;
}
public static void setTimeout(final int timeoutMillis) {
_mailTimeout = timeoutMillis;
}
public static void setConnectionTimeout(final int timeoutMillis) {
_connectionTimeout = timeoutMillis;
}
public static void setTotalAttachmentMaxSize(final long sizeInBytes) {
if (sizeInBytes < 1) {
throw new IllegalArgumentException(
"attachment max size can't be 0 or negative");
}
_totalAttachmentMaxSizeInByte = sizeInBytes;
}
public EmailMessage enableAttachementEmbedment(final boolean toEnable) {
this._enableAttachementEmbedment = toEnable;
return this;
}
public EmailMessage addAllToAddress(final Collection<? extends String> addresses) {
this._toAddress.addAll(addresses);
return this;
}
public EmailMessage addToAddress(final String address) {
this._toAddress.add(address);
return this;
}
public EmailMessage setFromAddress(final String fromAddress) {
this._fromAddress = fromAddress;
return this;
}
public EmailMessage setTLS(final String tls) {
this._tls = tls;
return this;
}
public EmailMessage setAuth(final boolean auth) {
this._usesAuth = auth;
return this;
}
public EmailMessage addAttachment(final File file) throws MessagingException {
return addAttachment(file.getName(), file);
}
public EmailMessage addAttachment(final String attachmentName, final File file)
throws MessagingException {
this._totalAttachmentSizeSoFar += file.length();
if (this._totalAttachmentSizeSoFar > _totalAttachmentMaxSizeInByte) {
throw new MessageAttachmentExceededMaximumSizeException(
"Adding attachment '" + attachmentName
+ "' will exceed the allowed maximum size of "
+ _totalAttachmentMaxSizeInByte);
}
final BodyPart attachmentPart = new MimeBodyPart();
final DataSource fileDataSource = new FileDataSource(file);
attachmentPart.setDataHandler(new DataHandler(fileDataSource));
attachmentPart.setFileName(attachmentName);
this._attachments.add(attachmentPart);
return this;
}
public EmailMessage addAttachment(final String attachmentName, final InputStream stream)
throws MessagingException {
final BodyPart attachmentPart = new MimeBodyPart(stream);
attachmentPart.setFileName(attachmentName);
this._attachments.add(attachmentPart);
return this;
}
private void checkSettings() {
if (this._mailHost == null) {
throw new RuntimeException("Mail host not set.");
}
if (this._fromAddress == null || this._fromAddress.length() == 0) {
throw new RuntimeException("From address not set.");
}
if (this._subject == null) {
throw new RuntimeException("Subject cannot be null");
}
if (this._toAddress.size() == 0) {
throw new RuntimeException("T");
}
}
public void sendEmail() throws MessagingException {
checkSettings();
final Properties props = new Properties();
if (this._usesAuth) {
props.put("mail.smtp.auth", "true");
props.put("mail.user", this._mailUser);
props.put("mail.password", this._mailPassword);
} else {
props.put("mail.smtp.auth", "false");
}
props.put("mail.smtp.host", this._mailHost);
props.put("mail.smtp.port", this._mailPort);
props.put("mail.smtp.timeout", _mailTimeout);
props.put("mail.smtp.connectiontimeout", _connectionTimeout);
props.put("mail.smtp.starttls.enable", this._tls);
props.put("mail.smtp.ssl.trust", this._mailHost);
final JavaxMailSender sender = this.creator.createSender(props);
final Message message = sender.createMessage();
final InternetAddress from = new InternetAddress(this._fromAddress, false);
message.setFrom(from);
for (final String toAddr : this._toAddress) {
message.addRecipient(Message.RecipientType.TO, new InternetAddress(
toAddr, false));
}
message.setSubject(this._subject);
message.setSentDate(new Date());
if (this._attachments.size() > 0) {
final MimeMultipart multipart =
this._enableAttachementEmbedment ? new MimeMultipart("related")
: new MimeMultipart();
final BodyPart messageBodyPart = new MimeBodyPart();
messageBodyPart.setContent(this._body.toString(), this._mimeType);
multipart.addBodyPart(messageBodyPart);
// Add attachments
for (final BodyPart part : this._attachments) {
multipart.addBodyPart(part);
}
message.setContent(multipart);
} else {
message.setContent(this._body.toString(), this._mimeType);
}
retryConnectToSMTPServer(sender);
retrySendMessage(sender, message);
sender.close();
}
private void connectToSMTPServer(final JavaxMailSender s) throws MessagingException {
if (this._usesAuth) {
s.connect(this._mailHost, this._mailPort, this._mailUser, this._mailPassword);
} else {
s.connect();
}
}
private void retryConnectToSMTPServer(final JavaxMailSender s) throws MessagingException {
int attempt;
for (attempt = 0; attempt < MAX_EMAIL_RETRY_COUNT; attempt++) {
try {
connectToSMTPServer(s);
return;
} catch (final Exception e) {
this.logger.error("Connecting to SMTP server failed, attempt: " + attempt, e);
}
}
s.close();
throw new MessagingException("Failed to connect to SMTP server after "
+ attempt + " attempts.");
}
private void retrySendMessage(final JavaxMailSender s, final Message message)
throws MessagingException {
int attempt;
for (attempt = 0; attempt < MAX_EMAIL_RETRY_COUNT; attempt++) {
try {
s.sendMessage(message, message.getRecipients(Message.RecipientType.TO));
return;
} catch (final Exception e) {
this.logger.error("Sending email messages failed, attempt: " + attempt, e);
}
}
s.close();
throw new MessagingException("Failed to send email messages after "
+ attempt + " attempts.");
}
public void setBody(final String body, final String mimeType) {
this._body = new StringBuffer(body);
this._mimeType = mimeType;
}
public EmailMessage setMimeType(final String mimeType) {
this._mimeType = mimeType;
return this;
}
public EmailMessage println(final Object str) {
this._body.append(str);
return this;
}
public String getBody() {
return this._body.toString();
}
public void setBody(final String body) {
setBody(body, this._mimeType);
}
public String getSubject() {
return this._subject;
}
public EmailMessage setSubject(final String subject) {
this._subject = subject;
return this;
}
public int getMailPort() {
return this._mailPort;
}
}
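// Illustrative usage sketch (not part of the original file). The SMTP settings and
// addresses below are placeholder assumptions.
class EmailMessageUsageExample {
  public static void main(final String[] args) throws Exception {
    final Props props = new Props();
    props.put("mail.host", "smtp.example.com"); // hypothetical SMTP host
    props.put("mail.sender", "azkaban@example.com");
    props.put("mail.useAuth", "false");
    final EmailMessage message = new EmailMessageCreator(props).createMessage();
    message.addToAddress("oncall@example.com")
        .setSubject("Azkaban flow failed")
        .setMimeType("text/html");
    message.setBody("<p>Flow <b>daily_etl</b> failed.</p>");
    // Connects and sends, retrying up to MAX_EMAIL_RETRY_COUNT times internally.
    message.sendEmail();
  }
}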
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/EmailMessageCreator.java
|
/*
* Copyright 2018 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.utils;
import java.util.Properties;
import javax.inject.Inject;
import javax.mail.NoSuchProviderException;
public class EmailMessageCreator {
public static final int DEFAULT_SMTP_PORT = 25;
private final String mailHost;
private final int mailPort;
private final String mailUser;
private final String mailPassword;
private final String mailSender;
private final String tls;
private final boolean usesAuth;
@Inject
public EmailMessageCreator(final Props props) {
this.mailHost = props.getString("mail.host", "localhost");
this.mailPort = props.getInt("mail.port", DEFAULT_SMTP_PORT);
this.mailUser = props.getString("mail.user", "");
this.mailPassword = props.getString("mail.password", "");
this.mailSender = props.getString("mail.sender", "");
this.tls = props.getString("mail.tls", "false");
this.usesAuth = props.getBoolean("mail.useAuth", true);
}
public EmailMessage createMessage() {
final EmailMessage message = new EmailMessage(
this.mailHost, this.mailPort, this.mailUser, this.mailPassword, this);
message.setFromAddress(this.mailSender);
message.setTLS(this.tls);
message.setAuth(this.usesAuth);
return message;
}
public JavaxMailSender createSender(final Properties props) throws NoSuchProviderException {
return new JavaxMailSender(props);
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/Emailer.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.utils;
import static java.util.Objects.requireNonNull;
import azkaban.Constants;
import azkaban.Constants.ConfigurationKeys;
import azkaban.alert.Alerter;
import azkaban.executor.ExecutableFlow;
import azkaban.executor.Executor;
import azkaban.executor.ExecutorLoader;
import azkaban.executor.ExecutorManagerException;
import azkaban.executor.mail.DefaultMailCreator;
import azkaban.executor.mail.MailCreator;
import azkaban.metrics.CommonMetrics;
import azkaban.sla.SlaOption;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableListMultimap;
import com.google.common.collect.Lists;
import com.google.common.collect.Multimaps;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.inject.Singleton;
import javax.mail.internet.AddressException;
import org.apache.commons.collections.CollectionUtils;
import org.apache.log4j.Logger;
@Singleton
public class Emailer extends AbstractMailer implements Alerter {
private static final String HTTPS = "https";
private static final String HTTP = "http";
private static final Logger logger = Logger.getLogger(Emailer.class);
private final CommonMetrics commonMetrics;
private final String scheme;
private final String clientHostname;
private final String clientPortNumber;
private final String azkabanName;
private final ExecutorLoader executorLoader;
@Inject
public Emailer(final Props props, final CommonMetrics commonMetrics,
final EmailMessageCreator messageCreator, final ExecutorLoader executorLoader) {
super(props, messageCreator);
this.executorLoader = requireNonNull(executorLoader, "executorLoader is null.");
this.commonMetrics = requireNonNull(commonMetrics, "commonMetrics is null.");
this.azkabanName = props.getString("azkaban.name", "azkaban");
final int mailTimeout = props.getInt("mail.timeout.millis", 30000);
EmailMessage.setTimeout(mailTimeout);
final int connectionTimeout = props.getInt("mail.connection.timeout.millis", 30000);
EmailMessage.setConnectionTimeout(connectionTimeout);
EmailMessage.setTotalAttachmentMaxSize(getAttachmentMaxSize());
this.clientHostname = props.getString(ConfigurationKeys.AZKABAN_WEBSERVER_EXTERNAL_HOSTNAME,
props.getString("jetty.hostname", "localhost"));
if (props.getBoolean("jetty.use.ssl", true)) {
this.scheme = HTTPS;
this.clientPortNumber = Integer.toString(props
.getInt(ConfigurationKeys.AZKABAN_WEBSERVER_EXTERNAL_SSL_PORT,
props.getInt("jetty.ssl.port",
Constants.DEFAULT_SSL_PORT_NUMBER)));
} else {
this.scheme = HTTP;
this.clientPortNumber = Integer.toString(
props.getInt(ConfigurationKeys.AZKABAN_WEBSERVER_EXTERNAL_PORT, props.getInt("jetty.port",
Constants.DEFAULT_PORT_NUMBER)));
}
}
public String getAzkabanURL() {
return this.scheme + "://" + this.clientHostname + ":" + this.clientPortNumber;
}
/**
* Send an email to the specified email list
*/
public void sendEmail(final List<String> emailList, final String subject, final String body) {
if (emailList != null && !emailList.isEmpty()) {
final EmailMessage message = super.createEmailMessage(subject, "text/html", emailList);
message.setBody(body);
sendEmail(message, true, "email message " + body);
}
}
@Override
public void alertOnSla(final SlaOption slaOption, final String slaMessage) {
final String subject =
"SLA violation for " + getJobOrFlowName(slaOption) + " on " + getAzkabanName();
final List<String> emailList =
(List<String>) slaOption.getEmails();
logger.info("Sending SLA email " + slaMessage);
sendEmail(emailList, subject, slaMessage);
}
@Override
public void alertOnFirstError(final ExecutableFlow flow) {
final EmailMessage message = this.messageCreator.createMessage();
final MailCreator mailCreator = getMailCreator(flow);
final boolean mailCreated = mailCreator.createFirstErrorMessage(flow, message, this.azkabanName,
this.scheme, this.clientHostname, this.clientPortNumber);
sendEmail(message, mailCreated,
"first error email message for execution " + flow.getExecutionId());
}
@Override
public void alertOnError(final ExecutableFlow flow, final String... extraReasons) {
final EmailMessage message = this.messageCreator.createMessage();
final MailCreator mailCreator = getMailCreator(flow);
List<ExecutableFlow> last72hoursExecutions = new ArrayList<>();
if (flow.getStartTime() > 0) {
final long startTime = flow.getStartTime() - Duration.ofHours(72).toMillis();
try {
last72hoursExecutions = this.executorLoader.fetchFlowHistory(flow.getProjectId(), flow
.getFlowId(), startTime);
} catch (final ExecutorManagerException e) {
logger.error("unable to fetch past executions", e);
}
}
final boolean mailCreated = mailCreator.createErrorEmail(flow, last72hoursExecutions, message,
this.azkabanName, this.scheme, this.clientHostname, this.clientPortNumber, extraReasons);
sendEmail(message, mailCreated, "error email message for execution " + flow.getExecutionId());
}
@Override
public void alertOnSuccess(final ExecutableFlow flow) {
final EmailMessage message = this.messageCreator.createMessage();
final MailCreator mailCreator = getMailCreator(flow);
final boolean mailCreated = mailCreator.createSuccessEmail(flow, message, this.azkabanName,
this.scheme, this.clientHostname, this.clientPortNumber);
sendEmail(message, mailCreated, "success email message for execution " + flow.getExecutionId());
}
/**
* Sends as many emails as there are unique combinations of:
*
* [mail creator] x [failure email address list]
*
* Executions with the same combo are grouped into a single message.
*/
@Override
public void alertOnFailedUpdate(final Executor executor, List<ExecutableFlow> flows,
final ExecutorManagerException updateException) {
flows = flows.stream()
.filter(flow -> flow.getExecutionOptions() != null)
.filter(flow -> CollectionUtils.isNotEmpty(flow.getExecutionOptions().getFailureEmails()))
.collect(Collectors.toList());
// group by mail creator in case some flows use different creators
final ImmutableListMultimap<String, ExecutableFlow> creatorsToFlows = Multimaps
.index(flows, flow -> flow.getExecutionOptions().getMailCreator());
for (final String mailCreatorName : creatorsToFlows.keySet()) {
final ImmutableList<ExecutableFlow> creatorFlows = creatorsToFlows.get(mailCreatorName);
final MailCreator mailCreator = getMailCreator(mailCreatorName);
// group by recipients in case some flows have different failure email addresses
final ImmutableListMultimap<List<String>, ExecutableFlow> emailsToFlows = Multimaps
.index(creatorFlows, flow -> flow.getExecutionOptions().getFailureEmails());
for (final List<String> emailList : emailsToFlows.keySet()) {
sendFailedUpdateEmail(executor, updateException, mailCreator, emailsToFlows.get(emailList));
}
}
}
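  /*
   * Worked example of the grouping above (hypothetical flows): if flows A and B both
   * use the "default" mail creator with failureEmails=[oncall@example.com] while flow C
   * uses failureEmails=[etl@example.com], exactly two emails are sent: one covering
   * A and B together, and one covering C.
   */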
/**
* Use the default mail creator to send a failed executor healthcheck message to the given list
* of addresses. Message includes a list of flows impacted on the executor.
*/
@Override
public void alertOnFailedExecutorHealthCheck(Executor executor, List<ExecutableFlow> flows,
ExecutorManagerException failureException, List<String> emailList) {
if (emailList == null || emailList.isEmpty()) {
// We should consider throwing an exception here. For now this follows the model of the rest
// of the file and simply returns.
logger.error("No email list specified for failed health check alert");
return;
}
MailCreator mailCreator = DefaultMailCreator.getCreator(DefaultMailCreator.DEFAULT_MAIL_CREATOR);
final EmailMessage message = this.messageCreator.createMessage();
final boolean mailCreated = mailCreator
.createFailedExecutorHealthCheckMessage(flows, executor, failureException, message,
this.azkabanName, this.scheme, this.clientHostname, this.clientPortNumber, emailList);
final List<Integer> executionIds = Lists.transform(flows, ExecutableFlow::getExecutionId);
sendEmail(message, mailCreated, "failed health check message for executions " + executionIds);
}
/**
* Sends a single email about failed updates.
*/
private void sendFailedUpdateEmail(final Executor executor,
final ExecutorManagerException exception, final MailCreator mailCreator,
final ImmutableList<ExecutableFlow> flows) {
final EmailMessage message = this.messageCreator.createMessage();
final boolean mailCreated = mailCreator
.createFailedUpdateMessage(flows, executor, exception, message,
this.azkabanName, this.scheme, this.clientHostname, this.clientPortNumber);
final List<Integer> executionIds = Lists.transform(flows, ExecutableFlow::getExecutionId);
sendEmail(message, mailCreated, "failed update email message for executions " + executionIds);
}
private MailCreator getMailCreator(final ExecutableFlow flow) {
final String name = flow.getExecutionOptions().getMailCreator();
return getMailCreator(name);
}
private MailCreator getMailCreator(final String name) {
final MailCreator mailCreator = DefaultMailCreator.getCreator(name);
logger.debug("ExecutorMailer using mail creator:" + mailCreator.getClass().getCanonicalName());
return mailCreator;
}
public void sendEmail(final EmailMessage message, final boolean mailCreated,
final String operation) {
if (mailCreated) {
try {
message.sendEmail();
logger.info("Sent " + operation);
this.commonMetrics.markSendEmailSuccess();
} catch (final Exception e) {
logger.error("Failed to send " + operation, e);
if (!(e instanceof AddressException)) {
this.commonMetrics.markSendEmailFail();
}
}
}
}
private String getJobOrFlowName(final SlaOption slaOption) {
if (org.apache.commons.lang.StringUtils.isNotBlank(slaOption.getJobName())) {
return slaOption.getFlowName() + ":" + slaOption.getJobName();
} else {
return slaOption.getFlowName();
}
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/ExecuteAsUser.java
|
/*
* Copyright 2011 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.utils;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.log4j.Logger;
/**
* This is a wrapper over the binary executable execute-as-user. It provides a simple API to run
 * commands as another user while abstracting away the process logic, command-line handling, etc.
*/
public class ExecuteAsUser {
private final static Logger log = Logger.getLogger(ExecuteAsUser.class);
private final static String EXECUTE_AS_USER = "execute-as-user";
private final File binaryExecutable;
/**
* Construct the object
*
* @param nativeLibDirectory Absolute path to the native Lib Directory
*/
public ExecuteAsUser(final String nativeLibDirectory) {
this.binaryExecutable = new File(nativeLibDirectory, EXECUTE_AS_USER);
validate();
}
private void validate() {
if (!this.binaryExecutable.canExecute()) {
throw new RuntimeException("Unable to execute execute-as-user binary. Invalid Path: "
+ this.binaryExecutable.getAbsolutePath());
}
}
/**
* API to execute a command on behalf of another user.
*
* @param user The proxy user
* @param command the list containing the program and its arguments
* @return The return value of the shell command
*/
public int execute(final String user, final List<String> command) throws IOException {
log.info("Command: " + command);
final Process process = new ProcessBuilder()
.command(constructExecuteAsCommand(user, command))
.inheritIO()
.start();
int exitCode;
try {
exitCode = process.waitFor();
} catch (final InterruptedException e) {
log.error(e.getMessage(), e);
exitCode = 1;
}
return exitCode;
}
private List<String> constructExecuteAsCommand(final String user, final List<String> command) {
final List<String> commandList = new ArrayList<>();
commandList.add(this.binaryExecutable.getAbsolutePath());
commandList.add(user);
commandList.addAll(command);
return commandList;
}
}
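// Illustrative usage sketch (not part of the original file). The native-lib path,
// proxy user and command below are placeholder assumptions.
class ExecuteAsUserUsageExample {
  public static void main(final String[] args) throws Exception {
    final ExecuteAsUser executeAsUser = new ExecuteAsUser("/opt/azkaban/native-lib");
    // Effectively runs: execute-as-user hadoop_user ls -l /tmp
    final int exitCode = executeAsUser.execute("hadoop_user",
        java.util.Arrays.asList("ls", "-l", "/tmp"));
    System.out.println("exit code: " + exitCode);
  }
}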
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/ExternalLinkUtils.java
|
/*
* Copyright 2016 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.utils;
import azkaban.Constants;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import javax.servlet.http.HttpServletRequest;
import org.apache.log4j.Logger;
public class ExternalLinkUtils {
private static final Logger logger = Logger.getLogger(ExternalLinkUtils.class);
public static String getExternalAnalyzerOnReq(final Props azkProps,
final HttpServletRequest req) {
// If no topic was configured to be an external analyzer, return empty
if (!azkProps.containsKey(Constants.ConfigurationKeys.AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPIC)) {
return "";
}
// Find out which external link we should use to lead to our analyzer
final String topic = azkProps
.getString(Constants.ConfigurationKeys.AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPIC);
return getLinkFromRequest(topic, azkProps, req);
}
public static String getExternalLogViewer(final Props azkProps, final String jobId,
final Props jobProps) {
// If no topic was configured to be an external analyzer, return empty
if (!azkProps
.containsKey(Constants.ConfigurationKeys.AZKABAN_SERVER_EXTERNAL_LOGVIEWER_TOPIC)) {
return "";
}
// Find out which external link we should use to lead to our log viewer
final String topic = azkProps
.getString(Constants.ConfigurationKeys.AZKABAN_SERVER_EXTERNAL_LOGVIEWER_TOPIC);
return getLinkFromJobAndExecId(topic, azkProps, jobId, jobProps);
}
private static String getLinkFromJobAndExecId(final String topic, final Props azkProps,
final String jobId,
final Props jobProps) {
String urlTemplate = getURLForTopic(topic, azkProps);
if (urlTemplate.isEmpty()) {
logger.error("No URL specified for topic " + topic);
return "";
}
final String job = encodeToUTF8(jobId);
final String execid = encodeToUTF8(
jobProps.getString(Constants.FlowProperties.AZKABAN_FLOW_EXEC_ID));
urlTemplate = urlTemplate.replace("${jobid}", job).replace("${execid}", execid);
logger.info("Creating link: " + urlTemplate);
return urlTemplate;
}
private static String getLinkFromRequest(final String topic, final Props azkProps,
final HttpServletRequest req) {
String urlTemplate = getURLForTopic(topic, azkProps);
if (urlTemplate.isEmpty()) {
logger.error("No URL specified for topic " + topic);
return "";
}
String flowExecutionURL = "";
flowExecutionURL += req.getRequestURL();
flowExecutionURL += "?";
flowExecutionURL += req.getQueryString();
flowExecutionURL = encodeToUTF8(flowExecutionURL);
urlTemplate = urlTemplate.replace("${url}", flowExecutionURL);
logger.info("Creating link: " + urlTemplate);
return urlTemplate;
}
static String getURLForTopic(final String topic, final Props azkProps) {
return azkProps.getString(
Constants.ConfigurationKeys.AZKABAN_SERVER_EXTERNAL_TOPIC_URL.replace("${topic}", topic),
"");
}
static String encodeToUTF8(final String url) {
try {
return URLEncoder.encode(url, "UTF-8").replaceAll("\\+", "%20");
} catch (final UnsupportedEncodingException e) {
logger.error("Specified encoding is not supported", e);
}
return "";
}
}
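/*
 * Illustrative configuration sketch (assumed values, not part of the original file),
 * showing how the ${topic}, ${jobid} and ${execid} placeholders above fit together:
 *
 *   azkaban.server.external.logviewer.topic=logsearch
 *   azkaban.server.external.logsearch.url=http://logs.example.com/search?job=${jobid}&exec=${execid}
 *
 * With jobId "my_job" and flow exec id 123, getExternalLogViewer would then return:
 *   http://logs.example.com/search?job=my_job&exec=123
 */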
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/FileIOUtils.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.utils;
import java.io.BufferedInputStream;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileFilter;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.StringTokenizer;
import org.apache.commons.fileupload.util.Streams;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Runs a few unix commands. Created this so that I can move to JNI in the future.
*/
public class FileIOUtils {
private static final Logger LOGGER = LoggerFactory.getLogger(FileIOUtils.class);
/**
* Check if a directory is writable
*
* @param dir directory file object
   * @return true if it is writable, false otherwise
*/
public static boolean isDirWritable(final File dir) {
File testFile = null;
try {
testFile = new File(dir, "_tmp");
/*
* Create and delete a dummy file in order to check file permissions. Maybe
* there is a safer way for this check.
*/
testFile.createNewFile();
} catch (final IOException e) {
return false;
} finally {
if (testFile != null) {
testFile.delete();
}
}
return true;
}
/**
* Check if a directory is a valid directory
* @param directory directory file object
   * @return true if it exists, is a directory, and is readable
*/
public static boolean isValidDirectory(File directory) {
if (!directory.exists()) {
LOGGER.warn("Directory [" + directory.toPath() + "] does not exist.");
return false;
}
if (!directory.isDirectory()) {
LOGGER.error("Directory [" + directory.toPath() + "] is invalid. No extra plugins will be loaded");
return false;
}
if (!directory.canRead()) {
LOGGER.error("Directory [" + directory.toPath() + "] is not readable. No extra plugins will be loaded");
return false;
}
return true;
}
/**
* Delete a directory, log the error if deletion fails.
*/
public static void deleteDirectorySilently(final File dir) {
if (dir != null) {
try {
FileUtils.deleteDirectory(dir);
} catch (final IOException e) {
LOGGER.error("error when deleting dir {}", dir, e);
}
}
}
/**
* Get Directory or Create a Directory if it does not exist
* @param parentDirectory parent Directory name
* @param folderName sub folder name
* @return handle of File
*/
public static File getDirectory(File parentDirectory, String folderName) {
File directory = new File(parentDirectory, folderName);
if (!directory.exists()) {
directory.mkdir();
}
return directory;
}
/**
   * Move files from the source directory to the destination directory if the file name
   * matches the given pattern.
   * @param sourceDirectory source directory
   * @param destinationDirectory destination directory
   * @param fileNamePattern regular expression pattern for file names
   * @throws IOException if a file cannot be moved
   */
  public static void moveFiles(File sourceDirectory, File destinationDirectory, String fileNamePattern)
throws IOException {
    final File[] files = sourceDirectory.listFiles();
    if (files == null) {
      throw new IOException(sourceDirectory + " is not a readable directory");
    }
    for (File file : files) {
if (!file.isDirectory()) {
if (file.getName().matches(fileNamePattern)) {
Files.move(
Paths.get(file.getAbsolutePath()),
              Paths.get(String.format("%s/%s", destinationDirectory.getAbsolutePath(), file.getName())));
}
}
}
}
/**
* Dumps a number into a new file.
*
* @param filePath the target file
* @param num the number to dump
   * @throws IOException if the file cannot be written
*/
public static void dumpNumberToFile(final Path filePath, final long num) throws IOException {
try (final BufferedWriter writer = Files
.newBufferedWriter(filePath, StandardCharsets.UTF_8)) {
writer.write(String.valueOf(num));
} catch (final IOException e) {
LOGGER.error("Failed to write the number {} to the file {}", num, filePath, e);
throw e;
}
}
/**
* Reads a number from a file.
*
* @param filePath the target file
*/
public static long readNumberFromFile(final Path filePath)
throws IOException, NumberFormatException {
final List<String> allLines = Files.readAllLines(filePath);
if (!allLines.isEmpty()) {
return Long.parseLong(allLines.get(0));
} else {
throw new NumberFormatException("unable to parse empty file " + filePath.toString());
}
}
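  /*
   * Round-trip sketch (illustrative path): dumpNumberToFile(Paths.get("/tmp/version"), 42L)
   * writes the text "42"; readNumberFromFile(Paths.get("/tmp/version")) then returns 42L.
   */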
public static String getSourcePathFromClass(final Class<?> containedClass) {
final String containedClassPath = containedClass.getProtectionDomain().getCodeSource()
.getLocation().getPath();
File file = new File(containedClassPath);
if (!file.isDirectory() && file.getName().endsWith(".class")) {
final String name = containedClass.getName();
final StringTokenizer tokenizer = new StringTokenizer(name, ".");
while (tokenizer.hasMoreTokens()) {
tokenizer.nextElement();
file = file.getParentFile();
}
return file.getPath();
} else {
return containedClassPath;
}
}
/**
* A thin wrapper for File.getCanonicalPath() that doesn't throw a checked exception
*
   * @param f input file
* @return String canonical path of the file
*/
public static String getCanonicalPath(final File f) {
try {
return f.getCanonicalPath();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
/**
* Load output file into a Props object
*
* @param file output properties file
* @return Props object
*/
public static Props loadOutputFileProps(final File file) {
InputStream reader = null;
try {
LOGGER.info("output properties file=" + file.getAbsolutePath());
reader = new BufferedInputStream(new FileInputStream(file));
final Props outputProps = new Props();
final String content = Streams.asString(reader).trim();
if (!content.isEmpty()) {
final Map<String, Object> propMap =
(Map<String, Object>) JSONUtils.parseJSONFromString(content);
for (final Map.Entry<String, Object> entry : propMap.entrySet()) {
outputProps.put(entry.getKey(), entry.getValue().toString());
}
}
return outputProps;
} catch (final FileNotFoundException e) {
LOGGER.info(
String.format("File[%s] wasn't found, returning empty props.", file)
);
return new Props();
} catch (final Exception e) {
LOGGER.error(
"Exception thrown when trying to load output file props. Returning empty Props instead of failing. Is this really the best thing to do?",
e);
return new Props();
} finally {
IOUtils.closeQuietly(reader);
}
}
/**
* Create Temp File in a working directory
*
* @param prefix file prefix
* @param suffix file suffix
* @param workingDir working directory
* @return File handle
*/
public static File createOutputPropsFile(final String prefix,
final String suffix, final String workingDir) {
try {
final File directory = new File(workingDir);
final File tempFile = File.createTempFile(prefix, suffix, directory);
return tempFile;
} catch (final IOException e) {
throw new RuntimeException("Failed to create temp output property file ", e);
}
}
/**
* Hard link files and recurse into directories.
*/
public static int createDeepHardlink(final File sourceDir, final File destDir)
throws IOException {
if (!sourceDir.exists()) {
throw new IOException("Source directory " + sourceDir.getPath()
+ " doesn't exist");
} else if (!destDir.exists()) {
throw new IOException("Destination directory " + destDir.getPath()
+ " doesn't exist");
    } else if (sourceDir.isFile() || destDir.isFile()) {
throw new IOException("Source or Destination is not a directory.");
}
final Set<String> paths = new HashSet<>();
createDirsFindFiles(sourceDir, sourceDir, destDir, paths);
int linkCount = 0;
for (String path : paths) {
final File sourceLink = new File(sourceDir, path);
path = destDir + path;
final File[] targetFiles = sourceLink.listFiles();
for (final File targetFile : targetFiles) {
if (targetFile.isFile()) {
final File linkFile = new File(path, targetFile.getName());
// NOTE!! If modifying this, you must run this ignored test manually to validate:
// FileIOUtilsTest#testHardlinkCopyOfBigDir
Files.createLink(linkFile.toPath(), Paths.get(targetFile.getAbsolutePath()));
linkCount++;
}
}
}
return linkCount;
}
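  /*
   * Illustrative sketch (assumed paths): createDeepHardlink(new File("/projects/p1"),
   * new File("/executions/123")) recreates p1's directory tree under /executions/123
   * and hard-links every regular file, so the execution directory shares storage with
   * the project directory instead of copying it.
   */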
private static void createDirsFindFiles(final File baseDir, final File sourceDir,
final File destDir, final Set<String> paths) {
final File[] srcList = sourceDir.listFiles();
final String path = getRelativePath(baseDir, sourceDir);
paths.add(path);
for (final File file : srcList) {
if (file.isDirectory()) {
final File newDestDir = new File(destDir, file.getName());
newDestDir.mkdirs();
createDirsFindFiles(baseDir, file, newDestDir, paths);
}
}
}
private static String getRelativePath(final File basePath, final File sourceDir) {
return sourceDir.getPath().substring(basePath.getPath().length());
}
public static Pair<Integer, Integer> readUtf8File(final File file, final int offset,
final int length, final OutputStream stream) throws IOException {
final byte[] buffer = new byte[length];
final FileInputStream fileStream = new FileInputStream(file);
final long skipped = fileStream.skip(offset);
if (skipped < offset) {
fileStream.close();
return new Pair<>(0, 0);
}
    BufferedInputStream inputStream = null;
    int read = 0;
    try {
      inputStream = new BufferedInputStream(fileStream);
      read = inputStream.read(buffer);
    } finally {
      IOUtils.closeQuietly(inputStream);
    }
    if (read <= 0) {
      return new Pair<>(offset, 0);
    }
    // Trim to the number of bytes actually read, not the requested length.
    final Pair<Integer, Integer> utf8Range = getUtf8Range(buffer, 0, read);
    stream.write(buffer, utf8Range.getFirst(), utf8Range.getSecond());
    return new Pair<>(offset + utf8Range.getFirst(),
        utf8Range.getSecond());
}
public static LogData readUtf8File(final File file, final int fileOffset, final int length)
throws IOException {
final byte[] buffer = new byte[length];
final FileInputStream fileStream = new FileInputStream(file);
final long skipped = fileStream.skip(fileOffset);
if (skipped < fileOffset) {
fileStream.close();
return new LogData(fileOffset, 0, "");
}
BufferedInputStream inputStream = null;
int read = 0;
try {
inputStream = new BufferedInputStream(fileStream);
read = inputStream.read(buffer);
} finally {
IOUtils.closeQuietly(inputStream);
}
if (read <= 0) {
return new LogData(fileOffset, 0, "");
}
final Pair<Integer, Integer> utf8Range = getUtf8Range(buffer, 0, read);
final String outputString =
new String(buffer, utf8Range.getFirst(), utf8Range.getSecond(), StandardCharsets.UTF_8);
return new LogData(fileOffset + utf8Range.getFirst(),
utf8Range.getSecond(), outputString);
}
public static JobMetaData readUtf8MetaDataFile(final File file, final int fileOffset,
final int length) throws IOException {
final byte[] buffer = new byte[length];
final FileInputStream fileStream = new FileInputStream(file);
final long skipped = fileStream.skip(fileOffset);
if (skipped < fileOffset) {
fileStream.close();
return new JobMetaData(fileOffset, 0, "");
}
BufferedInputStream inputStream = null;
int read = 0;
try {
inputStream = new BufferedInputStream(fileStream);
read = inputStream.read(buffer);
} finally {
IOUtils.closeQuietly(inputStream);
}
if (read <= 0) {
return new JobMetaData(fileOffset, 0, "");
}
final Pair<Integer, Integer> utf8Range = getUtf8Range(buffer, 0, read);
final String outputString =
new String(buffer, utf8Range.getFirst(), utf8Range.getSecond(), StandardCharsets.UTF_8);
return new JobMetaData(fileOffset + utf8Range.getFirst(),
utf8Range.getSecond(), outputString);
}
/**
* Returns first and length.
*/
public static Pair<Integer, Integer> getUtf8Range(final byte[] buffer, final int offset,
final int length) {
final int start = getUtf8ByteStart(buffer, offset);
final int end = getUtf8ByteEnd(buffer, offset + length - 1);
return new Pair<>(start, end - start + 1);
}
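  /*
   * Worked example (illustrative): "é" encodes as the two bytes 0xC3 0xA9. If a slice
   * starts on the continuation byte 0xA9, getUtf8ByteStart skips forward past it; if a
   * slice ends on the lead byte 0xC3, getUtf8ByteEnd steps back before it. The returned
   * (first, length) window therefore always decodes cleanly as UTF-8.
   */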
private static int getUtf8ByteStart(final byte[] buffer, final int offset) {
// If it's a proper utf-8, we should find it within the next 6 bytes.
for (int i = offset; i < offset + 6 && i < buffer.length; i++) {
final byte b = buffer[i];
// check the mask 0x80 is 0, which is a proper ascii
if ((0x80 & b) == 0) {
return i;
} else if ((0xC0 & b) == 0xC0) {
return i;
}
}
    // Unrecognized byte sequence; fall back to the original offset.
return offset;
}
private static int getUtf8ByteEnd(final byte[] buffer, final int offset) {
// If it's a proper utf-8, we should find it within the previous 12 bytes.
for (int i = offset; i > offset - 11 && i >= 0; i--) {
final byte b = buffer[i];
// check the mask 0x80 is 0, which is a proper ascii. Just return
if ((0x80 & b) == 0) {
return i;
}
      if ((b & 0xE0) == 0xC0) { // two byte utf8 char. bits 110x xxxx
        if (offset - i >= 1) {
          // The 1 following byte is present, so the character is complete.
          return i + 1;
        }
      } else if ((b & 0xF0) == 0xE0) { // three byte utf8 char. bits 1110 xxxx
        if (offset - i >= 2) {
          // The 2 following bytes are present, so the character is complete.
          return i + 2;
        }
      } else if ((b & 0xF8) == 0xF0) { // four byte utf8 char. bits 1111 0xxx
        if (offset - i >= 3) {
          // The 3 following bytes are present, so the character is complete.
          return i + 3;
        }
      } else if ((b & 0xFC) == 0xF8) { // five byte utf8 char. bits 1111 10xx
        if (offset - i >= 4) {
          // The 4 following bytes are present, so the character is complete.
          return i + 4;
        }
      } else if ((b & 0xFE) == 0xFC) { // six byte utf8 char. bits 1111 110x
        if (offset - i >= 5) {
          // The 5 following bytes are present, so the character is complete.
          return i + 5;
        }
      }
}
    // Unrecognized byte sequence; fall back to the original offset.
return offset;
}
public static class PrefixSuffixFileFilter implements FileFilter {
private final String prefix;
private final String suffix;
public PrefixSuffixFileFilter(final String prefix, final String suffix) {
this.prefix = prefix;
this.suffix = suffix;
}
@Override
public boolean accept(final File pathname) {
if (!pathname.isFile() || pathname.isHidden()) {
return false;
}
final String name = pathname.getName();
final int length = name.length();
if (this.suffix.length() > length || this.prefix.length() > length) {
return false;
}
return name.startsWith(this.prefix) && name.endsWith(this.suffix);
}
}
private static class NullLogger extends Thread {
private final BufferedReader inputReader;
private final CircularBuffer<String> buffer = new CircularBuffer<>(5);
public NullLogger(final InputStream stream) {
this.inputReader = new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8));
}
@Override
public void run() {
try {
while (!Thread.currentThread().isInterrupted()) {
final String line = this.inputReader.readLine();
if (line == null) {
return;
}
this.buffer.append(line);
}
} catch (final IOException e) {
e.printStackTrace();
}
}
public String getLastMessages() {
final StringBuffer messageBuffer = new StringBuffer();
for (final String message : this.buffer) {
messageBuffer.append(message);
messageBuffer.append("\n");
}
return messageBuffer.toString();
}
}
public static class LogData {
private final int offset;
private final int length;
private final String data;
public LogData(final int offset, final int length, final String data) {
this.offset = offset;
this.length = length;
this.data = data;
}
public static LogData createLogDataFromObject(final Map<String, Object> map) {
final int offset = (Integer) map.get("offset");
final int length = (Integer) map.get("length");
final String data = (String) map.get("data");
return new LogData(offset, length, data);
}
public int getOffset() {
return this.offset;
}
public int getLength() {
return this.length;
}
public String getData() {
return this.data;
}
public Map<String, Object> toObject() {
final HashMap<String, Object> map = new HashMap<>();
map.put("offset", this.offset);
map.put("length", this.length);
map.put("data", this.data);
return map;
}
@Override
public String toString() {
return "[offset=" + this.offset + ",length=" + this.length + ",data=" + this.data + "]";
}
}
public static class JobMetaData {
private final int offset;
private final int length;
private final String data;
public JobMetaData(final int offset, final int length, final String data) {
this.offset = offset;
this.length = length;
this.data = data;
}
public static JobMetaData createJobMetaDataFromObject(
final Map<String, Object> map) {
final int offset = (Integer) map.get("offset");
final int length = (Integer) map.get("length");
final String data = (String) map.get("data");
return new JobMetaData(offset, length, data);
}
public int getOffset() {
return this.offset;
}
public int getLength() {
return this.length;
}
public String getData() {
return this.data;
}
public Map<String, Object> toObject() {
final HashMap<String, Object> map = new HashMap<>();
map.put("offset", this.offset);
map.put("length", this.length);
map.put("data", this.data);
return map;
}
@Override
public String toString() {
return "[offset=" + this.offset + ",length=" + this.length + ",data=" + this.data + "]";
}
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/GZIPUtils.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.utils;
import azkaban.db.EncodingType;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
import org.apache.commons.io.IOUtils;
public class GZIPUtils {
public static byte[] gzipString(final String str, final String encType)
throws IOException {
final byte[] stringData = str.getBytes(encType);
return gzipBytes(stringData);
}
public static byte[] gzipBytes(final byte[] bytes) throws IOException {
return gzipBytes(bytes, 0, bytes.length);
}
public static byte[] gzipBytes(final byte[] bytes, final int offset, final int length)
throws IOException {
final ByteArrayOutputStream byteOutputStream = new ByteArrayOutputStream();
    // try-with-resources finishes and closes the gzip stream even if the write fails.
    try (final GZIPOutputStream gzipStream = new GZIPOutputStream(byteOutputStream)) {
      gzipStream.write(bytes, offset, length);
    }
return byteOutputStream.toByteArray();
}
public static byte[] unGzipBytes(final byte[] bytes) throws IOException {
final ByteArrayInputStream byteInputStream = new ByteArrayInputStream(bytes);
final GZIPInputStream gzipInputStream = new GZIPInputStream(byteInputStream);
final ByteArrayOutputStream byteOutputStream = new ByteArrayOutputStream();
IOUtils.copy(gzipInputStream, byteOutputStream);
return byteOutputStream.toByteArray();
}
public static String unGzipString(final byte[] bytes, final String encType)
throws IOException {
final byte[] response = unGzipBytes(bytes);
return new String(response, encType);
}
public static Object transformBytesToObject(final byte[] data, final EncodingType encType)
throws IOException {
if (encType == EncodingType.GZIP) {
final String jsonString = GZIPUtils.unGzipString(data, "UTF-8");
return JSONUtils.parseJSONFromString(jsonString);
} else {
final String jsonString = new String(data, "UTF-8");
return JSONUtils.parseJSONFromString(jsonString);
}
}
}
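// Illustrative round-trip sketch (not part of the original file).
class GZIPUtilsUsageExample {
  public static void main(final String[] args) throws IOException {
    final byte[] compressed = GZIPUtils.gzipString("hello azkaban", "UTF-8");
    final String restored = GZIPUtils.unGzipString(compressed, "UTF-8");
    System.out.println(restored); // prints: hello azkaban
  }
}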
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/JavaxMailSender.java
|
package azkaban.utils;
import com.sun.mail.smtp.SMTPTransport;
import java.util.Properties;
import javax.mail.Address;
import javax.mail.Message;
import javax.mail.MessagingException;
import javax.mail.NoSuchProviderException;
import javax.mail.Session;
import javax.mail.internet.MimeMessage;
/**
* Wraps javax.mail features, mostly because Session is a final class and can't be mocked.
*/
public class JavaxMailSender {
public static final String PROTOCOL = "smtp";
private final Session session;
private final SMTPTransport t;
public JavaxMailSender(final Properties props)
throws NoSuchProviderException {
this.session = Session.getInstance(props, null);
this.t = (SMTPTransport) this.session.getTransport(PROTOCOL);
}
public Message createMessage() {
return new MimeMessage(this.session);
}
public void connect(final String mailHost, final int mailPort, final String mailUser,
final String mailPassword) throws MessagingException {
this.t.connect(mailHost, mailPort, mailUser, mailPassword);
}
public void connect() throws MessagingException {
this.t.connect();
}
public void sendMessage(final Message message, final Address[] recipients)
throws MessagingException {
this.t.sendMessage(message, recipients);
}
public void close() throws MessagingException {
this.t.close();
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/LogGobbler.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.utils;
import com.google.common.base.Joiner;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.Reader;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
public class LogGobbler extends Thread {
private final BufferedReader inputReader;
private final Logger logger;
private final Level loggingLevel;
private final CircularBuffer<String> buffer;
public LogGobbler(final Reader inputReader, final Logger logger,
final Level level, final int bufferLines) {
this.inputReader = new BufferedReader(inputReader);
this.logger = logger;
this.loggingLevel = level;
this.buffer = new CircularBuffer<>(bufferLines);
}
@Override
public void run() {
try {
while (!Thread.currentThread().isInterrupted()) {
final String line = this.inputReader.readLine();
if (line == null) {
return;
}
this.buffer.append(line);
log(line);
}
} catch (final IOException e) {
error("Error reading from logging stream:", e);
}
}
private void log(final String message) {
if (this.logger != null) {
this.logger.log(this.loggingLevel, message);
}
}
private void error(final String message, final Exception e) {
if (this.logger != null) {
this.logger.error(message, e);
}
}
private void info(final String message, final Exception e) {
if (this.logger != null) {
this.logger.info(message, e);
}
}
public void awaitCompletion(final long waitMs) {
try {
join(waitMs);
} catch (final InterruptedException e) {
info("I/O thread interrupted.", e);
}
}
public String getRecentLog() {
return Joiner.on(System.getProperty("line.separator")).join(this.buffer);
}
}
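// Illustrative usage sketch (not part of the original file): gobbling a child
// process's stderr. The command and buffer size are placeholder assumptions.
class LogGobblerUsageExample {
  public static void main(final String[] args) throws Exception {
    final Process process = new ProcessBuilder("ls", "-l").start();
    final LogGobbler errGobbler = new LogGobbler(
        new java.io.InputStreamReader(process.getErrorStream(),
            java.nio.charset.StandardCharsets.UTF_8),
        Logger.getLogger("job"), Level.ERROR, 30);
    errGobbler.start();
    process.waitFor();
    errGobbler.awaitCompletion(5000);
    System.out.println(errGobbler.getRecentLog());
  }
}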
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/MessageAttachmentExceededMaximumSizeException.java
|
package azkaban.utils;
import javax.mail.MessagingException;
/**
* To indicate the attachment size is larger than allowed size
*
* @author hluu
*/
public class MessageAttachmentExceededMaximumSizeException extends
MessagingException {
public MessageAttachmentExceededMaximumSizeException() {
super();
}
public MessageAttachmentExceededMaximumSizeException(final String s) {
super(s);
}
public MessageAttachmentExceededMaximumSizeException(final String s, final Exception e) {
super(s, e);
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/OsCpuUtil.java
|
/*
* Copyright 2019 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.utils;
import java.io.BufferedReader;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayDeque;
import java.util.Deque;
import javax.inject.Inject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Utility class for getting CPU usage (in percentage)
*
* CPU information is obtained from /proc/stat, so only Linux systems will support this class.
* Calculation procedure is taken from:
* https://github.com/Leo-G/DevopsWiki/wiki/How-Linux-CPU-Usage-Time-and-Percentage-is-calculated
*
* Assumes frequent calls at regular intervals to {@link #getCpuLoad() getCpuLoad}. The length of
* time over which cpu load is calculated can be adjusted with parameter
* {@code numCpuStatsToCollect} in the class constructor and how often {@link #getCpuLoad()
* getCpuLoad} is called.
* Example: if {@link #getCpuLoad() getCpuLoad} is called every second and {@code
* numCpuStatsToCollect} is set to 60 then each call to {@link #getCpuLoad() getCpuLoad} returns
* the cpu load over the last minute.
*/
public class OsCpuUtil {
private static final Logger logger = LoggerFactory.getLogger(OsCpuUtil.class);
private static final String CPU_STAT_FILE = "/proc/stat";
private final Deque<CpuStats> collectedCpuStats;
@Inject
public OsCpuUtil(int numCpuStatsToCollect) {
if (numCpuStatsToCollect <= 0) {
numCpuStatsToCollect = 1;
}
this.collectedCpuStats = new ArrayDeque<>(numCpuStatsToCollect);
final CpuStats cpuStats = getCpuStats();
if (cpuStats != null) {
for (int i = 0; i < numCpuStatsToCollect; i++) {
this.collectedCpuStats.push(cpuStats);
}
}
}
/**
* Collects a new cpu stat data point and calculates cpu load with it and the oldest one
* collected which is then deleted.
*
* @return percentage of CPU usage. -1 if there are no cpu stats.
*/
public double getCpuLoad() {
if (this.collectedCpuStats.isEmpty()) {
return -1;
}
final CpuStats newestCpuStats = getCpuStats();
if (newestCpuStats == null) {
return -1;
}
final CpuStats oldestCpuStats = this.collectedCpuStats.pollLast();
this.collectedCpuStats.push(newestCpuStats);
return calcCpuLoad(oldestCpuStats, newestCpuStats);
}
private double calcCpuLoad(final CpuStats startCpuStats, final CpuStats endCpuStats) {
final long startSysUptime = startCpuStats.getSysUptime();
final long startTimeCpuIdle = startCpuStats.getTimeCpuIdle();
final long endSysUptime = endCpuStats.getSysUptime();
final long endTimeCpuIdle = endCpuStats.getTimeCpuIdle();
if (endSysUptime == startSysUptime) {
logger.error("Failed to calculate cpu load: division by zero");
return -1.0;
}
final double percentageCpuIdle =
(100.0 * (endTimeCpuIdle - startTimeCpuIdle)) / (endSysUptime - startSysUptime);
return 100.0 - percentageCpuIdle;
}
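  /*
   * Worked example (illustrative numbers): if total cpu time advanced by 1000 jiffies
   * between the two samples and idle time advanced by 250, the idle share is
   * 100 * 250 / 1000 = 25%, so the returned load is 100 - 25 = 75%.
   */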
private CpuStats getCpuStats() {
if (!Files.isRegularFile(Paths.get(CPU_STAT_FILE))) {
// Mac doesn't use proc pseudo files for example.
return null;
}
final String cpuLine = getCpuLineFromStatFile();
if (cpuLine == null) {
return null;
}
return getCpuStatsFromLine(cpuLine);
}
private String getCpuLineFromStatFile() {
BufferedReader br = null;
try {
br = Files.newBufferedReader(Paths.get(CPU_STAT_FILE), StandardCharsets.UTF_8);
String line;
while ((line = br.readLine()) != null) {
// looking for a line starting with "cpu<space>" which aggregates the values in all of
// the other "cpuN" lines.
if (line.startsWith("cpu ")) {
return line;
}
}
} catch (final IOException e) {
final String errMsg = "Failed to read cpu stat file: " + CPU_STAT_FILE;
logger.error(errMsg, e);
} finally {
if (br != null) {
try {
br.close();
} catch (final IOException e) {
final String errMsg = "Failed to close cpu stat file: " + CPU_STAT_FILE;
logger.error(errMsg, e);
}
}
}
return null;
}
/**
* Parses cpu usage information from /proc/stat file.
* Example of line expected with the meanings of the values below:
* cpu 4705 356 584 3699 23 23 0 0 0 0
* ---- user nice system idle iowait irq softirq steal guest guest_nice
*
* Method visible within the package for testing purposes.
*
* @param line the text containing cpu usage statistics
* @return CpuStats object. null if there is an error.
*/
CpuStats getCpuStatsFromLine(final String line) {
try {
final String[] cpuInfo = line.split("\\s+");
final long user = Long.parseLong(cpuInfo[1]);
final long nice = Long.parseLong(cpuInfo[2]);
final long system = Long.parseLong(cpuInfo[3]);
final long idle = Long.parseLong(cpuInfo[4]);
final long iowait = Long.parseLong(cpuInfo[5]);
final long irq = Long.parseLong(cpuInfo[6]);
final long softirq = Long.parseLong(cpuInfo[7]);
final long steal = Long.parseLong(cpuInfo[8]);
      // guest and guest_nice are already included in user and nice respectively, so don't add them
final long totalCpuTime = user + nice + system + idle + iowait + irq + softirq + steal;
final long idleCpuTime = idle + iowait;
return new CpuStats(totalCpuTime, idleCpuTime);
} catch (final NumberFormatException | ArrayIndexOutOfBoundsException e) {
final String errMsg = "Failed to parse cpu stats from line: " + line;
logger.error(errMsg, e);
}
return null;
}
static class CpuStats {
private final long sysUptime;
private final long timeCpuIdle;
public CpuStats(final long sysUptime, final long timeCpuIdle) {
this.sysUptime = sysUptime;
this.timeCpuIdle = timeCpuIdle;
}
public long getSysUptime() {
return this.sysUptime;
}
public long getTimeCpuIdle() {
return this.timeCpuIdle;
}
}
}
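// Illustrative usage sketch (not part of the original file): sampling once per second
// so, with 60 retained samples, each reading reflects roughly the last minute.
class OsCpuUtilUsageExample {
  public static void main(final String[] args) throws InterruptedException {
    final OsCpuUtil cpuUtil = new OsCpuUtil(60);
    for (int i = 0; i < 5; i++) {
      Thread.sleep(1000L);
      System.out.println("cpu load %: " + cpuUtil.getCpuLoad()); // -1 on non-Linux systems
    }
  }
}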
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/OsMemoryUtil.java
|
package azkaban.utils;
import com.google.common.collect.ImmutableSet;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.List;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Utility class for getting system memory information
*
* Note: This check is designed for Linux only.
*/
class OsMemoryUtil {
private static final Logger logger = LoggerFactory.getLogger(OsMemoryUtil.class);
// This file is used by Linux. It doesn't exist on Mac for example.
private static final String MEM_INFO_FILE = "/proc/meminfo";
static final ImmutableSet<String> MEM_KEYS = ImmutableSet
.of("MemFree", "Buffers", "Cached", "SwapFree");
static final ImmutableSet<String> MEM_AVAILABLE_KEYS = ImmutableSet.of("MemFree", "Active(file)",
"Inactive(file)", "SReclaimable");
/**
* Includes OS cache and free swap.
*
* @return the total free memory size of the OS. 0 if there is an error or the OS doesn't support
* this memory check.
*/
long getOsTotalFreeMemorySize() {
return getAggregatedFreeMemorySize(MEM_KEYS);
}
/**
* @return the free physical memory size of the OS. 0 if there is an error or the OS doesn't
* support this memory check.
*/
long getOsFreePhysicalMemorySize() {
return getAggregatedFreeMemorySize(MEM_AVAILABLE_KEYS);
}
private long getAggregatedFreeMemorySize(final Set<String> memKeysToCombine) {
if (!Files.isRegularFile(Paths.get(MEM_INFO_FILE))) {
// Mac doesn't support /proc/meminfo for example.
return 0;
}
final List<String> lines;
// The file /proc/meminfo is assumed to contain only ASCII characters.
// The assumption is that the file is not too big. So it is simpler to read the whole file
// into memory.
try {
lines = Files.readAllLines(Paths.get(MEM_INFO_FILE), StandardCharsets.UTF_8);
} catch (final IOException e) {
final String errMsg = "Failed to open mem info file: " + MEM_INFO_FILE;
logger.error(errMsg, e);
return 0;
}
return getOsTotalFreeMemorySizeFromStrings(lines, memKeysToCombine);
}
/**
* @param lines text lines from the meminfo file
* @return the total size of free memory in kB. 0 if there is an error.
*/
long getOsTotalFreeMemorySizeFromStrings(final List<String> lines,
final Set<String> memKeysToCombine) {
long totalFree = 0;
int count = 0;
for (final String line : lines) {
for (final String keyName : memKeysToCombine) {
if (line.startsWith(keyName)) {
count++;
final long size = parseMemoryLine(line);
if (size == 0) {
return 0;
}
totalFree += size;
}
}
}
final int length = memKeysToCombine.size();
if (count != length) {
final String errMsg = String
.format("Expect %d keys in the meminfo file. Got %d. content: %s", length, count, lines);
logger.error(errMsg);
totalFree = 0;
}
return totalFree;
}
/**
* Example file:
* <pre>
* $ cat /proc/meminfo
* MemTotal:       65894008 kB
* MemFree:        59400536 kB
* Buffers:          409348 kB
* Cached:          4290236 kB
* SwapCached:            0 kB
* </pre>
*
* Make the method package private to make unit testing easier. Otherwise it can be made private.
*
* @param line the text for a memory usage statistics we are interested in
* @return size of the memory. unit kB. 0 if there is an error.
*/
long parseMemoryLine(final String line) {
final int idx1 = line.indexOf(":");
final int idx2 = line.lastIndexOf("kB");
final String sizeString = line.substring(idx1 + 1, idx2 - 1).trim();
try {
return Long.parseLong(sizeString);
} catch (final NumberFormatException e) {
final String err = "Failed to parse the meminfo file. Line: " + line;
logger.error(err);
return 0;
}
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/Pair.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.utils;
/**
* Pair of values.
*/
public class Pair<F, S> {
private final F first;
private final S second;
public Pair(final F first, final S second) {
this.first = first;
this.second = second;
}
public F getFirst() {
return this.first;
}
public S getSecond() {
return this.second;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((this.first == null) ? 0 : this.first.hashCode());
result = prime * result + ((this.second == null) ? 0 : this.second.hashCode());
return result;
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
final Pair other = (Pair) obj;
if (this.first == null) {
if (other.first != null) {
return false;
}
} else if (!this.first.equals(other.first)) {
return false;
}
if (this.second == null) {
if (other.second != null) {
return false;
}
} else if (!this.second.equals(other.second)) {
return false;
}
return true;
}
@Override
public String toString() {
return "{" + this.first.toString() + "," + this.second.toString() + "}";
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/PatternLayoutEscaped.java
|
package azkaban.utils;
import org.apache.log4j.PatternLayout;
import org.apache.log4j.spi.LoggingEvent;
/**
* When we use the log4j Kafka appender, it seems that the appender simply does not log the stack
* trace anywhere. Seeing as the stack trace is a very important piece of information, we create
* our own PatternLayout class that appends the stack trace to the log message that reported it,
* so that all the information regarding that error can be found in one place.
*/
public class PatternLayoutEscaped extends PatternLayout {
public PatternLayoutEscaped(final String s) {
super(s);
}
public PatternLayoutEscaped() {
super();
}
@Override
public String format(final LoggingEvent event) {
if (event.getMessage() instanceof String) {
return super.format(appendStackTraceToEvent(event));
}
return super.format(event);
}
/**
* Create a copy of event, but append a stack trace to the message (if it exists). Then it escapes
* the backslashes, tabs, newlines and quotes in its message as we are sending it as JSON and we
* don't want any corruption of the JSON object.
*/
private LoggingEvent appendStackTraceToEvent(final LoggingEvent event) {
String message = event.getMessage().toString();
// If there is a stack trace available, print it out
if (event.getThrowableInformation() != null) {
final String[] s = event.getThrowableStrRep();
for (final String line : s) {
message += "\n" + line;
}
}
message = message
.replace("\\", "\\\\")
.replace("\n", "\\n")
.replace("\"", "\\\"")
.replace("\t", "\\t");
final Throwable throwable = event.getThrowableInformation() == null ? null
: event.getThrowableInformation().getThrowable();
return new LoggingEvent(event.getFQNOfLoggerClass(),
event.getLogger(),
event.getTimeStamp(),
event.getLevel(),
message,
throwable);
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/RestfulApiClient.java
|
/*
* Copyright 2015 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.utils;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.NameValuePair;
import org.apache.http.client.entity.UrlEncodedFormEntity;
import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.message.BasicNameValuePair;
import org.apache.log4j.Logger;
/**
* This class handles the communication between the application and a RESTful API based web server.
*
* @param <T> type of the returned response object. Note: the idea of this abstract class is to
* provide a wrapper for the logic around HTTP layer communication so development work can take this
* as a black box and focus on processing the result. With that said, the abstract class is
* provided as a template, which ideally can support different types of returned objects
* (Dictionary, xmlDoc, text etc.)
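*
* <p>A minimal subclass sketch (hypothetical, for illustration only):
* <pre>{@code
* public class StringApiClient extends RestfulApiClient<String> {
*   protected String parseResponse(final HttpResponse response) throws IOException {
*     // EntityUtils (org.apache.http.util) drains and closes the entity stream.
*     return EntityUtils.toString(response.getEntity());
*   }
* }
* }</pre>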
*/
public abstract class RestfulApiClient<T> {
protected static Logger logger = Logger.getLogger(RestfulApiClient.class);
/**
* helper function to build a valid URI.
*
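* <p>For example (hypothetical values):
* <pre>{@code
* URI uri = buildUri("localhost", 12321, "/executor", true, new Pair<>("action", "ping"));
* // -> http://localhost:12321/executor?action=ping
* }</pre>
*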
* @param host host name.
* @param port host port.
* @param path extra path after host.
* @param isHttp indicates whether HTTP (true) or HTTPS (false) should be used.
* @param params extra query parameters.
* @return the URI built from the inputs.
*/
public static URI buildUri(final String host, final int port, final String path,
final boolean isHttp, final Pair<String, String>... params) throws IOException {
final URIBuilder builder = new URIBuilder();
builder.setScheme(isHttp ? "http" : "https").setHost(host).setPort(port);
if (null != path && path.length() > 0) {
builder.setPath(path);
}
if (params != null) {
for (final Pair<String, String> pair : params) {
builder.setParameter(pair.getFirst(), pair.getSecond());
}
}
try {
return builder.build();
} catch (final URISyntaxException e) {
throw new IOException(e);
}
}
/**
* helper function to fill the request with header entries and the POST body.
*/
private static HttpEntityEnclosingRequestBase completeRequest(
final HttpEntityEnclosingRequestBase request,
final List<Pair<String, String>> params) throws UnsupportedEncodingException {
if (request != null) {
if (null != params && !params.isEmpty()) {
final List<NameValuePair> formParams = params.stream()
.map(pair -> new BasicNameValuePair(pair.getFirst(), pair.getSecond()))
.collect(Collectors.toList());
final HttpEntity entity = new UrlEncodedFormEntity(formParams, "UTF-8");
request.setEntity(entity);
}
}
return request;
}
/**
* Method to transform the response returned by the httpClient into the type specified. Note:
* the method needs to handle cases such as a failed request. Also, the method is not supposed to
* pass the response object out via the return value, as the response will be closed after the execution
* steps out of the method context.
**/
protected abstract T parseResponse(HttpResponse response) throws IOException;
/**
* function to perform a Post http request.
*
* @param uri the URI of the request.
* @param params the form params to be posted, optional.
* @return the response object type of which is specified by user.
* @throws IOException if the request could not be built or executed.
*/
public T httpPost(final URI uri, final List<Pair<String, String>> params) throws IOException {
// shortcut if the passed url is invalid.
if (null == uri) {
logger.error(" unable to perform httpPost as the passed uri is null.");
return null;
}
final HttpPost post = new HttpPost(uri);
return this.sendAndReturn(completeRequest(post, params));
}
/**
* function to dispatch the request and pass back the response.
*/
protected T sendAndReturn(final HttpUriRequest request) throws IOException {
try (CloseableHttpClient client = HttpClients.createDefault()) {
return this.parseResponse(client.execute(request));
}
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/SplitterOutputStream.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.utils;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.List;
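/**
* An OutputStream that duplicates every write to all of the wrapped output streams, similar to
* the Unix tee command. flush() and close() are attempted on every stream even if one of them
* throws; the last IOException encountered, if any, is rethrown.
*/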
public class SplitterOutputStream extends OutputStream {
List<OutputStream> outputs;
public SplitterOutputStream(final OutputStream... outputs) {
this.outputs = new ArrayList<>(outputs.length);
for (final OutputStream output : outputs) {
this.outputs.add(output);
}
}
@Override
public void write(final int b) throws IOException {
for (final OutputStream output : this.outputs) {
output.write(b);
}
}
@Override
public void write(final byte[] b) throws IOException {
for (final OutputStream output : this.outputs) {
output.write(b);
}
}
@Override
public void write(final byte[] b, final int off, final int len) throws IOException {
for (final OutputStream output : this.outputs) {
output.write(b, off, len);
}
}
@Override
public void flush() throws IOException {
IOException exception = null;
for (final OutputStream output : this.outputs) {
try {
output.flush();
} catch (final IOException e) {
exception = e;
}
}
if (exception != null) {
throw exception;
}
}
@Override
public void close() throws IOException {
IOException exception = null;
for (final OutputStream output : this.outputs) {
try {
output.close();
} catch (final IOException e) {
exception = e;
}
}
if (exception != null) {
throw exception;
}
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/StdOutErrRedirect.java
|
/*
* Copyright 2016 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.utils;
import java.io.OutputStream;
import java.io.PrintStream;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
/**
* A class to encapsulate the redirection of stdout and stderr to log4j. This allows us to catch
* messages written to the console (although we should not be using System.out to write out).
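*
* <p>Typical use: call {@code StdOutErrRedirect.redirectOutAndErrToLog()} once at startup; after
* that, {@code System.out.println("...")} is logged at INFO level and {@code System.err} output
* at ERROR level.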
*/
public class StdOutErrRedirect {
private static final Logger logger = Logger.getLogger(StdOutErrRedirect.class);
private static final PrintStream infoStream = createStream(System.out, Level.INFO);
private static final PrintStream errorStream = createStream(System.err, Level.ERROR); // wrap stderr, not stdout
public static void redirectOutAndErrToLog() {
System.setOut(infoStream);
System.setErr(errorStream);
}
private static PrintStream createStream(final PrintStream stream, final Level level) {
return new LogStream(stream, level);
}
private static class LogStream extends PrintStream {
private final Level level;
public LogStream(final OutputStream out, final Level level) {
super(out);
this.level = level;
}
// Underlying mechanism to log to log4j - all print methods will use this
private void write(final String string) {
logger.log(this.level, string);
}
// String
@Override
public void println(final String string) {
print(string);
}
@Override
public void print(final String string) {
write(string);
}
// Boolean
@Override
public void println(final boolean bool) {
print(bool);
}
@Override
public void print(final boolean bool) {
write(String.valueOf(bool));
}
// Int
@Override
public void println(final int i) {
print(i);
}
@Override
public void print(final int i) {
write(String.valueOf(i));
}
// Float
@Override
public void println(final float f) {
print(f);
}
@Override
public void print(final float f) {
write(String.valueOf(f));
}
// Char
@Override
public void println(final char c) {
print(c);
}
@Override
public void print(final char c) {
write(String.valueOf(c));
}
// Long
@Override
public void println(final long l) {
print(l);
}
@Override
public void print(final long l) {
write(String.valueOf(l));
}
// Double
@Override
public void println(final double d) {
print(d);
}
@Override
public void print(final double d) {
write(String.valueOf(d));
}
// Char []
@Override
public void println(final char[] c) {
print(c);
}
@Override
public void print(final char[] c) {
write(new String(c));
}
// Object
@Override
public void println(final Object o) {
print(o);
}
@Override
public void print(final Object o) {
write(o.toString());
}
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/StorageUtils.java
|
/*
* Copyright 2019 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.utils;
import azkaban.spi.Dependency;
import org.apache.commons.codec.binary.Hex;
import static azkaban.utils.ThinArchiveUtils.convertIvyCoordinateToPath;
public class StorageUtils {
public static String getTargetProjectFilename(final int projectId, byte[] hash) {
return String.format("%s-%s.zip",
String.valueOf(projectId),
new String(Hex.encodeHex(hash))
);
}
public static String getTargetDependencyPath(final Dependency dep) {
// For simplicity, we will set the path to store dependencies the same as in the URL used
// for fetching the dependencies. It will follow the pattern:
// samsa/samsa-api/0.6.0/samsa-api-0.6.0.jar
return convertIvyCoordinateToPath(dep);
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/StringUtils.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.utils;
import java.util.Collection;
import java.util.List;
import java.util.regex.Pattern;
public class StringUtils {
public static final char SINGLE_QUOTE = '\'';
public static final char DOUBLE_QUOTE = '\"';
private static final Pattern BROWSER_PATTERN = Pattern
.compile(".*Gecko.*|.*AppleWebKit.*|.*Trident.*|.*Chrome.*");
public static String shellQuote(final String s, final char quoteCh) {
final StringBuffer buf = new StringBuffer(s.length() + 2);
buf.append(quoteCh);
for (int i = 0; i < s.length(); i++) {
final char ch = s.charAt(i);
if (ch == quoteCh) {
buf.append('\\');
}
buf.append(ch);
}
buf.append(quoteCh);
return buf.toString();
}
@Deprecated
public static String join(final List<String> list, final String delimiter) {
final StringBuffer buffer = new StringBuffer();
for (final String str : list) {
buffer.append(str);
buffer.append(delimiter);
}
return buffer.toString();
}
/**
* Use this when you don't want to include Apache Common's string for plugins.
*/
public static String join(final Collection<String> list, final String delimiter) {
final StringBuffer buffer = new StringBuffer();
for (final String str : list) {
buffer.append(str);
buffer.append(delimiter);
}
return buffer.toString();
}
/**
* Don't bother to add delimiter for last element
*
* @return String - elements in the list separated by delimiter
*/
public static String join2(final Collection<String> list, final String delimiter) {
final StringBuffer buffer = new StringBuffer();
boolean first = true;
for (final String str : list) {
if (!first) {
buffer.append(delimiter);
}
buffer.append(str);
first = false;
}
return buffer.toString();
}
public static boolean isFromBrowser(final String userAgent) {
if (userAgent == null) {
return false;
}
return BROWSER_PATTERN.matcher(userAgent).matches();
}
public static boolean isEmpty(final String value) {
if (value == null) {
return true;
}
return (value.trim().isEmpty());
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/SwapQueue.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.utils;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
/**
* Queue that swaps its lists. Allows for non-blocking writes when reading. Swap should be called
* before every read.
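*
* <p>A minimal usage sketch (hypothetical call sequence):
* <pre>{@code
* SwapQueue<String> queue = new SwapQueue<>();
* queue.add("event");        // writers append to the secondary list
* queue.swap();              // promote buffered writes before reading
* for (String s : queue) {   // iterate the primary list; writers keep appending unblocked
*   // process s
* }
* }</pre>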
*/
public class SwapQueue<T> implements Iterable<T> {
ArrayList<T> primaryQueue;
ArrayList<T> secondaryQueue;
public SwapQueue() {
this.primaryQueue = new ArrayList<>();
this.secondaryQueue = new ArrayList<>();
}
/**
* Swaps primaryQueue with secondary queue. The previous primary queue will be released.
*/
public synchronized void swap() {
this.primaryQueue = this.secondaryQueue;
this.secondaryQueue = new ArrayList<>();
}
/**
* Returns a count of the secondary queue.
*/
public synchronized int getSwapQueueSize() {
return this.secondaryQueue.size();
}
public synchronized int getPrimarySize() {
return this.primaryQueue.size();
}
public synchronized void addAll(final Collection<T> col) {
this.secondaryQueue.addAll(col);
}
/**
* Returns the combined size of the secondary and primary queues.
*/
public synchronized int getSize() {
return this.secondaryQueue.size() + this.primaryQueue.size();
}
public synchronized void add(final T element) {
this.secondaryQueue.add(element);
}
/**
* Returns iterator over the primary queue.
*/
@Override
public synchronized Iterator<T> iterator() {
return this.primaryQueue.iterator();
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/SystemMemoryInfo.java
|
package azkaban.utils;
import javax.inject.Inject;
import org.slf4j.LoggerFactory;
/**
* This class is used to maintain system memory information. Processes utilizing a large amount of
* memory should consult this class to see if the system has enough memory to proceed with the
* operation.
*
* Memory information is obtained from /proc/meminfo, so only Unix/Linux-like systems will support
* this class.
*
* All the memory size used in this function is in KB.
*/
public class SystemMemoryInfo {
private static final org.slf4j.Logger logger = LoggerFactory.getLogger(SystemMemoryInfo.class);
private static final long LOW_MEM_THRESHOLD = 3L * 1024L * 1024L; //3 GB
private final OsMemoryUtil util;
@Inject
public SystemMemoryInfo(final OsMemoryUtil util) {
this.util = util;
}
/**
* @param xmx Xmx for the process
* @return true if the system can satisfy the memory request
*
* Given the Xmx value (in kB) used by a java process, determines whether the system can satisfy
* the memory request.
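*
* <p>For example (hypothetical numbers): with 4 GB (4194304 kB) free and a 2 GB (2097152 kB) Xmx
* request, the remainder of 2097152 kB is below the 3 GB threshold, so the request is declined.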
*/
public boolean canSystemGrantMemory(final long xmx) {
final long freeMemSize = this.util.getOsTotalFreeMemorySize();
if (freeMemSize == 0) {
// Fail open.
// On the platforms that don't support the mem info file, the returned size will be 0.
return true;
}
if (freeMemSize - xmx < LOW_MEM_THRESHOLD) {
logger.info(String.format(
"Free memory amount minus Xmx (%d - %d kb) is less than low mem threshold (%d kb), "
+ "memory request declined.",
freeMemSize, xmx, LOW_MEM_THRESHOLD));
return false;
}
return true;
}
/**
* @param memKb represents a memory value in kb
* @return true if available physical memory is greater than memKb
*
* Verifies if the currently available physical memory is greater than a given value.
*/
public boolean isFreePhysicalMemoryAbove(final long memKb) {
final long freeMemSize = this.util.getOsFreePhysicalMemorySize();
if (freeMemSize == 0) {
// Fail open.
// On the platforms that don't support the mem info file, the returned size will be 0.
return true;
}
return freeMemSize - memKb > 0;
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/ThinArchiveUtils.java
|
/*
* Copyright 2019 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.utils;
import azkaban.spi.Dependency;
import azkaban.spi.DependencyFile;
import azkaban.spi.Storage;
import java.io.File;
import java.io.IOException;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.commons.codec.DecoderException;
import org.apache.commons.io.FileUtils;
import org.apache.log4j.Logger;
public class ThinArchiveUtils {
// Root of folder in storage containing startup dependencies
public static final String DEPENDENCY_STORAGE_ROOT_PATH_PROP = "dependency.storage.path.prefix";
public static File getStartupDependenciesFile(final File projectFolder) {
return new File(projectFolder.getPath() + "/app-meta/startup-dependencies.json");
}
public static DependencyFile getDependencyFile(final File projectFolder, final Dependency d) {
return d.makeDependencyFile(new File(projectFolder, d.getDestination() + File.separator + d.getFileName()));
}
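/**
* Parses the startup dependencies from raw JSON of the form
* <pre>{@code
* { "dependencies": [ ... ] }
* }</pre>
* where each entry in the list is a JSON object whose fields are defined by {@link Dependency}.
*/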
public static Set<Dependency> parseStartupDependencies(final String rawJson) throws IOException, InvalidHashException {
if (rawJson.isEmpty()) {
return new HashSet<>();
}
List<Map<String, String>> rawParseResult =
((HashMap<String, List<Map<String, String>>>) JSONUtils.parseJSONFromString(rawJson)).get("dependencies");
if (rawParseResult == null) {
throw new IOException("Could not find 'dependencies' key in startup-dependencies.json file.");
}
Set<Dependency> finalDependencies = new HashSet<>();
for (Map<String, String> rawDependency : rawParseResult) {
finalDependencies.add(new Dependency(rawDependency));
}
return finalDependencies;
}
public static Set<Dependency> parseStartupDependencies(final File f) throws IOException, InvalidHashException {
return parseStartupDependencies(FileUtils.readFileToString(f));
}
public static void writeStartupDependencies(final File f,
final Set<Dependency> dependencies) throws IOException {
Map<String, Set<Dependency>> outputFormat = new HashMap<>();
outputFormat.put("dependencies", dependencies);
FileUtils.writeStringToFile(f, JSONUtils.toJSON(outputFormat));
}
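/**
* Converts ivy coordinates of the form group:artifact:version into a relative path, e.g.
* coordinates samsa:samsa-api:0.6.0 map to samsa/samsa-api/0.6.0/samsa-api-0.6.0.jar.
*/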
public static String convertIvyCoordinateToPath(final Dependency dep) {
String[] coordinateParts = dep.getIvyCoordinates().split(":");
return coordinateParts[0].replace(".", "/") + "/"
+ coordinateParts[1] + "/"
+ coordinateParts[2] + "/"
+ dep.getFileName();
}
/**
* Taking a string with comma separated file paths of jars within a project folder: if the project has a
* startup-dependencies.json file (therefore is from a thin archive) each file path will be compared against the
* cached dependencies listed in startup-dependencies.json. If a match is found, the file path will be replaced
* with a hdfs:// path to the cached dependency. If a match is not found, the original local file path will be
* included in the returned comma separated list. If the project does not have a startup-dependencies.json file
* (is not a thin archive), the string of file paths passed in will be returned without modification.
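*
* <p>A sketch of the expected transformation (hypothetical paths, assuming the dependency root
* property is set to hdfs://deps):
* <pre>{@code
* // before: "lib/samsa-api-0.6.0.jar,lib/local-only.jar"
* // after:  "hdfs://deps/samsa/samsa-api/0.6.0/samsa-api-0.6.0.jar,lib/local-only.jar"
* }</pre>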
*
* @param projectFolder root folder of uncompressed project
* @param localJarSpec string of comma separated file paths to jar dependencies within the project folder
* MUST BE RELATIVE PATHS!!!
* @param jobProps job properties
* @return list of file path strings and hdfs:// path strings, one for each dependency
*/
public static String replaceLocalPathsWithStoragePaths(final File projectFolder,
String localJarSpec, final Props jobProps, final Logger log) {
File startupDependenciesFile = getStartupDependenciesFile(projectFolder);
String baseDependencyPath = jobProps.get(DEPENDENCY_STORAGE_ROOT_PATH_PROP);
if (!startupDependenciesFile.exists() || baseDependencyPath == null) {
// This is not a thin archive OR we don't have a baseDependencyPath - so we can't do any replacing
log.debug("Skipping replacing dependency paths with common HDFS startup dependencies.");
return localJarSpec;
}
String[] localDependencies = localJarSpec.split(",");
try {
Set<Dependency> startupDeps = parseStartupDependencies(startupDependenciesFile);
Map<String, Dependency> pathToDep = new HashMap<>();
for (Dependency dep : startupDeps) {
pathToDep.put(getDependencyFile(projectFolder, dep).getFile().getCanonicalPath(), dep);
}
List<String> finalDependencies = new ArrayList<>();
for (String localDepPath : localDependencies) {
final String localDepCanonicalPath = new File(projectFolder, localDepPath).getCanonicalPath();
if (pathToDep.containsKey(localDepCanonicalPath)) {
// This dependency was listed in startup-dependencies.json so we can replace its local filepath
// with a storage path!
if (baseDependencyPath.endsWith("/")) {
baseDependencyPath = baseDependencyPath.substring(0, baseDependencyPath.length() - 1);
}
String pathToDependencyInStorage =
baseDependencyPath + "/" + convertIvyCoordinateToPath(pathToDep.get(localDepCanonicalPath));
finalDependencies.add(pathToDependencyInStorage);
} else {
// This dependency was not found in startup-dependencies.json so just keep its original local
// filepath entry
finalDependencies.add(localDepPath);
}
}
String localAndRemoteJarSpec = String.join(",", finalDependencies);
log.debug("replaceLocalPathsWithStoragePaths: localAndRemoteJarSpec: " + localAndRemoteJarSpec);
return localAndRemoteJarSpec;
} catch (IOException | InvalidHashException e) {
// If something goes wrong, swallow the error and just return the original string.
log.warn("Error while opening and parsing startup dependencies file "
+ startupDependenciesFile.getAbsolutePath());
return localJarSpec;
}
}
public static void validateDependencyHash(final DependencyFile f)
throws HashNotMatchException {
validateDependencyHash(f.getFile(), f);
}
public static void validateDependencyHash(final File f, final Dependency d)
throws HashNotMatchException {
try {
final byte[] actualFileHash = HashUtils.SHA1.getHashBytes(f);
if (!HashUtils.isSameHash(d.getSHA1(), actualFileHash)) {
throw new HashNotMatchException(String.format("SHA1 Dependency hash check failed. File: %s Expected: %s Actual: %s",
d.getFileName(),
d.getSHA1(),
HashUtils.bytesHashToString(actualFileHash)));
}
} catch (DecoderException | IOException e) {
throw new RuntimeException(e);
}
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/ThreadPoolExecutingListener.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.utils;
/**
* Interface for listener to get notified before and after a task has been executed.
*
* @author hluu
*/
public interface ThreadPoolExecutingListener {
public void beforeExecute(Runnable r);
public void afterExecute(Runnable r);
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/TrackingThreadPool.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.utils;
import java.util.Collections;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.apache.log4j.Logger;
/**
* A simple subclass of {@link ThreadPoolExecutor} to keep track of in progress tasks as well as
* other interesting statistics.
*
* The content of this class is copied from article "Java theory and practice: Instrumenting
* applications with JMX"
*
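* <p>A minimal construction sketch (hypothetical pool sizes; a null listener falls back to a
* no-op):
* <pre>{@code
* TrackingThreadPool pool = new TrackingThreadPool(2, 4, 60L, TimeUnit.SECONDS,
*     new LinkedBlockingQueue<>(), null);
* }</pre>
*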
* @author hluu
*/
public class TrackingThreadPool extends ThreadPoolExecutor {
private static final Logger logger = Logger.getLogger(TrackingThreadPool.class);
private final Map<Runnable, Boolean> inProgress =
new ConcurrentHashMap<>();
private final ThreadLocal<Long> startTime = new ThreadLocal<>();
private ThreadPoolExecutingListener executingListener =
new NoOpThreadPoolExecutingListener();
private long totalTime;
private int totalTasks;
public TrackingThreadPool(final int corePoolSize, final int maximumPoolSize,
final long keepAliveTime, final TimeUnit unit, final BlockingQueue<Runnable> workQueue,
final ThreadPoolExecutingListener listener) {
super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue);
if (listener != null) {
this.executingListener = listener;
}
}
@Override
protected void beforeExecute(final Thread t, final Runnable r) {
try {
this.executingListener.beforeExecute(r);
} catch (final Throwable e) {
// to ensure the listener doesn't cause any issues
logger.warn("Listener threw exception", e);
}
super.beforeExecute(t, r);
this.inProgress.put(r, Boolean.TRUE);
this.startTime.set(Long.valueOf(System.currentTimeMillis()));
}
@Override
protected void afterExecute(final Runnable r, final Throwable t) {
final long time = System.currentTimeMillis() - this.startTime.get().longValue();
synchronized (this) {
this.totalTime += time;
++this.totalTasks;
}
this.inProgress.remove(r);
super.afterExecute(r, t);
try {
this.executingListener.afterExecute(r);
} catch (final Throwable e) {
// to ensure the listener doesn't cause any issues
logger.warn("Listener threw exception", e);
}
}
public Set<Runnable> getInProgressTasks() {
return Collections.unmodifiableSet(this.inProgress.keySet());
}
public synchronized int getTotalTasks() {
return this.totalTasks;
}
public synchronized double getAverageTaskTime() {
// Use floating point division; integer division would truncate the average.
return (this.totalTasks == 0) ? 0 : (double) this.totalTime / this.totalTasks;
}
private static class NoOpThreadPoolExecutingListener implements
ThreadPoolExecutingListener {
@Override
public void beforeExecute(final Runnable r) {
}
@Override
public void afterExecute(final Runnable r) {
}
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/Triple.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.utils;
/**
* Like {@link Pair}, but with 3 values.
*/
public class Triple<F, S, T> {
private final F first;
private final S second;
private final T third;
public Triple(final F first, final S second, final T third) {
this.first = first;
this.second = second;
this.third = third;
}
public F getFirst() {
return this.first;
}
public S getSecond() {
return this.second;
}
public T getThird() {
return this.third;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((this.first == null) ? 0 : this.first.hashCode());
result = prime * result + ((this.second == null) ? 0 : this.second.hashCode());
result = prime * result + ((this.third == null) ? 0 : this.third.hashCode());
return result;
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
final Triple other = (Triple) obj;
if (this.first == null) {
if (other.first != null) {
return false;
}
} else if (!this.first.equals(other.first)) {
return false;
}
if (this.second == null) {
if (other.second != null) {
return false;
}
} else if (!this.second.equals(other.second)) {
return false;
}
if (this.third == null) {
if (other.third != null) {
return false;
}
} else if (!this.third.equals(other.third)) {
return false;
}
return true;
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/TypedMapWrapper.java
|
/*
* Copyright 2014 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.utils;
import java.util.Collection;
import java.util.List;
import java.util.Map;
public class TypedMapWrapper<K, V> {
private final Map<K, V> map;
public TypedMapWrapper(final Map<K, V> map) {
this.map = map;
}
public String getString(final K key) {
return getString(key, null);
}
public String getString(final K key, final String defaultVal) {
final Object obj = this.map.get(key);
if (obj == null) {
return defaultVal;
}
if (obj instanceof String) {
return (String) obj;
}
return obj.toString();
}
public Boolean getBool(final K key, final Boolean defaultVal) {
final Object obj = this.map.get(key);
if (obj == null) {
return defaultVal;
}
return (Boolean) obj;
}
public Integer getInt(final K key) {
return getInt(key, -1);
}
public Integer getInt(final K key, final Integer defaultVal) {
final Object obj = this.map.get(key);
if (obj == null) {
return defaultVal;
}
if (obj instanceof Integer) {
return (Integer) obj;
} else if (obj instanceof String) {
return Integer.valueOf((String) obj);
} else {
return defaultVal;
}
}
public Long getLong(final K key) {
return getLong(key, -1L);
}
public Long getLong(final K key, final Long defaultVal) {
final Object obj = this.map.get(key);
if (obj == null) {
return defaultVal;
}
if (obj instanceof Long) {
return (Long) obj;
} else if (obj instanceof Integer) {
return Long.valueOf((Integer) obj);
} else if (obj instanceof String) {
return Long.valueOf((String) obj);
} else {
return defaultVal;
}
}
public Double getDouble(final K key) {
return getDouble(key, -1.0d);
}
public Double getDouble(final K key, final Double defaultVal) {
final Object obj = this.map.get(key);
if (obj == null) {
return defaultVal;
}
if (obj instanceof Double) {
return (Double) obj;
} else if (obj instanceof String) {
return Double.valueOf((String) obj);
} else {
return defaultVal;
}
}
public Collection<String> getStringCollection(final K key) {
final Object obj = this.map.get(key);
return (Collection<String>) obj;
}
public Collection<String> getStringCollection(final K key,
final Collection<String> defaultVal) {
final Object obj = this.map.get(key);
if (obj == null) {
return defaultVal;
}
return (Collection<String>) obj;
}
public <C> Collection<C> getCollection(final K key) {
final Object obj = this.map.get(key);
if (obj instanceof Collection) {
return (Collection<C>) obj;
}
return null;
}
public <L> List<L> getList(final K key) {
final Object obj = this.map.get(key);
if (obj instanceof List) {
return (List<L>) obj;
}
return null;
}
public <L> List<L> getList(final K key, final List<L> defaultVal) {
final Object obj = this.map.get(key);
if (obj instanceof List) {
return (List<L>) obj;
}
return defaultVal;
}
public Object getObject(final K key) {
return this.map.get(key);
}
public Map<K, V> getMap() {
return this.map;
}
public <S, T> Map<S, T> getMap(final K key) {
return (Map<S, T>) this.map.get(key);
}
public boolean containsKey(final K key) {
return this.map.containsKey(key);
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-common/3.90.0/azkaban/utils/ValidatorUtils.java
|
/*
* Copyright 2019 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.utils;
import azkaban.project.Project;
import azkaban.project.validator.ValidationReport;
import azkaban.project.validator.ValidatorManager;
import azkaban.project.validator.XmlValidatorManager;
import java.io.File;
import java.util.Map;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Singleton
public class ValidatorUtils {
private static final Logger logger = LoggerFactory.getLogger(ValidatorUtils.class);
private final ValidatorManager validatorManager;
@Inject
public ValidatorUtils(final Props prop) {
logger.info("Creating XmlValidatorManager instance (loading validators)...");
this.validatorManager = new XmlValidatorManager(prop);
logger.info("XmlValidatorManager instance created.");
}
public String getCacheKey(final Project project, final File folder, final Props props) {
return this.validatorManager.getCacheKey(project, folder, props);
}
public Map<String, ValidationReport> validateProject(final Project project, final File folder, final Props props) {
logger.info("Validating project " + project.getName()
+ " using the registered validators "
+ this.validatorManager.getValidatorsInfo().toString());
return this.validatorManager.validate(project, folder, props);
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban/dag/Dag.java
|
/*
* Copyright 2018 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.dag;
import static java.util.Objects.requireNonNull;
import com.google.common.annotations.VisibleForTesting;
import java.util.ArrayList;
import java.util.List;
/**
* A DAG (Directed acyclic graph) consists of {@link Node}s.
*
* <p>Most of the methods in this class should remain package private. Code outside of this
* package should mainly interact with the {@link DagService}.
*/
public class Dag {
private final String name;
private final DagProcessor dagProcessor;
private final List<Node> nodes = new ArrayList<>();
private Status status = Status.READY;
Dag(final String name, final DagProcessor dagProcessor) {
requireNonNull(name, "The name of the Dag can't be null");
this.name = name;
requireNonNull(dagProcessor, "The dagProcessor parameter can't be null.");
this.dagProcessor = dagProcessor;
}
/**
* Adds a node to the current dag.
*
* <p>It's important NOT to expose this method as public. The design relies on this to ensure
* correctness. The DAG's structure shouldn't change after it is created.
*
* <p>The DagBuilder will check that node names are unique within a dag. No check is necessary
* here since the method is package private and where it is called is carefully controlled within
* a relatively small package.
* </p>
*
* @param node a node to add
*/
void addNode(final Node node) {
assert (node.getDag() == this);
this.nodes.add(node);
}
void start() {
assert (this.status == Status.READY);
changeStatus(Status.RUNNING);
for (final Node node : this.nodes) {
node.runIfAllowed();
}
// It's possible that all nodes are disabled. In this rare case the dag should be
// marked as success. Otherwise it will be stuck in the running state.
updateDagStatus();
}
void kill() {
if (this.status.isTerminal() || this.status == Status.KILLING) {
// It is possible that a kill is issued after a dag has finished or multiple kill requests
// are received. Without this check, this method will make duplicate calls to the
// DagProcessor.
return;
}
changeStatus(Status.KILLING);
for (final Node node : this.nodes) {
node.kill();
}
updateDagStatus();
}
/**
* Update the final dag status when all nodes are done.
*
* <p>If any node has not reached its terminal state, this method will simply return.
*/
void updateDagStatus() {
// A dag may have nodes that are disabled. It's safer to scan all the nodes.
// Assume the overhead is minimal. If it is not the case, we can optimize later.
boolean failed = false;
for (final Node node : this.nodes) {
final Status nodeStatus = node.getStatus();
if (!nodeStatus.isTerminal()) {
return;
}
if (nodeStatus == Status.FAILURE) {
failed = true;
}
}
// Update the dag status only after all nodes have reached terminal states.
updateDagStatusInternal(failed);
}
/**
* Update the final dag status.
*
* @param failed true if any of the jobs has failed
*/
private void updateDagStatusInternal(final boolean failed) {
if (this.status == Status.KILLING) {
/*
It's possible that some nodes have failed when the dag is killed.
Since killing a dag signals an intent from an operator, it is more important to make
the dag status reflect the result of that explicit intent. e.g. if the killing is a
result of handling a job failure, users more likely want to know that someone has taken
an action rather than that a job has failed. Operators can still see the individual job
status.
*/
changeStatus(Status.KILLED);
} else if (failed) {
changeStatus(Status.FAILURE);
} else {
changeStatus(Status.SUCCESS);
}
}
private void changeStatus(final Status status) {
this.status = status;
this.dagProcessor.changeStatus(this, status);
}
@Override
public String toString() {
return String.format("dag (%s), status (%s)", this.name, this.status);
}
String getName() {
return this.name;
}
Status getStatus() {
return this.status;
}
@VisibleForTesting
void setStatus(final Status status) {
this.status = status;
}
@VisibleForTesting
public List<Node> getNodes() {
return this.nodes;
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban/dag/DagBuilder.java
|
/*
* Copyright 2018 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.dag;
import static java.util.Objects.requireNonNull;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* A builder to build DAGs.
*
* <p>Use the {@link DagBuilder#createNode} method to create nodes. Call the
* {@link DagBuilder#addParentNode} method to add dependencies among them. Call the
* {@link DagBuilder#build()} method to build a Dag.
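*
* <p>A minimal usage sketch (hypothetical processors):
* <pre>{@code
* DagBuilder builder = new DagBuilder("myDag", dagProcessor);
* builder.createNode("a", nodeProcessor);
* builder.createNode("b", nodeProcessor);
* builder.addParentNode("b", "a"); // b depends on a
* Dag dag = builder.build();
* }</pre>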
*/
public class DagBuilder {
private final Dag dag;
private final Map<String, Node> nameToNodeMap = new HashMap<>();
// The builder can only be used to build a DAG once to prevent modifying an existing DAG after it
// is built.
private boolean isBuilt = false;
/**
* A builder for building a DAG.
*
* @param name name of the DAG
* @param dagProcessor the associated DagProcessor
*/
public DagBuilder(final String name, final DagProcessor dagProcessor) {
requireNonNull(name, "The name of the DagBuilder can't be null");
requireNonNull(dagProcessor, "The dagProcessor of the DagBuilder can't be null");
this.dag = new Dag(name, dagProcessor);
}
/**
* Creates a new node and adds it to the DagBuilder.
*
* @param name name of the node
* @param nodeProcessor node processor associated with this node
* @return a new node
* @throws DagException if the name is not unique in the DAG.
*/
public Node createNode(final String name, final NodeProcessor nodeProcessor) {
checkIsBuilt();
if (this.nameToNodeMap.get(name) != null) {
throw new DagException(String.format("Node names in %s need to be unique. The name "
+ "(%s) already exists.", this, name));
}
final Node node = new Node(name, nodeProcessor, this.dag);
this.nameToNodeMap.put(name, node);
return node;
}
/**
* Throws an exception if the {@link DagBuilder#build()} method has been called.
*/
private void checkIsBuilt() {
if (this.isBuilt) {
final String msg = String
.format("The DAG (%s) is built already. Can't create new nodes.", this);
throw new DagException(msg);
}
}
/**
* Add a parent node to a child node. All the names should have been registered with this builder
* with the {@link DagBuilder#createNode(String, NodeProcessor)} call.
*
* @param childNodeName name of the child node
* @param parentNodeName name of the parent node
*/
public void addParentNode(final String childNodeName, final String parentNodeName) {
checkIsBuilt();
final Node child = this.nameToNodeMap.get(childNodeName);
if (child == null) {
throw new DagException(String.format("Unknown child node (%s). Did you create the node?",
childNodeName));
}
final Node parent = this.nameToNodeMap.get(parentNodeName);
if (parent == null) {
throw new DagException(
String.format("Unknown parent node (%s). Did you create the node?", parentNodeName));
}
child.addParent(parent);
}
/**
* Builds the dag.
*
* <p>Once this method is called, subsequent calls to modify the nodes' relationships in the dag
* will have no effect on the returned Dag object.
* </p>
*
* @return the Dag reflecting the current state of the DagBuilder
*/
public Dag build() {
checkIsBuilt();
checkCircularDependencies();
this.isBuilt = true;
return this.dag;
}
/**
* Checks if the builder contains nodes that form a circular dependency ring.
*
* <p>The depth first algorithm is described in this article
* <a href="https://en.wikipedia.org/wiki/Topological_sorting">https://en.wikipedia.org/wiki/Topological_sorting</a>
* </p>
*
* @throws DagException if a circular dependency is detected
*/
private void checkCircularDependencies() {
class CircularDependencyChecker {
// The nodes that need to be visited
private final Set<Node> toVisit = new HashSet<>(DagBuilder.this.nameToNodeMap.values());
// The nodes that have finished traversing all their parent nodes
private final Set<Node> finished = new HashSet<>();
// The nodes that are waiting for their parent nodes to finish visit.
private final Set<Node> ongoing = new HashSet<>();
// One sample of nodes that form a circular dependency
private final List<Node> sampleCircularNodes = new ArrayList<>();
/**
* Checks if the builder contains nodes that form a circular dependency ring.
*
* @throws DagException if a circular dependency is detected
*/
private void check() {
while (!this.toVisit.isEmpty()) {
final Node node = removeOneNodeFromToVisitSet();
if (checkNode(node)) {
final String msg = String.format("Circular dependency detected. Sample: %s",
this.sampleCircularNodes);
throw new DagException(msg);
}
}
}
/**
* Removes one node from the toVisit set and returns that node.
*
* @return a node
*/
private Node removeOneNodeFromToVisitSet() {
final Iterator<Node> iterator = this.toVisit.iterator();
final Node node = iterator.next();
iterator.remove();
return node;
}
/**
* Checks if the node is part of a group of nodes that form a circular dependency ring.
*
* <p>If true, the node will be added to the sampleCircularNodes list</p>
*
* @param node node to check
* @return true if it is
*/
private boolean checkNode(final Node node) {
if (this.finished.contains(node)) {
return false;
}
if (this.ongoing.contains(node)) {
this.sampleCircularNodes.add(node);
return true;
}
this.toVisit.remove(node);
this.ongoing.add(node);
for (final Node parent : node.getParents()) {
if (checkNode(parent)) {
this.sampleCircularNodes.add(node);
return true;
}
}
this.ongoing.remove(node);
this.finished.add(node);
return false;
}
}
final CircularDependencyChecker checker = new CircularDependencyChecker();
checker.check();
}
@Override
public String toString() {
return String.format("DagBuilder (%s)", this.dag.getName());
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban/dag/DagException.java
|
/*
* Copyright 2018 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.dag;
public class DagException extends RuntimeException {
public DagException(final String message) {
super(message);
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban/dag/DagProcessor.java
|
/*
* Copyright 2018 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.dag;
public interface DagProcessor {
/**
* Changes the status of the dag.
*
* @param dag the dag to change
* @param status the new status
*/
void changeStatus(Dag dag, Status status);
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban/dag/DagService.java
|
/*
* Copyright 2018 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.dag;
import azkaban.utils.ExecutorServiceUtils;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.time.Duration;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Thread safe and non blocking service for DAG processing.
*
 * <p>Allows external inputs to be given to a dag or node so that the dag can transition
 * states. Since only one thread is used to progress the DAG, thread synchronization is avoided.
*/
@SuppressWarnings("FutureReturnValueIgnored")
@Singleton
public class DagService {
private static final Duration SHUTDOWN_WAIT_TIMEOUT = Duration.ofSeconds(10);
private static final Logger logger = LoggerFactory.getLogger(DagService.class);
private final ExecutorServiceUtils executorServiceUtils;
private final ExecutorService executorService;
@Inject
public DagService(final ExecutorServiceUtils executorServiceUtils) {
    this.executorServiceUtils = executorServiceUtils;
    // Give the thread a name to make debugging easier.
    final ThreadFactory namedThreadFactory = new ThreadFactoryBuilder()
        .setNameFormat("Dag-service").build();
    this.executorService = Executors.newSingleThreadExecutor(namedThreadFactory);
}
public void startDag(final Dag dag) {
this.executorService.submit(dag::start);
}
/**
* Transitions the node to the success state.
*/
public void markNodeSuccess(final Node node) {
this.executorService.submit(node::markSuccess);
}
/**
* Transitions the node from the killing state to the killed state.
*/
public void markNodeKilled(final Node node) {
this.executorService.submit(node::markKilled);
}
/**
* Transitions the node to the failure state.
*/
public void markNodeFailed(final Node node) {
this.executorService.submit(node::markFailed);
}
/**
* Kills a DAG.
*/
public void killDag(final Dag dag) {
this.executorService.submit(dag::kill);
}
/**
* Shuts down the service and waits for the tasks to finish.
*/
public void shutdownAndAwaitTermination() throws InterruptedException {
logger.info("DagService is shutting down.");
this.executorServiceUtils.gracefulShutdown(this.executorService, SHUTDOWN_WAIT_TIMEOUT);
}
@VisibleForTesting
ExecutorService getExecutorService() {
return this.executorService;
}
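  // Illustrative usage sketch (the construction of the Dag/Node objects and of
  // ExecutorServiceUtils is assumed here, not defined by this class):
  //
  //   final DagService dagService = new DagService(executorServiceUtils);
  //   dagService.startDag(dag);              // runs on the single "Dag-service" thread
  //   dagService.markNodeSuccess(someNode);  // external completion signal for a node
  //   dagService.shutdownAndAwaitTermination();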
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban/dag/Node.java
|
/*
* Copyright 2018 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.dag;
import static java.util.Objects.requireNonNull;
import com.google.common.annotations.VisibleForTesting;
import java.util.ArrayList;
import java.util.List;
/**
* Node in a DAG: Directed acyclic graph.
*/
public class Node {
private final String name;
private final NodeProcessor nodeProcessor;
// The nodes that this node depends on.
private final List<Node> parents = new ArrayList<>();
// The nodes that depend on this node.
private final List<Node> children = new ArrayList<>();
private Status status = Status.READY;
private final Dag dag;
Node(final String name, final NodeProcessor nodeProcessor, final Dag dag) {
requireNonNull(nodeProcessor, "The nodeProcessor parameter can't be null.");
this.nodeProcessor = nodeProcessor;
requireNonNull(name, "The name of the node can't be null");
this.name = name;
requireNonNull(dag, "The dag of the node can't be null");
this.dag = dag;
dag.addNode(this);
}
Dag getDag() {
return this.dag;
}
/**
   * Adds the given node as the current node's parent, i.e. the current node depends on the given
   * node.
*
* <p>It's important NOT to expose this method as public. The design relies on this to ensure
* correctness. The DAG's structure shouldn't change after it is created.
*/
void addParent(final Node node) {
this.parents.add(node);
node.addChild(this);
}
private void addChild(final Node node) {
this.children.add(node);
}
boolean hasParent() {
return !this.parents.isEmpty();
}
/**
* Checks if the node is ready to run.
*
* @return true if the node is ready to run
*/
private boolean isReady() {
if (this.status != Status.READY) {
// e.g. if the node is disabled, it is not ready to run.
return false;
}
for (final Node parent : this.parents) {
if (!parent.status.isSuccessEffectively()) {
return false;
}
}
return true;
}
/**
* Transitions the node to the success state.
*/
void markSuccess() {
// It's possible that the dag is killed before this method is called.
assertRunningOrKilling();
changeStatus(Status.SUCCESS);
for (final Node child : this.children) {
child.runIfAllowed();
}
this.dag.updateDagStatus();
}
/**
   * Checks if all the dependencies are met and runs the node if they are.
*/
void runIfAllowed() {
if (isReady()) {
changeStatus(Status.RUNNING);
}
}
/**
* Transitions the node to the failure state.
*/
void markFailed() {
// It's possible that the dag is killed before this method is called.
assertRunningOrKilling();
changeStatus(Status.FAILURE);
for (final Node child : this.children) {
child.cancel();
}
//todo: HappyRay support failure options "Finish Current Running" and "Cancel All"
this.dag.updateDagStatus();
}
private void cancel() {
// The node shouldn't have started.
assert (this.status.isPreRunState());
if (this.status != Status.DISABLED) {
changeStatus(Status.CANCELED);
}
for (final Node node : this.children) {
node.cancel();
}
}
/**
* Asserts that the state is running or killing.
*/
private void assertRunningOrKilling() {
assert (this.status == Status.RUNNING || this.status == Status.KILLING);
}
private void changeStatus(final Status status) {
this.status = status;
this.nodeProcessor.changeStatus(this, this.status);
}
/**
* Kills a node.
*
* <p>A node is not designed to be killed individually. This method expects {@link Dag#kill()}
* method to kill all nodes. Thus this method itself doesn't need to propagate the kill signal to
* the node's children nodes.
*/
void kill() {
assert (this.dag.getStatus() == Status.KILLING);
if (this.status == Status.READY || this.status == Status.BLOCKED) {
// If the node is disabled, keep the status as disabled.
changeStatus(Status.CANCELED);
} else if (this.status == Status.RUNNING) {
changeStatus(Status.KILLING);
}
// If the node has finished, leave the status intact.
}
/**
   * Transitions the node from the killing state to the killed state.
*/
void markKilled() {
assert (this.status == Status.KILLING);
changeStatus(Status.KILLED);
this.dag.updateDagStatus();
}
@Override
public String toString() {
return String.format("Node (%s) status (%s) in %s", this.name, this.status, this.dag);
}
Status getStatus() {
return this.status;
}
@VisibleForTesting
void setStatus(final Status status) {
this.status = status;
}
String getName() {
return this.name;
}
@VisibleForTesting
List<Node> getChildren() {
return this.children;
}
@VisibleForTesting
List<Node> getParents() {
return this.parents;
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban/dag/NodeProcessor.java
|
/*
* Copyright 2018 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.dag;
public interface NodeProcessor {
/**
* Changes the status of the node.
*
* <p>Typically a processor implementation should handle the RUNNING and KILLING status by
* starting or killing a unit of work and call the {@link DagService} to transition the node
* to the next status.
*
* <p>The call will be made in the context of the DagService's one and only thread. Thus a
* processor should limit the time it takes to process the call. For lengthy operations such as
* I/O operations, consider offloading them to other threads.
*
* @param node the node to change
* @param status the new status
*/
void changeStatus(Node node, Status status);
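  // A minimal implementation sketch (illustrative; SimpleNodeProcessor, doWork
  // and the DagService wiring are assumptions, not part of this interface):
  //
  //   class SimpleNodeProcessor implements NodeProcessor {
  //     private final DagService dagService;
  //     private final ExecutorService workers = Executors.newCachedThreadPool();
  //
  //     SimpleNodeProcessor(final DagService dagService) {
  //       this.dagService = dagService;
  //     }
  //
  //     @Override
  //     public void changeStatus(final Node node, final Status status) {
  //       switch (status) {
  //         case RUNNING:
  //           // offload the real work to another thread; report back via DagService
  //           workers.submit(() -> {
  //             doWork(node); // hypothetical unit of work
  //             dagService.markNodeSuccess(node);
  //           });
  //           break;
  //         case KILLING:
  //           // kill the unit of work, then:
  //           dagService.markNodeKilled(node);
  //           break;
  //         default:
  //           break;
  //       }
  //     }
  //   }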
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban/dag/Status.java
|
/*
* Copyright 2018 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.dag;
import com.google.common.collect.ImmutableSet;
public enum Status {
READY, // ready to run
  DISABLED, // disabled by users; treated as if the node has succeeded
BLOCKED, // temporarily blocked. Need to be unblocked by another external event
RUNNING,
SUCCESS,
FAILURE,
// doesn't run because one of the nodes it depends on fails or is killed. Applies to a node only.
CANCELED,
KILLING, // in the process of killing a running job
KILLED; // explicitly killed by a user
// The states that will not transition to other states
static final ImmutableSet<Status> TERMINAL_STATES = ImmutableSet.of(DISABLED, SUCCESS, FAILURE,
CANCELED, KILLED);
public boolean isTerminal() {
return TERMINAL_STATES.contains(this);
}
// The states that are considered as success effectively
private static final ImmutableSet<Status> EFFECTIVE_SUCCESS_STATES = ImmutableSet.of(DISABLED,
SUCCESS);
boolean isSuccessEffectively() {
return EFFECTIVE_SUCCESS_STATES.contains(this);
}
// The states that are possible before a node ever starts to run or be killed or canceled
private static final ImmutableSet<Status> PRE_RUN_STATES = ImmutableSet
.of(DISABLED, READY, BLOCKED);
boolean isPreRunState() {
return PRE_RUN_STATES.contains(this);
}
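  // Typical node transitions, as implied by the Node/Dag classes (orientation only):
  //   READY -> RUNNING -> SUCCESS | FAILURE
  //   RUNNING -> KILLING -> KILLED            (when the dag is killed)
  //   READY | BLOCKED -> CANCELED             (a dependency failed, or the dag was killed)
  //   DISABLED never changes and counts as success for its dependents.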
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban/execapp/AzkabanExecServerModule.java
|
/*
* Copyright 2017 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
*/
package azkaban.execapp;
import static azkaban.Constants.ConfigurationKeys.AZKABAN_EVENT_REPORTING_CLASS_PARAM;
import static azkaban.Constants.ConfigurationKeys.AZKABAN_EVENT_REPORTING_ENABLED;
import azkaban.executor.ExecutorLoader;
import azkaban.executor.JdbcExecutorLoader;
import azkaban.spi.AzkabanEventReporter;
import azkaban.utils.Props;
import com.google.inject.AbstractModule;
import com.google.inject.Provides;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.log4j.Logger;
/**
 * This Guice module currently serves as a single container for all bindings in this module. This
 * is intended to help during the migration to Guice. Once this class starts growing, we can
 * move towards a more modular structuring of Guice components.
*/
public class AzkabanExecServerModule extends AbstractModule {
private static final Logger logger = Logger.getLogger(AzkabanExecServerModule.class);
@Override
protected void configure() {
install(new ExecJettyServerModule());
bind(ExecutorLoader.class).to(JdbcExecutorLoader.class);
}
@Inject
@Provides
@Singleton
public AzkabanEventReporter createAzkabanEventReporter(final Props props) {
final boolean eventReporterEnabled =
props.getBoolean(AZKABAN_EVENT_REPORTING_ENABLED, false);
if (!eventReporterEnabled) {
logger.info("Event reporter is not enabled");
return null;
}
final Class<?> eventReporterClass =
props.getClass(AZKABAN_EVENT_REPORTING_CLASS_PARAM, null);
if (eventReporterClass != null && eventReporterClass.getConstructors().length > 0) {
      logger.info("Loading event reporter class " + eventReporterClass.getName());
try {
final Constructor<?> eventReporterClassConstructor =
eventReporterClass.getConstructor(Props.class);
return (AzkabanEventReporter) eventReporterClassConstructor.newInstance(props);
} catch (final InvocationTargetException e) {
        logger.error(e.getTargetException().getMessage());
if (e.getTargetException() instanceof IllegalArgumentException) {
throw new IllegalArgumentException(e);
} else {
throw new RuntimeException(e);
}
} catch (final Exception e) {
this.logger.error("Could not instantiate EventReporter " + eventReporterClass.getName());
throw new RuntimeException(e);
}
}
return null;
}
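  // A minimal sketch of a reporter loadable by the method above. The only
  // contract this loader enforces is a public constructor taking Props and
  // assignability to AzkabanEventReporter (whose methods are defined elsewhere
  // and omitted here). Class and key spellings below are illustrative; the
  // exact property strings are the constants imported at the top of this file:
  //
  //   public class MyEventReporter implements AzkabanEventReporter {
  //     public MyEventReporter(final Props props) {
  //       // read reporter-specific settings from props
  //     }
  //     // ... AzkabanEventReporter methods ...
  //   }
  //
  // azkaban.properties (illustrative):
  //   azkaban.event.reporting.enabled=true
  //   azkaban.event.reporting.class=com.example.MyEventReporter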
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban/execapp/AzkabanExecutorServer.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.execapp;
import static azkaban.Constants.ConfigurationKeys;
import static azkaban.Constants.DEFAULT_EXECUTOR_PORT_FILE;
import static azkaban.ServiceProvider.SERVICE_PROVIDER;
import static azkaban.execapp.ExecJettyServerModule.EXEC_JETTY_SERVER;
import static azkaban.execapp.ExecJettyServerModule.EXEC_ROOT_CONTEXT;
import static com.google.common.base.Preconditions.checkState;
import static java.util.Objects.requireNonNull;
import azkaban.AzkabanCommonModule;
import azkaban.Constants;
import azkaban.execapp.event.JobCallbackManager;
import azkaban.execapp.jmx.JmxFlowRampManager;
import azkaban.execapp.jmx.JmxFlowRunnerManager;
import azkaban.execapp.jmx.JmxJobMBeanManager;
import azkaban.execapp.metric.NumFailedFlowMetric;
import azkaban.execapp.metric.NumFailedJobMetric;
import azkaban.execapp.metric.NumQueuedFlowMetric;
import azkaban.execapp.metric.NumRunningFlowMetric;
import azkaban.execapp.metric.NumRunningJobMetric;
import azkaban.executor.Executor;
import azkaban.executor.ExecutorLoader;
import azkaban.executor.ExecutorManagerException;
import azkaban.jmx.JmxJettyServer;
import azkaban.metric.IMetricEmitter;
import azkaban.metric.MetricException;
import azkaban.metric.MetricReportManager;
import azkaban.metric.inmemoryemitter.InMemoryMetricEmitter;
import azkaban.metrics.MetricsManager;
import azkaban.server.AzkabanServer;
import azkaban.server.IMBeanRegistrable;
import azkaban.server.MBeanRegistrationManager;
import azkaban.utils.FileIOUtils;
import azkaban.utils.Props;
import azkaban.utils.StdOutErrRedirect;
import azkaban.utils.Utils;
import com.google.inject.Guice;
import com.google.inject.Injector;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.lang.reflect.Constructor;
import java.net.InetAddress;
import java.nio.charset.StandardCharsets;
import java.nio.file.Paths;
import java.security.Permission;
import java.security.Policy;
import java.security.ProtectionDomain;
import java.time.Duration;
import java.util.TimeZone;
import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Singleton;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
import org.joda.time.DateTimeZone;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.servlet.Context;
@Singleton
public class AzkabanExecutorServer implements IMBeanRegistrable {
public static final String JOBTYPE_PLUGIN_DIR = "azkaban.jobtype.plugin.dir";
public static final String RAMPPOLICY_PLUGIN_DIR = "azkaban.ramppolicy.plugin.dir";
public static final String METRIC_INTERVAL = "executor.metric.milisecinterval.";
private static final String CUSTOM_JMX_ATTRIBUTE_PROCESSOR_PROPERTY = "jmx.attribute.processor.class";
private static final Logger logger = Logger.getLogger(AzkabanExecutorServer.class);
private static final String DEFAULT_TIMEZONE_ID = "default.timezone.id";
private static AzkabanExecutorServer app;
private final MBeanRegistrationManager mbeanRegistrationManager = new MBeanRegistrationManager();
private final ExecutorLoader executionLoader;
private final FlowRunnerManager runnerManager;
private final FlowRampManager rampManager;
private final MetricsManager metricsManager;
private final Props props;
private final Server server;
private final Context root;
@Inject
public AzkabanExecutorServer(final Props props,
final ExecutorLoader executionLoader,
final FlowRunnerManager runnerManager,
final FlowRampManager rampManager,
final MetricsManager metricsManager,
@Named(EXEC_JETTY_SERVER) final Server server,
@Named(EXEC_ROOT_CONTEXT) final Context root) {
this.props = props;
this.executionLoader = executionLoader;
this.runnerManager = runnerManager;
this.rampManager = rampManager;
this.metricsManager = metricsManager;
this.server = server;
this.root = root;
}
/**
* Returns the currently executing executor server, if one exists.
*/
public static AzkabanExecutorServer getApp() {
return app;
}
/**
   * Entry point: starts the Azkaban executor server on Jetty.
*/
public static void main(final String[] args) throws Exception {
// Redirect all std out and err messages into log4j
StdOutErrRedirect.redirectOutAndErrToLog();
logger.info("Starting Jetty Azkaban Executor...");
if (System.getSecurityManager() == null) {
Policy.setPolicy(new Policy() {
@Override
public boolean implies(final ProtectionDomain domain, final Permission permission) {
return true; // allow all
}
});
System.setSecurityManager(new SecurityManager());
}
final Props props = AzkabanServer.loadProps(args);
if (props == null) {
logger.error("Azkaban Properties not loaded.");
logger.error("Exiting Azkaban Executor Server...");
return;
}
/* Initialize Guice Injector */
final Injector injector = Guice.createInjector(
new AzkabanCommonModule(props),
new AzkabanExecServerModule()
);
SERVICE_PROVIDER.setInjector(injector);
launch(injector.getInstance(AzkabanExecutorServer.class));
}
public static void launch(final AzkabanExecutorServer azkabanExecutorServer) throws Exception {
azkabanExecutorServer.start();
setupTimeZone(azkabanExecutorServer.getAzkabanProps());
app = azkabanExecutorServer;
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
try {
logTopMemoryConsumers();
} catch (final Exception e) {
          AzkabanExecutorServer.logger.info("Exception when logging top memory consumers", e);
}
final String host = AzkabanExecutorServer.app.getHost();
final int port = AzkabanExecutorServer.app.getPort();
try {
AzkabanExecutorServer.logger.info(String
.format("Removing executor(host: %s, port: %s) entry from database...", host, port));
AzkabanExecutorServer.app.getExecutorLoader().removeExecutor(host, port);
} catch (final ExecutorManagerException ex) {
AzkabanExecutorServer.logger.error(
String.format("Exception when removing executor(host: %s, port: %s)", host, port),
ex);
}
AzkabanExecutorServer.logger.warn("Shutting down executor...");
try {
AzkabanExecutorServer.app.shutdownNow();
AzkabanExecutorServer.app.getFlowRunnerManager().deleteExecutionDirectory();
} catch (final Exception e) {
AzkabanExecutorServer.logger.error("Error while shutting down http server.", e);
}
}
      public void logTopMemoryConsumers() throws Exception {
if (new File("/bin/bash").exists() && new File("/bin/ps").exists()
&& new File("/usr/bin/head").exists()) {
AzkabanExecutorServer.logger.info("logging top memory consumer");
final java.lang.ProcessBuilder processBuilder =
new java.lang.ProcessBuilder("/bin/bash", "-c",
"/bin/ps aux --sort -rss | /usr/bin/head");
final Process p = processBuilder.start();
p.waitFor();
final InputStream is = p.getInputStream();
final java.io.BufferedReader reader =
new java.io.BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8));
String line = null;
while ((line = reader.readLine()) != null) {
AzkabanExecutorServer.logger.info(line);
}
is.close();
}
}
});
}
private static void setupTimeZone(final Props azkabanSettings) {
if (azkabanSettings.containsKey(DEFAULT_TIMEZONE_ID)) {
final String timezoneId = azkabanSettings.getString(DEFAULT_TIMEZONE_ID);
System.setProperty("user.timezone", timezoneId);
      final TimeZone timeZone = TimeZone.getTimeZone(timezoneId);
TimeZone.setDefault(timeZone);
DateTimeZone.setDefault(DateTimeZone.forTimeZone(timeZone));
logger.info("Setting timezone to " + timezoneId);
}
}
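  // Example: pinning the server-wide time zone in azkaban.properties (the key
  // is the DEFAULT_TIMEZONE_ID constant above; any valid TimeZone ID works):
  //   default.timezone.id=America/Los_Angeles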
private void start() throws Exception {
this.root.setAttribute(Constants.AZKABAN_SERVLET_CONTEXT_KEY, this);
JmxJobMBeanManager.getInstance().initialize(this.props);
// make sure this happens before
configureJobCallback(this.props);
configureMBeanServer();
configureMetricReports();
loadCustomJMXAttributeProcessor(this.props);
// Before starting, make FlowRunnerManager accept executions if active=true
initActive();
try {
this.server.start();
} catch (final Exception e) {
logger.error(e);
Utils.croak(e.getMessage(), 1);
}
insertExecutorEntryIntoDB();
dumpPortToFile();
logger.info("Started Executor Server on " + getExecutorHostPort());
if (this.props.getBoolean(ConfigurationKeys.IS_METRICS_ENABLED, false)) {
startReportingExecMetrics();
}
}
private void startReportingExecMetrics() {
logger.info("starting reporting Executor Metrics");
this.metricsManager.startReporting("AZ-EXEC", this.props);
}
private void initActive() throws ExecutorManagerException {
final Executor executor;
final int port = this.props.getInt(ConfigurationKeys.EXECUTOR_PORT, -1);
if (port != -1) {
final String host = requireNonNull(getHost());
// Check if this executor exists previously in the DB
try {
executor = this.executionLoader.fetchExecutor(host, port);
} catch (final ExecutorManagerException e) {
logger.error("Error fetching executor entry from DB", e);
throw e;
}
if (executor == null) {
logger.info("This executor wasn't found in the DB. Setting active=false.");
getFlowRunnerManager().setActiveInternal(false);
} else {
logger.info("This executor is already in the DB. Found active=" + executor.isActive());
getFlowRunnerManager().setActiveInternal(executor.isActive());
}
} else {
// In case of "pick any free port" executor can't be activated based on the value in DB like above, because port
// is only available after the jetty server has started.
logger.info(ConfigurationKeys.EXECUTOR_PORT
+ " wasn't set - free port will be picked automatically. Executor " +
"is started with active=false and must be activated separately.");
}
}
private void insertExecutorEntryIntoDB() throws ExecutorManagerException {
try {
final String host = requireNonNull(getHost());
final int port = getPort();
checkState(port != -1);
final Executor executor = this.executionLoader.fetchExecutor(host, port);
if (executor == null) {
logger.info("This executor wasn't found in the DB. Adding self.");
this.executionLoader.addExecutor(host, port);
} else {
logger.info("This executor is already in the DB. Found: " + executor);
}
// If executor already exists, ignore it
} catch (final ExecutorManagerException e) {
logger.error("Error inserting executor entry into DB", e);
throw e;
}
}
private void dumpPortToFile() throws IOException {
// By default this should write to the working directory
final String portFileName = this.props
.getString(ConfigurationKeys.EXECUTOR_PORT_FILE, DEFAULT_EXECUTOR_PORT_FILE);
FileIOUtils.dumpNumberToFile(Paths.get(portFileName), getPort());
}
private void configureJobCallback(final Props props) {
final boolean jobCallbackEnabled =
props.getBoolean("azkaban.executor.jobcallback.enabled", true);
logger.info("Job callback enabled? " + jobCallbackEnabled);
if (jobCallbackEnabled) {
JobCallbackManager.initialize(props);
}
}
/**
* Configure Metric Reporting as per azkaban.properties settings
*/
private void configureMetricReports() throws MetricException {
final Props props = getAzkabanProps();
if (props != null && props.getBoolean("executor.metric.reports", false)) {
logger.info("Starting to configure Metric Reports");
final MetricReportManager metricManager = MetricReportManager.getInstance();
final IMetricEmitter metricEmitter = new InMemoryMetricEmitter(props);
metricManager.addMetricEmitter(metricEmitter);
logger.info("Adding number of failed flow metric");
metricManager.addMetric(new NumFailedFlowMetric(metricManager, props
.getInt(METRIC_INTERVAL
+ NumFailedFlowMetric.NUM_FAILED_FLOW_METRIC_NAME,
props.getInt(METRIC_INTERVAL + "default"))));
logger.info("Adding number of failed jobs metric");
metricManager.addMetric(new NumFailedJobMetric(metricManager, props
.getInt(METRIC_INTERVAL
+ NumFailedJobMetric.NUM_FAILED_JOB_METRIC_NAME,
props.getInt(METRIC_INTERVAL + "default"))));
logger.info("Adding number of running Jobs metric");
metricManager.addMetric(new NumRunningJobMetric(metricManager, props
.getInt(METRIC_INTERVAL
+ NumRunningJobMetric.NUM_RUNNING_JOB_METRIC_NAME,
props.getInt(METRIC_INTERVAL + "default"))));
logger.info("Adding number of running flows metric");
metricManager.addMetric(new NumRunningFlowMetric(this.runnerManager,
metricManager, props.getInt(METRIC_INTERVAL
+ NumRunningFlowMetric.NUM_RUNNING_FLOW_METRIC_NAME,
props.getInt(METRIC_INTERVAL + "default"))));
logger.info("Adding number of queued flows metric");
metricManager.addMetric(new NumQueuedFlowMetric(this.runnerManager,
metricManager, props.getInt(METRIC_INTERVAL
+ NumQueuedFlowMetric.NUM_QUEUED_FLOW_METRIC_NAME,
props.getInt(METRIC_INTERVAL + "default"))));
logger.info("Completed configuring Metric Reports");
}
}
/**
   * Loads a custom class specified by the configuration property
   * CUSTOM_JMX_ATTRIBUTE_PROCESSOR_PROPERTY.
   * <p>
   * This method tries to instantiate the custom class, passing the given properties as the
   * constructor argument.
   * <p>
   * The custom class must therefore have a constructor that takes a single argument of type
   * Properties.
*/
private void loadCustomJMXAttributeProcessor(final Props props) {
final String jmxAttributeEmitter =
props.get(CUSTOM_JMX_ATTRIBUTE_PROCESSOR_PROPERTY);
if (jmxAttributeEmitter != null) {
try {
logger.info("jmxAttributeEmitter: " + jmxAttributeEmitter);
final Constructor<Props>[] constructors =
(Constructor<Props>[]) Class.forName(jmxAttributeEmitter).getConstructors();
constructors[0].newInstance(props.toProperties());
} catch (final Exception e) {
logger.error("Encountered error while loading and instantiating "
+ jmxAttributeEmitter, e);
throw new IllegalStateException(
"Encountered error while loading and instantiating "
+ jmxAttributeEmitter, e);
}
} else {
logger.info("No value for property: "
+ CUSTOM_JMX_ATTRIBUTE_PROCESSOR_PROPERTY + " was found");
}
}
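  // A minimal sketch of a conforming processor (class name is hypothetical).
  // The only contract enforced above is a public constructor taking
  // java.util.Properties, invoked reflectively at startup:
  //
  //   public class MyJmxAttributeProcessor {
  //     public MyJmxAttributeProcessor(final java.util.Properties props) {
  //       // start processing/emitting JMX attributes here
  //     }
  //   }
  //
  // enabled via: jmx.attribute.processor.class=com.example.MyJmxAttributeProcessor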
public ExecutorLoader getExecutorLoader() {
return this.executionLoader;
}
/**
* Returns the global azkaban properties
*/
public Props getAzkabanProps() {
return this.props;
}
public FlowRunnerManager getFlowRunnerManager() {
return this.runnerManager;
}
public FlowRampManager getFlowRampManager() {
return this.rampManager;
}
/**
* Get the hostname
*
* @return hostname
*/
public String getHost() {
if (this.props.containsKey(ConfigurationKeys.AZKABAN_SERVER_HOST_NAME)) {
final String hostName = this.props
.getString(Constants.ConfigurationKeys.AZKABAN_SERVER_HOST_NAME);
if (!StringUtils.isEmpty(hostName)) {
return hostName;
}
}
String host = "unkownHost";
try {
host = InetAddress.getLocalHost().getCanonicalHostName();
} catch (final Exception e) {
logger.error("Failed to fetch LocalHostName");
}
return host;
}
/**
* Get the current server port
*
* @return the port at which the executor server is running
*/
public int getPort() {
final Connector[] connectors = this.server.getConnectors();
checkState(connectors.length >= 1, "Server must have at least 1 connector");
// The first connector is created upon initializing the server. That's the one that has the port.
return connectors[0].getLocalPort();
}
/**
* Returns host:port combination for currently running executor
*/
public String getExecutorHostPort() {
return getHost() + ":" + getPort();
}
private void sleep(final Duration duration) {
try {
Thread.sleep(duration.toMillis());
} catch (final InterruptedException e) {
logger.error(e);
}
}
/**
   * Shuts down the server. Performs a safe shutdown that waits for completion of current tasks:
   * spawns a shutdown thread and returns immediately.
*/
public void shutdown() {
logger.warn("Shutting down AzkabanExecutorServer...");
new Thread(() -> {
// Hack: Sleep for a little time to allow API calls to complete
sleep(Duration.ofSeconds(2));
shutdownInternal();
}, "shutdown").start();
}
/**
* (internal API) Note: This should be run in a separate thread.
* <p>
   * Shuts down the server (blocking call): waits for jobs to finish and doesn't accept any new
   * jobs.
*/
private void shutdownInternal() {
getFlowRampManager().shutdown();
getFlowRunnerManager().shutdown();
// Sleep for an hour to wait for web server updater thread
// {@link azkaban.executor.RunningExecutionsUpdaterThread#updateExecutions} to finalize updating
sleep(Duration.ofHours(1));
// trigger shutdown hook
System.exit(0);
}
/**
* Shutdown the server now! (unsafe)
*/
public void shutdownNow() throws Exception {
this.server.stop();
this.server.destroy();
getFlowRampManager().shutdownNow();
getFlowRunnerManager().shutdownNow();
this.mbeanRegistrationManager.closeMBeans();
}
@Override
public void configureMBeanServer() {
logger.info("Registering MBeans...");
this.mbeanRegistrationManager.registerMBean("executorJetty", new JmxJettyServer(this.server));
this.mbeanRegistrationManager.registerMBean("flowRunnerManager", new JmxFlowRunnerManager(this.runnerManager));
this.mbeanRegistrationManager.registerMBean("flowRampManager", new JmxFlowRampManager(this.rampManager));
this.mbeanRegistrationManager.registerMBean("jobJMXMBean", JmxJobMBeanManager.getInstance());
if (JobCallbackManager.isInitialized()) {
final JobCallbackManager jobCallbackMgr = JobCallbackManager.getInstance();
this.mbeanRegistrationManager
.registerMBean("jobCallbackJMXMBean", jobCallbackMgr.getJmxJobCallbackMBean());
}
}
@Override
public MBeanRegistrationManager getMBeanRegistrationManager() {
return this.mbeanRegistrationManager;
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban/execapp/ConditionalWorkflowUtils.java
|
/*
* Copyright 2018 LinkedIn Corp.
*
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.execapp;
import static azkaban.flow.ConditionOnJobStatus.ALL_FAILED;
import static azkaban.flow.ConditionOnJobStatus.ALL_SUCCESS;
import static azkaban.flow.ConditionOnJobStatus.ONE_FAILED;
import static azkaban.flow.ConditionOnJobStatus.ONE_SUCCESS;
import azkaban.executor.ExecutableNode;
import azkaban.executor.Status;
import azkaban.flow.ConditionOnJobStatus;
public class ConditionalWorkflowUtils {
public static final String SATISFIED = "satisfied";
public static final String PENDING = "pending";
public static final String FAILED = "failed";
public static String checkConditionOnJobStatus(final ExecutableNode node) {
final ConditionOnJobStatus conditionOnJobStatus = node.getConditionOnJobStatus();
switch (conditionOnJobStatus) {
case ALL_SUCCESS:
case ALL_FAILED:
case ALL_DONE:
return checkAllStatus(node, conditionOnJobStatus);
case ONE_FAILED:
case ONE_SUCCESS:
return checkOneStatus(node, conditionOnJobStatus);
default:
return checkAllStatus(node, ALL_SUCCESS);
}
}
private static String checkAllStatus(final ExecutableNode node, final ConditionOnJobStatus
condition) {
String result = SATISFIED;
for (final String dependency : node.getInNodes()) {
final ExecutableNode dependencyNode = node.getParentFlow().getExecutableNode(dependency);
final Status depStatus = dependencyNode.getStatus();
if (!Status.isStatusFinished(depStatus)) {
return PENDING;
} else if ((condition.equals(ALL_SUCCESS) && Status.isStatusFailed(depStatus)) ||
(condition.equals(ALL_FAILED) && Status.isStatusSucceeded(depStatus))) {
result = FAILED;
}
}
return result;
}
private static String checkOneStatus(final ExecutableNode node, final ConditionOnJobStatus
condition) {
String result = FAILED;
for (final String dependency : node.getInNodes()) {
final ExecutableNode dependencyNode = node.getParentFlow().getExecutableNode(dependency);
final Status depStatus = dependencyNode.getStatus();
if (!Status.isStatusFinished(depStatus)) {
return PENDING;
} else if ((condition.equals(ONE_SUCCESS) && Status.isStatusSucceeded(depStatus)) ||
(condition.equals(ONE_FAILED) && Status.isStatusFailed(depStatus))) {
result = SATISFIED;
}
}
return result;
}
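  // Worked example: a node with condition ONE_SUCCESS and dependencies
  // {a: FAILED, b: SUCCEEDED} evaluates to SATISFIED (b succeeded); with
  // {a: FAILED, b: RUNNING} it stays PENDING (b unfinished); with
  // {a: FAILED, b: FAILED} it is FAILED. For ALL_SUCCESS, a failed dependency
  // makes the result FAILED, but only once every dependency has finished;
  // any unfinished dependency short-circuits to PENDING.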
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban/execapp/ExecJettyServerModule.java
|
package azkaban.execapp;
import azkaban.Constants.ConfigurationKeys;
import azkaban.utils.Props;
import com.google.inject.AbstractModule;
import com.google.inject.Provides;
import javax.inject.Named;
import javax.inject.Singleton;
import org.apache.log4j.Logger;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.servlet.Context;
import org.mortbay.jetty.servlet.ServletHolder;
import org.mortbay.thread.QueuedThreadPool;
public class ExecJettyServerModule extends AbstractModule {
public static final String EXEC_JETTY_SERVER = "ExecServer";
public static final String EXEC_ROOT_CONTEXT = "root";
private static final int DEFAULT_THREAD_NUMBER = 50;
private static final int DEFAULT_HEADER_BUFFER_SIZE = 4096;
private static final int MAX_FORM_CONTENT_SIZE = 10 * 1024 * 1024;
private static final Logger logger = Logger.getLogger(ExecJettyServerModule.class);
@Override
protected void configure() {
}
@Provides
@Named(EXEC_JETTY_SERVER)
@Singleton
private Server createJettyServer(final Props props) {
final int maxThreads = props.getInt("executor.maxThreads", DEFAULT_THREAD_NUMBER);
/*
     * Defaults to port number 0 (zero).
     * The Jetty server automatically finds an unused port when the port number is set to zero.
     * TODO: This is using a highly outdated version of Jetty [year 2010]; it needs to be updated.
*/
final Server server = new Server(props.getInt(ConfigurationKeys.EXECUTOR_PORT, 0));
final QueuedThreadPool httpThreadPool = new QueuedThreadPool(maxThreads);
server.setThreadPool(httpThreadPool);
final boolean isStatsOn = props.getBoolean("executor.connector.stats", true);
logger.info("Setting up connector with stats on: " + isStatsOn);
for (final Connector connector : server.getConnectors()) {
connector.setStatsOn(isStatsOn);
logger.info(String.format(
"Jetty connector name: %s, default header buffer size: %d",
connector.getName(), connector.getHeaderBufferSize()));
connector.setHeaderBufferSize(props.getInt("jetty.headerBufferSize",
DEFAULT_HEADER_BUFFER_SIZE));
logger.info(String.format(
"Jetty connector name: %s, (if) new header buffer size: %d",
connector.getName(), connector.getHeaderBufferSize()));
}
return server;
}
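  // For example, with the executor port unset (or 0), Jetty binds an ephemeral
  // port, which AzkabanExecutorServer#getPort() later reads back from the first
  // connector. A fixed port can be pinned in azkaban.properties; the key name
  // below is illustrative, the exact string is ConfigurationKeys.EXECUTOR_PORT:
  //   executor.port=12321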
@Provides
@Named(EXEC_ROOT_CONTEXT)
@Singleton
private Context createRootContext(@Named(EXEC_JETTY_SERVER) final Server server) {
final Context root = new Context(server, "/", Context.SESSIONS);
root.setMaxFormContentSize(MAX_FORM_CONTENT_SIZE);
root.addServlet(new ServletHolder(new ExecutorServlet()), "/executor");
root.addServlet(new ServletHolder(new JMXHttpServlet()), "/jmx");
root.addServlet(new ServletHolder(new StatsServlet()), "/stats");
root.addServlet(new ServletHolder(new ServerStatisticsServlet()), "/serverStatistics");
return root;
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban/execapp/ExecMetrics.java
|
/*
* Copyright 2017 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.execapp;
import azkaban.execapp.metric.ProjectCacheHitRatio;
import azkaban.metrics.MetricsManager;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Meter;
import com.codahale.metrics.Timer;
import javax.inject.Inject;
import javax.inject.Singleton;
/**
* This class ExecMetrics is in charge of collecting metrics from executors.
*/
@Singleton
public class ExecMetrics {
public static final String NUM_RUNNING_FLOWS_NAME = "EXEC-NumRunningFlows";
public static final String NUM_QUEUED_FLOWS_NAME = "EXEC-NumQueuedFlows";
public static final String PROJECT_DIR_CACHE_HIT_RATIO_NAME = "project-dir-cache-hit-ratio";
public static final String FLOW_SETUP_TIMER_NAME = "flow-setup-timer";
public static final String FLOW_KILLING_COUNTER_NAME = "flow-killing-counter";
public static final String FLOW_TIME_TO_KILL_HISTOGRAM_NAME = "flow-time-to-kill-histogram";
public static final String FLOW_KILLED_METER_NAME = "flow-killed-meter";
public static final String FLOW_SUCCESS_METER_NAME = "flow-success-meter";
public static final String JOB_FAIL_METER_NAME = "job-fail-meter";
public static final String JOB_SUCCESS_METER_NAME = "job-success-meter";
public static final String JOB_KILLED_METER_NAME = "job-killed-meter";
private final MetricsManager metricsManager;
private Timer flowSetupTimer;
private final ProjectCacheHitRatio projectCacheHitRatio;
private Counter flowKillingCounter;
private Histogram flowTimeToKillHistogram;
private Meter flowKilledMeter;
private Meter flowSuccessMeter;
private Meter jobFailMeter;
private Meter jobSuccessMeter;
private Meter jobKilledMeter;
// TODO ypadron-in: add metrics to measure the time between flow submission and flow execution
// preparation/start after clock skew issues in execution times are resolved.
@Inject
ExecMetrics(final MetricsManager metricsManager) {
this.metricsManager = metricsManager;
// setup project cache ratio metrics
this.projectCacheHitRatio = new ProjectCacheHitRatio();
this.metricsManager.addGauge(PROJECT_DIR_CACHE_HIT_RATIO_NAME,
this.projectCacheHitRatio::getValue);
this.flowSetupTimer = this.metricsManager.addTimer(FLOW_SETUP_TIMER_NAME);
this.flowKillingCounter = this.metricsManager.addCounter(FLOW_KILLING_COUNTER_NAME);
this.flowTimeToKillHistogram =
this.metricsManager.addHistogram(FLOW_TIME_TO_KILL_HISTOGRAM_NAME);
this.flowKilledMeter = this.metricsManager.addMeter(FLOW_KILLED_METER_NAME);
this.flowSuccessMeter = this.metricsManager.addMeter(FLOW_SUCCESS_METER_NAME);
this.jobFailMeter = this.metricsManager.addMeter(JOB_FAIL_METER_NAME);
this.jobSuccessMeter = this.metricsManager.addMeter(JOB_SUCCESS_METER_NAME);
this.jobKilledMeter = this.metricsManager.addMeter(JOB_KILLED_METER_NAME);
}
ProjectCacheHitRatio getProjectCacheHitRatio() {
return this.projectCacheHitRatio;
}
public void addFlowRunnerManagerMetrics(final FlowRunnerManager flowRunnerManager) {
this.metricsManager
.addGauge(NUM_RUNNING_FLOWS_NAME, flowRunnerManager::getNumRunningFlows);
this.metricsManager
.addGauge(NUM_QUEUED_FLOWS_NAME, flowRunnerManager::getNumQueuedFlows);
}
/**
* @return the {@link Timer.Context} for the timer.
*/
public Timer.Context getFlowSetupTimerContext() {
return this.flowSetupTimer.time();
}
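  // Usage sketch: Timer.Context implements Closeable, so a caller can time a
  // setup block with try-with-resources (illustrative):
  //
  //   try (final Timer.Context ignored = execMetrics.getFlowSetupTimerContext()) {
  //     // ... prepare the flow ...
  //   } // elapsed time is recorded when the context is closed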
/**
* Increment the number of flow executions in killing status.
*/
public void incrementFlowKillingCount() {
this.flowKillingCounter.inc();
}
/**
* Decrement the number of flow executions in killing status.
*/
public void decrementFlowKillingCount() {
this.flowKillingCounter.dec();
}
/**
* Add the time it took to kill all the jobs in an execution.
*
* @param time killing-to-killed time for a flow
*/
public void addFlowTimeToKill(final long time) {
this.flowTimeToKillHistogram.update(time);
}
/**
* Record a killed flow execution event.
*/
public void markFlowKilled() { this.flowKilledMeter.mark(); }
/**
* Record a successful flow execution event.
*/
public void markFlowSuccess() { this.flowSuccessMeter.mark(); }
/**
* Record a failed job execution event.
*/
public void markJobFail() { this.jobFailMeter.mark(); }
/**
* Record a successful job execution event.
*/
public void markJobSuccess() { this.jobSuccessMeter.mark(); }
/**
* Record a killed job execution event.
*/
public void markJobKilled() { this.jobKilledMeter.mark(); }
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban/execapp/ExecutorServlet.java
|
/*
* Copyright 2018 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.execapp;
import static java.util.Objects.requireNonNull;
import azkaban.Constants;
import azkaban.executor.ConnectorParams;
import azkaban.executor.ExecutableFlowBase;
import azkaban.executor.Executor;
import azkaban.executor.ExecutorLoader;
import azkaban.executor.ExecutorManagerException;
import azkaban.utils.FileIOUtils.JobMetaData;
import azkaban.utils.FileIOUtils.LogData;
import azkaban.utils.JSONUtils;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.servlet.ServletConfig;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.log4j.Logger;
import org.codehaus.jackson.map.ObjectMapper;
public class ExecutorServlet extends HttpServlet implements ConnectorParams {
public static final String JSON_MIME_TYPE = "application/json";
private static final Logger logger = Logger.getLogger(ExecutorServlet.class
.getName());
private static final long serialVersionUID = -3528600004096666451L;
private AzkabanExecutorServer application;
private FlowRunnerManager flowRunnerManager;
public ExecutorServlet() {
super();
}
@Override
public void init(final ServletConfig config) {
this.application =
(AzkabanExecutorServer) config.getServletContext().getAttribute(
Constants.AZKABAN_SERVLET_CONTEXT_KEY);
if (this.application == null) {
throw new IllegalStateException(
"No batch application is defined in the servlet context!");
}
this.flowRunnerManager = this.application.getFlowRunnerManager();
}
protected void writeJSON(final HttpServletResponse resp, final Object obj)
throws IOException {
resp.setContentType(JSON_MIME_TYPE);
final ObjectMapper mapper = new ObjectMapper();
final OutputStream stream = resp.getOutputStream();
mapper.writeValue(stream, obj);
}
/**
   * @deprecated GET is kept only for seamless upgrade; azkaban-web now uses POST.
*/
@Deprecated
@Override
public void doGet(final HttpServletRequest req, final HttpServletResponse resp)
throws IOException {
handleRequest(req, resp);
}
@Override
public void doPost(final HttpServletRequest req, final HttpServletResponse resp)
throws IOException {
handleRequest(req, resp);
}
public void handleRequest(final HttpServletRequest req, final HttpServletResponse resp)
throws IOException {
final HashMap<String, Object> respMap = new HashMap<>();
try {
if (!hasParam(req, ConnectorParams.ACTION_PARAM)) {
logger.error("Parameter action not set");
respMap.put("error", "Parameter action not set");
} else {
final String action = getParam(req, ConnectorParams.ACTION_PARAM);
if (action.equals(ConnectorParams.UPDATE_ACTION)) {
handleAjaxUpdateRequest(req, respMap);
} else if (action.equals(ConnectorParams.PING_ACTION)) {
respMap.put(ConnectorParams.STATUS_PARAM, ConnectorParams.RESPONSE_ALIVE);
} else if (action.equals(ConnectorParams.RELOAD_JOBTYPE_PLUGINS_ACTION)) {
logger.info("Reloading Jobtype plugins");
handleReloadJobTypePlugins(respMap);
} else if (action.equals(ConnectorParams.ACTIVATE)) {
logger.warn("Setting ACTIVE flag to true");
setActive(true, respMap);
} else if (action.equals(ConnectorParams.GET_STATUS)) {
logger.debug("Get Executor Status: ");
getStatus(respMap);
} else if (action.equals(ConnectorParams.DEACTIVATE)) {
logger.warn("Setting ACTIVE flag to false");
setActive(false, respMap);
} else if (action.equals(ConnectorParams.SHUTDOWN)) {
shutdown(respMap);
} else {
final int execid = Integer.parseInt(getParam(req, ConnectorParams.EXECID_PARAM));
final String user = getParam(req, ConnectorParams.USER_PARAM, null);
logger.info("User " + user + " has called action " + action + " on "
+ execid);
if (action.equals(ConnectorParams.METADATA_ACTION)) {
handleFetchMetaDataEvent(execid, req, resp, respMap);
} else if (action.equals(ConnectorParams.LOG_ACTION)) {
handleFetchLogEvent(execid, req, resp, respMap);
} else if (action.equals(ConnectorParams.ATTACHMENTS_ACTION)) {
handleFetchAttachmentsEvent(execid, req, resp, respMap);
} else if (action.equals(ConnectorParams.EXECUTE_ACTION)) {
handleAjaxExecute(req, respMap, execid);
} else if (action.equals(ConnectorParams.STATUS_ACTION)) {
handleAjaxFlowStatus(respMap, execid);
} else if (action.equals(ConnectorParams.CANCEL_ACTION)) {
logger.info("Cancel called.");
handleAjaxCancel(respMap, execid, user);
} else if (action.equals(ConnectorParams.PAUSE_ACTION)) {
logger.info("Paused called.");
handleAjaxPause(respMap, execid, user);
} else if (action.equals(ConnectorParams.RESUME_ACTION)) {
logger.info("Resume called.");
handleAjaxResume(respMap, execid, user);
} else if (action.equals(ConnectorParams.MODIFY_EXECUTION_ACTION)) {
logger.info("Modify Execution Action");
handleModifyExecutionRequest(respMap, execid, user, req);
} else {
logger.error("action: '" + action + "' not supported.");
respMap.put("error", "action: '" + action + "' not supported.");
}
}
}
} catch (final Exception e) {
logger.error(e.getMessage(), e);
respMap.put(ConnectorParams.RESPONSE_ERROR, e.getMessage());
}
writeJSON(resp, respMap);
resp.flushBuffer();
}
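  // Illustrative request (the servlet is mounted at /executor; host, port and
  // the literal action/response strings are assumptions here, see
  // ConnectorParams for the exact values):
  //
  //   curl 'http://executor-host:12321/executor?action=ping'
  //   // -> {"status":"alive"}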
private void handleModifyExecutionRequest(final Map<String, Object> respMap,
final int execId, final String user, final HttpServletRequest req) throws ServletException {
if (!hasParam(req, ConnectorParams.MODIFY_EXECUTION_ACTION_TYPE)) {
respMap.put(ConnectorParams.RESPONSE_ERROR, "Modification type not set.");
}
final String modificationType = getParam(req, ConnectorParams.MODIFY_EXECUTION_ACTION_TYPE);
try {
if (ConnectorParams.MODIFY_RETRY_FAILURES.equals(modificationType)) {
this.flowRunnerManager.retryFailures(execId, user);
}
} catch (final ExecutorManagerException e) {
logger.error(e.getMessage(), e);
respMap.put("error", e.getMessage());
}
}
private void handleFetchLogEvent(final int execId, final HttpServletRequest req,
final HttpServletResponse resp, final Map<String, Object> respMap)
throws ServletException {
final String type = getParam(req, "type");
final int startByte = getIntParam(req, "offset");
final int length = getIntParam(req, "length");
resp.setContentType("text/plain");
resp.setCharacterEncoding("utf-8");
if (type.equals("flow")) {
final LogData result;
try {
result = this.flowRunnerManager.readFlowLogs(execId, startByte, length);
respMap.putAll(result.toObject());
} catch (final Exception e) {
logger.error(e.getMessage(), e);
respMap.put(ConnectorParams.RESPONSE_ERROR, e.getMessage());
}
} else {
final int attempt = getIntParam(req, "attempt", 0);
final String jobId = getParam(req, "jobId");
try {
final LogData result =
this.flowRunnerManager.readJobLogs(execId, jobId, attempt, startByte,
length);
respMap.putAll(result.toObject());
} catch (final Exception e) {
logger.error(e.getMessage(), e);
respMap.put("error", e.getMessage());
}
}
}
private void handleFetchAttachmentsEvent(final int execId, final HttpServletRequest req,
final HttpServletResponse resp, final Map<String, Object> respMap)
throws ServletException {
final String jobId = getParam(req, "jobId");
final int attempt = getIntParam(req, "attempt", 0);
try {
final List<Object> result =
this.flowRunnerManager.readJobAttachments(execId, jobId, attempt);
respMap.put("attachments", result);
} catch (final Exception e) {
logger.error(e.getMessage(), e);
respMap.put("error", e.getMessage());
}
}
private void handleFetchMetaDataEvent(final int execId, final HttpServletRequest req,
final HttpServletResponse resp, final Map<String, Object> respMap)
throws ServletException {
final int startByte = getIntParam(req, "offset");
final int length = getIntParam(req, "length");
resp.setContentType("text/plain");
resp.setCharacterEncoding("utf-8");
final int attempt = getIntParam(req, "attempt", 0);
final String jobId = getParam(req, "jobId");
try {
final JobMetaData result =
this.flowRunnerManager.readJobMetaData(execId, jobId, attempt, startByte,
length);
respMap.putAll(result.toObject());
} catch (final Exception e) {
logger.error(e.getMessage(), e);
respMap.put("error", e.getMessage());
}
}
private void handleAjaxUpdateRequest(final HttpServletRequest req,
final Map<String, Object> respMap) throws ServletException, IOException {
final ArrayList<Object> updateTimesList =
(ArrayList<Object>) JSONUtils.parseJSONFromString(getParam(req,
ConnectorParams.UPDATE_TIME_LIST_PARAM));
final ArrayList<Object> execIDList =
(ArrayList<Object>) JSONUtils.parseJSONFromString(getParam(req,
ConnectorParams.EXEC_ID_LIST_PARAM));
final ArrayList<Object> updateList = new ArrayList<>();
for (int i = 0; i < execIDList.size(); ++i) {
final long updateTime = JSONUtils.getLongFromObject(updateTimesList.get(i));
final int execId = (Integer) execIDList.get(i);
final ExecutableFlowBase flow = this.flowRunnerManager.getExecutableFlow(execId);
if (flow == null) {
final Map<String, Object> errorResponse = new HashMap<>();
errorResponse.put(ConnectorParams.RESPONSE_ERROR, "Flow does not exist");
errorResponse.put(ConnectorParams.UPDATE_MAP_EXEC_ID, execId);
updateList.add(errorResponse);
continue;
}
if (flow.getUpdateTime() > updateTime) {
updateList.add(flow.toUpdateObject(updateTime));
}
}
respMap.put(ConnectorParams.RESPONSE_UPDATED_FLOWS, updateList);
}
private void handleAjaxExecute(final HttpServletRequest req,
final Map<String, Object> respMap, final int execId) {
try {
this.flowRunnerManager.submitFlow(execId);
} catch (final ExecutorManagerException e) {
logger.error(e.getMessage(), e);
respMap.put(ConnectorParams.RESPONSE_ERROR, e.getMessage());
}
}
private void handleAjaxFlowStatus(final Map<String, Object> respMap, final int execid) {
final ExecutableFlowBase flow = this.flowRunnerManager.getExecutableFlow(execid);
if (flow == null) {
respMap.put(ConnectorParams.STATUS_PARAM, ConnectorParams.RESPONSE_NOTFOUND);
} else {
respMap.put(ConnectorParams.STATUS_PARAM, flow.getStatus().toString());
respMap.put(ConnectorParams.RESPONSE_UPDATETIME, flow.getUpdateTime());
}
}
private void handleAjaxPause(final Map<String, Object> respMap, final int execid,
final String user) {
if (user == null) {
respMap.put(ConnectorParams.RESPONSE_ERROR, "user has not been set");
return;
}
try {
this.flowRunnerManager.pauseFlow(execid, user);
respMap.put(ConnectorParams.STATUS_PARAM, ConnectorParams.RESPONSE_SUCCESS);
} catch (final ExecutorManagerException e) {
logger.error(e.getMessage(), e);
respMap.put(ConnectorParams.RESPONSE_ERROR, e.getMessage());
}
}
private void handleAjaxResume(final Map<String, Object> respMap, final int execid,
final String user) throws ServletException {
if (user == null) {
respMap.put(ConnectorParams.RESPONSE_ERROR, "user has not been set");
return;
}
try {
this.flowRunnerManager.resumeFlow(execid, user);
respMap.put(ConnectorParams.STATUS_PARAM, ConnectorParams.RESPONSE_SUCCESS);
} catch (final ExecutorManagerException e) {
logger.error(e.getMessage(), e);
respMap.put(ConnectorParams.RESPONSE_ERROR, e.getMessage());
}
}
private void handleAjaxCancel(final Map<String, Object> respMap, final int execid,
final String user) {
if (user == null) {
respMap.put(ConnectorParams.RESPONSE_ERROR, "user has not been set");
return;
}
try {
this.flowRunnerManager.cancelFlow(execid, user);
respMap.put(ConnectorParams.STATUS_PARAM, ConnectorParams.RESPONSE_SUCCESS);
} catch (final ExecutorManagerException e) {
logger.error(e.getMessage(), e);
respMap.put(ConnectorParams.RESPONSE_ERROR, e.getMessage());
}
}
private void handleReloadJobTypePlugins(final Map<String, Object> respMap) {
try {
this.flowRunnerManager.reloadJobTypePlugins();
respMap.put(ConnectorParams.STATUS_PARAM, ConnectorParams.RESPONSE_SUCCESS);
} catch (final Exception e) {
logger.error(e.getMessage(), e);
respMap.put(ConnectorParams.RESPONSE_ERROR, e.getMessage());
}
}
private void setActive(final boolean value, final Map<String, Object> respMap) {
try {
setActiveInternal(value);
respMap.put(ConnectorParams.STATUS_PARAM, ConnectorParams.RESPONSE_SUCCESS);
} catch (final Exception e) {
logger.error(e.getMessage(), e);
respMap.put(ConnectorParams.RESPONSE_ERROR, e.getMessage());
}
}
private void setActiveInternal(final boolean value)
throws ExecutorManagerException, InterruptedException {
this.flowRunnerManager.setExecutorActive(value,
this.application.getHost(), this.application.getPort());
}
/**
* Prepare the executor for shutdown.
*
* @param respMap json response object
*/
private void shutdown(final Map<String, Object> respMap) {
try {
logger.warn("Shutting down executor...");
// Set the executor to inactive. Will receive no new flows.
setActiveInternal(false);
this.application.shutdown();
respMap.put(ConnectorParams.STATUS_PARAM, ConnectorParams.RESPONSE_SUCCESS);
} catch (final Exception e) {
logger.error(e.getMessage(), e);
respMap.put(ConnectorParams.RESPONSE_ERROR, e.getMessage());
}
}
private void getStatus(final Map<String, Object> respMap) {
try {
final ExecutorLoader executorLoader = this.application.getExecutorLoader();
final Executor executor = requireNonNull(
executorLoader.fetchExecutor(this.application.getHost(), this.application.getPort()),
"The executor can not be null");
respMap.put("executor_id", Integer.toString(executor.getId()));
respMap.put("isActive", String.valueOf(executor.isActive()));
respMap.put(ConnectorParams.STATUS_PARAM, ConnectorParams.RESPONSE_SUCCESS);
} catch (final Exception e) {
logger.error(e.getMessage(), e);
respMap.put(ConnectorParams.RESPONSE_ERROR, e.getMessage());
}
}
  /**
   * Duplicated code with AbstractAzkabanServlet, but needed here because this servlet does not
   * extend it.
   */
public boolean hasParam(final HttpServletRequest request, final String param) {
return request.getParameter(param) != null;
}
public String getParam(final HttpServletRequest request, final String name)
throws ServletException {
final String p = request.getParameter(name);
if (p == null) {
throw new ServletException("Missing required parameter '" + name + "'.");
} else {
return p;
}
}
public String getParam(final HttpServletRequest request, final String name,
final String defaultVal) {
final String p = request.getParameter(name);
if (p == null) {
return defaultVal;
}
return p;
}
public int getIntParam(final HttpServletRequest request, final String name)
throws ServletException {
final String p = getParam(request, name);
return Integer.parseInt(p);
}
public int getIntParam(final HttpServletRequest request, final String name,
final int defaultVal) {
if (hasParam(request, name)) {
try {
return getIntParam(request, name);
} catch (final Exception e) {
return defaultVal;
}
}
return defaultVal;
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban/execapp/FlowPreparer.java
|
/*
* Copyright 2017 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
*/
package azkaban.execapp;
import static azkaban.utils.ThinArchiveUtils.getDependencyFile;
import static com.google.common.base.Preconditions.checkState;
import static java.util.Objects.requireNonNull;
import azkaban.execapp.metric.ProjectCacheHitRatio;
import azkaban.executor.ExecutableFlow;
import azkaban.executor.ExecutorManagerException;
import azkaban.project.ProjectFileHandler;
import azkaban.spi.Dependency;
import azkaban.spi.DependencyFile;
import azkaban.storage.ProjectStorageManager;
import azkaban.utils.DependencyTransferException;
import azkaban.utils.DependencyTransferManager;
import azkaban.utils.FileIOUtils;
import azkaban.utils.Utils;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.FileTime;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.zip.ZipFile;
import org.apache.commons.io.FileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
class FlowPreparer {
// Name of the file which keeps project directory size
static final String PROJECT_DIR_SIZE_FILE_NAME = "___azkaban_project_dir_size_in_bytes___";
private static final Logger LOGGER = LoggerFactory.getLogger(FlowPreparer.class);
// TODO spyne: move to config class
private final File executionsDir;
// TODO spyne: move to config class
private final File projectCacheDir;
private final ProjectStorageManager projectStorageManager;
// Null if cache clean-up is disabled
private final Optional<ProjectCacheCleaner> projectCacheCleaner;
private final ProjectCacheHitRatio projectCacheHitRatio;
private final DependencyTransferManager dependencyTransferManager;
FlowPreparer(final ProjectStorageManager projectStorageManager, final DependencyTransferManager dependencyTransferManager,
final File projectsDir, final ProjectCacheCleaner cleaner, final ProjectCacheHitRatio projectCacheHitRatio,
final File executionsDir) {
Preconditions.checkNotNull(projectStorageManager);
Preconditions.checkNotNull(executionsDir);
Preconditions.checkNotNull(projectsDir);
Preconditions.checkNotNull(projectCacheHitRatio);
Preconditions.checkArgument(projectsDir.exists());
Preconditions.checkArgument(executionsDir.exists());
this.projectStorageManager = projectStorageManager;
this.executionsDir = executionsDir;
this.projectCacheDir = projectsDir;
this.projectCacheCleaner = Optional.ofNullable(cleaner);
this.projectCacheHitRatio = projectCacheHitRatio;
this.dependencyTransferManager = dependencyTransferManager;
}
/**
* Calculate the directory size and save it to a file.
*
* @param dir the directory whose size needs to be saved.
* @return the size of the dir.
*/
static long calculateDirSizeAndSave(final File dir) throws IOException {
final Path path = Paths.get(dir.getPath(), FlowPreparer.PROJECT_DIR_SIZE_FILE_NAME);
if (!Files.exists(path)) {
final long sizeInByte = FileUtils.sizeOfDirectory(dir);
FileIOUtils.dumpNumberToFile(path, sizeInByte);
return sizeInByte;
} else {
return FileIOUtils.readNumberFromFile(path);
}
}
/**
* Prepare the flow directory for execution.
*
* @param flow Executable Flow instance.
*/
void setup(final ExecutableFlow flow) throws ExecutorManagerException {
    // The downloaded project file handler is created and cleaned up inside
    // downloadAndUnzipProject(), so it does not need to be tracked here.
File tempDir = null;
try {
final ProjectDirectoryMetadata project = new ProjectDirectoryMetadata(
flow.getProjectId(),
flow.getVersion());
final long flowPrepStartTime = System.currentTimeMillis();
tempDir = downloadProjectIfNotExists(project, flow.getExecutionId());
LOGGER.info("Project is setup for execution {}", flow.getExecutionId());
      // With synchronization, only one thread is allowed to proceed, to avoid complicated race
      // conditions which could arise when multiple threads are downloading/deleting/hard-linking
      // the same project. But it doesn't prevent multiple executor processes from interfering
      // with each other and triggering race conditions, so it's important to operationally make
      // sure that only one executor process is setting up flow execution against the shared
      // project directory.
long criticalSectionStartTime = -1;
File execDir = null;
synchronized (this) {
LOGGER.info("Setting up execution dir for {}", flow.getExecutionId());
criticalSectionStartTime = System.currentTimeMillis();
if (!project.getInstalledDir().exists() && tempDir != null) {
// If new project is downloaded and project dir cache clean-up feature is enabled, then
// perform clean-up if size of all project dirs exceeds the cache size.
if (this.projectCacheCleaner.isPresent()) {
this.projectCacheCleaner.get()
.deleteProjectDirsIfNecessary(project.getDirSizeInByte());
}
// Rename temp dir to a proper project directory name.
Files.move(tempDir.toPath(), project.getInstalledDir().toPath());
}
final long start = System.currentTimeMillis();
execDir = setupExecutionDir(project.getInstalledDir(), flow);
final long end = System.currentTimeMillis();
LOGGER.info("Setting up execution dir {} took {} sec(s)", execDir, (end - start) / 1000);
}
final long flowPrepCompletionTime = System.currentTimeMillis();
LOGGER.info("Flow preparation completed in {} sec(s), out ot which {} sec(s) was spent inside "
+ "critical section. [execid: {}, path: {}]",
(flowPrepCompletionTime - flowPrepStartTime) / 1000,
(flowPrepCompletionTime - criticalSectionStartTime) / 1000,
flow.getExecutionId(), execDir.getPath());
} catch (final Exception ex) {
FileIOUtils.deleteDirectorySilently(tempDir);
LOGGER.error("Error in preparing flow execution {}", flow.getExecutionId(), ex);
throw new ExecutorManagerException(ex);
    }
}
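  // A note on the synchronized block in setup() above: it only serializes threads within this
  // executor process. If one ever needed to also guard against a second executor process
  // preparing the same shared project directory, an OS-level file lock would be one option.
  // This is a hedged, illustrative sketch (lockFile is hypothetical), not Azkaban's actual
  // design:
  //
  //   try (java.nio.channels.FileChannel channel = java.nio.channels.FileChannel.open(
  //           lockFile.toPath(),
  //           java.nio.file.StandardOpenOption.CREATE, java.nio.file.StandardOpenOption.WRITE);
  //       java.nio.channels.FileLock lock = channel.lock()) {
  //     // critical section: move the temp dir into place and create the hardlinks
  //   }
  //
  // Azkaban instead relies on the operational guarantee that a single executor process owns the
  // shared project cache directory.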
private File setupExecutionDir(final File installedDir, final ExecutableFlow flow)
throws IOException {
File execDir = null;
try {
execDir = createExecDir(flow);
// Create hardlinks from the project
FileIOUtils.createDeepHardlink(installedDir, execDir);
return execDir;
} catch (final Exception ex) {
FileIOUtils.deleteDirectorySilently(execDir);
throw ex;
}
}
/**
* Update last modified time of the file if it exists.
*
* @param path path to the target file
*/
@VisibleForTesting
void updateLastModifiedTime(final Path path) {
try {
Files.setLastModifiedTime(path, FileTime.fromMillis(System.currentTimeMillis()));
} catch (final IOException ex) {
LOGGER.warn("Error when updating last modified time for {}", path, ex);
}
}
/**
* @return the project directory name of a project
*/
private String generateProjectDirName(final ProjectDirectoryMetadata proj) {
    return proj.getProjectId() + "." + proj.getVersion();
}
private File createTempDir(final ProjectDirectoryMetadata proj) {
final String projectDir = generateProjectDirName(proj);
final File tempDir = new File(this.projectCacheDir,
"_temp." + projectDir + "." + System.currentTimeMillis());
tempDir.mkdirs();
return tempDir;
}
@VisibleForTesting
void downloadAndUnzipProject(final ProjectDirectoryMetadata proj, final int execId, final File dest)
throws IOException {
final long start = System.currentTimeMillis();
final ProjectFileHandler projectFileHandler = requireNonNull(this.projectStorageManager
.getProjectFile(proj.getProjectId(), proj.getVersion()));
LOGGER.info("Downloading zip file for project {} when preparing "
+ "execution [execid {}] completed in {} second(s)", proj, execId,
(System.currentTimeMillis() - start) / 1000);
try {
checkState("zip".equalsIgnoreCase(projectFileHandler.getFileType()));
final File zipFile = requireNonNull(projectFileHandler.getLocalFile());
      // Close the zip after extraction to avoid leaking the file handle.
      try (final ZipFile zip = new ZipFile(zipFile)) {
        Utils.unzip(zip, dest);
      }
// Download all startup dependencies. If this is a fat archive, it will be an empty set (so we won't download
// anything). Note that we are getting our list of startup dependencies from the DB, NOT from the
// startup-dependencies.json file contained in the archive. Both should be IDENTICAL, however we chose to get the
// list from the DB because this will be consistent with how containerized executions determine the startup
// dependency list.
downloadAllDependencies(proj, execId, dest, projectFileHandler.getStartupDependencies());
proj.setDirSizeInByte(calculateDirSizeAndSave(dest));
} finally {
projectFileHandler.deleteLocalFile();
}
}
/**
* Download necessary JAR dependencies from storage
*
* @param proj project to download
* @param execId execution id number
* @param folder root of unzipped project
* @param dependencies the set of dependencies to download
*/
private void downloadAllDependencies(final ProjectDirectoryMetadata proj, final int execId, final File folder,
final Set<Dependency> dependencies) {
// Download all of the dependencies from storage
LOGGER.info("Downloading {} JAR dependencies... Project: {}, ExecId: {}", dependencies.size(), proj, execId);
Set<DependencyFile> depFiles = dependencies
.stream()
.map(d -> getDependencyFile(folder, d))
.collect(Collectors.toSet());
try {
final long start = System.currentTimeMillis();
this.dependencyTransferManager.downloadAllDependencies(depFiles);
LOGGER.info("Downloading {} JAR dependencies for project {} when preparing "
+ "execution [execid {}] completed in {} second(s)", dependencies.size(), proj, execId,
(System.currentTimeMillis() - start) / 1000);
} catch (DependencyTransferException e) {
LOGGER.error("Unable to download one or more dependencies when preparing execId {}.",
execId, proj);
throw e;
}
}
/**
* Download project zip and unzip it if not exists locally.
*
* @param proj project to download
* @param execId execution id number
* @return the temp dir where the new project is downloaded to, null if no project is downloaded.
* @throws IOException if downloading or unzipping fails.
*/
@VisibleForTesting
File downloadProjectIfNotExists(final ProjectDirectoryMetadata proj, final int execId)
throws IOException {
final String projectDir = generateProjectDirName(proj);
if (proj.getInstalledDir() == null) {
proj.setInstalledDir(new File(this.projectCacheDir, projectDir));
}
// If directory exists, assume it's prepared and skip.
if (proj.getInstalledDir().exists()) {
LOGGER.info("Project {} already cached. Skipping download. ExecId: {}", proj, execId);
// Hit the local cache.
this.projectCacheHitRatio.markHit();
      // Update last modified time of the file keeping project dir size when the project is
      // accessed. This last modified time will be used to determine the least recently used
      // projects when performing project directory clean-up.
updateLastModifiedTime(
Paths.get(proj.getInstalledDir().getPath(), PROJECT_DIR_SIZE_FILE_NAME));
return null;
}
this.projectCacheHitRatio.markMiss();
// Download project to a temp dir if not exists in local cache.
final File tempDir = createTempDir(proj);
downloadAndUnzipProject(proj, execId, tempDir);
return tempDir;
}
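  // Illustrative sketch of how the last-modified bump above enables LRU eviction. The actual
  // ProjectCacheCleaner implementation may differ; this is an assumption for illustration:
  //
  //   final File[] cached = projectCacheDir.listFiles(File::isDirectory);
  //   java.util.Arrays.sort(cached, java.util.Comparator.comparingLong(
  //       (File dir) -> new File(dir, PROJECT_DIR_SIZE_FILE_NAME).lastModified()));
  //   // delete from cached[0] upwards until the total size fits the configured cache budget
  //
  // The size file thus serves double duty: it caches the directory size and acts as the LRU
  // access marker.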
private File createExecDir(final ExecutableFlow flow) {
final int execId = flow.getExecutionId();
final File execDir = new File(this.executionsDir, String.valueOf(execId));
flow.setExecutionPath(execDir.getPath());
execDir.mkdirs();
return execDir;
}
public void shutdown() {
if (projectCacheCleaner.isPresent()) {
this.projectCacheCleaner.get().shutdown();
}
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban/execapp/FlowRampManager.java
|
/*
* Copyright 2019 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.execapp;
import static azkaban.ServiceProvider.SERVICE_PROVIDER;
import azkaban.Constants;
import azkaban.event.Event;
import azkaban.event.EventListener;
import azkaban.executor.ExecutableFlow;
import azkaban.executor.ExecutableFlowRampMetadata;
import azkaban.executor.ExecutableRamp;
import azkaban.executor.ExecutableRamp.Action;
import azkaban.executor.ExecutableRampDependencyMap;
import azkaban.executor.ExecutableRampExceptionalFlowItemsMap;
import azkaban.executor.ExecutableRampExceptionalItems;
import azkaban.executor.ExecutableRampExceptionalJobItemsMap;
import azkaban.executor.ExecutableRampItemsMap;
import azkaban.executor.ExecutableRampMap;
import azkaban.executor.ExecutableRampStatus;
import azkaban.executor.ExecutorLoader;
import azkaban.executor.ExecutorManagerException;
import azkaban.executor.Status;
import azkaban.ramppolicy.RampPolicy;
import azkaban.ramppolicy.RampPolicyManager;
import azkaban.spi.EventType;
import azkaban.utils.FileIOUtils;
import azkaban.utils.OsCpuUtil;
import azkaban.utils.Props;
import azkaban.utils.SystemMemoryInfo;
import azkaban.utils.ThreadPoolExecutingListener;
import azkaban.utils.TimeUtils;
import com.google.common.annotations.VisibleForTesting;
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Flow Ramp Manager
*/
@Singleton
public class FlowRampManager implements EventListener, ThreadPoolExecutingListener {
private static final String JAR_DEPENDENCY_PREFIX = "jar:";
  private static final String LIB_JAR_REG_EXP_FORMATTER = "^(%s)-\\d.*(.jar)$";
  private static final String ALL_LIB_JAR_REG_EXP = "^.*(.jar)$";
  private static final String LIB_SUB_FOLDER_NAME = "lib";
  private static final String EXCLUDED_SUB_FOLDER_NAME = "excluded";
  private static final String EXCLUDED_LIB_SUB_FOLDER_NAME = "excluded/lib";
private static final Logger LOGGER = LoggerFactory.getLogger(FlowRampManager.class);
private final boolean isRampFeatureEnabled;
private final boolean isRampPollingServiceEnabled;
private final int statusPollingIntervalMinutes;
private final int statusPushIntervalMax;
private final int statusPullIntervalMax;
private final RampPolicyManager rampPolicyManager;
private ExecutorLoader executorLoader;
private Props azkabanProps;
private Props globalProps;
private PollingService pollingService = null;
// Hosting All Active Ramps, Map.Key is rampId
private volatile ExecutableRampMap executableRampMap = null;
// Hosting All Ramp Items, Map.Key is rampId
private volatile ExecutableRampItemsMap executableRampItemsMap = null;
// Hosting All Default Value of dependencies, Map.Key is dependencyId
private volatile ExecutableRampDependencyMap executableRampDependencyMap = null;
// Hosting Flow Level Special Treatment List, Map.Key is rampId
private volatile ExecutableRampExceptionalFlowItemsMap executableRampExceptionalFlowItemsMap = null;
// Hosting Job Level Special Treatment List, Map.Key is RampId + FlowId
private volatile ExecutableRampExceptionalJobItemsMap executableRampExceptionalJobItemsMap = null;
private volatile RampDataModel rampDataModel = new RampDataModel();
// private volatile boolean active;
private volatile long latestDataBaseSynchronizationTimeStamp = 0;
@Inject
public FlowRampManager(final Props props, final ExecutorLoader executorLoader) throws IOException {
this.executorLoader = executorLoader;
this.azkabanProps = props;
// Check ramp.feature.enabled azkaban setting for backward compatible
isRampFeatureEnabled = this.azkabanProps.getBoolean(
Constants.ConfigurationKeys.AZKABAN_RAMP_ENABLED,
Constants.DEFAULT_AZKABAN_RAMP_ENABLED);
statusPushIntervalMax = this.azkabanProps.getInt(
Constants.ConfigurationKeys.AZKABAN_RAMP_STATUS_PUSH_INTERVAL_MAX,
Constants.DEFAULT_AZKABAN_RAMP_STATUS_PUSH_INTERVAL_MAX);
statusPullIntervalMax = this.azkabanProps.getInt(
Constants.ConfigurationKeys.AZKABAN_RAMP_STATUS_PULL_INTERVAL_MAX,
Constants.DEFAULT_AZKABAN_RAMP_STATUS_PULL_INTERVAL_MAX);
isRampPollingServiceEnabled = this.azkabanProps.getBoolean(
Constants.ConfigurationKeys.AZKABAN_RAMP_STATUS_POLLING_ENABLED,
Constants.DEFAULT_AZKABAN_RAMP_STATUS_POOLING_ENABLED);
statusPollingIntervalMinutes = this.azkabanProps.getInt(
Constants.ConfigurationKeys.AZKABAN_RAMP_STATUS_POLLING_INTERVAL,
Constants.DEFAULT_AZKABAN_RAMP_STATUS_POLLING_INTERVAL);
if (isRampFeatureEnabled) {
// load global Props
String globalPropertiesExtPath = props.getString(
Constants.ConfigurationKeys.AZKABAN_GLOBAL_PROPERTIES_EXT_PATH, null);
this.globalProps = globalPropertiesExtPath == null ? null : new Props(null, globalPropertiesExtPath);
this.rampPolicyManager = new RampPolicyManager(
props.getString(AzkabanExecutorServer.RAMPPOLICY_PLUGIN_DIR, Constants.PluginManager.RAMPPOLICY_DEFAULTDIR),
this.globalProps, getClass().getClassLoader());
this.executableRampMap = ExecutableRampMap.createInstance();
this.executableRampItemsMap = ExecutableRampItemsMap.createInstance();
this.executableRampDependencyMap = ExecutableRampDependencyMap.createInstance();
this.executableRampExceptionalFlowItemsMap = ExecutableRampExceptionalFlowItemsMap.createInstance();
this.executableRampExceptionalJobItemsMap = ExecutableRampExceptionalJobItemsMap.createInstance();
this.rampDataModel = new RampDataModel();
//Load current Ramp Setting from DB
loadSettings();
// Start Polling Service to synchronize ramp status cross multiple ExecServer if it is necessary
if (isRampPollingServiceEnabled) {
this.LOGGER.info("Starting polling service.");
this.pollingService = new FlowRampManager.PollingService(this.statusPollingIntervalMinutes,
new FlowRampManager.PollingCriteria(this.azkabanProps, this.rampDataModel));
this.pollingService.start();
}
} else {
this.rampPolicyManager = null;
}
}
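  // For orientation, an example executor configuration driving the constructor above. Keys are
  // shown by their Constants.ConfigurationKeys names; the literal property strings live in
  // azkaban.Constants, and the values here are made up for illustration:
  //
  //   AZKABAN_RAMP_ENABLED                  = true   // master switch for the ramp feature
  //   AZKABAN_RAMP_STATUS_POLLING_ENABLED   = true   // sync ramp state across executors
  //   AZKABAN_RAMP_STATUS_POLLING_INTERVAL  = 10     // minutes between polls
  //   AZKABAN_RAMP_STATUS_PUSH_INTERVAL_MAX = 20     // finished flows before a DB push (non-polling mode)
  //   AZKABAN_RAMP_STATUS_PULL_INTERVAL_MAX = 50     // started flows before a DB pull (non-polling mode)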
/**
* Check if the system is activating the ramp feature, aka some system configuration is ramping.
*/
private boolean isRampFeatureActivated() {
    if (isRampFeatureEnabled && executableRampMap != null) {
if (!executableRampMap.getActivatedAll().isEmpty()) {
return true;
}
}
return false;
}
/**
* Ramp Feature need to track the health of running flow to determine if stop/pause Ramp will be necessary.
* FlowRampManager is registered into the FlowRunnerManager's Listeners
*/
@Override
public void handleEvent(Event event) {
if (!isRampFeatureActivated()) return;
if (event.getType() == EventType.FLOW_STARTED || event.getType() == EventType.FLOW_FINISHED) {
final FlowRunner flowRunner = (FlowRunner) event.getRunner();
logFlowEvent(flowRunner, event.getType());
}
}
@Override
public void beforeExecute(Runnable r) {
}
@Override
public void afterExecute(Runnable r) {
}
/**
* This shuts down the flow ramp. The call is blocking and awaits execution of all jobs.
*/
public void shutdown() {
LOGGER.warn("Shutting down FlowRampManager...");
if (isRampPollingServiceEnabled) {
pollingService.shutdown();
}
// Persistent cached data into DB
saveSettings();
LOGGER.warn("Shutdown FlowRampManager complete.");
}
  /**
   * This attempts to shut down the flow ramp manager immediately (unsafe). It doesn't wait for
   * jobs to finish but interrupts all threads.
   */
public void shutdownNow() {
LOGGER.warn("Shutting down FlowRampManager now...");
if (isRampPollingServiceEnabled) {
pollingService.shutdown();
}
}
public int getNumOfRamps() {
return executableRampMap.getActivatedAll().size();
}
/**
* Load all ramp Settings from DB
*/
@VisibleForTesting
synchronized void loadSettings() {
loadExecutableRamps();
loadExecutableRampItems();
loadExecutableRampDependencies();
loadExecutableRampExceptionalFlowItems();
loadExecutableRampExceptionalJobItems();
latestDataBaseSynchronizationTimeStamp = System.currentTimeMillis();
LOGGER.info(String.format("Ramp Settings had been successfully loaded at [%d].",
latestDataBaseSynchronizationTimeStamp));
}
/**
* Load All active ramps, Key = rampId
*/
@VisibleForTesting
synchronized void loadExecutableRamps() {
try {
if (executableRampMap == null) {
executableRampMap = executorLoader.fetchExecutableRampMap();
} else {
executableRampMap.refresh(executorLoader.fetchExecutableRampMap());
}
} catch (ExecutorManagerException e) {
LOGGER.error("Load all active Executable Ramp failure");
}
}
/**
* Load All dependency properties into the executableRampProperties Map, Key = rampId,
*/
@VisibleForTesting
synchronized void loadExecutableRampItems() {
try {
if (executableRampItemsMap == null) {
executableRampItemsMap = executorLoader.fetchExecutableRampItemsMap();
} else {
executableRampItemsMap.refresh(executorLoader.fetchExecutableRampItemsMap());
}
} catch (ExecutorManagerException e) {
LOGGER.error("Load Executable Ramp Items failure");
}
}
/**
* Load All Default dependency values for ramp
* When the dependency does not have ramp setting, the default Value will be applied.
*/
@VisibleForTesting
synchronized void loadExecutableRampDependencies() {
try {
if (executableRampDependencyMap == null) {
executableRampDependencyMap = executorLoader.fetchExecutableRampDependencyMap();
} else {
executableRampDependencyMap.refresh(executorLoader.fetchExecutableRampDependencyMap());
}
} catch (ExecutorManagerException e) {
LOGGER.error("Load Executable Ramp Dependencies failure");
}
}
/**
* Load All Ramp Exceptional Items on Flow Level
*/
@VisibleForTesting
synchronized void loadExecutableRampExceptionalFlowItems() {
try {
if (executableRampExceptionalFlowItemsMap == null) {
executableRampExceptionalFlowItemsMap = executorLoader.fetchExecutableRampExceptionalFlowItemsMap();
} else {
executableRampExceptionalFlowItemsMap.refresh(executorLoader.fetchExecutableRampExceptionalFlowItemsMap());
}
} catch (ExecutorManagerException e) {
LOGGER.error("Load Executable Ramp Exceptional Items on Flow Level Failure");
}
}
/**
* Load All Ramp Exceptional Items on Job Level
*/
@VisibleForTesting
synchronized void loadExecutableRampExceptionalJobItems() {
try {
if (executableRampExceptionalJobItemsMap == null) {
executableRampExceptionalJobItemsMap = executorLoader.fetchExecutableRampExceptionalJobItemsMap();
} else {
executableRampExceptionalJobItemsMap.refresh(executorLoader.fetchExecutableRampExceptionalJobItemsMap());
}
} catch (ExecutorManagerException e) {
LOGGER.error("Load Executable Ramp Exceptional Items on Job Level Failure");
}
}
/**
* Save all ramp settings into DB
*/
@VisibleForTesting
synchronized void saveSettings() {
executableRampMap
.getAll()
.stream()
.filter(ExecutableRamp::isChanged)
.forEach(this::updateExecutableRamp);
    executableRampExceptionalFlowItemsMap
        .entrySet()
        .forEach(this::updateExecutedRampFlows);
rampDataModel.resetFlowCountAfterSave();
LOGGER.info("Ramp Settings had been successfully saved.");
}
  /**
   * Persist the status of every Executable Ramp on this Azkaban executor into the DB.
   */
  @VisibleForTesting
synchronized void updateExecutableRamp(ExecutableRamp executableRamp) {
try {
// Save all cachedNumTrail, cachedNumSuccess, cachedNumFailure, cachedNumIgnored,
// save isPaused, endTime when it is not zero, lastUpdatedTime when it is changed.
executorLoader.updateExecutableRamp(executableRamp);
// mark cache has been saved
executableRamp.cacheSaved();
} catch (ExecutorManagerException e) {
LOGGER.error(String.format("Update Executable Ramp [%s] Failure.", executableRamp.getId()));
}
}
/**
* Save All Ramp Exceptional Items on Flow Level
*/
@VisibleForTesting
synchronized void updateExecutedRampFlows(Map.Entry<String, ExecutableRampExceptionalItems> entry) {
try {
// Save all Identified workflow into the DB
executorLoader.updateExecutedRampFlows(entry.getKey(), entry.getValue());
} catch (ExecutorManagerException e) {
LOGGER.error("Fail to append ramp items into DB.", e);
}
}
/**
* Call to set Executable Ramp Metadata into ExecutableFlow
*/
  public synchronized void configure(ExecutableFlow executableFlow, File flowDirectory) {
if (!isRampFeatureActivated()) return;
// To be safe, check if there is any jar files in ./excluded folder
// and move them back to the place in original location of the package
moveFiles(
FileIOUtils.getDirectory(flowDirectory, EXCLUDED_SUB_FOLDER_NAME),
flowDirectory,
ALL_LIB_JAR_REG_EXP
);
moveFiles(
FileIOUtils.getDirectory(flowDirectory, EXCLUDED_LIB_SUB_FOLDER_NAME),
FileIOUtils.getDirectory(flowDirectory, LIB_SUB_FOLDER_NAME),
ALL_LIB_JAR_REG_EXP
);
String flowName = executableFlow.getFlowName();
ExecutableFlowRampMetadata executableFlowRampMetadata =
ExecutableFlowRampMetadata.createInstance(
executableRampDependencyMap,
executableRampExceptionalJobItemsMap.getExceptionalJobItemsByFlow(flowName)
);
for (ExecutableRamp executableRamp : executableRampMap.getActivatedAll()) {
try {
String rampId = executableRamp.getId();
LOGGER.info("RAMP_CHECK: (rampId = {}, rampStage = {}, executionId = {}, flowName = {}, RampPercentageId = {})",
rampId,
executableRamp.getStage(),
executableFlow.getExecutionId(),
flowName,
executableFlow.getRampPercentageId()
);
// get Base Props
Props baseProps = new Props();
baseProps.putAll(executableRampDependencyMap.getDefaultValues(executableRampItemsMap.getDependencies(rampId)));
ExecutableRampStatus status = executableRampExceptionalFlowItemsMap.check(rampId, flowName);
LOGGER.info("RAMP_STATUS: (Status = {}, flowName = {})", status.name(), flowName);
switch (status) {
case BLACKLISTED: // blacklist
executableFlowRampMetadata.setRampProps(
rampId,
Props.getInstance(
Props.clone(executableRampItemsMap.getRampItems(rampId)),
baseProps,
ExecutableRampStatus.BLACKLISTED.name()
)
);
LOGGER.info("RAMP_BLACKLISTED: (rampId = {}, flowName = {})", rampId, flowName);
break;
case WHITELISTED: // whitelist
executableFlowRampMetadata.setRampProps(
rampId,
Props.getInstance(
baseProps,
Props.clone(executableRampItemsMap.getRampItems(rampId)),
ExecutableRampStatus.WHITELISTED.name()
)
);
LOGGER.info("RAMP_WHITELISTED: (rampId = {}, flowName = {})", rampId, flowName);
break;
case SELECTED: // selected
executableFlowRampMetadata.setRampProps(
rampId,
Props.getInstance(
baseProps,
Props.clone(executableRampItemsMap.getRampItems(rampId)),
ExecutableRampStatus.SELECTED.name()
)
);
LOGGER.info("RAMP_SELECTED: (rampId = {}, flowName = {})", rampId, flowName);
break;
        case UNSELECTED: // unselected
executableFlowRampMetadata.setRampProps(
rampId,
Props.getInstance(
Props.clone(executableRampItemsMap.getRampItems(rampId)),
baseProps,
ExecutableRampStatus.UNSELECTED.name()
)
);
LOGGER.info("RAMP_UNSELECTED: (rampId = {}, flowName = {})", rampId, flowName);
break;
case EXCLUDED:
executableFlowRampMetadata.setRampProps(
rampId,
Props.getInstance(
null,
baseProps,
ExecutableRampStatus.EXCLUDED.name()
)
);
LOGGER.info("RAMP_EXECLUDED: (rampId = {}, flowName = {})", rampId, flowName);
break;
default:
RampPolicy rampPolicy = rampPolicyManager.buildRampPolicyExecutor(executableRamp.getPolicy(), globalProps);
LOGGER.info ("RAMP_POLICY_SELECTING: (policy = {}, rampId = {}, flowName = {}, executionId = {}, RampPercentageId = {})",
rampPolicy.getClass().getName(),
rampId,
flowName,
executableFlow.getExecutionId(),
executableFlow.getRampPercentageId()
);
if (rampPolicy.check(executableFlow, executableRamp)) {
// Ramp Enabled
executableFlowRampMetadata.setRampProps(
rampId,
Props.getInstance(
baseProps,
Props.clone(executableRampItemsMap.getRampItems(rampId)),
ExecutableRampStatus.SELECTED.name()
)
);
LOGGER.info("RAMP_POLICY_SELECTED: (rampId = {}, flowName = {})", rampId, flowName);
} else {
executableFlowRampMetadata.setRampProps(
rampId,
Props.getInstance(
Props.clone(executableRampItemsMap.getRampItems(rampId)),
baseProps,
ExecutableRampStatus.UNSELECTED.name()
)
);
LOGGER.info("RAMP_POLICY_UNSELECTED: (rampId = {}, flowName = {})", rampId, flowName);
}
break;
}
// Remove Package Dependencies
List<String> removableDependencies = executableRampItemsMap
.getDependencies(rampId)
.stream()
.filter(key -> key.startsWith(JAR_DEPENDENCY_PREFIX))
.filter(key -> (!baseProps.get(key).isEmpty() || !executableFlowRampMetadata.getRampItemValue(rampId, key).isEmpty()))
.map(key -> key.substring(JAR_DEPENDENCY_PREFIX.length()))
.collect(Collectors.toList());
String regExpression = String.format(LIB_JAR_REG_EXP_FORMATTER, String.join("|", removableDependencies));
if (!removableDependencies.isEmpty()) {
// Move those selected jar dependencies in ./ and ./lib folders
// into the ./excluded and ./excluded/lib folder
moveFiles(
flowDirectory,
FileIOUtils.getDirectory(flowDirectory, EXCLUDED_SUB_FOLDER_NAME),
regExpression
);
moveFiles(
FileIOUtils.getDirectory(flowDirectory, LIB_SUB_FOLDER_NAME),
FileIOUtils.getDirectory(flowDirectory, EXCLUDED_LIB_SUB_FOLDER_NAME),
regExpression
);
}
} catch (Exception e) {
LOGGER.error("RAMP_EXEC_ERROR: (message = {})", e.getMessage());
}
}
// Append the result into the executable flow
executableFlow.setExecutableFlowRampMetadata(executableFlowRampMetadata);
}
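  // A note on the Props layering in the switch above. This is a hedged reading of the code
  // (Props.getInstance's exact semantics live in azkaban.utils.Props): the second argument acts
  // as the overriding layer, so WHITELISTED/SELECTED flows resolve ramp items over the
  // dependency defaults, while BLACKLISTED/UNSELECTED/EXCLUDED flows resolve the defaults over
  // the ramp items. Illustrative example with a single dependency:
  //
  //   baseProps: { "jar:spark" -> "2.1" }   // default value for everyone
  //   rampItems: { "jar:spark" -> "2.4" }   // value being ramped
  //   WHITELISTED -> effective "jar:spark" = "2.4"
  //   BLACKLISTED -> effective "jar:spark" = "2.1"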
private void moveFiles(File sourceDir, File destinationDir, String regExpression) {
try {
FileIOUtils.moveFiles(sourceDir, destinationDir, regExpression);
LOGGER.info("Success to move files from {} to {} with REGEXP {}",
sourceDir.getAbsolutePath(),
destinationDir.getAbsolutePath(),
regExpression);
} catch (IOException e) {
LOGGER.error(
String.format("Fail to move files from %s to %s with REGEXP %s",
sourceDir.getAbsolutePath(), destinationDir.getAbsolutePath(), regExpression
), e);
}
}
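  // Illustrative example of the jar-exclusion regex built in configure() above. With
  // removableDependencies = ["spark-core", "guava"] (hypothetical names), the formatter expands
  // to:
  //
  //   ^(spark-core|guava)-\d.*(.jar)$
  //
  // which matches versioned jars such as "spark-core-2.4.0.jar" in ./ and ./lib, parking them
  // under ./excluded and ./excluded/lib so the ramped dependency versions are used instead.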
  private synchronized void logFlowEvent(FlowRunner flowRunner, EventType eventType) {
final ExecutableFlow flow = flowRunner.getExecutableFlow();
LOGGER.info("RAMP_FLOW_EVENT_CAPTURED: (ID = {}, FlowName = {}, ExecutionId = {}, FlowStatus = {})",
flow.getId(),
flow.getFlowName(),
flow.getExecutionId(),
flow.getStatus().toString());
if (eventType == EventType.FLOW_STARTED) {
Set<String> activeRamps = flow.getExecutableFlowRampMetadata().getActiveRamps();
rampDataModel.beginFlow(flow.getExecutionId(), activeRamps);
LOGGER.info("RAMP_STARTED: (FlowName = {}, ExecutionId = {}, Ramps = {})",
flow.getFlowName(),
flow.getExecutionId(), activeRamps.toString());
if (isDatabasePullingActionRequired()) {
LOGGER.info("BEGIN Reload ramp settings from DB ......");
loadSettings();
LOGGER.info("END Reload ramp settings from DB ......");
}
} else { // EventType.FLOW_FINISHED
logFlowAction(flowRunner, convertToAction(flow.getStatus()));
Set<String> ramps = rampDataModel.endFlow(flow.getExecutionId());
LOGGER.info("RAMP_FINISHED: (FlowName = {}, ExecutionId = {}, Ramps = {})",
flow.getFlowName(),
flow.getExecutionId(), ramps.toString());
if (isDatabasePushingActionRequired()) {
LOGGER.info("BEGIN Save ramp settings into DB ......");
saveSettings();
LOGGER.info("END Save ramp settings into DB ......");
}
}
}
  private synchronized void logFlowAction(FlowRunner flowRunner, Action action) {
flowRunner.getExecutableFlow()
.getExecutableFlowRampMetadata()
.getActiveRamps()
.stream()
.map(executableRampMap::get)
.forEach(executableRamp -> {
LOGGER.info("FlowRunner Save Result after Ramp. [rampId = {}, action = {}]",
executableRamp.getId(), action.name());
executableRamp.cacheResult(action);
if (Action.FAILED.equals(action)) {
String rampId = executableRamp.getId();
String flowName = flowRunner.getExecutableFlow().getFlowName();
LOGGER.warn("RAMP_EXCLUDE_FLOW: [executionId = {}, rampId = {}, flowName = {}, action = {}, ramp = {}]",
flowRunner.getExecutableFlow().getExecutionId(),
rampId,
flowName,
action.name(),
flowRunner.isRamping()
);
executableRampExceptionalFlowItemsMap.add(rampId, flowName, ExecutableRampStatus.EXCLUDED,
System.currentTimeMillis(), true);
}
});
}
// This check function is only applied on non-polling mode
  private synchronized boolean isDatabasePushingActionRequired() {
return ((!isRampPollingServiceEnabled) && (statusPushIntervalMax <= rampDataModel.getEndFlowCount()));
}
// This check function is only applied on non-polling mode
  private synchronized boolean isDatabasePullingActionRequired() {
return ((!isRampPollingServiceEnabled) && (statusPullIntervalMax <= rampDataModel.getBeginFlowCount()));
}
  private synchronized Action convertToAction(Status status) {
if (Status.FAILED.equals(status)) return Action.FAILED;
if (Status.isStatusSucceeded(status)) return Action.SUCCEEDED;
return Action.IGNORED;
}
@VisibleForTesting
static class RampDataModel {
// Host the current processing ramp flows
// Map.key = Any ID that uniquely identifies each execution. (we will use executionId). Map.value = Set of ramps
private volatile Map<Integer, Set<String>> executingFlows = new HashMap<>();
private Lock lock = new ReentrantLock();
private volatile int beginFlowCount = 0;
private volatile int endFlowCount = 0;
public RampDataModel() {
}
    public synchronized void beginFlow(final int executionId, Set<String> ramps) {
      // Acquire the lock in a try/finally so it is always released, even on failure.
      lock.lock();
      try {
        executingFlows.put(executionId, ramps);
        beginFlowCount++;
      } finally {
        lock.unlock();
      }
    }
    public synchronized Set<String> endFlow(final int executionId) {
      lock.lock();
      try {
        // Read and remove under the lock to avoid racing with other mutators.
        final Set<String> ramps = executingFlows.remove(executionId);
        endFlowCount++;
        return ramps;
      } finally {
        lock.unlock();
      }
    }
public Map<Integer, Set<String>> getExecutingFlows() {
return this.executingFlows;
}
public int getBeginFlowCount() {
return beginFlowCount;
}
public int getEndFlowCount() {
return endFlowCount;
}
    public void resetFlowCountAfterSave() {
      lock.lock();
      try {
        beginFlowCount = executingFlows.size();
        endFlowCount = 0;
      } finally {
        lock.unlock();
      }
    }
public boolean hasUnsavedFinishedFlow() {
return endFlowCount > 0;
}
}
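  // Note the reset semantics in RampDataModel above: after a save, beginFlowCount restarts at
  // the number of still-executing flows rather than zero, so flows that began before the save
  // still count toward the next pull threshold in isDatabasePullingActionRequired().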
  /**
   * Periodically persists the local diff of ramp status/configuration to the DB and reloads
   * the latest settings from it.
   *
   * This polling service is the key mechanism for communicating ramp status across multiple
   * Azkaban ExecServers.
   */
@SuppressWarnings("FutureReturnValueIgnored")
private class PollingService {
private final ScheduledExecutorService scheduler;
private final FlowRampManager.PollingCriteria pollingCriteria;
private final int statusPollingIntervalMinutes;
public PollingService(final int statusPollingIntervalMinutes, final FlowRampManager.PollingCriteria pollingCriteria) {
this.statusPollingIntervalMinutes = statusPollingIntervalMinutes;
this.scheduler = Executors.newSingleThreadScheduledExecutor();
this.pollingCriteria = pollingCriteria;
}
public void start() {
this.scheduler.scheduleAtFixedRate(() -> pollExecution(), 0L, this.statusPollingIntervalMinutes,
TimeUnit.MINUTES);
}
private void pollExecution() {
if (this.pollingCriteria.shouldPoll()) {
if (this.pollingCriteria.satisfiesUnsavedDataAvailableCriteria()) {
LOGGER.info("Save Ramp Setting to Database.");
FlowRampManager.this.saveSettings();
}
LOGGER.info("Load Ramp Setting from Database.");
FlowRampManager.this.loadSettings();
}
}
public void shutdown() {
this.scheduler.shutdown();
this.scheduler.shutdownNow();
}
}
private class PollingCriteria {
private final Props azkabanProps;
private final RampDataModel rampDataModel;
private final SystemMemoryInfo memInfo = SERVICE_PROVIDER.getInstance(SystemMemoryInfo.class);
private final OsCpuUtil cpuUtil = SERVICE_PROVIDER.getInstance(OsCpuUtil.class);
// private boolean areFlowThreadsAvailable;
private boolean isFreeMemoryAvailable;
private boolean isCpuLoadUnderMax;
public PollingCriteria(final Props azkabanProps, final RampDataModel rampDataModel) {
this.azkabanProps = azkabanProps;
this.rampDataModel = rampDataModel;
}
public boolean shouldPoll() {
return (satisfiesFreeMemoryCriteria() && satisfiesCpuUtilizationCriteria() && satisfiesTimeIntervalCriteria());
}
private boolean satisfiesUnsavedDataAvailableCriteria() {
return this.rampDataModel.hasUnsavedFinishedFlow();
}
private boolean satisfiesTimeIntervalCriteria() {
      // Avoid reloading too frequently, especially the load triggered by polling right after
      // initialization.
      return TimeUtils.timeEscapedOver(FlowRampManager.this.latestDataBaseSynchronizationTimeStamp, 50);
}
private boolean satisfiesFreeMemoryCriteria() {
final int minFreeMemoryConfigGb = this.azkabanProps.
getInt(Constants.ConfigurationKeys.AZKABAN_RAMP_STATUS_POLLING_MEMORY_MIN, 0);
// allow polling if not present or configured with invalid value
if (minFreeMemoryConfigGb > 0) {
final int minFreeMemoryConfigKb = minFreeMemoryConfigGb * 1024 * 1024;
final boolean haveEnoughMemory = this.memInfo.isFreePhysicalMemoryAbove(minFreeMemoryConfigKb);
if (this.isFreeMemoryAvailable != haveEnoughMemory) {
this.isFreeMemoryAvailable = haveEnoughMemory;
if (haveEnoughMemory) {
FlowRampManager.LOGGER.info("Polling criteria satisfied: available free memory.");
} else {
FlowRampManager.LOGGER.info("Polling criteria NOT satisfied: available free memory.");
}
}
return haveEnoughMemory;
}
return true;
}
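    // Unit note for the conversion above: the configured minimum is in gigabytes, while
    // SystemMemoryInfo.isFreePhysicalMemoryAbove presumably expects kilobytes, hence
    // 1 GB = 1024 MB = 1024 * 1024 KB.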
private boolean satisfiesCpuUtilizationCriteria() {
final double maxCpuUtilizationConfig = this.azkabanProps.
getDouble(Constants.ConfigurationKeys.AZKABAN_RAMP_STATUS_POLLING_CPU_MAX, 100);
if (maxCpuUtilizationConfig > 0 && maxCpuUtilizationConfig < 100) {
final double cpuLoad = this.cpuUtil.getCpuLoad();
if (cpuLoad != -1) {
final boolean cpuLoadWithinParams = cpuLoad < maxCpuUtilizationConfig;
if (this.isCpuLoadUnderMax != cpuLoadWithinParams) {
this.isCpuLoadUnderMax = cpuLoadWithinParams;
if (cpuLoadWithinParams) {
FlowRampManager.LOGGER.info("Polling criteria satisfied: Cpu utilization (" + cpuLoad + "%).");
} else {
FlowRampManager.LOGGER.info("Polling criteria NOT satisfied: Cpu utilization (" + cpuLoad + "%).");
}
}
return cpuLoadWithinParams;
}
}
return true;
}
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban/execapp/FlowRunner.java
|
/*
* Copyright 2013 LinkedIn Corp
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.execapp;
import static azkaban.Constants.ConfigurationKeys.AZKABAN_EVENT_REPORTING_PROPERTIES_TO_PROPAGATE;
import static azkaban.Constants.ConfigurationKeys.AZKABAN_SERVER_HOST_NAME;
import static azkaban.Constants.ConfigurationKeys.AZKABAN_WEBSERVER_EXTERNAL_HOSTNAME;
import static azkaban.execapp.ConditionalWorkflowUtils.FAILED;
import static azkaban.execapp.ConditionalWorkflowUtils.PENDING;
import static azkaban.execapp.ConditionalWorkflowUtils.checkConditionOnJobStatus;
import static azkaban.project.DirectoryYamlFlowLoader.CONDITION_ON_JOB_STATUS_PATTERN;
import static azkaban.project.DirectoryYamlFlowLoader.CONDITION_VARIABLE_REPLACEMENT_PATTERN;
import azkaban.Constants;
import azkaban.Constants.ConfigurationKeys;
import azkaban.Constants.JobProperties;
import azkaban.ServiceProvider;
import azkaban.event.Event;
import azkaban.event.EventData;
import azkaban.event.EventHandler;
import azkaban.event.EventListener;
import azkaban.execapp.event.FlowWatcher;
import azkaban.execapp.event.JobCallbackManager;
import azkaban.execapp.jmx.JmxJobMBeanManager;
import azkaban.execapp.metric.NumFailedJobMetric;
import azkaban.execapp.metric.NumRunningJobMetric;
import azkaban.executor.AlerterHolder;
import azkaban.executor.ExecutableFlow;
import azkaban.executor.ExecutableFlowBase;
import azkaban.executor.ExecutableNode;
import azkaban.executor.ExecutionControllerUtils;
import azkaban.executor.ExecutionOptions;
import azkaban.executor.ExecutionOptions.FailureAction;
import azkaban.executor.ExecutorLoader;
import azkaban.executor.ExecutorManagerException;
import azkaban.executor.Status;
import azkaban.flow.ConditionOnJobStatus;
import azkaban.flow.FlowProps;
import azkaban.flow.FlowUtils;
import azkaban.jobExecutor.ProcessJob;
import azkaban.jobtype.JobTypeManager;
import azkaban.metric.MetricReportManager;
import azkaban.metrics.CommonMetrics;
import azkaban.project.FlowLoaderUtils;
import azkaban.project.ProjectFileHandler;
import azkaban.project.ProjectLoader;
import azkaban.project.ProjectManagerException;
import azkaban.sla.SlaOption;
import azkaban.spi.AzkabanEventReporter;
import azkaban.spi.EventType;
import azkaban.utils.Props;
import azkaban.utils.SwapQueue;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Splitter;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.io.Files;
import java.io.File;
import java.io.IOException;
import java.security.AccessControlContext;
import java.security.AccessController;
import java.security.PrivilegedExceptionAction;
import java.security.ProtectionDomain;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
import java.util.regex.Matcher;
import javax.script.ScriptEngine;
import javax.script.ScriptEngineManager;
import javax.script.ScriptException;
import org.apache.commons.io.FileUtils;
import org.apache.log4j.Appender;
import org.apache.log4j.FileAppender;
import org.apache.log4j.Layout;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
/**
* Class that handles the running of a ExecutableFlow DAG
*/
public class FlowRunner extends EventHandler implements Runnable {
private static final Splitter SPLIT_ON_COMMA = Splitter.on(",").omitEmptyStrings().trimResults();
private static final Layout DEFAULT_LAYOUT = new PatternLayout(
"%d{dd-MM-yyyy HH:mm:ss z} %c{1} %p - %m\n");
// We check update every 5 minutes, just in case things get stuck. But for the
// most part, we'll be idling.
private static final long CHECK_WAIT_MS = 5 * 60 * 1000;
private final ExecutableFlow flow;
// Sync object for queuing
private final Object mainSyncObj = new Object();
private final JobTypeManager jobtypeManager;
private final Layout loggerLayout = DEFAULT_LAYOUT;
private final ExecutorLoader executorLoader;
private final ProjectLoader projectLoader;
private final int execId;
private final File execDir;
private final ExecutionOptions.FailureAction failureAction;
// Properties map
private final Props azkabanProps;
private final Map<String, Props> sharedProps = new HashMap<>();
private final JobRunnerEventListener listener = new JobRunnerEventListener();
private final FlowRunnerEventListener flowListener = new FlowRunnerEventListener();
private final Set<JobRunner> activeJobRunners = Collections
.newSetFromMap(new ConcurrentHashMap<>());
// Thread safe swap queue for finishedExecutions.
private final SwapQueue<ExecutableNode> finishedNodes;
private final AzkabanEventReporter azkabanEventReporter;
private final AlerterHolder alerterHolder;
private Logger logger;
private Appender flowAppender;
private File logFile;
private ExecutorService executorService;
private Thread flowRunnerThread;
private int numJobThreads = 10;
// Used for pipelining
private Integer pipelineLevel = null;
private Integer pipelineExecId = null;
// Watches external flows for execution.
private FlowWatcher watcher = null;
private Set<String> proxyUsers = null;
private boolean validateUserProxy;
private String jobLogFileSize = "5MB";
private int jobLogNumFiles = 4;
private volatile boolean flowPaused = false;
private volatile boolean flowFailed = false;
private volatile boolean flowFinished = false;
private volatile boolean flowKilled = false;
private volatile boolean flowIsRamping = false;
private volatile long flowKillTime = -1;
// For flow related metrics
private final CommonMetrics commonMetrics;
private final ExecMetrics execMetrics;
// The following is state that will trigger a retry of all failed jobs
private volatile boolean retryFailedJobs = false;
// Project upload data for events
private final ProjectFileHandler projectFileHandler;
/**
* Constructor. This will create its own ExecutorService for thread pools
*/
public FlowRunner(final ExecutableFlow flow, final ExecutorLoader executorLoader,
final ProjectLoader projectLoader, final JobTypeManager jobtypeManager,
final Props azkabanProps, final AzkabanEventReporter azkabanEventReporter,
final AlerterHolder alerterHolder, final CommonMetrics commonMetrics,
final ExecMetrics execMetrics)
throws ExecutorManagerException {
this(flow, executorLoader, projectLoader, jobtypeManager, null, azkabanProps,
azkabanEventReporter, alerterHolder, commonMetrics, execMetrics);
}
  /**
   * Constructor. If executorService is null, then it will create its own for thread pools.
   */
public FlowRunner(final ExecutableFlow flow, final ExecutorLoader executorLoader,
final ProjectLoader projectLoader, final JobTypeManager jobtypeManager,
final ExecutorService executorService, final Props azkabanProps,
final AzkabanEventReporter azkabanEventReporter, final AlerterHolder alerterHolder,
final CommonMetrics commonMetrics, final ExecMetrics execMetrics)
throws ExecutorManagerException {
this.execId = flow.getExecutionId();
this.flow = flow;
this.executorLoader = executorLoader;
this.projectLoader = projectLoader;
this.execDir = new File(flow.getExecutionPath());
this.jobtypeManager = jobtypeManager;
final ExecutionOptions options = flow.getExecutionOptions();
this.pipelineLevel = options.getPipelineLevel();
this.pipelineExecId = options.getPipelineExecutionId();
this.failureAction = options.getFailureAction();
this.proxyUsers = flow.getProxyUsers();
this.executorService = executorService;
this.finishedNodes = new SwapQueue<>();
this.azkabanProps = azkabanProps;
this.alerterHolder = alerterHolder;
this.commonMetrics = commonMetrics;
this.execMetrics = execMetrics;
// Add the flow listener only if a non-null eventReporter is available.
if (azkabanEventReporter != null) {
this.addListener(this.flowListener);
}
// Create logger and execution dir in flowRunner initialization instead of flow runtime to avoid NPE
// where the uninitialized logger is used in flow preparing state
createLogger(this.flow.getFlowId());
this.azkabanEventReporter = azkabanEventReporter;
projectFileHandler =
this.projectLoader.fetchProjectMetaData(this.flow.getProjectId(), this.flow.getVersion());
}
public FlowRunner setFlowWatcher(final FlowWatcher watcher) {
this.watcher = watcher;
return this;
}
public FlowRunner setNumJobThreads(final int jobs) {
this.numJobThreads = jobs;
return this;
}
public FlowRunner setJobLogSettings(final String jobLogFileSize, final int jobLogNumFiles) {
this.jobLogFileSize = jobLogFileSize;
this.jobLogNumFiles = jobLogNumFiles;
return this;
}
public FlowRunner setValidateProxyUser(final boolean validateUserProxy) {
this.validateUserProxy = validateUserProxy;
return this;
}
public File getExecutionDir() {
return this.execDir;
}
@VisibleForTesting
AlerterHolder getAlerterHolder() {
return this.alerterHolder;
}
@Override
public void run() {
try {
if (this.executorService == null) {
this.executorService = Executors.newFixedThreadPool(this.numJobThreads);
}
setupFlowExecution();
this.flow.setStartTime(System.currentTimeMillis());
this.logger.info("Updating initial flow directory.");
updateFlow();
this.logger.info("Fetching job and shared properties.");
if (!FlowLoaderUtils.isAzkabanFlowVersion20(this.flow.getAzkabanFlowVersion())) {
loadAllProperties();
}
this.fireEventListeners(
Event.create(this, EventType.FLOW_STARTED, new EventData(this.getExecutableFlow())));
runFlow();
} catch (final Throwable t) {
if (this.logger != null) {
this.logger
.error("An error has occurred during the running of the flow. Quiting.", t);
}
if (Status.KILLING.equals(this.flow.getStatus())) {
this.execMetrics.decrementFlowKillingCount();
}
this.flow.setStatus(Status.FAILED);
} finally {
try {
if (this.watcher != null) {
this.logger.info("Watcher is attached. Stopping watcher.");
this.watcher.stopWatcher();
this.logger
.info("Watcher cancelled status is " + this.watcher.isWatchCancelled());
}
this.flow.setEndTime(System.currentTimeMillis());
this.logger.info("Setting end time for flow " + this.execId + " to "
+ System.currentTimeMillis());
closeLogger();
updateFlow();
} finally {
reportFlowFinishedMetrics();
this.fireEventListeners(
Event.create(this, EventType.FLOW_FINISHED, new EventData(this.flow)));
this.logger
.info("Created " + EventType.FLOW_FINISHED + " event for " + flow.getExecutionId());
// In polling model, executor will be responsible for sending alerting emails when a flow
// finishes.
// Todo jamiesjc: switch to event driven model and alert on FLOW_FINISHED event.
if (this.azkabanProps.getBoolean(ConfigurationKeys.AZKABAN_POLL_MODEL, false)) {
ExecutionControllerUtils.alertUserOnFlowFinished(this.flow, this.alerterHolder,
ExecutionControllerUtils.getFinalizeFlowReasons("Flow finished", null));
}
}
}
}
private void reportFlowFinishedMetrics() {
final Status status = this.flow.getStatus();
switch (status) {
case SUCCEEDED:
this.execMetrics.markFlowSuccess();
break;
case FAILED:
this.commonMetrics.markFlowFail();
break;
case KILLED:
this.execMetrics.markFlowKilled();
this.execMetrics.addFlowTimeToKill(
this.flowKillTime == -1 ? -1 : System.currentTimeMillis() - this.flowKillTime);
break;
default:
break;
}
}
private void setupFlowExecution() {
final int projectId = this.flow.getProjectId();
final int version = this.flow.getVersion();
final String flowId = this.flow.getFlowId();
// Add a bunch of common azkaban properties
Props commonFlowProps = FlowUtils.addCommonFlowProperties(null, this.flow);
if (FlowLoaderUtils.isAzkabanFlowVersion20(this.flow.getAzkabanFlowVersion())) {
final Props flowProps = loadPropsFromYamlFile(this.flow.getId());
if (flowProps != null) {
flowProps.setParent(commonFlowProps);
commonFlowProps = flowProps;
}
} else {
if (this.flow.getJobSource() != null) {
final String source = this.flow.getJobSource();
final Props flowProps = this.sharedProps.get(source);
flowProps.setParent(commonFlowProps);
commonFlowProps = flowProps;
}
}
// If there are flow overrides, we apply them now.
final Map<String, String> flowParam =
this.flow.getExecutionOptions().getFlowParameters();
if (flowParam != null && !flowParam.isEmpty()) {
commonFlowProps = new Props(commonFlowProps, flowParam);
}
this.flow.setInputProps(commonFlowProps);
if (this.watcher != null) {
this.watcher.setLogger(this.logger);
}
// Avoid NPE in unit tests when the static app instance is not set
if (AzkabanExecutorServer.getApp() != null) {
this.logger
.info("Assigned executor : " + AzkabanExecutorServer.getApp().getExecutorHostPort());
}
this.logger.info("Running execid:" + this.execId + " flow:" + flowId + " project:"
+ projectId + " version:" + version);
if (this.pipelineExecId != null) {
this.logger.info("Running simulateously with " + this.pipelineExecId
+ ". Pipelining level " + this.pipelineLevel);
}
// The current thread is used for interrupting blocks
this.flowRunnerThread = Thread.currentThread();
this.flowRunnerThread.setName("FlowRunner-exec-" + this.flow.getExecutionId());
}
private void updateFlow() {
updateFlow(System.currentTimeMillis());
}
private synchronized void updateFlow(final long time) {
try {
this.flow.setUpdateTime(time);
this.executorLoader.updateExecutableFlow(this.flow);
} catch (final ExecutorManagerException e) {
this.logger.error("Error updating flow.", e);
}
}
/**
* setup logger and execution dir for the flowId
*/
private void createLogger(final String flowId) {
// Create logger
final String loggerName = this.execId + "." + flowId;
this.logger = Logger.getLogger(loggerName);
// Create file appender
final String logName = "_flow." + loggerName + ".log";
this.logFile = new File(this.execDir, logName);
final String absolutePath = this.logFile.getAbsolutePath();
this.flowAppender = null;
try {
this.flowAppender = new FileAppender(this.loggerLayout, absolutePath, false);
this.logger.addAppender(this.flowAppender);
} catch (final IOException e) {
this.logger.error("Could not open log file in " + this.execDir, e);
}
}
private void closeLogger() {
if (this.logger != null) {
this.logger.removeAppender(this.flowAppender);
this.flowAppender.close();
try {
this.executorLoader.uploadLogFile(this.execId, "", 0, this.logFile);
} catch (final ExecutorManagerException e) {
        // The flow's file appender is already closed; surface the failure via the logger hierarchy.
        this.logger.error("Failed to upload flow log file " + this.logFile, e);
}
}
}
private void loadAllProperties() throws IOException {
// First load all the properties
for (final FlowProps fprops : this.flow.getFlowProps()) {
final String source = fprops.getSource();
final File propsPath = new File(this.execDir, source);
final Props props = new Props(null, propsPath);
this.sharedProps.put(source, props);
}
// Resolve parents
for (final FlowProps fprops : this.flow.getFlowProps()) {
if (fprops.getInheritedSource() != null) {
final String source = fprops.getSource();
final String inherit = fprops.getInheritedSource();
final Props props = this.sharedProps.get(source);
final Props inherits = this.sharedProps.get(inherit);
props.setParent(inherits);
}
}
}
/**
* Main method that executes the jobs.
*/
private void runFlow() throws Exception {
this.logger.info("Starting flows");
runReadyJob(this.flow);
updateFlow();
while (!this.flowFinished) {
synchronized (this.mainSyncObj) {
if (this.flowPaused) {
try {
this.mainSyncObj.wait(CHECK_WAIT_MS);
} catch (final InterruptedException e) {
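            // Interrupted by a state change (pause/resume/kill); loop around and re-check state.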
}
continue;
} else {
if (this.retryFailedJobs) {
retryAllFailures();
} else if (!progressGraph()) {
try {
this.mainSyncObj.wait(CHECK_WAIT_MS);
} catch (final InterruptedException e) {
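              // Interrupted by a state change (e.g. a job finishing); loop around and re-check.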
}
}
}
}
}
this.logger.info("Finishing up flow. Awaiting Termination");
this.executorService.shutdown();
updateFlow();
this.logger.info("Finished Flow");
}
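  /**
   * Puts the flow back into the RUNNING state after failures: clears the killed/failed flags,
   * resets failed and killed nodes via resetFailedState, and re-submits the collected nodes
   * through runReadyJob.
   */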
private void retryAllFailures() throws IOException {
this.logger.info("Restarting all failed jobs");
this.retryFailedJobs = false;
this.flowKilled = false;
this.flowFailed = false;
this.flow.setStatus(Status.RUNNING);
final ArrayList<ExecutableNode> retryJobs = new ArrayList<>();
resetFailedState(this.flow, retryJobs);
for (final ExecutableNode node : retryJobs) {
if (node.getStatus() == Status.READY
|| node.getStatus() == Status.DISABLED) {
runReadyJob(node);
} else if (node.getStatus() == Status.SUCCEEDED) {
for (final String outNodeId : node.getOutNodes()) {
final ExecutableFlowBase base = node.getParentFlow();
runReadyJob(base.getExecutableNode(outNodeId));
}
}
runReadyJob(node);
}
updateFlow();
}
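  /**
   * Advances the execution graph by one step: swaps in the nodes that finished since the last
   * pass, finalizes any parent flows whose end nodes have all completed, and submits the out
   * nodes that are now candidates to run. Returns true if any job was run or any node finished,
   * in which case the updated flow state is persisted.
   */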
private boolean progressGraph() throws IOException {
this.finishedNodes.swap();
// The following nodes are finished, so we'll collect a list of outnodes
// that are candidates for running next.
final HashSet<ExecutableNode> nodesToCheck = new HashSet<>();
for (final ExecutableNode node : this.finishedNodes) {
Set<String> outNodeIds = node.getOutNodes();
ExecutableFlowBase parentFlow = node.getParentFlow();
// If a job is seen as failed or killed due to failing SLA, then we set the parent flow to
// FAILED_FINISHING
if (node.getStatus() == Status.FAILED || (node.getStatus() == Status.KILLED && node
.isKilledBySLA())) {
        // If the job cannot be retried or has run out of retry attempts, we
        // will fail the job and its flow now.
if (!retryJobIfPossible(node)) {
setFlowFailed(node);
} else {
nodesToCheck.add(node);
continue;
}
}
if (outNodeIds.isEmpty() && isFlowReadytoFinalize(parentFlow)) {
// Todo jamiesjc: For conditional workflows, if conditionOnJobStatus is ONE_SUCCESS or
// ONE_FAILED, some jobs might still be running when the end nodes have finished. In this
// case, we need to kill all running jobs before finalizing the flow.
finalizeFlow(parentFlow);
finishExecutableNode(parentFlow);
// If the parent has a parent, then we process
if (!(parentFlow instanceof ExecutableFlow)) {
outNodeIds = parentFlow.getOutNodes();
parentFlow = parentFlow.getParentFlow();
}
}
      // Add all out nodes from the finished job. We'll check against this
      // set to see if any are candidates for running.
for (final String nodeId : outNodeIds) {
final ExecutableNode outNode = parentFlow.getExecutableNode(nodeId);
nodesToCheck.add(outNode);
}
}
    // Run the candidate jobs. The code will check whether each is ready to run
    // before running it, and will instantly kill or skip it if necessary.
boolean jobsRun = false;
for (final ExecutableNode node : nodesToCheck) {
if (notReadyToRun(node.getStatus())) {
// Really shouldn't get in here.
continue;
}
jobsRun |= runReadyJob(node);
}
if (jobsRun || this.finishedNodes.getSize() > 0) {
updateFlow();
return true;
}
return false;
}
private void setFlowFailed(final ExecutableNode node) {
boolean shouldFail = true;
    // As long as there are no outNodes, or at least one outNode has conditionOnJobStatus of
    // ALL_SUCCESS, we should set the flow to failed. Otherwise, it could still satisfy the
    // condition of conditional workflows, so don't set the flow to failed.
for (final String outNodeId : node.getOutNodes()) {
if (node.getParentFlow().getExecutableNode(outNodeId).getConditionOnJobStatus()
.equals(ConditionOnJobStatus.ALL_SUCCESS)) {
shouldFail = true;
break;
} else {
shouldFail = false;
}
}
if (shouldFail) {
propagateStatusAndAlert(node.getParentFlow(),
node.getStatus() == Status.KILLED ? Status.KILLED : Status.FAILED_FINISHING);
if (this.failureAction == FailureAction.CANCEL_ALL) {
this.kill();
}
this.flowFailed = true;
}
}
private boolean notReadyToRun(final Status status) {
return Status.isStatusFinished(status)
|| Status.isStatusRunning(status)
|| Status.KILLING == status;
}
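  /**
   * Runs a node according to its implied status: CANCELLED nodes are cancelled (or, for a root
   * flow, finalized immediately), SKIPPED (disabled) nodes are marked skipped, and READY nodes
   * are either recursed into via their start nodes (embedded flows) or handed to a JobRunner.
   * Returns false if the node is already running/finished or should not run yet.
   */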
private boolean runReadyJob(final ExecutableNode node) throws IOException {
if (Status.isStatusFinished(node.getStatus())
|| Status.isStatusRunning(node.getStatus())) {
return false;
}
final Status nextNodeStatus = getImpliedStatus(node);
if (nextNodeStatus == null) {
return false;
}
if (nextNodeStatus == Status.CANCELLED) {
// if node is root flow
if (node instanceof ExecutableFlow && node.getParentFlow() == null) {
this.logger.info(String.format("Flow '%s' was cancelled before execution had started.",
node.getId()));
finalizeFlow((ExecutableFlow) node);
} else {
this.logger.info(String.format("Cancelling '%s' due to prior errors.", node.getNestedId()));
node.cancelNode(System.currentTimeMillis());
finishExecutableNode(node);
}
} else if (nextNodeStatus == Status.SKIPPED) {
this.logger.info("Skipping disabled job '" + node.getId() + "'.");
node.skipNode(System.currentTimeMillis());
finishExecutableNode(node);
} else if (nextNodeStatus == Status.READY) {
if (node instanceof ExecutableFlowBase) {
final ExecutableFlowBase flow = ((ExecutableFlowBase) node);
this.logger.info("Running flow '" + flow.getNestedId() + "'.");
flow.setStatus(Status.RUNNING);
// don't overwrite start time of root flows
if (flow.getStartTime() <= 0) {
flow.setStartTime(System.currentTimeMillis());
}
prepareJobProperties(flow);
for (final String startNodeId : ((ExecutableFlowBase) node).getStartNodes()) {
final ExecutableNode startNode = flow.getExecutableNode(startNodeId);
runReadyJob(startNode);
}
} else {
runExecutableNode(node);
}
}
return true;
}
private boolean retryJobIfPossible(final ExecutableNode node) {
if (node instanceof ExecutableFlowBase) {
return false;
}
if (node.getRetries() > node.getAttempt()) {
this.logger.info("Job '" + node.getId() + "' will be retried. Attempt "
+ node.getAttempt() + " of " + node.getRetries());
node.setDelayedExecution(node.getRetryBackoff());
node.resetForRetry();
return true;
} else {
if (node.getRetries() > 0) {
this.logger.info("Job '" + node.getId() + "' has run out of retry attempts");
// Setting delayed execution to 0 in case this is manually re-tried.
node.setDelayedExecution(0);
}
return false;
}
}
/**
* Recursively propagate status to parent flow. Alert on first error of the flow in new AZ
* dispatching design.
*
* @param base the base flow
* @param status the status to be propagated
*/
private void propagateStatusAndAlert(final ExecutableFlowBase base, final Status status) {
if (!Status.isStatusFinished(base.getStatus()) && base.getStatus() != Status.KILLING) {
this.logger.info("Setting " + base.getNestedId() + " to " + status);
boolean shouldAlert = false;
if (base.getStatus() != status) {
base.setStatus(status);
shouldAlert = true;
}
if (base.getParentFlow() != null) {
propagateStatusAndAlert(base.getParentFlow(), status);
} else if (this.azkabanProps.getBoolean(ConfigurationKeys.AZKABAN_POLL_MODEL, false)) {
// Alert on the root flow if the first error is encountered.
// Todo jamiesjc: Add a new FLOW_STATUS_CHANGED event type and alert on that event.
if (shouldAlert && base.getStatus() == Status.FAILED_FINISHING) {
ExecutionControllerUtils.alertUserOnFirstError((ExecutableFlow) base, this.alerterHolder);
}
}
}
}
private void finishExecutableNode(final ExecutableNode node) {
this.finishedNodes.add(node);
final EventData eventData = new EventData(node.getStatus(), node.getNestedId());
fireEventListeners(Event.create(this, EventType.JOB_FINISHED, eventData));
}
private boolean isFlowReadytoFinalize(final ExecutableFlowBase flow) {
// Only when all the end nodes are finished, the flow is ready to finalize.
for (final String end : flow.getEndNodes()) {
if (!Status.isStatusFinished(flow.getExecutableNode(end).getStatus())) {
return false;
}
}
return true;
}
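  /**
   * Finalizes a (sub)flow once its end nodes have finished: merges the end nodes' output props
   * into the flow's output props, stamps the end/update times, and maps the transient
   * FAILED_FINISHING/KILLING statuses to terminal FAILED/KILLED. If the finalized flow is the
   * top-level flow, the main run loop is signalled to finish.
   */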
private void finalizeFlow(final ExecutableFlowBase flow) {
final String id = flow == this.flow ? flow.getNestedId() : "";
    // If it's not the starting flow, we'll create a set of output props
// for the finished flow.
boolean succeeded = true;
Props previousOutput = null;
for (final String end : flow.getEndNodes()) {
final ExecutableNode node = flow.getExecutableNode(end);
if (node.getStatus() == Status.KILLED
|| node.getStatus() == Status.KILLING
|| node.getStatus() == Status.FAILED
|| node.getStatus() == Status.CANCELLED) {
succeeded = false;
}
Props output = node.getOutputProps();
if (output != null) {
output = Props.clone(output);
output.setParent(previousOutput);
previousOutput = output;
}
}
flow.setOutputProps(previousOutput);
if (!succeeded && (flow.getStatus() == Status.RUNNING)) {
flow.setStatus(Status.KILLED);
}
flow.setEndTime(System.currentTimeMillis());
flow.setUpdateTime(System.currentTimeMillis());
final long durationSec = (flow.getEndTime() - flow.getStartTime()) / 1000;
switch (flow.getStatus()) {
case FAILED_FINISHING:
this.logger.info("Setting flow '" + id + "' status to FAILED in "
+ durationSec + " seconds");
flow.setStatus(Status.FAILED);
break;
case KILLING:
this.logger
.info("Setting flow '" + id + "' status to KILLED in " + durationSec + " seconds");
flow.setStatus(Status.KILLED);
this.execMetrics.decrementFlowKillingCount();
break;
case FAILED:
case KILLED:
case CANCELLED:
case FAILED_SUCCEEDED:
this.logger.info("Flow '" + id + "' is set to " + flow.getStatus().toString()
+ " in " + durationSec + " seconds");
break;
default:
flow.setStatus(Status.SUCCEEDED);
this.logger.info("Flow '" + id + "' is set to " + flow.getStatus().toString()
+ " in " + durationSec + " seconds");
}
    // If the finalized flow is actually the top level flow, then we finish
    // the main loop.
if (flow instanceof ExecutableFlow) {
this.flowFinished = true;
}
}
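  /**
   * Resolves the effective input props for a node. Precedence, from lowest to highest: shared
   * *.properties files (Flow 1.0 only), the parent flow's input props, output props collected
   * from upstream jobs, and finally the job's own source props. Illustrative example: a key
   * defined in both a shared .properties file and the job's own .job file resolves to the .job
   * file's value, since the job source sits at the top of the parent chain built below.
   */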
private void prepareJobProperties(final ExecutableNode node) throws IOException {
if (node instanceof ExecutableFlow) {
return;
}
Props props = null;
if (!FlowLoaderUtils.isAzkabanFlowVersion20(this.flow.getAzkabanFlowVersion())) {
      // 1. Shared properties (i.e. *.properties) for the jobs only. This
      // takes the least precedence.
if (!(node instanceof ExecutableFlowBase)) {
final String sharedProps = node.getPropsSource();
if (sharedProps != null) {
props = this.sharedProps.get(sharedProps);
}
}
}
    // The following is the hierarchical ordering of dependency resolution
// 2. Parent Flow Properties
final ExecutableFlowBase parentFlow = node.getParentFlow();
if (parentFlow != null) {
final Props flowProps = Props.clone(parentFlow.getInputProps());
flowProps.setEarliestAncestor(props);
props = flowProps;
}
// 3. Output Properties. The call creates a clone, so we can overwrite it.
final Props outputProps = collectOutputProps(node);
if (outputProps != null) {
outputProps.setEarliestAncestor(props);
props = outputProps;
}
// 4. The job source.
final Props jobSource = loadJobProps(node);
if (jobSource != null) {
jobSource.setParent(props);
props = jobSource;
}
node.setInputProps(props);
}
/**
* @param props This method is to put in any job properties customization before feeding to the
* job.
*/
private void customizeJobProperties(final Props props) {
final boolean memoryCheck = this.flow.getExecutionOptions().getMemoryCheck();
props.put(ProcessJob.AZKABAN_MEMORY_CHECK, Boolean.toString(memoryCheck));
}
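  /**
   * Loads the props for a single job. For Flow 2.0 the props come from the flow's yaml file;
   * for Flow 1.0 a stored override property (node id + JOB_OVERRIDE_SUFFIX), if present, takes
   * precedence over the job file on disk. customizeJobProperties() is applied to the result
   * before it is returned.
   */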
private Props loadJobProps(final ExecutableNode node) throws IOException {
Props props = null;
if (FlowLoaderUtils.isAzkabanFlowVersion20(this.flow.getAzkabanFlowVersion())) {
final String jobPath =
node.getParentFlow().getFlowId() + Constants.PATH_DELIMITER + node.getId();
props = loadPropsFromYamlFile(jobPath);
if (props == null) {
this.logger.info("Job props loaded from yaml file is empty for job " + node.getId());
return props;
}
} else {
final String source = node.getJobSource();
if (source == null) {
return null;
}
// load the override props if any
try {
props =
this.projectLoader.fetchProjectProperty(this.flow.getProjectId(),
this.flow.getVersion(), node.getId() + Constants.JOB_OVERRIDE_SUFFIX);
} catch (final ProjectManagerException e) {
e.printStackTrace();
this.logger.error("Error loading job override property for job "
+ node.getId());
}
final File path = new File(this.execDir, source);
if (props == null) {
// if no override prop, load the original one on disk
try {
props = new Props(null, path);
} catch (final IOException e) {
e.printStackTrace();
this.logger.error("Error loading job file " + source + " for job "
+ node.getId());
}
}
// setting this fake source as this will be used to determine the location
// of log files.
if (path.getPath() != null) {
props.setSource(path.getPath());
}
}
customizeJobProperties(props);
return props;
}
private Props loadPropsFromYamlFile(final String path) {
File tempDir = null;
Props props = null;
try {
tempDir = Files.createTempDir();
props = FlowLoaderUtils.getPropsFromYamlFile(path, getFlowFile(tempDir));
} catch (final Exception e) {
this.logger.error("Failed to get props from flow file. " + e);
} finally {
if (tempDir != null && tempDir.exists()) {
try {
FileUtils.deleteDirectory(tempDir);
} catch (final IOException e) {
this.logger.error("Failed to delete temp directory." + e);
tempDir.deleteOnExit();
}
}
}
return props;
}
private File getFlowFile(final File tempDir) throws Exception {
final List<FlowProps> flowPropsList = ImmutableList.copyOf(this.flow.getFlowProps());
    // There should be exactly one source (file name) for each flow file.
if (flowPropsList.isEmpty() || flowPropsList.get(0) == null) {
throw new ProjectManagerException(
"Failed to get flow file source. Flow props is empty for " + this.flow.getId());
}
final String source = flowPropsList.get(0).getSource();
final int flowVersion = this.projectLoader
.getLatestFlowVersion(this.flow.getProjectId(), this.flow.getVersion(), source);
final File flowFile = this.projectLoader
.getUploadedFlowFile(this.flow.getProjectId(), this.flow.getVersion(), source,
flowVersion, tempDir);
return flowFile;
}
@SuppressWarnings("FutureReturnValueIgnored")
private void runExecutableNode(final ExecutableNode node) throws IOException {
// Collect output props from the job's dependencies.
prepareJobProperties(node);
node.setStatus(Status.QUEUED);
// Attach Ramp Props if there is any desired properties
String jobId = node.getId();
String jobType = Optional.ofNullable(node.getInputProps()).map(props -> props.getString("type")).orElse(null);
if (jobType != null && jobId != null) {
Props rampProps = this.flow.getRampPropsForJob(jobId, jobType);
if (rampProps != null) {
this.flowIsRamping = true;
logger.info(String.format(
"RAMP_FLOW_ATTACH_PROPS_FOR_JOB : (flow.ExecId = %d, flow.Id = %s, flow.flowName = %s, job.id = %s, job.type = %s, props = %s)",
this.flow.getExecutionId(), this.flow.getId(), this.flow.getFlowName(), jobId, jobType, rampProps.toString()));
node.setRampProps(rampProps);
}
} else {
logger.warn(String.format(
"RAMP_FLOW_ATTACH_PROPS_FOR_JOB : (flow.ExecId = %d, flow.Id = %s, flow.flowName = %s) does not have Job Type or Id",
this.flow.getExecutionId(), this.flow.getId(), this.flow.getFlowName()));
}
final JobRunner runner = createJobRunner(node);
this.logger.info("Submitting job '" + node.getNestedId() + "' to run.");
try {
this.executorService.submit(runner);
this.activeJobRunners.add(runner);
} catch (final RejectedExecutionException e) {
this.logger.error(e);
}
}
/**
* Determines what the state of the next node should be. Returns null if the node should not be
* run.
*/
public Status getImpliedStatus(final ExecutableNode node) {
    // If it's running or finished with 'SUCCEEDED', then don't even
    // bother starting this job.
if (Status.isStatusRunning(node.getStatus())
|| node.getStatus() == Status.SUCCEEDED) {
return null;
}
    // Go through the node's dependencies. If all of the previous jobs'
    // statuses are finished and not FAILED or KILLED, then we can safely
    // run this job.
Status status = Status.READY;
// Check if condition on job status is satisfied
switch (checkConditionOnJobStatus(node)) {
case FAILED:
this.logger.info("Condition on job status: " + node.getConditionOnJobStatus() + " is "
+ "evaluated to false for " + node.getId());
status = Status.CANCELLED;
break;
// Condition not satisfied yet, need to wait
case PENDING:
return null;
default:
break;
}
if (status != Status.CANCELLED && !isConditionOnRuntimeVariableMet(node)) {
status = Status.CANCELLED;
}
// If it's disabled but ready to run, we want to make sure it continues
// being disabled.
if (node.getStatus() == Status.DISABLED
|| node.getStatus() == Status.SKIPPED) {
return Status.SKIPPED;
}
    // If the flow has failed and we want to finish only the currently running
    // jobs, we just kill everything else. We also kill if the flow has been
    // cancelled.
if (this.flowFailed
&& this.failureAction == ExecutionOptions.FailureAction.FINISH_CURRENTLY_RUNNING) {
return Status.CANCELLED;
} else if (isKilled()) {
return Status.CANCELLED;
}
return status;
}
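  /**
   * Evaluates the node's runtime-variable condition, if any. Any already-evaluated job-status
   * macro is replaced with "true", each job-variable reference is replaced with the quoted
   * value from that job's output props, and the result is evaluated as a JavaScript boolean
   * expression. Illustrative example (syntax assumed from the replacement patterns above): a
   * condition like ${jobA:count} == "3" is rewritten to '3' == "3" before evaluation, given
   * that jobA's output props contain count=3.
   */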
private Boolean isConditionOnRuntimeVariableMet(final ExecutableNode node) {
final String condition = node.getCondition();
if (condition == null) {
return true;
}
String replaced = condition;
// Replace the condition on job status macro with "true" to skip the evaluation by Script
// Engine since it has already been evaluated.
final Matcher jobStatusMatcher = CONDITION_ON_JOB_STATUS_PATTERN.matcher
(condition);
if (jobStatusMatcher.find()) {
replaced = condition.replace(jobStatusMatcher.group(1), "true");
}
final Matcher variableMatcher = CONDITION_VARIABLE_REPLACEMENT_PATTERN.matcher(replaced);
while (variableMatcher.find()) {
final String value = findValueForJobVariable(node, variableMatcher.group(1),
variableMatcher.group(2));
if (value != null) {
replaced = replaced.replace(variableMatcher.group(), "'" + value + "'");
}
this.logger.info("Resolved condition of " + node.getId() + " is " + replaced);
}
// Evaluate string expression using script engine
return evaluateExpression(replaced);
}
private String findValueForJobVariable(final ExecutableNode node, final String jobName, final
String variable) {
// Get job output props
final ExecutableNode target = node.getParentFlow().getExecutableNode(jobName);
if (target == null) {
this.logger.error("Not able to load props from output props file, job name " + jobName
+ " might be invalid.");
return null;
}
final Props outputProps = target.getOutputProps();
if (outputProps != null && outputProps.containsKey(variable)) {
return outputProps.get(variable);
}
return null;
}
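  /**
   * Evaluates the given string as a JavaScript boolean expression inside a zero-permission
   * AccessControlContext, so a user-supplied condition cannot perform privileged actions. Any
   * evaluation failure is logged and treated as false.
   */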
private boolean evaluateExpression(final String expression) {
boolean result = false;
final ScriptEngineManager sem = new ScriptEngineManager();
final ScriptEngine se = sem.getEngineByName("JavaScript");
// Restrict permission using the two-argument form of doPrivileged()
try {
final Object object = AccessController.doPrivileged(
new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws ScriptException {
return se.eval(expression);
}
},
new AccessControlContext(
new ProtectionDomain[]{new ProtectionDomain(null, null)}) // no permissions
);
if (object != null) {
result = (boolean) object;
}
} catch (final Exception e) {
this.logger.error("Failed to evaluate the condition.", e);
}
this.logger.info("Condition is evaluated to " + result);
return result;
}
private Props collectOutputProps(final ExecutableNode node) {
Props previousOutput = null;
// Iterate the in nodes again and create the dependencies
for (final String dependency : node.getInNodes()) {
Props output =
node.getParentFlow().getExecutableNode(dependency).getOutputProps();
if (output != null) {
output = Props.clone(output);
output.setParent(previousOutput);
previousOutput = output;
}
}
return previousOutput;
}
private JobRunner createJobRunner(final ExecutableNode node) {
// Load job file.
final File path = new File(this.execDir, node.getJobSource());
final JobRunner jobRunner =
new JobRunner(node, path.getParentFile(), this.executorLoader,
this.jobtypeManager, this.azkabanProps);
if (this.watcher != null) {
jobRunner.setPipeline(this.watcher, this.pipelineLevel);
}
if (this.validateUserProxy) {
jobRunner.setValidatedProxyUsers(this.proxyUsers);
}
jobRunner.setDelayStart(node.getDelayedExecution());
jobRunner.setLogSettings(this.logger, this.jobLogFileSize, this.jobLogNumFiles);
jobRunner.addListener(this.listener);
if (JobCallbackManager.isInitialized()) {
jobRunner.addListener(JobCallbackManager.getInstance());
}
configureJobLevelMetrics(jobRunner);
return jobRunner;
}
/**
* Configure Azkaban metrics tracking for a new jobRunner instance
*/
private void configureJobLevelMetrics(final JobRunner jobRunner) {
this.logger.info("Configuring Azkaban metrics tracking for jobrunner object");
if (MetricReportManager.isAvailable()) {
final MetricReportManager metricManager = MetricReportManager.getInstance();
// Adding NumRunningJobMetric listener
jobRunner.addListener((NumRunningJobMetric) metricManager
.getMetricFromName(NumRunningJobMetric.NUM_RUNNING_JOB_METRIC_NAME));
// Adding NumFailedJobMetric listener
jobRunner.addListener((NumFailedJobMetric) metricManager
.getMetricFromName(NumFailedJobMetric.NUM_FAILED_JOB_METRIC_NAME));
}
jobRunner.addListener(JmxJobMBeanManager.getInstance());
}
public void pause(final String user) throws IllegalStateException {
synchronized (this.mainSyncObj) {
this.logger.info("Execution pause requested by " + user);
if (!this.isKilled() && !this.flowFinished) {
this.flowPaused = true;
this.flow.setStatus(Status.PAUSED);
updateFlow();
this.logger.info("Execution " + this.execId + " has been paused.");
} else {
final String errorMessage = "Execution " + this.execId + " with status " +
this.flow.getStatus() + " cannot be paused.";
this.logger.warn(errorMessage);
throw new IllegalStateException(errorMessage);
}
}
interrupt();
}
public void resume(final String user) {
synchronized (this.mainSyncObj) {
if (!this.flowPaused) {
this.logger.info("Cannot resume flow that isn't paused");
} else {
this.logger.info("Flow resumed by " + user);
this.flowPaused = false;
if (this.flowFailed) {
this.flow.setStatus(Status.FAILED_FINISHING);
} else if (isKilled()) {
this.flow.setStatus(Status.KILLING);
this.execMetrics.incrementFlowKillingCount();
} else {
this.flow.setStatus(Status.RUNNING);
}
updateFlow();
}
}
interrupt();
}
public void kill(final String user) {
this.logger.info("Flow killed by " + user);
kill();
}
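  /**
   * Kills the execution: marks the flow KILLING, unpauses it if paused, stops any attached flow
   * watcher, and kills all active job runners. Repeated calls are no-ops once the flow has
   * already been flagged as killed.
   */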
public void kill() {
synchronized (this.mainSyncObj) {
if (isKilled()) {
return;
}
this.logger.info("Kill has been called on execution " + this.execId);
this.flow.setStatus(Status.KILLING);
this.execMetrics.incrementFlowKillingCount();
this.flowKillTime = System.currentTimeMillis();
// If the flow is paused, then we'll also unpause
this.flowPaused = false;
this.flowKilled = true;
if (this.watcher != null) {
this.logger.info("Watcher is attached. Stopping watcher.");
this.watcher.stopWatcher();
this.logger
.info("Watcher cancelled status is " + this.watcher.isWatchCancelled());
}
this.logger.info("Killing " + this.activeJobRunners.size() + " jobs.");
for (final JobRunner runner : this.activeJobRunners) {
runner.kill();
}
updateFlow();
}
interrupt();
}
public void retryFailures(final String user) {
synchronized (this.mainSyncObj) {
this.logger.info("Retrying failures invoked by " + user);
this.retryFailedJobs = true;
interrupt();
}
}
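  /**
   * Walks the flow bottom-up from its end nodes, resetting CANCELLED/SKIPPED nodes and
   * retry-resetting FAILED/KILLED nodes. nodesToRetry collects the nodes to re-submit: reset
   * FAILED/KILLED nodes, SUCCEEDED nodes (whose out nodes get re-run), and any READY or
   * DISABLED start nodes. The flow itself goes back to READY if nothing inside it ever
   * started, or to RUNNING otherwise.
   */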
private void resetFailedState(final ExecutableFlowBase flow,
final List<ExecutableNode> nodesToRetry) {
// bottom up
final LinkedList<ExecutableNode> queue = new LinkedList<>();
for (final String id : flow.getEndNodes()) {
final ExecutableNode node = flow.getExecutableNode(id);
queue.add(node);
}
long maxStartTime = -1;
while (!queue.isEmpty()) {
final ExecutableNode node = queue.poll();
final Status oldStatus = node.getStatus();
maxStartTime = Math.max(node.getStartTime(), maxStartTime);
final long currentTime = System.currentTimeMillis();
if (node.getStatus() == Status.SUCCEEDED) {
// This is a candidate parent for restart
nodesToRetry.add(node);
continue;
} else if (node.getStatus() == Status.RUNNING) {
continue;
} else if (node.getStatus() == Status.KILLING) {
continue;
} else if (node.getStatus() == Status.SKIPPED) {
node.setStatus(Status.DISABLED);
node.setEndTime(-1);
node.setStartTime(-1);
node.setUpdateTime(currentTime);
} else if (node instanceof ExecutableFlowBase) {
final ExecutableFlowBase base = (ExecutableFlowBase) node;
switch (base.getStatus()) {
case CANCELLED:
node.setStatus(Status.READY);
node.setEndTime(-1);
node.setStartTime(-1);
node.setUpdateTime(currentTime);
              // Break out of the switch. We'll reset the flow just like a
              // normal node.
break;
case KILLED:
case FAILED:
case FAILED_FINISHING:
resetFailedState(base, nodesToRetry);
continue;
default:
              // Continue the while loop. If the job is in a finished state
              // that's not a failure, we don't want to reset the job.
continue;
}
} else if (node.getStatus() == Status.CANCELLED) {
// Not a flow, but killed
node.setStatus(Status.READY);
node.setStartTime(-1);
node.setEndTime(-1);
node.setUpdateTime(currentTime);
} else if (node.getStatus() == Status.FAILED
|| node.getStatus() == Status.KILLED) {
node.resetForRetry();
nodesToRetry.add(node);
}
if (!(node instanceof ExecutableFlowBase)
&& node.getStatus() != oldStatus) {
this.logger.info("Resetting job '" + node.getNestedId() + "' from "
+ oldStatus + " to " + node.getStatus());
}
for (final String inId : node.getInNodes()) {
final ExecutableNode nodeUp = flow.getExecutableNode(inId);
queue.add(nodeUp);
}
}
// At this point, the following code will reset the flow
final Status oldFlowState = flow.getStatus();
if (maxStartTime == -1) {
// Nothing has run inside the flow, so we assume the flow hasn't even
// started running yet.
flow.setStatus(Status.READY);
} else {
flow.setStatus(Status.RUNNING);
// Add any READY start nodes. Usually it means the flow started, but the
// start node has not.
for (final String id : flow.getStartNodes()) {
final ExecutableNode node = flow.getExecutableNode(id);
if (node.getStatus() == Status.READY
|| node.getStatus() == Status.DISABLED) {
nodesToRetry.add(node);
}
}
}
flow.setUpdateTime(System.currentTimeMillis());
flow.setEndTime(-1);
this.logger.info("Resetting flow '" + flow.getNestedId() + "' from "
+ oldFlowState + " to " + flow.getStatus());
}
private void interrupt() {
    if (this.flowRunnerThread != null) {
this.flowRunnerThread.interrupt();
}
}
public boolean isKilled() {
return this.flowKilled;
}
public boolean isRamping() {
return this.flowIsRamping;
}
public ExecutableFlow getExecutableFlow() {
return this.flow;
}
public File getFlowLogFile() {
return this.logFile;
}
public File getJobLogFile(final String jobId, final int attempt) {
final ExecutableNode node = this.flow.getExecutableNodePath(jobId);
final File path = new File(this.execDir, node.getJobSource());
final String logFileName = JobRunner.createLogFileName(node, attempt);
final File logFile = new File(path.getParentFile(), logFileName);
if (!logFile.exists()) {
return null;
}
return logFile;
}
public File getJobAttachmentFile(final String jobId, final int attempt) {
final ExecutableNode node = this.flow.getExecutableNodePath(jobId);
final File path = new File(this.execDir, node.getJobSource());
final String attachmentFileName =
JobRunner.createAttachmentFileName(node, attempt);
final File attachmentFile = new File(path.getParentFile(), attachmentFileName);
if (!attachmentFile.exists()) {
return null;
}
return attachmentFile;
}
public File getJobMetaDataFile(final String jobId, final int attempt) {
final ExecutableNode node = this.flow.getExecutableNodePath(jobId);
final File path = new File(this.execDir, node.getJobSource());
final String metaDataFileName = JobRunner.createMetaDataFileName(node, attempt);
final File metaDataFile = new File(path.getParentFile(), metaDataFileName);
if (!metaDataFile.exists()) {
return null;
}
return metaDataFile;
}
public boolean isRunnerThreadAlive() {
if (this.flowRunnerThread != null) {
return this.flowRunnerThread.isAlive();
}
return false;
}
public int getExecutionId() {
return this.execId;
}
public Set<JobRunner> getActiveJobRunners() {
return ImmutableSet.copyOf(this.activeJobRunners);
}
public FlowRunnerEventListener getFlowRunnerEventListener() {
return this.flowListener;
}
// Class helps report the flow start and stop events.
@VisibleForTesting
class FlowRunnerEventListener implements EventListener {
public FlowRunnerEventListener() {
}
@VisibleForTesting
synchronized Map<String, String> getFlowMetadata(final FlowRunner flowRunner) {
final ExecutableFlow flow = flowRunner.getExecutableFlow();
final Props props = ServiceProvider.SERVICE_PROVIDER.getInstance(Props.class);
final Map<String, String> metaData = new HashMap<>();
metaData.put("flowName", flow.getId());
// Azkaban executor hostname
metaData.put("azkabanHost", props.getString(AZKABAN_SERVER_HOST_NAME, "unknown"));
      // As per the web server construct, when AZKABAN_WEBSERVER_EXTERNAL_HOSTNAME is set, use
      // that; otherwise use jetty.hostname.
metaData.put("azkabanWebserver", props.getString(AZKABAN_WEBSERVER_EXTERNAL_HOSTNAME,
props.getString("jetty.hostname", "localhost")));
metaData.put("projectName", flow.getProjectName());
metaData.put("submitUser", flow.getSubmitUser());
metaData.put("executionId", String.valueOf(flow.getExecutionId()));
metaData.put("startTime", String.valueOf(flow.getStartTime()));
metaData.put("submitTime", String.valueOf(flow.getSubmitTime()));
// project upload info
final ProjectFileHandler handler = flowRunner.projectFileHandler;
metaData.put("projectFileUploadUser", handler.getUploader());
metaData.put("projectFileUploaderIpAddr", handler.getUploaderIpAddr());
metaData.put("projectFileName", handler.getFileName());
metaData.put("projectFileUploadTime", String.valueOf(handler.getUploadTime()));
// Propagate flow properties to Event Reporter
if (FlowLoaderUtils.isAzkabanFlowVersion20(flow.getAzkabanFlowVersion())) {
// In Flow 2.0, flow has designated properties (defined at its own level in Yaml)
FlowRunner.propagateMetadataFromProps(metaData, flow.getInputProps(), "flow", flow.getId(),
FlowRunner.this.logger);
} else {
        // In Flow 1.0, flow properties are a combination of shared properties from individual
        // files (order not defined, because they are loaded in fs list order and put in a HashMap).
Props combinedProps = new Props();
for (final Props sharedProp : flowRunner.sharedProps.values()) {
// sharedProp.getFlattened() gets its parent's props too, so we don't have to recurse
combinedProps.putAll(sharedProp.getFlattened());
}
        // In Flow 1.0, the flow's inputProps contains overrides, so apply them as overrides to
        // the combined shared props.
combinedProps = new Props(combinedProps, flow.getInputProps());
FlowRunner.propagateMetadataFromProps(metaData, combinedProps, "flow", flow.getId(),
FlowRunner.this.logger);
}
return metaData;
}
@Override
public synchronized void handleEvent(final Event event) {
if (event.getType() == EventType.FLOW_STARTED) {
final FlowRunner flowRunner = (FlowRunner) event.getRunner();
final ExecutableFlow flow = flowRunner.getExecutableFlow();
FlowRunner.this.logger.info("Flow started: " + flow.getId());
FlowRunner.this.azkabanEventReporter.report(event.getType(), getFlowMetadata(flowRunner));
} else if (event.getType() == EventType.FLOW_FINISHED) {
final FlowRunner flowRunner = (FlowRunner) event.getRunner();
final ExecutableFlow flow = flowRunner.getExecutableFlow();
FlowRunner.this.logger.info("Flow ended: " + flow.getId());
final Map<String, String> flowMetadata = getFlowMetadata(flowRunner);
flowMetadata.put("endTime", String.valueOf(flow.getEndTime()));
flowMetadata.put("flowStatus", flow.getStatus().name());
FlowRunner.this.azkabanEventReporter.report(event.getType(), flowMetadata);
}
}
}
@VisibleForTesting
class JobRunnerEventListener implements EventListener {
public JobRunnerEventListener() {
}
@VisibleForTesting
synchronized Map<String, String> getJobMetadata(final JobRunner jobRunner) {
final ExecutableNode node = jobRunner.getNode();
final Props props = ServiceProvider.SERVICE_PROVIDER.getInstance(Props.class);
final Map<String, String> metaData = new HashMap<>();
metaData.put("jobId", node.getId());
// Flow specific properties
final ExecutableFlow executableFlow = node.getExecutableFlow();
metaData.put("executionID", String.valueOf(executableFlow.getExecutionId()));
metaData.put("flowName", executableFlow.getId());
metaData.put("projectName", executableFlow.getProjectName());
metaData.put("startTime", String.valueOf(node.getStartTime()));
metaData.put("jobType", String.valueOf(node.getType()));
// Azkaban executor hostname
metaData.put("azkabanHost", props.getString(AZKABAN_SERVER_HOST_NAME, "unknown"));
      // As per the web server construct, when AZKABAN_WEBSERVER_EXTERNAL_HOSTNAME is set, use
      // that; otherwise use jetty.hostname.
metaData.put("azkabanWebserver", props.getString(AZKABAN_WEBSERVER_EXTERNAL_HOSTNAME,
props.getString("jetty.hostname", "localhost")));
metaData.put("jobProxyUser", jobRunner.getEffectiveUser());
// attempt id
metaData.put("attemptID", String.valueOf(node.getAttempt()));
// Propagate job properties to Event Reporter
FlowRunner.propagateMetadataFromProps(metaData, node.getInputProps(), "job", node.getId(),
FlowRunner.this.logger);
return metaData;
}
@Override
public synchronized void handleEvent(final Event event) {
if (event.getType() == EventType.JOB_STATUS_CHANGED) {
updateFlow();
} else if (event.getType() == EventType.JOB_FINISHED) {
final EventData eventData = event.getData();
final JobRunner jobRunner = (JobRunner) event.getRunner();
final ExecutableNode node = jobRunner.getNode();
reportJobFinishedMetrics(node);
if (FlowRunner.this.azkabanEventReporter != null) {
final Map<String, String> jobMetadata = getJobMetadata(jobRunner);
jobMetadata.put("jobStatus", node.getStatus().name());
jobMetadata.put("endTime", String.valueOf(node.getEndTime()));
FlowRunner.this.azkabanEventReporter.report(event.getType(), jobMetadata);
}
final long seconds = (node.getEndTime() - node.getStartTime()) / 1000;
synchronized (FlowRunner.this.mainSyncObj) {
FlowRunner.this.logger.info("Job " + eventData.getNestedId() + " finished with status "
+ eventData.getStatus() + " in " + seconds + " seconds");
// Cancellation is handled in the main thread, but if the flow is
// paused, the main thread is paused too.
// This unpauses the flow for cancellation.
if (FlowRunner.this.flowPaused && eventData.getStatus() == Status.FAILED
&& FlowRunner.this.failureAction == FailureAction.CANCEL_ALL) {
FlowRunner.this.flowPaused = false;
}
FlowRunner.this.finishedNodes.add(node);
FlowRunner.this.activeJobRunners.remove(jobRunner);
node.getParentFlow().setUpdateTime(System.currentTimeMillis());
interrupt();
fireEventListeners(event);
}
} else if (event.getType() == EventType.JOB_STARTED) {
final EventData eventData = event.getData();
FlowRunner.this.logger.info("Job Started: " + eventData.getNestedId());
if (FlowRunner.this.azkabanEventReporter != null) {
final JobRunner jobRunner = (JobRunner) event.getRunner();
FlowRunner.this.azkabanEventReporter.report(event.getType(), getJobMetadata(jobRunner));
}
// add job level checker
final TriggerManager triggerManager = ServiceProvider.SERVICE_PROVIDER
.getInstance(TriggerManager.class);
triggerManager
.addTrigger(FlowRunner.this.flow.getExecutionId(),
SlaOption.getJobLevelSLAOptions(
FlowRunner.this.flow.getExecutionOptions().getSlaOptions()));
}
}
private void reportJobFinishedMetrics(final ExecutableNode node) {
final Status status = node.getStatus();
switch (status) {
case SUCCEEDED:
FlowRunner.this.execMetrics.markJobSuccess();
break;
case FAILED:
FlowRunner.this.execMetrics.markJobFail();
break;
case KILLED:
FlowRunner.this.execMetrics.markJobKilled();
break;
default:
break;
}
}
}
/***
* Propagate properties (specified in {@code AZKABAN_EVENT_REPORTING_PROPERTIES_TO_PROPAGATE})
* to metadata for event reporting.
* @param metaData Metadata map to update with properties.
* @param inputProps Input properties for flow or job.
* @param nodeType Flow or job.
* @param nodeName Flow or job name.
   * @param logger Logger from the invoking class, used for logging.
*/
@VisibleForTesting
static void propagateMetadataFromProps(final Map<String, String> metaData, final Props inputProps,
final String nodeType, final String nodeName, final Logger logger) {
    if (null == metaData || null == inputProps || null == logger ||
        Strings.isNullOrEmpty(nodeType) || Strings.isNullOrEmpty(nodeName)) {
      throw new IllegalArgumentException("Input params should not be null or empty.");
    }
    // Backward compatibility: unless the user specifies it, this property will be absent from
    // flows and jobs; if so, do a no-op like before. (Validate arguments first so a null
    // inputProps fails with an IllegalArgumentException rather than an NPE.)
    if (!inputProps.containsKey(AZKABAN_EVENT_REPORTING_PROPERTIES_TO_PROPAGATE)) {
      return;
    }
final String propsToPropagate = inputProps
.getString(AZKABAN_EVENT_REPORTING_PROPERTIES_TO_PROPAGATE);
if (Strings.isNullOrEmpty(propsToPropagate)) {
// Nothing to propagate
logger.info(
String.format("No properties to propagate to metadata for %s: %s", nodeType, nodeName));
return;
} else {
logger.info(String
.format("Propagating: %s to metadata for %s: %s", propsToPropagate, nodeType, nodeName));
}
final List<String> propsToPropagateList = SPLIT_ON_COMMA.splitToList(propsToPropagate);
for (final String propKey : propsToPropagateList) {
if (!inputProps.containsKey(propKey)) {
logger.warn(String.format("%s does not contains: %s property; "
+ "skipping propagation to metadata", nodeName, propKey));
continue;
}
metaData.put(propKey, inputProps.getString(propKey));
}
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban/execapp/FlowRunnerManager.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.execapp;
import static java.util.Objects.requireNonNull;
import azkaban.Constants;
import azkaban.Constants.ConfigurationKeys;
import azkaban.ServiceProvider;
import azkaban.event.Event;
import azkaban.event.EventListener;
import azkaban.execapp.event.FlowWatcher;
import azkaban.execapp.event.LocalFlowWatcher;
import azkaban.execapp.event.RemoteFlowWatcher;
import azkaban.execapp.metric.NumFailedFlowMetric;
import azkaban.executor.AlerterHolder;
import azkaban.executor.ExecutableFlow;
import azkaban.executor.ExecutionOptions;
import azkaban.executor.Executor;
import azkaban.executor.ExecutorLoader;
import azkaban.executor.ExecutorManagerException;
import azkaban.executor.Status;
import azkaban.jobtype.JobTypeManager;
import azkaban.jobtype.JobTypeManagerException;
import azkaban.metric.MetricReportManager;
import azkaban.metrics.CommonMetrics;
import azkaban.project.ProjectLoader;
import azkaban.project.ProjectWhitelist;
import azkaban.project.ProjectWhitelist.WhitelistType;
import azkaban.sla.SlaOption;
import azkaban.spi.AzkabanEventReporter;
import azkaban.spi.EventType;
import azkaban.spi.Storage;
import azkaban.storage.ProjectStorageManager;
import azkaban.utils.DependencyTransferManager;
import azkaban.utils.FileIOUtils;
import azkaban.utils.FileIOUtils.JobMetaData;
import azkaban.utils.FileIOUtils.LogData;
import azkaban.utils.JSONUtils;
import azkaban.utils.OsCpuUtil;
import azkaban.utils.Props;
import azkaban.utils.SystemMemoryInfo;
import azkaban.utils.ThinArchiveUtils;
import azkaban.utils.ThreadPoolExecutingListener;
import azkaban.utils.TrackingThreadPool;
import azkaban.utils.UndefinedPropertyException;
import com.codahale.metrics.Timer;
import com.google.common.base.Preconditions;
import java.io.File;
import java.io.IOException;
import java.lang.Thread.State;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import javax.annotation.Nullable;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.commons.io.FileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Execution manager for the server side execution.
* <p>
 * When a flow is submitted to FlowRunnerManager, it is in the {@link Status#PREPARING} status.
 * When a flow is about to be executed by FlowRunner, its status is updated to
 * {@link Status#RUNNING}.
* <p>
* Two main data structures are used in this class to maintain flows.
* <p>
* runningFlows: this is used as a bookkeeping for submitted flows in FlowRunnerManager. It has
* nothing to do with the executor service that is used to execute the flows. This bookkeeping is
 * used at the time of canceling or killing a flow. The flows in this data structure are removed in
* the handleEvent method.
* <p>
 * submittedFlows: this is used to keep track of the execution of the flows, so it has the mapping
 * between a Future<?> and an execution id. This would allow us to find out the execution ids of the
 * flows that are in the Status.PREPARING status. The entries in this map are removed once the flow
* execution is completed.
*/
@Singleton
public class FlowRunnerManager implements EventListener,
ThreadPoolExecutingListener {
private static final Logger LOGGER = LoggerFactory.getLogger(FlowRunnerManager.class);
private static final String EXECUTOR_USE_BOUNDED_THREADPOOL_QUEUE = "executor.use.bounded.threadpool.queue";
private static final String EXECUTOR_THREADPOOL_WORKQUEUE_SIZE = "executor.threadpool.workqueue.size";
private static final String EXECUTOR_FLOW_THREADS = "executor.flow.threads";
private static final String FLOW_NUM_JOB_THREADS = "flow.num.job.threads";
  // time-to-live for recently finished flows before cleanup: 1 minute
private static final int RECENTLY_FINISHED_TIME_TO_LIVE = 60 * 1000;
private static final int DEFAULT_NUM_EXECUTING_FLOWS = 30;
  private static final int DEFAULT_FLOW_NUM_JOB_THREADS = 10;
// this map is used to store the flows that have been submitted to
// the executor service. Once a flow has been submitted, it is either
// in the queue waiting to be executed or in executing state.
private final Map<Future<?>, Integer> submittedFlows = new ConcurrentHashMap<>();
private final Map<Integer, FlowRunner> runningFlows = new ConcurrentHashMap<>();
// keep track of the number of flow being setup({@link createFlowRunner()})
private final AtomicInteger preparingFlowCount = new AtomicInteger(0);
private final Map<Integer, ExecutableFlow> recentlyFinishedFlows = new ConcurrentHashMap<>();
private final TrackingThreadPool executorService;
private final CleanerThread cleanerThread;
private final ExecutorLoader executorLoader;
private final ProjectLoader projectLoader;
private final JobTypeManager jobtypeManager;
private final FlowPreparer flowPreparer;
private final TriggerManager triggerManager;
private final FlowRampManager flowRampManager;
private final AlerterHolder alerterHolder;
private final AzkabanEventReporter azkabanEventReporter;
private final Props azkabanProps;
private final File executionDirectory;
private final File projectDirectory;
private final Object executionDirDeletionSync = new Object();
private final CommonMetrics commonMetrics;
private final ExecMetrics execMetrics;
private final DependencyTransferManager dependencyTransferManager;
private final Storage storage;
private final int numThreads;
private final int numJobThreadPerFlow;
// We want to limit the log sizes to about 20 megs
private final String jobLogChunkSize;
private final int jobLogNumFiles;
// If true, jobs will validate proxy user against a list of valid proxy users.
private final boolean validateProxyUser;
private PollingService pollingService;
private int threadPoolQueueSize = -1;
private Props globalProps;
private long lastCleanerThreadCheckTime = -1;
  // date time of the last flow submitted.
private long lastFlowSubmittedDate = 0;
// Indicate if the executor is set to active.
private volatile boolean active;
@Inject
public FlowRunnerManager(final Props props,
final ExecutorLoader executorLoader,
final ProjectLoader projectLoader,
final ProjectStorageManager projectStorageManager,
final TriggerManager triggerManager,
final FlowRampManager flowRampManager,
final AlerterHolder alerterHolder,
final CommonMetrics commonMetrics,
final ExecMetrics execMetrics,
final DependencyTransferManager dependencyTransferManager,
final Storage storage,
@Nullable final AzkabanEventReporter azkabanEventReporter) throws IOException {
this.azkabanProps = props;
this.azkabanEventReporter = azkabanEventReporter;
this.executionDirectory = new File(props.getString("azkaban.execution.dir", "executions"));
if (!this.executionDirectory.exists()) {
this.executionDirectory.mkdirs();
setgidPermissionOnExecutionDirectory();
}
this.projectDirectory = new File(props.getString("azkaban.project.dir", "projects"));
if (!this.projectDirectory.exists()) {
this.projectDirectory.mkdirs();
}
// azkaban.temp.dir
this.numThreads = props.getInt(EXECUTOR_FLOW_THREADS, DEFAULT_NUM_EXECUTING_FLOWS);
    this.numJobThreadPerFlow = props.getInt(FLOW_NUM_JOB_THREADS, DEFAULT_FLOW_NUM_JOB_THREADS);
this.executorService = createExecutorService(this.numThreads);
this.executorLoader = executorLoader;
this.projectLoader = projectLoader;
this.triggerManager = triggerManager;
this.alerterHolder = alerterHolder;
this.commonMetrics = commonMetrics;
this.execMetrics = execMetrics;
this.dependencyTransferManager = dependencyTransferManager;
this.storage = storage;
this.flowRampManager = flowRampManager;
this.jobLogChunkSize = this.azkabanProps.getString("job.log.chunk.size", "5MB");
this.jobLogNumFiles = this.azkabanProps.getInt("job.log.backup.index", 4);
this.validateProxyUser = this.azkabanProps.getBoolean("proxy.user.lock.down", false);
final String globalPropsPath = props.getString("executor.global.properties", null);
if (globalPropsPath != null) {
this.globalProps = new Props(null, globalPropsPath);
}
// Add dependency root path to globalProps
addStartupDependencyPathToProps(this.globalProps);
this.jobtypeManager =
new JobTypeManager(props.getString(AzkabanExecutorServer.JOBTYPE_PLUGIN_DIR,
Constants.PluginManager.JOBTYPE_DEFAULTDIR), this.globalProps,
getClass().getClassLoader());
ProjectCacheCleaner cleaner = null;
this.LOGGER.info("Configuring Project Cache");
double projectCacheSizePercentage = 0.0;
double projectCacheThrottlePercentage = 0.0;
try {
projectCacheSizePercentage =
props.getDouble(ConfigurationKeys.PROJECT_CACHE_SIZE_PERCENTAGE);
projectCacheThrottlePercentage =
props.getDouble(ConfigurationKeys.PROJECT_CACHE_THROTTLE_PERCENTAGE);
this.LOGGER.info("Configuring Cache Cleaner with {} % as threshold", projectCacheSizePercentage);
cleaner = new ProjectCacheCleaner(this.projectDirectory,
projectCacheSizePercentage,
projectCacheThrottlePercentage);
this.LOGGER.info("ProjectCacheCleaner configured.");
} catch (final UndefinedPropertyException ex) {
if (projectCacheSizePercentage == 0.0) {
this.LOGGER.info("Property {} not set. Project Cache directory will not be auto-cleaned as it gets full",
ConfigurationKeys.PROJECT_CACHE_SIZE_PERCENTAGE);
} else {
// Exception must have been fired because Throttle percentage is not set. Initialize the cleaner
// with the default throttle value
this.LOGGER.info("Property {} not set. Initializing with default value of Throttle Percentage",
ConfigurationKeys.PROJECT_CACHE_THROTTLE_PERCENTAGE);
cleaner = new ProjectCacheCleaner(this.projectDirectory, projectCacheSizePercentage);
}
}
// Create a flow preparer
this.flowPreparer = new FlowPreparer(projectStorageManager, this.dependencyTransferManager,
this.projectDirectory, cleaner, this.execMetrics.getProjectCacheHitRatio(),
this.executionDirectory);
this.execMetrics.addFlowRunnerManagerMetrics(this);
this.cleanerThread = new CleanerThread();
this.cleanerThread.start();
if (this.azkabanProps.getBoolean(ConfigurationKeys.AZKABAN_POLL_MODEL, false)) {
this.LOGGER.info("Starting polling service.");
this.pollingService = new PollingService(this.azkabanProps
.getLong(ConfigurationKeys.AZKABAN_POLLING_INTERVAL_MS,
Constants.DEFAULT_AZKABAN_POLLING_INTERVAL_MS),
new PollingCriteria(this.azkabanProps));
this.pollingService.start();
}
}
/**
* Add the startup dependency path to props if the current storage instance returns a non-null
* dependencyRootPath.
*
* @param props Props to add the startup dependency path to.
*/
private void addStartupDependencyPathToProps(final Props props) {
if (this.storage.getDependencyRootPath() != null) {
props.put(ThinArchiveUtils.DEPENDENCY_STORAGE_ROOT_PATH_PROP,
this.storage.getDependencyRootPath());
}
}
/**
* Setting the gid bit on the execution directory forces all files/directories created within the
* directory to be a part of the group associated with the azkaban process. Then, when users
* create their own files, the azkaban cleanup thread can properly remove them.
* <p>
* Java does not provide a standard library api for setting the gid bit because the gid bit is
* system dependent, so the only way to set this bit is to start a new process and run the shell
* command "chmod g+s " + execution directory name.
* <p>
* Note that this should work on most Linux distributions and MacOS, but will not work on
* Windows.
*/
private void setgidPermissionOnExecutionDirectory() throws IOException {
LOGGER.info("Creating subprocess to run shell command: chmod g+s "
+ this.executionDirectory.toString());
Runtime.getRuntime().exec("chmod g+s " + this.executionDirectory.toString());
}
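  /**
   * Creates the flow-executing thread pool. With the bounded-queue option enabled, submissions
   * beyond the configured work-queue size are rejected (ThreadPoolExecutor.AbortPolicy);
   * otherwise an unbounded work queue is used.
   */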
private TrackingThreadPool createExecutorService(final int nThreads) {
final boolean useNewThreadPool =
this.azkabanProps.getBoolean(EXECUTOR_USE_BOUNDED_THREADPOOL_QUEUE, false);
LOGGER.info("useNewThreadPool: " + useNewThreadPool);
if (useNewThreadPool) {
this.threadPoolQueueSize =
this.azkabanProps.getInt(EXECUTOR_THREADPOOL_WORKQUEUE_SIZE, nThreads);
LOGGER.info("workQueueSize: " + this.threadPoolQueueSize);
// using a bounded queue for the work queue. The default rejection policy
// {@ThreadPoolExecutor.AbortPolicy} is used
final TrackingThreadPool executor =
new TrackingThreadPool(nThreads, nThreads, 0L, TimeUnit.MILLISECONDS,
new LinkedBlockingQueue<>(this.threadPoolQueueSize), this);
return executor;
} else {
// the old way of using unbounded task queue.
// if the running tasks are taking a long time or stuck, this queue
// will be very very long.
return new TrackingThreadPool(nThreads, nThreads, 0L,
TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>(), this);
}
}
public void setExecutorActive(final boolean isActive, final String host, final int port)
throws ExecutorManagerException, InterruptedException {
final Executor executor = this.executorLoader.fetchExecutor(host, port);
Preconditions.checkState(executor != null, "Unable to obtain self entry in DB");
if (executor.isActive() != isActive) {
executor.setActive(isActive);
this.executorLoader.updateExecutor(executor);
} else {
LOGGER.info(
"Set active action ignored. Executor is already " + (isActive ? "active" : "inactive"));
}
this.active = isActive;
if (!this.active) {
// When deactivating this executor, this call will wait to return until every thread in {@link
      // #createFlowRunner} has finished. When deploying a new executor, the old running executor
      // will be deactivated before the new one is activated, and only one executor is allowed to
      // delete/hard-link project dirs to avoid the race condition described in {@link
      // FlowPreparer#setup}. Making the deactivation process block until flow preparation work
      // finishes guarantees the old executor won't access {@link FlowPreparer#setup} after
// deactivation.
waitUntilFlowPreparationFinish();
}
}
public void setActiveInternal(final boolean isActive) {
this.active = isActive;
}
/**
* Wait until ongoing flow preparation work finishes.
*/
private void waitUntilFlowPreparationFinish() throws InterruptedException {
final Duration SLEEP_INTERVAL = Duration.ofSeconds(5);
while (this.preparingFlowCount.intValue() != 0) {
LOGGER.info(this.preparingFlowCount + " flow(s) is/are still being setup before complete "
+ "deactivation.");
Thread.sleep(SLEEP_INTERVAL.toMillis());
}
}
public long getLastFlowSubmittedTime() {
    // Note: this is not thread safe and may result in providing dirty data.
    // We will provide this data as is for now and will revisit if there
    // is a strong justification for change.
return this.lastFlowSubmittedDate;
}
public Props getGlobalProps() {
return this.globalProps;
}
public void setGlobalProps(final Props globalProps) {
this.globalProps = globalProps;
}
public void submitFlow(final int execId) throws ExecutorManagerException {
if (isAlreadyRunning(execId)) {
return;
}
final FlowRunner runner = createFlowRunner(execId);
// Check again.
if (isAlreadyRunning(execId)) {
return;
}
submitFlowRunner(runner);
}
private boolean isAlreadyRunning(final int execId) throws ExecutorManagerException {
if (this.runningFlows.containsKey(execId)) {
LOGGER.info("Execution " + execId + " is already in running.");
if (!this.submittedFlows.containsValue(execId)) {
// Execution had been added to running flows but not submitted - something's wrong.
// Return a response with error: this is a cue for the dispatcher to retry or finalize the
// execution as failed.
throw new ExecutorManagerException("Execution " + execId +
" is in runningFlows but not in submittedFlows. Most likely submission had failed.");
}
// Already running, everything seems fine. Report as a successful submission.
return true;
}
return false;
}
/**
   * Returns whether this execution has useExecutor defined. useExecutor is for running test
   * executions on an inactive executor.
*/
private boolean isExecutorSpecified(final ExecutableFlow flow) {
return flow.getExecutionOptions().getFlowParameters()
.containsKey(ExecutionOptions.USE_EXECUTOR);
}
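  /**
   * Builds a FlowRunner for the given execution: loads the flow from the DB, prepares the
   * project files and execution directory, wires up a local or remote pipeline watcher if the
   * execution is pipelined, resolves the per-flow job-thread count, applies any ramp
   * configuration, and configures logging, proxy-user validation and metrics on the runner.
   */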
private FlowRunner createFlowRunner(final int execId) throws ExecutorManagerException {
final ExecutableFlow flow = this.executorLoader.fetchExecutableFlow(execId);
if (flow == null) {
throw new ExecutorManagerException("Error loading flow with exec " + execId);
}
// Sets up the project files and execution directory.
this.preparingFlowCount.incrementAndGet();
final Timer.Context flowPrepTimerContext = this.execMetrics.getFlowSetupTimerContext();
try {
if (this.active || isExecutorSpecified(flow)) {
this.flowPreparer.setup(flow);
} else {
// Unset the executor.
this.executorLoader.unsetExecutorIdForExecution(execId);
throw new ExecutorManagerException("executor became inactive before setting up the "
+ "flow " + execId);
}
} finally {
this.preparingFlowCount.decrementAndGet();
flowPrepTimerContext.stop();
}
// Setup flow runner
FlowWatcher watcher = null;
final ExecutionOptions options = flow.getExecutionOptions();
if (options.getPipelineExecutionId() != null) {
final Integer pipelineExecId = options.getPipelineExecutionId();
final FlowRunner runner = this.runningFlows.get(pipelineExecId);
if (runner != null) {
watcher = new LocalFlowWatcher(runner);
} else {
// also ends up here if execute is called with pipelineExecId that's not running any more
// (it could have just finished, for example)
watcher = new RemoteFlowWatcher(pipelineExecId, this.executorLoader);
}
}
int numJobThreads = this.numJobThreadPerFlow;
if (options.getFlowParameters().containsKey(FLOW_NUM_JOB_THREADS)) {
try {
final int numJobs =
Integer.valueOf(options.getFlowParameters().get(
FLOW_NUM_JOB_THREADS));
if (numJobs > 0 && (numJobs <= numJobThreads || ProjectWhitelist
.isProjectWhitelisted(flow.getProjectId(),
WhitelistType.NumJobPerFlow))) {
numJobThreads = numJobs;
}
} catch (final Exception e) {
throw new ExecutorManagerException(
"Failed to set the number of job threads "
+ options.getFlowParameters().get(FLOW_NUM_JOB_THREADS)
+ " for flow " + execId, e);
}
}
// Contact the flow ramp manager to re-configure the flow runner if there is any ramp-up configuration.
this.flowRampManager
.configure(flow, FileIOUtils.getDirectory(this.projectDirectory, flow.getDirectory()));
final FlowRunner runner =
new FlowRunner(flow, this.executorLoader, this.projectLoader, this.jobtypeManager,
this.azkabanProps, this.azkabanEventReporter, this.alerterHolder, this.commonMetrics,
this.execMetrics);
runner.setFlowWatcher(watcher)
.setJobLogSettings(this.jobLogChunkSize, this.jobLogNumFiles)
.setValidateProxyUser(this.validateProxyUser)
.setNumJobThreads(numJobThreads)
.addListeners(this, this.flowRampManager);
configureFlowLevelMetrics(runner);
return runner;
}
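// Usage sketch (hypothetical values): submitting a flow with the flow parameter keyed
// by FLOW_NUM_JOB_THREADS set to "5" makes createFlowRunner() use 5 job threads,
// provided 5 <= numJobThreadPerFlow or the project is whitelisted for
// WhitelistType.NumJobPerFlow:
//
//   options.getFlowParameters().put(FLOW_NUM_JOB_THREADS, "5");
//   // -> numJobThreads == 5 if allowed, otherwise the default numJobThreadPerFlow
//
// A non-numeric value fails Integer.valueOf and surfaces as an ExecutorManagerException.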
private void submitFlowRunner(final FlowRunner runner) throws ExecutorManagerException {
this.runningFlows.put(runner.getExecutionId(), runner);
try {
// The executorService already has a queue.
// The submit method below actually returns an instance of FutureTask,
// which implements interface RunnableFuture, which extends both
// Runnable and Future interfaces
final Future<?> future = this.executorService.submit(runner);
// keep track of this future
this.submittedFlows.put(future, runner.getExecutionId());
// update the last submitted time.
this.lastFlowSubmittedDate = System.currentTimeMillis();
} catch (final RejectedExecutionException re) {
this.runningFlows.remove(runner.getExecutionId());
final StringBuilder errorMsg = new StringBuilder(
"Azkaban executor can't execute any more flows. ");
if (this.executorService.isShutdown()) {
errorMsg.append("The executor is being shut down.");
}
throw new ExecutorManagerException(errorMsg.toString(), re);
}
}
/**
* Configure Azkaban metrics tracking for a new flowRunner instance
*/
private void configureFlowLevelMetrics(final FlowRunner flowRunner) {
LOGGER.info("Configuring Azkaban metrics tracking for flow runner object");
if (MetricReportManager.isAvailable()) {
final MetricReportManager metricManager = MetricReportManager.getInstance();
// Adding NumFailedFlow Metric listener
flowRunner.addListener((NumFailedFlowMetric) metricManager
.getMetricFromName(NumFailedFlowMetric.NUM_FAILED_FLOW_METRIC_NAME));
}
}
public void cancelJobBySLA(final int execId, final String jobId)
throws ExecutorManagerException {
final FlowRunner flowRunner = this.runningFlows.get(execId);
if (flowRunner == null) {
throw new ExecutorManagerException("Execution " + execId
+ " is not running.");
}
for (final JobRunner jobRunner : flowRunner.getActiveJobRunners()) {
if (jobRunner.getJobId().equals(jobId)) {
LOGGER.info("Killing job " + jobId + " in execution " + execId + " by SLA");
jobRunner.killBySLA();
break;
}
}
}
public void cancelFlow(final int execId, final String user)
throws ExecutorManagerException {
final FlowRunner flowRunner = this.runningFlows.get(execId);
if (flowRunner == null) {
throw new ExecutorManagerException("Execution " + execId + " is not running.");
}
// Account for those unexpected cases where a completed execution remains in the runningFlows
// collection due to, for example, the FLOW_FINISHED event not being emitted/handled.
if (Status.isStatusFinished(flowRunner.getExecutableFlow().getStatus())) {
LOGGER.warn("Found a finished execution in the list of running flows: " + execId);
throw new ExecutorManagerException("Execution " + execId + " is already finished.");
}
flowRunner.kill(user);
}
public void pauseFlow(final int execId, final String user)
throws ExecutorManagerException {
final FlowRunner runner = this.runningFlows.get(execId);
if (runner == null) {
throw new ExecutorManagerException("Execution " + execId
+ " is not running.");
}
try {
runner.pause(user);
} catch (final IllegalStateException e) {
throw new ExecutorManagerException(e.getMessage());
}
}
public void resumeFlow(final int execId, final String user)
throws ExecutorManagerException {
final FlowRunner runner = this.runningFlows.get(execId);
if (runner == null) {
throw new ExecutorManagerException("Execution " + execId
+ " is not running.");
}
runner.resume(user);
}
public void retryFailures(final int execId, final String user)
throws ExecutorManagerException {
final FlowRunner flowRunner = this.runningFlows.get(execId);
if (flowRunner == null) {
throw new ExecutorManagerException("Execution " + execId
+ " is not running.");
}
flowRunner.retryFailures(user);
}
public ExecutableFlow getExecutableFlow(final int execId) {
final FlowRunner runner = this.runningFlows.get(execId);
if (runner == null) {
return this.recentlyFinishedFlows.get(execId);
}
return runner.getExecutableFlow();
}
/**
* delete execution dir pertaining to the given execution id
*/
private void deleteExecutionDir(final int executionId) {
LOGGER.info("Deleting execution directory for " + executionId);
synchronized (this.executionDirDeletionSync) {
LOGGER.info("Starting execution directory deletion for " + executionId);
final Path flowExecutionDir = Paths.get(this.executionDirectory.toPath().toString(),
String.valueOf(executionId));
try {
FileUtils.deleteDirectory(flowExecutionDir.toFile());
} catch (final IOException e) {
LOGGER.warn("Error when deleting directory " + flowExecutionDir.toAbsolutePath() + ".", e);
}
}
}
@Override
public void handleEvent(final Event event) {
if (event.getType() == EventType.FLOW_FINISHED || event.getType() == EventType.FLOW_STARTED) {
final FlowRunner flowRunner = (FlowRunner) event.getRunner();
final ExecutableFlow flow = flowRunner.getExecutableFlow();
if (event.getType() == EventType.FLOW_FINISHED) {
this.recentlyFinishedFlows.put(flow.getExecutionId(), flow);
LOGGER.info("Flow " + flow.getExecutionId()
+ " is finished. Adding it to recently finished flows list.");
this.runningFlows.remove(flow.getExecutionId());
this.deleteExecutionDir(flow.getExecutionId());
} else if (event.getType() == EventType.FLOW_STARTED) {
// add flow level SLA checker
this.triggerManager
.addTrigger(flow.getExecutionId(), SlaOption.getFlowLevelSLAOptions(flow
.getExecutionOptions().getSlaOptions()));
}
}
}
public LogData readFlowLogs(final int execId, final int startByte, final int length)
throws ExecutorManagerException {
final FlowRunner runner = this.runningFlows.get(execId);
if (runner == null) {
throw new ExecutorManagerException("Running flow " + execId
+ " not found.");
}
final File dir = runner.getExecutionDir();
if (dir != null && dir.exists()) {
try {
synchronized (this.executionDirDeletionSync) {
if (!dir.exists()) {
throw new ExecutorManagerException(
"Execution dir doesn't exist. Probably has been deleted.");
}
final File logFile = runner.getFlowLogFile();
if (logFile != null && logFile.exists()) {
return FileIOUtils.readUtf8File(logFile, startByte, length);
} else {
throw new ExecutorManagerException("Flow log file doesn't exist.");
}
}
} catch (final IOException e) {
throw new ExecutorManagerException(e);
}
}
throw new ExecutorManagerException(
"Error reading file. Log directory doesn't exist.");
}
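// Usage sketch (assumption, not part of the original source): callers typically page
// through the flow log by byte offset, e.g.:
//
//   int offset = 0;
//   LogData chunk = manager.readFlowLogs(execId, offset, 8192);
//   // the next page starts at offset plus the number of bytes actually returned
//
// The exact pagination fields depend on the LogData API; treat this as illustrative.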
public LogData readJobLogs(final int execId, final String jobId, final int attempt,
final int startByte, final int length) throws ExecutorManagerException {
final FlowRunner runner = this.runningFlows.get(execId);
if (runner == null) {
throw new ExecutorManagerException("Running flow " + execId
+ " not found.");
}
final File dir = runner.getExecutionDir();
if (dir != null && dir.exists()) {
try {
synchronized (this.executionDirDeletionSync) {
if (!dir.exists()) {
throw new ExecutorManagerException(
"Execution dir doesn't exist. Probably has been deleted.");
}
final File logFile = runner.getJobLogFile(jobId, attempt);
if (logFile != null && logFile.exists()) {
return FileIOUtils.readUtf8File(logFile, startByte, length);
} else {
throw new ExecutorManagerException("Job log file doesn't exist.");
}
}
} catch (final IOException e) {
throw new ExecutorManagerException(e);
}
}
throw new ExecutorManagerException(
"Error reading file. Log directory doesn't exist.");
}
public List<Object> readJobAttachments(final int execId, final String jobId, final int attempt)
throws ExecutorManagerException {
final FlowRunner runner = this.runningFlows.get(execId);
if (runner == null) {
throw new ExecutorManagerException("Running flow " + execId
+ " not found.");
}
final File dir = runner.getExecutionDir();
if (dir == null || !dir.exists()) {
throw new ExecutorManagerException(
"Error reading file. Log directory doesn't exist.");
}
try {
synchronized (this.executionDirDeletionSync) {
if (!dir.exists()) {
throw new ExecutorManagerException(
"Execution dir doesn't exist. Probably has been deleted.");
}
final File attachmentFile = runner.getJobAttachmentFile(jobId, attempt);
if (attachmentFile == null || !attachmentFile.exists()) {
return null;
}
final List<Object> jobAttachments =
(ArrayList<Object>) JSONUtils.parseJSONFromFile(attachmentFile);
return jobAttachments;
}
} catch (final IOException e) {
throw new ExecutorManagerException(e);
}
}
public JobMetaData readJobMetaData(final int execId, final String jobId, final int attempt,
final int startByte, final int length) throws ExecutorManagerException {
final FlowRunner runner = this.runningFlows.get(execId);
if (runner == null) {
throw new ExecutorManagerException("Running flow " + execId
+ " not found.");
}
final File dir = runner.getExecutionDir();
if (dir != null && dir.exists()) {
try {
synchronized (this.executionDirDeletionSync) {
if (!dir.exists()) {
throw new ExecutorManagerException(
"Execution dir doesn't exist. Probably has been deleted.");
}
final File metaDataFile = runner.getJobMetaDataFile(jobId, attempt);
if (metaDataFile != null && metaDataFile.exists()) {
return FileIOUtils.readUtf8MetaDataFile(metaDataFile, startByte,
length);
} else {
throw new ExecutorManagerException("Job log file doesn't exist.");
}
}
} catch (final IOException e) {
throw new ExecutorManagerException(e);
}
}
throw new ExecutorManagerException(
"Error reading file. Log directory doesn't exist.");
}
public long getLastCleanerThreadCheckTime() {
return this.lastCleanerThreadCheckTime;
}
public boolean isCleanerThreadActive() {
return this.cleanerThread.isAlive();
}
public State getCleanerThreadState() {
return this.cleanerThread.getState();
}
public boolean isExecutorThreadPoolShutdown() {
return this.executorService.isShutdown();
}
public int getNumQueuedFlows() {
return this.executorService.getQueue().size();
}
public int getNumRunningFlows() {
return this.executorService.getActiveCount();
}
public String getRunningFlowIds() {
// The in progress tasks are actually of type FutureTask
final Set<Runnable> inProgressTasks = this.executorService.getInProgressTasks();
final List<Integer> runningFlowIds =
new ArrayList<>(inProgressTasks.size());
for (final Runnable task : inProgressTasks) {
// add casting here to ensure it matches the expected type in
// submittedFlows
final Integer execId = this.submittedFlows.get((Future<?>) task);
if (execId != null) {
runningFlowIds.add(execId);
} else {
LOGGER.warn("getRunningFlowIds: got null execId for task: " + task);
}
}
Collections.sort(runningFlowIds);
return runningFlowIds.toString();
}
public String getQueuedFlowIds() {
final List<Integer> flowIdList =
new ArrayList<>(this.executorService.getQueue().size());
for (final Runnable task : this.executorService.getQueue()) {
final Integer execId = this.submittedFlows.get(task);
if (execId != null) {
flowIdList.add(execId);
} else {
LOGGER.warn("getQueuedFlowIds: got null execId for queuedTask: " + task);
}
}
Collections.sort(flowIdList);
return flowIdList.toString();
}
public int getMaxNumRunningFlows() {
return this.numThreads;
}
public int getTheadPoolQueueSize() {
return this.threadPoolQueueSize;
}
public void reloadJobTypePlugins() throws JobTypeManagerException {
this.jobtypeManager.loadPlugins();
}
public int getTotalNumExecutedFlows() {
return this.executorService.getTotalTasks();
}
@Override
public void beforeExecute(final Runnable r) {
}
@Override
public void afterExecute(final Runnable r) {
this.submittedFlows.remove(r);
}
/**
* This shuts down the flow runner. The call is blocking and awaits execution of all jobs.
*/
public void shutdown() {
LOGGER.warn("Shutting down FlowRunnerManager...");
if (this.azkabanProps.getBoolean(ConfigurationKeys.AZKABAN_POLL_MODEL, false)) {
this.pollingService.shutdown();
}
this.executorService.shutdown();
boolean result = false;
while (!result) {
LOGGER.info("Awaiting Shutdown. # of executing flows: " + getNumRunningFlows());
try {
result = this.executorService.awaitTermination(1, TimeUnit.MINUTES);
} catch (final InterruptedException e) {
LOGGER.error(e.getMessage());
}
}
flowPreparer.shutdown();
LOGGER.warn("Shutdown FlowRunnerManager complete.");
}
/**
* This attempts to shut down the flow runner immediately (unsafe). It doesn't wait for jobs to
* finish but interrupts all threads.
*/
public void shutdownNow() {
LOGGER.warn("Shutting down FlowRunnerManager now...");
if (this.azkabanProps.getBoolean(ConfigurationKeys.AZKABAN_POLL_MODEL, false)) {
this.pollingService.shutdown();
}
this.executorService.shutdownNow();
this.triggerManager.shutdown();
}
/**
* Deletes the execution directory to free disk space.
*/
public void deleteExecutionDirectory() {
LOGGER.warn("Deleting execution dir: " + this.executionDirectory.getAbsolutePath());
try {
FileUtils.deleteDirectory(this.executionDirectory);
} catch (final IOException e) {
LOGGER.error(e.getMessage());
}
}
private class CleanerThread extends Thread {
// Every 2 mins clean the recently finished list
private static final long RECENTLY_FINISHED_INTERVAL_MS = 2 * 60 * 1000;
// Every 5 mins kill flows running longer than allowed max running time
private static final long LONG_RUNNING_FLOW_KILLING_INTERVAL_MS = 5 * 60 * 1000;
private final long flowMaxRunningTimeInMins = FlowRunnerManager.this.azkabanProps.getInt(
Constants.ConfigurationKeys.AZKABAN_MAX_FLOW_RUNNING_MINS, -1);
private boolean shutdown = false;
private long lastRecentlyFinishedCleanTime = -1;
private long lastLongRunningFlowCleanTime = -1;
public CleanerThread() {
this.setName("FlowRunnerManager-Cleaner-Thread");
setDaemon(true);
}
public void shutdown() {
this.shutdown = true;
this.interrupt();
}
private boolean isFlowRunningLongerThan(final ExecutableFlow flow,
final long flowMaxRunningTimeInMins) {
final Set<Status> nonFinishingStatusAfterFlowStarts = new HashSet<>(
Arrays.asList(Status.RUNNING, Status.QUEUED, Status.PAUSED, Status.FAILED_FINISHING));
return nonFinishingStatusAfterFlowStarts.contains(flow.getStatus()) && flow.getStartTime() > 0
&& TimeUnit.MILLISECONDS.toMinutes(System.currentTimeMillis() - flow.getStartTime())
>= flowMaxRunningTimeInMins;
}
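// Worked example (hypothetical numbers): with flowMaxRunningTimeInMins = 60, a flow in
// Status.RUNNING whose startTime was 61 minutes ago satisfies the check (61 >= 60) and
// gets killed by the loop below; a finished flow, or one with startTime <= 0 (never
// started), is never considered regardless of age.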
@Override
public void run() {
while (!this.shutdown) {
synchronized (this) {
try {
FlowRunnerManager.this.lastCleanerThreadCheckTime = System.currentTimeMillis();
FlowRunnerManager.LOGGER.info("# of executing flows: " + getNumRunningFlows());
// Cleanup old stuff.
final long currentTime = System.currentTimeMillis();
if (currentTime - RECENTLY_FINISHED_INTERVAL_MS > this.lastRecentlyFinishedCleanTime) {
FlowRunnerManager.LOGGER.info("Cleaning recently finished");
cleanRecentlyFinished();
this.lastRecentlyFinishedCleanTime = currentTime;
}
if (this.flowMaxRunningTimeInMins > 0
&& currentTime - LONG_RUNNING_FLOW_KILLING_INTERVAL_MS
> this.lastLongRunningFlowCleanTime) {
FlowRunnerManager.LOGGER
.info(String.format("Killing long jobs running longer than %s mins",
this.flowMaxRunningTimeInMins));
for (final FlowRunner flowRunner : FlowRunnerManager.this.runningFlows.values()) {
if (isFlowRunningLongerThan(flowRunner.getExecutableFlow(),
this.flowMaxRunningTimeInMins)) {
FlowRunnerManager.LOGGER.info(String
.format("Killing job [id: %s, status: %s]. It has been running for %s mins",
flowRunner.getExecutableFlow().getId(),
flowRunner.getExecutableFlow().getStatus(), TimeUnit.MILLISECONDS
.toMinutes(System.currentTimeMillis() - flowRunner.getExecutableFlow()
.getStartTime())));
flowRunner.kill();
}
}
this.lastLongRunningFlowCleanTime = currentTime;
}
wait(FlowRunnerManager.RECENTLY_FINISHED_TIME_TO_LIVE);
} catch (final InterruptedException e) {
FlowRunnerManager.LOGGER.info("Interrupted. Probably to shut down.", e);
} catch (final Throwable t) {
FlowRunnerManager.LOGGER.warn(
"Uncaught throwable, please look into why it is not caught", t);
}
}
}
}
private void cleanRecentlyFinished() {
final long cleanupThreshold =
System.currentTimeMillis() - FlowRunnerManager.RECENTLY_FINISHED_TIME_TO_LIVE;
final ArrayList<Integer> executionToKill = new ArrayList<>();
for (final ExecutableFlow flow : FlowRunnerManager.this.recentlyFinishedFlows.values()) {
if (flow.getEndTime() < cleanupThreshold) {
executionToKill.add(flow.getExecutionId());
}
}
for (final Integer id : executionToKill) {
FlowRunnerManager.LOGGER.info("Cleaning execution " + id
+ " from recently finished flows list.");
FlowRunnerManager.this.recentlyFinishedFlows.remove(id);
}
}
}
/**
* Polls new executions from DB periodically and submits the executions to run on the executor.
*/
@SuppressWarnings("FutureReturnValueIgnored")
private class PollingService {
private final ScheduledExecutorService scheduler;
private final PollingCriteria pollingCriteria;
private final long pollingIntervalMs;
private int executorId = -1;
private int numRetries = 0;
public PollingService(final long pollingIntervalMs, final PollingCriteria pollingCriteria) {
this.pollingIntervalMs = pollingIntervalMs;
this.scheduler = Executors.newSingleThreadScheduledExecutor();
this.pollingCriteria = pollingCriteria;
}
public void start() {
this.scheduler.scheduleAtFixedRate(() -> pollExecution(), 0L, this.pollingIntervalMs,
TimeUnit.MILLISECONDS);
}
private void pollExecution() {
if (this.executorId == -1) {
if (AzkabanExecutorServer.getApp() != null) {
try {
final Executor executor = requireNonNull(FlowRunnerManager.this.executorLoader
.fetchExecutor(AzkabanExecutorServer.getApp().getHost(),
AzkabanExecutorServer.getApp().getPort()), "The executor can not be null");
this.executorId = executor.getId();
} catch (final Exception e) {
FlowRunnerManager.LOGGER.error("Failed to fetch executor ", e);
}
}
} else if (this.pollingCriteria.shouldPoll()) {
try {
final int execId;
if (FlowRunnerManager.this.azkabanProps.getBoolean(ConfigurationKeys.AZKABAN_POLLING_LOCK_ENABLED, false)) {
execId = FlowRunnerManager.this.executorLoader.selectAndUpdateExecutionWithLocking(
this.executorId, FlowRunnerManager.this.active);
} else {
execId = FlowRunnerManager.this.executorLoader.selectAndUpdateExecution(this.executorId,
FlowRunnerManager.this.active);
}
if (execId != -1) {
FlowRunnerManager.LOGGER.info("Submitting flow " + execId);
try {
submitFlow(execId);
FlowRunnerManager.this.commonMetrics.markDispatchSuccess();
this.numRetries = 0;
} catch (final ExecutorManagerException e) {
// If the flow fails to be submitted, then unset its executor id in DB so that other
// executors can pick up this flow and submit again.
FlowRunnerManager.this.executorLoader.unsetExecutorIdForExecution(execId);
throw new ExecutorManagerException(
"Unset executor id " + this.executorId + " for execution " + execId, e);
}
}
} catch (final Exception e) {
FlowRunnerManager.LOGGER.error("Failed to submit flow ", e);
FlowRunnerManager.this.commonMetrics.markDispatchFail();
this.numRetries = this.numRetries + 1;
try {
// Implement exponential backoff retries when flow submission fails,
// i.e., sleep 2s, 4s, 8s, ... before the next retry (numRetries has
// already been incremented when the sleep is computed).
Thread.sleep((long) (Math.pow(2, this.numRetries) * 1000));
} catch (final InterruptedException ie) {
FlowRunnerManager.LOGGER
.warn("Sleep after flow submission failure was interrupted - ignoring");
}
}
}
}
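// Worked example of the backoff above (illustrative): numRetries is incremented before
// the sleep is computed, so consecutive submission failures sleep
// 2^1 * 1000 = 2s, then 4s, 8s, 16s, ... A successful submission resets numRetries
// to 0, returning the poller to full speed.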
public void shutdown() {
this.scheduler.shutdown();
this.scheduler.shutdownNow();
}
}
private class PollingCriteria {
private final Props azkabanProps;
private final SystemMemoryInfo memInfo = ServiceProvider.SERVICE_PROVIDER
.getInstance(SystemMemoryInfo.class);
private final OsCpuUtil cpuUtil = ServiceProvider.SERVICE_PROVIDER.getInstance(OsCpuUtil.class);
private boolean areFlowThreadsAvailable;
private boolean isFreeMemoryAvailable;
private boolean isCpuLoadUnderMax;
public PollingCriteria(final Props azkabanProps) {
this.azkabanProps = azkabanProps;
}
public boolean shouldPoll() {
if (satisfiesFlowThreadsAvailableCriteria() && satisfiesFreeMemoryCriteria()
&& satisfiesCpuUtilizationCriteria()) {
return true;
}
return false;
}
private boolean satisfiesFlowThreadsAvailableCriteria() {
final boolean flowThreadsAvailableConfig = this.azkabanProps.
getBoolean(ConfigurationKeys.AZKABAN_POLLING_CRITERIA_FLOW_THREADS_AVAILABLE, false);
// allow polling if the criterion is disabled or not configured
if (!flowThreadsAvailableConfig) {
return true;
}
final int remainingFlowThreads = FlowRunnerManager.this.getMaxNumRunningFlows() -
FlowRunnerManager.this.getNumRunningFlows();
final boolean flowThreadsAvailable = remainingFlowThreads > 0;
if (this.areFlowThreadsAvailable != flowThreadsAvailable) {
this.areFlowThreadsAvailable = flowThreadsAvailable;
if (flowThreadsAvailable) {
FlowRunnerManager.LOGGER.info("Polling criteria satisfied: available flow threads (" +
remainingFlowThreads + ").");
} else {
FlowRunnerManager.LOGGER.info("Polling criteria NOT satisfied: available flow threads (" +
remainingFlowThreads + ").");
}
}
return flowThreadsAvailable;
}
private boolean satisfiesFreeMemoryCriteria() {
final int minFreeMemoryConfigGb = this.azkabanProps.
getInt(ConfigurationKeys.AZKABAN_POLLING_CRITERIA_MIN_FREE_MEMORY_GB, 0);
// allow polling if not present or configured with invalid value
if (minFreeMemoryConfigGb <= 0) {
return true;
}
final int minFreeMemoryConfigKb = minFreeMemoryConfigGb * 1024 * 1024;
final boolean haveEnoughMemory =
this.memInfo.isFreePhysicalMemoryAbove(minFreeMemoryConfigKb);
if (this.isFreeMemoryAvailable != haveEnoughMemory) {
this.isFreeMemoryAvailable = haveEnoughMemory;
if (haveEnoughMemory) {
FlowRunnerManager.LOGGER.info("Polling criteria satisfied: available free memory.");
} else {
FlowRunnerManager.LOGGER.info("Polling criteria NOT satisfied: available free memory.");
}
}
return haveEnoughMemory;
}
private boolean satisfiesCpuUtilizationCriteria() {
final double maxCpuUtilizationConfig = this.azkabanProps.
getDouble(ConfigurationKeys.AZKABAN_POLLING_CRITERIA_MAX_CPU_UTILIZATION_PCT, 100);
// allow polling if criteria not present or configured with invalid value
if (maxCpuUtilizationConfig <= 0 || maxCpuUtilizationConfig >= 100) {
return true;
}
final double cpuLoad = this.cpuUtil.getCpuLoad();
if (cpuLoad == -1) {
return true;
}
final boolean cpuLoadWithinParams = cpuLoad < maxCpuUtilizationConfig;
if (this.isCpuLoadUnderMax != cpuLoadWithinParams) {
this.isCpuLoadUnderMax = cpuLoadWithinParams;
if (cpuLoadWithinParams) {
FlowRunnerManager.LOGGER.info("Polling criteria satisfied: Cpu utilization (" +
cpuLoad + "%).");
} else {
FlowRunnerManager.LOGGER.info("Polling criteria NOT satisfied: Cpu utilization (" +
cpuLoad + "%).");
}
}
return cpuLoadWithinParams;
}
}
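// Illustrative walkthrough (hypothetical configuration values): with
//   AZKABAN_POLLING_CRITERIA_FLOW_THREADS_AVAILABLE  -> true
//   AZKABAN_POLLING_CRITERIA_MIN_FREE_MEMORY_GB      -> 4
//   AZKABAN_POLLING_CRITERIA_MAX_CPU_UTILIZATION_PCT -> 80
// shouldPoll() returns true only when at least one flow thread is free, more than
// 4 GB (4 * 1024 * 1024 KB) of physical memory is free, and CPU load is below 80%.
// Leaving a criterion unset, or setting it to an out-of-range value, disables that
// check, so an unconfigured executor polls unconditionally.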
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban/execapp/JMXHttpServlet.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.execapp;
import azkaban.Constants;
import azkaban.executor.ConnectorParams;
import azkaban.server.HttpRequestUtils;
import azkaban.utils.JSONUtils;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import javax.servlet.ServletConfig;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.log4j.Logger;
public class JMXHttpServlet extends HttpServlet implements ConnectorParams {
private static final long serialVersionUID = -3085603824826446270L;
private static final Logger logger = Logger.getLogger(JMXHttpServlet.class);
private AzkabanExecutorServer server;
@Override
public void init(final ServletConfig config) throws ServletException {
this.server =
(AzkabanExecutorServer) config.getServletContext().getAttribute(
Constants.AZKABAN_SERVLET_CONTEXT_KEY);
}
public boolean hasParam(final HttpServletRequest request, final String param) {
return HttpRequestUtils.hasParam(request, param);
}
public String getParam(final HttpServletRequest request, final String name)
throws ServletException {
return HttpRequestUtils.getParam(request, name);
}
/**
* @deprecated GET available for seamless upgrade. azkaban-web now uses POST.
*/
@Deprecated
@Override
protected void doGet(final HttpServletRequest req, final HttpServletResponse resp)
throws ServletException, IOException {
doPost(req, resp);
}
@Override
protected void doPost(final HttpServletRequest req, final HttpServletResponse resp)
throws ServletException, IOException {
final Map<String, Object> ret = new HashMap<>();
if (hasParam(req, JMX_GET_MBEANS)) {
ret.put("mbeans", this.server.getMBeanRegistrationManager().getMBeanNames());
} else if (hasParam(req, JMX_GET_ALL_MBEAN_ATTRIBUTES)) {
if (!hasParam(req, JMX_MBEAN)) {
ret.put("error", "Parameters 'mbean' must be set");
} else {
ret.putAll(
this.server.getMBeanRegistrationManager().getMBeanResult(getParam(req, JMX_MBEAN)));
}
}
JSONUtils.toJSON(ret, resp.getOutputStream(), true);
}
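// Usage sketch (assumption -- the real query parameter names are the ConnectorParams
// constants, whose string values are not shown here, and host/port/path are
// hypothetical):
//
//   curl -X POST 'http://executor:12321/jmx?<JMX_GET_MBEANS>'
//   curl -X POST 'http://executor:12321/jmx?<JMX_GET_ALL_MBEAN_ATTRIBUTES>&<JMX_MBEAN>=<name>'
//
// Both return a JSON object; the second requires the mbean parameter, otherwise an
// "error" entry is returned instead.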
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban/execapp/JobRunner.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.execapp;
import azkaban.Constants;
import azkaban.Constants.JobProperties;
import azkaban.event.Event;
import azkaban.event.EventData;
import azkaban.event.EventHandler;
import azkaban.execapp.event.BlockingStatus;
import azkaban.execapp.event.FlowWatcher;
import azkaban.executor.ExecutableFlowBase;
import azkaban.executor.ExecutableNode;
import azkaban.executor.ExecutorLoader;
import azkaban.executor.ExecutorManagerException;
import azkaban.executor.Status;
import azkaban.flow.CommonJobProperties;
import azkaban.jobExecutor.AbstractProcessJob;
import azkaban.jobExecutor.JavaProcessJob;
import azkaban.jobExecutor.Job;
import azkaban.jobtype.JobTypeManager;
import azkaban.jobtype.JobTypeManagerException;
import azkaban.spi.EventType;
import azkaban.utils.ExternalLinkUtils;
import azkaban.utils.PatternLayoutEscaped;
import azkaban.utils.Props;
import azkaban.utils.StringUtils;
import azkaban.utils.UndefinedPropertyException;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Optional;
import java.util.Set;
import org.apache.kafka.log4jappender.KafkaLog4jAppender;
import org.apache.log4j.Appender;
import org.apache.log4j.EnhancedPatternLayout;
import org.apache.log4j.FileAppender;
import org.apache.log4j.Layout;
import org.apache.log4j.Logger;
import org.apache.log4j.RollingFileAppender;
public class JobRunner extends EventHandler implements Runnable {
public static final String AZKABAN_WEBSERVER_URL = "azkaban.webserver.url";
private static final Logger serverLogger = Logger.getLogger(JobRunner.class);
private static final Object logCreatorLock = new Object();
private static final String DEFAULT_LAYOUT =
"%d{dd-MM-yyyy HH:mm:ss z} %c{1} %p - %m\n";
private final Object syncObject = new Object();
private final JobTypeManager jobtypeManager;
private final ExecutorLoader loader;
private final Props props;
private final Props azkabanProps;
private final ExecutableNode node;
private final File workingDir;
private final Layout loggerLayout;
private final String jobId;
private final Set<String> pipelineJobs = new HashSet<>();
private Logger logger = null;
private Logger flowLogger = null;
private Appender jobAppender = null;
private Optional<Appender> kafkaAppender = Optional.empty();
private File logFile;
private String attachmentFileName;
private Job job;
private int executionId = -1;
// Used by the job to watch and block against another flow
private Integer pipelineLevel = null;
private FlowWatcher watcher = null;
private Set<String> proxyUsers = null;
private String jobLogChunkSize;
private int jobLogBackupIndex;
private long delayStartMs = 0;
private volatile boolean killed = false;
private BlockingStatus currentBlockStatus = null;
public JobRunner(final ExecutableNode node, final File workingDir, final ExecutorLoader loader,
final JobTypeManager jobtypeManager, final Props azkabanProps) {
this.props = node.getInputProps();
this.node = node;
this.workingDir = workingDir;
this.executionId = node.getParentFlow().getExecutionId();
this.jobId = node.getId();
this.loader = loader;
this.jobtypeManager = jobtypeManager;
this.azkabanProps = azkabanProps;
final String jobLogLayout = this.props.getString(
JobProperties.JOB_LOG_LAYOUT, DEFAULT_LAYOUT);
this.loggerLayout = new EnhancedPatternLayout(jobLogLayout);
}
public static String createLogFileName(final ExecutableNode node, final int attempt) {
final int executionId = node.getExecutableFlow().getExecutionId();
String jobId = node.getId();
if (node.getExecutableFlow() != node.getParentFlow()) {
// Posix safe file delimiter
jobId = node.getPrintableId("._.");
}
return attempt > 0 ? "_job." + executionId + "." + attempt + "." + jobId
+ ".log" : "_job." + executionId + "." + jobId + ".log";
}
public static String createLogFileName(final ExecutableNode node) {
return JobRunner.createLogFileName(node, node.getAttempt());
}
public static String createMetaDataFileName(final ExecutableNode node, final int attempt) {
final int executionId = node.getExecutableFlow().getExecutionId();
String jobId = node.getId();
if (node.getExecutableFlow() != node.getParentFlow()) {
// Posix safe file delimiter
jobId = node.getPrintableId("._.");
}
return attempt > 0 ? "_job." + executionId + "." + attempt + "." + jobId
+ ".meta" : "_job." + executionId + "." + jobId + ".meta";
}
public static String createMetaDataFileName(final ExecutableNode node) {
return JobRunner.createMetaDataFileName(node, node.getAttempt());
}
public static String createAttachmentFileName(final ExecutableNode node) {
return JobRunner.createAttachmentFileName(node, node.getAttempt());
}
public static String createAttachmentFileName(final ExecutableNode node, final int attempt) {
final int executionId = node.getExecutableFlow().getExecutionId();
String jobId = node.getId();
if (node.getExecutableFlow() != node.getParentFlow()) {
// Posix safe file delimiter
jobId = node.getPrintableId("._.");
}
return attempt > 0 ? "_job." + executionId + "." + attempt + "." + jobId
+ ".attach" : "_job." + executionId + "." + jobId + ".attach";
}
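// Illustrative examples of the naming scheme above (hypothetical ids): for
// executionId 45 and a top-level job "foo", attempt 0 yields "_job.45.foo.log" and
// attempt 2 yields "_job.45.2.foo.log"; the ".meta" and ".attach" variants follow the
// same pattern. A job inside an embedded flow uses its printable id with "._." as the
// separator, e.g. "_job.45.subflow._.foo.log".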
public void setValidatedProxyUsers(final Set<String> proxyUsers) {
this.proxyUsers = proxyUsers;
}
public void setLogSettings(final Logger flowLogger, final String logFileChunkSize,
final int numLogBackup) {
this.flowLogger = flowLogger;
this.jobLogChunkSize = logFileChunkSize;
this.jobLogBackupIndex = numLogBackup;
}
public Props getProps() {
return this.props;
}
public String getEffectiveUser() {
return this.props.getString(JobProperties.USER_TO_PROXY,
this.getNode().getExecutableFlow().getSubmitUser());
}
public void setPipeline(final FlowWatcher watcher, final int pipelineLevel) {
this.watcher = watcher;
this.pipelineLevel = pipelineLevel;
if (this.pipelineLevel == 1) {
this.pipelineJobs.add(this.node.getNestedId());
} else if (this.pipelineLevel == 2) {
this.pipelineJobs.add(this.node.getNestedId());
final ExecutableFlowBase parentFlow = this.node.getParentFlow();
if (parentFlow.getEndNodes().contains(this.node.getId())) {
if (!parentFlow.getOutNodes().isEmpty()) {
final ExecutableFlowBase grandParentFlow = parentFlow.getParentFlow();
for (final String outNode : parentFlow.getOutNodes()) {
final ExecutableNode nextNode =
grandParentFlow.getExecutableNode(outNode);
// If the next node is a nested flow, then we add the nested
// starting nodes
if (nextNode instanceof ExecutableFlowBase) {
final ExecutableFlowBase nextFlow = (ExecutableFlowBase) nextNode;
findAllStartingNodes(nextFlow, this.pipelineJobs);
} else {
this.pipelineJobs.add(nextNode.getNestedId());
}
}
}
} else {
for (final String outNode : this.node.getOutNodes()) {
final ExecutableNode nextNode = parentFlow.getExecutableNode(outNode);
// If the next node is a nested flow, then we add the nested starting
// nodes
if (nextNode instanceof ExecutableFlowBase) {
final ExecutableFlowBase nextFlow = (ExecutableFlowBase) nextNode;
findAllStartingNodes(nextFlow, this.pipelineJobs);
} else {
this.pipelineJobs.add(nextNode.getNestedId());
}
}
}
}
}
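// Illustrative summary (not part of the original source) of the pipeline levels
// handled above:
//   level 1: this job blocks only on its own counterpart in the watched execution
//            (pipelineJobs = { this.node.getNestedId() }).
//   level 2: it additionally blocks on the watched execution's successors of this job,
//            i.e. its out-nodes (or, for the last job of an embedded flow, the
//            out-nodes of that flow), with nested flows expanded to their start nodes.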
private void findAllStartingNodes(final ExecutableFlowBase flow,
final Set<String> pipelineJobs) {
for (final String startingNode : flow.getStartNodes()) {
final ExecutableNode node = flow.getExecutableNode(startingNode);
if (node instanceof ExecutableFlowBase) {
findAllStartingNodes((ExecutableFlowBase) node, pipelineJobs);
} else {
pipelineJobs.add(node.getNestedId());
}
}
}
/**
* Returns the set of jobs that this JobRunner will wait on to finish before starting. It is
* only relevant if pipelining is turned on.
*/
public Set<String> getPipelineWatchedJobs() {
return this.pipelineJobs;
}
public long getDelayStart() {
return this.delayStartMs;
}
public void setDelayStart(final long delayMS) {
this.delayStartMs = delayMS;
}
public ExecutableNode getNode() {
return this.node;
}
public String getJobId() {
return this.node.getId();
}
public String getLogFilePath() {
return this.logFile == null ? null : this.logFile.getPath();
}
private void createLogger() {
// Create logger
synchronized (logCreatorLock) {
final String loggerName =
System.currentTimeMillis() + "." + this.executionId + "."
+ this.jobId;
this.logger = Logger.getLogger(loggerName);
try {
attachFileAppender(createFileAppender());
} catch (final IOException e) {
removeAppender(this.jobAppender);
this.flowLogger.error("Could not open log file in " + this.workingDir
+ " for job " + this.jobId, e);
}
if (this.props.getBoolean(Constants.JobProperties.AZKABAN_JOB_LOGGING_KAFKA_ENABLE, false)) {
// Only attempt appender construction if required properties are present
if (this.azkabanProps
.containsKey(Constants.ConfigurationKeys.AZKABAN_SERVER_LOGGING_KAFKA_BROKERLIST)
&& this.azkabanProps
.containsKey(Constants.ConfigurationKeys.AZKABAN_SERVER_LOGGING_KAFKA_TOPIC)) {
try {
attachKafkaAppender(createKafkaAppender());
} catch (final Exception e) {
removeAppender(this.kafkaAppender);
this.flowLogger.error("Failed to create Kafka appender for job " + this.jobId, e);
}
} else {
this.flowLogger.info(
"Kafka appender not created as brokerlist or topic not provided by executor server");
}
}
}
final String externalViewer = ExternalLinkUtils
.getExternalLogViewer(this.azkabanProps, this.jobId,
this.props);
if (!externalViewer.isEmpty()) {
this.logger.info("If you want to leverage AZ ELK logging support, you need to follow the "
+ "instructions: http://azkaban.github.io/azkaban/docs/latest/#how-to");
this.logger.info("If you did the above step, see logs at: " + externalViewer);
}
}
private void attachFileAppender(final FileAppender appender) {
// This should only be called once; no file appender should be attached yet.
assert (this.jobAppender == null);
this.jobAppender = appender;
this.logger.addAppender(this.jobAppender);
this.logger.setAdditivity(false);
this.flowLogger.info("Attached file appender for job " + this.jobId);
}
private FileAppender createFileAppender() throws IOException {
// Set up log files
final String logName = createLogFileName(this.node);
this.logFile = new File(this.workingDir, logName);
final String absolutePath = this.logFile.getAbsolutePath();
this.flowLogger.info("Log file path for job: " + this.jobId + " is: " + absolutePath);
// Attempt to create FileAppender
final RollingFileAppender fileAppender =
new RollingFileAppender(this.loggerLayout, absolutePath, true);
fileAppender.setMaxBackupIndex(this.jobLogBackupIndex);
fileAppender.setMaxFileSize(this.jobLogChunkSize);
this.flowLogger.info("Created file appender for job " + this.jobId);
return fileAppender;
}
private void createAttachmentFile() {
final String fileName = createAttachmentFileName(this.node);
final File file = new File(this.workingDir, fileName);
this.attachmentFileName = file.getAbsolutePath();
}
private void attachKafkaAppender(final KafkaLog4jAppender appender) {
// This should only be called once
assert (!this.kafkaAppender.isPresent());
this.kafkaAppender = Optional.of(appender);
this.logger.addAppender(this.kafkaAppender.get());
this.logger.setAdditivity(false);
this.flowLogger.info("Attached new Kafka appender for job " + this.jobId);
}
private KafkaLog4jAppender createKafkaAppender() throws UndefinedPropertyException {
final KafkaLog4jAppender kafkaProducer = new KafkaLog4jAppender();
kafkaProducer.setSyncSend(false);
kafkaProducer.setBrokerList(this.azkabanProps
.getString(Constants.ConfigurationKeys.AZKABAN_SERVER_LOGGING_KAFKA_BROKERLIST));
kafkaProducer.setTopic(
this.azkabanProps
.getString(Constants.ConfigurationKeys.AZKABAN_SERVER_LOGGING_KAFKA_TOPIC));
final String layoutString = LogUtil.createLogPatternLayoutJsonString(this.props, this.jobId);
kafkaProducer.setLayout(new PatternLayoutEscaped(layoutString));
kafkaProducer.activateOptions();
this.flowLogger.info("Created kafka appender for " + this.jobId);
return kafkaProducer;
}
private void removeAppender(final Optional<Appender> appender) {
if (appender.isPresent()) {
removeAppender(appender.get());
}
}
private void removeAppender(final Appender appender) {
if (appender != null) {
this.logger.removeAppender(appender);
appender.close();
}
}
private void closeLogger() {
if (this.jobAppender != null) {
removeAppender(this.jobAppender);
}
if (this.kafkaAppender.isPresent()) {
removeAppender(this.kafkaAppender);
}
}
private void writeStatus() {
try {
this.node.setUpdateTime(System.currentTimeMillis());
this.loader.updateExecutableNode(this.node);
} catch (final ExecutorManagerException e) {
this.flowLogger.error("Could not update job properties in db for "
+ this.jobId, e);
}
}
/**
* Used to handle non-ready and special statuses (e.g. KILLED). Returns true if it handled
* anything.
*/
private boolean handleNonReadyStatus() {
synchronized (this.syncObject) {
Status nodeStatus = this.node.getStatus();
boolean quickFinish = false;
final long time = System.currentTimeMillis();
if (Status.isStatusFinished(nodeStatus)) {
quickFinish = true;
} else if (nodeStatus == Status.DISABLED) {
nodeStatus = changeStatus(Status.SKIPPED, time);
quickFinish = true;
} else if (this.isKilled()) {
nodeStatus = changeStatus(Status.KILLED, time);
quickFinish = true;
}
if (quickFinish) {
this.node.setStartTime(time);
fireEvent(
Event.create(this, EventType.JOB_STARTED,
new EventData(nodeStatus, this.node.getNestedId())));
this.node.setEndTime(time);
fireEvent(
Event
.create(this, EventType.JOB_FINISHED,
new EventData(nodeStatus, this.node.getNestedId())));
return true;
}
return false;
}
}
/**
* If pipelining is set, will block on another flow's jobs.
*/
private boolean blockOnPipeLine() {
if (this.isKilled()) {
return true;
}
// For pipelining of jobs. Will watch other jobs.
if (!this.pipelineJobs.isEmpty()) {
String blockedList = "";
final ArrayList<BlockingStatus> blockingStatus =
new ArrayList<>();
for (final String waitingJobId : this.pipelineJobs) {
final Status status = this.watcher.peekStatus(waitingJobId);
if (status != null && !Status.isStatusFinished(status)) {
final BlockingStatus block = this.watcher.getBlockingStatus(waitingJobId);
blockingStatus.add(block);
blockedList += waitingJobId + ",";
}
}
if (!blockingStatus.isEmpty()) {
this.logger.info("Pipeline job " + this.jobId + " waiting on " + blockedList
+ " in execution " + this.watcher.getExecId());
for (final BlockingStatus bStatus : blockingStatus) {
this.logger.info("Waiting on pipelined job " + bStatus.getJobId());
this.currentBlockStatus = bStatus;
bStatus.blockOnFinishedStatus();
if (this.isKilled()) {
this.logger.info("Job was killed while waiting on pipeline. Quiting.");
return true;
} else {
this.logger.info("Pipelined job " + bStatus.getJobId() + " finished.");
}
}
}
}
this.currentBlockStatus = null;
return false;
}
private boolean delayExecution() {
synchronized (this) {
if (this.isKilled()) {
return true;
}
final long currentTime = System.currentTimeMillis();
if (this.delayStartMs > 0) {
this.logger.info("Delaying start of execution for " + this.delayStartMs
+ " milliseconds.");
try {
this.wait(this.delayStartMs);
this.logger.info("Execution has been delayed for " + this.delayStartMs
+ " ms. Continuing with execution.");
} catch (final InterruptedException e) {
this.logger.error("Job " + this.jobId + " was to be delayed for "
+ this.delayStartMs + ". Interrupted after "
+ (System.currentTimeMillis() - currentTime));
}
if (this.isKilled()) {
this.logger.info("Job was killed while in delay. Quiting.");
return true;
}
}
}
return false;
}
private void finalizeLogFile(final int attemptNo) {
closeLogger();
if (this.logFile == null) {
this.flowLogger.info("Log file for job " + this.jobId + " is null");
return;
}
try {
final File[] files = this.logFile.getParentFile().listFiles(new FilenameFilter() {
@Override
public boolean accept(final File dir, final String name) {
return name.startsWith(JobRunner.this.logFile.getName());
}
});
Arrays.sort(files, Collections.reverseOrder());
this.loader.uploadLogFile(this.executionId, this.node.getNestedId(), attemptNo,
files);
} catch (final ExecutorManagerException e) {
this.flowLogger.error(
"Error writing out logs for job " + this.node.getNestedId(), e);
}
}
private void finalizeAttachmentFile() {
if (this.attachmentFileName == null) {
this.flowLogger.info("Attachment file for job " + this.jobId + " is null");
return;
}
try {
final File file = new File(this.attachmentFileName);
if (!file.exists()) {
this.flowLogger.info("No attachment file for job " + this.jobId
+ " written.");
return;
}
this.loader.uploadAttachmentFile(this.node, file);
} catch (final ExecutorManagerException e) {
this.flowLogger.error(
"Error writing out attachment for job " + this.node.getNestedId(), e);
}
}
/**
* The main run thread.
*/
@Override
public void run() {
try {
doRun();
} catch (final Exception e) {
serverLogger.error("Unexpected exception", e);
throw e;
}
}
private void doRun() {
Thread.currentThread().setName(
"JobRunner-" + this.jobId + "-" + this.executionId);
// If the job is cancelled, disabled, or killed, no log is created.
if (handleNonReadyStatus()) {
return;
}
createAttachmentFile();
createLogger();
boolean errorFound = false;
// Delay execution if necessary. Will return true if something went wrong.
errorFound |= delayExecution();
// For pipelining of jobs. Will watch other jobs. Will return true if
// something went wrong.
errorFound |= blockOnPipeLine();
// Start the node.
this.node.setStartTime(System.currentTimeMillis());
Status finalStatus = this.node.getStatus();
uploadExecutableNode();
if (!errorFound && !isKilled()) {
fireEvent(Event.create(this, EventType.JOB_STARTED, new EventData(this.node)));
final Status prepareStatus = prepareJob();
if (prepareStatus != null) {
// Writes status to the db
writeStatus();
fireEvent(Event.create(this, EventType.JOB_STATUS_CHANGED,
new EventData(prepareStatus, this.node.getNestedId())));
finalStatus = runJob();
} else {
finalStatus = changeStatus(Status.FAILED);
logError("Job run failed preparing the job.");
}
}
this.node.setEndTime(System.currentTimeMillis());
if (isKilled()) {
// Even if the job was killed, there is a chance that it gets marked as a
// failure, so we set the status to KILLED to make sure we know that we
// force-killed it rather than it being a legitimate failure.
finalStatus = changeStatus(Status.KILLED);
}
logInfo(
"Finishing job " + this.jobId + getNodeRetryLog() + " at " + this.node.getEndTime()
+ " with status " + this.node.getStatus());
try {
finalizeLogFile(this.node.getAttempt());
finalizeAttachmentFile();
writeStatus();
} finally {
try {
// note that FlowRunner thread does node.attempt++ when it receives the JOB_FINISHED event
fireEvent(Event.create(this, EventType.JOB_FINISHED,
new EventData(finalStatus, this.node.getNestedId())), false);
} catch (final RuntimeException e) {
serverLogger.warn("Error in fireEvent for JOB_FINISHED for execId:" + this.executionId
+ " jobId: " + this.jobId);
serverLogger.warn(e.getMessage(), e);
}
}
}
private String getNodeRetryLog() {
return this.node.getAttempt() > 0 ? (" retry: " + this.node.getAttempt()) : "";
}
private void uploadExecutableNode() {
try {
this.loader.uploadExecutableNode(this.node, this.props);
} catch (final ExecutorManagerException e) {
this.logger.error("Error writing initial node properties", e);
}
}
private Status prepareJob() throws RuntimeException {
// Check preconditions
if (this.props == null || this.isKilled()) {
logError("Failing job. The job properties don't exist");
return null;
}
final Status finalStatus;
synchronized (this.syncObject) {
if (this.node.getStatus() == Status.FAILED || this.isKilled()) {
return null;
}
logInfo("Starting job " + this.jobId + getNodeRetryLog() + " at " + this.node.getStartTime());
// If it's an embedded flow, we'll add the nested flow info to the job
// conf
if (this.node.getExecutableFlow() != this.node.getParentFlow()) {
final String subFlow = this.node.getPrintableId(":");
this.props.put(CommonJobProperties.NESTED_FLOW_PATH, subFlow);
}
insertJobMetadata();
insertJVMArgs();
this.props.put(CommonJobProperties.JOB_ID, this.jobId);
this.props.put(CommonJobProperties.JOB_ATTEMPT, this.node.getAttempt());
this.props.put(CommonJobProperties.JOB_METADATA_FILE,
createMetaDataFileName(this.node));
this.props.put(CommonJobProperties.JOB_ATTACHMENT_FILE, this.attachmentFileName);
this.props.put(CommonJobProperties.JOB_LOG_FILE, this.logFile.getAbsolutePath());
finalStatus = changeStatus(Status.RUNNING);
// Ability to specify working directory
if (this.props.containsKey(AbstractProcessJob.WORKING_DIR)) {
if (!isSpecifiedWorkingDirectoryValid()) {
logError("Specified " + AbstractProcessJob.WORKING_DIR + " is not valid: " +
this.props.get(AbstractProcessJob.WORKING_DIR) + ". Must be a subdirectory of " +
this.workingDir.getAbsolutePath());
return null;
}
} else {
this.props.put(AbstractProcessJob.WORKING_DIR, this.workingDir.getAbsolutePath());
}
if (this.props.containsKey(JobProperties.USER_TO_PROXY)) {
final String jobProxyUser = this.props.getString(JobProperties.USER_TO_PROXY);
if (this.proxyUsers != null && !this.proxyUsers.contains(jobProxyUser)) {
final String permissionsPageURL = getProjectPermissionsURL();
this.logger.error("User " + jobProxyUser
+ " has no permission to execute this job " + this.jobId + "!"
+ " If you want to execute this flow as " + jobProxyUser
+ ", please add it to Proxy Users under project permissions page: " +
permissionsPageURL);
return null;
}
} else {
final String submitUser = this.getNode().getExecutableFlow().getSubmitUser();
this.props.put(JobProperties.USER_TO_PROXY, submitUser);
this.logger.info("user.to.proxy property was not set, defaulting to submit user " +
submitUser);
}
final Props props = this.node.getRampProps();
if (props != null) {
this.logger.info(String
.format("RAMP_JOB_ATTACH_PROPS : (id = %s, props = %s)", this.node.getId(),
props.toString()));
this.props.putAll(props);
}
try {
this.job = this.jobtypeManager.buildJobExecutor(this.jobId, this.props, this.logger);
} catch (final JobTypeManagerException e) {
this.logger.error("Failed to build job type", e);
return null;
}
}
return finalStatus;
}
/**
* Validates the working directory specified by the user.
*/
private boolean isSpecifiedWorkingDirectoryValid() {
final File usersWorkingDir = new File(this.props.get(AbstractProcessJob.WORKING_DIR));
try {
if (!usersWorkingDir.getCanonicalPath().startsWith(this.workingDir.getCanonicalPath())) {
return false;
}
} catch (final IOException e) {
this.logger.error("Failed to validate user's " + AbstractProcessJob.WORKING_DIR +
" property.", e);
return false;
}
return true;
}
/**
* Get project permissions page URL
*/
private String getProjectPermissionsURL() {
String projectPermissionsURL = null;
final String baseURL = this.azkabanProps.get(AZKABAN_WEBSERVER_URL);
if (baseURL != null) {
final String projectName = this.node.getParentFlow().getProjectName();
projectPermissionsURL = String
.format("%s/manager?project=%s&permissions", baseURL, projectName);
}
return projectPermissionsURL;
}
/**
* Add useful JVM arguments so it is easier to map a running Java process to a flow, execution id
* and job
*/
private void insertJVMArgs() {
final String flowName = this.node.getParentFlow().getFlowId();
final String jobId = this.node.getId();
String jobJVMArgs =
String.format(
"'-Dazkaban.flowid=%s' '-Dazkaban.execid=%s' '-Dazkaban.jobid=%s'",
flowName, this.executionId, jobId);
final String previousJVMArgs = this.props.get(JavaProcessJob.JVM_PARAMS);
jobJVMArgs += (previousJVMArgs == null) ? "" : " " + previousJVMArgs;
this.logger.info("job JVM args: " + jobJVMArgs);
this.props.put(JavaProcessJob.JVM_PARAMS, jobJVMArgs);
}
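// Example of the resulting property value (hypothetical flow/exec/job names):
//
//   '-Dazkaban.flowid=myFlow' '-Dazkaban.execid=123' '-Dazkaban.jobid=myJob'
//
// with any pre-existing JVM_PARAMS appended after a space, so a Java process listing
// (e.g. jps -v) can be mapped back to the originating flow, execution and job.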
/**
* Add relevant links to the job properties so that downstream consumers may know what executions
* initiated their execution.
*/
private void insertJobMetadata() {
final String baseURL = this.azkabanProps.get(AZKABAN_WEBSERVER_URL);
if (baseURL != null) {
final String flowName = this.node.getParentFlow().getFlowId();
final String projectName = this.node.getParentFlow().getProjectName();
this.props.put(CommonJobProperties.AZKABAN_URL, baseURL);
this.props.put(CommonJobProperties.EXECUTION_LINK,
String.format("%s/executor?execid=%d", baseURL, this.executionId));
this.props.put(CommonJobProperties.JOBEXEC_LINK, String.format(
"%s/executor?execid=%d&job=%s", baseURL, this.executionId, this.node.getNestedId()));
this.props.put(CommonJobProperties.ATTEMPT_LINK, String.format(
"%s/executor?execid=%d&job=%s&attempt=%d", baseURL, this.executionId,
this.node.getNestedId(), this.node.getAttempt()));
this.props.put(CommonJobProperties.WORKFLOW_LINK, String.format(
"%s/manager?project=%s&flow=%s", baseURL, projectName, flowName));
this.props.put(CommonJobProperties.JOB_LINK, String.format(
"%s/manager?project=%s&flow=%s&job=%s", baseURL, projectName,
flowName, this.jobId));
} else {
if (this.logger != null) {
this.logger.info(AZKABAN_WEBSERVER_URL + " property was not set");
}
}
// out nodes
this.props.put(CommonJobProperties.OUT_NODES,
StringUtils.join2(this.node.getOutNodes(), ","));
// in nodes
this.props.put(CommonJobProperties.IN_NODES,
StringUtils.join2(this.node.getInNodes(), ","));
}
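// Illustrative link values (assuming a hypothetical base URL
// https://azkaban.example.com, executionId 123 and job "myJob"):
//
//   EXECUTION_LINK: https://azkaban.example.com/executor?execid=123
//   JOBEXEC_LINK:   https://azkaban.example.com/executor?execid=123&job=myJob
//   WORKFLOW_LINK:  https://azkaban.example.com/manager?project=myProject&flow=myFlow
//
// Downstream consumers can read these properties to link back to the run that
// launched them.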
private Status runJob() {
Status finalStatus;
try {
this.job.run();
finalStatus = this.node.getStatus();
} catch (final Throwable e) {
synchronized (this.syncObject) {
if (this.props.getBoolean("job.succeed.on.failure", false)) {
finalStatus = changeStatus(Status.FAILED_SUCCEEDED);
logError("Job run failed, but will treat it like success.");
logError(e.getMessage() + " cause: " + e.getCause(), e);
} else {
if (isKilled() || this.node.getStatus() == Status.KILLED) {
finalStatus = Status.KILLED;
logError("Job run killed!", e);
} else {
finalStatus = changeStatus(Status.FAILED);
logError("Job run failed!", e);
}
logError(e.getMessage() + " cause: " + e.getCause());
}
}
}
if (this.job != null) {
this.node.setOutputProps(this.job.getJobGeneratedProperties());
}
synchronized (this.syncObject) {
// If the job is still running (but not killed), set the status to Success.
if (!Status.isStatusFinished(finalStatus) && !isKilled()) {
finalStatus = changeStatus(Status.SUCCEEDED);
}
}
return finalStatus;
}
private Status changeStatus(final Status status) {
changeStatus(status, System.currentTimeMillis());
return status;
}
private Status changeStatus(final Status status, final long time) {
this.node.setStatus(status);
this.node.setUpdateTime(time);
return status;
}
private void fireEvent(final Event event) {
fireEvent(event, true);
}
private void fireEvent(final Event event, final boolean updateTime) {
if (updateTime) {
this.node.setUpdateTime(System.currentTimeMillis());
}
this.fireEventListeners(event);
}
public void killBySLA() {
synchronized (this.syncObject) {
kill();
this.getNode().setKilledBySLA(true);
}
}
public void kill() {
synchronized (this.syncObject) {
if (Status.isStatusFinished(this.node.getStatus())) {
return;
}
logError("Kill has been called.");
this.changeStatus(Status.KILLING);
this.killed = true;
final BlockingStatus status = this.currentBlockStatus;
if (status != null) {
status.unblock();
}
// Cancel code here
if (this.job == null) {
logError("Job hasn't started yet.");
// Just in case we're waiting on the delay
synchronized (this) {
this.notify();
}
return;
}
try {
this.job.cancel();
} catch (final Exception e) {
logError(e.getMessage());
logError(
"Failed trying to cancel job. Maybe it hasn't started running yet or just finished.");
}
}
}
public boolean isKilled() {
return this.killed;
}
public Status getStatus() {
return this.node.getStatus();
}
private void logError(final String message) {
if (this.logger != null) {
this.logger.error(message);
}
}
private void logError(final String message, final Throwable t) {
if (this.logger != null) {
this.logger.error(message, t);
}
}
private void logInfo(final String message) {
if (this.logger != null) {
this.logger.info(message);
}
}
public File getLogFile() {
return this.logFile;
}
public Logger getLogger() {
return this.logger;
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban/execapp/LogUtil.java
|
/*
* Copyright 2017 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.execapp;
import azkaban.Constants;
import azkaban.utils.Props;
import com.google.gson.Gson;
import java.util.HashMap;
import java.util.Map;
class LogUtil {
static String createLogPatternLayoutJsonString(final Props props, final String jobId) {
final Map<String, String> layout = new HashMap<>();
layout.put("category", "%c{1}");
layout.put("level", "%p");
layout.put("message", "%m");
layout.put("projectname",
props.getString(Constants.FlowProperties.AZKABAN_FLOW_PROJECT_NAME));
layout.put("flowid", props.getString(Constants.FlowProperties.AZKABAN_FLOW_FLOW_ID));
layout.put("jobid", jobId);
layout
.put("submituser", props.getString(Constants.FlowProperties.AZKABAN_FLOW_SUBMIT_USER));
layout.put("execid", props.getString(Constants.FlowProperties.AZKABAN_FLOW_EXEC_ID));
layout.put("projectversion",
props.getString(Constants.FlowProperties.AZKABAN_FLOW_PROJECT_VERSION));
layout.put("logsource", "userJob");
final Gson gson = new Gson();
return gson.toJson(layout);
}
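// Example output (hypothetical property values): for jobId "myJob" in project
// "myProject", the returned JSON looks roughly like
//
//   {"category":"%c{1}","level":"%p","message":"%m","projectname":"myProject",
//    "flowid":"myFlow","jobid":"myJob","submituser":"alice","execid":"123",
//    "projectversion":"7","logsource":"userJob"}
//
// (key order may differ). log4j later substitutes the %-patterns per log event.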
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban/execapp/ProjectCacheCleaner.java
|
/*
* Copyright 2019 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.execapp;
import azkaban.utils.ExecutorServiceUtils;
import azkaban.utils.FileIOUtils;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import java.io.File;
import java.io.FilenameFilter;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class is responsible for deleting least recently accessed projects in the shared project
* cache when there's no room to accommodate a new project.
*/
class ProjectCacheCleaner {
// Root directory for project cache
private final File projectCacheDir;
  // cache size as a percentage of the disk partition that {@link projectCacheDir} belongs to
private final double percentageOfDisk;
private static final Logger log = LoggerFactory.getLogger(ProjectCacheCleaner.class);
// Number of threads in the cache cleanup service
private static final int CLEANING_SERVICE_THREAD_NUM = 8;
private static final double DEFAULT_THROTTLE_PERCENTAGE = 0.92; // 92%
  // If used space in the cache partition goes above this percentage, incoming requests must
  // wait until the current cache cleanup cycle is done
private double throttlePercentage;
// Currently cached projects
private final Map<Path, ProjectDirectoryMetadata> cachedProjects = new HashMap<>();
// A record of projects under deletion
private final ConcurrentMap<Path, File> projectsUnderDeletion = new ConcurrentHashMap<>();
// Executor service responsible for cache cleanup
private final ExecutorService deletionService;
  // This is used as a barrier to stall an incoming request until the ongoing cache cleanup
  // cycle is done. This is only necessary if new projects are added so rapidly that cache
  // usage is driven above throttlePercentage
private final Lock barrier = new ReentrantLock();
private Condition emptyQCond;
public static final String STATE_AVAILABLE = "CACHE_AVAILABLE";
public static final String STATE_CLEANING = "CACHE_CLEANING";
public ProjectCacheCleaner(final File projectCacheDir, final double percentageOfDisk) {
this(projectCacheDir, percentageOfDisk, DEFAULT_THROTTLE_PERCENTAGE);
}
public ProjectCacheCleaner(final File projectCacheDir, final double percentageOfDisk,
final double throttlePercentage) {
Preconditions.checkNotNull(projectCacheDir);
Preconditions.checkArgument(projectCacheDir.exists());
Preconditions.checkArgument(percentageOfDisk > 0 && percentageOfDisk <= 1);
this.projectCacheDir = projectCacheDir;
this.percentageOfDisk = percentageOfDisk;
this.throttlePercentage = throttlePercentage;
log.info("ProjectCacheCleaner constructor called. ProjectCacheDir = {}, thresh-hold = {} %, throttle at {} %",
projectCacheDir.toPath(), this.percentageOfDisk, this.throttlePercentage);
emptyQCond = barrier.newCondition();
deletionService = Executors.newFixedThreadPool(CLEANING_SERVICE_THREAD_NUM);
}
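  // Usage sketch (illustrative numbers): new ProjectCacheCleaner(cacheDir, 0.8) allows the
  // cache to grow to 80% of (usable partition space + current cache size) before LRU eviction
  // starts, and blocks incoming requests past the default 92% throttle watermark.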
/**
   * Get metadata from the OS for the underlying path. The lastAccessTime is fetched from the OS
   * regardless of whether the given project already exists, but the space calculation for a
   * project directory is only performed once.
*
* @param project path for the project cache. Project filepath encodes projectID & version
* within the filename
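   *                e.g. a directory named "123.4" is parsed as projectId = 123, version = 4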
*
* @return OS Metadata for the given path
*/
private ProjectDirectoryMetadata fetchProjectMetadata(final Path project) {
ProjectDirectoryMetadata projectDirectoryMetadata = this.cachedProjects.get(project);
try {
if (projectDirectoryMetadata == null) {
final String fileName = project.getFileName().toString();
final int projectId = Integer.parseInt(fileName.split("\\.")[0]);
final int versionNum = Integer.parseInt(fileName.split("\\.")[1]);
projectDirectoryMetadata = new ProjectDirectoryMetadata(projectId, versionNum, project.toFile());
/*
* Calculate used-space (Equivalent of du command) only if the metadata for
* this project was never fetched before. This optimization is important as
* recursive space calculation is a very expensive operation.
*/
projectDirectoryMetadata.setDirSizeInByte(
FlowPreparer.calculateDirSizeAndSave(projectDirectoryMetadata.getInstalledDir()));
}
projectDirectoryMetadata.setLastAccessTime(
Files.getLastModifiedTime(Paths.get(projectDirectoryMetadata.getInstalledDir().toString(),
FlowPreparer.PROJECT_DIR_SIZE_FILE_NAME)));
} catch (final Exception e) {
log.warn("Error while loading project dir metadata for project {}",
project.getFileName(), e);
}
return projectDirectoryMetadata;
}
/**
   * Browse the cache root directory to fetch all valid project directories. If a project
   * already exists in the cache, its OS metadata is not re-fetched.
*/
private void loadAllProjects() {
final List<Path> projects = new ArrayList<>();
for (final File project : Objects
.requireNonNull(this.projectCacheDir.listFiles(new FilenameFilter() {
String pattern = "[0-9]+\\.[0-9]+";
@Override
public boolean accept(final File dir, final String name) {
return name.matches(this.pattern);
}
}))) {
if (project.exists() && project.isDirectory() &&
!projectsUnderDeletion.containsKey(project.toPath())) {
ProjectDirectoryMetadata projectDirectoryMetadata = fetchProjectMetadata(project.toPath());
if (projectDirectoryMetadata != null) {
cachedProjects.put(project.toPath(), projectDirectoryMetadata);
}
}
} // end of for loop
}
/**
* @return sum of the size of all project dirs
*/
private long getProjectDirsTotalSizeInBytes() {
long totalSizeInBytes = 0;
for (ProjectDirectoryMetadata metadata : cachedProjects.values()) {
totalSizeInBytes += metadata.getDirSizeInByte();
}
return totalSizeInBytes;
}
private void addToDeletionQueue(final File toDelete) {
try {
barrier.lock();
projectsUnderDeletion.put(toDelete.toPath(), toDelete);
} finally {
barrier.unlock();
}
}
private void removeFromDeletionQueue(final Path toDelete) {
try {
barrier.lock();
projectsUnderDeletion.remove(toDelete);
emptyQCond.signal();
} finally {
barrier.unlock();
}
}
/**
* Submit a project directory for deletion
*
* @param toDelete project dir for deletion
*/
@SuppressWarnings("FutureReturnValueIgnored")
private void submitProjectForDeletion(final File toDelete) {
addToDeletionQueue(toDelete);
deletionService.submit(() -> {
log.info("Deleting project dir {} from project cache to free up space", toDelete);
final long start = System.currentTimeMillis();
FileIOUtils.deleteDirectorySilently(toDelete);
log.info("Deleting project dir {} completed in {} msec(s)", toDelete, System.currentTimeMillis() - start);
removeFromDeletionQueue(toDelete.toPath());
});
}
/**
*
* Delete least recently used projects to free up space
*
* @param sizeToFreeInBytes space to free up
*/
private void deleteLeastRecentlyUsedProjects(long sizeToFreeInBytes) {
final List<ProjectDirectoryMetadata> lruList = new ArrayList<>(cachedProjects.values());
lruList.sort(Comparator.comparing(ProjectDirectoryMetadata::getLastAccessTime));
for (ProjectDirectoryMetadata lruEntry : lruList) {
if (sizeToFreeInBytes > 0) {
if (lruEntry.getInstalledDir() != null) {
cachedProjects.remove(lruEntry.getInstalledDir().toPath());
submitProjectForDeletion(lruEntry.getInstalledDir());
sizeToFreeInBytes -= lruEntry.getDirSizeInByte();
}
} else {
break;
}
}
}
private long bytesToMB(final long bytes) {
return bytes / (1024 * 1024);
}
/**
*
* This method will block until all active cleanup threads finish deleting submitted
* cleanup jobs.
*/
@VisibleForTesting
void finishPendingCleanup() {
final long start = System.currentTimeMillis();
try {
this.barrier.lock();
while (!projectsUnderDeletion.isEmpty()) {
log.info("{} entries left in the cache directory deletion Q. Waiting for the cleanup to finish",
this.projectsUnderDeletion.size());
this.emptyQCond.await(10, TimeUnit.SECONDS);
}
log.info("Took {} ms to complete ongoing cache cleanup.", (System.currentTimeMillis() - start));
    } catch (final InterruptedException e) {
      log.warn("Interrupted while waiting for cache cleanup to finish", e);
      Thread.currentThread().interrupt();
} finally {
this.barrier.unlock();
}
}
/**
   * Deletes least recently accessed project dirs when there's no room to accommodate a new project.
*
* The logic:
* 1. Calculates the total dynamic size available for the project cache.
* This = (Usable space left in the disk partition + Space currently occupied by the project cache).
* 2. Calculates high water mark & throttle water marks based on the above number.
* 3. If the occupied bytes > high water mark, lazy (Non-blocking) LRU eviction kicks in
* 4. If the occupied bytes > throttle water mark, the method will block until LRU eviction is complete.
* In each case, LRU eviction attempts to keep the occupied space below high water mark.
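   * Worked example (illustrative numbers): usable partition space = 40 GB and current cache
   * size = 60 GB give a capacity of 100 GB; with percentageOfDisk = 0.8 the high watermark is
   * 80 GB and with throttlePercentage = 0.92 the throttle watermark is 92 GB. A projected
   * cache size of 85 GB triggers lazy eviction of ~5 GB; a projected size of 95 GB triggers
   * eviction of ~15 GB and additionally blocks until the cleanup finishes.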
*
* @param newProjectSizeInBytes space in bytes the new project will add to the existing cache
*/
public void deleteProjectDirsIfNecessary(final long newProjectSizeInBytes) {
final long cachePartitionSize = this.projectCacheDir.getTotalSpace();
final long availablePartitionSize = this.projectCacheDir.getUsableSpace();
final long start = System.currentTimeMillis();
loadAllProjects();
log.info("Loading {} project dirs metadata completed in {} msecs",
cachedProjects.size(), System.currentTimeMillis() - start);
final long currentCacheSize = getProjectDirsTotalSizeInBytes();
final long projectCacheDirCapacity = currentCacheSize + availablePartitionSize;
boolean throttleAfterDeletion = false;
final long highWatermark = (long) (projectCacheDirCapacity * this.percentageOfDisk);
final long throttleWatermark = (long) (projectCacheDirCapacity * this.throttlePercentage);
long projectedCacheSize = currentCacheSize + newProjectSizeInBytes;
log.info("Partition = {} MB, Total Capacity = {} MB, Cache Size = {} MB, Projected Size = {} MB",
bytesToMB(cachePartitionSize),
bytesToMB(projectCacheDirCapacity),
bytesToMB(currentCacheSize),
bytesToMB(projectedCacheSize));
log.info("High Watermark = {} MB, Throttle Watermark = {} MB",
bytesToMB(highWatermark),
bytesToMB(throttleWatermark));
if (projectedCacheSize >= throttleWatermark) {
throttleAfterDeletion = true;
}
if (projectedCacheSize >= highWatermark) {
log.info("Projected cache size exceeds High Watermark. LRU Eviction will kick in");
deleteLeastRecentlyUsedProjects(projectedCacheSize - highWatermark);
}
if (throttleAfterDeletion) {
/*
* Block till already submitted cleanup is done.
*/
log.info("Throttle Watermark was hit. Blocking till LRU eviction is complete.");
finishPendingCleanup();
}
}
/**
*
   * @return the current state of the cleaner service
*/
public String queryState() {
if (projectsUnderDeletion.isEmpty()) {
return STATE_AVAILABLE;
}
return STATE_CLEANING;
}
/**
* Makes sure the Cache deletion process cleanly terminates so the possibility of unclean
* cache directories is eliminated.
*/
public void shutdown() {
try {
new ExecutorServiceUtils().gracefulShutdown(deletionService, Duration.ofDays(1));
} catch (final InterruptedException e) {
log.warn("Error when deleting files", e);
}
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban/execapp/ProjectDirectoryMetadata.java
|
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.execapp;
import static com.google.common.base.Preconditions.checkArgument;
import java.io.File;
import java.nio.file.attribute.FileTime;
class ProjectDirectoryMetadata {
private final int projectId;
private final int version;
private File installedDir;
private Long dirSizeInByte;
private FileTime lastAccessTime;
ProjectDirectoryMetadata(final int projectId, final int version) {
checkArgument(projectId > 0);
checkArgument(version > 0);
this.projectId = projectId;
this.version = version;
}
ProjectDirectoryMetadata(final int projectId, final int version, final File installedDir) {
this(projectId, version);
this.installedDir = installedDir;
}
Long getDirSizeInByte() {
return this.dirSizeInByte;
}
void setDirSizeInByte(final Long dirSize) {
this.dirSizeInByte = dirSize;
}
int getProjectId() {
return this.projectId;
}
int getVersion() {
return this.version;
}
File getInstalledDir() {
return this.installedDir;
}
void setInstalledDir(final File installedDir) {
this.installedDir = installedDir;
}
@Override
public String toString() {
return "ProjectVersion{" +
"projectId=" + this.projectId +
", version=" + this.version +
", installedDir=" + this.installedDir +
", dirSizeInByte=" + this.dirSizeInByte +
", lastAccessTime=" + this.lastAccessTime +
'}';
}
FileTime getLastAccessTime() {
return this.lastAccessTime;
}
void setLastAccessTime(final FileTime lastAccessTime) {
this.lastAccessTime = lastAccessTime;
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban/execapp/ServerStatisticsServlet.java
|
/*
* Copyright 2015 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.execapp;
import azkaban.executor.ExecutorInfo;
import azkaban.utils.JSONUtils;
import azkaban.utils.Utils;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.log4j.Logger;
public class ServerStatisticsServlet extends HttpServlet {
private static final long serialVersionUID = 1L;
private static final int cacheTimeInMilliseconds = 1000;
private static final Logger logger = Logger.getLogger(ServerStatisticsServlet.class);
private static final String noCacheParamName = "nocache";
private static final boolean exists_Bash = new File("/bin/bash").exists();
private static final boolean exists_Cat = new File("/bin/cat").exists();
private static final boolean exists_Grep = new File("/bin/grep").exists();
private static final boolean exists_Meminfo = new File("/proc/meminfo").exists();
private static final boolean exists_LoadAvg = new File("/proc/loadavg").exists();
protected static long lastRefreshedTime = 0;
protected static ExecutorInfo cachedstats = null;
/**
* @deprecated GET available for seamless upgrade. azkaban-web now uses POST.
*/
@Deprecated
@Override
protected void doGet(final HttpServletRequest req, final HttpServletResponse resp)
throws ServletException, IOException {
doPost(req, resp);
}
/**
* Handle all requests to Statistics Servlet {@inheritDoc}
*/
@Override
protected void doPost(final HttpServletRequest req, final HttpServletResponse resp)
throws ServletException, IOException {
final boolean noCache = null != req && Boolean.valueOf(req.getParameter(noCacheParamName));
if (noCache || System.currentTimeMillis() - lastRefreshedTime > cacheTimeInMilliseconds) {
this.populateStatistics(noCache);
}
JSONUtils.toJSON(cachedstats, resp.getOutputStream(), true);
}
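  // Note: results are served from a static cache refreshed at most once per second;
  // sending the request parameter "nocache=true" forces a refresh (see doPost above).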
/**
* fill the result set with the percent of the remaining system memory on the server.
*
   * @param stats reference to the result container which contains all the results; this specific
   * method only works on the properties "remainingMemory" and "remainingMemoryPercent".
   *
   * NOTE: a double value is used to represent the remaining memory percentage; a return value of
   * '55.6' means 55.6%
*/
protected void fillRemainingMemoryPercent(final ExecutorInfo stats) {
if (exists_Bash && exists_Cat && exists_Grep && exists_Meminfo) {
try {
final ArrayList<String> output = Utils.runProcess("/bin/bash", "-c",
"/bin/cat /proc/meminfo | grep -E \"^MemTotal:|^MemFree:|^Buffers:|^Cached:|^SwapCached:\"");
long totalMemory = 0;
long totalFreeMemory = 0;
Long parsedResult = (long) 0;
// process the output from bash call.
        // we expect the result from the bash call to be something like the following -
// MemTotal: 65894264 kB
// MemFree: 57753844 kB
// Buffers: 305552 kB
// Cached: 3802432 kB
// SwapCached: 0 kB
// Note : total free memory = freeMemory + cached + buffers + swapCached
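        // e.g. with the sample output above: 57753844 + 3802432 + 305552 + 0
        // = 61861828 kB free out of 65894264 kB total (~93.9% remaining)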
// TODO : think about merging the logic in systemMemoryInfo as the logic is similar
if (output.size() == 5) {
for (final String result : output) {
// find the total memory and value the variable.
parsedResult = extractMemoryInfo("MemTotal", result);
if (null != parsedResult) {
totalMemory = parsedResult;
continue;
}
// find the free memory.
parsedResult = extractMemoryInfo("MemFree", result);
if (null != parsedResult) {
totalFreeMemory += parsedResult;
continue;
}
// find the Buffers.
parsedResult = extractMemoryInfo("Buffers", result);
if (null != parsedResult) {
totalFreeMemory += parsedResult;
continue;
}
            // find the SwapCached.
parsedResult = extractMemoryInfo("SwapCached", result);
if (null != parsedResult) {
totalFreeMemory += parsedResult;
continue;
}
// find the Cached.
parsedResult = extractMemoryInfo("Cached", result);
if (null != parsedResult) {
totalFreeMemory += parsedResult;
continue;
}
}
} else {
logger.error(
"failed to get total/free memory info as the bash call returned invalid result."
+ String.format(" Output from the bash call - %s ", output.toString()));
}
        // the number from the proc file is in KB; we want MB, so we divide by 1024.
stats.setRemainingMemoryInMB(totalFreeMemory / 1024);
stats.setRemainingMemoryPercent(
totalMemory == 0 ? 0 : ((double) totalFreeMemory / (double) totalMemory) * 100);
} catch (final Exception ex) {
logger.error("failed fetch system memory info "
+ "as exception is captured when fetching result from bash call. Ex -" + ex
.getMessage());
}
} else {
logger.error(
"failed fetch system memory info, one or more files from the following list are missing - "
+ "'/bin/bash'," + "'/bin/cat'," + "'/proc/loadavg'");
}
}
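  // e.g. extractMemoryInfo("MemFree", "MemFree:  57753844 kB") returns 57753844;
  // returns null when the line does not start with the given field name.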
private Long extractMemoryInfo(final String field, final String result) {
Long returnResult = null;
if (null != result && null != field && result.matches(String.format("^%s:.*", field))
&& result.split("\\s+").length > 2) {
try {
returnResult = Long.parseLong(result.split("\\s+")[1]);
logger.debug(field + ":" + returnResult);
} catch (final NumberFormatException e) {
returnResult = 0L;
logger.error(String.format("yielding 0 for %s as output is invalid - %s", field, result));
}
}
return returnResult;
}
/**
* call the data providers to fill the returning data container for statistics data. This function
   * refreshes the static cached copy of the data if necessary.
*/
protected synchronized void populateStatistics(final boolean noCache) {
//check again before starting the work.
if (noCache || System.currentTimeMillis() - lastRefreshedTime > cacheTimeInMilliseconds) {
final ExecutorInfo stats = new ExecutorInfo();
fillRemainingMemoryPercent(stats);
fillRemainingFlowCapacityAndLastDispatchedTime(stats);
fillCpuUsage(stats);
cachedstats = stats;
lastRefreshedTime = System.currentTimeMillis();
}
}
/**
   * fill the result set with the remaining flow capacity.
*
* @param stats reference to the result container which contains all the results, this specific
* method will only work on the property "remainingFlowCapacity".
*/
protected void fillRemainingFlowCapacityAndLastDispatchedTime(final ExecutorInfo stats) {
final AzkabanExecutorServer server = AzkabanExecutorServer.getApp();
if (server != null) {
      final FlowRunnerManager runnerMgr = server.getFlowRunnerManager();
final int assignedFlows = runnerMgr.getNumRunningFlows() + runnerMgr.getNumQueuedFlows();
stats.setRemainingFlowCapacity(runnerMgr.getMaxNumRunningFlows() - assignedFlows);
stats.setNumberOfAssignedFlows(assignedFlows);
stats.setLastDispatchedTime(runnerMgr.getLastFlowSubmittedTime());
} else {
logger.error("failed to get data for remaining flow capacity or LastDispatchedTime"
+ " as the AzkabanExecutorServer has yet been initialized.");
}
}
/**
* <pre>
   * fill the result set with the CPU usage.
   * Note : As the 'top' bash call doesn't yield an accurate result for the system load,
   * the implementation has been changed to read from "/proc/loadavg", which keeps
   * the moving average of the system load; we pull the average for the most recent 1 min.
* </pre>
*
* @param stats reference to the result container which contains all the results, this specific
* method will only work on the property "cpuUsage".
*/
protected void fillCpuUsage(final ExecutorInfo stats) {
if (exists_Bash && exists_Cat && exists_LoadAvg) {
try {
final ArrayList<String> output = Utils
.runProcess("/bin/bash", "-c", "/bin/cat /proc/loadavg");
// process the output from bash call.
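        // /proc/loadavg looks like e.g. "0.25 0.30 0.28 1/234 5678" (illustrative values);
        // the first token is the 1-minute load average.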
if (output.size() > 0) {
          final String[] splitResult = output.get(0).split("\\s+");
          double cpuUsage = 0.0;
          try {
            cpuUsage = Double.parseDouble(splitResult[0]);
} catch (final NumberFormatException e) {
logger.error("yielding 0.0 for CPU usage as output is invalid -" + output.get(0));
}
logger.info("System load : " + cpuUsage);
stats.setCpuUpsage(cpuUsage);
}
} catch (final Exception ex) {
logger.error("failed fetch system load info "
+ "as exception is captured when fetching result from bash call. Ex -" + ex
.getMessage());
}
} else {
logger.error(
"failed fetch system load info, one or more files from the following list are missing - "
+ "'/bin/bash'," + "'/bin/cat'," + "'/proc/loadavg'");
}
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban/execapp/StatsServlet.java
|
/*
* Copyright 2014 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.execapp;
import azkaban.executor.ConnectorParams;
import azkaban.metric.IMetric;
import azkaban.metric.IMetricEmitter;
import azkaban.metric.MetricReportManager;
import azkaban.metric.TimeBasedReportingMetric;
import azkaban.metric.inmemoryemitter.InMemoryHistoryNode;
import azkaban.metric.inmemoryemitter.InMemoryMetricEmitter;
import azkaban.server.HttpRequestUtils;
import azkaban.utils.JSONUtils;
import java.io.IOException;
import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.log4j.Logger;
/**
* Servlet to communicate with Azkaban exec server This servlet get requests from stats servlet in
* Azkaban Web server
*/
public class StatsServlet extends HttpServlet implements ConnectorParams {
private static final long serialVersionUID = 2L;
private static final Logger logger = Logger.getLogger(StatsServlet.class);
public boolean hasParam(final HttpServletRequest request, final String param) {
return HttpRequestUtils.hasParam(request, param);
}
public String getParam(final HttpServletRequest request, final String name)
throws ServletException {
return HttpRequestUtils.getParam(request, name);
}
public Boolean getBooleanParam(final HttpServletRequest request, final String name)
throws ServletException {
return HttpRequestUtils.getBooleanParam(request, name);
}
public long getLongParam(final HttpServletRequest request, final String name)
throws ServletException {
return HttpRequestUtils.getLongParam(request, name);
}
/**
* @deprecated GET available for seamless upgrade. azkaban-web now uses POST.
*/
@Deprecated
@Override
protected void doGet(final HttpServletRequest req, final HttpServletResponse resp)
throws ServletException, IOException {
doPost(req, resp);
}
/**
* Handle all requests to Stats Servlet {@inheritDoc}
*/
@Override
protected void doPost(final HttpServletRequest req, final HttpServletResponse resp)
throws ServletException, IOException {
final Map<String, Object> ret = new HashMap<>();
if (hasParam(req, ACTION_PARAM)) {
final String action = getParam(req, ACTION_PARAM);
if (action.equals(STATS_SET_REPORTINGINTERVAL)) {
handleChangeMetricInterval(req, ret);
} else if (action.equals(STATS_SET_CLEANINGINTERVAL)) {
handleChangeCleaningInterval(req, ret);
} else if (action.equals(STATS_SET_MAXREPORTERPOINTS)) {
handleChangeEmitterPoints(req, ret);
} else if (action.equals(STATS_GET_ALLMETRICSNAME)) {
        handleGetAllMetricsName(req, ret);
} else if (action.equals(STATS_GET_METRICHISTORY)) {
handleGetMetricHistory(req, ret);
} else if (action.equals(STATS_SET_ENABLEMETRICS)) {
handleChangeManagerStatusRequest(req, ret, true);
} else if (action.equals(STATS_SET_DISABLEMETRICS)) {
handleChangeManagerStatusRequest(req, ret, false);
} else {
ret.put(RESPONSE_ERROR, "Invalid action");
}
}
JSONUtils.toJSON(ret, resp.getOutputStream(), true);
}
/**
   * Enable or disable the metric manager. Disabling will also purge all data from all metric emitters.
*/
private void handleChangeManagerStatusRequest(final HttpServletRequest req,
final Map<String, Object> ret, final boolean enableMetricManager) {
try {
logger.info("Updating metric manager status");
if ((enableMetricManager && MetricReportManager.isInstantiated())
|| MetricReportManager.isAvailable()) {
final MetricReportManager metricManager = MetricReportManager.getInstance();
if (enableMetricManager) {
metricManager.enableManager();
} else {
metricManager.disableManager();
}
ret.put(STATUS_PARAM, RESPONSE_SUCCESS);
} else {
ret.put(RESPONSE_ERROR, "MetricManager is not available");
}
} catch (final Exception e) {
logger.error(e);
ret.put(RESPONSE_ERROR, e.getMessage());
}
}
/**
* Update number of display snapshots for /stats graphs
*/
private void handleChangeEmitterPoints(final HttpServletRequest req,
final Map<String, Object> ret) {
try {
final long numInstance = getLongParam(req, STATS_MAP_EMITTERNUMINSTANCES);
if (MetricReportManager.isAvailable()) {
final MetricReportManager metricManager = MetricReportManager.getInstance();
final InMemoryMetricEmitter memoryEmitter =
extractInMemoryMetricEmitter(metricManager);
memoryEmitter.setReportingInstances(numInstance);
ret.put(STATUS_PARAM, RESPONSE_SUCCESS);
} else {
ret.put(RESPONSE_ERROR, "MetricManager is not available");
}
} catch (final Exception e) {
logger.error(e);
ret.put(RESPONSE_ERROR, e.getMessage());
}
}
/**
* Update InMemoryMetricEmitter interval to maintain metric snapshots
*/
private void handleChangeCleaningInterval(final HttpServletRequest req,
final Map<String, Object> ret) {
try {
final long newInterval = getLongParam(req, STATS_MAP_CLEANINGINTERVAL);
if (MetricReportManager.isAvailable()) {
final MetricReportManager metricManager = MetricReportManager.getInstance();
final InMemoryMetricEmitter memoryEmitter =
extractInMemoryMetricEmitter(metricManager);
memoryEmitter.setReportingInterval(newInterval);
ret.put(STATUS_PARAM, RESPONSE_SUCCESS);
} else {
ret.put(RESPONSE_ERROR, "MetricManager is not available");
}
} catch (final Exception e) {
logger.error(e);
ret.put(RESPONSE_ERROR, e.getMessage());
}
}
/**
* Get metric snapshots for a metric and date specification
*/
private void handleGetMetricHistory(final HttpServletRequest req,
final Map<String, Object> ret) throws ServletException {
if (MetricReportManager.isAvailable()) {
final MetricReportManager metricManager = MetricReportManager.getInstance();
final InMemoryMetricEmitter memoryEmitter =
extractInMemoryMetricEmitter(metricManager);
// if we have a memory emitter
if (memoryEmitter != null) {
try {
final List<InMemoryHistoryNode> result =
memoryEmitter.getMetrics(
getParam(req, STATS_MAP_METRICNAMEPARAM),
parseDate(getParam(req, STATS_MAP_STARTDATE)),
parseDate(getParam(req, STATS_MAP_ENDDATE)),
getBooleanParam(req, STATS_MAP_METRICRETRIEVALMODE));
if (result != null && result.size() > 0) {
ret.put("data", result);
} else {
ret.put(RESPONSE_ERROR, "No metric stats available");
}
} catch (final ParseException ex) {
ret.put(RESPONSE_ERROR, "Invalid Date filter");
}
} else {
ret.put(RESPONSE_ERROR, "InMemoryMetricEmitter not instantiated");
}
} else {
ret.put(RESPONSE_ERROR, "MetricReportManager is not available");
}
}
/**
   * Get the InMemoryMetricEmitter if available, else null
*/
private InMemoryMetricEmitter extractInMemoryMetricEmitter(
final MetricReportManager metricManager) {
InMemoryMetricEmitter memoryEmitter = null;
for (final IMetricEmitter emitter : metricManager.getMetricEmitters()) {
if (emitter instanceof InMemoryMetricEmitter) {
memoryEmitter = (InMemoryMetricEmitter) emitter;
break;
}
}
return memoryEmitter;
}
/**
   * Get the names of all metrics tracked by the metric manager
*/
  private void handleGetAllMetricsName(final HttpServletRequest req,
final Map<String, Object> ret) {
if (MetricReportManager.isAvailable()) {
final MetricReportManager metricManager = MetricReportManager.getInstance();
final List<IMetric<?>> result = metricManager.getAllMetrics();
if (result.size() == 0) {
ret.put(RESPONSE_ERROR, "No Metric being tracked");
} else {
final List<String> metricNames = new LinkedList<>();
for (final IMetric<?> metric : result) {
metricNames.add(metric.getName());
}
ret.put("data", metricNames);
}
} else {
ret.put(RESPONSE_ERROR, "MetricReportManager is not available");
}
}
/**
   * Update the tracking interval for a given metric
*/
private void handleChangeMetricInterval(final HttpServletRequest req,
final Map<String, Object> ret) throws ServletException {
try {
final String metricName = getParam(req, STATS_MAP_METRICNAMEPARAM);
final long newInterval = getLongParam(req, STATS_MAP_REPORTINGINTERVAL);
if (MetricReportManager.isAvailable()) {
final MetricReportManager metricManager = MetricReportManager.getInstance();
final TimeBasedReportingMetric<?> metric =
(TimeBasedReportingMetric<?>) metricManager
.getMetricFromName(metricName);
metric.updateInterval(newInterval);
ret.put(STATUS_PARAM, RESPONSE_SUCCESS);
} else {
ret.put(RESPONSE_ERROR, "MetricManager is not available");
}
} catch (final Exception e) {
logger.error(e);
ret.put(RESPONSE_ERROR, e.getMessage());
}
}
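  // parseDate expects RFC 1123-style timestamps, e.g. "Tue, 15 Jun 2021 10:30:00 PDT"
  // (hypothetical value; any date matching "EEE, dd MMM yyyy HH:mm:ss zzz" parses).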
private Date parseDate(final String date) throws ParseException {
final DateFormat format = new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss zzz");
return format.parse(date);
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban/execapp/Trigger.java
|
/*
* Copyright 2017 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.execapp;
import azkaban.execapp.action.KillExecutionAction;
import azkaban.trigger.Condition;
import azkaban.trigger.TriggerAction;
import java.util.List;
import org.apache.log4j.Logger;
public class Trigger implements Runnable {
private static final Logger logger = Logger.getLogger(azkaban.execapp.Trigger.class);
private final int execId;
  // condition to trigger actions (e.g. flow running longer than X mins)
private final Condition triggerCondition;
  // condition to expire this trigger (e.g. flow finishes before violating SLA)
private final Condition expireCondition;
private final List<TriggerAction> actions;
public Trigger(final int execId,
final Condition triggerCondition,
final Condition expireCondition,
final List<TriggerAction> actions) {
this.execId = execId;
this.triggerCondition = triggerCondition;
this.expireCondition = expireCondition;
this.actions = actions;
}
/**
* Perform the action if trigger condition is met
*/
@Override
public void run() {
logger.info("Running trigger for " + this);
if (isTriggerExpired()) {
logger.info(this + " expired");
return;
}
logger.info("Check if trigger condition met for " + this);
final boolean isTriggerConditionMet = this.triggerCondition.isMet();
logger.info("Trigger condition for execid = " + this.execId + " met? = " + isTriggerConditionMet);
if (isTriggerConditionMet) {
logger.info("Condition " + this.triggerCondition.getExpression() + " met");
for (final TriggerAction action : this.actions) {
try {
if (action instanceof KillExecutionAction) {
logger.info("Killing execution " + this.execId);
}
action.doAction();
} catch (final Exception e) {
logger.error("Failed to do action " + action.getDescription()
+ " for execution " + this.execId, e);
}
}
}
}
/**
   * Check if the trigger has expired.
*
* @return true if trigger is expired
*/
public boolean isTriggerExpired() {
return this.expireCondition.isMet();
}
@Override
public String toString() {
final StringBuilder actionsString = new StringBuilder();
for (final TriggerAction act : this.actions) {
actionsString.append(", ");
actionsString.append(act.getDescription());
}
return "Trigger for execution " + this.execId + " with trigger condition of "
+ this.triggerCondition.getExpression() + " and expire condition of "
+ this.expireCondition.getExpression() + actionsString;
}
}
|
0
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban
|
java-sources/ai/databand/azkaban/azkaban-exec-server/3.90.0/azkaban/execapp/TriggerManager.java
|
/*
* Copyright 2017 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.execapp;
import azkaban.execapp.action.KillExecutionAction;
import azkaban.execapp.action.KillJobAction;
import azkaban.sla.SlaOption;
import azkaban.trigger.Condition;
import azkaban.trigger.ConditionChecker;
import azkaban.trigger.TriggerAction;
import azkaban.trigger.builtin.SlaAlertAction;
import azkaban.trigger.builtin.SlaChecker;
import java.time.Duration;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.log4j.Logger;
@Singleton
public class TriggerManager {
private static final int SCHEDULED_THREAD_POOL_SIZE = 4;
private static final Logger logger = Logger.getLogger(TriggerManager.class);
private final ScheduledExecutorService scheduledService;
@Inject
public TriggerManager() {
this.scheduledService = Executors.newScheduledThreadPool(SCHEDULED_THREAD_POOL_SIZE);
}
private Condition createCondition(final SlaOption sla, final int execId, final String checkerName,
final String checkerMethod) {
final SlaChecker slaFailChecker = new SlaChecker(checkerName, sla, execId);
final Map<String, ConditionChecker> slaCheckers = new HashMap<>();
slaCheckers.put(slaFailChecker.getId(), slaFailChecker);
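    // The condition expression is "<checker id>.<checkerMethod>", e.g. it evaluates
    // isSlaFailed() on the registered checker when checkerMethod is "isSlaFailed()".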
return new Condition(slaCheckers, slaFailChecker.getId() + "." + checkerMethod);
}
private List<TriggerAction> createActions(final SlaOption sla, final int execId) {
final List<TriggerAction> actions = new ArrayList<>();
    if (sla.hasAlert()) {
      actions.add(new SlaAlertAction(SlaOption.ACTION_ALERT, sla, execId));
    }
    if (sla.hasKill()) {
      switch (sla.getType().getComponent()) {
case FLOW:
actions.add(new KillExecutionAction(SlaOption.ACTION_CANCEL_FLOW, execId));
break;
case JOB:
actions.add(new KillJobAction(SlaOption.ACTION_KILL_JOB, execId, sla.getJobName()));
break;
default:
logger.info("Unknown action type " + sla.getType().getComponent());
break;
}
}
return actions;
}
@SuppressWarnings("FutureReturnValueIgnored")
public void addTrigger(final int execId, final List<SlaOption> slaOptions) {
for (final SlaOption slaOption : slaOptions) {
final Condition triggerCond = createCondition(slaOption, execId, "slaFailChecker",
"isSlaFailed()");
      // if the whole flow finishes before violating the SLA, just expire the checker
      final Condition expireCond = createCondition(slaOption, execId, "slaPassChecker",
          "isSlaPassed()");
final List<TriggerAction> actions = createActions(slaOption, execId);
final Trigger trigger = new Trigger(execId, triggerCond, expireCond, actions);
final Duration duration = slaOption.getDuration();
final long durationInMillis = duration.toMillis();
logger.info("Adding sla trigger " + slaOption.toString() + " to execution " + execId
+ ", scheduled to trigger in " + durationInMillis / 1000 + " seconds");
this.scheduledService.schedule(trigger, durationInMillis, TimeUnit.MILLISECONDS);
}
}
public void shutdown() {
this.scheduledService.shutdownNow();
}
}
|