| index (int64) | repo_id (string) | file_path (string) | content (string) |
|---|---|---|---|
0
|
java-sources/ai/djl/serving/wlm/0.28.0/ai/djl/serving/wlm
|
java-sources/ai/djl/serving/wlm/0.28.0/ai/djl/serving/wlm/util/ModelServerListener.java
|
/*
* Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.serving.wlm.util;
import ai.djl.Device;
import ai.djl.serving.wlm.Adapter;
import ai.djl.serving.wlm.ModelInfo;
import java.nio.file.Path;
/** An interface that represents a model server event listener. */
public interface ModelServerListener {
/**
* Invoked when model downloading started.
*
* @param model the model
*/
void onModelDownloading(ModelInfo<?, ?> model);
/**
* Invoked when model downloading finished.
*
* @param model the model
* @param downloadPath the model download directory
*/
void onModelDownloaded(ModelInfo<?, ?> model, Path downloadPath);
/**
* Invoked when model conversion started.
*
* @param model the model
* @param type the conversion type
*/
void onModelConverting(ModelInfo<?, ?> model, String type);
/**
* Invoked when model conversion finished.
*
* @param model the model
* @param type the conversion type
*/
void onModelConverted(ModelInfo<?, ?> model, String type);
/**
* Invoked when model properties configuration finished.
*
* @param model the model
*/
void onModelConfigured(ModelInfo<?, ?> model);
/**
* Invoked when model loading started.
*
* @param model the model
* @param device the device to load the model
*/
void onModelLoading(ModelInfo<?, ?> model, Device device);
/**
* Invoked when model loading finished.
*
* @param model the model
*/
void onModelLoaded(ModelInfo<?, ?> model);
/**
* Invoked when adapter loading started.
*
* @param model the model
* @param adapterPath the adapter path
*/
void onAdapterLoading(ModelInfo<?, ?> model, Path adapterPath);
/**
* Invoked when adapter loading finished.
*
* @param model the model
* @param adapter the adapter
*/
void onAdapterLoaded(ModelInfo<?, ?> model, Adapter adapter);
}
|
0
|
java-sources/ai/djl/serving/wlm/0.28.0/ai/djl/serving/wlm
|
java-sources/ai/djl/serving/wlm/0.28.0/ai/djl/serving/wlm/util/ModelServerListenerAdapter.java
|
/*
* Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.serving.wlm.util;
import ai.djl.Device;
import ai.djl.serving.wlm.Adapter;
import ai.djl.serving.wlm.ModelInfo;
import java.nio.file.Path;
/** Base implementation of the {@link ModelServerListener} that does nothing. */
public abstract class ModelServerListenerAdapter implements ModelServerListener {
/** {@inheritDoc} */
@Override
public void onModelDownloading(ModelInfo<?, ?> model) {}
/** {@inheritDoc} */
@Override
public void onModelDownloaded(ModelInfo<?, ?> model, Path downloadPath) {}
/** {@inheritDoc} */
@Override
public void onModelConverting(ModelInfo<?, ?> model, String type) {}
/** {@inheritDoc} */
@Override
public void onModelConverted(ModelInfo<?, ?> model, String type) {}
/** {@inheritDoc} */
@Override
public void onModelConfigured(ModelInfo<?, ?> model) {}
/** {@inheritDoc} */
@Override
public void onModelLoading(ModelInfo<?, ?> model, Device device) {}
/** {@inheritDoc} */
@Override
public void onModelLoaded(ModelInfo<?, ?> model) {}
/** {@inheritDoc} */
@Override
public void onAdapterLoading(ModelInfo<?, ?> model, Path adapterPath) {}
/** {@inheritDoc} */
@Override
public void onAdapterLoaded(ModelInfo<?, ?> model, Adapter adapter) {}
}
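// Illustrative sketch (not part of the original source): a minimal listener that
// extends ModelServerListenerAdapter and overrides only the callbacks it needs,
// leaving the other events as no-ops inherited from the adapter. The class name
// and log output are assumptions for illustration.
class LoggingModelServerListener extends ModelServerListenerAdapter {

    /** {@inheritDoc} */
    @Override
    public void onModelLoading(ModelInfo<?, ?> model, Device device) {
        System.out.println("Loading model on " + device + ": " + model);
    }

    /** {@inheritDoc} */
    @Override
    public void onModelLoaded(ModelInfo<?, ?> model) {
        System.out.println("Model loaded: " + model);
    }
}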
|
0
|
java-sources/ai/djl/serving/wlm/0.28.0/ai/djl/serving/wlm
|
java-sources/ai/djl/serving/wlm/0.28.0/ai/djl/serving/wlm/util/WlmCapacityException.java
|
/*
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.serving.wlm.util;
/** Thrown to throttle a job when it is run but the job queue capacity is exceeded. */
public class WlmCapacityException extends WlmException {
static final long serialVersionUID = 1L;
/**
* Constructs a {@link WlmCapacityException} with the specified detail message.
*
* @param message The detail message (which is saved for later retrieval by the {@link
* #getMessage()} method)
*/
public WlmCapacityException(String message) {
super(message);
}
/**
* Constructs a {@link WlmCapacityException} with the specified detail message and cause.
*
* <p>Note that the detail message associated with {@code cause} is <i>not</i> automatically
* incorporated into this exception's detail message.
*
* @param message The detail message (which is saved for later retrieval by the {@link
* #getMessage()} method)
* @param cause The cause (which is saved for later retrieval by the {@link #getCause()}
* method). (A null value is permitted, and indicates that the cause is nonexistent or
* unknown.)
*/
public WlmCapacityException(String message, Throwable cause) {
super(message, cause);
}
}
|
0
|
java-sources/ai/djl/serving/wlm/0.28.0/ai/djl/serving/wlm
|
java-sources/ai/djl/serving/wlm/0.28.0/ai/djl/serving/wlm/util/WlmConfigManager.java
|
/*
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.serving.wlm.util;
/** This manages some configurations used by the {@link ai.djl.serving.wlm.WorkLoadManager}. */
public final class WlmConfigManager {
private int jobQueueSize = 1000;
private int maxIdleSeconds = 60;
private int batchSize = 1;
private int maxBatchDelayMillis = 100;
private int reservedMemoryMb = 500;
private String loadOnDevices;
private static final WlmConfigManager INSTANCE = new WlmConfigManager();
private WlmConfigManager() {}
/**
* Returns the singleton {@code WlmConfigManager} instance.
*
* @return the singleton {@code WlmConfigManager} instance
*/
public static WlmConfigManager getInstance() {
return INSTANCE;
}
/**
* Returns if debug is enabled.
*
* @return {@code true} if debug is enabled
*/
public boolean isDebug() {
return Boolean.getBoolean("ai.djl.serving.debug");
}
/**
* Returns the default job queue size.
*
* @return the default job queue size
*/
public int getJobQueueSize() {
return jobQueueSize;
}
/**
* Sets the default job queue size.
*
* @param jobQueueSize the new default job queue size
*/
public void setJobQueueSize(int jobQueueSize) {
this.jobQueueSize = jobQueueSize;
}
/**
* Returns the default max idle time for workers.
*
* @return the default max idle time
*/
public int getMaxIdleSeconds() {
return maxIdleSeconds;
}
/**
* Sets the default max idle time in seconds for workers.
*
* @param maxIdleSeconds the new default max idle time in seconds
*/
public void setMaxIdleSeconds(int maxIdleSeconds) {
this.maxIdleSeconds = maxIdleSeconds;
}
/**
* Returns the default batchSize for workers.
*
* @return the default batchSize
*/
public int getBatchSize() {
return batchSize;
}
/**
* Sets the default batchSize for workers.
*
* @param batchSize the new default batchSize
*/
public void setBatchSize(int batchSize) {
this.batchSize = batchSize;
}
/**
* Returns the default max batch delay in milliseconds for the working queue.
*
* @return the default max batch delay in milliseconds
*/
public int getMaxBatchDelayMillis() {
return maxBatchDelayMillis;
}
/**
* Sets the default max batch delay in milliseconds for the working queue.
*
* @param maxBatchDelayMillis the new default max batch delay in milliseconds
*/
public void setMaxBatchDelayMillis(int maxBatchDelayMillis) {
this.maxBatchDelayMillis = maxBatchDelayMillis;
}
/**
* Returns the default reserved memory in MB.
*
* @return the default reserved memory in MB
*/
public int getReservedMemoryMb() {
return reservedMemoryMb;
}
/**
* Sets the reserved memory in MB.
*
* @param reservedMemoryMb the reserved memory in MB
*/
public void setReservedMemoryMb(int reservedMemoryMb) {
this.reservedMemoryMb = reservedMemoryMb;
}
/**
* Returns the devices the model will be loaded on at startup.
*
* @return the devices the model will be loaded on at startup
*/
public String getLoadOnDevices() {
return loadOnDevices;
}
/**
* Sets the devices the model will be loaded on at startup.
*
* @param loadOnDevices the devices the model will be loaded on at startup
*/
public void setLoadOnDevices(String loadOnDevices) {
this.loadOnDevices = loadOnDevices;
}
}
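// Illustrative sketch (not part of the original source): adjusting the process-wide
// defaults once at startup through the singleton, before the WorkLoadManager starts
// serving jobs. The concrete values are examples, not recommendations.
class WlmConfigManagerExample {

    static void configureDefaults() {
        WlmConfigManager config = WlmConfigManager.getInstance();
        config.setJobQueueSize(2000);      // allow up to 2000 queued jobs
        config.setMaxBatchDelayMillis(50); // wait at most 50 ms to fill a batch
        config.setMaxIdleSeconds(120);     // shut down idle workers after 2 minutes
    }
}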
|
0
|
java-sources/ai/djl/serving/wlm/0.28.0/ai/djl/serving/wlm
|
java-sources/ai/djl/serving/wlm/0.28.0/ai/djl/serving/wlm/util/WlmException.java
|
/*
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.serving.wlm.util;
/** Thrown when an exception occurs inside the {@link ai.djl.serving.wlm.WorkLoadManager}. */
public class WlmException extends RuntimeException {
static final long serialVersionUID = 1L;
/**
* Constructs a {@link WlmException} with the specified detail message.
*
* @param message The detail message (which is saved for later retrieval by the {@link
* #getMessage()} method)
*/
public WlmException(String message) {
super(message);
}
/**
* Constructs a {@link WlmException} with the specified detail message and cause.
*
* <p>Note that the detail message associated with {@code cause} is <i>not</i> automatically
* incorporated into this exception's detail message.
*
* @param message The detail message (which is saved for later retrieval by the {@link
* #getMessage()} method)
* @param cause The cause (which is saved for later retrieval by the {@link #getCause()}
* method). (A null value is permitted, and indicates that the cause is nonexistent or
* unknown.)
*/
public WlmException(String message, Throwable cause) {
super(message, cause);
}
}
|
0
|
java-sources/ai/djl/serving/wlm/0.28.0/ai/djl/serving/wlm
|
java-sources/ai/djl/serving/wlm/0.28.0/ai/djl/serving/wlm/util/WlmOutOfMemoryException.java
|
/*
* Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.serving.wlm.util;
/** Thrown when there is not enough memory to load the model. */
public class WlmOutOfMemoryException extends WlmException {
static final long serialVersionUID = 1L;
/**
* Constructs a {@link WlmOutOfMemoryException} with the specified detail message.
*
* @param message the detail message
*/
public WlmOutOfMemoryException(String message) {
super(message);
}
}
|
0
|
java-sources/ai/djl/serving/wlm/0.28.0/ai/djl/serving/wlm
|
java-sources/ai/djl/serving/wlm/0.28.0/ai/djl/serving/wlm/util/WlmShutdownException.java
|
/*
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.serving.wlm.util;
/** Thrown when a job is run but all workers are shutdown. */
public class WlmShutdownException extends WlmException {
static final long serialVersionUID = 1L;
/**
* Constructs a {@link WlmShutdownException} with the specified detail message.
*
* @param message The detail message (which is saved for later retrieval by the {@link
* #getMessage()} method)
*/
public WlmShutdownException(String message) {
super(message);
}
/**
* Constructs a {@link WlmShutdownException} with the specified detail message and cause.
*
* <p>Note that the detail message associated with {@code cause} is <i>not</i> automatically
* incorporated into this exception's detail message.
*
* @param message The detail message (which is saved for later retrieval by the {@link
* #getMessage()} method)
* @param cause The cause (which is saved for later retrieval by the {@link #getCause()}
* method). (A null value is permitted, and indicates that the cause is nonexistent or
* unknown.)
*/
public WlmShutdownException(String message, Throwable cause) {
super(message, cause);
}
}
|
0
|
java-sources/ai/djl/serving/wlm/0.28.0/ai/djl/serving/wlm
|
java-sources/ai/djl/serving/wlm/0.28.0/ai/djl/serving/wlm/util/WorkerJob.java
|
/*
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.serving.wlm.util;
import ai.djl.serving.wlm.Job;
import java.util.concurrent.CompletableFuture;
/** A {@link Job} containing metadata from the {@link ai.djl.serving.wlm.WorkLoadManager}. */
public final class WorkerJob<I, O> {
private final Job<I, O> job;
private final CompletableFuture<O> future;
/**
* Constructs a new {@link WorkerJob}.
*
* @param job the job to execute
* @param future the future containing the job response
*/
public WorkerJob(Job<I, O> job, CompletableFuture<O> future) {
this.job = job;
this.future = future;
}
/**
* Returns the {@link Job}.
*
* @return the {@link Job}
*/
public Job<I, O> getJob() {
return job;
}
/**
* Returns the future for the job.
*
* @return the future for the job
*/
public CompletableFuture<O> getFuture() {
return future;
}
}
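// Illustrative sketch (not part of the original source): reacting to the completion
// of a WorkerJob's future. The workerJob value is assumed to come from the
// WorkLoadManager's queueing logic; only the getters shown above are used.
class WorkerJobExample {

    static <I, O> void awaitResult(WorkerJob<I, O> workerJob) {
        workerJob.getFuture()
                .whenComplete(
                        (output, throwable) -> {
                            if (throwable != null) {
                                System.err.println("Job failed: " + throwable.getMessage());
                            } else {
                                System.out.println("Job completed: " + workerJob.getJob());
                            }
                        });
    }
}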
|
0
|
java-sources/ai/djl/serving/wlm/0.28.0/ai/djl/serving/wlm
|
java-sources/ai/djl/serving/wlm/0.28.0/ai/djl/serving/wlm/util/package-info.java
|
/*
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
/** Contains utilities to support the {@link ai.djl.serving.wlm.WorkLoadManager}. */
package ai.djl.serving.wlm.util;
|
0
|
java-sources/ai/djl/spring/djl-spring-boot-starter-autoconfigure/0.26/ai/djl/spring
|
java-sources/ai/djl/spring/djl-spring-boot-starter-autoconfigure/0.26/ai/djl/spring/configuration/ApplicationType.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.spring.configuration;
import ai.djl.Application;
import ai.djl.Application.CV;
import ai.djl.Application.NLP;
public enum ApplicationType {
QUESTION_ANSWER(NLP.QUESTION_ANSWER),
TEXT_CLASSIFICATION(NLP.TEXT_CLASSIFICATION),
IMAGE_CLASSIFICATION(CV.IMAGE_CLASSIFICATION),
OBJECT_DETECTION(CV.OBJECT_DETECTION),
ACTION_RECOGNITION(CV.ACTION_RECOGNITION),
INSTANCE_SEGMENTATION(CV.INSTANCE_SEGMENTATION),
POSE_ESTIMATION(CV.POSE_ESTIMATION),
SEMANTIC_SEGMENTATION(CV.SEMANTIC_SEGMENTATION);
private Application application;
ApplicationType(Application application) {
this.application = application;
}
Application application() {
return application;
}
}
|
0
|
java-sources/ai/djl/spring/djl-spring-boot-starter-autoconfigure/0.26/ai/djl/spring
|
java-sources/ai/djl/spring/djl-spring-boot-starter-autoconfigure/0.26/ai/djl/spring/configuration/DjlAutoConfiguration.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.spring.configuration;
import java.io.IOException;
import java.util.function.Supplier;
import ai.djl.MalformedModelException;
import ai.djl.inference.Predictor;
import ai.djl.modality.cv.Image;
import ai.djl.modality.cv.output.DetectedObjects;
import ai.djl.repository.zoo.Criteria;
import ai.djl.repository.zoo.ModelNotFoundException;
import ai.djl.repository.zoo.ModelZoo;
import ai.djl.repository.zoo.ZooModel;
import ai.djl.training.util.ProgressBar;
import ai.djl.translate.TranslatorFactory;
import ai.djl.util.ClassLoaderUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.yaml.snakeyaml.DumperOptions;
import org.yaml.snakeyaml.Yaml;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.util.StringUtils;
@Configuration
@ConditionalOnMissingBean(ZooModel.class)
@EnableConfigurationProperties(DjlConfigurationProperties.class)
public class DjlAutoConfiguration {
private static final Logger LOG = LoggerFactory.getLogger(DjlAutoConfiguration.class);
@Autowired
private DjlConfigurationProperties properties;
@Bean
public ZooModel<?, ?> model() throws MalformedModelException, ModelNotFoundException, IOException {
var applicationType = properties.getApplicationType();
var filter = properties.getModelFilter();
var arguments = properties.getArguments();
var artifactId = properties.getModelArtifactId();
var inputClass = properties.getInputClass();
var urls = properties.getUrls();
var translatorFactory = properties.getTranslatorFactory();
if (inputClass == null) {
LOG.warn("Input class is not defined. Using default: BufferedImage");
inputClass = Image.class;
}
Class<?> outputClass = properties.getOutputClass();
if (outputClass == null) {
LOG.warn("Input class is not defined. Using default: DetectedObjects");
outputClass = DetectedObjects.class;
}
Criteria.Builder<?, ?> builder = Criteria.builder().setTypes(inputClass, outputClass);
if (applicationType != null) {
builder.optApplication(applicationType.application());
}
if (filter != null) {
builder.optFilters(filter);
}
if (artifactId != null) {
builder.optArtifactId(artifactId);
}
if (arguments != null) {
builder.optArguments(arguments);
}
if (translatorFactory != null) {
ClassLoader cl = ClassLoaderUtils.getContextClassLoader();
TranslatorFactory factory = ClassLoaderUtils.initClass(cl, TranslatorFactory.class, translatorFactory);
builder.optTranslatorFactory(factory);
}
if (urls != null && urls.length > 0) {
builder.optModelUrls(StringUtils.arrayToCommaDelimitedString(urls));
}
if (properties.isProgress()) {
builder.optProgress(new ProgressBar());
}
if (StringUtils.hasText(properties.getModelName())) {
builder.optModelName(properties.getModelName());
}
if (StringUtils.hasText(properties.getGroupId())) {
builder.optGroupId(properties.getGroupId());
}
if (StringUtils.hasText(properties.getEngine())) {
builder.optEngine(properties.getEngine());
}
try {
var zooModel = builder.build().loadModel();
LOG.info("Successfully loaded model {}", zooModel.getName());
return zooModel;
} catch (ModelNotFoundException ex) {
Yaml yaml = createYamlDumper();
LOG.error("Requested model was not found");
LOG.error("List of available models {}", yaml.dump(ModelZoo.listModels()));
throw ex;
}
}
private Yaml createYamlDumper() {
DumperOptions options = new DumperOptions();
options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK);
options.setPrettyFlow(true);
return new Yaml(options);
}
/**
* Expected to be used with try-with-resources. The provided predictor is {@link AutoCloseable}.
*
* @param model injected configured model
* @return provider of the predictor object
*/
@Bean
public Supplier<Predictor<?, ?>> predictorProvider(ZooModel<?, ?> model) {
return model::newPredictor;
}
}
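// Illustrative sketch (not part of the original source): consuming the auto-configured
// predictor supplier from another Spring bean. Each get() call creates a new Predictor,
// which is AutoCloseable and therefore suited to try-with-resources, as the Javadoc above
// notes. The ObjectDetectionService name and the Image/DetectedObjects types are
// assumptions matching the configuration defaults.
@org.springframework.stereotype.Service
class ObjectDetectionService {

    private final Supplier<Predictor<?, ?>> predictorProvider;

    ObjectDetectionService(Supplier<Predictor<?, ?>> predictorProvider) {
        this.predictorProvider = predictorProvider;
    }

    @SuppressWarnings("unchecked")
    DetectedObjects detect(Image image) throws ai.djl.translate.TranslateException {
        try (Predictor<Image, DetectedObjects> predictor =
                (Predictor<Image, DetectedObjects>) predictorProvider.get()) {
            return predictor.predict(image);
        }
    }
}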
|
0
|
java-sources/ai/djl/spring/djl-spring-boot-starter-autoconfigure/0.26/ai/djl/spring
|
java-sources/ai/djl/spring/djl-spring-boot-starter-autoconfigure/0.26/ai/djl/spring/configuration/DjlConfigurationProperties.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.spring.configuration;
import org.springframework.boot.context.properties.ConfigurationProperties;
import java.util.Map;
@ConfigurationProperties("djl")
public class DjlConfigurationProperties {
/**
* Defines the type of application for the model.
*
* <p>
* For more information on available applications, see
* https://github.com/deepjavalibrary/djl/blob/master/api/src/main/java/ai/djl/Application.java
*/
private ApplicationType applicationType;
/**
* Defines the Java data type used as input for inference. For example, {@link java.awt.image.BufferedImage} can be used when the input is an image.
*/
private Class<?> inputClass;
/**
* Defines the Java data type expected for inference output. {@link ai.djl.modality.cv.output.DetectedObjects} is a common output for classification and object detection.
*/
private Class<?> outputClass;
/**
* Defines the artifactId of the model to be loaded.
*/
private String modelArtifactId;
/**
* Model repository URLs. Multiple URLs may be supplied to search for models; a single URL can be used
* to load a specific model. May be specified as a comma-delimited field or as an array in the configuration file.
*/
private String[] urls;
/**
* Defines the translatorFactory for the model.
*/
private String translatorFactory;
/**
* Arguments that allow the user to override pre-process/post-process behavior.
*
* <p>
* The key/value pairs are model specific; check the specific {@code ModelLoader} class for details.
*
*/
private Map<String, Object> arguments;
/**
* Filters used to lookup a model from model zoo.
*
* <p>
* For more information on available filters that are currently part of the repository, see
* https://github.com/deepjavalibrary/djl/tree/master/model-zoo#how-to-find-a-pre-trained-model-in-the-model-zoo
*/
private Map<String, String> modelFilter;
/**
* Defines the engine used for inference. Supported engine names are listed at
* https://docs.djl.ai/docs/engine.html#supported-engines.
* Possible values are: MXNet, PyTorch, TensorFlow, TFLite, OnnxRuntime, PaddlePaddle, TensorRT.
*/
private String engine;
/**
* Defines the groupId of the model to be loaded.
*/
private String groupId;
/**
* Defines the modelName of the model to be loaded.
* Leave it empty if you want to load the latest version of the model.
* Use "saved_model" for TensorFlow saved models.
*/
private String modelName;
/**
* Defines whether to show progress bar when loading the model.
*/
private boolean progress = true;
public Map<String, String> getModelFilter() {
return modelFilter;
}
public void setModelFilter(Map<String, String> modelFilter) {
this.modelFilter = modelFilter;
}
public ApplicationType getApplicationType() {
return applicationType;
}
public void setApplicationType(ApplicationType applicationType) {
this.applicationType = applicationType;
}
public Class<?> getInputClass() {
return inputClass;
}
public void setInputClass(Class<?> inputClass) {
this.inputClass = inputClass;
}
public Class<?> getOutputClass() {
return outputClass;
}
public void setOutputClass(Class<?> outputClass) {
this.outputClass = outputClass;
}
public String getModelArtifactId() {
return modelArtifactId;
}
public void setModelArtifactId(String modelArtifactId) {
this.modelArtifactId = modelArtifactId;
}
public Map<String, Object> getArguments() {
return arguments;
}
public void setArguments(Map<String, Object> arguments) {
this.arguments = arguments;
}
public String[] getUrls() {
return urls;
}
public void setUrls(String[] urls) {
this.urls = urls;
}
public String getTranslatorFactory() {
return translatorFactory;
}
public void setTranslatorFactory(String translatorFactory) {
this.translatorFactory = translatorFactory;
}
public String getEngine() {
return engine;
}
public void setEngine(String engine) {
this.engine = engine;
}
public String getGroupId() {
return groupId;
}
public void setGroupId(String groupId) {
this.groupId = groupId;
}
public String getModelName() {
return modelName;
}
public void setModelName(String modelName) {
this.modelName = modelName;
}
public boolean isProgress() {
return progress;
}
public void setProgress(boolean progress) {
this.progress = progress;
}
}
|
0
|
java-sources/ai/djl/tablesaw/tablesaw/0.34.0/ai/djl
|
java-sources/ai/djl/tablesaw/tablesaw/0.34.0/ai/djl/tablesaw/TablesawDataset.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tablesaw;
import ai.djl.basicdataset.tabular.TabularDataset;
import ai.djl.util.Progress;
import tech.tablesaw.api.Row;
import tech.tablesaw.api.Table;
import tech.tablesaw.io.ReadOptions;
import java.util.Collections;
import java.util.List;
/** {@code TablesawDataset} represents a dataset that is stored in a .csv file. */
public class TablesawDataset extends TabularDataset {
protected ReadOptions readOptions;
protected Table table;
protected TablesawDataset(TablesawBuilder<?> builder) {
super(builder);
readOptions = builder.readOptions;
}
/** {@inheritDoc} */
@Override
public String getCell(long rowIndex, String featureName) {
Row row = table.row(Math.toIntExact(rowIndex));
return row.getString(featureName);
}
/** {@inheritDoc} */
@Override
protected long availableSize() {
return table.rowCount();
}
/** {@inheritDoc} */
@Override
public void prepare(Progress progress) {
table = Table.read().usingOptions(readOptions);
prepareFeaturizers();
}
/**
* Creates a builder to build a {@link TablesawDataset}.
*
* @return a new builder
*/
public static TablesawBuilder<?> builder() {
return new TablesawBuilder<>();
}
/**
* Returns the column names of the Tablesaw file.
*
* @return a list of column names
*/
public List<String> getColumnNames() {
if (table.isEmpty()) {
return Collections.emptyList();
}
return table.columnNames();
}
/** Used to build a {@link TablesawDataset}. */
public static class TablesawBuilder<T extends TablesawBuilder<T>>
extends TabularDataset.BaseBuilder<T> {
protected ReadOptions readOptions;
/** {@inheritDoc} */
@Override
@SuppressWarnings("unchecked")
protected T self() {
return (T) this;
}
/**
* Sets the reading options.
*
* @param readOptions the {@code ReadOptions}
* @return this builder
*/
public T setReadOptions(ReadOptions readOptions) {
this.readOptions = readOptions;
return self();
}
/**
* Builds the new {@link TablesawDataset}.
*
* @return the new {@link TablesawDataset}
*/
public TablesawDataset build() {
return new TablesawDataset(this);
}
}
}
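// Illustrative sketch (not part of the original source): building a TablesawDataset
// from a CSV file. CsvReadOptions comes from Tablesaw (tech.tablesaw.io.csv.CsvReadOptions);
// the file name and the "x"/"y" column names are placeholders, and addNumericFeature,
// addNumericLabel and setSampling are assumed to be inherited from the DJL tabular
// dataset builders.
class TablesawDatasetExample {

    static TablesawDataset buildFromCsv() {
        TablesawDataset dataset =
                TablesawDataset.builder()
                        .setReadOptions(
                                tech.tablesaw.io.csv.CsvReadOptions.builder("data.csv").build())
                        .addNumericFeature("x")
                        .addNumericLabel("y")
                        .setSampling(32, true)
                        .build();
        dataset.prepare(null); // reads the table and prepares the featurizers
        return dataset;
    }
}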
|
0
|
java-sources/ai/djl/tablesaw/tablesaw/0.34.0/ai/djl
|
java-sources/ai/djl/tablesaw/tablesaw/0.34.0/ai/djl/tablesaw/package-info.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
/** Contains classes to support tabular datasets in Tablesaw format. */
package ai.djl.tablesaw;
|
0
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow/engine/SavedModelBundle.java
|
/*
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tensorflow.engine;
import org.tensorflow.internal.c_api.TF_Graph;
import org.tensorflow.internal.c_api.TF_Operation;
import org.tensorflow.internal.c_api.TF_Session;
import org.tensorflow.internal.c_api.global.tensorflow;
import org.tensorflow.proto.CollectionDef;
import org.tensorflow.proto.MetaGraphDef;
import org.tensorflow.proto.SignatureDef;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
/** The wrapper class for native resources required for SavedModelBundle. */
public class SavedModelBundle implements AutoCloseable {
private static final String INIT_OP_SIGNATURE_KEY = "__saved_model_init_op";
private static final String MAIN_OP_COLLECTION_KEY = "saved_model_main_op";
private static final String LEGACY_INIT_OP_COLLECTION_KEY = "legacy_init_op";
private static final String TABLE_INITIALIZERS_COLLECTION_KEY = "table_initializer";
private TF_Graph graphHandle;
private TF_Session sessionHandle;
private MetaGraphDef metaGraphDef;
private TF_Operation[] targetOpHandles;
private AtomicBoolean closed;
public SavedModelBundle(
TF_Graph graphHandle, TF_Session sessionHandle, MetaGraphDef metaGraphDef) {
this.graphHandle = graphHandle;
this.sessionHandle = sessionHandle;
this.metaGraphDef = metaGraphDef;
closed = new AtomicBoolean(false);
Map<String, SignatureDef> functions = new ConcurrentHashMap<>();
metaGraphDef
.getSignatureDefMap()
.forEach(
(signatureName, signatureDef) -> {
if (!functions.containsKey(signatureName)) {
functions.put(signatureName, signatureDef);
}
});
List<TF_Operation> initOps = new ArrayList<>();
TF_Operation initOp = findInitOp(functions, metaGraphDef.getCollectionDefMap());
if (initOp != null) {
initOps.add(initOp);
}
if (metaGraphDef.containsCollectionDef(TABLE_INITIALIZERS_COLLECTION_KEY)) {
metaGraphDef
.getCollectionDefMap()
.get(TABLE_INITIALIZERS_COLLECTION_KEY)
.getNodeList()
.getValueList()
.forEach(
node -> {
initOps.add(tensorflow.TF_GraphOperationByName(graphHandle, node));
});
}
targetOpHandles = initOps.toArray(new TF_Operation[0]);
}
private TF_Operation findInitOp(
Map<String, SignatureDef> signatures, Map<String, CollectionDef> collections) {
SignatureDef initSig = signatures.get(INIT_OP_SIGNATURE_KEY);
if (initSig != null) {
String opName = initSig.getOutputsMap().get(INIT_OP_SIGNATURE_KEY).getName();
return tensorflow.TF_GraphOperationByName(graphHandle, opName);
}
CollectionDef initCollection;
if (collections.containsKey(MAIN_OP_COLLECTION_KEY)) {
initCollection = collections.get(MAIN_OP_COLLECTION_KEY);
} else {
initCollection = collections.get(LEGACY_INIT_OP_COLLECTION_KEY);
}
if (initCollection != null) {
CollectionDef.NodeList nodes = initCollection.getNodeList();
if (nodes.getValueCount() != 1) {
throw new IllegalArgumentException("Expected exactly one main op in saved model.");
}
String opName = nodes.getValue(0);
return tensorflow.TF_GraphOperationByName(graphHandle, opName);
}
return null;
}
/**
* Returns the graph handle.
*
* @return the graph handle
*/
public TF_Graph getGraph() {
return graphHandle;
}
/**
* Returns the session handle.
*
* @return the session handle
*/
public TF_Session getSession() {
return sessionHandle;
}
/**
* Returns the MetaGraphDef protocol buffer.
*
* @return the MetaGraphDef protocol buffer
*/
public MetaGraphDef getMetaGraphDef() {
return metaGraphDef;
}
TF_Operation[] getTargetOpHandles() {
return targetOpHandles;
}
/** {@inheritDoc} */
@Override
public void close() {
// to prevent double free
if (closed.getAndSet(true)) {
return;
}
if (graphHandle != null && !graphHandle.isNull()) {
graphHandle.close();
}
if (sessionHandle != null && !sessionHandle.isNull()) {
sessionHandle.close();
}
metaGraphDef = null;
}
}
|
0
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow/engine/TfDataType.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tensorflow.engine;
import ai.djl.ndarray.types.DataType;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/** Helper to convert between {@link DataType} and the TensorFlow internal DataTypes. */
public final class TfDataType {
private static Map<DataType, Integer> toTfMap = createToTfMap();
private static Map<Integer, DataType> fromTfMap = createFromTfMap();
private TfDataType() {}
private static Map<DataType, Integer> createToTfMap() {
Map<DataType, Integer> map = new ConcurrentHashMap<>();
map.put(DataType.FLOAT32, 1);
map.put(DataType.FLOAT64, 2);
map.put(DataType.INT32, 3);
map.put(DataType.INT64, 9);
map.put(DataType.UINT8, 4);
map.put(DataType.INT8, 6);
map.put(DataType.BOOLEAN, 10);
map.put(DataType.STRING, 7);
return map;
}
private static Map<Integer, DataType> createFromTfMap() {
Map<Integer, DataType> map = new ConcurrentHashMap<>();
map.put(1, DataType.FLOAT32);
map.put(2, DataType.FLOAT64);
map.put(3, DataType.INT32);
map.put(4, DataType.UINT8);
map.put(6, DataType.INT8);
map.put(7, DataType.STRING);
map.put(9, DataType.INT64);
map.put(10, DataType.BOOLEAN);
return map;
}
/**
* Converts a {@link DataType} into the corresponding TensorFlow type value.
*
* @param dataType the {@link DataType} to convert
* @return the converted TensorFlow type value
*/
public static int toTf(DataType dataType) {
Integer tfType = toTfMap.get(dataType);
if (tfType == null) {
throw new UnsupportedOperationException("Unsupported data type: " + dataType);
}
return tfType;
}
/**
* Converts a TensorFlow type value into a {@link DataType}.
*
* @param dataType the TensorFlow type value to convert
* @return the {@link DataType}
*/
public static DataType fromTf(int dataType) {
return fromTfMap.get(dataType);
}
}
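// Illustrative sketch (not part of the original source): round-tripping between DJL
// data types and the TensorFlow type codes using the mappings defined above.
class TfDataTypeExample {

    static void demo() {
        int tfFloat = TfDataType.toTf(DataType.FLOAT32); // 1, per the map above
        DataType djlType = TfDataType.fromTf(9);         // DataType.INT64
        System.out.println(tfFloat + " -> " + djlType);
    }
}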
|
0
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow/engine/TfEngine.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tensorflow.engine;
import ai.djl.Device;
import ai.djl.Model;
import ai.djl.engine.Engine;
import ai.djl.engine.EngineException;
import ai.djl.engine.StandardCapabilities;
import ai.djl.ndarray.NDManager;
import ai.djl.nn.SymbolBlock;
import ai.djl.tensorflow.engine.javacpp.JavacppUtils;
import ai.djl.tensorflow.engine.javacpp.LibUtils;
import org.bytedeco.javacpp.PointerScope;
import org.tensorflow.TensorFlow;
import org.tensorflow.internal.c_api.TFE_Context;
import org.tensorflow.internal.c_api.TF_DeviceList;
import org.tensorflow.internal.c_api.TF_Status;
import org.tensorflow.internal.c_api.global.tensorflow;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicReference;
/**
* The {@code TfEngine} is an implementation of the {@link Engine} based on the <a
* href="https://www.tensorflow.org/">Tensorflow Deep Learning Framework</a>.
*
* <p>To get an instance of the {@code TfEngine} when it is not the default Engine, call {@link
* Engine#getEngine(String)} with the Engine name "TensorFlow".
*/
public final class TfEngine extends Engine implements AutoCloseable {
public static final String ENGINE_NAME = "TensorFlow";
static final int RANK = 3;
private static AtomicReference<TFE_Context> eagerSessionHandle;
private TfEngine() {}
static TfEngine newInstance() {
try {
LibUtils.loadLibrary();
eagerSessionHandle =
new AtomicReference<>(
JavacppUtils.createEagerSession(
true, 2, JavacppUtils.getSessionConfig()));
// call a function from tensorflow-java package to
// load the native library right here
// if it throws exception, we can catch it here
TensorFlow.version();
return new TfEngine();
} catch (Throwable t) {
if (t.getMessage() != null && t.getMessage().contains("libstdc++")) {
// Does not mention .so to work with osx .dylib
String msg =
"There was an issue with your libstdc++ file required for the Tensorflow"
+ " native library.\n"
+ "It can be installed or upgraded through gcc by following the"
+ " instructions on the TensorFlow install page:"
+ " https://docs.djl.ai/master/engines/tensorflow/index.html#note";
throw new EngineException(msg, t);
}
throw new EngineException("Failed to load TensorFlow native library", t);
}
}
/** {@inheritDoc} */
@Override
public Model newModel(String name, Device device) {
return new TfModel(name, device);
}
/** {@inheritDoc} */
@Override
public SymbolBlock newSymbolBlock(NDManager manager) {
throw new UnsupportedOperationException("TensorFlow does not support empty SymbolBlock");
}
/** {@inheritDoc} */
@Override
public Engine getAlternativeEngine() {
return null;
}
/** {@inheritDoc} */
@Override
public String getEngineName() {
return ENGINE_NAME;
}
/** {@inheritDoc} */
@Override
public int getRank() {
return RANK;
}
/** {@inheritDoc} */
@Override
public String getVersion() {
return TensorFlow.version();
}
/** {@inheritDoc} */
@Override
@SuppressWarnings({"unchecked", "try"})
public boolean hasCapability(String capability) {
if (StandardCapabilities.MKL.equals(capability)) {
return true;
} else if (StandardCapabilities.CUDA.equals(capability)) {
TF_DeviceList deviceList = null;
try (PointerScope ignore = new PointerScope()) {
TF_Status status = tensorflow.TF_NewStatus();
deviceList = tensorflow.TFE_ContextListDevices(eagerSessionHandle.get(), status);
int deviceCount = tensorflow.TF_DeviceListCount(deviceList);
for (int i = 0; i < deviceCount; i++) {
if (tensorflow.TF_DeviceListName(deviceList, i, status)
.getString()
.toLowerCase()
.contains("gpu")) {
return true;
}
}
} finally {
// deviceList isn't registered with deallocator
// so it's not closed by PointerScope
// close it manually
synchronized (Objects.requireNonNull(deviceList)) {
if (!deviceList.isNull()) {
tensorflow.TF_DeleteDeviceList(deviceList);
}
}
}
return false;
}
return false;
}
/** {@inheritDoc} */
@Override
public NDManager newBaseManager() {
return TfNDManager.getSystemManager().newSubManager();
}
/** {@inheritDoc} */
@Override
public NDManager newBaseManager(Device device) {
return TfNDManager.getSystemManager().newSubManager(device);
}
TFE_Context getEagerSession() {
return eagerSessionHandle.get();
}
/** {@inheritDoc} */
@Override
public String toString() {
StringBuilder sb = new StringBuilder(200);
sb.append(getEngineName())
.append(':')
.append(getVersion())
.append(", capabilities: [\n\t" + StandardCapabilities.MKL + ",\n");
if (hasCapability(StandardCapabilities.CUDA)) {
sb.append("\t").append(StandardCapabilities.CUDA).append(",\n"); // NOPMD
}
sb.append("]\nTensorFlow Library: ").append(LibUtils.getLibName());
return sb.toString();
}
/** {@inheritDoc} */
@Override
public void close() {
TFE_Context handle = eagerSessionHandle.getAndSet(null);
if (handle != null && !handle.isNull()) {
handle.close();
}
}
}
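// Illustrative sketch (not part of the original source): obtaining the TensorFlow
// engine by name when it is not the default, as described in the class Javadoc,
// and creating a base NDManager from it.
class TfEngineExample {

    static void demo() {
        Engine engine = Engine.getEngine("TensorFlow");
        System.out.println(engine); // engine name, version and capabilities
        try (NDManager manager = engine.newBaseManager()) {
            // allocate NDArrays scoped to this manager
        }
    }
}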
|
0
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow/engine/TfEngineProvider.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tensorflow.engine;
import ai.djl.engine.Engine;
import ai.djl.engine.EngineProvider;
/** {@code TfEngineProvider} is the TensorFlow implementation of {@link EngineProvider}. */
public class TfEngineProvider implements EngineProvider {
private static volatile Engine engine; // NOPMD
/** {@inheritDoc} */
@Override
public String getEngineName() {
return TfEngine.ENGINE_NAME;
}
/** {@inheritDoc} */
@Override
public int getEngineRank() {
return TfEngine.RANK;
}
/** {@inheritDoc} */
@Override
public Engine getEngine() {
if (engine == null) {
synchronized (TfEngineProvider.class) {
if (engine == null) {
engine = TfEngine.newInstance();
}
}
}
return engine;
}
}
|
0
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow/engine/TfModel.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tensorflow.engine;
import ai.djl.BaseModel;
import ai.djl.Device;
import ai.djl.MalformedModelException;
import ai.djl.Model;
import ai.djl.nn.Block;
import ai.djl.tensorflow.engine.javacpp.JavacppUtils;
import ai.djl.util.Utils;
import com.google.protobuf.InvalidProtocolBufferException;
import org.tensorflow.proto.ConfigProto;
import org.tensorflow.proto.RunOptions;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Base64;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/** {@code TfModel} is the TensorFlow implementation of {@link Model}. */
public class TfModel extends BaseModel {
private static final String DEFAULT_SERVING_SIGNATURE_DEF_KEY = "serving_default";
/**
* Constructs a new Model on a given device.
*
* @param name the model name
* @param device the device the model should be located on
*/
TfModel(String name, Device device) {
super(name);
properties = new ConcurrentHashMap<>();
manager = TfNDManager.getSystemManager().newSubManager(device);
manager.setName("tfModel");
}
/** {@inheritDoc} */
@Override
public void load(Path modelPath, String prefix, Map<String, ?> options)
throws FileNotFoundException, MalformedModelException {
setModelDir(modelPath);
wasLoaded = true;
if (prefix == null) {
prefix = modelName;
}
Path exportDir = findModelDir(prefix);
if (exportDir == null) {
exportDir = findModelDir("saved_model.pb");
if (exportDir == null) {
throw new FileNotFoundException("No TensorFlow model found in: " + modelDir);
}
}
String[] tags = null;
ConfigProto configProto = null;
RunOptions runOptions = null;
String signatureDefKey = DEFAULT_SERVING_SIGNATURE_DEF_KEY;
if (options != null) {
Object tagOption = options.get("Tags");
if (tagOption instanceof String[]) {
tags = (String[]) tagOption;
} else if (tagOption instanceof String) {
if (((String) tagOption).isEmpty()) {
tags = Utils.EMPTY_ARRAY;
} else {
tags = ((String) tagOption).split(",");
}
}
Object config = options.get("ConfigProto");
if (config instanceof ConfigProto) {
configProto = (ConfigProto) config;
} else if (config instanceof String) {
try {
byte[] buf = Base64.getDecoder().decode((String) config);
configProto = ConfigProto.parseFrom(buf);
} catch (InvalidProtocolBufferException e) {
throw new MalformedModelException("Invalid ConfigProto: " + config, e);
}
}
Object run = options.get("RunOptions");
if (run instanceof RunOptions) {
runOptions = (RunOptions) run;
} else if (run instanceof String) {
try {
byte[] buf = Base64.getDecoder().decode((String) run);
runOptions = RunOptions.parseFrom(buf);
} catch (InvalidProtocolBufferException e) {
throw new MalformedModelException("Invalid RunOptions: " + run, e);
}
}
if (options.containsKey("SignatureDefKey")) {
signatureDefKey = (String) options.get("SignatureDefKey");
}
}
if (tags == null) {
tags = new String[] {"serve"};
}
if (configProto == null) {
// default one
configProto = JavacppUtils.getSessionConfig();
}
SavedModelBundle bundle =
JavacppUtils.loadSavedModelBundle(
exportDir.toString(), tags, configProto, runOptions);
block = new TfSymbolBlock(bundle, signatureDefKey);
}
private Path findModelDir(String prefix) {
Path path = modelDir.resolve(prefix);
if (!Files.exists(path)) {
return null;
}
if (Files.isRegularFile(path)) {
return modelDir;
} else if (Files.isDirectory(path)) {
Path file = path.resolve("saved_model.pb");
if (Files.exists(file) && Files.isRegularFile(file)) {
return path;
}
}
return null;
}
/** {@inheritDoc} */
@Override
public void save(Path modelPath, String newModelName) {
throw new UnsupportedOperationException("Not supported for TensorFlow Engine");
}
/** {@inheritDoc} */
@Override
public void setBlock(Block block) {
throw new UnsupportedOperationException("Not supported for TensorFlow Engine");
}
/** {@inheritDoc} */
@Override
public String[] getArtifactNames() {
try (Stream<Path> stream = Files.walk(modelDir)) {
List<Path> files = stream.filter(Files::isRegularFile).collect(Collectors.toList());
List<String> ret = new ArrayList<>(files.size());
for (Path path : files) {
String fileName = path.toFile().getName();
if (fileName.endsWith(".pb")) {
// ignore model files.
continue;
}
Path relative = modelDir.relativize(path);
ret.add(relative.toString());
}
return ret.toArray(Utils.EMPTY_ARRAY);
} catch (IOException e) {
throw new AssertionError("Failed list files", e);
}
}
/** {@inheritDoc} */
@Override
public void close() {
if (block != null) {
((TfSymbolBlock) block).close();
block = null;
}
super.close();
}
}
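// Illustrative sketch (not part of the original source): loading a TensorFlow
// SavedModel through the generic Model API with the options handled above. The
// model directory, tag and signature key values are placeholders for illustration.
class TfModelExample {

    static void demo(Path savedModelDir) throws Exception {
        Map<String, String> options = new ConcurrentHashMap<>();
        options.put("Tags", "serve");
        options.put("SignatureDefKey", "serving_default");
        try (Model model = Model.newInstance("my-model", "TensorFlow")) {
            model.load(savedModelDir, null, options);
        }
    }
}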
|
0
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow/engine/TfNDArray.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tensorflow.engine;
import ai.djl.Device;
import ai.djl.ndarray.BaseNDManager;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDArrays;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.NDScope;
import ai.djl.ndarray.internal.NDArrayEx;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import ai.djl.ndarray.types.SparseFormat;
import ai.djl.tensorflow.engine.javacpp.JavacppUtils;
import ai.djl.util.NativeResource;
import ai.djl.util.Preconditions;
import org.tensorflow.internal.c_api.TFE_TensorHandle;
import org.tensorflow.internal.c_api.TF_Tensor;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.IntStream;
import java.util.stream.LongStream;
/** {@code TfNDArray} is the TensorFlow implementation of {@link NDArray}. */
@SuppressWarnings("PMD.UseTryWithResources")
public class TfNDArray extends NativeResource<TFE_TensorHandle> implements NDArray {
private Shape shape;
private Device device;
private TfNDManager manager;
private String name;
private TfNDArrayEx tfNDArrayEx;
private DataType dataType;
private TF_Tensor tensor;
TfNDArray(TfNDManager manager, TFE_TensorHandle handle) {
super(handle);
this.manager = manager;
manager.attachInternal(getUid(), this);
tfNDArrayEx = new TfNDArrayEx(this);
NDScope.register(this);
}
TfNDArray(TfNDManager manager, TFE_TensorHandle handle, TF_Tensor tensor) {
this(manager, handle);
this.tensor = tensor;
}
/** {@inheritDoc} */
@Override
public TfNDManager getManager() {
return manager;
}
/** {@inheritDoc} */
@Override
public String getName() {
return name;
}
/** {@inheritDoc} */
@Override
public void setName(String name) {
this.name = name;
}
/** {@inheritDoc} */
@Override
public DataType getDataType() {
if (dataType == null) {
Preconditions.checkArgument(
getHandle() != null && !getHandle().isNull(), "Eager session has been closed");
dataType = JavacppUtils.getDataType(getHandle());
}
return dataType;
}
/** {@inheritDoc} */
@Override
public Device getDevice() {
if (device == null) {
Preconditions.checkArgument(
getHandle() != null && !getHandle().isNull(), "Eager session has been closed");
device = JavacppUtils.getDevice(getHandle());
}
return device;
}
/** {@inheritDoc} */
@Override
public Shape getShape() {
if (shape == null) {
Preconditions.checkArgument(
getHandle() != null && !getHandle().isNull(), "Eager session has been closed");
shape = JavacppUtils.getShape(getHandle());
}
return shape;
}
/** {@inheritDoc} */
@Override
public SparseFormat getSparseFormat() {
return SparseFormat.DENSE;
}
/** {@inheritDoc} */
@Override
public NDArray toDevice(Device device, boolean copy) {
if (device.equals(getDevice()) && !copy) {
return this;
} else if (device.equals(getDevice()) && copy) {
// TensorFlow toDevice doesn't copy if the data is already on the same device
return duplicate();
}
return new TfNDArray(
manager, JavacppUtils.toDevice(getHandle(), manager.getEagerSession(), device));
}
/** {@inheritDoc} */
@Override
public NDArray toType(DataType dataType, boolean copy) {
if (dataType.equals(getDataType()) && !copy) {
return this;
}
return manager.opExecutor("Cast")
.addInput(this)
.addParam("DstT", dataType)
.buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public void setRequiresGradient(boolean requiresGrad) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray getGradient() {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public boolean hasGradient() {
return false;
}
/** {@inheritDoc} */
@Override
public NDArray stopGradient() {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public String[] toStringArray(Charset charset) {
int size = Math.toIntExact(getShape().size());
return JavacppUtils.getString(getHandle(), size, charset);
}
/** {@inheritDoc} */
@Override
public ByteBuffer toByteBuffer(boolean tryDirect) {
if (getDataType() == DataType.STRING) {
throw new IllegalArgumentException("Please use toStringArray() for String NDArray.");
}
return JavacppUtils.getByteBuffer(getHandle());
}
/** {@inheritDoc} */
@Override
public void set(Buffer buffer) {
if (getDevice().isGpu()) {
// TODO: Implement set for GPU
throw new UnsupportedOperationException("GPU Tensor cannot be modified after creation");
}
int size = Math.toIntExact(getShape().size());
DataType type = getDataType();
BaseNDManager.validateBuffer(buffer, type, size);
if (buffer instanceof ByteBuffer) {
JavacppUtils.setByteBuffer(getHandle(), (ByteBuffer) buffer);
return;
}
ByteBuffer bb = getManager().allocateDirect(size * type.getNumOfBytes());
BaseNDManager.copyBuffer(buffer, bb);
JavacppUtils.setByteBuffer(getHandle(), bb);
}
/** {@inheritDoc} */
@Override
public NDArray gather(NDArray index, int axis) {
throw new UnsupportedOperationException("Not implemented yet.");
}
/** {@inheritDoc} */
@Override
public NDArray gatherNd(NDArray index) {
throw new UnsupportedOperationException("Not implemented yet.");
}
/** {@inheritDoc} */
@Override
public NDArray take(NDManager manager, NDArray index) {
throw new UnsupportedOperationException("Not implemented yet.");
}
/** {@inheritDoc} */
@Override
public NDArray put(NDArray index, NDArray value) {
throw new UnsupportedOperationException("Not implemented yet.");
}
/** {@inheritDoc} */
@Override
public NDArray scatter(NDArray index, NDArray value, int axis) {
throw new UnsupportedOperationException("Not implemented yet.");
}
/** {@inheritDoc} */
@Override
public void attach(NDManager manager) {
detach();
this.manager = (TfNDManager) manager;
manager.attachInternal(getUid(), this);
}
/** {@inheritDoc} */
@Override
public void returnResource(NDManager manager) {
detach();
this.manager = (TfNDManager) manager;
manager.attachUncappedInternal(getUid(), this);
}
/** {@inheritDoc} */
@Override
public void tempAttach(NDManager manager) {
NDManager original = this.manager;
detach();
this.manager = (TfNDManager) manager;
manager.tempAttachInternal(original, getUid(), this);
}
/** {@inheritDoc} */
@Override
public void detach() {
manager.detachInternal(getUid());
manager = TfNDManager.getSystemManager();
}
/** {@inheritDoc} */
@Override
public NDArray duplicate() {
return manager.opExecutor("DeepCopy").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray booleanMask(NDArray index, int axis) {
// handle scalar case manually to behave like numpy
if (isScalar()) {
if (!index.isScalar()) {
throw new IllegalArgumentException("Input is scalar, index must also be scalar.");
}
if (index.toBooleanArray()[0]) {
return expandDims(0);
} else {
return manager.create(new Shape());
}
}
try (NDArray where = manager.opExecutor("Where").addInput(index).buildSingletonOrThrow();
NDArray squeeze = where.squeeze(1);
NDArray axisArr = manager.create(axis)) {
return manager.opExecutor("GatherV2")
.addInput(this)
.addInput(squeeze)
.addInput(axisArr)
.buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray sequenceMask(NDArray sequenceLength, float value) {
throw new UnsupportedOperationException("Not implemented yet");
}
/** {@inheritDoc} */
@Override
public NDArray sequenceMask(NDArray sequenceLength) {
throw new UnsupportedOperationException("Not implemented yet");
}
/** {@inheritDoc} */
@Override
public boolean contentEquals(Number number) {
if (number == null || getDataType().isBoolean()) {
return false;
}
try (NDArray result = eq(number)) {
return result.all().getBoolean();
}
}
/** {@inheritDoc} */
@Override
public boolean contentEquals(NDArray other) {
if (other == null || (!shapeEquals(other))) {
return false;
}
if (getDataType() != other.getDataType()) {
return false;
}
TfNDArray eq = (TfNDArray) eq(other);
return eq.all().toBooleanArray()[0];
}
/** {@inheritDoc} */
@Override
public NDArray eq(Number n) {
try (NDArray number = manager.create(n).toType(getDataType(), false)) {
return eq(number);
}
}
/** {@inheritDoc} */
@Override
public NDArray eq(NDArray other) {
return manager.opExecutor("Equal").addInput(this).addInput(other).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray neq(Number n) {
try (NDArray number = manager.create(n).toType(getDataType(), false)) {
return neq(number);
}
}
/** {@inheritDoc} */
@Override
public NDArray neq(NDArray other) {
return manager.opExecutor("NotEqual")
.addInput(this)
.addInput(other)
.buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray gt(Number n) {
try (NDArray number = manager.create(n).toType(getDataType(), false)) {
return gt(number);
}
}
/** {@inheritDoc} */
@Override
public NDArray gt(NDArray other) {
return manager.opExecutor("Greater").addInput(this).addInput(other).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray gte(Number n) {
try (NDArray number = manager.create(n).toType(getDataType(), false)) {
return gte(number);
}
}
/** {@inheritDoc} */
@Override
public NDArray gte(NDArray other) {
return manager.opExecutor("GreaterEqual")
.addInput(this)
.addInput(other)
.buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray lt(Number n) {
try (NDArray number = manager.create(n).toType(getDataType(), false)) {
return lt(number);
}
}
/** {@inheritDoc} */
@Override
public NDArray lt(NDArray other) {
return manager.opExecutor("Less").addInput(this).addInput(other).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray lte(Number n) {
try (NDArray number = manager.create(n).toType(getDataType(), false)) {
return lte(number);
}
}
/** {@inheritDoc} */
@Override
public NDArray lte(NDArray other) {
return manager.opExecutor("LessEqual")
.addInput(this)
.addInput(other)
.buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray all() {
try (NDArray casted = toType(DataType.BOOLEAN, true);
NDArray axes = manager.arange(getShape().dimension())) {
return manager.opExecutor("All")
.addInput(casted)
.addInput(axes)
.buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray any() {
try (NDArray casted = toType(DataType.BOOLEAN, true);
NDArray axes = manager.arange(getShape().dimension())) {
return manager.opExecutor("Any")
.addInput(casted)
.addInput(axes)
.buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray erfinv() {
return manager.opExecutor("Erfinv").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray erf() {
return manager.opExecutor("Erf").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray norm(boolean keepDims) {
// Flatten first to simulate "numpy.linalg.norm" when no axis is specified
if (getDataType() == DataType.FLOAT64) {
throw new UnsupportedOperationException("float64 is not supported");
}
NDArray flatten = flatten();
NDArray axis = manager.create(0);
NDArray res =
manager.opExecutor("EuclideanNorm")
.addInput(flatten)
.addInput(axis)
.addParam("keep_dims", keepDims)
.buildSingletonOrThrow();
// close the temp NDArrays
flatten.close();
axis.close();
if (!keepDims) {
return res;
}
try {
long[] shapes = LongStream.generate(() -> 1).limit(getShape().dimension()).toArray();
return res.reshape(shapes);
} finally {
res.close();
}
}
/** {@inheritDoc} */
@Override
public NDArray norm(int ord, int[] axes, boolean keepDims) {
if (ord != 2) {
throw new UnsupportedOperationException("Only ord=2 is supported");
}
try (NDArray axesArr = manager.create(axes)) {
return manager.opExecutor("EuclideanNorm")
.addInput(this)
.addInput(axesArr)
.addParam("keep_dims", keepDims)
.buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray oneHot(int depth, float onValue, float offValue, DataType dataType) {
try (NDArray depthArr = manager.create(depth);
NDArray onValueArr = manager.create(onValue);
NDArray offValueArr = manager.create(offValue);
NDArray result =
manager.opExecutor("OneHot")
.addInput(this)
.addInput(depthArr)
.addInput(onValueArr)
.addInput(offValueArr)
.addParam("axis", -1)
.buildSingletonOrThrow()) {
return result.toType(dataType, true);
}
}
/** {@inheritDoc} */
@Override
public NDArray batchDot(NDArray other) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray complex() {
throw new UnsupportedOperationException("Not implemented yet.");
}
/** {@inheritDoc} */
@Override
public NDArray real() {
throw new UnsupportedOperationException("Not implemented yet.");
}
/** {@inheritDoc} */
@Override
public NDArray conj() {
throw new UnsupportedOperationException("Not implemented yet.");
}
/** {@inheritDoc} */
@Override
public NDArray add(Number n) {
try (NDArray number = manager.create(n).toType(getDataType(), false)) {
return add(number);
}
}
/** {@inheritDoc} */
@Override
public NDArray add(NDArray other) {
return manager.opExecutor("Add").addInput(this).addInput(other).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray sub(Number n) {
try (NDArray number = manager.create(n).toType(getDataType(), false)) {
return sub(number);
}
}
/** {@inheritDoc} */
@Override
public NDArray sub(NDArray other) {
return manager.opExecutor("Sub").addInput(this).addInput(other).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray mul(Number n) {
try (NDArray number = manager.create(n).toType(getDataType(), false)) {
return mul(number);
}
}
/** {@inheritDoc} */
@Override
public NDArray mul(NDArray other) {
return manager.opExecutor("Mul").addInput(this).addInput(other).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray div(Number n) {
try (NDArray number = manager.create(n).toType(getDataType(), false)) {
return div(number);
}
}
/** {@inheritDoc} */
@Override
public NDArray div(NDArray other) {
return manager.opExecutor("Div").addInput(this).addInput(other).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray mod(Number n) {
try (NDArray number = manager.create(n).toType(getDataType(), false)) {
return mod(number);
}
}
/** {@inheritDoc} */
@Override
public NDArray mod(NDArray other) {
return manager.opExecutor("Mod").addInput(this).addInput(other).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray pow(Number n) {
try (NDArray number = manager.create(n).toType(getDataType(), false)) {
return pow(number);
}
}
/** {@inheritDoc} */
@Override
public NDArray pow(NDArray other) {
return manager.opExecutor("Pow").addInput(this).addInput(other).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray maximum(Number n) {
try (NDArray number = manager.create(n).toType(getDataType(), false)) {
return maximum(number);
}
}
/** {@inheritDoc} */
@Override
public NDArray maximum(NDArray other) {
return manager.opExecutor("Maximum").addInput(this).addInput(other).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray minimum(Number n) {
try (NDArray number = manager.create(n).toType(getDataType(), false)) {
return minimum(number);
}
}
/** {@inheritDoc} */
@Override
public NDArray minimum(NDArray other) {
return manager.opExecutor("Minimum").addInput(this).addInput(other).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray addi(Number n) {
try (NDArray number = manager.create(n).toType(getDataType(), false)) {
return addi(number);
}
}
/** {@inheritDoc} */
@Override
public NDArray addi(NDArray other) {
TFE_TensorHandle newHandle =
manager.opExecutor("Add").addInput(this).addInput(other).buildRawPointer(1)[0];
setHandle(newHandle);
return this;
}
/** {@inheritDoc} */
@Override
public NDArray subi(Number n) {
try (NDArray number = manager.create(n).toType(getDataType(), false)) {
return subi(number);
}
}
/** {@inheritDoc} */
@Override
public NDArray subi(NDArray other) {
TFE_TensorHandle newHandle =
manager.opExecutor("Sub").addInput(this).addInput(other).buildRawPointer(1)[0];
setHandle(newHandle);
return this;
}
/** {@inheritDoc} */
@Override
public NDArray muli(Number n) {
try (NDArray number = manager.create(n).toType(getDataType(), false)) {
return muli(number);
}
}
/** {@inheritDoc} */
@Override
public NDArray muli(NDArray other) {
TFE_TensorHandle newHandle =
manager.opExecutor("Mul").addInput(this).addInput(other).buildRawPointer(1)[0];
setHandle(newHandle);
return this;
}
/** {@inheritDoc} */
@Override
public NDArray divi(Number n) {
try (NDArray number = manager.create(n).toType(getDataType(), false)) {
return divi(number);
}
}
/** {@inheritDoc} */
@Override
public NDArray divi(NDArray other) {
TFE_TensorHandle newHandle =
manager.opExecutor("Div").addInput(this).addInput(other).buildRawPointer(1)[0];
setHandle(newHandle);
return this;
}
/** {@inheritDoc} */
@Override
public NDArray toSparse(SparseFormat fmt) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray modi(Number n) {
try (NDArray number = manager.create(n).toType(getDataType(), false)) {
return modi(number);
}
}
/** {@inheritDoc} */
@Override
public NDArray modi(NDArray other) {
TFE_TensorHandle newHandle =
manager.opExecutor("Mod").addInput(this).addInput(other).buildRawPointer(1)[0];
setHandle(newHandle);
return this;
}
/** {@inheritDoc} */
@Override
public NDArray powi(Number n) {
try (NDArray number = manager.create(n).toType(getDataType(), false)) {
return powi(number);
}
}
/** {@inheritDoc} */
@Override
public NDArray powi(NDArray other) {
TFE_TensorHandle newHandle =
manager.opExecutor("Pow").addInput(this).addInput(other).buildRawPointer(1)[0];
setHandle(newHandle);
return this;
}
/** {@inheritDoc} */
@Override
public NDArray sign() {
return manager.opExecutor("Sign").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray signi() {
TFE_TensorHandle newHandle =
manager.opExecutor("Sign").addInput(this).buildRawPointer(1)[0];
setHandle(newHandle);
return this;
}
/** {@inheritDoc} */
@Override
public NDArray neg() {
return manager.opExecutor("Neg").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray negi() {
TFE_TensorHandle newHandle = manager.opExecutor("Neg").addInput(this).buildRawPointer(1)[0];
setHandle(newHandle);
return this;
}
/** {@inheritDoc} */
@Override
public NDArray abs() {
return manager.opExecutor("Abs").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray square() {
return manager.opExecutor("Square").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray sqrt() {
return manager.opExecutor("Sqrt").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray cbrt() {
NDArray pow;
if (getDataType().equals(DataType.FLOAT64)) {
pow = manager.create(1.0 / 3);
} else {
pow = manager.create(1f / 3);
}
try {
return manager.opExecutor("Pow").addInput(this).addInput(pow).buildSingletonOrThrow();
} finally {
pow.close();
}
}
/** {@inheritDoc} */
@Override
public NDArray floor() {
return manager.opExecutor("Floor").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray ceil() {
return manager.opExecutor("Ceil").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray round() {
return manager.opExecutor("Round").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray trunc() {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray exp() {
return manager.opExecutor("Exp").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray gammaln() {
throw new UnsupportedOperationException("Not implemented yet.");
}
/** {@inheritDoc} */
@Override
public NDArray log() {
return manager.opExecutor("Log").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray log10() {
return log().div(Math.log(10));
}
/** {@inheritDoc} */
@Override
public NDArray log2() {
return log().div(Math.log(2));
}
/** {@inheritDoc} */
@Override
public NDArray sin() {
return manager.opExecutor("Sin").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray cos() {
return manager.opExecutor("Cos").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray tan() {
return manager.opExecutor("Tan").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray asin() {
return manager.opExecutor("Asin").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray acos() {
return manager.opExecutor("Acos").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray atan() {
return manager.opExecutor("Atan").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray atan2(NDArray other) {
return manager.opExecutor("Atan2").addInput(this).addInput(other).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray sinh() {
return manager.opExecutor("Sinh").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray cosh() {
return manager.opExecutor("Cosh").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray tanh() {
return manager.opExecutor("Tanh").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray asinh() {
return manager.opExecutor("Asinh").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray acosh() {
return manager.opExecutor("Acosh").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray atanh() {
return manager.opExecutor("Atanh").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray toDegrees() {
return mul(180).div(Math.PI);
}
/** {@inheritDoc} */
@Override
public NDArray toRadians() {
return mul(Math.PI).div(180);
}
/** {@inheritDoc} */
@Override
public NDArray max() {
try (NDArray axes = manager.arange(getShape().dimension())) {
return manager.opExecutor("Max").addInput(this).addInput(axes).buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray max(int[] axes, boolean keepDims) {
try (NDArray axesArr = manager.create(axes)) {
return manager.opExecutor("Max")
.addInput(this)
.addInput(axesArr)
.addParam("keep_dims", keepDims)
.buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray min() {
try (NDArray axes = manager.arange(getShape().dimension())) {
return manager.opExecutor("Min").addInput(this).addInput(axes).buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray min(int[] axes, boolean keepDims) {
try (NDArray axesArr = manager.create(axes)) {
return manager.opExecutor("Min")
.addInput(this)
.addInput(axesArr)
.addParam("keep_dims", keepDims)
.buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray sum() {
// sum over all axes
NDArray array = this;
// TF cannot sum boolean values; cast boolean and integer types to INT64 first
DataType type = array.getDataType();
if (type == DataType.BOOLEAN || type.isInteger()) {
array = array.toType(DataType.INT64, false);
}
try (NDArray axes = manager.arange(getShape().dimension())) {
return manager.opExecutor("Sum").addInput(array).addInput(axes).buildSingletonOrThrow();
} finally {
if (array != this) {
array.close();
}
}
}
/** {@inheritDoc} */
@Override
public NDArray sum(int[] axes, boolean keepDims) {
try (NDArray axesArr = manager.create(axes)) {
return manager.opExecutor("Sum")
.addInput(this)
.addInput(axesArr)
.addParam("keep_dims", keepDims)
.buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray cumProd(int axis) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray cumProd(int axis, DataType dataType) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray prod() {
try (NDArray axes = manager.arange(getShape().dimension())) {
return manager.opExecutor("Prod").addInput(this).addInput(axes).buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray prod(int[] axes, boolean keepDims) {
try (NDArray axesArr = manager.create(axes)) {
return manager.opExecutor("Prod")
.addInput(this)
.addInput(axesArr)
.addParam("keep_dims", keepDims)
.buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray mean() {
try (NDArray axes = manager.arange(getShape().dimension())) {
return manager.opExecutor("Mean").addInput(this).addInput(axes).buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray mean(int[] axes, boolean keepDims) {
try (NDArray axesArr = manager.create(axes)) {
return manager.opExecutor("Mean")
.addInput(this)
.addInput(axesArr)
.addParam("keep_dims", keepDims)
.buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray normalize(double p, long dim, double eps) {
throw new UnsupportedOperationException("Not implemented yet.");
}
/** {@inheritDoc} */
@Override
public NDArray rotate90(int times, int[] axes) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray trace(int offset, int axis1, int axis2) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDList split(long[] indices, int axis) {
if (indices.length == 0) {
return new NDList(duplicate());
}
List<Long> sizes = new ArrayList<>();
int lastIndex = indices.length - 1;
long dimSize = getShape().get(axis);
// do not add a leading size if the first index is 0
if (indices[0] > 0) {
sizes.add(indices[0]);
}
for (int i = 1; i < indices.length; i++) {
sizes.add(indices[i] - indices[i - 1]);
}
// add the last size if the last index is smaller than the dimension size on that axis
if (indices[lastIndex] < dimSize) {
sizes.add(dimSize - indices[lastIndex]);
}
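// e.g. indices {2, 5} with an axis of size 8 yield split sizes {2, 3, 3}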
long totalSize = sizes.stream().mapToLong(Long::longValue).sum();
if (totalSize != getShape().get(axis)) {
throw new IllegalArgumentException(
"split sizes :"
+ totalSize
+ " must sum to dimension on axis "
+ axis
+ ": "
+ getShape().get(axis));
}
try (NDArray sizesArr = manager.create(sizes.stream().mapToInt(Long::intValue).toArray());
NDArray axisArr = manager.create(axis)) {
return new NDList(
manager.opExecutor("SplitV")
.addInput(this)
.addInput(sizesArr)
.addInput(axisArr)
.addParam("num_split", sizes.size())
.build(indices.length + 1));
}
}
/** {@inheritDoc} */
@Override
public NDArray flatten() {
return reshape(new Shape(-1));
}
/** {@inheritDoc} */
@Override
public NDArray flatten(int startDim, int endDim) {
throw new UnsupportedOperationException("Not implemented yet.");
}
/** {@inheritDoc} */
@Override
public NDArray fft(long length, long axis) {
throw new UnsupportedOperationException("Not implemented yet.");
}
/** {@inheritDoc} */
@Override
public NDArray rfft(long length, long axis) {
throw new UnsupportedOperationException("Not implemented yet.");
}
/** {@inheritDoc} */
@Override
public NDArray ifft(long length, long axis) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray irfft(long length, long axis) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray stft(
long nFft,
long hopLength,
boolean center,
NDArray window,
boolean normalize,
boolean returnComplex) {
throw new UnsupportedOperationException("Not implemented yet.");
}
/** {@inheritDoc} */
@Override
public NDArray fft2(long[] sizes, long[] axes) {
throw new UnsupportedOperationException("Not implemented yet.");
}
/** {@inheritDoc} */
@Override
public NDArray pad(Shape padding, double value) {
throw new UnsupportedOperationException("Not implemented yet.");
}
/** {@inheritDoc} */
@Override
public NDArray ifft2(long[] sizes, long[] axes) {
throw new UnsupportedOperationException("Not implemented yet.");
}
/** {@inheritDoc} */
@Override
public NDArray reshape(Shape shape) {
try (NDArray shapeArr = manager.create(shape.getShape())) {
return manager.opExecutor("Reshape")
.addInput(this)
.addInput(shapeArr)
.buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray expandDims(int axis) {
try (NDArray axisArr = manager.create(axis)) {
return manager.opExecutor("ExpandDims")
.addInput(this)
.addInput(axisArr)
.buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray squeeze(int[] axes) {
if (isScalar()) {
axes = new int[0];
}
return manager.opExecutor("Squeeze")
.addInput(this)
.addParam("squeeze_dims", Arrays.stream(axes).mapToLong(i -> i).toArray())
.buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDList unique(Integer dim, boolean sorted, boolean returnInverse, boolean returnCounts) {
throw new UnsupportedOperationException("Not implemented yet.");
}
/** {@inheritDoc} */
@Override
public NDArray logicalAnd(NDArray n) {
try (NDArray input1 = toType(DataType.BOOLEAN, true);
NDArray input2 = n.toType(DataType.BOOLEAN, true)) {
return manager.opExecutor("LogicalAnd")
.addInput(input1)
.addInput(input2)
.buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray logicalOr(NDArray n) {
try (NDArray input1 = toType(DataType.BOOLEAN, true);
NDArray input2 = n.toType(DataType.BOOLEAN, true)) {
return manager.opExecutor("LogicalOr")
.addInput(input1)
.addInput(input2)
.buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray logicalXor(NDArray n) {
return logicalOr(n).logicalAnd(logicalAnd(n).logicalNot());
}
/** {@inheritDoc} */
@Override
public NDArray logicalNot() {
try (NDArray input = toType(DataType.BOOLEAN, true)) {
return manager.opExecutor("LogicalNot").addInput(input).buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray argSort(int axis, boolean ascending) {
return sortHelper(axis, ascending, true);
}
/** {@inheritDoc} */
@Override
public NDArray sort(int axis) {
return sortHelper(axis, true, false);
}
/** {@inheritDoc} */
@Override
public NDArray sort() {
return sortHelper(-1, true, false);
}
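// Shared helper for sort()/argSort(): uses TopKV2 and, when the target axis is not the
// last one, transposes that axis to the last position before sorting.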
private NDArray sortHelper(int axis, boolean ascending, boolean returnIndices) {
if (isScalar()) {
return duplicate();
}
// use TopKV2 to implement sort and argSort
int k;
int rank = getShape().dimension();
int[] transposition;
NDArray input;
NDArray result;
try (TfNDManager subManager = (TfNDManager) manager.newSubManager()) {
attach(subManager);
if (axis == -1 || axis + 1 == getShape().dimension()) {
// last axis
transposition = null;
input = this;
long[] arrayShape = getShape().getShape();
k = (int) arrayShape[arrayShape.length - 1];
} else {
k = (int) getShape().getShape()[axis];
NDArray axisArr = subManager.arange(0, axis, 1, DataType.INT32, getDevice());
NDArray axisArr1 = subManager.create(new int[] {rank - 1});
NDArray axisArr2 =
subManager.arange(axis + 1, rank - 1, 1, DataType.INT32, getDevice());
NDArray axisArr3 = subManager.create(new int[] {axis});
transposition =
NDArrays.concat(new NDList(axisArr, axisArr1, axisArr2, axisArr3))
.toIntArray();
input = transpose(transposition);
}
NDArray[] outputs;
NDArray kArr = subManager.create(k);
if (ascending) {
outputs =
subManager
.opExecutor("TopKV2")
.addInput(input.neg())
.addInput(kArr)
.build(2);
} else {
outputs = subManager.opExecutor("TopKV2").addInput(input).addInput(kArr).build(2);
}
if (returnIndices) {
result = outputs[1].toType(DataType.INT64, false);
} else {
result = outputs[0];
}
if (transposition != null) {
result = result.transpose(transposition);
}
// negate back after the descending TopK to restore ascending order
if (ascending && !returnIndices) {
result = result.neg();
}
attach(subManager.getParentManager());
result.attach(subManager.getParentManager());
return result;
}
}
/** {@inheritDoc} */
@Override
public NDArray softmax(int axis) {
return softmaxHelper("Softmax", axis);
}
/** {@inheritDoc} */
@Override
public NDArray logSoftmax(int axis) {
return softmaxHelper("LogSoftmax", axis);
}
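// Shared helper for softmax()/logSoftmax(): TF's Softmax and LogSoftmax ops only operate
// on the last dimension, so other axes are handled by transposing first.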
private NDArray softmaxHelper(String operation, int axis) {
long dim = getShape().dimension();
if (dim == 0) {
return duplicate();
}
if (axis == -1 || axis == dim - 1) {
return manager.opExecutor(operation).addInput(this).buildSingletonOrThrow();
}
if (axis < -dim || axis >= dim) {
throw new IllegalArgumentException(
"Invalid axes value: "
+ axis
+ ", must be in range ["
+ -dim
+ ", "
+ dim
+ ") where "
+ dim
+ " is the number of dimensions in the input.");
}
// tf.softmax always applies to the last dimension; transpose the input so the target axis becomes the last dimension
try (NDManager subManager = manager.newSubManager()) {
attach(subManager);
NDList concatList = new NDList();
concatList.add(subManager.arange((int) (axis % dim)));
concatList.add(subManager.create((int) dim - 1).expandDims(0));
concatList.add(subManager.arange(axis + 1, (int) dim - 1));
concatList.add(subManager.create(axis).expandDims(0));
int[] axes = NDArrays.concat(concatList, 0).toIntArray();
NDArray transposed = transpose(axes);
NDArray output =
((TfNDManager) subManager)
.opExecutor(operation)
.addInput(transposed)
.buildSingletonOrThrow();
NDArray result = output.transpose(axes);
result.attach(subManager.getParentManager());
attach(subManager.getParentManager());
return result;
}
}
/** {@inheritDoc} */
@Override
public NDArray cumSum(int axis) {
// just expand dim for scalar
if (isScalar()) {
return expandDims(0);
}
// return an empty array if any dimension is 0
if (Arrays.stream(getShape().getShape()).anyMatch(dim -> dim == 0L)) {
return manager.create(new Shape(0));
}
try (NDArray axisArr = manager.create(axis)) {
return manager.opExecutor("Cumsum")
.addInput(this)
.addInput(axisArr)
.buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray cumSum() {
return cumSum(0);
}
/** {@inheritDoc} */
@Override
public NDArray diagonal() {
return manager.opExecutor("DiagPart").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray diagonal(int offset) {
throw new UnsupportedOperationException();
}
/** {@inheritDoc} */
@Override
public NDArray diagonal(int offset, int axis1, int axis2) {
throw new UnsupportedOperationException();
}
/** {@inheritDoc} */
@Override
public void intern(NDArray replaced) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray isInfinite() {
return manager.opExecutor("IsInf").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray inverse() {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray isNaN() {
return manager.opExecutor("IsNan").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray tile(long repeats) {
// TF Tile does not support scalars, so reshape to a rank-1 array first
if (isScalar()) {
try (NDArray temp = reshape(1)) {
return temp.tile(repeats);
}
}
long[] multiples = new long[getShape().dimension()];
Arrays.fill(multiples, repeats);
return tile(multiples);
}
/** {@inheritDoc} */
@Override
public NDArray tile(int axis, long repeats) {
long[] multiples = new long[getShape().dimension()];
Arrays.fill(multiples, 1);
multiples[axis] = repeats;
return tile(multiples);
}
/** {@inheritDoc} */
@Override
public NDArray tile(long[] repeats) {
try (NDArray repeatsArr = manager.create(repeats)) {
return manager.opExecutor("Tile")
.addInput(this)
.addInput(repeatsArr)
.buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray tile(Shape desiredShape) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray repeat(long repeats) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray repeat(int axis, long repeats) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray repeat(long[] repeats) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray repeat(Shape desiredShape) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray dot(NDArray other) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray matMul(NDArray other) {
if (isScalar() || other.isScalar()) {
throw new IllegalArgumentException("scalar is not allowed for matMul()");
}
if (getShape().dimension() > 2 || other.getShape().dimension() > 2) {
return manager.opExecutor("BatchMatMulV2")
.addInput(this)
.addInput(other)
.buildSingletonOrThrow();
}
boolean broadcast = false;
NDArray lhs = this;
NDArray rhs = other;
if (getShape().dimension() == 1) {
lhs = broadcast(1, getShape().get(0));
broadcast = true;
}
if (other.getShape().dimension() == 1) {
rhs = rhs.broadcast(1, getShape().get(0));
broadcast = true;
}
NDArray result =
manager.opExecutor("MatMul").addInput(lhs).addInput(rhs).buildSingletonOrThrow();
try {
if (broadcast) {
return result.squeeze();
}
return result;
} finally {
if (lhs != this) {
lhs.close();
}
if (rhs != other) {
rhs.close();
}
if (broadcast) {
result.close();
}
}
}
@Override
public NDArray batchMatMul(NDArray other) {
throw new UnsupportedOperationException();
}
@Override
public NDArray xlogy(NDArray other) {
throw new UnsupportedOperationException();
}
/** {@inheritDoc} */
@Override
public NDArray clip(Number min, Number max) {
try (NDArray minArr = manager.create(min.floatValue());
NDArray maxArr = manager.create(max.floatValue())) {
return manager.opExecutor("ClipByValue")
.addInput(this)
.addInput(minArr)
.addInput(maxArr)
.buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray flip(int... axes) {
try (NDArray axesArr = manager.create(axes)) {
return manager.opExecutor("ReverseV2")
.addInput(this)
.addInput(axesArr)
.buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray transpose() {
int dim = getShape().dimension();
int[] reversedShape = IntStream.range(0, dim).map(i -> dim - i - 1).toArray();
return transpose(reversedShape);
}
/** {@inheritDoc} */
@Override
public NDArray transpose(int... dimensions) {
if (Arrays.stream(dimensions).anyMatch(d -> d < 0)) {
throw new UnsupportedOperationException(
"Passing -1 for broadcasting the dimension is not currently supported");
}
if (!Arrays.equals(
Arrays.stream(dimensions).sorted().toArray(),
IntStream.range(0, getShape().dimension()).toArray())) {
throw new IllegalArgumentException(
"You must include each of the dimensions from 0 until "
+ getShape().dimension());
}
try (NDArray dimensionsArr = manager.create(dimensions)) {
return manager.opExecutor("Transpose")
.addInput(this)
.addInput(dimensionsArr)
.buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray broadcast(Shape shape) {
try (NDArray shapeArr = manager.create(shape.getShape())) {
return manager.opExecutor("BroadcastTo")
.addInput(this)
.addInput(shapeArr)
.buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray argMax() {
if (isEmpty()) {
throw new IllegalArgumentException("attempt to get argMax of an empty NDArray");
}
return manager.opExecutor("ArgMax").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray argMax(int axis) {
try (NDArray axisArr = manager.create(axis)) {
return manager.opExecutor("ArgMax")
.addInput(this)
.addInput(axisArr)
.buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDList topK(int k, int axis, boolean largest, boolean sorted) {
throw new UnsupportedOperationException("Not implemented yet.");
}
/** {@inheritDoc} */
@Override
public NDArray argMin() {
if (isEmpty()) {
throw new IllegalArgumentException("attempt to get argMin of an empty NDArray");
}
return manager.opExecutor("ArgMin").addInput(this).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray argMin(int axis) {
try (NDArray axisArr = manager.create(axis)) {
return manager.opExecutor("ArgMin")
.addInput(this)
.addInput(axisArr)
.buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray percentile(Number percentile) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray percentile(Number percentile, int[] dimension) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray median() {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray median(int[] axes) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray toDense() {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray nonzero() {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArrayEx getNDArrayInternal() {
return tfNDArrayEx;
}
/** {@inheritDoc} */
@Override
public NDArray diff(int n, int dim) {
throw new UnsupportedOperationException("Not implemented yet.");
}
/** {@inheritDoc} */
@Override
public boolean equals(Object obj) {
if (obj instanceof TfNDArray) {
return contentEquals((TfNDArray) obj);
}
return false;
}
/** {@inheritDoc} */
@Override
public int hashCode() {
return 0;
}
/** {@inheritDoc} */
@Override
public String toString() {
if (isReleased()) {
return "This array is already closed";
}
return toDebugString();
}
/** {@inheritDoc} */
@Override
public void close() {
onClose();
TFE_TensorHandle tensorHandle = handle.getAndSet(null);
if (tensorHandle != null && !tensorHandle.isNull()) {
tensorHandle.close();
if (tensor != null) {
tensor.close();
}
manager.detachInternal(getUid());
manager = null;
}
tfNDArrayEx = null;
}
// TensorFlow does not support in-place operations;
// each operator execution generates a new node in the graph.
// Work around the limitation by swapping in the newly created handle.
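// In-place methods such as addi()/subi()/muli()/divi() rely on this to emulate in-place semantics.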
protected void setHandle(TFE_TensorHandle newHandle) {
TFE_TensorHandle oldHandle = handle.getAndSet(newHandle);
oldHandle.close();
}
}
|
0
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow/engine/TfNDArrayEx.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tensorflow.engine;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.NDUtils;
import ai.djl.ndarray.index.NDArrayIndexer;
import ai.djl.ndarray.internal.NDArrayEx;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import ai.djl.ndarray.types.SparseFormat;
import ai.djl.nn.recurrent.RNN;
import org.tensorflow.internal.c_api.TFE_TensorHandle;
import java.util.List;
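/** {@code TfNDArrayEx} is the TensorFlow engine implementation of {@link NDArrayEx}, used by {@link TfNDArray}. */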
public class TfNDArrayEx implements NDArrayEx {
private TfNDArray array;
/**
* Constructs a {@code TfNDArrayEx} given a {@link NDArray}.
*
* @param array the array
*/
TfNDArrayEx(TfNDArray array) {
this.array = array;
}
/** {@inheritDoc} */
@Override
public NDArray rdivi(NDArray b) {
TFE_TensorHandle newHandle =
array.getManager()
.opExecutor("Div")
.addInput(b)
.addInput(array)
.buildRawPointer(1)[0];
array.setHandle(newHandle);
return array;
}
/** {@inheritDoc} */
@Override
public NDArray rsubi(NDArray b) {
TFE_TensorHandle newHandle =
array.getManager()
.opExecutor("Sub")
.addInput(b)
.addInput(array)
.buildRawPointer(1)[0];
array.setHandle(newHandle);
return array;
}
/** {@inheritDoc} */
@Override
public NDArray rmodi(NDArray b) {
TFE_TensorHandle newHandle =
array.getManager()
.opExecutor("Mod")
.addInput(b)
.addInput(array)
.buildRawPointer(1)[0];
array.setHandle(newHandle);
return array;
}
/** {@inheritDoc} */
@Override
public NDArray rpowi(Number n) {
TfNDManager manager = array.getManager();
try (NDArray temp = manager.create(n);
NDArray casted = temp.toType(array.getDataType(), false)) {
TFE_TensorHandle newHandle =
manager.opExecutor("Pow")
.addInput(casted)
.addInput(array)
.buildRawPointer(1)[0];
array.setHandle(newHandle);
return array;
}
}
/** {@inheritDoc} */
@Override
public NDArray relu() {
return array.getManager().opExecutor("Relu").addInput(array).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray sigmoid() {
return array.getManager().opExecutor("Sigmoid").addInput(array).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray tanh() {
return array.tanh();
}
/** {@inheritDoc} */
@Override
public NDArray softPlus() {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
@SuppressWarnings({"rawtypes", "unchecked"})
public NDArray softSign() {
return array.getManager().opExecutor("Softsign").addInput(array).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray leakyRelu(float alpha) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray elu(float alpha) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
@SuppressWarnings({"rawtypes", "unchecked"})
public NDArray selu() {
return array.getManager().opExecutor("Selu").addInput(array).buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray gelu() {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray maxPool(Shape kernelShape, Shape stride, Shape padding, boolean ceilMode) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray globalMaxPool() {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray avgPool(
Shape kernelShape,
Shape stride,
Shape padding,
boolean ceilMode,
boolean countIncludePad) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray globalAvgPool() {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray lpPool(
float normType, Shape kernelShape, Shape stride, Shape padding, boolean ceilMode) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray globalLpPool(float normType) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public void adadeltaUpdate(
NDList inputs,
NDList weights,
float weightDecay,
float rescaleGrad,
float clipGrad,
float rho,
float epsilon) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public void adagradUpdate(
NDList inputs,
NDList weights,
float learningRate,
float weightDecay,
float rescaleGrad,
float clipGrad,
float epsilon) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public void adamUpdate(
NDList inputs,
NDList weights,
float learningRate,
float learningRateBiasCorrection,
float weightDecay,
float rescaleGrad,
float clipGrad,
float beta1,
float beta2,
float epsilon,
boolean lazyUpdate,
boolean adamw) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public void nagUpdate(
NDList inputs,
NDList weights,
float learningRate,
float weightDecay,
float rescaleGrad,
float clipGrad,
float momentum) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public void rmspropUpdate(
NDList inputs,
NDList weights,
float learningRate,
float weightDecay,
float rescaleGrad,
float clipGrad,
float rho,
float momentum,
float epsilon,
boolean centered) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public void sgdUpdate(
NDList inputs,
NDList weights,
float learningRate,
float weightDecay,
float rescaleGrad,
float clipGrad,
float momentum,
boolean lazyUpdate) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDList convolution(
NDArray input,
NDArray weight,
NDArray bias,
Shape stride,
Shape padding,
Shape dilation,
int groups) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDList deconvolution(
NDArray input,
NDArray weight,
NDArray bias,
Shape stride,
Shape padding,
Shape outPadding,
Shape dilation,
int groups) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDList linear(NDArray input, NDArray weight, NDArray bias) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDList embedding(NDArray input, NDArray weight, SparseFormat sparse) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDList prelu(NDArray input, NDArray alpha) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDList dropout(NDArray input, float rate, boolean training) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDList layerNorm(
NDArray input, Shape normalizedShape, NDArray gamma, NDArray beta, float eps) {
throw new UnsupportedOperationException();
}
/** {@inheritDoc} */
@Override
public NDList batchNorm(
NDArray input,
NDArray runningMean,
NDArray runningVar,
NDArray gamma,
NDArray beta,
int axis,
float momentum,
float eps,
boolean training) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDList rnn(
NDArray input,
NDArray state,
NDList params,
boolean hasBiases,
int numLayers,
RNN.Activation activation,
double dropRate,
boolean train,
boolean bidirectional,
boolean batchFirst) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDList gru(
NDArray input,
NDArray state,
NDList params,
boolean hasBiases,
int numLayers,
double dropRate,
boolean training,
boolean bidirectional,
boolean batchFirst) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDList lstm(
NDArray input,
NDList states,
NDList params,
boolean hasBiases,
int numLayers,
double dropRate,
boolean training,
boolean bidirectional,
boolean batchFirst) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray normalize(float[] mean, float[] std) {
// TODO: TensorFlow does not support channels first on CPU for conv2d
// https://github.com/tensorflow/tensorflow/issues/32691
// https://github.com/tensorflow/tensorflow/issues/26411
int dim = getArray().getShape().dimension();
Shape shape = (dim == 3) ? new Shape(1, 1, 3) : new Shape(1, 1, 1, 3);
TfNDManager manager = array.getManager();
try (NDArray meanArr = manager.create(mean, shape);
NDArray stdArr = manager.create(std, shape)) {
return getArray().sub(meanArr).divi(stdArr);
}
}
/** {@inheritDoc} */
@Override
public NDArray toTensor() {
// TODO: TensorFlow does not support channels first on CPU for conv2d
// https://github.com/tensorflow/tensorflow/issues/32691
// https://github.com/tensorflow/tensorflow/issues/26411
TfNDManager manager = array.getManager();
try (TfNDManager subManager = (TfNDManager) manager.newSubManager()) {
array.attach(subManager);
NDArray input = array;
int dim = input.getShape().dimension();
if (dim == 3) {
input = input.expandDims(0);
}
input = input.div(255.0);
if (dim == 3) {
input = input.squeeze(0);
}
// The network by default takes float32
NDArray output =
(!input.getDataType().equals(DataType.FLOAT32))
? input.toType(DataType.FLOAT32, false)
: input;
array.attach(manager);
output.attach(manager);
return output;
}
}
/** {@inheritDoc} */
@Override
public NDArray interpolation(long[] size, int mode, boolean alignCorners) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray resize(int width, int height, int interpolation) {
if (array.isEmpty()) {
throw new IllegalArgumentException("Can't resize image with 0 dims.");
}
String op = getResizeOpName(interpolation);
TfNDManager manager = array.getManager();
if (array.getShape().dimension() == 3) {
try (NDArray temp = array.expandDims(0);
NDArray size = manager.create(new int[] {height, width});
NDArray image =
manager.opExecutor(op)
.addInput(temp)
.addInput(size)
.buildSingletonOrThrow()) {
return image.squeeze();
}
}
try (NDArray size = manager.create(new int[] {height, width})) {
return manager.opExecutor(op).addInput(array).addInput(size).buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray randomFlipLeftRight() {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray randomFlipTopBottom() {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray randomBrightness(float brightness) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray randomHue(float hue) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray randomColorJitter(
float brightness, float contrast, float saturation, float hue) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArrayIndexer getIndexer(NDManager manager) {
return new TfNDArrayIndexer((TfNDManager) manager);
}
/** {@inheritDoc} */
@Override
public NDArray where(NDArray condition, NDArray other) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray stack(NDList arrays) {
return stack(arrays, 0);
}
/** {@inheritDoc} */
@Override
public NDArray stack(NDList arrays, int axis) {
NDArray[] srcArray = new NDArray[arrays.size() + 1];
srcArray[0] = array;
System.arraycopy(arrays.toArray(new NDArray[0]), 0, srcArray, 1, arrays.size());
return array.getManager()
.opExecutor("Pack")
.addInputList(srcArray)
.addParam("axis", axis)
.buildSingletonOrThrow();
}
/** {@inheritDoc} */
@Override
public NDArray concat(NDList arrays, int axis) {
NDUtils.checkConcatInput(arrays);
NDArray[] srcArray = new NDArray[arrays.size() + 1];
srcArray[0] = array;
System.arraycopy(arrays.toArray(new NDArray[0]), 0, srcArray, 1, arrays.size());
TfNDManager manager = array.getManager();
try (NDArray axisArr = manager.create(axis)) {
return manager.opExecutor("ConcatV2")
.addInputList(srcArray)
.addInput(axisArr)
.buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDList multiBoxTarget(
NDList inputs,
float iouThreshold,
float ignoreLabel,
float negativeMiningRatio,
float negativeMiningThreshold,
int minNegativeSamples) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDList multiBoxPrior(
List<Float> sizes,
List<Float> ratios,
List<Float> steps,
List<Float> offsets,
boolean clip) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDList multiBoxDetection(
NDList inputs,
boolean clip,
float threshold,
int backgroundId,
float nmsThreshold,
boolean forceSuppress,
int nmsTopK) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray getArray() {
return array;
}
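// Maps the DJL interpolation flag to the corresponding TensorFlow image resize op name.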
private String getResizeOpName(int interpolate) {
switch (interpolate) {
case 0:
return "ResizeNearestNeighbor";
case 1:
return "ResizeBilinear";
case 2:
return "ResizeArea";
case 3:
return "ResizeBicubic";
default:
throw new UnsupportedOperationException(
"The kind of interpolation is not supported.");
}
}
}
|
0
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow/engine/TfNDArrayIndexer.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tensorflow.engine;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.index.NDArrayIndexer;
import ai.djl.ndarray.index.full.NDIndexFullPick;
import ai.djl.ndarray.index.full.NDIndexFullSlice;
import ai.djl.ndarray.index.full.NDIndexFullTake;
/** The {@link NDArrayIndexer} used by the {@link TfNDArray}. */
public class TfNDArrayIndexer extends NDArrayIndexer {
private TfNDManager manager;
TfNDArrayIndexer(TfNDManager manager) {
this.manager = manager;
}
/** {@inheritDoc} */
@Override
public NDArray get(NDArray array, NDIndexFullPick fullPick) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray get(NDArray array, NDIndexFullTake fullTake) {
throw new UnsupportedOperationException("Not implemented");
}
/** {@inheritDoc} */
@Override
public NDArray get(NDArray array, NDIndexFullSlice fullSlice) {
array = manager.from(array);
int[] toSqueeze = fullSlice.getToSqueeze();
try (NDArray begin = manager.create(fullSlice.getMin());
NDArray end = manager.create(fullSlice.getMax());
NDArray step = manager.create(fullSlice.getStep())) {
NDArray result =
manager.opExecutor("StridedSlice")
.addInput(array)
.addInput(begin)
.addInput(end)
.addInput(step)
.buildSingletonOrThrow();
if (toSqueeze.length > 0) {
NDArray oldResult = result;
result = result.squeeze(toSqueeze);
oldResult.close();
}
return result;
}
}
/** {@inheritDoc} */
@Override
public void set(NDArray array, NDIndexFullSlice fullSlice, NDArray value) {
throw new UnsupportedOperationException("Tensor cannot be modified after creation");
}
/** {@inheritDoc} */
@Override
public void set(NDArray array, NDIndexFullSlice fullSlice, Number value) {
throw new UnsupportedOperationException("Tensor cannot be modified after creation");
}
}
|
0
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow/engine/TfNDManager.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tensorflow.engine;
import ai.djl.Device;
import ai.djl.engine.Engine;
import ai.djl.ndarray.BaseNDManager;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import ai.djl.tensorflow.engine.javacpp.JavacppUtils;
import ai.djl.util.Pair;
import org.tensorflow.internal.c_api.TFE_Context;
import org.tensorflow.internal.c_api.TFE_TensorHandle;
import org.tensorflow.internal.c_api.TF_Tensor;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.charset.Charset;
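/** {@code TfNDManager} is the TensorFlow engine implementation of {@link NDManager}. */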
@SuppressWarnings("PMD.UseTryWithResources")
public class TfNDManager extends BaseNDManager {
static final TfNDManager SYSTEM_MANAGER = new SystemManager();
private TfNDManager(NDManager parent, Device device) {
super(parent, device);
}
static TfNDManager getSystemManager() {
return SYSTEM_MANAGER;
}
/** {@inheritDoc} */
@Override
public ByteBuffer allocateDirect(int capacity) {
return ByteBuffer.allocateDirect(capacity).order(ByteOrder.nativeOrder());
}
/** {@inheritDoc} */
@Override
public TfNDArray from(NDArray array) {
if (array == null || array instanceof TfNDArray) {
return (TfNDArray) array;
}
TfNDArray result = create(array.toByteBuffer(), array.getShape(), array.getDataType());
result.setName(array.getName());
return result;
}
/** {@inheritDoc} */
@Override
public NDArray create(Shape shape, DataType dataType) {
if (dataType == DataType.STRING) {
throw new IllegalArgumentException(
"Use NDManager.create(String[], Charset, Shape) to create String NDArray.");
}
if (shape.dimension() == 0) {
// TensorFlow does not support an empty scalar (an empty NDArray with 0 dimensions),
// so initialize with the scalar 0 and cast to the requested type
return create(0f).toType(dataType, false);
}
TFE_TensorHandle handle =
JavacppUtils.createEmptyTFETensor(shape, dataType, getEagerSession(), device);
return new TfNDArray(this, handle);
}
/** {@inheritDoc} */
@Override
public TfNDArray create(Buffer data, Shape shape, DataType dataType) {
if (dataType == DataType.STRING) {
throw new IllegalArgumentException(
"Use NDManager.create(String[], Charset, Shape) to create String NDArray.");
}
int size = Math.toIntExact(shape.size());
BaseNDManager.validateBuffer(data, dataType, size);
if (data.isDirect() && data instanceof ByteBuffer) {
TFE_TensorHandle handle =
JavacppUtils.createTFETensorFromByteBuffer(
(ByteBuffer) data, shape, dataType, getEagerSession(), device);
return new TfNDArray(this, handle);
}
ByteBuffer buf = allocateDirect(size * dataType.getNumOfBytes());
copyBuffer(data, buf);
TFE_TensorHandle handle =
JavacppUtils.createTFETensorFromByteBuffer(
buf, shape, dataType, getEagerSession(), device);
return new TfNDArray(this, handle);
}
/** {@inheritDoc} */
@Override
public NDArray create(String[] data, Charset charset, Shape shape) {
ByteBuffer[] buf = new ByteBuffer[data.length];
for (int i = 0; i < data.length; ++i) {
buf[i] = ByteBuffer.wrap(data[i].getBytes(charset));
}
return createStringTensor(shape, buf);
}
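// Illustrative usage sketch (variable names are hypothetical): a 2-element string tensor can be
// created via manager.create(new String[] {"hello", "world"}, StandardCharsets.UTF_8, new Shape(2)).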
/**
* Creates a String {@link NDArray} based on the provided shape.
*
* @param shape the shape of the String NDArray
* @param data the flattened String array
* @return a new instance of {@code NDArray}
*/
public NDArray createStringTensor(Shape shape, ByteBuffer... data) {
Pair<TF_Tensor, TFE_TensorHandle> pair =
JavacppUtils.createStringTensor(shape.getShape(), data);
return new TfNDArray(this, pair.getValue(), pair.getKey());
}
/** {@inheritDoc} */
@Override
public final Engine getEngine() {
return Engine.getEngine(TfEngine.ENGINE_NAME);
}
/** {@inheritDoc} */
@Override
public NDArray zeros(Shape shape, DataType dataType) {
return full(shape, 0, dataType);
}
/** {@inheritDoc} */
@Override
public NDArray ones(Shape shape, DataType dataType) {
return full(shape, 1, dataType);
}
/** {@inheritDoc} */
@Override
public NDArray full(Shape shape, float value, DataType dataType) {
try (NDArray valueArr = create(value);
NDArray castedValueArr = valueArr.toType(dataType, false);
NDArray dimArr = create(shape.getShape())) {
return opExecutor("Fill")
.addInput(dimArr)
.addInput(castedValueArr)
.buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray arange(float start, float stop, float step, DataType dataType) {
if (stop <= start && step > 0) {
return create(new Shape(0), dataType);
}
try (NDArray startArr = create(start);
NDArray stopArr = create(stop);
NDArray stepArr = create(step);
NDArray castedStartArr = startArr.toType(dataType, false);
NDArray castedStopArr = stopArr.toType(dataType, false);
NDArray castedStepArr = stepArr.toType(dataType, false)) {
return opExecutor("Range")
.addInput(castedStartArr)
.addInput(castedStopArr)
.addInput(castedStepArr)
.buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray eye(int rows, int cols, int k, DataType dataType) {
try (NDArray ones = ones(new Shape(Math.min(rows, cols)), dataType);
NDArray kArr = create(k);
NDArray rowsArr = create(rows);
NDArray colsArr = create(cols);
NDArray zeros = zeros(new Shape(), dataType)) {
return opExecutor("MatrixDiagV3")
.addInput(ones)
.addInput(kArr)
.addInput(rowsArr)
.addInput(colsArr)
.addInput(zeros)
.buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray linspace(float start, float stop, int num, boolean endpoint) {
if (num < 0) {
throw new IllegalArgumentException("number of samples must be non-negative.");
}
if (num == 0) {
return create(new Shape(0));
}
if (!endpoint && num > 1) {
stop -= (int) ((stop - start) / num);
}
try (NDArray startArr = create(start);
NDArray stopArr = create(stop);
NDArray numArr = create(num)) {
return opExecutor("LinSpace")
.addInput(startArr)
.addInput(stopArr)
.addInput(numArr)
.buildSingletonOrThrow();
}
}
/** {@inheritDoc} */
@Override
public NDArray randomUniform(float low, float high, Shape shape, DataType dataType) {
if (DataType.STRING.equals(dataType)) {
throw new IllegalArgumentException("String data type is not supported!");
}
NDArray axes = create(shape.getShape());
TfOpExecutor opBuilder =
opExecutor("RandomUniform").addInput(axes).addParam("dtype", dataType);
Integer seed = getEngine().getSeed();
if (seed != null) {
// seed1 is graph-level seed
// set it to default graph seed used by tensorflow
// https://github.com/tensorflow/tensorflow/blob/85c8b2a817f95a3e979ecd1ed95bff1dc1335cff/tensorflow/python/framework/random_seed.py#L31
opBuilder.addParam("seed", 87654321);
// seed2 is op-level seed
opBuilder.addParam("seed2", seed);
}
try (NDArray array = opBuilder.buildSingletonOrThrow();
NDArray temp = array.mul(high - low)) {
return temp.add(low);
} finally {
axes.close();
}
}
/** {@inheritDoc} */
@Override
public NDArray randomNormal(float loc, float scale, Shape shape, DataType dataType) {
if (DataType.STRING.equals(dataType)) {
throw new IllegalArgumentException("String data type is not supported!");
}
NDArray axes = create(shape.getShape());
TfOpExecutor opBuilder =
opExecutor("RandomStandardNormal").addInput(axes).addParam("dtype", dataType);
Integer seed = getEngine().getSeed();
if (seed != null) {
// seed1 is graph-level seed
// set it to default graph seed used by tensorflow
// https://github.com/tensorflow/tensorflow/blob/85c8b2a817f95a3e979ecd1ed95bff1dc1335cff/tensorflow/python/framework/random_seed.py#L31
opBuilder.addParam("seed", 87654321);
opBuilder.addParam("seed2", seed);
}
try (NDArray array = opBuilder.buildSingletonOrThrow();
NDArray temp = array.mul(scale)) {
return temp.add(loc);
} finally {
axes.close();
}
}
/** {@inheritDoc} */
@Override
public NDArray truncatedNormal(float loc, float scale, Shape shape, DataType dataType) {
if (DataType.STRING.equals(dataType)) {
throw new IllegalArgumentException("String data type is not supported!");
}
NDArray axes = create(shape.getShape());
TfOpExecutor opBuilder =
opExecutor("TruncatedNormal").addInput(axes).addParam("dtype", dataType);
Integer seed = getEngine().getSeed();
if (seed != null) {
// seed1 is graph-level seed
// set it to default graph seed used by tensorflow
// https://github.com/tensorflow/tensorflow/blob/85c8b2a817f95a3e979ecd1ed95bff1dc1335cff/tensorflow/python/framework/random_seed.py#L31
opBuilder.addParam("seed", 87654321);
opBuilder.addParam("seed2", seed);
}
try (NDArray array = opBuilder.buildSingletonOrThrow();
NDArray temp = array.mul(scale)) {
return temp.add(loc);
} finally {
axes.close();
}
}
/** {@inheritDoc} */
@Override
public TfNDManager newSubManager(Device device) {
TfNDManager manager = new TfNDManager(this, device);
attachInternal(manager.uid, manager);
return manager;
}
public TFE_Context getEagerSession() {
return ((TfEngine) getEngine()).getEagerSession();
}
public TfOpExecutor opExecutor(String operation) {
return new TfOpExecutor(this, getEagerSession(), operation);
}
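// Illustrative sketch of the eager-op pattern that opExecutor() enables, mirroring full() above:
//   try (NDArray dims = create(shape.getShape()); NDArray value = create(1f)) {
//       return opExecutor("Fill").addInput(dims).addInput(value).buildSingletonOrThrow();
//   }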
private static final class SystemManager extends TfNDManager implements SystemNDManager {
SystemManager() {
super(null, null);
}
}
}
|
0
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow/engine/TfOpExecutor.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tensorflow.engine;
import ai.djl.Device;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.types.DataType;
import ai.djl.tensorflow.engine.javacpp.JavacppUtils;
import ai.djl.util.Preconditions;
import org.bytedeco.javacpp.BytePointer;
import org.bytedeco.javacpp.IntPointer;
import org.bytedeco.javacpp.Pointer;
import org.bytedeco.javacpp.PointerPointer;
import org.bytedeco.javacpp.PointerScope;
import org.tensorflow.internal.c_api.TFE_Context;
import org.tensorflow.internal.c_api.TFE_Op;
import org.tensorflow.internal.c_api.TFE_TensorHandle;
import org.tensorflow.internal.c_api.TF_Status;
import org.tensorflow.internal.c_api.global.tensorflow;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.concurrent.atomic.AtomicBoolean;
/** A {@code TfOpExecutor} for executing TensorFlow operations eagerly. */
final class TfOpExecutor implements AutoCloseable {
private TfNDManager manager;
private TFE_Op opHandle;
private AtomicBoolean closed;
@SuppressWarnings({"unchecked", "try"})
TfOpExecutor(TfNDManager manager, TFE_Context eagerSessionHandle, String operation) {
if (manager == null || eagerSessionHandle == null) {
throw new IllegalArgumentException("eagerSessionHandle cannot be null");
}
this.manager = manager;
closed = new AtomicBoolean(false);
try (PointerScope ignore = new PointerScope()) {
TF_Status status = TF_Status.newStatus();
opHandle = TFE_Op.newOp(eagerSessionHandle, operation, status);
status.throwExceptionIfNotOK();
// keep the native pointer alive outside of the scope
opHandle.retainReference();
}
setDevice(manager.getDevice());
}
public NDArray[] build(int numOutputs) {
TFE_TensorHandle[] handles = buildRawPointer(numOutputs);
NDArray[] outputs = new NDArray[handles.length];
for (int i = 0; i < handles.length; ++i) {
// attach the TfNDArray along with pointer to manager
outputs[i] = new TfNDArray(manager, handles[i]);
}
return outputs;
}
public NDArray buildSingletonOrThrow() {
TFE_TensorHandle[] handles = buildRawPointer(1);
try {
Preconditions.checkArgument(
handles.length == 1,
"The expected size of outputs is 1 but got " + handles.length);
} catch (IllegalArgumentException e) {
Arrays.stream(handles).forEach(Pointer::close);
throw e;
}
return new TfNDArray(manager, handles[0]);
}
// Please make sure you close the outputs manually or attach them to an NDManager.
@SuppressWarnings({"unchecked", "try"})
public TFE_TensorHandle[] buildRawPointer(int numOutputs) {
try (PointerScope ignore = new PointerScope()) {
IntPointer numReturnValues = new IntPointer(1).put(numOutputs);
PointerPointer<TFE_TensorHandle> returnValues = new PointerPointer<>(numOutputs);
TF_Status status = TF_Status.newStatus();
// TODO(improvement): check if TFE_Execute can be called twice,
// and evaluate whether it is worth calling TFE_Execute twice to get the number of outputs
// at the cost of performance
tensorflow.TFE_Execute(opHandle, returnValues, numReturnValues, status);
status.throwExceptionIfNotOK();
TFE_TensorHandle[] results = new TFE_TensorHandle[numReturnValues.get()];
for (int i = 0; i < results.length; ++i) {
results[i] =
returnValues
.get(TFE_TensorHandle.class, i)
.withDeallocator()
.retainReference();
}
return results;
} finally {
close();
}
}
@SuppressWarnings({"unchecked", "try"})
public TfOpExecutor addInput(NDArray input) {
try (PointerScope ignore = new PointerScope()) {
TF_Status status = TF_Status.newStatus();
tensorflow.TFE_OpAddInput(opHandle, ((TfNDArray) input).getHandle(), status);
status.throwExceptionIfNotOK();
}
return this;
}
@SuppressWarnings({"unchecked", "try"})
public TfOpExecutor addInputList(NDArray[] inputs) {
TFE_TensorHandle[] inputHandles =
Arrays.stream(inputs)
.map(array -> ((TfNDArray) array).getHandle())
.toArray(TFE_TensorHandle[]::new);
try (PointerScope ignore = new PointerScope()) {
PointerPointer<TFE_TensorHandle> tensorPointers =
new PointerPointer<>(inputHandles.length);
for (int i = 0; i < inputHandles.length; ++i) {
tensorPointers.put(i, inputHandles[i]);
}
TF_Status status = TF_Status.newStatus();
tensorflow.TFE_OpAddInputList(opHandle, tensorPointers, inputHandles.length, status);
status.throwExceptionIfNotOK();
}
return this;
}
@SuppressWarnings({"unchecked", "try"})
public TfOpExecutor setDevice(Device device) {
try (PointerScope ignore = new PointerScope()) {
String deviceStr = JavacppUtils.toTfDevice(device);
TF_Status status = TF_Status.newStatus();
tensorflow.TFE_OpSetDevice(opHandle, deviceStr, status);
status.throwExceptionIfNotOK();
return this;
} catch (Exception e) {
close();
throw e;
}
}
@SuppressWarnings({"unchecked", "try"})
public TfOpExecutor addParam(String name, String value) {
byte[] bytes = value.getBytes(StandardCharsets.UTF_8);
try (PointerScope ignore = new PointerScope()) {
tensorflow.TFE_OpSetAttrString(opHandle, name, new BytePointer(bytes), bytes.length);
}
return this;
}
public TfOpExecutor addParam(String name, long value) {
tensorflow.TFE_OpSetAttrInt(opHandle, name, value);
return this;
}
public TfOpExecutor addParam(String name, float value) {
tensorflow.TFE_OpSetAttrFloat(opHandle, name, value);
return this;
}
public TfOpExecutor addParam(String name, boolean value) {
tensorflow.TFE_OpSetAttrBool(opHandle, name, (byte) (value ? 1 : 0));
return this;
}
public TfOpExecutor addParam(String name, DataType dataType) {
tensorflow.TFE_OpSetAttrType(opHandle, name, TfDataType.toTf(dataType));
return this;
}
public TfOpExecutor addParam(String name, long[] values) {
tensorflow.TFE_OpSetAttrIntList(opHandle, name, values, values.length);
return this;
}
@Override
public void close() {
if (closed.getAndSet(true) || opHandle == null || opHandle.isNull()) {
return;
}
opHandle.close();
}
}
|
0
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow/engine/TfSymbolBlock.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tensorflow.engine;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.AbstractSymbolBlock;
import ai.djl.nn.ParameterList;
import ai.djl.nn.SymbolBlock;
import ai.djl.tensorflow.engine.javacpp.JavacppUtils;
import ai.djl.training.ParameterStore;
import ai.djl.util.Pair;
import ai.djl.util.PairList;
import org.bytedeco.javacpp.Pointer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.tensorflow.internal.c_api.TF_Graph;
import org.tensorflow.internal.c_api.TF_Operation;
import org.tensorflow.internal.c_api.TF_Session;
import org.tensorflow.internal.c_api.TF_Tensor;
import org.tensorflow.proto.MetaGraphDef;
import org.tensorflow.proto.SignatureDef;
import org.tensorflow.proto.TensorInfo;
import org.tensorflow.proto.TensorShapeProto;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
/** {@code TfSymbolBlock} is the TensorFlow implementation of {@link SymbolBlock}. */
public class TfSymbolBlock extends AbstractSymbolBlock implements AutoCloseable {
private static final Logger logger = LoggerFactory.getLogger(TfSymbolBlock.class);
private SavedModelBundle bundle;
private TF_Graph graphHandle;
private TF_Session sessionHandle;
private SignatureDef servingDefault;
private PairList<String, Shape> inputDescriptions;
private PairList<String, Shape> outputDescriptions;
// cached input & output information
private TF_Operation[] inputOpHandles;
private int[] inputOpIndices;
private TF_Operation[] outputOpHandles;
private int[] outputOpIndices;
private TF_Operation[] targetOpHandles;
public TfSymbolBlock(SavedModelBundle bundle, String signatureDefKey) {
this.bundle = bundle;
graphHandle = bundle.getGraph();
sessionHandle = bundle.getSession();
targetOpHandles = bundle.getTargetOpHandles();
MetaGraphDef metaGraphDef = bundle.getMetaGraphDef();
Map<String, SignatureDef> signatureDefMap = metaGraphDef.getSignatureDefMap();
if (signatureDefMap.containsKey(signatureDefKey)) {
servingDefault = signatureDefMap.get(signatureDefKey);
} else {
Set<String> keys = signatureDefMap.keySet();
logger.warn(
"SignatureDefKey: {} not found in Saved Model Bundle.Available keys: {} Please"
+ " use .optOption(\"SignatureDefKey\", \"value\") with Criteria.builder to"
+ " load the model.Normally the value is \"default\" for TF1.x models and"
+ " \"serving_default\" for TF2.x models. Refer to:"
+ " https://www.tensorflow.org/guide/saved_modelLoading the model using"
+ " next available key.",
signatureDefKey,
String.join(" ", keys));
servingDefault = signatureDefMap.get(keys.iterator().next());
}
describeInput();
describeOutput();
}
/**
* Returns the {@code SavedModelBundle} for the model.
*
* @return the {@code SavedModelBundle} for the model
*/
public SavedModelBundle getSavedModelBundle() {
return bundle;
}
/** {@inheritDoc} */
@Override
public void removeLastBlock() {
throw new UnsupportedOperationException("Not supported for TensorFlow Engine");
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
TF_Tensor[] inputTensorHandles = new TF_Tensor[inputDescriptions.size()];
for (int i = 0; i < inputDescriptions.size(); i++) {
String inputName = inputDescriptions.get(i).getKey();
TfNDArray currentNDArray = (TfNDArray) inputs.get(i);
// If no name is specified in the input array, or the input order matches
// inputDescriptions, use the default order from the translator.
String name = currentNDArray.getName();
if (name == null || name.isEmpty() || name.equals(inputName)) {
inputTensorHandles[i] = JavacppUtils.resolveTFETensor(currentNDArray.getHandle());
continue;
}
// for loop to search the right NDArray
for (NDArray array : inputs) {
if (array.getName().equals(inputName)) {
inputTensorHandles[i] =
JavacppUtils.resolveTFETensor(((TfNDArray) array).getHandle());
}
}
}
TF_Tensor[] outputs =
JavacppUtils.runSession(
sessionHandle,
null,
inputTensorHandles,
inputOpHandles,
inputOpIndices,
outputOpHandles,
outputOpIndices,
targetOpHandles);
TfNDManager tfNDManager = (TfNDManager) inputs.head().getManager();
NDList resultNDList = new NDList();
for (int i = 0; i < outputs.length; i++) {
TfNDArray array = new TfNDArray(tfNDManager, JavacppUtils.createTFETensor(outputs[i]));
array.setName(outputDescriptions.get(i).getKey());
resultNDList.add(array);
}
// free all unused native resources
Arrays.stream(inputTensorHandles).forEach(Pointer::close);
Arrays.stream(outputs).forEach(Pointer::close);
return resultNDList;
}
/** {@inheritDoc} */
@Override
public void initialize(NDManager manager, DataType dataType, Shape... inputShapes) {
throw new IllegalStateException("TfSymbolBlock can't be initialized");
}
/** {@inheritDoc} */
@Override
public boolean isInitialized() {
return bundle != null;
}
/** {@inheritDoc} */
@Override
public final PairList<String, Shape> describeInput() {
if (inputDescriptions == null) {
inputDescriptions = new PairList<>();
Map<String, TensorInfo> inputsMap = servingDefault.getInputsMap();
List<String> keys = new ArrayList<>(inputsMap.keySet());
Collections.sort(keys);
inputOpHandles = new TF_Operation[keys.size()];
inputOpIndices = new int[keys.size()];
for (int i = 0; i < keys.size(); ++i) {
TensorInfo tensorInfo = inputsMap.get(keys.get(i));
TensorShapeProto shapeProto = tensorInfo.getTensorShape();
inputDescriptions.add(
keys.get(i),
new Shape(
shapeProto.getDimList().stream()
.mapToLong(TensorShapeProto.Dim::getSize)
.toArray()));
Pair<TF_Operation, Integer> pair =
JavacppUtils.getGraphOperationByName(graphHandle, tensorInfo.getName());
inputOpHandles[i] = pair.getKey();
inputOpIndices[i] = pair.getValue();
}
}
return inputDescriptions;
}
/** {@inheritDoc} */
@Override
public ParameterList getDirectParameters() {
throw new UnsupportedOperationException("Not yet supported");
}
/** {@inheritDoc} */
@Override
public final PairList<String, Shape> describeOutput() {
if (outputDescriptions == null) {
outputDescriptions = new PairList<>();
Map<String, TensorInfo> outputsMap = servingDefault.getOutputsMap();
List<String> keys = new ArrayList<>(outputsMap.keySet());
Collections.sort(keys);
List<TF_Operation> outputOpHandlesList = new ArrayList<>();
List<Integer> outputOpIndicesList = new ArrayList<>();
for (String key : keys) {
TensorInfo tensorInfo = outputsMap.get(key);
TensorShapeProto shapeProto = tensorInfo.getTensorShape();
outputDescriptions.add(
key,
new Shape(
shapeProto.getDimList().stream()
.mapToLong(TensorShapeProto.Dim::getSize)
.toArray()));
Pair<TF_Operation, Integer> pair =
JavacppUtils.getGraphOperationByName(graphHandle, tensorInfo.getName());
outputOpHandlesList.add(pair.getKey());
outputOpIndicesList.add(pair.getValue());
}
outputOpHandles = outputOpHandlesList.toArray(new TF_Operation[0]);
outputOpIndices = outputOpIndicesList.stream().mapToInt(i -> i).toArray();
}
return outputDescriptions;
}
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(Shape[] inputShapes) {
return new Shape[0];
}
/** {@inheritDoc} */
@Override
public void close() {
if (bundle != null) {
bundle.close();
}
// free cached input & output native resources
Arrays.stream(inputOpHandles).forEach(Pointer::close);
Arrays.stream(outputOpHandles).forEach(Pointer::close);
Arrays.stream(targetOpHandles).forEach(Pointer::close);
}
}
|
0
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow/engine/package-info.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
/**
* Contains implementations of interfaces within the DJL API for the TensorFlow Engine.
*
* @see ai.djl.tensorflow.engine.TfEngine
*/
package ai.djl.tensorflow.engine;
|
0
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow/engine
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow/engine/javacpp/JavacppUtils.java
|
/*
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tensorflow.engine.javacpp;
import ai.djl.Device;
import ai.djl.engine.EngineException;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import ai.djl.tensorflow.engine.SavedModelBundle;
import ai.djl.tensorflow.engine.TfDataType;
import ai.djl.util.Pair;
import ai.djl.util.cuda.CudaUtils;
import com.google.protobuf.InvalidProtocolBufferException;
import org.bytedeco.javacpp.BytePointer;
import org.bytedeco.javacpp.Loader;
import org.bytedeco.javacpp.Pointer;
import org.bytedeco.javacpp.PointerPointer;
import org.bytedeco.javacpp.PointerScope;
import org.tensorflow.exceptions.TensorFlowException;
import org.tensorflow.internal.c_api.AbstractTFE_Context;
import org.tensorflow.internal.c_api.AbstractTFE_TensorHandle;
import org.tensorflow.internal.c_api.AbstractTF_Tensor;
import org.tensorflow.internal.c_api.TFE_Context;
import org.tensorflow.internal.c_api.TFE_ContextOptions;
import org.tensorflow.internal.c_api.TFE_TensorHandle;
import org.tensorflow.internal.c_api.TF_Buffer;
import org.tensorflow.internal.c_api.TF_Graph;
import org.tensorflow.internal.c_api.TF_Operation;
import org.tensorflow.internal.c_api.TF_Output;
import org.tensorflow.internal.c_api.TF_Session;
import org.tensorflow.internal.c_api.TF_SessionOptions;
import org.tensorflow.internal.c_api.TF_Status;
import org.tensorflow.internal.c_api.TF_TString;
import org.tensorflow.internal.c_api.TF_Tensor;
import org.tensorflow.internal.c_api.global.tensorflow;
import org.tensorflow.proto.ConfigProto;
import org.tensorflow.proto.GPUOptions;
import org.tensorflow.proto.MetaGraphDef;
import org.tensorflow.proto.RunOptions;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/** A class containing utilities to interact with the TensorFlow Engine's Javacpp layer. */
public final class JavacppUtils {
private static final Pattern DEVICE_PATTERN = Pattern.compile(".*device:([A-Z]PU):(\\d+)");
private JavacppUtils() {}
@SuppressWarnings({"unchecked", "try"})
public static SavedModelBundle loadSavedModelBundle(
String exportDir, String[] tags, ConfigProto config, RunOptions runOptions) {
try (PointerScope ignored = new PointerScope()) {
TF_Status status = TF_Status.newStatus();
// allocate parameters for TF_LoadSessionFromSavedModel
TF_SessionOptions opts = TF_SessionOptions.newSessionOptions();
if (config != null) {
BytePointer configBytes = new BytePointer(config.toByteArray());
tensorflow.TF_SetConfig(opts, configBytes, configBytes.capacity(), status);
status.throwExceptionIfNotOK();
}
TF_Buffer runOpts = TF_Buffer.newBufferFromString(runOptions);
// load the session
TF_Graph graphHandle = TF_Graph.newGraph();
TF_Buffer metaGraphDef = TF_Buffer.newBuffer();
TF_Session sessionHandle =
TF_Session.loadSessionFromSavedModel(
opts, runOpts, exportDir, tags, graphHandle, metaGraphDef, status);
status.throwExceptionIfNotOK();
// handle the result
SavedModelBundle bundle;
try {
bundle =
new SavedModelBundle(
graphHandle,
sessionHandle,
MetaGraphDef.parseFrom(metaGraphDef.dataAsByteBuffer()));
} catch (InvalidProtocolBufferException e) {
throw new TensorFlowException("Cannot parse MetaGraphDef protocol buffer", e);
}
graphHandle.retainReference();
sessionHandle.retainReference();
return bundle;
}
}
private static TF_Operation getGraphOpByName(TF_Graph graphHandle, String operation) {
TF_Operation opHandle;
synchronized (graphHandle) {
opHandle = tensorflow.TF_GraphOperationByName(graphHandle, operation);
}
if (opHandle == null || opHandle.isNull()) {
throw new IllegalArgumentException(
"No Operation named [" + operation + "] in the Graph");
}
return opHandle;
}
public static Pair<TF_Operation, Integer> getGraphOperationByName(
TF_Graph graphHandle, String operation) {
int colon = operation.lastIndexOf(':');
if (colon == -1 || colon == operation.length() - 1) {
return new Pair<>(getGraphOpByName(graphHandle, operation), 0);
}
try {
String op = operation.substring(0, colon);
int index = Integer.parseInt(operation.substring(colon + 1));
return new Pair<>(getGraphOpByName(graphHandle, op), index);
} catch (NumberFormatException e) {
return new Pair<>(getGraphOpByName(graphHandle, operation), 0);
}
}
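// For example, an operation name like "serving_default_images:0" (hypothetical) resolves to the op
// "serving_default_images" with output index 0; a name without a ":<index>" suffix defaults to index 0.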
@SuppressWarnings({"unchecked", "try"})
public static TF_Tensor[] runSession(
TF_Session handle,
RunOptions runOptions,
TF_Tensor[] inputTensorHandles,
TF_Operation[] inputOpHandles,
int[] inputOpIndices,
TF_Operation[] outputOpHandles,
int[] outputOpIndices,
TF_Operation[] targetOpHandles) {
int numInputs = inputTensorHandles.length;
int numOutputs = outputOpHandles.length;
int numTargets = targetOpHandles.length;
try (PointerScope ignored = new PointerScope()) {
// TODO: check with sig-jvm if TF_Output here is freed
TF_Output inputs = new TF_Output(numInputs);
PointerPointer<TF_Tensor> inputValues = new PointerPointer<>(numInputs);
TF_Output outputs = new TF_Output(numOutputs);
PointerPointer<TF_Tensor> outputValues = new PointerPointer<>(numOutputs);
PointerPointer<TF_Operation> targets = new PointerPointer<>(numTargets);
// set input
for (int i = 0; i < numInputs; ++i) {
inputValues.put(i, inputTensorHandles[i]);
}
// set TF_Output for inputs
for (int i = 0; i < numInputs; ++i) {
inputs.position(i).oper(inputOpHandles[i]).index(inputOpIndices[i]);
}
inputs.position(0);
// set TF_Output for outputs
for (int i = 0; i < numOutputs; ++i) {
outputs.position(i).oper(outputOpHandles[i]).index(outputOpIndices[i]);
}
outputs.position(0);
// set target
for (int i = 0; i < numTargets; ++i) {
targets.put(i, targetOpHandles[i]);
}
TF_Status status = TF_Status.newStatus();
TF_Buffer runOpts = TF_Buffer.newBufferFromString(runOptions);
tensorflow.TF_SessionRun(
handle,
runOpts,
inputs,
inputValues,
numInputs,
outputs,
outputValues,
numOutputs,
targets,
numTargets,
null,
status);
status.throwExceptionIfNotOK();
TF_Tensor[] ret = new TF_Tensor[numOutputs];
for (int i = 0; i < numOutputs; ++i) {
ret[i] = outputValues.get(TF_Tensor.class, i).withDeallocator().retainReference();
}
return ret;
}
}
@SuppressWarnings({"unchecked", "try"})
public static TFE_Context createEagerSession(
boolean async, int devicePlacementPolicy, ConfigProto config) {
try (PointerScope ignored = new PointerScope()) {
TFE_ContextOptions opts = TFE_ContextOptions.newContextOptions();
TF_Status status = TF_Status.newStatus();
if (config != null) {
BytePointer configBytes = new BytePointer(config.toByteArray());
tensorflow.TFE_ContextOptionsSetConfig(
opts, configBytes, configBytes.capacity(), status);
status.throwExceptionIfNotOK();
}
tensorflow.TFE_ContextOptionsSetAsync(opts, (byte) (async ? 1 : 0));
tensorflow.TFE_ContextOptionsSetDevicePlacementPolicy(opts, devicePlacementPolicy);
TFE_Context context = AbstractTFE_Context.newContext(opts, status);
status.throwExceptionIfNotOK();
return context.retainReference();
}
}
@SuppressWarnings({"unchecked", "try"})
public static Device getDevice(TFE_TensorHandle handle) {
try (PointerScope ignored = new PointerScope()) {
TF_Status status = TF_Status.newStatus();
BytePointer pointer = tensorflow.TFE_TensorHandleDeviceName(handle, status);
String device = new String(pointer.getStringBytes(), StandardCharsets.UTF_8);
return fromTfDevice(device);
}
}
public static DataType getDataType(TFE_TensorHandle handle) {
return TfDataType.fromTf(tensorflow.TFE_TensorHandleDataType(handle));
}
@SuppressWarnings({"unchecked", "try"})
public static Shape getShape(TFE_TensorHandle handle) {
try (PointerScope ignored = new PointerScope()) {
TF_Status status = TF_Status.newStatus();
int numDims = tensorflow.TFE_TensorHandleNumDims(handle, status);
status.throwExceptionIfNotOK();
long[] shapeArr = new long[numDims];
for (int i = 0; i < numDims; i++) {
shapeArr[i] = tensorflow.TFE_TensorHandleDim(handle, i, status);
status.throwExceptionIfNotOK();
}
return new Shape(shapeArr);
}
}
private static TF_Tensor createEmptyTFTensor(Shape shape, DataType dataType) {
int dType = TfDataType.toTf(dataType);
long[] dims = shape.getShape();
long numBytes = dataType.getNumOfBytes() * shape.size();
TF_Tensor tensor = AbstractTF_Tensor.allocateTensor(dType, dims, numBytes);
if (tensor == null || tensor.isNull()) {
throw new IllegalStateException("unable to allocate memory for the Tensor");
}
return tensor;
}
@SuppressWarnings({"unchecked", "try"})
public static TFE_TensorHandle createEmptyTFETensor(
Shape shape, DataType dataType, TFE_Context eagerSessionHandle, Device device) {
try (PointerScope ignored = new PointerScope()) {
TF_Tensor tensor = createEmptyTFTensor(shape, dataType);
TF_Status status = TF_Status.newStatus();
TFE_TensorHandle handle = AbstractTFE_TensorHandle.newTensor(tensor, status);
status.throwExceptionIfNotOK();
if (device.isGpu()) {
return toDevice(handle, eagerSessionHandle, device);
}
return handle.retainReference();
}
}
@SuppressWarnings({"unchecked", "try"})
public static Pair<TF_Tensor, TFE_TensorHandle> createStringTensor(
long[] dims, ByteBuffer[] src) {
int dType = TfDataType.toTf(DataType.STRING);
long numBytes = (long) Loader.sizeof(TF_TString.class) * src.length;
try (PointerScope ignored = new PointerScope()) {
/*
* A string tensor allocates separate TF_TString memory. The TF_TString will
* be deleted when the string tensor is closed. We have to track the TF_TString
* memory ourselves and make sure the TF_TString lifecycle aligns with the
* TFE_TensorHandle. TF_Tensor already handles TF_TString automatically, so we
* can just keep a TF_Tensor reference in TfNDArray.
*/
TF_Tensor tensor = AbstractTF_Tensor.allocateTensor(dType, dims, numBytes);
Pointer pointer = tensorflow.TF_TensorData(tensor).capacity(numBytes);
TF_TString data = new TF_TString(pointer).capacity(pointer.position() + src.length);
for (int i = 0; i < src.length; ++i) {
TF_TString tstring = data.getPointer(i);
tensorflow.TF_TString_Copy(tstring, new BytePointer(src[i]), src[i].remaining());
}
TF_Status status = TF_Status.newStatus();
TFE_TensorHandle handle = AbstractTFE_TensorHandle.newTensor(tensor, status);
status.throwExceptionIfNotOK();
handle.retainReference();
tensor.retainReference();
return new Pair<>(tensor, handle);
}
}
@SuppressWarnings({"unchecked", "try"})
public static TFE_TensorHandle createTFETensorFromByteBuffer(
ByteBuffer buf,
Shape shape,
DataType dataType,
TFE_Context eagerSessionHandle,
Device device) {
int dType = TfDataType.toTf(dataType);
long[] dims = shape.getShape();
long numBytes;
if (dataType == DataType.STRING) {
numBytes = buf.remaining() + 1;
} else {
numBytes = shape.size() * dataType.getNumOfBytes();
}
try (PointerScope ignored = new PointerScope()) {
TF_Tensor tensor = AbstractTF_Tensor.allocateTensor(dType, dims, numBytes);
// get data pointer in native engine
Pointer pointer = tensorflow.TF_TensorData(tensor).capacity(numBytes);
pointer.asByteBuffer().put(buf);
TF_Status status = TF_Status.newStatus();
TFE_TensorHandle handle = AbstractTFE_TensorHandle.newTensor(tensor, status);
status.throwExceptionIfNotOK();
if (device.isGpu()) {
return toDevice(handle, eagerSessionHandle, device);
}
return handle.retainReference();
}
}
@SuppressWarnings({"unchecked", "try"})
public static TF_Tensor resolveTFETensor(TFE_TensorHandle handle) {
try (PointerScope ignored = new PointerScope()) {
TF_Status status = TF_Status.newStatus();
TF_Tensor tensor = tensorflow.TFE_TensorHandleResolve(handle, status).withDeallocator();
status.throwExceptionIfNotOK();
return tensor.retainReference();
}
}
@SuppressWarnings({"unchecked", "try"})
public static TFE_TensorHandle createTFETensor(TF_Tensor handle) {
try (PointerScope ignored = new PointerScope()) {
TF_Status status = TF_Status.newStatus();
TFE_TensorHandle tensor = AbstractTFE_TensorHandle.newTensor(handle, status);
status.throwExceptionIfNotOK();
return tensor.retainReference();
}
}
@SuppressWarnings({"unchecked", "try"})
public static String[] getString(TFE_TensorHandle handle, int count, Charset charset) {
try (PointerScope ignored = new PointerScope()) {
// convert to TF_Tensor
TF_Status status = TF_Status.newStatus();
// should not add .withDeallocator() here, otherwise the string data will be destroyed
TF_Tensor tensor = tensorflow.TFE_TensorHandleResolve(handle, status);
status.throwExceptionIfNotOK();
long tensorSize = tensorflow.TF_TensorByteSize(tensor);
Pointer pointer = tensorflow.TF_TensorData(tensor).capacity(tensorSize);
TF_TString data = new TF_TString(pointer).capacity(pointer.position() + count);
String[] ret = new String[count];
for (int i = 0; i < count; ++i) {
TF_TString tstring = data.getPointer(i);
long size = tensorflow.TF_TString_GetSize(tstring);
BytePointer bp = tensorflow.TF_TString_GetDataPointer(tstring).capacity(size);
ret[i] = bp.getString(charset);
}
tensorflow.TF_DeleteTensor(tensor); // manually delete tensor
return ret;
}
}
@SuppressWarnings({"unchecked", "try"})
public static void setByteBuffer(TFE_TensorHandle handle, ByteBuffer data) {
try (PointerScope ignored = new PointerScope()) {
// convert to TF_Tensor
TF_Status status = TF_Status.newStatus();
TF_Tensor tensor = tensorflow.TFE_TensorHandleResolve(handle, status).withDeallocator();
status.throwExceptionIfNotOK();
Pointer pointer =
tensorflow.TF_TensorData(tensor).capacity(tensorflow.TF_TensorByteSize(tensor));
pointer.asByteBuffer().put(data);
}
}
@SuppressWarnings({"unchecked", "try"})
public static ByteBuffer getByteBuffer(TFE_TensorHandle handle) {
try (PointerScope ignored = new PointerScope()) {
// convert to TF_Tensor
TF_Status status = TF_Status.newStatus();
TF_Tensor tensor = tensorflow.TFE_TensorHandleResolve(handle, status).withDeallocator();
status.throwExceptionIfNotOK();
Pointer pointer =
tensorflow.TF_TensorData(tensor).capacity(tensorflow.TF_TensorByteSize(tensor));
ByteBuffer buf = pointer.asByteBuffer();
// do the copy since we should make sure the returned ByteBuffer is still valid after
// the tensor is deleted
ByteBuffer ret = ByteBuffer.allocate(buf.capacity());
buf.rewind();
ret.put(buf);
ret.flip();
return ret.order(ByteOrder.nativeOrder());
}
}
@SuppressWarnings({"unchecked", "try"})
public static TFE_TensorHandle toDevice(
TFE_TensorHandle handle, TFE_Context eagerSessionHandle, Device device) {
try (PointerScope ignored = new PointerScope()) {
String deviceName = toTfDevice(device);
TF_Status status = TF_Status.newStatus();
TFE_TensorHandle newHandle =
tensorflow.TFE_TensorHandleCopyToDevice(
handle, eagerSessionHandle, deviceName, status);
status.throwExceptionIfNotOK();
// C API does not have deallocator by default
newHandle.withDeallocator();
return newHandle.retainReference();
}
}
public static ConfigProto getSessionConfig() {
Integer interop = Integer.getInteger("ai.djl.tensorflow.num_interop_threads");
Integer intraop = Integer.getInteger("ai.djl.tensorflow.num_intraop_threads");
ConfigProto.Builder configBuilder = ConfigProto.newBuilder();
if (interop != null) {
configBuilder.setInterOpParallelismThreads(interop);
}
if (intraop != null) {
configBuilder.setIntraOpParallelismThreads(intraop);
}
int gpuCount = CudaUtils.getGpuCount();
if (gpuCount > 0) {
StringBuilder sb = new StringBuilder("0");
for (int i = 1; i < gpuCount; ++i) {
sb.append(',').append(i);
}
GPUOptions gpuOptions =
GPUOptions.newBuilder().setVisibleDeviceList(sb.toString()).build();
configBuilder.setGpuOptions(gpuOptions);
configBuilder.setAllowSoftPlacement(true);
if (Boolean.getBoolean("ai.djl.tensorflow.debug")) {
configBuilder.setLogDevicePlacement(true);
}
}
return configBuilder.build();
}
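// The thread settings above can be supplied as JVM system properties, e.g. (illustrative values):
//   -Dai.djl.tensorflow.num_interop_threads=2 -Dai.djl.tensorflow.num_intraop_threads=4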
public static Device fromTfDevice(String device) {
Matcher m = DEVICE_PATTERN.matcher(device);
if (m.matches()) {
if ("CPU".equals(m.group(1))) {
return Device.cpu();
} else if ("GPU".equals(m.group(1))) {
return Device.of(Device.Type.GPU, Integer.parseInt(m.group(2)));
}
}
throw new EngineException("Unknown device type to TensorFlow Engine: " + device);
}
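// e.g. a device string such as "/job:localhost/replica:0/task:0/device:GPU:0" (illustrative)
// matches DEVICE_PATTERN and maps to the GPU device with id 0.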
public static String toTfDevice(Device device) {
if (device.getDeviceType().equals(Device.Type.CPU)) {
return "/device:CPU:0";
} else if (device.getDeviceType().equals(Device.Type.GPU)) {
return "/device:GPU:" + device.getDeviceId();
} else {
throw new EngineException("Unknown device type to TensorFlow Engine: " + device);
}
}
}
|
0
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow/engine
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow/engine/javacpp/LibUtils.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tensorflow.engine.javacpp;
import ai.djl.util.ClassLoaderUtils;
import ai.djl.util.Platform;
import ai.djl.util.Utils;
import org.bytedeco.javacpp.Loader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.zip.GZIPInputStream;
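/** Utilities for finding, downloading and loading the TensorFlow native library. */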
@SuppressWarnings("MissingJavadocMethod")
public final class LibUtils {
private static final Logger logger = LoggerFactory.getLogger(LibUtils.class);
private static final String LIB_NAME = "jnitensorflow";
private static final Pattern VERSION_PATTERN =
Pattern.compile("(\\d+\\.\\d+\\.\\d+(-[a-z]+)?)(-SNAPSHOT)?(-\\d+)?");
private LibUtils() {}
public static void loadLibrary() {
String libName = getLibName();
if (libName != null) {
logger.debug("Loading TensorFlow library from: {}", libName);
String path = new File(libName).getParentFile().toString();
System.setProperty("org.bytedeco.javacpp.platform.preloadpath", path);
// workaround javacpp physical memory check bug
System.setProperty("org.bytedeco.javacpp.maxBytes", "0");
System.setProperty("org.bytedeco.javacpp.maxPhysicalBytes", "0");
// https://github.com/deepjavalibrary/djl/issues/2318
Loader.loadProperties(true);
}
// defer to tensorflow-core-api to handle loading native library.
}
public static String getLibName() {
String libName = LibUtils.findOverrideLibrary();
if (libName == null) {
libName = LibUtils.findLibraryInClasspath();
}
return libName;
}
private static String findOverrideLibrary() {
String libPath = Utils.getEnvOrSystemProperty("TENSORFLOW_LIBRARY_PATH");
if (libPath != null) {
String libName = findLibraryInPath(libPath);
if (libName != null) {
return libName;
}
}
libPath = System.getProperty("java.library.path");
if (libPath != null) {
return findLibraryInPath(libPath);
}
return null;
}
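// Illustrative override: set the TENSORFLOW_LIBRARY_PATH environment variable or system property to
// a directory such as /opt/tensorflow/lib (hypothetical path) so findLibraryInPath() searches it first.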
private static synchronized String findLibraryInClasspath() {
// TensorFlow doesn't support native library override
Platform platform = Platform.detectPlatform("tensorflow");
if (platform.isPlaceholder()) {
return downloadTensorFlow(platform);
}
return loadLibraryFromClasspath(platform);
}
private static String loadLibraryFromClasspath(Platform platform) {
Path tmp = null;
try {
String libName = System.mapLibraryName(LIB_NAME);
Path cacheFolder = Utils.getEngineCacheDir("tensorflow");
String version = platform.getVersion();
String flavor = platform.getFlavor();
String classifier = platform.getClassifier();
Path dir = cacheFolder.resolve(version + '-' + flavor + '-' + classifier);
logger.debug("Using cache dir: {}", dir);
Path path = dir.resolve(libName);
if (Files.exists(path)) {
return path.toAbsolutePath().toString();
}
Files.createDirectories(cacheFolder);
tmp = Files.createTempDirectory(cacheFolder, "tmp");
for (String file : platform.getLibraries()) {
String libPath = "native/lib/" + file;
logger.info("Extracting {} to cache ...", libPath);
try (InputStream is = ClassLoaderUtils.getResourceAsStream(libPath)) {
Files.copy(is, tmp.resolve(file), StandardCopyOption.REPLACE_EXISTING);
}
}
Utils.moveQuietly(tmp, dir);
return path.toAbsolutePath().toString();
} catch (IOException e) {
throw new IllegalStateException("Failed to extract Tensorflow native library", e);
} finally {
if (tmp != null) {
Utils.deleteQuietly(tmp);
}
}
}
private static String findLibraryInPath(String libPath) {
String[] paths = libPath.split(File.pathSeparator);
String mapLibraryName = System.mapLibraryName(LIB_NAME);
for (String path : paths) {
File p = new File(path);
if (!p.exists()) {
continue;
}
if (p.isFile() && p.getName().endsWith(mapLibraryName)) {
return p.getAbsolutePath();
}
File file = new File(path, mapLibraryName);
if (file.exists() && file.isFile()) {
return file.getAbsolutePath();
}
}
return null;
}
private static String downloadTensorFlow(Platform platform) {
String version = platform.getVersion();
String classifier = platform.getClassifier();
String cudaArch = platform.getCudaArch();
String flavor = platform.getFlavor();
String libName = System.mapLibraryName(LIB_NAME);
Path cacheDir = Utils.getEngineCacheDir("tensorflow");
logger.debug("Using cache dir: {}", cacheDir);
Path dir = cacheDir.resolve(version + '-' + flavor + '-' + classifier);
Path path = dir.resolve(libName);
if (Files.exists(path)) {
return path.toAbsolutePath().toString();
}
Matcher matcher = VERSION_PATTERN.matcher(version);
if (!matcher.matches()) {
throw new IllegalArgumentException("Unexpected version: " + version);
}
Path tmp = null;
String link = "https://publish.djl.ai/tensorflow/" + matcher.group(1);
try (InputStream is = Utils.openUrl(link + "/files.txt")) {
Files.createDirectories(cacheDir);
tmp = Files.createTempDirectory(cacheDir, "tmp");
List<String> lines = Utils.readLines(is);
boolean found = downloadFiles(lines, link, classifier, flavor, tmp);
if (!found && cudaArch != null) {
// fallback to cpu
String cpuFlavor = "cpu";
dir = cacheDir.resolve(version + '-' + cpuFlavor + '-' + classifier);
path = dir.resolve(libName);
if (Files.exists(path)) {
logger.warn(
"No matching CUDA flavor for {} found: {}/sm_{}, fallback to CPU.",
classifier,
flavor,
cudaArch);
return path.toAbsolutePath().toString();
}
flavor = cpuFlavor;
found = downloadFiles(lines, link, classifier, flavor, tmp);
}
if (!found) {
throw new IllegalStateException(
"No TensorFlow native library matches your operating system: " + platform);
}
Utils.moveQuietly(tmp, dir);
return path.toAbsolutePath().toString();
} catch (IOException e) {
throw new IllegalStateException("Failed to download Tensorflow native library", e);
} finally {
if (tmp != null) {
Utils.deleteQuietly(tmp);
}
}
}
private static boolean downloadFiles(
List<String> lines, String link, String classifier, String flavor, Path tmp)
throws IOException {
boolean found = false;
String prefix;
if (flavor.startsWith("cu12") && "linux-x86_64".equals(classifier)) {
prefix = "cu121/linux-x86_64";
} else {
prefix = flavor + '/' + classifier + '/';
}
for (String line : lines) {
if (line.startsWith(prefix)) {
found = true;
URL url = new URL(link + '/' + line.replace("+", "%2B"));
String fileName = line.substring(line.lastIndexOf('/') + 1, line.length() - 3);
logger.info("Downloading {} ...", url);
try (InputStream fis = new GZIPInputStream(Utils.openUrl(url))) {
Files.copy(fis, tmp.resolve(fileName), StandardCopyOption.REPLACE_EXISTING);
}
}
}
return found;
}
}
|
0
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow/engine
|
java-sources/ai/djl/tensorflow/tensorflow-engine/0.34.0/ai/djl/tensorflow/engine/javacpp/package-info.java
|
/*
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
/**
* Contains implementations of interfaces within the DJL API for the TensorFlow Engine.
*
* @see ai.djl.tensorflow.engine.TfEngine
*/
package ai.djl.tensorflow.engine.javacpp;
|
0
|
java-sources/ai/djl/tensorflow/tensorflow-model-zoo/0.34.0/ai/djl/tensorflow
|
java-sources/ai/djl/tensorflow/tensorflow-model-zoo/0.34.0/ai/djl/tensorflow/zoo/TfModelZoo.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tensorflow.zoo;
import ai.djl.Application.CV;
import ai.djl.repository.RemoteRepository;
import ai.djl.repository.Repository;
import ai.djl.repository.zoo.ModelZoo;
import ai.djl.tensorflow.engine.TfEngine;
import java.util.Collections;
import java.util.Set;
/** TfModelZoo is a repository that contains the TensorFlow models for DJL. */
public class TfModelZoo extends ModelZoo {
private static final Repository REPOSITORY = new RemoteRepository("TensorFlow", DJL_REPO_URL);
public static final String GROUP_ID = "ai.djl.tensorflow";
TfModelZoo() {
addModel(REPOSITORY.model(CV.IMAGE_CLASSIFICATION, GROUP_ID, "resnet", "0.0.1"));
addModel(REPOSITORY.model(CV.IMAGE_CLASSIFICATION, GROUP_ID, "mobilenet", "0.0.1"));
addModel(REPOSITORY.model(CV.OBJECT_DETECTION, GROUP_ID, "ssd", "0.0.1"));
}
/** {@inheritDoc} */
@Override
public String getGroupId() {
return GROUP_ID;
}
/** {@inheritDoc} */
@Override
public Set<String> getSupportedEngines() {
return Collections.singleton(TfEngine.ENGINE_NAME);
}
}
|
0
|
java-sources/ai/djl/tensorflow/tensorflow-model-zoo/0.34.0/ai/djl/tensorflow
|
java-sources/ai/djl/tensorflow/tensorflow-model-zoo/0.34.0/ai/djl/tensorflow/zoo/TfZooProvider.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tensorflow.zoo;
import ai.djl.repository.zoo.ModelZoo;
import ai.djl.repository.zoo.ZooProvider;
/**
* A TensorFlow model zoo provider that implements the {@link ai.djl.repository.zoo.ZooProvider}
* interface.
*/
public class TfZooProvider implements ZooProvider {
/** {@inheritDoc} */
@Override
public ModelZoo getModelZoo() {
return new TfModelZoo();
}
}
|
0
|
java-sources/ai/djl/tensorflow/tensorflow-model-zoo/0.34.0/ai/djl/tensorflow
|
java-sources/ai/djl/tensorflow/tensorflow-model-zoo/0.34.0/ai/djl/tensorflow/zoo/package-info.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
/** Contains the built-in {@link ai.djl.tensorflow.zoo.TfModelZoo}. */
package ai.djl.tensorflow.zoo;
|
0
|
java-sources/ai/djl/tensorflow/tensorflow-model-zoo/0.34.0/ai/djl/tensorflow/zoo/cv
|
java-sources/ai/djl/tensorflow/tensorflow-model-zoo/0.34.0/ai/djl/tensorflow/zoo/cv/objectdetction/TfSsdTranslator.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tensorflow.zoo.cv.objectdetction;
import ai.djl.modality.cv.Image;
import ai.djl.modality.cv.output.BoundingBox;
import ai.djl.modality.cv.output.DetectedObjects;
import ai.djl.modality.cv.output.Rectangle;
import ai.djl.modality.cv.translator.ObjectDetectionTranslator;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.translate.ArgumentsUtil;
import ai.djl.translate.TranslatorContext;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
/**
* A {@link TfSsdTranslator} that post-processes the {@link NDArray} into {@link DetectedObjects} with
* boundaries. Reference implementation: <a
* href="https://tfhub.dev/google/openimages_v4/ssd/mobilenet_v2/1">SSD</a>.
*/
public class TfSsdTranslator extends ObjectDetectionTranslator {
private int maxBoxes;
private String numDetectionsOutputName;
private String boundingBoxOutputName;
private String scoresOutputName;
private String classLabelOutputName;
/**
* Creates the SSD translator from the given builder.
*
* @param builder the builder for the translator
*/
protected TfSsdTranslator(Builder builder) {
super(builder);
this.maxBoxes = builder.maxBoxes;
this.numDetectionsOutputName = builder.numDetectionsOutputName;
this.boundingBoxOutputName = builder.boundingBoxOutputName;
this.scoresOutputName = builder.scoresOutputName;
this.classLabelOutputName = builder.classLabelOutputName;
}
/** {@inheritDoc} */
@Override
public NDList processInput(TranslatorContext ctx, Image input) {
// The TensorFlow object detection model does not support batch input
// and requires the input dimension to be 4 with the first dim equal to 1,
// so remove the batchifier and manually batch the input in preprocessing. See:
// https://tfhub.dev/google/openimages_v4/ssd/mobilenet_v2/1
return new NDList(super.processInput(ctx, input).get(0).expandDims(0));
}
/** {@inheritDoc} */
@Override
public DetectedObjects processOutput(TranslatorContext ctx, NDList list) {
int len = (int) list.get(0).getShape().get(0);
for (NDArray array : list) {
if (numDetectionsOutputName.equals(array.getName())) {
len = array.toArray()[0].intValue();
break;
}
}
float[] scores = new float[len];
long[] classIds = new long[len];
NDArray boundingBoxes = list.get(0);
for (NDArray array : list) {
if (scoresOutputName.equals(array.getName())) {
scores = array.toFloatArray();
} else if (boundingBoxOutputName.equals(array.getName())) {
boundingBoxes = array;
} else if (classLabelOutputName.equals(array.getName())) {
classIds = Arrays.stream(array.toArray()).mapToLong(Number::longValue).toArray();
}
}
List<String> retNames = new ArrayList<>();
List<Double> retProbs = new ArrayList<>();
List<BoundingBox> retBB = new ArrayList<>();
// results are already sorted according to scores
for (int i = 0; i < Math.min(classIds.length, maxBoxes); ++i) {
long classId = classIds[i];
double score = scores[i];
// classId starts from 1, -1 means background
if (classId >= 0 && score > threshold) {
if (classId >= classes.size()) {
throw new AssertionError("Unexpected index: " + classId);
}
String className = classes.get((int) classId - 1);
float[] box = boundingBoxes.get(i).toFloatArray();
float yMin = box[0];
float xMin = box[1];
float yMax = box[2];
float xMax = box[3];
double w = xMax - xMin;
double h = yMax - yMin;
Rectangle rect = new Rectangle(xMin, yMin, w, h);
retNames.add(className);
retProbs.add(score);
retBB.add(rect);
}
}
return new DetectedObjects(retNames, retProbs, retBB);
}
/**
* Creates a builder to build a {@code TfSsdTranslator}.
*
* @return a new builder
*/
public static Builder builder() {
return new Builder();
}
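// Minimal builder sketch (illustrative; a real setup typically also configures class labels
// through the base ObjectDetectionBuilder options):
//   TfSsdTranslator translator =
//           TfSsdTranslator.builder().optMaxBoxes(5).optScoresOutputName("detection_scores").build();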
/**
* Creates a builder to build a {@code TfSsdTranslator} with specified arguments.
*
* @param arguments arguments to specify builder options
* @return a new builder
*/
public static Builder builder(Map<String, ?> arguments) {
Builder builder = new Builder();
builder.configPreProcess(arguments);
builder.configPostProcess(arguments);
return builder;
}
/** The builder for TensorFlow SSD translator. */
public static class Builder extends ObjectDetectionBuilder<Builder> {
int maxBoxes = 10;
String numDetectionsOutputName = "num_detections";
String boundingBoxOutputName = "detection_boxes";
String scoresOutputName = "detection_scores";
String classLabelOutputName = "detection_class_labels";
/**
* Set the output name used for number of detections.
*
* <p>You can find the output names of TensorFlow models by calling `model.describeOutput()`
* after loading it.
*
* @param numDetectionsOutputName output name for number of detections
* @return this builder
*/
public Builder optNumDetectionsOutputName(String numDetectionsOutputName) {
this.numDetectionsOutputName = numDetectionsOutputName;
return this;
}
/**
* Set the output name used for bounding boxes. You can find the output names of TensorFlow
* models by calling `model.describeOutput()` after loading it.
*
* @param boundingBoxOutputName output name for bounding boxes
* @return this builder
*/
public Builder optBoundingBoxOutputName(String boundingBoxOutputName) {
this.boundingBoxOutputName = boundingBoxOutputName;
return this;
}
/**
* Set the output name used for detection scores. You can find the output names of
* TensorFlow models by calling `model.describeOutput()` after loading it.
*
* @param scoresOutputName output name for detection scores
* @return this builder
*/
public Builder optScoresOutputName(String scoresOutputName) {
this.scoresOutputName = scoresOutputName;
return this;
}
/**
     * Set the output name used for class labels. You can find the output names of a TensorFlow
     * model by calling `model.describeOutput()` after loading it.
*
* @param classLabelOutputName output name for class label
* @return this builder
*/
public Builder optClassLabelOutputName(String classLabelOutputName) {
this.classLabelOutputName = classLabelOutputName;
return this;
}
/**
* Set the maximum number of bounding boxes to display.
*
* @param maxBoxes maximum number of bounding boxes to display
* @return this builder
*/
public Builder optMaxBoxes(int maxBoxes) {
this.maxBoxes = maxBoxes;
return this;
}
/** {@inheritDoc} */
@Override
protected Builder self() {
return this;
}
/** {@inheritDoc} */
@Override
protected void configPreProcess(Map<String, ?> arguments) {
super.configPreProcess(arguments);
optBatchifier(null); // override parent batchifier
}
/** {@inheritDoc} */
@Override
protected void configPostProcess(Map<String, ?> arguments) {
super.configPostProcess(arguments);
maxBoxes = ArgumentsUtil.intValue(arguments, "maxBoxes", 10);
threshold = ArgumentsUtil.floatValue(arguments, "threshold", 0.4f);
numDetectionsOutputName =
ArgumentsUtil.stringValue(
arguments, "numDetectionsOutputName", "num_detections");
boundingBoxOutputName =
ArgumentsUtil.stringValue(
arguments, "boundingBoxOutputName", "detection_boxes");
scoresOutputName =
ArgumentsUtil.stringValue(arguments, "scoresOutputName", "detection_scores");
classLabelOutputName =
ArgumentsUtil.stringValue(
arguments, "classLabelOutputName", "detection_class_labels");
}
/**
* Builds the translator.
*
* @return the new translator
*/
public TfSsdTranslator build() {
validate();
return new TfSsdTranslator(this);
}
}
}
|
0
|
java-sources/ai/djl/tensorflow/tensorflow-model-zoo/0.34.0/ai/djl/tensorflow/zoo/cv
|
java-sources/ai/djl/tensorflow/tensorflow-model-zoo/0.34.0/ai/djl/tensorflow/zoo/cv/objectdetction/TfSsdTranslatorFactory.java
|
/*
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tensorflow.zoo.cv.objectdetction;
import ai.djl.Model;
import ai.djl.modality.cv.Image;
import ai.djl.modality.cv.output.DetectedObjects;
import ai.djl.modality.cv.translator.ObjectDetectionTranslatorFactory;
import ai.djl.translate.Translator;
import ai.djl.translate.TranslatorFactory;
import java.util.Map;
/** A {@link TranslatorFactory} that creates a {@link TfSsdTranslator} instance. */
public class TfSsdTranslatorFactory extends ObjectDetectionTranslatorFactory {
/** {@inheritDoc} */
@Override
protected Translator<Image, DetectedObjects> buildBaseTranslator(
Model model, Map<String, ?> arguments) {
return TfSsdTranslator.builder(arguments).build();
}
}
|
0
|
java-sources/ai/djl/tensorflow/tensorflow-model-zoo/0.34.0/ai/djl/tensorflow/zoo/cv
|
java-sources/ai/djl/tensorflow/tensorflow-model-zoo/0.34.0/ai/djl/tensorflow/zoo/cv/objectdetction/package-info.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
/**
* Contains classes for the {@link ai.djl.Application.CV#OBJECT_DETECTION} models in the {@link
* ai.djl.tensorflow.zoo.TfModelZoo}.
*/
package ai.djl.tensorflow.zoo.cv.objectdetction;
|
0
|
java-sources/ai/djl/tensorrt/tensorrt/0.33.0/ai/djl/tensorrt
|
java-sources/ai/djl/tensorrt/tensorrt/0.33.0/ai/djl/tensorrt/engine/TrtEngine.java
|
/*
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tensorrt.engine;
import ai.djl.Device;
import ai.djl.Model;
import ai.djl.engine.Engine;
import ai.djl.engine.EngineException;
import ai.djl.engine.StandardCapabilities;
import ai.djl.tensorrt.jni.JniUtils;
import ai.djl.tensorrt.jni.LibUtils;
import ai.djl.util.Utils;
import java.io.FileNotFoundException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
/**
* The {@code TrtEngine} is an implementation of the {@link Engine} based on the <a
* href="https://github.com/NVIDIA/TensorRT">TensorRT</a>.
*
* <p>To get an instance of the {@code TrtEngine} when it is not the default Engine, call {@link
* Engine#getEngine(String)} with the Engine name "TensorRT".
*/
public final class TrtEngine extends Engine {
public static final String ENGINE_NAME = "TensorRT";
static final int RANK = 10;
private Engine alternativeEngine;
private boolean initialized;
private TrtEngine() {}
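    // When TensorRT is not the default engine, callers can still request it by name
    // (a minimal sketch; assumes the native library and a GPU are available):
    //
    //   Engine trt = Engine.getEngine(TrtEngine.ENGINE_NAME); // "TensorRT"
    //   NDManager manager = trt.newBaseManager(Device.gpu());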
static Engine newInstance() {
try {
LibUtils.loadLibrary();
JniUtils.initPlugins("");
String paths = Utils.getEnvOrSystemProperty("TENSORRT_EXTRA_LIBRARY_PATH");
if (paths != null) {
String[] files = paths.split(",");
for (String file : files) {
Path path = Paths.get(file);
if (Files.notExists(path)) {
throw new FileNotFoundException(
"TensorRT extra Library not found: " + file);
}
System.load(path.toAbsolutePath().toString()); // NOPMD
}
}
return new TrtEngine();
} catch (Throwable t) {
throw new EngineException("Failed to load TensorRT native library", t);
}
}
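    // newInstance() above also honors TENSORRT_EXTRA_LIBRARY_PATH (read through
    // Utils.getEnvOrSystemProperty, so an environment variable or a JVM system property both
    // work) as a comma-separated list of extra native libraries to preload. A minimal sketch
    // with an illustrative path:
    //
    //   System.setProperty("TENSORRT_EXTRA_LIBRARY_PATH", "/opt/trt/libcustom_plugin.so");
    //   Engine.getEngine(TrtEngine.ENGINE_NAME); // first lookup triggers newInstance()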
/** {@inheritDoc} */
@Override
public Engine getAlternativeEngine() {
if (!initialized && !Boolean.getBoolean("ai.djl.tensorrt.disable_alternative")) {
Engine engine = Engine.getInstance();
if (engine.getRank() < getRank()) {
// alternativeEngine should not have the same rank as TensorRT
alternativeEngine = engine;
}
initialized = true;
}
return alternativeEngine;
}
/** {@inheritDoc} */
@Override
public String getEngineName() {
return ENGINE_NAME;
}
/** {@inheritDoc} */
@Override
public int getRank() {
return RANK;
}
/** {@inheritDoc} */
@Override
public String getVersion() {
return JniUtils.getTrtVersion();
}
/** {@inheritDoc} */
@Override
public boolean hasCapability(String capability) {
return StandardCapabilities.CUDA.equals(capability);
}
/** {@inheritDoc} */
@Override
public Model newModel(String name, Device device) {
return new TrtModel(name, newBaseManager(device));
}
/** {@inheritDoc} */
@Override
public TrtNDManager newBaseManager() {
return newBaseManager(null);
}
/** {@inheritDoc} */
@Override
public TrtNDManager newBaseManager(Device device) {
// Only support GPU for now
device = device == null ? defaultDevice() : device;
if (!device.isGpu()) {
throw new IllegalArgumentException("TensorRT only support GPU");
}
return TrtNDManager.getSystemManager().newSubManager(device);
}
/** {@inheritDoc} */
@Override
public String toString() {
StringBuilder sb = new StringBuilder(200);
sb.append(getEngineName()).append(':').append(getVersion()).append(", ");
if (alternativeEngine != null) {
sb.append("Alternative engine: ").append(alternativeEngine.getEngineName());
} else {
sb.append("No alternative engine found");
}
return sb.toString();
}
}
|
0
|
java-sources/ai/djl/tensorrt/tensorrt/0.33.0/ai/djl/tensorrt
|
java-sources/ai/djl/tensorrt/tensorrt/0.33.0/ai/djl/tensorrt/engine/TrtEngineProvider.java
|
/*
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tensorrt.engine;
import ai.djl.engine.Engine;
import ai.djl.engine.EngineProvider;
/** {@code TrtEngineProvider} is the TensorRT implementation of {@link EngineProvider}. */
public class TrtEngineProvider implements EngineProvider {
/** {@inheritDoc} */
@Override
public String getEngineName() {
return TrtEngine.ENGINE_NAME;
}
/** {@inheritDoc} */
@Override
public int getEngineRank() {
return TrtEngine.RANK;
}
/** {@inheritDoc} */
@Override
public Engine getEngine() {
return InstanceHolder.INSTANCE;
}
private static class InstanceHolder {
static final Engine INSTANCE = TrtEngine.newInstance();
}
}
|
0
|
java-sources/ai/djl/tensorrt/tensorrt/0.33.0/ai/djl/tensorrt
|
java-sources/ai/djl/tensorrt/tensorrt/0.33.0/ai/djl/tensorrt/engine/TrtModel.java
|
/*
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tensorrt.engine;
import ai.djl.BaseModel;
import ai.djl.Device;
import ai.djl.Model;
import ai.djl.inference.Predictor;
import ai.djl.ndarray.types.DataType;
import ai.djl.tensorrt.jni.JniUtils;
import ai.djl.translate.Translator;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Map;
/**
* {@code TrtModel} is the TensorRT implementation of {@link Model}.
*
 * <p>TrtModel contains all the methods in Model to load and process a model. In addition, it
 * provides TensorRT-specific functionality.
*/
public class TrtModel extends BaseModel {
/**
* Constructs a new Model on a given device.
*
* @param name the model name
     * @param manager the {@link TrtNDManager} that holds the NDArrays
*/
TrtModel(String name, TrtNDManager manager) {
super(name);
this.manager = manager;
this.manager.setName("tensorrtModel");
dataType = DataType.FLOAT32;
}
/** {@inheritDoc} */
@Override
public void load(Path modelPath, String prefix, Map<String, ?> options) throws IOException {
setModelDir(modelPath);
wasLoaded = true;
if (block != null) {
throw new UnsupportedOperationException("TensorRT does not support dynamic blocks");
}
if (prefix == null) {
prefix = modelName;
}
Path modelFile = findModelFile(prefix);
if (modelFile == null) {
modelFile = findModelFile(modelDir.toFile().getName());
if (modelFile == null) {
throw new FileNotFoundException(prefix + ".* file not found in: " + modelDir);
}
}
String filePath = modelFile.toString();
int modelType;
if (filePath.endsWith(".onnx")) {
modelType = 0;
} else if (filePath.endsWith(".uff")) {
modelType = 1;
} else {
modelType = 2;
}
long modelHandle = JniUtils.loadModel(modelType, filePath, manager.getDevice(), options);
block = new TrtSymbolBlock(modelHandle);
}
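    // load() above infers the serialized format from the file extension: ".onnx" maps to the
    // ONNX path, ".uff" to UFF, and anything else is treated as a prebuilt TensorRT engine.
    // A minimal loading sketch (model name and path are illustrative, exception handling omitted):
    //
    //   try (Model model = Model.newInstance("ssd", Device.gpu(), "TensorRT")) {
    //       model.load(Paths.get("/models/ssd"), "ssd.onnx");
    //   }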
/** {@inheritDoc} */
@Override
public <I, O> Predictor<I, O> newPredictor(Translator<I, O> translator, Device device) {
TrtSymbolBlock trtSymbol = ((TrtSymbolBlock) block);
TrtNDManager predManager = ((TrtNDManager) manager).newSubManager(device);
TrtSession session = trtSymbol.createSession(predManager);
return new TrtPredictor<>(this, translator, device, session);
}
private Path findModelFile(String prefix) {
if (Files.isRegularFile(modelDir)) {
Path file = modelDir;
modelDir = modelDir.getParent();
String fileName = file.toFile().getName();
if (fileName.endsWith(".onnx")) {
modelName = fileName.substring(0, fileName.length() - 5);
} else if (fileName.endsWith(".trt") || fileName.endsWith(".uff")) {
modelName = fileName.substring(0, fileName.length() - 4);
} else {
modelName = fileName;
}
return file;
}
Path modelFile = modelDir.resolve(prefix);
if (Files.notExists(modelFile) || !Files.isRegularFile(modelFile)) {
if (prefix.endsWith(".onnx") || prefix.endsWith(".trt") || prefix.endsWith(".uff")) {
return null;
}
modelFile = modelDir.resolve(prefix + ".onnx");
if (Files.notExists(modelFile) || !Files.isRegularFile(modelFile)) {
modelFile = modelDir.resolve(prefix + ".trt");
if (Files.notExists(modelFile) || !Files.isRegularFile(modelFile)) {
modelFile = modelDir.resolve(prefix + ".uff");
if (Files.notExists(modelFile) || !Files.isRegularFile(modelFile)) {
return null;
}
}
}
}
return modelFile;
}
/** {@inheritDoc} */
@Override
public void close() {
if (block != null) {
((TrtSymbolBlock) block).close();
block = null;
}
super.close();
}
}
|
0
|
java-sources/ai/djl/tensorrt/tensorrt/0.33.0/ai/djl/tensorrt
|
java-sources/ai/djl/tensorrt/tensorrt/0.33.0/ai/djl/tensorrt/engine/TrtNDArray.java
|
/*
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tensorrt.engine;
import ai.djl.ndarray.BaseNDManager;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDArrayAdapter;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import java.util.UUID;
/** {@code TrtNDArray} is the TensorRT implementation of {@link NDArray}. */
public class TrtNDArray extends NDArrayAdapter {
private TrtNDManager manager;
private ByteBuffer data;
TrtNDArray(
TrtNDManager manager,
NDManager alternativeManager,
ByteBuffer data,
Shape shape,
DataType dataType) {
super(manager, alternativeManager, shape, dataType, UUID.randomUUID().toString());
this.data = data;
manager.attachInternal(uid, this);
}
/** {@inheritDoc} */
@Override
public void intern(NDArray replaced) {
data = ((TrtNDArray) replaced).data;
}
/** {@inheritDoc} */
@Override
public void detach() {
manager.detachInternal(getUid());
manager = TrtNDManager.getSystemManager();
}
/** {@inheritDoc} */
@Override
public ByteBuffer toByteBuffer(boolean tryDirect) {
data.rewind();
return data;
}
/** {@inheritDoc} */
@Override
public void set(Buffer buffer) {
int size = Math.toIntExact(shape.size());
BaseNDManager.validateBuffer(buffer, dataType, size);
BaseNDManager.copyBuffer(buffer, data);
}
}
|
0
|
java-sources/ai/djl/tensorrt/tensorrt/0.33.0/ai/djl/tensorrt
|
java-sources/ai/djl/tensorrt/tensorrt/0.33.0/ai/djl/tensorrt/engine/TrtNDManager.java
|
/*
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tensorrt.engine;
import ai.djl.Device;
import ai.djl.engine.Engine;
import ai.djl.ndarray.BaseNDManager;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import ai.djl.util.Float16Utils;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
/** {@code TrtNDManager} is the TensorRT implementation of {@link NDManager}. */
public class TrtNDManager extends BaseNDManager {
private static final TrtNDManager SYSTEM_MANAGER = new SystemManager();
private TrtNDManager(NDManager parent, Device device) {
super(parent, device);
}
static TrtNDManager getSystemManager() {
return SYSTEM_MANAGER;
}
/** {@inheritDoc} */
@Override
public final Engine getEngine() {
return Engine.getEngine(TrtEngine.ENGINE_NAME);
}
/** {@inheritDoc} */
@Override
public ByteBuffer allocateDirect(int capacity) {
return ByteBuffer.allocateDirect(capacity).order(ByteOrder.nativeOrder());
}
/** {@inheritDoc} */
@Override
public TrtNDArray from(NDArray array) {
if (array == null || array instanceof TrtNDArray) {
return (TrtNDArray) array;
}
TrtNDArray result = create(array.toByteBuffer(), array.getShape(), array.getDataType());
result.setName(array.getName());
return result;
}
/** {@inheritDoc} */
@Override
public TrtNDManager newSubManager(Device dev) {
TrtNDManager manager = new TrtNDManager(this, dev);
attachInternal(manager.uid, manager);
return manager;
}
/** {@inheritDoc} */
@Override
public TrtNDArray create(Buffer data, Shape shape, DataType dataType) {
int size = Math.toIntExact(shape.size());
BaseNDManager.validateBuffer(data, dataType, size);
if (data.isDirect() && data instanceof ByteBuffer) {
return new TrtNDArray(this, alternativeManager, (ByteBuffer) data, shape, dataType);
}
ByteBuffer bb = allocateDirect(size * dataType.getNumOfBytes());
BaseNDManager.copyBuffer(data, bb);
return new TrtNDArray(this, alternativeManager, bb, shape, dataType);
}
/** {@inheritDoc} */
@Override
public NDArray zeros(Shape shape, DataType dataType) {
int size = Math.toIntExact(dataType.getNumOfBytes() * shape.size());
ByteBuffer bb = allocateDirect(size);
return create(bb, shape, dataType);
}
/** {@inheritDoc} */
@Override
public NDArray ones(Shape shape, DataType dataType) {
int size = (int) shape.size();
ByteBuffer bb = allocateDirect(size * dataType.getNumOfBytes());
for (int i = 0; i < size; ++i) {
switch (dataType) {
case BOOLEAN:
case INT8:
case UINT8:
bb.put((byte) 1);
break;
case FLOAT16:
bb.putShort(Float16Utils.floatToHalf(1));
break;
case FLOAT32:
bb.putFloat(1f);
break;
case FLOAT64:
bb.putDouble(1);
break;
case INT32:
bb.putInt(1);
break;
case INT64:
bb.putLong(1);
break;
default:
throw new UnsupportedOperationException("Unsupported dataType: " + dataType);
}
}
bb.rewind();
return create(bb, shape, dataType);
}
/** {@inheritDoc} */
@Override
public void close() {
super.close();
if (alternativeManager != null) {
alternativeManager.close();
alternativeManager = null;
}
}
/** The SystemManager is the root {@link TrtNDManager} of which all others are children. */
private static final class SystemManager extends TrtNDManager implements SystemNDManager {
SystemManager() {
super(null, null);
}
}
}
|
0
|
java-sources/ai/djl/tensorrt/tensorrt/0.33.0/ai/djl/tensorrt
|
java-sources/ai/djl/tensorrt/tensorrt/0.33.0/ai/djl/tensorrt/engine/TrtPredictor.java
|
/*
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tensorrt.engine;
import ai.djl.Device;
import ai.djl.inference.Predictor;
import ai.djl.translate.Translator;
class TrtPredictor<I, O> extends Predictor<I, O> {
TrtPredictor(TrtModel model, Translator<I, O> translator, Device device, TrtSession session) {
super(model, translator, device, false);
block = session;
}
/** {@inheritDoc} */
@Override
public void close() {
super.close();
((TrtSession) block).close();
}
}
|
0
|
java-sources/ai/djl/tensorrt/tensorrt/0.33.0/ai/djl/tensorrt
|
java-sources/ai/djl/tensorrt/tensorrt/0.33.0/ai/djl/tensorrt/engine/TrtSession.java
|
/*
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tensorrt.engine;
import ai.djl.engine.EngineException;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.AbstractBlock;
import ai.djl.tensorrt.jni.JniUtils;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.nio.ByteBuffer;
import java.util.Arrays;
/** {@code TrtSession} represents the TensorRT's execution context. */
public class TrtSession extends AbstractBlock implements AutoCloseable {
private static final Logger logger = LoggerFactory.getLogger(JniUtils.class);
private long session;
private NDList inputBindings;
private NDList outputBindings;
private Shape[] outputShapes;
TrtSession(TrtNDManager manager, long modelHandle, long session) {
this.session = session;
inputNames = Arrays.asList(JniUtils.getInputNames(modelHandle));
DataType[] inputTypes = JniUtils.getInputDataTypes(modelHandle);
inputShapes = new Shape[inputTypes.length];
inputBindings = new NDList(inputTypes.length);
for (int i = 0; i < inputTypes.length; ++i) {
String inputName = inputNames.get(i);
inputShapes[i] = new Shape(JniUtils.getShape(session, inputName));
int size = Math.toIntExact(inputShapes[i].size() * inputTypes[i].getNumOfBytes());
ByteBuffer bb = manager.allocateDirect(size);
JniUtils.bind(session, inputName, bb);
NDArray array = manager.create(bb, inputShapes[i], inputTypes[i]);
array.setName(inputName);
inputBindings.add(array);
}
String[] outputNames = JniUtils.getOutputNames(modelHandle);
DataType[] outputTypes = JniUtils.getOutputDataTypes(modelHandle);
outputShapes = new Shape[outputNames.length];
outputBindings = new NDList(outputShapes.length);
for (int i = 0; i < outputShapes.length; ++i) {
outputShapes[i] = new Shape(JniUtils.getShape(session, outputNames[i]));
int size = Math.toIntExact(outputShapes[i].size() * outputTypes[i].getNumOfBytes());
ByteBuffer bb = manager.allocateDirect(size);
JniUtils.bind(session, outputNames[i], bb);
NDArray array = manager.create(bb, outputShapes[i], outputTypes[i]);
array.setName(outputNames[i]);
outputBindings.add(array);
}
if (logger.isDebugEnabled()) {
logger.debug("Model information: ");
for (int i = 0; i < inputTypes.length; ++i) {
logger.debug(
"input_{}[{}]: {}, {}",
i,
inputNames.get(i),
inputTypes[i],
inputShapes[i]);
}
for (int i = 0; i < outputTypes.length; ++i) {
logger.debug(
"output_{}[{}]: {}, {}",
i,
outputNames[i],
outputTypes[i],
outputShapes[i]);
}
}
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
int size = inputs.size();
if (this.inputBindings.size() != size) {
throw new EngineException(
"Unexpected number of inputs: " + size + ", expected: " + inputBindings.size());
}
for (int i = 0; i < size; ++i) {
NDArray array = inputs.get(i);
NDArray bound = inputBindings.get(i);
if (bound != array) {
if (bound.getDataType() != array.getDataType()) {
throw new EngineException(
"Unexpected input_"
+ i
+ '['
+ bound.getName()
+ "] dataType: "
+ array.getDataType()
+ ", expected: "
+ bound.getDataType());
} else if (!bound.getShape().equals(array.getShape())) {
throw new EngineException(
"Unexpected input_"
+ i
+ '['
+ bound.getName()
+ "] shape: "
+ array.getShape()
+ ", expected: "
+ bound.getShape());
}
bound.set(array.toByteBuffer());
}
}
JniUtils.runTrtModel(session);
return outputBindings;
}
/**
* Returns the input {@code NDList} that bound to TensorRT engine.
*
* @return the input {@code NDList} that bound to TensorRT engine
*/
public NDList getInputBindings() {
return inputBindings;
}
/**
* Returns the output {@code NDList} that bound to TensorRT engine.
*
* @return the output {@code NDList} that bound to TensorRT engine
*/
public NDList getOutputBindings() {
return outputBindings;
}
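    // forwardInternal above copies the caller's data into these pre-bound input buffers and
    // returns the pre-bound outputs in place, so the same NDLists are reused on every call.
    // A minimal sketch of driving the block directly (the input data is illustrative):
    //
    //   NDList inputs = session.getInputBindings();
    //   inputs.get(0).set(inputBuffer); // hypothetical java.nio.Buffer with matching size/type
    //   NDList outputs = session.forward(new ParameterStore(manager, false), inputs, false);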
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(Shape[] inputShapes) {
return outputShapes;
}
/** {@inheritDoc} */
@Override
public void close() {
JniUtils.deleteSession(session);
}
}
|
0
|
java-sources/ai/djl/tensorrt/tensorrt/0.33.0/ai/djl/tensorrt
|
java-sources/ai/djl/tensorrt/tensorrt/0.33.0/ai/djl/tensorrt/engine/TrtSymbolBlock.java
|
/*
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tensorrt.engine;
import ai.djl.ndarray.NDList;
import ai.djl.nn.AbstractSymbolBlock;
import ai.djl.nn.ParameterList;
import ai.djl.nn.SymbolBlock;
import ai.djl.tensorrt.jni.JniUtils;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
import java.util.concurrent.atomic.AtomicReference;
/**
* {@code TrtSymbolBlock} is the TensorRT implementation of {@link SymbolBlock}.
*
* <p>You can create a {@code TrtSymbolBlock} using {@link ai.djl.Model#load(java.nio.file.Path,
* String)}.
*/
public class TrtSymbolBlock extends AbstractSymbolBlock implements AutoCloseable {
private AtomicReference<Long> handle;
/**
* Constructs a {@code TrtSymbolBlock}.
*
* <p>You can create a {@code TrtSymbolBlock} using {@link ai.djl.Model#load(java.nio.file.Path,
* String)}.
*
* @param handle the handle for native TensorRT model
*/
public TrtSymbolBlock(long handle) {
this.handle = new AtomicReference<>(handle);
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
throw new UnsupportedOperationException("Use TrtExecutionContext instead.");
}
/** {@inheritDoc} */
@Override
public void close() {
Long pointer = handle.getAndSet(null);
if (pointer != null) {
JniUtils.deleteTrtModel(pointer);
}
}
TrtSession createSession(TrtNDManager manager) {
long session = JniUtils.createSession(handle.get());
return new TrtSession(manager, handle.get(), session);
}
/** {@inheritDoc} */
@Override
public ParameterList getDirectParameters() {
throw new UnsupportedOperationException("Not yet supported");
}
}
|
0
|
java-sources/ai/djl/tensorrt/tensorrt/0.33.0/ai/djl/tensorrt
|
java-sources/ai/djl/tensorrt/tensorrt/0.33.0/ai/djl/tensorrt/engine/package-info.java
|
/*
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
/** Contains classes to interface with the underlying TensorRT Engine. */
package ai.djl.tensorrt.engine;
|
0
|
java-sources/ai/djl/tensorrt/tensorrt/0.33.0/ai/djl/tensorrt
|
java-sources/ai/djl/tensorrt/tensorrt/0.33.0/ai/djl/tensorrt/jni/JniUtils.java
|
/*
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tensorrt.jni;
import ai.djl.Device;
import ai.djl.ndarray.types.DataType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.nio.ByteBuffer;
import java.util.Map;
/**
 * A class containing utilities to interact with the TensorRT Engine's Java Native Interface (JNI)
 * layer.
*/
@SuppressWarnings("MissingJavadocMethod")
public final class JniUtils {
private static final Logger logger = LoggerFactory.getLogger(JniUtils.class);
private JniUtils() {}
public static void initPlugins(String namespace) {
int logLevel = 1;
if (logger.isWarnEnabled()) {
logLevel = 2;
}
if (logger.isInfoEnabled()) {
logLevel = 3;
}
if (logger.isTraceEnabled()) {
logLevel = 4;
}
TrtLibrary.LIB.initPlugins(namespace, logLevel);
}
public static long loadModel(
int modelType, String path, Device device, Map<String, ?> options) {
int deviceId = device == null ? 0 : device.getDeviceId();
String[] keys = new String[options.size()];
String[] values = new String[keys.length];
int index = 0;
        String modelTypeName = modelType == 0 ? "ONNX" : modelType == 1 ? "UFF" : "serialized TensorRT";
logger.debug("Loading TensorRT {} model {} with options:", modelTypeName, path);
for (Map.Entry<String, ?> entry : options.entrySet()) {
keys[index] = entry.getKey();
values[index] = entry.getValue().toString();
logger.debug("{}: {}", keys[index], values[index]);
++index;
}
return TrtLibrary.LIB.loadTrtModel(modelType, path, deviceId, keys, values);
}
public static void deleteTrtModel(long model) {
TrtLibrary.LIB.deleteTrtModel(model);
}
public static long createSession(long model) {
return TrtLibrary.LIB.createSession(model);
}
public static void deleteSession(long session) {
TrtLibrary.LIB.deleteSession(session);
}
public static String[] getInputNames(long model) {
return TrtLibrary.LIB.getInputNames(model);
}
public static DataType[] getInputDataTypes(long model) {
int[] types = TrtLibrary.LIB.getInputDataTypes(model);
DataType[] ret = new DataType[types.length];
for (int i = 0; i < types.length; ++i) {
ret[i] = fromTrt(types[i]);
}
return ret;
}
public static String[] getOutputNames(long model) {
return TrtLibrary.LIB.getOutputNames(model);
}
public static DataType[] getOutputDataTypes(long model) {
int[] types = TrtLibrary.LIB.getOutputDataTypes(model);
DataType[] ret = new DataType[types.length];
for (int i = 0; i < types.length; ++i) {
ret[i] = fromTrt(types[i]);
}
return ret;
}
public static long[] getShape(long session, String name) {
return TrtLibrary.LIB.getShape(session, name);
}
public static void bind(long session, String name, ByteBuffer buffer) {
TrtLibrary.LIB.bind(session, name, buffer);
}
public static void runTrtModel(long session) {
TrtLibrary.LIB.runTrtModel(session);
}
public static String getTrtVersion() {
int version = TrtLibrary.LIB.getTrtVersion();
int major = version / 1000;
int minor = version / 100 - major * 10;
int patch = version % 100;
return major + "." + minor + '.' + patch;
}
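    // getTrtVersion() above unpacks the native version integer as major*1000 + minor*100 + patch;
    // a packed value of 8601, for example, would be reported as "8.6.1" (the number is only
    // illustrative).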
public static DataType fromTrt(int trtType) {
switch (trtType) {
case 0:
return DataType.FLOAT32;
case 1:
return DataType.FLOAT16;
case 2:
return DataType.INT8;
case 3:
return DataType.INT32;
case 4:
return DataType.BOOLEAN;
default:
throw new UnsupportedOperationException("Unsupported TensorRT type: " + trtType);
}
}
}
|
0
|
java-sources/ai/djl/tensorrt/tensorrt/0.33.0/ai/djl/tensorrt
|
java-sources/ai/djl/tensorrt/tensorrt/0.33.0/ai/djl/tensorrt/jni/LibUtils.java
|
/*
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tensorrt.jni;
import ai.djl.util.ClassLoaderUtils;
import ai.djl.util.Platform;
import ai.djl.util.Utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
/**
* Utilities for finding the TensorRT Engine binary on the System.
*
* <p>The Engine will be searched for in a variety of locations in the following order:
*
* <ol>
* <li>In the path specified by the TENSORRT_LIBRARY_PATH environment variable
* </ol>
*/
@SuppressWarnings("MissingJavadocMethod")
public final class LibUtils {
private static final Logger logger = LoggerFactory.getLogger(LibUtils.class);
private static final String LIB_NAME = "djl_trt";
private LibUtils() {}
public static void loadLibrary() {
if (!System.getProperty("os.name").startsWith("Linux")) {
throw new UnsupportedOperationException("TensorRT only supports Linux.");
}
String libName = copyJniLibraryFromClasspath();
logger.debug("Loading TensorRT JNI library from: {}", libName);
System.load(libName); // NOPMD
}
private static String copyJniLibraryFromClasspath() {
String name = System.mapLibraryName(LIB_NAME);
Platform platform = Platform.detectPlatform("tensorrt");
String classifier = platform.getClassifier();
String version = platform.getVersion();
Path cacheDir = Utils.getEngineCacheDir("tensorrt");
Path dir = cacheDir.resolve(version + '-' + classifier);
Path path = dir.resolve(name);
if (Files.exists(path)) {
return path.toAbsolutePath().toString();
}
Path tmp = null;
String libPath = "native/lib/" + classifier + "/" + name;
logger.info("Extracting {} to cache ...", libPath);
try (InputStream is = ClassLoaderUtils.getResourceAsStream(libPath)) {
Files.createDirectories(dir);
tmp = Files.createTempFile(cacheDir, "jni", "tmp");
Files.copy(is, tmp, StandardCopyOption.REPLACE_EXISTING);
Utils.moveQuietly(tmp, path);
return path.toAbsolutePath().toString();
} catch (IOException e) {
throw new IllegalStateException("Cannot copy jni files", e);
} finally {
if (tmp != null) {
Utils.deleteQuietly(tmp);
}
}
}
}
|
0
|
java-sources/ai/djl/tensorrt/tensorrt/0.33.0/ai/djl/tensorrt
|
java-sources/ai/djl/tensorrt/tensorrt/0.33.0/ai/djl/tensorrt/jni/TrtLibrary.java
|
/*
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tensorrt.jni;
import java.nio.ByteBuffer;
/** A class containing utilities to interact with the TensorRT Engine's JNI layer. */
@SuppressWarnings("MissingJavadocMethod")
final class TrtLibrary {
static final TrtLibrary LIB = new TrtLibrary();
private TrtLibrary() {}
native void initPlugins(String namespace, int logLevel);
native long loadTrtModel(
int modelType,
String modelPath,
int deviceId,
String[] optionKeys,
String[] optionValues);
native void deleteTrtModel(long modelHandle);
native String[] getInputNames(long modelHandle);
native int[] getInputDataTypes(long modelHandle);
native String[] getOutputNames(long modelHandle);
native int[] getOutputDataTypes(long modelHandle);
native long createSession(long modelHandle);
native void deleteSession(long sessionHandle);
native long[] getShape(long sessionHandle, String name);
native void bind(long sessionHandle, String name, ByteBuffer buffer);
native void runTrtModel(long sessionHandle);
native int getTrtVersion();
}
|
0
|
java-sources/ai/djl/tensorrt/tensorrt/0.33.0/ai/djl/tensorrt
|
java-sources/ai/djl/tensorrt/tensorrt/0.33.0/ai/djl/tensorrt/jni/package-info.java
|
/*
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
/** Contains classes to interface with the underlying TensorRT Engine. */
package ai.djl.tensorrt.jni;
|
0
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/ai/djl/tflite
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/ai/djl/tflite/engine/LibUtils.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tflite.engine;
import ai.djl.util.ClassLoaderUtils;
import ai.djl.util.Platform;
import ai.djl.util.Utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.zip.GZIPInputStream;
/**
* Utilities for finding the TFLite Engine binary on the System.
*
* <p>The Engine will be searched for in a variety of locations in the following order:
*
* <ol>
* <li>In the path specified by the TFLite_LIBRARY_PATH environment variable
* </ol>
*/
@SuppressWarnings("MissingJavadocMethod")
public final class LibUtils {
private static final Logger logger = LoggerFactory.getLogger(LibUtils.class);
private static final String LIB_NAME = "tensorflowlite_jni";
private static final Pattern VERSION_PATTERN =
Pattern.compile("(\\d+\\.\\d+\\.\\d+(-[a-z]+)?)(-SNAPSHOT)?(-\\d+)?");
private LibUtils() {}
public static void loadLibrary() {
String libName = findLibraryInClasspath();
logger.debug("Loading TFLite native library from: {}", libName);
System.load(libName); // NOPMD
}
private static synchronized String findLibraryInClasspath() {
Platform platform = Platform.detectPlatform("tflite");
if (platform.isPlaceholder()) {
return downloadTfLite(platform);
}
return loadLibraryFromClasspath(platform);
}
private static String loadLibraryFromClasspath(Platform platform) {
Path tmp = null;
try {
String libName = System.mapLibraryName(LIB_NAME);
Path cacheFolder = Utils.getEngineCacheDir("tflite");
String version = platform.getVersion();
String flavor = platform.getFlavor();
String classifier = platform.getClassifier();
Path dir = cacheFolder.resolve(version + '-' + flavor + '-' + classifier);
logger.debug("Using cache dir: {}", dir);
Path path = dir.resolve(libName);
if (Files.exists(path)) {
return path.toAbsolutePath().toString();
}
Files.createDirectories(cacheFolder);
tmp = Files.createTempDirectory(cacheFolder, "tmp");
for (String file : platform.getLibraries()) {
String libPath = "native/lib/" + file;
logger.info("Extracting {} to cache ...", libPath);
try (InputStream is = ClassLoaderUtils.getResourceAsStream(libPath)) {
Files.copy(is, tmp.resolve(file), StandardCopyOption.REPLACE_EXISTING);
}
}
Utils.moveQuietly(tmp, dir);
return path.toAbsolutePath().toString();
} catch (IOException e) {
throw new IllegalStateException("Failed to extract TFLite native library", e);
} finally {
if (tmp != null) {
Utils.deleteQuietly(tmp);
}
}
}
private static String downloadTfLite(Platform platform) {
String version = platform.getVersion();
String flavor = platform.getFlavor();
String classifier = platform.getClassifier();
String os = platform.getOsPrefix();
String libName = System.mapLibraryName(LIB_NAME);
Path cacheDir = Utils.getEngineCacheDir("tflite");
logger.debug("Using cache dir: {}", cacheDir);
Path dir = cacheDir.resolve(version + '-' + flavor + '-' + classifier);
Path path = dir.resolve(libName);
if (Files.exists(path)) {
return path.toAbsolutePath().toString();
}
Matcher matcher = VERSION_PATTERN.matcher(version);
if (!matcher.matches()) {
throw new IllegalArgumentException("Unexpected version: " + version);
}
String link = "https://publish.djl.ai/tflite/" + matcher.group(1);
Path tmp = null;
try (InputStream is = Utils.openUrl(link + "/files.txt")) {
Files.createDirectories(cacheDir);
List<String> lines = Utils.readLines(is);
if (flavor.startsWith("cu")
&& !lines.contains(flavor + '/' + classifier + '/' + libName + ".gz")) {
logger.warn("No matching cuda flavor for {} found: {}.", os, flavor);
// fallback to CPU
flavor = "cpu";
// check again
dir = cacheDir.resolve(version + '-' + flavor + '-' + classifier);
path = dir.resolve(libName);
if (Files.exists(path)) {
return path.toAbsolutePath().toString();
}
}
tmp = Files.createTempDirectory(cacheDir, "tmp");
boolean found = false;
for (String line : lines) {
if (line.startsWith(flavor + '/' + classifier + '/')) {
found = true;
URL url = new URL(link + '/' + line);
String fileName = line.substring(line.lastIndexOf('/') + 1, line.length() - 3);
logger.info("Downloading {} ...", url);
try (InputStream fis = new GZIPInputStream(Utils.openUrl(url))) {
Files.copy(fis, tmp.resolve(fileName), StandardCopyOption.REPLACE_EXISTING);
}
}
}
if (!found) {
throw new IllegalStateException(
"No TFLite native library matches your operating system: " + platform);
}
Utils.moveQuietly(tmp, dir);
return path.toAbsolutePath().toString();
} catch (IOException e) {
throw new IllegalStateException("Failed to download TensorflowLite native library", e);
} finally {
if (tmp != null) {
Utils.deleteQuietly(tmp);
}
}
}
}
|
0
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/ai/djl/tflite
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/ai/djl/tflite/engine/TfLiteDataType.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tflite.engine;
import ai.djl.ndarray.types.DataType;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/** Converts between DJL and TFLite data types. */
public final class TfLiteDataType {
private static Map<DataType, org.tensorflow.lite.DataType> toTf = createMapToTf();
private static Map<org.tensorflow.lite.DataType, DataType> fromTf = createMapFromTf();
private TfLiteDataType() {}
private static Map<DataType, org.tensorflow.lite.DataType> createMapToTf() {
Map<DataType, org.tensorflow.lite.DataType> map = new ConcurrentHashMap<>();
map.put(DataType.FLOAT32, org.tensorflow.lite.DataType.FLOAT32);
map.put(DataType.INT32, org.tensorflow.lite.DataType.INT32);
map.put(DataType.INT64, org.tensorflow.lite.DataType.INT64);
map.put(DataType.UINT8, org.tensorflow.lite.DataType.UINT8);
map.put(DataType.INT8, org.tensorflow.lite.DataType.INT8);
map.put(DataType.BOOLEAN, org.tensorflow.lite.DataType.BOOL);
map.put(DataType.STRING, org.tensorflow.lite.DataType.STRING);
return map;
}
private static Map<org.tensorflow.lite.DataType, DataType> createMapFromTf() {
Map<org.tensorflow.lite.DataType, DataType> map = new ConcurrentHashMap<>();
map.put(org.tensorflow.lite.DataType.FLOAT32, DataType.FLOAT32);
map.put(org.tensorflow.lite.DataType.INT32, DataType.INT32);
map.put(org.tensorflow.lite.DataType.INT64, DataType.INT64);
map.put(org.tensorflow.lite.DataType.UINT8, DataType.UINT8);
map.put(org.tensorflow.lite.DataType.BOOL, DataType.BOOLEAN);
map.put(org.tensorflow.lite.DataType.STRING, DataType.STRING);
return map;
}
/**
* Converts from a TFLite data type to a DJL data type.
*
* @param tfType the TFLite data type
* @return the DJL data type
*/
public static DataType fromTf(org.tensorflow.lite.DataType tfType) {
return fromTf.get(tfType);
}
/**
* Converts from a DJL data type to a TFLite data type.
*
* @param type the DJL data type
* @return the TFLite data type
*/
public static org.tensorflow.lite.DataType toTf(DataType type) {
return toTf.get(type);
}
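    // Conversion is a plain table lookup in both directions, and types missing from a map
    // (for example DataType.FLOAT64, which has no TFLite counterpart here) come back as null.
    // Grounded in the maps above:
    //
    //   TfLiteDataType.toTf(DataType.FLOAT32);                    // -> FLOAT32
    //   TfLiteDataType.fromTf(org.tensorflow.lite.DataType.BOOL); // -> DataType.BOOLEAN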
}
|
0
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/ai/djl/tflite
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/ai/djl/tflite/engine/TfLiteEngine.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tflite.engine;
import ai.djl.Device;
import ai.djl.Model;
import ai.djl.engine.Engine;
import ai.djl.ndarray.NDManager;
/**
* The {@code TfLiteEngine} is an implementation of the {@link Engine} based on the <a
* href="https://www.tensorflow.org/lite">TFLite Deep Learning Library</a>.
*
* <p>To get an instance of the {@code TfLiteEngine} when it is not the default Engine, call {@link
* Engine#getEngine(String)} with the Engine name "TFLite".
*/
public final class TfLiteEngine extends Engine {
public static final String ENGINE_NAME = "TFLite";
static final int RANK = 10;
private Engine alternativeEngine;
private boolean initialized;
private TfLiteEngine() {
LibUtils.loadLibrary();
}
static Engine newInstance() {
return new TfLiteEngine();
}
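    // As with other non-default engines, callers can request TFLite explicitly by name
    // (a minimal sketch; assumes the TFLite native library is available for this platform):
    //
    //   Engine tflite = Engine.getEngine(TfLiteEngine.ENGINE_NAME); // "TFLite"
    //   NDManager manager = tflite.newBaseManager();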
/** {@inheritDoc} */
@Override
public Engine getAlternativeEngine() {
if (!initialized && !Boolean.getBoolean("ai.djl.tflite.disable_alternative")) {
Engine engine = Engine.getInstance();
if (engine.getRank() < getRank()) {
// alternativeEngine should not have the same rank as TFLite
alternativeEngine = engine;
}
initialized = true;
}
return alternativeEngine;
}
/** {@inheritDoc} */
@Override
public String getEngineName() {
return ENGINE_NAME;
}
/** {@inheritDoc} */
@Override
public int getRank() {
return RANK;
}
/** {@inheritDoc} */
@Override
public String getVersion() {
return "2.4.1";
}
/** {@inheritDoc} */
@Override
public boolean hasCapability(String capability) {
// TODO: Support GPU
return false;
}
/** {@inheritDoc} */
@Override
public Model newModel(String name, Device device) {
        // We need to pass the TfLiteNDManager explicitly
return new TfLiteModel(name, newBaseManager(device));
}
/** {@inheritDoc} */
@Override
public NDManager newBaseManager() {
return newBaseManager(null);
}
/** {@inheritDoc} */
@Override
public NDManager newBaseManager(Device device) {
return TfLiteNDManager.getSystemManager().newSubManager(device);
}
}
|
0
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/ai/djl/tflite
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/ai/djl/tflite/engine/TfLiteEngineProvider.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tflite.engine;
import ai.djl.engine.Engine;
import ai.djl.engine.EngineProvider;
/** {@code TfLiteEngineProvider} is the TFLite implementation of {@link EngineProvider}. */
public class TfLiteEngineProvider implements EngineProvider {
/** {@inheritDoc} */
@Override
public String getEngineName() {
return TfLiteEngine.ENGINE_NAME;
}
/** {@inheritDoc} */
@Override
public int getEngineRank() {
return TfLiteEngine.RANK;
}
/** {@inheritDoc} */
@Override
public Engine getEngine() {
return InstanceHolder.INSTANCE;
}
private static class InstanceHolder {
static final Engine INSTANCE = TfLiteEngine.newInstance();
}
}
|
0
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/ai/djl/tflite
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/ai/djl/tflite/engine/TfLiteModel.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tflite.engine;
import ai.djl.BaseModel;
import ai.djl.Model;
import ai.djl.engine.Engine;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.DataType;
import ai.djl.util.Utils;
import org.tensorflow.lite.Interpreter;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Map;
/**
* {@code TfLiteModel} is the TFLite implementation of {@link Model}.
*
* <p>TfLiteModel contains all the methods in Model to load and process a model. In addition, it
 * provides TFLite-specific functionality.
*/
public class TfLiteModel extends BaseModel {
/**
* Constructs a new Model on a given device.
*
* @param name the model name
     * @param manager the {@link NDManager} that holds the NDArrays
*/
TfLiteModel(String name, NDManager manager) {
super(name);
        this.manager = manager;
        this.manager.setName("TfLiteModel");
dataType = DataType.FLOAT32;
}
/** {@inheritDoc} */
@Override
public void load(Path modelPath, String prefix, Map<String, ?> options) throws IOException {
setModelDir(modelPath);
if (block != null) {
throw new UnsupportedOperationException("TFLite does not support dynamic blocks");
}
Path modelFile = findModelFile(prefix);
if (modelFile == null) {
modelFile = findModelFile(modelDir.toFile().getName());
if (modelFile == null) {
throw new FileNotFoundException("TFLite model file not found in: " + modelPath);
}
}
Interpreter interpreter = new Interpreter(modelFile.toFile());
setBlock(new TfLiteSymbolBlock(interpreter, getNDManager()));
}
/** {@inheritDoc} */
@Override
public void load(InputStream is, Map<String, ?> options) throws IOException {
if (block != null) {
throw new UnsupportedOperationException("TFLite does not support dynamic blocks");
}
modelDir = Files.createTempDirectory("tflite-model");
modelDir.toFile().deleteOnExit();
byte[] buf = Utils.toByteArray(is);
Engine engine = Engine.getEngine(TfLiteEngine.ENGINE_NAME);
ByteBuffer bb = engine.newBaseManager().allocateDirect(buf.length);
bb.put(buf);
Interpreter interpreter = new Interpreter(bb);
setBlock(new TfLiteSymbolBlock(interpreter, getNDManager()));
}
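    // The stream overload above buffers the whole .tflite flatbuffer into a direct ByteBuffer
    // before handing it to the Interpreter, so a model can be loaded from any InputStream, not
    // just a file on disk. A minimal sketch (file name is illustrative, exception handling omitted):
    //
    //   try (Model model = Model.newInstance("mobilenet", "TFLite");
    //           InputStream is = Files.newInputStream(Paths.get("mobilenet_v2.tflite"))) {
    //       model.load(is);
    //   }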
/** {@inheritDoc} */
@Override
public TfLiteNDManager getNDManager() {
return (TfLiteNDManager) super.getNDManager();
}
private Path findModelFile(String prefix) {
if (Files.isRegularFile(modelDir)) {
Path file = modelDir;
modelDir = modelDir.getParent();
String fileName = file.toFile().getName();
if (fileName.endsWith(".tflite")) {
                modelName = fileName.substring(0, fileName.length() - 7);
} else {
modelName = fileName;
}
return file;
}
if (prefix == null) {
prefix = modelName;
}
Path modelFile = modelDir.resolve(prefix);
if (Files.notExists(modelFile) || !Files.isRegularFile(modelFile)) {
if (prefix.endsWith(".tflite")) {
return null;
}
modelFile = modelDir.resolve(prefix + ".tflite");
if (Files.notExists(modelFile) || !Files.isRegularFile(modelFile)) {
return null;
}
}
return modelFile;
}
/** {@inheritDoc} */
@Override
public void close() {
if (block != null) {
((TfLiteSymbolBlock) block).close();
block = null;
}
super.close();
}
}
|
0
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/ai/djl/tflite
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/ai/djl/tflite/engine/TfLiteNDArray.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tflite.engine;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDArrayAdapter;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import org.tensorflow.lite.Tensor;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.UUID;
/** {@code TfLiteNDArray} is the TFLite implementation of {@link NDArray}. */
public class TfLiteNDArray extends NDArrayAdapter {
private Tensor tensor; // this tensor was used in Interpreter, and should not be closed
private ByteBuffer data;
TfLiteNDArray(NDManager manager, NDManager alternativeManager, Tensor tensor) {
super(
manager,
alternativeManager,
new Shape(Arrays.stream(tensor.shape()).mapToLong(i -> i).toArray()),
TfLiteDataType.fromTf(tensor.dataType()),
UUID.randomUUID().toString());
this.tensor = tensor;
manager.attachInternal(uid, this);
}
TfLiteNDArray(
NDManager manager,
NDManager alternativeManager,
ByteBuffer data,
Shape shape,
DataType dataType) {
super(manager, alternativeManager, shape, dataType, UUID.randomUUID().toString());
this.data = data;
manager.attachInternal(uid, this);
}
/** {@inheritDoc} */
@Override
public void intern(NDArray replaced) {
if (tensor != null) {
tensor.close();
}
this.data = ((TfLiteNDArray) replaced).data;
this.tensor = ((TfLiteNDArray) replaced).tensor;
}
/** {@inheritDoc} */
@Override
public void detach() {
manager.detachInternal(getUid());
manager = TfLiteNDManager.getSystemManager();
}
/** {@inheritDoc} */
@Override
public NDArray toType(DataType dataType, boolean copy) {
if (dataType.equals(this.dataType)) {
if (!copy) {
return this;
}
return new TfLiteNDArray(
manager, alternativeManager, toByteBuffer().duplicate(), shape, dataType);
}
Number[] array = toArray();
switch (dataType) {
case FLOAT64:
double[] doubleResult =
Arrays.stream(array).mapToDouble(Number::doubleValue).toArray();
return manager.create(doubleResult).reshape(shape);
case FLOAT32:
float[] floatResult = new float[array.length];
for (int i = 0; i < array.length; i++) {
floatResult[i] = array[i].floatValue();
}
return manager.create(floatResult).reshape(shape);
case INT32:
int[] intResult = Arrays.stream(array).mapToInt(Number::intValue).toArray();
return manager.create(intResult).reshape(shape);
case INT64:
long[] longResult = Arrays.stream(array).mapToLong(Number::longValue).toArray();
return manager.create(longResult).reshape(shape);
case INT8:
                byte[] byteResult = new byte[array.length];
                for (int i = 0; i < array.length; i++) {
                    byteResult[i] = array[i].byteValue();
                }
                return manager.create(byteResult).reshape(shape);
default:
throw new UnsupportedOperationException(
"Type conversion is not supported for TFLite for data type " + dataType);
}
}
/** {@inheritDoc} */
@Override
public ByteBuffer toByteBuffer() {
if (data == null) {
data = tensor.buffer();
}
data.rewind();
return data;
}
/** {@inheritDoc} */
@Override
public NDArray reshape(Shape shape) {
if (tensor != null) {
throw new UnsupportedOperationException("Not supported for TFLite");
} else {
if (Arrays.stream(shape.getShape()).anyMatch(n -> n < 0)) {
throw new UnsupportedOperationException(
"Negative shape is not supported for TFLite");
}
return new TfLiteNDArray(manager, alternativeManager, data, shape, dataType);
}
}
}
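// A minimal usage sketch (illustrative, not part of the original source): toType above performs
// the conversion on the host via toArray(), so changing the element type always materializes a
// new array. The "array" parameter is an assumed, already-created TfLiteNDArray.
final class TfLiteNDArrayToTypeExample {
    static NDArray toDouble(TfLiteNDArray array) {
        // copy=false still returns a new NDArray because the element type actually changes
        return array.toType(DataType.FLOAT64, false);
    }
}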
|
0
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/ai/djl/tflite
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/ai/djl/tflite/engine/TfLiteNDManager.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tflite.engine;
import ai.djl.Device;
import ai.djl.engine.Engine;
import ai.djl.ndarray.BaseNDManager;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import org.tensorflow.lite.Tensor;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
/** {@code TfLiteNDManager} is the TFLite implementation of {@link NDManager}. */
public class TfLiteNDManager extends BaseNDManager {
private static final TfLiteNDManager SYSTEM_MANAGER = new SystemManager();
private TfLiteNDManager(NDManager parent, Device device) {
super(parent, device);
}
static TfLiteNDManager getSystemManager() {
return SYSTEM_MANAGER;
}
/** {@inheritDoc} */
@Override
public ByteBuffer allocateDirect(int capacity) {
return ByteBuffer.allocateDirect(capacity).order(ByteOrder.nativeOrder());
}
/** {@inheritDoc} */
@Override
public TfLiteNDArray from(NDArray array) {
if (array == null || array instanceof TfLiteNDArray) {
return (TfLiteNDArray) array;
}
TfLiteNDArray result = create(array.toByteBuffer(), array.getShape(), array.getDataType());
result.setName(array.getName());
return result;
}
TfLiteNDArray createInternal(Tensor tensor) {
return new TfLiteNDArray(this, alternativeManager, tensor);
}
/** {@inheritDoc} */
@Override
public TfLiteNDArray create(Buffer data, Shape shape, DataType dataType) {
int size = Math.toIntExact(shape.size());
BaseNDManager.validateBuffer(data, dataType, size);
if (data.isDirect() && data instanceof ByteBuffer) {
return new TfLiteNDArray(this, alternativeManager, (ByteBuffer) data, shape, dataType);
}
ByteBuffer buf = allocateDirect(size * dataType.getNumOfBytes());
copyBuffer(data, buf);
return new TfLiteNDArray(this, alternativeManager, buf, shape, dataType);
}
/** {@inheritDoc} */
@Override
public TfLiteNDManager newSubManager(Device device) {
TfLiteNDManager manager = new TfLiteNDManager(this, device);
attachInternal(manager.uid, manager);
return manager;
}
/** {@inheritDoc} */
@Override
public final Engine getEngine() {
return Engine.getEngine(TfLiteEngine.ENGINE_NAME);
}
/** The SystemManager is the root {@link TfLiteNDManager} of which all others are children. */
private static final class SystemManager extends TfLiteNDManager implements SystemNDManager {
SystemManager() {
super(null, null);
}
}
}
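// A minimal usage sketch (illustrative, not part of the original source): obtaining a TFLite
// NDManager by engine name and creating an array from host data. The values and shape are
// hypothetical and assume the TFLite engine is available at runtime.
final class TfLiteNDManagerExample {
    static void createExample() {
        try (NDManager manager = NDManager.newBaseManager("TFLite")) {
            // create(float[], Shape) routes through create(Buffer, Shape, DataType) above and
            // copies the data into a direct, native-order ByteBuffer
            NDArray array = manager.create(new float[] {1f, 2f, 3f, 4f}, new Shape(2, 2));
            array.getShape(); // (2, 2)
        }
    }
}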
|
0
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/ai/djl/tflite
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/ai/djl/tflite/engine/TfLiteSymbolBlock.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tflite.engine;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.nn.AbstractSymbolBlock;
import ai.djl.nn.ParameterList;
import ai.djl.nn.SymbolBlock;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
import org.tensorflow.lite.Interpreter;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/**
* {@code TfLiteSymbolBlock} is the TFLite implementation of {@link SymbolBlock}.
*
* <p>You can create a {@code TfLiteSymbolBlock} using {@link ai.djl.Model#load(java.nio.file.Path,
* String)}.
*/
public class TfLiteSymbolBlock extends AbstractSymbolBlock implements AutoCloseable {
private TfLiteNDManager manager;
private Interpreter interpreter;
private static final Map<Integer, Object> EMPTY = new ConcurrentHashMap<>();
TfLiteSymbolBlock(Interpreter interpreter, TfLiteNDManager manager) {
this.interpreter = interpreter;
this.manager = manager;
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
        Object[] inputBuffers = inputs.stream().map(NDArray::toByteBuffer).toArray();
        interpreter.runForMultipleInputsOutputs(inputBuffers, EMPTY);
int outputSize = interpreter.getOutputTensorCount();
NDList result = new NDList(outputSize);
for (int i = 0; i < outputSize; i++) {
result.add(manager.createInternal(interpreter.getOutputTensor(i)));
}
result.attach(inputs.head().getManager());
return result;
}
/** {@inheritDoc} */
@Override
public void close() {
interpreter.close();
}
/** {@inheritDoc} */
@Override
public ParameterList getDirectParameters() {
throw new UnsupportedOperationException("Not yet supported");
}
}
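// A minimal usage sketch (illustrative, not part of the original source): as the class javadoc
// notes, a TfLiteSymbolBlock is obtained indirectly by loading a .tflite model through the Model
// API. The directory "build/model" and model name "model" are hypothetical placeholders.
final class TfLiteSymbolBlockExample {
    static void loadExample() throws java.io.IOException, ai.djl.MalformedModelException {
        try (ai.djl.Model model = ai.djl.Model.newInstance("model", "TFLite")) {
            model.load(java.nio.file.Paths.get("build/model"), "model");
            // model.getBlock() now returns a TfLiteSymbolBlock wrapping a TFLite Interpreter
        }
    }
}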
|
0
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/ai/djl/tflite
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/ai/djl/tflite/engine/package-info.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
/** Contains classes to interface with the underlying TFLite Engine. */
package ai.djl.tflite.engine;
|
0
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/ai/djl/tflite
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/ai/djl/tflite/zoo/TfLiteModelZoo.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tflite.zoo;
import ai.djl.Application.CV;
import ai.djl.repository.Repository;
import ai.djl.repository.zoo.ModelZoo;
import ai.djl.tflite.engine.TfLiteEngine;
import java.util.Collections;
import java.util.Set;
/** TfLiteModelZoo is a repository that contains all TFLite models for DJL. */
public class TfLiteModelZoo extends ModelZoo {
private static final String DJL_REPO_URL = "https://mlrepo.djl.ai/";
private static final Repository REPOSITORY = Repository.newInstance("TFLite", DJL_REPO_URL);
public static final String GROUP_ID = "ai.djl.tflite";
TfLiteModelZoo() {
addModel(REPOSITORY.model(CV.IMAGE_CLASSIFICATION, GROUP_ID, "mobilenet", "0.0.1"));
}
/** {@inheritDoc} */
@Override
public String getGroupId() {
return GROUP_ID;
}
/** {@inheritDoc} */
@Override
public Set<String> getSupportedEngines() {
return Collections.singleton(TfLiteEngine.ENGINE_NAME);
}
}
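// A minimal usage sketch (illustrative, not part of the original source): loading the mobilenet
// classification model registered in the constructor through the Criteria API. The input/output
// types are assumptions for an image-classification model, and network access to the DJL model
// repository is assumed.
final class TfLiteModelZooExample {
    static void loadExample() throws Exception {
        ai.djl.repository.zoo.Criteria<ai.djl.modality.cv.Image, ai.djl.modality.Classifications>
                criteria =
                        ai.djl.repository.zoo.Criteria.builder()
                                .setTypes(
                                        ai.djl.modality.cv.Image.class,
                                        ai.djl.modality.Classifications.class)
                                .optApplication(CV.IMAGE_CLASSIFICATION)
                                .optGroupId(TfLiteModelZoo.GROUP_ID)
                                .optArtifactId("mobilenet")
                                .optEngine(TfLiteEngine.ENGINE_NAME)
                                .build();
        try (ai.djl.repository.zoo.ZooModel<ai.djl.modality.cv.Image, ai.djl.modality.Classifications>
                model = criteria.loadModel()) {
            model.getName(); // "mobilenet"
        }
    }
}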
|
0
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/ai/djl/tflite
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/ai/djl/tflite/zoo/TfLiteZooProvider.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.tflite.zoo;
import ai.djl.repository.zoo.ModelZoo;
import ai.djl.repository.zoo.ZooProvider;
/**
 * A TFLite model zoo provider that implements the {@link ai.djl.repository.zoo.ZooProvider} interface.
*/
public class TfLiteZooProvider implements ZooProvider {
/** {@inheritDoc} */
@Override
public ModelZoo getModelZoo() {
return new TfLiteModelZoo();
}
}
|
0
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/ai/djl/tflite
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/ai/djl/tflite/zoo/package-info.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
/** Contains the built-in {@link ai.djl.tflite.zoo.TfLiteModelZoo}. */
package ai.djl.tflite.zoo;
|
0
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/org/tensorflow
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/org/tensorflow/lite/DataType.java
|
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
package org.tensorflow.lite;
/** Represents the type of elements in a TensorFlow Lite {@link Tensor} as an enum. */
public enum DataType {
/** 32-bit single precision floating point. */
FLOAT32(1),
/** 32-bit signed integer. */
INT32(2),
/** 8-bit unsigned integer. */
UINT8(3),
/** 64-bit signed integer. */
INT64(4),
/** Strings. */
STRING(5),
/** Bool. */
BOOL(6),
/** 16-bit signed integer. */
INT16(7),
/** 8-bit signed integer. */
INT8(9);
private final int value;
DataType(int value) {
this.value = value;
}
/**
* Returns the size of an element of this type, in bytes, or -1 if element size is variable.
*
* @return the size of an element of this type
*/
public int byteSize() {
switch (this) {
case FLOAT32:
case INT32:
return 4;
case INT16:
return 2;
case INT8:
case UINT8:
return 1;
case INT64:
return 8;
case BOOL:
// Boolean size is JVM-dependent.
return -1;
case STRING:
return -1;
}
throw new IllegalArgumentException(
"DataType error: DataType " + this + " is not supported yet");
}
/** Corresponding value of the TfLiteType enum in the TensorFlow Lite C API. */
int c() {
return value;
}
/** Converts a C TfLiteType enum value to the corresponding type. */
static DataType fromC(int c) {
for (DataType t : values) {
if (t.value == c) {
return t;
}
}
throw new IllegalArgumentException(
"DataType error: DataType "
+ c
+ " is not recognized in Java (version "
+ TensorFlowLite.runtimeVersion()
+ ")");
}
/** Gets string names of the data type. */
String toStringName() {
switch (this) {
case FLOAT32:
return "float";
case INT32:
return "int";
case INT16:
return "short";
case INT8:
case UINT8:
return "byte";
case INT64:
return "long";
case BOOL:
return "bool";
case STRING:
return "string";
}
throw new IllegalArgumentException(
"DataType error: DataType " + this + " is not supported yet");
}
// Cached to avoid copying it
private static final DataType[] values = values();
}
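// A minimal usage sketch (illustrative, not part of the original source): sizing a direct host
// buffer from an element count and byteSize(). The element count is an assumed caller-provided
// value; variable-sized types (STRING, BOOL) report -1 and are rejected here.
final class DataTypeSizingExample {
    static java.nio.ByteBuffer allocateFor(DataType type, int numElements) {
        int elementSize = type.byteSize();
        if (elementSize < 0) {
            throw new IllegalArgumentException("Variable-sized element type: " + type);
        }
        return java.nio.ByteBuffer.allocateDirect(numElements * elementSize)
                .order(java.nio.ByteOrder.nativeOrder());
    }
}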
|
0
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/org/tensorflow
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/org/tensorflow/lite/Delegate.java
|
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
package org.tensorflow.lite;
/**
* Wrapper for a native TensorFlow Lite Delegate.
*
* <p>WARNING: This is an experimental interface that is subject to change.
*
* <p>If a delegate implementation holds additional resources or memory that should be explicitly
* freed, then best practice is to add a {@code close()} method to the implementation and have the
* client call that explicitly when the delegate instance is no longer in use. While this approach
* technically allows sharing of a single delegate instance across multiple interpreter instances,
* the delegate implementation must explicitly support this.
*/
public interface Delegate {
/**
* Returns a native handle to the TensorFlow Lite delegate implementation.
*
* <p>Note: The Java {@link Delegate} maintains ownership of the native delegate instance, and
* must ensure its existence for the duration of usage with any {@link Interpreter}.
*
* @return The native delegate handle. In C/C++, this should be a pointer to
* 'TfLiteOpaqueDelegate'.
*/
public long getNativeHandle();
}
|
0
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/org/tensorflow
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/org/tensorflow/lite/Interpreter.java
|
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
package org.tensorflow.lite;
import java.io.File;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@SuppressWarnings("MissingJavadocMethod")
public final class Interpreter implements InterpreterApi {
public static class Options extends InterpreterApi.Options {
public Options() {}
@Override
public Options setNumThreads(int numThreads) {
super.setNumThreads(numThreads);
return this;
}
@Override
public Options setUseNNAPI(boolean useNNAPI) {
super.setUseNNAPI(useNNAPI);
return this;
}
@Deprecated
public Options setAllowFp16PrecisionForFp32(boolean allow) {
this.allowFp16PrecisionForFp32 = allow;
return this;
}
public Options addDelegate(Delegate delegate) {
delegates.add(delegate);
return this;
}
public Options setAllowBufferHandleOutput(boolean allow) {
this.allowBufferHandleOutput = allow;
return this;
}
@Override
public Options setCancellable(boolean allow) {
super.setCancellable(allow);
return this;
}
public Options setUseXNNPACK(boolean useXNNPACK) {
this.useXNNPACK = useXNNPACK;
return this;
}
Boolean allowFp16PrecisionForFp32;
Boolean allowBufferHandleOutput;
// TODO(b/171856982): update the comment when applying XNNPACK delegate by default is
// enabled for C++ TfLite library on Android platform.
// Note: the initial "null" value indicates default behavior which may mean XNNPACK
// delegate will be applied by default.
Boolean useXNNPACK;
final List<Delegate> delegates = new ArrayList<>();
}
public Interpreter(File modelFile) {
this(modelFile, /*options = */ null);
}
public Interpreter(File modelFile, Options options) {
wrapper = new NativeInterpreterWrapper(modelFile.getAbsolutePath(), options);
signatureNameList = getSignatureDefNames();
}
public Interpreter(ByteBuffer byteBuffer) {
this(byteBuffer, /* options= */ null);
}
public Interpreter(ByteBuffer byteBuffer, Options options) {
wrapper = new NativeInterpreterWrapper(byteBuffer, options);
signatureNameList = getSignatureDefNames();
}
@Override
public void run(Object input, Object output) {
Object[] inputs = {input};
Map<Integer, Object> outputs = new HashMap<>();
outputs.put(0, output);
runForMultipleInputsOutputs(inputs, outputs);
}
@Override
public void runForMultipleInputsOutputs(Object[] inputs, Map<Integer, Object> outputs) {
checkNotClosed();
wrapper.run(inputs, outputs);
}
public void runSignature(
Map<String, Object> inputs, Map<String, Object> outputs, String methodName) {
checkNotClosed();
if (methodName == null && signatureNameList.length == 1) {
methodName = signatureNameList[0];
}
if (methodName == null) {
throw new IllegalArgumentException(
"Input error: SignatureDef methodName should not be null. null is only allowed"
+ " if the model has a single Signature. Available Signatures: "
+ Arrays.toString(signatureNameList));
}
wrapper.runSignature(inputs, outputs, methodName);
}
public void runSignature(Map<String, Object> inputs, Map<String, Object> outputs) {
checkNotClosed();
runSignature(inputs, outputs, null);
}
@Override
public void allocateTensors() {
checkNotClosed();
wrapper.allocateTensors();
}
@Override
public void resizeInput(int idx, int[] dims) {
checkNotClosed();
wrapper.resizeInput(idx, dims, false);
}
@Override
public void resizeInput(int idx, int[] dims, boolean strict) {
checkNotClosed();
wrapper.resizeInput(idx, dims, strict);
}
@Override
public int getInputTensorCount() {
checkNotClosed();
return wrapper.getInputTensorCount();
}
@Override
public int getInputIndex(String opName) {
checkNotClosed();
return wrapper.getInputIndex(opName);
}
@Override
public Tensor getInputTensor(int inputIndex) {
checkNotClosed();
return wrapper.getInputTensor(inputIndex);
}
public Tensor getInputTensorFromSignature(String inputName, String methodName) {
checkNotClosed();
if (methodName == null && signatureNameList.length == 1) {
methodName = signatureNameList[0];
}
if (methodName == null) {
throw new IllegalArgumentException(
"Input error: SignatureDef methodName should not be null. null is only allowed"
+ " if the model has a single Signature. Available Signatures: "
+ Arrays.toString(signatureNameList));
}
return wrapper.getInputTensor(inputName, methodName);
}
public String[] getSignatureDefNames() {
checkNotClosed();
return wrapper.getSignatureDefNames();
}
public String[] getSignatureInputs(String methodName) {
checkNotClosed();
return wrapper.getSignatureInputs(methodName);
}
public String[] getSignatureOutputs(String methodName) {
checkNotClosed();
return wrapper.getSignatureOutputs(methodName);
}
@Override
public int getOutputTensorCount() {
checkNotClosed();
return wrapper.getOutputTensorCount();
}
@Override
public int getOutputIndex(String opName) {
checkNotClosed();
return wrapper.getOutputIndex(opName);
}
@Override
public Tensor getOutputTensor(int outputIndex) {
checkNotClosed();
return wrapper.getOutputTensor(outputIndex);
}
public Tensor getOutputTensorFromSignature(String outputName, String methodName) {
checkNotClosed();
if (methodName == null && signatureNameList.length == 1) {
methodName = signatureNameList[0];
}
if (methodName == null) {
throw new IllegalArgumentException(
"Input error: SignatureDef methodName should not be null. null is only allowed"
+ " if the model has a single Signature. Available Signatures: "
+ Arrays.toString(signatureNameList));
}
return wrapper.getOutputTensor(outputName, methodName);
}
@Override
public Long getLastNativeInferenceDurationNanoseconds() {
checkNotClosed();
return wrapper.getLastNativeInferenceDurationNanoseconds();
}
public void resetVariableTensors() {
checkNotClosed();
wrapper.resetVariableTensors();
}
public void setCancelled(boolean cancelled) {
wrapper.setCancelled(cancelled);
}
int getExecutionPlanLength() {
checkNotClosed();
return wrapper.getExecutionPlanLength();
}
@Override
public void close() {
if (wrapper != null) {
wrapper.close();
wrapper = null;
}
}
// for Object.finalize, see https://bugs.openjdk.java.net/browse/JDK-8165641
@SuppressWarnings("deprecation")
@Override
protected void finalize() throws Throwable {
try {
close();
} finally {
super.finalize();
}
}
private void checkNotClosed() {
if (wrapper == null) {
throw new IllegalStateException(
"Internal error: The Interpreter has already been closed.");
}
}
NativeInterpreterWrapper wrapper;
String[] signatureNameList;
}
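// A minimal usage sketch (illustrative, not part of the original source): the typical
// single-input/single-output workflow with this Interpreter. The file name "model.tflite" and
// the 1x4 float shapes are hypothetical and must match the actual model's tensors.
final class InterpreterExample {
    static void runExample() {
        Interpreter.Options options = new Interpreter.Options().setNumThreads(2);
        try (Interpreter interpreter = new Interpreter(new File("model.tflite"), options)) {
            float[][] input = new float[1][4];
            float[][] output = new float[1][4];
            // run() resizes and allocates tensors as needed, copies the input, invokes the
            // native interpreter, and copies the result back into the output array
            interpreter.run(input, output);
        }
    }
}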
|
0
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/org/tensorflow
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/org/tensorflow/lite/InterpreterApi.java
|
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
package org.tensorflow.lite;
import java.util.Map;
@SuppressWarnings("MissingJavadocMethod")
public interface InterpreterApi extends AutoCloseable {
public static class Options {
public Options() {}
public Options setNumThreads(int numThreads) {
this.numThreads = numThreads;
return this;
}
public Options setUseNNAPI(boolean useNNAPI) {
this.useNNAPI = useNNAPI;
return this;
}
public Options setCancellable(boolean allow) {
this.allowCancellation = allow;
return this;
}
int numThreads = -1;
Boolean useNNAPI;
Boolean allowCancellation;
}
public void run(Object input, Object output);
public void runForMultipleInputsOutputs(Object[] inputs, Map<Integer, Object> outputs);
public void allocateTensors();
public void resizeInput(int idx, int[] dims);
public void resizeInput(int idx, int[] dims, boolean strict);
public int getInputTensorCount();
public int getInputIndex(String opName);
public Tensor getInputTensor(int inputIndex);
public int getOutputTensorCount();
public int getOutputIndex(String opName);
public Tensor getOutputTensor(int outputIndex);
public Long getLastNativeInferenceDurationNanoseconds();
@Override
void close();
}
|
0
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/org/tensorflow
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/org/tensorflow/lite/NativeInterpreterWrapper.java
|
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
package org.tensorflow.lite;
import org.tensorflow.lite.nnapi.NnApiDelegate;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.MappedByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
final class NativeInterpreterWrapper implements AutoCloseable {
NativeInterpreterWrapper(String modelPath) {
this(modelPath, /* options= */ null);
}
NativeInterpreterWrapper(ByteBuffer byteBuffer) {
this(byteBuffer, /* options= */ null);
}
NativeInterpreterWrapper(String modelPath, Interpreter.Options options) {
TensorFlowLite.init();
long errorHandle = createErrorReporter(ERROR_BUFFER_SIZE);
long modelHandle = createModel(modelPath, errorHandle);
init(errorHandle, modelHandle, options);
}
NativeInterpreterWrapper(ByteBuffer buffer, Interpreter.Options options) {
TensorFlowLite.init();
if (buffer == null
|| (!(buffer instanceof MappedByteBuffer)
&& (!buffer.isDirect() || buffer.order() != ByteOrder.nativeOrder()))) {
throw new IllegalArgumentException(
"Model ByteBuffer should be either a MappedByteBuffer of the model file, or a"
+ " direct ByteBuffer using ByteOrder.nativeOrder() which contains bytes of"
+ " model content.");
}
this.modelByteBuffer = buffer;
long errorHandle = createErrorReporter(ERROR_BUFFER_SIZE);
long modelHandle = createModelWithBuffer(modelByteBuffer, errorHandle);
init(errorHandle, modelHandle, options);
}
private void init(long errorHandle, long modelHandle, Interpreter.Options options) {
if (options == null) {
options = new Interpreter.Options();
}
this.errorHandle = errorHandle;
this.modelHandle = modelHandle;
this.interpreterHandle = createInterpreter(modelHandle, errorHandle, options.numThreads);
if (options.allowCancellation != null && options.allowCancellation) {
this.cancellationFlagHandle = createCancellationFlag(interpreterHandle);
}
this.inputTensors = new Tensor[getInputCount(interpreterHandle)];
this.outputTensors = new Tensor[getOutputCount(interpreterHandle)];
if (options.allowFp16PrecisionForFp32 != null) {
allowFp16PrecisionForFp32(
interpreterHandle, options.allowFp16PrecisionForFp32.booleanValue());
}
if (options.allowBufferHandleOutput != null) {
allowBufferHandleOutput(
interpreterHandle, options.allowBufferHandleOutput.booleanValue());
}
applyDelegates(options);
// Simply use "-1" to represent the default mode.
int applyXNNPACKMode = -1;
if (options.useXNNPACK != null) {
applyXNNPACKMode = options.useXNNPACK.booleanValue() ? 1 : 0;
}
// TODO(b/171856982): uncomment the following when applying XNNPACK delegate by default is
// enabled for C++ TfLite library on Android platform.
if (applyXNNPACKMode == 1 /*|| applyXNNPACKMode == -1*/) {
useXNNPACK(interpreterHandle, errorHandle, applyXNNPACKMode, options.numThreads);
}
allocateTensors(interpreterHandle, errorHandle);
this.isMemoryAllocated = true;
}
@Override
public void close() {
// Close the tensors first as they may reference the native interpreter.
for (int i = 0; i < inputTensors.length; ++i) {
if (inputTensors[i] != null) {
inputTensors[i].close();
inputTensors[i] = null;
}
}
for (int i = 0; i < outputTensors.length; ++i) {
if (outputTensors[i] != null) {
outputTensors[i].close();
outputTensors[i] = null;
}
}
delete(errorHandle, modelHandle, interpreterHandle);
deleteCancellationFlag(cancellationFlagHandle);
errorHandle = 0;
modelHandle = 0;
interpreterHandle = 0;
cancellationFlagHandle = 0;
modelByteBuffer = null;
inputsIndexes = null;
outputsIndexes = null;
isMemoryAllocated = false;
delegates.clear();
for (AutoCloseable ownedDelegate : ownedDelegates) {
try {
ownedDelegate.close();
} catch (Exception e) {
System.err.println("Failed to close flex delegate: " + e);
}
}
ownedDelegates.clear();
}
public void runSignature(
Map<String, Object> inputs, Map<String, Object> outputs, String methodName) {
if (inputs == null || inputs.isEmpty()) {
throw new IllegalArgumentException("Input error: Inputs should not be null or empty.");
}
if (outputs == null) {
throw new IllegalArgumentException("Input error: Outputs should not be null.");
}
initTensorIndexesMaps();
// Map inputs/output to input indexes.
Map<Integer, Object> inputsWithInputIndex = new TreeMap<>();
Map<Integer, Object> outputsWithOutputIndex = new TreeMap<>();
for (Map.Entry<String, Object> input : inputs.entrySet()) {
int tensorIndex =
getInputTensorIndexFromSignature(interpreterHandle, input.getKey(), methodName);
inputsWithInputIndex.put(tensorToInputsIndexes.get(tensorIndex), input.getValue());
}
for (Map.Entry<String, Object> output : outputs.entrySet()) {
int tensorIndex =
getOutputTensorIndexFromSignature(
interpreterHandle, output.getKey(), methodName);
outputsWithOutputIndex.put(tensorToOutputsIndexes.get(tensorIndex), output.getValue());
}
Object[] inputsList = new Object[inputs.size()];
int index = 0;
for (Map.Entry<Integer, Object> input : inputsWithInputIndex.entrySet()) {
inputsList[index++] = input.getValue();
}
run(inputsList, outputsWithOutputIndex);
}
void run(Object[] inputs, Map<Integer, Object> outputs) {
inferenceDurationNanoseconds = -1;
if (inputs == null || inputs.length == 0) {
throw new IllegalArgumentException("Input error: Inputs should not be null or empty.");
}
if (outputs == null) {
throw new IllegalArgumentException("Input error: Outputs should not be null.");
}
        // TODO(b/80431971): Remove implicit resize after deprecating multi-dimensional array
        // inputs.
        // Rather than forcing an immediate resize + allocation if an input's shape differs, we
        // first flush all resizes, avoiding redundant allocations.
for (int i = 0; i < inputs.length; ++i) {
Tensor tensor = getInputTensor(i);
int[] newShape = tensor.getInputShapeIfDifferent(inputs[i]);
if (newShape != null) {
resizeInput(i, newShape);
}
}
boolean needsAllocation = !isMemoryAllocated;
if (needsAllocation) {
allocateTensors(interpreterHandle, errorHandle);
isMemoryAllocated = true;
}
for (int i = 0; i < inputs.length; ++i) {
getInputTensor(i).setTo(inputs[i]);
}
long inferenceStartNanos = System.nanoTime();
run(interpreterHandle, errorHandle);
long inferenceDurationNanoseconds = System.nanoTime() - inferenceStartNanos;
// Allocation can trigger dynamic resizing of output tensors, so refresh all output shapes.
if (needsAllocation) {
for (int i = 0; i < outputTensors.length; ++i) {
if (outputTensors[i] != null) {
outputTensors[i].refreshShape();
}
}
}
for (Map.Entry<Integer, Object> output : outputs.entrySet()) {
// Null output placeholders are allowed and ignored.
if (output.getValue() != null) {
getOutputTensor(output.getKey()).copyTo(output.getValue());
}
}
// Only set if the entire operation succeeds.
this.inferenceDurationNanoseconds = inferenceDurationNanoseconds;
}
private static native void run(long interpreterHandle, long errorHandle);
void resizeInput(int idx, int[] dims) {
resizeInput(idx, dims, false);
}
void resizeInput(int idx, int[] dims, boolean strict) {
if (resizeInput(interpreterHandle, errorHandle, idx, dims, strict)) {
            // Tensor allocation is deferred until either an explicit `allocateTensors()` call or
            // `invoke()`, avoiding redundant allocations if multiple tensors are simultaneously
            // resized.
isMemoryAllocated = false;
if (inputTensors[idx] != null) {
inputTensors[idx].refreshShape();
}
}
}
private static native boolean resizeInput(
long interpreterHandle, long errorHandle, int inputIdx, int[] dims, boolean strict);
void allocateTensors() {
if (isMemoryAllocated) {
return;
}
isMemoryAllocated = true;
allocateTensors(interpreterHandle, errorHandle);
for (int i = 0; i < outputTensors.length; ++i) {
if (outputTensors[i] != null) {
outputTensors[i].refreshShape();
}
}
}
private static native long allocateTensors(long interpreterHandle, long errorHandle);
void resetVariableTensors() {
resetVariableTensors(interpreterHandle, errorHandle);
}
int getInputIndex(String name) {
if (inputsIndexes == null) {
String[] names = getInputNames(interpreterHandle);
inputsIndexes = new HashMap<>();
if (names != null) {
for (int i = 0; i < names.length; ++i) {
inputsIndexes.put(names[i], i);
}
}
}
if (inputsIndexes.containsKey(name)) {
return inputsIndexes.get(name);
} else {
throw new IllegalArgumentException(
String.format(
"Input error: '%s' is not a valid name for any input. Names of inputs"
+ " and their indexes are %s",
name, inputsIndexes));
}
}
private void initTensorIndexesMaps() {
if (tensorToInputsIndexes != null) {
return;
}
tensorToInputsIndexes = new HashMap<>();
tensorToOutputsIndexes = new HashMap<>();
int inputCount = getInputTensorCount();
for (int i = 0; i < inputCount; ++i) {
int tensorIndex = getInputTensorIndex(interpreterHandle, i);
tensorToInputsIndexes.put(tensorIndex, i);
}
int outputCount = getOutputTensorCount();
for (int i = 0; i < outputCount; ++i) {
int tensorIndex = getOutputTensorIndex(interpreterHandle, i);
tensorToOutputsIndexes.put(tensorIndex, i);
}
}
int getOutputIndex(String name) {
if (outputsIndexes == null) {
String[] names = getOutputNames(interpreterHandle);
outputsIndexes = new HashMap<>();
if (names != null) {
for (int i = 0; i < names.length; ++i) {
outputsIndexes.put(names[i], i);
}
}
}
if (outputsIndexes.containsKey(name)) {
return outputsIndexes.get(name);
} else {
throw new IllegalArgumentException(
String.format(
"Input error: '%s' is not a valid name for any output. Names of outputs"
+ " and their indexes are %s",
name, outputsIndexes));
}
}
Long getLastNativeInferenceDurationNanoseconds() {
return (inferenceDurationNanoseconds < 0) ? null : inferenceDurationNanoseconds;
}
int getInputTensorCount() {
return inputTensors.length;
}
Tensor getInputTensor(int index) {
if (index < 0 || index >= inputTensors.length) {
throw new IllegalArgumentException("Invalid input Tensor index: " + index);
}
Tensor inputTensor = inputTensors[index];
if (inputTensor == null) {
inputTensor =
inputTensors[index] =
Tensor.fromIndex(
interpreterHandle,
getInputTensorIndex(interpreterHandle, index));
}
return inputTensor;
}
Tensor getInputTensor(String inputName, String methodName) {
if (inputName == null) {
throw new IllegalArgumentException("Invalid input tensor name provided (null)");
}
initTensorIndexesMaps();
int tensorIndex =
getInputTensorIndexFromSignature(interpreterHandle, inputName, methodName);
if (!tensorToInputsIndexes.containsKey(tensorIndex)) {
throw new IllegalArgumentException(
String.format(
"Invalid input tensor name (%s) for signature (%s).",
inputName, methodName));
}
return getInputTensor(tensorToInputsIndexes.get(tensorIndex));
}
public String[] getSignatureDefNames() {
return getSignatureDefNames(interpreterHandle);
}
private static native String[] getSignatureDefNames(long interpreterHandle);
String[] getSignatureInputs(String methodName) {
return getSignatureInputs(interpreterHandle, methodName);
}
private static native String[] getSignatureInputs(long interpreterHandle, String methodName);
String[] getSignatureOutputs(String methodName) {
return getSignatureOutputs(interpreterHandle, methodName);
}
private static native String[] getSignatureOutputs(long interpreterHandle, String methodName);
int getOutputTensorCount() {
return outputTensors.length;
}
Tensor getOutputTensor(int index) {
if (index < 0 || index >= outputTensors.length) {
throw new IllegalArgumentException("Invalid output Tensor index: " + index);
}
Tensor outputTensor = outputTensors[index];
if (outputTensor == null) {
outputTensor =
outputTensors[index] =
Tensor.fromIndex(
interpreterHandle,
getOutputTensorIndex(interpreterHandle, index));
}
return outputTensor;
}
Tensor getOutputTensor(String outputName, String methodName) {
if (outputName == null) {
throw new IllegalArgumentException("Invalid output tensor name provided (null)");
}
initTensorIndexesMaps();
int tensorIndex =
getOutputTensorIndexFromSignature(interpreterHandle, outputName, methodName);
if (!tensorToOutputsIndexes.containsKey(tensorIndex)) {
throw new IllegalArgumentException(
String.format(
"Invalid output tensor name (%s) for signature (%s).",
outputName, methodName));
}
return getOutputTensor(tensorToOutputsIndexes.get(tensorIndex));
}
int getExecutionPlanLength() {
return getExecutionPlanLength(interpreterHandle);
}
void setCancelled(boolean value) {
if (cancellationFlagHandle == 0) {
throw new IllegalStateException(
"Cannot cancel the inference. Have you called"
+ " Interpreter.Options.setCancellable?");
}
setCancelled(interpreterHandle, cancellationFlagHandle, value);
}
private static native void setCancelled(
long interpreterHandle, long cancellationFlagHandle, boolean value);
private void applyDelegates(Interpreter.Options options) {
        // First apply the flex delegate if necessary. This ensures the graph is fully resolved
        // before applying other delegates.
boolean originalGraphHasUnresolvedFlexOp = hasUnresolvedFlexOp(interpreterHandle);
if (originalGraphHasUnresolvedFlexOp) {
Delegate optionalFlexDelegate = maybeCreateFlexDelegate(options.delegates);
if (optionalFlexDelegate != null) {
ownedDelegates.add((AutoCloseable) optionalFlexDelegate);
applyDelegate(
interpreterHandle, errorHandle, optionalFlexDelegate.getNativeHandle());
}
}
// Now apply the user-supplied delegates.
try {
for (Delegate delegate : options.delegates) {
applyDelegate(interpreterHandle, errorHandle, delegate.getNativeHandle());
delegates.add(delegate);
}
if (options.useNNAPI != null && options.useNNAPI.booleanValue()) {
NnApiDelegate optionalNnApiDelegate = new NnApiDelegate();
ownedDelegates.add(optionalNnApiDelegate);
applyDelegate(
interpreterHandle, errorHandle, optionalNnApiDelegate.getNativeHandle());
}
} catch (IllegalArgumentException e) {
            // Suppress exceptions where a delegate fails to apply after the flex delegate is
            // successfully applied. This can be a common occurrence, as the flex delegate makes
            // the graph dynamic, which is typically unsupported by most delegates (e.g., NNAPI,
            // GPU delegates). We should still log an error to indicate that the delegate
            // application was a no-op.
            // TODO(b/142678372): Fix the flex delegate to not unconditionally mark graphs as
            // dynamic.
boolean shouldSuppressException =
originalGraphHasUnresolvedFlexOp && !hasUnresolvedFlexOp(interpreterHandle);
if (!shouldSuppressException) {
throw e;
}
System.err.println("Ignoring failed delegate application: " + e);
}
}
private static Delegate maybeCreateFlexDelegate(List<Delegate> delegates) {
try {
Class<?> clazz = Class.forName("org.tensorflow.lite.flex.FlexDelegate");
// No need to create the Flex delegate if one has already been provided.
for (Delegate delegate : delegates) {
if (clazz.isInstance(delegate)) {
return null;
}
}
return (Delegate) clazz.getConstructor().newInstance();
} catch (Exception e) {
// The error will propagate when tensors are allocated.
return null;
}
}
private static native int getOutputDataType(long interpreterHandle, int outputIdx);
private static final int ERROR_BUFFER_SIZE = 512;
private long errorHandle;
private long interpreterHandle;
private long modelHandle;
private long cancellationFlagHandle = 0;
private long inferenceDurationNanoseconds = -1;
private ByteBuffer modelByteBuffer;
// Lazily constructed maps of input and output names to input and output Tensor indexes.
private Map<String, Integer> inputsIndexes;
private Map<String, Integer> outputsIndexes;
// Lazily constructed maps of tensor index to index in input and output indexes.
private Map<Integer, Integer> tensorToInputsIndexes;
private Map<Integer, Integer> tensorToOutputsIndexes;
// Lazily constructed and populated arrays of input and output Tensor wrappers.
private Tensor[] inputTensors;
private Tensor[] outputTensors;
private boolean isMemoryAllocated = false;
// As the Java Delegate owns the native delegate instance, we keep a strong ref to any injected
// delegates for safety.
private final List<Delegate> delegates = new ArrayList<>();
// List of owned delegates that must be closed when the interpreter is closed.
private final List<AutoCloseable> ownedDelegates = new ArrayList<>();
private static native boolean hasUnresolvedFlexOp(long interpreterHandle);
private static native int getInputTensorIndex(long interpreterHandle, int inputIdx);
private static native int getInputTensorIndexFromSignature(
long interpreterHandle, String signatureInputName, String methodName);
private static native int getOutputTensorIndexFromSignature(
long interpreterHandle, String signatureInputName, String methodName);
private static native int getOutputTensorIndex(long interpreterHandle, int outputIdx);
private static native int getInputCount(long interpreterHandle);
private static native int getOutputCount(long interpreterHandle);
private static native int getExecutionPlanLength(long interpreterHandle);
private static native String[] getInputNames(long interpreterHandle);
private static native String[] getOutputNames(long interpreterHandle);
private static native void allowFp16PrecisionForFp32(long interpreterHandle, boolean allow);
private static native void allowBufferHandleOutput(long interpreterHandle, boolean allow);
private static native void useXNNPACK(
long interpreterHandle, long errorHandle, int state, int numThreads);
private static native long createErrorReporter(int size);
private static native long createModel(String modelPathOrBuffer, long errorHandle);
private static native long createModelWithBuffer(ByteBuffer modelBuffer, long errorHandle);
private static native long createInterpreter(
long modelHandle, long errorHandle, int numThreads);
private static native void applyDelegate(
long interpreterHandle, long errorHandle, long delegateHandle);
private static native void resetVariableTensors(long interpreterHandle, long errorHandle);
private static native long createCancellationFlag(long interpreterHandle);
private static native long deleteCancellationFlag(long cancellationFlagHandle);
private static native void delete(long errorHandle, long modelHandle, long interpreterHandle);
}
|
0
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/org/tensorflow
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/org/tensorflow/lite/Tensor.java
|
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
package org.tensorflow.lite;
import java.lang.reflect.Array;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.FloatBuffer;
import java.nio.IntBuffer;
import java.nio.LongBuffer;
import java.nio.ShortBuffer;
import java.util.Arrays;
// TODO(b/153882978): Add scalar getters similar to TF's Java API.
@SuppressWarnings("MissingJavadocMethod")
public final class Tensor {
static Tensor fromIndex(long nativeInterpreterHandle, int tensorIndex) {
return new Tensor(create(nativeInterpreterHandle, tensorIndex));
}
public static class QuantizationParams {
private final float scale;
private final int zeroPoint;
public QuantizationParams(final float scale, final int zeroPoint) {
this.scale = scale;
this.zeroPoint = zeroPoint;
}
public float getScale() {
return scale;
}
public int getZeroPoint() {
return zeroPoint;
}
}
public void close() {
delete(nativeHandle);
nativeHandle = 0;
}
public DataType dataType() {
return dtype;
}
public int numDimensions() {
return shapeCopy.length;
}
public int numBytes() {
return numBytes(nativeHandle);
}
public int numElements() {
return computeNumElements(shapeCopy);
}
public int[] shape() {
return shapeCopy;
}
public int[] shapeSignature() {
return shapeSignatureCopy;
}
public int index() {
return index(nativeHandle);
}
public String name() {
return name(nativeHandle);
}
public QuantizationParams quantizationParams() {
return quantizationParamsCopy;
}
public ByteBuffer asReadOnlyBuffer() {
// Note that the ByteBuffer order is not preserved when duplicated or marked read only, so
// we have to repeat the call.
return buffer().asReadOnlyBuffer().order(ByteOrder.nativeOrder());
}
void setTo(Object src) {
if (src == null) {
if (hasDelegateBufferHandle(nativeHandle)) {
return;
}
throw new IllegalArgumentException(
"Null inputs are allowed only if the Tensor is bound to a buffer handle.");
}
throwIfTypeIsIncompatible(src);
throwIfSrcShapeIsIncompatible(src);
if (isBuffer(src)) {
setTo((Buffer) src);
} else if (dtype == DataType.STRING && shapeCopy.length == 0) {
// Update scalar string input with 1-d byte array.
writeScalar(nativeHandle, src);
} else if (src.getClass().isArray()) {
writeMultiDimensionalArray(nativeHandle, src);
} else {
writeScalar(nativeHandle, src);
}
}
private void setTo(Buffer src) {
// Note that we attempt to use a direct memcpy optimization for direct, native-ordered
// buffers.
// There are no base Buffer#order() or Buffer#put() methods, so again we have to ugly cast.
if (src instanceof ByteBuffer) {
ByteBuffer srcBuffer = (ByteBuffer) src;
if (srcBuffer.isDirect() && srcBuffer.order() == ByteOrder.nativeOrder()) {
writeDirectBuffer(nativeHandle, src);
} else {
buffer().put(srcBuffer);
}
} else if (src instanceof LongBuffer) {
LongBuffer srcBuffer = (LongBuffer) src;
if (srcBuffer.isDirect() && srcBuffer.order() == ByteOrder.nativeOrder()) {
writeDirectBuffer(nativeHandle, src);
} else {
buffer().asLongBuffer().put(srcBuffer);
}
} else if (src instanceof FloatBuffer) {
FloatBuffer srcBuffer = (FloatBuffer) src;
if (srcBuffer.isDirect() && srcBuffer.order() == ByteOrder.nativeOrder()) {
writeDirectBuffer(nativeHandle, src);
} else {
buffer().asFloatBuffer().put(srcBuffer);
}
} else if (src instanceof IntBuffer) {
IntBuffer srcBuffer = (IntBuffer) src;
if (srcBuffer.isDirect() && srcBuffer.order() == ByteOrder.nativeOrder()) {
writeDirectBuffer(nativeHandle, src);
} else {
buffer().asIntBuffer().put(srcBuffer);
}
} else if (src instanceof ShortBuffer) {
ShortBuffer srcBuffer = (ShortBuffer) src;
if (srcBuffer.isDirect() && srcBuffer.order() == ByteOrder.nativeOrder()) {
writeDirectBuffer(nativeHandle, src);
} else {
buffer().asShortBuffer().put(srcBuffer);
}
} else {
throw new IllegalArgumentException("Unexpected input buffer type: " + src);
}
}
Object copyTo(Object dst) {
if (dst == null) {
if (hasDelegateBufferHandle(nativeHandle)) {
return dst;
}
throw new IllegalArgumentException(
"Null outputs are allowed only if the Tensor is bound to a buffer handle.");
}
throwIfTypeIsIncompatible(dst);
throwIfDstShapeIsIncompatible(dst);
if (isBuffer(dst)) {
copyTo((Buffer) dst);
} else {
readMultiDimensionalArray(nativeHandle, dst);
}
return dst;
}
private void copyTo(Buffer dst) {
// There is no base Buffer#put() method, so we have to ugly cast.
if (dst instanceof ByteBuffer) {
((ByteBuffer) dst).put(buffer());
} else if (dst instanceof FloatBuffer) {
((FloatBuffer) dst).put(buffer().asFloatBuffer());
} else if (dst instanceof LongBuffer) {
((LongBuffer) dst).put(buffer().asLongBuffer());
} else if (dst instanceof IntBuffer) {
((IntBuffer) dst).put(buffer().asIntBuffer());
} else if (dst instanceof ShortBuffer) {
((ShortBuffer) dst).put(buffer().asShortBuffer());
} else {
throw new IllegalArgumentException("Unexpected output buffer type: " + dst);
}
}
// TODO(b/80431971): Remove this method after deprecating multi-dimensional array inputs.
int[] getInputShapeIfDifferent(Object input) {
if (input == null) {
return null;
}
        // Implicit resizes based on ByteBuffer capacity aren't supported, so short-circuit that
        // path. The Buffer's size will be validated against this Tensor's size in
        // {@link #setTo(Object)}.
if (isBuffer(input)) {
return null;
}
throwIfTypeIsIncompatible(input);
int[] inputShape = computeShapeOf(input);
if (Arrays.equals(shapeCopy, inputShape)) {
return null;
}
return inputShape;
}
void refreshShape() {
this.shapeCopy = shape(nativeHandle);
}
DataType dataTypeOf(Object o) {
if (o != null) {
Class<?> c = o.getClass();
// For arrays, the data elements must be a *primitive* type, e.g., an
// array of floats is fine, but not an array of Floats.
if (c.isArray()) {
while (c.isArray()) {
c = c.getComponentType();
}
if (float.class.equals(c)) {
return DataType.FLOAT32;
} else if (int.class.equals(c)) {
return DataType.INT32;
} else if (short.class.equals(c)) {
return DataType.INT16;
} else if (byte.class.equals(c)) {
// Byte array can be used for storing string tensors, especially for
// ParseExample op.
if (dtype == DataType.STRING) {
return DataType.STRING;
}
return DataType.UINT8;
} else if (long.class.equals(c)) {
return DataType.INT64;
} else if (boolean.class.equals(c)) {
return DataType.BOOL;
} else if (String.class.equals(c)) {
return DataType.STRING;
}
} else {
// For scalars, the type will be boxed.
if (Float.class.equals(c) || o instanceof FloatBuffer) {
return DataType.FLOAT32;
} else if (Integer.class.equals(c) || o instanceof IntBuffer) {
return DataType.INT32;
} else if (Short.class.equals(c) || o instanceof ShortBuffer) {
return DataType.INT16;
} else if (Byte.class.equals(c)) {
// Note that we don't check for ByteBuffer here; ByteBuffer payloads
// are allowed to map to any type, and should be handled earlier
// in the input/output processing pipeline.
return DataType.UINT8;
} else if (Long.class.equals(c) || o instanceof LongBuffer) {
return DataType.INT64;
} else if (Boolean.class.equals(c)) {
return DataType.BOOL;
} else if (String.class.equals(c)) {
return DataType.STRING;
}
}
} else {
throw new NullPointerException();
}
throw new IllegalArgumentException(
"DataType error: cannot resolve DataType of " + o.getClass().getName());
}
int[] computeShapeOf(Object o) {
int size = computeNumDimensions(o);
if (dtype == DataType.STRING) {
Class<?> c = o.getClass();
if (c.isArray()) {
while (c.isArray()) {
c = c.getComponentType();
}
                // If the given string data is stored in byte streams, the last array dimension
                // should be treated as a value.
if (byte.class.equals(c)) {
--size;
}
}
}
int[] dimensions = new int[size];
fillShape(o, 0, dimensions);
return dimensions;
}
static int computeNumElements(int[] shape) {
int n = 1;
for (int i = 0; i < shape.length; ++i) {
n *= shape[i];
}
return n;
}
static int computeNumDimensions(Object o) {
if (o == null || !o.getClass().isArray()) {
return 0;
}
if (Array.getLength(o) == 0) {
throw new IllegalArgumentException("Array lengths cannot be 0.");
}
return 1 + computeNumDimensions(Array.get(o, 0));
}
static void fillShape(Object o, int dim, int[] shape) {
if (shape == null || dim == shape.length) {
return;
}
final int len = Array.getLength(o);
if (shape[dim] == 0) {
shape[dim] = len;
} else if (shape[dim] != len) {
throw new IllegalArgumentException(
String.format(
"Mismatched lengths (%d and %d) in dimension %d",
shape[dim], len, dim));
}
for (int i = 0; i < len; ++i) {
fillShape(Array.get(o, i), dim + 1, shape);
}
}
private void throwIfTypeIsIncompatible(Object o) {
// ByteBuffer payloads can map to any type, so exempt it from the check.
if (isByteBuffer(o)) {
return;
}
DataType oType = dataTypeOf(o);
if (oType != dtype) {
// INT8 and UINT8 have the same string name, "byte"
if (oType.toStringName().equals(dtype.toStringName())) {
return;
}
throw new IllegalArgumentException(
String.format(
"Cannot convert between a TensorFlowLite tensor with type %s and a Java"
+ " object of type %s (which is compatible with the TensorFlowLite"
+ " type %s).",
dtype, o.getClass().getName(), oType));
}
}
private void throwIfSrcShapeIsIncompatible(Object src) {
if (isBuffer(src)) {
Buffer srcBuffer = (Buffer) src;
int bytes = numBytes();
// Note that we allow the client to provide a ByteBuffer even for non-byte Tensors.
// In such cases, we only care that the raw byte capacity matches the tensor byte
// capacity.
int srcBytes =
isByteBuffer(src)
? srcBuffer.capacity()
: srcBuffer.capacity() * dtype.byteSize();
if (bytes != srcBytes) {
throw new IllegalArgumentException(
String.format(
"Cannot copy to a TensorFlowLite tensor (%s) with %d bytes from a "
+ "Java Buffer with %d bytes.",
name(), bytes, srcBytes));
}
return;
}
int[] srcShape = computeShapeOf(src);
if (!Arrays.equals(srcShape, shapeCopy)) {
throw new IllegalArgumentException(
String.format(
"Cannot copy to a TensorFlowLite tensor (%s) with shape %s from a Java"
+ " object with shape %s.",
name(), Arrays.toString(shapeCopy), Arrays.toString(srcShape)));
}
}
private void throwIfDstShapeIsIncompatible(Object dst) {
if (isBuffer(dst)) {
Buffer dstBuffer = (Buffer) dst;
int bytes = numBytes();
// Note that we allow the client to provide a ByteBuffer even for non-byte Tensors.
// In such cases, we only care that the raw byte capacity fits the tensor byte capacity.
// This is subtly different than Buffer *inputs*, where the size should be exact.
int dstBytes =
isByteBuffer(dst)
? dstBuffer.capacity()
: dstBuffer.capacity() * dtype.byteSize();
if (bytes > dstBytes) {
throw new IllegalArgumentException(
String.format(
"Cannot copy from a TensorFlowLite tensor (%s) with %d bytes to a "
+ "Java Buffer with %d bytes.",
name(), bytes, dstBytes));
}
return;
}
int[] dstShape = computeShapeOf(dst);
if (!Arrays.equals(dstShape, shapeCopy)) {
throw new IllegalArgumentException(
String.format(
"Cannot copy from a TensorFlowLite tensor (%s) with shape %s to a Java"
+ " object with shape %s.",
name(), Arrays.toString(shapeCopy), Arrays.toString(dstShape)));
}
}
private static boolean isBuffer(Object o) {
return o instanceof Buffer;
}
private static boolean isByteBuffer(Object o) {
return o instanceof ByteBuffer;
}
private long nativeHandle;
private final DataType dtype;
private int[] shapeCopy;
private final int[] shapeSignatureCopy;
private final QuantizationParams quantizationParamsCopy;
private Tensor(long nativeHandle) {
this.nativeHandle = nativeHandle;
this.dtype = DataType.fromC(dtype(nativeHandle));
this.shapeCopy = shape(nativeHandle);
this.shapeSignatureCopy = shapeSignature(nativeHandle);
this.quantizationParamsCopy =
new QuantizationParams(
quantizationScale(nativeHandle), quantizationZeroPoint(nativeHandle));
}
public ByteBuffer buffer() {
return buffer(nativeHandle).order(ByteOrder.nativeOrder());
}
private static native long create(long interpreterHandle, int tensorIndex);
private static native void delete(long handle);
private static native ByteBuffer buffer(long handle);
private static native void writeDirectBuffer(long handle, Buffer src);
private static native int dtype(long handle);
private static native int[] shape(long handle);
private static native int[] shapeSignature(long handle);
private static native int numBytes(long handle);
private static native boolean hasDelegateBufferHandle(long handle);
private static native void readMultiDimensionalArray(long handle, Object dst);
private static native void writeMultiDimensionalArray(long handle, Object src);
private static native void writeScalar(long handle, Object src);
private static native int index(long handle);
private static native String name(long handle);
private static native float quantizationScale(long handle);
private static native int quantizationZeroPoint(long handle);
}
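// A minimal usage sketch (illustrative, not part of the original source): inspecting an output
// tensor's metadata and reading its raw bytes after inference. The interpreter argument is an
// assumed, already-constructed Interpreter whose model has been run.
final class TensorInspectionExample {
    static ByteBuffer readOutput(Interpreter interpreter) {
        Tensor out = interpreter.getOutputTensor(0);
        System.out.println(
                "dtype=" + out.dataType()
                        + " shape=" + Arrays.toString(out.shape())
                        + " elements=" + out.numElements());
        // asReadOnlyBuffer() exposes the tensor's backing bytes in native byte order
        return out.asReadOnlyBuffer();
    }
}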
|
0
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/org/tensorflow
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/org/tensorflow/lite/TensorFlowLite.java
|
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
package org.tensorflow.lite;
/** Static utility methods loading the TensorFlowLite runtime. */
public final class TensorFlowLite {
private static volatile boolean isInit = false;
private TensorFlowLite() {}
/**
* Returns the version of the underlying TensorFlowLite model schema.
*
* @return the version of the underlying TensorFlowLite model schema
     * @deprecated Prefer using {@link #runtimeVersion()} or {@link #schemaVersion()}.
*/
@Deprecated
public static String version() {
return schemaVersion();
}
/**
* Returns the version of the underlying TensorFlowLite runtime.
*
* @return the version of the underlying TensorFlowLite runtime
*/
public static String runtimeVersion() {
init();
return nativeRuntimeVersion();
}
/**
* Returns the version of the underlying TensorFlowLite model schema.
*
* @return the version of the underlying TensorFlowLite model schema
*/
public static String schemaVersion() {
init();
return nativeSchemaVersion();
}
/**
* Ensure the TensorFlowLite native library has been loaded.
*
* <p>If unsuccessful, throws an UnsatisfiedLinkError with the appropriate error message.
*/
public static void init() {
if (isInit) {
return;
}
nativeRuntimeVersion();
isInit = true;
}
private static native String nativeRuntimeVersion();
private static native String nativeSchemaVersion();
}
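// Illustrative sketch (not part of the upstream file): loading the native library and printing
// the runtime and schema versions. If the native TensorFlow Lite library cannot be loaded on
// this host, init() propagates an UnsatisfiedLinkError as described above.
final class TensorFlowLiteVersionSketch {
    private TensorFlowLiteVersionSketch() {}
    public static void main(String[] args) {
        TensorFlowLite.init();
        System.out.println("TFLite runtime version: " + TensorFlowLite.runtimeVersion());
        System.out.println("TFLite schema version:  " + TensorFlowLite.schemaVersion());
    }
}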
|
0
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/org/tensorflow
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/org/tensorflow/lite/package-info.java
|
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
/** Defines classes to load and execute TensorFlowLite models. */
package org.tensorflow.lite;
|
0
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/org/tensorflow/lite
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/org/tensorflow/lite/annotations/UsedByReflection.java
|
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
package org.tensorflow.lite.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Target;
/**
* Annotation used for marking methods and fields that are called by reflection. Useful for keeping
* components that would otherwise be removed by Proguard. Use the value parameter to mention a file
* that calls this method.
*/
@Target({ElementType.METHOD, ElementType.FIELD, ElementType.TYPE, ElementType.CONSTRUCTOR})
public @interface UsedByReflection {
String value();
}
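// Illustrative sketch (not part of the upstream file): annotating a member that is only invoked
// reflectively so that shrinkers such as Proguard keep it; the value names a hypothetical caller.
final class UsedByReflectionSketch {
    private UsedByReflectionSketch() {}
    @UsedByReflection("hypothetical_native_bindings.cc")
    static void invokedOnlyViaReflection() {}
}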
|
0
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/org/tensorflow/lite
|
java-sources/ai/djl/tflite/tflite-engine/0.27.0/org/tensorflow/lite/nnapi/NnApiDelegate.java
|
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
package org.tensorflow.lite.nnapi;
import org.tensorflow.lite.Delegate;
import org.tensorflow.lite.TensorFlowLite;
@SuppressWarnings("MissingJavadocMethod")
public class NnApiDelegate implements Delegate, AutoCloseable {
private static final long INVALID_DELEGATE_HANDLE = 0;
private long delegateHandle;
public static final class Options {
public Options() {}
public static final int EXECUTION_PREFERENCE_UNDEFINED = -1;
public static final int EXECUTION_PREFERENCE_LOW_POWER = 0;
public static final int EXECUTION_PREFERENCE_FAST_SINGLE_ANSWER = 1;
public static final int EXECUTION_PREFERENCE_SUSTAINED_SPEED = 2;
public Options setExecutionPreference(int preference) {
this.executionPreference = preference;
return this;
}
public Options setAcceleratorName(String name) {
this.acceleratorName = name;
return this;
}
public Options setCacheDir(String cacheDir) {
this.cacheDir = cacheDir;
return this;
}
public Options setModelToken(String modelToken) {
this.modelToken = modelToken;
return this;
}
public Options setMaxNumberOfDelegatedPartitions(int limit) {
this.maxDelegatedPartitions = limit;
return this;
}
public Options setUseNnapiCpu(boolean enable) {
this.useNnapiCpu = enable;
return this;
}
public Options setAllowFp16(boolean enable) {
this.allowFp16 = enable;
return this;
}
private int executionPreference = EXECUTION_PREFERENCE_UNDEFINED;
private String acceleratorName = null;
private String cacheDir = null;
private String modelToken = null;
private Integer maxDelegatedPartitions = null;
private Boolean useNnapiCpu = null;
private Boolean allowFp16 = null;
}
public NnApiDelegate(Options options) {
// Ensure the native TensorFlow Lite libraries are available.
TensorFlowLite.init();
delegateHandle =
createDelegate(
options.executionPreference,
options.acceleratorName,
options.cacheDir,
options.modelToken,
options.maxDelegatedPartitions != null
? options.maxDelegatedPartitions
: -1,
/*overrideDisallowCpu=*/ options.useNnapiCpu != null,
/*disallowCpuValue=*/ options.useNnapiCpu != null
? !options.useNnapiCpu.booleanValue()
: true,
options.allowFp16 != null ? options.allowFp16 : false);
}
public NnApiDelegate() {
this(new Options());
}
@Override
public long getNativeHandle() {
return delegateHandle;
}
@Override
public void close() {
if (delegateHandle != INVALID_DELEGATE_HANDLE) {
deleteDelegate(delegateHandle);
delegateHandle = INVALID_DELEGATE_HANDLE;
}
}
public int getNnapiErrno() {
checkNotClosed();
return getNnapiErrno(delegateHandle);
}
public boolean hasErrors() {
return getNnapiErrno(delegateHandle) != 0 /*ANEURALNETWORKS_NO_ERROR*/;
}
private void checkNotClosed() {
if (delegateHandle == INVALID_DELEGATE_HANDLE) {
throw new IllegalStateException("Should not access delegate after it has been closed.");
}
}
//
private static native long createDelegate(
int preference,
String deviceName,
String cacheDir,
String modelToken,
int maxDelegatedPartitions,
boolean overrideDisallowCpu,
boolean disallowCpuValue,
boolean allowFp16);
private static native void deleteDelegate(long delegateHandle);
private static native int getNnapiErrno(long delegateHandle);
}
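// Illustrative sketch (not part of the upstream file): building and closing an NNAPI delegate.
// Attaching it to an interpreter happens through Interpreter.Options#addDelegate(Delegate) in
// the org.tensorflow.lite package and is only hinted at in the comment below; running this
// requires the native TensorFlow Lite library and an NNAPI-capable Android device.
final class NnApiDelegateSketch {
    private NnApiDelegateSketch() {}
    public static void main(String[] args) {
        NnApiDelegate.Options options =
                new NnApiDelegate.Options()
                        .setExecutionPreference(
                                NnApiDelegate.Options.EXECUTION_PREFERENCE_SUSTAINED_SPEED)
                        .setAllowFp16(true);
        try (NnApiDelegate delegate = new NnApiDelegate(options)) {
            // new Interpreter(modelFile, new Interpreter.Options().addDelegate(delegate));
            System.out.println("NNAPI errno after creation: " + delegate.getNnapiErrno());
        }
    }
}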
|
0
|
java-sources/ai/djl/tflite/tflite-native-auto/2.4.1/org/tensorflow
|
java-sources/ai/djl/tflite/tflite-native-auto/2.4.1/org/tensorflow/lite/DataType.java
|
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
package org.tensorflow.lite;
/** Represents the type of elements in a TensorFlow Lite {@link Tensor} as an enum. */
public enum DataType {
/** 32-bit single precision floating point. */
FLOAT32(1),
/** 32-bit signed integer. */
INT32(2),
/** 8-bit unsigned integer. */
UINT8(3),
/** 64-bit signed integer. */
INT64(4),
/** Strings. */
STRING(5),
/** Bool. */
BOOL(6),
/** 8-bit signed integer. */
INT8(9);
private final int value;
DataType(int value) {
this.value = value;
}
/** Returns the size of an element of this type, in bytes, or -1 if element size is variable. */
public int byteSize() {
switch (this) {
case FLOAT32:
case INT32:
return 4;
case INT8:
case UINT8:
return 1;
case INT64:
return 8;
case BOOL:
// Boolean size is JVM-dependent.
return -1;
case STRING:
return -1;
}
throw new IllegalArgumentException(
"DataType error: DataType " + this + " is not supported yet");
}
/** Corresponding value of the TfLiteType enum in the TensorFlow Lite C API. */
int c() {
return value;
}
/** Converts a C TfLiteType enum value to the corresponding type. */
static DataType fromC(int c) {
for (DataType t : values) {
if (t.value == c) {
return t;
}
}
throw new IllegalArgumentException(
"DataType error: DataType "
+ c
+ " is not recognized in Java (version "
+ TensorFlowLite.runtimeVersion()
+ ")");
}
/** Gets string names of the data type. */
String toStringName() {
switch (this) {
case FLOAT32:
return "float";
case INT32:
return "int";
case INT8:
case UINT8:
return "byte";
case INT64:
return "long";
case BOOL:
return "bool";
case STRING:
return "string";
}
throw new IllegalArgumentException(
"DataType error: DataType " + this + " is not supported yet");
}
// Cached to avoid copying it
private static final DataType[] values = values();
}
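// Illustrative sketch (not part of the upstream file): computing the byte size of a fixed-size
// tensor from its element count and DataType#byteSize(). Variable-size types (STRING, BOOL)
// report -1 and cannot be pre-sized this way.
final class DataTypeByteSizeSketch {
    private DataTypeByteSizeSketch() {}
    static int requiredBytes(DataType type, int numElements) {
        int elementSize = type.byteSize();
        if (elementSize < 0) {
            throw new IllegalArgumentException(
                    "Element size of " + type + " is variable; buffer cannot be pre-sized.");
        }
        // e.g. FLOAT32 with 3 * 2 * 4 = 24 elements -> 24 * 4 = 96 bytes.
        return numElements * elementSize;
    }
}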
|
0
|
java-sources/ai/djl/tflite/tflite-native-auto/2.4.1/org/tensorflow
|
java-sources/ai/djl/tflite/tflite-native-auto/2.4.1/org/tensorflow/lite/Delegate.java
|
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
package org.tensorflow.lite;
/**
* Wrapper for a native TensorFlow Lite Delegate.
*
* <p>WARNING: This is an experimental interface that is subject to change.
*
* <p>If a delegate implementation holds additional resources or memory that should be explicitly
* freed, then best practice is to add a {@code close()} method to the implementation and have the
* client call that explicitly when the delegate instance is no longer in use. While this approach
* technically allows sharing of a single delegate instance across multiple interpreter instances,
* the delegate implementation must explicitly support this.
*/
public interface Delegate {
/**
* Returns a native handle to the TensorFlow Lite delegate implementation.
*
* <p>Note: The Java {@link Delegate} maintains ownership of the native delegate instance, and
* must ensure its existence for the duration of usage with any {@link Interpreter}.
*
* @return The native delegate handle.
*/
public long getNativeHandle();
}
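// Illustrative sketch (not part of the upstream file): the explicit close() pattern recommended
// in the Javadoc above for delegates that own native resources. The handle lifecycle hook is a
// hypothetical placeholder, not a real TensorFlow Lite API.
abstract class CloseableDelegateSketch implements Delegate, AutoCloseable {
    private long nativeHandle;
    protected CloseableDelegateSketch(long nativeHandle) {
        this.nativeHandle = nativeHandle;
    }
    /** {@inheritDoc} */
    @Override
    public long getNativeHandle() {
        return nativeHandle;
    }
    /** Frees the native delegate; call only once no interpreter uses this delegate anymore. */
    @Override
    public void close() {
        if (nativeHandle != 0) {
            releaseNativeHandle(nativeHandle);
            nativeHandle = 0;
        }
    }
    /** Hypothetical hook that a concrete delegate would implement to free its native handle. */
    protected abstract void releaseNativeHandle(long handle);
}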
|
0
|
java-sources/ai/djl/tflite/tflite-native-auto/2.4.1/org/tensorflow
|
java-sources/ai/djl/tflite/tflite-native-auto/2.4.1/org/tensorflow/lite/Interpreter.java
|
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
package org.tensorflow.lite;
import java.io.File;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.util.ArrayList;
import java.util.List;
/**
* Driver class to drive model inference with TensorFlow Lite.
*
* <p>A {@code Interpreter} encapsulates a pre-trained TensorFlow Lite model, in which operations
* are executed for model inference.
*
* <p>For example, if a model takes only one input and returns only one output:
*
* <pre>{@code
* try (Interpreter interpreter = new Interpreter(file_of_a_tensorflowlite_model)) {
* interpreter.run(input, output);
* }
* }</pre>
*
* <p>If a model takes multiple inputs or outputs:
*
* <pre>{@code
* Object[] inputs = {input0, input1, ...};
* Map<Integer, Object> map_of_indices_to_outputs = new HashMap<>();
 * FloatBuffer ith_output = FloatBuffer.allocate(3 * 2 * 4);  // Float tensor, shape 3x2x4.
* map_of_indices_to_outputs.put(i, ith_output);
* try (Interpreter interpreter = new Interpreter(file_of_a_tensorflowlite_model)) {
* interpreter.runForMultipleInputsOutputs(inputs, map_of_indices_to_outputs);
* }
* }</pre>
*
* <p>If a model takes or produces string tensors:
*
* <pre>{@code
* String[] input = {"foo", "bar"}; // Input tensor shape is [2].
 * String[][] output = new String[3][2];  // Output tensor shape is [3, 2].
* try (Interpreter interpreter = new Interpreter(file_of_a_tensorflowlite_model)) {
* interpreter.runForMultipleInputsOutputs(input, output);
* }
* }</pre>
*
* <p>Orders of inputs and outputs are determined when converting TensorFlow model to TensorFlowLite
* model with Toco, as are the default shapes of the inputs.
*
* <p>When inputs are provided as (multi-dimensional) arrays, the corresponding input tensor(s) will
* be implicitly resized according to that array's shape. When inputs are provided as {@link Buffer}
* types, no implicit resizing is done; the caller must ensure that the {@link Buffer} byte size
* either matches that of the corresponding tensor, or that they first resize the tensor via {@link
* Interpreter#resizeInput}. Tensor shape and type information can be obtained via the {@link
* Tensor} class, available via {@link #getInputTensor(int)} and {@link #getOutputTensor(int)}.
*
 * <p><b>WARNING:</b> Instances of an {@code Interpreter} are <b>not</b> thread-safe. An {@code
 * Interpreter} owns resources that <b>must</b> be explicitly freed by invoking {@link #close()}.
*
* <p>The TFLite library is built against NDK API 19. It may work for Android API levels below 19,
* but is not guaranteed.
*/
public final class Interpreter implements AutoCloseable {
/** An options class for controlling runtime interpreter behavior. */
public static class Options {
public Options() {}
/**
* Sets the number of threads to be used for ops that support multi-threading. Defaults to a
* platform-dependent value.
*/
public Options setNumThreads(int numThreads) {
this.numThreads = numThreads;
return this;
}
/**
* Sets whether to use NN API (if available) for op execution. Defaults to false (disabled).
*/
public Options setUseNNAPI(boolean useNNAPI) {
this.useNNAPI = useNNAPI;
return this;
}
/**
* Adds a {@link Delegate} to be applied during interpreter creation.
*
* <p>WARNING: This is an experimental interface that is subject to change.
*/
public Options addDelegate(Delegate delegate) {
delegates.add(delegate);
return this;
}
/**
* Advanced: Set if buffer handle output is allowed.
*
* <p>When a {@link Delegate} supports hardware acceleration, the interpreter will make the
* data of output tensors available in the CPU-allocated tensor buffers by default. If the
* client can consume the buffer handle directly (e.g. reading output from OpenGL texture),
* it can set this flag to false, avoiding the copy of data to the CPU buffer. The delegate
* documentation should indicate whether this is supported and how it can be used.
*
* <p>WARNING: This is an experimental interface that is subject to change.
*/
public Options setAllowBufferHandleOutput(boolean allow) {
this.allowBufferHandleOutput = allow;
return this;
}
/** Advanced: Set if the interpreter is able to be cancelled. */
public Options setCancellable(boolean allow) {
this.allowCancellation = allow;
return this;
}
/**
* Experimental: Enable an optimized set of floating point CPU kernels (provided by
* XNNPACK).
*
* <p>Enabling this flag will enable use of a new, highly optimized set of CPU kernels
* provided via the XNNPACK delegate. Currently, this is restricted to a subset of floating
* point operations. Eventually, we plan to enable this by default, as it can provide
         * significant performance benefits for many classes of floating point models. See
* https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/delegates/xnnpack/README.md
* for more details.
*
* <p>Things to keep in mind when enabling this flag:
*
* <ul>
* <li>Startup time and resize time may increase.
* <li>Baseline memory consumption may increase.
         *   <li>May be ignored if another delegate (e.g., NNAPI) has been applied.
* <li>Quantized models will not see any benefit.
* </ul>
*
* <p>WARNING: This is an experimental interface that is subject to change.
*/
public Options setUseXNNPACK(boolean useXNNPACK) {
this.useXNNPACK = useXNNPACK;
return this;
}
int numThreads = -1;
Boolean useNNAPI;
Boolean allowFp16PrecisionForFp32;
Boolean allowBufferHandleOutput;
Boolean allowCancellation;
Boolean useXNNPACK;
final List<Delegate> delegates = new ArrayList<>();
}
/**
* Initializes a {@code Interpreter}
*
* @param modelFile a File of a pre-trained TF Lite model.
* @throws IllegalArgumentException if {@code modelFile} does not encode a valid TensorFlow Lite
* model.
*/
public Interpreter(File modelFile) {
this(modelFile, /*options = */ null);
}
/**
* Initializes a {@code Interpreter} and specifies the number of threads used for inference.
*
* @param modelFile a file of a pre-trained TF Lite model
* @param numThreads number of threads to use for inference
* @deprecated Prefer using the {@link #Interpreter(File,Options)} constructor. This method will
* be removed in a future release.
*/
@Deprecated
public Interpreter(File modelFile, int numThreads) {
this(modelFile, new Options().setNumThreads(numThreads));
}
/**
* Initializes a {@code Interpreter} and specifies the number of threads used for inference.
*
* @param modelFile a file of a pre-trained TF Lite model
* @param options a set of options for customizing interpreter behavior
* @throws IllegalArgumentException if {@code modelFile} does not encode a valid TensorFlow Lite
* model.
*/
public Interpreter(File modelFile, Options options) {
wrapper = new NativeInterpreterWrapper(modelFile.getAbsolutePath(), options);
}
/**
* Initializes a {@code Interpreter} with a {@code ByteBuffer} of a model file.
*
* <p>The ByteBuffer should not be modified after the construction of a {@code Interpreter}. The
* {@code ByteBuffer} can be either a {@code MappedByteBuffer} that memory-maps a model file, or
* a direct {@code ByteBuffer} of nativeOrder() that contains the bytes content of a model.
*
* @throws IllegalArgumentException if {@code byteBuffer} is not a {@link MappedByteBuffer} nor
* a direct {@link ByteBuffer} of nativeOrder.
*/
public Interpreter(ByteBuffer byteBuffer) {
this(byteBuffer, /* options= */ null);
}
/**
* Initializes a {@code Interpreter} with a {@code ByteBuffer} of a model file and specifies the
* number of threads used for inference.
*
* <p>The ByteBuffer should not be modified after the construction of a {@code Interpreter}. The
* {@code ByteBuffer} can be either a {@code MappedByteBuffer} that memory-maps a model file, or
* a direct {@code ByteBuffer} of nativeOrder() that contains the bytes content of a model.
*
* @deprecated Prefer using the {@link #Interpreter(ByteBuffer,Options)} constructor. This
* method will be removed in a future release.
*/
@Deprecated
public Interpreter(ByteBuffer byteBuffer, int numThreads) {
this(byteBuffer, new Options().setNumThreads(numThreads));
}
/**
* Initializes a {@code Interpreter} with a {@code MappedByteBuffer} to the model file.
*
* <p>The {@code MappedByteBuffer} should remain unchanged after the construction of a {@code
* Interpreter}.
*
* @deprecated Prefer using the {@link #Interpreter(ByteBuffer,Options)} constructor. This
* method will be removed in a future release.
*/
@Deprecated
public Interpreter(MappedByteBuffer mappedByteBuffer) {
this(mappedByteBuffer, /* options= */ null);
}
/**
* Initializes a {@code Interpreter} with a {@code ByteBuffer} of a model file and a set of
* custom {@link Options}.
*
* <p>The ByteBuffer should not be modified after the construction of a {@code Interpreter}. The
* {@code ByteBuffer} can be either a {@link MappedByteBuffer} that memory-maps a model file, or
* a direct {@link ByteBuffer} of nativeOrder() that contains the bytes content of a model.
*
* @throws IllegalArgumentException if {@code byteBuffer} is not a {@link MappedByteBuffer} nor
* a direct {@link ByteBuffer} of nativeOrder.
*/
public Interpreter(ByteBuffer byteBuffer, Options options) {
wrapper = new NativeInterpreterWrapper(byteBuffer, options);
}
/**
* Runs model inference if the model takes only one input, and provides only one output.
*
* <p>Warning: The API is more efficient if a {@link Buffer} (preferably direct, but not
* required) is used as the input/output data type. Please consider using {@link Buffer} to feed
* and fetch primitive data for better performance. The following concrete {@link Buffer} types
* are supported:
*
* <ul>
* <li>{@link ByteBuffer} - compatible with any underlying primitive Tensor type.
* <li>{@link java.nio.FloatBuffer} - compatible with float Tensors.
* <li>{@link java.nio.IntBuffer} - compatible with int32 Tensors.
* <li>{@link java.nio.LongBuffer} - compatible with int64 Tensors.
* </ul>
*
* Note that boolean types are only supported as arrays, not {@link Buffer}s, or as scalar
* inputs.
*
* @param input an array or multidimensional array, or a {@link Buffer} of primitive types
* including int, float, long, and byte. {@link Buffer} is the preferred way to pass large
* input data for primitive types, whereas string types require using the
* (multi-dimensional) array input path. When a {@link Buffer} is used, its content should
* remain unchanged until model inference is done, and the caller must ensure that the
* {@link Buffer} is at the appropriate read position. A {@code null} value is allowed only
* if the caller is using a {@link Delegate} that allows buffer handle interop, and such a
* buffer has been bound to the input {@link Tensor}.
* @throws IllegalArgumentException if {@code input} or {@code output} is null or empty, or if
* error occurs when running the inference.
* @throws IllegalArgumentException (EXPERIMENTAL, subject to change) if the inference is
* interrupted by {@code setCancelled(true)}.
*/
public void run(Object input) {
Object[] inputs = {input};
runForMultipleInputsOutputs(inputs);
}
/**
* Runs model inference if the model takes multiple inputs, or returns multiple outputs.
*
* <p>Warning: The API is more efficient if {@link Buffer}s (preferably direct, but not
* required) are used as the input/output data types. Please consider using {@link Buffer} to
* feed and fetch primitive data for better performance. The following concrete {@link Buffer}
* types are supported:
*
* <ul>
* <li>{@link ByteBuffer} - compatible with any underlying primitive Tensor type.
* <li>{@link java.nio.FloatBuffer} - compatible with float Tensors.
* <li>{@link java.nio.IntBuffer} - compatible with int32 Tensors.
* <li>{@link java.nio.LongBuffer} - compatible with int64 Tensors.
* </ul>
*
* Note that boolean types are only supported as arrays, not {@link Buffer}s, or as scalar
* inputs.
*
     * <p>Note: {@code null} values for individual elements of {@code inputs} and {@code outputs} are
* allowed only if the caller is using a {@link Delegate} that allows buffer handle interop, and
* such a buffer has been bound to the corresponding input or output {@link Tensor}(s).
*
* @param inputs an array of input data. The inputs should be in the same order as inputs of the
* model. Each input can be an array or multidimensional array, or a {@link Buffer} of
* primitive types including int, float, long, and byte. {@link Buffer} is the preferred way
* to pass large input data, whereas string types require using the (multi-dimensional)
* array input path. When {@link Buffer} is used, its content should remain unchanged until
* model inference is done, and the caller must ensure that the {@link Buffer} is at the
* appropriate read position.
* @throws IllegalArgumentException if {@code inputs} or {@code outputs} is null or empty, or if
* error occurs when running the inference.
*/
public void runForMultipleInputsOutputs(Object[] inputs) {
checkNotClosed();
wrapper.run(inputs);
}
/**
     * Explicitly updates allocations for all tensors, if necessary.
*
* <p>This will propagate shapes and memory allocations for all dependent tensors using the
* input tensor shape(s) as given.
*
* <p>Note: This call is *purely optional*. Tensor allocation will occur automatically during
* execution if any input tensors have been resized. This call is most useful in determining the
* shapes for any output tensors before executing the graph, e.g.,
*
* <pre>{@code
     * interpreter.resizeInput(0, new int[]{1, 4, 4, 3});
     * interpreter.allocateTensors();
     * FloatBuffer input = FloatBuffer.allocate(interpreter.getInputTensor(0).numElements());
     * // Populate inputs...
     * FloatBuffer output = FloatBuffer.allocate(interpreter.getOutputTensor(0).numElements());
     * interpreter.run(input, output);
* // Process outputs...
* }</pre>
*
* @throws IllegalStateException if the graph's tensors could not be successfully allocated.
*/
public void allocateTensors() {
checkNotClosed();
wrapper.allocateTensors();
}
/**
* Resizes idx-th input of the native model to the given dims.
*
     * @throws IllegalArgumentException if {@code idx} is negative or is not smaller than the number
* of model inputs; or if error occurs when resizing the idx-th input.
*/
public void resizeInput(int idx, int[] dims) {
checkNotClosed();
wrapper.resizeInput(idx, dims, false);
}
/**
* Resizes idx-th input of the native model to the given dims.
*
* <p>When `strict` is True, only unknown dimensions can be resized. Unknown dimensions are
* indicated as `-1` in the array returned by `Tensor.shapeSignature()`.
*
     * @throws IllegalArgumentException if {@code idx} is negative or is not smaller than the number
* of model inputs; or if error occurs when resizing the idx-th input. Additionally, the
     *     error occurs when attempting to resize a tensor with fixed dimensions when `strict` is
* True.
*/
public void resizeInput(int idx, int[] dims, boolean strict) {
checkNotClosed();
wrapper.resizeInput(idx, dims, strict);
}
/** Gets the number of input tensors. */
public int getInputTensorCount() {
checkNotClosed();
return wrapper.getInputTensorCount();
}
/**
* Gets index of an input given the op name of the input.
*
* @throws IllegalArgumentException if {@code opName} does not match any input in the model used
* to initialize the {@link Interpreter}.
*/
public int getInputIndex(String opName) {
checkNotClosed();
return wrapper.getInputIndex(opName);
}
/**
     * Gets the Tensor associated with the provided input index.
*
     * @throws IllegalArgumentException if {@code inputIndex} is negative or is not smaller than the
* number of model inputs.
*/
public Tensor getInputTensor(int inputIndex) {
checkNotClosed();
return wrapper.getInputTensor(inputIndex);
}
/** Gets the number of output Tensors. */
public int getOutputTensorCount() {
checkNotClosed();
return wrapper.getOutputTensorCount();
}
/**
* Gets index of an output given the op name of the output.
*
* @throws IllegalArgumentException if {@code opName} does not match any output in the model
* used to initialize the {@link Interpreter}.
*/
public int getOutputIndex(String opName) {
checkNotClosed();
return wrapper.getOutputIndex(opName);
}
/**
     * Gets the Tensor associated with the provided output index.
*
* <p>Note: Output tensor details (e.g., shape) may not be fully populated until after inference
* is executed. If you need updated details *before* running inference (e.g., after resizing an
* input tensor, which may invalidate output tensor shapes), use {@link #allocateTensors()} to
* explicitly trigger allocation and shape propagation. Note that, for graphs with output shapes
* that are dependent on input *values*, the output shape may not be fully determined until
* running inference.
*
     * @throws IllegalArgumentException if {@code outputIndex} is negative or is not smaller than the
* number of model outputs.
*/
public Tensor getOutputTensor(int outputIndex) {
checkNotClosed();
return wrapper.getOutputTensor(outputIndex);
}
/**
* Returns native inference timing.
*
* @throws IllegalArgumentException if the model is not initialized by the {@link Interpreter}.
*/
public Long getLastNativeInferenceDurationNanoseconds() {
checkNotClosed();
return wrapper.getLastNativeInferenceDurationNanoseconds();
}
/**
* Sets the number of threads to be used for ops that support multi-threading.
*
     * @deprecated Prefer using {@link Options#setNumThreads(int)} directly for controlling
     *     multi-threading. This method will be removed in a future release.
*/
@Deprecated
public void setNumThreads(int numThreads) {
checkNotClosed();
wrapper.setNumThreads(numThreads);
}
/**
* Advanced: Modifies the graph with the provided {@link Delegate}.
*
* @throws IllegalArgumentException if error occurs when modifying graph with {@code delegate}.
* @deprecated Prefer using {@link Options#addDelegate} to provide delegates at creation time.
* This method will be removed in a future release.
*/
@Deprecated
public void modifyGraphWithDelegate(Delegate delegate) {
checkNotClosed();
wrapper.modifyGraphWithDelegate(delegate);
}
/**
* Advanced: Resets all variable tensors to the default value.
*
* <p>If a variable tensor doesn't have an associated buffer, it will be reset to zero.
*
* <p>WARNING: This is an experimental API and subject to change.
*/
public void resetVariableTensors() {
checkNotClosed();
wrapper.resetVariableTensors();
}
/**
* Advanced: Interrupts inference in the middle of a call to {@link Interpreter#run}.
*
* <p>A cancellation flag will be set to true when this function gets called. The interpreter
* will check the flag between Op invocations, and if it's {@code true}, the interpreter will
     * stop execution. The interpreter will remain in a cancelled state until explicitly "uncancelled"
* by {@code setCancelled(false)}.
*
* <p>WARNING: This is an experimental API and subject to change.
*
* @param cancelled {@code true} to cancel inference in a best-effort way; {@code false} to
* resume.
* @throws IllegalStateException if the interpreter is not initialized with the cancellable
* option, which is by default off.
*/
public void setCancelled(boolean cancelled) {
wrapper.setCancelled(cancelled);
}
int getExecutionPlanLength() {
checkNotClosed();
return wrapper.getExecutionPlanLength();
}
/** Release resources associated with the {@code Interpreter}. */
@Override
public void close() {
if (wrapper != null) {
wrapper.close();
wrapper = null;
}
}
// for Object.finalize, see https://bugs.openjdk.java.net/browse/JDK-8165641
@SuppressWarnings("deprecation")
@Override
protected void finalize() throws Throwable {
try {
close();
} finally {
super.finalize();
}
}
private void checkNotClosed() {
if (wrapper == null) {
throw new IllegalStateException(
"Internal error: The Interpreter has already been closed.");
}
}
NativeInterpreterWrapper wrapper;
}
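// Illustrative sketch (not part of the upstream file): the resize/allocate/run flow described in
// the allocateTensors() Javadoc, using direct native-ordered byte buffers sized from the input
// and output tensors. The model path and input shape are hypothetical placeholders; note that
// this stripped Interpreter variant exposes run(Object) for the input only, with outputs read
// back through Tensor#copyTo.
final class InterpreterBufferSketch {
    private InterpreterBufferSketch() {}
    public static void main(String[] args) {
        File modelFile = new File("model.tflite"); // hypothetical model path
        try (Interpreter interpreter = new Interpreter(modelFile)) {
            interpreter.resizeInput(0, new int[] {1, 4, 4, 3}); // hypothetical input shape
            interpreter.allocateTensors();
            ByteBuffer input =
                    ByteBuffer.allocateDirect(interpreter.getInputTensor(0).numBytes())
                            .order(java.nio.ByteOrder.nativeOrder());
            // Populate the input buffer, then rewind it before inference.
            input.rewind();
            interpreter.run(input);
            ByteBuffer output =
                    ByteBuffer.allocateDirect(interpreter.getOutputTensor(0).numBytes())
                            .order(java.nio.ByteOrder.nativeOrder());
            interpreter.getOutputTensor(0).copyTo(output);
        }
    }
}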
|
0
|
java-sources/ai/djl/tflite/tflite-native-auto/2.4.1/org/tensorflow
|
java-sources/ai/djl/tflite/tflite-native-auto/2.4.1/org/tensorflow/lite/NativeInterpreterWrapper.java
|
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
package org.tensorflow.lite;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.MappedByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* An internal wrapper that wraps native interpreter and controls model execution.
*
* <p><b>WARNING:</b> Resources consumed by the {@code NativeInterpreterWrapper} object must be
* explicitly freed by invoking the {@link #close()} method when the {@code
* NativeInterpreterWrapper} object is no longer needed.
*/
final class NativeInterpreterWrapper implements AutoCloseable {
NativeInterpreterWrapper(String modelPath) {
this(modelPath, /* options= */ null);
}
NativeInterpreterWrapper(ByteBuffer byteBuffer) {
this(byteBuffer, /* options= */ null);
}
NativeInterpreterWrapper(String modelPath, Interpreter.Options options) {
TensorFlowLite.init();
long errorHandle = createErrorReporter(ERROR_BUFFER_SIZE);
long modelHandle = createModel(modelPath, errorHandle);
init(errorHandle, modelHandle, options);
}
NativeInterpreterWrapper(ByteBuffer buffer, Interpreter.Options options) {
TensorFlowLite.init();
if (buffer == null
|| (!(buffer instanceof MappedByteBuffer)
&& (!buffer.isDirect() || buffer.order() != ByteOrder.nativeOrder()))) {
throw new IllegalArgumentException(
"Model ByteBuffer should be either a MappedByteBuffer of the model file, or a direct "
+ "ByteBuffer using ByteOrder.nativeOrder() which contains bytes of model content.");
}
this.modelByteBuffer = buffer;
long errorHandle = createErrorReporter(ERROR_BUFFER_SIZE);
long modelHandle = createModelWithBuffer(modelByteBuffer, errorHandle);
init(errorHandle, modelHandle, options);
}
private void init(long errorHandle, long modelHandle, Interpreter.Options options) {
if (options == null) {
options = new Interpreter.Options();
}
this.errorHandle = errorHandle;
this.modelHandle = modelHandle;
this.interpreterHandle = createInterpreter(modelHandle, errorHandle, options.numThreads);
if (options.allowCancellation != null && options.allowCancellation) {
this.cancellationFlagHandle = createCancellationFlag(interpreterHandle);
}
this.inputTensors = new Tensor[getInputCount(interpreterHandle)];
this.outputTensors = new Tensor[getOutputCount(interpreterHandle)];
if (options.allowFp16PrecisionForFp32 != null) {
allowFp16PrecisionForFp32(
interpreterHandle, options.allowFp16PrecisionForFp32.booleanValue());
}
if (options.allowBufferHandleOutput != null) {
allowBufferHandleOutput(
interpreterHandle, options.allowBufferHandleOutput.booleanValue());
}
applyDelegates(options);
if (options.useXNNPACK != null) {
useXNNPACK(
interpreterHandle,
errorHandle,
options.useXNNPACK.booleanValue(),
options.numThreads);
}
allocateTensors(interpreterHandle, errorHandle);
this.isMemoryAllocated = true;
}
/** Releases resources associated with this {@code NativeInterpreterWrapper}. */
@Override
public void close() {
// Close the tensors first as they may reference the native interpreter.
for (int i = 0; i < inputTensors.length; ++i) {
if (inputTensors[i] != null) {
inputTensors[i].close();
inputTensors[i] = null;
}
}
for (int i = 0; i < outputTensors.length; ++i) {
if (outputTensors[i] != null) {
outputTensors[i].close();
outputTensors[i] = null;
}
}
delete(errorHandle, modelHandle, interpreterHandle);
deleteCancellationFlag(cancellationFlagHandle);
errorHandle = 0;
modelHandle = 0;
interpreterHandle = 0;
cancellationFlagHandle = 0;
modelByteBuffer = null;
inputsIndexes = null;
outputsIndexes = null;
isMemoryAllocated = false;
delegates.clear();
for (AutoCloseable ownedDelegate : ownedDelegates) {
try {
ownedDelegate.close();
} catch (Exception e) {
System.err.println("Failed to close flex delegate: " + e);
}
}
ownedDelegates.clear();
}
/** Sets inputs, runs model inference and returns outputs. */
void run(Object[] inputs) {
inferenceDurationNanoseconds = -1;
if (inputs == null || inputs.length == 0) {
throw new IllegalArgumentException("Input error: Inputs should not be null or empty.");
}
// TODO(b/80431971): Remove implicit resize after deprecating multi-dimensional array
// inputs.
        // Rather than forcing an immediate resize + allocation if an input's shape differs,
        // we first flush all resizes, avoiding redundant allocations.
for (int i = 0; i < inputs.length; ++i) {
Tensor tensor = getInputTensor(i);
int[] newShape = tensor.getInputShapeIfDifferent(inputs[i]);
if (newShape != null) {
resizeInput(i, newShape);
}
}
boolean needsAllocation = !isMemoryAllocated;
if (needsAllocation) {
allocateTensors(interpreterHandle, errorHandle);
isMemoryAllocated = true;
}
for (int i = 0; i < inputs.length; ++i) {
getInputTensor(i).setTo(inputs[i]);
}
long inferenceStartNanos = System.nanoTime();
run(interpreterHandle, errorHandle);
long inferenceDurationNanoseconds = System.nanoTime() - inferenceStartNanos;
// Allocation can trigger dynamic resizing of output tensors, so refresh all output shapes.
if (needsAllocation) {
for (int i = 0; i < outputTensors.length; ++i) {
if (outputTensors[i] != null) {
outputTensors[i].refreshShape();
}
}
}
// Only set if the entire operation succeeds.
this.inferenceDurationNanoseconds = inferenceDurationNanoseconds;
}
private static native void run(long interpreterHandle, long errorHandle);
/** Resizes dimensions of a specific input. */
void resizeInput(int idx, int[] dims) {
resizeInput(idx, dims, false);
}
/** Resizes dimensions of a specific input. */
void resizeInput(int idx, int[] dims, boolean strict) {
if (resizeInput(interpreterHandle, errorHandle, idx, dims, strict)) {
// Tensor allocation is deferred until either an explicit `allocateTensors()` call or
            // `invoke()`, avoiding redundant allocations if multiple tensors are simultaneously
// resized.
isMemoryAllocated = false;
if (inputTensors[idx] != null) {
inputTensors[idx].refreshShape();
}
}
}
private static native boolean resizeInput(
long interpreterHandle, long errorHandle, int inputIdx, int[] dims, boolean strict);
/** Triggers explicit allocation of tensors. */
void allocateTensors() {
if (isMemoryAllocated) {
return;
}
isMemoryAllocated = true;
allocateTensors(interpreterHandle, errorHandle);
for (int i = 0; i < outputTensors.length; ++i) {
if (outputTensors[i] != null) {
outputTensors[i].refreshShape();
}
}
}
private static native long allocateTensors(long interpreterHandle, long errorHandle);
void setNumThreads(int numThreads) {
numThreads(interpreterHandle, numThreads);
}
void modifyGraphWithDelegate(Delegate delegate) {
applyDelegate(interpreterHandle, errorHandle, delegate.getNativeHandle());
delegates.add(delegate);
}
void resetVariableTensors() {
resetVariableTensors(interpreterHandle, errorHandle);
}
/** Gets index of an input given its name. */
int getInputIndex(String name) {
if (inputsIndexes == null) {
String[] names = getInputNames(interpreterHandle);
inputsIndexes = new HashMap<>();
if (names != null) {
for (int i = 0; i < names.length; ++i) {
inputsIndexes.put(names[i], i);
}
}
}
if (inputsIndexes.containsKey(name)) {
return inputsIndexes.get(name);
} else {
throw new IllegalArgumentException(
String.format(
"Input error: '%s' is not a valid name for any input. Names of inputs and their "
+ "indexes are %s",
name, inputsIndexes.toString()));
}
}
/** Gets index of an output given its name. */
int getOutputIndex(String name) {
if (outputsIndexes == null) {
String[] names = getOutputNames(interpreterHandle);
outputsIndexes = new HashMap<>();
if (names != null) {
for (int i = 0; i < names.length; ++i) {
outputsIndexes.put(names[i], i);
}
}
}
if (outputsIndexes.containsKey(name)) {
return outputsIndexes.get(name);
} else {
throw new IllegalArgumentException(
String.format(
"Input error: '%s' is not a valid name for any output. Names of outputs and their "
+ "indexes are %s",
name, outputsIndexes.toString()));
}
}
/**
* Gets the last inference duration in nanoseconds. It returns null if there is no previous
* inference run or the last inference run failed.
*/
Long getLastNativeInferenceDurationNanoseconds() {
return (inferenceDurationNanoseconds < 0) ? null : inferenceDurationNanoseconds;
}
/** Gets the number of input tensors. */
int getInputTensorCount() {
return inputTensors.length;
}
/**
* Gets the input {@link Tensor} for the provided input index.
*
* @throws IllegalArgumentException if the input index is invalid.
*/
Tensor getInputTensor(int index) {
if (index < 0 || index >= inputTensors.length) {
throw new IllegalArgumentException("Invalid input Tensor index: " + index);
}
Tensor inputTensor = inputTensors[index];
if (inputTensor == null) {
inputTensor =
inputTensors[index] =
Tensor.fromIndex(
interpreterHandle,
getInputTensorIndex(interpreterHandle, index));
}
return inputTensor;
}
/** Gets the number of output tensors. */
int getOutputTensorCount() {
return outputTensors.length;
}
/**
* Gets the output {@link Tensor} for the provided output index.
*
* @throws IllegalArgumentException if the output index is invalid.
*/
Tensor getOutputTensor(int index) {
if (index < 0 || index >= outputTensors.length) {
throw new IllegalArgumentException("Invalid output Tensor index: " + index);
}
Tensor outputTensor = outputTensors[index];
if (outputTensor == null) {
outputTensor =
outputTensors[index] =
Tensor.fromIndex(
interpreterHandle,
getOutputTensorIndex(interpreterHandle, index));
}
return outputTensor;
}
/** Gets the number of ops in the execution plan. */
int getExecutionPlanLength() {
return getExecutionPlanLength(interpreterHandle);
}
/**
* Sets internal cancellation flag. If it's true, the interpreter will try to interrupt any
* invocation between ops.
*/
void setCancelled(boolean value) {
if (cancellationFlagHandle == 0) {
throw new IllegalStateException(
"Cannot cancel the inference. Have you called Interpreter.Options.setCancellable?");
}
setCancelled(interpreterHandle, cancellationFlagHandle, value);
}
private static native void setCancelled(
long interpreterHandle, long cancellationFlagHandle, boolean value);
private void applyDelegates(Interpreter.Options options) {
// First apply the flex delegate if necessary. This ensures the graph is fully resolved
        // before applying other delegates.
boolean originalGraphHasUnresolvedFlexOp = hasUnresolvedFlexOp(interpreterHandle);
if (originalGraphHasUnresolvedFlexOp) {
Delegate optionalFlexDelegate = maybeCreateFlexDelegate(options.delegates);
if (optionalFlexDelegate != null) {
ownedDelegates.add((AutoCloseable) optionalFlexDelegate);
applyDelegate(
interpreterHandle, errorHandle, optionalFlexDelegate.getNativeHandle());
}
}
// Now apply the user-supplied delegates.
try {
for (Delegate delegate : options.delegates) {
applyDelegate(interpreterHandle, errorHandle, delegate.getNativeHandle());
delegates.add(delegate);
}
if (options.useNNAPI != null && options.useNNAPI.booleanValue()) {
                // The NnApiDelegate class is not available in this build, so the NNAPI
                // delegate cannot be created here. The equivalent code would have been:
// NnApiDelegate optionalNnApiDelegate = new NnApiDelegate();
// ownedDelegates.add(optionalNnApiDelegate);
// applyDelegate(interpreterHandle, errorHandle,
// optionalNnApiDelegate.getNativeHandle());
}
} catch (IllegalArgumentException e) {
            // Suppress exceptions where a delegate fails to apply after the flex delegate is
            // successfully applied. This can be a common occurrence, as the flex delegate makes
            // the graph dynamic, which is typically unsupported by most delegates (e.g., NNAPI,
            // GPU delegates). We should still log an error to indicate that the delegate
            // application was a no-op.
            // TODO(b/142678372): Fix the flex delegate to not unconditionally mark graphs as
            // dynamic.
boolean shouldSuppressException =
originalGraphHasUnresolvedFlexOp && !hasUnresolvedFlexOp(interpreterHandle);
if (!shouldSuppressException) {
throw e;
}
System.err.println("Ignoring failed delegate application: " + e);
}
}
private static Delegate maybeCreateFlexDelegate(List<Delegate> delegates) {
try {
            Class<?> clazz = Class.forName("org.tensorflow.lite.flex.FlexDelegate");
// No need to create the Flex delegate if one has already been provided.
for (Delegate delegate : delegates) {
if (clazz.isInstance(delegate)) {
return null;
}
}
return (Delegate) clazz.getConstructor().newInstance();
} catch (Exception e) {
// The error will propagate when tensors are allocated.
return null;
}
}
private static native int getOutputDataType(long interpreterHandle, int outputIdx);
private static final int ERROR_BUFFER_SIZE = 512;
private long errorHandle;
private long interpreterHandle;
private long modelHandle;
private long cancellationFlagHandle = 0;
private long inferenceDurationNanoseconds = -1;
private ByteBuffer modelByteBuffer;
// Lazily constructed maps of input and output names to input and output Tensor indexes.
private Map<String, Integer> inputsIndexes;
private Map<String, Integer> outputsIndexes;
// Lazily constructed and populated arrays of input and output Tensor wrappers.
private Tensor[] inputTensors;
private Tensor[] outputTensors;
private boolean isMemoryAllocated = false;
// As the Java Delegate owns the native delegate instance, we keep a strong ref to any injected
// delegates for safety.
private final List<Delegate> delegates = new ArrayList<>();
// List of owned delegates that must be closed when the interpreter is closed.
private final List<AutoCloseable> ownedDelegates = new ArrayList<>();
private static native boolean hasUnresolvedFlexOp(long interpreterHandle);
private static native int getInputTensorIndex(long interpreterHandle, int inputIdx);
private static native int getOutputTensorIndex(long interpreterHandle, int outputIdx);
private static native int getInputCount(long interpreterHandle);
private static native int getOutputCount(long interpreterHandle);
private static native int getExecutionPlanLength(long interpreterHandle);
private static native String[] getInputNames(long interpreterHandle);
private static native String[] getOutputNames(long interpreterHandle);
private static native void numThreads(long interpreterHandle, int numThreads);
private static native void allowFp16PrecisionForFp32(long interpreterHandle, boolean allow);
private static native void allowBufferHandleOutput(long interpreterHandle, boolean allow);
private static native void useXNNPACK(
long interpreterHandle, long errorHandle, boolean state, int numThreads);
private static native long createErrorReporter(int size);
private static native long createModel(String modelPathOrBuffer, long errorHandle);
private static native long createModelWithBuffer(ByteBuffer modelBuffer, long errorHandle);
private static native long createInterpreter(
long modelHandle, long errorHandle, int numThreads);
private static native void applyDelegate(
long interpreterHandle, long errorHandle, long delegateHandle);
private static native void resetVariableTensors(long interpreterHandle, long errorHandle);
private static native long createCancellationFlag(long interpreterHandle);
private static native long deleteCancellationFlag(long cancellationFlagHandle);
private static native void delete(long errorHandle, long modelHandle, long interpreterHandle);
}
|
0
|
java-sources/ai/djl/tflite/tflite-native-auto/2.4.1/org/tensorflow
|
java-sources/ai/djl/tflite/tflite-native-auto/2.4.1/org/tensorflow/lite/Tensor.java
|
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
package org.tensorflow.lite;
import java.lang.reflect.Array;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.FloatBuffer;
import java.nio.IntBuffer;
import java.nio.LongBuffer;
import java.util.Arrays;
/**
* A typed multi-dimensional array used in Tensorflow Lite.
*
* <p>The native handle of a {@code Tensor} is managed by {@code NativeInterpreterWrapper}, and does
 * not need to be closed by the client. However, once the {@code NativeInterpreterWrapper} has
* been closed, the tensor handle will be invalidated.
*/
// TODO(b/153882978): Add scalar getters similar to TF's Java API.
public final class Tensor {
/**
* Creates a Tensor wrapper from the provided interpreter instance and tensor index.
*
* <p>The caller is responsible for closing the created wrapper, and ensuring the provided
* native interpreter is valid until the tensor is closed.
*/
static Tensor fromIndex(long nativeInterpreterHandle, int tensorIndex) {
return new Tensor(create(nativeInterpreterHandle, tensorIndex));
}
/**
     * Quantization parameters that correspond to the table, {@code QuantizationParameters}, in the
* <a
* href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/schema/schema.fbs">TFLite
* Model schema file.</a>
*
* <p>Since per-channel quantization does not apply to input and output tensors, {@code scale}
* and {@code zero_point} are both single values instead of arrays.
*
     * <p>For tensors that are not quantized, the values of scale and zero_point are both 0.
*
* <p>Given a quantized value q, the corresponding float value f should be: <br>
* f = scale * (q - zero_point) <br>
*/
public static class QuantizationParams {
/** The scale value used in quantization. */
private final float scale;
/** The zero point value used in quantization. */
private final int zeroPoint;
/**
* Creates a {@link QuantizationParams} with {@code scale} and {@code zero_point}.
*
* @param scale The scale value used in quantization.
* @param zeroPoint The zero point value used in quantization.
*/
public QuantizationParams(final float scale, final int zeroPoint) {
this.scale = scale;
this.zeroPoint = zeroPoint;
}
/** Returns the scale value. */
public float getScale() {
return scale;
}
/** Returns the zero point value. */
public int getZeroPoint() {
return zeroPoint;
}
}
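    // Illustrative note (not part of the upstream file): applying the dequantization formula from
    // the Javadoc above. With scale = 0.5f and zeroPoint = 10, a quantized value q = 14 maps to
    //   f = scale * (q - zeroPoint) = 0.5f * (14 - 10) = 2.0f
    // In code (tensor and q are hypothetical):
    //   QuantizationParams params = tensor.quantizationParams();
    //   float f = params.getScale() * (q - params.getZeroPoint());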
/** Disposes of any resources used by the Tensor wrapper. */
public void close() {
delete(nativeHandle);
nativeHandle = 0;
}
/** Returns the {@link DataType} of elements stored in the Tensor. */
public DataType dataType() {
return dtype;
}
/**
* Returns the number of dimensions (sometimes referred to as <a
* href="https://www.tensorflow.org/resources/dims_types.html#rank">rank</a>) of the Tensor.
*
* <p>Will be 0 for a scalar, 1 for a vector, 2 for a matrix, 3 for a 3-dimensional tensor etc.
*/
public int numDimensions() {
return shapeCopy.length;
}
/** Returns the size, in bytes, of the tensor data. */
public int numBytes() {
return numBytes(nativeHandle);
}
/** Returns the number of elements in a flattened (1-D) view of the tensor. */
public int numElements() {
return computeNumElements(shapeCopy);
}
/**
* Returns the <a href="https://www.tensorflow.org/resources/dims_types.html#shape">shape</a> of
* the Tensor, i.e., the sizes of each dimension.
*
* @return an array where the i-th element is the size of the i-th dimension of the tensor.
*/
public int[] shape() {
return shapeCopy;
}
/**
* Returns the original <a
* href="https://www.tensorflow.org/resources/dims_types.html#shape">shape</a> of the Tensor,
* i.e., the sizes of each dimension - before any resizing was performed. Unknown dimensions are
* designated with a value of -1.
*
* @return an array where the i-th element is the size of the i-th dimension of the tensor.
*/
public int[] shapeSignature() {
return shapeSignatureCopy;
}
/** Returns the (global) index of the tensor within the owning {@link Interpreter}. */
public int index() {
return index(nativeHandle);
}
/** Returns the name of the tensor within the owning {@link Interpreter}. */
public String name() {
return name(nativeHandle);
}
/**
* Returns the quantization parameters of the tensor within the owning {@link Interpreter}.
*
     * <p>Only quantized tensors have valid {@code QuantizationParameters}. For tensors that are not
* quantized, the values of scale and zero_point are both 0.
*/
public QuantizationParams quantizationParams() {
return quantizationParamsCopy;
}
/**
* Copies the contents of the provided {@code src} object to the Tensor.
*
* <p>The {@code src} should either be a (multi-dimensional) array with a shape matching that of
* this tensor, a {@link ByteBuffer} of compatible primitive type with a matching flat size, or
* {@code null} iff the tensor has an underlying delegate buffer handle.
*
* @throws IllegalArgumentException if the tensor is a scalar or if {@code src} is not
* compatible with the tensor (for example, mismatched data types or shapes).
*/
public void setTo(Object src) {
if (src == null) {
if (hasDelegateBufferHandle(nativeHandle)) {
return;
}
throw new IllegalArgumentException(
"Null inputs are allowed only if the Tensor is bound to a buffer handle.");
}
throwIfTypeIsIncompatible(src);
throwIfSrcShapeIsIncompatible(src);
if (isBuffer(src)) {
setTo((Buffer) src);
} else if (src.getClass().isArray()) {
writeMultiDimensionalArray(nativeHandle, src);
} else {
writeScalar(nativeHandle, src);
}
}
public void setTo(Buffer src) {
// Note that we attempt to use a direct memcpy optimization for direct, native-ordered
// buffers.
// There are no base Buffer#order() or Buffer#put() methods, so again we have to ugly cast.
if (src instanceof ByteBuffer) {
ByteBuffer srcBuffer = (ByteBuffer) src;
if (srcBuffer.isDirect() && srcBuffer.order() == ByteOrder.nativeOrder()) {
writeDirectBuffer(nativeHandle, src);
} else {
buffer().put(srcBuffer);
}
} else if (src instanceof LongBuffer) {
LongBuffer srcBuffer = (LongBuffer) src;
if (srcBuffer.isDirect() && srcBuffer.order() == ByteOrder.nativeOrder()) {
writeDirectBuffer(nativeHandle, src);
} else {
buffer().asLongBuffer().put(srcBuffer);
}
} else if (src instanceof FloatBuffer) {
FloatBuffer srcBuffer = (FloatBuffer) src;
if (srcBuffer.isDirect() && srcBuffer.order() == ByteOrder.nativeOrder()) {
writeDirectBuffer(nativeHandle, src);
} else {
buffer().asFloatBuffer().put(srcBuffer);
}
} else if (src instanceof IntBuffer) {
IntBuffer srcBuffer = (IntBuffer) src;
if (srcBuffer.isDirect() && srcBuffer.order() == ByteOrder.nativeOrder()) {
writeDirectBuffer(nativeHandle, src);
} else {
buffer().asIntBuffer().put(srcBuffer);
}
} else {
throw new IllegalArgumentException("Unexpected input buffer type: " + src);
}
}
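    // Illustrative note (not part of the upstream file): to hit the writeDirectBuffer() fast path
    // above, supply a direct, native-ordered buffer; heap or non-native-ordered buffers still work
    // but fall back to the copying path through buffer(). For a FLOAT32 tensor with n elements
    // (n is hypothetical):
    //   FloatBuffer input = ByteBuffer.allocateDirect(4 * n)
    //           .order(ByteOrder.nativeOrder())
    //           .asFloatBuffer();
    //   tensor.setTo(input);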
/**
* Copies the contents of the tensor to {@code dst} and returns {@code dst}.
*
* @param dst the destination buffer, either an explicitly-typed array, a {@link ByteBuffer} or
* {@code null} iff the tensor has an underlying delegate buffer handle.
* @throws IllegalArgumentException if {@code dst} is not compatible with the tensor (for
* example, mismatched data types or shapes).
*/
public Object copyTo(Object dst) {
if (dst == null) {
if (hasDelegateBufferHandle(nativeHandle)) {
return dst;
}
throw new IllegalArgumentException(
"Null outputs are allowed only if the Tensor is bound to a buffer handle.");
}
throwIfTypeIsIncompatible(dst);
throwIfDstShapeIsIncompatible(dst);
if (isBuffer(dst)) {
copyTo((Buffer) dst);
} else {
readMultiDimensionalArray(nativeHandle, dst);
}
return dst;
}
public void copyTo(Buffer dst) {
// There is no base Buffer#put() method, so we have to ugly cast.
if (dst instanceof ByteBuffer) {
((ByteBuffer) dst).put(buffer());
} else if (dst instanceof FloatBuffer) {
((FloatBuffer) dst).put(buffer().asFloatBuffer());
} else if (dst instanceof LongBuffer) {
((LongBuffer) dst).put(buffer().asLongBuffer());
} else if (dst instanceof IntBuffer) {
((IntBuffer) dst).put(buffer().asIntBuffer());
} else {
throw new IllegalArgumentException("Unexpected output buffer type: " + dst);
}
}
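  // Hypothetical usage sketch (not part of the original source): reading a 1x10
  // FLOAT32 output tensor back into a Java array. "outputTensor" is an assumed
  // Tensor reference.
  //
  //   float[][] dst = new float[1][10];
  //   outputTensor.copyTo(dst); // dst shape must match the tensor shape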
/** Returns the provided buffer's shape if specified and different from this Tensor's shape. */
// TODO(b/80431971): Remove this method after deprecating multi-dimensional array inputs.
int[] getInputShapeIfDifferent(Object input) {
if (input == null) {
return null;
}
// Implicit resizes based on ByteBuffer capacity isn't supported, so short-circuit that
// path.
// The Buffer's size will be validated against this Tensor's size in {@link #setTo(Object)}.
if (isBuffer(input)) {
return null;
}
throwIfTypeIsIncompatible(input);
int[] inputShape = computeShapeOf(input);
if (Arrays.equals(shapeCopy, inputShape)) {
return null;
}
return inputShape;
}
/**
* Forces a refresh of the tensor's cached shape.
*
* <p>This is useful if the tensor is resized or has a dynamic shape.
*/
void refreshShape() {
this.shapeCopy = shape(nativeHandle);
}
/** Returns the type of the data. */
DataType dataTypeOf(Object o) {
if (o != null) {
Class<?> c = o.getClass();
// For arrays, the data elements must be a *primitive* type, e.g., an
// array of floats is fine, but not an array of Floats.
if (c.isArray()) {
while (c.isArray()) {
c = c.getComponentType();
}
if (float.class.equals(c)) {
return DataType.FLOAT32;
} else if (int.class.equals(c)) {
return DataType.INT32;
} else if (byte.class.equals(c)) {
// Byte array can be used for storing string tensors, especially for
// ParseExample op.
if (dtype == DataType.STRING) {
return DataType.STRING;
}
return DataType.UINT8;
} else if (long.class.equals(c)) {
return DataType.INT64;
} else if (boolean.class.equals(c)) {
return DataType.BOOL;
} else if (String.class.equals(c)) {
return DataType.STRING;
}
} else {
// For scalars, the type will be boxed.
if (Float.class.equals(c) || o instanceof FloatBuffer) {
return DataType.FLOAT32;
} else if (Integer.class.equals(c) || o instanceof IntBuffer) {
return DataType.INT32;
} else if (Byte.class.equals(c)) {
// Note that we don't check for ByteBuffer here; ByteBuffer payloads
// are allowed to map to any type, and should be handled earlier
// in the input/output processing pipeline.
return DataType.UINT8;
} else if (Long.class.equals(c) || o instanceof LongBuffer) {
return DataType.INT64;
} else if (Boolean.class.equals(c)) {
return DataType.BOOL;
} else if (String.class.equals(c)) {
return DataType.STRING;
}
}
}
throw new IllegalArgumentException(
"DataType error: cannot resolve DataType of " + o.getClass().getName());
}
/** Returns the shape of an object as an int array. */
int[] computeShapeOf(Object o) {
int size = computeNumDimensions(o);
if (dtype == DataType.STRING) {
Class<?> c = o.getClass();
if (c.isArray()) {
while (c.isArray()) {
c = c.getComponentType();
}
// If the given string data is stored in byte streams, the last array dimension
// should be
// treated as a value.
if (byte.class.equals(c)) {
--size;
}
}
}
int[] dimensions = new int[size];
fillShape(o, 0, dimensions);
return dimensions;
}
/** Returns the number of elements in a flattened (1-D) view of the tensor's shape. */
static int computeNumElements(int[] shape) {
int n = 1;
for (int i = 0; i < shape.length; ++i) {
n *= shape[i];
}
return n;
}
/** Returns the number of dimensions of a multi-dimensional array, otherwise 0. */
static int computeNumDimensions(Object o) {
if (o == null || !o.getClass().isArray()) {
return 0;
}
if (Array.getLength(o) == 0) {
throw new IllegalArgumentException("Array lengths cannot be 0.");
}
return 1 + computeNumDimensions(Array.get(o, 0));
}
/** Recursively populates the shape dimensions for a given (multi-dimensional) array. */
static void fillShape(Object o, int dim, int[] shape) {
if (shape == null || dim == shape.length) {
return;
}
final int len = Array.getLength(o);
if (shape[dim] == 0) {
shape[dim] = len;
} else if (shape[dim] != len) {
throw new IllegalArgumentException(
String.format(
"Mismatched lengths (%d and %d) in dimension %d",
shape[dim], len, dim));
}
for (int i = 0; i < len; ++i) {
fillShape(Array.get(o, i), dim + 1, shape);
}
}
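  // Illustration (not part of the original source) of the shape helpers above:
  // for "new float[2][3][4]", computeNumDimensions returns 3, computeShapeOf
  // returns {2, 3, 4}, and computeNumElements(new int[] {2, 3, 4}) returns 24.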
private void throwIfTypeIsIncompatible(Object o) {
// ByteBuffer payloads can map to any type, so exempt it from the check.
if (isByteBuffer(o)) {
return;
}
DataType oType = dataTypeOf(o);
if (oType != dtype) {
// INT8 and UINT8 have the same string name, "byte"
if (oType.toStringName().equals(dtype.toStringName())) {
return;
}
throw new IllegalArgumentException(
String.format(
"Cannot convert between a TensorFlowLite tensor with type %s and a Java "
+ "object of type %s (which is compatible with the TensorFlowLite type %s).",
dtype, o.getClass().getName(), oType));
}
}
private void throwIfSrcShapeIsIncompatible(Object src) {
if (isBuffer(src)) {
Buffer srcBuffer = (Buffer) src;
int bytes = numBytes();
// Note that we allow the client to provide a ByteBuffer even for non-byte Tensors.
// In such cases, we only care that the raw byte capacity matches the tensor byte
// capacity.
int srcBytes =
isByteBuffer(src)
? srcBuffer.capacity()
: srcBuffer.capacity() * dtype.byteSize();
if (bytes != srcBytes) {
throw new IllegalArgumentException(
String.format(
"Cannot copy to a TensorFlowLite tensor (%s) with %d bytes from a "
+ "Java Buffer with %d bytes.",
name(), bytes, srcBytes));
}
return;
}
int[] srcShape = computeShapeOf(src);
if (!Arrays.equals(srcShape, shapeCopy)) {
throw new IllegalArgumentException(
String.format(
"Cannot copy to a TensorFlowLite tensor (%s) with shape %s from a Java object "
+ "with shape %s.",
name(), Arrays.toString(shapeCopy), Arrays.toString(srcShape)));
}
}
private void throwIfDstShapeIsIncompatible(Object dst) {
if (isBuffer(dst)) {
Buffer dstBuffer = (Buffer) dst;
int bytes = numBytes();
// Note that we allow the client to provide a ByteBuffer even for non-byte Tensors.
// In such cases, we only care that the raw byte capacity fits the tensor byte capacity.
// This is subtly different than Buffer *inputs*, where the size should be exact.
int dstBytes =
isByteBuffer(dst)
? dstBuffer.capacity()
: dstBuffer.capacity() * dtype.byteSize();
if (bytes > dstBytes) {
throw new IllegalArgumentException(
String.format(
"Cannot copy from a TensorFlowLite tensor (%s) with %d bytes to a "
+ "Java Buffer with %d bytes.",
name(), bytes, dstBytes));
}
return;
}
int[] dstShape = computeShapeOf(dst);
if (!Arrays.equals(dstShape, shapeCopy)) {
throw new IllegalArgumentException(
String.format(
"Cannot copy from a TensorFlowLite tensor (%s) with shape %s to a Java object "
+ "with shape %s.",
name(), Arrays.toString(shapeCopy), Arrays.toString(dstShape)));
}
}
private static boolean isBuffer(Object o) {
return o instanceof Buffer;
}
private static boolean isByteBuffer(Object o) {
return o instanceof ByteBuffer;
}
private long nativeHandle;
private final DataType dtype;
private int[] shapeCopy;
private final int[] shapeSignatureCopy;
private final QuantizationParams quantizationParamsCopy;
private Tensor(long nativeHandle) {
this.nativeHandle = nativeHandle;
this.dtype = DataType.fromC(dtype(nativeHandle));
this.shapeCopy = shape(nativeHandle);
this.shapeSignatureCopy = shapeSignature(nativeHandle);
this.quantizationParamsCopy =
new QuantizationParams(
quantizationScale(nativeHandle), quantizationZeroPoint(nativeHandle));
}
public ByteBuffer buffer() {
return buffer(nativeHandle).order(ByteOrder.nativeOrder());
}
private static native long create(long interpreterHandle, int tensorIndex);
private static native void delete(long handle);
private static native ByteBuffer buffer(long handle);
private static native void writeDirectBuffer(long handle, Buffer src);
private static native int dtype(long handle);
private static native int[] shape(long handle);
private static native int[] shapeSignature(long handle);
private static native int numBytes(long handle);
private static native boolean hasDelegateBufferHandle(long handle);
private static native void readMultiDimensionalArray(long handle, Object dst);
private static native void writeMultiDimensionalArray(long handle, Object src);
private static native void writeScalar(long handle, Object src);
private static native int index(long handle);
private static native String name(long handle);
private static native float quantizationScale(long handle);
private static native int quantizationZeroPoint(long handle);
}
|
0
|
java-sources/ai/djl/tflite/tflite-native-auto/2.4.1/org/tensorflow
|
java-sources/ai/djl/tflite/tflite-native-auto/2.4.1/org/tensorflow/lite/TensorFlowLite.java
|
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
package org.tensorflow.lite;
/** Static utility methods loading the TensorFlowLite runtime. */
public final class TensorFlowLite {
private static volatile boolean isInit = false;
private TensorFlowLite() {}
/**
* Returns the version of the underlying TensorFlowLite model schema.
*
   * @deprecated Prefer using {@link #runtimeVersion()} or {@link #schemaVersion()}.
*/
@Deprecated
public static String version() {
return schemaVersion();
}
/** Returns the version of the underlying TensorFlowLite runtime. */
public static String runtimeVersion() {
init();
return nativeRuntimeVersion();
}
/** Returns the version of the underlying TensorFlowLite model schema. */
public static String schemaVersion() {
init();
return nativeSchemaVersion();
}
/**
* Ensure the TensorFlowLite native library has been loaded.
*
* <p>If unsuccessful, throws an UnsatisfiedLinkError with the appropriate error message.
*/
public static void init() {
if (isInit) {
return;
}
nativeRuntimeVersion();
isInit = true;
}
public static native String nativeRuntimeVersion();
public static native String nativeSchemaVersion();
}
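// Hypothetical usage sketch (not part of the original source): checking the
// bundled runtime and schema versions before creating an Interpreter.
//
//   TensorFlowLite.init(); // loads the native library, throws UnsatisfiedLinkError on failure
//   String runtime = TensorFlowLite.runtimeVersion();
//   String schema = TensorFlowLite.schemaVersion();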
|
0
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries/Forecast.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.timeseries;
import ai.djl.ndarray.NDArray;
import java.time.LocalDateTime;
/** An abstract class representing the forecast results for the time series data. */
public abstract class Forecast {
protected LocalDateTime startDate;
protected int predictionLength;
protected String freq;
/**
* Constructs a {@code Forecast} instance.
*
* @param startDate the time series start date
* @param predictionLength the time length of prediction
* @param freq the prediction frequency
*/
public Forecast(LocalDateTime startDate, int predictionLength, String freq) {
this.startDate = startDate;
this.predictionLength = predictionLength;
this.freq = freq;
}
/**
* Computes a quantile from the predicted distribution.
*
* @param q quantile to compute
* @return value of the quantile across the prediction range
*/
public abstract NDArray quantile(float q);
/**
* Computes a quantile from the predicted distribution.
*
* @param q quantile to compute
* @return value of the quantile across the prediction range
*/
public NDArray quantile(String q) {
return quantile(Float.parseFloat(q));
}
/**
* Computes and returns the forecast mean.
*
* @return forecast mean
*/
public abstract NDArray mean();
/**
* Computes the median of forecast.
*
* @return value of the median
*/
public NDArray median() {
return quantile(0.5f);
}
/**
     * Returns the prediction frequency, such as "D" (daily) or "H" (hourly).
*
* @return the prediction frequency
*/
public String freq() {
return freq;
}
/**
* Returns the time length of forecast.
*
* @return the prediction length
*/
public int getPredictionLength() {
return predictionLength;
}
}
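// Hypothetical usage sketch (not part of the original source): summarizing a
// forecast. "forecast" is an assumed Forecast instance returned by a predictor.
//
//   NDArray mean = forecast.mean();
//   NDArray median = forecast.median(); // equivalent to quantile(0.5f)
//   NDArray p90 = forecast.quantile(0.9f);
//   int horizon = forecast.getPredictionLength();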
|
0
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries/SampleForecast.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.timeseries;
import ai.djl.ndarray.NDArray;
import java.time.LocalDateTime;
/**
* A {@link Forecast} object, where the predicted distribution is represented internally as samples.
*/
public class SampleForecast extends Forecast {
private NDArray samples;
private int numSamples;
/**
     * Constructs a {@code SampleForecast}.
*
* @param samples {@link NDArray} array of size (num_samples, prediction_length) (1D case),
* (num_samples, prediction_length, target_dim) (multivariate case)
* @param startDate start of the forecast
* @param freq frequency of the forecast
*/
public SampleForecast(NDArray samples, LocalDateTime startDate, String freq) {
super(startDate, (int) samples.getShape().get(1), freq);
this.samples = samples;
this.numSamples = (int) samples.getShape().head();
}
/**
* Returns the sorted sample array.
*
* @return the sorted sample array
*/
public NDArray getSortedSamples() {
return samples.sort(0);
}
/**
* Returns the number of samples representing the forecast.
*
* @return the number of samples
*/
public int getNumSamples() {
return numSamples;
}
/** {@inheritDoc} */
@Override
public NDArray quantile(float q) {
int sampleIdx = Math.round((numSamples - 1) * q);
return getSortedSamples().get("{}, :", sampleIdx);
}
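    // Worked example (not part of the original source): with numSamples = 100 and
    // q = 0.9f, sampleIdx = Math.round(99 * 0.9f) = 89, so quantile(0.9f) returns
    // row 89 of the samples sorted along axis 0.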
/**
* Returns a new Forecast object with only the selected sub-dimension.
*
* @param dim the selected dim
* @return a new {@link SampleForecast}.
*/
public SampleForecast copyDim(int dim) {
NDArray copySamples;
if (samples.getShape().dimension() == 2) {
copySamples = samples;
} else {
int targetDim = (int) samples.getShape().get(2);
if (dim >= targetDim) {
throw new IllegalArgumentException(
String.format(
"must set 0 <= dim < target_dim, but got dim=%d, target_dim=%d",
dim, targetDim));
}
copySamples = samples.get(":, :, {}", dim);
}
return new SampleForecast(copySamples, startDate, freq);
}
    /** {@inheritDoc} */
@Override
public NDArray mean() {
return samples.mean(new int[] {0});
}
}
|
0
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries/TimeSeriesData.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.timeseries;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.timeseries.dataset.FieldName;
import ai.djl.util.Pair;
import ai.djl.util.PairList;
import java.time.LocalDateTime;
import java.util.List;
import java.util.Map;
import java.util.Objects;
/**
 * {@link TimeSeriesData} is a data entry for managing time series data during preprocessing. It
 * contains key-value entries mapping each {@link FieldName} to an {@link NDArray} used to
 * generate the time features.
 *
 * <p>This class provides a convenient way for users to featurize the data.
*/
public class TimeSeriesData extends PairList<String, NDArray> {
private LocalDateTime startTime;
private LocalDateTime forecastStartTime;
/**
* Constructs an empty {@code TimeSeriesData} with the specified initial capacity.
*
* @param initialCapacity the initial capacity of the list
* @throws IllegalArgumentException if the specified initial capacity is negative
*/
public TimeSeriesData(int initialCapacity) {
super(initialCapacity);
}
/**
* Constructs a {@code TimeSeriesData} containing the elements of the specified keys and values.
*
* @param keys the key list containing elements to be placed into this PairList
* @param values the value list containing elements to be placed into this PairList
* @throws IllegalArgumentException if the keys and values size are different
*/
public TimeSeriesData(List<String> keys, List<NDArray> values) {
super(keys, values);
}
/**
* Constructs a {@code TimeSeriesData} containing the elements of the specified list of Pairs.
*
* @param list the list containing elements to be placed into this PairList
*/
public TimeSeriesData(List<Pair<String, NDArray>> list) {
super(list);
}
/**
* Constructs a {@code TimeSeriesData} containing the elements of the specified map.
*
* @param map the map contains keys and values
*/
public TimeSeriesData(Map<String, NDArray> map) {
super(map);
}
/**
     * Converts the {@link NDArray} values of this {@code TimeSeriesData} into an {@link NDList},
     * naming each array {@code "data" + index}.
     *
     * @return an {@link NDList} containing the values
*/
public NDList toNDList() {
List<NDArray> arrays = this.values();
int index = 0;
for (NDArray array : arrays) {
array.setName("data" + index);
index++;
}
return new NDList(arrays);
}
/**
* Returns the time series start time.
*
* @return a {@link LocalDateTime} representing start time
*/
public LocalDateTime getStartTime() {
return startTime;
}
/**
* Returns the time series forecasting time.
*
* @return a {@link LocalDateTime} representing the time to forecast
*/
public LocalDateTime getForecastStartTime() {
return forecastStartTime;
}
/**
* Adds a fieldName and value to the list.
*
* @param fieldName the {@link FieldName}.
* @param value the {@link NDArray} value
*/
public void add(FieldName fieldName, NDArray value) {
add(fieldName.name(), value);
}
/**
* Returns the value for the fieldName.
*
* @param fieldName the {@link FieldName} of the element to get
* @return the {@link NDArray} value for the {@link FieldName}
*/
public NDArray get(FieldName fieldName) {
return get(fieldName.name());
}
/**
     * Replaces the existing {@link NDArray} for the given field name with the value.
*
* @param fieldName the {@link FieldName}
* @param value the {@link NDArray} value
*/
public void setField(String fieldName, NDArray value) {
remove(fieldName);
add(fieldName, value);
}
/**
     * Replaces the existing {@link NDArray} of the {@link FieldName} with the value.
     *
     * @param fieldName the {@link FieldName}
     * @param value the {@link NDArray} value
*/
public void setField(FieldName fieldName, NDArray value) {
setField(fieldName.name(), value);
}
/**
* Set the time series start time.
*
* @param value the {@link LocalDateTime} start time.
*/
public void setStartTime(LocalDateTime value) {
this.startTime = value;
}
/**
* Set the time series forecasting time.
*
* @param value the {@link LocalDateTime} time to forecast
*/
public void setForecastStartTime(LocalDateTime value) {
this.forecastStartTime = value;
}
/**
* Removes the key-value pair for the {@link FieldName}.
*
* @param fieldName the {@link FieldName} to be removed.
*/
public void remove(FieldName fieldName) {
remove(fieldName.name());
}
    /** {@inheritDoc} */
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
if (!super.equals(o)) {
return false;
}
TimeSeriesData pairs = (TimeSeriesData) o;
return Objects.equals(startTime, pairs.startTime)
&& Objects.equals(forecastStartTime, pairs.forecastStartTime);
}
    /** {@inheritDoc} */
@Override
public int hashCode() {
return Objects.hash(super.hashCode(), startTime, forecastStartTime);
}
}
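// Hypothetical usage sketch (not part of the original source): building a
// TimeSeriesData entry. "manager" is an assumed NDManager; the values are
// illustrative only.
//
//   TimeSeriesData data = new TimeSeriesData(4);
//   data.setStartTime(LocalDateTime.parse("2020-01-01T00:00:00"));
//   data.add(FieldName.TARGET, manager.create(new float[] {1f, 2f, 3f}));
//   NDArray target = data.get(FieldName.TARGET);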
|
0
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries/package-info.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
/** Contains classes to support TimeSeries in djl. */
package ai.djl.timeseries;
|
0
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries/block/FeatureEmbedder.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.timeseries.block;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDArrays;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.AbstractBlock;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
import java.util.ArrayList;
import java.util.List;
/** Embed a sequence of categorical features. */
public class FeatureEmbedder extends AbstractBlock {
private List<Integer> cardinalities;
private List<Integer> embeddingDims;
private List<FeatureEmbedding> embedders;
private int numFeatures;
FeatureEmbedder(Builder builder) {
cardinalities = builder.cardinalities;
embeddingDims = builder.embeddingDims;
numFeatures = cardinalities.size();
embedders = new ArrayList<>();
for (int i = 0; i < cardinalities.size(); i++) {
embedders.add(createEmbedding(i, cardinalities.get(i), embeddingDims.get(i)));
}
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
// Categorical features with shape: (N,T,C) or (N,C), where C is the number of categorical
// features.
NDArray features = inputs.singletonOrThrow();
NDList catFeatureSlices;
if (numFeatures > 1) {
// slice the last dimension, giving an array of length numFeatures with shape (N,T) or
// (N)
catFeatureSlices = features.split(numFeatures, features.getShape().dimension() - 1);
} else {
catFeatureSlices = new NDList(features);
}
NDList output = new NDList();
for (int i = 0; i < numFeatures; i++) {
FeatureEmbedding embed = embedders.get(i);
NDArray catFeatureSlice = catFeatureSlices.get(i);
catFeatureSlice = catFeatureSlice.squeeze(-1);
output.add(
embed.forward(parameterStore, new NDList(catFeatureSlice), training, params)
.singletonOrThrow());
}
return new NDList(NDArrays.concat(output, -1));
}
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(Shape[] inputShapes) {
Shape inputShape = inputShapes[0];
Shape[] embedInputShapes = {inputShape.slice(0, inputShape.dimension() - 1)};
long embedSizes = 0;
for (FeatureEmbedding embed : embedders) {
embedSizes += embed.getOutputShapes(embedInputShapes)[0].tail();
}
return new Shape[] {inputShape.slice(0, inputShape.dimension() - 1).add(embedSizes)};
}
/** {@inheritDoc} */
@Override
protected void initializeChildBlocks(
NDManager manager, DataType dataType, Shape... inputShapes) {
for (FeatureEmbedding embed : embedders) {
embed.initialize(manager, dataType, inputShapes);
}
}
private FeatureEmbedding createEmbedding(int i, int c, int d) {
FeatureEmbedding embedding =
FeatureEmbedding.builder().setNumEmbeddings(c).setEmbeddingSize(d).build();
addChildBlock(String.format("cat_%d_embedding", i), embedding);
return embedding;
}
/**
     * Returns a builder to build a {@code FeatureEmbedder}.
*
* @return a new builder
*/
public static Builder builder() {
return new Builder();
}
/** The builder to construct a {@link FeatureEmbedder} type of {@link ai.djl.nn.Block}. */
public static final class Builder {
private List<Integer> cardinalities;
private List<Integer> embeddingDims;
/**
* Set the cardinality for each categorical feature.
*
* @param cardinalities the cardinality for each categorical feature
* @return this Builder
*/
public Builder setCardinalities(List<Integer> cardinalities) {
this.cardinalities = cardinalities;
return this;
}
/**
* Set the number of dimensions to embed each categorical feature.
*
* @param embeddingDims number of dimensions to embed each categorical feature
* @return this Builder
*/
public Builder setEmbeddingDims(List<Integer> embeddingDims) {
this.embeddingDims = embeddingDims;
return this;
}
/**
* Return the constructed {@code FeatureEmbedder}.
*
* @return the constructed {@code FeatureEmbedder}
*/
public FeatureEmbedder build() {
if (cardinalities.isEmpty()) {
throw new IllegalArgumentException(
"Length of 'cardinalities' list must be greater than zero");
}
if (cardinalities.size() != embeddingDims.size()) {
throw new IllegalArgumentException(
"Length of `cardinalities` and `embedding_dims` should match");
}
for (int c : cardinalities) {
if (c <= 0) {
throw new IllegalArgumentException("Elements of `cardinalities` should be > 0");
}
}
for (int d : embeddingDims) {
if (d <= 0) {
throw new IllegalArgumentException(
"Elements of `embedding_dims` should be > 0");
}
}
return new FeatureEmbedder(this);
}
}
}
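// Hypothetical usage sketch (not part of the original source): embedding two
// categorical features with cardinalities 10 and 5 into 3- and 2-dimensional
// vectors; the concatenated output then has 5 embedding dimensions.
//
//   FeatureEmbedder embedder =
//           FeatureEmbedder.builder()
//                   .setCardinalities(Arrays.asList(10, 5))
//                   .setEmbeddingDims(Arrays.asList(3, 2))
//                   .build();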
|
0
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries/block/FeatureEmbedding.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.timeseries.block;
import ai.djl.Device;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.types.Shape;
import ai.djl.ndarray.types.SparseFormat;
import ai.djl.nn.AbstractBlock;
import ai.djl.nn.Parameter;
import ai.djl.nn.core.Embedding;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
/** An implementation of nn.Embedding. */
public final class FeatureEmbedding extends AbstractBlock {
private static final String EMBEDDING_PARAM_NAME = "embedding";
private int embeddingSize;
private int numEmbeddings;
private Parameter embedding;
FeatureEmbedding(Builder builder) {
embeddingSize = builder.embeddingSize;
numEmbeddings = builder.numEmbeddings;
embedding =
addParameter(
Parameter.builder()
.setName(EMBEDDING_PARAM_NAME)
.setType(Parameter.Type.WEIGHT)
.optShape(new Shape(numEmbeddings, embeddingSize))
.build());
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
NDArray input = inputs.singletonOrThrow();
Device device = input.getDevice();
NDArray weight = parameterStore.getValue(embedding, device, training);
return Embedding.embedding(input, weight, SparseFormat.DENSE);
}
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(Shape[] inputShapes) {
return new Shape[] {inputShapes[0].addAll(new Shape(embeddingSize))};
}
/**
     * Returns a builder to build a {@code FeatureEmbedding}.
*
* @return a new builder
*/
public static Builder builder() {
return new Builder();
}
/** The builder to construct a {@link FeatureEmbedding} type of {@link ai.djl.nn.Block}. */
public static final class Builder {
private int embeddingSize;
private int numEmbeddings;
/**
* Sets the size of the embeddings.
*
* @param embeddingSize the size of the embeddings
* @return this Builder
*/
public Builder setEmbeddingSize(int embeddingSize) {
this.embeddingSize = embeddingSize;
return this;
}
/**
* Sets the size of the dictionary of embeddings.
*
* @param numEmbeddings the size of the dictionary of embeddings
* @return this Builder
*/
public Builder setNumEmbeddings(int numEmbeddings) {
this.numEmbeddings = numEmbeddings;
return this;
}
/**
* Return the constructed {@code FeatureEmbedding}.
*
* @return the constructed {@code FeatureEmbedding}
*/
public FeatureEmbedding build() {
if (numEmbeddings <= 0) {
throw new IllegalArgumentException(
"You must specify the dictionary Size for the embedding.");
}
if (embeddingSize == 0) {
throw new IllegalArgumentException("You must specify the embedding size");
}
return new FeatureEmbedding(this);
}
}
}
|
0
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries/block/MeanScaler.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.timeseries.block;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDArrays;
import ai.djl.ndarray.NDList;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
/**
 * A class that computes a scaling factor as the weighted average absolute value along dimension
* dim}, and scales the data accordingly.
*/
public class MeanScaler extends Scaler {
private float minimumScale;
MeanScaler(Builder builder) {
super(builder);
minimumScale = builder.minimumScale;
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
NDArray data = inputs.get(0);
NDArray weights = inputs.get(1);
NDArray totalWeight = weights.sum(new int[] {dim});
NDArray weightedSum = data.abs().mul(weights).sum(new int[] {dim});
NDArray totalObserved = totalWeight.sum(new int[] {0});
NDArray denominator = NDArrays.maximum(totalObserved, 1f);
NDArray defaultScale = weightedSum.sum(new int[] {0}).div(denominator);
denominator = NDArrays.maximum(totalWeight, 1f);
NDArray scale = weightedSum.div(denominator);
scale =
NDArrays.maximum(
minimumScale,
NDArrays.where(
weightedSum.gt(weightedSum.zerosLike()),
scale,
defaultScale.mul(totalWeight.onesLike())))
.expandDims(dim);
return new NDList(data.div(scale), keepDim ? scale : scale.squeeze(dim));
}
/**
* Create a builder to build a {@code MeanScaler}.
*
* @return a new builder
*/
public static Builder builder() {
return new Builder();
}
/** The builder to construct a {@code MeanScaler}. */
public static final class Builder extends ScalerBuilder<Builder> {
private float minimumScale = 1e-10f;
Builder() {}
/**
         * Sets the minimum scale value of the data.
*
* @param minimumScale the minimum value
* @return this Builder
*/
public Builder optMinimumScale(float minimumScale) {
this.minimumScale = minimumScale;
return this;
}
/** {@inheritDoc} */
@Override
protected Builder self() {
return this;
}
/**
* Return the constructed {@code MeanScaler}.
*
* @return the constructed {@code MeanScaler}
*/
public MeanScaler build() {
validate();
return new MeanScaler(this);
}
}
}
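// Hypothetical usage sketch (not part of the original source): scaling along the
// time dimension (dim = 1 for inputs shaped (N, T, ...)) while keeping the
// reduced dimension in the returned scale.
//
//   MeanScaler scaler =
//           MeanScaler.builder()
//                   .setDim(1)
//                   .optKeepDim(true)
//                   .optMinimumScale(1e-10f)
//                   .build();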
|
0
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries/block/NopScaler.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.timeseries.block;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
/**
 * A class that assigns a scaling factor equal to 1 along dimension {@code dim}, and therefore
 * applies no
* scaling to the input data.
*/
public class NopScaler extends Scaler {
NopScaler(Builder builder) {
super(builder);
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
NDArray data = inputs.get(0);
NDArray scale = data.onesLike().mean(new int[] {dim}, keepDim);
return new NDList(data, scale);
}
/**
* Create a builder to build a {@code NopScaler}.
*
* @return a new builder
*/
public static Builder builder() {
return new Builder();
}
/** The builder to construct a {@code NopScaler}. */
public static final class Builder extends ScalerBuilder<Builder> {
/** {@inheritDoc} */
@Override
protected Builder self() {
return this;
}
/**
         * Returns the constructed {@code NopScaler}.
         *
         * @return the constructed {@code NopScaler}
*/
public NopScaler build() {
validate();
return new NopScaler(this);
}
}
}
|
0
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries/block/Scaler.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.timeseries.block;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.AbstractBlock;
import ai.djl.util.Preconditions;
/** An abstract class used to scale data. */
public abstract class Scaler extends AbstractBlock {
private static final byte VERSION = 1;
protected int dim;
protected boolean keepDim;
Scaler(ScalerBuilder<?> builder) {
super(VERSION);
dim = builder.dim;
keepDim = builder.keepDim;
}
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(Shape[] inputShapes) {
Shape inputShape = inputShapes[0];
Shape outputShape = new Shape();
for (int i = 0; i < inputShape.dimension(); i++) {
if (i != dim) {
outputShape = outputShape.add(inputShape.get(i));
} else {
if (keepDim) {
outputShape = outputShape.add(1L);
}
}
}
return new Shape[] {inputShape, outputShape};
}
/**
     * A builder to extend for all classes that extend the {@link Scaler}.
*
* @param <T> the concrete builder type
*/
public abstract static class ScalerBuilder<T extends ScalerBuilder<T>> {
protected int dim;
protected boolean keepDim;
/**
* Set the dim to scale.
*
* @param dim which dim to scale
* @return this Builder
*/
public T setDim(int dim) {
this.dim = dim;
return self();
}
/**
         * Sets whether to keep the dim. Defaults to false.
*
* @param keepDim whether to keep dim
* @return this Builder
*/
public T optKeepDim(boolean keepDim) {
this.keepDim = keepDim;
return self();
}
/**
* Validates that the required arguments are set.
*
* @throws IllegalArgumentException if the required arguments are illegal
*/
protected void validate() {
Preconditions.checkArgument(
dim > 0,
"Cannot compute scale along dim = 0 (batch dimension), please provide dim > 0");
}
protected abstract T self();
}
}
|
0
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries/block/package-info.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
/** Contains the basic block classes. */
package ai.djl.timeseries.block;
|
0
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries/dataset/CsvTimeSeriesDataset.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.timeseries.dataset;
import ai.djl.basicdataset.tabular.utils.DynamicBuffer;
import ai.djl.basicdataset.tabular.utils.Feature;
import ai.djl.basicdataset.tabular.utils.PreparedFeaturizer;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.Shape;
import ai.djl.timeseries.TimeSeriesData;
import ai.djl.util.Pair;
import ai.djl.util.PairList;
import ai.djl.util.Progress;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;
import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.net.MalformedURLException;
import java.net.URL;
import java.nio.FloatBuffer;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.time.LocalDateTime;
import java.util.ArrayList;
import java.util.List;
import java.util.zip.GZIPInputStream;
/** {@code CsvTimeSeriesDataset} represents a dataset stored in a .csv file. */
public class CsvTimeSeriesDataset extends TimeSeriesDataset {
protected PairList<FieldName, List<Feature>> fieldFeatures;
protected Feature startTimeFeature;
protected URL csvUrl;
protected CSVFormat csvFormat;
protected List<CSVRecord> csvRecords;
protected CsvTimeSeriesDataset(CsvBuilder<?> builder) {
super(builder);
fieldFeatures = builder.fieldFeatures;
startTimeFeature = builder.startTimeFeatures;
csvUrl = builder.csvUrl;
csvFormat = builder.csvFormat;
}
/** {@inheritDoc} */
@Override
protected long availableSize() {
return csvRecords.size();
}
/** {@inheritDoc} */
@Override
public void prepare(Progress progress) throws IOException {
try (Reader reader = new InputStreamReader(getCsvStream(), StandardCharsets.UTF_8)) {
CSVParser csvParser = CSVParser.parse(reader, csvFormat);
csvRecords = csvParser.getRecords();
}
prepareFeaturizers();
}
private InputStream getCsvStream() throws IOException {
if (csvUrl.getFile().endsWith(".gz")) {
return new GZIPInputStream(csvUrl.openStream());
}
return new BufferedInputStream(csvUrl.openStream());
}
/** {@inheritDoc} */
@Override
public TimeSeriesData getTimeSeriesData(NDManager manager, long index) {
TimeSeriesData data = new TimeSeriesData(fieldFeatures.size());
for (Pair<FieldName, List<Feature>> pair : fieldFeatures) {
if (!pair.getValue().isEmpty()) {
data.add(
pair.getKey(),
getRowFeatures(manager, index, pair.getValue()).singletonOrThrow());
}
}
data.setStartTime(getStartTime(index));
return data;
}
/** Prepares the {@link PreparedFeaturizer}s. */
protected void prepareFeaturizers() {
int availableSize = Math.toIntExact(availableSize());
List<Feature> featuresToPrepare = new ArrayList<>();
for (List<Feature> list : fieldFeatures.values()) {
featuresToPrepare.addAll(list);
}
for (Feature feature : featuresToPrepare) {
if (feature.getFeaturizer() instanceof PreparedFeaturizer) {
PreparedFeaturizer featurizer = (PreparedFeaturizer) feature.getFeaturizer();
List<String> inputs = new ArrayList<>(Math.toIntExact(availableSize));
for (int i = 0; i < availableSize; i++) {
inputs.add(getCell(i, feature.getName()));
}
featurizer.prepare(inputs);
}
}
}
/**
* Return the prediction start time for the given index.
*
* @param rowIndex the row index
* @return the start time
*/
public LocalDateTime getStartTime(long rowIndex) {
CSVRecord record = csvRecords.get(Math.toIntExact(rowIndex));
TimeFeaturizer featurizer = (TimeFeaturizer) startTimeFeature.getFeaturizer();
if (featurizer instanceof TimeFeaturizers.ConstantTimeFeaturizer) {
return featurizer.featurize(null);
}
String value = record.get(startTimeFeature.getName());
return featurizer.featurize(value);
}
/**
* Returns the designated features (either data or label features) from a row.
*
* @param manager the manager used to create the arrays
* @param index the index of the requested data item
* @param selected the features to pull from the row
* @return the features formatted as an {@link NDList}
*/
public NDList getRowFeatures(NDManager manager, long index, List<Feature> selected) {
DynamicBuffer bb = new DynamicBuffer();
for (Feature feature : selected) {
String name = feature.getName();
String value = getCell(index, name);
feature.getFeaturizer().featurize(bb, value);
}
FloatBuffer buf = bb.getBuffer();
return new NDList(manager.create(buf, new Shape(bb.getLength())));
}
/**
* Returns a cell in the dataset.
*
* @param rowIndex the row index or record index for the cell
* @param featureName the feature or column of the cell
* @return the value of the cell at that row and column
*/
protected String getCell(long rowIndex, String featureName) {
CSVRecord record = csvRecords.get(Math.toIntExact(rowIndex));
return record.get(featureName);
}
/** Used to build a {@code CsvTimeSeriesDataset}. */
public static class CsvBuilder<T extends CsvBuilder<T>> extends TimeSeriesBuilder<T> {
protected PairList<FieldName, List<Feature>> fieldFeatures;
protected Feature startTimeFeatures;
protected URL csvUrl;
protected CSVFormat csvFormat;
protected CsvBuilder() {
fieldFeatures = new PairList<>(DATASET_FIELD_NAMES.length);
for (FieldName fieldName : DATASET_FIELD_NAMES) {
fieldFeatures.add(fieldName, new ArrayList<>());
}
}
/** {@inheritDoc} */
@Override
@SuppressWarnings("unchecked")
protected T self() {
return (T) this;
}
/**
* Set the optional CSV file path.
*
* @param csvFile the CSV file path
* @return this builder
*/
public T optCsvFile(Path csvFile) {
try {
this.csvUrl = csvFile.toAbsolutePath().toUri().toURL();
} catch (MalformedURLException e) {
throw new IllegalArgumentException("Invalid file path: " + csvFile, e);
}
return self();
}
/**
* Set the optional CSV file URL.
*
* @param csvUrl the CSV file URL
* @return this builder
*/
public T optCsvUrl(String csvUrl) {
try {
this.csvUrl = new URL(csvUrl);
} catch (MalformedURLException e) {
throw new IllegalArgumentException("Invalid url: " + csvUrl, e);
}
return self();
}
/**
* Set the CSV file format.
*
* @param csvFormat the {@code CSVFormat}
* @return this builder
*/
public T setCsvFormat(CSVFormat csvFormat) {
this.csvFormat = csvFormat;
return self();
}
/**
         * Adds a feature to the corresponding {@link FieldName}.
         *
         * @param fieldName the corresponding {@link FieldName}
* @param feature the feature
* @return this builder
*/
public T addFieldFeature(FieldName fieldName, Feature feature) {
if (fieldName == FieldName.START) {
startTimeFeatures = feature;
} else if (fieldFeatures.contains(fieldName)) {
fieldFeatures.get(fieldName).add(feature);
} else {
throw new IllegalArgumentException("Unsupported feature field type: " + fieldName);
}
return self();
}
/**
* Validate the builder to ensure it is correct.
*
* @throws IllegalArgumentException if there is an error with the builder arguments
*/
protected void validate() {
if (fieldFeatures.get(FieldName.TARGET).isEmpty()) {
throw new IllegalArgumentException("Missing target");
} else if (startTimeFeatures == null) {
throw new IllegalArgumentException("Missing start time");
}
}
/**
* Build the new {@link CsvTimeSeriesDataset}.
*
* @return the new {@link CsvTimeSeriesDataset}
*/
public CsvTimeSeriesDataset build() {
validate();
return new CsvTimeSeriesDataset(this);
}
}
}
|
0
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries/dataset/FieldName.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.timeseries.dataset;
import ai.djl.timeseries.TimeSeriesData;
/** Represents the name field of elements in a {@link TimeSeriesData} as an enum. */
public enum FieldName {
ITEM_ID,
START,
TARGET,
FEAT_STATIC_CAT,
FEAT_STATIC_REAL,
FEAT_DYNAMIC_CAT,
FEAT_DYNAMIC_REAL,
PAST_FEAT_DYNAMIC_REAL,
FEAT_DYNAMIC_REAL_LEGACY,
FEAT_DYNAMIC,
PAST_FEAT_DYNAMIC,
FEAT_TIME,
FEAT_CONST,
FEAT_AGE,
OBSERVED_VALUES,
IS_PAD,
FORECAST_START,
TARGET_DIM_INDICATOR;
}
|
0
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries/dataset/M5Forecast.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.timeseries.dataset;
import ai.djl.Application;
import ai.djl.basicdataset.BasicDatasets;
import ai.djl.basicdataset.tabular.utils.Feature;
import ai.djl.basicdataset.tabular.utils.Featurizers;
import ai.djl.repository.Artifact;
import ai.djl.repository.MRL;
import ai.djl.repository.Repository;
import ai.djl.util.JsonUtils;
import ai.djl.util.Progress;
import org.apache.commons.csv.CSVFormat;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* M5 Forecasting - Accuracy from <a
* href="https://www.kaggle.com/competitions/m5-forecasting-accuracy">https://www.kaggle.com/competitions/m5-forecasting-accuracy</a>
*
 * <p>To improve model performance, we coarse-grain the target of the dataset by summing the sale
 * amount over every seven days and set the column names of the sums to 'w_i'. This reduces the
 * occurrence of invalid zero values and the noise learned by the model.
*/
public class M5Forecast extends CsvTimeSeriesDataset {
private static final String ARTIFACT_ID = "m5forecast";
private static final String VERSION = "1.0";
private Usage usage;
private MRL mrl;
private boolean prepared;
private List<Integer> cardinality;
/**
* Creates a new instance of {@link M5Forecast} with the given necessary configurations.
*
* @param builder a builder with the necessary configurations
*/
protected M5Forecast(Builder builder) {
super(builder);
usage = builder.usage;
mrl = builder.getMrl();
cardinality = builder.cardinality;
}
/** {@inheritDoc} */
@Override
public void prepare(Progress progress) throws IOException {
if (prepared) {
return;
}
Artifact artifact = mrl.getDefaultArtifact();
mrl.prepare(artifact, progress);
Path root = mrl.getRepository().getResourceDirectory(artifact);
Path csvFile = root.resolve(getUsagePath(usage));
csvUrl = csvFile.toUri().toURL();
super.prepare(progress);
prepared = true;
}
/**
     * Returns the cardinality of the dataset.
*
* @return the cardinality of the dataset
*/
public List<Integer> getCardinality() {
return cardinality;
}
/**
* Creates a builder to build a {@link M5Forecast}.
*
* @return a new builder
*/
public static Builder builder() {
return new Builder();
}
private String getUsagePath(Usage usage) {
        // We coarse-grain the data by summing the sale amount every 7 days and rename the .csv
        // files as 'weekly_***'
switch (usage) {
case TRAIN:
return "weekly_sales_train_validation.csv";
case TEST:
return "weekly_sales_train_evaluation.csv";
case VALIDATION:
default:
throw new UnsupportedOperationException("Data not available.");
}
}
/** Used to build a {@code M5Forecast}. */
public static class Builder extends CsvBuilder<Builder> {
Repository repository;
String groupId;
String artifactId;
Usage usage;
M5Features mf;
List<Integer> cardinality;
Builder() {
repository = BasicDatasets.REPOSITORY;
groupId = BasicDatasets.GROUP_ID;
artifactId = ARTIFACT_ID;
usage = Usage.TRAIN;
csvFormat =
CSVFormat.DEFAULT
.builder()
.setHeader()
.setSkipHeaderRecord(true)
.setIgnoreHeaderCase(true)
.setTrim(true)
.get();
cardinality = new ArrayList<>();
}
MRL getMrl() {
return repository.dataset(Application.Tabular.ANY, groupId, artifactId, VERSION);
}
/** {@inheritDoc} */
@Override
protected Builder self() {
return this;
}
/**
* Sets the optional repository.
*
* @param repository the repository
* @return this builder
*/
public Builder optRepository(Repository repository) {
this.repository = repository;
return this;
}
/**
         * Sets the optional groupId.
         *
         * @param groupId the groupId
* @return this builder
*/
public Builder optGroupId(String groupId) {
this.groupId = groupId;
return this;
}
/**
* Sets the optional artifactId.
*
* @param artifactId the artifactId
* @return this builder
*/
public Builder optArtifactId(String artifactId) {
if (artifactId.contains(":")) {
String[] tokens = artifactId.split(":");
groupId = tokens[0];
this.artifactId = tokens[1];
} else {
this.artifactId = artifactId;
}
return this;
}
/**
* Sets the optional usage.
*
* @param usage the usage
* @return this builder
*/
public Builder optUsage(Usage usage) {
this.usage = usage;
return this;
}
/**
         * Adds a feature to the feature set of the field name.
*
* @param name the name of the feature
* @param fieldName the field name
* @return this builder
*/
public Builder addFeature(String name, FieldName fieldName) {
return addFeature(name, fieldName, false);
}
/**
         * Adds a feature to the feature set of the field name with one-hot encoding.
         *
         * @param name the name of the feature
         * @param fieldName the field name
         * @param onehotEncode true to use one-hot encoding
* @return this builder
*/
public Builder addFeature(String name, FieldName fieldName, boolean onehotEncode) {
parseFeatures();
if (mf.categorical.contains(name)) {
Map<String, Integer> map = mf.featureToMap.get(name);
if (map == null) {
return addFieldFeature(
fieldName,
new Feature(name, Featurizers.getStringFeaturizer(onehotEncode)));
}
cardinality.add(map.size());
return addFieldFeature(fieldName, new Feature(name, map, onehotEncode));
}
return addFieldFeature(fieldName, new Feature(name, true));
}
/**
* Returns the available features of this dataset.
*
* @return a list of feature names
*/
public List<String> getAvailableFeatures() {
parseFeatures();
return mf.featureArray;
}
/**
* Build the new {@code M5Forecast}.
*
* @return the new {@code M5Forecast}
*/
@Override
public M5Forecast build() {
validate();
return new M5Forecast(this);
}
private void parseFeatures() {
if (mf == null) {
try (InputStream is =
M5Forecast.class.getResourceAsStream("m5forecast_parser.json");
Reader reader = new InputStreamReader(is, StandardCharsets.UTF_8)) {
mf = JsonUtils.GSON.fromJson(reader, M5Features.class);
} catch (IOException e) {
throw new AssertionError(
"Failed to read m5forecast_parser.json from classpath", e);
}
}
}
}
private static final class M5Features {
List<String> featureArray;
Set<String> categorical;
        // categorical: feature names from featureArray whose String values map to an integer
        // index via featureToMap
Map<String, Map<String, Integer>> featureToMap;
/**
* Sets the feature array.
*
* @param featureArray the feature array
*/
public void setFeatureArray(List<String> featureArray) {
this.featureArray = featureArray;
}
/**
* Sets the categorical value.
*
* @param categorical the categorical value
*/
public void setCategorical(Set<String> categorical) {
this.categorical = categorical;
}
/**
* Sets the feature map.
*
* @param featureToMap the feature map
*/
public void setFeatureToMap(Map<String, Map<String, Integer>> featureToMap) {
this.featureToMap = featureToMap;
}
}
}
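// Hypothetical usage sketch (not part of the original source): a partial
// configuration of the weekly M5 dataset. The column names ("item_id", "w_1",
// "date") are assumptions about the CSV header, and any additional configuration
// required by the parent TimeSeriesDataset builder is omitted.
//
//   M5Forecast dataset =
//           M5Forecast.builder()
//                   .optUsage(Dataset.Usage.TRAIN)
//                   .addFeature("item_id", FieldName.FEAT_STATIC_CAT)
//                   .addFeature("w_1", FieldName.TARGET)
//                   .addFieldFeature(
//                           FieldName.START,
//                           new Feature(
//                                   "date",
//                                   TimeFeaturizers.getConstantTimeFeaturizer(
//                                           LocalDateTime.parse("2011-01-29T00:00:00"))))
//                   .build();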
|
0
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries/dataset/TimeFeaturizer.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.timeseries.dataset;
import ai.djl.basicdataset.tabular.utils.DynamicBuffer;
import ai.djl.basicdataset.tabular.utils.Featurizer;
import java.time.LocalDateTime;
import java.time.ZoneOffset;
/**
 * An interface that converts a String to a {@link LocalDateTime} used as the start field of
 * {@link ai.djl.timeseries.TimeSeriesData}.
*/
public interface TimeFeaturizer extends Featurizer {
/** {@inheritDoc} */
@Override
default void featurize(DynamicBuffer buf, String input) {
throw new IllegalArgumentException(
"Please use the other featurize for DateTimeFeaturizers");
}
/**
* Return the parsed time data.
*
* @param input the string input
* @return the parsed {@link LocalDateTime}
*/
LocalDateTime featurize(String input);
/** {@inheritDoc} */
@Override
default int dataRequired() {
return 2;
}
/** {@inheritDoc} */
@Override
default Object deFeaturize(float[] data) {
return LocalDateTime.ofEpochSecond((long) data[0], (int) data[1], ZoneOffset.UTC);
}
}
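/**
 * Illustrative sketch (not part of the library): a minimal implementation that parses
 * ISO-8601 local date-times such as "2011-01-29T00:00". The class name is hypothetical.
 */
final class IsoTimeFeaturizerExample implements TimeFeaturizer {
    /** {@inheritDoc} */
    @Override
    public LocalDateTime featurize(String input) {
        // LocalDateTime.parse uses DateTimeFormatter.ISO_LOCAL_DATE_TIME by default
        return LocalDateTime.parse(input);
    }
}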
|
0
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries/dataset/TimeFeaturizers.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.timeseries.dataset;
import ai.djl.basicdataset.tabular.utils.Featurizer;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
/** A utility class that provides helper functions to create {@link TimeFeaturizer}s. */
public final class TimeFeaturizers {
private TimeFeaturizers() {}
/**
* Construct a {@link PatternTimeFeaturizer}.
*
* @param datePattern the pattern that dates are found in the data table column
* @return a new instance of {@link PatternTimeFeaturizer}
*/
public static Featurizer getPatternTimeFeaturizer(String datePattern) {
return new PatternTimeFeaturizer(datePattern);
}
/**
* Construct a {@link ConstantTimeFeaturizer}.
*
     * @param dateTime the date time to return for all inputs
* @return a new instance of {@link ConstantTimeFeaturizer}
*/
public static Featurizer getConstantTimeFeaturizer(LocalDateTime dateTime) {
return new ConstantTimeFeaturizer(dateTime);
}
    /** A featurizer for features of date type, parsed with a configurable date pattern. */
public static final class PatternTimeFeaturizer implements TimeFeaturizer {
String datePattern;
/**
* Constructs a {@link PatternTimeFeaturizer}.
*
* @param datePattern the pattern that dates are found in the data table column
*/
PatternTimeFeaturizer(String datePattern) {
this.datePattern = datePattern;
}
/** {@inheritDoc} */
@Override
public LocalDateTime featurize(String input) {
return LocalDateTime.parse(input, DateTimeFormatter.ofPattern(datePattern));
}
}
    /** A featurizer that always returns a constant date. */
public static final class ConstantTimeFeaturizer implements TimeFeaturizer {
LocalDateTime dateTime;
/**
* Constructs a {@link ConstantTimeFeaturizer}.
*
* @param dateTime the constant date
*/
ConstantTimeFeaturizer(LocalDateTime dateTime) {
this.dateTime = LocalDateTime.from(dateTime);
}
/** {@inheritDoc} */
@Override
public LocalDateTime featurize(String input) {
return dateTime;
}
}
}
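/**
 * Illustrative usage sketch (not part of the library): the pattern and sample value are
 * assumptions for demonstration only. Note that {@link LocalDateTime} needs both date and time
 * fields, so the pattern must supply a time component.
 */
final class TimeFeaturizersExample {
    private TimeFeaturizersExample() {}
    static LocalDateTime demo() {
        // getPatternTimeFeaturizer returns a Featurizer backed by PatternTimeFeaturizer
        TimeFeaturizer featurizer =
                (TimeFeaturizer) TimeFeaturizers.getPatternTimeFeaturizer("yyyy-MM-dd HH:mm");
        return featurizer.featurize("2016-01-29 00:00");
    }
}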
|
0
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries/dataset/TimeSeriesDataset.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.timeseries.dataset;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.NDManager;
import ai.djl.timeseries.TimeSeriesData;
import ai.djl.timeseries.transform.TimeSeriesTransform;
import ai.djl.training.dataset.RandomAccessDataset;
import ai.djl.training.dataset.Record;
import java.util.List;
/** An abstract class for creating time series datasets. */
public abstract class TimeSeriesDataset extends RandomAccessDataset {
protected List<TimeSeriesTransform> transformation;
protected int contextLength;
static final FieldName[] DATASET_FIELD_NAMES = {
FieldName.TARGET,
FieldName.FEAT_STATIC_CAT,
FieldName.FEAT_STATIC_REAL,
FieldName.FEAT_DYNAMIC_CAT,
FieldName.FEAT_DYNAMIC_REAL
};
protected TimeSeriesDataset(TimeSeriesBuilder<?> builder) {
super(builder);
transformation = builder.transformation;
contextLength = builder.contextLength;
}
/**
     * {@code TimeSeriesDataset} overrides the get function so that it can preprocess the feature
     * data in the way the timeseries package expects.
*
* <p>{@inheritDoc}
*/
@Override
public Record get(NDManager manager, long index) {
TimeSeriesData data = getTimeSeriesData(manager, index);
if (transformation.isEmpty()) {
// For inference with translator
return new Record(data.toNDList(), new NDList());
}
data = apply(manager, data);
// For both training and prediction
if (!data.contains("PAST_" + FieldName.TARGET)) {
throw new IllegalArgumentException(
"Transformation must include InstanceSampler to split data into past and future"
+ " part");
}
if (!data.contains("FUTURE_" + FieldName.TARGET)) {
// Warning: We do not recommend using TimeSeriesDataset directly to generate the
// inference input, using Translator instead
// For prediction without translator, we don't need labels and corresponding
// FUTURE_TARGET.
return new Record(data.toNDList(), new NDList());
}
// For training, we must have the FUTURE_TARGET label to compute Loss.
NDArray contextTarget = data.get("PAST_" + FieldName.TARGET).get("{}:", -contextLength + 1);
NDArray futureTarget = data.get("FUTURE_" + FieldName.TARGET);
NDList label = new NDList(contextTarget.concat(futureTarget, 0));
return new Record(data.toNDList(), label);
}
/**
* Return the {@link TimeSeriesData} for the given index from the {@code TimeSeriesDataset}.
*
* @param manager the manager to create data
* @param index the index
* @return the {@link TimeSeriesData}
*/
public abstract TimeSeriesData getTimeSeriesData(NDManager manager, long index);
/**
     * Applies the preprocessing transformations to a {@link TimeSeriesData}.
     *
     * @param manager the default {@link NDManager}
     * @param input the {@link TimeSeriesData} to operate on
* @return the transformed data
*/
private TimeSeriesData apply(NDManager manager, TimeSeriesData input) {
try (NDManager scope = manager.newSubManager()) {
input.values().forEach(array -> array.tempAttach(scope));
for (TimeSeriesTransform transform : transformation) {
input = transform.transform(manager, input, true);
}
input.values().forEach(array -> array.attach(manager));
}
return input;
}
/**
* Used to build a {@code TimeSeriesDataset}.
*
* @param <T> the builder type
*/
public abstract static class TimeSeriesBuilder<T extends TimeSeriesBuilder<T>>
extends RandomAccessDataset.BaseBuilder<T> {
protected List<TimeSeriesTransform> transformation;
protected int contextLength;
/**
         * Set the transformation for data preprocessing.
*
* @param transformation the transformation
* @return this builder
*/
public T setTransformation(List<TimeSeriesTransform> transformation) {
this.transformation = transformation;
return self();
}
/**
* Set the model prediction context length.
*
* @param contextLength the context length
* @return this builder
*/
public T setContextLength(int contextLength) {
this.contextLength = contextLength;
return self();
}
}
}
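// Configuration sketch (illustrative only): how a concrete subclass builder is typically wired.
// "MyForecastDataset" and the transform list are hypothetical; setSampling comes from
// RandomAccessDataset.BaseBuilder.
//
// MyForecastDataset dataset = MyForecastDataset.builder()
//         .setTransformation(transforms)   // must end by splitting into PAST_/FUTURE_ parts
//         .setContextLength(28)            // history length used to build the label
//         .setSampling(32, true)           // batch size and shuffling
//         .build();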
|
0
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries/dataset/package-info.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
/** Contains the basic dataset classes. */
package ai.djl.timeseries.dataset;
|
0
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries/distribution/AffineTransformed.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.timeseries.distribution;
import ai.djl.ndarray.NDArray;
/** Represents the distribution of an affinely transformed random variable. */
public class AffineTransformed extends Distribution {
private Distribution baseDistribution;
private NDArray loc;
private NDArray scale;
/**
     * Constructs a new {@code AffineTransformed}.
*
* <p>This is the distribution of Y = scale * X + loc, where X is a random variable distributed
* according to {@code baseDistribution}.
*
* @param baseDistribution original distribution
* @param loc translation parameter of the affine transformation
* @param scale scaling parameter of the affine transformation
*/
public AffineTransformed(Distribution baseDistribution, NDArray loc, NDArray scale) {
this.baseDistribution = baseDistribution;
this.loc = loc == null ? baseDistribution.mean().zerosLike() : loc;
this.scale = scale == null ? baseDistribution.mean().onesLike() : scale;
}
/** {@inheritDoc} */
@Override
public NDArray logProb(NDArray target) {
NDArray x = fInv(target);
NDArray ladj = logAbsDetJac(x);
NDArray lp = ladj.mul(-1);
return baseDistribution.logProb(x).add(lp);
}
/** {@inheritDoc} */
@Override
public NDArray sample(int numSamples) {
NDArray sample = baseDistribution.sample(numSamples);
return f(sample);
}
/** {@inheritDoc} */
@Override
public NDArray mean() {
return baseDistribution.mean().mul(scale).add(loc);
}
private NDArray f(NDArray x) {
return x.mul(scale).add(loc);
}
private NDArray fInv(NDArray y) {
return y.sub(loc).div(scale);
}
private NDArray logAbsDetJac(NDArray x) {
return scale.broadcast(x.getShape()).abs().log();
}
}
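// Change-of-variables note (restating the logic above, not new behavior): with
// f(x) = scale * x + loc and x = fInv(y) = (y - loc) / scale,
//     log p_Y(y) = log p_X(fInv(y)) - log|scale|,
// which is what logProb() computes via baseDistribution.logProb(x) minus logAbsDetJac(x).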
|
0
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries/distribution/Distribution.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.timeseries.distribution;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
/** An abstract class representing probability distribution. */
public abstract class Distribution {
/**
* Compute the log of the probability density/mass function evaluated at target.
*
* @param target {@link NDArray} of shape (*batch_shape, *event_shape)
* @return Tensor of shape (batch_shape) containing the probability log-density for each event
* in target
*/
public abstract NDArray logProb(NDArray target);
/**
* Draw samples from the distribution.
*
     * <p>This function expands the dimensions of the arguments; the first dimension of the output
     * will be numSamples.
*
* @param numSamples Number of samples to be drawn
     * @return an {@link NDArray} with shape (num_samples, *batch_shape, *target_shape)
*/
public abstract NDArray sample(int numSamples);
/**
* Draw samples from the distribution.
*
     * <p>This function does not expand the dimensions.
*
* @return a sampled {@link NDArray}
*/
public NDArray sample() {
return sample(0);
}
/**
* Return the mean of the distribution.
*
* @return the mean of the distribution
*/
public abstract NDArray mean();
/**
     * A builder to extend for all classes that extend {@link Distribution}.
*
* @param <T> the concrete builder type
*/
public abstract static class DistributionBuilder<T extends DistributionBuilder<T>> {
protected NDList distrArgs;
protected NDArray scale;
protected NDArray loc;
/**
* Set the appropriate arguments for the probability distribution.
*
         * @param distrArgs an {@link NDList} containing distribution arguments, each named after
         *     its parameter
* @return this builder
*/
public T setDistrArgs(NDList distrArgs) {
this.distrArgs = distrArgs;
return self();
}
/**
* Set the affine scale for the probability distribution.
*
* @param scale the affine scale
* @return this builder
*/
public T optScale(NDArray scale) {
this.scale = scale;
return self();
}
/**
         * Set the affine location of the probability distribution.
*
* @param loc the affine location
* @return this builder
*/
public T optLoc(NDArray loc) {
this.loc = loc;
return self();
}
/**
* Build a {@code Distribution}.
*
* @return the {@code Distribution}
*/
public abstract Distribution build();
protected abstract T self();
}
}
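/**
 * Illustrative sketch (not part of the library): builds a {@code StudentT} from named parameter
 * arrays and returns the negative log-likelihood of the target. It assumes {@code distrArgs}
 * contains arrays named "mu", "sigma" and "nu".
 */
final class DistributionUsageExample {
    private DistributionUsageExample() {}
    static NDArray negLogLikelihood(NDList distrArgs, NDArray target) {
        // Builder pattern shown above: set named args, then build the concrete distribution
        Distribution distr = StudentT.builder().setDistrArgs(distrArgs).build();
        return distr.logProb(target).mul(-1);
    }
}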
|
0
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries/distribution/DistributionLoss.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.timeseries.distribution;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDArrays;
import ai.djl.ndarray.NDList;
import ai.djl.timeseries.distribution.output.DistributionOutput;
import ai.djl.training.loss.Loss;
/**
* {@code DistributionLoss} calculates loss for a given distribution.
*
 * <p>Distribution Loss is calculated by {@link Distribution#logProb(NDArray)} at the label points.
*/
public class DistributionLoss extends Loss {
private DistributionOutput distrOutput;
/**
* Calculates Distribution Loss between the label and distribution.
*
* @param name the name of the loss
* @param distrOutput the {@link DistributionOutput} to construct the target distribution
*/
public DistributionLoss(String name, DistributionOutput distrOutput) {
super(name);
this.distrOutput = distrOutput;
}
/** {@inheritDoc} */
@Override
public NDArray evaluate(NDList labels, NDList predictions) {
Distribution.DistributionBuilder<?> builder = distrOutput.distributionBuilder();
builder.setDistrArgs(predictions);
if (predictions.contains("scale")) {
builder.optScale(predictions.get("scale"));
}
if (predictions.contains("loc")) {
builder.optLoc(predictions.get("loc"));
}
NDArray loss = builder.build().logProb(labels.singletonOrThrow()).mul(-1);
if (predictions.contains("loss_weights")) {
NDArray lossWeights = predictions.get("loss_weights");
NDArray weightedValue =
NDArrays.where(lossWeights.neq(0), loss.mul(lossWeights), loss.zerosLike());
NDArray sumWeights = lossWeights.sum().maximum(1f);
loss = weightedValue.sum().div(sumWeights);
}
return loss;
}
}
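// Weighting note (restating evaluate() above): when predictions contain "loss_weights",
//     loss = sum_i(w_i * nll_i, over w_i != 0) / max(sum_i(w_i), 1)
// so zero-weighted observations (for example padded time steps) are excluded and the result is
// normalized by the total weight, clamped to at least 1.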
|
0
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries/distribution/NegativeBinomial.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.timeseries.distribution;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDManager;
import ai.djl.util.Preconditions;
/**
* Negative binomial distribution.
*
* <p>The distribution of the number of successes in a sequence of independent Bernoulli trials.
*
 * <p>This distribution has two arguments: {@code total_count}, the non-negative number of
 * negative Bernoulli trials at which to stop, and {@code logits}, the event log-odds for the
 * probability of success.
*/
public final class NegativeBinomial extends Distribution {
private NDArray totalCount;
private NDArray logits;
NegativeBinomial(Builder builder) {
totalCount = builder.distrArgs.get("total_count");
logits = builder.distrArgs.get("logits");
}
/** {@inheritDoc} */
@Override
public NDArray logProb(NDArray target) {
NDArray logUnnormalizedProb =
totalCount.mul(logSigmoid(logits.mul(-1))).add(target.mul(logSigmoid(logits)));
NDArray logNormalization =
totalCount
.add(target)
.gammaln()
.mul(-1)
.add(target.add(1).gammaln())
.add(totalCount.gammaln());
return logUnnormalizedProb.sub(logNormalization);
}
/** {@inheritDoc} */
@Override
public NDArray sample(int numSamples) {
NDManager manager = totalCount.getManager();
NDArray expandedTotalCount =
numSamples > 0 ? totalCount.expandDims(0).repeat(0, numSamples) : totalCount;
NDArray expandedLogits =
numSamples > 0 ? logits.expandDims(0).repeat(0, numSamples) : logits;
return manager.samplePoisson(manager.sampleGamma(expandedTotalCount, expandedLogits.exp()));
}
/** {@inheritDoc} */
@Override
public NDArray mean() {
return totalCount.mul(logits.exp());
}
private NDArray logSigmoid(NDArray x) {
return x.mul(-1).exp().add(1).getNDArrayInternal().rdiv(1).log();
}
/**
* Creates a builder to build a {@code NegativeBinomial}.
*
* @return a new builder
*/
public static Builder builder() {
return new Builder();
}
/** The builder to construct a {@code NegativeBinomial}. */
public static final class Builder extends DistributionBuilder<Builder> {
/** {@inheritDoc} */
@Override
public Distribution build() {
Preconditions.checkArgument(
distrArgs.contains("total_count"),
"NegativeBinomial's args must contain total_count.");
Preconditions.checkArgument(
distrArgs.contains("logits"), "NegativeBinomial's args must contain logits.");
// We cannot scale using the affine transformation since negative binomial should return
// integers. Instead we scale the parameters.
if (scale != null) {
                // NDArray.add is not in-place, so keep the shifted logits that it returns
                NDArray logits = distrArgs.get("logits").add(scale.log());
logits.setName("logits");
distrArgs.remove("logits");
distrArgs.add(logits);
}
return new NegativeBinomial(this);
}
/** {@inheritDoc} */
@Override
protected Builder self() {
return this;
}
}
}
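// Sampling note (restating sample() above): the draw is a Gamma-Poisson mixture. With a rate
// lambda ~ Gamma(shape = total_count, scale = exp(logits)) and then k ~ Poisson(lambda), k follows
// NegativeBinomial(total_count, sigmoid(logits)), whose mean is total_count * exp(logits),
// matching mean().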
|
0
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries/distribution/StudentT.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.timeseries.distribution;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDArrays;
import ai.djl.ndarray.NDManager;
import ai.djl.util.Preconditions;
/**
 * Student's t-distribution.
*
 * <p>This distribution has three arguments: {@code mu}, the mean of the distribution, {@code
 * sigma}, the standard deviation (scale), and {@code nu}, the degrees of freedom.
*/
public class StudentT extends Distribution {
private NDArray mu;
private NDArray sigma;
private NDArray nu;
StudentT(Builder builder) {
mu = builder.distrArgs.get("mu");
sigma = builder.distrArgs.get("sigma");
nu = builder.distrArgs.get("nu");
}
/** {@inheritDoc} */
@Override
public NDArray logProb(NDArray target) {
NDArray nup1Half = nu.add(1.).div(2.);
NDArray part1 = nu.getNDArrayInternal().rdiv(1.).mul(target.sub(mu).div(sigma).square());
NDArray z =
nup1Half.gammaln()
.sub(nu.div(2.).gammaln())
.sub(nu.mul(Math.PI).log().mul(0.5))
.sub(sigma.log());
return z.sub(nup1Half.mul(part1.add(1.).log()));
}
/** {@inheritDoc} */
@Override
public NDArray sample(int numSamples) {
NDManager manager = mu.getManager();
NDArray expandedMu = numSamples > 0 ? mu.expandDims(0).repeat(0, numSamples) : mu;
NDArray expandedSigma = numSamples > 0 ? sigma.expandDims(0).repeat(0, numSamples) : sigma;
NDArray expandedNu = numSamples > 0 ? nu.expandDims(0).repeat(0, numSamples) : nu;
NDArray gammas =
manager.sampleGamma(
expandedNu.div(2.),
expandedNu.mul(expandedSigma.square()).getNDArrayInternal().rdiv(2.));
return manager.sampleNormal(expandedMu, gammas.sqrt().getNDArrayInternal().rdiv(1.));
}
/** {@inheritDoc} */
@Override
public NDArray mean() {
return NDArrays.where(nu.gt(1.0), mu, mu.getManager().full(mu.getShape(), Float.NaN));
}
/**
     * Creates a builder to build a {@code StudentT}.
*
* @return a new builder
*/
public static Builder builder() {
return new Builder();
}
    /** The builder to construct a {@code StudentT}. */
public static final class Builder extends DistributionBuilder<Builder> {
/** {@inheritDoc} */
@Override
public Distribution build() {
            Preconditions.checkArgument(
                    distrArgs.contains("mu"), "StudentT's args must contain mu.");
            Preconditions.checkArgument(
                    distrArgs.contains("sigma"), "StudentT's args must contain sigma.");
            Preconditions.checkArgument(
                    distrArgs.contains("nu"), "StudentT's args must contain nu.");
StudentT baseDistr = new StudentT(this);
if (scale == null && loc == null) {
return baseDistr;
}
return new AffineTransformed(baseDistr, loc, scale);
}
/** {@inheritDoc} */
@Override
protected Builder self() {
return this;
}
}
}
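// Density note (restating logProb() above): for Student's t with location mu, scale sigma and nu
// degrees of freedom,
//     log p(x) = lgamma((nu + 1) / 2) - lgamma(nu / 2) - 0.5 * log(nu * pi) - log(sigma)
//                - ((nu + 1) / 2) * log(1 + ((x - mu) / sigma)^2 / nu)
// which matches the term-by-term computation in logProb().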
|
0
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries/distribution/package-info.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
/** Contains classes to support distribution in djl. */
package ai.djl.timeseries.distribution;
|
0
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries/distribution
|
java-sources/ai/djl/timeseries/timeseries/0.34.0/ai/djl/timeseries/distribution/output/ArgProj.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.timeseries.distribution.output;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.AbstractBlock;
import ai.djl.nn.Block;
import ai.djl.nn.core.Linear;
import ai.djl.training.ParameterStore;
import ai.djl.util.Pair;
import ai.djl.util.PairList;
import ai.djl.util.Preconditions;
import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;
/**
* A Block used to map the output of a dense layer to statistical parameters, like mean and standard
* deviation. It will be used in both training and inference.
*/
public final class ArgProj extends AbstractBlock {
private Block domainMap;
private List<Block> proj;
ArgProj(Builder builder) {
proj = new ArrayList<>();
for (Pair<String, Integer> entry : builder.argsDim) {
proj.add(
addChildBlock(
String.format("%s_distr_%s", builder.prefix, entry.getKey()),
Linear.builder().setUnits(entry.getValue()).build()));
}
domainMap =
addChildBlock(String.format("%s_domain_map", builder.prefix), builder.domainMap);
}
/** {@inheritDoc} */
@Override
protected void initializeChildBlocks(
NDManager manager, DataType dataType, Shape... inputShapes) {
for (Block block : proj) {
block.initialize(manager, dataType, inputShapes);
}
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
NDList paramsUnbounded = new NDList();
for (Block block : proj) {
paramsUnbounded.add(
block.forward(parameterStore, inputs, training, params).singletonOrThrow());
}
return domainMap.forward(parameterStore, paramsUnbounded, training, params);
}
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(Shape[] inputShapes) {
Shape[] projOutShapes = new Shape[proj.size()];
for (int i = 0; i < proj.size(); i++) {
projOutShapes[i] = proj.get(i).getOutputShapes(inputShapes)[0];
}
return domainMap.getOutputShapes(projOutShapes);
}
/**
     * Creates a builder to build an {@code ArgProj}.
*
* @return a new builder
*/
public static Builder builder() {
return new Builder();
}
    /** The Builder to construct an {@code ArgProj} type of {@link Block}. */
public static final class Builder {
private PairList<String, Integer> argsDim;
private Function<NDList, NDList> domainMap;
private String prefix = "";
/**
* Set the arguments dimensions of distribution.
*
* @param argsDim the arguments dimension
* @return this builder
*/
public Builder setArgsDim(PairList<String, Integer> argsDim) {
this.argsDim = argsDim;
return this;
}
/**
* Set the domain map function.
*
* @param domainMap the domain map function
* @return this builder
*/
public Builder setDomainMap(Function<NDList, NDList> domainMap) {
this.domainMap = domainMap;
return this;
}
/**
* Set the block name prefix.
*
* @param prefix the prefix
* @return this builder
*/
public Builder optPrefix(String prefix) {
this.prefix = prefix;
return this;
}
/**
         * Build an {@link ArgProj} block.
*
* @return the {@link ArgProj} block.
*/
public ArgProj build() {
Preconditions.checkArgument(argsDim != null, "must specify dim args");
            Preconditions.checkArgument(domainMap != null, "must specify the domainMap function");
return new ArgProj(this);
}
}
}
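/**
 * Construction sketch (illustrative only): projects a decoder output to the three StudentT
 * parameters. The parameter names, dimensions and identity domain map are assumptions for
 * demonstration; a real domain map would constrain e.g. sigma to be positive.
 */
final class ArgProjExample {
    private ArgProjExample() {}
    static ArgProj studentTProjection() {
        PairList<String, Integer> argsDim = new PairList<>();
        argsDim.add("mu", 1);
        argsDim.add("sigma", 1);
        argsDim.add("nu", 1);
        return ArgProj.builder()
                .setArgsDim(argsDim)
                .setDomainMap(ndList -> ndList)
                .optPrefix("studentT")
                .build();
    }
}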
|