index (int64) | repo_id (string) | file_path (string) | content (string)
0 | java-sources/ai/djl/api/0.34.0/ai/djl | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/SparseNDArray.java
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.ndarray;
import ai.djl.ndarray.types.SparseFormat;
/**
* An interface representing a Sparse NDArray.
*
* @see SparseFormat
* @see <a href="https://software.intel.com/en-us/node/471374">Sparse Matrix Storage Formats</a>
*/
public interface SparseNDArray extends NDArray {}
0 | java-sources/ai/djl/api/0.34.0/ai/djl | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/package-info.java
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
/**
* Contains classes and interfaces that define an n-dimensional array.
*
* @see ai.djl.ndarray.NDArray
* @see ai.djl.ndarray.NDManager
*/
package ai.djl.ndarray;
0 | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/index/NDArrayIndexer.java
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.ndarray.index;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDArrays;
import ai.djl.ndarray.index.dim.NDIndexBooleans;
import ai.djl.ndarray.index.dim.NDIndexElement;
import ai.djl.ndarray.index.full.NDIndexFullPick;
import ai.djl.ndarray.index.full.NDIndexFullSlice;
import ai.djl.ndarray.index.full.NDIndexFullTake;
import java.util.List;
import java.util.Optional;
/** A helper class for {@link NDArray} implementations for operations with an {@link NDIndex}. */
public abstract class NDArrayIndexer {
/**
* Returns a subarray by picking the elements.
*
* @param array the array to get from
* @param fullPick the elements to pick
* @return the subArray
*/
public abstract NDArray get(NDArray array, NDIndexFullPick fullPick);
/**
* Returns a subarray by taking the elements from one axis.
*
* @param array the array to get from
* @param fullTake the elements to take
* @return the subArray
*/
public abstract NDArray get(NDArray array, NDIndexFullTake fullTake);
/**
* Returns a subarray at the slice.
*
* @param array the array to get from
* @param fullSlice the fullSlice index of the array
* @return the subArray
*/
public abstract NDArray get(NDArray array, NDIndexFullSlice fullSlice);
/**
* Returns a subarray at the given index.
*
* @param array the array to get from
* @param index the index to get
* @return the subarray
*/
public NDArray get(NDArray array, NDIndex index) {
if (index.getRank() == 0 && array.getShape().isScalar()) {
return array.duplicate();
}
// use booleanMask for NDIndexBooleans case
List<NDIndexElement> indices = index.getIndices();
if (!indices.isEmpty() && indices.get(0) instanceof NDIndexBooleans) {
if (indices.size() != 1) {
throw new IllegalArgumentException(
"get() currently doesn't support more that one boolean NDArray");
}
return array.booleanMask(((NDIndexBooleans) indices.get(0)).getIndex());
}
Optional<NDIndexFullTake> fullTake = NDIndexFullTake.fromIndex(index, array.getShape());
if (fullTake.isPresent()) {
return get(array, fullTake.get());
}
Optional<NDIndexFullPick> fullPick = NDIndexFullPick.fromIndex(index, array.getShape());
if (fullPick.isPresent()) {
return get(array, fullPick.get());
}
Optional<NDIndexFullSlice> fullSlice = NDIndexFullSlice.fromIndex(index, array.getShape());
if (fullSlice.isPresent()) {
return get(array, fullSlice.get());
}
throw new UnsupportedOperationException(
"get() currently supports all, fixed, and slices indices");
}
/**
* Sets the entries of the array at the indexed locations with the given value. The value must be
* either a Number or an NDArray.
*
* @param array the array to set
* @param index the index to set at in the array
* @param value the value to set with
*/
public void set(NDArray array, NDIndex index, Object value) {
NDIndexFullSlice fullSlice =
NDIndexFullSlice.fromIndex(index, array.getShape()).orElse(null);
if (fullSlice != null) {
if (value instanceof Number) {
set(array, fullSlice, (Number) value);
} else if (value instanceof NDArray) {
set(array, fullSlice, (NDArray) value);
} else {
throw new IllegalArgumentException(
"The type of value to assign cannot be other than NDArray and Number.");
}
return;
}
List<NDIndexElement> indices = index.getIndices();
if (!indices.isEmpty() && indices.get(0) instanceof NDIndexBooleans) {
if (indices.size() != 1) {
throw new IllegalArgumentException(
"set() currently doesn't support more than one boolean NDArray");
}
if (value instanceof Number) {
set(
array,
(NDIndexBooleans) indices.get(0),
array.getManager().create((Number) value));
} else if (value instanceof NDArray) {
set(array, (NDIndexBooleans) indices.get(0), (NDArray) value);
} else {
throw new IllegalArgumentException(
"The type of value to assign cannot be other than NDArray and Number.");
}
return;
}
throw new UnsupportedOperationException(
"set() currently supports all, fixed, and slices indices");
}
/**
* Sets the values of the array at the fullSlice with an array.
*
* @param array the array to set
* @param fullSlice the fullSlice of the index to set in the array
* @param value the value to set with
*/
public abstract void set(NDArray array, NDIndexFullSlice fullSlice, NDArray value);
/**
* Sets the values of the array at the boolean locations with an array.
*
* @param array the array to set
* @param indices a boolean array where true indicates values to update
* @param value the value to set with when condition is true
*/
public void set(NDArray array, NDIndexBooleans indices, NDArray value) {
array.intern(NDArrays.where(indices.getIndex(), value, array));
}
/**
* Sets the values of the array at the fullSlice with a number.
*
* @param array the array to set
* @param fullSlice the fullSlice of the index to set in the array
* @param value the value to set with
*/
public abstract void set(NDArray array, NDIndexFullSlice fullSlice, Number value);
/**
* Sets a scalar value in the array at the indexed location.
*
* @param array the array to set
* @param index the index to set at in the array
* @param value the value to set with
* @throws IllegalArgumentException if the index does not point to a scalar value in the array
*/
public void setScalar(NDArray array, NDIndex index, Number value) {
NDIndexFullSlice fullSlice =
NDIndexFullSlice.fromIndex(index, array.getShape()).orElse(null);
if (fullSlice != null) {
if (fullSlice.getShape().size() != 1) {
throw new IllegalArgumentException("The provided index does not set a scalar");
}
set(array, index, value);
return;
}
throw new UnsupportedOperationException(
"set() currently supports all, fixed, and slices indices");
}
}
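The dispatch above reduces an arbitrary NDIndex to a boolean mask, a full take, a full pick, or a full slice. A minimal usage sketch follows; it is an illustrative class, not part of the library, and it assumes an engine (for example PyTorch or MXNet) is on the classpath so that NDManager.newBaseManager() can resolve one.

import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.index.NDIndex;

// Illustrative only: exercises the slice, boolean-mask, and scalar-set paths of NDArrayIndexer.
public final class NDArrayIndexerDemo {
    public static void main(String[] args) {
        try (NDManager manager = NDManager.newBaseManager()) {
            NDArray a = manager.arange(24f).reshape(2, 3, 4);
            // Slice path: resolved through NDIndexFullSlice by the engine's NDArrayIndexer.
            NDArray slice = a.get(new NDIndex(":, 1, :2")); // shape (2, 2)
            // Boolean-mask path: a single boolean index is routed to booleanMask().
            NDArray masked = a.get(new NDIndex().addBooleanIndex(a.gt(10f)));
            // Scalar assignment over a full slice.
            a.set(new NDIndex("0"), 0f);
            System.out.println(slice.getShape() + " " + masked.getShape());
        }
    }
}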
0 | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/index/NDIndex.java
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.ndarray.index;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.index.dim.NDIndexAll;
import ai.djl.ndarray.index.dim.NDIndexBooleans;
import ai.djl.ndarray.index.dim.NDIndexElement;
import ai.djl.ndarray.index.dim.NDIndexFixed;
import ai.djl.ndarray.index.dim.NDIndexNull;
import ai.djl.ndarray.index.dim.NDIndexPick;
import ai.djl.ndarray.index.dim.NDIndexSlice;
import ai.djl.ndarray.index.dim.NDIndexTake;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Stream;
/**
* The {@code NDIndex} allows you to specify a subset of an NDArray that can be used for fetching or
* updating.
*
* <p>It accepts a different index option for each dimension, given in the order of the dimensions.
* Each dimension has options corresponding to:
*
* <ul>
* <li>Return all dimensions - Pass null to addIndices
* <li>A single value in the dimension - Pass the value to addIndices with a negative index -i
* corresponding to [dimensionLength - i]
* <li>A range of values - Use addSliceDim
* </ul>
*
* <p>We recommend creating the NDIndex using {@link #NDIndex(String, Object...)}.
*
* @see #NDIndex(String, Object...)
*/
public class NDIndex {
/* Android regex requires escape } char as well */
private static final Pattern ITEM_PATTERN =
Pattern.compile(
"(\\*)|((-?\\d+|\\{\\})?:(-?\\d+|\\{\\})?(:(-?\\d+|\\{\\}))?)|(-?\\d+|\\{\\})|null");
private int rank;
private List<NDIndexElement> indices;
private int ellipsisIndex;
/** Creates an empty {@link NDIndex} to append values to. */
public NDIndex() {
rank = 0;
indices = new ArrayList<>();
ellipsisIndex = -1;
}
/**
* Creates a {@link NDIndex} given the index values.
*
* <p>Here are some examples of the indices format.
*
* <pre>
* NDArray a = manager.ones(new Shape(5, 4, 3));
*
* // Gets a subsection of the NDArray in the first axis.
* assertEquals(a.get(new NDIndex("2")).getShape(), new Shape(4, 3));
*
* // Gets a subsection of the NDArray indexing from the end (-i == length - i).
* assertEquals(a.get(new NDIndex("-2")).getShape(), new Shape(4, 3));
*
* // Gets everything in the first axis and a subsection in the second axis.
* // You can use either : or * to represent everything
* assertEquals(a.get(new NDIndex(":, 2")).getShape(), new Shape(5, 3));
* assertEquals(a.get(new NDIndex("*, 2")).getShape(), new Shape(5, 3));
*
* // Gets a range of values along the second axis that is inclusive on the bottom and exclusive on the top.
* assertEquals(a.get(new NDIndex(":, 1:3")).getShape(), new Shape(5, 2, 3));
*
* // Excludes either the min or the max of the range to go all the way to the beginning or end.
* assertEquals(a.get(new NDIndex(":, :3")).getShape(), new Shape(5, 3, 3));
* assertEquals(a.get(new NDIndex(":, 1:")).getShape(), new Shape(5, 4, 3));
*
* // Uses the value after the second colon in a slicing range, the step, to get every other result.
* assertEquals(a.get(new NDIndex(":, 1::2")).getShape(), new Shape(5, 2, 3));
*
* // Uses a negative step to reverse along the dimension.
* assertEquals(a.get(new NDIndex("-1")).getShape(), new Shape(5, 4, 3));
*
* // Uses a variable argument to the index
* // It can replace any number in any of these formats with {} and then the value of {}
* // is specified in an argument following the indices string.
* assertEquals(a.get(new NDIndex("{}, {}:{}", 0, 1, 3)).getShape(), new Shape(2, 3));
*
* // Uses ellipsis to insert many full slices
* assertEquals(a.get(new NDIndex("...")).getShape(), new Shape(5, 4, 3));
*
* // Uses ellipsis to select all the dimensions except for last axis where we only get a subsection.
* assertEquals(a.get(new NDIndex("..., 2")).getShape(), new Shape(5, 4));
*
* // Uses null to add an extra axis to the output array
* assertEquals(a.get(new NDIndex(":2, null, 0, :2")).getShape(), new Shape(2, 1, 2));
*
* // Gets entries of an NDArray with mixed index
* index1 = manager.create(new long[] {0, 1, 1}, new Shape(2));
* bool1 = manager.create(new boolean[] {true, false, true});
* assertEquals(a.get(new NDIndex(":{}, {}, {}, {}" 2, index1, bool1, null).getShape(), new Shape(2, 2, 1));
*
* </pre>
*
* @param indices a comma separated list of indices corresponding to either subsections,
* everything, or slices on a particular dimension
* @param args arguments to replace the variable "{}" in the indices string. Can be an integer,
* long, boolean {@link NDArray}, or integer {@link NDArray}.
* @see <a href="https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html">Numpy
* Indexing</a>
*/
public NDIndex(String indices, Object... args) {
this();
addIndices(indices, args);
}
/**
* Creates an NDIndex with the given indices as specified values on the NDArray.
*
* @param indices the indices with each index corresponding to the dimensions and negative
* indices starting from the end
*/
public NDIndex(long... indices) {
this();
addIndices(indices);
}
/**
* Creates an {@link NDIndex} that just has one slice in the given axis.
*
* @param axis the axis to slice
* @param min the min of the slice
* @param max the max of the slice
* @return a new {@link NDIndex} with the given slice.
*/
public static NDIndex sliceAxis(int axis, long min, long max) {
NDIndex ind = new NDIndex();
for (int i = 0; i < axis; i++) {
ind.addAllDim();
}
ind.addSliceDim(min, max);
return ind;
}
/**
* Returns the number of dimensions specified in the Index.
*
* @return the number of dimensions specified in the Index
*/
public int getRank() {
return rank;
}
/**
* Returns the index of the ellipsis.
*
* @return the index of the ellipsis within this index or -1 for none.
*/
public int getEllipsisIndex() {
return ellipsisIndex;
}
/**
* Returns the index affecting the given dimension.
*
* @param dimension the affected dimension
* @return the index affecting the given dimension
*/
public NDIndexElement get(int dimension) {
return indices.get(dimension);
}
/**
* Returns the indices.
*
* @return the indices
*/
public List<NDIndexElement> getIndices() {
return indices;
}
/**
* Updates the NDIndex by appending indices to the array.
*
* @param indices the indices to add similar to {@link #NDIndex(String, Object...)}
* @param args arguments to replace the variable "{}" in the indices string. Can be an integer,
* long, boolean {@link NDArray}, or integer {@link NDArray}.
* @return the updated {@link NDIndex}
* @see #NDIndex(String, Object...)
*/
public final NDIndex addIndices(String indices, Object... args) {
String[] indexItems = indices.split(",");
rank += indexItems.length;
int argIndex = 0;
for (int i = 0; i < indexItems.length; ++i) {
if ("...".equals(indexItems[i].trim())) {
// make sure ellipsis appear only once
if (ellipsisIndex != -1) {
throw new IllegalArgumentException(
"an index can only have a single ellipsis (\"...\")");
}
ellipsisIndex = i;
} else {
argIndex = addIndexItem(indexItems[i], argIndex, args);
}
}
if (ellipsisIndex != -1) {
rank--;
}
if (argIndex != args.length) {
throw new IllegalArgumentException("Incorrect number of index arguments");
}
return this;
}
/**
* Updates the NDIndex by appending indices as specified values on the NDArray.
*
* @param indices with each index corresponding to the dimensions and negative indices starting
* from the end
* @return the updated {@link NDIndex}
*/
public final NDIndex addIndices(long... indices) {
rank += indices.length;
for (long i : indices) {
this.indices.add(new NDIndexFixed(i));
}
return this;
}
/**
* Updates the NDIndex by appending a boolean NDArray.
*
* <p>The NDArray should have a matching shape to the dimensions being fetched and will return
* where the values in NDIndex do not equal zero.
*
* @param index a boolean NDArray where all nonzero elements correspond to elements to return
* @return the updated {@link NDIndex}
*/
public NDIndex addBooleanIndex(NDArray index) {
rank += index.getShape().dimension();
indices.add(new NDIndexBooleans(index));
return this;
}
/**
* Appends an ellipsis index at the current dimension.
*
* @return the updated {@link NDIndex}
*/
public NDIndex addEllipseDim() {
ellipsisIndex = indices.size();
return this;
}
/**
* Appends a new index to get all values in the dimension.
*
* @return the updated {@link NDIndex}
*/
public NDIndex addAllDim() {
rank++;
indices.add(new NDIndexAll());
return this;
}
/**
* Appends multiple new indices to get all values in the corresponding dimensions.
*
* @param count how many axes of {@link NDIndexAll} to add.
* @return the updated {@link NDIndex}
* @throws IllegalArgumentException if count is negative
*/
public NDIndex addAllDim(int count) {
if (count < 0) {
throw new IllegalArgumentException(
"The number of index dimensions to add can't be negative");
}
rank += count;
for (int i = 0; i < count; i++) {
indices.add(new NDIndexAll());
}
return this;
}
/**
* Appends a new index to slice the dimension and returns a range of values.
*
* @param min the minimum of the range
* @param max the maximum of the range
* @return the updated {@link NDIndex}
*/
public NDIndex addSliceDim(long min, long max) {
rank++;
indices.add(new NDIndexSlice(min, max, null));
return this;
}
/**
* Appends a new index to slice the dimension and returns a range of values.
*
* @param min the minimum of the range
* @param max the maximum of the range
* @param step the step of the slice
* @return the updated {@link NDIndex}
*/
public NDIndex addSliceDim(long min, long max, long step) {
rank++;
indices.add(new NDIndexSlice(min, max, step));
return this;
}
/**
* Appends a picking index that gets values by index in the axis.
*
* @param index an NDArray of indices. For each element in the indices array, it acts
* like a fixed index returning an element of that shape. So, the final shape would be
* indices.getShape().addAll(target.getShape().slice(1)) (assuming it is the first index
* element).
* @return the updated {@link NDIndex}
*/
public NDIndex addPickDim(NDArray index) {
rank++;
indices.add(new NDIndexPick(index));
return this;
}
/**
* Returns a stream of the NDIndexElements.
*
* @return a stream of the NDIndexElements
*/
public Stream<NDIndexElement> stream() {
return indices.stream();
}
private int addIndexItem(String indexItem, int argIndex, Object[] args) {
indexItem = indexItem.trim();
Matcher m = ITEM_PATTERN.matcher(indexItem);
if (!m.matches()) {
throw new IllegalArgumentException("Invalid argument index: " + indexItem);
}
// "null" case
if ("null".equals(indexItem)) {
indices.add(new NDIndexNull());
return argIndex;
}
// "*" case
String star = m.group(1);
if (star != null) {
indices.add(new NDIndexAll());
return argIndex;
}
// "number" number only case
String digit = m.group(7);
if (digit != null) {
if ("{}".equals(digit)) {
Object arg = args[argIndex];
if (arg instanceof Integer) {
indices.add(new NDIndexFixed((Integer) arg));
return argIndex + 1;
} else if (arg instanceof Long) {
indices.add(new NDIndexFixed((Long) arg));
return argIndex + 1;
} else if (arg instanceof NDArray) {
NDArray array = (NDArray) arg;
if (array.getDataType().isBoolean()) {
indices.add(new NDIndexBooleans(array));
return argIndex + 1;
} else if (array.getDataType().isInteger()
|| array.getDataType().isFloating()) {
indices.add(new NDIndexTake(array));
return argIndex + 1;
}
} else if (arg == null) {
indices.add(new NDIndexNull());
return argIndex + 1;
}
throw new IllegalArgumentException("Unknown argument: " + arg);
} else {
indices.add(new NDIndexFixed(Long.parseLong(digit)));
return argIndex;
}
}
// Slice
Long min = null;
Long max = null;
Long step = null;
if (m.group(3) != null) {
min = parseSliceItem(m.group(3), argIndex, args);
if ("{}".equals(m.group(3))) {
argIndex++;
}
}
if (m.group(4) != null) {
max = parseSliceItem(m.group(4), argIndex, args);
if ("{}".equals(m.group(4))) {
argIndex++;
}
}
if (m.group(6) != null) {
step = parseSliceItem(m.group(6), argIndex, args);
if ("{}".equals(m.group(6))) {
argIndex++;
}
}
if (min == null && max == null && step == null) {
indices.add(new NDIndexAll());
} else {
indices.add(new NDIndexSlice(min, max, step));
}
return argIndex;
}
private Long parseSliceItem(String sliceItem, int argIndex, Object... args) {
if ("{}".equals(sliceItem)) {
Object arg = args[argIndex];
if (arg instanceof Integer) {
return ((Integer) arg).longValue();
} else if (arg instanceof Long) {
return (Long) arg;
}
throw new IllegalArgumentException("Unknown slice argument: " + arg);
} else {
return Long.parseLong(sliceItem);
}
}
}
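Beyond the string form documented above, the same selections can be built programmatically. A brief sketch, assuming the manager.ones(new Shape(5, 4, 3)) array from the javadoc example:

NDIndex byString = new NDIndex(":, 1:3");                        // all of axis 0, slice 1..3 on axis 1
NDIndex byBuilder = new NDIndex().addAllDim().addSliceDim(1, 3); // the same selection, built up
NDIndex bySlice = NDIndex.sliceAxis(1, 1, 3);                    // pads axis 0 with "all", then slices axis 1
// a.get(byString), a.get(byBuilder) and a.get(bySlice) all return an array of shape (5, 2, 3).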
0 | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/index/package-info.java
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
/**
* Contains classes that help access {@link ai.djl.ndarray.NDArray}'s indices.
*
* @see ai.djl.ndarray.index.NDIndex
*/
package ai.djl.ndarray.index;
0 | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/index | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/index/dim/NDIndexAll.java
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.ndarray.index.dim;
/** An {@code NDIndexElement} to return all values in a particular dimension. */
public class NDIndexAll implements NDIndexElement {
/** {@inheritDoc} */
@Override
public int getRank() {
return 1;
}
}
0 | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/index | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/index/dim/NDIndexBooleans.java
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.ndarray.index.dim;
import ai.djl.ndarray.NDArray;
/** An {@code NDIndexElement} to return values based on a mask binary NDArray. */
public class NDIndexBooleans implements NDIndexElement {
private NDArray index;
/**
* Constructs a {@code NDIndexBooleans} instance with specified mask binary NDArray.
*
* @param index the mask binary {@code NDArray}
*/
public NDIndexBooleans(NDArray index) {
this.index = index;
}
/**
* Returns the mask binary {@code NDArray}.
*
* @return the mask binary {@code NDArray}
*/
public NDArray getIndex() {
return index;
}
/** {@inheritDoc} */
@Override
public int getRank() {
return index.getShape().dimension();
}
}
0 | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/index | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/index/dim/NDIndexElement.java
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.ndarray.index.dim;
/** An index for particular dimensions created by NDIndex. */
public interface NDIndexElement {
/**
* Returns the number of dimensions occupied by this index element.
*
* @return the number of dimensions occupied by this index element
*/
int getRank();
}
0 | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/index | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/index/dim/NDIndexFixed.java
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.ndarray.index.dim;
/** An NDIndexElement that returns only a specific value in the corresponding dimension. */
public class NDIndexFixed implements NDIndexElement {
private long index;
/**
* Constructs a {@code NDIndexFixed} instance with specified dimension.
*
* @param index the dimension of the NDArray
*/
public NDIndexFixed(long index) {
this.index = index;
}
/**
* Returns the dimension of the index.
*
* @return the dimension of the index
*/
public long getIndex() {
return index;
}
/** {@inheritDoc} */
@Override
public int getRank() {
return 1;
}
}
0 | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/index | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/index/dim/NDIndexNull.java
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.ndarray.index.dim;
/** An {@code NDIndexElement} that inserts a new axis of size 1 at the corresponding position. */
public class NDIndexNull implements NDIndexElement {
/** {@inheritDoc} */
@Override
public int getRank() {
return 1;
}
}
0 | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/index | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/index/dim/NDIndexPick.java
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.ndarray.index.dim;
import ai.djl.ndarray.NDArray;
/** An {@link NDIndexElement} that gets elements by index in the specified axis. */
public class NDIndexPick implements NDIndexElement {
private NDArray index;
/**
* Constructs a pick.
*
* @param index the index to pick
*/
public NDIndexPick(NDArray index) {
this.index = index;
}
/** {@inheritDoc} */
@Override
public int getRank() {
return 1;
}
/**
* Returns the index to pick.
*
* @return the index to pick
*/
public NDArray getIndex() {
return index;
}
}
0 | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/index | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/index/dim/NDIndexSlice.java
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.ndarray.index.dim;
/** An NDIndexElement that returns a range of values in the specified dimension. */
public class NDIndexSlice implements NDIndexElement {
private Long min;
private Long max;
private Long step;
/**
* Constructs a {@code NDIndexSlice} instance with specified range and step.
*
* @param min the start of the range
* @param max the end of the range
* @param step the step between each slice
* @throws IllegalArgumentException Thrown if the step is zero
*/
public NDIndexSlice(Long min, Long max, Long step) {
this.min = min;
this.max = max;
this.step = step;
if (step != null && step == 0) {
throw new IllegalArgumentException("The step can not be zero");
}
}
/**
* Returns the start of the range.
*
* @return the start of the range
*/
public Long getMin() {
return min;
}
/**
* Returns the end of the range.
*
* @return the end of the range
*/
public Long getMax() {
return max;
}
/**
* Returns the step between each slice.
*
* @return the step between each slice
*/
public Long getStep() {
return step;
}
/** {@inheritDoc} */
@Override
public int getRank() {
return 1;
}
}
0 | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/index | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/index/dim/NDIndexTake.java
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.ndarray.index.dim;
import ai.djl.ndarray.NDArray;
/** An {@link NDIndexElement} that gets elements by index in the specified axis. */
public class NDIndexTake implements NDIndexElement {
private NDArray index;
/**
* Constructs a take.
*
* @param index the index to take
*/
public NDIndexTake(NDArray index) {
this.index = index;
}
/** {@inheritDoc} */
@Override
public int getRank() {
return 1;
}
/**
* Returns the index to take.
*
* @return the index to take
*/
public NDArray getIndex() {
return index;
}
}
0 | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/index | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/index/dim/package-info.java
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
/**
* Contains classes that represent an index element in a {@link ai.djl.ndarray.NDArray}'s indices.
*
* <p>Contains the main interface {@link ai.djl.ndarray.index.dim.NDIndexElement} and various
* implementations.
*
* @see ai.djl.ndarray.index.dim.NDIndexElement
*/
package ai.djl.ndarray.index.dim;
0 | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/index | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/index/full/NDIndexFullPick.java
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.ndarray.index.full;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.index.NDIndex;
import ai.djl.ndarray.index.dim.NDIndexAll;
import ai.djl.ndarray.index.dim.NDIndexElement;
import ai.djl.ndarray.index.dim.NDIndexPick;
import ai.djl.ndarray.types.Shape;
import java.util.Optional;
/** A simplified representation of a pick-based {@link NDIndex}. */
public final class NDIndexFullPick {
private NDArray indices;
private int axis;
/**
* Constructs a new {@link NDIndexFullPick}.
*
* @param indices the indices to pick
* @param axis the axis to pick at
*/
private NDIndexFullPick(NDArray indices, int axis) {
this.indices = indices;
this.axis = axis;
}
/**
* Returns (if possible) the {@link NDIndexFullPick} representation of an {@link NDIndex}.
*
* @param index the index to represent
* @param target the shape of the array to index
* @return the full pick representation or nothing if it can't represent the index
*/
public static Optional<NDIndexFullPick> fromIndex(NDIndex index, Shape target) {
int axis = 0;
NDIndexFullPick fullPick = null;
for (NDIndexElement el : index.getIndices()) {
if (el instanceof NDIndexAll) {
axis++;
} else if (el instanceof NDIndexPick) {
if (fullPick != null) {
// Don't support multiple picks
throw new UnsupportedOperationException(
"Only one pick per get is currently supported. Check if the array index"
+ " is supposed to be boolean index. If so, remember to change the"
+ " datatype of index to boolean. Or you can explicitly do new"
+ " NDIndex().addBooleanIndex(array)");
}
NDArray indexElem = ((NDIndexPick) el).getIndex();
fullPick = new NDIndexFullPick(indexElem, axis);
} else {
// Invalid dim for fullPick
return Optional.empty();
}
}
return Optional.ofNullable(fullPick);
}
/**
* Returns the indices to pick.
*
* @return the indices to pick
*/
public NDArray getIndices() {
return indices;
}
/**
* Returns the axis to pick.
*
* @return the axis to pick
*/
public int getAxis() {
return axis;
}
}
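A short sketch of the conversion performed by fromIndex, assuming an NDManager named manager is in scope and the target array has shape (5, 4, 3):

NDIndex index = new NDIndex().addAllDim().addPickDim(manager.create(new long[] {0, 2, 1}));
Optional<NDIndexFullPick> pick = NDIndexFullPick.fromIndex(index, new Shape(5, 4, 3));
// pick.get().getAxis() == 1; an index containing any fixed or slice element instead yields Optional.empty().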
0 | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/index | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/index/full/NDIndexFullSlice.java
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.ndarray.index.full;
import ai.djl.ndarray.index.NDIndex;
import ai.djl.ndarray.index.dim.NDIndexAll;
import ai.djl.ndarray.index.dim.NDIndexElement;
import ai.djl.ndarray.index.dim.NDIndexFixed;
import ai.djl.ndarray.index.dim.NDIndexSlice;
import ai.djl.ndarray.types.Shape;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
/** An index as a slice on all dimensions where some dimensions can be squeezed. */
public final class NDIndexFullSlice {
private long[] min;
private long[] max;
private long[] step;
private int[] toSqueeze;
private Shape shape;
private Shape squeezedShape;
/**
* Constructs a {@link NDIndexFullSlice}.
*
* @param min the min for each axis
* @param max the max for each axis
* @param step the step for each axis
* @param toSqueeze the axes to squeeze after slicing
* @param shape the result shape (without squeezing)
* @param squeezedShape the result shape (with squeezing)
*/
private NDIndexFullSlice(
long[] min,
long[] max,
long[] step,
int[] toSqueeze,
Shape shape,
Shape squeezedShape) {
this.min = min;
this.max = max;
this.step = step;
this.toSqueeze = toSqueeze;
this.shape = shape;
this.squeezedShape = squeezedShape;
}
/**
* Returns (if possible) the {@link NDIndexFullSlice} representation of an {@link NDIndex}.
*
* @param index the index to represent
* @param target the shape of the array to index
* @return the full slice representation or nothing if it can't represent the index
*/
public static Optional<NDIndexFullSlice> fromIndex(NDIndex index, Shape target) {
if (!index.stream()
.allMatch(
ie ->
ie instanceof NDIndexAll
|| ie instanceof NDIndexFixed
|| ie instanceof NDIndexSlice)) {
return Optional.empty();
}
int ellipsisIndex = index.getEllipsisIndex();
int indDimensions = index.getRank();
int targetDimensions = target.dimension();
if (indDimensions > target.dimension()) {
throw new IllegalArgumentException(
"The index has too many dimensions - "
+ indDimensions
+ " dimensions for array with "
+ targetDimensions
+ " dimensions");
}
long[] min = new long[targetDimensions];
long[] max = new long[targetDimensions];
long[] step = new long[targetDimensions];
List<Integer> toSqueeze = new ArrayList<>(targetDimensions);
long[] shape = new long[targetDimensions];
List<Long> squeezedShape = new ArrayList<>(targetDimensions);
if (ellipsisIndex == -1 || ellipsisIndex == indDimensions) {
// ellipsis in the end and non ellipsis case
for (int i = 0; i < indDimensions; i++) {
NDIndexElement ie = index.get(i);
addSliceInfo(ie, i, target, min, max, step, toSqueeze, shape, squeezedShape);
}
for (int i = indDimensions; i < target.dimension(); i++) {
padIndexAll(i, target, min, max, step, shape, squeezedShape);
}
} else if (ellipsisIndex == 0) {
// ellipsis in the beginning
int paddingDim = targetDimensions - indDimensions;
int i;
for (i = 0; i < paddingDim; ++i) {
padIndexAll(i, target, min, max, step, shape, squeezedShape);
}
for (; i < targetDimensions; ++i) {
NDIndexElement ie = index.get(i - paddingDim);
addSliceInfo(ie, i, target, min, max, step, toSqueeze, shape, squeezedShape);
}
} else {
// ellipsis in the middle
int paddingDim = targetDimensions - indDimensions;
int i;
for (i = 0; i < ellipsisIndex; ++i) {
NDIndexElement ie = index.get(i);
addSliceInfo(ie, i, target, min, max, step, toSqueeze, shape, squeezedShape);
}
for (; i < paddingDim + ellipsisIndex; ++i) {
padIndexAll(i, target, min, max, step, shape, squeezedShape);
}
for (; i < targetDimensions; ++i) {
NDIndexElement ie = index.get(i - paddingDim);
addSliceInfo(ie, i, target, min, max, step, toSqueeze, shape, squeezedShape);
}
}
int[] squeeze = toSqueeze.stream().mapToInt(i -> i).toArray();
NDIndexFullSlice fullSlice =
new NDIndexFullSlice(
min, max, step, squeeze, new Shape(shape), new Shape(squeezedShape));
return Optional.of(fullSlice);
}
private static void addSliceInfo(
NDIndexElement ie,
int i,
Shape target,
long[] min,
long[] max,
long[] step,
List<Integer> toSqueeze,
long[] shape,
List<Long> squeezedShape) {
if (ie instanceof NDIndexFixed) {
NDIndexFixed fixed = ((NDIndexFixed) ie);
long rawIndex = fixed.getIndex();
min[i] = rawIndex < 0 ? Math.floorMod(rawIndex, target.get(i)) : rawIndex;
max[i] = min[i] + 1;
step[i] = 1;
toSqueeze.add(i);
shape[i] = 1;
} else if (ie instanceof NDIndexSlice) {
NDIndexSlice slice = (NDIndexSlice) ie;
long rawMin = Optional.ofNullable(slice.getMin()).orElse(0L);
min[i] = rawMin < 0 ? Math.floorMod(rawMin, target.get(i)) : rawMin;
long rawMax = Optional.ofNullable(slice.getMax()).orElse(target.size(i));
max[i] = rawMax < 0 ? Math.floorMod(rawMax, target.get(i)) : rawMax;
step[i] = Optional.ofNullable(slice.getStep()).orElse(1L);
shape[i] = (long) Math.ceil(((double) (max[i] - min[i])) / step[i]);
squeezedShape.add(shape[i]);
} else if (ie instanceof NDIndexAll) {
padIndexAll(i, target, min, max, step, shape, squeezedShape);
}
}
private static void padIndexAll(
int i,
Shape target,
long[] min,
long[] max,
long[] step,
long[] shape,
List<Long> squeezedShape) {
min[i] = 0;
max[i] = target.size(i);
step[i] = 1;
shape[i] = target.size(i);
squeezedShape.add(target.size(i));
}
/**
* Returns the slice min for each axis.
*
* @return the slice min for each axis
*/
public long[] getMin() {
return min;
}
/**
* Returns the slice max for each axis.
*
* @return the slice max for each axis
*/
public long[] getMax() {
return max;
}
/**
* Returns the slice step for each axis.
*
* @return the slice step for each axis
*/
public long[] getStep() {
return step;
}
/**
* Returns the squeeze array of axis.
*
* @return the squeeze array of axis
*/
public int[] getToSqueeze() {
return toSqueeze;
}
/**
* Returns the slice shape without squeezing.
*
* @return the slice shape without squeezing
*/
public Shape getShape() {
return shape;
}
/**
* Returns the slice shape with squeezing.
*
* @return the slice shape with squeezing
*/
public Shape getSqueezedShape() {
return squeezedShape;
}
}
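A worked sketch of the slice arithmetic above, for a target shape of (5, 4, 3):

NDIndexFullSlice fs = NDIndexFullSlice.fromIndex(new NDIndex("-2, 1:4:2"), new Shape(5, 4, 3)).get();
// axis 0: fixed -2 wraps to min=3, max=4, step=1 and is marked for squeezing
// axis 1: min=1, max=4, step=2, so ceil((4 - 1) / 2) = 2 elements
// axis 2: padded as a full slice 0:3
// fs.getShape() is (1, 2, 3) and fs.getSqueezedShape() is (2, 3)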
0 | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/index | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/index/full/NDIndexFullTake.java
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.ndarray.index.full;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.index.NDIndex;
import ai.djl.ndarray.index.dim.NDIndexAll;
import ai.djl.ndarray.index.dim.NDIndexElement;
import ai.djl.ndarray.index.dim.NDIndexTake;
import ai.djl.ndarray.types.Shape;
import java.util.Optional;
/** A simplified representation of a take-based {@link NDIndex}. */
public final class NDIndexFullTake {
private NDArray indices;
private int axis;
/**
* Constructs a new {@link NDIndexFullTake}.
*
* @param indices the indices to take
* @param axis the axis to take at
*/
private NDIndexFullTake(NDArray indices, int axis) {
this.indices = indices;
this.axis = axis;
}
/**
* Returns (if possible) the {@link NDIndexFullTake} representation of an {@link NDIndex}.
*
* @param index the index to represent
* @param target the shape of the array to index
* @return the full take representation or nothing if it can't represent the index
*/
public static Optional<NDIndexFullTake> fromIndex(NDIndex index, Shape target) {
int axis = 0;
NDIndexFullTake fullTake = null;
for (NDIndexElement el : index.getIndices()) {
if (el instanceof NDIndexAll) {
axis++;
} else if (el instanceof NDIndexTake) {
if (fullTake != null) {
// Don't support multiple takes
throw new UnsupportedOperationException(
"Only one take per get is currently supported.If this is triggered by"
+ " array NDIndex: get(NDIndex array), then you should be aware of"
+ " the following changes. 1. previously this was equivalent to"
+ " .get(new NDIndex().addPickDim(array)), but now equivalent to"
+ " .take(array). So please check if you want to restore the"
+ " previous behaviour ie .get(new NDIndex().addPickDim(array)). If"
+ " so do it explicitly. 2. Check if the array index is supposed to"
+ " be boolean index. If so, remember to change the datatype of"
+ " index to boolean. Or you can explicitly do new"
+ " NDIndex().addBooleanIndex(array)");
}
NDArray indexElem = ((NDIndexTake) el).getIndex();
if (!indexElem.getShape().isRankOne()) {
throw new UnsupportedOperationException(
"Only rank-1 indexing array is supported for take. If this is triggered"
+ " by array NDIndex: get(NDIndex array), then you should be aware"
+ " of the following changes. 1. previously this was equivalent to"
+ " .get(new NDIndex().addPickDim(array)), but now equivalent to"
+ " .take(array). So please check if you want to restore the"
+ " previous behaviour ie .get(new NDIndex().addPickDim(array)). If"
+ " so do it explicitly. 2. Check if the array index is supposed to"
+ " be boolean index. If so, remember to change the datatype of"
+ " index to boolean. Or you can explicitly do new"
+ " NDIndex().addBooleanIndex(array)");
}
fullTake = new NDIndexFullTake(indexElem, axis);
} else {
// Invalid dim for fullTake
return Optional.empty();
}
}
return Optional.ofNullable(fullTake);
}
/**
* Returns the indices to take.
*
* @return the indices to take
*/
public NDArray getIndices() {
return indices;
}
/**
* Returns the axis to take.
*
* @return the axis to take
*/
public int getAxis() {
return axis;
}
}
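As a sketch of the take conversion, assuming an NDManager named manager is in scope:

NDIndex index = new NDIndex(":, {}", manager.create(new long[] {0, 2}));
Optional<NDIndexFullTake> take = NDIndexFullTake.fromIndex(index, new Shape(5, 4, 3));
// take.get().getAxis() == 1; an index array of rank greater than one triggers the exception above,
// while any other element type makes fromIndex return Optional.empty().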
0 | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/index | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/index/full/package-info.java
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
/**
* Contains classes that represent simplified representations of an {@link ai.djl.ndarray.NDArray}'s
* indices.
*
* <p>The typical use case is to try to convert to a simplified representation and then the Engines
* will design their array getting and setting based off these representations.
*
* @see ai.djl.ndarray.index.NDIndex
*/
package ai.djl.ndarray.index.full;
0 | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray | java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/internal/NDArrayEx.java
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.ndarray.internal;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.index.NDArrayIndexer;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import ai.djl.ndarray.types.SparseFormat;
import ai.djl.nn.Activation;
import ai.djl.nn.recurrent.RNN;
import java.util.List;
/** An internal interface that encapsulates engine specific operations. */
@SuppressWarnings("MissingJavadocMethod")
public interface NDArrayEx {
/*
// NDArrays
*/
/**
* Applies reverse division with a scalar - i.e., (n / thisArrayValues).
*
* @param n the value to use for reverse division
* @return a copy of the array after applying reverse division
*/
default NDArray rdiv(Number n) {
NDArray array = getArray();
NDArray b = array.getManager().create(n).toType(array.getDataType(), false);
return rdiv(b);
}
/**
* Applies reverse division with a scalar - i.e., (n / thisArrayValues).
*
* @param b the ndarray to use for reverse division
* @return a copy of the array after applying reverse division
*/
default NDArray rdiv(NDArray b) {
return b.div(getArray());
}
/**
* Applies in place reverse division - i.e., (n / thisArrayValues).
*
* @param n the value to use for reverse division
* @return this array after applying reverse division
*/
default NDArray rdivi(Number n) {
NDArray array = getArray();
NDArray b = array.getManager().create(n).toType(array.getDataType(), false);
return rdivi(b);
}
/**
* Applies in place reverse division - i.e., (n / thisArrayValues).
*
* @param b the ndarray to use for reverse division
* @return this array after applying reverse division
*/
NDArray rdivi(NDArray b);
/**
* Applies reverse subtraction on a copy of the array - i.e., (n - thisArrayValues).
*
* @param n the value to use for reverse subtraction
* @return a copy of array after reverse subtraction
*/
default NDArray rsub(Number n) {
return getArray().sub(n).neg();
}
/**
* Applies reverse subtraction on a copy of the array - i.e., (n - thisArrayValues).
*
* @param b the ndarray to use for reverse subtraction
* @return a copy of the array after reverse subtraction
*/
default NDArray rsub(NDArray b) {
return b.sub(getArray());
}
/**
* Applies reverse subtraction in place - i.e., (n - thisArrayValues).
*
* @param n the value to use for reverse subtraction
* @return this array after reverse subtraction
*/
default NDArray rsubi(Number n) {
return getArray().subi(n).negi();
}
/**
* Applies reverse subtraction in place - i.e., (n - thisArrayValues).
*
* @param b the ndarray to use for reverse subtraction
* @return this array after reverse subtraction
*/
default NDArray rsubi(NDArray b) {
return getArray().subi(b).negi();
}
/**
* Applies reverse remainder of division with a scalar.
*
* @param n the value to use for reverse division
* @return a copy of array after applying reverse division
*/
default NDArray rmod(Number n) {
NDArray array = getArray();
NDArray b = array.getManager().create(n).toType(array.getDataType(), false);
return rmod(b);
}
/**
* Applies reverse remainder of division.
*
* @param b the ndarray to use for reverse division
* @return a copy of array after applying reverse division
*/
default NDArray rmod(NDArray b) {
return b.mod(getArray());
}
/**
* Applies in place reverse remainder of division with a scalar.
*
* @param n the value to use for reverse division
* @return this array after applying reverse division
*/
default NDArray rmodi(Number n) {
NDArray array = getArray();
NDArray b = array.getManager().create(n).toType(array.getDataType(), false);
return rmodi(b);
}
/**
* Applies in place reverse remainder of division.
*
* @param b the ndarray to use for reverse division
* @return this array after applying reverse division
*/
NDArray rmodi(NDArray b);
/**
* Applies reverse power with a scalar - i.e., raises n to the power of each element in the {@code NDArray}.
*
* @param n the value to use for reverse power
* @return a copy of array after applying reverse power
*/
default NDArray rpow(Number n) {
NDArray array = getArray();
NDArray b = array.getManager().create(n).toType(array.getDataType(), false);
return b.pow(array);
}
/**
* Applies reverse power with a scalar in place - i.e., raises n to the power of each element in the {@code NDArray}.
*
* @param n the value to use for reverse power
* @return this array after applying reverse power
*/
NDArray rpowi(Number n);
/*
// Activations
*/
/**
* Computes rectified linear activation.
*
* @return a copy of array after applying relu
*/
NDArray relu();
NDArray sigmoid();
NDArray tanh();
NDArray softPlus();
NDArray softSign();
NDArray leakyRelu(float alpha);
NDArray elu(float alpha);
NDArray selu();
NDArray gelu();
default NDArray swish(float beta) {
return Activation.sigmoid(getArray().mul(beta)).mul(getArray());
}
default NDArray mish() {
return getArray().exp().add(1).log2().tanh().mul(getArray());
}
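// Note on the defaults above: swish(beta) computes x * sigmoid(beta * x), and this mish default
// composes x * tanh(log2(1 + exp(x))) from the same primitives.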
/*
// Pooling Operations
*/
NDArray maxPool(Shape kernelShape, Shape stride, Shape padding, boolean ceilMode);
NDArray globalMaxPool();
NDArray avgPool(
Shape kernelShape,
Shape stride,
Shape padding,
boolean ceilMode,
boolean countIncludePad);
NDArray globalAvgPool();
NDArray lpPool(
float normType, Shape kernelShape, Shape stride, Shape padding, boolean ceilMode);
NDArray globalLpPool(float normType);
/*
// Optimizer
*/
void adadeltaUpdate(
NDList inputs,
NDList weights,
float weightDecay,
float rescaleGrad,
float clipGrad,
float rho,
float epsilon);
void adagradUpdate(
NDList inputs,
NDList weights,
float learningRate,
float weightDecay,
float rescaleGrad,
float clipGrad,
float epsilon);
void adamUpdate(
NDList inputs,
NDList weights,
float learningRate,
float learningRateBiasCorrection,
float weightDecay,
float rescaleGrad,
float clipGrad,
float beta1,
float beta2,
float epsilon,
boolean lazyUpdate,
boolean adamw);
void nagUpdate(
NDList inputs,
NDList weights,
float learningRate,
float weightDecay,
float rescaleGrad,
float clipGrad,
float momentum);
void rmspropUpdate(
NDList inputs,
NDList weights,
float learningRate,
float weightDecay,
float rescaleGrad,
float clipGrad,
float rho,
float momentum,
float epsilon,
boolean centered);
void sgdUpdate(
NDList inputs,
NDList weights,
float learningRate,
float weightDecay,
float rescaleGrad,
float clipGrad,
float momentum,
boolean lazyUpdate);
/*
// Neural network
*/
NDList convolution(
NDArray input,
NDArray weight,
NDArray bias,
Shape stride,
Shape padding,
Shape dilation,
int groups);
NDList deconvolution(
NDArray input,
NDArray weight,
NDArray bias,
Shape stride,
Shape padding,
Shape outPadding,
Shape dilation,
int groups);
NDList linear(NDArray input, NDArray weight, NDArray bias);
NDList embedding(NDArray input, NDArray weight, SparseFormat sparse);
NDList prelu(NDArray input, NDArray alpha);
NDList dropout(NDArray input, float rate, boolean training);
NDList layerNorm(NDArray input, Shape normalizedShape, NDArray gamma, NDArray beta, float eps);
NDList batchNorm(
NDArray input,
NDArray runningMean,
NDArray runningVar,
NDArray gamma,
NDArray beta,
int axis,
float momentum,
float eps,
boolean training);
/**
* Applies RNN operation to input data.
*
* @param input the inputs to the recurrent operation.
* @param state the hidden state to the recurrent operation.
* @param params all params (weights and biases) for the recurrent operation
* @param hasBiases If false, then the recurrent operation does not use bias weights b_ih and
* b_hh
* @param numLayers the number of recurrent layers.
* @param activation the activation function to use
* @param dropRate If non-zero, introduces a Dropout layer on the outputs of each RNN layer
* except the last layer, with dropout probability equal to dropout
* @param training apply dropout if is true
* @param bidirectional If true, becomes a bidirectional RNN
* @param batchFirst If true, then the input and output NDArray are provided as (batch, seq,
* feature)
* @return the output of the operation
*/
NDList rnn(
NDArray input,
NDArray state,
NDList params,
boolean hasBiases,
int numLayers,
RNN.Activation activation,
double dropRate,
boolean training,
boolean bidirectional,
boolean batchFirst);
/**
* Applies GRU operation to input data.
*
* @param input the inputs to the GRU operation.
* @param state the hidden state to the GRU operation.
* @param params all params (weights and biases) for the GRU operation
* @param hasBiases If false, then the recurrent operation does not use bias weights b_ih and
* b_hh
* @param numLayers the number of recurrent layers.
* @param dropRate If non-zero, introduces a Dropout layer on the outputs of each GRU layer
* except the last layer, with dropout probability equal to dropout
* @param training apply dropout if is true
* @param bidirectional If true, becomes a bidirectional GRU
* @param batchFirst If true, then the input and output NDArray are provided as (batch, seq,
* feature)
* @return the output of the operation
*/
NDList gru(
NDArray input,
NDArray state,
NDList params,
boolean hasBiases,
int numLayers,
double dropRate,
boolean training,
boolean bidirectional,
boolean batchFirst);
    /**
     * Applies LSTM operation to input data.
     *
     * @param input the inputs to the LSTM operation
     * @param states the hidden state and cell state to the LSTM operation
     * @param params all params (weights and biases) for the LSTM operation
     * @param hasBiases if false, the recurrent operation does not use bias weights b_ih and b_hh
     * @param numLayers the number of recurrent layers
     * @param dropRate if non-zero, introduces a Dropout layer on the outputs of each LSTM layer
     *     except the last layer, with dropout probability equal to dropRate
     * @param training whether to apply dropout (true during training)
     * @param bidirectional if true, becomes a bidirectional LSTM
     * @param batchFirst if true, the input and output NDArrays are provided as (batch, seq,
     *     feature)
     * @return the output of the operation
     */
NDList lstm(
NDArray input,
NDList states,
NDList params,
boolean hasBiases,
int numLayers,
double dropRate,
boolean training,
boolean bidirectional,
boolean batchFirst);
/*
// Image and CV
*/
    /**
     * Normalizes an NDArray of shape CHW or NCHW with mean and standard deviation.
     *
     * <p>Given mean {@code (m1, ..., mn)} and std {@code (s1, ..., sn)} for {@code n} channels,
     * this transform normalizes each channel of the input tensor with: {@code output[i] =
     * (input[i] - mi) / si}.
     *
     * @param mean the mean value for each channel
     * @param std the standard deviation for each channel
     * @return the result of normalization
     */
default NDArray normalize(float[] mean, float[] std) {
NDManager manager = getArray().getManager();
int dim = getArray().getShape().dimension();
Shape shape = (dim == 3) ? new Shape(3, 1, 1) : new Shape(1, 3, 1, 1);
try (NDArray meanArr = manager.create(mean, shape);
NDArray stdArr = manager.create(std, shape)) {
return getArray().sub(meanArr).divi(stdArr);
}
}
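    // Illustrative usage sketch, not part of this interface: "manager" is an assumed existing
    // NDManager, and this helper is typically reached through NDArray.getNDArrayInternal().
    //
    //   NDArray chw = manager.ones(new Shape(3, 224, 224));
    //   float[] mean = {0.485f, 0.456f, 0.406f};
    //   float[] std = {0.229f, 0.224f, 0.225f};
    //   NDArray normalized = chw.getNDArrayInternal().normalize(mean, std);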
default NDArray toTensor() {
NDManager manager = getArray().getManager();
try (NDManager subManager = manager.newSubManager()) {
NDArray array = getArray();
array.attach(subManager);
NDArray result = array;
int dim = result.getShape().dimension();
if (dim == 3) {
result = result.expandDims(0);
}
// For Apple Silicon MPS it is important not to switch to 64-bit float here
if (result.getDataType() == DataType.FLOAT32) {
result = result.div(255.0f).transpose(0, 3, 1, 2);
} else {
result = result.div(255.0).transpose(0, 3, 1, 2);
}
if (dim == 3) {
result = result.squeeze(0);
}
// The network by default takes float32
if (!result.getDataType().equals(DataType.FLOAT32)) {
result = result.toType(DataType.FLOAT32, false);
}
array.attach(manager);
result.attach(manager);
return result;
}
}
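    // Illustrative usage sketch (assumes "manager" is an existing NDManager): an HWC image with
    // values in [0, 255] becomes a CHW float32 tensor with values in [0, 1].
    //
    //   NDArray hwc = manager.randomUniform(0, 255, new Shape(224, 224, 3));
    //   NDArray tensor = hwc.getNDArrayInternal().toTensor(); // shape (3, 224, 224), float32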
NDArray interpolation(long[] size, int mode, boolean alignCorners);
NDArray resize(int width, int height, int interpolation);
default NDArray crop(int x, int y, int width, int height) {
NDArray array = getArray();
StringBuilder sb = new StringBuilder(30);
if (array.getShape().dimension() == 4) {
sb.append(":,");
}
sb.append(y)
.append(':')
.append(y + height)
.append(',')
.append(x)
.append(':')
.append(x + width)
.append(",:");
return array.get(sb.toString());
}
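    // Illustrative usage sketch ("image" is an assumed existing HWC NDArray): cropping a 100x100
    // region whose top-left corner is at (x=10, y=20) is equivalent to
    // image.get("20:120,10:110,:").
    //
    //   NDArray cropped = image.getNDArrayInternal().crop(10, 20, 100, 100);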
// TODO: default can be implemented by using np.flip
NDArray randomFlipLeftRight();
// TODO: default can be implemented by using np.flip
NDArray randomFlipTopBottom();
// TODO: add TorchVision support
NDArray randomBrightness(float brightness);
// TODO: add TorchVision support
NDArray randomHue(float hue);
// TODO: add TorchVision support
NDArray randomColorJitter(float brightness, float contrast, float saturation, float hue);
/*
// Miscellaneous
*/
/**
* Returns an {@link NDArrayIndexer}.
*
* @param manager the manager used to create the arrays
* @return an {@link NDArrayIndexer}
*/
NDArrayIndexer getIndexer(NDManager manager);
/**
* Returns elements chosen from the {@code NDArray} or the other {@code NDArray} depending on
* condition.
*
* <p>Given three {@code NDArray}s, condition, this, and other, returns an {@code NDArray} with
* the elements from this or other, depending on whether the elements from condition {@code
* NDArray} are {@code true} or {@code false}. If condition has the same shape as this, each
* element in the output {@code NDArray} is from this if the corresponding element in the
* condition is {@code true}, and from other if {@code false}.
*
* <p>Note that all non-zero values are interpreted as {@code true} in condition {@link
* NDArray}.
*
* @param condition the condition {@code NDArray}
* @param other the other {@code NDArray}
* @return the result {@code NDArray}
*/
NDArray where(NDArray condition, NDArray other);
/**
* Joins a sequence of {@code NDArray}s in {@link NDList} along a new axis.
*
* <p>The axis parameter specifies the index of the new axis in the dimensions of the result.
* For example, if axis=0 it will be the first dimension and if axis=-1 it will be the last
* dimension.
*
     * @param arrays the input {@link NDList}. Each {@code NDArray} in the {@link NDList} must have
     *     the same shape as the {@code NDArray}
     * @param axis the axis in the result {@code NDArray} along which the input {@link NDList} is
     *     stacked
     * @return the result {@code NDArray}. The stacked {@code NDArray} has one more dimension than
     *     the {@code NDArray}s in the {@link NDList}
*/
NDArray stack(NDList arrays, int axis);
/**
* Joins a sequence of {@code NDArray}s in {@link NDList} along first axis.
*
* @param arrays the input {@link NDList}. Each {@code NDArray} in the {@link NDList} must have
* the same shape as the {@code NDArray}
* @return the result {@code NDArray}. The stacked {@code NDArray} has one more dimension than
* the {@code NDArray}s in {@link NDList}
*/
default NDArray stack(NDList arrays) {
return stack(arrays, 0);
}
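    // Illustrative shapes (sketch only, assuming "manager" is an existing NDManager and the
    // documented contract that the list is stacked together with this array): stacking adds a new
    // leading axis, so stacking this (2, 3) array with another (2, 3) array yields (2, 2, 3).
    //
    //   NDArray a = manager.zeros(new Shape(2, 3));
    //   NDArray b = manager.ones(new Shape(2, 3));
    //   NDArray stacked = a.getNDArrayInternal().stack(new NDList(b)); // shape (2, 2, 3)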
/**
* Joins a {@link NDList} along an existing axis.
*
* @param arrays a {@link NDList} which have the same shape as the {@code NDArray}, except in
* the dimension corresponding to axis
* @param axis the axis along which the {@link NDList} will be joined
* @return the concatenated {@code NDArray}
*/
NDArray concat(NDList arrays, int axis);
/**
* Joins a {@link NDList} along first axis.
*
* @param arrays a {@link NDList} which have the same shape as the {@code NDArray}, except in
* the dimension corresponding to axis
* @return the concatenated {@code NDArray}
*/
default NDArray concat(NDList arrays) {
return concat(arrays, 0);
}
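    // Illustrative shapes (sketch only, reusing the (2, 3) arrays a and b from the stack sketch
    // above): concatenation keeps the number of dimensions, so joining along axis 0 yields (4, 3).
    //
    //   NDArray joined = a.getNDArrayInternal().concat(new NDList(b)); // shape (4, 3)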
/**
* Computes Multibox training targets.
*
     * @param inputs an NDList of (anchors, labels, and class prediction)
* @param iouThreshold the anchor-GroundTruth overlap threshold to be regarded as a positive
* match
* @param ignoreLabel the label for ignored anchors
* @param negativeMiningRatio the max negative to positive samples ratio, use -1 to disable
* mining
* @param negativeMiningThreshold the threshold used for negative mining
* @param minNegativeSamples the minimum number of negative samples
* @return an NDList of (bounding box labels, bounding box masks, class labels)
*/
NDList multiBoxTarget(
NDList inputs,
float iouThreshold,
float ignoreLabel,
float negativeMiningRatio,
float negativeMiningThreshold,
int minNegativeSamples);
/**
     * Generates prior (anchor) boxes from data, sizes and ratios.
     *
     * @param sizes List of sizes of the generated MultiBoxPriors
     * @param ratios List of aspect ratios of the generated MultiBoxPriors
* @param steps Priorbox step across y and x, -1 for auto calculation
* @param offsets Priorbox center offsets, y and x respectively
* @param clip Whether to clip out-of-boundary boxes
* @return an NDList of anchor boxes
*/
NDList multiBoxPrior(
List<Float> sizes,
List<Float> ratios,
List<Float> steps,
List<Float> offsets,
boolean clip);
/**
* Converts multi-box detection predictions.
*
     * @param inputs an NDList of (anchors, labels, and class prediction) in that order
* @param clip whether to clip out-of-boundary boxes
* @param threshold the threshold to be a positive prediction
* @param backgroundId the background id
* @param nmsThreshold the non-maximum suppression threshold
* @param forceSuppress whether to suppress all detections regardless of class_id
* @param nmsTopK the number of detections to keep before NMS, -1 for no limit
     * @return an NDList containing the detection results
*/
NDList multiBoxDetection(
NDList inputs,
boolean clip,
float threshold,
int backgroundId,
float nmsThreshold,
boolean forceSuppress,
int nmsTopK);
/**
     * Returns the internal {@link NDArray}.
     *
     * @return the internal {@code NDArray}
*/
NDArray getArray();
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/ndarray
|
java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/internal/NDFormat.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.ndarray.internal;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDScope;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import ai.djl.util.Utils;
import java.lang.management.ManagementFactory;
import java.util.Arrays;
import java.util.Locale;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/** A helper for printing an {@link NDArray}. */
public abstract class NDFormat {
private static final int PRECISION = 8;
private static final String LF = System.lineSeparator();
private static final Pattern PATTERN = Pattern.compile("\\s*\\d\\.(\\d*?)0*e[+-](\\d+)");
private static final boolean DEBUGGER =
!Boolean.getBoolean("jshell")
&& ManagementFactory.getRuntimeMXBean().getInputArguments().stream()
.anyMatch(arg -> arg.startsWith("-agentlib:jdwp"));
/**
* Formats the contents of an array as a pretty printable string.
*
* @param array the array to print
* @param maxSize the maximum elements to print out
* @param maxDepth the maximum depth to print out
* @param maxRows the maximum rows to print out
* @param maxColumns the maximum columns to print out
* @return the string representation of the array
*/
public static String format(
NDArray array, int maxSize, int maxDepth, int maxRows, int maxColumns) {
return format(array, maxSize, maxDepth, maxRows, maxColumns, !DEBUGGER);
}
/**
* Formats the contents of an array as a pretty printable string.
*
* @param array the array to print
* @param maxSize the maximum elements to print out
* @param maxDepth the maximum depth to print out
* @param maxRows the maximum rows to print out
* @param maxColumns the maximum columns to print out
* @param withContent true to show the content of NDArray
* @return the string representation of the array
*/
public static String format(
NDArray array,
int maxSize,
int maxDepth,
int maxRows,
int maxColumns,
boolean withContent) {
StringBuilder sb = new StringBuilder(1000);
String name = array.getName();
if (name != null) {
sb.append(name).append(": ");
} else {
sb.append("ND: ");
}
sb.append(array.getShape())
.append(' ')
.append(array.getDevice())
.append(' ')
.append(array.getDataType());
if (array.hasGradient()) {
sb.append(" hasGradient");
}
if (!withContent) {
sb.append("\nCheck the \"Development Guideline\"->Debug to enable array display.\n");
return sb.toString();
}
NDFormat format;
DataType dataType = array.getDataType();
if (dataType == DataType.BOOLEAN) {
format = new BooleanFormat();
} else if (dataType == DataType.STRING) {
format = new StringFormat();
} else if (dataType.isInteger()) {
format = new IntFormat();
} else {
format = new FloatFormat();
}
return format.dump(sb, array, maxSize, maxDepth, maxRows, maxColumns);
}
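    // Illustrative usage sketch (assumes "manager" is an existing NDManager); the exact header
    // text depends on the array's name, device and data type.
    //
    //   NDArray array = manager.create(new float[] {1f, 2f, 3f, 4f, 5f, 6f}, new Shape(2, 3));
    //   String text = NDFormat.format(array, 100, 10, 10, 20);
    //   // -> roughly "ND: (2, 3) cpu() float32" followed by the formatted rows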
protected abstract CharSequence format(Number value);
protected void init(NDArray array) {}
protected String dump(
StringBuilder sb,
NDArray array,
int maxSize,
int maxDepth,
int maxRows,
int maxColumns) {
sb.append(LF);
long size = array.size();
long dimension = array.getShape().dimension();
if (size == 0) {
// corner case: 0 dimension
sb.append("[]").append(LF);
} else if (dimension == 0) {
// scalar case
init(array);
sb.append(format(array.toArray()[0])).append(LF);
} else if (size > maxSize) {
sb.append("Exceed max print size:").append(LF);
int limit = Math.min(maxSize, maxRows * maxColumns);
dumpFlat(sb, array, limit);
} else if (dimension > maxDepth) {
sb.append("Exceed max print dimension:").append(LF);
int limit = Math.min(maxSize, maxRows * maxColumns);
dumpFlat(sb, array, limit);
} else {
init(array);
dump(sb, array, 0, true, maxRows, maxColumns);
}
return sb.toString();
}
private void dump(
StringBuilder sb,
NDArray array,
int depth,
boolean first,
int maxRows,
int maxColumns) {
if (!first) {
Utils.pad(sb, ' ', depth);
}
sb.append('[');
Shape shape = array.getShape();
if (shape.dimension() == 1) {
append(sb, array.toArray(), maxColumns);
} else {
long len = shape.head();
long limit = Math.min(len, maxRows);
for (int i = 0; i < limit; ++i) {
try (NDArray nd = array.get(i)) {
dump(sb, nd, depth + 1, i == 0, maxRows, maxColumns);
}
}
long remaining = len - limit;
if (remaining > 0) {
Utils.pad(sb, ' ', depth + 1);
sb.append("... ").append(remaining).append(" more");
}
Utils.pad(sb, ' ', depth);
}
// last "]"
if (depth == 0) {
sb.append(']').append(LF);
} else {
sb.append("],").append(LF);
}
}
@SuppressWarnings("try")
private void dumpFlat(StringBuilder sb, NDArray array, int limit) {
try (NDScope ignore = new NDScope()) {
NDArray tmp = array.flatten().get(":" + limit);
init(tmp);
sb.append('{');
            // dump the flattened, truncated view rather than copying the full array
            append(sb, tmp.toArray(), limit);
sb.append('}').append(LF);
}
}
private void append(StringBuilder sb, Number[] values, int maxColumns) {
if (values.length == 0) {
return;
}
long limit = Math.min(values.length, maxColumns);
sb.append(format(values[0]));
for (int i = 1; i < limit; ++i) {
sb.append(", ");
sb.append(format(values[i]));
}
long remaining = values.length - limit;
if (remaining > 0) {
sb.append(", ... ").append(remaining).append(" more");
}
}
private static final class FloatFormat extends NDFormat {
private boolean exponential;
private int precision;
private int totalLength;
/** {@inheritDoc} */
@Override
public void init(NDArray array) {
Number[] values = array.toArray();
int maxIntPartLen = 0;
int maxFractionLen = 0;
int expFractionLen = 0;
int maxExpSize = 2;
boolean sign = false;
double max = 0;
double min = Double.MAX_VALUE;
for (Number n : values) {
double v = n.doubleValue();
if (v < 0) {
sign = true;
}
if (!Double.isFinite(v)) {
int intPartLen = v < 0 ? 4 : 3;
if (totalLength < intPartLen) {
totalLength = intPartLen;
}
continue;
}
double abs = Math.abs(v);
String str = String.format(Locale.ENGLISH, "%16e", abs);
Matcher m = PATTERN.matcher(str);
if (!m.matches()) {
throw new AssertionError("Invalid decimal value: " + str);
}
int fractionLen = m.group(1).length();
if (expFractionLen < fractionLen) {
expFractionLen = fractionLen;
}
int expSize = m.group(2).length();
if (expSize > maxExpSize) {
maxExpSize = expSize;
}
if (abs >= 1) {
int intPartLen = (int) Math.log10(abs) + 1;
if (v < 0) {
++intPartLen;
}
if (intPartLen > maxIntPartLen) {
maxIntPartLen = intPartLen;
}
int fullFractionLen = fractionLen + 1 - intPartLen;
if (maxFractionLen < fullFractionLen) {
maxFractionLen = fullFractionLen;
}
} else {
int intPartLen = v < 0 ? 2 : 1;
if (intPartLen > maxIntPartLen) {
maxIntPartLen = intPartLen;
}
int fullFractionLen = fractionLen + Integer.parseInt(m.group(2));
if (maxFractionLen < fullFractionLen) {
maxFractionLen = fullFractionLen;
}
}
if (abs > max) {
max = abs;
}
if (abs < min && abs > 0) {
min = abs;
}
}
double ratio = max / min;
if (max > 1.e8 || min < 0.0001 || ratio > 1000.) {
exponential = true;
precision = Math.min(PRECISION, expFractionLen);
totalLength = precision + 4;
if (sign) {
++totalLength;
}
} else {
precision = Math.min(4, maxFractionLen);
int len = maxIntPartLen + precision + 1;
if (totalLength < len) {
totalLength = len;
}
}
}
/** {@inheritDoc} */
@Override
public CharSequence format(Number value) {
double d = value.doubleValue();
if (Double.isNaN(d)) {
return String.format(Locale.ENGLISH, "%" + totalLength + "s", "nan");
} else if (Double.isInfinite(d)) {
if (d > 0) {
return String.format(Locale.ENGLISH, "%" + totalLength + "s", "inf");
} else {
return String.format(Locale.ENGLISH, "%" + totalLength + "s", "-inf");
}
}
if (exponential) {
precision = Math.max(PRECISION, precision);
return String.format(Locale.ENGLISH, "% ." + precision + "e", value.doubleValue());
}
if (precision == 0) {
String fmt = "%" + (totalLength - 1) + '.' + precision + "f.";
return String.format(Locale.ENGLISH, fmt, value.doubleValue());
}
String fmt = "%" + totalLength + '.' + precision + 'f';
String ret = String.format(Locale.ENGLISH, fmt, value.doubleValue());
// Replace trailing zeros with space
char[] chars = ret.toCharArray();
for (int i = chars.length - 1; i >= 0; --i) {
if (chars[i] == '0') {
chars[i] = ' ';
} else {
break;
}
}
return new String(chars);
}
}
private static final class IntFormat extends NDFormat {
private boolean exponential;
private int precision;
private int totalLength;
/** {@inheritDoc} */
@Override
public void init(NDArray array) {
Number[] values = array.toArray();
// scalar case
if (values.length == 1) {
totalLength = 1;
return;
}
long max = 0;
long negativeMax = 0;
for (Number n : values) {
long v = n.longValue();
long abs = Math.abs(v);
if (v < 0 && abs > negativeMax) {
negativeMax = abs;
}
if (abs > max) {
max = abs;
}
}
if (max >= 1.e8) {
exponential = true;
precision = Math.min(PRECISION, (int) Math.log10(max) + 1);
} else {
int size = (max != 0) ? (int) Math.log10(max) + 1 : 1;
int negativeSize = (negativeMax != 0) ? (int) Math.log10(negativeMax) + 2 : 2;
totalLength = Math.max(size, negativeSize);
}
}
/** {@inheritDoc} */
@Override
public CharSequence format(Number value) {
if (exponential) {
return String.format(Locale.ENGLISH, "% ." + precision + "e", value.floatValue());
}
return String.format(Locale.ENGLISH, "%" + totalLength + "d", value.longValue());
}
}
private static final class BooleanFormat extends NDFormat {
/** {@inheritDoc} */
@Override
public CharSequence format(Number value) {
return value.byteValue() != 0 ? " true" : "false";
}
}
private static final class StringFormat extends NDFormat {
/** {@inheritDoc} */
@Override
public CharSequence format(Number value) {
return null;
}
/** {@inheritDoc} */
@Override
protected String dump(
StringBuilder sb,
NDArray array,
int maxSize,
int maxDepth,
int maxRows,
int maxColumns) {
return Arrays.toString(array.toStringArray());
}
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/ndarray
|
java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/internal/package-info.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
/** Contains internal helpers for {@link ai.djl.ndarray.NDArray}. */
package ai.djl.ndarray.internal;
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/ndarray
|
java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/types/DataDesc.java
|
/*
* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.ndarray.types;
import ai.djl.Device;
import ai.djl.ndarray.NDArray;
/**
 * A data descriptor class that encapsulates information of an {@link NDArray}.
*
* <p>The information includes:
*
* <ul>
* <li>Optional name of the NDArray
* <li>{@link Device}
* <li>{@link Shape}
* <li>{@link DataType}
* <li>{@link SparseFormat}
* </ul>
*/
public class DataDesc {
private String name;
private Shape shape;
private DataType dataType;
/**
* Constructs and initializes a {@code DataDesc} with specified {@link Shape}.
*
* @param shape the {@link Shape} of the {@link NDArray}
*/
public DataDesc(Shape shape) {
this(shape, DataType.FLOAT32, null);
}
/**
* Constructs and initializes a {@code DataDesc} with specified {@link Shape} and name.
*
* @param shape the {@link Shape} of the {@link NDArray}
* @param name the name of the {@link NDArray}
*/
public DataDesc(Shape shape, String name) {
this(shape, DataType.FLOAT32, name);
}
/**
* Constructs and initializes a {@code DataDesc} with specified {@link Shape} and {@link
* DataType}.
*
* @param shape the {@link Shape} of the {@link NDArray}
* @param dataType the {@link DataType} of the {@link NDArray}
*/
public DataDesc(Shape shape, DataType dataType) {
this(shape, dataType, null);
}
/**
     * Constructs and initializes a {@code DataDesc} with specified {@link Shape}, {@link DataType}
     * and name.
*
* @param shape the {@link Shape} of the {@link NDArray}
* @param dataType the {@link DataType} of the {@link NDArray}
* @param name the name of the {@link NDArray}
*/
public DataDesc(Shape shape, DataType dataType, String name) {
this.name = name;
this.shape = shape;
this.dataType = dataType;
}
/**
* Returns the name of the {@link NDArray}.
*
* @return the name of the {@link NDArray}
*/
public String getName() {
return name;
}
/**
* Sets the name of the {@link NDArray}.
*
* @param name the name of the {@link NDArray}
*/
public void setName(String name) {
this.name = name;
}
/**
* Returns the {@link Shape} of the {@link NDArray}.
*
* @return the {@link Shape} of the {@link NDArray}
*/
public Shape getShape() {
return shape;
}
/**
* Sets the {@link Shape} of the {@link NDArray}.
*
* @param shape the {@link Shape} of the {@link NDArray}
*/
public void setShape(Shape shape) {
this.shape = shape;
}
/**
* Returns the {@link DataType} of the {@link NDArray}.
*
* @return the {@link DataType} of the {@link NDArray}
*/
public DataType getDataType() {
return dataType;
}
/**
* Sets the {@link DataType} of the {@link NDArray}.
*
* @param dataType the {@link DataType} of the {@link NDArray}
*/
public void setDataType(DataType dataType) {
this.dataType = dataType;
}
/** {@inheritDoc} */
@Override
public String toString() {
return name + " shape: " + shape + " dataType: " + dataType;
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/ndarray
|
java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/types/DataType.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.ndarray.types;
import ai.djl.ndarray.NDArray;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.DoubleBuffer;
import java.nio.FloatBuffer;
import java.nio.IntBuffer;
import java.nio.LongBuffer;
import java.nio.ShortBuffer;
/** An enum representing the underlying {@link NDArray}'s data type. */
public enum DataType {
    // do not change order, the PyTorch engine relies on DataType.ordinal()
FLOAT32(Format.FLOATING, 4),
FLOAT64(Format.FLOATING, 8),
FLOAT16(Format.FLOATING, 2),
UINT8(Format.UINT, 1),
INT32(Format.INT, 4),
INT8(Format.INT, 1),
INT64(Format.INT, 8),
BOOLEAN(Format.BOOLEAN, 1),
COMPLEX64(Format.FLOATING, 4),
UNKNOWN(Format.UNKNOWN, 0),
STRING(Format.STRING, -1),
BFLOAT16(Format.FLOATING, 2),
UINT64(Format.UINT, 8),
UINT32(Format.UINT, 4),
UINT16(Format.UINT, 2),
INT16(Format.INT, 2);
/** The general data type format categories. */
public enum Format {
FLOATING,
UINT,
INT,
BOOLEAN,
STRING,
UNKNOWN
}
private Format format;
private int numOfBytes;
DataType(Format format, int numOfBytes) {
this.format = format;
this.numOfBytes = numOfBytes;
}
/**
* Returns the number of bytes for each element.
*
* @return the number of bytes for each element
*/
public int getNumOfBytes() {
return numOfBytes;
}
/**
* Returns the format of the data type.
*
* @return the format of the data type
*/
public Format getFormat() {
return format;
}
/**
* Checks whether it is a floating data type.
*
* @return whether it is a floating data type
*/
public boolean isFloating() {
return format == Format.FLOATING;
}
/**
* Checks whether it is an integer data type.
*
* @return whether it is an integer type
*/
public boolean isInteger() {
return format == Format.UINT || format == Format.INT;
}
/**
* Checks whether it is a boolean data type.
*
* @return whether it is a boolean data type
*/
public boolean isBoolean() {
return format == Format.BOOLEAN;
}
/**
* Returns the data type to use for a data buffer.
*
* @param data the buffer to analyze
* @return the data type for the buffer
*/
public static DataType fromBuffer(Buffer data) {
if (data instanceof FloatBuffer) {
return DataType.FLOAT32;
} else if (data instanceof ShortBuffer) {
return DataType.FLOAT16;
} else if (data instanceof DoubleBuffer) {
return DataType.FLOAT64;
} else if (data instanceof IntBuffer) {
return DataType.INT32;
} else if (data instanceof LongBuffer) {
return DataType.INT64;
} else if (data instanceof ByteBuffer) {
return DataType.INT8;
} else {
throw new IllegalArgumentException(
"Unsupported buffer type: " + data.getClass().getSimpleName());
}
}
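    // Illustrative mapping (sketch): the buffer class alone determines the resulting data type.
    //
    //   DataType f32 = DataType.fromBuffer(FloatBuffer.allocate(4)); // FLOAT32
    //   DataType i8 = DataType.fromBuffer(ByteBuffer.allocate(4));   // INT8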
/**
* Returns the data type from numpy value.
*
* @param dtype the numpy datatype
* @return the data type
*/
public static DataType fromNumpy(String dtype) {
switch (dtype) {
case "<f4":
case ">f4":
case "=f4":
return FLOAT32;
case "<f8":
case ">f8":
case "=f8":
return FLOAT64;
case "<f2":
case ">f2":
case "=f2":
return FLOAT16;
case "|u1":
return UINT8;
case "<u2":
case ">u2":
case "=u2":
return UINT16;
case "<u4":
case ">u4":
case "=u4":
return UINT32;
case "<u8":
case ">u8":
case "=u8":
return UINT64;
case "|i1":
return INT8;
case "<i2":
case ">i2":
case "=i2":
return INT16;
case "<i4":
case ">i4":
case "=i4":
return INT32;
case "<i8":
case ">i8":
case "=i8":
return INT64;
case "|b1":
return BOOLEAN;
case "|S1":
return STRING;
default:
throw new IllegalArgumentException("Unsupported dataType: " + dtype);
}
}
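    // Illustrative inputs (sketch): the numpy dtype string encodes byte order and element width.
    //
    //   DataType f32 = DataType.fromNumpy("<f4"); // little-endian 4-byte float -> FLOAT32
    //   DataType i64 = DataType.fromNumpy("<i8"); // little-endian 8-byte integer -> INT64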
/**
* Returns the data type from Safetensors value.
*
* @param dtype the Safetensors datatype
* @return the data type
*/
public static DataType fromSafetensors(String dtype) {
switch (dtype) {
case "F64":
return FLOAT64;
case "F32":
return FLOAT32;
case "F16":
return FLOAT16;
case "BF16":
return BFLOAT16;
case "I64":
return INT64;
case "I32":
return INT32;
case "I8":
return INT8;
case "U8":
return UINT8;
case "BOOL":
return BOOLEAN;
default:
throw new IllegalArgumentException("Unsupported safetensors dataType: " + dtype);
}
}
/**
* Converts a {@link ByteBuffer} to a buffer for this data type.
*
* @param data the buffer to convert
* @return the converted buffer
*/
public Buffer asDataType(ByteBuffer data) {
switch (this) {
case FLOAT16:
case BFLOAT16:
return data.asShortBuffer();
case FLOAT32:
return data.asFloatBuffer();
case FLOAT64:
return data.asDoubleBuffer();
case INT32:
case UINT32:
return data.asIntBuffer();
case INT64:
case UINT64:
return data.asLongBuffer();
case UINT8:
case INT8:
case COMPLEX64:
case UNKNOWN:
default:
return data;
}
}
/**
* Returns a numpy string value.
*
* @return a numpy string value
*/
public String asNumpy() {
char order = ByteOrder.nativeOrder() == ByteOrder.BIG_ENDIAN ? '>' : '<';
switch (this) {
case FLOAT32:
return order + "f4";
case FLOAT64:
return order + "f8";
case FLOAT16:
return order + "f2";
case UINT8:
return "|u1";
case UINT16:
return order + "u2";
case UINT32:
return order + "u4";
case UINT64:
return order + "u8";
case INT8:
return "|i1";
case INT16:
return order + "i2";
case INT32:
return order + "i4";
case INT64:
return order + "i8";
case BOOLEAN:
return "|b1";
case STRING:
return "|S1";
case BFLOAT16:
case COMPLEX64:
case UNKNOWN:
default:
throw new IllegalArgumentException("Unsupported dataType: " + this);
}
}
/**
* Returns a safetensors string value.
*
* @return a safetensors string value
*/
public String asSafetensors() {
switch (this) {
case FLOAT64:
return "F64";
case FLOAT32:
return "F32";
case FLOAT16:
return "F16";
case BFLOAT16:
return "BF16";
case INT64:
return "I64";
case INT32:
return "I32";
case INT8:
return "I8";
case UINT8:
return "U8";
case BOOLEAN:
return "BOOL";
case INT16:
case UINT64:
case UINT32:
case UINT16:
case STRING:
case COMPLEX64:
case UNKNOWN:
default:
throw new IllegalArgumentException("Unsupported dataType: " + this);
}
}
/** {@inheritDoc} */
@Override
public String toString() {
return name().toLowerCase();
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/ndarray
|
java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/types/LayoutType.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.ndarray.types;
import java.util.stream.IntStream;
/**
* An enum to represent the meaning of a particular axis in an {@link ai.djl.ndarray.NDArray}.
*
* <p>The options are:
*
* <ul>
* <li>{@link LayoutType#BATCH} - Different elements in a batch, usually from a {@link
* ai.djl.translate.StackBatchifier}.
* <li>{@link LayoutType#CHANNEL} - Each channel represents a different aspect of the data such as
* RGB showing different color channels.
* <li>{@link LayoutType#DEPTH} - The depth of a 3-D input
 *   <li>{@link LayoutType#HEIGHT} - The height of a multi-dimensional input, usually an image.
 *   <li>{@link LayoutType#WIDTH} - The width of a multi-dimensional input, usually an image.
 *   <li>{@link LayoutType#TIME} - The time within a sequence such as text or video.
 *   <li>{@link LayoutType#UNKNOWN} - An unknown or otherwise unrepresentable layout type.
* </ul>
*/
public enum LayoutType {
BATCH('N'),
CHANNEL('C'),
DEPTH('D'),
HEIGHT('H'),
WIDTH('W'),
TIME('T'),
UNKNOWN('?');
private char value;
LayoutType(char value) {
this.value = value;
}
/**
* Returns the character representation of the layout type.
*
* @return the character representation of the layout type
*/
public char getValue() {
return value;
}
/**
* Converts the character to the matching layout type.
*
* @param value the character to convert
* @return the matching layout type
* @throws IllegalArgumentException thrown if the character does not match any layout type
*/
public static LayoutType fromValue(char value) {
for (LayoutType type : LayoutType.values()) {
if (value == type.value) {
return type;
}
}
throw new IllegalArgumentException(
"The value does not match any layoutTypes. Use '?' for Unknown");
}
/**
* Converts each character to the matching layout type.
*
* @param layout the character string to convert
* @return the list of layout types for each character in the string
* @throws IllegalArgumentException thrown if the character does not match any layout type
*/
public static LayoutType[] fromValue(String layout) {
return IntStream.range(0, layout.length())
.mapToObj(i -> fromValue(layout.charAt(i)))
.toArray(LayoutType[]::new);
}
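    // Illustrative usage (sketch): a conventional image layout string expands to one type per
    // axis.
    //
    //   LayoutType[] nchw = LayoutType.fromValue("NCHW"); // {BATCH, CHANNEL, HEIGHT, WIDTH}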
/**
* Converts a layout type array to a string of the character representations.
*
* @param layouts the layout type to convert
* @return the string of the character representations
*/
public static String toString(LayoutType[] layouts) {
StringBuilder sb = new StringBuilder(layouts.length);
for (LayoutType layout : layouts) {
sb.append(layout.getValue());
}
return sb.toString();
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/ndarray
|
java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/types/Shape.java
|
/*
* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.ndarray.types;
import ai.djl.ndarray.NDArray;
import ai.djl.util.Pair;
import ai.djl.util.PairList;
import java.io.DataInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.LongStream;
import java.util.stream.Stream;
/** A class that represents the {@link NDArray}'s shape information. */
public class Shape {
private long[] shape;
private LayoutType[] layout;
/**
* Constructs and initializes a {@code Shape} with specified dimension as {@code (long...
* shape)}.
*
* @param shape the dimensions of the shape
* @throws IllegalArgumentException Thrown if any element in Shape is invalid. It should not be
* less than -1. Also thrown if the shape and layout do not have equal sizes.
*/
public Shape(long... shape) {
this(
shape,
Arrays.stream(shape).mapToObj(x -> LayoutType.UNKNOWN).toArray(LayoutType[]::new));
}
/**
* Constructs and initializes a {@code Shape} with specified dimension.
*
* @param shape the dimensions of the shape
* @throws IllegalArgumentException Thrown if any element in Shape is invalid. It should not be
* less than -1. Also thrown if the shape and layout do not have equal sizes.
*/
public Shape(List<Long> shape) {
this(
shape.stream().mapToLong(l -> l).toArray(),
shape.stream().map(x -> LayoutType.UNKNOWN).toArray(LayoutType[]::new));
}
/**
* Constructs and initializes a {@code Shape} with specified shape and layout pairList.
*
* @param shape the dimensions and layout of the shape
* @throws IllegalArgumentException Thrown if any element in Shape is invalid. It should not be
less than -1. Also thrown if the shape and layout do not have equal sizes.
*/
public Shape(PairList<Long, LayoutType> shape) {
this(
shape.keys().stream().mapToLong(l -> l).toArray(),
shape.values().toArray(new LayoutType[shape.size()]));
}
/**
* Constructs and initializes a {@code Shape} with specified dimension and layout.
*
* @param shape the size of each axis of the shape
* @param layout the {@link LayoutType} of each axis in the shape
* @throws IllegalArgumentException Thrown if any element in Shape is invalid. It should not be
* less than -1. Also thrown for an invalid layout. Also thrown if the shape and layout do
* not have equal sizes.
*/
public Shape(long[] shape, String layout) {
this(shape, LayoutType.fromValue(layout));
}
/**
* Constructs and initializes a {@code Shape} with specified dimension and layout.
*
* @param shape the size of each axis of the shape
* @param layout the {@link LayoutType} of each axis in the shape
* @throws IllegalArgumentException Thrown if any element in Shape is invalid. It should not be
* less than -1. Also thrown if the shape and layout do not have equal sizes.
*/
public Shape(long[] shape, LayoutType[] layout) {
if (Arrays.stream(shape).anyMatch(s -> s < -1)) {
throw new IllegalArgumentException("The shape must be >= -1");
}
if (shape.length != layout.length) {
throw new IllegalArgumentException("The shape and layout must have the same length");
}
this.shape = shape;
this.layout = layout;
}
/**
* Returns a new shape altering the given dimension.
*
* @param shape the shape to update
* @param dimension the dimension to get the shape in
* @param value the value to set the dimension to
* @return a new shape with the update applied
*/
public static Shape update(Shape shape, int dimension, long value) {
long[] newShape = shape.shape.clone();
newShape[dimension] = value;
return new Shape(newShape, shape.layout);
}
/**
* Returns the dimensions of the {@code Shape}.
*
* @return the dimensions of the {@code Shape}
*/
public long[] getShape() {
return shape;
}
/**
* Returns the shape in the given dimension.
*
* @param dimension the dimension to get the shape in
* @return the shape in the given dimension
*/
public long get(int dimension) {
return shape[dimension];
}
/**
* Returns the last index.
*
* @return the last index
*/
public long getLastDimension() {
return shape[shape.length - 1];
}
/**
* Returns the layout type in the given dimension.
*
* @param dimension the dimension to get the layout type in
* @return the layout type in the given dimension
*/
public LayoutType getLayoutType(int dimension) {
return layout[dimension];
}
/**
* Returns the size of a specific dimension or several specific dimensions.
*
* @param dimensions the dimension or dimensions to find the size of
* @return the size of specific dimension(s) or -1 for indeterminate size
* @throws IllegalArgumentException thrown if passed an invalid dimension
*/
public long size(int... dimensions) {
long total = 1;
for (long d : dimensions) {
if (d < 0 || d >= shape.length) {
throw new IllegalArgumentException("Invalid dimension " + d);
}
if (shape[Math.toIntExact(d)] == -1) {
return -1;
}
total *= shape[Math.toIntExact(d)];
}
return total;
}
/**
* Returns the total size.
*
* @return the total size or -1 for indeterminate size
*/
public long size() {
long total = 1;
for (long v : shape) {
if (v == -1) {
return -1;
}
total *= v;
}
return total;
}
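    // Illustrative values (sketch): size() multiplies the requested axes and propagates the
    // unknown marker -1.
    //
    //   new Shape(2, 3, 4).size();     // 24
    //   new Shape(2, 3, 4).size(1, 2); // 12
    //   new Shape(-1, 3).size();       // -1 (indeterminate)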
/**
* Returns the number of dimensions of this {@code Shape}.
*
* @return the number of dimensions of this {@code Shape}
*/
public int dimension() {
return shape.length;
}
/**
     * Returns the number of unknown values in this {@code Shape}.
     *
     * @return the number of unknown values in this {@code Shape}
*/
public long getUnknownValueCount() {
return Arrays.stream(shape).filter(s -> s == -1).count();
}
/**
* Creates a new {@code Shape} whose content is a slice of this shape.
*
     * <p>The sub shape begins at the specified {@code beginIndex} and extends to the end of
     * this shape.
*
* @param beginIndex the beginning index, inclusive
* @return a new {@code Shape} whose content is a slice of this shape
*/
public Shape slice(int beginIndex) {
return slice(beginIndex, shape.length);
}
/**
* Creates a new {@code Shape} whose content is a slice of this shape.
*
* <p>The sub shape begins at the specified {@code beginIndex} and extends to {@code endIndex -
* 1}.
*
* @param beginIndex the beginning index, inclusive
* @param endIndex the ending index, exclusive
* @return a new {@code Shape} whose content is a slice of this shape
*/
public Shape slice(int beginIndex, int endIndex) {
beginIndex = beginIndex + (beginIndex < 0 ? shape.length : 0);
endIndex = endIndex + (endIndex < 0 ? shape.length : 0);
int size = endIndex - beginIndex;
long[] out = new long[size];
System.arraycopy(shape, beginIndex, out, 0, size);
return new Shape(out);
}
/**
* Returns only the axes of the Shape whose layout types match the predicate.
*
* @param predicate the predicate to compare the axes of the Shape with
* @return a new filtered Shape
*/
public Shape filterByLayoutType(Predicate<LayoutType> predicate) {
return new Shape(
new PairList<>(
this.stream()
.filter(pair -> predicate.test(pair.getValue()))
.collect(Collectors.toList())));
}
/**
* Returns a mapped shape.
*
* @param mapper the function to map each element of the Shape by
* @return a new mapped Shape
*/
public Shape map(Function<Pair<Long, LayoutType>, Pair<Long, LayoutType>> mapper) {
return new Shape(new PairList<>(stream().map(mapper).collect(Collectors.toList())));
}
/**
* Returns a stream of the Shape.
*
* @return the stream of the Shape
*/
public Stream<Pair<Long, LayoutType>> stream() {
return new PairList<>(
Arrays.stream(shape).boxed().collect(Collectors.toList()),
Arrays.asList(layout))
.stream();
}
/**
* Joins this shape with axes.
*
* @param axes the axes to join
* @return the joined {@code Shape}
*/
public Shape add(long... axes) {
return this.addAll(new Shape(axes));
}
/**
* Joins this shape with specified {@code other} shape.
*
* @param other the shape to join
* @return the joined {@code Shape}
*/
public Shape addAll(Shape other) {
return new Shape(
LongStream.concat(Arrays.stream(shape), Arrays.stream(other.shape)).toArray());
}
/**
* Returns the head index of the shape.
*
* @return the head index of the shape
* @throws IndexOutOfBoundsException Thrown if the shape is empty
*/
public long head() {
// scalar case
if (shape.length == 0) {
throw new IndexOutOfBoundsException("can't get value from scalar shape.");
}
return shape[0];
}
/**
* Returns the tail index of the shape.
*
* @return the tail index of the shape
* @throws IndexOutOfBoundsException Thrown if the shape is empty
*/
public long tail() {
// scalar case
if (shape.length == 0) {
throw new IndexOutOfBoundsException("can't get value from scalar shape.");
}
return shape[shape.length - 1];
}
/**
* Returns the number of trailing ones in the array shape.
*
* <p>For example, a rank 3 array with shape [10, 1, 1] would return 2 for this method
*
* @return the number of trailing ones in the shape
*/
public int getTrailingOnes() {
for (int i = 0; i < shape.length; i++) {
if (shape[shape.length - i - 1] != 1) {
return i;
}
}
return 0;
}
/**
* Returns the number of leading ones in the array shape.
*
     * <p>For example, a rank 3 array with shape [1, 10, 1] would return 1 for this method
*
* @return the number of leading ones in the shape
*/
public int getLeadingOnes() {
for (int i = 0; i < shape.length; i++) {
if (shape[i] != 1) {
return i;
}
}
return 0;
}
/**
* Returns {@code true} if the NDArray is a scalar.
*
* @return whether the NDArray is a scalar
*/
public boolean isScalar() {
return dimension() == 0;
}
/**
* Returns {@code true} if the NDArray contains zero dimensions.
*
* @return whether the NDArray contain zero dimensions
*/
public boolean hasZeroDimension() {
for (int i = 0; i < dimension(); i++) {
if (shape[i] == 0) {
return true;
}
}
return false;
}
/**
* Returns {@code true} if a layout is set.
*
* @return whether a layout has been set
*/
public boolean isLayoutKnown() {
return !Arrays.stream(layout).allMatch(l -> l == LayoutType.UNKNOWN);
}
/**
* Returns the layout type for each axis in this shape.
*
* @return the layout type for each axis in this shape
*/
public LayoutType[] getLayout() {
return layout;
}
/**
* Returns the string layout type for each axis in this shape.
*
* @return the string layout type for each axis in this shape
*/
public String toLayoutString() {
return LayoutType.toString(layout);
}
/**
* Gets the byte array representation of this {@code Shape} for serialization.
*
* @return a byte array representation of this {@code Shape}
*/
public byte[] getEncoded() {
int length = 8 + shape.length * 8 + layout.length * 2;
ByteBuffer bb = ByteBuffer.allocate(length);
bb.putInt(shape.length);
for (long l : shape) {
bb.putLong(l);
}
bb.putInt(layout.length);
for (LayoutType layoutType : layout) {
bb.putChar(layoutType.getValue());
}
return bb.array();
}
/** {@inheritDoc} */
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Shape shape1 = (Shape) o;
return Arrays.equals(shape, shape1.shape);
}
/** {@inheritDoc} */
@Override
public int hashCode() {
return Arrays.hashCode(shape);
}
/** {@inheritDoc} */
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append('(');
for (int i = 0; i < shape.length; ++i) {
if (i > 0) {
sb.append(", ");
}
sb.append(shape[i]);
}
sb.append(')');
return sb.toString();
}
/**
* Decodes the data in the given {@link DataInputStream} and converts it into the corresponding
* {@link Shape} object.
*
* @param dis the inputstream to read from
* @return the corresponding {@link Shape} object
* @throws IOException when an I/O error occurs
*/
public static Shape decode(DataInputStream dis) throws IOException {
// Shape
int length = dis.readInt();
long[] shapeValue = new long[length];
for (int i = 0; i < length; ++i) {
shapeValue[i] = dis.readLong();
}
// Layout
length = dis.readInt();
char[] layout = new char[length];
for (int i = 0; i < length; ++i) {
layout[i] = dis.readChar();
}
return new Shape(shapeValue, new String(layout));
}
/**
* Decodes the data in the given {@link ByteBuffer} and converts it into the corresponding
* {@link Shape} object.
*
* @param bb the ByteBuffer to read from
* @return the corresponding {@link Shape} object
*/
public static Shape decode(ByteBuffer bb) {
// Shape
int length = bb.getInt();
long[] shapeValue = new long[length];
for (int i = 0; i < length; ++i) {
shapeValue[i] = bb.getLong();
}
// Layout
length = bb.getInt();
char[] layout = new char[length];
for (int i = 0; i < length; ++i) {
layout[i] = bb.getChar();
}
return new Shape(shapeValue, new String(layout));
}
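    // Round-trip sketch: getEncoded() and decode(ByteBuffer) are symmetric, preserving both the
    // dimensions and the layout string.
    //
    //   Shape original = new Shape(new long[] {1, 3, 224, 224}, "NCHW");
    //   Shape restored = Shape.decode(ByteBuffer.wrap(original.getEncoded()));
    //   // restored.equals(original) && "NCHW".equals(restored.toLayoutString())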
/**
     * Returns whether the array is effectively rank-1, as inferred from the shape.
     *
     * <p>For example, an array with shape [1, 10, 1] returns true. An array with an indeterminate
     * size (-1) returns false.
     *
     * @return whether the array is effectively rank-1
*/
public boolean isRankOne() {
int max = 1;
int ans = 1;
for (long s : shape) {
int size = Math.toIntExact(s);
max = Math.max(max, size);
ans *= size;
if (ans < 0) {
return false;
}
}
return max == ans;
}
/**
* Parses a string representation of shapes for NDList.
*
* @param value a string representation of shapes for NDList
* @return a list of Shape and datatype pairs
*/
public static PairList<DataType, Shape> parseShapes(String value) {
PairList<DataType, Shape> inputShapes = new PairList<>();
if (value != null) {
if (value.contains("(")) {
Pattern pattern =
Pattern.compile("\\((\\s*([-\\d]+)([,\\s]+[-\\d]+)*\\s*)\\)(\\w?)");
Matcher matcher = pattern.matcher(value);
while (matcher.find()) {
String[] tokens = matcher.group(1).split(",");
long[] array = Arrays.stream(tokens).mapToLong(Long::parseLong).toArray();
DataType dataType;
String dataTypeStr = matcher.group(4);
if (dataTypeStr == null || dataTypeStr.isEmpty()) {
dataType = DataType.FLOAT32;
} else {
switch (dataTypeStr) {
case "s":
dataType = DataType.FLOAT16;
break;
case "d":
dataType = DataType.FLOAT64;
break;
case "u":
dataType = DataType.UINT8;
break;
case "b":
dataType = DataType.INT8;
break;
case "i":
dataType = DataType.INT32;
break;
case "l":
dataType = DataType.INT64;
break;
case "B":
dataType = DataType.BOOLEAN;
break;
case "f":
dataType = DataType.FLOAT32;
break;
default:
throw new IllegalArgumentException("Invalid input-shape: " + value);
}
}
inputShapes.add(dataType, new Shape(array));
}
} else {
String[] tokens = value.split(",");
long[] shapes = Arrays.stream(tokens).mapToLong(Long::parseLong).toArray();
inputShapes.add(DataType.FLOAT32, new Shape(shapes));
}
}
return inputShapes;
}
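    // Illustrative inputs (sketch): each parenthesized group is one shape with an optional data
    // type suffix; a bare comma-separated list is treated as a single FLOAT32 shape.
    //
    //   Shape.parseShapes("(1,3,224,224)f(1)l"); // FLOAT32 (1, 3, 224, 224) and INT64 (1)
    //   Shape.parseShapes("1,3,224,224");        // FLOAT32 (1, 3, 224, 224)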
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/ndarray
|
java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/types/SparseFormat.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.ndarray.types;
/**
* An enum representing Sparse matrix storage formats.
*
* <ul>
* <li>DENSE: Stride format
* <li>ROW_SPARSE: Row Sparse
* <li>CSR: Compressed Sparse Row
* </ul>
*
* @see <a href="https://software.intel.com/en-us/node/471374">Sparse Matrix Storage Formats</a>
*/
public enum SparseFormat {
// the dense format is accelerated by MKLDNN by default
DENSE("default", 0),
ROW_SPARSE("row_sparse", 1),
CSR("csr", 2),
COO("coo", 3);
private String type;
private int value;
SparseFormat(String type, int value) {
this.type = type;
this.value = value;
}
/**
     * Gets the {@code SparseFormat} from its integer value.
*
* @param value the integer value of the {@code SparseFormat}
* @return a {@code SparseFormat}
*/
public static SparseFormat fromValue(int value) {
for (SparseFormat t : values()) {
if (value == t.getValue()) {
return t;
}
}
throw new IllegalArgumentException("Unknown Sparse type: " + value);
}
/**
* Returns the {@code SparseFormat} name.
*
* @return the {@code SparseFormat} name
*/
public String getType() {
return type;
}
/**
* Returns the integer value of this {@code SparseFormat}.
*
* @return the integer value of this {@code SparseFormat}
*/
public int getValue() {
return value;
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/ndarray
|
java-sources/ai/djl/api/0.34.0/ai/djl/ndarray/types/package-info.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
/** Contains classes that define n-dimensional array data types. */
package ai.djl.ndarray.types;
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/AbstractBaseBlock.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn;
import ai.djl.MalformedModelException;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import ai.djl.training.ParameterStore;
import ai.djl.training.initializer.Initializer;
import ai.djl.util.Pair;
import ai.djl.util.PairList;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.function.Predicate;
/**
* This provides shared functionality for both the DJL-based {@link AbstractBlock}s and the imported
* {@link AbstractSymbolBlock}s.
*/
public abstract class AbstractBaseBlock implements Block {
/**
* The model version of this block, used for checking if parameters are still valid during
* parameter loading.
*/
protected byte version;
/** The shape of the input for this block, set by the initialization process. */
protected Shape[] inputShapes;
protected DataType[] outputDataTypes;
    /** List of names for the inputs; named inputs should be set manually in the subclass. */
protected List<String> inputNames = Collections.emptyList();
/** Constructs a new {@link AbstractBaseBlock} instance. */
public AbstractBaseBlock() {
this((byte) 1);
}
/**
* Builds an empty block with the given version for parameter serialization.
*
* @param version the version to use for parameter serialization.
*/
public AbstractBaseBlock(byte version) {
this.version = version;
}
/** {@inheritDoc} */
@Override
public final NDList forward(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
if (training && !isInitialized()) {
NDManager paramsManager = parameterStore.getManager();
initialize(paramsManager, DataType.FLOAT32, inputs.getShapes());
}
return forwardInternal(parameterStore, inputs, training, params);
}
/** {@inheritDoc} */
@Override
public NDList forward(
ParameterStore parameterStore,
NDList data,
NDList labels,
PairList<String, Object> params) {
NDManager paramsManager = parameterStore.getManager();
if (!isInitialized()) {
initialize(paramsManager, DataType.FLOAT32, data.getShapes());
}
return forwardInternal(parameterStore, data, labels, params);
}
/**
* A helper for {@link Block#forward(ParameterStore, NDList, boolean, PairList)} after
* initialization.
*
* @param parameterStore the parameter store
* @param inputs the input NDList
* @param training true for a training forward pass
* @param params optional parameters
* @return the output of the forward pass
*/
protected abstract NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params);
/**
* A helper for {@link Block#forward(ParameterStore, NDList, NDList, PairList)} after
* initialization.
*
* @param parameterStore the parameter store
* @param data the input data NDList
* @param labels the input labels NDList
* @param params optional parameters
* @return the output of the forward pass
* @see #forward(ParameterStore, NDList, boolean, PairList)
*/
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList data,
NDList labels,
PairList<String, Object> params) {
return forwardInternal(parameterStore, data, true, params);
}
/** {@inheritDoc} */
@Override
public PairList<String, Shape> describeInput() {
if (!isInitialized()) {
            throw new IllegalStateException(
                    "Parameters of this block are not initialized, "
                            + "please call model.newTrainer and trainer.initialize");
}
return new PairList<>(inputNames, Arrays.asList(inputShapes));
}
/** {@inheritDoc} */
@Override
public void setInitializer(Initializer initializer, Parameter.Type params) {
Predicate<Parameter> predicate = parameter -> parameter.getType().equals(params);
setInitializer(initializer, predicate);
}
/** {@inheritDoc} */
@Override
public void setInitializer(Initializer initializer, String paramName) {
Parameter parameter =
getDirectParameters().values().stream()
.filter(p -> p.getName().equals(paramName))
.findFirst()
.orElseThrow(
() ->
new IllegalArgumentException(
"Could not find parameter " + paramName));
parameter.setInitializer(initializer);
}
/** {@inheritDoc} */
@Override
public void setInitializer(Initializer initializer, Predicate<Parameter> predicate) {
List<Parameter> params = getParameters().values();
for (Parameter param : params) {
if (predicate.test(param)) {
param.setInitializer(initializer);
}
}
}
/** {@inheritDoc} */
@Override
public void initialize(NDManager manager, DataType dataType, Shape... inputShapes) {
beforeInitialize(inputShapes);
// Block inputShape is null or params arrays are null
if (!isInitialized()) {
// Set the shape of parameter to be inputShapes
prepare(inputShapes);
}
for (Parameter parameter : getDirectParameters().values()) {
// Attach arrays to params if params are null; set require gradient if required
parameter.initialize(manager, dataType);
}
initializeChildBlocks(manager, dataType, inputShapes);
}
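    // Typical call sequence (sketch only, not prescriptive): "block" is an assumed concrete Block,
    // "manager" an existing NDManager, and XavierInitializer (ai.djl.training.initializer) is just
    // one possible Initializer.
    //
    //   block.setInitializer(new XavierInitializer(), Parameter.Type.WEIGHT);
    //   block.initialize(manager, DataType.FLOAT32, new Shape(1, 3, 224, 224));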
/**
* Performs any action necessary before initialization. For example, keep the input information
* or verify the layout.
*
* @param inputShapes the expected shapes of the input
*/
protected void beforeInitialize(Shape... inputShapes) {
if (inputNames.isEmpty()) {
// automatically assign input names
inputNames = new ArrayList<>();
for (int i = 0; i < inputShapes.length; ++i) {
inputNames.add("data" + i);
}
}
this.inputShapes = inputShapes;
}
/**
     * Initializes the child blocks of this block. You need to override this method if your subclass
* has child blocks. Used to determine the correct input shapes for child blocks based on the
* requested input shape for this block.
*
* @param manager the manager to use for initialization
* @param dataType the requested data type
* @param inputShapes the expected input shapes for this block
*/
protected void initializeChildBlocks(
NDManager manager, DataType dataType, Shape... inputShapes) {
if (!getChildren().isEmpty()) {
            throw new IllegalStateException(
                    getClass().getSimpleName()
                            + " has child blocks but initializeChildBlocks is not overridden.");
}
}
/**
* Sets the shape of {@link Parameter}s.
*
* @param inputShapes the shapes of inputs
*/
protected void prepare(Shape[] inputShapes) {}
/** {@inheritDoc} */
@Override
public ParameterList getParameters() {
// we accumulate a list of all parameters by starting with a list of the direct parameters
ParameterList allParams = getDirectParameters();
// then we add the parameters of child blocks
for (Pair<String, Block> childPair : getChildren()) {
for (Pair<String, Parameter> paramPair : childPair.getValue().getParameters()) {
// we prepend the name of the child block to the parameter name
allParams.add(childPair.getKey() + "_" + paramPair.getKey(), paramPair.getValue());
}
}
return allParams;
}
/** {@inheritDoc} */
@Override
public boolean isInitialized() {
if (inputShapes == null) {
return false;
}
for (Parameter param : getParameters().values()) {
if (!param.isInitialized()) {
return false;
}
}
return true;
}
/** {@inheritDoc} */
@Override
public void clear() {
getParameters().forEach(param -> param.getValue().close());
}
/** {@inheritDoc} */
@Override
public void cast(DataType dataType) {
throw new UnsupportedOperationException("Not implemented yet.");
}
/** {@inheritDoc} */
@Override
public void saveParameters(DataOutputStream os) throws IOException {
os.write(version);
saveMetadata(os);
for (Parameter parameter : getDirectParameters().values()) {
parameter.save(os);
}
for (Block child : getChildren().values()) {
child.saveParameters(os);
}
}
/** {@inheritDoc} */
@Override
public void loadParameters(NDManager manager, DataInputStream is)
throws IOException, MalformedModelException {
byte loadVersion = is.readByte();
loadMetadata(loadVersion, is);
for (Parameter parameter : getDirectParameters().values()) {
parameter.load(manager, is);
}
for (Block child : getChildren().values()) {
child.loadParameters(manager, is);
}
}
/**
* Override this method to save additional data apart from parameter values.
*
* <p>This default implementation saves the currently set input shapes.
*
* @param os the non-null output stream the parameter values and metadata are written to
* @throws IOException saving failed
*/
protected void saveMetadata(DataOutputStream os) throws IOException {
saveInputShapes(os);
}
/**
 * Override this to load additional metadata along with the parameter values.
 *
 * <p>If you override {@link AbstractBlock#saveMetadata(DataOutputStream)} or need to provide
 * backward compatibility with older binary formats, you probably need to override this method.
 * This default implementation checks whether the version number matches; if it does not, a
 * {@link MalformedModelException} is thrown. After that, it restores the input shapes.
*
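 * <p>A minimal sketch of a backward-compatible override (the older version number and its
 * on-disk format below are purely illustrative):
 *
 * <pre>{@code
 * protected void loadMetadata(byte loadVersion, DataInputStream is)
 *         throws IOException, MalformedModelException {
 *     if (loadVersion == version) {
 *         readInputShapes(is);
 *     } else if (loadVersion == 1) {
 *         // hypothetical older format that stored no input shapes; nothing extra to read
 *     } else {
 *         throw new MalformedModelException("Unsupported encoding version: " + loadVersion);
 *     }
 * }
 * }</pre>
 *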
* @param loadVersion the version used for loading this metadata.
* @param is the input stream we are loading from
* @throws IOException loading failed
* @throws MalformedModelException data can be loaded but has wrong format
*/
protected void loadMetadata(byte loadVersion, DataInputStream is)
throws IOException, MalformedModelException {
if (loadVersion != version) {
throw new MalformedModelException(
"Cannot load parameters for "
+ this.getClass().getCanonicalName()
+ ", expected version "
+ version
+ ", got "
+ loadVersion
+ ".");
}
readInputShapes(is);
}
protected void saveInputShapes(DataOutputStream os) throws IOException {
os.writeInt(inputShapes.length);
for (Shape shape : inputShapes) {
os.write(shape.getEncoded());
}
}
protected void readInputShapes(DataInputStream is) throws IOException {
int len = is.readInt();
Shape[] shapes = new Shape[len];
for (int i = 0; i < len; ++i) {
shapes[i] = Shape.decode(is);
}
if (inputShapes == null) {
// load inputShapes from parameter file if Block has not been initialized
inputShapes = shapes;
}
}
/** {@inheritDoc} */
@Override
public String toString() {
return Blocks.describe(this, null, 0);
}
/** {@inheritDoc} */
@Override
public Shape[] getInputShapes() {
if (!isInitialized()) {
throw new IllegalStateException(
"getInputShapes() can only be called after the initialization process");
}
return inputShapes;
}
/** {@inheritDoc} */
@Override
public DataType[] getOutputDataTypes() {
return outputDataTypes;
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/AbstractBlock.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import ai.djl.training.ParameterStore;
import ai.djl.util.Pair;
import ai.djl.util.PairList;
import java.io.DataOutputStream;
import java.util.LinkedHashMap;
import java.util.Locale;
import java.util.function.Function;
/**
* {@code AbstractBlock} is an abstract implementation of {@link Block}.
*
* <p>It is recommended that all {@code Block} classes that have children extend the {@code
* AbstractBlock}.
*
* <p>To create your own blocks, you need to do the following:
*
* <ul>
* <li>Define a version for serializing parameter and metadata and pass it to the parent
* constructor
* <li>Use {@link AbstractBlock#addParameter(Parameter)} to add parameters to your block in the
* constructor if necessary.
* <li>Use {@link AbstractBlock#addChildBlock(String, Block)} to add child blocks if necessary.
* <li>Override {@link Block#getOutputShapes(Shape[])} to determine the shape of your custom
* block's output based on the input it will receive.
* <li>Override {@link AbstractBlock#initializeChildBlocks(NDManager, DataType, Shape...)} if you
* added child blocks to initialize them based on the input shape your block will receive. You
* can skip this if your block does not contain child blocks
* <li>Override {@link AbstractBlock#forward(ParameterStore, NDList, boolean, PairList)} to
* implement the computation of your block
* <li>IFF you need to save data apart from the parameter values of your block, you need to
* override {@link AbstractBlock#saveMetadata(DataOutputStream)} and {@link
* AbstractBlock#loadMetadata(byte, java.io.DataInputStream)}. If you do not need to save or
* load any state other than parameters in your block, you can skip this.
* </ul>
*
* <p>If you use {@link AbstractBlock#addParameter(Parameter)} to add parameters, you have to take
* care of parameter initialization yourself. In this case, you need to setShape to your parameters
* if you know the shape of Parameter or you can implement prepare to setShape when you see the
* input shape.
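 *
 * <p>A minimal sketch of a custom block with one direct parameter (the class name, parameter
 * name, and shapes below are illustrative only):
 *
 * <pre>{@code
 * public class ScaleBlock extends AbstractBlock {
 *     private Parameter scale;
 *
 *     public ScaleBlock() {
 *         super((byte) 1);
 *         scale = addParameter(
 *                 Parameter.builder().setName("scale").setType(Parameter.Type.WEIGHT).build());
 *     }
 *
 *     protected void prepare(Shape[] inputShapes) {
 *         // the parameter shape becomes known once the input shapes are known
 *         scale.setShape(new Shape(1));
 *     }
 *
 *     protected NDList forwardInternal(
 *             ParameterStore ps, NDList inputs, boolean training, PairList<String, Object> params) {
 *         NDArray w = ps.getValue(scale, inputs.head().getDevice(), training);
 *         return new NDList(inputs.singletonOrThrow().mul(w));
 *     }
 *
 *     public Shape[] getOutputShapes(Shape[] inputShapes) {
 *         return inputShapes;
 *     }
 * }
 * }</pre>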
*/
// Using LinkedHashMap instead of Map is intentional: we want to make sure that consumers
// of this API know the children and parameters are always iterated over in insertion order.
// LinkedHashMap provides this guarantee, Map does not.
@SuppressWarnings("PMD.LooseCoupling")
public abstract class AbstractBlock extends AbstractBaseBlock {
/**
* All direct children of this Block. Keys are names of the blocks.
*
* <p>Use the {@link AbstractBlock#addChildBlock(String, Block)} method to add children. All
 * children in this map are automatically loaded / saved.
*/
protected BlockList children = new BlockList();
/**
 * All direct parameters of this Block. Keys are names of the parameters.
 *
 * <p>Use the {@link AbstractBlock#addParameter(Parameter)} method to add parameters. All
* parameters in this map are automatically loaded / saved.
*/
protected LinkedHashMap<String, Parameter> parameters = new LinkedHashMap<>();
/** Constructs a new {@code AbstractBlock} instance. */
public AbstractBlock() {}
/**
* Builds an empty block with the given version for parameter serialization.
*
* @param version the version to use for parameter serialization.
*/
public AbstractBlock(byte version) {
super(version);
}
/**
* Use this to add a child block to this block.
*
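 * <p>A typical usage sketch inside a block constructor (the child name, member field, and layer
 * size are illustrative):
 *
 * <pre>{@code
 * this.linear = addChildBlock("linear", Linear.builder().setUnits(10).build());
 * }</pre>
 *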
* @param name Name of the block, must not be null.
* @param block The block, must not be null.
* @param <B> The type of block
* @return the block given as a parameter - that way the block can be created and reassigned to
* a member variable more easily.
*/
protected final <B extends Block> B addChildBlock(String name, B block) {
int childNumber = children.size() + 1;
children.add(String.format(Locale.ROOT, "%02d%s", childNumber, name), block);
return block;
}
/**
* Adds a {@link LambdaBlock} as a child block to this block.
*
* @param name Name of the block, must not be null.
 * @param f the function that forms the {@link LambdaBlock}
* @return the child block
*/
protected LambdaBlock addChildBlock(String name, Function<NDList, NDList> f) {
return addChildBlock(name, new LambdaBlock(f, name));
}
/**
* Adds a {@link LambdaBlock#singleton(Function)} as a child block to this block.
*
* @param name Name of the block, must not be null.
 * @param f the function that forms the {@link LambdaBlock}
* @return the child block
* @see LambdaBlock#singleton(Function)
*/
protected final LambdaBlock addChildBlockSingleton(String name, Function<NDArray, NDArray> f) {
return addChildBlock(name, LambdaBlock.singleton(f, name));
}
/**
* Adds a parameter to this block. If parameters are added with this method, initialization of
 * the parameter works out of the box.
*
* @param <P> the specific parameter subclass
* @param parameter the parameter to add, not null
* @return the parameter passed as arguments to make it easier to create and assign parameters
* in one line
*/
protected final <P extends Parameter> P addParameter(P parameter) {
parameters.put(parameter.getName(), parameter);
return parameter;
}
/** {@inheritDoc} */
@Override
public BlockList getChildren() {
BlockList defensiveCopy = new BlockList(children.size());
for (Pair<String, Block> entry : children) {
defensiveCopy.add(entry);
}
return defensiveCopy;
}
/** {@inheritDoc} */
@Override
public ParameterList getDirectParameters() {
return new ParameterList(parameters);
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/AbstractSymbolBlock.java
|
/*
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn;
import ai.djl.ndarray.types.Shape;
/** {@code AbstractSymbolBlock} is an abstract implementation of {@link SymbolBlock}. */
public abstract class AbstractSymbolBlock extends AbstractBaseBlock implements SymbolBlock {
/** Constructs a new {@code AbstractSymbolBlock} instance. */
public AbstractSymbolBlock() {}
/**
* Builds an empty block with the given version for parameter serialization.
*
* @param version the version to use for parameter serialization.
*/
public AbstractSymbolBlock(byte version) {
super(version);
}
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(Shape[] inputShapes) {
throw new UnsupportedOperationException("not implement!");
}
/** {@inheritDoc} */
@Override
public BlockList getChildren() {
return new BlockList();
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/Activation.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDArrays;
import ai.djl.ndarray.NDList;
import ai.djl.nn.core.Prelu;
/**
* Utility class that provides activation functions and blocks.
*
* <p>Many networks make use of the {@link ai.djl.nn.core.Linear} block and other similar linear
* transformations. However, any number of linear transformations that are composed will only result
 * in a different linear transformation (\(f(x) = W_2(W_1x) = (W_2W_1)x = W_{combined}x\)). In
* order to represent non-linear data, non-linear functions called activation functions are
* interspersed between the linear transformations. This allows the network to represent non-linear
* functions of increasing complexity.
*
* <p>See <a href="https://en.wikipedia.org/wiki/Activation_function">wikipedia</a> for more
* details.
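 *
 * <p>A minimal usage sketch (the values are illustrative):
 *
 * <pre>{@code
 * try (NDManager manager = NDManager.newBaseManager()) {
 *     NDArray x = manager.create(new float[] {-1f, 0f, 2f});
 *     NDArray y = Activation.relu(x); // [0, 0, 2]
 *     Block reluBlock = Activation.reluBlock(); // the same function as a composable Block
 * }
 * }</pre>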
*/
public final class Activation {
private Activation() {}
/**
* Applies ReLU activation on the input {@link NDArray}.
*
* <p>ReLU is defined by: \( y = max(0, x) \)
*
* @param array the input {@link NDArray}
* @return the {@link NDArray} after applying ReLU activation
*/
public static NDArray relu(NDArray array) {
return array.getNDArrayInternal().relu();
}
/**
* Applies ReLU activation on the input singleton {@link NDList}.
*
* <p>ReLU is defined by: \( y = max(0, x) \)
*
* @param arrays the input singleton {@link NDList}
* @return the singleton {@link NDList} after applying ReLU activation
*/
public static NDList relu(NDList arrays) {
return new NDList(arrays.singletonOrThrow().getNDArrayInternal().relu());
}
/**
* Applies ReLU6 activation on the input {@link NDArray}.
*
* <p>ReLU6 is defined by: \( y = min(6,max(0, x)) \)
*
 * @param array the input {@link NDArray}
* @return the {@link NDArray} after applying ReLU6 activation
*/
public static NDArray relu6(NDArray array) {
return NDArrays.minimum(6, array.getNDArrayInternal().relu());
}
/**
* Applies ReLU6 activation on the input singleton {@link NDList}.
*
* <p>ReLU6 is defined by: \( y = min(6,max(0, x)) \)
*
* @param arrays the input singleton {@link NDList}
* @return the singleton {@link NDList} after applying ReLU6 activation
*/
public static NDList relu6(NDList arrays) {
return new NDList(relu6(arrays.singletonOrThrow()));
}
/**
* Applies Sigmoid activation on the input {@link NDArray}.
*
* <p>Sigmoid is defined by: \( y = 1 / (1 + e^{-x}) \)
*
* @param array the input {@link NDArray}
* @return the {@link NDArray} after applying Sigmoid activation
*/
public static NDArray sigmoid(NDArray array) {
return array.getNDArrayInternal().sigmoid();
}
/**
* Applies Sigmoid activation on the input singleton {@link NDList}.
*
* <p>Sigmoid is defined by: \( y = 1 / (1 + e^{-x}) \)
*
* @param arrays the input singleton {@link NDList}
* @return the singleton {@link NDList} after applying Sigmoid activation
*/
public static NDList sigmoid(NDList arrays) {
return new NDList(arrays.singletonOrThrow().getNDArrayInternal().sigmoid());
}
/**
* Applies Tanh activation on the input {@link NDArray}.
*
* <p>Tanh is defined by: \( y = (e^x - e^{-x}) / (e^x + e^{-x}) \)
*
* @param array the input {@link NDArray}
* @return the {@link NDArray} after applying Tanh activation
*/
public static NDArray tanh(NDArray array) {
return array.getNDArrayInternal().tanh();
}
/**
* Applies Tanh activation on the input singleton {@link NDList}.
*
* <p>Tanh is defined by: \( y = (e^x - e^{-x}) / (e^x + e^{-x}) \)
*
* @param arrays the input singleton {@link NDList}
* @return the singleton {@link NDList} after applying tanh activation
*/
public static NDList tanh(NDList arrays) {
return new NDList(arrays.singletonOrThrow().getNDArrayInternal().tanh());
}
/**
* Applies softPlus activation on the input {@link NDArray}.
*
* <p>softPlus is defined by: \( y = log(1 + e^x) \)
*
* @param array the input {@link NDArray}
 * @return the {@link NDArray} after applying softPlus activation
*/
public static NDArray softPlus(NDArray array) {
return array.getNDArrayInternal().softPlus();
}
/**
* Applies softPlus activation on the input singleton {@link NDList}.
*
* <p>softPlus is defined by: \( y = log(1 + e^x) \)
*
* @param arrays the input singleton {@link NDList}
 * @return the singleton {@link NDList} after applying softPlus activation
*/
public static NDList softPlus(NDList arrays) {
return new NDList(arrays.singletonOrThrow().getNDArrayInternal().softPlus());
}
/**
* Applies softSign activation on the input {@link NDArray}.
*
* <p>softPlus is defined by: \( y = x / 1 + |x| \)
*
* @param array the input {@link NDArray}
* @return the {@link NDArray} after applying soft ReLU activation
*/
public static NDArray softSign(NDArray array) {
return array.getNDArrayInternal().softSign();
}
/**
 * Applies softSign activation on the input singleton {@link NDList}.
 *
 * <p>softSign is defined by: \( y = x / (1 + |x|) \)
 *
 * @param arrays the input singleton {@link NDList}
 * @return the singleton {@link NDList} after applying softSign activation
*/
public static NDList softSign(NDList arrays) {
return new NDList(arrays.singletonOrThrow().getNDArrayInternal().softSign());
}
/**
* Applies Leaky ReLU activation on the input {@link NDArray}.
*
* <p>Leaky ReLU is defined by: \( y = x \gt 0 ? x : alpha * x \)
*
* @param array the input {@link NDArray}
* @param alpha the slope for the activation
* @return the {@link NDArray} after applying Leaky ReLU activation
*/
public static NDArray leakyRelu(NDArray array, float alpha) {
return array.getNDArrayInternal().leakyRelu(alpha);
}
/**
* Applies Leaky ReLU activation on the input singleton {@link NDList}.
*
* <p>Leaky ReLU is defined by: \( y = x \gt 0 ? x : alpha * x \)
*
* @param arrays the input singleton {@link NDList}
* @param alpha the slope for the activation
* @return the singleton {@link NDList} after applying Leaky ReLU activation
*/
public static NDList leakyRelu(NDList arrays, float alpha) {
return new NDList(arrays.singletonOrThrow().getNDArrayInternal().leakyRelu(alpha));
}
/**
* Applies ELU activation on the input {@link NDArray}.
*
* <p>ELU is defined by: \( y = x \gt 0 ? x : alpha * (e^x - 1) \)
*
* @param array the input {@link NDArray}
* @param alpha the slope for the activation
* @return the {@link NDArray} after applying ELU activation
*/
public static NDArray elu(NDArray array, float alpha) {
return array.getNDArrayInternal().elu(alpha);
}
/**
* Applies ELU(Exponential Linear Unit) activation on the input singleton {@link NDList}.
*
* <p>ELU is defined by: \( y = x \gt 0 ? x : alpha * (e^x - 1) \)
*
* @param arrays the input singleton {@link NDList}
* @param alpha the slope for the activation
* @return the singleton {@link NDList} after applying ELU activation
*/
public static NDList elu(NDList arrays, float alpha) {
return new NDList(arrays.singletonOrThrow().getNDArrayInternal().elu(alpha));
}
/**
* Applies Scaled ELU activation on the input {@link NDArray}.
*
* <p>Scaled ELU is defined by: \( y = lambda * (x \gt 0 ? x : alpha * (e^x - 1))\) where
* \(lambda = 1.0507009873554804934193349852946\) and \(alpha =
* 1.6732632423543772848170429916717\)
*
* @param array the input {@link NDArray}
* @return the {@link NDArray} after applying Scale ELU activation
*/
public static NDArray selu(NDArray array) {
return array.getNDArrayInternal().selu();
}
/**
* Applies Scaled ELU activation on the input singleton {@link NDList}.
*
* <p>Scaled ELU is defined by: \( y = lambda * (x \gt 0 ? x : alpha * (e^x - 1))\) where
* \(lambda = 1.0507009873554804934193349852946\) and \(alpha =
* 1.6732632423543772848170429916717 \)
*
* @param arrays the input singleton {@link NDList}
* @return the singleton {@link NDList} after applying Scaled ELU activation
*/
public static NDList selu(NDList arrays) {
return new NDList(arrays.singletonOrThrow().getNDArrayInternal().selu());
}
/**
* Applies GELU(Gaussian Error Linear Unit) activation on the input {@link NDArray}.
*
* @param array the input {@link NDArray}
* @return the {@link NDArray} after applying GELU activation
*/
public static NDArray gelu(NDArray array) {
return array.getNDArrayInternal().gelu();
}
/**
* Applies GELU(Gaussian Error Linear Unit) activation on the input singleton {@link NDList}.
*
* @param arrays the input singleton {@link NDList}
* @return the singleton {@link NDList} after applying GELU activation
*/
public static NDList gelu(NDList arrays) {
return new NDList(arrays.singletonOrThrow().getNDArrayInternal().gelu());
}
/**
* Applies Swish activation on the input {@link NDArray}.
*
* <p>Swish is defined as \(y = x * sigmoid(beta * x)\)
*
* @param array the input {@link NDArray}
* @param beta a hyper-parameter
* @return the {@link NDArray} after applying Swish activation
*/
public static NDArray swish(NDArray array, float beta) {
return array.getNDArrayInternal().swish(beta);
}
/**
 * Applies Swish activation on the input singleton {@link NDList}.
*
* <p>Swish is defined as \(y = x * sigmoid(beta * x)\)
*
* @param arrays the input singleton {@link NDList}
* @param beta a hyper-parameter
* @return the singleton {@link NDList} after applying Swish activation
*/
public static NDList swish(NDList arrays, float beta) {
return new NDList(arrays.singletonOrThrow().getNDArrayInternal().swish(beta));
}
/**
* Applies Mish activation on the input {@link NDArray}.
*
* <p>Mish is defined as \(y = x * tanh(ln(1 + e^x)\) defined by Diganta Misra in his paper
* Mish: A Self Regularized Non-Monotonic Neural Activation Function
*
* @param array the input {@link NDArray}
* @return the {@link NDArray} after applying Mish activation
*/
public static NDArray mish(NDArray array) {
return array.getNDArrayInternal().mish();
}
/**
* Applies Mish activation on the input singleton {@link NDList}.
*
* <p>Mish is defined as \(y = x * tanh(ln(1 + e^x)\) defined by Diganta Misra in his paper
* Mish: A Self Regularized Non-Monotonic Neural Activation Function
*
* @param arrays the input singleton {@link NDList}
* @return the singleton {@link NDList} after applying Mish activation
*/
public static NDList mish(NDList arrays) {
return new NDList(arrays.singletonOrThrow().getNDArrayInternal().mish());
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #relu(NDList) ReLU} activation function
* in its forward function.
*
* @return the {@link LambdaBlock} that applies the {@link #relu(NDList) ReLU} activation
* function
*/
public static Block reluBlock() {
return new LambdaBlock(Activation::relu, "ReLU");
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #relu6(NDList) ReLU6} activation
* function in its forward function.
*
 * @return the {@link LambdaBlock} that applies the {@link #relu6(NDList) ReLU6} activation
* function
*/
public static Block relu6Block() {
return new LambdaBlock(Activation::relu6, "ReLU6");
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #sigmoid(NDList) Sigmoid} activation
* function in its forward function.
*
* @return the {@link LambdaBlock} that applies the {@link #sigmoid(NDList) Sigmoid} activation
* function
*/
public static Block sigmoidBlock() {
return new LambdaBlock(Activation::sigmoid, "sigmoid");
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #tanh(NDList) Tanh} activation function
* in its forward function.
*
* @return the {@link LambdaBlock} that applies the {@link #tanh(NDList) Tanh} activation
* function
*/
public static Block tanhBlock() {
return new LambdaBlock(Activation::tanh, "Tanh");
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #softPlus(NDList)} activation function
* in its forward function.
*
* @return the {@link LambdaBlock} that applies the {@link #softPlus(NDList)} activation
* function
*/
public static Block softPlusBlock() {
return new LambdaBlock(Activation::softPlus, "softPlus");
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #softSign(NDList)} activation function
* in its forward function.
*
* @return the {@link LambdaBlock} that applies the {@link #softSign(NDList)} activation
* function
*/
public static Block softSignBlock() {
return new LambdaBlock(Activation::softSign, "softSign");
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #leakyRelu(NDList, float) LeakyReLU}
* activation function in its forward function.
*
* @param alpha the slope for the activation
* @return the {@link LambdaBlock} that applies the {@link #leakyRelu(NDList, float) LeakyReLU}
* activation function
*/
public static Block leakyReluBlock(float alpha) {
return new LambdaBlock(arrays -> Activation.leakyRelu(arrays, alpha), "LeakyReLU");
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #elu(NDList, float) ELU} activation
* function in its forward function.
*
* @param alpha the slope for the activation
* @return the {@link LambdaBlock} that applies the {@link #elu(NDList, float) ELU} activation
* function
*/
public static Block eluBlock(float alpha) {
return new LambdaBlock(arrays -> Activation.elu(arrays, alpha), "ELU");
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #selu(NDList) SELU} activation function
* in its forward function.
*
* @return the {@link LambdaBlock} that applies the {@link #selu(NDList) SELU} activation
* function
*/
public static Block seluBlock() {
return new LambdaBlock(Activation::selu, "SELU");
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #gelu(NDList) GELU} activation function
* in its forward function.
*
* @return the {@link LambdaBlock} that applies the {@link #gelu(NDList) GELU} activation
* function
*/
public static Block geluBlock() {
return new LambdaBlock(Activation::gelu, "GELU");
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #swish(NDList, float) Swish} activation
* function in its forward function.
*
* @param beta a hyper-parameter
* @return the {@link LambdaBlock} that applies the {@link #swish(NDList, float) Swish}
* activation function
*/
public static Block swishBlock(float beta) {
return new LambdaBlock(arrays -> Activation.swish(arrays, beta), "Swish");
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #mish(NDList) Mish} activation function
* in its forward function.
*
* @return the {@link LambdaBlock} that applies the {@link #mish(NDList) Mish} activation
* function
*/
public static Block mishBlock() {
return new LambdaBlock(Activation::mish, "Mish");
}
/**
* Returns a {@link Prelu} block.
*
* @return a {@link Prelu} block
*/
public static Block preluBlock() {
return new Prelu();
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/Block.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn;
import ai.djl.MalformedModelException;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.LayoutType;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.convolutional.Conv2d;
import ai.djl.training.ParameterStore;
import ai.djl.training.initializer.Initializer;
import ai.djl.util.PairList;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.function.Predicate;
/**
* A {@code Block} is a composable function that forms a neural network.
*
* <p>Blocks serve a purpose similar to functions that convert an input NDList to an output NDList.
* They can represent single operations, parts of a neural network, and even the whole neural
* network. What makes blocks special is that they contain a number of parameters that are used in
* their function and are trained during deep learning. As these parameters are trained, the
* functions represented by the blocks get more and more accurate. Each block consists of the
* following components:
*
* <ul>
* <li>Forward function
* <li>Parameters
* <li>Child blocks
* </ul>
*
* <p>The core purpose of a {@code Block} is to perform an operation on the inputs, and return an
* output. It is defined in the {@link #forward(ParameterStore, NDList, boolean) forward} method.
* The forward function could be defined explicitly in terms of parameters or implicitly and could
* be a combination of the functions of the child blocks.
*
* <p>The parameters of a {@code Block} are instances of {@link Parameter} which are required for
* the operation in the forward function. For example, in a {@link Conv2d} block, the parameters are
* {@code weight} and {@code bias}. During training, these parameters are updated to reflect the
* training data, and that forms the crux of learning.
*
* <p>When building these block functions, the easiest way is to use composition. Similar to how
* functions are built by calling other functions, blocks can be built by combining other blocks. We
* refer to the containing block as the parent and the sub-blocks as the children.
*
* <p>We provide helpers for creating two common structures of blocks. For blocks that call children
 * in a chain, use {@link SequentialBlock}. If a block calls all of its children in parallel and
* then combines their results, use {@link ParallelBlock}. For blocks that do not fit these
* structures, you should directly extend the {@link AbstractBlock} class.
*
* <p>A block does not necessarily have to have children and parameters. For example, {@link
* SequentialBlock}, and {@link ParallelBlock} don't have any parameters, but do have child blocks.
* Similarly, {@link Conv2d} does not have children, but has parameters. There can be special cases
* where blocks have neither parameters nor children. One such example is {@link LambdaBlock}.
* {@link LambdaBlock} takes in a function, and applies that function to its input in the {@link
* #forward(ParameterStore, NDList, boolean) forward} method.
*
* <p>Now that we understand the components of the block, we can explore what the block really
* represents. A block combined with the recursive, hierarchical structure of its children forms a
* network. It takes in the input to the network, performs its operation, and returns the output of
* the network. When a block is added as a child of another block, it becomes a sub-network of that
* block.
*
* <p>The life-cycle of a block has 3 stages:
*
* <ul>
* <li>Construction
* <li>Initialization
* <li>Training
* </ul>
*
* <p>Construction is the process of building the network. During this stage, blocks are created
 * with appropriate arguments and the desired network is built by creating a hierarchy of
* parent and child blocks. At this stage, it is a bare-bones network. The parameter values are not
* created and the shapes of the inputs are not known. The block is ready for initialization.
*
* <p>Initialization is the process of initializing all the parameters of the block and its
* children, according to the inputs expected. It involves setting an {@link Initializer}, deciding
* the {@link DataType}, and the shapes of the input. The parameter arrays are {@link
* ai.djl.ndarray.NDArray} that are initialized according to the {@link Initializer} set. At this
* stage, the block is expecting a specific type of input, and is ready to be trained.
*
* <p>Training is when we are starting feeding the training data as input to the block, get the
* output, and try to update parameters to learn. For more information about training, please refer
* the javadoc at {@link ai.djl.training.Trainer}. At the end of training, a block represents a
* fully-trained model.
*
* <p>It is also possible to freeze parameters and blocks to avoid them being trained. When loading
 * models or building blocks with pretrained data, they default to being frozen. If you wish to
* further refine these elements, use {@link Block#freezeParameters(boolean)} to unfreeze them.
*
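 * <p>A minimal sketch of this life-cycle outside of a {@link ai.djl.training.Trainer} (the block,
 * shapes, and initializer are illustrative; {@code Linear} and {@code NormalInitializer} come from
 * {@code ai.djl.nn.core} and {@code ai.djl.training.initializer}):
 *
 * <pre>{@code
 * try (NDManager manager = NDManager.newBaseManager()) {
 *     // construction
 *     Block block = new SequentialBlock()
 *             .add(Linear.builder().setUnits(10).build())
 *             .add(Activation::relu);
 *     // initialization
 *     block.setInitializer(new NormalInitializer(), Parameter.Type.WEIGHT);
 *     block.initialize(manager, DataType.FLOAT32, new Shape(1, 4));
 *     // forward pass
 *     NDList output = block.forward(
 *             new ParameterStore(manager, false),
 *             new NDList(manager.ones(new Shape(1, 4))),
 *             false);
 * }
 * }</pre>
 *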
* @see <a
* href="https://docs.djl.ai/master/docs/demos/jupyter/tutorial/01_create_your_first_network.html">this
* tutorial on creating your first network</a>
* @see <a href="https://d2l.djl.ai/chapter_deep-learning-computation/model-construction.html">The
* D2L chapter on blocks</a> and <a
* href="https://d2l.djl.ai/chapter_deep-learning-computation/custom-layer.html">blocks with
* direct parameters</a>
*/
public interface Block {
/**
* Applies the operating function of the block once. This method should be called only on blocks
* that are initialized.
*
* @param parameterStore the parameter store
* @param inputs the input NDList
* @param training true for a training forward pass (turn on dropout and layerNorm)
* @return the output of the forward pass
*/
default NDList forward(ParameterStore parameterStore, NDList inputs, boolean training) {
return forward(parameterStore, inputs, training, null);
}
/**
* Applies the operating function of the block once. This method should be called only on blocks
* that are initialized.
*
* @param parameterStore the parameter store
* @param inputs the input NDList
* @param training true for a training forward pass (turn on dropout and layerNorm)
* @param params optional parameters
* @return the output of the forward pass
*/
NDList forward(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params);
/**
* A forward call using both training data and labels.
*
* <p>Within this forward call, it can be assumed that training is true.
*
* @param parameterStore the parameter store
* @param data the input data NDList
* @param labels the input labels NDList
* @param params optional parameters
* @return the output of the forward pass
* @see #forward(ParameterStore, NDList, boolean, PairList)
*/
default NDList forward(
ParameterStore parameterStore,
NDList data,
NDList labels,
PairList<String, Object> params) {
return forward(parameterStore, data, true, params);
}
/**
 * Sets an {@link Initializer} to all the parameters of the given type in the block.
 *
 * @param initializer the initializer to set
 * @param type the parameter type on which to set the initializer
*/
void setInitializer(Initializer initializer, Parameter.Type type);
/**
* Sets an {@link Initializer} to the specified direct parameter of the block, overriding the
* initializer of the parameter, if already set.
*
* @param initializer the initializer to be set
* @param paramName the name of the parameter
*/
void setInitializer(Initializer initializer, String paramName);
/**
 * Sets an {@link Initializer} to all the parameters in the block that match the predicate.
*
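 * <p>For example, to initialize only the weight parameters (the initializer and name test are
 * illustrative):
 *
 * <pre>{@code
 * block.setInitializer(new NormalInitializer(), p -> p.getName().endsWith("weight"));
 * }</pre>
 *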
* @param initializer the initializer to be set
 * @param predicate the predicate that selects the parameters to set the initializer on
*/
void setInitializer(Initializer initializer, Predicate<Parameter> predicate);
/**
 * Initializes the parameters of the block, sets requires-gradient where needed, and infers the
 * block's input shapes. This method must be called before calling {@code forward}.
*
* @param manager the NDManager to initialize the parameters
* @param dataType the datatype of the parameters
* @param inputShapes the shapes of the inputs to the block
*/
void initialize(NDManager manager, DataType dataType, Shape... inputShapes);
/**
 * Returns whether the block is initialized (the block has input shapes and all parameters have
 * non-null arrays).
*
* @return whether the block is initialized
*/
boolean isInitialized();
/**
 * Guaranteed to throw an exception; not yet implemented.
*
* @param dataType the data type to cast to
* @throws UnsupportedOperationException always
*/
void cast(DataType dataType);
/**
* Closes all the parameters of the block. All the updates made during training will be lost.
*/
void clear();
/**
* Returns a {@link PairList} of input names, and shapes.
*
* @return the {@link PairList} of input names, and shapes
*/
PairList<String, Shape> describeInput();
/**
* Returns a list of all the children of the block.
*
* @return the list of child blocks
*/
BlockList getChildren();
/**
* Returns a list of all the direct parameters of the block.
*
* @return the list of {@link Parameter}
*/
ParameterList getDirectParameters();
/**
* Returns a list of all the parameters of the block, including the parameters of its children
* fetched recursively.
*
* @return the list of all parameters of the block
*/
ParameterList getParameters();
/**
* Returns the expected output shapes of the block for the specified input shapes.
*
* @param inputShapes the shapes of the inputs
* @return the expected output shapes of the block
*/
Shape[] getOutputShapes(Shape[] inputShapes);
/**
* Returns the expected output shapes of the block for the specified input shapes.
*
* @param inputShapes the shapes of the inputs
* @param inputDataTypes the datatypes of the inputs
* @return the expected output shapes of the block
*/
default Shape[] getOutputShapes(Shape[] inputShapes, DataType[] inputDataTypes) {
return getOutputShapes(inputShapes);
}
/**
* Returns the input shapes of the block. The input shapes are only available after the block is
* initialized, otherwise an {@link IllegalStateException} is thrown.
*
* @return the input shapes of the block
*/
Shape[] getInputShapes();
/**
 * Returns the output data types of the block.
 *
 * @return the output data types of the block
*/
DataType[] getOutputDataTypes();
/**
* Writes the parameters of the block to the given outputStream.
*
* @param os the outputstream to save the parameters to
* @throws IOException if an I/O error occurs
*/
void saveParameters(DataOutputStream os) throws IOException;
/**
* Loads the parameters from the given input stream.
*
* @param manager an NDManager to create the parameter arrays
 * @param is the input stream that streams the parameter values
* @throws IOException if an I/O error occurs
* @throws MalformedModelException if the model file is corrupted or unsupported
*/
void loadParameters(NDManager manager, DataInputStream is)
throws IOException, MalformedModelException;
/**
* Freezes or unfreezes all parameters inside the block for training.
*
* @param freeze true if the parameter should be frozen
* @see Parameter#freeze(boolean)
*/
default void freezeParameters(boolean freeze) {
for (Parameter parameter : getParameters().values()) {
parameter.freeze(freeze);
}
}
/**
* Freezes or unfreezes all parameters inside the block that pass the predicate.
*
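 * <p>For example, to freeze everything except bias parameters (the name test is illustrative):
 *
 * <pre>{@code
 * block.freezeParameters(true, p -> !p.getName().endsWith("bias"));
 * }</pre>
 *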
* @param freeze true to mark as frozen rather than unfrozen
 * @param pred the predicate that selects which parameters to freeze or unfreeze
* @see Parameter#freeze(boolean)
*/
default void freezeParameters(boolean freeze, Predicate<Parameter> pred) {
for (Parameter parameter : getParameters().values()) {
if (pred.test(parameter)) {
parameter.freeze(freeze);
}
}
}
/**
* Validates that actual layout matches the expected layout.
*
* @param expectedLayout the expected layout
* @param actualLayout the actual Layout
* @throws UnsupportedOperationException if the actual layout does not match the expected layout
*/
static void validateLayout(LayoutType[] expectedLayout, LayoutType[] actualLayout) {
if (actualLayout.length != expectedLayout.length) {
throw new UnsupportedOperationException(
"Expected layout: "
+ LayoutType.toString(expectedLayout)
+ ", but got: "
+ LayoutType.toString(actualLayout));
}
for (int i = 0; i < actualLayout.length; i++) {
if (actualLayout[i] != LayoutType.UNKNOWN && actualLayout[i] != expectedLayout[i]) {
throw new UnsupportedOperationException(
"Expected layout: "
+ LayoutType.toString(expectedLayout)
+ ", but got: "
+ LayoutType.toString(actualLayout));
}
}
}
/**
* Returns a map of all the custom metadata of the block.
*
 * @return the map of custom metadata
*/
default Map<String, String> getCustomMetadata() {
return Collections.emptyMap();
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/BlockFactory.java
|
/*
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn;
import ai.djl.Model;
import ai.djl.repository.zoo.ModelZoo;
import java.io.IOException;
import java.io.Serializable;
import java.nio.file.Path;
import java.util.Map;
/**
 * A block factory is a component that standardizes the block creation and saving procedure. The
 * block factory design is intended to bypass the serialization of blocks. This class can be used
 * by {@link ModelZoo} or DJL Serving to recover a block in its uninitialized state. Users should
 * combine it with {@code Block#loadParameters} to restore the block with all its parameters.
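 *
 * <p>A minimal sketch of a factory (the class name and layer sizes are illustrative; a real
 * factory would typically read them from {@code arguments}):
 *
 * <pre>{@code
 * public class MlpBlockFactory implements BlockFactory {
 *     private static final long serialVersionUID = 1L;
 *
 *     public Block newBlock(Model model, Path modelPath, Map<String, ?> arguments) {
 *         return new SequentialBlock()
 *                 .add(Linear.builder().setUnits(64).build())
 *                 .add(Activation::relu)
 *                 .add(Linear.builder().setUnits(10).build());
 *     }
 * }
 * }</pre>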
*/
public interface BlockFactory extends Serializable {
/**
* Constructs the uninitialized block.
*
* @param model the model of the block
* @param modelPath the directory of the model location
* @param arguments the block creation arguments
* @return the uninitialized block
 * @throws IOException if an I/O operation fails while creating the block
*/
Block newBlock(Model model, Path modelPath, Map<String, ?> arguments) throws IOException;
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/BlockList.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn;
import ai.djl.util.Pair;
import ai.djl.util.PairList;
import java.util.List;
import java.util.Map;
/** Represents a set of names and Blocks. */
public class BlockList extends PairList<String, Block> {
/** Creates an empty {@code BlockList}. */
public BlockList() {}
/**
* Constructs an empty {@code BlockList} with the specified initial capacity.
*
* @param initialCapacity the initial capacity of the list
* @throws IllegalArgumentException if the specified initial capacity is negative
*/
public BlockList(int initialCapacity) {
super(initialCapacity);
}
/**
* Constructs a {@code BlockList} containing the elements of the specified keys and values.
*
* @param keys the key list containing the elements to be placed into this {@code BlockList}
* @param values the value list containing the elements to be placed into this {@code BlockList}
* @throws IllegalArgumentException if the keys and values size are different
*/
public BlockList(List<String> keys, List<Block> values) {
super(keys, values);
}
/**
* Constructs a {@code BlockList} containing the elements of the specified list of Pairs.
*
* @param list the list containing the elements to be placed into this {@code BlockList}
*/
public BlockList(List<Pair<String, Block>> list) {
super(list);
}
/**
* Constructs a {@code BlockList} containing the elements of the specified map.
*
* @param map the map containing keys and values
*/
public BlockList(Map<String, Block> map) {
super(map);
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/Blocks.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import ai.djl.util.Pair;
import ai.djl.util.PairList;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/** Utility class that provides some useful blocks. */
public final class Blocks {
private Blocks() {}
/**
 * Flattens the {@link ai.djl.ndarray.NDArray} provided as input into a 2-D {@link
 * ai.djl.ndarray.NDArray} of shape (batch, size).
*
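 * <p>For example, assuming an existing {@code NDManager manager} (the shape values are
 * illustrative):
 *
 * <pre>{@code
 * NDArray images = manager.ones(new Shape(32, 1, 28, 28));
 * NDArray flat = Blocks.batchFlatten(images); // shape (32, 784)
 * }</pre>
 *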
 * @param array an array to be flattened
 * @return the flattened {@link ai.djl.ndarray.NDArray}
*/
public static NDArray batchFlatten(NDArray array) {
long batch = array.size(0);
if (batch == 0) {
// calculate the size of second dimension manually as using -1 would not work here
return array.reshape(batch, array.getShape().slice(1).size());
}
return array.reshape(batch, -1);
}
/**
 * Flattens the {@link ai.djl.ndarray.NDArray} provided as input into a 2-D {@link
 * ai.djl.ndarray.NDArray} of shape (batch, size).
 *
 * @param array an array to be flattened
 * @param size the expected size of each flattened row
 * @return the flattened {@link ai.djl.ndarray.NDArray}
*/
public static NDArray batchFlatten(NDArray array, long size) {
return array.reshape(-1, size);
}
/**
* Creates a {@link Block} whose forward function applies the {@link #batchFlatten(NDArray)
* batchFlatten} method.
*
* @return a {@link Block} whose forward function applies the {@link #batchFlatten(NDArray)
* batchFlatten} method
*/
public static Block batchFlattenBlock() {
return LambdaBlock.singleton(Blocks::batchFlatten, "batchFlatten");
}
/**
* Creates a {@link Block} whose forward function applies the {@link #batchFlatten(NDArray)
 * batchFlatten} method. The size of the input to the returned block must be batch_size * size.
*
* @param size the expected size of each input
* @return a {@link Block} whose forward function applies the {@link #batchFlatten(NDArray)
* batchFlatten} method
*/
public static Block batchFlattenBlock(long size) {
return LambdaBlock.singleton(array -> batchFlatten(array, size), "batchFlatten");
}
/**
* Creates a {@link LambdaBlock} that performs the identity function.
*
* @return an identity {@link Block}
*/
public static Block identityBlock() {
return new LambdaBlock(x -> x, "identity");
}
/**
 * Creates a {@link LambdaBlock} that returns an all-ones NDList.
 *
 * @param shapes the data types and shapes of the arrays to return
 * @param names the names to assign to the returned arrays
 * @return an all-ones {@link Block}
*/
public static Block onesBlock(PairList<DataType, Shape> shapes, String[] names) {
return new LambdaBlock(
a -> {
Shape[] inShapes = a.getShapes();
NDManager manager = a.getManager();
NDList list = new NDList(shapes.size());
int index = 0;
for (Pair<DataType, Shape> pair : shapes) {
long[] shape = pair.getValue().getShape().clone();
for (int i = 0; i < shape.length; ++i) {
if (shape[i] == -1) {
shape[i] = inShapes[index].get(i);
}
}
DataType dataType = pair.getKey();
NDArray arr = manager.ones(new Shape(shape), dataType);
if (names.length == list.size()) {
arr.setName(names[index++]);
}
list.add(arr);
}
return list;
},
"ones");
}
/**
* Returns a string representation of the passed {@link Block} describing the input axes, output
* axes, and the block's children.
*
* @param block the block to describe
* @param blockName the name to be used for the passed block, or <code>null</code> if its class
* name is to be used
* @param beginAxis skips all axes before this axis; use <code>0</code> to print all axes and
* <code>1</code> to skip the batch axis.
* @return the string representation
*/
public static String describe(Block block, String blockName, int beginAxis) {
Shape[] inputShapes = block.isInitialized() ? block.getInputShapes() : null;
Shape[] outputShapes = inputShapes != null ? block.getOutputShapes(inputShapes) : null;
StringBuilder sb = new StringBuilder(200);
if (block instanceof LambdaBlock
&& !LambdaBlock.DEFAULT_NAME.equals(((LambdaBlock) block).getName())) {
sb.append(((LambdaBlock) block).getName());
} else if (blockName != null) {
sb.append(blockName);
} else {
sb.append(block.getClass().getSimpleName());
}
if (inputShapes != null) {
sb.append(
Stream.of(inputShapes)
.map(shape -> shape.slice(beginAxis).toString())
.collect(Collectors.joining("+")));
}
if (!block.getChildren().isEmpty()) {
sb.append(" {\n");
for (Pair<String, Block> pair : block.getChildren()) {
String child = describe(pair.getValue(), pair.getKey().substring(2), beginAxis);
sb.append(child.replaceAll("(?m)^", "\t")).append('\n');
}
sb.append('}');
}
if (outputShapes != null) {
sb.append(" -> ");
sb.append(
Stream.of(outputShapes)
.map(shape -> shape.slice(beginAxis).toString())
.collect(Collectors.joining("+")));
}
return sb.toString();
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/IdentityBlockFactory.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn;
import ai.djl.Model;
import java.nio.file.Path;
import java.util.Map;
/** A {@link BlockFactory} class that creates IdentityBlock. */
public class IdentityBlockFactory implements BlockFactory {
private static final long serialVersionUID = 1L;
/** {@inheritDoc} */
@Override
public Block newBlock(Model model, Path modelPath, Map<String, ?> arguments) {
return Blocks.identityBlock();
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/LambdaBlock.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn;
import ai.djl.MalformedModelException;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
import java.io.DataInputStream;
import java.io.IOException;
import java.util.function.Function;
/**
* {@code LambdaBlock} is a {@link Block} with no parameters or children.
*
* <p>{@code LambdaBlock} allows converting any function that takes an {@code NDList} as input and
* returns an {@code NDList} into a parameter-less and child-less {@link Block}. This can be useful
 * in converting activation functions, identity blocks, and more. For example, an identity block
 * can be created as {@code new LambdaBlock(x -> x)}.
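 *
 * <p>A small sketch (the function and name are illustrative):
 *
 * <pre>{@code
 * Block square = LambdaBlock.singleton(a -> a.mul(a), "square");
 * }</pre>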
*/
public class LambdaBlock extends AbstractBlock {
public static final String DEFAULT_NAME = "anonymous";
private static final byte VERSION = 2;
private Function<NDList, NDList> lambda;
private String name;
/**
* Creates a LambdaBlock that can apply the specified function.
*
* @param lambda the function to apply
*/
public LambdaBlock(Function<NDList, NDList> lambda) {
this(lambda, DEFAULT_NAME);
}
/**
* Creates a LambdaBlock that can apply the specified function.
*
* @param lambda the function to apply
* @param name the function name
*/
public LambdaBlock(Function<NDList, NDList> lambda, String name) {
super(VERSION);
this.lambda = lambda;
this.name = name;
}
/**
* Returns the lambda function name.
*
* @return the lambda function name
*/
public String getName() {
return name;
}
/**
* Creates a {@link LambdaBlock} for a singleton function.
*
 * @param lambda a function accepting a single {@link NDArray} and returning another {@link
 *     NDArray}
* @return a new {@link LambdaBlock} for the function
*/
public static LambdaBlock singleton(Function<NDArray, NDArray> lambda) {
return new LambdaBlock(
arrays -> new NDList(lambda.apply(arrays.singletonOrThrow())),
lambda.getClass().getSimpleName());
}
/**
* Creates a {@link LambdaBlock} for a singleton function.
*
 * @param lambda a function accepting a single {@link NDArray} and returning another {@link
 *     NDArray}
* @param name the function name
* @return a new {@link LambdaBlock} for the function
*/
public static LambdaBlock singleton(Function<NDArray, NDArray> lambda, String name) {
return new LambdaBlock(arrays -> new NDList(lambda.apply(arrays.singletonOrThrow())), name);
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
return lambda.apply(inputs);
}
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(Shape[] inputShapes) {
try (NDManager manager = NDManager.newBaseManager()) {
NDList input = new NDList(inputShapes.length);
for (Shape shape : inputShapes) {
input.add(manager.zeros(shape));
}
NDList output = lambda.apply(input);
Shape[] outputShapes = new Shape[output.size()];
DataType[] dataTypes = new DataType[output.size()];
for (int i = 0; i < output.size(); ++i) {
outputShapes[i] = output.get(i).getShape();
dataTypes[i] = output.get(i).getDataType();
}
outputDataTypes = dataTypes;
return outputShapes;
}
}
/** {@inheritDoc} */
@Override
public void loadParameters(NDManager manager, DataInputStream is)
throws IOException, MalformedModelException {
byte version = is.readByte();
if (version == VERSION) {
readInputShapes(is);
} else if (version != 1) {
throw new MalformedModelException("Unsupported encoding version: " + version);
}
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/OnesBlockFactory.java
|
/*
* Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn;
import ai.djl.Model;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import ai.djl.translate.ArgumentsUtil;
import ai.djl.util.PairList;
import ai.djl.util.Utils;
import java.nio.file.Path;
import java.util.Map;
/** A {@link BlockFactory} class that creates an all-ones {@link LambdaBlock}. */
public class OnesBlockFactory implements BlockFactory {
private static final long serialVersionUID = 1L;
/** {@inheritDoc} */
@Override
public Block newBlock(Model model, Path modelPath, Map<String, ?> arguments) {
String shapes = ArgumentsUtil.stringValue(arguments, "block_shapes");
String blockNames = ArgumentsUtil.stringValue(arguments, "block_names");
PairList<DataType, Shape> pairs = Shape.parseShapes(shapes);
String[] names;
if (blockNames != null) {
names = blockNames.split(",");
} else {
names = Utils.EMPTY_ARRAY;
}
return Blocks.onesBlock(pairs, names);
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/ParallelBlock.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn;
import ai.djl.MalformedModelException;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
import ai.djl.util.Preconditions;
import java.io.DataInputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.function.Function;
import java.util.stream.Collectors;
/**
* {@code ParallelBlock} is a {@link Block} whose children each form a parallel branch in the
* network, and whose outputs are combined to produce a single output.
*
* <p>{@code ParallelBlock} has no direct parameters.
*/
public class ParallelBlock extends AbstractBlock {
private static final byte VERSION = 2;
private Function<List<NDList>, NDList> function;
/**
* Creates a parallel block whose branches are combined to form a single output by the given
* function.
*
* @param function the function to define how the parallel branches are combined to form a
* single output
*/
public ParallelBlock(Function<List<NDList>, NDList> function) {
this(function, Collections.emptyList());
}
/**
* Creates a parallel block whose branches are formed by each block in the list of blocks, and
* are combined to form a single output by the given function.
*
* @param function the function to define how the parallel branches are combined
* @param blocks the blocks that form each of the parallel branches
*/
@SuppressWarnings("this-escape")
public ParallelBlock(Function<List<NDList>, NDList> function, List<Block> blocks) {
super(VERSION);
this.function = function;
addAll(blocks);
}
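// Illustrative usage (a minimal sketch; Linear comes from ai.djl.nn.core and the unit counts
// are arbitrary): two branches whose singleton outputs are concatenated along axis 1.
//
//   Block parallel = new ParallelBlock(
//           branches -> new NDList(NDArrays.concat(
//                   new NDList(branches.stream()
//                           .map(NDList::singletonOrThrow)
//                           .collect(Collectors.toList())), 1)),
//           Arrays.asList(
//                   Linear.builder().setUnits(32).build(),
//                   Linear.builder().setUnits(32).build()));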
/**
* Adds an array of blocks, each of which is a parallel branch.
*
* @param blocks the array of blocks to add
* @return this block
*/
@SuppressWarnings("this-escape")
public final ParallelBlock addAll(Block... blocks) {
return addAll(Arrays.asList(blocks));
}
/**
* Adds a {@link Collection} of blocks, each of which is a parallel branch.
*
* @param blocks the {@link Collection} of blocks to add
* @return this block
*/
public final ParallelBlock addAll(Collection<Block> blocks) {
blocks.forEach(this::add);
return this;
}
/**
* Adds the given {@link Block} to the block, which is one parallel branch.
*
* @param block the block to be added as a parallel branch
* @return this block
*/
public final ParallelBlock add(Block block) {
if (block != null) {
addChildBlock(block.getClass().getSimpleName(), block);
}
return this;
}
/**
* Adds a {@link LambdaBlock}, that applies the given function, to the list of parallel
* branches.
*
* @param f the function that forms the {@link LambdaBlock}
* @return this block
*/
public final ParallelBlock add(Function<NDList, NDList> f) {
return add(new LambdaBlock(f));
}
/**
* Adds a {@link LambdaBlock}, that applies the given function, to the list of parallel
* branches.
*
* @param f the function that forms the {@link LambdaBlock}
* @param name the function name
* @return this block
*/
public ParallelBlock add(Function<NDList, NDList> f, String name) {
return add(new LambdaBlock(f, name));
}
/**
* Adds a {@link LambdaBlock#singleton(Function)}, that applies the given function, to the list
* of parallel branches.
*
* @param f the function that forms the {@link LambdaBlock}
* @return this block
* @see LambdaBlock#singleton(Function)
*/
public ParallelBlock addSingleton(Function<NDArray, NDArray> f) {
return add(LambdaBlock.singleton(f));
}
/**
* Adds a {@link LambdaBlock#singleton(Function)}, that applies the given function, to the list
* of parallel branches.
*
* @param f the function that forms the {@link LambdaBlock}
* @param name the function name
* @return this block
* @see LambdaBlock#singleton(Function)
*/
public ParallelBlock addSingleton(Function<NDArray, NDArray> f, String name) {
return add(LambdaBlock.singleton(f, name));
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
return function.apply(
children.values().stream()
.map(block -> block.forward(parameterStore, inputs, training, params))
.collect(Collectors.toList()));
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList data,
NDList labels,
PairList<String, Object> params) {
return function.apply(
children.values().stream()
.map(block -> block.forward(parameterStore, data, labels, params))
.collect(Collectors.toList()));
}
/** {@inheritDoc} */
@Override
public void initializeChildBlocks(NDManager manager, DataType dataType, Shape... inputShapes) {
for (Block child : getChildren().values()) {
child.initialize(manager, dataType, inputShapes);
}
}
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(Shape[] inputShapes) {
Preconditions.checkArgument(!children.isEmpty(), "The parallel block is empty");
try (NDManager manager = NDManager.newBaseManager()) {
List<NDList> inputs = new ArrayList<>();
for (Block block : children.values()) {
Shape[] shapes = block.getOutputShapes(inputShapes);
NDList output = new NDList(shapes.length);
for (Shape shape : shapes) {
output.add(manager.create(shape));
}
inputs.add(output);
}
NDList output = function.apply(inputs);
Shape[] outputShapes = new Shape[output.size()];
for (int i = 0; i < output.size(); ++i) {
outputShapes[i] = output.get(i).getShape();
}
return outputShapes;
}
}
/** {@inheritDoc} */
@Override
public void loadMetadata(byte loadVersion, DataInputStream is)
throws IOException, MalformedModelException {
if (loadVersion == version) {
readInputShapes(is);
} else if (loadVersion != 1) {
throw new MalformedModelException("Unsupported encoding version: " + loadVersion);
}
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/Parameter.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn;
import ai.djl.MalformedModelException;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import ai.djl.training.initializer.Initializer;
import ai.djl.training.initializer.XavierInitializer;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Objects;
/**
* {@code Parameter} is a container class that holds a learnable parameter of a model.
*
* <p>Every {@code Parameter} is associated with a {@link Block}. The output of the block's forward
* function depends on the values in the {@code Parameter}. During training, the values in the
* {@code Parameter} are updated to reflect the training data. This process forms the crux of
* learning.
*
* @see <a href="https://d2l.djl.ai/chapter_deep-learning-computation/parameters.html">The D2L
* chapter on parameter management</a>
*/
public class Parameter implements AutoCloseable {
private static final byte VERSION = 1;
private String id;
private String name;
private Shape shape;
private Type type;
private Initializer initializer;
private NDArray array;
private boolean requiresGrad;
Parameter(Builder builder) {
this.id = NDManager.nextUid();
this.name = builder.name;
this.shape = builder.shape;
this.type = builder.type;
this.array = builder.array;
this.requiresGrad = builder.requiresGrad;
this.initializer =
(builder.initializer != null) ? builder.initializer : type.getInitializer();
}
/**
* Gets the ID of this {@code Parameter}.
*
* @return the ID of this {@code Parameter}
*/
public String getId() {
return id;
}
/**
* Gets the name of this {@code Parameter}.
*
* @return the name of this {@code Parameter}
*/
public String getName() {
return name == null ? "" : name;
}
/**
* Gets the type of this {@code Parameter}.
*
* @return the type of this {@code Parameter}
*/
public Type getType() {
return type;
}
/**
* Sets the values of this {@code Parameter}.
*
* @param array the {@link NDArray} that contains values of this {@code Parameter}
*/
public void setArray(NDArray array) {
if (shape != null) {
throw new IllegalStateException("array has been set! Use either setArray or setShape");
}
this.array = array;
shape = array.getShape();
array.setName(name);
}
/**
* Sets the shape of this {@code Parameter}.
*
* @param shape the shape of this {@code Parameter}
*/
public void setShape(Shape shape) {
if (array != null) {
throw new IllegalStateException("array has been set! Use either setArray or setShape");
}
this.shape = shape;
}
/**
* Gets the shape of this {@code Parameter}.
*
* @return the shape of this {@code Parameter}
*/
public Shape getShape() {
return shape;
}
/**
* Gets the values of this {@code Parameter} as an {@link NDArray}.
*
* @return an {@link NDArray} that contains values of this {@code Parameter}
*/
public NDArray getArray() {
if (!isInitialized()) {
throw new UninitializedParameterException(
"The array for parameter \"" + getName() + "\" has not been initialized");
}
return array;
}
/**
* Returns whether this parameter needs gradients to be computed.
*
* @return whether this parameter needs gradients to be computed
*/
public boolean requiresGradient() {
return requiresGrad;
}
/**
* Freezes or unfreezes the parameter for training.
*
* <p>Sometimes during training, especially during transfer learning, it is typical to train
* only part of the model. In that case, freezing can be used to prevent certain parts from
* being trained.
*
* <p>This modifies the {@link #requiresGradient()} of the parameter.
*
* @param freeze true if the parameter should be frozen ({@code freeze == !requiresGradient()})
*/
public void freeze(boolean freeze) {
requiresGrad = !freeze;
if (array != null) {
// array can be null if block is loaded and then cleared
array.setRequiresGradient(requiresGrad);
}
}
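// Illustrative usage (a minimal sketch; "weight" is assumed to be an already-initialized
// parameter):
//
//   weight.freeze(true);   // requiresGradient() becomes false; no gradient is tracked for its array
//   weight.freeze(false);  // un-freeze the parameter for further fine-tuning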
/**
* Checks if this {@code Parameter} is initialized.
*
* @return {@code true} if this {@code Parameter} is initialized
*/
public boolean isInitialized() {
return array != null;
}
/**
* Sets the {@link Initializer} for this {@code Parameter}, replacing any previously set
* initializer.
*
* @param initializer the initializer to be set
*/
public void setInitializer(Initializer initializer) {
this.initializer = initializer;
}
/**
* Returns the {@link Initializer} of this {@code Parameter}.
*
* @return the initializer of this {@code Parameter}
*/
public Initializer getInitializer() {
return initializer;
}
/**
* Initializes the parameter's array with the given {@link NDManager} and {@link DataType},
* using the shape that has been set on this {@code Parameter}.
*
* @param manager an NDManager to create the arrays
* @param dataType the datatype of the {@code Parameter}
*/
public void initialize(NDManager manager, DataType dataType) {
// A parameter counts as initialized once it is attached to a non-null array.
if (!isInitialized()) {
// Parameters in a PtSymbolBlock are set during model loading, so isInitialized() is already
// true; they must not be initialized again.
Objects.requireNonNull(initializer, "No initializer has been set");
// Parameters in a PtSymbolBlock can have a null shape while still being initialized (the
// parameter is already attached to a non-null array).
Objects.requireNonNull(shape, "No parameter shape has been set");
array = initializer.initialize(manager, shape, dataType);
array.setName(name);
}
if (requiresGradient()) {
array.setRequiresGradient(true);
}
}
/**
* Writes the parameter NDArrays to the given output stream.
*
* @param dos the output stream to write to
* @throws IOException if the write operation fails
*/
public void save(DataOutputStream dos) throws IOException {
if (!isInitialized()) {
dos.writeChar('N');
return;
}
dos.writeChar('P');
dos.writeByte(VERSION);
dos.writeUTF(getName());
dos.write(array.encode());
}
/**
* Loads parameter NDArrays from InputStream.
*
* <p>Currently, we cannot deserialize into the exact subclass of NDArray. The SparseNDArray
* will be loaded as NDArray only.
*
* @param manager the NDManager
* @param dis the InputStream
* @throws IOException if failed to read
* @throws MalformedModelException Exception thrown when model is not in expected format
* (parameters).
*/
public void load(NDManager manager, DataInputStream dis)
throws IOException, MalformedModelException {
char magic = dis.readChar();
if (magic == 'N') {
return;
} else if (magic != 'P') {
throw new MalformedModelException("Invalid input data.");
}
// Version
byte version = dis.readByte();
if (version != VERSION) {
throw new MalformedModelException("Unsupported encoding version: " + version);
}
String parameterName = dis.readUTF();
if (!parameterName.equals(getName())) {
throw new MalformedModelException(
"Unexpected parameter name: " + parameterName + ", expected: " + name);
}
array = manager.decode(dis);
// set the shape of the parameter and prepare() can be skipped
shape = array.getShape();
}
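// Illustrative round trip (a minimal sketch; "parameter", "file", and "manager" are
// assumptions):
//
//   try (DataOutputStream dos = new DataOutputStream(Files.newOutputStream(file))) {
//       parameter.save(dos);
//   }
//   try (DataInputStream dis = new DataInputStream(Files.newInputStream(file))) {
//       parameter.load(manager, dis);
//   }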
/** {@inheritDoc} */
@Override
public void close() {
if (array != null) {
array.close();
array = null;
}
}
/**
* Creates a builder to build a {@code Parameter}.
*
* <p>The methods start with {@code set} are required fields, and {@code opt} for optional
* fields.
*
* @return a new builder
*/
public static Parameter.Builder builder() {
return new Parameter.Builder();
}
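// Illustrative usage (a minimal sketch; the name, shape, and manager are assumptions):
//
//   Parameter weight = Parameter.builder()
//           .setName("weight")
//           .setType(Parameter.Type.WEIGHT)
//           .optShape(new Shape(256, 128))
//           .build();
//   weight.initialize(manager, DataType.FLOAT32);  // uses the WEIGHT type's default XavierInitializer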
/** Enumerates the types of {@link Parameter}. */
public enum Type {
WEIGHT(
new XavierInitializer(
XavierInitializer.RandomType.GAUSSIAN, XavierInitializer.FactorType.IN, 2)),
BIAS(Initializer.ZEROS),
GAMMA(Initializer.ONES),
BETA(Initializer.ZEROS),
RUNNING_MEAN(Initializer.ZEROS),
RUNNING_VAR(Initializer.ONES),
OTHER(null);
private final transient Initializer initializer;
Type(Initializer initializer) {
this.initializer = initializer;
}
/**
* Gets the {@link Initializer} of this {@code ParameterType}.
*
* @return the {@link Initializer} of this {@code ParameterType}
*/
public Initializer getInitializer() {
return initializer;
}
}
/** A Builder to construct a {@code Parameter}. */
public static final class Builder {
String name;
Shape shape;
Type type;
Initializer initializer;
NDArray array;
boolean requiresGrad = true;
/**
* Sets the name of the {@code Parameter}.
*
* @param name the name of the {@code Parameter}
* @return this {@code Parameter}
*/
public Builder setName(String name) {
this.name = name;
return this;
}
/**
* Sets the {@code Type} of the {@code Parameter}.
*
* @param type the {@code Type} of the {@code Parameter}
* @return this {@code Parameter}
*/
public Builder setType(Type type) {
this.type = type;
return this;
}
/**
* Sets the shape of the {@code Parameter}.
*
* @param shape the shape of the {@code Parameter}
* @return this {@code Parameter}
*/
public Builder optShape(Shape shape) {
this.shape = shape;
return this;
}
/**
* Sets the Initializer of the {@code Parameter}.
*
* @param initializer the Initializer of the {@code Parameter}
* @return this {@code Parameter}
*/
public Builder optInitializer(Initializer initializer) {
this.initializer = initializer;
return this;
}
/**
* Sets the array of the {@code Parameter}.
*
* @param array the array of the {@code Parameter}
* @return this {@code Parameter}
*/
public Builder optArray(NDArray array) {
this.array = array;
return this;
}
/**
* Sets if the {@code Parameter} requires gradient.
*
* @param requiresGrad if the {@code Parameter} requires gradient
* @return this {@code Parameter}
*/
public Builder optRequiresGrad(boolean requiresGrad) {
this.requiresGrad = requiresGrad;
return this;
}
/**
* Builds a {@code Parameter} instance.
*
* @return the {@code Parameter} instance
*/
public Parameter build() {
return new Parameter(this);
}
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/ParameterList.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn;
import ai.djl.util.Pair;
import ai.djl.util.PairList;
import java.util.List;
import java.util.Map;
/** Represents a set of names and Parameters. */
public class ParameterList extends PairList<String, Parameter> {
/** Create an empty {@code ParameterList}. */
public ParameterList() {}
/**
* Constructs an empty {@code ParameterList} with the specified initial capacity.
*
* @param initialCapacity the initial capacity of the list
* @throws IllegalArgumentException if the specified initial capacity is negative
*/
public ParameterList(int initialCapacity) {
super(initialCapacity);
}
/**
* Constructs a {@code ParameterList} containing the elements of the specified keys and values.
*
* @param keys the key list containing the elements to be placed into this {@code ParameterList}
* @param values the value list containing the elements to be placed into this {@code
* ParameterList}
* @throws IllegalArgumentException if the keys and values size are different
*/
public ParameterList(List<String> keys, List<Parameter> values) {
super(keys, values);
}
/**
* Constructs a {@code ParameterList} containing the elements of the specified list of Pairs.
*
* @param list the list containing the elements to be placed into this {@code ParameterList}
*/
public ParameterList(List<Pair<String, Parameter>> list) {
super(list);
}
/**
* Constructs a {@code ParameterList} containing the elements of the specified map.
*
* @param map the map containing keys and values
*/
public ParameterList(Map<String, Parameter> map) {
super(map);
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/SequentialBlock.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn;
import ai.djl.MalformedModelException;
import ai.djl.inference.streaming.StreamingBlock;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.function.Function;
import java.util.stream.Collectors;
/**
* {@code SequentialBlock} is a {@link Block} whose children form a chain of blocks with each child
* block feeding its output to the next. The output of the last child is returned as the output of
* the {@code SequentialBlock}.
*
* <p>{@code SequentialBlock} has no direct parameters.
*/
public class SequentialBlock extends AbstractBlock implements StreamingBlock {
private static final byte VERSION = 3;
private boolean returnIntermediate;
/**
* Creates an empty sequential block. Use {@code add} and {@code addAll} to add blocks to be
* executed in sequence.
*/
public SequentialBlock() {
super(VERSION);
}
/**
* Adds an array of blocks to be executed in sequence, in order.
*
* @param blocks the array of blocks
* @return this block
*/
public SequentialBlock addAll(Block... blocks) {
this.addAll(Arrays.asList(blocks));
return this;
}
/**
* Adds a {@link Collection} of blocks to be executed in sequence, in order.
*
* @param blocks the {@link Collection} of blocks
* @return this block
*/
public SequentialBlock addAll(Collection<Block> blocks) {
blocks.forEach(this::add);
return this;
}
/**
* Adds the given {@link Block} to the block to be executed in order.
*
* @param block the block to be added to the sequence of blocks
* @return this block
*/
public SequentialBlock add(Block block) {
if (block != null) {
addChildBlock(block.getClass().getSimpleName(), block);
}
return this;
}
/**
* Adds a {@link LambdaBlock} that applies the given function to the sequence of blocks.
*
* @param f the function that forms the {@link LambdaBlock}
* @return this block
*/
public SequentialBlock add(Function<NDList, NDList> f) {
add(new LambdaBlock(f));
return this;
}
/**
* Adds a {@link LambdaBlock} that applies the given function to the sequence of blocks.
*
* @param f the function that forms the {@link LambdaBlock}
* @param name the function name
* @return this block
*/
public SequentialBlock add(Function<NDList, NDList> f, String name) {
add(new LambdaBlock(f, name));
return this;
}
/**
* Adds a {@link LambdaBlock#singleton(Function)} that applies the given function to the
* sequence of blocks.
*
* @param f the function that forms the {@link LambdaBlock}
* @return this block
* @see LambdaBlock#singleton(Function)
*/
public SequentialBlock addSingleton(Function<NDArray, NDArray> f) {
add(LambdaBlock.singleton(f));
return this;
}
/**
* Adds a {@link LambdaBlock#singleton(Function)} that applies the given function to the
* sequence of blocks.
*
* @param f the function that forms the {@link LambdaBlock}
* @param name the function name
* @return this block
* @see LambdaBlock#singleton(Function)
*/
public SequentialBlock addSingleton(Function<NDArray, NDArray> f, String name) {
add(LambdaBlock.singleton(f, name));
return this;
}
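// Illustrative usage (a minimal sketch; Linear and Activation come from ai.djl.nn.core and
// ai.djl.nn respectively, and the layer sizes are arbitrary):
//
//   SequentialBlock mlp = new SequentialBlock()
//           .add(Linear.builder().setUnits(128).build())
//           .addSingleton(Activation::relu)
//           .add(Linear.builder().setUnits(10).build());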
/** Removes the {@link Block} added last from the sequence of blocks. */
public void removeLastBlock() {
children.remove(children.size() - 1);
}
/**
* Replaces the {@link Block} last added from the sequence of blocks, and adds the given block.
*
* @param block the block to replace the last block with
*/
public void replaceLastBlock(Block block) {
removeLastBlock();
if (block != null) {
add(block);
}
}
/**
* Returns whether the block returns all intermediate block results or only the end of the
* sequential chain.
*
* @return whether the block returns all intermediate block results or only the end of the
* sequential chain
*/
public boolean isReturnIntermediate() {
return returnIntermediate;
}
/**
* Sets whether the block returns all intermediate sequence results.
*
* @param returnIntermediate true for intermediates, false for only chain result (default and
* typical behavior is false)
* @return this {@link SequentialBlock}
*/
public SequentialBlock setReturnIntermediate(boolean returnIntermediate) {
this.returnIntermediate = returnIntermediate;
return this;
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
List<NDList> past = new ArrayList<>(children.size());
NDList current = inputs;
for (Block block : children.values()) {
current = block.forward(parameterStore, current, training);
past.add(current);
}
if (returnIntermediate) {
return new NDList(
past.stream().flatMap(Collection::stream).collect(Collectors.toList()));
}
return current;
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList data,
NDList labels,
PairList<String, Object> params) {
List<NDList> past = new ArrayList<>(children.size());
NDList current = data;
for (Block block : children.values()) {
current = block.forward(parameterStore, current, labels, params);
past.add(current);
}
if (returnIntermediate) {
return new NDList(
past.stream().flatMap(Collection::stream).collect(Collectors.toList()));
}
return current;
}
/** {@inheritDoc} */
@Override
public Iterator<NDList> forwardStreamIter(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
return new StreamIterator(parameterStore, inputs, training);
}
/** {@inheritDoc} */
@Override
public void initializeChildBlocks(NDManager manager, DataType dataType, Shape... inputShapes) {
Shape[] shapes = inputShapes;
DataType[] lastDataTypes = null;
for (Block child : getChildren().values()) {
child.initialize(manager, dataType, shapes);
shapes = child.getOutputShapes(shapes, lastDataTypes);
lastDataTypes = child.getOutputDataTypes();
}
}
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(Shape[] inputs) {
if (children.isEmpty()) {
throw new IllegalArgumentException("The sequential block is empty");
}
List<Shape[]> past = new ArrayList<>(children.size());
Shape[] current = inputs;
for (Block block : children.values()) {
current = block.getOutputShapes(current);
past.add(current);
}
if (returnIntermediate) {
return past.stream().flatMap(Arrays::stream).toArray(Shape[]::new);
}
return current;
}
/** {@inheritDoc} */
@Override
protected void saveMetadata(DataOutputStream os) throws IOException {
saveInputShapes(os);
os.writeBoolean(returnIntermediate);
}
/** {@inheritDoc} */
@Override
public void loadMetadata(byte loadVersion, DataInputStream is)
throws IOException, MalformedModelException {
if (loadVersion == version) {
readInputShapes(is);
returnIntermediate = is.readBoolean();
} else if (loadVersion == 2) {
readInputShapes(is);
} else {
throw new MalformedModelException("Unsupported encoding version: " + loadVersion);
}
}
private final class StreamIterator implements Iterator<NDList> {
private int childIndex;
private ParameterStore parameterStore;
private NDList current;
private boolean training;
private StreamIterator(ParameterStore parameterStore, NDList inputs, boolean training) {
this.parameterStore = parameterStore;
this.current = inputs;
this.training = training;
childIndex = 0;
}
/** {@inheritDoc} */
@Override
public boolean hasNext() {
return childIndex < children.size();
}
/** {@inheritDoc} */
@Override
public NDList next() {
current =
children.get(childIndex++)
.getValue()
.forward(parameterStore, current, training);
return current;
}
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/SymbolBlock.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.Shape;
import ai.djl.util.PairList;
/**
* {@code SymbolBlock} is a {@link Block} that is used to load models exported directly from
* an engine in its native format.
*/
public interface SymbolBlock extends Block {
/**
* Creates an empty SymbolBlock instance.
*
* @param manager the manager to be applied in the SymbolBlock
* @return a new {@code SymbolBlock} instance
*/
static SymbolBlock newInstance(NDManager manager) {
return manager.getEngine().newSymbolBlock(manager);
}
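// Illustrative usage (a minimal sketch; whether this succeeds depends on the engine in use,
// since not every engine supports symbolic blocks):
//
//   try (NDManager manager = NDManager.newBaseManager()) {
//       SymbolBlock block = SymbolBlock.newInstance(manager);
//   }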
/** Removes the last block in the symbolic graph. */
default void removeLastBlock() {
throw new UnsupportedOperationException("not supported");
}
/**
* Returns a {@link PairList} of output names and shapes stored in model file.
*
* @return the {@link PairList} of output names, and shapes
*/
default PairList<String, Shape> describeOutput() {
throw new UnsupportedOperationException("not supported");
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/UninitializedParameterException.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn;
/** Thrown to indicate that a {@link Parameter} was not initialized. */
public class UninitializedParameterException extends RuntimeException {
private static final long serialVersionUID = 1L;
/**
* Constructs a new exception with the specified detail message. The cause is not initialized,
* and may subsequently be initialized by a call to {@link #initCause}.
*
* @param message the detail message that is saved for later retrieval by the {@link
* #getMessage()} method
*/
public UninitializedParameterException(String message) {
super(message);
}
/**
* Constructs a new exception with the specified detail message and cause.
*
* <p>Note that the detail message associated with {@code cause} is <i>not</i> automatically
* incorporated in this exception's detail message.
*
* @param message the detail message that is saved for later retrieval by the {@link
* #getMessage()} method
* @param cause the cause that is saved for later retrieval by the {@link #getCause()} method. A
* {@code null} value is permitted, and indicates that the cause is nonexistent or unknown
*/
public UninitializedParameterException(String message, Throwable cause) {
super(message, cause);
}
/**
* Constructs a new exception with the specified cause and a detail message of {@code
* (cause==null ? null : cause.toString())} which typically contains the class and detail
* message of {@code cause}. This constructor is useful for exceptions that are little more than
* wrappers for other throwables. For example, {@link java.security.PrivilegedActionException}.
*
* @param cause the cause that is saved for later retrieval by the {@link #getCause()} method. A
* {@code null} value is permitted, and indicates that the cause is nonexistent or unknown
*/
public UninitializedParameterException(Throwable cause) {
super(cause);
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/package-info.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
/**
* Contains classes to construct neural networks.
*
* <p>The primary construct used to build up the networks is the {@link ai.djl.nn.Block} (see for
* details). This package contains a number of implementations of blocks as well as helpers for
* blocks.
*
* <p>The following subpackages also contain a number of standard neural network operations to use
* with blocks:
*
* <ul>
* <li>{@link ai.djl.nn.convolutional}
* <li>{@link ai.djl.nn.core}
* <li>{@link ai.djl.nn.norm}
* <li>{@link ai.djl.nn.pooling}
* <li>{@link ai.djl.nn.recurrent}
* </ul>
*/
package ai.djl.nn;
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/convolutional/Conv1d.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.convolutional;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.types.LayoutType;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.Block;
import ai.djl.util.Preconditions;
/**
* A {@code Conv1d} layer works like a {@link Convolution} layer, except that it operates on a
* single dimension, {@link LayoutType#WIDTH}. The input data may have more than one channel,
* depending on what data is processed. Each filter slides along that single dimension in one
* direction of movement.
*
* <p>Commonly, this kind of convolution layer, as proposed in this <a
* href="https://ieeexplore.ieee.org/document/7318926/">paper</a>, is used for serial data,
* enabling convolutional processing of 1-dimensional data such as time series (stock prices,
* weather, ECG) and text/speech without first transforming it to 2-dimensional data for
* {@link Conv2d}, though that is a common technique as well.
*
* <p>The input to a {@code Conv1d} is an {@link ai.djl.ndarray.NDList} with a single 3-D {@link
* ai.djl.ndarray.NDArray}. The layout of the {@link ai.djl.ndarray.NDArray} must be "NCW". The
* shapes are
*
* <ul>
* <li>{@code data: (batch_size, channel, width)}
* <li>{@code weight: (num_filter, channel, kernel[0])}
* <li>{@code bias: (num_filter,)}
* <li>{@code out: (batch_size, num_filter, out_width)} <br>
* {@code out_width = f(width, kernel[0], pad[0], stride[0], dilate[0])} <br>
* {@code where f(x, k, p, s, d) = floor((x + 2 * p - d * (k - 1) - 1)/s) + 1}
* </ul>
*
* <p>Both {@code weight} and {@code bias} are learn-able parameters.
*
* @see Convolution
*/
public class Conv1d extends Convolution {
private static final LayoutType[] EXPECTED_LAYOUT = {
LayoutType.BATCH, LayoutType.CHANNEL, LayoutType.WIDTH
};
private static final String STRING_LAYOUT = "NCW";
private static final int NUM_DIMENSIONS = 3;
Conv1d(Builder builder) {
super(builder);
}
/** {@inheritDoc} */
@Override
protected LayoutType[] getExpectedLayout() {
return EXPECTED_LAYOUT;
}
/** {@inheritDoc} */
@Override
protected String getStringLayout() {
return STRING_LAYOUT;
}
/** {@inheritDoc} */
@Override
protected int numDimensions() {
return NUM_DIMENSIONS;
}
/**
* Applies 1D convolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, width)
* @return the output of the conv1d operation
*/
public static NDList conv1d(NDArray input, NDArray weight) {
return conv1d(input, weight, null, new Shape(1), new Shape(0), new Shape(1));
}
/**
* Applies 1D convolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, width)
* @param bias bias {@code NDArray} of shape (outChannel)
* @return the output of the conv1d operation
*/
public static NDList conv1d(NDArray input, NDArray weight, NDArray bias) {
return conv1d(input, weight, bias, new Shape(1), new Shape(0), new Shape(1));
}
/**
* Applies 1D convolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, width)
* @param bias bias {@code NDArray} of shape (outChannel)
* @param stride the stride of the convolving kernel: Shape(width)
* @return the output of the conv1d operation
*/
public static NDList conv1d(NDArray input, NDArray weight, NDArray bias, Shape stride) {
return conv1d(input, weight, bias, stride, new Shape(0), new Shape(1));
}
/**
* Applies 1D convolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, width)
* @param bias bias {@code NDArray} of shape (outChannel)
* @param stride the stride of the convolving kernel: Shape(width)
* @param padding implicit paddings on both sides of the input: Shape(width)
* @return the output of the conv1d operation
*/
public static NDList conv1d(
NDArray input, NDArray weight, NDArray bias, Shape stride, Shape padding) {
return conv1d(input, weight, bias, stride, padding, new Shape(1));
}
/**
* Applies 1D convolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, width)
* @param bias bias {@code NDArray} of shape (outChannel)
* @param stride the stride of the convolving kernel: Shape(width)
* @param padding implicit paddings on both sides of the input: Shape(width)
* @param dilation the spacing between kernel elements: Shape(width)
* @return the output of the conv1d operation
*/
public static NDList conv1d(
NDArray input,
NDArray weight,
NDArray bias,
Shape stride,
Shape padding,
Shape dilation) {
return conv1d(input, weight, bias, stride, padding, dilation, 1);
}
/**
* Applies 1D convolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, width)
* @param bias bias {@code NDArray} of shape (outChannel)
* @param stride the stride of the convolving kernel: Shape(width)
* @param padding implicit paddings on both sides of the input: Shape(width)
* @param dilation the spacing between kernel elements: Shape(width)
* @param groups split input into groups: input channel(input.size(1)) should be divisible by
* the number of groups
* @return the output of the conv1d operation
*/
public static NDList conv1d(
NDArray input,
NDArray weight,
NDArray bias,
Shape stride,
Shape padding,
Shape dilation,
int groups) {
Preconditions.checkArgument(
input.getShape().dimension() == 3 && weight.getShape().dimension() == 3,
"the shape of input or weight doesn't match the conv1d");
Preconditions.checkArgument(
stride.dimension() == 1 && padding.dimension() == 1 && dilation.dimension() == 1,
"the shape of stride or padding or dilation doesn't match the conv1d");
return Convolution.convolution(input, weight, bias, stride, padding, dilation, groups);
}
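// Illustrative usage (a minimal sketch; "manager" and the chosen shapes are assumptions):
//
//   NDArray input = manager.randomNormal(new Shape(1, 4, 16));   // (batch, channel, width)
//   NDArray weight = manager.randomNormal(new Shape(8, 4, 3));   // (num_filter, channel, kernel)
//   NDList out = Conv1d.conv1d(input, weight);
//   // out_width = floor((16 + 2*0 - 1*(3 - 1) - 1)/1) + 1 = 14, so out has shape (1, 8, 14)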
/**
* Creates a builder to build a {@code Conv1d}.
*
* @return a new builder
*/
public static Builder builder() {
return new Builder();
}
/** The Builder to construct a {@link Conv1d} type of {@link Block}. */
public static final class Builder extends ConvolutionBuilder<Builder> {
/** Creates a builder that can build a {@link Conv1d} block. */
Builder() {
stride = new Shape(1);
padding = new Shape(0);
dilation = new Shape(1);
}
/** {@inheritDoc} */
@Override
protected Builder self() {
return this;
}
/**
* Builds a {@link Conv1d} block.
*
* @return the {@link Conv1d} block
*/
public Conv1d build() {
validate();
return new Conv1d(this);
}
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/convolutional/Conv1dTranspose.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.convolutional;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.types.LayoutType;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.Block;
import ai.djl.util.Preconditions;
/**
* A {@code Conv1dTranspose} layer works like a {@link Deconvolution} layer, except that it
* operates on a single dimension, {@link LayoutType#WIDTH}. The input data may have more than
* one channel, depending on what data is processed. Each filter slides along that single
* dimension in one direction of movement.
*
* <p>The input to a {@code Conv1dTranspose} is an {@link ai.djl.ndarray.NDList} with a single 3-D
* {@link ai.djl.ndarray.NDArray}. The layout of the {@link ai.djl.ndarray.NDArray} must be "NCW".
* The shapes are
*
* <ul>
* <li>{@code data: (batch_size, channel, width)}
* <li>{@code weight: (num_filter, channel, kernel[0])}
* <li>{@code bias: (num_filter,)}
* <li>{@code out: (batch_size, num_filter, out_width)} <br>
* {@code out_width = f(width, kernel[0], pad[0], oPad[0], stride[0], dilate[0])} <br>
* {@code where f(x, k, p, oP, s, d) = (x-1)*s-2*p+k+oP}
* </ul>
*
* <p>Both {@code weight} and {@code bias} are learn-able parameters.
*
* @see Deconvolution
*/
public class Conv1dTranspose extends Deconvolution {
private static final LayoutType[] EXPECTED_LAYOUT = {
LayoutType.BATCH, LayoutType.CHANNEL, LayoutType.WIDTH
};
private static final String STRING_LAYOUT = "NCW";
private static final int NUM_DIMENSIONS = 3;
Conv1dTranspose(Builder builder) {
super(builder);
}
/** {@inheritDoc} */
@Override
protected LayoutType[] getExpectedLayout() {
return EXPECTED_LAYOUT;
}
/** {@inheritDoc} */
@Override
protected String getStringLayout() {
return STRING_LAYOUT;
}
/** {@inheritDoc} */
@Override
protected int numDimensions() {
return NUM_DIMENSIONS;
}
/**
* Applies 1D deconvolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, width)
* @return the output of the conv1dTranspose operation
*/
public static NDList conv1dTranspose(NDArray input, NDArray weight) {
return conv1dTranspose(
input, weight, null, new Shape(1), new Shape(0), new Shape(0), new Shape(1));
}
/**
* Applies 1D deconvolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, width)
* @param bias bias {@code NDArray} of shape (outChannel)
* @return the output of the conv1dTranspose operation
*/
public static NDList conv1dTranspose(NDArray input, NDArray weight, NDArray bias) {
return conv1dTranspose(
input, weight, bias, new Shape(1), new Shape(0), new Shape(0), new Shape(1));
}
/**
* Applies 1D deconvolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, width)
* @param bias bias {@code NDArray} of shape (outChannel)
* @param stride the stride of the deconvolving kernel: Shape(width)
* @return the output of the conv1dTranspose operation
*/
public static NDList conv1dTranspose(
NDArray input, NDArray weight, NDArray bias, Shape stride) {
return conv1dTranspose(
input, weight, bias, stride, new Shape(0), new Shape(0), new Shape(1));
}
/**
* Applies 1D deconvolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, width)
* @param bias bias {@code NDArray} of shape (outChannel)
* @param stride the stride of the deconvolving kernel: Shape(width)
* @param padding implicit paddings on both sides of the input: Shape(width)
* @return the output of the conv1dTranspose operation
*/
public static NDList conv1dTranspose(
NDArray input, NDArray weight, NDArray bias, Shape stride, Shape padding) {
return conv1dTranspose(input, weight, bias, stride, padding, new Shape(0), new Shape(1));
}
/**
* Applies 1D deconvolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, width)
* @param bias bias {@code NDArray} of shape (outChannel)
* @param stride the stride of the deconvolving kernel: Shape(width)
* @param padding implicit paddings on both sides of the input: Shape(width)
* @param outPadding Controls the amount of implicit zero-paddings on both sides of the output
* for outputPadding number of points for each dimension.
* @return the output of the conv1dTranspose operation
*/
public static NDList conv1dTranspose(
NDArray input,
NDArray weight,
NDArray bias,
Shape stride,
Shape padding,
Shape outPadding) {
return conv1dTranspose(input, weight, bias, stride, padding, outPadding, new Shape(1));
}
/**
* Applies 1D deconvolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, width)
* @param bias bias {@code NDArray} of shape (outChannel)
* @param stride the stride of the deconvolving kernel: Shape(width)
* @param padding implicit paddings on both sides of the input: Shape(width)
* @param outPadding Controls the amount of implicit zero-paddings on both sides of the output
* for outputPadding number of points for each dimension.
* @param dilation the spacing between kernel elements: Shape(width)
* @return the output of the conv1dTranspose operation
*/
public static NDList conv1dTranspose(
NDArray input,
NDArray weight,
NDArray bias,
Shape stride,
Shape padding,
Shape outPadding,
Shape dilation) {
return conv1dTranspose(input, weight, bias, stride, padding, outPadding, dilation, 1);
}
/**
* Applies 1D deconvolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, width)
* @param bias bias {@code NDArray} of shape (outChannel)
* @param stride the stride of the deconvolving kernel: Shape(width)
* @param padding implicit paddings on both sides of the input: Shape(width)
* @param outPadding Controls the amount of implicit zero-paddings on both sides of the output
* for outputPadding number of points for each dimension. Shape(width)
* @param dilation the spacing between kernel elements: Shape(width)
* @param groups split input into groups: input channel(input.size(1)) should be divisible by
* the number of groups
* @return the output of the conv1dTranspose operation
*/
public static NDList conv1dTranspose(
NDArray input,
NDArray weight,
NDArray bias,
Shape stride,
Shape padding,
Shape outPadding,
Shape dilation,
int groups) {
Preconditions.checkArgument(
input.getShape().dimension() == 3 && weight.getShape().dimension() == 3,
"the shape of input or weight doesn't match the conv1dTranspose");
Preconditions.checkArgument(
stride.dimension() == 1
&& padding.dimension() == 1
&& outPadding.dimension() == 1
&& dilation.dimension() == 1,
"the shape of stride or padding or dilation doesn't match the conv1dTranspose");
return Deconvolution.deconvolution(
input, weight, bias, stride, padding, outPadding, dilation, groups);
}
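// Illustrative usage (a minimal sketch; the kernel, filter, and stride values are arbitrary):
//
//   Block deconv = Conv1dTranspose.builder()
//           .setKernelShape(new Shape(3))
//           .setFilters(8)
//           .optStride(new Shape(2))
//           .build();
//   // for an input of width 16: out_width = (16 - 1) * 2 - 2*0 + 3 + 0 = 33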
/**
* Creates a builder to build a {@code Conv1dTranspose}.
*
* @return a new builder
*/
public static Builder builder() {
return new Builder();
}
/** The Builder to construct a {@link Conv1dTranspose} type of {@link Block}. */
public static final class Builder extends DeconvolutionBuilder<Builder> {
/** Creates a builder that can build a {@link Conv1dTranspose} block. */
Builder() {
stride = new Shape(1);
padding = new Shape(0);
outPadding = new Shape(0);
dilation = new Shape(1);
}
/** {@inheritDoc} */
@Override
protected Builder self() {
return this;
}
/**
* Builds a {@link Conv1dTranspose} block.
*
* @return the {@link Conv1dTranspose} block
*/
public Conv1dTranspose build() {
validate();
return new Conv1dTranspose(this);
}
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/convolutional/Conv2d.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.convolutional;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.types.LayoutType;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.Block;
import ai.djl.util.Preconditions;
/**
* As the pioneer of convolution layers, a {@code Conv2d} layer works on two dimensions of input,
* {@link LayoutType#WIDTH} and {@link LayoutType#HEIGHT}, since a {@code Conv2d} layer is usually
* used to process data with two spatial dimensions, namely images. The concept works just as
* {@link Convolution} does: each filter slides over the input in two directions, first
* traversing the {@link LayoutType#WIDTH} and then moving on to the next row.
*
* <p>First proposed in LeCun <i>et al.</i>'s <a
* href="http://yann.lecun.com/exdb/publis/pdf/lecun-01a.pdf">paper</a>, the 2-dimensional
* convolution layer gained renewed interest with the publication of the <a
* href="https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf">
* AlexNet paper</a> for image classification. It is still commonly used in image-related tasks
* and adapted to other tasks, including 1-dimensional data that may be transformed to
* 2-dimensional data, though {@link Conv1d} is now available for that use.
*
* <p>The input to a {@code Conv2d} is an {@link ai.djl.ndarray.NDList} with a single 4-D {@link
* ai.djl.ndarray.NDArray}. The layout of the {@link ai.djl.ndarray.NDArray} must be "NCHW". The
* shapes are
*
* <ul>
* <li>{@code data: (batch_size, channel, height, width)}
* <li>{@code weight: (num_filter, channel, kernel[0], kernel[1])}
* <li>{@code bias: (num_filter,)}
* <li>{@code out: (batch_size, num_filter, out_height, out_width)} <br>
* {@code out_height = f(height, kernel[0], pad[0], stride[0], dilate[0])} <br>
* {@code out_width = f(width, kernel[1], pad[1], stride[1], dilate[1])} <br>
* {@code where f(x, k, p, s, d) = floor((x + 2 * p - d * (k - 1) - 1)/s) + 1}
* </ul>
*
* <p>Both {@code weight} and {@code bias} are learn-able parameters.
*
* @see Convolution
*/
public class Conv2d extends Convolution {
private static final LayoutType[] EXPECTED_LAYOUT = {
LayoutType.BATCH, LayoutType.CHANNEL, LayoutType.HEIGHT, LayoutType.WIDTH
};
private static final String STRING_LAYOUT = "NCHW";
private static final int NUM_DIMENSIONS = 4;
protected Conv2d(Builder builder) {
super(builder);
}
/** {@inheritDoc} */
@Override
protected LayoutType[] getExpectedLayout() {
return EXPECTED_LAYOUT;
}
/** {@inheritDoc} */
@Override
protected String getStringLayout() {
return STRING_LAYOUT;
}
/** {@inheritDoc} */
@Override
protected int numDimensions() {
return NUM_DIMENSIONS;
}
/**
* Applies 2D convolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, height, width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, height,
* width)
* @return the output of the conv2d operation
*/
public static NDList conv2d(NDArray input, NDArray weight) {
return conv2d(input, weight, null, new Shape(1, 1), new Shape(0, 0), new Shape(1, 1));
}
/**
* Applies 2D convolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, height, width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, height,
* width)
* @param bias bias {@code NDArray} of shape (outChannel)
* @return the output of the conv2d operation
*/
public static NDList conv2d(NDArray input, NDArray weight, NDArray bias) {
return conv2d(input, weight, bias, new Shape(1, 1), new Shape(0, 0), new Shape(1, 1));
}
/**
* Applies 2D convolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, height, width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, height,
* width)
* @param bias bias {@code NDArray} of shape (outChannel)
* @param stride the stride of the convolving kernel: Shape(height, width)
* @return the output of the conv2d operation
*/
public static NDList conv2d(NDArray input, NDArray weight, NDArray bias, Shape stride) {
return conv2d(input, weight, bias, stride, new Shape(0, 0), new Shape(1, 1));
}
/**
* Applies 2D convolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, height, width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, height,
* width)
* @param bias bias {@code NDArray} of shape (outChannel)
* @param stride the stride of the convolving kernel: Shape(height, width)
* @param padding implicit paddings on both sides of the input: Shape(height, width)
* @return the output of the conv2d operation
*/
public static NDList conv2d(
NDArray input, NDArray weight, NDArray bias, Shape stride, Shape padding) {
return conv2d(input, weight, bias, stride, padding, new Shape(1, 1));
}
/**
* Applies 2D convolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, height, width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, height,
* width)
* @param bias bias {@code NDArray} of shape (outChannel)
* @param stride the stride of the convolving kernel: Shape(height, width)
* @param padding implicit paddings on both sides of the input: Shape(height, width)
* @param dilation the spacing between kernel elements: Shape(height, width)
* @return the output of the conv2d operation
*/
public static NDList conv2d(
NDArray input,
NDArray weight,
NDArray bias,
Shape stride,
Shape padding,
Shape dilation) {
return conv2d(input, weight, bias, stride, padding, dilation, 1);
}
/**
* Applies 2D convolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, height, width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, height,
* width)
* @param bias bias {@code NDArray} of shape (outChannel)
* @param stride the stride of the convolving kernel: Shape(height, width)
* @param padding implicit paddings on both sides of the input: Shape(height, width)
* @param dilation the spacing between kernel elements: Shape(height, width)
* @param groups split input into groups: input channel(input.size(1)) should be divisible by
* the number of groups
* @return the output of the conv2d operation
*/
public static NDList conv2d(
NDArray input,
NDArray weight,
NDArray bias,
Shape stride,
Shape padding,
Shape dilation,
int groups) {
Preconditions.checkArgument(
input.getShape().dimension() == 4 && weight.getShape().dimension() == 4,
"the shape of input or weight doesn't match the conv2d");
Preconditions.checkArgument(
stride.dimension() == 2 && padding.dimension() == 2 && dilation.dimension() == 2,
"the shape of stride or padding or dilation doesn't match the conv2d");
return Convolution.convolution(input, weight, bias, stride, padding, dilation, groups);
}
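    // Illustrative usage sketch (not part of the API); the shapes below are hypothetical example
    // values. With the default stride 1, padding 0 and dilation 1, an input of shape (1, 3, 28, 28)
    // and a weight of shape (16, 3, 3, 3) produce an output of shape (1, 16, 26, 26), since
    // (28 + 2 * 0 - 1 * (3 - 1) - 1) / 1 + 1 = 26.
    //
    //   NDArray input = ...;  // shape (1, 3, 28, 28)
    //   NDArray weight = ...; // shape (16, 3, 3, 3)
    //   NDArray bias = ...;   // shape (16)
    //   NDList out = Conv2d.conv2d(input, weight, bias);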
/**
* Creates a builder to build a {@code Conv2d}.
*
* @return a new builder
*/
public static Builder builder() {
return new Builder();
}
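    // A minimal builder sketch (illustrative; the kernel shape and filter count are arbitrary
    // example values, and stride/padding/dilation keep their defaults unless overridden):
    //
    //   Conv2d conv = Conv2d.builder()
    //           .setKernelShape(new Shape(3, 3))
    //           .setFilters(16)
    //           .optStride(new Shape(2, 2))
    //           .build();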
/** The Builder to construct a {@link Conv2d} type of {@link Block}. */
public static class Builder extends ConvolutionBuilder<Builder> {
/** Creates a builder that can build a {@link Conv2d} block. */
protected Builder() {
stride = new Shape(1, 1);
padding = new Shape(0, 0);
dilation = new Shape(1, 1);
}
/** {@inheritDoc} */
@Override
protected Builder self() {
return this;
}
/**
* Builds a {@link Conv2d} block.
*
* @return the {@link Conv2d} block
*/
public Conv2d build() {
validate();
return new Conv2d(this);
}
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/convolutional/Conv2dTranspose.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.convolutional;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.types.LayoutType;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.Block;
import ai.djl.util.Preconditions;
/**
* The input to a {@code Conv2dTranspose} is an {@link ai.djl.ndarray.NDList} with a single 4-D
* {@link ai.djl.ndarray.NDArray}. The layout of the {@link ai.djl.ndarray.NDArray} must be "NCHW".
* The shapes are
*
* <ul>
* <li>{@code data: (batch_size, channel, height, width)}
* <li>{@code weight: (num_filter, channel, kernel[0], kernel[1])}
* <li>{@code bias: (num_filter,)}
* <li>{@code out: (batch_size, num_filter, out_height, out_width)} <br>
* {@code out_height = f(height, kernel[0], pad[0], oPad[0], stride[0], dilate[0])} <br>
* {@code out_width = f(width, kernel[1], pad[1], oPad[1], stride[1], dilate[1])} <br>
* {@code where f(x, k, p, oP, s, d) = (x-1)*s-2*p+k+oP}
* </ul>
*
* <p>Both {@code weight} and {@code bias} are learn-able parameters.
*
* @see Deconvolution
*/
public class Conv2dTranspose extends Deconvolution {
private static final LayoutType[] EXPECTED_LAYOUT = {
LayoutType.BATCH, LayoutType.CHANNEL, LayoutType.HEIGHT, LayoutType.WIDTH
};
private static final String STRING_LAYOUT = "NCHW";
private static final int NUM_DIMENSIONS = 4;
Conv2dTranspose(Builder builder) {
super(builder);
}
/** {@inheritDoc} */
@Override
protected LayoutType[] getExpectedLayout() {
return EXPECTED_LAYOUT;
}
/** {@inheritDoc} */
@Override
protected String getStringLayout() {
return STRING_LAYOUT;
}
/** {@inheritDoc} */
@Override
protected int numDimensions() {
return NUM_DIMENSIONS;
}
/**
* Applies 2D deconvolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, height, width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, height,
* width)
* @return the output of the conv2dTranspose operation
*/
public static NDList conv2dTranspose(NDArray input, NDArray weight) {
return conv2dTranspose(
input,
weight,
null,
new Shape(1, 1),
new Shape(0, 0),
new Shape(0, 0),
new Shape(1, 1));
}
/**
* Applies 2D deconvolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, height, width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, height,
* width)
* @param bias bias {@code NDArray} of shape (outChannel)
* @return the output of the conv2dTranspose operation
*/
public static NDList conv2dTranspose(NDArray input, NDArray weight, NDArray bias) {
return conv2dTranspose(
input,
weight,
bias,
new Shape(1, 1),
new Shape(0, 0),
new Shape(0, 0),
new Shape(1, 1));
}
/**
* Applies 2D deconvolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, height, width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, height,
* width)
* @param bias bias {@code NDArray} of shape (outChannel)
* @param stride the stride of the deconvolving kernel: Shape(height, width)
* @return the output of the conv2dTranspose operation
*/
public static NDList conv2dTranspose(
NDArray input, NDArray weight, NDArray bias, Shape stride) {
return conv2dTranspose(
input, weight, bias, stride, new Shape(0, 0), new Shape(0, 0), new Shape(1, 1));
}
/**
* Applies 2D deconvolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, height, width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, height,
* width)
* @param bias bias {@code NDArray} of shape (outChannel)
* @param stride the stride of the deconvolving kernel: Shape(height, width)
* @param padding implicit paddings on both sides of the input: Shape(height, width)
* @return the output of the conv2dTranspose operation
*/
public static NDList conv2dTranspose(
NDArray input, NDArray weight, NDArray bias, Shape stride, Shape padding) {
return conv2dTranspose(
input, weight, bias, stride, padding, new Shape(0, 0), new Shape(1, 1));
}
/**
* Applies 2D deconvolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, height, width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, height,
* width)
* @param bias bias {@code NDArray} of shape (outChannel)
* @param stride the stride of the deconvolving kernel: Shape(height, width)
* @param padding implicit paddings on both sides of the input: Shape(height, width)
     * @param outPadding the amount of implicit zero-padding added to both sides of the output for
     *     each dimension: Shape(height, width)
* @return the output of the conv2dTranspose operation
*/
public static NDList conv2dTranspose(
NDArray input,
NDArray weight,
NDArray bias,
Shape stride,
Shape padding,
Shape outPadding) {
return conv2dTranspose(input, weight, bias, stride, padding, outPadding, new Shape(1, 1));
}
/**
* Applies 2D deconvolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, height, width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, height,
* width)
* @param bias bias {@code NDArray} of shape (outChannel)
* @param stride the stride of the deconvolving kernel: Shape(height, width)
* @param padding implicit paddings on both sides of the input: Shape(height, width)
     * @param outPadding the amount of implicit zero-padding added to both sides of the output for
     *     each dimension: Shape(height, width)
* @param dilation the spacing between kernel elements: Shape(height, width)
* @return the output of the conv2dTranspose operation
*/
public static NDList conv2dTranspose(
NDArray input,
NDArray weight,
NDArray bias,
Shape stride,
Shape padding,
Shape outPadding,
Shape dilation) {
return conv2dTranspose(input, weight, bias, stride, padding, outPadding, dilation, 1);
}
/**
* Applies 2D deconvolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, height, width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, height,
* width)
* @param bias bias {@code NDArray} of shape (outChannel)
* @param stride the stride of the deconvolving kernel: Shape(height, width)
* @param padding implicit paddings on both sides of the input: Shape(height, width)
     * @param outPadding the amount of implicit zero-padding added to both sides of the output for
     *     each dimension: Shape(height, width)
* @param dilation the spacing between kernel elements: Shape(height, width)
* @param groups split input into groups: input channel(input.size(1)) should be divisible by
* the number of groups
* @return the output of the conv2dTranspose operation
*/
public static NDList conv2dTranspose(
NDArray input,
NDArray weight,
NDArray bias,
Shape stride,
Shape padding,
Shape outPadding,
Shape dilation,
int groups) {
Preconditions.checkArgument(
input.getShape().dimension() == 4 && weight.getShape().dimension() == 4,
"the shape of input or weight doesn't match the conv2dTranspose");
Preconditions.checkArgument(
stride.dimension() == 2
&& padding.dimension() == 2
&& outPadding.dimension() == 2
&& dilation.dimension() == 2,
"the shape of stride or padding or dilation doesn't match the conv2dTranspose");
return Deconvolution.deconvolution(
input, weight, bias, stride, padding, outPadding, dilation, groups);
}
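    // Illustrative usage sketch (not part of the API); the shapes below are hypothetical example
    // values. With stride (2, 2) and the default padding, outPadding and dilation, an input of
    // shape (1, 4, 13, 13) and a weight of shape (4, 4, 3, 3) produce an output of shape
    // (1, 4, 27, 27), following out = (x - 1) * s - 2 * p + k + oP = (13 - 1) * 2 - 0 + 3 + 0 = 27.
    //
    //   NDList out = Conv2dTranspose.conv2dTranspose(input, weight, null, new Shape(2, 2));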
/**
* Creates a builder to build a {@code Conv2dTranspose}.
*
* @return a new builder
*/
public static Builder builder() {
return new Builder();
}
/** The Builder to construct a {@link Conv2dTranspose} type of {@link Block}. */
public static final class Builder extends DeconvolutionBuilder<Builder> {
/** Creates a builder that can build a {@link Conv2dTranspose} block. */
Builder() {
stride = new Shape(1, 1);
padding = new Shape(0, 0);
outPadding = new Shape(0, 0);
dilation = new Shape(1, 1);
}
/** {@inheritDoc} */
@Override
protected Builder self() {
return this;
}
/**
* Builds a {@link Conv2dTranspose} block.
*
* @return the {@link Conv2dTranspose} block
*/
public Conv2dTranspose build() {
validate();
return new Conv2dTranspose(this);
}
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/convolutional/Conv3d.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.convolutional;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.types.LayoutType;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.Block;
import ai.djl.util.Preconditions;
/**
 * A {@code Conv3d} layer behaves just as {@link Convolution} does, with the distinction that it
 * operates on 3-dimensional data such as medical images or video. Each filter traverses the data
 * along {@link LayoutType#WIDTH} first, then {@link LayoutType#HEIGHT}, and lastly across each
 * {@link LayoutType#DEPTH} slice in the specified {@code depth} size of the data.
 *
 * <p>Using a {@code Conv3d} layer allows deeper analysis of volumetric data such as medical
 * images, or of temporal data such as video analyzed as a whole instead of processing each frame
 * with a {@link Conv2d} layer, even though the latter is a common practice in computer vision
 * research. The benefit of this kind of layer is that it preserves the ordering of the stacked
 * 2-dimensional slices, which can be beneficial for research areas such as object tracking. The
 * drawback is that this kind of layer is more costly than other convolution layer types, since the
 * dot product operation is performed across all three dimensions.
*
* <p>The input to a {@code Conv3d} is an {@link ai.djl.ndarray.NDList} with a single 5-D {@link
* ai.djl.ndarray.NDArray}. The layout of the {@link ai.djl.ndarray.NDArray} must be "NCDHW". The
* shapes are
*
* <ul>
* <li>{@code data: (batch_size, channel, depth, height, width)}
* <li>{@code weight: (num_filter, channel, kernel[0], kernel[1], kernel[2])}
* <li>{@code bias: (num_filter,)}
* <li>{@code out: (batch_size, num_filter, out_depth, out_height, out_width)} <br>
* {@code out_depth = f(depth, kernel[0], pad[0], stride[0], dilate[0])} <br>
* {@code out_height = f(height, kernel[1], pad[1], stride[1], dilate[1])} <br>
* {@code out_width = f(width, kernel[2], pad[2], stride[2], dilate[2])} <br>
* {@code where f(x, k, p, s, d) = floor((x + 2 * p - d * (k - 1) - 1)/s) + 1}
* </ul>
*
* <p>Both {@code weight} and {@code bias} are learn-able parameters.
*
* @see Convolution
*/
public class Conv3d extends Convolution {
private static final LayoutType[] EXPECTED_LAYOUT = {
LayoutType.BATCH, LayoutType.CHANNEL, LayoutType.DEPTH, LayoutType.HEIGHT, LayoutType.WIDTH
};
private static final String STRING_LAYOUT = "NCDHW";
private static final int NUM_DIMENSIONS = 5;
Conv3d(Builder builder) {
super(builder);
}
/** {@inheritDoc} */
@Override
protected LayoutType[] getExpectedLayout() {
return EXPECTED_LAYOUT;
}
/** {@inheritDoc} */
@Override
protected String getStringLayout() {
return STRING_LAYOUT;
}
/** {@inheritDoc} */
@Override
protected int numDimensions() {
return NUM_DIMENSIONS;
}
/**
* Applies 3D convolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, depth, height,
* width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, depth,
* height, width)
* @return the output of the conv3d operation
*/
public static NDList conv3d(NDArray input, NDArray weight) {
return conv3d(
input, weight, null, new Shape(1, 1, 1), new Shape(0, 0, 0), new Shape(1, 1, 1));
}
/**
* Applies 3D convolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, depth, height,
* width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, depth,
* height, width)
* @param bias bias {@code NDArray} of shape (outChannel)
* @return the output of the conv3d operation
*/
public static NDList conv3d(NDArray input, NDArray weight, NDArray bias) {
return conv3d(
input, weight, bias, new Shape(1, 1, 1), new Shape(0, 0, 0), new Shape(1, 1, 1));
}
/**
* Applies 3D convolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, depth, height,
* width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, depth,
* height, width)
* @param bias bias {@code NDArray} of shape (outChannel)
* @param stride the stride of the convolving kernel: Shape(depth, height, width)
* @return the output of the conv3d operation
*/
public static NDList conv3d(NDArray input, NDArray weight, NDArray bias, Shape stride) {
return conv3d(input, weight, bias, stride, new Shape(0, 0, 0), new Shape(1, 1, 1));
}
/**
* Applies 3D convolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, depth, height,
* width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, depth,
* height, width)
* @param bias bias {@code NDArray} of shape (outChannel)
* @param stride the stride of the convolving kernel: Shape(depth, height, width)
* @param padding implicit paddings on both sides of the input: Shape(depth, height, width)
* @return the output of the conv3d operation
*/
public static NDList conv3d(
NDArray input, NDArray weight, NDArray bias, Shape stride, Shape padding) {
return conv3d(input, weight, bias, stride, padding, new Shape(1, 1, 1));
}
/**
* Applies 3D convolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, depth, height,
* width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, depth,
* height, width)
* @param bias bias {@code NDArray} of shape (outChannel)
* @param stride the stride of the convolving kernel: Shape(depth, height, width)
* @param padding implicit paddings on both sides of the input: Shape(depth, height, width)
* @param dilation the spacing between kernel elements: Shape(depth, height, width)
* @return the output of the conv3d operation
*/
public static NDList conv3d(
NDArray input,
NDArray weight,
NDArray bias,
Shape stride,
Shape padding,
Shape dilation) {
return conv3d(input, weight, bias, stride, padding, dilation, 1);
}
/**
* Applies 3D convolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, depth, height,
* width)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, depth,
* height, width)
* @param bias bias {@code NDArray} of shape (outChannel)
* @param stride the stride of the convolving kernel: Shape(depth, height, width)
* @param padding implicit paddings on both sides of the input: Shape(depth, height, width)
* @param dilation the spacing between kernel elements: Shape(depth, height, width)
* @param groups split input into groups: input channel(input.size(1)) should be divisible by
* the number of groups
* @return the output of the conv3d operation
*/
public static NDList conv3d(
NDArray input,
NDArray weight,
NDArray bias,
Shape stride,
Shape padding,
Shape dilation,
int groups) {
        Preconditions.checkArgument(
                input.getShape().dimension() == 5 && weight.getShape().dimension() == 5,
                "the shape of input or weight doesn't match the conv3d");
        Preconditions.checkArgument(
                stride.dimension() == 3 && padding.dimension() == 3 && dilation.dimension() == 3,
                "the shape of stride or padding or dilation doesn't match the conv3d");
return Convolution.convolution(input, weight, bias, stride, padding, dilation, groups);
}
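    // Illustrative usage sketch (not part of the API); the shapes below are hypothetical example
    // values. With the default stride, padding and dilation, an input of shape (1, 1, 16, 28, 28)
    // and a weight of shape (8, 1, 3, 3, 3) produce an output of shape (1, 8, 14, 26, 26),
    // applying the same output-size formula as conv2d to depth, height and width.
    //
    //   NDList out = Conv3d.conv3d(input, weight);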
/**
* Creates a builder to build a {@code Conv3d}.
*
* @return a new builder
*/
public static Builder builder() {
return new Builder();
}
/** The Builder to construct a {@link Conv3d} type of {@link Block}. */
public static final class Builder extends ConvolutionBuilder<Builder> {
/** Creates a builder that can build a {@link Conv3d} block. */
Builder() {
stride = new Shape(1, 1, 1);
padding = new Shape(0, 0, 0);
dilation = new Shape(1, 1, 1);
}
/** {@inheritDoc} */
@Override
protected Builder self() {
return this;
}
/**
* Builds a {@link Conv3d} block.
*
* @return the {@link Conv3d} block
*/
public Conv3d build() {
validate();
return new Conv3d(this);
}
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/convolutional/Convolution.java
|
/*
* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.convolutional;
import ai.djl.Device;
import ai.djl.MalformedModelException;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.types.LayoutType;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.AbstractBlock;
import ai.djl.nn.Block;
import ai.djl.nn.Parameter;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
import java.io.DataInputStream;
import java.io.IOException;
/**
 * A convolution layer applies a specified number of filters to \(k\)-channel input data. Each
 * filter contains \(k\) kernels, one per input channel; the per-channel dot products are summed
 * per filter, hence the number of filters denotes the number of output channels of a convolution
 * layer. Some modifications may be set on a convolution layer, namely a stride, which is the
 * distance the kernel moves between successive applications within a channel, and a padding, which
 * preserves the input size (width and/or height and/or depth) by adding the specified amount of
 * padding to the sides of the input. A convolution layer extracts features of the input data as
 * different representations, one representation per output channel, often known as feature maps or
 * feature vectors.
*
 * <p>While the convolution operation itself has been around for quite some time in mathematics, in
 * 1998 LeCun <i>et al.</i> implemented the very first convolution layers in a network called
 * LeNet-5 for a character recognition task; details of the network's implementation can be found
 * in LeNet-5's <a href="http://yann.lecun.com/exdb/publis/pdf/lecun-01a.pdf">paper</a>. While
 * other approaches at that time used handcrafted features from an external feature-extraction
 * stage, the convolution layer performed feature extraction on its own with no human interference.
 * This marked a new era of machine-extracted features, but it was not until 2012 that the
 * published <a
 * href="https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf">
 * paper</a> on AlexNet marked the beginning of the modern era of convolutional neural networks,
 * which by the name itself rely heavily on convolution layers.
*
 * <p>Convolution layers are usually used in image-related tasks due to their well-established
 * performance as shown by existing works, and currently other, non-image-related fields of study
 * are beginning to incorporate them as an addition to or a replacement for previous approaches,
 * with one example being time-series processing with 1-dimensional convolution layers. Because
 * convolution processes all points in the input data, it is computationally expensive, so using a
 * GPU rather than a CPU is strongly recommended for faster performance. Note that it is also
 * common to stack convolution layers with different output channel counts for more representations
 * of the input data.
*
* <p>Current implementations of {@code Convolution} are {@link Conv1d} with input dimension of
* {@link LayoutType#WIDTH}, {@link Conv2d} with input dimension of {@link LayoutType#WIDTH} and
* {@link LayoutType#HEIGHT}, and lastly {@link Conv3d} with input dimension of {@link
* LayoutType#WIDTH}, {@link LayoutType#HEIGHT}, and {@link LayoutType#DEPTH}. These implementations
 * share the same core principle as a {@code Convolution} layer does, with the difference being the
 * number of input dimensions each operates on, as denoted by {@code ConvXD} for {@code X}
 * dimension(s).
*
* @see <a href="https://d2l.djl.ai/chapter_convolutional-neural-networks/why-conv.html">The D2L
* chapters on convolution</a>
*/
public abstract class Convolution extends AbstractBlock {
private static final byte VERSION = 3;
protected Shape kernelShape;
protected Shape stride;
protected Shape padding;
protected Shape dilation;
protected int filters;
protected int groups;
protected boolean includeBias;
protected Parameter weight;
protected Parameter bias;
/**
* Creates a {@link Convolution} object.
*
* @param builder the {@code Builder} that has the necessary configurations
*/
@SuppressWarnings("this-escape")
public Convolution(ConvolutionBuilder<?> builder) {
super(VERSION);
kernelShape = builder.kernelShape;
stride = builder.stride;
padding = builder.padding;
dilation = builder.dilation;
filters = builder.filters;
groups = builder.groups;
includeBias = builder.includeBias;
weight =
addParameter(
Parameter.builder()
.setName("weight")
.setType(Parameter.Type.WEIGHT)
.build());
if (includeBias) {
bias =
addParameter(
Parameter.builder()
.setName("bias")
.setType(Parameter.Type.BIAS)
.build());
}
}
/**
* Returns the expected layout of the input.
*
* @return the expected layout of the input
*/
protected abstract LayoutType[] getExpectedLayout();
/**
* Returns the string representing the layout of the input.
*
* @return the string representing the layout of the input
*/
protected abstract String getStringLayout();
/**
* Returns the number of dimensions of the input.
*
* @return the number of dimensions of the input
*/
protected abstract int numDimensions();
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
NDArray input = inputs.singletonOrThrow();
Device device = input.getDevice();
NDArray weightArr = parameterStore.getValue(weight, device, training);
NDArray biasArr = parameterStore.getValue(bias, device, training);
return convolution(input, weightArr, biasArr, stride, padding, dilation, groups);
}
/** {@inheritDoc} */
@Override
protected void beforeInitialize(Shape... inputShapes) {
super.beforeInitialize(inputShapes);
Block.validateLayout(getExpectedLayout(), inputShapes[0].getLayout());
}
/** {@inheritDoc} */
@Override
protected void prepare(Shape[] inputs) {
long inputChannel = inputs[0].get(1);
weight.setShape(new Shape(filters, inputChannel / groups).addAll(kernelShape));
if (bias != null) {
bias.setShape(new Shape(filters));
}
}
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(Shape[] inputs) {
long[] shape = new long[numDimensions()];
shape[0] = inputs[0].get(0);
shape[1] = filters;
for (int i = 0; i < numDimensions() - 2; i++) {
shape[2 + i] =
(inputs[0].get(2 + i)
+ 2 * padding.get(i)
- dilation.get(i) * (kernelShape.get(i) - 1)
- 1)
/ stride.get(i)
+ 1;
}
return new Shape[] {new Shape(shape)};
}
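    // Worked example (hypothetical values): an input of shape (1, 3, 28, 28) with kernelShape
    // (5, 5), stride (1, 1), padding (2, 2), dilation (1, 1) and filters = 6 gives
    // (28 + 2 * 2 - 1 * (5 - 1) - 1) / 1 + 1 = 28 along each spatial axis, i.e. an output shape
    // of (1, 6, 28, 28).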
/** {@inheritDoc} */
@Override
public void loadMetadata(byte loadVersion, DataInputStream is)
throws IOException, MalformedModelException {
if (loadVersion == version) {
readInputShapes(is);
} else if (loadVersion != 1) {
throw new MalformedModelException("Unsupported encoding version: " + loadVersion);
}
}
/**
* Returns the shape of the kernel.
*
* @return the shape of the kernel
*/
public Shape getKernelShape() {
return kernelShape;
}
/**
* Returns the stride of the convolution.
*
* @return the stride of the convolution
*/
public Shape getStride() {
return stride;
}
/**
* Returns the padding along each dimension.
*
* @return the padding along each dimension
*/
public Shape getPadding() {
return padding;
}
/**
* Returns the dilation along each dimension.
*
* @return the dilation along each dimension
*/
public Shape getDilation() {
return dilation;
}
/**
* Returns the required number of filters.
*
* @return the required number of filters
*/
public int getFilters() {
return filters;
}
/**
* Returns the number of group partitions.
*
* @return the number of group partitions
*/
public int getGroups() {
return groups;
}
/**
* Returns whether to include a bias vector.
*
* @return whether to include a bias vector
*/
public boolean isIncludeBias() {
return includeBias;
}
/**
* Applies N-D convolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, ...)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, ...)
* @param bias bias {@code NDArray} of shape (outChannel)
* @param stride the stride of the convolving kernel: Shape(w), Shape(h, w) or Shape(d, h, w)
* @param padding implicit paddings on both sides of the input: Shape(w), Shape(h, w) or
* Shape(d, h, w)
* @param dilation the spacing between kernel elements: Shape(w), Shape(h, w) or Shape(d, h, w)
* @param groups split input into groups: input channel(input.size(1)) should be divisible by
* the number of groups
* @return the output of the convolution operation
*/
static NDList convolution(
NDArray input,
NDArray weight,
NDArray bias,
Shape stride,
Shape padding,
Shape dilation,
int groups) {
return input.getNDArrayInternal()
.convolution(input, weight, bias, stride, padding, dilation, groups);
}
/**
* A builder that can build any {@code Convolution} block.
*
* @param <T> the type of {@code Convolution} block to build
*/
@SuppressWarnings("rawtypes")
public abstract static class ConvolutionBuilder<T extends ConvolutionBuilder> {
protected Shape kernelShape;
protected Shape stride;
protected Shape padding;
protected Shape dilation;
protected int filters;
protected int groups = 1;
protected boolean includeBias = true;
/**
* Sets the shape of the kernel.
*
* @param kernelShape the shape of the kernel
* @return this Builder
*/
public T setKernelShape(Shape kernelShape) {
this.kernelShape = kernelShape;
return self();
}
/**
* Sets the stride of the convolution. Defaults to 1 in each dimension.
*
* @param stride the shape of the stride
* @return this Builder
*/
public T optStride(Shape stride) {
this.stride = stride;
return self();
}
/**
* Sets the padding along each dimension. Defaults to 0 along each dimension.
*
* @param padding the shape of padding along each dimension
* @return this Builder
*/
public T optPadding(Shape padding) {
this.padding = padding;
return self();
}
/**
* Sets the dilation along each dimension. Defaults to 1 along each dimension.
*
* @param dilate the shape of dilation along each dimension
* @return this Builder
*/
public T optDilation(Shape dilate) {
this.dilation = dilate;
return self();
}
/**
* Sets the <b>Required</b> number of filters.
*
* @param filters the number of convolution filters(channels)
* @return this Builder
*/
public T setFilters(int filters) {
this.filters = filters;
return self();
}
/**
* Sets the number of group partitions.
*
* @param groups the number of group partitions
* @return this Builder
*/
public T optGroups(int groups) {
this.groups = groups;
return self();
}
/**
* Sets the optional parameter of whether to include a bias vector. Includes bias by
* default.
*
* @param includeBias whether to use a bias vector parameter
* @return this Builder
*/
public T optBias(boolean includeBias) {
this.includeBias = includeBias;
return self();
}
/**
* Validates that the required arguments are set.
*
* @throws IllegalArgumentException if the required arguments are not set
*/
protected void validate() {
if (kernelShape == null || filters == 0) {
throw new IllegalArgumentException("Kernel and numFilters must be set");
}
}
protected abstract T self();
}
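    // A minimal sketch of using a concrete builder (illustrative values only). With optGroups(2),
    // the input channel count at runtime must be divisible by 2.
    //
    //   Block grouped = Conv2d.builder()
    //           .setKernelShape(new Shape(3, 3))
    //           .setFilters(32)
    //           .optGroups(2)
    //           .optBias(false)
    //           .build();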
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/convolutional/Deconvolution.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.convolutional;
import ai.djl.Device;
import ai.djl.MalformedModelException;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.types.LayoutType;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.AbstractBlock;
import ai.djl.nn.Block;
import ai.djl.nn.Parameter;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
import java.io.DataInputStream;
import java.io.IOException;
/**
 * Transposed convolution, also named fractionally-strided convolution (<a
 * href="https://arxiv.org/pdf/1603.07285">Dumoulin &amp; Visin</a>) or deconvolution (<a
 * href="https://ieeexplore.ieee.org/document/7298965">Long et al., 2015</a>), applies a
 * transformation that goes in the opposite direction of a normal convolution.
 *
 * <p>The need for transposed convolutions generally arises from the desire to map from something
 * that has the shape of the output of some convolution to something that has the shape of its
 * input, while maintaining a connectivity pattern that is compatible with said convolution.
*
* <p>Current implementations of {@code Deconvolution} are {@link Conv1dTranspose} with input
* dimension of {@link LayoutType#WIDTH} and {@link Conv2dTranspose} with input dimension of {@link
* LayoutType#WIDTH} and {@link LayoutType#HEIGHT}. These implementations share the same core
 * principle as a {@code Deconvolution} layer does, with the difference being the number of input
 * dimensions each operates on, as denoted by {@code ConvXdTranspose} for {@code X} dimension(s).
*/
public abstract class Deconvolution extends AbstractBlock {
protected Shape kernelShape;
protected Shape stride;
protected Shape padding;
protected Shape outPadding;
protected Shape dilation;
protected int filters;
protected int groups;
protected boolean includeBias;
protected Parameter weight;
protected Parameter bias;
/**
* Creates a {@link Deconvolution} object.
*
* @param builder the {@code Builder} that has the necessary configurations
*/
@SuppressWarnings("this-escape")
public Deconvolution(DeconvolutionBuilder<?> builder) {
kernelShape = builder.kernelShape;
stride = builder.stride;
padding = builder.padding;
outPadding = builder.outPadding;
dilation = builder.dilation;
filters = builder.filters;
groups = builder.groups;
includeBias = builder.includeBias;
weight =
addParameter(
Parameter.builder()
.setName("weight")
.setType(Parameter.Type.WEIGHT)
.build());
if (includeBias) {
bias =
addParameter(
Parameter.builder()
.setName("bias")
.setType(Parameter.Type.BIAS)
.build());
}
}
/**
* Returns the expected layout of the input.
*
* @return the expected layout of the input
*/
protected abstract LayoutType[] getExpectedLayout();
/**
* Returns the string representing the layout of the input.
*
* @return the string representing the layout of the input
*/
protected abstract String getStringLayout();
/**
* Returns the number of dimensions of the input.
*
* @return the number of dimensions of the input
*/
protected abstract int numDimensions();
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
NDArray input = inputs.singletonOrThrow();
Device device = input.getDevice();
NDArray weightArr = parameterStore.getValue(weight, device, training);
NDArray biasArr = parameterStore.getValue(bias, device, training);
return deconvolution(
input, weightArr, biasArr, stride, padding, outPadding, dilation, groups);
}
/** {@inheritDoc} */
@Override
protected void beforeInitialize(Shape... inputShapes) {
super.beforeInitialize(inputShapes);
Block.validateLayout(getExpectedLayout(), inputShapes[0].getLayout());
}
/** {@inheritDoc} */
@Override
protected void prepare(Shape[] inputs) {
long inputChannel = inputs[0].get(1);
weight.setShape(new Shape(filters, inputChannel / groups).addAll(kernelShape));
if (bias != null) {
bias.setShape(new Shape(filters));
}
}
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(Shape[] inputs) {
long[] shape = new long[numDimensions()];
shape[0] = inputs[0].get(0);
shape[1] = filters;
for (int i = 0; i < numDimensions() - 2; i++) {
shape[2 + i] =
(inputs[0].get(2 + i) - 1) * stride.get(i)
- 2 * padding.get(i)
+ dilation.get(i) * (kernelShape.get(i) - 1)
+ outPadding.get(i)
+ 1;
}
return new Shape[] {new Shape(shape)};
}
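    // Worked example (hypothetical values): an input of shape (1, 8, 13, 13) with kernelShape
    // (3, 3), stride (2, 2), padding (0, 0), outPadding (0, 0), dilation (1, 1) and filters = 4
    // gives (13 - 1) * 2 - 2 * 0 + 1 * (3 - 1) + 0 + 1 = 27 along each spatial axis, i.e. an
    // output shape of (1, 4, 27, 27).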
/** {@inheritDoc} */
@Override
public void loadMetadata(byte loadVersion, DataInputStream is)
throws IOException, MalformedModelException {
if (loadVersion == version) {
readInputShapes(is);
} else {
throw new MalformedModelException("Unsupported encoding version: " + loadVersion);
}
}
/**
* Applies N-D deconvolution over an input signal composed of several input planes.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, ...)
* @param weight filters {@code NDArray} of shape (outChannel, inputChannel/groups, ...)
* @param bias bias {@code NDArray} of shape (outChannel)
* @param stride the stride of the deconvolving kernel: Shape(w) or Shape(h, w)
* @param padding implicit paddings on both sides of the input: Shape(w) or Shape(h, w)
     * @param outPadding the amount of implicit zero-padding added to both sides of the output for
     *     each dimension: Shape(w) or Shape(h, w)
* @param dilation the spacing between kernel elements: Shape(w) or Shape(h, w)
* @param groups split input into groups: input channel(input.size(1)) should be divisible by
* the number of groups
* @return the output of the deconvolution operation
*/
static NDList deconvolution(
NDArray input,
NDArray weight,
NDArray bias,
Shape stride,
Shape padding,
Shape outPadding,
Shape dilation,
int groups) {
return input.getNDArrayInternal()
.deconvolution(input, weight, bias, stride, padding, outPadding, dilation, groups);
}
/**
* A builder that can build any {@code Deconvolution} block.
*
* @param <T> the type of {@code Deconvolution} block to build
*/
@SuppressWarnings("rawtypes")
public abstract static class DeconvolutionBuilder<T extends DeconvolutionBuilder> {
protected Shape kernelShape;
protected Shape stride;
protected Shape padding;
protected Shape outPadding;
protected Shape dilation;
protected int filters;
protected int groups = 1;
protected boolean includeBias = true;
/**
* Sets the shape of the kernel.
*
* @param kernelShape the shape of the kernel
* @return this Builder
*/
public T setKernelShape(Shape kernelShape) {
this.kernelShape = kernelShape;
return self();
}
/**
* Sets the stride of the deconvolution. Defaults to 1 in each dimension.
*
* @param stride the shape of the stride
* @return this Builder
*/
public T optStride(Shape stride) {
this.stride = stride;
return self();
}
/**
* Sets the padding along each dimension. Defaults to 0 along each dimension.
*
* @param padding the shape of padding along each dimension
* @return this Builder
*/
public T optPadding(Shape padding) {
this.padding = padding;
return self();
}
/**
* Sets the out_padding along each dimension. Defaults to 0 along each dimension.
*
* @param outPadding the shape of out_padding along each dimension
* @return this Builder
*/
public T optOutPadding(Shape outPadding) {
this.outPadding = outPadding;
return self();
}
/**
* Sets the dilation along each dimension. Defaults to 1 along each dimension.
*
* @param dilate the shape of dilation along each dimension
* @return this Builder
*/
public T optDilation(Shape dilate) {
this.dilation = dilate;
return self();
}
/**
* Sets the <b>Required</b> number of filters.
*
* @param filters the number of deconvolution filters(channels)
* @return this Builder
*/
public T setFilters(int filters) {
this.filters = filters;
return self();
}
/**
* Sets the number of group partitions.
*
* @param groups the number of group partitions
* @return this Builder
*/
public T optGroups(int groups) {
this.groups = groups;
return self();
}
/**
* Sets the optional parameter of whether to include a bias vector. Includes bias by
* default.
*
* @param includeBias whether to use a bias vector parameter
* @return this Builder
*/
public T optBias(boolean includeBias) {
this.includeBias = includeBias;
return self();
}
/**
* Validates that the required arguments are set.
*
* @throws IllegalArgumentException if the required arguments are not set
*/
protected void validate() {
if (kernelShape == null || filters == 0) {
throw new IllegalArgumentException("Kernel and numFilters must be set");
}
}
protected abstract T self();
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/convolutional/package-info.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
/**
* Contains classes that define convolutional operations extending {@link
* ai.djl.nn.convolutional.Convolution} and {@link ai.djl.nn.convolutional.Deconvolution}.
*/
package ai.djl.nn.convolutional;
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/core/AbstractEmbedding.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.core;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDManager;
/**
 * An Embedding maps elements of type T to 1-dimensional representative {@link NDArray}s.
*
* @param <T> the type of item that should be embedded
*/
public interface AbstractEmbedding<T> {
/**
* Returns whether an item is in the embedding.
*
* @param item the item to test
* @return true if the item is in the embedding
*/
boolean hasItem(T item);
/**
* Embeds an array of items.
*
* @param manager the manager for the new embeddings
* @param items the items to embed
* @return the embedding {@link NDArray} of Shape(items.length, embeddingSize)
*/
NDArray embed(NDManager manager, T[] items);
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/core/AbstractIndexedEmbedding.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.core;
import java.io.IOException;
import java.util.Optional;
/**
* An {@link AbstractEmbedding} where each embedded item can be assigned an integer index.
*
* @param <T> the type of the item that should be embedded
*/
public interface AbstractIndexedEmbedding<T> extends AbstractEmbedding<T> {
/**
* Encodes an object of input type into a byte array. This is used in saving and loading the
* {@link Embedding} objects.
*
* @param input the input object to be encoded
* @return the encoded byte array.
* @throws IOException if there is an error while encoding
*/
byte[] encode(T input) throws IOException;
/**
* Decodes the given byte array into an object of input parameter type.
*
* @param byteArray the byte array to be decoded
     * @return the decoded object of the input parameter type
* @throws IOException if there was an error while decoding
*/
T decode(byte[] byteArray) throws IOException;
/**
* Embeds an item.
*
* @param item the item to embed
* @return the index of the item in the embedding
*/
long embed(T item);
/**
* Returns the item corresponding to the given index.
*
* @param index the index
* @return the item corresponding to the given index
*/
Optional<T> unembed(long index);
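    // Note: typical implementations keep embed and unembed roughly inverse, so
    // unembed(embed(item)) yields the original item when it is known to the embedding.
    // Fallthrough implementations such as Embedding.DefaultEmbedding and Embedding.DefaultItem
    // instead map every item to index 0.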
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/core/ConstantEmbedding.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.core;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.AbstractBlock;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.util.Optional;
/** An {@link AbstractIndexedEmbedding} that always returns a constant value. */
@SuppressWarnings("rawtypes")
public class ConstantEmbedding extends AbstractBlock implements AbstractIndexedEmbedding {
protected NDArray embedding;
/**
* Constructs a constant embedding with the given constant.
*
* <p>The constant is assumed to be a fixed value, and starts out as frozen. To unfreeze, use
* {@link ai.djl.nn.Block#freezeParameters(boolean)}.
*
* @param embedding the value to return for all embeddings
*/
@SuppressWarnings("this-escape")
public ConstantEmbedding(NDArray embedding) {
this.embedding = embedding;
freezeParameters(true);
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
NDManager manager = inputs.get(0).getManager();
NDArray base = manager.create(embedding.getShape());
embedding.copyTo(base);
Shape shape = inputs.get(0).getShape().addAll(embedding.getShape());
return new NDList(
base.reshape(1, embedding.size()).repeat(0, inputs.get(0).size()).reshape(shape));
}
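    // For example (hypothetical shapes): with a constant embedding of shape (4) and an input of
    // shape (2, 3), the block returns an array of shape (2, 3, 4), i.e. the constant repeated
    // once per input element.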
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(Shape[] inputShapes) {
return new Shape[] {inputShapes[0].addAll(embedding.getShape())};
}
/** {@inheritDoc} */
@Override
public void saveParameters(DataOutputStream os) {
// Nothing to save
}
/** {@inheritDoc} */
@Override
public void loadParameters(NDManager manager, DataInputStream is) {
// Nothing to load
}
/** {@inheritDoc} */
@Override
public Optional<?> unembed(long index) {
return Optional.empty();
}
/** {@inheritDoc} */
@Override
public byte[] encode(Object input) {
return new byte[0];
}
/** {@inheritDoc} */
@Override
public Object decode(byte[] byteArray) {
return null;
}
/** {@inheritDoc} */
@Override
public long embed(Object item) {
return 0;
}
/** {@inheritDoc} */
@Override
public NDArray embed(NDManager manager, Object[] items) {
NDArray base = manager.create(embedding.getShape());
embedding.copyTo(base);
return base.repeat(0, items.length)
.reshape(new Shape(items.length).addAll(embedding.getShape()));
}
/** {@inheritDoc} */
@Override
public boolean hasItem(Object item) {
return true;
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/core/Embedding.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.core;
import ai.djl.Device;
import ai.djl.MalformedModelException;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDArrays;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.Shape;
import ai.djl.ndarray.types.SparseFormat;
import ai.djl.nn.AbstractBlock;
import ai.djl.nn.Block;
import ai.djl.nn.Parameter;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.Optional;
/**
 * An Embedding block maps a collection of items to 1-dimensional representative {@link NDArray}s.
 *
 * @param <T> the type of item that should be embedded and mapped to the array
*/
public abstract class Embedding<T> extends AbstractBlock implements AbstractIndexedEmbedding<T> {
private static final byte VERSION = 6;
protected int numEmbeddings;
protected int embeddingSize;
protected SparseFormat sparseFormat;
protected AbstractIndexedEmbedding<T> fallthroughEmbedding;
protected Parameter embedding;
@SuppressWarnings("this-escape")
protected Embedding(BaseBuilder<T, ?> baseBuilder) {
super(VERSION);
embeddingSize = baseBuilder.embeddingSize;
numEmbeddings = baseBuilder.numEmbeddings != 0 ? baseBuilder.numEmbeddings : 1;
sparseFormat = baseBuilder.sparseFormat;
embedding =
addParameter(
Parameter.builder()
.setName("embedding")
.setType(Parameter.Type.WEIGHT)
.build());
if (baseBuilder.fallthrough != null && baseBuilder.defaultItem != null) {
throw new IllegalArgumentException(
"You can not specify both a fallthrough and a defaultItem");
} else if (baseBuilder.fallthrough != null) {
fallthroughEmbedding = baseBuilder.fallthrough;
} else if (baseBuilder.defaultItem != null) {
fallthroughEmbedding = new DefaultItem(baseBuilder.defaultItem);
} else if (baseBuilder.useDefault) {
fallthroughEmbedding = new DefaultEmbedding();
}
inputShapes = new Shape[] {new Shape(-1)};
}
/**
* Constructs a pretrained embedding.
*
* @param embedding the embedding array
*/
protected Embedding(NDArray embedding) {
this(embedding, SparseFormat.DENSE);
}
/**
* Constructs a pretrained embedding.
*
     * <p>Because it is created with pre-trained data, it is created as a frozen block. If you wish
     * to update it, call {@link Block#freezeParameters(boolean)}.
*
* @param embedding the embedding array
* @param format whether to compute row sparse gradient in the backward calculation
*/
@SuppressWarnings("this-escape")
protected Embedding(NDArray embedding, SparseFormat format) {
super(VERSION);
numEmbeddings = Math.toIntExact(embedding.getShape().get(0));
embeddingSize = Math.toIntExact(embedding.getShape().get(1));
this.sparseFormat = format;
this.embedding =
addParameter(
Parameter.builder()
.setName("embedding")
.setType(Parameter.Type.WEIGHT)
.build());
this.embedding.setArray(embedding);
inputShapes = new Shape[] {new Shape(-1)};
freezeParameters(true);
}
/** {@inheritDoc} */
@Override
public void prepare(Shape[] inputShapes) {
// numItems will be adjusted by embedding array or fallthroughEmbedding
embedding.setShape(new Shape(numEmbeddings, embeddingSize));
}
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(Shape[] inputShapes) {
return new Shape[] {inputShapes[0].addAll(new Shape(embeddingSize))};
}
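    // For example (hypothetical shapes): an index input of shape (batchSize, sequenceLength)
    // produces an output of shape (batchSize, sequenceLength, embeddingSize).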
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
NDArray input = inputs.head();
Device device = input.getDevice();
NDArray weightArr = parameterStore.getValue(embedding, device, training);
return embedding(input, weightArr, sparseFormat);
}
/** {@inheritDoc} */
@Override
public void saveParameters(DataOutputStream os) throws IOException {
os.writeByte(VERSION);
saveInputShapes(os);
os.writeInt(sparseFormat.getValue());
embedding.save(os);
}
/** {@inheritDoc} */
@Override
public void loadParameters(NDManager manager, DataInputStream is)
throws IOException, MalformedModelException {
byte version = is.readByte();
// True to prepend an empty zero index to embedding table
// For compatibility with versions that did not always have
// the zero index reserved for the fallthrough embedding
boolean addMissingZero = false;
if (version >= 3) {
readInputShapes(is);
if (version == 3) {
addMissingZero = !is.readBoolean();
}
if (version == 6) {
sparseFormat = SparseFormat.fromValue(is.readInt());
} else {
sparseFormat = is.readBoolean() ? SparseFormat.ROW_SPARSE : SparseFormat.DENSE;
}
if (version < 6) {
// read the datatype from old version
is.readUTF();
}
if (version == 3 || version == 4) {
int embedderSize = is.readInt();
for (int i = 1; i <= embedderSize; i++) {
int encodedKeySize = is.readInt();
byte[] encodedKey = new byte[encodedKeySize];
if (is.read(encodedKey) != encodedKey.length) {
throw new MalformedModelException("Model data is malformed");
}
is.readInt();
}
}
} else if (version == 2) {
readInputShapes(is);
addMissingZero = true;
} else if (version != 1) {
throw new MalformedModelException("Unsupported encoding version: " + version);
}
embedding.load(manager, is);
numEmbeddings = (int) embedding.getArray().getShape().get(0);
embeddingSize = (int) embedding.getArray().getShape().get(1);
if (addMissingZero) {
numEmbeddings++;
embedding.setArray(
NDArrays.concat(
new NDList(
manager.zeros(new Shape(1, embeddingSize)),
embedding.getArray())));
}
}
/** {@inheritDoc} */
@Override
public NDArray embed(NDManager manager, T[] items) {
return manager.create(Arrays.stream(items).mapToLong(this::embed).toArray());
}
/**
     * A simple lookup table that looks up embeddings in a dictionary of fixed size.
*
* @param input NDArray containing indices into the embedding matrix
* @param weight The embedding matrix with number of rows equal to the maximum possible index +
* 1, and number of columns equal to the embedding size
* @param sparse SparseFormat of the gradient
* @return output NDArray
*/
public static NDList embedding(NDArray input, NDArray weight, SparseFormat sparse) {
return input.getNDArrayInternal().embedding(input, weight, sparse);
}
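    // Illustrative usage sketch (hypothetical shapes): indices of shape (2, 5) looked up in a
    // weight of shape (vocabularySize, 32) return an NDList whose single array has shape
    // (2, 5, 32).
    //
    //   NDList out = Embedding.embedding(indices, weight, SparseFormat.DENSE);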
/**
* The Builder to construct a {@link Embedding} type of {@link Block}.
*
* @param <T> the type of object to embed
*/
public abstract static class BaseBuilder<T, B extends BaseBuilder<T, B>> {
protected Class<T> embeddingType;
protected int numEmbeddings;
protected int embeddingSize;
protected boolean useDefault = true;
protected T defaultItem;
protected AbstractIndexedEmbedding<T> fallthrough;
protected SparseFormat sparseFormat = SparseFormat.DENSE;
protected BaseBuilder() {}
/**
* Returns the embedded type.
*
* @return the embedded type
*/
public Class<T> getEmbeddingType() {
return embeddingType;
}
/**
* Creates a new {@link BaseBuilder} with the specified embedding type.
*
* @param embeddingType the embedding class
* @return a new {@link BaseBuilder} class with the specified embedding type
*/
protected abstract B setType(Class<T> embeddingType);
/**
* Sets the size of the embeddings.
*
* @param embeddingSize the size of the 1D embedding array
* @return this Builder
*/
public B setEmbeddingSize(int embeddingSize) {
this.embeddingSize = embeddingSize;
return self();
}
/**
* Sets the size of the dictionary of embeddings.
*
* @param numEmbeddings the size of the dictionary of embeddings
* @return this Builder
*/
public B optNumEmbeddings(int numEmbeddings) {
this.numEmbeddings = numEmbeddings;
return self();
}
/**
* Sets whether to use a default embedding for undefined items (default true).
*
* @param useDefault true to provide a default embedding and false to throw an {@link
* IllegalArgumentException} when the item can not be found
* @return this Builder
*/
public B optUseDefault(boolean useDefault) {
this.useDefault = useDefault;
return self();
}
/**
* Sets whether to use a default item's embedding for undefined items.
*
* @param defaultItem the item to use as a default.
* @return this Builder
*/
public B optDefaultItem(T defaultItem) {
this.defaultItem = defaultItem;
return self();
}
/**
* Sets a custom handler for items not found in the embedding.
*
* <p>See the standard fallthrough handlers {@link #optUseDefault(boolean)} and {@link
* #optDefaultItem(Object)}.
*
* @param fallthrough the embedding to handle default cases.
* @return this Builder
*/
public B optFallthrough(AbstractIndexedEmbedding<T> fallthrough) {
this.fallthrough = fallthrough;
return self();
}
/**
         * Sets the optional {@link SparseFormat} used for the gradient in the backward
         * calculation. If set to {@link SparseFormat#ROW_SPARSE}, the gradient's storage type is
         * row_sparse.
         *
         * @param sparseFormat the {@link SparseFormat} of the gradient in the backward calculation
* @return this Builder
*/
public B optSparseFormat(SparseFormat sparseFormat) {
this.sparseFormat = sparseFormat;
return self();
}
/**
         * Returns this {@code Builder} object.
*
* @return this {@code BaseBuilder}
*/
protected abstract B self();
}
protected class DefaultEmbedding implements AbstractIndexedEmbedding<T> {
/** {@inheritDoc} */
@Override
public byte[] encode(T input) throws IOException {
return Embedding.this.encode(input);
}
/** {@inheritDoc} */
@Override
public T decode(byte[] byteArray) throws IOException {
return Embedding.this.decode(byteArray);
}
/** {@inheritDoc} */
@Override
public boolean hasItem(T item) {
return true;
}
/** {@inheritDoc} */
@Override
public NDArray embed(NDManager manager, T[] items) {
int length = items.length;
NDArray base = embedding.getArray().get(0);
base.attach(manager);
return base.repeat(new Shape(length, embeddingSize));
}
/** {@inheritDoc} */
@Override
public long embed(T item) {
return 0;
}
/** {@inheritDoc} */
@Override
public Optional<T> unembed(long index) {
return Optional.empty();
}
}
protected class DefaultItem implements AbstractIndexedEmbedding<T> {
private T defaultItem;
public DefaultItem(T defaultItem) {
this.defaultItem = defaultItem;
}
/** {@inheritDoc} */
@Override
public byte[] encode(T input) throws IOException {
return Embedding.this.encode(input);
}
/** {@inheritDoc} */
@Override
public T decode(byte[] byteArray) throws IOException {
return Embedding.this.decode(byteArray);
}
/** {@inheritDoc} */
@Override
public boolean hasItem(T item) {
return true;
}
/** {@inheritDoc} */
@Override
@SuppressWarnings("unchecked")
public NDArray embed(NDManager manager, T[] items) {
Object[] defaults = new Object[items.length];
Arrays.fill(defaults, defaultItem);
return Embedding.this.embed(manager, (T[]) defaults);
}
/** {@inheritDoc} */
@Override
public long embed(T item) {
return 0;
}
/** {@inheritDoc} */
@Override
public Optional<T> unembed(long index) {
return Optional.of(defaultItem);
}
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/core/Linear.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.core;
import ai.djl.Device;
import ai.djl.MalformedModelException;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.AbstractBlock;
import ai.djl.nn.Block;
import ai.djl.nn.Parameter;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
import ai.djl.util.Preconditions;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Collections;
/**
* A Linear block applies a linear transformation \(Y = XW^T + b\).
*
* <p>It has the following shapes:
*
* <ul>
* <li>input X: [x1, x2, ..., xn, input_dim]
* <li>weight W: [units, input_dim]
* <li>Bias b: [units]
* <li>output Y: [x1, x2, ..., xn, units]
* </ul>
*
* <p>It is most typically used with a simple batched 1D input. In that case, the shape would be:
*
* <ul>
* <li>input X: [batch_num, input_dim]
* <li>weight W: [units, input_dim]
* <li>Bias b: [units]
* <li>output Y: [batch_num, units]
* </ul>
*
* <p>The Linear block should be constructed using {@link Linear.Builder}.
*/
public class Linear extends AbstractBlock {
private static final byte VERSION = 4;
private long units;
private long inputFeatures;
private Shape inputShape;
private Parameter weight;
private Parameter bias;
@SuppressWarnings("this-escape")
protected Linear(Builder builder) {
super(VERSION);
units = builder.units;
weight =
addParameter(
Parameter.builder()
.setName("weight")
.setType(Parameter.Type.WEIGHT)
.build());
if (builder.bias) {
bias =
addParameter(
Parameter.builder()
.setName("bias")
.setType(Parameter.Type.BIAS)
.build());
}
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
NDArray input = inputs.singletonOrThrow();
Device device = input.getDevice();
NDArray weightArr = parameterStore.getValue(weight, device, training);
NDArray biasArr = parameterStore.getValue(bias, device, training);
return linear(input, weightArr, biasArr);
}
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(Shape[] inputs) {
return new Shape[] {inputs[0].slice(0, inputs[0].dimension() - 1).add(units)};
}
/** {@inheritDoc} */
@Override
public PairList<String, Shape> describeInput() {
return new PairList<>(
Collections.singletonList("linearInput"), Collections.singletonList(inputShape));
}
/** {@inheritDoc} */
@Override
protected void beforeInitialize(Shape... inputShapes) {
super.beforeInitialize(inputShapes);
Preconditions.checkArgument(inputShapes.length == 1, "Linear block only support 1 input");
Shape input = inputShapes[0];
inputFeatures = input.get(input.dimension() - 1);
inputShape = input.slice(0, input.dimension() - 1);
}
/** {@inheritDoc} */
@Override
public void prepare(Shape[] inputShapes) {
Shape input = inputShapes[0];
weight.setShape(new Shape(units, input.get(input.dimension() - 1)));
if (bias != null) {
bias.setShape(new Shape(units));
}
}
/** {@inheritDoc} */
@Override
protected void saveMetadata(DataOutputStream os) throws IOException {
os.writeLong(units);
os.writeLong(inputFeatures);
os.write(inputShape.getEncoded());
}
/** {@inheritDoc} */
@Override
public void loadMetadata(byte loadVersion, DataInputStream is)
throws IOException, MalformedModelException {
switch (loadVersion) {
case VERSION:
units = is.readLong();
inputFeatures = is.readLong();
break;
case 3:
units = is.readLong();
if (is.readBoolean()) {
throw new IllegalArgumentException("flatten is not supported!");
}
inputFeatures = is.readLong();
break;
case 2:
if (is.readBoolean()) {
throw new IllegalArgumentException("flatten is not supported!");
}
inputFeatures = is.readLong();
break;
case 1:
inputFeatures = Shape.decode(is).size();
break;
default:
throw new MalformedModelException("Unsupported encoding version: " + loadVersion);
}
inputShape = Shape.decode(is);
inputShapes = new Shape[] {inputShape};
}
/**
* Applies a linear transformation to the incoming data.
*
* @param input input X: [x1, x2, …, xn, input_dim]
* @param weight weight W: [units, input_dim]
* @return output Y: [x1, x2, …, xn, units]
*/
public static NDList linear(NDArray input, NDArray weight) {
return linear(input, weight, null);
}
/**
* Applies a linear transformation to the incoming data.
*
* @param input input X: [x1, x2, …, xn, input_dim]
* @param weight weight W: [units, input_dim]
* @param bias bias b: [units]
* @return output Y: [x1, x2, …, xn, units]
*/
public static NDList linear(NDArray input, NDArray weight, NDArray bias) {
return input.getNDArrayInternal().linear(input, weight, bias);
}
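    // Usage sketch (illustrative only, assuming an NDManager named "manager" is in scope):
    // applying the static transformation Y = XW^T + b with the shapes documented above.
    //
    //     NDArray x = manager.randomNormal(new Shape(2, 4)); // input  [batch_num=2, input_dim=4]
    //     NDArray w = manager.randomNormal(new Shape(3, 4)); // weight [units=3, input_dim=4]
    //     NDArray b = manager.zeros(new Shape(3));           // bias   [units=3]
    //     NDList y = linear(x, w, b);                        // output [2, 3]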
/**
* Creates a builder to build a {@code Linear}.
*
* @return a new builder
*/
public static Builder builder() {
return new Builder();
}
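    // Usage sketch (illustrative only): building and initializing a Linear block. An NDManager
    // named "manager" and the DataType import are assumed to be available.
    //
    //     Block fc = Linear.builder().setUnits(3).optBias(true).build();
    //     fc.initialize(manager, DataType.FLOAT32, new Shape(2, 4));
    //     NDArray x = manager.randomNormal(new Shape(2, 4));
    //     NDList y = fc.forward(new ParameterStore(manager, false), new NDList(x), false); // [2, 3]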
/** The Builder to construct a {@link Linear} type of {@link Block}. */
public static class Builder {
protected long units;
private boolean bias = true;
protected Builder() {}
/**
* Sets the number of output channels.
*
* @param units the number of desired output channels
* @return this Builder
*/
public Builder setUnits(long units) {
this.units = units;
return this;
}
/**
* Sets the optional parameter that indicates whether to include a bias vector with default
* value of true.
*
* @param bias whether to use a bias vector parameter
* @return this Builder
*/
public Builder optBias(boolean bias) {
this.bias = bias;
return this;
}
/**
* Returns the constructed {@code Linear}.
*
* @return the constructed {@code Linear}
         * @throws IllegalArgumentException if the required parameter (units) has not been set
         */
        public Linear build() {
            Preconditions.checkArgument(units > 0, "You must specify units");
return new Linear(this);
}
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/core/LinearCollection.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.core;
import ai.djl.Device;
import ai.djl.MalformedModelException;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.AbstractBlock;
import ai.djl.nn.Block;
import ai.djl.nn.Parameter;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
import ai.djl.util.Preconditions;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Collections;
/**
* A LinearCollection block applies \(m\) linear transformations \(Y_i = X_i W_i + b_i\) for each
* \(i \in [1, \ldots, m]\) and \(m = \prod_{j=1}^t s_j\). \(t\) is typically 1, so compared to a
* {@link Linear} block, the involved shapes have typically one split dimension added which
* separates the different linear transformations from each other. Another difference to a {@link
* Linear} block is that the weight is not transposed (to align with the internally used algebraic
 * operation {@link NDArray#matMul(NDArray)}).
*
* <p>It has the following shapes:
*
* <ul>
* <li>input X: [x_1, s_1, s_2, …, s_t, input_dim]
* <li>weight W: [s_1, s_2, …, s_t, input_dim, units]
* <li>Bias b: [s_1, s_2, …, s_t, units]
* <li>output Y: [x_1, s_1, s_2, …, s_t, units]
* </ul>
*
* <p>The LinearCollection block should be constructed using {@link LinearCollection.Builder}.
*/
public class LinearCollection extends AbstractBlock {
private static final byte VERSION = 1;
private long units;
private long inputFeatures;
private Shape inputShape;
private Parameter weight;
private Parameter bias;
private int[] shiftBatchAxis;
private int[] reverseShiftBatchAxis;
LinearCollection(Builder builder) {
super(VERSION);
units = builder.units;
weight =
addParameter(
Parameter.builder()
.setName("weight")
.setType(Parameter.Type.WEIGHT)
.build());
if (builder.bias) {
bias =
addParameter(
Parameter.builder()
.setName("bias")
.setType(Parameter.Type.BIAS)
.build());
}
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
NDArray input = inputs.singletonOrThrow();
Device device = input.getDevice();
NDArray weightArr = parameterStore.getValue(weight, device, training);
NDArray biasArr = parameterStore.getValue(bias, device, training);
return linear(input, weightArr, biasArr);
}
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(Shape[] inputs) {
return new Shape[] {inputs[0].slice(0, inputs[0].dimension() - 1).add(units)};
}
/** {@inheritDoc} */
@Override
public PairList<String, Shape> describeInput() {
return new PairList<>(
Collections.singletonList("linearInput"), Collections.singletonList(inputShape));
}
/** {@inheritDoc} */
@Override
protected void beforeInitialize(Shape... inputShapes) {
super.beforeInitialize(inputShapes);
Preconditions.checkArgument(inputShapes.length == 1, "Linear block only support 1 input");
Shape input = inputShapes[0];
inputFeatures = input.slice(1).size();
inputShape = input.slice(0, 1);
}
/** {@inheritDoc} */
@Override
public void prepare(Shape[] inputShapes) {
Shape input = inputShapes[0];
weight.setShape(input.slice(1).add(units));
if (bias != null) {
bias.setShape(input.slice(1, input.dimension() - 1).add(units));
}
}
/** {@inheritDoc} */
@Override
protected void saveMetadata(DataOutputStream os) throws IOException {
os.writeLong(units);
os.writeLong(inputFeatures);
os.write(inputShape.getEncoded());
}
/** {@inheritDoc} */
@Override
public void loadMetadata(byte loadVersion, DataInputStream is)
throws IOException, MalformedModelException {
switch (loadVersion) {
case VERSION:
units = is.readLong();
inputFeatures = is.readLong();
break;
default:
throw new MalformedModelException("Unsupported encoding version: " + loadVersion);
}
inputShape = Shape.decode(is);
}
/**
* Applies linear transformations to the incoming data.
*
* @param input input X: [x_1, s_1, s_2, …, s_t, input_dim]
* @param weight weight W: [s_1, s_2, …, s_t, input_dim, units]
* @param bias bias b: [s_1, s_2, …, s_t, units]
* @return output Y: [x_1, s_1, s_2, …, s_t, units]
*/
public NDList linear(NDArray input, NDArray weight, NDArray bias) {
if (shiftBatchAxis == null) {
// as the batch axis is the first axis in the shape of the input resp. output
// arrays, it needs to be shifted in order to bring the split axes in front resp. back
// again to be suitable for matMul;
// in case there is only one split axis, the transpose array (1,0,2) could be used for
// both shifts, but for the general case we calculate the transpose arrays here
int dim = input.getShape().dimension();
shiftBatchAxis = new int[dim];
reverseShiftBatchAxis = new int[dim];
for (int d = 0; d < dim - 2; d++) {
shiftBatchAxis[d] = d + 1;
reverseShiftBatchAxis[d + 1] = d;
}
shiftBatchAxis[dim - 1] = dim - 1;
reverseShiftBatchAxis[dim - 1] = dim - 1;
shiftBatchAxis[dim - 2] = 0;
reverseShiftBatchAxis[0] = dim - 2;
}
NDArray resultArr =
input.transpose(shiftBatchAxis).matMul(weight).transpose(reverseShiftBatchAxis);
if (bias != null) {
resultArr.addi(bias);
}
return new NDList(resultArr);
}
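    // Worked example (illustrative only), tracing the axis shifts above for a single split
    // dimension (t = 1):
    //
    //     input  X [x_1=2, s_1=5, input_dim=4]  --transpose(1,0,2)-->  [5, 2, 4]
    //     weight W [s_1=5, input_dim=4, units=3]
    //     matMul: [5, 2, 4] x [5, 4, 3] = [5, 2, 3]  --transpose(1,0,2)-->  output Y [2, 5, 3]
    //
    // Each of the s_1 = 5 linear transformations is applied to its own slice of the batch.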
/**
     * Creates a builder to build a {@code LinearCollection}.
*
* @return a new builder
*/
public static Builder builder() {
return new Builder();
}
/** The Builder to construct a {@link LinearCollection} type of {@link Block}. */
public static final class Builder {
private long units;
private boolean bias = true;
Builder() {}
/**
* Sets the number of output channels.
*
* @param units the number of desired output channels
* @return this Builder
*/
public Builder setUnits(long units) {
this.units = units;
return this;
}
/**
* Sets the optional parameter that indicates whether to include a bias vector with default
* value of true.
*
* @param bias whether to use a bias vector parameter
* @return this Builder
*/
public Builder optBias(boolean bias) {
this.bias = bias;
return this;
}
/**
         * Returns the constructed {@code LinearCollection}.
         *
         * @return the constructed {@code LinearCollection}
         * @throws IllegalArgumentException if the required parameter (units) has not been set
         */
        public LinearCollection build() {
            Preconditions.checkArgument(units > 0, "You must specify units");
return new LinearCollection(this);
}
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/core/Multiplication.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.core;
import ai.djl.Device;
import ai.djl.MalformedModelException;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.AbstractBlock;
import ai.djl.nn.Block;
import ai.djl.nn.Parameter;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
import ai.djl.util.Preconditions;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Collections;
/**
* A Multiplication block performs an element-wise multiplication of inputs and weights as opposed
* to a {@link Linear} block which additionally sums up each element-wise multiplication.
*
* <p>Similar to a {@link LinearCollection}, multiple split dimensions are supported but they remain
* optional (i.e. \(t\) can be zero). Other differences to a {@link Linear} block are that the
* weight has an additional dimension of size 1 interspersed (to broadcast the weight to every input
 * of the batch when applying the internally used algebraic operation {@link NDArray#mul(NDArray)})
* and that biases are not supported.
*
* <p>Caution: the output-channel is the left-most dimension as opposed to traditionally being the
* right-most dimension. As the output is one dimension larger than that of a {@link Linear} block,
* it is more efficient and therefore recommended to apply an aggregating function (like the sum)
* first and only then shift the first axis of the aggregated and thus smaller {@link NDArray}
* instance into last position.
*
* <p>It has the following shapes:
*
* <ul>
* <li>input X: [x_1, s_1, s_2, …, s_t, input_dim]
* <li>weight W: [units, 1, s_1, s_2, …, s_t, input_dim]
* <li>output Y: [units, x_1, s_1, s_2, …, s_t, input_dim]
* </ul>
*
* <p>The Multiplication block should be constructed using {@link Multiplication.Builder}.
*/
public class Multiplication extends AbstractBlock {
private static final byte VERSION = 1;
private long units;
private long inputFeatures;
private Shape inputShape;
private Parameter weight;
Multiplication(Builder builder) {
super(VERSION);
units = builder.units;
weight =
addParameter(
Parameter.builder()
.setName("weight")
.setType(Parameter.Type.WEIGHT)
.build());
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
NDArray input = inputs.singletonOrThrow();
Device device = input.getDevice();
NDArray weightArr = parameterStore.getValue(weight, device, training);
return multiply(input, weightArr);
}
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(Shape[] inputs) {
return new Shape[] {new Shape(units).addAll(inputs[0])};
}
/** {@inheritDoc} */
@Override
public PairList<String, Shape> describeInput() {
return new PairList<>(
Collections.singletonList("linearInput"), Collections.singletonList(inputShape));
}
/** {@inheritDoc} */
@Override
protected void beforeInitialize(Shape... inputShapes) {
super.beforeInitialize(inputShapes);
Preconditions.checkArgument(inputShapes.length == 1, "Linear block only support 1 input");
Shape input = inputShapes[0];
inputFeatures = input.slice(1).size();
inputShape = input.slice(0, 1);
}
/** {@inheritDoc} */
@Override
public void prepare(Shape[] inputShapes) {
Shape input = inputShapes[0];
weight.setShape(new Shape(units, 1).addAll(input.slice(1)));
}
/** {@inheritDoc} */
@Override
protected void saveMetadata(DataOutputStream os) throws IOException {
os.writeLong(units);
os.writeLong(inputFeatures);
os.write(inputShape.getEncoded());
}
/** {@inheritDoc} */
@Override
public void loadMetadata(byte loadVersion, DataInputStream is)
throws IOException, MalformedModelException {
if (loadVersion == VERSION) {
units = is.readLong();
inputFeatures = is.readLong();
} else {
throw new MalformedModelException("Unsupported encoding version: " + loadVersion);
}
inputShape = Shape.decode(is);
}
/**
* Applies an element-wise multiplication to the incoming data.
*
* @param input The incoming data
* @param weight The weight of this block
* @return element-wise multiplication of input and weight using broadcasting rules
*/
public NDList multiply(NDArray input, NDArray weight) {
NDArray resultArr = input.mul(weight);
return new NDList(resultArr);
}
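    // Worked example (illustrative only), following the broadcasting rules above with no split
    // dimensions (t = 0):
    //
    //     input  X [x_1=2, input_dim=4]
    //     weight W [units=3, 1, input_dim=4]
    //     X.mul(W) broadcasts to output Y [3, 2, 4]
    //
    // Each of the 3 output channels holds an element-wise product over the whole input batch;
    // summing over the last axis would recover the values of a Linear-style transformation
    // (up to axis order).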
/**
     * Creates a builder to build a {@code Multiplication}.
*
* @return a new builder
*/
public static Builder builder() {
return new Builder();
}
/** The Builder to construct a {@link Multiplication} type of {@link Block}. */
public static final class Builder {
private long units;
Builder() {}
/**
* Sets the number of output channels.
*
* @param units the number of desired output channels
* @return this Builder
*/
public Builder setUnits(long units) {
this.units = units;
return this;
}
/**
         * Returns the constructed {@code Multiplication}.
         *
         * @return the constructed {@code Multiplication}
         * @throws IllegalArgumentException if the required parameter (units) has not been set
         */
        public Multiplication build() {
            Preconditions.checkArgument(units > 0, "You must specify units");
return new Multiplication(this);
}
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/core/Prelu.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.core;
import ai.djl.MalformedModelException;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.internal.NDArrayEx;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.AbstractBlock;
import ai.djl.nn.Parameter;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
import java.io.DataInputStream;
import java.io.IOException;
/**
* Applies Leaky Parametric ReLU activation element-wise to the input.
*
 * <p>Leaky ReLUs attempt to fix the 'dying ReLU' problem by allowing a small slope when the input
 * is negative and a slope of one when the input is positive. This is defined by \(y = x \gt 0 ? x :
 * slope * x\).
*
* <p>Parametric ReLU is a Leaky ReLU in which the slope is learnt during training.
*/
public class Prelu extends AbstractBlock {
private static final byte VERSION = 2;
private Parameter alpha;
/** Creates a Parametric ReLU Block. */
@SuppressWarnings("this-escape")
public Prelu() {
super(VERSION);
alpha =
addParameter(
Parameter.builder()
.setName("alpha")
.setType(Parameter.Type.WEIGHT)
.optShape(new Shape())
.build());
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
NDArray input = inputs.singletonOrThrow();
NDArray alphaArr = parameterStore.getValue(alpha, input.getDevice(), training);
return prelu(input, alphaArr);
}
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(Shape[] inputs) {
return new Shape[] {inputs[0]};
}
/** {@inheritDoc} */
@Override
public void loadMetadata(byte loadVersion, DataInputStream is)
throws IOException, MalformedModelException {
if (loadVersion == version) {
readInputShapes(is);
} else if (loadVersion != 1) {
throw new MalformedModelException("Unsupported encoding version: " + loadVersion);
}
}
/**
* Applies a Prelu activation on the input {@link NDArray}.
*
     * <p>Prelu is defined as \(y = max(0,x) + alpha * min(0, x)\), where alpha is a learnable
     * parameter.
*
* @param input the input {@link NDArray}
* @param alpha learnable parameter
* @return the {@link NDArray} after applying Prelu activation
*/
public static NDList prelu(NDArray input, NDArray alpha) {
NDArrayEx ex = input.getNDArrayInternal();
return ex.prelu(input, alpha);
}
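    // Usage sketch (illustrative only, assuming an NDManager named "manager" is in scope):
    // applying the static activation directly.
    //
    //     NDArray x = manager.create(new float[] {-2f, -1f, 0f, 1f, 2f});
    //     NDArray alpha = manager.create(0.25f);
    //     NDList y = prelu(x, alpha); // [-0.5, -0.25, 0, 1, 2]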
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/core/SparseMax.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.core;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDArrays;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.AbstractBlock;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
import java.util.stream.IntStream;
/**
 * {@code SparseMax} contains a generic implementation of the sparsemax function; the definition of
 * sparsemax is given in https://arxiv.org/pdf/1602.02068.pdf. This is a simplified implementation
 * in which K is a hyperparameter (default 3): softmax is computed over the largest K values only,
 * and all other values are set to 0.
*/
public class SparseMax extends AbstractBlock {
private static final Byte VERSION = 1;
private int axis;
private int topK;
/** Creates a sparseMax activation function for the last axis and 3 elements. */
public SparseMax() {
this(-1, 3);
}
/**
* Creates a sparseMax activation function along a given axis for 3 elements.
*
* @param axis the axis to do sparseMax for
*/
public SparseMax(int axis) {
this(axis, 3);
}
/**
* Creates a sparseMax activation function along a given axis and number of elements.
*
* @param axis the axis to do sparseMax for
* @param topK hyperParameter K
*/
public SparseMax(int axis, int topK) {
super(VERSION);
this.axis = axis;
this.topK = topK;
}
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(Shape[] inputShapes) {
// the shape of input and output are the same
return new Shape[] {inputShapes[0]};
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
/*
        A simple implementation of sparseMax, where we only calculate softmax over the largest K values
*/
NDArray input = inputs.singletonOrThrow();
if (axis != -1) {
input = input.swapAxes(axis, -1);
}
        // level[..., i] holds the index in input of the i-th largest element along the last axis
NDArray level = input.argSort(-1, false).toType(DataType.INT64, false);
int lastDimSize = (int) input.size(input.getShape().dimension() - 1);
        // maskTopK is 1 at the positions of the top-K elements of input and 0 elsewhere
NDArray maskTopK =
NDArrays.add(
IntStream.range(0, topK)
.mapToObj(j -> level.get("..., {}", j).oneHot(lastDimSize))
.toArray(NDArray[]::new));
NDArray expSum =
input.exp().mul(maskTopK).sum(new int[] {-1}, true).broadcast(input.getShape());
NDArray output = input.exp().mul(maskTopK).div(expSum);
if (axis != -1) {
output = output.swapAxes(axis, -1);
}
return new NDList(output);
}
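    // Worked example (illustrative only) for topK = 3 on the last axis:
    //
    //     input  = [1, 2, 3, 4, 5]
    //     top-3  = {3, 4, 5}, so maskTopK = [0, 0, 1, 1, 1]
    //     output = exp(x) * mask / sum(exp(x) * mask)
    //            ≈ [0, 0, 0.090, 0.245, 0.665]
    //
    // i.e. a softmax over the three largest entries, with everything else forced to exactly 0.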
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/core/package-info.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
/** Contains classes that define simple neural network operations. */
package ai.djl.nn.core;
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/norm/BatchNorm.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.norm;
import ai.djl.Device;
import ai.djl.MalformedModelException;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.internal.NDArrayEx;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.AbstractBlock;
import ai.djl.nn.Parameter;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
/**
* In batch training (training with more than one samples per iteration), a batch normalization
* layer works by normalizing the values of input data to have mean of 0 and variance of 1. Since
 * this may alter the representation of a layer, two parameters (\(\gamma\) and \(\beta\)) are
* learned along the normalization process to respectively scale and shift the normalized output
* (activations) to have any mean and variance so the network can utilize non-linear transformations
* such as sigmoid function as described in the <a
* href="https://arxiv.org/abs/1502.03167">paper</a>. During backpropagation, both \(\gamma\) and
* \(\beta\) parameters are included following the chain-rule in derivation.
*
* <p>The problem of varying distribution of input data requires the training process of a deep
* network to compensate for each different data distribution per batch, hence changing parameters'
* values as new batch data is processed and changes distribution of the network's (and each of its
 * layers') activations. This condition is termed internal covariate shift, and it prevents the
 * network from learning faster and generalizing better to unseen data.
*
 * <p>With batch normalization, one benefits from a faster learning process: batch normalization
 * allows a larger learning rate without causing gradient problems during backpropagation, since all
 * inputs are normalized, which reduces the impact of the scale of weight updates on
 * backpropagation. In some cases, using a batch normalization layer regularizes the network and
 * reduces, or even eliminates, the need for dropout, which in turn results in an even faster
 * training process, since dropout slows down training by 2-3 times. However, it has been reported
 * that batch normalization may not be beneficial when small batch sizes are used.
*
* <p>Formally, batch normalization is represented below: <br>
* \(\hat{x} \:=\: \frac{x \:-\: \mu_{batch}}{\sqrt{\sigma^2_{batch} \:+\: \epsilon}}\), <br>
* where \(\hat{x}\) is the normalized input, \(\mu_{batch}\) and \(\sigma^2_{batch}\) respectively
* denote the mean and variance of a batch, and \(\epsilon\) (epsilon) is a constant for numerical
* stability. The scale and shift operation can be formally defined as follows: <br>
* \(y \:=\: \gamma\hat{x} \:+\: \beta\), <br>
* where \(\gamma\) is the scale factor and \(\beta\) is the shift factor.
*
* @see <a href="https://d2l.djl.ai/chapter_convolutional-modern/batch-norm.html">The D2L chapter on
* batch normalization</a>
*/
public class BatchNorm extends AbstractBlock {
private static final byte VERSION = 2;
private int axis;
private float epsilon;
private float momentum;
private long inChannels;
private boolean center;
private boolean scale;
private Parameter gamma;
private Parameter beta;
private Parameter runningMean;
private Parameter runningVar;
BatchNorm(BaseBuilder<?> builder) {
super(VERSION);
axis = builder.axis;
epsilon = builder.epsilon;
momentum = builder.momentum;
center = builder.center;
scale = builder.scale;
// make gamma trainable if scale
gamma =
addParameter(
Parameter.builder()
.setName("gamma")
.setType(Parameter.Type.GAMMA)
.optRequiresGrad(scale)
.build());
// make beta trainable if center
beta =
addParameter(
Parameter.builder()
.setName("beta")
.setType(Parameter.Type.BETA)
.optRequiresGrad(center)
.build());
runningMean =
addParameter(
Parameter.builder()
.setName("runningMean")
.setType(Parameter.Type.RUNNING_MEAN)
.optRequiresGrad(false)
.build());
runningVar =
addParameter(
Parameter.builder()
.setName("runningVar")
.setType(Parameter.Type.RUNNING_VAR)
.optRequiresGrad(false)
.build());
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
NDArray input = inputs.singletonOrThrow();
Device device = input.getDevice();
NDArray gammaArr = parameterStore.getValue(gamma, device, training);
NDArray betaArr = parameterStore.getValue(beta, device, training);
NDArray runningMeanArr = parameterStore.getValue(runningMean, device, training);
NDArray runningVarArr = parameterStore.getValue(runningVar, device, training);
return batchNorm(
input,
runningMeanArr,
runningVarArr,
gammaArr,
betaArr,
axis,
momentum,
epsilon,
training);
}
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(Shape[] inputShapes) {
return new Shape[] {inputShapes[0]};
}
/** {@inheritDoc} */
@Override
protected void beforeInitialize(Shape... inputShapes) {
super.beforeInitialize(inputShapes);
inChannels = inputShapes[0].size(axis);
}
/** {@inheritDoc} */
@Override
public void prepare(Shape[] inputShapes) {
gamma.setShape(new Shape(inChannels));
beta.setShape(new Shape(inChannels));
runningMean.setShape(new Shape(inChannels));
runningVar.setShape(new Shape(inChannels));
}
/** {@inheritDoc} */
@Override
protected void saveMetadata(DataOutputStream os) throws IOException {
saveInputShapes(os);
os.writeLong(inChannels);
}
/** {@inheritDoc} */
@Override
public void loadMetadata(byte loadVersion, DataInputStream is)
throws IOException, MalformedModelException {
if (loadVersion == VERSION) {
readInputShapes(is);
} else if (loadVersion != 1) {
throw new MalformedModelException("Unsupported encoding version: " + loadVersion);
}
inChannels = is.readLong();
}
/**
* Applies Batch Normalization for each channel across a batch of data.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, *), * could be
* empty, width, (height, width), (depth, height, width)
* @param runningMean runningMean {@code NDArray}
* @param runningVar runningVar {@code NDArray}
* @return the output {@code NDArray} of shape (batchSize, inputChannel, *), * could be empty,
* width, (height, width), (depth, height, width)
*/
public static NDList batchNorm(NDArray input, NDArray runningMean, NDArray runningVar) {
NDArrayEx ex = input.getNDArrayInternal();
return ex.batchNorm(input, runningMean, runningVar, null, null, 1, 0.9f, 1E-5f, true);
}
/**
* Applies Batch Normalization for each channel across a batch of data.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, *), * could be
* empty, width, (height, width), (depth, height, width)
* @param runningMean runningMean {@code NDArray}
* @param runningVar runningVar {@code NDArray}
* @param gamma gamma weight {@code NDArray}
* @param beta beta weight {@code NDArray}
* @return the output {@code NDArray} of shape (batchSize, inputChannel, *), * could be empty,
* width, (height, width), (depth, height, width)
*/
public static NDList batchNorm(
NDArray input, NDArray runningMean, NDArray runningVar, NDArray gamma, NDArray beta) {
NDArrayEx ex = input.getNDArrayInternal();
return ex.batchNorm(input, runningMean, runningVar, gamma, beta, 1, 0.9f, 1E-5f, true);
}
/**
* Applies Batch Normalization for each channel across a batch of data.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, *), * could be
* empty, width, (height, width), (depth, height, width)
* @param runningMean runningMean {@code NDArray}
* @param runningVar runningVar {@code NDArray}
* @param gamma gamma weight {@code NDArray}
* @param beta beta weight {@code NDArray}
* @param axis the axis that should be normalized
* @return the output {@code NDArray} of shape (batchSize, inputChannel, *), * could be empty,
* width, (height, width), (depth, height, width)
*/
public static NDList batchNorm(
NDArray input,
NDArray runningMean,
NDArray runningVar,
NDArray gamma,
NDArray beta,
int axis) {
NDArrayEx ex = input.getNDArrayInternal();
return ex.batchNorm(input, runningMean, runningVar, gamma, beta, axis, 0.9f, 1E-5f, true);
}
/**
* Applies Batch Normalization for each channel across a batch of data.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, *), * could be
* empty, width, (height, width), (depth, height, width)
* @param runningMean runningMean {@code NDArray}
* @param runningVar runningVar {@code NDArray}
* @param gamma gamma weight {@code NDArray}
* @param beta beta weight {@code NDArray}
* @param axis the axis that should be normalized
* @param momentum the value used for the runningMean and runningVar computation.
* @param eps a value added to the denominator for numerical stability
* @param training indicate the training mode if true
* @return the output {@code NDArray} of shape (batchSize, inputChannel, *), * could be empty,
* width, (height, width), (depth, height, width)
*/
public static NDList batchNorm(
NDArray input,
NDArray runningMean,
NDArray runningVar,
NDArray gamma,
NDArray beta,
int axis,
float momentum,
float eps,
boolean training) {
NDArrayEx ex = input.getNDArrayInternal();
return ex.batchNorm(
input, runningMean, runningVar, gamma, beta, axis, momentum, eps, training);
}
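    // Usage sketch (illustrative only, assuming an NDManager named "manager" is in scope):
    // normalizing a 4-D activation over the channel axis with the static helper above; inference
    // mode is selected with training = false.
    //
    //     NDArray x = manager.randomNormal(new Shape(2, 3, 4, 4)); // [batch, channels, h, w]
    //     NDArray mean = manager.zeros(new Shape(3));
    //     NDArray var = manager.ones(new Shape(3));
    //     NDArray gamma = manager.ones(new Shape(3));
    //     NDArray beta = manager.zeros(new Shape(3));
    //     NDList y = batchNorm(x, mean, var, gamma, beta, 1, 0.9f, 1E-5f, false); // same shape as x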
/**
* Creates a builder to build a {@code BatchNorm}.
*
* @return a new builder
*/
public static BaseBuilder<?> builder() {
return new Builder();
}
/** The Builder to construct a {@link BatchNorm}. */
public static class Builder extends BaseBuilder<Builder> {
Builder() {}
/** {@inheritDoc} */
@Override
public BatchNorm build() {
return new BatchNorm(this);
}
/** {@inheritDoc} */
@Override
public Builder self() {
return this;
}
}
/** The Builder to construct a {@link BatchNorm} type of {@link ai.djl.nn.Block}. */
public abstract static class BaseBuilder<T extends BaseBuilder<T>> {
protected int axis = 1;
protected float epsilon = 1E-5f;
protected float momentum = .9f;
protected boolean center = true;
protected boolean scale = true;
protected BaseBuilder() {}
/**
* Set the axis in which channel is specified. Defaults to 1.
*
* @param axis the axis in which channel is specified
* @return this Builder
*/
public T optAxis(int axis) {
this.axis = axis;
return self();
}
/**
* If True, add offset of `beta` to normalized tensor. Defaults to True.
*
* @param val True or False on whether to add and train offset value
* @return this Builder
*/
public T optCenter(boolean val) {
center = val;
return self();
}
/**
         * If True, multiply result by `gamma`. Defaults to True.
*
* @param val True or False on whether to add and train scale value
* @return this Builder
*/
public T optScale(boolean val) {
scale = val;
return self();
}
/**
* Sets the epsilon value to prevent division by 0.
*
* @param val the epsilon value
* @return this Builder
*/
public T optEpsilon(float val) {
epsilon = val;
return self();
}
/**
* Set the momentum for moving average.
*
* @param val the momentum for moving average
* @return this Builder
*/
public T optMomentum(float val) {
momentum = val;
return self();
}
/**
* Builds the new {@link BatchNorm}.
*
* @return the new {@link BatchNorm}
*/
public abstract BatchNorm build();
/**
         * Returns this {@code Builder} object.
*
* @return this {@code BaseBuilder}
*/
public abstract T self();
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/norm/Dropout.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.norm;
import ai.djl.MalformedModelException;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.internal.NDArrayEx;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.AbstractBlock;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
import java.io.DataInputStream;
import java.io.IOException;
/**
 * A dropout layer benefits a network by allowing some units (neurons), and hence their respective
 * connections, to be randomly and temporarily removed by setting their values to 0 <b>only</b>
 * during training with a specified probability \(p\), usually set to 0.5. The use of
* dropout acts as if multiple networks with different architectures had been trained, and during
* test/inference, the removed unit's output is multiplied by \(p\) as an approximation of the
* averaged output of all the possible network architectures for that unit. The implementation of
* dropout gives state-of-the-art performances for diverse tasks as shown in the proposal's <a
* href="https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf">paper</a>, suggesting its
* general-use capability.
*
 * <p>The idea of dropout itself was proposed in 2014, with the purpose of improving the performance
 * of large networks suffering from co-adaptation, where some connections are stronger and learn
 * more while other connections become weaker and lose their impact on the prediction, resulting in
 * network overfitting. It was also created as an alternative to costly networks, such as large or
 * ensemble networks: by removing several units, it creates different thinned network architectures
 * and simulates multiple networks within a single network, greatly reducing the computation cost.
*
 * <p>Dropout is recommended when one is trying to optimize an overfitting network or when a large
 * dataset is available. It is still quite commonly used in many publications due to its
 * generalization capability. However, using dropout may not prevent overfitting caused by variation
 * and the limited size of the dataset, and it has been reported that a dropout layer increases
 * training time by 2-3 times, since different simulated networks are trained on each iteration,
 * resulting in noisy parameter updates.
*
* @see <a href="https://d2l.djl.ai/chapter_multilayer-perceptrons/dropout.html">The D2L chapter on
* dropout</a>
*/
public class Dropout extends AbstractBlock {
private static final byte VERSION = 2;
private float rate;
Dropout(Builder builder) {
super(VERSION);
rate = builder.rate;
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
return dropout(inputs.singletonOrThrow(), rate, training);
}
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(Shape[] inputShapes) {
return new Shape[] {inputShapes[0]};
}
/** {@inheritDoc} */
@Override
public void loadMetadata(byte loadVersion, DataInputStream is)
throws IOException, MalformedModelException {
if (loadVersion == version) {
readInputShapes(is);
} else if (loadVersion != 1) {
throw new MalformedModelException("Unsupported encoding version: " + loadVersion);
}
}
/**
* Applies Dropout to the input.
*
* @param input input to apply dropout
* @return output
*/
public static NDList dropout(NDArray input) {
NDArrayEx ex = input.getNDArrayInternal();
return ex.dropout(input, 0.5f, true);
}
/**
* Applies Dropout to the input.
*
* @param input input to apply dropout
* @param rate Fraction of the input units to drop
* @return output
*/
public static NDList dropout(NDArray input, float rate) {
NDArrayEx ex = input.getNDArrayInternal();
return ex.dropout(input, rate, true);
}
/**
* Applies Dropout to the input.
*
* @param input input to apply dropout
* @param rate Fraction of the input units to drop
* @param training apply dropout if true
* @return output
*/
public static NDList dropout(NDArray input, float rate, boolean training) {
NDArrayEx ex = input.getNDArrayInternal();
return ex.dropout(input, rate, training);
}
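    // Usage sketch (illustrative only, assuming an NDManager named "manager" is in scope):
    // applying dropout functionally.
    //
    //     NDArray x = manager.ones(new Shape(2, 8));
    //     NDList train = dropout(x, 0.3f, true);  // ~30% of entries zeroed; surviving entries are
    //                                             // typically rescaled by 1/(1 - rate), depending
    //                                             // on the engine
    //     NDList infer = dropout(x, 0.3f, false); // identity at inference time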
/**
* Creates a builder to build a {@link Dropout}.
*
* @return a new builder
*/
public static Builder builder() {
return new Builder();
}
/** The Builder to construct a {@link Dropout} type of {@link ai.djl.nn.Block}. */
public static final class Builder {
private float rate = 0.5f;
Builder() {}
/**
* Sets the probability or the fraction of the input that gets dropped out during training
* time. Defaults to 0.5.
*
* @param rate fraction of the input that gets dropped out during training
* @return this Builder
*/
public Builder optRate(float rate) {
this.rate = rate;
return this;
}
/**
* Builds a {@link Dropout} block.
*
* @return the {@link Dropout} block
*/
public Dropout build() {
return new Dropout(this);
}
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/norm/GhostBatchNorm.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.norm;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.training.ParameterStore;
import ai.djl.translate.Batchifier;
import ai.djl.translate.StackBatchifier;
import ai.djl.util.PairList;
/**
 * {@link GhostBatchNorm} is similar to {@link BatchNorm} except that it splits a batch into
 * smaller sub-batches, aka <em>ghost batches</em>, normalizes them individually to have a mean of
 * 0 and variance of 1, and finally concatenates them back into a single batch. Each of the
 * mini-batches contains virtualBatchSize samples.
*
* @see <a href="https://arxiv.org/abs/1705.08741">Ghost Normalization Paper</a>
*/
public class GhostBatchNorm extends BatchNorm {
private int virtualBatchSize;
private Batchifier batchifier;
protected GhostBatchNorm(Builder builder) {
super(builder);
this.virtualBatchSize = builder.virtualBatchSize;
this.batchifier = new StackBatchifier();
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
NDList[] subBatches = split(inputs);
for (int i = 0; i < subBatches.length; i++) {
subBatches[i] = super.forwardInternal(parameterStore, subBatches[i], training, params);
}
return batchify(subBatches);
}
/**
* Splits an {@link NDList} into the given <b>size</b> of sub-batch.
*
* <p>This function unbatchifies the input {@link NDList} into mini-batches, each with the size
* of virtualBatchSize. If the batch size is divisible by the virtual batch size, all returned
* sub-batches will be the same size. If the batch size is not divisible by virtual batch size,
* all returned sub-batches will be the same size, except the last one.
*
* @param list the {@link NDList} that needs to be split
* @return an array of {@link NDList} that contains all the mini-batches
*/
protected NDList[] split(NDList list) {
double batchSize = list.head().size(0);
int countBatches = (int) Math.ceil(batchSize / virtualBatchSize);
return batchifier.split(list, countBatches, true);
}
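    // Worked example (illustrative only): with a batch size of 10 and virtualBatchSize = 4,
    // countBatches = ceil(10 / 4) = 3, so the batch is split into three ghost batches of sizes
    // 4, 4 and 2; each is normalized independently before being stacked back together in
    // batchify(...) below.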
/**
     * Converts an array of {@link NDList} into an NDList using {@link StackBatchifier} and squeezes
     * the first dimension created by it. This makes the final {@link NDArray} the same size as the
     * split one.
*
* @param subBatches the input array of {@link NDList}
* @return the batchified {@link NDList}
*/
protected NDList batchify(NDList[] subBatches) {
NDList batch = batchifier.batchify(subBatches);
return squeezeExtraDimensions(batch);
}
/**
* Squeezes first axes of {@link NDList}.
*
* @param batch input array of {@link NDList}
* @return the squeezed {@link NDList}
*/
protected NDList squeezeExtraDimensions(NDList batch) {
NDArray array = batch.singletonOrThrow().squeeze(0);
batch.set(0, array);
return batch;
}
/**
* Creates a builder to build a {@code GhostBatchNorm}.
*
* @return a new builder
*/
public static Builder builder() {
return new Builder();
}
/** The Builder to construct a {@link GhostBatchNorm}. */
public static class Builder extends BatchNorm.BaseBuilder<Builder> {
private int virtualBatchSize = 128;
Builder() {}
/**
         * Sets the size of the virtual batches to use when sub-batching. Defaults to 128.
*
* @param virtualBatchSize the virtual batch size
* @return this Builder
*/
public Builder optVirtualBatchSize(int virtualBatchSize) {
this.virtualBatchSize = virtualBatchSize;
return this;
}
/**
* Builds the new {@link GhostBatchNorm}.
*
* @return the new {@link GhostBatchNorm}
*/
@Override
public GhostBatchNorm build() {
return new GhostBatchNorm(this);
}
/** {@inheritDoc} */
@Override
public Builder self() {
return this;
}
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/norm/LayerNorm.java
|
/*
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.norm;
import ai.djl.Device;
import ai.djl.MalformedModelException;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.internal.NDArrayEx;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.AbstractBlock;
import ai.djl.nn.Parameter;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Arrays;
/**
* Layer normalization works by normalizing the values of input data for each input sample to have
* mean of 0 and variance of 1. Since this may alter the representation of a layer, two parameters
 * (\(\gamma\) and \(\beta\)) are learned along the normalization process to respectively scale and
* shift the normalized output (activations) to have any mean and variance so the network can
* utilize non-linear transformations such as sigmoid function as described in the <a
* href="https://arxiv.org/abs/1607.06450">paper</a>. During backpropagation, both \(\gamma\) and
* \(\beta\) parameters are included following the chain-rule in derivation.
*
* <p>Citing the abstract of the paper: "Training state-of-the-art, deep neural networks is
* computationally expensive. One way to reduce the training time is to normalize the activities of
* the neurons. A recently introduced technique called batch normalization uses the distribution of
* the summed input to a neuron over a mini-batch of training cases to compute a mean and variance
* which are then used to normalize the summed input to that neuron on each training case. This
* significantly reduces the training time in feed-forward neural networks. However, the effect of
* batch normalization is dependent on the mini-batch size and it is not obvious how to apply it to
* recurrent neural networks. In this paper, we transpose batch normalization into layer
* normalization by computing the mean and variance used for normalization from all of the summed
* inputs to the neurons in a layer on a single training case. Like batch normalization, we also
* give each neuron its own adaptive bias and gain which are applied after the normalization but
* before the non-linearity. Unlike batch normalization, layer normalization performs exactly the
* same computation at training and test times. It is also straightforward to apply to recurrent
* neural networks by computing the normalization statistics separately at each time step. Layer
* normalization is very effective at stabilizing the hidden state dynamics in recurrent networks.
* Empirically, we show that layer normalization can substantially reduce the training time compared
* with previously published techniques."
*/
public class LayerNorm extends AbstractBlock {
protected float epsilon;
protected Shape normalizedShape;
protected boolean center;
protected boolean scale;
protected int[] axis;
protected Parameter gamma;
protected Parameter beta;
@SuppressWarnings("this-escape")
protected LayerNorm(Builder builder) {
epsilon = builder.epsilon;
scale = builder.scale;
center = builder.center;
axis = builder.axis;
// make gamma trainable if scale
gamma =
addParameter(
Parameter.builder()
.setName("gamma")
.setType(Parameter.Type.GAMMA)
.optRequiresGrad(scale)
.build());
// make beta trainable if center
beta =
addParameter(
Parameter.builder()
.setName("beta")
.setType(Parameter.Type.BETA)
.optRequiresGrad(center)
.build());
}
/**
* Applies Layer Normalization with average and variance for each input sample across the axis
* dimensions.
*
* @param input the input {@code NDArray} of shape (batchSize, inputChannel, *), * could be
* empty, width, (height, width), (depth, height, width)
* @param normalizedShape dimensions to calculate average and variance from
* @param gamma gamma weight {@code NDArray}
* @param beta beta weight {@code NDArray}
* @param eps a value added to the denominator for numerical stability
* @return the output {@code NDArray} of shape (batchSize, inputChannel, *), * could be empty,
* width, (height, width), (depth, height, width)
*/
public static NDList layerNorm(
NDArray input, Shape normalizedShape, NDArray gamma, NDArray beta, float eps) {
NDArrayEx ex = input.getNDArrayInternal();
return ex.layerNorm(input, normalizedShape, gamma, beta, eps);
}
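    // Usage sketch (illustrative only, assuming an NDManager named "manager" is in scope):
    // normalizing each sample over its trailing dimensions with the static helper above.
    //
    //     NDArray x = manager.randomNormal(new Shape(2, 3, 4)); // [batch, ...]
    //     Shape normalizedShape = new Shape(3, 4);              // everything after the batch axis
    //     NDArray gamma = manager.ones(normalizedShape);
    //     NDArray beta = manager.zeros(normalizedShape);
    //     NDList y = layerNorm(x, normalizedShape, gamma, beta, 1E-5f); // same shape as x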
/**
* Creates a builder to build a {@code LayerNorm}.
*
* @return a new builder
*/
public static Builder builder() {
return new Builder();
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
NDArray input = inputs.singletonOrThrow();
Device device = input.getDevice();
NDArray gammaArr = parameterStore.getValue(gamma, device, training);
NDArray betaArr = parameterStore.getValue(beta, device, training);
return layerNorm(input, normalizedShape, gammaArr, betaArr, epsilon);
}
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(Shape[] inputShapes) {
return new Shape[] {inputShapes[0]};
}
/** {@inheritDoc} */
@Override
protected void beforeInitialize(Shape... inputShapes) {
super.beforeInitialize(inputShapes);
normalizedShape =
axis == null
? inputShapes[0].slice(1)
: new Shape(
Arrays.stream(axis)
.mapToLong(dim -> inputShapes[0].get(dim))
.toArray());
}
/** {@inheritDoc} */
@Override
public void prepare(Shape[] inputShapes) {
gamma.setShape(normalizedShape);
beta.setShape(normalizedShape);
}
/** {@inheritDoc} */
@Override
protected void saveMetadata(DataOutputStream os) throws IOException {
saveInputShapes(os);
os.write(normalizedShape.getEncoded());
}
/** {@inheritDoc} */
@Override
public void loadMetadata(byte loadVersion, DataInputStream is)
throws IOException, MalformedModelException {
if (loadVersion != version) {
throw new MalformedModelException("Unsupported encoding version: " + loadVersion);
}
readInputShapes(is);
normalizedShape = Shape.decode(is);
}
/** The Builder to construct a {@link LayerNorm}. */
public static class Builder {
private float epsilon = 1E-5f;
// private Shape normalizedShape;
private boolean scale = true;
private boolean center = true;
private int[] axis;
protected Builder() {}
/**
* List the axis over which the mean and variance will be calculated (alternative to
* normalizedShape).
*
* @param axis input axis over which the mean and variance will be calculated (if null all
* existing dimensions)
* @return this Builder
*/
public Builder axis(int... axis) {
this.axis = axis;
return this;
}
/**
* If True, add offset of `beta` to normalized tensor. Defaults to True.
*
* @param val True or False on whether to add and train offset value
* @return this Builder
*/
public Builder optCenter(boolean val) {
center = val;
return this;
}
/**
         * If True, multiply result by `gamma`. Defaults to True.
*
* @param val True or False on whether to add and train scale value
* @return this Builder
*/
public Builder optScale(boolean val) {
scale = val;
return this;
}
/**
* Sets the epsilon value to prevent division by 0.
*
* @param val the epsilon value
* @return this Builder
*/
public Builder optEpsilon(float val) {
epsilon = val;
return this;
}
/**
* Builds a {@link LayerNorm} block.
*
* @return the {@link LayerNorm} block
*/
public LayerNorm build() {
return new LayerNorm(this);
}
}
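    // Builder usage sketch (added for illustration): a LayerNorm block that normalizes over all
    // non-batch dimensions of its input, with a custom epsilon.
    //   LayerNorm layerNorm = LayerNorm.builder().optEpsilon(1e-6f).build();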
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/norm/package-info.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
/** Contains classes that define normalizing neural network operations. */
package ai.djl.nn.norm;
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/pooling/Pool.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.pooling;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.Block;
import ai.djl.nn.LambdaBlock;
import ai.djl.util.Preconditions;
import java.util.Objects;
/**
* Utility class that provides {@code Block} and methods for different pooling functions.
*
* @see <a href="https://d2l.djl.ai/chapter_convolutional-neural-networks/pooling.html">The D2L
* chapter on pooling</a>
*/
public final class Pool {
private Pool() {}
/**
* Performs 1-D Max Pooling on the input.
*
* @param input the NDArray on which max pooling is performed
* @param kernelShape the shape of the kernel to be used
* @param stride the stride to be used for each dimension
* @param padding the padding to be set in each dimension
* @param ceilMode when true, will use ceil instead of floor in the formula to compute the
* output shape. The formula is {@code f(x, k, p, s) = floor((x+2*p-k)/s)+1}.
* @return the NDArray after applying max pooling
*/
public static NDArray maxPool1d(
NDArray input, Shape kernelShape, Shape stride, Shape padding, boolean ceilMode) {
Objects.requireNonNull(kernelShape, "kernelShape cannot be null for maxPool1d");
Preconditions.checkArgument(
input.getShape().dimension() == 3,
"Expect input dimension is 3 but got " + input.getShape().dimension());
Preconditions.checkArgument(
kernelShape.dimension() == 1 && stride.dimension() == 1 && padding.dimension() == 1,
"kernelShape, Stride and Padding dimensions for maxPool1d layer should be 1");
return input.getNDArrayInternal().maxPool(kernelShape, stride, padding, ceilMode);
}
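    // Usage sketch (added for illustration; "manager" is an assumed NDManager): max pooling over a
    // (batch, channel, width) input with kernel 2 and stride 2.
    //   NDArray x = manager.randomNormal(new Shape(1, 3, 8));
    //   NDArray y = Pool.maxPool1d(x, new Shape(2), new Shape(2), new Shape(0), false);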
/**
* Performs 2-D Max Pooling on the input.
*
* @param input the NDArray on which max pooling is performed
* @param kernelShape the shape of the kernel to be used
* @param stride the stride to be used for each dimension
* @param padding the padding to be set in each dimension
* @param ceilMode when true, will use ceil instead of floor in the formula to compute the
* output shape. The formula is {@code f(x, k, p, s) = floor((x+2*p-k)/s)+1}.
* @return the NDArray after applying max pooling
*/
public static NDArray maxPool2d(
NDArray input, Shape kernelShape, Shape stride, Shape padding, boolean ceilMode) {
Objects.requireNonNull(kernelShape, "kernelShape cannot be null for maxPool2d");
Preconditions.checkArgument(
input.getShape().dimension() == 4,
"Expect input dimension is 4 but got " + input.getShape().dimension());
Preconditions.checkArgument(
kernelShape.dimension() == 2 && stride.dimension() == 2 && padding.dimension() == 2,
"kernelShape, Stride and Padding dimensions for maxPool2d should be 2");
return input.getNDArrayInternal().maxPool(kernelShape, stride, padding, ceilMode);
}
/**
* Performs 3-D Max Pooling on the input.
*
* @param input the NDArray on which max pooling is performed
* @param kernelShape the shape of the kernel to be used
* @param stride the stride to be used for each dimension
* @param padding the padding to be set in each dimension
* @param ceilMode when true, will use ceil instead of floor in the formula to compute the
* output shape. The formula is {@code f(x, k, p, s) = floor((x+2*p-k)/s)+1}.
* @return the NDArray after applying max pooling
*/
public static NDArray maxPool3d(
NDArray input, Shape kernelShape, Shape stride, Shape padding, boolean ceilMode) {
Objects.requireNonNull(kernelShape, "kernelShape cannot be null for maxPool3d");
Preconditions.checkArgument(
input.getShape().dimension() == 5,
"Expect input dimension is 5 but got " + input.getShape().dimension());
Preconditions.checkArgument(
kernelShape.dimension() == 3 && stride.dimension() == 3 && padding.dimension() == 3,
"kernelShape, Stride and Pad dimensions for maxPool3d should be 3");
return input.getNDArrayInternal().maxPool(kernelShape, stride, padding, ceilMode);
}
/**
* Performs 1-D Global Max Pooling on the input.
*
* @param input the NDArray on which max pooling is performed
* @return the NDArray after applying global max pooling
*/
public static NDArray globalMaxPool1d(NDArray input) {
Preconditions.checkArgument(
input.getShape().dimension() == 3,
"Expect input dimension is 3 but got " + input.getShape().dimension());
return input.getNDArrayInternal().globalMaxPool();
}
/**
* Performs 2-D Global Max Pooling on the input.
*
* @param input the NDArray on which max pooling is performed
* @return the NDArray after applying global max pooling
*/
public static NDArray globalMaxPool2d(NDArray input) {
Preconditions.checkArgument(
input.getShape().dimension() == 4,
"Expect input dimension is 4 but got " + input.getShape().dimension());
return input.getNDArrayInternal().globalMaxPool();
}
/**
* Performs 3-D Global Max Pooling on the input.
*
* @param input the NDArray on which max pooling is performed
* @return the NDArray after applying global max pooling
*/
public static NDArray globalMaxPool3d(NDArray input) {
Preconditions.checkArgument(
input.getShape().dimension() == 5,
"Expect input dimension is 5 but got " + input.getShape().dimension());
return input.getNDArrayInternal().globalMaxPool();
}
/**
* Performs 1-D Avg Pooling on the input.
*
* @param input the NDArray on which average pooling is performed
* @param kernelShape the shape of the kernel to be used
* @param stride the stride to be used for each dimension
* @param padding the padding to be set in each dimension
* @param ceilMode when true, will use ceil instead of floor in the formula to compute the
* output shape. The formula is {@code f(x, k, p, s) = floor((x+2*p-k)/s)+1}.
* @param countIncludePad whether to include padding for calculations
* @return the NDArray after applying avg pooling
*/
public static NDArray avgPool1d(
NDArray input,
Shape kernelShape,
Shape stride,
Shape padding,
boolean ceilMode,
boolean countIncludePad) {
Objects.requireNonNull(kernelShape, "kernelShape cannot be null for avgPool1d");
Preconditions.checkArgument(
input.getShape().dimension() == 3,
"Expect input dimension is 3 but got " + input.getShape().dimension());
Preconditions.checkArgument(
kernelShape.dimension() == 1 && stride.dimension() == 1 && padding.dimension() == 1,
"kernelShape, Stride and Padding dimensions for avgPool1d should be 1");
return input.getNDArrayInternal()
.avgPool(kernelShape, stride, padding, ceilMode, countIncludePad);
}
/**
* Performs 2-D Avg Pooling on the input.
*
* @param input the NDArray on which average pooling is performed
* @param kernelShape the shape of the kernel to be used
* @param stride the stride to be used for each dimension
* @param padding the padding to be set in each dimension
* @param ceilMode when true, will use ceil instead of floor in the formula to compute the
* output shape. The formula is {@code f(x, k, p, s) = floor((x+2*p-k)/s)+1}.
* @param countIncludePad whether to include padding for calculations
* @return the NDArray after applying avg pooling
*/
public static NDArray avgPool2d(
NDArray input,
Shape kernelShape,
Shape stride,
Shape padding,
boolean ceilMode,
boolean countIncludePad) {
Objects.requireNonNull(kernelShape, "kernelShape cannot be null for avgPool2d");
Preconditions.checkArgument(
input.getShape().dimension() == 4,
"Expect input dimension is 4 but got " + input.getShape().dimension());
Preconditions.checkArgument(
kernelShape.dimension() == 2 && stride.dimension() == 2 && padding.dimension() == 2,
"kernelShape, Stride and Padding dimensions for avgPool2d should be 2");
return input.getNDArrayInternal()
.avgPool(kernelShape, stride, padding, ceilMode, countIncludePad);
}
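    // Usage sketch (added for illustration; "manager" is an assumed NDManager): 2x2 average pooling
    // over a (batch, channel, height, width) input, excluding padding from the average.
    //   NDArray x = manager.randomNormal(new Shape(1, 3, 8, 8));
    //   NDArray y = Pool.avgPool2d(x, new Shape(2, 2), new Shape(2, 2), new Shape(1, 1), false, false);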
/**
* Performs 3-D Avg Pooling on the input.
*
* @param input the NDArray on which average pooling is performed
* @param kernelShape the shape of the kernel to be used
* @param stride the stride to be used for each dimension
* @param padding the padding to be set in each dimension
* @param ceilMode when true, will use ceil instead of floor in the formula to compute the
* output shape. The formula is {@code f(x, k, p, s) = floor((x+2*p-k)/s)+1}.
* @param countIncludePad whether to include padding for calculations
* @return the NDArray after applying avg pooling
*/
public static NDArray avgPool3d(
NDArray input,
Shape kernelShape,
Shape stride,
Shape padding,
boolean ceilMode,
boolean countIncludePad) {
Objects.requireNonNull(kernelShape, "kernelShape cannot be null for avgPool3d");
Preconditions.checkArgument(
input.getShape().dimension() == 5,
"Expect input dimension is 5 but got " + input.getShape().dimension());
Preconditions.checkArgument(
kernelShape.dimension() == 3 && stride.dimension() == 3 && padding.dimension() == 3,
"kernelShape, Stride and Padding dimensions for avgPool2d should be 3");
return input.getNDArrayInternal()
.avgPool(kernelShape, stride, padding, ceilMode, countIncludePad);
}
/**
* Performs 1-D Global Avg Pooling on the input.
*
* @param input the NDArray on which average pooling is performed
* @return the NDArray after applying global avg pooling
*/
public static NDArray globalAvgPool1d(NDArray input) {
Preconditions.checkArgument(
input.getShape().dimension() == 3,
"Expect input dimension is 3 but got " + input.getShape().dimension());
return input.getNDArrayInternal().globalAvgPool();
}
/**
* Performs 2-D Global Avg Pooling on the input.
*
* @param input the NDArray on which average pooling is performed
* @return the NDArray after applying global avg pooling
*/
public static NDArray globalAvgPool2d(NDArray input) {
Preconditions.checkArgument(
input.getShape().dimension() == 4,
"Expect input dimension is 4 but got " + input.getShape().dimension());
return input.getNDArrayInternal().globalAvgPool();
}
/**
* Performs 3-D Global Avg Pooling on the input.
*
* @param input the NDArray on which average pooling is performed
* @return the NDArray after applying global avg pooling
*/
public static NDArray globalAvgPool3d(NDArray input) {
Preconditions.checkArgument(
input.getShape().dimension() == 5,
"Expect input dimension is 5 but got " + input.getShape().dimension());
return input.getNDArrayInternal().globalAvgPool();
}
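    // Usage sketch (added for illustration; "manager" is an assumed NDManager): global average
    // pooling collapses the spatial dimensions of a (batch, channel, height, width) input.
    //   NDArray x = manager.randomNormal(new Shape(1, 3, 8, 8));
    //   NDArray y = Pool.globalAvgPool2d(x);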
/**
* Performs 1-D LP Pooling on the input.
*
* @param input the NDArray on which LP pooling is performed
* @param normType float value indicating norm
* @param kernelShape the shape of the kernel to be used
* @param stride the stride to be used for each dimension
* @param padding the padding to be set in each dimension
* @param ceilMode when true, will use ceil instead of floor in the formula to compute the
* output shape. The formula is {@code f(x, k, p, s) = floor((x+2*p-k)/s)+1}.
* @return the NDArray after applying lp pooling
*/
public static NDArray lpPool1d(
NDArray input,
float normType,
Shape kernelShape,
Shape stride,
Shape padding,
boolean ceilMode) {
Objects.requireNonNull(kernelShape, "kernelShape cannot be null for lpPool1d");
Preconditions.checkArgument(
input.getShape().dimension() == 3,
"Expect input dimension is 3 but got " + input.getShape().dimension());
Preconditions.checkArgument(
kernelShape.dimension() == 1 && stride.dimension() == 1 && padding.dimension() == 1,
"kernelShape, Stride and Padding dimensions for lpPool1d should be 1");
return input.getNDArrayInternal().lpPool(normType, kernelShape, stride, padding, ceilMode);
}
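    // Usage sketch (added for illustration; "manager" is an assumed NDManager): L2 pooling
    // (normType = 2) over a (batch, channel, width) input.
    //   NDArray x = manager.randomNormal(new Shape(1, 3, 8));
    //   NDArray y = Pool.lpPool1d(x, 2f, new Shape(2), new Shape(2), new Shape(0), false);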
/**
* Performs 2-D LP Pooling on the input.
*
* @param input the NDArray on which LP pooling is performed
* @param normType float value indicating norm
* @param kernelShape the shape of the kernel to be used
* @param stride the stride to be used for each dimension
* @param padding the padding to be set in each dimension
* @param ceilMode when true, will use ceil instead of floor in the formula to compute the
* output shape. The formula is {@code f(x, k, p, s) = floor((x+2*p-k)/s)+1}.
* @return the NDArray after applying lp pooling
*/
public static NDArray lpPool2d(
NDArray input,
float normType,
Shape kernelShape,
Shape stride,
Shape padding,
boolean ceilMode) {
Objects.requireNonNull(kernelShape, "kernelShape cannot be null for lpPool2d");
Preconditions.checkArgument(
input.getShape().dimension() == 4,
"Expect input dimension is 4 but got " + input.getShape().dimension());
Preconditions.checkArgument(
kernelShape.dimension() == 2 && stride.dimension() == 2,
"kernelShape, Stride and Padding dimensions for lpPool2d should be 2");
return input.getNDArrayInternal().lpPool(normType, kernelShape, stride, padding, ceilMode);
}
/**
* Performs 3-D LP Pooling on the input.
*
* @param input the NDArray on which LP pooling is performed
* @param normType float value indicating norm
* @param kernelShape the shape of the kernel to be used
* @param stride the stride to be used for each dimension
* @param padding the padding to be set in each dimension
* @param ceilMode when true, will use ceil instead of floor in the formula to compute the
* output shape. The formula is {@code f(x, k, p, s) = floor((x+2*p-k)/s)+1}.
* @return the NDArray after applying lp pooling
*/
public static NDArray lpPool3d(
NDArray input,
float normType,
Shape kernelShape,
Shape stride,
Shape padding,
boolean ceilMode) {
Objects.requireNonNull(kernelShape, "kernelShape cannot be null for lpPool3d");
Preconditions.checkArgument(
input.getShape().dimension() == 5,
"Expect input dimension is 5 but got " + input.getShape().dimension());
Preconditions.checkArgument(
kernelShape.dimension() == 3 && stride.dimension() == 3 && padding.dimension() == 3,
"kernelShape, Stride and Padding dimensions for lpPool3d should be 1");
return input.getNDArrayInternal().lpPool(normType, kernelShape, stride, padding, ceilMode);
}
/**
* Performs 1-D Global LP Pooling on the input.
*
* @param input the NDArray on which LP pooling is performed
* @param normType float value indicating norm
* @return the NDArray after applying global lp pooling
*/
public static NDArray globalLpPool1d(NDArray input, float normType) {
Preconditions.checkArgument(
input.getShape().dimension() == 3,
"Expect input dimension is 3 but got " + input.getShape().dimension());
return input.getNDArrayInternal().globalLpPool(normType);
}
/**
* Performs 2-D Global LP Pooling on the input.
*
* @param input the NDArray on which LP pooling is performed
* @param normType float value indicating norm
* @return the NDArray after applying global lp pooling
*/
public static NDArray globalLpPool2d(NDArray input, float normType) {
Preconditions.checkArgument(
input.getShape().dimension() == 4,
"Expect input dimension is 4 but got " + input.getShape().dimension());
return input.getNDArrayInternal().globalLpPool(normType);
}
/**
* Performs 3-D Global LP Pooling on the input.
*
* @param input the NDArray on which LP pooling is performed
* @param normType float value indicating norm
* @return the NDArray after applying global lp pooling
*/
public static NDArray globalLpPool3d(NDArray input, float normType) {
Preconditions.checkArgument(
input.getShape().dimension() == 5,
"Expect input dimension is 5 but got " + input.getShape().dimension());
return input.getNDArrayInternal().globalLpPool(normType);
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #maxPool1d(NDArray, Shape, Shape,
* Shape, boolean) maxPool1d} pooling function in its forward function.
*
* @param kernelShape the shape of the kernel to be used
* @param stride stride of pooling layer
* @param padding pad of the pooling layer
* @param ceilMode when true, will use ceil instead of floor in the formula to compute the
* output shape. The formula is {@code f(x, k, p, s) = floor((x+2*p-k)/s)+1}.
* @return the {@link LambdaBlock} that applies the {@link #maxPool1d(NDArray, Shape, Shape,
* Shape, boolean) maxPool1dBlock} activation function
*/
public static Block maxPool1dBlock(
Shape kernelShape, Shape stride, Shape padding, boolean ceilMode) {
return LambdaBlock.singleton(
array -> maxPool1d(array, kernelShape, stride, padding, ceilMode), "maxPool1d");
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #maxPool1d(NDArray, Shape, Shape,
* Shape, boolean) maxPool1dBlock} pooling function in its forward function.
*
* @param kernelShape the shape of the kernel to be used
* @param stride stride of pooling layer
* @param padding pad of the pooling layer
* @return the {@link LambdaBlock} that applies the {@link #maxPool1d(NDArray, Shape, Shape,
* Shape, boolean) maxPool1dBlock} activation function
*/
public static Block maxPool1dBlock(Shape kernelShape, Shape stride, Shape padding) {
return maxPool1dBlock(kernelShape, stride, padding, false);
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #maxPool1d(NDArray, Shape, Shape,
* Shape, boolean) maxPool1dBlock} pooling function in its forward function.
*
* @param kernelShape the shape of the kernel to be used
* @param stride stride of pooling layer
* @return the {@link LambdaBlock} that applies the {@link #maxPool1d(NDArray, Shape, Shape,
* Shape, boolean) maxPool1dBlock} activation function
*/
public static Block maxPool1dBlock(Shape kernelShape, Shape stride) {
return maxPool1dBlock(kernelShape, stride, new Shape(0), false);
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #maxPool1d(NDArray, Shape, Shape,
* Shape, boolean) maxPool1dBlock} pooling function in its forward function.
*
* @param kernelShape the shape of the kernel to be used
* @return the {@link LambdaBlock} that applies the {@link #maxPool1d(NDArray, Shape, Shape,
* Shape, boolean) maxPool1dBlock} activation function
*/
public static Block maxPool1dBlock(Shape kernelShape) {
return maxPool1dBlock(kernelShape, kernelShape, new Shape(0), false);
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #maxPool2d(NDArray, Shape, Shape,
* Shape, boolean) maxPool2dBlock} pooling function in its forward function.
*
* @param kernelShape the shape of the kernel to be used
* @param stride stride of pooling layer
* @param padding pad of the pooling layer
* @param ceilMode when true, will use ceil instead of floor in the formula to compute the
* output shape. The formula is {@code f(x, k, p, s) = floor((x+2*p-k)/s)+1}.
* @return the {@link LambdaBlock} that applies the {@link #maxPool2d(NDArray, Shape, Shape,
* Shape, boolean) maxPool2dBlock} activation function
*/
public static Block maxPool2dBlock(
Shape kernelShape, Shape stride, Shape padding, boolean ceilMode) {
return LambdaBlock.singleton(
array -> maxPool2d(array, kernelShape, stride, padding, ceilMode), "maxPool2d");
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #maxPool2d(NDArray, Shape, Shape,
* Shape, boolean) maxPool2dBlock} pooling function in its forward function.
*
* @param kernelShape the shape of the kernel to be used
* @param stride stride of pooling layer
* @param padding pad of the pooling layer
* @return the {@link LambdaBlock} that applies the {@link #maxPool2d(NDArray, Shape, Shape,
* Shape, boolean) maxPool2dBlock} activation function
*/
public static Block maxPool2dBlock(Shape kernelShape, Shape stride, Shape padding) {
return maxPool2dBlock(kernelShape, stride, padding, false);
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #maxPool2d(NDArray, Shape, Shape,
* Shape, boolean) maxPool2dBlock} pooling function in its forward function.
*
* @param kernelShape the shape of the kernel to be used
* @param stride stride of pooling layer
* @return the {@link LambdaBlock} that applies the {@link #maxPool2d(NDArray, Shape, Shape,
* Shape, boolean) maxPool2dBlock} activation function
*/
public static Block maxPool2dBlock(Shape kernelShape, Shape stride) {
return maxPool2dBlock(kernelShape, stride, new Shape(0, 0), false);
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #maxPool2d(NDArray, Shape, Shape,
* Shape, boolean) maxPool2dBlock} pooling function in its forward function.
*
* @param kernelShape the shape of the kernel to be used
* @return the {@link LambdaBlock} that applies the {@link #maxPool2d(NDArray, Shape, Shape,
* Shape, boolean) maxPool2dBlock} activation function
*/
public static Block maxPool2dBlock(Shape kernelShape) {
return maxPool2dBlock(kernelShape, kernelShape, new Shape(0, 0), false);
}
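    // Usage sketch (added for illustration): pooling blocks compose with other blocks, for example
    // in an ai.djl.nn.SequentialBlock.
    //   SequentialBlock net = new SequentialBlock();
    //   net.add(Pool.maxPool2dBlock(new Shape(2, 2)));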
/**
* Creates a {@link LambdaBlock} that applies the {@link #maxPool3d(NDArray, Shape, Shape,
* Shape, boolean) maxPool3dBlock} pooling function in its forward function.
*
* @param kernelShape the shape of the kernel to be used
* @param stride stride of pooling layer
* @param padding pad of the pooling layer
* @param ceilMode when true, will use ceil instead of floor in the formula to compute the
* output shape. The formula is {@code f(x, k, p, s) = floor((x+2*p-k)/s)+1}.
* @return the {@link LambdaBlock} that applies the {@link #maxPool3d(NDArray, Shape, Shape,
* Shape, boolean) maxPool3dBlock} activation function
*/
public static Block maxPool3dBlock(
Shape kernelShape, Shape stride, Shape padding, boolean ceilMode) {
return LambdaBlock.singleton(
array -> maxPool3d(array, kernelShape, stride, padding, ceilMode), "maxPool3d");
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #maxPool3d(NDArray, Shape, Shape,
* Shape, boolean) maxPool3dBlock} pooling function in its forward function.
*
* @param kernelShape the shape of the kernel to be used
* @param stride stride of pooling layer
* @param padding pad of the pooling layer
* @return the {@link LambdaBlock} that applies the {@link #maxPool3d(NDArray, Shape, Shape,
* Shape, boolean) maxPool3dBlock} activation function
*/
public static Block maxPool3dBlock(Shape kernelShape, Shape stride, Shape padding) {
return maxPool3dBlock(kernelShape, stride, padding, false);
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #maxPool3d(NDArray, Shape, Shape,
* Shape, boolean) maxPool3dBlock} pooling function in its forward function.
*
* @param kernelShape the shape of the kernel to be used
* @param stride stride of pooling layer
* @return the {@link LambdaBlock} that applies the {@link #maxPool3d(NDArray, Shape, Shape,
* Shape, boolean) maxPool3dBlock} activation function
*/
public static Block maxPool3dBlock(Shape kernelShape, Shape stride) {
return maxPool3dBlock(kernelShape, stride, new Shape(0, 0, 0), false);
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #maxPool3d(NDArray, Shape, Shape,
* Shape, boolean) maxPool3dBlock} pooling function in its forward function.
*
* @param kernelShape the shape of the kernel to be used
* @return the {@link LambdaBlock} that applies the {@link #maxPool3d(NDArray, Shape, Shape,
* Shape, boolean) maxPool3dBlock} activation function
*/
public static Block maxPool3dBlock(Shape kernelShape) {
return maxPool3dBlock(kernelShape, new Shape(1, 1, 1), new Shape(0, 0, 0), false);
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #globalMaxPool1d(NDArray)
* globalmaxPool1dBlock } pooling function.
*
* @return the {@link LambdaBlock} that applies the {@link #globalMaxPool1d(NDArray)
* globalmaxPool1dBlock} pooling function
*/
public static Block globalMaxPool1dBlock() {
return LambdaBlock.singleton(Pool::globalMaxPool1d, "globalMaxPool1d");
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #globalMaxPool2d(NDArray)
* globalmaxPool2dBlock } pooling function.
*
* @return the {@link LambdaBlock} that applies the {@link #globalMaxPool2d(NDArray)
* globalmaxPool2dBlock} pooling function
*/
public static Block globalMaxPool2dBlock() {
return LambdaBlock.singleton(Pool::globalMaxPool2d, "globalMaxPool2d");
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #globalMaxPool3d(NDArray)
* globalmaxPool3dBlock } pooling function.
*
* @return the {@link LambdaBlock} that applies the {@link #globalMaxPool3d(NDArray)
* globalmaxPool3dBlock} pooling function
*/
public static Block globalMaxPool3dBlock() {
return LambdaBlock.singleton(Pool::globalMaxPool3d, "globalMaxPool3d");
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #avgPool1d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool1dBlock} pooling function in its forward function.
*
* @param kernelShape the shape of the kernel to be used
* @param stride stride of pooling layer
* @param padding pad of the pooling layer
* @param ceilMode when true, will use ceil instead of floor in the formula to compute the
* output shape. The formula is {@code f(x, k, p, s) = floor((x+2*p-k)/s)+1}.
* @param countIncludePad Boolean indicating whether to include padding for calculations
* @return the {@link LambdaBlock} that applies the {@link #avgPool1d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool1dBlock} activation function
*/
public static Block avgPool1dBlock(
Shape kernelShape,
Shape stride,
Shape padding,
boolean ceilMode,
boolean countIncludePad) {
return LambdaBlock.singleton(
array -> avgPool1d(array, kernelShape, stride, padding, ceilMode, countIncludePad),
"avgPool1d");
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #avgPool1d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool1dBlock} pooling function in its forward function.
*
* @param kernelShape the shape of the kernel to be used
* @param stride stride of pooling layer
* @param padding pad of the pooling layer
* @param ceilMode when true, will use ceil instead of floor in the formula to compute the
* output shape. The formula is {@code f(x, k, p, s) = floor((x+2*p-k)/s)+1}.
* @return the {@link LambdaBlock} that applies the {@link #avgPool1d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool1dBlock } activation function
*/
public static Block avgPool1dBlock(
Shape kernelShape, Shape stride, Shape padding, boolean ceilMode) {
return avgPool1dBlock(kernelShape, stride, padding, ceilMode, true);
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #avgPool1d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool1dBlock} pooling function in its forward function.
*
* @param kernelShape the shape of the kernel to be used
* @param stride stride of pooling layer
* @param padding pad of the pooling layer
* @return the {@link LambdaBlock} that applies the {@link #avgPool1d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool1dBlock} activation function
*/
public static Block avgPool1dBlock(Shape kernelShape, Shape stride, Shape padding) {
return avgPool1dBlock(kernelShape, stride, padding, false, true);
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #avgPool1d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool1dBlock} pooling function in its forward function.
*
* @param kernelShape the shape of the kernel to be used
* @param stride stride of pooling layer
* @return the {@link LambdaBlock} that applies the {@link #avgPool1d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool1dBlock} activation function
*/
public static Block avgPool1dBlock(Shape kernelShape, Shape stride) {
return avgPool1dBlock(kernelShape, stride, new Shape(0), false, true);
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #avgPool1d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool1dBlock} pooling function in its forward function.
*
* @param kernelShape the shape of the kernel to be used
* @return the {@link LambdaBlock} that applies the {@link #avgPool1d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool1dBlock} activation function
*/
public static Block avgPool1dBlock(Shape kernelShape) {
return avgPool1dBlock(kernelShape, kernelShape, new Shape(0), false, true);
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #avgPool2d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool2dBlock} pooling function in its forward function.
*
* @param kernelShape the shape of the kernel to be used
* @param stride stride of pooling layer
* @param padding pad of the pooling layer
* @param ceilMode when true, will use ceil instead of floor in the formula to compute the
* output shape. The formula is {@code f(x, k, p, s) = floor((x+2*p-k)/s)+1}.
* @param countIncludePad Boolean indicating whether to include padding for calculations
* @return the {@link LambdaBlock} that applies the {@link #avgPool2d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool2dBlock} activation function
*/
public static Block avgPool2dBlock(
Shape kernelShape,
Shape stride,
Shape padding,
boolean ceilMode,
boolean countIncludePad) {
return LambdaBlock.singleton(
array -> avgPool2d(array, kernelShape, stride, padding, ceilMode, countIncludePad),
"avgPool2d");
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #avgPool2d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool2dBlock} pooling function in its forward function.
*
* @param kernelShape the shape of the kernel to be used
* @param stride stride of pooling layer
* @param padding pad of the pooling layer
* @param ceilMode when true, will use ceil instead of floor in the formula to compute the
* output shape. The formula is {@code f(x, k, p, s) = floor((x+2*p-k)/s)+1}.
* @return the {@link LambdaBlock} that applies the {@link #avgPool2d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool2dBlock} activation function
*/
public static Block avgPool2dBlock(
Shape kernelShape, Shape stride, Shape padding, boolean ceilMode) {
return avgPool2dBlock(kernelShape, stride, padding, ceilMode, true);
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #avgPool2d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool2dBlock} pooling function in its forward function.
*
* @param kernelShape the shape of the kernel to be used
* @param stride stride of pooling layer
* @param padding pad of the pooling layer
* @return the {@link LambdaBlock} that applies the {@link #avgPool2d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool2dBlock} activation function
*/
public static Block avgPool2dBlock(Shape kernelShape, Shape stride, Shape padding) {
return avgPool2dBlock(kernelShape, stride, padding, false, true);
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #avgPool2d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool2dBlock} pooling function in its forward function.
*
* @param kernelShape the shape of the kernel to be used
* @param stride stride of pooling layer
* @return the {@link LambdaBlock} that applies the {@link #avgPool2d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool2dBlock} activation function
*/
public static Block avgPool2dBlock(Shape kernelShape, Shape stride) {
return avgPool2dBlock(kernelShape, stride, new Shape(0, 0), false, true);
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #avgPool2d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool2dBlock} pooling function in its forward function.
*
* @param kernelShape the shape of the kernel to be used
* @return the {@link LambdaBlock} that applies the {@link #avgPool2d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool2dBlock} activation function
*/
public static Block avgPool2dBlock(Shape kernelShape) {
return avgPool2dBlock(kernelShape, kernelShape, new Shape(0, 0), false, true);
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #avgPool3d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool3dBlock} pooling function in its forward function.
*
* @param kernelShape the shape of the kernel to be used
* @param stride stride of pooling layer
* @param padding pad of the pooling layer
* @param ceilMode when true, will use ceil instead of floor in the formula to compute the
* output shape. The formula is {@code f(x, k, p, s) = floor((x+2*p-k)/s)+1}.
* @param countIncludePad Boolean indicating whether to include padding for calculations
* @return the {@link LambdaBlock} that applies the {@link #avgPool3d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool3dBlock} activation function
*/
public static Block avgPool3dBlock(
Shape kernelShape,
Shape stride,
Shape padding,
boolean ceilMode,
boolean countIncludePad) {
return LambdaBlock.singleton(
array -> avgPool3d(array, kernelShape, stride, padding, ceilMode, countIncludePad),
"avgPool3d");
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #avgPool3d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool3dBlock} pooling function in its forward function.
*
* @param kernelShape the shape of the kernel to be used
* @param stride stride of pooling layer
* @param padding pad of the pooling layer
* @param ceilMode when true, will use ceil instead of floor in the formula to compute the
* output shape. The formula is {@code f(x, k, p, s) = floor((x+2*p-k)/s)+1}.
* @return the {@link LambdaBlock} that applies the {@link #avgPool3d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool3dBlock} activation function
*/
public static Block avgPool3dBlock(
Shape kernelShape, Shape stride, Shape padding, boolean ceilMode) {
return avgPool3dBlock(kernelShape, stride, padding, ceilMode, true);
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #avgPool3d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool3dBlock} pooling function in its forward function.
*
* @param kernelShape the shape of the kernel to be used
* @param stride stride of pooling layer
* @param padding pad of the pooling layer
* @return the {@link LambdaBlock} that applies the {@link #avgPool3d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool3dBlock} activation function
*/
public static Block avgPool3dBlock(Shape kernelShape, Shape stride, Shape padding) {
return avgPool3dBlock(kernelShape, stride, padding, false, true);
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #avgPool3d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool3dBlock} pooling function in its forward function.
*
* @param kernelShape the shape of the kernel to be used
* @param stride stride of pooling layer
* @return the {@link LambdaBlock} that applies the {@link #avgPool3d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool3dBlock} activation function
*/
public static Block avgPool3dBlock(Shape kernelShape, Shape stride) {
return avgPool3dBlock(kernelShape, stride, new Shape(0, 0, 0), false, true);
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #avgPool3d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool3dBlock} pooling function in its forward function.
*
* @param kernelShape the shape of the kernel to be used
* @return the {@link LambdaBlock} that applies the {@link #avgPool3d(NDArray, Shape, Shape,
* Shape, boolean, boolean) avgPool3dBlock} activation function
*/
public static Block avgPool3dBlock(Shape kernelShape) {
return avgPool3dBlock(kernelShape, kernelShape, new Shape(0, 0, 0), false, true);
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #globalAvgPool1d(NDArray)
* globalAvgPool1d } pooling function.
*
* @return the {@link LambdaBlock} that applies the {@link #globalAvgPool1d(NDArray)
* globalAvgPool1d} pooling function
*/
public static Block globalAvgPool1dBlock() {
return LambdaBlock.singleton(Pool::globalAvgPool1d, "globalAvgPool1d");
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #globalAvgPool2d(NDArray)
* globalAvgPool2d } pooling function.
*
* @return the {@link LambdaBlock} that applies the {@link #globalAvgPool2d(NDArray)
* globalAvgPool2d} pooling function
*/
public static Block globalAvgPool2dBlock() {
return LambdaBlock.singleton(Pool::globalAvgPool2d, "globalAvgPool2d");
}
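    // Usage sketch (added for illustration): a global average pooling block is a common head
    // placed before a fully connected classifier.
    //   Block head = Pool.globalAvgPool2dBlock();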
/**
* Creates a {@link LambdaBlock} that applies the {@link #globalAvgPool3d(NDArray)
* globalAvgPool3d } pooling function.
*
* @return the {@link LambdaBlock} that applies the {@link #globalAvgPool3d(NDArray)
* globalAvgPool3d} pooling function
*/
public static Block globalAvgPool3dBlock() {
return LambdaBlock.singleton(Pool::globalAvgPool3d, "globalAvgPool3d");
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #lpPool1d(NDArray, float, Shape, Shape,
* Shape, boolean) lpPool1dBlock} pooling function in its forward function.
*
* @param normType integer indicating pValue
* @param kernelShape the shape of the kernel to be used
* @param stride stride of pooling layer
* @param padding padding of pooling layer
* @param ceilMode when true, will use ceil instead of floor in the formula to compute the
* output shape. The formula is {@code f(x, k, p, s) = floor((x+2*p-k)/s)+1}.
* @return the {@link LambdaBlock} that applies the {@link #lpPool1d(NDArray, float, Shape,
* Shape, Shape, boolean) lpPool1dBlock} activation function
*/
public static Block lpPool1dBlock(
float normType, Shape kernelShape, Shape stride, Shape padding, boolean ceilMode) {
return LambdaBlock.singleton(
array -> lpPool1d(array, normType, kernelShape, stride, padding, ceilMode),
"lpPool1d");
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #lpPool1d(NDArray, float, Shape, Shape,
* Shape, boolean) lpPool1dBlock} pooling function in its forward function.
*
* @param normType integer indicating pValue
* @param kernelShape the shape of the kernel to be used
* @param stride stride of pooling layer
* @param padding padding of pooling layer
* @return the {@link LambdaBlock} that applies the {@link #lpPool1d(NDArray, float, Shape,
* Shape, Shape, boolean) lpPool1dBlock} activation function
*/
public static Block lpPool1dBlock(
float normType, Shape kernelShape, Shape stride, Shape padding) {
return lpPool1dBlock(normType, kernelShape, stride, padding, false);
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #lpPool1d(NDArray, float, Shape, Shape,
* Shape, boolean) lpPool1dBlock} pooling function in its forward function.
*
* @param normType float value indicating norm
* @param kernelShape the shape of the kernel to be used
* @return the {@link LambdaBlock} that applies the {@link #lpPool1d(NDArray, float, Shape,
* Shape, Shape, boolean) lpPool1dBlock} activation function
*/
public static Block lpPool1dBlock(float normType, Shape kernelShape) {
return lpPool1dBlock(normType, kernelShape, new Shape(1), new Shape(0), false);
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #lpPool2d(NDArray, float, Shape, Shape,
* Shape, boolean) lpPool2dBlock} pooling function in its forward function.
*
* @param normType float value indicating norm
* @param kernelShape the shape of the kernel to be used
* @param stride stride of pooling layer
* @param padding pad of the pooling layer
* @param ceilMode when true, will use ceil instead of floor in the formula to compute the
* output shape. The formula is {@code f(x, k, p, s) = floor((x+2*p-k)/s)+1}.
* @return the {@link LambdaBlock} that applies the {@link #lpPool2d(NDArray, float, Shape,
* Shape, Shape, boolean) lpPool2dBlock} activation function
*/
public static Block lpPool2dBlock(
float normType, Shape kernelShape, Shape stride, Shape padding, boolean ceilMode) {
return LambdaBlock.singleton(
array -> lpPool2d(array, normType, kernelShape, stride, padding, ceilMode),
"lpPool2d");
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #lpPool2d(NDArray, float, Shape, Shape,
* Shape, boolean) lpPool2dBlock} pooling function in its forward function.
*
* @param normType float value indicating norm
* @param kernelShape the shape of the kernel to be used
* @param stride stride of pooling layer
* @param padding pad of the pooling layer
* @return the {@link LambdaBlock} that applies the {@link #lpPool2d(NDArray, float, Shape,
* Shape, Shape, boolean) lpPool2dBlock} activation function
*/
public static Block lpPool2dBlock(
float normType, Shape kernelShape, Shape stride, Shape padding) {
return lpPool2dBlock(normType, kernelShape, stride, padding, false);
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #lpPool2d(NDArray, float, Shape, Shape,
* Shape, boolean) lpPool2dBlock} pooling function in its forward function.
*
* @param normType float value indicating norm
* @param kernelShape the shape of the kernel to be used
* @param stride stride of pooling layer
* @return the {@link LambdaBlock} that applies the {@link #lpPool2d(NDArray, float, Shape,
* Shape, Shape, boolean) lpPool2dBlock} activation function
*/
public static Block lpPool2dBlock(float normType, Shape kernelShape, Shape stride) {
return lpPool2dBlock(normType, kernelShape, stride, new Shape(0, 0), false);
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #lpPool2d(NDArray, float, Shape, Shape,
* Shape, boolean) lpPool2dBlock} pooling function in its forward function.
*
* @param normType float value indicating norm
* @param kernelShape the shape of the kernel to be used
* @return the {@link LambdaBlock} that applies the {@link #lpPool2d(NDArray, float, Shape,
* Shape, Shape, boolean) lpPool2dBlock} activation function
*/
public static Block lpPool2dBlock(float normType, Shape kernelShape) {
return lpPool2dBlock(normType, kernelShape, new Shape(1, 1), new Shape(0, 0));
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #lpPool3d(NDArray, float, Shape, Shape,
* Shape, boolean) lpPool3dBlock} pooling function in its forward function.
*
* @param normType float value indicating norm
* @param kernelShape the shape of the kernel to be used
* @param stride stride of pooling layer
* @param padding pad of the pooling layer
* @param ceilMode when true, will use ceil instead of floor in the formula to compute the
* output shape. The formula is {@code f(x, k, p, s) = floor((x+2*p-k)/s)+1}.
* @return the {@link LambdaBlock} that applies the {@link #lpPool3d(NDArray, float, Shape,
* Shape, Shape, boolean) lpPool3dBlock} activation function
*/
public static Block lpPool3dBlock(
float normType, Shape kernelShape, Shape stride, Shape padding, boolean ceilMode) {
return LambdaBlock.singleton(
array -> lpPool3d(array, normType, kernelShape, stride, padding, ceilMode),
"lpPool3d");
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #lpPool3d(NDArray, float, Shape, Shape,
* Shape, boolean) lpPool3dBlock} pooling function in its forward function.
*
* @param normType float value indicating norm
* @param kernelShape the shape of the kernel to be used
* @param stride stride of pooling layer
* @param padding pad of the pooling layer
* @return the {@link LambdaBlock} that applies the {@link #lpPool3d(NDArray, float, Shape,
* Shape, Shape, boolean) lpPool3dBlock} activation function
*/
public static Block lpPool3dBlock(
float normType, Shape kernelShape, Shape stride, Shape padding) {
return lpPool3dBlock(normType, kernelShape, stride, padding, false);
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #lpPool3d(NDArray, float, Shape, Shape,
     * Shape, boolean) lpPool3dBlock} pooling function in its forward function.
*
* @param normType float value indicating norm
* @param kernelShape the shape of the kernel to be used
* @param stride stride of pooling layer
* @return the {@link LambdaBlock} that applies the {@link #lpPool3d(NDArray, float, Shape,
* Shape, Shape, boolean) lpPool3dBlock} activation function
*/
public static Block lpPool3dBlock(float normType, Shape kernelShape, Shape stride) {
return lpPool3dBlock(normType, kernelShape, stride, new Shape(0, 0, 0), false);
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #lpPool3d(NDArray, float, Shape, Shape,
* Shape, boolean) lpPool3dBlock} pooling function in its forward function.
*
* @param normType float value indicating norm
* @param kernelShape the shape of the kernel to be used
* @return the {@link LambdaBlock} that applies the {@link #lpPool3d(NDArray, float, Shape,
* Shape, Shape, boolean) lpPool3dBlock} activation function
*/
public static Block lpPool3dBlock(float normType, Shape kernelShape) {
return lpPool3dBlock(normType, kernelShape, kernelShape, new Shape(0, 0, 0), false);
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #globalLpPool1d(NDArray, float)
* globalLpPool1d } pooling function.
*
* @param normType float value indicating norm
* @return the {@link LambdaBlock} that applies the {@link #globalLpPool1d(NDArray, float)
* globalLpPool1d} pooling function
*/
public static Block globalLpPool1dBlock(float normType) {
return LambdaBlock.singleton(array -> globalLpPool1d(array, normType), "globalLpPool1d");
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #globalLpPool2d(NDArray, float)
* globalLpPool2d } pooling function.
*
* @param normType float value indicating norm
* @return the {@link LambdaBlock} that applies the {@link #globalLpPool2d(NDArray, float)
* globalLpPool2d} pooling function
*/
public static Block globalLpPool2dBlock(float normType) {
return LambdaBlock.singleton(array -> globalLpPool2d(array, normType), "globalLpPool2d");
}
/**
* Creates a {@link LambdaBlock} that applies the {@link #globalLpPool3d(NDArray, float)
* globalLpPool3d } pooling function.
*
* @param normType float value indicating norm
* @return the {@link LambdaBlock} that applies the {@link #globalLpPool3d(NDArray, float)
* globalLpPool3d} pooling function
*/
public static Block globalLpPool3dBlock(float normType) {
return LambdaBlock.singleton(array -> globalLpPool3d(array, normType), "globalLpPool3d");
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/pooling/package-info.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
/**
* Contains pooling neural network operations in {@link ai.djl.nn.pooling.Pool} and helpers for it.
*/
package ai.djl.nn.pooling;
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/recurrent/GRU.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.recurrent;
import ai.djl.Device;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.internal.NDArrayEx;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.Block;
import ai.djl.nn.Parameter;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
import ai.djl.util.Preconditions;
/**
 * {@code GRU} is an implementation of recurrent neural networks which applies a GRU (Gated
 * Recurrent Unit) recurrent layer to the input.
 *
 * <p>The current implementation refers to the [paper](http://arxiv.org/abs/1406.1078) - Gated
 * Recurrent Unit. The definition of GRU here is slightly different from the paper but compatible
 * with CUDNN.
*
* <p>The GRU operator is formulated as below:
*
* <p>$$ \begin{split}\begin{array}{ll} r_t = \mathrm{sigmoid}(W_{ir} x_t + b_{ir} + W_{hr}
* h_{(t-1)} + b_{hr}) \\ z_t = \mathrm{sigmoid}(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\
* n_t = \tanh(W_{in} x_t + b_{in} + r_t * (W_{hn} h_{(t-1)}+ b_{hn})) \\ h_t = (1 - z_t) * n_t +
* z_t * h_{(t-1)} \\ \end{array}\end{split} $$
*/
public class GRU extends RecurrentBlock {
GRU(Builder builder) {
super(builder);
gates = 3;
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
NDArrayEx ex = inputs.head().getNDArrayInternal();
Device device = inputs.head().getDevice();
NDList gruParams = new NDList();
for (Parameter parameter : parameters.values()) {
gruParams.add(parameterStore.getValue(parameter, device, training));
}
NDArray input = inputs.head();
if (inputs.size() == 1) {
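            // No initial hidden state was supplied; create a zero state of shape
            // (numLayers * numDirections, batchSize, stateSize).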
int batchIndex = batchFirst ? 0 : 1;
inputs.add(
input.getManager()
.zeros(
new Shape(
(long) numLayers * getNumDirections(),
input.size(batchIndex),
stateSize)));
}
NDList outputs =
ex.gru(
input,
inputs.get(1),
gruParams,
hasBiases,
numLayers,
dropRate,
training,
bidirectional,
batchFirst);
if (returnState) {
return outputs;
}
outputs.stream().skip(1).forEach(NDArray::close);
return new NDList(outputs.get(0));
}
/**
* Creates a builder to build a {@link GRU}.
*
* @return a new builder
*/
public static Builder builder() {
return new Builder();
}
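    // Builder usage sketch (added for illustration; setStateSize/setNumLayers and the opt* setters
    // are assumed from RecurrentBlock.BaseBuilder):
    //   GRU gru = GRU.builder()
    //           .setStateSize(64)
    //           .setNumLayers(1)
    //           .optBatchFirst(true)
    //           .optReturnState(false)
    //           .build();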
/** The Builder to construct a {@link GRU} type of {@link Block}. */
public static final class Builder extends BaseBuilder<Builder> {
/** {@inheritDoc} */
@Override
protected Builder self() {
return this;
}
/**
* Builds a {@link GRU} block.
*
* @return the {@link GRU} block
*/
public GRU build() {
Preconditions.checkArgument(
stateSize > 0 && numLayers > 0, "Must set stateSize and numStackedLayers");
return new GRU(this);
}
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/recurrent/LSTM.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.recurrent;
import ai.djl.Device;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.internal.NDArrayEx;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.Block;
import ai.djl.nn.Parameter;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
import ai.djl.util.Preconditions;
/**
 * {@code LSTM} is an implementation of recurrent neural networks which applies a Long Short-Term
 * Memory recurrent layer to the input.
*
 * <p>Reference paper - LONG SHORT-TERM MEMORY - Hochreiter and Schmidhuber, 1997.
* http://www.bioinf.jku.at/publications/older/2604.pdf
*
* <p>The LSTM operator is formulated as below:
*
* <p>$$ \begin{split}\begin{array}{ll} i_t = \mathrm{sigmoid}(W_{ii} x_t + b_{ii} + W_{hi}
* h_{(t-1)} + b_{hi}) \\ f_t = \mathrm{sigmoid}(W_{if} x_t + b_{if} + W_{hf} h_{(t-1)} + b_{hf}) \\
 * g_t = \tanh(W_{ig} x_t + b_{ig} + W_{hg} h_{(t-1)} + b_{hg}) \\ o_t = \mathrm{sigmoid}(W_{io} x_t
* + b_{io} + W_{ho} h_{(t-1)} + b_{ho}) \\ c_t = f_t * c_{(t-1)} + i_t * g_t \\ h_t = o_t *
* \tanh(c_t) \end{array}\end{split} $$
*/
public class LSTM extends RecurrentBlock {
/**
* Creates an LSTM block.
*
* @param builder the builder used to create the RNN block
*/
LSTM(Builder builder) {
super(builder);
gates = 4;
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
NDArrayEx ex = inputs.head().getNDArrayInternal();
Device device = inputs.head().getDevice();
NDList rnnParams = new NDList();
for (Parameter parameter : parameters.values()) {
rnnParams.add(parameterStore.getValue(parameter, device, training));
}
NDArray input = inputs.head();
if (inputs.size() == 1) {
int batchIndex = batchFirst ? 0 : 1;
Shape stateShape =
new Shape(
(long) numLayers * getNumDirections(),
input.size(batchIndex),
stateSize);
// hidden state
inputs.add(input.getManager().zeros(stateShape));
// cell
inputs.add(input.getManager().zeros(stateShape));
}
if (inputs.size() == 2) {
int batchIndex = batchFirst ? 0 : 1;
Shape stateShape =
new Shape(
(long) numLayers * getNumDirections(),
input.size(batchIndex),
stateSize);
// cell
inputs.add(input.getManager().zeros(stateShape));
}
NDList outputs =
ex.lstm(
input,
new NDList(inputs.get(1), inputs.get(2)),
rnnParams,
hasBiases,
numLayers,
dropRate,
training,
bidirectional,
batchFirst);
if (returnState) {
return outputs;
}
outputs.stream().skip(1).forEach(NDArray::close);
return new NDList(outputs.get(0));
}
/**
* Creates a builder to build a {@link LSTM}.
*
* @return a new builder
*/
public static Builder builder() {
return new Builder();
}
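    // Builder usage sketch (added for illustration; the setters are assumed from
    // RecurrentBlock.BaseBuilder). With optReturnState(true) the forward output also carries the
    // final hidden and cell states.
    //   LSTM lstm = LSTM.builder()
    //           .setStateSize(128)
    //           .setNumLayers(2)
    //           .optBidirectional(true)
    //           .optReturnState(true)
    //           .build();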
/** The Builder to construct a {@link LSTM} type of {@link Block}. */
public static final class Builder extends BaseBuilder<Builder> {
/** {@inheritDoc} */
@Override
protected Builder self() {
return this;
}
/**
* Builds a {@link LSTM} block.
*
* @return the {@link LSTM} block
*/
public LSTM build() {
Preconditions.checkArgument(
stateSize > 0 && numLayers > 0, "Must set stateSize and numStackedLayers");
return new LSTM(this);
}
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/recurrent/RNN.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.recurrent;
import ai.djl.Device;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.internal.NDArrayEx;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.Block;
import ai.djl.nn.Parameter;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
import ai.djl.util.Preconditions;
/**
* {@code RNN} is an implementation of recurrent neural networks which applies a single-gate
* recurrent layer to input. Two kinds of activation function are supported: ReLU and Tanh.
*
 * <p>The current implementation refers to the [paper](https://crl.ucsd.edu/~elman/Papers/fsit.pdf),
 * Finding Structure in Time - Elman, 1990.
*
* <p>The RNN operator is formulated as below:
*
* <p>With ReLU activation function: \(h_t = relu(W_{ih} * x_t + b_{ih} + W_{hh} * h_{(t-1)} +
* b_{hh})\)
*
* <p>With Tanh activation function: \(h_t = \tanh(W_{ih} * x_t + b_{ih} + W_{hh} * h_{(t-1)} +
* b_{hh})\)
*/
public class RNN extends RecurrentBlock {
private Activation activation;
/**
* Creates a vanilla RNN block.
*
* @param builder the builder used to create the RNN block
*/
RNN(Builder builder) {
super(builder);
activation = builder.activation;
gates = 1;
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
NDArrayEx ex = inputs.head().getNDArrayInternal();
Device device = inputs.head().getDevice();
NDList rnnParams = new NDList();
for (Parameter parameter : parameters.values()) {
rnnParams.add(parameterStore.getValue(parameter, device, training));
}
NDArray input = inputs.head();
if (inputs.size() == 1) {
int batchIndex = batchFirst ? 0 : 1;
inputs.add(
input.getManager()
.zeros(
new Shape(
(long) numLayers * getNumDirections(),
input.size(batchIndex),
stateSize)));
}
NDList outputs =
ex.rnn(
input,
inputs.get(1),
rnnParams,
hasBiases,
numLayers,
activation,
dropRate,
training,
bidirectional,
batchFirst);
if (returnState) {
return outputs;
}
outputs.stream().skip(1).forEach(NDArray::close);
return new NDList(outputs.get(0));
}
/**
* Creates a builder to build a {@link RNN}.
*
* @return a new builder
*/
public static Builder builder() {
return new Builder();
}
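    // Builder usage sketch (added for illustration; setStateSize/setNumLayers are assumed from
    // RecurrentBlock.BaseBuilder):
    //   RNN rnn = RNN.builder()
    //           .setStateSize(32)
    //           .setNumLayers(1)
    //           .setActivation(RNN.Activation.RELU)
    //           .build();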
/** The Builder to construct a {@link RNN} type of {@link Block}. */
public static final class Builder extends BaseBuilder<Builder> {
/** {@inheritDoc} */
@Override
protected Builder self() {
return this;
}
/**
* Sets the activation for the RNN: ReLU or Tanh.
*
* @param activation the activation
* @return this Builder
*/
public Builder setActivation(RNN.Activation activation) {
this.activation = activation;
return self();
}
/**
* Builds a {@link RNN} block.
*
* @return the {@link RNN} block
*/
public RNN build() {
Preconditions.checkArgument(
stateSize > 0 && numLayers > 0, "Must set stateSize and numLayers");
return new RNN(this);
}
}
/** An enum that enumerates the type of activation. */
public enum Activation {
RELU,
TANH
}
}
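// A minimal usage sketch for the RNN block above. The sizes are illustrative assumptions; the
// output shapes follow the formula in RecurrentBlock.getOutputShapes, so with returnState the
// block reports both the output sequence and the final hidden state.
final class RnnShapeSketch {
    static void sketch() {
        RNN rnn =
                RNN.builder()
                        .setStateSize(64)
                        .setNumLayers(2)
                        .setActivation(RNN.Activation.TANH)
                        .optBidirectional(true)
                        .optReturnState(true)
                        .build();
        // batch-first input of (batch, time, channel) = (8, 20, 32)
        Shape[] out = rnn.getOutputShapes(new Shape[] {new Shape(8, 20, 32)});
        // out[0] = (8, 20, 128): stateSize * 2 directions
        // out[1] = (4, 8, 64): numLayers * directions, batch, stateSize
    }
}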
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/recurrent/RecurrentBlock.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.recurrent;
import ai.djl.MalformedModelException;
import ai.djl.ndarray.types.LayoutType;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.AbstractBlock;
import ai.djl.nn.Block;
import ai.djl.nn.Parameter;
import ai.djl.nn.ParameterList;
import ai.djl.util.Pair;
import java.io.DataInputStream;
import java.io.IOException;
/**
* {@code RecurrentBlock} is an abstract implementation of recurrent neural networks.
*
* <p>Recurrent neural networks are neural networks with hidden states. They are very popular for
* natural language processing tasks, and other tasks which involve sequential data.
*
* <p>This [article](http://karpathy.github.io/2015/05/21/rnn-effectiveness/) written by Andrej
* Karpathy provides a detailed explanation of recurrent neural networks.
*
* <p>Currently, vanilla RNN, LSTM and GRU are implemented, with both multi-layer and bidirectional
* support.
*/
public abstract class RecurrentBlock extends AbstractBlock {
private static final byte VERSION = 2;
private static final LayoutType[] EXPECTED_LAYOUT = {
LayoutType.BATCH, LayoutType.TIME, LayoutType.CHANNEL
};
protected long stateSize;
protected float dropRate;
protected int numLayers;
protected int gates;
protected boolean batchFirst;
protected boolean hasBiases;
protected boolean bidirectional;
protected boolean returnState;
/**
* Creates a {@code RecurrentBlock} object.
*
* @param builder the {@code Builder} that has the necessary configurations
*/
@SuppressWarnings("this-escape")
public RecurrentBlock(BaseBuilder<?> builder) {
super(VERSION);
stateSize = builder.stateSize;
dropRate = builder.dropRate;
numLayers = builder.numLayers;
batchFirst = builder.batchFirst;
hasBiases = builder.hasBiases;
bidirectional = builder.bidirectional;
returnState = builder.returnState;
Parameter.Type[] parameterTypes =
hasBiases
? new Parameter.Type[] {Parameter.Type.WEIGHT, Parameter.Type.BIAS}
: new Parameter.Type[] {Parameter.Type.WEIGHT};
String[] directions = {"l"};
if (builder.bidirectional) {
directions = new String[] {"l", "r"};
}
String[] gateStrings = {"i2h", "h2h"};
for (int i = 0; i < numLayers; i++) {
for (Parameter.Type parameterType : parameterTypes) {
for (String direction : directions) {
for (String gateString : gateStrings) {
String name =
direction + '_' + i + '_' + gateString + '_' + parameterType.name();
addParameter(
Parameter.builder().setName(name).setType(parameterType).build());
}
}
}
}
}
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(Shape[] inputs) {
Shape inputShape = inputs[0];
Shape outputShape =
new Shape(inputShape.get(0), inputShape.get(1), stateSize * getNumDirections());
if (!returnState) {
return new Shape[] {
outputShape,
};
}
return new Shape[] {
outputShape,
new Shape(
(long) numLayers * getNumDirections(),
inputShape.get((batchFirst) ? 0 : 1),
stateSize)
};
}
/** {@inheritDoc} */
@Override
protected void beforeInitialize(Shape... inputShapes) {
super.beforeInitialize(inputShapes);
Block.validateLayout(EXPECTED_LAYOUT, inputShapes[0].getLayout());
}
/** {@inheritDoc} */
@Override
public void prepare(Shape[] inputs) {
Shape inputShape = inputs[0];
ParameterList parameters = getDirectParameters();
for (Pair<String, Parameter> pair : parameters) {
String name = pair.getKey();
Parameter parameter = pair.getValue();
int layer = Integer.parseInt(name.split("_")[1]);
long inputSize = inputShape.get(2);
if (layer > 0) {
inputSize = stateSize * getNumDirections();
}
if (name.contains("BIAS")) {
parameter.setShape(new Shape(gates * stateSize));
} else if (name.contains("i2h")) {
parameter.setShape(new Shape(gates * stateSize, inputSize));
} else if (name.contains("h2h")) {
parameter.setShape(new Shape(gates * stateSize, stateSize));
} else {
throw new IllegalArgumentException("Invalid parameter name");
}
}
}
/** {@inheritDoc} */
@Override
public void loadMetadata(byte loadVersion, DataInputStream is)
throws IOException, MalformedModelException {
if (loadVersion == version) {
readInputShapes(is);
} else if (loadVersion != 1) {
throw new MalformedModelException("Unsupported encoding version: " + loadVersion);
}
}
protected int getNumDirections() {
return bidirectional ? 2 : 1;
}
/** The Builder to construct a {@link RecurrentBlock} type of {@link ai.djl.nn.Block}. */
@SuppressWarnings("rawtypes")
public abstract static class BaseBuilder<T extends BaseBuilder> {
protected float dropRate;
protected long stateSize;
protected int numLayers;
// set it true by default for usability
protected boolean batchFirst = true;
protected boolean hasBiases = true;
protected boolean bidirectional;
protected boolean returnState;
protected RNN.Activation activation;
/**
* Sets the drop rate of the dropout on the outputs of each RNN layer, except the last
* layer.
*
* @param dropRate the drop rate of the dropout
* @return this Builder
*/
public T optDropRate(float dropRate) {
this.dropRate = dropRate;
return self();
}
/**
* Sets the <b>Required</b> size of the state for each layer.
*
* @param stateSize the size of the state for each layer
* @return this Builder
*/
public T setStateSize(int stateSize) {
this.stateSize = stateSize;
return self();
}
/**
* Sets the <b>Required</b> number of stacked layers.
*
* @param numLayers the number of stacked layers
* @return this Builder
*/
public T setNumLayers(int numLayers) {
this.numLayers = numLayers;
return self();
}
/**
* Sets the optional parameter that indicates whether to use bidirectional recurrent layers.
*
* @param useBidirectional whether to use bidirectional recurrent layers
* @return this Builder
*/
public T optBidirectional(boolean useBidirectional) {
this.bidirectional = useBidirectional;
return self();
}
/**
* Sets the optional batchFirst flag that indicates whether the input is batch major or not.
* The default value is true.
*
* @param batchFirst whether the input is batch major or not
* @return this Builder
*/
public T optBatchFirst(boolean batchFirst) {
this.batchFirst = batchFirst;
return self();
}
/**
* Sets the optional biases flag that indicates whether to use biases or not.
*
* @param hasBiases whether to use biases or not
* @return this Builder
*/
public T optHasBiases(boolean hasBiases) {
this.hasBiases = hasBiases;
return self();
}
/**
* Sets the optional flag that indicates whether to return the state or not. This is
* typically useful when you use a RecurrentBlock inside a Sequential block. The default
* value is false.
*
* @param returnState whether to return state or not
* @return this Builder
*/
public T optReturnState(boolean returnState) {
this.returnState = returnState;
return self();
}
protected abstract T self();
}
}
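// A sketch of the parameter shapes produced by prepare() above, written with plain Shape
// arithmetic. The gate count (1 for the vanilla RNN, 4 for LSTM), state size, input channel
// count and direction count used here are illustrative assumptions.
final class RecurrentParameterShapeSketch {
    static Shape parameterShape(
            String name,
            int layer,
            long gates,
            long stateSize,
            long inputChannels,
            long directions) {
        // layer 0 sees the raw input channels; deeper layers see the previous layer's output
        long inputSize = layer == 0 ? inputChannels : stateSize * directions;
        if (name.contains("BIAS")) {
            return new Shape(gates * stateSize); // one bias entry per gate and state unit
        } else if (name.contains("i2h")) {
            return new Shape(gates * stateSize, inputSize); // input-to-hidden weights
        } else {
            return new Shape(gates * stateSize, stateSize); // hidden-to-hidden weights
        }
    }
}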
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/recurrent/package-info.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
/**
* Contains classes for recurrent neural network operations.
*
* @see ai.djl.nn.recurrent.RecurrentBlock
*/
package ai.djl.nn.recurrent;
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/transformer/BertBlock.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.transformer;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.index.NDIndex;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.AbstractBlock;
import ai.djl.nn.Activation;
import ai.djl.nn.Block;
import ai.djl.nn.Parameter;
import ai.djl.nn.core.Linear;
import ai.djl.nn.norm.BatchNorm;
import ai.djl.nn.norm.Dropout;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
* Implements the core BERT model (without the next-sentence and masked-language tasks).
*
* <p>This closely follows the original <a href="https://arxiv.org/abs/1810.04805">Devlin et.
* al.</a> paper and its reference implementation.
*/
// We name local variables for tensor dimensions as in the paper and the reference code.
// While against the general code style, it makes things much easier to read here.
@SuppressWarnings({
"LocalFinalVariableName",
"PMD.LocalVariableNamingConventions",
"ParameterName",
"PMD.FormalParameterNamingConventions"
})
public final class BertBlock extends AbstractBlock {
private static final byte VERSION = 1;
private static final String PARAM_POSITION_EMBEDDING = "positionEmbedding";
private int embeddingSize;
private int tokenDictionarySize;
private int typeDictionarySize;
private IdEmbedding tokenEmbedding;
private IdEmbedding typeEmbedding;
private Parameter positionEmebdding;
private BatchNorm embeddingNorm;
private Dropout embeddingDropout;
private List<TransformerEncoderBlock> transformerEncoderBlocks;
private Linear pooling;
private BertBlock(Builder builder) {
super(VERSION);
this.embeddingSize = builder.embeddingSize;
// embedding for the input tokens
this.tokenEmbedding =
addChildBlock(
"tokenEmbedding",
new IdEmbedding.Builder()
.setEmbeddingSize(builder.embeddingSize)
.setDictionarySize(builder.tokenDictionarySize)
.build());
this.tokenDictionarySize = builder.tokenDictionarySize;
// embedding for the position
this.positionEmebdding =
addParameter(
Parameter.builder()
.setName(PARAM_POSITION_EMBEDDING)
.setType(Parameter.Type.WEIGHT)
.optShape(
new Shape(builder.maxSequenceLength, builder.embeddingSize))
.build());
// embedding for the input types
this.typeEmbedding =
addChildBlock(
"typeEmbedding",
new IdEmbedding.Builder()
.setEmbeddingSize(builder.embeddingSize)
.setDictionarySize(builder.typeDictionarySize)
.build());
this.typeDictionarySize = builder.typeDictionarySize;
// normalizer for the embeddings
this.embeddingNorm = addChildBlock("embeddingNorm", BatchNorm.builder().optAxis(2).build());
// dropout to apply after embedding normalization
this.embeddingDropout =
addChildBlock(
"embeddingDropout",
Dropout.builder().optRate(builder.hiddenDropoutProbability).build());
// the transformer blocks
this.transformerEncoderBlocks = new ArrayList<>(builder.transformerBlockCount);
for (int i = 0; i < builder.transformerBlockCount; ++i) {
this.transformerEncoderBlocks.add(
addChildBlock(
"transformer_" + i,
new TransformerEncoderBlock(
builder.embeddingSize,
builder.attentionHeadCount,
builder.hiddenSize,
0.1f,
Activation::gelu)));
}
// add projection for pooling layer
this.pooling =
addChildBlock(
"poolingProjection",
Linear.builder().setUnits(builder.embeddingSize).optBias(true).build());
}
/**
* Returns the token embedding used by this Bert model.
*
* @return the token embedding used by this Bert model
*/
public IdEmbedding getTokenEmbedding() {
return this.tokenEmbedding;
}
/**
* Returns the embedding size used for tokens.
*
* @return the embedding size used for tokens
*/
public int getEmbeddingSize() {
return embeddingSize;
}
/**
* Returns the size of the token dictionary.
*
* @return the size of the token dictionary
*/
public int getTokenDictionarySize() {
return tokenDictionarySize;
}
/**
* Returns the size of the type dictionary.
*
* @return the size of the type dictionary
*/
public int getTypeDictionarySize() {
return typeDictionarySize;
}
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(Shape[] inputShapes) {
long batch = inputShapes[0].get(0);
long seqLength = inputShapes[0].get(1);
return new Shape[] {
new Shape(batch, seqLength, embeddingSize), new Shape(batch, embeddingSize)
};
}
/** {@inheritDoc} */
@Override
public void initializeChildBlocks(NDManager manager, DataType dataType, Shape... inputShapes) {
super.beforeInitialize(inputShapes);
inputNames = Arrays.asList("tokenIds", "typeIds", "masks");
Shape[] tokenShape = {inputShapes[0]};
Shape[] typeShape = {inputShapes[1]};
this.tokenEmbedding.initialize(manager, dataType, tokenShape);
Shape[] embeddingOutput = this.tokenEmbedding.getOutputShapes(tokenShape);
this.typeEmbedding.initialize(manager, dataType, typeShape);
this.embeddingNorm.initialize(manager, dataType, embeddingOutput);
this.embeddingDropout.initialize(manager, dataType, embeddingOutput);
for (final TransformerEncoderBlock tb : transformerEncoderBlocks) {
tb.initialize(manager, dataType, embeddingOutput);
}
long batchSize = inputShapes[0].get(0);
this.pooling.initialize(manager, dataType, new Shape(batchSize, embeddingSize));
}
/**
* Creates a 3D attention mask from a 2D tensor mask.
*
* @param ids 2D Tensor of shape (B, F)
* @param mask 2D Tensor of shape (B, T)
* @return float tensor of shape (B, F, T)
*/
public static NDArray createAttentionMaskFromInputMask(NDArray ids, NDArray mask) {
long batchSize = ids.getShape().get(0);
long fromSeqLength = ids.getShape().get(1);
long toSeqLength = mask.getShape().get(1);
// we ignore the actual content of the ids, we just create a "pseudo-mask" of ones for them
NDArray broadcastOnes =
ids.onesLike().toType(DataType.FLOAT32, false).reshape(batchSize, fromSeqLength, 1);
// add empty dimension to multiply with broadcasted ones
NDArray mask3D = mask.toType(DataType.FLOAT32, false).reshape(batchSize, 1, toSeqLength);
return broadcastOnes.matMul(mask3D);
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore ps, NDList inputs, boolean training, PairList<String, Object> params) {
// First input are the tokens.
NDArray tokenIds = inputs.get(0);
// Second are the token types (first / second sentence).
NDArray typeIds = inputs.get(1);
// Third are the masks for the input
NDArray masks = inputs.get(2);
NDManager initScope = NDManager.subManagerOf(tokenIds);
initScope.tempAttachAll(inputs);
// Create embeddings for inputs
NDArray embeddedTokens =
tokenEmbedding.forward(ps, new NDList(tokenIds), training).singletonOrThrow();
NDArray embeddedTypes =
typeEmbedding.forward(ps, new NDList(typeIds), training).singletonOrThrow();
NDArray embeddedPositions = ps.getValue(positionEmebdding, tokenIds.getDevice(), training);
// Merge them to one embedding by adding them
// (We can just add the position embedding, even though it does not have a batch dimension:
// the tensor is automagically "broadcast" i.e. repeated in the batch dimension. That
// gives us the result we want: every embedding gets the same position embedding added
// to it)
NDArray embedding = embeddedTokens.add(embeddedTypes).add(embeddedPositions);
// Apply normalization
NDList normalizedEmbedding = embeddingNorm.forward(ps, new NDList(embedding), training);
NDList dropoutEmbedding = embeddingDropout.forward(ps, normalizedEmbedding, training);
// create 3D attention mask
NDArray attentionMask = createAttentionMaskFromInputMask(tokenIds, masks);
Shape maskShape = attentionMask.getShape();
NDArray offsetMask =
attentionMask
.reshape(maskShape.get(0), 1, maskShape.get(1), maskShape.get(2))
.toType(DataType.FLOAT32, false)
.mul(-1f) // turn 1 into -1
.add(1f) // turn 0s to 1s, -1s to 0s
.mul(-100000f); // turn 1s (original 0s) into -100000
// Run through all transformer blocks
NDList lastOutput = dropoutEmbedding;
initScope.ret(lastOutput);
initScope.ret(offsetMask);
initScope.close();
for (final TransformerEncoderBlock block : transformerEncoderBlocks) {
NDList input = new NDList(lastOutput.head(), offsetMask);
try (NDManager innerScope = NDManager.subManagerOf(input)) {
innerScope.tempAttachAll(input);
lastOutput = innerScope.ret(block.forward(ps, input, training));
}
}
// We also return the pooled output - this is an additional fully connected layer
// applied only to the first token, assumed to be the CLS token, to be used for training
// classifiers; shape = (B, E). We apply a tanh activation to it.
NDArray firstToken = lastOutput.head().get(new NDIndex(":,1,:")).squeeze();
NDArray pooledFirstToken =
pooling.forward(ps, new NDList(firstToken), training).head().tanh();
lastOutput.add(pooledFirstToken);
return lastOutput;
}
/**
* Returns a new BertBlock builder.
*
* @return a new BertBlock builder.
*/
public static Builder builder() {
return new Builder();
}
/** The Builder to construct a {@link BertBlock} type of {@link Block}. */
public static final class Builder {
int tokenDictionarySize;
int typeDictionarySize = 16;
int embeddingSize = 768;
int transformerBlockCount = 12;
int attentionHeadCount = 12;
int hiddenSize = 4 * embeddingSize;
float hiddenDropoutProbability = 0.1f;
// float attentionDropoutProbability = 0.1f;
int maxSequenceLength = 512;
// float initializerRange = 0.02f;
private Builder() {}
/**
* Sets the number of tokens in the dictionary.
*
* @param tokenDictionarySize the number of tokens in the dictionary
* @return this builder
*/
public Builder setTokenDictionarySize(int tokenDictionarySize) {
this.tokenDictionarySize = tokenDictionarySize;
return this;
}
/**
* Sets the number of possible token types. This should be a very small number (2-16).
*
* @param typeDictionarySize the number of possible token types. This should be a very small
* number (2-16)
* @return this builder
*/
public Builder optTypeDictionarySize(int typeDictionarySize) {
this.typeDictionarySize = typeDictionarySize;
return this;
}
/**
* Sets the embedding size to use for input tokens. This size must be divisible by the
* number of attention heads.
*
* @param embeddingSize the embedding size to use for input tokens.
* @return this builder
*/
public Builder optEmbeddingSize(int embeddingSize) {
this.embeddingSize = embeddingSize;
return this;
}
/**
* Sets the number of transformer blocks to use.
*
* @param transformerBlockCount the number of transformer blocks to use
* @return this builder
*/
public Builder optTransformerBlockCount(int transformerBlockCount) {
this.transformerBlockCount = transformerBlockCount;
return this;
}
/**
* Sets the number of attention heads to use in each transformer block. This number must
* divide the embedding size without remainder.
*
* @param attentionHeadCount the number of attention heads to use in each transformer block.
* @return this builder
*/
public Builder optAttentionHeadCount(int attentionHeadCount) {
this.attentionHeadCount = attentionHeadCount;
return this;
}
/**
* Sets the size of the hidden layers in the fully connected networks used.
*
* @param hiddenSize the size of the hidden layers in the fully connected networks used.
* @return this builder
*/
public Builder optHiddenSize(int hiddenSize) {
this.hiddenSize = hiddenSize;
return this;
}
/**
* Sets the dropout probability in the hidden fully connected networks.
*
* @param hiddenDropoutProbability the dropout probability in the hidden fully connected
*     networks.
* @return this builder
*/
public Builder optHiddenDropoutProbability(float hiddenDropoutProbability) {
this.hiddenDropoutProbability = hiddenDropoutProbability;
return this;
}
/**
* Sets the maximum sequence length this model can process. Memory and compute requirements
* of the attention mechanism are O(n²), so large values can easily exhaust your GPU memory!
*
* @param maxSequenceLength the maximum sequence length this model can process.
* @return this builder
*/
public Builder optMaxSequenceLength(int maxSequenceLength) {
this.maxSequenceLength = maxSequenceLength;
return this;
}
/**
* Tiny config for testing on laptops.
*
* @return this builder
*/
public Builder nano() {
typeDictionarySize = 2;
embeddingSize = 256;
transformerBlockCount = 4;
attentionHeadCount = 4;
hiddenSize = 4 * embeddingSize;
hiddenDropoutProbability = 0.1f;
// attentionDropoutProbability = 0.1f;
maxSequenceLength = 128;
// initializerRange = 0.02f;
return this;
}
/**
* Sets this builder's params to a minimal configuration that nevertheless performs quite
* well.
*
* @return this builder
*/
public Builder micro() {
typeDictionarySize = 2;
embeddingSize = 512;
transformerBlockCount = 12;
attentionHeadCount = 8;
hiddenSize = 4 * embeddingSize;
hiddenDropoutProbability = 0.1f;
// attentionDropoutProbability = 0.1f;
maxSequenceLength = 128;
// initializerRange = 0.02f;
return this;
}
/**
* Sets this builder's params to the BASE config of the original BERT paper. (except for the
* dictionary size)
*
* @return this builder
*/
public Builder base() {
typeDictionarySize = 16;
embeddingSize = 768;
transformerBlockCount = 12;
attentionHeadCount = 12;
hiddenSize = 4 * embeddingSize;
hiddenDropoutProbability = 0.1f;
// attentionDropoutProbability = 0.1f;
maxSequenceLength = 256;
// initializerRange = 0.02f;
return this;
}
/**
* Sets this builder's params to the LARGE config of the original BERT paper. (except for
* the dictionary size)
*
* @return this builder
*/
public Builder large() {
typeDictionarySize = 16;
embeddingSize = 1024;
transformerBlockCount = 24;
attentionHeadCount = 16;
hiddenSize = 4 * embeddingSize;
hiddenDropoutProbability = 0.1f;
// attentionDropoutProbability = 0.1f;
maxSequenceLength = 512;
// initializerRange = 0.02f;
return this;
}
/**
* Returns a new BertBlock with the parameters of this builder.
*
* @return a new BertBlock with the parameters of this builder.
*/
public BertBlock build() {
if (tokenDictionarySize == 0) {
throw new IllegalArgumentException("You must specify the dictionary size.");
}
return new BertBlock(this);
}
}
}
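// A minimal usage sketch for the block above: configure a small model, then inspect the two
// output shapes (embedded sequence and pooled output). The vocabulary size, batch size and
// sequence length are illustrative assumptions.
final class BertBlockUsageSketch {
    static void sketch() {
        BertBlock bert =
                BertBlock.builder()
                        .nano() // tiny preset defined above: embeddingSize = 256
                        .setTokenDictionarySize(30000) // required: size of the token vocabulary
                        .build();
        // inputs are (tokenIds, typeIds, masks), each of shape (batch, seqLength)
        Shape ids = new Shape(2, 128);
        Shape[] out = bert.getOutputShapes(new Shape[] {ids, ids, ids});
        // out[0] = (2, 128, 256): one embedding per token
        // out[1] = (2, 256): pooled output for classification tasks
    }
}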
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/transformer/BertMaskedLanguageModelBlock.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.transformer;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.AbstractBlock;
import ai.djl.nn.Parameter;
import ai.djl.nn.core.Linear;
import ai.djl.nn.norm.BatchNorm;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
import java.util.Arrays;
import java.util.function.Function;
/** Block for the bert masked language task. */
public class BertMaskedLanguageModelBlock extends AbstractBlock {
private static final byte VERSION = 1;
private Linear sequenceProjection;
private BatchNorm sequenceNorm;
private Parameter dictionaryBias;
private Function<NDArray, NDArray> hiddenActivation;
/**
* Creates a new block that applies the masked language task.
*
* @param bertBlock the bert block to create the task for
* @param hiddenActivation the activation to use for the hidden layer
*/
@SuppressWarnings("this-escape")
public BertMaskedLanguageModelBlock(
BertBlock bertBlock, Function<NDArray, NDArray> hiddenActivation) {
super(VERSION);
this.sequenceProjection =
addChildBlock(
"sequenceProjection",
Linear.builder()
.setUnits(bertBlock.getEmbeddingSize())
.optBias(true)
.build());
this.sequenceNorm = addChildBlock("sequenceNorm", BatchNorm.builder().optAxis(1).build());
this.dictionaryBias =
addParameter(
Parameter.builder()
.setName("dictionaryBias")
.setType(Parameter.Type.BIAS)
.optShape(new Shape(bertBlock.getTokenDictionarySize()))
.build());
this.hiddenActivation = hiddenActivation;
}
/**
* Given a 3D array of shape (B, S, E) and a 2D array of indices of shape (B, I), returns the
* flattened lookup result of shape (B * I, E).
*
* @param sequences Sequences of embeddings
* @param indices Indices into the sequences. The indices are relative within each sequence,
* i.e. [[0, 1],[0, 1]] would return the first two elements of two sequences.
* @return The flattened result of gathering elements from the sequences
*/
public static NDArray gatherFromIndices(NDArray sequences, NDArray indices) {
int batchSize = (int) sequences.getShape().get(0);
int sequenceLength = (int) sequences.getShape().get(1);
int width = (int) sequences.getShape().get(2);
int indicesPerSequence = (int) indices.getShape().get(1);
// this creates a list of offsets for each sequence. Say the sequence length is 16 and
// the batch size is 4, this creates [0, 16, 32, 48]. Each offset marks where its
// sequence starts in the flattened batch.
NDArray sequenceOffsets =
indices.getManager()
.newSubManager(indices.getDevice())
.arange(0, batchSize) // [0, 1, 2, ..., batchSize - 1]
.mul(sequenceLength) // [0, 16, 32, ...]
.reshape(batchSize, 1); // [[0], [16], [32], ...]
// The following adds the sequence offsets to every index for every sequence.
// This works because the single value in each sequence offset is broadcast across
// all indices of that sequence.
NDArray absoluteIndices =
indices.add(sequenceOffsets).reshape(1, (long) batchSize * indicesPerSequence);
// Now we create one long sequence by appending all sequences
NDArray flattenedSequences = sequences.reshape((long) batchSize * sequenceLength, width);
// We use the absolute indices to gather the elements of the flattened sequences
return flattenedSequences.gatherNd(absoluteIndices);
}
/** {@inheritDoc} */
@Override
public void initializeChildBlocks(NDManager manager, DataType dataType, Shape... inputShapes) {
inputNames = Arrays.asList("sequence", "maskedIndices", "embeddingTable");
int width = (int) inputShapes[0].get(2);
sequenceProjection.initialize(manager, dataType, new Shape(-1, width));
sequenceNorm.initialize(manager, dataType, new Shape(-1, width));
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore ps, NDList inputs, boolean training, PairList<String, Object> params) {
NDArray sequenceOutput = inputs.get(0); // (B, S, E)
NDArray maskedIndices = inputs.get(1); // (B, I)
NDArray embeddingTable = inputs.get(2); // (D, E)
try (NDManager scope = NDManager.subManagerOf(sequenceOutput)) {
scope.tempAttachAll(sequenceOutput, maskedIndices);
NDArray gatheredTokens = gatherFromIndices(sequenceOutput, maskedIndices); // (B * I, E)
NDArray projectedTokens =
hiddenActivation.apply(
sequenceProjection
.forward(ps, new NDList(gatheredTokens), training)
.head()); // (B * I, E)
NDArray normalizedTokens =
sequenceNorm
.forward(ps, new NDList(projectedTokens), training)
.head(); // (B * I, E)
// raw logits for each position to correspond to an entry in the embedding table
NDArray embeddingTransposed = embeddingTable.transpose();
embeddingTransposed.attach(gatheredTokens.getManager());
NDArray logits = normalizedTokens.dot(embeddingTransposed); // (B * I, D)
// we add an offset for each dictionary entry
NDArray logitsWithBias =
logits.add(
ps.getValue(
dictionaryBias, logits.getDevice(), training)); // (B * I, D)
// now we apply log Softmax to get proper log probabilities
NDArray logProbs = logitsWithBias.logSoftmax(1); // (B * I, D)
return scope.ret(new NDList(logProbs));
}
}
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(final Shape[] inputShapes) {
int batchSize = (int) inputShapes[0].get(0);
int indexCount = (int) inputShapes[1].get(1);
int dictionarySize = (int) inputShapes[2].get(0);
return new Shape[] {new Shape((long) batchSize * indexCount, dictionarySize)};
}
}
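// A plain-Java sketch of the offset arithmetic used by gatherFromIndices above: each
// per-sequence index is shifted by (sequenceIndex * sequenceLength) so it addresses the right
// row of the flattened (B * S, E) tensor. The example values are illustrative assumptions.
final class GatherIndexSketch {
    // indices[b] holds the masked positions of sequence b; the result is the flat index list
    static long[] absoluteIndices(long[][] indices, long sequenceLength) {
        long[] flat = new long[indices.length * indices[0].length];
        int k = 0;
        for (int b = 0; b < indices.length; b++) {
            for (long idx : indices[b]) {
                flat[k++] = b * sequenceLength + idx; // add the per-sequence offset
            }
        }
        return flat;
    }
    // e.g. absoluteIndices(new long[][] {{1, 3}, {0, 2}}, 4) returns {1, 3, 4, 6}
}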
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/transformer/BertMaskedLanguageModelLoss.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.transformer;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.DataType;
import ai.djl.training.loss.Loss;
/** The loss for the bert masked language model task. */
public class BertMaskedLanguageModelLoss extends Loss {
private int labelIdx;
private int maskIdx;
private int logProbsIdx;
/**
* Creates an MLM loss.
*
* @param labelIdx index of labels
* @param maskIdx index of mask
* @param logProbsIdx index of log probs
*/
public BertMaskedLanguageModelLoss(int labelIdx, int maskIdx, int logProbsIdx) {
super("BertMLLoss");
this.labelIdx = labelIdx;
this.maskIdx = maskIdx;
this.logProbsIdx = logProbsIdx;
}
/** {@inheritDoc} */
@Override
public NDArray evaluate(NDList labels, NDList predictions) {
try (NDManager scope = NDManager.subManagerOf(labels)) {
scope.tempAttachAll(labels, predictions);
NDArray logProbs = predictions.get(logProbsIdx); // (B * I, D)
int dictionarySize = (int) logProbs.getShape().get(1);
NDArray targetIds = labels.get(labelIdx).flatten(); // (B * I)
NDArray mask = labels.get(maskIdx).flatten().toType(DataType.FLOAT32, false); // (B * I)
NDArray targetOneHots = targetIds.oneHot(dictionarySize);
// Multiplying log_probs and one_hot_labels leaves the log probabilities of the correct
// entries.
// By summing we get the total prediction quality. We want to minimize the error,
// so we negate the value - as we have logarithms, probability = 1 means log(prob) = 0,
// the less sure we are the smaller the log value.
NDArray perExampleLoss = logProbs.mul(targetOneHots).sum(new int[] {1}).mul(-1);
// Only keep the losses at positions that were actually masked, then sum them up
NDArray numerator = perExampleLoss.mul(mask).sum();
// We normalize the loss by the actual number of predictions we had to make
NDArray denominator = mask.sum().add(1e-5f);
NDArray result = numerator.div(denominator);
return scope.ret(result);
}
}
/**
* Calculates the percentage of correctly predicted masked tokens.
*
* @param labels expected tokens and mask
* @param predictions prediction of a bert model
* @return the percentage of correctly predicted masked tokens
*/
public NDArray accuracy(NDList labels, NDList predictions) {
try (NDManager scope = NDManager.subManagerOf(labels)) {
scope.tempAttachAll(labels, predictions);
NDArray mask = labels.get(maskIdx).flatten(); // (B * I)
NDArray targetIds = labels.get(labelIdx).flatten(); // (B * I)
NDArray logProbs = predictions.get(logProbsIdx); // (B * I, D)
NDArray predictedIs = logProbs.argMax(1).toType(DataType.INT32, false); // (B * I)
NDArray equal = predictedIs.eq(targetIds).mul(mask);
NDArray equalCount = equal.sum().toType(DataType.FLOAT32, false);
NDArray count = mask.sum().toType(DataType.FLOAT32, false);
NDArray result = equalCount.div(count);
return scope.ret(result);
}
}
}
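// A plain-Java sketch of the evaluate() computation above: accumulate -log p(target) over the
// positions where mask == 1 and normalize by the number of masked positions (with the same
// small epsilon). Array inputs stand in for the NDArrays used by the loss; values are
// illustrative assumptions.
final class MaskedLmLossSketch {
    static double loss(double[][] logProbs, int[] targetIds, int[] mask) {
        double numerator = 0;
        double denominator = 1e-5; // epsilon, as in evaluate() above
        for (int i = 0; i < targetIds.length; i++) {
            numerator += -logProbs[i][targetIds[i]] * mask[i]; // negative log likelihood per position
            denominator += mask[i];
        }
        return numerator / denominator;
    }
}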
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/transformer/BertNextSentenceBlock.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.transformer;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.AbstractBlock;
import ai.djl.nn.core.Linear;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
import java.util.Collections;
/** Block to perform the Bert next-sentence-prediction task. */
public class BertNextSentenceBlock extends AbstractBlock {
private Linear binaryClassifier;
/** Creates a next sentence block. */
@SuppressWarnings("this-escape")
public BertNextSentenceBlock() {
binaryClassifier =
addChildBlock(
"binaryClassifier", Linear.builder().setUnits(2).optBias(true).build());
}
/** {@inheritDoc} */
@Override
public void initializeChildBlocks(NDManager manager, DataType dataType, Shape... inputShapes) {
this.inputNames = Collections.singletonList("pooledOutput");
this.binaryClassifier.initialize(manager, dataType, inputShapes);
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore ps, NDList inputs, boolean training, PairList<String, Object> params) {
return new NDList(binaryClassifier.forward(ps, inputs, training).head().logSoftmax(1));
}
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(Shape[] inputShapes) {
return new Shape[] {new Shape(inputShapes[0].get(0), 2)};
}
}
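// A minimal usage sketch: the block maps the pooled BERT output of shape (B, E) to two log
// probabilities per example. Batch size and embedding size below are illustrative assumptions.
final class NextSentenceShapeSketch {
    static void sketch() {
        BertNextSentenceBlock block = new BertNextSentenceBlock();
        Shape[] out = block.getOutputShapes(new Shape[] {new Shape(4, 256)});
        // out[0] = (4, 2): log-softmax over {isNext, isNotNext} for each example
    }
}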
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/transformer/BertNextSentenceLoss.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.transformer;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.DataType;
import ai.djl.training.loss.Loss;
/** Calculates the loss for the next sentence prediction task. */
public class BertNextSentenceLoss extends Loss {
private int labelIdx;
private int nextSentencePredictionIdx;
/**
* Creates a new bert next sentence loss.
*
* @param labelIdx index of the next sentence labels
* @param nextSentencePredictionIdx index of the next sentence prediction in the bert output
*/
public BertNextSentenceLoss(int labelIdx, int nextSentencePredictionIdx) {
super("BertNSLoss");
this.labelIdx = labelIdx;
this.nextSentencePredictionIdx = nextSentencePredictionIdx;
}
/** {@inheritDoc} */
@Override
public NDArray evaluate(NDList labels, NDList predictions) {
try (NDManager scope = NDManager.subManagerOf(labels)) {
scope.tempAttachAll(labels, predictions);
NDArray label = labels.get(labelIdx).toType(DataType.FLOAT32, false);
// predictions are log(softmax)
NDArray logPredictions = predictions.get(nextSentencePredictionIdx);
NDArray oneHotLabels = label.oneHot(2);
// we use negative log likelihood as loss: log(softmax) turns high confidence into
// negative values near zero, low confidence into negative values near -inf,
// negating gives almost 0 for high confidence and near +inf for very low confidence
NDArray logPredictionForLabels = oneHotLabels.mul(logPredictions);
NDArray summedPredictions = logPredictionForLabels.sum(new int[] {1});
NDArray perExampleLoss = summedPredictions.mul(-1f);
NDArray result = perExampleLoss.mean();
return scope.ret(result);
}
}
/**
* Calculates the fraction of correct predictions.
*
* @param labels the labels with the correct predictions
* @param predictions the bert pretraining model output
* @return the fraction of correct predictions.
*/
public NDArray accuracy(NDList labels, NDList predictions) {
try (NDManager scope = NDManager.subManagerOf(labels)) {
scope.tempAttachAll(labels, predictions);
NDArray label = labels.get(labelIdx);
NDArray predictionLogProbs = predictions.get(nextSentencePredictionIdx);
// predictions are log(softmax) -> highest confidence is the largest (least negative) value, near 0
NDArray prediction = predictionLogProbs.argMax(1).toType(DataType.INT32, false);
NDArray equalCount = label.eq(prediction).sum().toType(DataType.FLOAT32, false);
NDArray result = equalCount.div(label.getShape().size());
return scope.ret(result);
}
}
}
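// A plain-Java sketch of evaluate() above: with log-softmax predictions of shape (B, 2), the
// loss is the mean of -logPrediction[label] over the batch. Values are illustrative assumptions.
final class NextSentenceLossSketch {
    static double loss(double[][] logPredictions, int[] labels) {
        double sum = 0;
        for (int i = 0; i < labels.length; i++) {
            sum += -logPredictions[i][labels[i]]; // the label picks the predicted log probability
        }
        return sum / labels.length; // mean over the batch
    }
}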
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/transformer/BertPretrainingBlock.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.transformer;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.AbstractBlock;
import ai.djl.nn.Activation;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
import java.util.Arrays;
/** Creates a block that performs all bert pretraining tasks (next sentence and masked language). */
public class BertPretrainingBlock extends AbstractBlock {
private BertBlock bertBlock;
private BertMaskedLanguageModelBlock mlBlock;
private BertNextSentenceBlock nsBlock;
/**
* Creates a new Bert pretraining block fitting the given bert configuration.
*
* @param builder a builder with a bert configuration
*/
@SuppressWarnings("this-escape")
public BertPretrainingBlock(final BertBlock.Builder builder) {
this.bertBlock = addChildBlock("Bert", builder.build());
this.mlBlock =
addChildBlock(
"BertMaskedLanguageModelBlock",
new BertMaskedLanguageModelBlock(bertBlock, Activation::gelu));
this.nsBlock = addChildBlock("BertNextSentenceBlock", new BertNextSentenceBlock());
}
/** {@inheritDoc} */
@Override
public void initializeChildBlocks(NDManager manager, DataType dataType, Shape... inputShapes) {
inputNames = Arrays.asList("tokenIds", "typeIds", "sequenceMasks", "maskedIndices");
bertBlock.initialize(manager, dataType, inputShapes);
Shape[] bertOutputShapes = bertBlock.getOutputShapes(inputShapes);
Shape embeddedSequence = bertOutputShapes[0];
Shape pooledOutput = bertOutputShapes[1];
Shape maskedIndices = inputShapes[2];
Shape embeddingTableShape =
new Shape(bertBlock.getTokenDictionarySize(), bertBlock.getEmbeddingSize());
mlBlock.initialize(manager, dataType, embeddedSequence, embeddingTableShape, maskedIndices);
nsBlock.initialize(manager, dataType, pooledOutput);
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore ps, NDList inputs, boolean training, PairList<String, Object> params) {
NDArray tokenIds = inputs.get(0);
NDArray typeIds = inputs.get(1);
NDArray sequenceMasks = inputs.get(2);
NDArray maskedIndices = inputs.get(3);
try (NDManager scope = NDManager.subManagerOf(tokenIds)) {
scope.tempAttachAll(inputs);
// run the core bert model
NDList bertResult =
bertBlock.forward(ps, new NDList(tokenIds, typeIds, sequenceMasks), training);
NDArray embeddedSequence = bertResult.get(0);
NDArray pooledOutput = bertResult.get(1);
// apply pooled output to the classifier
NDArray nextSentenceProbabilities =
nsBlock.forward(ps, new NDList(pooledOutput), training).singletonOrThrow();
// de-mask masked tokens
NDArray embeddingTable =
bertBlock
.getTokenEmbedding()
.getValue(ps, embeddedSequence.getDevice(), training);
NDArray logProbs =
mlBlock.forward(
ps,
new NDList(embeddedSequence, maskedIndices, embeddingTable),
training)
.singletonOrThrow();
// return the next sentence & masked language result to apply the loss to
return scope.ret(new NDList(nextSentenceProbabilities, logProbs));
}
}
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(Shape[] inputShapes) {
long batchSize = inputShapes[0].get(0);
long maskedIndexCount = inputShapes[3].get(1);
return new Shape[] {
new Shape(batchSize, 2),
new Shape(batchSize, maskedIndexCount, bertBlock.getTokenDictionarySize())
};
}
}
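// A minimal usage sketch for the pretraining block above: it is constructed from a BertBlock
// configuration and reports one output shape per task. Vocabulary size, batch size, sequence
// length and the number of masked positions per sequence are illustrative assumptions.
final class BertPretrainingShapeSketch {
    static void sketch() {
        BertPretrainingBlock block =
                new BertPretrainingBlock(BertBlock.builder().nano().setTokenDictionarySize(30000));
        Shape ids = new Shape(2, 128); // tokenIds, typeIds and sequenceMasks share this shape
        Shape maskedIndices = new Shape(2, 20); // 20 masked positions per sequence
        Shape[] out = block.getOutputShapes(new Shape[] {ids, ids, ids, maskedIndices});
        // out[0] = (2, 2): next-sentence log probabilities
        // out[1] = (2, 20, 30000): log probabilities over the vocabulary per masked position
    }
}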
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/transformer/BertPretrainingLoss.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.transformer;
import ai.djl.ndarray.NDList;
import ai.djl.training.loss.AbstractCompositeLoss;
import ai.djl.util.Pair;
import java.util.Arrays;
/** Loss that combines the next sentence and masked language losses of bert pretraining. */
public class BertPretrainingLoss extends AbstractCompositeLoss {
private BertNextSentenceLoss bertNextSentenceLoss = new BertNextSentenceLoss(0, 0);
private BertMaskedLanguageModelLoss bertMaskedLanguageModelLoss =
new BertMaskedLanguageModelLoss(1, 2, 1);
/** Creates a loss combining the next sentence and masked language loss for bert pretraining. */
public BertPretrainingLoss() {
super(BertPretrainingLoss.class.getSimpleName());
this.components = Arrays.asList(bertNextSentenceLoss, bertMaskedLanguageModelLoss);
}
@Override
protected Pair<NDList, NDList> inputForComponent(
int componentIndex, NDList labels, NDList predictions) {
return new Pair<>(labels, predictions);
}
/**
* gets BertNextSentenceLoss.
*
* @return BertNextSentenceLoss
*/
public BertNextSentenceLoss getBertNextSentenceLoss() {
return bertNextSentenceLoss;
}
/**
* gets BertMaskedLanguageModelLoss.
*
* @return BertMaskedLanguageModelLoss
*/
public BertMaskedLanguageModelLoss getBertMaskedLanguageModelLoss() {
return bertMaskedLanguageModelLoss;
}
}
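// A sketch of the NDList layout this composite loss expects, derived from the index arguments
// above: labels = [nextSentenceLabels, maskedTargetIds, maskedPositionMask] and
// predictions = [nextSentenceLogProbs, maskedLmLogProbs], which is the order produced by
// BertPretrainingBlock. Wiring it into a training configuration is assumed to follow the usual
// DJL pattern of passing a Loss to DefaultTrainingConfig.
final class BertPretrainingLossSketch {
    static ai.djl.training.DefaultTrainingConfig trainingConfig() {
        // optimizer, devices and evaluators are omitted in this sketch
        return new ai.djl.training.DefaultTrainingConfig(new BertPretrainingLoss());
    }
}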
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/transformer/IdEmbedding.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.transformer;
import ai.djl.Device;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.AbstractBlock;
import ai.djl.nn.Block;
import ai.djl.nn.Parameter;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
import java.util.Collections;
/**
* An embedding from integer ids to float vectors. The output shape is the input shape plus one
* dimension for the embedding. E.g. if the input shape is (-1, 128) and the embedding size is
* 1024, then the output shape is (-1, 128, 1024).
*/
public final class IdEmbedding extends AbstractBlock {
private static final String EMBEDDING_PARAM_NAME = "embedding";
private int dictionarySize;
private int embeddingSize;
private Parameter embedding;
private IdEmbedding(Builder builder) {
this.dictionarySize = builder.dictionarySize;
this.embeddingSize = builder.embeddingSize;
this.embedding =
addParameter(
Parameter.builder()
.setName(EMBEDDING_PARAM_NAME)
.setType(Parameter.Type.WEIGHT)
.optShape(new Shape(dictionarySize, embeddingSize))
.build());
}
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(Shape[] inputShapes) {
return new Shape[] {inputShapes[0].addAll(new Shape(embeddingSize))};
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore ps, NDList inputs, boolean training, PairList<String, Object> params) {
NDArray input = inputs.singletonOrThrow();
try (NDManager scope = NDManager.subManagerOf(input)) {
// for info on the right shapes, see: http://beta.mxnet.io/r/api/mx.symbol.gather_nd.html
NDArray ids = input.flatten().reshape(1, input.getShape().size());
// create the embedding Table
NDArray embeddingTable = ps.getValue(embedding, ids.getDevice(), training);
scope.tempAttachAll(embeddingTable);
// We do not perform a sparse lookup; instead we gather the rows directly from the dense table
NDArray result = embeddingTable.gatherNd(ids);
result.attach(inputs.getManager());
// we want the original shape of the input + the last dimension of the embedding
Shape targetShape =
input.getShape().addAll(new Shape(embeddingTable.getShape().get(1)));
return new NDList(result.reshape(targetShape));
}
}
/**
* Turns an array of embeddings of shape (d0 ... dN, E) into an array of log probabilities of
* shape (d0 ... dN, D) that shows the probability distribution that a given embedding
* corresponds to an entry in the internal embedding table.
*
* @param parameterStore the parameters store
* @param input the embeddings to create log probabilities for
* @param training true for a training forward pass
* @return log probabilities for each embedding
*/
public NDArray probabilities(ParameterStore parameterStore, NDArray input, boolean training) {
// reshape input into a matrix
NDArray asMatrix = input.reshape(-1, embeddingSize);
// get embedding table
NDArray embeddingTableTransposed =
parameterStore.getValue(embedding, input.getDevice(), training).transpose();
embeddingTableTransposed.attach(input.getManager());
// Create raw logits by taking the scalar product of the tokens and the embedding table
NDArray logitsFlat = asMatrix.dot(embeddingTableTransposed);
// turn the logits into log probabilities
NDArray logProbsFlat = logitsFlat.logSoftmax(1);
// turn probs back into original shape
Shape targetShape =
input.getShape()
.slice(0, input.getShape().dimension() - 1)
.addAll(new Shape(dictionarySize));
return logProbsFlat.reshape(targetShape);
}
/**
* Quick hack for the bert model to access the embedding table; replace by a proper function to
* calculate raw logits from embeddings. TODO: replace by function to get logits
*
* @param ps the parameter store
* @param device device to get internal table for
* @param training true for a training forward pass
* @return this embedding table as an array on the given device
*/
public NDArray getValue(ParameterStore ps, Device device, boolean training) {
return ps.getValue(embedding, device, training);
}
/** {@inheritDoc} */
@Override
public void initializeChildBlocks(NDManager manager, DataType dataType, Shape... inputShapes) {
inputNames = Collections.singletonList("tokenIds");
// nothing else to do, we have no child blocks
}
/** The Builder to construct an {@link IdEmbedding} type of {@link Block}. */
public static final class Builder {
private int dictionarySize;
private int embeddingSize;
/**
* Sets the number of ids that should be embedded. Valid ids are 0 to dictionarySize - 1.
*
* @param dictionarySize the number of ids that should be embedded. Valid ids are 0 to
* dictionarySize - 1.
* @return this builder
*/
public Builder setDictionarySize(final int dictionarySize) {
this.dictionarySize = dictionarySize;
return this;
}
/**
* Sets the size of the embeddings.
*
* @param embeddingSize the size of the embeddings.
* @return this builder
*/
public Builder setEmbeddingSize(final int embeddingSize) {
this.embeddingSize = embeddingSize;
return this;
}
/**
* Builds the {@link IdEmbedding}.
*
* @return the constructed {@code IdEmbedding}
* @throws IllegalArgumentException if all required parameters (items, embeddingSize) have
* not been set
*/
public IdEmbedding build() {
if (dictionarySize <= 0) {
throw new IllegalArgumentException(
"You must specify the dictionary Size for the embedding.");
}
if (embeddingSize == 0) {
throw new IllegalArgumentException("You must specify the embedding size");
}
return new IdEmbedding(this);
}
}
}
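// A minimal usage sketch for the embedding above: build a table and check the shape rule from
// the class comment. Dictionary size, embedding size and the input shape are illustrative
// assumptions.
final class IdEmbeddingUsageSketch {
    static void sketch() {
        IdEmbedding embedding =
                new IdEmbedding.Builder().setDictionarySize(30000).setEmbeddingSize(256).build();
        Shape[] out = embedding.getOutputShapes(new Shape[] {new Shape(4, 128)});
        // out[0] = (4, 128, 256): the input shape plus one dimension for the embedding
    }
}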
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/transformer/PointwiseFeedForwardBlock.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.transformer;
import ai.djl.ndarray.NDList;
import ai.djl.nn.LambdaBlock;
import ai.djl.nn.SequentialBlock;
import ai.djl.nn.core.Linear;
import java.util.List;
import java.util.function.Function;
/** Fully connected Feed-Forward network, only applied to the last dimension of the input. */
public class PointwiseFeedForwardBlock extends SequentialBlock {
/**
* Creates a pointwise feed-forward block.
*
* @param hiddenSizes the sizes of the hidden layers
* @param outputSize the output size
* @param activationFunction the activation function to use for the hidden layers (not applied
* to output)
*/
@SuppressWarnings("this-escape")
public PointwiseFeedForwardBlock(
List<Integer> hiddenSizes,
int outputSize,
Function<NDList, NDList> activationFunction) {
// add hidden layers with activation
for (int hiddenSize : hiddenSizes) {
add(Linear.builder().optBias(true).setUnits(hiddenSize).build());
add(new LambdaBlock(activationFunction));
}
// add output layer without activation
add(Linear.builder().optBias(true).setUnits(outputSize).build());
}
}
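// A minimal usage sketch for the block above, mirroring the transformer feed-forward sub-layer:
// one hidden layer of 4 * embeddingSize and a projection back to embeddingSize. The sizes and
// the choice of GELU as the hidden activation are illustrative assumptions.
final class PointwiseFeedForwardSketch {
    static PointwiseFeedForwardBlock feedForward(int embeddingSize) {
        return new PointwiseFeedForwardBlock(
                java.util.Collections.singletonList(4 * embeddingSize), // hidden layer sizes
                embeddingSize, // output size, so the sub-layer preserves the embedding dimension
                ai.djl.nn.Activation::gelu); // applied to hidden layers only, not the output
    }
}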
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/transformer/ScaledDotProductAttentionBlock.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.transformer;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.AbstractBlock;
import ai.djl.nn.Block;
import ai.djl.nn.core.Linear;
import ai.djl.nn.norm.Dropout;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
/**
* A Block implementing scaled product attention according to <a
* href="https://arxiv.org/abs/1706.03762">Vaswani et. al.</a>.
*
* <p>Abbreviations used:
*
* <ul>
* <li>E = embedding size
* <li>B = batch size
* <li>N = number of attention heads
* <li>F = "from" sequence length (key/value sequence), the input sequence
* <li>T = "to" sequence length (query sequence), e.g. the length of the output sequence
* <li>S = a sequence length, either F or T
* <li>H = Attention head size (= E / N)
* </ul>
*
* <p>In many use cases F=T. For self attention, the same input serves as keys, queries and values.
*
* <p>This block can process input in four forms:
*
* <ul>
* <li>Input size one: [Values] = [(B, F, E)], only input is used as key, query and value
* (unmasked self attention), e.g. BERT
* <li>Input size two: [Values, Mask] = [(B, F, E), (B, F, F)], first input is used as key, query
* and value, masked self attention
* <li>Input size three: [Keys, Queries, Values] = [(B, F, E), (B, T, E), (B, F, E)], inputs are
* interpreted as keys, queries and values, unmasked attention
* <li>Input size four: [Keys, Queries, Values, Mask] = [(B, F, E), (B, T, E), (B, F, E), (B, T,
* F)], inputs are interpreted as keys, queries, values and an attention mask, full masked
* attention.
* </ul>
*
* <p>Attention masks must contain a 1 for positions to keep and a 0 for positions to mask.
*/
// We name local variables for tensor dimensions as in the paper and the reference code.
// While against the general code style, it makes things much easier to read here.
@SuppressWarnings({
"LocalVariableName",
"PMD.LocalVariableNamingConventions",
"ParameterName",
"PMD.FormalParameterNamingConventions"
})
public final class ScaledDotProductAttentionBlock extends AbstractBlock {
private static final byte VERSION = 1;
/** Size of the Word-/Token-embeddings we use the attention on. */
private int embeddingSize;
/** Number of attention heads. */
private int headCount;
/** Pointwise Linear projection of the keys. */
private Linear keyProjection;
/** Pointwise Linear projection of the queries. */
private Linear queryProjection;
/** Pointwise Linear projection of the values. */
private Linear valueProjection;
/** Pointwise Linear projection of the results. */
private Linear resultProjection;
/** Dropout operation to be applied after probability calculation. */
private Dropout attentionProbsDropout;
private ScaledDotProductAttentionBlock(Builder builder) {
super(VERSION);
this.embeddingSize = builder.embeddingSize;
this.headCount = builder.headCount;
this.keyProjection = addChildBlock("keyProjection", buildProjection());
this.queryProjection = addChildBlock("queryProjection", buildProjection());
this.valueProjection = addChildBlock("valueProjection", buildProjection());
this.resultProjection = addChildBlock("resultProjection", buildProjection());
this.attentionProbsDropout =
addChildBlock(
"probabilityDropout",
Dropout.builder().optRate(builder.attentionProbsDropoutProb).build());
}
/**
* Helper method to build a pointwise linear projection for the current embedding size.
*
* @return a linear projection with bias and an output size equal to the embedding size.
*/
private Linear buildProjection() {
return Linear.builder().setUnits(embeddingSize).optBias(true).build();
}
/**
* Pointwise Linear projection of the keys.
*
* @return Pointwise Linear projection of the keys.
*/
public Linear getKeyProjection() {
return keyProjection;
}
/**
* Pointwise Linear projection of the queries.
*
* @return Pointwise Linear projection of the queries.
*/
public Linear getQueryProjection() {
return queryProjection;
}
/**
* Pointwise Linear projection of the values.
*
* @return Pointwise Linear projection of the values.
*/
public Linear getValueProjection() {
return valueProjection;
}
/**
* Pointwise Linear projection of the results.
*
* @return Pointwise Linear projection of the results.
*/
public Linear getResultProjection() {
return resultProjection;
}
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(Shape[] inputShapes) {
        // Return shape is the shape of the query. For 2 or fewer inputs we have self-attention, i.e.
// the shape of the output is the shape of the input
if (inputShapes.length == 1 || inputShapes.length == 2) {
return new Shape[] {inputShapes[0]};
} else if (inputShapes.length == 3 || inputShapes.length == 4) {
// For attention with a dedicated query, the output shape is the query shape
return new Shape[] {inputShapes[1]};
} else {
throw new IllegalArgumentException(
"Invalid number of input shapes: " + inputShapes.length + ", must be 1-4.");
}
}
/** {@inheritDoc} */
@Override
public void initializeChildBlocks(NDManager manager, DataType dataType, Shape... inputShapes) {
// The lookups are fed reshaped input where the batch size is combined with the sequence
// length.
// The linear layers only care about the 2nd dimension, so we set the first to -1.
Shape projectionShape = new Shape(-1L, embeddingSize);
// We initialize the lookup with that reshaped input shape
for (Block projection : children.values()) {
projection.initialize(manager, DataType.FLOAT32, projectionShape);
}
}
/**
* Utility function to reshape and transpose an input of the shape (B, S, E) into (B, N, S, H).
*
* @param projection projected embeddings
* @param B batch size
* @param S sequence length
* @param N number of attention heads
* @param H size of attention heads
* @return the reshaped input
*/
private NDArray createAttentionHeadsFromEmbeddings(
NDArray projection, long B, long S, long N, long H) {
// Reshape projection to sequence & heads: (B, S, E) -> (B, S, N, H)
NDArray sequenceAndHeads = projection.reshape(B, S, N, H);
// Switch sequence idx & head index, so we have sequences of heads at the end
return sequenceAndHeads.transpose(0, 2, 1, 3);
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore parameterStore,
NDList inputs,
boolean training,
PairList<String, Object> params) {
// E=embedding size
long E = embeddingSize;
// B=batch size
long B = inputs.head().getShape().get(0);
// N=number of attention heads
long N = headCount;
// F=from sequence length
long F;
// T=to sequence length
long T;
// H=Attention head size (= E / N)
long H = E / N;
// Create key, query & value input based on input size
NDList flattenedKeyInput;
NDList flattenedQueryInput;
NDList flattenedValueInput;
NDArray attentionMask;
if (inputs.size() < 3) { // self attention, either masked or unmasked
F = inputs.head().getShape().get(1);
T = F;
flattenedKeyInput = new NDList(inputs.head());
flattenedQueryInput = flattenedKeyInput;
flattenedValueInput = flattenedKeyInput;
} else { // attention with separate key, query & value
F = inputs.get(0).getShape().get(1);
T = inputs.get(1).getShape().get(1);
flattenedKeyInput = new NDList(inputs.get(0));
flattenedQueryInput = new NDList(inputs.get(1));
flattenedValueInput = new NDList(inputs.get(2));
}
if (inputs.size() == 2 || inputs.size() == 4) { // we have an additional attention mask
attentionMask = inputs.get(inputs.size() - 1);
} else {
attentionMask = null;
}
// apply projection for key, query and value, preserves shape: (B, S, E)
NDList keys = keyProjection.forward(parameterStore, flattenedKeyInput, training, params);
NDList queries =
queryProjection.forward(parameterStore, flattenedQueryInput, training, params);
NDList values =
valueProjection.forward(parameterStore, flattenedValueInput, training, params);
// reshape to (B, N, S, H) to create separate attention heads
NDArray keyHeads = createAttentionHeadsFromEmbeddings(keys.head(), B, F, N, H);
NDArray queryHeads = createAttentionHeadsFromEmbeddings(queries.head(), B, T, N, H);
NDArray valueHeads = createAttentionHeadsFromEmbeddings(values.head(), B, F, N, H);
// Apply attention by multiplying the key and query vectors: (B, N, T, F)
        // (For each entry in the query sequence there is a weight for each entry in the key sequence)
NDArray attentionScores = queryHeads.matMul(keyHeads.transpose(0, 1, 3, 2));
// Normalize the scores with 1/sqrt(H)
NDArray normalizedAttentionScores =
attentionScores.mul(attentionScores.getManager().create(1f / (float) Math.sqrt(H)));
// Apply masking if requested, mask has shape (B, T, F)
if (attentionMask != null) {
NDArray maskOffset;
// The input mask is initially given as a list of integers with a 1 for each existing
// token. In order to apply it to the attention result, it needs to be expanded and the
// values turned into offsets for the softmax calculation. For stacked models, this
// can be done once and reused - hence we check for the number of dimensions if we
// have to do this locally or whether it was done for us.
if (attentionMask.getShape().dimension() != 4) {
// expand mask to be used on all heads at once
NDArray expandedMask = attentionMask.reshape(B, 1, T, F);
                // we turn the mask from ints into floats and turn all 1s into 0s and all
                // 0s into a value of -100000. Adding this to the scores will push all unwanted
                // values towards -inf and keep the unmasked values unchanged
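                // For example (illustrative only): a mask row of [1, 1, 0] becomes an offset row
                // of [0, 0, -100000], so the masked position's softmax probability is pushed to
                // (numerically) zero while the kept positions stay unchanged.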
maskOffset =
expandedMask
.toType(DataType.FLOAT32, false)
.mul(expandedMask.getManager().create(-1f)) // turn 1 into -1
.add(
expandedMask
.getManager()
.create(1f)) // turn 0s to 1s, -1s to 0s
.mul(
expandedMask
.getManager()
.create(-100000f)); // turn 1s (original 0s) into
// -100000
} else {
maskOffset = attentionMask;
}
// adding the mask to the scores removes the scores of unwanted positions
normalizedAttentionScores = normalizedAttentionScores.add(maskOffset);
}
// Then apply softmax to get a probability distribution, shape (B, N, T, F)
NDArray attentionProbs = normalizedAttentionScores.softmax(3);
// We apply dropout to the attention probabilities - this will remove entire tokens from the
// result of a position, as their probability will be set to 0
NDArray attentionProbsAfterDropout =
attentionProbsDropout
.forward(parameterStore, new NDList(attentionProbs), training)
.singletonOrThrow();
// The result of the attention mechanism is created by a weighted sum using the attention
// probs. The new head is the weighted sum of the value heads. (B, N, T, H)
NDArray attentionResult = attentionProbsAfterDropout.matMul(valueHeads);
// Finally, the heads are reshaped and concatenated into an embedding again
NDArray resultEmbeddings =
attentionResult // (B, N, T, H)
.transpose(0, 2, 1, 3) // -> (B, T, N, H)
.reshape(B, T, E); // -> (B, T, E)
// As a last step, we add another linear projection for each token to the embedding size
NDList projectedEmbeddings =
resultProjection.forward(parameterStore, new NDList(resultEmbeddings), training);
// done!
return new NDList(projectedEmbeddings);
}
/**
     * Creates a new Builder to construct a {@code ScaledDotProductAttentionBlock}.
     *
     * @return a new Builder to construct a {@code ScaledDotProductAttentionBlock}
*/
public static Builder builder() {
return new Builder();
}
/** A builder for {@link ScaledDotProductAttentionBlock}s. */
public static final class Builder {
private int embeddingSize;
private int headCount;
private float attentionProbsDropoutProb = 0.1f;
private Builder() {}
/**
         * Sets the embedding size to be used for the internal token representation.
         *
         * @param embeddingSize the embedding size to be used for the internal token representation
* @return this builder
*/
public Builder setEmbeddingSize(int embeddingSize) {
this.embeddingSize = embeddingSize;
return this;
}
/**
         * Sets the number of attention heads, which must divide the embedding size without
         * remainder. For example, if embeddingSize = 10, a headCount of 3 would not be valid, but
         * a headCount of 1, 2 or 5 would be.
         *
         * @param headCount the number of attention heads
* @return this builder
*/
public Builder setHeadCount(int headCount) {
this.headCount = headCount;
return this;
}
/**
* Sets the probability of applying dropout to the attention probability distribution. This
* dropout can randomly remove a complete token from the result at a position.
*
* @param attentionProbsDropoutProb the probability of applying dropout to the attention
* probability distribution
* @return this builder
*/
public Builder optAttentionProbsDropoutProb(float attentionProbsDropoutProb) {
this.attentionProbsDropoutProb = attentionProbsDropoutProb;
return this;
}
/**
* Creates a new {@code ScaledDotProductAttentionBlock} with the current configuration.
*
* @return a new {@code ScaledDotProductAttentionBlock} with the current configuration.
*/
public ScaledDotProductAttentionBlock build() {
if (embeddingSize < 1) {
throw new IllegalStateException("Embedding size not initialized.");
}
if (headCount < 1) {
throw new IllegalStateException("Head count not initialized.");
}
if (embeddingSize % headCount != 0) {
throw new IllegalStateException(
"Embedding Size ("
+ embeddingSize
+ ") is not divisible by head count ("
+ headCount
+ ")");
}
return new ScaledDotProductAttentionBlock(this);
}
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/transformer/TransformerEncoderBlock.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.nn.transformer;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import ai.djl.nn.AbstractBlock;
import ai.djl.nn.norm.BatchNorm;
import ai.djl.nn.norm.Dropout;
import ai.djl.training.ParameterStore;
import ai.djl.util.PairList;
import java.util.Collections;
import java.util.function.Function;
/** Self-Attention based transformer encoder block. */
public class TransformerEncoderBlock extends AbstractBlock {
/** The attention mechanism. */
private ScaledDotProductAttentionBlock selfAttentionBlock;
/** Dropout before residual & layer normalization. */
private Dropout selfAttentionDropout;
/** Normalization of attention output and residual. */
private BatchNorm attentionNorm;
/** Fully connected pointwise block for output projection. */
private PointwiseFeedForwardBlock pointWisefullyConnected;
/** Dropout after fully connected and before last residual & layer normalization. */
private Dropout fullyConnectedDropout;
/** Another normalization for the output and residual. */
private BatchNorm outputNorm;
/**
* Creates a transformer encoder block.
*
* @param embeddingSize the embedding size for tokens
     * @param headCount the number of attention heads
* @param hiddenSize the hidden size for fully connected networks
* @param dropoutProbability dropout probability
* @param activationFunction activation function
*/
@SuppressWarnings("this-escape")
public TransformerEncoderBlock(
int embeddingSize,
int headCount,
int hiddenSize,
float dropoutProbability,
Function<NDList, NDList> activationFunction) {
this.selfAttentionBlock =
addChildBlock(
"selfAttention",
ScaledDotProductAttentionBlock.builder()
.setEmbeddingSize(embeddingSize)
.setHeadCount(headCount)
.optAttentionProbsDropoutProb(dropoutProbability)
.build());
this.selfAttentionDropout = Dropout.builder().optRate(dropoutProbability).build();
this.attentionNorm = addChildBlock("attentionNorm", BatchNorm.builder().optAxis(2).build());
this.pointWisefullyConnected =
addChildBlock(
"outputBlock",
new PointwiseFeedForwardBlock(
Collections.singletonList(hiddenSize),
embeddingSize,
activationFunction));
this.fullyConnectedDropout = Dropout.builder().optRate(dropoutProbability).build();
this.outputNorm = addChildBlock("outputNorm", BatchNorm.builder().optAxis(2).build());
}
/** {@inheritDoc} */
@Override
public Shape[] getOutputShapes(Shape[] inputShapes) {
return inputShapes;
}
/** {@inheritDoc} */
@Override
public void initializeChildBlocks(NDManager manager, DataType dataType, Shape... inputShapes) {
selfAttentionBlock.initialize(manager, dataType, inputShapes);
attentionNorm.initialize(manager, dataType, inputShapes);
pointWisefullyConnected.initialize(manager, dataType, inputShapes);
outputNorm.initialize(manager, dataType, inputShapes);
}
/** {@inheritDoc} */
@Override
protected NDList forwardInternal(
ParameterStore ps, NDList inputs, boolean training, PairList<String, Object> params) {
NDArray embedding = inputs.head();
// perform attention lookup
NDList attentionOutput = selfAttentionBlock.forward(ps, inputs, training);
// add dropout to attention Output
NDList attentionOutputAfterDropout =
selfAttentionDropout.forward(ps, attentionOutput, training);
// add input as residual
NDArray withResidual = attentionOutputAfterDropout.singletonOrThrow().add(embedding);
// apply normalization
NDList normalized = attentionNorm.forward(ps, new NDList(withResidual), training);
// apply pointwise projection
NDList afterFullyConnected = pointWisefullyConnected.forward(ps, normalized, training);
// apply dropout to fully connected output
NDList afterFullyConnectedDropout =
fullyConnectedDropout.forward(ps, afterFullyConnected, training);
// add residual again
NDList outputWithResidual =
new NDList(afterFullyConnectedDropout.singletonOrThrow().add(embedding));
// normalize result
return outputNorm.forward(ps, new NDList(outputWithResidual), training);
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn
|
java-sources/ai/djl/api/0.34.0/ai/djl/nn/transformer/package-info.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
/** Contains blocks for transformer models. */
package ai.djl.nn.transformer;
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/repository/AbstractRepository.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.repository;
import ai.djl.util.Hex;
import ai.djl.util.Progress;
import ai.djl.util.TarUtils;
import ai.djl.util.Utils;
import ai.djl.util.ZipUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.net.URLDecoder;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.security.DigestInputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.zip.GZIPInputStream;
import java.util.zip.ZipInputStream;
/**
* The {@code AbstractRepository} is the shared base for implementers of the {@link Repository}
* interface.
*
* @see Repository
*/
public abstract class AbstractRepository implements Repository {
private static final Logger logger = LoggerFactory.getLogger(AbstractRepository.class);
protected String name;
protected URI uri;
protected Map<String, String> arguments;
protected AbstractRepository(String name, URI uri) {
this.name = name;
this.uri = uri;
arguments = parseQueryString(uri);
}
/** {@inheritDoc} */
@Override
public String getName() {
return name;
}
/** {@inheritDoc} */
@Override
public URI getBaseUri() {
return uri;
}
/** {@inheritDoc} */
@Override
public InputStream openStream(Artifact.Item item, String path) throws IOException {
return new BufferedInputStream(Files.newInputStream(Paths.get(resolvePath(item, path))));
}
/** {@inheritDoc} */
@Override
public String[] listDirectory(Artifact.Item item, String path) throws IOException {
return Paths.get(resolvePath(item, path)).toFile().list();
}
/** {@inheritDoc} */
@Override
public Path getFile(Artifact.Item item, String path) throws IOException {
return Paths.get(resolvePath(item, path)).toAbsolutePath();
}
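    /**
     * Resolves the physical location of an {@link Artifact.Item}, either inside the local cache
     * directory (for remote repositories or absolute item URIs) or relative to the repository's
     * base URI.
     *
     * @param item the item to resolve
     * @param path the relative path within a "dir" type item
     * @return the resolved URI of the item
     * @throws IOException if the cache directory cannot be accessed
     */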
protected URI resolvePath(Artifact.Item item, String path) throws IOException {
Artifact artifact = item.getArtifact();
URI artifactUri = artifact.getResourceUri();
String itemUri = item.getUri();
// Resolve cached item
if (itemUri != null && URI.create(itemUri).isAbsolute() || isRemote()) {
Path cacheDir = getCacheDirectory();
Path resourceDir = cacheDir.resolve(artifactUri.getPath());
String type = item.getType();
String fileName = item.getName();
Path cachedFile;
if ("dir".equals(type)) {
if (!fileName.isEmpty()) {
cachedFile = resourceDir.resolve(fileName);
} else {
cachedFile = resourceDir;
}
return cachedFile.resolve(path).toUri();
} else {
return resourceDir.resolve(fileName).toUri();
}
}
// Resolve metadata item
String uriSuffix = itemUri != null ? itemUri : item.getName();
return getBaseUri().resolve(artifactUri.resolve(uriSuffix));
}
/** {@inheritDoc} */
@Override
public void prepare(Artifact artifact, Progress progress) throws IOException {
Path resourceDir = getResourceDirectory(artifact);
if (Files.exists(resourceDir)) {
logger.debug("Files have been downloaded already: {}", resourceDir);
return;
}
Metadata metadata = artifact.getMetadata();
URI baseUri = metadata.getRepositoryUri();
Map<String, Artifact.Item> files = artifact.getFiles();
Path parentDir = resourceDir.toAbsolutePath().getParent();
if (parentDir == null) {
throw new AssertionError("Parent path should never be null: " + resourceDir);
}
Files.createDirectories(parentDir);
Path tmp = Files.createTempDirectory(parentDir, resourceDir.toFile().getName());
if (progress != null) {
long totalSize = 0;
for (Artifact.Item item : files.values()) {
totalSize += item.getSize();
}
progress.reset("Downloading", totalSize);
}
try {
logger.debug("Items to download: {}", files.size());
for (Artifact.Item item : files.values()) {
download(tmp, baseUri, item, progress);
}
Utils.moveQuietly(tmp, resourceDir);
} finally {
Utils.deleteQuietly(tmp);
if (progress != null) {
progress.end();
}
}
}
/** {@inheritDoc} */
@Override
public Path getCacheDirectory() throws IOException {
Path dir = Utils.getCacheDir().resolve("cache/repo");
if (Files.notExists(dir)) {
Files.createDirectories(dir);
} else if (!Files.isDirectory(dir)) {
throw new IOException("Failed initialize cache directory: " + dir);
}
return dir;
}
/** {@inheritDoc} */
@Override
public void addResource(MRL mrl) {
throw new IllegalArgumentException(
getClass().getSimpleName() + " doesn't support addResource.");
}
protected void download(Path tmp, URI baseUri, Artifact.Item item, Progress progress)
throws IOException {
URI fileUri = URI.create(item.getUri());
if (!fileUri.isAbsolute()) {
fileUri = getBaseUri().resolve(baseUri).resolve(fileUri);
}
logger.debug("Downloading artifact: {} ...", fileUri);
try (InputStream is = new BufferedInputStream(fileUri.toURL().openStream())) {
save(is, tmp, item, progress);
}
}
protected void save(InputStream is, Path tmp, Artifact.Item item, Progress progress)
throws IOException {
ProgressInputStream pis = new ProgressInputStream(is, progress);
String fileName = item.getName();
String extension = item.getExtension();
if ("dir".equals(item.getType())) {
Path dir;
if (!fileName.isEmpty()) {
                // honor the name set in metadata.json
dir = tmp.resolve(fileName);
Files.createDirectories(dir);
} else {
dir = tmp;
}
if ("zip".equals(extension)) {
ZipUtils.unzip(pis, dir);
} else if ("tgz".equals(extension)) {
TarUtils.untar(pis, dir, true);
} else if ("tar".equals(extension)) {
TarUtils.untar(pis, dir, false);
} else {
throw new IOException("File type is not supported: " + extension);
}
} else {
Path file = tmp.resolve(fileName);
if ("zip".equals(extension)) {
ZipInputStream zis = new ZipInputStream(pis);
zis.getNextEntry();
Files.copy(zis, file, StandardCopyOption.REPLACE_EXISTING);
} else if ("gzip".equals(extension)) {
Files.copy(new GZIPInputStream(pis), file, StandardCopyOption.REPLACE_EXISTING);
} else {
Files.copy(pis, file, StandardCopyOption.REPLACE_EXISTING);
}
}
pis.validateChecksum(item);
}
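    /**
     * Parses the query string of the repository URI into a key/value map.
     *
     * <p>For example (illustrative), a URI ending in {@code ?model_name=resnet&artifact_id=traced_resnet}
     * yields a map containing {@code model_name=resnet} and {@code artifact_id=traced_resnet}.
     *
     * @param uri the repository URI
     * @return the decoded query parameters, or an empty map if there is no query string
     */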
private static Map<String, String> parseQueryString(URI uri) {
try {
Map<String, String> map = new ConcurrentHashMap<>();
String queryString = uri.getQuery();
if (queryString != null && !queryString.isEmpty()) {
String[] pairs = uri.getQuery().split("&");
for (String pair : pairs) {
String[] tokens = pair.split("=", 2);
if (tokens.length > 1) {
String key = URLDecoder.decode(tokens[0], "UTF-8");
String value = URLDecoder.decode(tokens[1], "UTF-8");
map.put(key, value);
}
}
}
return map;
} catch (UnsupportedEncodingException e) {
throw new AssertionError("Should not happen.", e);
}
}
/**
* A {@code ProgressInputStream} is a wrapper around an {@link InputStream} that also uses
* {@link Progress}.
*/
private static final class ProgressInputStream extends InputStream {
private DigestInputStream dis;
private Progress progress;
/**
* Constructs a new ProgressInputStream with an input stream and progress.
*
* @param is the input stream
* @param progress the (optionally null) progress tracker
*/
public ProgressInputStream(InputStream is, Progress progress) {
MessageDigest md;
try {
md = MessageDigest.getInstance("SHA1");
} catch (NoSuchAlgorithmException e) {
throw new AssertionError("SHA1 algorithm not found.", e);
}
dis = new DigestInputStream(is, md);
this.progress = progress;
}
/** {@inheritDoc} */
@Override
public int read() throws IOException {
int ret = dis.read();
if (progress != null) {
if (ret >= 0) {
progress.increment(1);
} else {
progress.end();
}
}
return ret;
}
/** {@inheritDoc} */
@Override
public int read(byte[] b, int off, int len) throws IOException {
int size = dis.read(b, off, len);
if (progress != null) {
progress.increment(size);
}
return size;
}
private void validateChecksum(Artifact.Item item) throws IOException {
String expectedHash = item.getSha1Hash();
if (expectedHash == null) {
return;
}
            // drain the InputStream to get the correct sha1 hash
Utils.toByteArray(dis);
String sha1 = Hex.toHexString(dis.getMessageDigest().digest());
if (!sha1.equalsIgnoreCase(item.getSha1Hash())) {
throw new IOException(
"Checksum error: "
+ item.getName()
+ ", expected sha1: "
+ item.getSha1Hash()
+ ", actual sha1: "
+ sha1);
}
}
/** {@inheritDoc} */
@Override
public void close() throws IOException {
dis.close();
}
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/repository/Artifact.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.repository;
import ai.djl.Application;
import ai.djl.util.JsonUtils;
import java.io.Serializable;
import java.net.URI;
import java.util.Collections;
import java.util.Comparator;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/**
* An {@code Artifact} is a set of data files such as a model or dataset.
*
* @see Repository
*/
@SuppressWarnings("PMD.LooseCoupling")
public class Artifact {
private transient String metadataVersion;
private String version;
private boolean snapshot;
private String name;
private Map<String, String> properties;
private Map<String, Object> arguments;
private Map<String, String> options;
private Map<String, Item> files;
private transient Metadata metadata;
private transient Version cache;
/**
* Returns the metadata format version.
*
* @return the metadata format version
*/
public String getMetadataVersion() {
return metadataVersion;
}
/**
* Sets the metadata format version.
*
* @param metadataVersion the new version
*/
public void setMetadataVersion(String metadataVersion) {
this.metadataVersion = metadataVersion;
}
/**
* Returns the artifact version.
*
* @return the artifact version
* @see Version
*/
public String getVersion() {
return version;
}
/**
* Sets the artifact version.
*
* @param version the new version
* @see Version
*/
public void setVersion(String version) {
this.version = version;
}
/**
* Returns true if the artifact is a snapshot.
*
* @return true if the artifact is a snapshot
* @see Version
*/
public boolean isSnapshot() {
return snapshot;
}
/**
* Sets if the artifact is a snapshot.
*
* @param snapshot true to make the artifact a snapshot
* @see Version
*/
public void setSnapshot(boolean snapshot) {
this.snapshot = snapshot;
}
/**
* Returns the artifact name.
*
* @return the artifact name
*/
public String getName() {
return name;
}
/**
* Sets the artifact name.
*
* @param name the new name
*/
public void setName(String name) {
this.name = name;
}
/**
* Returns the artifact properties.
*
* @return the artifact properties
* @see Repository
*/
public Map<String, String> getProperties() {
if (properties == null) {
return Collections.emptyMap();
}
return properties;
}
/**
* Sets the artifact properties.
*
* @param properties the new properties
* @see Repository
*/
public void setProperties(Map<String, String> properties) {
this.properties = properties;
}
/**
* Returns the artifact arguments.
*
* @param override the override configurations to the default arguments
* @return the artifact arguments
* @see Repository
*/
public Map<String, Object> getArguments(Map<String, Object> override) {
Map<String, Object> map = new ConcurrentHashMap<>();
if (arguments != null) {
map.putAll(arguments);
}
if (override != null) {
map.putAll(override);
}
if (!map.containsKey("application") && metadata != null) {
Application application = metadata.getApplication();
if (application != null && Application.UNDEFINED != application) {
map.put("application", application.getPath());
}
}
return map;
}
/**
* Returns the artifact arguments.
*
* @return the artifact arguments
*/
public Map<String, Object> getArguments() {
if (arguments == null) {
arguments = new ConcurrentHashMap<>();
}
return arguments;
}
/**
* Sets the artifact arguments.
*
* @param arguments the new arguments
* @see Repository
*/
public void setArguments(Map<String, Object> arguments) {
this.arguments = arguments;
}
/**
* Returns the artifact options.
*
* @param override the override options to the default options
* @return the artifact options
*/
public Map<String, String> getOptions(Map<String, String> override) {
Map<String, String> map = new ConcurrentHashMap<>();
if (options != null) {
map.putAll(options);
}
if (override != null) {
map.putAll(override);
}
return map;
}
/**
     * Sets the artifact options.
     *
     * @param options the new options
*/
public void setOptions(Map<String, String> options) {
this.options = options;
}
/**
* Returns the metadata containing this artifact.
*
* @return the metadata containing this artifact
* @see Repository
*/
public Metadata getMetadata() {
return metadata;
}
/**
* Sets the associated metadata.
*
* @param metadata the new metadata
* @see Repository
*/
public void setMetadata(Metadata metadata) {
this.metadata = metadata;
}
/**
* Returns the location of the resource directory.
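     *
     * <p>For example (illustrative), an artifact with version {@code 0.0.1} and name {@code
     * resnet18} resolves to {@code <repository-uri>/0.0.1/resnet18/}.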
*
* @return the location of the resource directory
*/
public URI getResourceUri() {
URI uri = metadata.getRepositoryUri();
if (version != null) {
uri = uri.resolve(version + '/');
}
if (name != null && !name.isEmpty()) {
uri = uri.resolve(name + '/');
}
return uri;
}
/**
* Returns all the file items in the artifact.
*
* @return all the file items in the artifact
*/
public Map<String, Item> getFiles() {
if (files == null) {
return Collections.emptyMap();
}
for (Map.Entry<String, Item> file : files.entrySet()) {
file.getValue().setArtifact(this);
}
return files;
}
/**
* Sets the file items.
*
* @param files the replacement file items
*/
public void setFiles(Map<String, Item> files) {
this.files = files;
}
/**
* Returns true if every filter matches the corresponding property.
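     *
     * <p>For example (illustrative), a filter of {@code layers=50} matches an artifact whose
     * properties contain {@code layers=50} (possibly among others), but not one where the
     * {@code layers} property is missing or has a different value.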
*
* @param filter the values to check against the properties
* @return true if every filter matches the corresponding property
* @see Repository
*/
public boolean hasProperties(Map<String, String> filter) {
if (filter == null || filter.isEmpty()) {
return true;
}
if (properties == null || properties.isEmpty()) {
return false;
}
for (Map.Entry<String, String> entry : filter.entrySet()) {
String key = entry.getKey();
String value = entry.getValue();
if (!value.equals(properties.get(key))) {
return false;
}
}
return true;
}
/**
* Returns the artifact version as a {@link Version}.
*
* @return the artifact version as a {@link Version}
* @see Version
*/
public Version getParsedVersion() {
if (cache == null) {
cache = new Version(version);
}
return cache;
}
/** {@inheritDoc} */
@Override
public String toString() {
StringBuilder sb = new StringBuilder(100);
if (metadata != null) {
sb.append(metadata.getGroupId())
.append('/')
.append(metadata.getArtifactId())
.append('/');
}
if (version != null) {
sb.append(version).append('/');
}
sb.append(name);
if (properties != null) {
sb.append(' ').append(JsonUtils.GSON.toJson(properties));
} else {
sb.append(" {}");
}
return sb.toString();
}
/** A file (possibly compressed) within an {@link Artifact}. */
public static final class Item {
private String uri;
private String sha1Hash;
private String name;
private String type;
private long size;
private transient String extension;
private transient Artifact artifact;
/**
* Returns the URI of the item.
*
* @return the URI of the item
*/
public String getUri() {
return uri;
}
/**
* Sets the URI of the item.
*
* @param uri the new URI
*/
public void setUri(String uri) {
this.uri = uri;
}
/**
* Returns the hash of the item.
*
* <p>This value is from the metadata, but should be checked when the item is downloaded.
*
* @return the sha1 hash
*/
public String getSha1Hash() {
return sha1Hash;
}
/**
         * Sets the sha1 hash of the item.
*
* @param sha1Hash the new hash
*/
public void setSha1Hash(String sha1Hash) {
this.sha1Hash = sha1Hash;
}
/**
         * Returns the type of the item.
*
* <p>The valid types are:
*
* <ul>
* <li>"file" - used for single files and gzip compressed files
* <li>"dir" - used for extracted zip folders
* </ul>
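         *
         * <p>For example (illustrative), an item whose URI ends in {@code .zip}, {@code .tar} or
         * {@code .tgz} defaults to {@code "dir"}, while any other extension defaults to {@code
         * "file"}.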
*
* @return the type string
*/
public String getType() {
if (type == null) {
getExtension();
if ("zip".equals(extension) || "tar".equals(extension) || "tgz".equals(extension)) {
type = "dir";
} else {
type = "file";
}
}
return type;
}
/**
* Sets the type of the item.
*
* @param type the type
* @see Item#getType()
*/
public void setType(String type) {
this.type = type;
}
/**
* Returns the file size.
*
* @return the file size in bytes
*/
public long getSize() {
return size;
}
/**
* Sets the file size.
*
* @param size the new size in bytes
*/
public void setSize(long size) {
this.size = size;
}
/**
* Returns the item name.
*
* @return the item name
*/
public String getName() {
if (name == null) {
int pos = uri.lastIndexOf('/');
if (pos >= 0) {
name = uri.substring(pos + 1);
} else {
name = uri;
}
name = FilenameUtils.getNamePart(name);
}
return name;
}
/**
* Sets the item name.
*
* @param name the new name
*/
public void setName(String name) {
this.name = name;
}
/**
* Returns the type of file extension.
*
* @return the type as "zip", "gzip", or "" for other
*/
public String getExtension() {
if (extension == null) {
extension = FilenameUtils.getFileType(uri);
}
return extension;
}
/**
* Sets the file extension.
*
* @param extension the new extension
*/
public void setExtension(String extension) {
this.extension = extension;
}
/**
* Returns the artifact associated with this item.
*
* @return the artifact
*/
public Artifact getArtifact() {
return artifact;
}
/**
* Sets the artifact associated with this item.
*
* @param artifact the new artifact
*/
public void setArtifact(Artifact artifact) {
this.artifact = artifact;
}
}
/** A {@link Comparator} to compare artifacts based on their version numbers. */
public static final class VersionComparator implements Comparator<Artifact>, Serializable {
private static final long serialVersionUID = 1L;
/** {@inheritDoc} */
@Override
public int compare(Artifact o1, Artifact o2) {
return o1.getParsedVersion().compareTo(o2.getParsedVersion());
}
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/repository/FilenameUtils.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.repository;
import java.util.Locale;
/** A class containing utility methods. */
public final class FilenameUtils {
private FilenameUtils() {}
/**
     * Returns the archive type of the file.
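     *
     * <p>For example (illustrative), {@code getFileType("model.tar.gz")} returns {@code "tgz"}
     * and {@code getFileType("synset.txt")} returns an empty string.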
*
* @param fileName the file name
* @return the type of the file
*/
public static String getFileType(String fileName) {
fileName = fileName.toLowerCase(Locale.ROOT);
if (fileName.endsWith(".zip") || fileName.endsWith(".mar")) {
return "zip";
} else if (fileName.endsWith(".tgz")
|| fileName.endsWith(".tar.gz")
|| fileName.endsWith(".tar.z")) {
return "tgz";
} else if (fileName.endsWith(".tar")) {
return "tar";
} else if (fileName.endsWith(".gz") || fileName.endsWith(".z")) {
return "gzip";
} else {
return "";
}
}
/**
     * Returns whether the file is an archive file.
     *
     * @param fileName the file name
     * @return true if the file is an archive file
*/
public static boolean isArchiveFile(String fileName) {
String fileType = getFileType(fileName);
return "tgz".equals(fileType) || "zip".equals(fileType) || "tar".equals(fileType);
}
/**
* Returns the name of the file without file extension.
*
* @param name the file name
* @return the name of the file without file extension
*/
public static String getNamePart(String name) {
String lowerCase = name.toLowerCase(Locale.ROOT);
if (lowerCase.endsWith(".tar.gz")) {
return name.substring(0, name.length() - 7);
} else if (name.endsWith(".tar.z")) {
return name.substring(0, name.length() - 6);
} else if (name.endsWith(".tgz")
|| name.endsWith(".zip")
|| name.endsWith(".tar")
|| name.endsWith(".mar")) {
return name.substring(0, name.length() - 4);
} else if (name.endsWith(".gz")) {
return name.substring(0, name.length() - 3);
} else if (name.endsWith(".z")) {
return name.substring(0, name.length() - 2);
}
return name;
}
/**
* Returns the file name extension of the file.
*
* @param fileName the file name
* @return the file name extension
*/
public static String getFileExtension(String fileName) {
int pos = fileName.lastIndexOf('.');
if (pos > 0) {
return fileName.substring(pos + 1);
}
return "";
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/repository/JarRepository.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.repository;
import ai.djl.Application;
import ai.djl.repository.zoo.DefaultModelZoo;
import ai.djl.util.Progress;
import ai.djl.util.Utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.nio.file.Path;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/**
 * A {@code JarRepository} is a {@link Repository} that contains an archive file from the
 * classpath.
*
* @see Repository
*/
public class JarRepository extends AbstractRepository {
    private static final Logger logger = LoggerFactory.getLogger(JarRepository.class);
private String artifactId;
private String modelName;
private String queryString;
private String originalUri;
private Metadata metadata;
private boolean resolved;
JarRepository(String name, URI uri, String fileName, URI realUri) {
super(name, uri);
this.uri = realUri;
queryString = uri.getRawQuery();
originalUri = uri.toString();
modelName = arguments.get("model_name");
artifactId = arguments.get("artifact_id");
if (artifactId == null) {
artifactId = fileName;
}
if (modelName == null) {
modelName = artifactId;
}
}
/** {@inheritDoc} */
@Override
public boolean isRemote() {
return true;
}
/** {@inheritDoc} */
@Override
public Metadata locate(MRL mrl) {
return getMetadata();
}
/** {@inheritDoc} */
@Override
public Artifact resolve(MRL mrl, Map<String, String> filter) {
List<Artifact> artifacts = locate(mrl).getArtifacts();
if (artifacts.isEmpty()) {
return null;
}
return artifacts.get(0);
}
/** {@inheritDoc} */
@Override
public List<MRL> getResources() {
Metadata m = getMetadata();
if (m != null && !m.getArtifacts().isEmpty()) {
MRL mrl = MRL.undefined(this, m.getGroupId(), m.getArtifactId());
return Collections.singletonList(mrl);
}
return Collections.emptyList();
}
/** {@inheritDoc} */
@Override
protected void download(Path tmp, URI baseUri, Artifact.Item item, Progress progress)
throws IOException {
logger.debug("Extracting artifact: {} ...", uri);
try (InputStream is = new BufferedInputStream(uri.toURL().openStream())) {
save(is, tmp, item, progress);
}
}
private synchronized Metadata getMetadata() {
if (resolved) {
return metadata;
}
resolved = true;
Artifact artifact = new Artifact();
artifact.setName(modelName);
artifact.getArguments().putAll(arguments);
Map<String, Artifact.Item> files = new ConcurrentHashMap<>();
Artifact.Item item = new Artifact.Item();
item.setUri(uri.getSchemeSpecificPart());
item.setName(""); // avoid creating extra folder
item.setArtifact(artifact);
item.setSize(-1);
files.put(artifactId, item);
artifact.setFiles(files);
metadata = new Metadata.MatchAllMetadata();
metadata.setArtifactId(artifactId);
metadata.setArtifacts(Collections.singletonList(artifact));
String hashKey;
if (Boolean.parseBoolean(arguments.get("ignore_real_uri"))) {
hashKey = originalUri;
} else {
hashKey = queryString == null ? uri.toString() : uri.toString() + queryString;
}
String hash = Utils.hash(hashKey);
MRL mrl = model(Application.UNDEFINED, DefaultModelZoo.GROUP_ID, hash);
metadata.setRepositoryUri(mrl.toURI());
return metadata;
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/repository/License.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.repository;
/**
* A {@code License} is a container to save the license information.
*
* @see Repository
*/
public class License {
private transient String id;
private String name;
private String url;
/**
* The default Apache License.
*
* @return Apache license
*/
public static License apache() {
License license = new License();
license.setName("The Apache License, Version 2.0");
license.setUrl("https://www.apache.org/licenses/LICENSE-2.0");
license.setId("apache");
return license;
}
/**
* Returns the name of the license.
*
* @return the name of the license
*/
public String getName() {
return name;
}
/**
* Sets the name of the license.
*
* @param name the name of the license
*/
public void setName(String name) {
this.name = name;
}
/**
* Returns the url of the license.
*
* @return the url of the license
*/
public String getUrl() {
return url;
}
/**
* Sets the url of the license.
*
* @param url the url of the license;
*/
public void setUrl(String url) {
this.url = url;
}
/**
* Sets the identifier of the license.
*
* @param id the identifier of the license.
*/
public void setId(String id) {
this.id = id;
}
/**
* Returns the identifier of the license.
*
* @return the identifier of the license
*/
public String getId() {
return id;
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/repository/LocalRepository.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.repository;
import ai.djl.Application;
import ai.djl.util.JsonUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.Reader;
import java.net.URI;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.stream.Stream;
/**
* A {@code LocalRepository} is a {@link Repository} located in a filesystem directory.
*
* @see Repository
*/
public class LocalRepository extends AbstractRepository {
private static final Logger logger = LoggerFactory.getLogger(LocalRepository.class);
private Path path;
/**
* (Internal) Constructs a {@code LocalRepository} from the path with inferred name.
*
* <p>Use {@link Repository#newInstance(String, String)}.
*
* @param name the name of the repository
* @param uri the base URI of the repository
* @param path the path to the repository
*/
protected LocalRepository(String name, URI uri, Path path) {
super(name, uri);
this.path = path;
}
/** {@inheritDoc} */
@Override
public boolean isRemote() {
return false;
}
/** {@inheritDoc} */
@Override
public Metadata locate(MRL mrl) throws IOException {
URI uri = mrl.toURI();
Path base = path.resolve(uri.getPath());
Path file = base.resolve("metadata.json");
if (!Files.isRegularFile(file)) {
return null;
}
try (Reader reader = Files.newBufferedReader(file)) {
Metadata metadata = JsonUtils.GSON_PRETTY.fromJson(reader, Metadata.class);
metadata.init(arguments);
metadata.setRepositoryUri(uri);
return metadata;
}
}
/** {@inheritDoc} */
@Override
public Artifact resolve(MRL mrl, Map<String, String> filter) throws IOException {
Metadata metadata = locate(mrl);
VersionRange range = VersionRange.parse(mrl.getVersion());
List<Artifact> artifacts = metadata.search(range, filter);
if (artifacts.isEmpty()) {
return null;
}
return artifacts.stream().max(Comparator.comparing(o -> new Version(o.getVersion()))).get();
}
/** {@inheritDoc} */
@Override
public List<MRL> getResources() {
List<MRL> list = new ArrayList<>();
try (Stream<Path> stream = Files.walk(path)) {
stream.forEach(
f -> {
if (f.endsWith("metadata.json") && Files.isRegularFile(f)) {
Path relative = path.relativize(f);
String type = relative.getName(0).toString();
try (Reader reader = Files.newBufferedReader(f)) {
Metadata metadata = JsonUtils.GSON.fromJson(reader, Metadata.class);
Application application = metadata.getApplication();
String groupId = metadata.getGroupId();
String artifactId = metadata.getArtifactId();
if ("dataset".equals(type)) {
list.add(dataset(application, groupId, artifactId));
} else if ("model".equals(type)) {
list.add(model(application, groupId, artifactId));
}
} catch (IOException e) {
logger.warn("Failed to read metadata.json", e);
}
}
});
} catch (IOException e) {
logger.warn("", e);
}
return list;
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/repository/MRL.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.repository;
import ai.djl.Application;
import ai.djl.util.Progress;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.URI;
import java.nio.file.Files;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
* The {@code MRL} (Machine learning Resource Locator) is a pointer to a {@link Metadata} "resource"
* on a machine learning {@link Repository}.
*
 * <p>Each mrl references a single metadata file (parsed to {@link Metadata}) and the collection
 * of artifacts located within it. Those artifacts all share the same groupId and artifactId, but
 * can differ based on the name and properties.
 *
 * <p>The mrl consists of four different properties:
*
* <ul>
* <li>type - The resource type, e.g. model or dataset.
* <li>application - The resource application (See {@link Application}).
* <li>groupId - The group id identifies the group publishing the artifacts using a reverse domain
* name system.
* <li>artifactId - The artifact id identifies the different artifacts published by a single
* group.
* </ul>
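 *
 * <p>For example (illustrative values), a model mrl with groupId {@code ai.djl.zoo} and
 * artifactId {@code resnet} resolves via {@link #toURI()} to a relative URI of the form {@code
 * model/<application-path>/ai/djl/zoo/resnet/}.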
*/
public final class MRL {
private static final Logger logger = LoggerFactory.getLogger(MRL.class);
private String type;
private Application application;
private String groupId;
private String artifactId;
private String version;
private String artifactName;
private Repository repository;
private Metadata metadata;
/**
* Constructs an MRL.
*
* @param repository the {@link Repository}
* @param type the resource type
* @param application the resource application
* @param groupId the desired groupId
* @param artifactId the desired artifactId
* @param version the resource version
* @param artifactName the desired artifact name
*/
private MRL(
Repository repository,
String type,
Application application,
String groupId,
String artifactId,
String version,
String artifactName) {
this.repository = repository;
this.type = type;
this.application = application;
this.groupId = groupId;
this.artifactId = artifactId;
this.version = version;
this.artifactName = artifactName;
}
/**
* Creates a model {@code MRL} with specified application.
*
* @param repository the {@link Repository}
* @param application the desired application
* @param groupId the desired groupId
* @param artifactId the desired artifactId
* @param version the resource version
* @param artifactName the desired artifact name
* @return a model {@code MRL}
*/
public static MRL model(
Repository repository,
Application application,
String groupId,
String artifactId,
String version,
String artifactName) {
return new MRL(
repository, "model", application, groupId, artifactId, version, artifactName);
}
/**
* Creates a dataset {@code MRL} with specified application.
*
* @param repository the {@link Repository}
* @param application the desired application
* @param groupId the desired groupId
* @param artifactId the desired artifactId
* @param version the resource version
* @return a dataset {@code MRL}
*/
public static MRL dataset(
Repository repository,
Application application,
String groupId,
String artifactId,
String version) {
return new MRL(repository, "dataset", application, groupId, artifactId, version, null);
}
/**
     * Creates an {@code MRL} with an undefined resource type and application.
     *
     * @param repository the {@link Repository}
     * @param groupId the desired groupId
     * @param artifactId the desired artifactId
     * @return an undefined {@code MRL}
*/
public static MRL undefined(Repository repository, String groupId, String artifactId) {
return new MRL(repository, "", Application.UNDEFINED, groupId, artifactId, null, null);
}
/**
* Returns the URI to the metadata location (used for {@link Repository} implementations).
*
* @return the URI to the metadata location
*/
public URI toURI() {
StringBuilder sb = new StringBuilder();
if (!type.isEmpty()) {
sb.append(type).append('/');
}
sb.append(application.getPath())
.append('/')
.append(groupId.replace('.', '/'))
.append('/')
.append(artifactId)
.append('/');
return URI.create(sb.toString());
}
/**
* Returns the repository.
*
* @return the repository
*/
public Repository getRepository() {
return repository;
}
/**
* Returns the application.
*
* @return the application
*/
public Application getApplication() {
return application;
}
/**
* Returns the groupId.
*
* @return the groupId
*/
public String getGroupId() {
return groupId;
}
/**
* Returns the artifactId.
*
* @return the artifactId
*/
public String getArtifactId() {
return artifactId;
}
/**
* Returns the version.
*
* @return the version
*/
public String getVersion() {
return version;
}
/**
* Returns the default artifact.
*
* @return the default artifact
* @throws IOException for various exceptions depending on the specific dataset
*/
public Artifact getDefaultArtifact() throws IOException {
return repository.resolve(this, null);
}
/**
* Returns the first artifact that matches a given criteria.
*
* @param criteria the criteria to match against
* @return the first artifact that matches the criteria. Null will be returned if no artifact
* matches
* @throws IOException for errors while loading the model
*/
public Artifact match(Map<String, String> criteria) throws IOException {
List<Artifact> list = search(criteria);
if (list.isEmpty()) {
return null;
}
if (artifactName != null) {
for (Artifact artifact : list) {
if (artifactName.equals(artifact.getName())) {
return artifact;
}
}
return null;
}
return list.get(0);
}
/**
* Returns a list of artifacts in this resource.
*
* @return a list of artifacts in this resource
* @throws IOException for errors while loading the model
*/
public List<Artifact> listArtifacts() throws IOException {
return getMetadata().getArtifacts().stream()
.filter(a -> version == null || version.equals(a.getVersion()))
.collect(Collectors.toList());
}
/**
* Returns {@code true} if the artifact is ready for use.
*
* @param artifact the artifact to prepare
* @return {@code true} if the artifact is ready for use
* @throws IOException if it failed to prepare
*/
public boolean isPrepared(Artifact artifact) throws IOException {
return Files.exists(repository.getResourceDirectory(artifact));
}
/**
* Prepares the artifact for use.
*
* @param artifact the artifact to prepare
* @throws IOException if it failed to prepare
*/
public void prepare(Artifact artifact) throws IOException {
prepare(artifact, null);
}
/**
* Prepares the artifact for use with progress tracking.
*
* @param artifact the artifact to prepare
* @param progress the progress tracker
* @throws IOException if it failed to prepare
*/
public void prepare(Artifact artifact, Progress progress) throws IOException {
if (artifact != null) {
logger.debug("Preparing artifact: {}, {}", repository.getName(), artifact);
repository.prepare(artifact, progress);
}
}
/**
* Returns all the artifacts that match a given criteria.
*
* @param criteria the criteria to match against
* @return all the artifacts that match a given criteria
* @throws IOException for errors while loading the model
*/
private List<Artifact> search(Map<String, String> criteria) throws IOException {
return getMetadata().search(VersionRange.parse(version), criteria);
}
private Metadata getMetadata() throws IOException {
if (metadata == null) {
metadata = repository.locate(this);
if (metadata == null) {
throw new IOException(this + " resource not found.");
}
}
return metadata;
}
/** {@inheritDoc} */
@Override
public String toString() {
return "djl://" + groupId + '/' + artifactId;
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/repository/Metadata.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.repository;
import ai.djl.Application;
import ai.djl.repository.zoo.DefaultModelZoo;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
/**
* A {@code Metadata} is a collection of {@link Artifact}s with unified metadata (including {@link
* MRL}) that are stored in the same "metadata.json" file.
*
 * <p>All of the artifacts located within the metadata share the data defined at the metadata
 * level such as name, description, and website. The key difference between the artifacts within
 * the same metadata is the properties.
*
* @see Repository
*/
public class Metadata {
private String metadataVersion;
private String resourceType;
private String application;
protected String groupId;
protected String artifactId;
private String name;
private String description;
private String website;
protected Map<String, License> licenses;
protected List<Artifact> artifacts;
private Date lastUpdated;
private transient Application applicationClass;
private transient URI repositoryUri;
/**
* Returns the artifacts matching the version and property requirements.
*
* @param versionRange the version range for the artifact
* @param filter the property filter
* @return the matching artifacts
*/
public List<Artifact> search(VersionRange versionRange, Map<String, String> filter) {
List<Artifact> results = versionRange.matches(artifacts);
if (filter == null) {
return results;
}
return results.stream().filter(a -> a.hasProperties(filter)).collect(Collectors.toList());
}
/**
* Returns the metadata format version.
*
* @return the metadata format version
*/
public String getMetadataVersion() {
return metadataVersion;
}
/**
* Sets the metadata format version.
*
* @param metadataVersion the new version
*/
public void setMetadataVersion(String metadataVersion) {
this.metadataVersion = metadataVersion;
}
/**
* Returns the resource type.
*
* @return the resource type
*/
public String getResourceType() {
return resourceType;
}
/**
     * Sets the resource type.
*
* @param resourceType the resource type
*/
public void setResourceType(String resourceType) {
this.resourceType = resourceType;
}
/**
* Returns the groupId.
*
* @return the groupId
*/
public String getGroupId() {
return groupId;
}
/**
* Sets the groupId.
*
* @param groupId the new groupId
*/
public void setGroupId(String groupId) {
this.groupId = groupId;
}
/**
* Returns the artifactId.
*
* @return the artifactId
*/
public String getArtifactId() {
return artifactId;
}
/**
* Sets the artifactId.
*
* @param artifactId the new artifactId
*/
public void setArtifactId(String artifactId) {
this.artifactId = artifactId;
}
/**
* Returns the metadata-level name.
*
* @return the metadata-level name
*/
public String getName() {
return name;
}
/**
* Sets the metadata-level name.
*
* @param name the new metadata-level name
*/
public void setName(String name) {
this.name = name;
}
/**
* Returns the description.
*
* @return the description
*/
public String getDescription() {
return description;
}
/**
* Sets the description.
*
* @param description the description
*/
public void setDescription(String description) {
this.description = description;
}
/**
* Returns the website.
*
* @return the website
*/
public String getWebsite() {
return website;
}
/**
* Sets the website.
*
* @param website the website
*/
public void setWebsite(String website) {
this.website = website;
}
/**
* Returns the {@link Application}.
*
* @return the {@link Application}
*/
public Application getApplication() {
if (applicationClass == null && application != null) {
applicationClass = Application.of(application);
}
return applicationClass;
}
/**
* Sets the {@link Application}.
*
* @param application {@link Application}
*/
public final void setApplication(Application application) {
this.applicationClass = application;
this.application = application.getPath();
}
/**
* Returns the {@link License}.
*
* @return licenses in this metadata
*/
public Map<String, License> getLicenses() {
return licenses;
}
/**
* Sets the {@link License}.
*
* @param licenses {@link License}
*/
public void setLicense(Map<String, License> licenses) {
this.licenses = licenses;
}
/**
* Adds one {@link License}.
*
* @param license {@link License}
*/
public void addLicense(License license) {
if (licenses == null) {
licenses = new ConcurrentHashMap<>();
}
licenses.put(license.getId(), license);
}
/**
* Returns all the artifacts in the metadata.
*
* @return the artifacts in the metadata
*/
public List<Artifact> getArtifacts() {
return artifacts;
}
/**
* Sets the artifacts for the metadata.
*
* @param artifacts the new artifacts
*/
public void setArtifacts(List<Artifact> artifacts) {
this.artifacts = artifacts;
for (Artifact artifact : artifacts) {
artifact.setMetadata(this);
}
}
/**
* Adds one artifact for the metadata.
*
* @param artifact the new artifact
*/
public void addArtifact(Artifact artifact) {
if (artifacts == null) {
artifacts = new ArrayList<>();
}
artifacts.add(artifact);
}
/**
* Returns the last update date for the metadata.
*
* @return the last update date
*/
public Date getLastUpdated() {
return lastUpdated;
}
/**
* Sets the last update date for the metadata.
*
* @param lastUpdated the new last update date
*/
public void setLastUpdated(Date lastUpdated) {
this.lastUpdated = lastUpdated;
}
/**
* Returns the URI to the repository storing the metadata.
*
* @return the URI to the repository storing the metadata
*/
public URI getRepositoryUri() {
return repositoryUri;
}
/**
* Sets the repository URI.
*
* @param repositoryUri the new URI
*/
public void setRepositoryUri(URI repositoryUri) {
this.repositoryUri = repositoryUri;
}
/**
     * Restores the artifacts' state.
     *
     * <p>This call is required after the metadata is restored from JSON.
*
* @param arguments the override arguments
*/
public final void init(Map<String, String> arguments) {
if (artifacts != null) {
for (Artifact artifact : artifacts) {
artifact.setMetadata(this);
artifact.getArguments().putAll(arguments);
}
}
}
    /** A {@code Metadata} class that matches any search criteria. */
public static final class MatchAllMetadata extends Metadata {
/** Creates a {@code MatchAllMetadata} instance. */
public MatchAllMetadata() {
groupId = DefaultModelZoo.GROUP_ID;
artifacts = Collections.emptyList();
setApplication(Application.UNDEFINED);
}
/** {@inheritDoc} */
@Override
public List<Artifact> search(VersionRange versionRange, Map<String, String> filter) {
return getArtifacts();
}
}
}
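// Illustrative usage sketch: building a Metadata in memory and searching it by version range and
// property filter. The coordinates ("ai.djl.example", "resnet", "0.0.1", the "layers" property)
// are hypothetical examples, the Artifact setters used here (setVersion, setName, setProperties)
// are assumed to exist with these signatures, and the range syntax follows Maven-style ranges.
class MetadataSearchExample {
    public static void main(String[] args) {
        Metadata metadata = new Metadata();
        metadata.setGroupId("ai.djl.example");
        metadata.setArtifactId("resnet");

        Artifact artifact = new Artifact();
        artifact.setVersion("0.0.1");
        artifact.setName("resnet18");
        artifact.setProperties(Collections.singletonMap("layers", "18"));
        metadata.addArtifact(artifact);

        // Match any version from 0.0.1 upwards whose properties contain layers=18.
        VersionRange range = VersionRange.parse("[0.0.1,)");
        Map<String, String> filter = Collections.singletonMap("layers", "18");
        List<Artifact> matches = metadata.search(range, filter);
        System.out.println("Matched artifacts: " + matches.size());
    }
}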
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/repository/RemoteRepository.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.repository;
import ai.djl.util.JsonUtils;
import ai.djl.util.Utils;
import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.Reader;
import java.io.Writer;
import java.net.URI;
import java.nio.file.Files;
import java.nio.file.Path;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.Date;
import java.util.List;
import java.util.Map;
/**
* A {@code RemoteRepository} is a {@link Repository} located on a remote web server.
*
* @see Repository
*/
public class RemoteRepository extends AbstractRepository {
private static final long ONE_DAY = Duration.ofDays(1).toMillis();
private List<MRL> resources;
/**
* Constructs a remote repository.
*
* @param name the repository name
* @param uri the repository location
*/
public RemoteRepository(String name, URI uri) {
super(name, uri);
}
/** {@inheritDoc} */
@Override
public boolean isRemote() {
return true;
}
/** {@inheritDoc} */
@Override
public Metadata locate(MRL mrl) throws IOException {
URI mrlUri = mrl.toURI();
URI file = uri.resolve(mrlUri.getPath() + "/metadata.json");
Path cacheDir = getCacheDirectory().resolve(mrlUri.getPath());
if (!Files.exists(cacheDir)) {
Files.createDirectories(cacheDir);
}
Path cacheFile = cacheDir.resolve("metadata.json");
if (Files.exists(cacheFile)) {
try (Reader reader = Files.newBufferedReader(cacheFile)) {
Metadata metadata = JsonUtils.GSON_PRETTY.fromJson(reader, Metadata.class);
metadata.init(arguments);
Date lastUpdated = metadata.getLastUpdated();
if (Utils.isOfflineMode()
|| System.currentTimeMillis() - lastUpdated.getTime() < ONE_DAY) {
metadata.setRepositoryUri(mrlUri);
return metadata;
}
}
}
Path tmp = Files.createTempFile(cacheDir, "metadata", ".tmp");
try (InputStream is = new BufferedInputStream(file.toURL().openStream())) {
String json = Utils.toString(is);
Metadata metadata = JsonUtils.GSON_PRETTY.fromJson(json, Metadata.class);
metadata.setLastUpdated(new Date());
try (Writer writer = Files.newBufferedWriter(tmp)) {
writer.write(JsonUtils.GSON_PRETTY.toJson(metadata));
}
Utils.moveQuietly(tmp, cacheFile);
metadata.init(arguments);
metadata.setRepositoryUri(mrlUri);
return metadata;
} finally {
Utils.deleteQuietly(tmp);
}
}
/** {@inheritDoc} */
@Override
public Artifact resolve(MRL mrl, Map<String, String> filter) throws IOException {
Metadata metadata = locate(mrl);
VersionRange range = VersionRange.parse(mrl.getVersion());
List<Artifact> artifacts = metadata.search(range, filter);
if (artifacts.isEmpty()) {
return null;
}
return artifacts.stream().max(Comparator.comparing(o -> new Version(o.getVersion()))).get();
}
/** {@inheritDoc} */
@Override
public List<MRL> getResources() {
return resources == null ? Collections.emptyList() : resources;
}
/** {@inheritDoc} */
@Override
public void addResource(MRL mrl) {
if (resources == null) {
resources = new ArrayList<>();
}
resources.add(mrl);
}
}
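// Illustrative usage sketch: locating metadata through a remote repository. The URL points at the
// public DJL model repository; the groupId/artifactId coordinates are hypothetical placeholders.
// Note that locate() caches the downloaded metadata.json under the repository cache directory and
// refreshes it only after one day (the cached copy is always reused while offline mode is on).
class RemoteRepositoryExample {
    public static void main(String[] args) throws IOException {
        RemoteRepository repo = new RemoteRepository("zoo", URI.create("https://mlrepo.djl.ai/"));
        MRL mrl =
                repo.model(ai.djl.Application.CV.IMAGE_CLASSIFICATION, "ai.djl.zoo", "resnet");
        Metadata metadata = repo.locate(mrl);
        System.out.println("Artifacts found: " + metadata.getArtifacts().size());
    }
}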
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/repository/Repository.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.repository;
import ai.djl.Application;
import ai.djl.util.Progress;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.nio.file.Path;
import java.util.List;
import java.util.Map;
/**
* {@code Repository} is a format for storing data {@link Artifact}s for various uses including deep
* learning models and datasets.
*
 * <p>This repository format is based on the design of the Maven Repository format (See <a
* href="https://maven.apache.org/guides/introduction/introduction-to-repositories.html">maven</a>).
* Unlike in Maven, the data doesn't need to be located within the repository. Instead, the
* repository only stores metadata including the URL and checksum of the actual data. When the
* artifact is prepared, the data is downloaded, checked, and then stored in the {@code
* ~/.djl.ai/cache} folder.
*
* <p>The artifacts are first divided into a number of {@link Metadata} files that can each have
* multiple artifacts. The metadata files are identified by an {@link MRL} which contains:
*
* <ul>
* <li>type - The resource type, e.g. model or dataset.
* <li>Application - The resource application (See {@link Application}).
* <li>Group Id - The group id identifies the group publishing the artifacts using a reverse
* domain name system.
* <li>Artifact Id - The artifact id identifies the different artifacts published by a single
* group.
* </ul>
*
* <p>Within each metadata are a number of artifacts that share the same groupId, artifactId, name,
* description, website, and update date. The artifacts within the metadata differ primarily based
* on name and properties. Note that there is a metadata name and a separate artifact name. The
* properties are a map with string property names and string property values that can be used to
* represent key differentiators between artifacts such as dataset, flavors, and image sizes. For
* example, you might have a ResNet metadata file with different artifacts to represent different
* hyperparameters and datasets used for training the ResNet.
*
* <p>Each artifact contains a {@link Version} number (which can be a snapshot version). The data in
* the artifacts are represented by files in the format of an {@link Artifact.Item} and a parsed
 * JSON object of arguments. The files can either be a single file, an automatically extracted gzip
* file, or an automatically extracted zip file that will be treated as a directory. These can be
* used to store data such as the dataset, model parameters, and synset files. The arguments can be
* used to store data about the model used for initialization. For example, it can store the image
* size which can be used by the model loader for both initializing the block and setting up
* resizing in the translator.
*
* <p>There are three kinds of repositories: a {@link LocalRepository}, {@link RemoteRepository},
* and {@link SimpleRepository}. For all three kinds, new repositories should be created by calling
* {@link Repository#newInstance(String, String)} with the location of the repository.
*/
public interface Repository {
/**
     * Creates a new instance of a repository with a name and path.
*
* @param name the repository name
* @param path the repository location
* @return the new repository
*/
static Repository newInstance(String name, Path path) {
return RepositoryFactoryImpl.getFactory().newInstance(name, path.toUri());
}
/**
* Creates a new instance of a repository with a name and url.
*
* @param name the repository name
* @param url the repository location
* @return the new repository
*/
static Repository newInstance(String name, String url) {
return RepositoryFactoryImpl.getFactory().newInstance(name, URI.create(url));
}
/**
* Registers a {@link RepositoryFactory} to handle the specified url scheme.
*
* @param factory the {@link RepositoryFactory} to be registered
*/
static void registerRepositoryFactory(RepositoryFactory factory) {
RepositoryFactoryImpl.registerRepositoryFactory(factory);
}
/**
* Creates a model {@code MRL} with specified application.
*
* @param application the desired application
* @param groupId the desired groupId
* @param artifactId the desired artifactId
* @return a model {@code MRL}
*/
default MRL model(Application application, String groupId, String artifactId) {
return model(application, groupId, artifactId, null, null);
}
/**
* Creates a model {@code MRL} with specified application.
*
* @param application the desired application
* @param groupId the desired groupId
* @param artifactId the desired artifactId
* @param version the resource version
* @return a model {@code MRL}
*/
default MRL model(Application application, String groupId, String artifactId, String version) {
return MRL.model(this, application, groupId, artifactId, version, null);
}
/**
* Creates a model {@code MRL} with specified application.
*
* @param application the desired application
* @param groupId the desired groupId
* @param artifactId the desired artifactId
* @param version the resource version
* @param artifactName the desired artifact name
* @return a model {@code MRL}
*/
default MRL model(
Application application,
String groupId,
String artifactId,
String version,
String artifactName) {
return MRL.model(this, application, groupId, artifactId, version, artifactName);
}
/**
* Creates a dataset {@code MRL} with specified application.
*
* @param application the desired application
* @param groupId the desired groupId
* @param artifactId the desired artifactId
* @return a dataset {@code MRL}
*/
default MRL dataset(Application application, String groupId, String artifactId) {
return dataset(application, groupId, artifactId, null);
}
/**
* Creates a dataset {@code MRL} with specified application.
*
* @param application the desired application
* @param groupId the desired groupId
* @param artifactId the desired artifactId
* @param version the resource version
* @return a dataset {@code MRL}
*/
default MRL dataset(
Application application, String groupId, String artifactId, String version) {
return MRL.dataset(this, application, groupId, artifactId, version);
}
/**
     * Returns whether the repository is a remote repository.
     *
     * @return whether the repository is a remote repository
*/
boolean isRemote();
/**
* Returns the repository name.
*
* @return the repository name
*/
String getName();
/**
* Returns the URI to the base of the repository.
*
* @return the URI
*/
URI getBaseUri();
/**
     * Returns the metadata at an MRL.
*
* @param mrl the mrl of the metadata to retrieve
* @return the metadata
* @throws IOException if it failed to load the metadata
*/
Metadata locate(MRL mrl) throws IOException;
/**
     * Returns the artifact matching an MRL, version, and property filter.
*
* @param mrl the mrl to match the artifact against
* @param filter the property filter
* @return the matched artifact
* @throws IOException if it failed to load the artifact
*/
Artifact resolve(MRL mrl, Map<String, String> filter) throws IOException;
/**
* Returns an {@link InputStream} for an item in a repository.
*
* @param item the item to open
* @param path the path to a file if the item is a zipped directory. Otherwise, pass null
* @return the file stream
* @throws IOException if it failed to open the stream
*/
InputStream openStream(Artifact.Item item, String path) throws IOException;
/**
* Returns the path to a file for the item.
*
* @param item the item to find the path for
* @param path the path to a file if the item is a zipped directory. Otherwise, pass null
* @return the file path
* @throws IOException if it failed to find the path
*/
Path getFile(Artifact.Item item, String path) throws IOException;
/**
* Returns the list of files directly within a specified directory in a zipped directory item.
*
* @param item the zipped directory item
* @param path the path within the zip directory
* @return the list of files/directories
* @throws IOException if it failed to list the directory
*/
String[] listDirectory(Artifact.Item item, String path) throws IOException;
/**
* Prepares the artifact for use.
*
* @param artifact the artifact to prepare
* @throws IOException if it failed to prepare
*/
default void prepare(Artifact artifact) throws IOException {
prepare(artifact, null);
}
/**
* Prepares the artifact for use with progress tracking.
*
* @param artifact the artifact to prepare
* @param progress the progress tracker
* @throws IOException if it failed to prepare
*/
void prepare(Artifact artifact, Progress progress) throws IOException;
/**
* Returns the cache directory for the repository.
*
* @return the cache directory path
* @throws IOException if it failed to ensure the creation of the cache directory
*/
Path getCacheDirectory() throws IOException;
/**
     * Returns the resource directory for an artifact.
*
* @param artifact the artifact whose resource directory to return
* @return the resource directory path
* @throws IOException if it failed to ensure the creation of the cache directory
*/
default Path getResourceDirectory(Artifact artifact) throws IOException {
return getCacheDirectory().resolve(artifact.getResourceUri().getPath());
}
/**
* Returns a list of {@link MRL}s in the repository.
*
     * <p>An empty list will be returned if the underlying {@code Repository} implementation does not
* support this feature.
*
* @return a list of {@link MRL}s in the repository
*/
List<MRL> getResources();
/**
* Adds resource to the repository.
*
* @param mrl the resource to add
*/
void addResource(MRL mrl);
}
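// Illustrative usage sketch: creating a repository over a local directory and preparing an
// artifact from it. The directory path and the model coordinates ("ai.djl.localmodelzoo",
// "resnet") are hypothetical placeholders; adapt them to an actual repository layout.
class RepositoryUsageExample {
    public static void main(String[] args) throws IOException {
        Repository repo =
                Repository.newInstance("local", java.nio.file.Paths.get("/path/to/repo"));
        MRL mrl =
                repo.model(Application.CV.IMAGE_CLASSIFICATION, "ai.djl.localmodelzoo", "resnet");
        Artifact artifact = repo.resolve(mrl, null);
        if (artifact != null) {
            // Downloads (if remote), verifies, and unpacks the artifact files into the cache.
            repo.prepare(artifact);
            Path dir = repo.getResourceDirectory(artifact);
            System.out.println("Artifact prepared in " + dir);
        }
    }
}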
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/repository/RepositoryFactory.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.repository;
import java.net.URI;
import java.util.Set;
/** An interface responsible for creating {@link ai.djl.repository.Repository} instances. */
public interface RepositoryFactory {
/**
* Creates a new instance of a repository with a name and url.
*
* @param name the repository name
* @param uri the repository location
* @return the new repository
*/
Repository newInstance(String name, URI uri);
/**
     * Returns a set of URI schemes that the {@code RepositoryFactory} supports.
     *
     * @return a set of URI schemes that the {@code RepositoryFactory} supports
*/
Set<String> getSupportedScheme();
}
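// Illustrative sketch: a custom RepositoryFactory for a hypothetical "scratch" scheme that simply
// maps onto a directory under the system temp folder. The scheme name and directory mapping are
// assumptions for demonstration; a factory like this would typically be registered once at
// startup with Repository.registerRepositoryFactory(new ScratchRepositoryFactory()).
class ScratchRepositoryFactory implements RepositoryFactory {
    /** {@inheritDoc} */
    @Override
    public Repository newInstance(String name, URI uri) {
        // Back the scheme with a plain directory-based repository.
        java.nio.file.Path dir =
                java.nio.file.Paths.get(System.getProperty("java.io.tmpdir"), "djl-scratch");
        return Repository.newInstance(name, dir);
    }

    /** {@inheritDoc} */
    @Override
    public Set<String> getSupportedScheme() {
        return java.util.Collections.singleton("scratch");
    }
}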
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/repository/RepositoryFactoryImpl.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.repository;
import ai.djl.repository.zoo.ModelLoader;
import ai.djl.repository.zoo.ModelZoo;
import ai.djl.util.ClassLoaderUtils;
import ai.djl.util.JsonUtils;
import ai.djl.util.Utils;
import com.google.gson.JsonParseException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.Reader;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Collections;
import java.util.Locale;
import java.util.Map;
import java.util.ServiceLoader;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
class RepositoryFactoryImpl implements RepositoryFactory {
private static final Logger logger = LoggerFactory.getLogger(RepositoryFactoryImpl.class);
private static final RepositoryFactory FACTORY = new RepositoryFactoryImpl();
private static final Map<String, RepositoryFactory> REGISTRY = init();
private static final Pattern PATTERN = Pattern.compile("(.+)/([\\d.]+)(/(.*))?");
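    // The pattern above splits the path of a djl URL into artifactId, version, and an optional
    // artifact name. For example (illustrative coordinates), "djl://ai.djl.zoo/resnet/0.0.1/resnet50"
    // yields artifactId "resnet", version "0.0.1", and artifact name "resnet50", while
    // "djl://ai.djl.zoo/resnet" leaves version and artifact name unset.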
static RepositoryFactory getFactory() {
return FACTORY;
}
/** {@inheritDoc} */
@Override
public Repository newInstance(String name, URI uri) {
String scheme = uri.getScheme();
if (scheme == null) {
scheme = "file";
}
RepositoryFactory factory = REGISTRY.get(scheme);
if (factory != null) {
return factory.newInstance(name, uri);
}
try {
uri.toURL();
} catch (MalformedURLException e) {
throw new IllegalArgumentException("Malformed URL: " + uri, e);
}
if ("tfhub.dev".equals(uri.getHost().toLowerCase(Locale.ROOT))) {
// Handle tfhub case
String path = uri.getPath();
String query = "tf-hub-format=compressed";
try {
uri = new URI(uri.getScheme(), null, "tfhub.dev", uri.getPort(), path, query, null);
} catch (URISyntaxException e) {
throw new IllegalArgumentException("Failed to append query string: " + uri, e);
}
String[] tokens = path.split("/");
String modelName = tokens[tokens.length - 2];
return new SimpleUrlRepository(name, uri, modelName + ".tar.gz");
}
String[] paths = parseFilePath(uri).split("/");
String fileName = paths[paths.length - 1];
if (FilenameUtils.isArchiveFile(fileName)) {
return new SimpleUrlRepository(name, uri, fileName);
}
return new RpcRepository(name, uri);
}
/** {@inheritDoc} */
@Override
public Set<String> getSupportedScheme() {
return REGISTRY.keySet();
}
static void registerRepositoryFactory(RepositoryFactory factory) {
for (String scheme : factory.getSupportedScheme()) {
REGISTRY.put(scheme, factory);
}
}
private static Map<String, RepositoryFactory> init() {
Map<String, RepositoryFactory> registry = new ConcurrentHashMap<>();
registry.put("file", new LocalRepositoryFactory());
registry.put("jar", new JarRepositoryFactory());
registry.put("djl", new DjlRepositoryFactory());
if (S3RepositoryFactory.findS3Fuse() != null) {
registry.put("s3", new S3RepositoryFactory());
}
if (GcsRepositoryFactory.findGcsFuse() != null) {
registry.put("gs", new GcsRepositoryFactory());
}
ServiceLoader<RepositoryFactory> factories = ServiceLoader.load(RepositoryFactory.class);
for (RepositoryFactory factory : factories) {
for (String scheme : factory.getSupportedScheme()) {
registry.put(scheme, factory);
}
}
return registry;
}
static String parseFilePath(URI uri) {
String uriPath = uri.getPath();
if (uriPath == null) {
uriPath = uri.getSchemeSpecificPart();
}
if (uriPath.startsWith("file:")) {
// handle jar:file:/ url
uriPath = uriPath.substring(5);
}
if (uriPath.startsWith("/") && System.getProperty("os.name").startsWith("Win")) {
uriPath = uriPath.substring(1);
}
return uriPath;
}
private static String exec(String... cmd) throws IOException, InterruptedException {
Process exec = new ProcessBuilder(cmd).redirectErrorStream(true).start();
String logOutput;
try (InputStream is = exec.getInputStream()) {
logOutput = Utils.toString(is);
}
int exitCode = exec.waitFor();
if (0 != exitCode) {
logger.error("exit: {}, {}", exitCode, logOutput);
throw new IOException("Failed to execute: [" + String.join(" ", cmd) + "]");
} else {
logger.debug("{}", logOutput);
}
return logOutput;
}
private static boolean isMounted(String path) throws IOException, InterruptedException {
String out = exec("df");
String[] lines = out.split("\\s");
for (String line : lines) {
if (line.trim().equals(path)) {
logger.debug("Mount point already mounted");
return true;
}
}
return false;
}
private static final class JarRepositoryFactory implements RepositoryFactory {
/** {@inheritDoc} */
@Override
public Repository newInstance(String name, URI uri) {
String p = uri.getPath();
if (p.startsWith("/")) {
p = p.substring(1);
}
URL u = ClassLoaderUtils.getContextClassLoader().getResource(p);
if (u == null) {
throw new IllegalArgumentException("Resource not found: " + uri);
}
URI realUri;
try {
// resolve real uri: jar:file:/path/my_lib.jar!/model.zip
realUri = u.toURI();
} catch (URISyntaxException e) {
throw new IllegalArgumentException("Resource not found: " + uri, e);
}
Path path = Paths.get(parseFilePath(realUri));
String fileName = path.toFile().getName();
if (FilenameUtils.isArchiveFile(fileName)) {
fileName = FilenameUtils.getNamePart(fileName);
}
return new JarRepository(name, uri, fileName, realUri);
}
/** {@inheritDoc} */
@Override
public Set<String> getSupportedScheme() {
return Collections.singleton("jar");
}
}
private static final class LocalRepositoryFactory implements RepositoryFactory {
/** {@inheritDoc} */
@Override
public Repository newInstance(String name, URI uri) {
Path path = Paths.get(parseFilePath(uri));
if (Files.exists(path) && Files.isDirectory(path)) {
try {
if (Files.walk(path).anyMatch(f -> isLocalRepository(path, f))) {
logger.debug("Found local repository: {}", path);
return new LocalRepository(name, path.toUri(), path);
}
} catch (IOException e) {
logger.warn("Failed locate metadata.json file, defaulting to simple", e);
}
}
return new SimpleRepository(name, uri, path);
}
private boolean isLocalRepository(Path root, Path file) {
if (!Files.isRegularFile(file) || root.equals(file.getParent())) {
return false;
}
if (!"metadata.json".equals(file.toFile().getName())) {
return false;
}
try (Reader reader = Files.newBufferedReader(file)) {
Metadata metadata = JsonUtils.GSON.fromJson(reader, Metadata.class);
return metadata.getMetadataVersion() != null && metadata.getArtifacts() != null;
} catch (IOException | JsonParseException e) {
logger.warn("Invalid metadata.json file", e);
}
return false;
}
/** {@inheritDoc} */
@Override
public Set<String> getSupportedScheme() {
return Collections.singleton("file");
}
}
private static final class DjlRepositoryFactory implements RepositoryFactory {
/** {@inheritDoc} */
@Override
public Repository newInstance(String name, URI uri) {
String queryString = uri.getQuery();
URI djlUri;
if (queryString != null) {
djlUri = URI.create("https://mlrepo.djl.ai/?" + queryString);
} else {
djlUri = URI.create("https://mlrepo.djl.ai/");
}
RemoteRepository repo = new RemoteRepository(name, djlUri);
String groupId = uri.getHost();
if (groupId == null) {
throw new IllegalArgumentException("Invalid djl URL: " + uri);
}
String artifactId = parseFilePath(uri);
if (artifactId.startsWith("/")) {
artifactId = artifactId.substring(1);
}
if (artifactId.isEmpty()) {
throw new IllegalArgumentException("Invalid djl URL: " + uri);
}
String version = null;
String artifactName = null;
Matcher m = PATTERN.matcher(artifactId);
if (m.matches()) {
artifactId = m.group(1);
version = m.group(2);
artifactName = m.group(4);
}
ModelZoo zoo = ModelZoo.getModelZoo(groupId);
if (zoo == null) {
throw new IllegalArgumentException("ModelZoo not found in classpath: " + groupId);
}
ModelLoader loader = zoo.getModelLoader(artifactId);
if (loader == null) {
throw new IllegalArgumentException("Invalid djl URL: " + uri);
}
MRL mrl =
repo.model(loader.getApplication(), groupId, artifactId, version, artifactName);
repo.addResource(mrl);
return repo;
}
/** {@inheritDoc} */
@Override
public Set<String> getSupportedScheme() {
return Collections.singleton("djl");
}
}
static final class S3RepositoryFactory implements RepositoryFactory {
/** {@inheritDoc} */
@Override
public Repository newInstance(String name, URI uri) {
try {
Path path = mount(uri);
return new SimpleRepository(name, uri, path);
} catch (IOException | InterruptedException e) {
throw new IllegalArgumentException("Failed to mount s3 bucket", e);
}
}
/** {@inheritDoc} */
@Override
public Set<String> getSupportedScheme() {
return Collections.singleton("s3");
}
static String findS3Fuse() {
if (System.getProperty("os.name").startsWith("Win")) {
logger.debug("mount-s3 is not supported on Windows");
return null;
}
String gcsFuse = Utils.getEnvOrSystemProperty("MOUNT_S3", "/usr/bin/mount-s3");
if (Files.isRegularFile(Paths.get(gcsFuse))) {
return gcsFuse;
}
String path = System.getenv("PATH");
String[] directories = path.split(File.pathSeparator);
for (String dir : directories) {
Path file = Paths.get(dir, "mount-s3");
if (Files.isRegularFile(file)) {
return file.toAbsolutePath().toString();
}
}
return null;
}
private static Path mount(URI uri) throws IOException, InterruptedException {
String bucket = uri.getHost();
String prefix = uri.getPath();
if (!prefix.isEmpty()) {
prefix = prefix.substring(1);
}
Path dir = Utils.getCacheDir().toAbsolutePath().normalize();
dir = dir.resolve("s3").resolve(Utils.hash(uri.toString()));
String path = dir.toString();
if (Files.isDirectory(dir)) {
if (isMounted(path)) {
return dir.resolve(prefix);
}
} else {
Files.createDirectories(dir);
}
exec(findS3Fuse(), bucket, path);
return dir.resolve(prefix);
}
}
static final class GcsRepositoryFactory implements RepositoryFactory {
/** {@inheritDoc} */
@Override
public Repository newInstance(String name, URI uri) {
try {
Path path = mount(uri);
return new SimpleRepository(name, uri, path);
} catch (IOException | InterruptedException e) {
throw new IllegalArgumentException("Failed to mount gs bucket", e);
}
}
/** {@inheritDoc} */
@Override
public Set<String> getSupportedScheme() {
return Collections.singleton("gs");
}
static String findGcsFuse() {
if (System.getProperty("os.name").startsWith("Win")) {
logger.debug("gcsfuse is not supported on Windows");
return null;
}
String gcsFuse = Utils.getEnvOrSystemProperty("GCSFUSE", "/usr/bin/gcsfuse");
if (Files.isRegularFile(Paths.get(gcsFuse))) {
return gcsFuse;
}
String path = System.getenv("PATH");
String[] directories = path.split(File.pathSeparator);
for (String dir : directories) {
Path file = Paths.get(dir, "gcsfuse");
if (Files.isRegularFile(file)) {
return file.toAbsolutePath().toString();
}
}
return null;
}
private static Path mount(URI uri) throws IOException, InterruptedException {
String bucket = uri.getHost();
String prefix = uri.getPath();
if (!prefix.isEmpty()) {
prefix = prefix.substring(1);
}
Path dir = Utils.getCacheDir().toAbsolutePath().normalize();
dir = dir.resolve("gs").resolve(Utils.hash(uri.toString()));
String path = dir.toString();
if (Files.isDirectory(dir)) {
if (isMounted(path)) {
return dir.resolve(prefix);
}
} else {
Files.createDirectories(dir);
}
exec(findGcsFuse(), "--implicit-dirs", bucket, path);
return dir.resolve(prefix);
}
}
}
|
0
|
java-sources/ai/djl/api/0.34.0/ai/djl
|
java-sources/ai/djl/api/0.34.0/ai/djl/repository/Restriction.java
|
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.repository;
import java.util.Objects;
/**
* A {@code Restriction} is a set of bounds on a {@link Version} that form a {@link VersionRange}.
*/
class Restriction {
public static final Restriction EVERYTHING = new Restriction(null, false, null, false);
private Version lowerBound;
private boolean lowerBoundInclusive;
private Version upperBound;
private boolean upperBoundInclusive;
/**
* Constructs a Restriction from the bounds.
*
* @param lowerBound the lower bound
* @param lowerBoundInclusive true if the lower bound is inclusive and not exclusive
* @param upperBound the upper bound
* @param upperBoundInclusive true if the upper bound is inclusive and not exclusive
*/
public Restriction(
Version lowerBound,
boolean lowerBoundInclusive,
Version upperBound,
boolean upperBoundInclusive) {
this.lowerBound = lowerBound;
this.lowerBoundInclusive = lowerBoundInclusive;
this.upperBound = upperBound;
this.upperBoundInclusive = upperBoundInclusive;
}
/**
* Returns the lower bound version (inclusive/exclusive depends on {@link
* Restriction#isLowerBoundInclusive()}).
*
* @return the lower bound
*/
public Version getLowerBound() {
return lowerBound;
}
/**
* Returns true if the lower bound is inclusive.
*
* @return true if the lower bound is inclusive
*/
public boolean isLowerBoundInclusive() {
return lowerBoundInclusive;
}
/**
* Returns the upper bound version (inclusive/exclusive depends on {@link
* Restriction#isUpperBoundInclusive()}).
*
* @return the upper bound
*/
public Version getUpperBound() {
return upperBound;
}
/**
* Returns true if the upper bound is inclusive.
*
* @return true if the upper bound is inclusive
*/
public boolean isUpperBoundInclusive() {
return upperBoundInclusive;
}
/**
* Returns true if the given version lies within the restriction bounds.
*
* @param version the version to check against
* @return true if the version fits the bounds
*/
public boolean containsVersion(Version version) {
if (lowerBound != null) {
int comparison = lowerBound.compareTo(version);
if ((comparison == 0) && !lowerBoundInclusive) {
return false;
}
if (comparison > 0) {
return false;
}
}
if (upperBound != null) {
int comparison = upperBound.compareTo(version);
if ((comparison == 0) && !upperBoundInclusive) {
return false;
}
return comparison >= 0;
}
return true;
}
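    // For example (illustrative values), the restriction "[1.0,2.0)" contains versions 1.0 and
    // 1.5, but not 2.0, because the upper bound is exclusive.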
/** {@inheritDoc} */
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Restriction that = (Restriction) o;
return lowerBoundInclusive == that.lowerBoundInclusive
&& upperBoundInclusive == that.upperBoundInclusive
&& Objects.equals(lowerBound, that.lowerBound)
&& Objects.equals(upperBound, that.upperBound);
}
/** {@inheritDoc} */
@Override
public int hashCode() {
return Objects.hash(lowerBound, lowerBoundInclusive, upperBound, upperBoundInclusive);
}
/** {@inheritDoc} */
@Override
public String toString() {
StringBuilder buf = new StringBuilder();
buf.append(isLowerBoundInclusive() ? '[' : '(');
if (getLowerBound() != null) {
buf.append(getLowerBound().toString());
}
buf.append(',');
if (getUpperBound() != null) {
buf.append(getUpperBound().toString());
}
buf.append(isUpperBoundInclusive() ? ']' : ')');
return buf.toString();
}
}
|