index (int64) | repo_id (string) | file_path (string) | content (string)
|---|---|---|---|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/store/vectordb/integration/pinecone/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/store/vectordb/integration/pinecone/schema/dto/delete/DeleteResponse.java
|
package ai.knowly.langtorch.store.vectordb.integration.pinecone.schema.dto.delete;
import lombok.Data;
import lombok.NoArgsConstructor;
@Data
@NoArgsConstructor
public class DeleteResponse {}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/store/vectordb/integration/pinecone/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/store/vectordb/integration/pinecone/schema/dto/fetch/FetchRequest.java
|
package ai.knowly.langtorch.store.vectordb.integration.pinecone.schema.dto.fetch;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.List;
import lombok.Builder;
import lombok.Data;
@Data
@Builder(toBuilder = true, setterPrefix = "set")
public class FetchRequest {
@JsonProperty("ids")
private List<String> ids;
@JsonProperty("namespace")
private String namespace;
}
|
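Since `FetchRequest` is generated with Lombok's `@Builder(toBuilder = true, setterPrefix = "set")`, it is constructed through `set`-prefixed builder methods, and `toBuilder()` derives modified copies. A minimal sketch (the ids and namespaces are illustrative):

```java
import ai.knowly.langtorch.store.vectordb.integration.pinecone.schema.dto.fetch.FetchRequest;
import java.util.List;

public class FetchRequestSketch {
  public static void main(String[] args) {
    FetchRequest request =
        FetchRequest.builder()
            .setIds(List.of("vec-1", "vec-2")) // vector ids to fetch
            .setNamespace("articles")
            .build();
    // toBuilder() copies the request so variants don't mutate the original.
    FetchRequest inDrafts = request.toBuilder().setNamespace("drafts").build();
    System.out.println(inDrafts);
  }
}
```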
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/store/vectordb/integration/pinecone/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/store/vectordb/integration/pinecone/schema/dto/fetch/FetchResponse.java
|
package ai.knowly.langtorch.store.vectordb.integration.pinecone.schema.dto.fetch;
import ai.knowly.langtorch.store.vectordb.integration.pinecone.schema.dto.Vector;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Map;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
@Data
@AllArgsConstructor
@NoArgsConstructor
public class FetchResponse {
@JsonProperty("vectors")
private Map<String, Vector> vectors;
@JsonProperty("namespace")
private String namespace;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/store/vectordb/integration/pinecone/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/store/vectordb/integration/pinecone/schema/dto/query/Match.java
|
package ai.knowly.langtorch.store.vectordb.integration.pinecone.schema.dto.query;
import ai.knowly.langtorch.store.vectordb.integration.pinecone.schema.dto.SparseValues;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.List;
import java.util.Map;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
@Data
@NoArgsConstructor
@AllArgsConstructor
public class Match {
@JsonProperty("id")
private String id;
@JsonProperty("score")
private Double score;
@JsonProperty("values")
private List<Double> values;
@JsonProperty("sparseValues")
private SparseValues sparseValues;
@JsonProperty("metadata")
private Map<String, String> metadata;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/store/vectordb/integration/pinecone/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/store/vectordb/integration/pinecone/schema/dto/query/QueryRequest.java
|
package ai.knowly.langtorch.store.vectordb.integration.pinecone.schema.dto.query;
import ai.knowly.langtorch.store.vectordb.integration.pinecone.schema.dto.SparseValues;
import java.util.List;
import java.util.Map;
import lombok.Builder;
import lombok.Data;
@Data
@Builder(toBuilder = true, setterPrefix = "set")
public class QueryRequest {
private String namespace;
private long topK;
// The filter to apply. You can use vector metadata to limit your search. See
// https://www.pinecone.io/docs/metadata-filtering/.
private Map<String, String> filter;
private boolean includeValues;
private boolean includeMetadata;
private List<Double> vector;
private SparseValues sparseVector;
private String id;
}
|
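`QueryRequest` follows the same builder convention; `topK` caps the number of matches and `filter` carries the metadata predicate described in the comment above. A hedged sketch with illustrative values:

```java
import ai.knowly.langtorch.store.vectordb.integration.pinecone.schema.dto.query.QueryRequest;
import java.util.List;
import java.util.Map;

public class QueryRequestSketch {
  public static void main(String[] args) {
    QueryRequest query =
        QueryRequest.builder()
            .setNamespace("articles")
            .setTopK(5)                            // return at most 5 matches
            .setVector(List.of(0.12, -0.03, 0.88)) // the query embedding
            .setFilter(Map.of("genre", "news"))    // metadata filter
            .setIncludeMetadata(true)
            .build();
    System.out.println(query);
  }
}
```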
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/store/vectordb/integration/pinecone/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/store/vectordb/integration/pinecone/schema/dto/query/QueryResponse.java
|
package ai.knowly.langtorch.store.vectordb.integration.pinecone.schema.dto.query;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.List;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
@Data
@NoArgsConstructor
@AllArgsConstructor
public class QueryResponse {
@JsonProperty("matches")
private List<Match> matches;
@JsonProperty("namespace")
private String namespace;
}
|
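Because the response DTOs have no-args constructors and `@JsonProperty` mappings, they deserialize directly with Jackson. A round-trip sketch (the JSON payload is fabricated for illustration, not a captured Pinecone response):

```java
import ai.knowly.langtorch.store.vectordb.integration.pinecone.schema.dto.query.QueryResponse;
import com.fasterxml.jackson.databind.ObjectMapper;

public class QueryResponseSketch {
  public static void main(String[] args) throws Exception {
    String json = "{\"matches\":[{\"id\":\"vec-1\",\"score\":0.87}],\"namespace\":\"articles\"}";
    QueryResponse response = new ObjectMapper().readValue(json, QueryResponse.class);
    System.out.println(response.getMatches().get(0).getId()); // prints: vec-1
  }
}
```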
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/store/vectordb/integration/pinecone/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/store/vectordb/integration/pinecone/schema/dto/update/UpdateRequest.java
|
package ai.knowly.langtorch.store.vectordb.integration.pinecone.schema.dto.update;
import ai.knowly.langtorch.store.vectordb.integration.pinecone.schema.dto.SparseValues;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.List;
import java.util.Map;
import lombok.Builder;
import lombok.Data;
import lombok.NonNull;
@Data
@Builder(toBuilder = true, setterPrefix = "set")
public class UpdateRequest {
@JsonProperty("id")
@NonNull
private String id;
@JsonProperty("values")
private List<Double> values;
@JsonProperty("sparseValues")
private SparseValues sparseValues;
@JsonProperty("setMetadata")
private Map<String, String> setMetadata;
@JsonProperty("namespace")
private String namespace;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/store/vectordb/integration/pinecone/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/store/vectordb/integration/pinecone/schema/dto/update/UpdateResponse.java
|
package ai.knowly.langtorch.store.vectordb.integration.pinecone.schema.dto.update;
import lombok.Data;
import lombok.NoArgsConstructor;
@Data
@NoArgsConstructor
public class UpdateResponse {}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/store/vectordb/integration/pinecone/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/store/vectordb/integration/pinecone/schema/dto/upsert/UpsertRequest.java
|
package ai.knowly.langtorch.store.vectordb.integration.pinecone.schema.dto.upsert;
import ai.knowly.langtorch.store.vectordb.integration.pinecone.schema.dto.Vector;
import java.util.List;
import lombok.Builder;
import lombok.Data;
@Data
@Builder(toBuilder = true, setterPrefix = "set")
public class UpsertRequest {
private List<Vector> vectors;
private String namespace;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/store/vectordb/integration/pinecone/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/store/vectordb/integration/pinecone/schema/dto/upsert/UpsertResponse.java
|
package ai.knowly.langtorch.store.vectordb.integration.pinecone.schema.dto.upsert;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
@Data
@NoArgsConstructor
@AllArgsConstructor
public class UpsertResponse {
@JsonProperty("upsertedCount")
long upsertedCount;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/store/vectordb/integration
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/store/vectordb/integration/schema/SimilaritySearchQuery.java
|
package ai.knowly.langtorch.store.vectordb.integration.schema;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import lombok.AccessLevel;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NonNull;
@Data
@Builder(toBuilder = true, setterPrefix = "set")
@AllArgsConstructor(access = AccessLevel.PRIVATE)
public class SimilaritySearchQuery {
@Builder.Default Map<String, String> filter = new HashMap<>();
@NonNull private List<Double> query;
@NonNull private Long topK;
}
|
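The `@NonNull` fields make `build()` fail fast when `query` or `topK` is missing, while `@Builder.Default` leaves `filter` as an empty map unless one is supplied. A minimal sketch:

```java
import ai.knowly.langtorch.store.vectordb.integration.schema.SimilaritySearchQuery;
import java.util.List;

public class SimilaritySearchQuerySketch {
  public static void main(String[] args) {
    SimilaritySearchQuery query =
        SimilaritySearchQuery.builder()
            .setQuery(List.of(0.1, 0.2, 0.3))
            .setTopK(10L)
            .build(); // omitting setQuery(...) or setTopK(...) makes build() throw NullPointerException
    System.out.println(query.getFilter().isEmpty()); // prints: true
  }
}
```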
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/tool/Function.java
|
package ai.knowly.langtorch.tool;
/** The common interface for all langtorch functions. */
public interface Function {
Object execute(Object... args);
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/tool/Tool.java
|
package ai.knowly.langtorch.tool;
import com.google.auto.value.AutoValue;
import java.util.HashMap;
import java.util.Map;
/** A class representing a tool with registered functions. */
@AutoValue
public abstract class Tool {
public static Builder builder() {
return new AutoValue_Tool.Builder().setFunctionRegistry(new HashMap<>());
}
public abstract String name();
public abstract String description();
public abstract Map<String, Function> functionRegistry();
/**
* Invoke a registered function with the given label and arguments.
*
* @param label the label of the function
* @param args the arguments to pass to the function
* @return the result of the function execution
*/
public Object invoke(String label, Object... args) {
if (functionRegistry().isEmpty()) {
throw new IllegalArgumentException("Function registry not found");
}
if (functionRegistry().containsKey(label)) {
return functionRegistry().get(label).execute(args);
}
throw new IllegalArgumentException("Function not found");
}
@AutoValue.Builder
public abstract static class Builder {
public abstract Builder setName(String name);
public abstract Builder setDescription(String description);
public abstract Builder setFunctionRegistry(Map<String, Function> functionRegistry);
abstract Map<String, Function> functionRegistry();
/**
* Register a new function with the given label.
*
* @param label the label of the function
* @param function the function to register
* @return the builder with the registered function
*/
public Builder register(String label, Function function) {
functionRegistry().put(label, function);
return this;
}
public abstract Tool build();
}
}
|
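Together, `Function` and `Tool` form a small label-to-lambda registry: functions are registered on the builder and dispatched by `invoke`. A hedged usage sketch (the tool name and labels are illustrative):

```java
import ai.knowly.langtorch.tool.Tool;

public class ToolSketch {
  public static void main(String[] args) {
    Tool calculator =
        Tool.builder()
            .setName("calculator")
            .setDescription("Basic arithmetic")
            .register("add", a -> (int) a[0] + (int) a[1]) // Function is a SAM type, so a lambda works
            .build();
    System.out.println(calculator.invoke("add", 1, 2)); // prints: 3
    // invoke("missing") would throw IllegalArgumentException("Function not found").
  }
}
```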
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/ApiEndPointUtils.java
|
package ai.knowly.langtorch.utils;
import com.google.common.flogger.FluentLogger;
import io.github.cdimascio.dotenv.Dotenv;
import java.util.Optional;
public class ApiEndPointUtils {
private ApiEndPointUtils() {}
public static void logEndPoint(FluentLogger logger, String provider, String endpoint) {
logger.atInfo().log("Using %s endpoint: ***************" + endpoint);
}
public static String getPineconeEndPointFromEnv(Optional<FluentLogger> logger) {
String endpointFromEnv =
getVectorStoreEndpointFromEnv(VectorStoreApiEndpoint.PINECONE_ENDPOINT);
logger.ifPresent(
l -> logEndPoint(l, VectorStoreApiEndpoint.PINECONE_ENDPOINT.name(), endpointFromEnv));
return endpointFromEnv;
}
private static String getVectorStoreEndpointFromEnv(
VectorStoreApiEndpoint vectorStoreApiEndpoint) {
Dotenv dotenv = Dotenv.configure().ignoreIfMissing().load();
return dotenv.get(vectorStoreApiEndpoint.name());
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/ApiEndpoint.java
|
package ai.knowly.langtorch.utils;
public enum ApiEndpoint {
PINECONE_ENDPOINT
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/Constants.java
|
package ai.knowly.langtorch.utils;
import lombok.AccessLevel;
import lombok.NoArgsConstructor;
@NoArgsConstructor(access = AccessLevel.PRIVATE)
public class Constants {
public static final String TEST_RESOURCE_FOLDER = "src/test/resources";
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/Environment.java
|
package ai.knowly.langtorch.utils;
/** Enum for environment */
public enum Environment {
PRODUCTION,
TEST,
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/VectorStoreApiEndpoint.java
|
package ai.knowly.langtorch.utils;
public enum VectorStoreApiEndpoint {
PINECONE_ENDPOINT
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/api
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/api/endpoint/EndpointUtil.java
|
package ai.knowly.langtorch.utils.api.endpoint;
import static ai.knowly.langtorch.utils.Constants.TEST_RESOURCE_FOLDER;
import ai.knowly.langtorch.utils.Environment;
import com.google.common.flogger.FluentLogger;
import io.github.cdimascio.dotenv.Dotenv;
import lombok.NoArgsConstructor;
/** Utility class for getting endpoints from .env file */
@NoArgsConstructor(access = lombok.AccessLevel.PRIVATE)
public class EndpointUtil {
public static String getEndPoint(VectorStoreApiEndpoint apiEndpoint, Environment environment) {
Dotenv dotenv;
if (environment == Environment.PRODUCTION) {
dotenv = Dotenv.configure().ignoreIfMissing().load();
} else {
dotenv = Dotenv.configure().directory(TEST_RESOURCE_FOLDER).ignoreIfMissing().load();
}
return dotenv.get(apiEndpoint.name());
}
public static void logEndPoint(FluentLogger logger, String provider, String endpoint) {
logger.atInfo().log("Using %s endpoint: %s", provider, endpoint);
}
}
|
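`EndpointUtil` looks the endpoint up by the enum constant's name: from the working directory's `.env` in PRODUCTION, otherwise from `src/test/resources/.env`. A sketch, assuming a `.env` entry like `PINECONE_ENDPOINT=https://my-index.svc.pinecone.io`:

```java
import ai.knowly.langtorch.utils.Environment;
import ai.knowly.langtorch.utils.api.endpoint.EndpointUtil;
import ai.knowly.langtorch.utils.api.endpoint.VectorStoreApiEndpoint;

public class EndpointSketch {
  public static void main(String[] args) {
    // Reads PINECONE_ENDPOINT from src/test/resources/.env in the TEST environment.
    String endpoint =
        EndpointUtil.getEndPoint(VectorStoreApiEndpoint.PINECONE_ENDPOINT, Environment.TEST);
    System.out.println(endpoint); // null if the entry is absent
  }
}
```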
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/api
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/api/endpoint/PineconeEnvUtil.java
|
package ai.knowly.langtorch.utils.api.endpoint;
import static ai.knowly.langtorch.utils.api.endpoint.EndpointUtil.logEndPoint;
import ai.knowly.langtorch.utils.Environment;
import com.google.common.flogger.FluentLogger;
import java.util.Optional;
/** Get Pinecone endpoint from .env file */
public class PineconeEnvUtil {
private PineconeEnvUtil() {}
public static String getEndPoint(Environment environment) {
return getPineconeEndPointFromEnv(Optional.empty(), environment);
}
public static String getEndPoint(FluentLogger logger, Environment environment) {
return getPineconeEndPointFromEnv(Optional.ofNullable(logger), environment);
}
private static String getPineconeEndPointFromEnv(
Optional<FluentLogger> logger, Environment environment) {
String endpointFromEnv =
EndpointUtil.getEndPoint(VectorStoreApiEndpoint.PINECONE_ENDPOINT, environment);
logger.ifPresent(
l -> logEndPoint(l, VectorStoreApiEndpoint.PINECONE_ENDPOINT.name(), endpointFromEnv));
return endpointFromEnv;
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/api
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/api/endpoint/VectorStoreApiEndpoint.java
|
package ai.knowly.langtorch.utils.api.endpoint;
public enum VectorStoreApiEndpoint {
PINECONE_ENDPOINT
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/api
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/api/key/ApiKeyEnvUtils.java
|
package ai.knowly.langtorch.utils.api.key;
import static ai.knowly.langtorch.utils.Constants.TEST_RESOURCE_FOLDER;
import ai.knowly.langtorch.utils.Environment;
import com.google.common.flogger.FluentLogger;
import io.github.cdimascio.dotenv.Dotenv;
public class ApiKeyEnvUtils {
private ApiKeyEnvUtils() {}
static void logPartialApiKey(FluentLogger logger, String provider, String apiKey) {
logger.atInfo().log(
"Using %s API key: ***************" + apiKey.substring(apiKey.length() - 6), provider);
}
static String getKeyFromEnv(KeyType keyType, Environment environment) {
Dotenv dotenv;
if (environment == Environment.PRODUCTION) {
dotenv = Dotenv.configure().ignoreIfMissing().load();
} else {
dotenv = Dotenv.configure().directory(TEST_RESOURCE_FOLDER).ignoreIfMissing().load();
}
String key = dotenv.get(keyType.name());
if (key == null) {
throw new KeyNotFoundException(
String.format(
"Could not find %s in .env file. Please add it to the .env file.", keyType.name()));
}
return key;
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/api
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/api/key/CohereKeyUtil.java
|
package ai.knowly.langtorch.utils.api.key;
import static ai.knowly.langtorch.utils.api.key.ApiKeyEnvUtils.getKeyFromEnv;
import static ai.knowly.langtorch.utils.api.key.ApiKeyEnvUtils.logPartialApiKey;
import ai.knowly.langtorch.utils.Environment;
import com.google.common.flogger.FluentLogger;
import java.util.Optional;
/** Get Cohere key from .env file */
public class CohereKeyUtil {
private CohereKeyUtil() {}
public static String getKey(Environment environment) {
return getKey(Optional.empty(), environment);
}
public static String getKey(FluentLogger logger, Environment environment) {
return getKey(Optional.ofNullable(logger), environment);
}
private static String getKey(Optional<FluentLogger> logger, Environment environment) {
String endpointFromEnv = getKeyFromEnv(KeyType.COHERE_API_KEY, environment);
logger.ifPresent(l -> logPartialApiKey(l, KeyType.COHERE_API_KEY.name(), endpointFromEnv));
return endpointFromEnv;
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/api
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/api/key/HuggingFaceKeyUtil.java
|
package ai.knowly.langtorch.utils.api.key;
import static ai.knowly.langtorch.utils.api.key.ApiKeyEnvUtils.getKeyFromEnv;
import static ai.knowly.langtorch.utils.api.key.ApiKeyEnvUtils.logPartialApiKey;
import ai.knowly.langtorch.utils.Environment;
import com.google.common.flogger.FluentLogger;
import java.util.Optional;
/** Get HuggingFace key from .env file */
public class HuggingFaceKeyUtil {
private HuggingFaceKeyUtil() {}
public static String getKey(Environment environment) {
return getKey(Optional.empty(), environment);
}
public static String getKey(FluentLogger logger, Environment environment) {
return getKey(Optional.ofNullable(logger), environment);
}
private static String getKey(Optional<FluentLogger> logger, Environment environment) {
String keyFromEnv = getKeyFromEnv(KeyType.HUGGINGFACE_API_KEY, environment);
logger.ifPresent(l -> logPartialApiKey(l, KeyType.HUGGINGFACE_API_KEY.name(), keyFromEnv));
return keyFromEnv;
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/api
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/api/key/KeyNotFoundException.java
|
package ai.knowly.langtorch.utils.api.key;
/** Thrown when a key is not found in the .env file. */
public class KeyNotFoundException extends RuntimeException {
public KeyNotFoundException(String message) {
super(message);
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/api
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/api/key/KeyType.java
|
package ai.knowly.langtorch.utils.api.key;
public enum KeyType {
OPENAI_API_KEY,
MINMAX_GROUP_ID,
MINIMAX_API_KEY,
PINECONE_API_KEY,
COHERE_API_KEY,
HUGGINGFACE_API_KEY,
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/api
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/api/key/MiniMaxKeyUtil.java
|
package ai.knowly.langtorch.utils.api.key;
import ai.knowly.langtorch.utils.Environment;
import com.google.common.flogger.FluentLogger;
import java.util.Optional;
import static ai.knowly.langtorch.utils.api.key.ApiKeyEnvUtils.getKeyFromEnv;
import static ai.knowly.langtorch.utils.api.key.ApiKeyEnvUtils.logPartialApiKey;
/**
* Get MiniMax key from .env file.
*
* @author maxiao
* @date 2023/06/07
*/
public class MiniMaxKeyUtil {
private MiniMaxKeyUtil() {}
public static String getGroupId(Environment environment) {
return getGroupId(Optional.empty(), environment);
}
public static String getGroupId(FluentLogger logger, Environment environment) {
return getGroupId(Optional.ofNullable(logger), environment);
}
private static String getGroupId(Optional<FluentLogger> logger, Environment environment) {
String groupIdFromEnv = getKeyFromEnv(KeyType.MINMAX_GROUP_ID, environment);
logger.ifPresent(l -> logPartialApiKey(l, KeyType.MINMAX_GROUP_ID.name(), groupIdFromEnv));
return groupIdFromEnv;
}
public static String getKey(Environment environment) {
return getKey(Optional.empty(), environment);
}
public static String getKey(FluentLogger logger, Environment environment) {
return getKey(Optional.ofNullable(logger), environment);
}
private static String getKey(Optional<FluentLogger> logger, Environment environment) {
String keyFromEnv = getKeyFromEnv(KeyType.MINIMAX_API_KEY, environment);
logger.ifPresent(l -> logPartialApiKey(l, KeyType.MINIMAX_API_KEY.name(), keyFromEnv));
return keyFromEnv;
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/api
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/api/key/OpenAIKeyUtil.java
|
package ai.knowly.langtorch.utils.api.key;
import static ai.knowly.langtorch.utils.api.key.ApiKeyEnvUtils.getKeyFromEnv;
import static ai.knowly.langtorch.utils.api.key.ApiKeyEnvUtils.logPartialApiKey;
import ai.knowly.langtorch.utils.Environment;
import com.google.common.flogger.FluentLogger;
import java.util.Optional;
/** Get OpenAI key from .env file */
public class OpenAIKeyUtil {
private OpenAIKeyUtil() {}
public static String getKey(Environment environment) {
return getKey(Optional.empty(), environment);
}
public static String getKey(FluentLogger logger, Environment environment) {
return getKey(Optional.ofNullable(logger), environment);
}
private static String getKey(Optional<FluentLogger> logger, Environment environment) {
String keyFromEnv = getKeyFromEnv(KeyType.OPENAI_API_KEY, environment);
logger.ifPresent(l -> logPartialApiKey(l, KeyType.OPENAI_API_KEY.name(), keyFromEnv));
return keyFromEnv;
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/api
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/api/key/PineconeKeyUtil.java
|
package ai.knowly.langtorch.utils.api.key;
import static ai.knowly.langtorch.utils.api.key.ApiKeyEnvUtils.getKeyFromEnv;
import static ai.knowly.langtorch.utils.api.key.ApiKeyEnvUtils.logPartialApiKey;
import ai.knowly.langtorch.utils.Environment;
import com.google.common.flogger.FluentLogger;
import java.util.Optional;
/** Get Pinecone key from .env file */
public class PineconeKeyUtil {
private PineconeKeyUtil() {}
public static String getKey(FluentLogger logger, Environment environment) {
return getKey(Optional.ofNullable(logger), environment);
}
public static String getKey(Environment environment) {
return getKey(Optional.empty(), environment);
}
private static String getKey(Optional<FluentLogger> logger, Environment environment) {
String keyFromEnv = getKeyFromEnv(KeyType.PINECONE_API_KEY, environment);
logger.ifPresent(l -> logPartialApiKey(l, KeyType.PINECONE_API_KEY.name(), keyFromEnv));
return keyFromEnv;
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/future
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/future/retry/FutureRetrier.java
|
package ai.knowly.langtorch.utils.future.retry;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import ai.knowly.langtorch.utils.future.retry.strategy.BackoffStrategy;
import com.google.common.base.Predicate;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;
import com.google.inject.Inject;
import java.util.concurrent.ScheduledExecutorService;
import java.util.function.Supplier;
/** A utility class for retrying a {@link ListenableFuture} until a success condition is met. */
public final class FutureRetrier {
private final ScheduledExecutorService executor;
private final BackoffStrategy backoffStrategy;
private final RetryConfig retryConfig;
@Inject
public FutureRetrier(
ScheduledExecutorService executor, BackoffStrategy backoffStrategy, RetryConfig retryConfig) {
this.executor = executor;
this.backoffStrategy = backoffStrategy;
this.retryConfig = retryConfig;
}
public <T> ListenableFuture<T> runWithRetries(
Supplier<ListenableFuture<T>> futureSupplier, Predicate<T> successCondition) {
return runWithRetries(
futureSupplier,
retryConfig.getMaxRetries(),
retryConfig.getRetryIntervalMillis(),
successCondition);
}
public <T> ListenableFuture<T> runWithRetries(
Supplier<ListenableFuture<T>> futureSupplier,
int retries,
long intervalMillis,
Predicate<T> successCondition) {
SettableFuture<T> resultFuture = SettableFuture.create();
runWithRetriesInternal(
resultFuture, futureSupplier, retries, intervalMillis, successCondition, 0);
return resultFuture;
}
private <T> void runWithRetriesInternal(
final SettableFuture<T> future,
final Supplier<ListenableFuture<T>> futureSupplier,
final int retries,
final long intervalMillis,
final Predicate<T> successCondition,
final int retryCount) {
ListenableFuture<T> immediateFuture;
try {
immediateFuture = futureSupplier.get();
} catch (Exception e) {
handleFailure(
future, futureSupplier, retries, intervalMillis, successCondition, e, retryCount);
return;
}
Futures.addCallback(
immediateFuture,
new FutureCallback<T>() {
@Override
public void onSuccess(T result) {
if (successCondition.apply(result)) {
future.set(result);
} else {
RuntimeException exception =
new RuntimeException("Success condition not met, retrying.");
handleFailure(
future,
futureSupplier,
retries,
intervalMillis,
successCondition,
exception,
retryCount);
}
}
@Override
public void onFailure(Throwable t) {
handleFailure(
future,
futureSupplier,
retries,
intervalMillis,
successCondition,
t,
retryCount);
}
},
MoreExecutors.directExecutor());
}
private <T> void handleFailure(
SettableFuture<T> future,
Supplier<ListenableFuture<T>> futureSupplier,
int retries,
long delayInMillis,
Predicate<T> successCondition,
Throwable t,
int retryCount) {
if (retries > 0) {
executor.schedule(
() ->
runWithRetriesInternal(
future,
futureSupplier,
retries - 1,
delayInMillis,
successCondition,
retryCount + 1),
backoffStrategy.getDelayMillis(retryCount, delayInMillis),
MILLISECONDS);
} else {
future.setException(t);
}
}
}
|
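A sketch of wiring `FutureRetrier` to a backoff strategy and a `RetryConfig`; the immediate future below is a stand-in for any `ListenableFuture`-returning call:

```java
import ai.knowly.langtorch.utils.future.retry.FutureRetrier;
import ai.knowly.langtorch.utils.future.retry.RetryConfig;
import ai.knowly.langtorch.utils.future.retry.strategy.ExponentialBackoffStrategy;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;

public class FutureRetrierSketch {
  public static void main(String[] args) throws Exception {
    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    FutureRetrier retrier =
        new FutureRetrier(
            executor,
            new ExponentialBackoffStrategy(),
            RetryConfig.builder().setMaxRetries(3).setRetryIntervalMillis(100).build());
    ListenableFuture<Integer> result =
        retrier.runWithRetries(
            () -> Futures.immediateFuture(42), // stand-in for a real async call
            value -> value != null);           // retry until this condition holds
    System.out.println(result.get()); // prints: 42
    executor.shutdown();
  }
}
```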
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/future
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/future/retry/RetryConfig.java
|
package ai.knowly.langtorch.utils.future.retry;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
@Data
@Builder(toBuilder = true, setterPrefix = "set")
@AllArgsConstructor(access = lombok.AccessLevel.PRIVATE)
public class RetryConfig {
@Builder.Default private int maxRetries = 2;
@Builder.Default private long retryIntervalMillis = 200;
public static RetryConfig getDefaultInstance() {
return RetryConfig.builder().build();
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/future/retry
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/future/retry/strategy/BackoffStrategy.java
|
package ai.knowly.langtorch.utils.future.retry.strategy;
public interface BackoffStrategy {
long getDelayMillis(int retryCount, long intervalMillis);
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/future/retry
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/future/retry/strategy/ExponentialBackoffStrategy.java
|
package ai.knowly.langtorch.utils.future.retry.strategy;
public class ExponentialBackoffStrategy implements BackoffStrategy {
@Override
public long getDelayMillis(int retryCount, long intervalMillis) {
return (long) (intervalMillis * Math.pow(2, retryCount));
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/future/retry
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/future/retry/strategy/FibonacciBackoffStrategy.java
|
package ai.knowly.langtorch.utils.future.retry.strategy;
public class FibonacciBackoffStrategy implements BackoffStrategy {
@Override
public long getDelayMillis(int retryCount, long intervalMillis) {
return intervalMillis * fibonacci(retryCount);
}
private long fibonacci(int n) {
if (n <= 1) {
return n;
}
long fib = 1;
long prevFib = 1;
for (int i = 2; i < n; i++) {
long temp = fib;
fib += prevFib;
prevFib = temp;
}
return fib;
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/future/retry
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/future/retry/strategy/FixedBackoffStrategy.java
|
package ai.knowly.langtorch.utils.future.retry.strategy;
public class FixedBackoffStrategy implements BackoffStrategy {
@Override
public long getDelayMillis(int retryCount, long intervalMillis) {
return intervalMillis;
}
}
|
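The three strategies differ only in how the delay grows with the retry count: for `intervalMillis = 200` and retry counts 0..3, the exponential strategy yields 200/400/800/1600 ms, the Fibonacci strategy 0/200/200/400 ms (note `fibonacci(0)` is 0), and the fixed strategy a constant 200 ms. A quick comparison:

```java
import ai.knowly.langtorch.utils.future.retry.strategy.BackoffStrategy;
import ai.knowly.langtorch.utils.future.retry.strategy.ExponentialBackoffStrategy;
import ai.knowly.langtorch.utils.future.retry.strategy.FibonacciBackoffStrategy;
import ai.knowly.langtorch.utils.future.retry.strategy.FixedBackoffStrategy;
import java.util.List;

public class BackoffSketch {
  public static void main(String[] args) {
    List<BackoffStrategy> strategies =
        List.of(
            new ExponentialBackoffStrategy(),
            new FibonacciBackoffStrategy(),
            new FixedBackoffStrategy());
    for (BackoffStrategy strategy : strategies) {
      for (int retry = 0; retry <= 3; retry++) {
        System.out.printf(
            "%s retry %d -> %d ms%n",
            strategy.getClass().getSimpleName(), retry, strategy.getDelayMillis(retry, 200));
      }
    }
  }
}
```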
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/graph/DAGViolationException.java
|
package ai.knowly.langtorch.utils.graph;
/** Exception thrown when a cycle is detected in the graph. */
public class DAGViolationException extends RuntimeException {
public DAGViolationException(String message) {
super(message);
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/graph/TopologicalSorter.java
|
package ai.knowly.langtorch.utils.graph;
import java.util.*;
import lombok.AccessLevel;
import lombok.AllArgsConstructor;
/** Class to perform topological sort on a Directed Acyclic Graph (DAG). */
@AllArgsConstructor(access = AccessLevel.PRIVATE)
public class TopologicalSorter {
public static List<String> topologicalSort(Map<String, List<String>> graph) {
// Create a map to store the in-degree of each node
Map<String, Integer> inDegree = initializeInDegree(graph);
// Create a queue and enqueue all nodes of in-degree 0
Queue<String> queue = getZeroInDegreeNodes(inDegree);
// Create a stack to store the result and process nodes in the queue
Deque<String> stack = processNodes(graph, inDegree, queue);
// Check if a topological sort is possible (i.e., the graph is a DAG)
if (!isTopologicalSortPossible(inDegree, stack)) {
throw new DAGViolationException(
"The graph has at least one cycle, so a topological sort is not possible.");
}
// Get the result from the stack
return getResultFromStack(stack);
}
// Initialize in-degrees
private static Map<String, Integer> initializeInDegree(Map<String, List<String>> graph) {
Map<String, Integer> inDegree = new HashMap<>();
graph
.entrySet()
.forEach(
entry -> {
inDegree.putIfAbsent(entry.getKey(), 0);
for (String neighbor : entry.getValue()) {
inDegree.put(neighbor, inDegree.getOrDefault(neighbor, 0) + 1);
}
});
return inDegree;
}
// Get nodes with in-degree 0
private static Queue<String> getZeroInDegreeNodes(Map<String, Integer> inDegree) {
Queue<String> queue = new LinkedList<>();
for (Map.Entry<String, Integer> entry : inDegree.entrySet()) {
if (entry.getValue() == 0) {
queue.add(entry.getKey());
}
}
return queue;
}
// Process nodes in the queue
private static Deque<String> processNodes(
Map<String, List<String>> graph, Map<String, Integer> inDegree, Queue<String> queue) {
Deque<String> stack = new ArrayDeque<>();
while (!queue.isEmpty()) {
String node = queue.poll();
stack.push(node);
if (graph.containsKey(node)) {
for (String neighbor : graph.get(node)) {
inDegree.put(neighbor, inDegree.get(neighbor) - 1);
if (inDegree.get(neighbor) == 0) {
queue.add(neighbor);
}
}
}
}
return stack;
}
// Check if a topological sort is possible
private static boolean isTopologicalSortPossible(
Map<String, Integer> inDegree, Deque<String> stack) {
return stack.size() == inDegree.size();
}
// Get the result from the stack
private static List<String> getResultFromStack(Deque<String> stack) {
List<String> result = new ArrayList<>();
while (!stack.isEmpty()) {
result.add(stack.pop());
}
return result;
}
}
|
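Reading the in-degree bookkeeping above, an edge points from a node to its prerequisites, so each node appears after everything it points to in the result. A sketch with a small build pipeline (names illustrative):

```java
import ai.knowly.langtorch.utils.graph.TopologicalSorter;
import java.util.List;
import java.util.Map;

public class TopologicalSorterSketch {
  public static void main(String[] args) {
    // Each entry lists the nodes the key depends on, so dependencies sort first.
    Map<String, List<String>> graph =
        Map.of(
            "deploy", List.of("test"),
            "test", List.of("build"));
    System.out.println(TopologicalSorter.topologicalSort(graph)); // prints: [build, test, deploy]
    // A cyclic graph (e.g. adding "build" -> ["deploy"]) throws DAGViolationException.
  }
}
```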
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/utils/reflection/ContextUtil.java
|
package ai.knowly.langtorch.utils.reflection;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import lombok.AllArgsConstructor;
@AllArgsConstructor(access = lombok.AccessLevel.PRIVATE)
public class ContextUtil {
public static void setAccessible(Field field) {
if (!Modifier.isPublic(field.getModifiers())) {
field.setAccessible(true);
}
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-ffmpeg/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-ffmpeg/1.0/ai/kognition/pilecv4j/ffmpeg/AsyncVideoFrameConsumer.java
|
package ai.kognition.pilecv4j.ffmpeg;
import static net.dempsy.util.Functional.ignore;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import ai.kognition.pilecv4j.ffmpeg.Ffmpeg.VideoFrameConsumer;
import ai.kognition.pilecv4j.image.VideoFrame;
public class AsyncVideoFrameConsumer implements VideoFrameConsumer {
private static final Logger LOGGER = LoggerFactory.getLogger(AsyncVideoFrameConsumer.class);
private final AtomicReference<VideoFrame> ondeck = new AtomicReference<>(null);
private final VideoFrameConsumer underlying;
private final Thread thread;
private final AtomicBoolean stop = new AtomicBoolean(false);
private static final AtomicLong threadCount = new AtomicLong(0);
private static final String THREAD_NAME = "avp_";
public AsyncVideoFrameConsumer(final VideoFrameConsumer underlying) {
this.underlying = underlying;
thread = start();
}
@Override
public void handle(final VideoFrame frame) {
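// Put a copy of the newest frame on deck for the worker thread; the stale frame
// it displaces (if any) is closed on the way out of the try-with-resources.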
try(VideoFrame next = frame.shallowCopy();
VideoFrame prev = ondeck.getAndSet(next.returnMe());) {}
}
@Override
public void close() {
stop.set(true);
thread.interrupt();
underlying.close();
}
private Thread start() {
final Thread ret = new Thread(() -> {
while(!stop.get()) {
boolean gotit = false;
try(VideoFrame f = ondeck.getAndSet(null);) {
if(f != null) {
gotit = true;
underlying.handle(f);
}
} catch(final RuntimeException rte) {
LOGGER.warn("Underlying video frame handler failed.", rte);
}
if(!gotit)
ignore(() -> Thread.sleep(1));
}
}, THREAD_NAME + threadCount.getAndIncrement());
ret.start();
return ret;
}
}
|
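`AsyncVideoFrameConsumer` decouples decoding from frame handling: the newest frame is swapped into an on-deck slot and a worker thread drains it, so a slow consumer drops stale frames instead of stalling the decode loop. A hedged sketch of wrapping a slow handler; the `createMediaContext`/`chain`/`play` calls follow the `Ffmpeg` javadoc in the next entry (treat them as assumptions from that doc), and the URI is illustrative:

```java
import ai.kognition.pilecv4j.ffmpeg.AsyncVideoFrameConsumer;
import ai.kognition.pilecv4j.ffmpeg.Ffmpeg;

public class AsyncConsumerSketch {
  public static void main(String[] args) {
    try(Ffmpeg.MediaContext ctx = Ffmpeg.createMediaContext("rtsp://example/stream")) {
      ctx.chain(Ffmpeg.DEFAULT_CHAIN_NAME)
          .processVideoFrames(new AsyncVideoFrameConsumer(frame -> {
            // Expensive per-frame work; frames arriving while this runs are
            // replaced on deck rather than queued.
          }))
          .mediaContext()
          .play();
    }
  }
}
```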
0
|
java-sources/ai/kognition/pilecv4j/lib-ffmpeg/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-ffmpeg/1.0/ai/kognition/pilecv4j/ffmpeg/Ffmpeg.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.ffmpeg;
import static ai.kognition.pilecv4j.ffmpeg.internal.FfmpegApi.LOG_LEVEL_DEBUG;
import static ai.kognition.pilecv4j.ffmpeg.internal.FfmpegApi.LOG_LEVEL_ERROR;
import static ai.kognition.pilecv4j.ffmpeg.internal.FfmpegApi.LOG_LEVEL_FATAL;
import static ai.kognition.pilecv4j.ffmpeg.internal.FfmpegApi.LOG_LEVEL_INFO;
import static ai.kognition.pilecv4j.ffmpeg.internal.FfmpegApi.LOG_LEVEL_TRACE;
import static ai.kognition.pilecv4j.ffmpeg.internal.FfmpegApi.LOG_LEVEL_WARN;
import static net.dempsy.util.Functional.chain;
import static net.dempsy.util.Functional.ignore;
import java.net.URI;
import java.nio.ByteBuffer;
import java.nio.IntBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Collectors;
import com.sun.jna.Pointer;
import com.sun.jna.ptr.IntByReference;
import com.sun.jna.ptr.LongByReference;
import org.apache.commons.lang3.mutable.MutableObject;
import org.opencv.core.Mat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import net.dempsy.util.Functional;
import net.dempsy.util.MutableRef;
import net.dempsy.util.QuietCloseable;
import ai.kognition.pilecv4j.ffmpeg.Ffmpeg.EncodingContext;
import ai.kognition.pilecv4j.ffmpeg.Ffmpeg.EncodingContext.VideoEncoder;
import ai.kognition.pilecv4j.ffmpeg.Ffmpeg.MediaContext.StreamDetails;
import ai.kognition.pilecv4j.ffmpeg.internal.FfmpegApi;
import ai.kognition.pilecv4j.ffmpeg.internal.FfmpegApi.fill_buffer_callback;
import ai.kognition.pilecv4j.ffmpeg.internal.FfmpegApi.packet_filter_callback;
import ai.kognition.pilecv4j.ffmpeg.internal.FfmpegApi.push_frame_callback;
import ai.kognition.pilecv4j.ffmpeg.internal.FfmpegApi.seek_buffer_callback;
import ai.kognition.pilecv4j.ffmpeg.internal.FfmpegApi.select_streams_callback;
import ai.kognition.pilecv4j.image.CvMat;
import ai.kognition.pilecv4j.image.VideoFrame;
public class Ffmpeg {
private static final Logger LOGGER = LoggerFactory.getLogger(Ffmpeg.class);
static {
FfmpegApi._init();
}
public static final long AVERROR_EOF_KOGSTAT = FfmpegApi.pcv4j_ffmpeg_code_averror_eof_as_kognition_stat();
public static final long AVERROR_UNKNOWN = FfmpegApi.pcv4j_ffmpeg_code_averror_unknown_as_kognition_stat();
public static final int AVERROR_EOF_AVSTAT = FfmpegApi.pcv4j_ffmpeg_code_averror_eof();
// values of 'whence' passed to seek_buffer_callback
public static final int SEEK_SET = FfmpegApi.pcv4j_ffmpeg_code_seek_set();
public static final int SEEK_CUR = FfmpegApi.pcv4j_ffmpeg_code_seek_cur();
public static final int SEEK_END = FfmpegApi.pcv4j_ffmpeg_code_seek_end();
public static final int AVSEEK_SIZE = FfmpegApi.pcv4j_ffmpeg_code_seek_size();
public static final int AVEAGAIN = FfmpegApi.pcv4j_ffmpeg_code_eagain();
public static final int AVMEDIA_TYPE_UNKNOWN = FfmpegApi.pcv4j_ffmpeg2_mediaType_UNKNOWN();
public static final int AVMEDIA_TYPE_VIDEO = FfmpegApi.pcv4j_ffmpeg2_mediaType_VIDEO();
public static final int AVMEDIA_TYPE_AUDIO = FfmpegApi.pcv4j_ffmpeg2_mediaType_AUDIO();
public static final int AVMEDIA_TYPE_DATA = FfmpegApi.pcv4j_ffmpeg2_mediaType_DATA();
public static final int AVMEDIA_TYPE_SUBTITLE = FfmpegApi.pcv4j_ffmpeg2_mediaType_SUBTITLE();
public static final int AVMEDIA_TYPE_ATTACHMENT = FfmpegApi.pcv4j_ffmpeg2_mediaType_ATTACHMENT();
public static final int AVMEDIA_TYPE_NB = FfmpegApi.pcv4j_ffmpeg2_mediaType_NB();
// This needs to be kept in sync with the value in EncodingContext.h
public static final int DEFAULT_FPS = 30;
/**
* The default latency to allow in the encoding of a live stream before duplicating frames.
*/
public static final long DEFAULT_MAX_LATENCY_MILLIS = 500;
static {
final Logger nativeLogger = LoggerFactory.getLogger(Ffmpeg.class.getPackageName() + ".native");
// find the level
final int logLevelSet;
if(nativeLogger.isTraceEnabled())
logLevelSet = LOG_LEVEL_TRACE;
else if(nativeLogger.isDebugEnabled())
logLevelSet = LOG_LEVEL_DEBUG;
else if(nativeLogger.isInfoEnabled())
logLevelSet = LOG_LEVEL_INFO;
else if(nativeLogger.isWarnEnabled())
logLevelSet = LOG_LEVEL_WARN;
else if(nativeLogger.isErrorEnabled())
logLevelSet = LOG_LEVEL_ERROR;
else
logLevelSet = LOG_LEVEL_FATAL;
FfmpegApi.pcv4j_ffmpeg2_logging_setLogLevel(logLevelSet);
}
/**
* The default here should match the DEFAULT_MAX_REMUX_ERRORS in ffmpeg_wrapper.cpp
*/
public static final int DEFAULT_MAX_REMUX_ERRORS = 20;
public static final String DEFAULT_CHAIN_NAME = "default";
// ======================================================================
// MEDIA DATA SOURCE SUPPORT
// ======================================================================
// ======================================================================
// Custom media data source support
@FunctionalInterface
public static interface MediaDataSupplier {
/**
* The {@link MediaContext} will request {@code numBytes} be placed in the buffer.
* You should return the number of bytes actually placed in the buffer.
*/
public int fillBuffer(ByteBuffer buf, int numBytes);
}
// ======================================================================
// ======================================================================
// MEDIA PROCESSING SUPPORT
// ======================================================================
/**
* This interface is used for processors that handle decoded video frames.
*/
@FunctionalInterface
public static interface VideoFrameConsumer extends QuietCloseable, Consumer<VideoFrame> {
public void handle(VideoFrame frame);
@Override
default public void accept(final VideoFrame videoFrame) {
handle(videoFrame);
}
@Override
default public void close() {}
}
/**
* You can implement a stream selector in java by passing a StreamSelectorCallback
* to {@link MediaProcessingChain#selectStreams(StreamSelectorCallback)}.
*/
@FunctionalInterface
public static interface StreamSelectorCallback {
/**
* The details for all of the streams will be passed to you, and you need to fill out
* the selection array. Setting an entry to {@code true} means you want that stream's
* data passed on to the processing.
*
* @return {@code true} on success. {@code false} on failure.
*/
public boolean select(StreamDetails[] details, boolean[] selection);
}
/**
* Calling {@link Ffmpeg.MediaContext#chain(String)} returns a {@link Ffmpeg.MediaProcessingChain}
* which represents a selection of streams and a number of processes that operate on those
* streams.
*/
public static class MediaProcessingChain extends MediaProcessor {
private final MediaContext ctx;
private final List<MediaProcessor> processors = new ArrayList<>();
private final List<PacketFilterWrap> packetFilters = new ArrayList<>();
private final String name;
private MediaProcessingChain(final String name, final long nativeRef, final MediaContext ctx) {
super(nativeRef);
this.ctx = ctx;
this.name = name;
}
/**
* Return the underlying {@link MediaContext} for this processing chain.
*/
public MediaContext mediaContext() {
return ctx;
}
/**
* Get the name of this processing chain.
*/
public String getName() {
return name;
}
/**
* Clean up the underlying resources associated with this {@link MediaProcessingChain}.
*/
@Override
public void close() {
Functional.reverseRange(0, processors.size())
.mapToObj(i -> processors.get(i))
.forEach(p -> p.close());
Functional.reverseRange(0, packetFilters.size())
.mapToObj(i -> packetFilters.get(i))
.forEach(p -> p.close());
}
// ======================================================================
// STREAM SELECTOR SUPPORT
// ======================================================================
/**
* Add a filter to the processing chain that selects only packets from the first video stream
* in the media.
*/
public MediaProcessingChain selectFirstVideoStream() {
return manage(new PacketFilterWrap(FfmpegApi.pcv4j_ffmpeg2_firstVideoStreamSelector_create()));
}
/**
* Create a packet filter that uses the details of the streams in the source to decide which
* packets to filter.
*/
public MediaProcessingChain selectStreams(final StreamSelectorCallback callback) {
return selectStreams(res -> {
final StreamDetails[] sd = ctx.getStreamDetails();
if(sd == null)
return false;
LOGGER.debug("Selecting streams from {}", Arrays.toString(sd));
if(sd.length != res.length) {
LOGGER.error(
"The number of stream determined from getStreamDetails ({}) is not equal to the number of streams determined by the result length ({})",
sd.length, res.length);
return false;
}
return callback.select(sd, res);
});
}
public MediaProcessingChain preferBgr() {
mediaContext().addOption("pilecv4j:prefer_bgr", "true");
return this;
}
/**
* Create a video processor that takes the first decodable video stream.
*/
public MediaProcessingChain processVideoFrames(final VideoFrameConsumer consumer) {
return processVideoFrames(-1, (String)null, consumer);
}
/**
* Create a video processor that takes the first decodable video stream and applies the initializer on the
* first frame and the handler on all of the frames (including the first frame).
*/
public MediaProcessingChain processVideoFrames(final VideoFrameConsumer initializer, final VideoFrameConsumer handler) {
return processVideoFrames(-1, null, initializer, handler);
}
/**
* Create a video processor that takes the first decodable video stream. If decoderName is not null then the decoder
* will be used to decode the frames.
*/
public MediaProcessingChain processVideoFrames(final String decoderName, final VideoFrameConsumer consumer) {
return processVideoFrames(-1, decoderName, consumer);
}
/**
* Create a video processor that takes the first decodable video stream and applies the initializer on the
* first frame and the handler on all of the frames (including the first frame). If decoderName is not null then the decoder
* will be used to decode the frames.
*/
public MediaProcessingChain processVideoFrames(final String decoderName, final VideoFrameConsumer initializer, final VideoFrameConsumer handler) {
return processVideoFrames(-1, decoderName, initializer, handler);
}
/**
* Create a video processor that produces frames no larger than the given {@code maxDim}
* that takes the first decodable video stream.
*/
public MediaProcessingChain processVideoFrames(final int maxDim, final VideoFrameConsumer consumer) {
return processVideoFrames(maxDim, (String)null, consumer);
}
/**
* Create a video processor that produces frames no larger than the given {@code maxDim}
* that takes the first decodable video stream and applies the initializer on the
* first frame and the handler on all of the frames (including the first frame).
*/
public MediaProcessingChain processVideoFrames(final int maxDim, final VideoFrameConsumer initializer, final VideoFrameConsumer handler) {
return processVideoFrames(maxDim, null, initializer, handler);
}
/**
* Create a video processor that produces frames no larger than the given {@code maxDim}
* that takes the first decodable video stream. If decoderName is not null then the decoder
* will be used to decode the frames.
*/
public MediaProcessingChain processVideoFrames(final int maxDim, final String decoderName, final VideoFrameConsumer consumer) {
final var pfc = wrap(consumer);
final long nativeRef = FfmpegApi.pcv4j_ffmpeg2_decodedFrameProcessor_create(pfc, maxDim, decoderName);
return manage(new FrameVideoProcessor(nativeRef, pfc, consumer));
}
/**
* Create a video processor that produces frames no larger than the given {@code maxDim}
* that takes the first decodable video stream and applies the initializer on the
* first frame and the handler on all of the frames (including the first frame). If decoderName is not null then the decoder
* will be used to decode the frames.
*/
public MediaProcessingChain processVideoFrames(final int maxDim, final String decoderName, final VideoFrameConsumer initializer,
final VideoFrameConsumer handler) {
final var pfc = wrap(handler);
final MutableRef<FrameVideoProcessor> proc = new MutableRef<>();
final var init = wrap(vf -> {
initializer.handle(vf);
proc.ref.replace(pfc);
initializer.close();
handler.handle(vf);
});
final long nativeRef = FfmpegApi.pcv4j_ffmpeg2_decodedFrameProcessor_create(init, maxDim, decoderName);
final var fm = new FrameVideoProcessor(nativeRef, init, handler);
proc.ref = fm;
return manage(fm);
}
/**
* Remux the input to the given Muxer.
*
* @param output is the Muxer to use to remux the streams to.
* @param maxRemuxErrorCount is the maximum error count before failing.
* @return the current MediaProcessingChain
*/
public MediaProcessingChain remux(final Muxer output, final int maxRemuxErrorCount) {
return manage(new MediaProcessorWithMuxer(FfmpegApi.pcv4j_ffmpeg2_remuxer_create(output.nativeRef, maxRemuxErrorCount), output));
}
/**
* Remux the input to the given Muxer. A convenience method for:
*
* <pre>
* <code>
* remux(output, DEFAULT_MAX_REMUX_ERRORS)
* </code>
* </pre>
*
* @param output is the Muxer to use to remux the streams to.
* @return the current MediaProcessingChain
*/
public MediaProcessingChain remux(final Muxer output) {
return remux(output, DEFAULT_MAX_REMUX_ERRORS);
}
/**
* Optionally call the consumer with the current MediaProcessingChain.
*
* @param doIt when {@code true}, the {@code ctxWork} Consumer will be called with {@code this}
*/
public MediaProcessingChain optionally(final boolean doIt, final Consumer<MediaProcessingChain> ctxWork) {
if(doIt)
ctxWork.accept(this);
return this;
}
/**
* Add a filter to the media processing that suppresses/passes a given
* packet based on the packet meta-data passed to the filter.
*/
public MediaProcessingChain filterPackets(final PacketFilter cb) {
final packet_filter_callback rcb = new packet_filter_callback() {
@Override
public int packet_filter(final int mediaType, final int stream_index, final int packetNumBytes, final int isKeyFrame, final long pts,
final long dts, final int tbNum, final int tbDen) {
return cb.test(mediaType, stream_index, packetNumBytes, isKeyFrame != 0, pts, dts, tbNum, tbDen) ? 1 : 0;
}
};
return manage(new CallbackPacketFilter(FfmpegApi.pcv4j_ffmpeg2_javaPacketFilter_create(rcb), rcb));
}
/**
* Add a filter to the media processing that suppresses/passes a given
* packet based on the packet meta-data passed to the filter.
*/
public MediaProcessingChain filterPackets(final Function<PacketMetadata, Boolean> cb) {
return filterPackets((final int mediaType, final int stream_index, final int packetNumBytes, final boolean isKeyFrame, final long pts,
final long dts, final int tbNum, final int tbDen) -> {
return cb.apply(new PacketMetadata(mediaType, stream_index, packetNumBytes, isKeyFrame, pts, dts, tbNum, tbDen));
});
}
/**
* This is package protected to eliminate any optimization of the strong references
* required to keep the JNA callbacks from being GCed
*/
static class CallbackStreamSelector extends PacketFilterWrap {
// ======================================================================
// JNA will only hold a weak reference to the callbacks passed in
// so if we dynamically allocate them then they will be garbage collected.
// In order to prevent that we're keeping strong references to them.
// These are not private in order to avoid any possibility that the
// JVM optimized them out since they aren't read anywhere in this code.
public select_streams_callback ssc;
// ======================================================================
private CallbackStreamSelector(final long nativeRef, final select_streams_callback selector) {
super(nativeRef);
ssc = selector;
}
}
/**
* This is package protected to eliminate any optimization of the strong references
* required to keep the JNA callbacks from being GCed
*/
static class CallbackPacketFilter extends PacketFilterWrap {
// ======================================================================
// JNA will only hold a weak reference to the callbacks passed in
// so if we dynamically allocate them then they will be garbage collected.
// In order to prevent that we're keeping strong references to them.
// These are not private in order to avoid any possibility that the
// JVM optimized them out since they aren't read anywhere in this code.
public packet_filter_callback pfcb;
// ======================================================================
private CallbackPacketFilter(final long nativeRef, final packet_filter_callback selector) {
super(nativeRef);
pfcb = selector;
}
}
private push_frame_callback wrap(final VideoFrameConsumer consumer) {
return new push_frame_callback() {
final AtomicLong frameNumber = new AtomicLong(0);
@Override
public long push_frame(final long frame, final int isRbg, final int streamIndex) {
try(final VideoFrame mat = new VideoFrame(
frame, System.currentTimeMillis(), frameNumber.getAndIncrement(), isRbg != 0) {
// mats are closed automatically in the native code
// once the push_frame returns.
@Override
public void doNativeDelete() {}
};) {
try {
consumer.handle(mat);
return 0;
} catch(final FfmpegException ffe) {
long status = ffe.status;
if(status == 0)
status = AVERROR_UNKNOWN;
LOGGER.error("Pushing the frame failed in ffmpeg: {}", errorMessage(status), ffe);
return status;
} catch(final RuntimeException rte) {
final long status = AVERROR_UNKNOWN;
LOGGER.error("Pushing the frame failed in ffmpeg: {}", errorMessage(status), rte);
return status;
}
}
}
};
}
@FunctionalInterface
private static interface RawStreamSelectorCallback {
public boolean select(boolean[] selection);
}
private MediaProcessingChain selectStreams(final RawStreamSelectorCallback callback) {
final var ssc = new select_streams_callback() {
@Override
public int select_streams(final int numStreams, final Pointer selected) {
final IntBuffer buf = selected.getByteBuffer(0, Integer.BYTES * numStreams).asIntBuffer();
final boolean[] res = new boolean[numStreams];
for(int i = 0; i < numStreams; i++)
res[i] = buf.get(i) != 0;
if(!callback.select(res))
return 0;
for(int i = 0; i < numStreams; i++)
buf.put(i, res[i] ? 1 : 0);
return 1;
}
};
return manage(new CallbackStreamSelector(FfmpegApi.pcv4j_ffmpeg2_javaStreamSelector_create(ssc), ssc));
}
private MediaProcessingChain manage(final MediaProcessor newProc) {
throwIfNecessary(FfmpegApi.pcv4j_ffmpeg2_mediaProcessorChain_addProcessor(this.nativeRef, newProc.nativeRef));
processors.add(newProc);
return this;
}
private MediaProcessingChain manage(final PacketFilterWrap newProc) {
throwIfNecessary(FfmpegApi.pcv4j_ffmpeg2_mediaProcessorChain_addPacketFilter(this.nativeRef, newProc.nativeRef));
packetFilters.add(newProc);
return this;
}
}
/**
* This is the base class opaque handle to an underlying native class
* that processes packets.
*/
private static class MediaProcessor implements QuietCloseable {
final long nativeRef;
private MediaProcessor(final long nativeRef) {
this.nativeRef = nativeRef;
}
@Override
public void close() {
if(nativeRef != 0L)
FfmpegApi.pcv4j_ffmpeg2_mediaProcessor_destroy(nativeRef);
}
}
private static class MediaProcessorWithMuxer extends MediaProcessor {
private final Muxer output;
private MediaProcessorWithMuxer(final long nativeRef) {
this(nativeRef, null);
}
private MediaProcessorWithMuxer(final long nativeRef, final Muxer output) {
super(nativeRef);
this.output = output;
}
@Override
public void close() {
if(output != null)
output.close();
super.close();
}
}
private static class PacketFilterWrap implements QuietCloseable {
final long nativeRef;
private PacketFilterWrap(final long nativeRef) {
this.nativeRef = nativeRef;
}
@Override
public void close() {
if(nativeRef != 0)
FfmpegApi.pcv4j_ffmpeg2_packetFilter_destroy(nativeRef);
}
}
/**
* This is a base class for a media processor that handles decoded video frames.
*
* This is package protected to eliminate any optimization of the strong references
* required to keep the JNA callbacks from being GCed
*/
static class FrameVideoProcessor extends MediaProcessor {
// ======================================================================
// JNA will only hold a weak reference to the callbacks passed in
// so if we dynamically allocate them then they will be garbage collected.
// In order to prevent that we're keeping strong references to them.
// These are not private in order to avoid any possibility that the
// JVM optimized them out since they aren't read anywhere in this code.
public push_frame_callback pfc;
// ======================================================================
public final QuietCloseable toClose;
private FrameVideoProcessor(final long nativeRef, final push_frame_callback consumer, final QuietCloseable toClose) {
super(nativeRef);
pfc = consumer;
this.toClose = toClose;
}
public void replace(final push_frame_callback consumer) {
pfc = consumer;
FfmpegApi.pcv4j_ffmpeg2_decodedFrameProcessor_replace(super.nativeRef, consumer);
}
@Override
public void close() {
if(toClose != null)
toClose.close();
super.close();
}
}
/**
* Create a {@link MediaContext} for building a processing chain for a given media source.
*/
public static MediaContext createMediaContext() {
final long nativeRef = FfmpegApi.pcv4j_ffmpeg2_mediaContext_create();
return new MediaContext(nativeRef);
}
/**
* This is a convenience method for:
*
* <pre>
* <code>
* MediaContext.createMediaContext()
* .source(source);
* </code>
* </pre>
*/
public static MediaContext createMediaContext(final URI source) {
return createMediaContext()
.source(source);
}
/**
* This is a convenience method for:
*
* <pre>
* <code>
* MediaContext.createMediaContext()
* .source(source);
* </code>
* </pre>
*/
public static MediaContext createMediaContext(final String source) {
return createMediaContext()
.source(source);
}
/**
* This is a convenience method for:
*
* <pre>
* <code>
* MediaContext.createMediaContext()
* .source(fmt, source);
* </code>
* </pre>
*/
public static MediaContext createMediaContext(final String fmt, final String source) {
return createMediaContext()
.source(fmt, source);
}
/**
* <p>
* A {@link MediaContext} represents the coupling of an input source to a set of
* processing on the media streams in that source. It's also a builder for declaring
* the media source and the processing to be done.
* </p>
* <p align="center">
* Fig 1.
* </p>
* <p align="center">
* <img src="https://raw.githubusercontent.com/KognitionAI/pilecv4j/master/docs/Stream%20Context.png" width="500">
* </p>
*
* <p>
* <ul>
* <li><b>MediaDataSource:</b> The MediaDataSource is responsible for connecting to
* a source of media data. There are two main types of MediaDataSource.
* <ul>
* <li>The first is a simple URI based input which is instantiated when you use
* {@link #source(String)}. This is the same as the {@code -i} option
* passed to the {@code ffmpeg} command line.</li>
* <li>The second is a custom data source where you can supply raw media stream data
* dynamically by supplying a {@link MediaDataSupplier} callback implementation and
* optionally a {@link MediaDataSeek} implementation. These will be called by the
* system in order to fetch more data or, when a {@link MediaDataSeek} is supplied,
* move around in the stream.</li>
* </ul>
* </li>
* <li><b>MediaProcessingChain:</b> Data packets from the MediaDataSource are passed
* to a series of {@link MediaProcessingChain}s. {@link MediaProcessingChain}s are added
* to a {@link MediaContext} using the {@link #chain(String)} call.
* {@link MediaProcessingChain}s couple a means of selecting which media streams from
* the MediaDataSource are to be processed with the series of processing steps to apply.
* <ul>
* <li><em>StreamSelector:</em> A {@link StreamSelector} sets up a simple filter that will only
* allow packets from the selected streams through to be processed by the {@link MediaProcessor}s.
* A {@link StreamSelector} is added to a {@link MediaProcessingChain} by calling one of the
* {@code create*StreamSelector(...)} methods.</li>
* <li><em>MediaProcessor:</em> A set of {@link MediaProcessor}s can then process the packets
* that make it through the selection filter. There are currently two main processors, but this
* list will likely grow in the future:
* <ul>
* <li>A uri remuxer: This will allow the packets to be remuxed and output to a given URI.
* You add a remuxer using the call {@link MediaProcessingChain#remux(String)}</li>
* <li>A frame processor: The packets will be fully decoded and the frames will be passed
* to the callback provided. A frame processor can be added by calling
* {@link MediaProcessingChain#processVideoFrames(VideoFrameConsumer)}</li>
* </ul>
* </li>
* </ul>
* </li>
* </ul>
* </p>
* <p>
* The processing can then be kicked off by calling {@link #play()} on the fully configured
* {@link MediaContext}.
* </p>
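* <p>
* A minimal end-to-end sketch (the RTSP URI is a hypothetical example and this assumes a
* {@link VideoFrameConsumer} can be supplied as a lambda):
* </p>
*
* <pre>
* <code>
* try(final MediaContext ctx = createMediaContext("rtsp://host/stream")
*     .selectFirstVideoStream()
*     .processVideoFrames(frame -> {
*         // handle each decoded video frame here
*     });) {
*     ctx.play();
* }
* </code>
* </pre>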
* <h3>Additional Information</h3>
* <p>
* A {@link MediaContext} goes through the following internal states:
* <ul>
* <li>FRESH - When a context is instantiated, it's in this state.</li>
* <li>OPEN - The media data source is opened
* (<a href="https://ffmpeg.org/doxygen/3.4/group__lavf__decoding.html#ga31d601155e9035d5b0e7efedc894ee49">avformat_open_input</a>).</li>
* <li>LOADED - The stream information has been loaded.</li>
* <li>PROCESSORS_SETUP - The media processing chains have been set up.</li>
* <li>PLAYING - The media is being processed; entered by a call to {@link #play()}.</li>
* <li>STOPPING - A {@link #stop()} has been requested but processing hasn't ended yet.</li>
* <li>ENDED - Processing has completed.</li>
* </ul>
* </p>
*/
public static class MediaContext implements QuietCloseable {
private final long nativeRef;
private MediaDataSource dataSource = null;
private final List<MediaProcessingChain> mediaProcessingChains = new ArrayList<>();
private final Map<String, MediaProcessingChain> mediaProcessingChainsMap = new HashMap<>();
private MediaContext(final long nativeRef) {
if(nativeRef == 0)
throw new FfmpegException("Couldn't create new stream context");
this.nativeRef = nativeRef;
}
public static class StreamDetails {
public final int streamIndex;
public final int mediaType;
public final int fps_num;
public final int fps_den;
public final int tb_num;
public final int tb_den;
public final int codecId;
public final String codecName;
private StreamDetails(final FfmpegApi.internal_StreamDetails sd) {
streamIndex = sd.stream_index;
mediaType = sd.mediaType;
fps_num = sd.fps_num;
fps_den = sd.fps_den;
tb_num = sd.tb_num;
tb_den = sd.tb_den;
codecId = sd.codec_id;
codecName = sd.codecName;
}
@Override
public String toString() {
return "StreamDetails [streamIndex=" + streamIndex + ", mediaType=" + mediaType + ", fps_num=" + fps_num + ", fps_den=" + fps_den
+ ", tb_num="
+ tb_num + ", tb_den=" + tb_den + ", codecId=" + codecId + ", codecName=" + codecName + "]";
}
}
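/**
* Fetch the per-stream details (media type, frame rate, time base, codec) for the streams
* in this context's source. A minimal sketch that prints them (assuming {@code ctx} is a
* configured {@link MediaContext}):
*
* <pre>
* <code>
* Arrays.stream(ctx.getStreamDetails()).forEach(sd -> System.out.println(sd));
* </code>
* </pre>
*/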
public StreamDetails[] getStreamDetails() {
final IntByReference numStreamsRef = new IntByReference();
final LongByReference rc = new LongByReference();
final FfmpegApi.internal_StreamDetails.ByReference detailsRef = FfmpegApi.pcv4j_ffmpeg2_mediaContext_getStreamDetails(nativeRef,
numStreamsRef,
rc);
try {
throwIfNecessary(rc.getValue());
final int numStreams = numStreamsRef.getValue();
final FfmpegApi.internal_StreamDetails[] details = numStreams == 0 ? new FfmpegApi.internal_StreamDetails[0]
: (FfmpegApi.internal_StreamDetails[])detailsRef.toArray(numStreams);
return Arrays.stream(details)
.map(sd -> new StreamDetails(sd))
.toArray(StreamDetails[]::new);
} finally {
FfmpegApi.pcv4j_ffmpeg2_streamDetails_deleteArray(detailsRef.getPointer());
}
}
/**
* Kick off the media processing
*/
public MediaContext play() {
throwIfNecessary(FfmpegApi.pcv4j_ffmpeg2_mediaContext_play(nativeRef), Ffmpeg.AVERROR_EOF_KOGSTAT);
return this;
}
/**
* Add an option to be passed to the processing chain. These are options you would
* pass to the ffmpeg command line.
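*
* For example, to force TCP transport for an RTSP source:
*
* <pre>
* <code>
* ctx.addOption("rtsp_transport", "tcp");
* </code>
* </pre>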
*/
public MediaContext addOption(final String key, final String value) {
throwIfNecessary(FfmpegApi.pcv4j_ffmpeg2_mediaContext_addOption(nativeRef, key, value));
return this;
}
/**
* Add options to be passed to the processing chain. These are options you would
* pass to the ffmpeg command line.
*/
public MediaContext addOptions(final Map<String, String> options) {
options.entrySet().stream().forEach(e -> addOption(e.getKey(), e.getValue()));
return this;
}
/**
* Stop processing. This will cause the call to {@link MediaContext#play()} to return.
*/
public synchronized MediaContext stop() {
throwIfNecessary(FfmpegApi.pcv4j_ffmpeg2_mediaContext_stop(nativeRef));
return this;
}
/**
* Synchronize the processing with the media stream's timing.
*
* NOTE: This should NOT be used if the media source is a live stream.
*/
public synchronized MediaContext sync() {
FfmpegApi.pcv4j_ffmpeg2_mediaContext_sync(nativeRef);
return this;
}
public MediaContext optionally(final boolean doIt, final Consumer<MediaContext> ctxWork) {
if(doIt)
ctxWork.accept(this);
return this;
}
public MediaContext peek(final Consumer<MediaContext> ctxWork) {
ctxWork.accept(this);
return this;
}
public synchronized int currentState() {
return FfmpegApi.pcv4j_ffmpeg2_mediaContext_state(nativeRef);
}
@Override
public void close() {
if(nativeRef != 0) {
if(currentState() == FfmpegApi.STREAM_CONTEXT_STATE_PLAYING) {
stop();
final long endTime = System.currentTimeMillis() + 10000; // give it 10 seconds to stop
while(currentState() != FfmpegApi.STREAM_CONTEXT_STATE_ENDED && (System.currentTimeMillis() < endTime))
Thread.yield();
if(currentState() != FfmpegApi.STREAM_CONTEXT_STATE_ENDED)
LOGGER.warn("Couldn't stop the playing stream.");
}
FfmpegApi.pcv4j_ffmpeg2_mediaContext_delete(nativeRef);
}
if(dataSource != null)
dataSource.close();
Functional.reverseRange(0, mediaProcessingChains.size())
.mapToObj(i -> mediaProcessingChains.get(i))
.forEach(p -> p.close());
mediaProcessingChains.clear();
}
// ======================================================================
// MEDIA DATA SOURCE SUPPORT
// ======================================================================
/**
* Create a data source from a URI or file name. This sources media data from whatever the
* URI points to, provided ffmpeg supports it.
*/
public MediaContext source(final String source) {
final long nativeVds = FfmpegApi.pcv4j_ffmpeg2_uriMediaDataSource_create(source);
if(nativeVds == 0)
throw new FfmpegException("Failed to create a uri based native MediaDataSource");
return manage(new MediaDataSource(nativeVds));
}
/**
* Create a data source from a URI. This sources media data from whatever the
* URI points to, provided ffmpeg supports it.
*/
public MediaContext source(final URI url) {
final String uriStr;
// dewindowsfy the URI.
if("file".equals(url.getScheme())) {
// we need to fix the path if there's a windows disk in the uri.
String tmp = url.toString();
if(tmp.startsWith("file:"))
tmp = tmp.substring("file:".length());
while(tmp.startsWith("/"))
tmp = tmp.substring(1);
if(tmp.length() > 1 && tmp.charAt(1) == ':')
uriStr = "file:" + tmp;
else
uriStr = url.toString();
} else
uriStr = url.toString();
final long nativeVds = FfmpegApi.pcv4j_ffmpeg2_uriMediaDataSource_create(uriStr);
if(nativeVds == 0)
throw new FfmpegException("Failed to create a uri based native MediaDataSource");
return manage(new MediaDataSource(nativeVds));
}
/**
* <p>
* Create a data source from a URI or file name and explicitly specify the format. This
* sources media data from whatever the URI points to, using the explicit format,
* provided ffmpeg supports it.
* </p>
*
* <p>
* This can be used to specify a device like a web cam. For example, on Linux you
* can call {@code source("video4linux2", "/dev/video0")}.
* See <a href="https://trac.ffmpeg.org/wiki/Capture/Webcam">the ffmpeg webcam capture wiki</a> for more details.
* </p>
*
* @see <a href="https://trac.ffmpeg.org/wiki/Capture/Webcam">ffmpeg wiki: Capture/Webcam</a>
*/
public MediaContext source(final String fmt, final String rawFile) {
final long nativeVds = FfmpegApi.pcv4j_ffmpeg2_uriMediaDataSource_create2(fmt, rawFile);
if(nativeVds == 0)
throw new FfmpegException("Failed to create a uri based native MediaDataSource");
return manage(new MediaDataSource(nativeVds));
}
/**
* Create a raw data source based on FFmpeg customIO. This is the same as calling
* {@code source(vds, null)}.
*/
public MediaContext source(final MediaDataSupplier vds) {
return source(vds, null);
}
/**
* Create a raw data source based on FFmpeg customIO. The job of the data supplier is to fill
* the buffer with at most the requested number of bytes and return the number of bytes it was
* able to put in the buffer. When the MediaDataSeek is not null, it's used to seek to the
* desired location in the stream.
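*
* <p>
* A minimal sketch that feeds the stream from an in-memory byte array (the file path is a
* hypothetical example, this assumes {@link MediaDataSupplier} can be supplied as a lambda,
* and the convention of returning a negative value to signal end-of-stream is an assumption):
* </p>
*
* <pre>
* <code>
* final byte[] media = Files.readAllBytes(Paths.get("/tmp/media.mp4"));
* final long[] pos = {0};
* ctx.source((buf, numBytes) -> {
*     // copy up to numBytes from our array into the native buffer
*     final int count = (int)Math.min(numBytes, media.length - pos[0]);
*     if(count <= 0)
*         return -1; // assumption: signal end-of-stream
*     buf.put(media, (int)pos[0], count);
*     pos[0] += count;
*     return count;
* }, null);
* </code>
* </pre>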
*/
public MediaContext source(final MediaDataSupplier dataSupplier, final MediaDataSeek seek) {
final var ret = new CustomMediaDataSource(FfmpegApi.pcv4j_ffmpeg2_customMediaDataSource_create());
final ByteBuffer buffer = ret.customStreamBuffer();
final int bufSize = ret.bufSize; // set after customStreamBuffer is called.
ret.set(new fill_buffer_callback() {
@Override
public int fill_buffer(final int numBytesRequested) {
final int numBytes = Math.min(numBytesRequested, bufSize);
buffer.rewind();
return dataSupplier.fillBuffer(buffer, numBytes);
}
},
seek != null ? new seek_buffer_callback() {
@Override
public long seek_buffer(final long offset, final int whence) {
return seek.seekBuffer(offset, whence);
}
} : null);
return manage(ret);
}
// ======================================================================
// MEDIA PROCESSING SUPPORT
// ======================================================================
/**
* Return the existing {@link MediaProcessingChain} with the given name, or create it if it doesn't exist.
*/
public MediaProcessingChain chain(final String chainName) {
final MediaProcessingChain cur = mediaProcessingChainsMap.get(chainName);
if(cur != null)
return cur;
final long nativeRef = FfmpegApi.pcv4j_ffmpeg2_mediaProcessorChain_create();
if(nativeRef == 0)
throw new FfmpegException("Failed to create a media processing chain");
return manage(new MediaProcessingChain(chainName, nativeRef, this));
}
// ========================================================================================
// Convenience methods for using the default MediaProcessingChain
// ========================================================================================
/**
* A convenience method for operating on the default chain. It's equivalent to:
*
* <pre>
* <code>
* mediaContext
* .chain(DEFAULT_CHAIN_NAME)
* .selectFirstVideoStream()
* .mediaContext();
* </code>
* </pre>
*
* @see {@link MediaProcessingChain#selectFirstVideoStream()}
*/
public MediaContext selectFirstVideoStream() {
return chain(DEFAULT_CHAIN_NAME).selectFirstVideoStream().mediaContext();
}
/**
* A convenience method for operating on the default chain. It's equivalent to:
*
* <pre>
* <code>
* mediaContext
* .chain(DEFAULT_CHAIN_NAME)
* .processVideoFrames(consumer)
* .mediaContext();
* </code>
* </pre>
*
* @see {@link MediaProcessingChain#processVideoFrames(VideoFrameConsumer)}
*/
public MediaContext processVideoFrames(final VideoFrameConsumer consumer) {
return chain(DEFAULT_CHAIN_NAME).processVideoFrames(consumer).mediaContext();
}
/**
* A convenience method for operating on the default chain. It's equivalent to:
*
* <pre>
* <code>
* mediaContext
* .chain(DEFAULT_CHAIN_NAME)
* .processVideoFrames(initializer, consumer)
* .mediaContext();
* </code>
* </pre>
*
* @see {@link MediaProcessingChain#processVideoFrames(VideoFrameConsumer, VideoFrameConsumer)}
*/
public MediaContext processVideoFrames(final VideoFrameConsumer initializer, final VideoFrameConsumer consumer) {
return chain(DEFAULT_CHAIN_NAME).processVideoFrames(initializer, consumer).mediaContext();
}
/**
* A convenience method for operating on the default chain. It's equivalent to:
*
* <pre>
* <code>
* mediaContext
* .chain(DEFAULT_CHAIN_NAME)
* .processVideoFrames(decoder, consumer)
* .mediaContext();
* </code>
* </pre>
*
* @see {@link MediaProcessingChain#processVideoFrames(String, VideoFrameConsumer)}
*/
public MediaContext processVideoFrames(final String decoder, final VideoFrameConsumer consumer) {
return chain(DEFAULT_CHAIN_NAME).processVideoFrames(decoder, consumer).mediaContext();
}
/**
* A convenience method for operating on the default chain. It's equivalent to:
*
* <pre>
* <code>
* mediaContext
* .chain(DEFAULT_CHAIN_NAME)
* .processVideoFrames(decoder, initializer, consumer)
* .mediaContext();
* </code>
* </pre>
*
* @see {@link MediaProcessingChain#processVideoFrames(String, VideoFrameConsumer, VideoFrameConsumer)}
*/
public MediaContext processVideoFrames(final String decoder, final VideoFrameConsumer initializer, final VideoFrameConsumer consumer) {
return chain(DEFAULT_CHAIN_NAME).processVideoFrames(decoder, initializer, consumer).mediaContext();
}
/**
* A convenience method for operating on the default chain. It's equivalent to:
*
* <pre>
* <code>
* mediaContext
* .chain(DEFAULT_CHAIN_NAME)
* .processVideoFrames(maxDim, consumer)
* .mediaContext();
* </code>
* </pre>
*
* @see {@link MediaProcessingChain#processVideoFrames(int, VideoFrameConsumer)}
*/
public MediaContext processVideoFrames(final int maxDim, final VideoFrameConsumer consumer) {
return chain(DEFAULT_CHAIN_NAME).processVideoFrames(maxDim, consumer).mediaContext();
}
/**
* A convenience method for operating on the default chain. It's equivalent to:
*
* <pre>
* <code>
* mediaContext
* .chain(DEFAULT_CHAIN_NAME)
* .processVideoFrames(maxDim, initializer, consumer)
* .mediaContext();
* </code>
* </pre>
*
* @see {@link MediaProcessingChain#processVideoFrames(int, VideoFrameConsumer, VideoFrameConsumer)}
*/
public MediaContext processVideoFrames(final int maxDim, final VideoFrameConsumer initializer, final VideoFrameConsumer consumer) {
return chain(DEFAULT_CHAIN_NAME).processVideoFrames(maxDim, initializer, consumer).mediaContext();
}
/**
* A convenience method for operating on the default chain. It's equivalent to:
*
* <pre>
* <code>
* mediaContext
* .chain(DEFAULT_CHAIN_NAME)
* .processVideoFrames(maxDim, decoder, consumer)
* .mediaContext();
* </code>
* </pre>
*
* @see {@link MediaProcessingChain#processVideoFrames(int, String, VideoFrameConsumer)}
*/
public MediaContext processVideoFrames(final int maxDim, final String decoder, final VideoFrameConsumer consumer) {
return chain(DEFAULT_CHAIN_NAME).processVideoFrames(maxDim, decoder, consumer).mediaContext();
}
/**
* A convenience method for operating on the default chain. It's equivalent to:
*
* <pre>
* <code>
* mediaContext
* .chain(DEFAULT_CHAIN_NAME)
* .processVideoFrames(maxDim, decoder, initializer, consumer)
* .mediaContext();
* </code>
* </pre>
*
* @see {@link MediaProcessingChain#processVideoFrames(int, String, VideoFrameConsumer, VideoFrameConsumer)}
*/
public MediaContext processVideoFrames(final int maxDim, final String decoder, final VideoFrameConsumer initializer,
final VideoFrameConsumer consumer) {
return chain(DEFAULT_CHAIN_NAME).processVideoFrames(maxDim, decoder, initializer, consumer).mediaContext();
}
/**
* A convenience method for operating on the default chain. It's equivalent to:
*
* <pre>
* <code>
* mediaContext
* .chain(DEFAULT_CHAIN_NAME)
* .remux(muxer)
* .mediaContext();
* </code>
* </pre>
*
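* A typical use remuxes a network source into a local file (the URIs are hypothetical):
*
* <pre>
* <code>
* createMediaContext("rtsp://host/stream")
*     .remux(Muxer.create("file:///tmp/out.mp4"))
*     .play();
* </code>
* </pre>
*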
* @see {@link MediaProcessingChain#remux(Muxer)}
*/
public MediaContext remux(final Muxer muxer) {
return chain(DEFAULT_CHAIN_NAME).remux(muxer).mediaContext();
}
/**
* A convenience method for operating on the default chain. It's equivalent to:
*
* <pre>
* <code>
* mediaContext
* .chain(DEFAULT_CHAIN_NAME)
* .remux(muxer, maxRemuxErrorCount)
* .mediaContext();
* </code>
* </pre>
*
* @see {@link MediaProcessingChain#remux(Muxer, int)}
*/
public MediaContext remux(final Muxer muxer, final int maxRemuxErrorCount) {
return chain(DEFAULT_CHAIN_NAME).remux(muxer, maxRemuxErrorCount).mediaContext();
}
/**
* A convenience method for operating on the default chain. It's equivalent to:
*
* <pre>
* <code>
* mediaContext
* .chain(DEFAULT_CHAIN_NAME)
* .selectStreams(streamSelector)
* .mediaContext();
* </code>
* </pre>
*
* @see {@link MediaProcessingChain#selectStreams(StreamSelectorCallback)}
*/
public MediaContext selectStreams(final StreamSelectorCallback streamSelector) {
return chain(DEFAULT_CHAIN_NAME).selectStreams(streamSelector).mediaContext();
}
/**
* A convenience method for operating on the default chain. It's equivalent to:
*
* <pre>
* <code>
* mediaContext
* .chain(DEFAULT_CHAIN_NAME)
* .filterPackets(packetFilter)
* .mediaContext();
* </code>
* </pre>
*
* @see {@link MediaProcessingChain#filterPackets(PacketFilter)}
*/
public MediaContext filterPackets(final PacketFilter packetFilter) {
return chain(DEFAULT_CHAIN_NAME).filterPackets(packetFilter).mediaContext();
}
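/**
* A convenience method for filtering packets on the default chain using a predicate over the
* packet's {@link PacketMetadata}. For example, to let only key frames through (a minimal sketch):
*
* <pre>
* <code>
* mediaContext.filterPackets(md -> md.isKeyFrame());
* </code>
* </pre>
*/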
public MediaContext filterPackets(final Function<PacketMetadata, Boolean> packetFilter) {
return chain(DEFAULT_CHAIN_NAME).filterPackets(packetFilter).mediaContext();
}
public MediaContext preferBgr() {
return chain(DEFAULT_CHAIN_NAME).preferBgr().mediaContext();
}
private MediaContext manage(final MediaDataSource vds) {
throwIfNecessary(FfmpegApi.pcv4j_ffmpeg2_mediaContext_setSource(nativeRef, vds.nativeRef));
dataSource = vds;
return this;
}
private MediaProcessingChain manage(final MediaProcessingChain vds) {
throwIfNecessary(FfmpegApi.pcv4j_ffmpeg2_mediaContext_addProcessor(nativeRef, vds.nativeRef));
mediaProcessingChains.add(vds);
mediaProcessingChainsMap.put(vds.getName(), vds);
return vds;
}
}
// ======================================================================
// ENCODER
// ======================================================================
/**
* Create an encoding context.
*/
public static EncodingContext createEncoder() {
final long nativeRef = FfmpegApi.pcv4j_ffmpeg2_encodingContext_create();
return new EncodingContext(nativeRef);
}
/**
* Create an encoding context with a predefined muxer. This is a convenience method for:
*
* <pre>
* <code>
* createEncoder()
* .muxer(Muxer.create(outputUri))
* </code>
* </pre>
*/
public static EncodingContext createEncoder(final String outputUri) {
return createEncoder()
.muxer(Muxer.create(outputUri));
}
public static class LiveFeedEncoder implements QuietCloseable {
private static final AtomicLong threadCount = new AtomicLong(0);
private final VideoEncoder videoEncoder;
private boolean deepCopy = false;
private final AtomicBoolean stopMe = new AtomicBoolean(false);
private final AtomicReference<Sample> onDeck = new AtomicReference<>(null);
private Thread encoderThread = null;
private final AtomicReference<RuntimeException> failure = new AtomicReference<>(null);
private final static class Sample implements QuietCloseable {
public final CvMat frame;
public final boolean isRgb;
public Sample(final CvMat frame, final boolean isRgb) {
this.frame = frame;
this.isRgb = isRgb;
}
@Override
public void close() {
frame.close();
}
}
private LiveFeedEncoder(final VideoEncoder ctx) {
this.videoEncoder = ctx;
}
public LiveFeedEncoder deepCopy(final boolean deepCopy) {
this.deepCopy = deepCopy;
return this;
}
public void encode(final Mat frame, final boolean isRgb) {
final RuntimeException rte = failure.get();
if(rte != null)
throw new FfmpegException("Error from encoder thread", rte);
if(frame != null) {
try(CvMat copied = deepCopy ? CvMat.deepCopy(frame) : CvMat.shallowCopy(frame);) {
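// Swap the new sample in; the empty try-with-resources closes whatever sample
// was previously on deck since it will never be encoded.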
try(var sample = onDeck.getAndSet(new Sample(copied.returnMe(), isRgb));) {}
}
}
}
public EncodingContext encodingContext() {
return videoEncoder.encodingContext();
}
@Override
public void close() {
stopMe.set(true);
if(encoderThread != null) {
ignore(() -> encoderThread.join(5000));
if(encoderThread.isAlive())
LOGGER.warn("Failed to stop the encoder thread.");
}
videoEncoder.close();
}
private void start() {
encoderThread = chain(new Thread(() -> {
Sample prev;
// wait for at least one.
{
Sample curSample;
do {
curSample = onDeck.getAndSet(null);
if(curSample == null)
Thread.yield();
} while(curSample == null && !stopMe.get());
prev = curSample;
}
while(!stopMe.get()) {
final Sample toEncode;
{
final Sample curSample = onDeck.getAndSet(null);
if(curSample != null) {
prev.close();
toEncode = prev = curSample;
} else
toEncode = prev;
}
try {
videoEncoder.encode(toEncode.frame, toEncode.isRgb);
} catch(final RuntimeException rte) {
LOGGER.error("Live stream encoding thread threw an exception while encoding.", rte);
failure.set(rte);
break;
}
}
}, "Encoding Thread " + threadCount.getAndIncrement()), t -> t.start());
}
}
/**
* Class to encode streams to a Muxer. An {@code EncodingContext} can have many
* {@code Encoder}s. Currently the only supported encoding type is the {@link VideoEncoder}.
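*
* <p>
* A minimal sketch that encodes OpenCV {@code Mat}s to a file (the output URI is a
* hypothetical example and {@code frames} is some assumed source of BGR images):
* </p>
*
* <pre>
* <code>
* try(final EncodingContext ectx = createEncoder("file:///tmp/out.mp4");) {
*     final VideoEncoder ve = ectx.defaultVideoEncoder();
*     ve.setFps(30, 1);
*     for(final Mat frame: frames)
*         ve.encode(frame, false); // false: the frames are BGR, not RGB
* }
* </code>
* </pre>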
*/
public static class EncodingContext implements QuietCloseable {
public static final String DEFAULT_VIDEO_ENCODER_NAME = "pilecv4j:default:videoEncoder";
/**
* An {@code Encoder} within an {@link EncodingContext} that encodes a single stream
* to be muxed by the {@link EncodingContext}'s {@link Muxer}.
*/
public class VideoEncoder {
private final long nativeRef;
private boolean closed = false;
private VideoEncoder(final long nativeRef) {
this.nativeRef = nativeRef;
}
/**
* Add codec specific options to this video encoder.
*/
public VideoEncoder addCodecOptions(final String key, final String values) {
throwIfNecessary(FfmpegApi.pcv4j_ffmpeg2_videoEncoder_addCodecOption(nativeRef, key, values));
return this;
}
/**
*
* Set the frame rate as a rational.
*
* @param num - numerator
* @param den - denominator
*/
public VideoEncoder setFps(final int num, final int den) {
throwIfNecessary(FfmpegApi.pcv4j_ffmpeg2_videoEncoder_setFramerate(nativeRef, num, den));
return this;
}
public VideoEncoder setFps(final MediaContext ctx) {
final StreamDetails sd = Arrays.stream(ctx.getStreamDetails())
.filter(d -> d.mediaType == Ffmpeg.AVMEDIA_TYPE_VIDEO)
.findFirst()
.orElseThrow(
() -> new FfmpegException(
"There doesn't appear to be any video streams in the given " + MediaContext.class.getSimpleName() + " source."));
setFps(sd.fps_num, sd.fps_den);
return this;
}
/**
* <p>
* Set the encoder's output picture resolution.
* </p>
*
* <p>
* You can also request that the aspect ratio be preserved, in which case the requested
* width or height will be adjusted so that the image fits within the given bounds
* while maintaining its original aspect ratio.
* </p>
*
* <p>
* You can set either width or height to -1 in which case it will calculate that
* value from the other assuming the aspect ratio is being preserved. Note that if you
* do this then the {@code preserveAspectRatio} flag is ignored.
* </p>
*
* <p>
* If you specify {@code onlyScaleDown} and the frames being encoded are already smaller
* than the width and height requested, no scaling will take place.
* </p>
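*
* <p>
* For example, to bound the output to 1280x720 while preserving the aspect ratio and
* never scaling up (a sketch):
* </p>
*
* <pre>
* <code>
* videoEncoder.setOutputDims(1280, 720, true, true);
* </code>
* </pre>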
*/
public VideoEncoder setOutputDims(final int width, final int height, final boolean preserveAspectRatio, final boolean onlyScaleDown) {
throwIfNecessary(FfmpegApi.pcv4j_ffmpeg2_videoEncoder_setOutputDims(nativeRef, width, height,
preserveAspectRatio ? 1 : 0, onlyScaleDown ? 1 : 0));
return this;
}
/**
* Some encoders support setting the rate control buffer size. This is done in conjunction with setting
* the rate control min and max bit rates. See the
* <a href="https://trac.ffmpeg.org/wiki/Encode/H.264">ffmpeg H.264 encoding wiki</a> discussion on rate control.
*/
public VideoEncoder setRcBufferSize(final int pbufferSize) {
throwIfNecessary(FfmpegApi.pcv4j_ffmpeg2_videoEncoder_setRcBufferSize(nativeRef, pbufferSize));
return this;
}
/**
* Some encoders support setting the min and max bitrate. This is done in conjunction with setting a buffer size.
* See the <a href="https://trac.ffmpeg.org/wiki/Encode/H.264">ffmpeg H.264 encoding wiki</a> discussion on rate control.
*/
public VideoEncoder setRcBitrate(final long pminBitrate, final long pmaxBitrate) {
throwIfNecessary(FfmpegApi.pcv4j_ffmpeg2_videoEncoder_setRcBitrate(nativeRef, pminBitrate, pmaxBitrate));
return this;
}
/**
* Some encoders support setting the target bitrate.
* See the <a href="https://trac.ffmpeg.org/wiki/Encode/H.264">ffmpeg H.264 encoding wiki</a>.
*/
public VideoEncoder setTargetBitrate(final long bitrate) {
throwIfNecessary(FfmpegApi.pcv4j_ffmpeg2_videoEncoder_setTargetBitrate(nativeRef, bitrate));
return this;
}
/**
* Enabling a video context sets up the transformation from input to output. This needs to
* be called before encoding begins but will be called automatically if it isn't. You can pass
* -1 for stride, destWidth, and/or destHeight and the values will be inferred from the input
* values.
*
* @param stride is the number of bytes in a row of pixels of the input to the encoder. If it's
* set to -1 it will be assumed to be 3 * width.
* @param destWidth is the destination encoded video picture width. If it's set to -1 it will be assumed
* to be the same as the input width.
* @param destHeight is the destination encoded video picture height. If it's set to -1 it will be assumed
* to be the same as the input height.
*/
public EncodingContext enable(final boolean isRgb, final int width, final int height, final int stride, final int destWidth,
final int destHeight) {
throwIfNecessary(FfmpegApi.pcv4j_ffmpeg2_videoEncoder_enable(nativeRef, isRgb ? 1 : 0, width, height, stride, destWidth, destHeight));
return EncodingContext.this;
}
/**
* Enabling a video context sets up the transformation from input to output. This call
* assumes the video output will match the input specs. It's a convenience method for:
*
* <pre>
* <code>
* enable(isRgb, frame.width(), frame.height(), (int)frame.step1(), -1, -1);
* </code>
* </pre>
*
* @see {@link EncodingContext.VideoEncoder#enable(boolean, int, int, int, int, int)}
*/
public EncodingContext enable(final Mat frame, final boolean isRgb) {
return enable(isRgb, frame.width(), frame.height(), (int)frame.step1(), -1, -1);
}
/**
* Enabling a video context sets up the transformation from input to output. This call
* assumes the video output will match the input specs. It's a convenience method for:
*
* <pre>
* <code>
* enable(isRgb, width, height, stride, -1, -1);
* </code>
* </pre>
*
* @see {@link EncodingContext.VideoEncoder#enable(boolean, int, int, int, int, int)}
*/
public EncodingContext enable(final boolean isRgb, final int width, final int height, final int stride) {
return enable(isRgb, width, height, stride, -1, -1);
}
/**
* Enabling a video context sets up the transformation from input to output. This call
* assumes the video output will match the input specs. It's a convenience method for:
*
* <pre>
* <code>
* enable(isRgb, width, height, -1, -1, -1);
* </code>
* </pre>
*
* @see {@link EncodingContext.VideoEncoder#enable(boolean, int, int, int, int, int)}
*/
public EncodingContext enable(final boolean isRgb, final int width, final int height) {
return enable(isRgb, width, height, -1, -1, -1);
}
/**
* Enabling a video context sets up the transformation from input to output. This call
* lets you specify the output dimensions explicitly. It's a convenience method for:
*
* <pre>
* <code>
* enable(isRgb, frame.width(), frame.height(), (int)frame.step1(), destWidth, destHeight);
* </code>
* </pre>
*
* @see {@link EncodingContext.VideoEncoder#enable(boolean, int, int, int, int, int)}
*/
public EncodingContext enable(final Mat frame, final boolean isRgb, final int destWidth, final int destHeight) {
return enable(isRgb, frame.width(), frame.height(), (int)frame.step1(), destWidth, destHeight);
}
/**
* Enabling a video context sets up the transformation from input to output. This call
* lets you specify the output dimensions explicitly. It's a convenience method for:
*
* <pre>
* <code>
* enable(isRgb, width, height, -1, destWidth, destHeight);
* </code>
* </pre>
*
* @see {@link VideoEncoder#enable(boolean, int, int, int, int, int)}
*/
public EncodingContext enable(final boolean isRgb, final int width, final int height, final int destWidth, final int destHeight) {
return enable(isRgb, width, height, -1, destWidth, destHeight);
}
/**
* Encode the given image. If the {@link VideoEncoder} has not been explicitly {@code enable}d, it will be done
* prior to encoding the first frame, and the output parameters will be assumed equivalent to those of the given frame.
*/
public void encode(final Mat frame, final boolean isRgb) {
throwIfNecessary(FfmpegApi.pcv4j_ffmpeg2_videoEncoder_encode(nativeRef, frame.nativeObj, isRgb ? 1 : 0));
}
/**
* Encode the given image. If the {@link VideoEncoder} has not been explicitly {@code enable}d, it will be done
* prior to encoding the first frame, and the output parameters will be assumed equivalent to those of the given frame.
*/
public void encode(final VideoFrame frame) {
throwIfNecessary(FfmpegApi.pcv4j_ffmpeg2_videoEncoder_encode(nativeRef, frame.nativeObj, frame.isRgb ? 1 : 0));
}
/**
* Stop the current encoding process on this stream
*/
public void stop() {
throwIfNecessary(FfmpegApi.pcv4j_ffmpeg2_videoEncoder_stop(nativeRef));
}
/**
* If the destination for the encoding is a live stream, this will allow for the decoupling
* of the input from the output. If the input slows down, the output will send duplicate frames.
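*
* <p>
* A minimal sketch (assuming {@code mat} arrives from some live capture callback):
* </p>
*
* <pre>
* <code>
* final LiveFeedEncoder live = videoEncoder.liveFeedEncoder();
* // then, from the live capture callback:
* live.encode(mat, false);
* // and when the feed is finished:
* live.close();
* </code>
* </pre>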
*/
public LiveFeedEncoder liveFeedEncoder(final long maxLatencyMillis) {
throwIfNecessary(FfmpegApi.pcv4j_ffmpeg2_videoEncoder_streaming(nativeRef));
final var r = new LiveFeedEncoder(this);
r.start();
return r;
}
/**
* If the destination for the encoding is a live stream, this will allow for the decoupling
* of the input from the output. If the input slows down, the output will send duplicate frames.
* This is a convenience method for:
*
* <pre>
* <code>
* liveFeedEncoder(DEFAULT_MAX_LATENCY_MILLIS);
* </code>
* </pre>
*/
public LiveFeedEncoder liveFeedEncoder() {
return liveFeedEncoder(DEFAULT_MAX_LATENCY_MILLIS);
}
/**
* Return the {@link EncodingContext} associated with this {@link VideoEncoder}.
*/
public EncodingContext encodingContext() {
return EncodingContext.this;
}
private void close() {
if(!closed && nativeRef != 0)
FfmpegApi.pcv4j_ffmpeg2_videoEncoder_delete(nativeRef);
closed = true;
}
}
private long nativeRef;
private final LinkedList<VideoEncoder> toClose = new LinkedList<>();
private final Map<String, VideoEncoder> encoders = new HashMap<>();
private Muxer output = null;
private VideoEncoder firstVideoEncoder = null;
private EncodingContext(final long nativeRef) {
this.nativeRef = nativeRef;
}
/**
* Explicitly set the muxer for the encoding context output.
*/
public EncodingContext muxer(final Muxer muxer) {
this.output = muxer;
throwIfNecessary(FfmpegApi.pcv4j_ffmpeg2_encodingContext_setMuxer(nativeRef, muxer.nativeRef));
return this;
}
/**
* Fetch the video encoder by name, or create it if it doesn't exist yet using
* the given codec. If the codec string is null then the codec will be inferred
* from the output muxer details.
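*
* <p>
* For example (assuming the {@code libx264} encoder is available in the underlying
* ffmpeg build):
* </p>
*
* <pre>
* <code>
* final VideoEncoder ve = encodingContext.videoEncoder("libx264", "main-video");
* </code>
* </pre>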
*/
public VideoEncoder videoEncoder(final String codec, final String name) {
final var existing = encoders.get(name);
if(existing != null)
return existing;
final var ret = new VideoEncoder(FfmpegApi.pcv4j_ffmpeg2_encodingContext_openVideoEncoder(nativeRef, codec));
if(firstVideoEncoder == null)
firstVideoEncoder = ret;
toClose.addFirst(ret);
encoders.put(name, ret);
return ret;
}
/**
* The default video encoder is the first one created. If one hasn't been created yet,
* one will be created with a codec inferred from the output muxer and returned.
*/
public VideoEncoder defaultVideoEncoder() {
if(firstVideoEncoder != null)
return firstVideoEncoder;
else
return videoEncoder(null, DEFAULT_VIDEO_ENCODER_NAME);
}
/**
* Fetch an existing video encoder by name. Return null if the encoder doesn't exist yet.
*/
public VideoEncoder getExistingVideoEncoder(final String encoderName) {
return encoders.get(encoderName);
}
/**
* Explicitly ready the encoding context for encoding. If this is not called prior to
* encoding the first image, the encoding context will be readied automatically.
*/
public EncodingContext ready() {
throwIfNecessary(FfmpegApi.pcv4j_ffmpeg2_encodingContext_ready(nativeRef));
return this;
}
/**
* Close the encoding context. This will finalize the underlying stream flushing buffers
* and writing trailers.
*/
@Override
public void close() {
if(nativeRef == 0)
return;
ignore(() -> stop(), re -> LOGGER.error("Failed on stopping the EncodingContext", re));
toClose.forEach(q -> q.close());
toClose.clear();
// output should be closed ONLY after the video encoders
if(output != null)
output.close();
output = null;
if(nativeRef != 0) {
FfmpegApi.pcv4j_ffmpeg2_encodingContext_delete(nativeRef);
nativeRef = 0;
}
}
/**
* This is a convenience method for accessing the default video encoder. It's
* equivalent to:
*
* <pre>
* <code>
* defaultVideoEncoder().setOutputDims(width, height, preserveAspectRatio, onlyScaleDown);
* </code>
* </pre>
*
*/
public EncodingContext setOutputDims(final int width, final int height, final boolean preserveAspectRatio, final boolean onlyScaleDown) {
defaultVideoEncoder().setOutputDims(width, height, preserveAspectRatio, onlyScaleDown);
return this;
}
/**
* This is a convenience method for accessing the default video encoder. It's
* equivalent to:
*
* <pre>
* <code>
* defaultVideoEncoder().setFps(num, den);
* </code>
* </pre>
*
*/
public EncodingContext setFps(final int num, final int den) {
defaultVideoEncoder().setFps(num, den);
return this;
}
/**
* This is a convenience method for accessing the default video encoder. It's
* equivalent to:
*
* <pre>
* <code>
* defaultVideoEncoder().addCodecOptions(optionName, optionValue);
* </code>
* </pre>
*
*/
public EncodingContext addCodecOptions(final String optionName, final String optionValue) {
defaultVideoEncoder().addCodecOptions(optionName, optionValue);
return this;
}
/**
* This is a convenience method for accessing the default video encoder. It's
* equivalent to:
*
* <pre>
* <code>
* defaultVideoEncoder().encode(frame);
* </code>
* </pre>
*
*/
public EncodingContext encode(final VideoFrame frame) {
defaultVideoEncoder().encode(frame);
return this;
}
/**
* Set the frame rate on the default video encoder using the details from the
* given media context. This will find the first video stream in the
* {@link MediaContext} and use its frame rate for the encoded output.
*/
public EncodingContext setFps(final MediaContext ctx) {
final StreamDetails sd = Arrays.stream(ctx.getStreamDetails())
.filter(d -> d.mediaType == Ffmpeg.AVMEDIA_TYPE_VIDEO)
.findFirst()
.orElseThrow(
() -> new FfmpegException("There doesn't appear to be any video streams in the given " + MediaContext.class.getSimpleName() + " source."));
defaultVideoEncoder()
.setFps(sd.fps_num, sd.fps_den);
return this;
}
private EncodingContext stop() {
encoders.values().forEach(v -> v.stop());
throwIfNecessary(FfmpegApi.pcv4j_ffmpeg2_encodingContext_stop(nativeRef));
return this;
}
}
private static class MediaDataSource implements QuietCloseable {
protected final long nativeRef;
private MediaDataSource(final long nativeRef) {
this.nativeRef = nativeRef;
}
@Override
public void close() {
if(nativeRef != 0L)
FfmpegApi.pcv4j_ffmpeg2_mediaDataSource_destroy(nativeRef);
}
}
/**
* This is package protected to prevent the JVM from optimizing away the strong references
* required to keep the JNA callbacks from being GCed.
*/
static class CustomMediaDataSource extends MediaDataSource {
// ======================================================================
// JNA will only hold a weak reference to the callbacks passed in
// so if we dynamically allocate them then they will be garbage collected.
// In order to prevent that we're keeping strong references to them.
// These are not private in order to avoid any possibility that the
// JVM optimized them out since they aren't read anywhere in this code.
public fill_buffer_callback strongRefDs;
public seek_buffer_callback strongRefS;
// ======================================================================
int bufSize = -1;
public CustomMediaDataSource(final long nativeRef) {
super(nativeRef);
}
private void set(final fill_buffer_callback fill, final seek_buffer_callback seek) {
strongRefDs = fill;
strongRefS = seek;
FfmpegApi.pcv4j_ffmpeg2_customMediaDataSource_set(nativeRef, fill, seek);
}
private ByteBuffer customStreamBuffer() {
final Pointer value = FfmpegApi.pcv4j_ffmpeg2_customMediaDataSource_buffer(nativeRef);
bufSize = FfmpegApi.pcv4j_ffmpeg2_customMediaDataSource_bufferSize(nativeRef);
return value.getByteBuffer(0, bufSize);
}
}
private static void throwIfNecessary(final long status, final long... ignore) {
final Set<Long> toIgnore = Arrays.stream(ignore).mapToObj(Long::valueOf).collect(Collectors.toSet());
if(status != 0L && !toIgnore.contains(status)) {
throw new FfmpegException(status, errorMessage(status));
}
}
private static String errorMessage(final long errorCode) {
final MutableObject<Pointer> nmes = new MutableObject<>(null);
try(final QuietCloseable qc = () -> FfmpegApi.pcv4j_ffmpeg2_utils_freeString(nmes.getValue());) {
nmes.setValue(FfmpegApi.pcv4j_ffmpeg2_utils_statusMessage(errorCode));
return Optional.ofNullable(nmes.getValue()).orElseThrow(() -> new FfmpegException("Failed to retrieve status message for code: " + errorCode))
.getString(0);
}
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-ffmpeg/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-ffmpeg/1.0/ai/kognition/pilecv4j/ffmpeg/FfmpegException.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.ffmpeg;
public class FfmpegException extends RuntimeException {
private static final long serialVersionUID = 1L;
public final long status;
public FfmpegException(final String message) {
super(message);
this.status = 0;
}
public FfmpegException(final String message, final Throwable cause) {
super(message, cause);
this.status = 0;
}
public FfmpegException(final long status, final String message) {
super((status == 0) ? message : (sanitizeStatus(status) + ", " + message));
this.status = status;
}
private static String sanitizeStatus(final long status) {
if((status & 0xffffffff00000000L) == 0) { // not a pilecv4j status
if((status & 0x0000000080000000L) != 0)
return "AV status: " + (int)(status & ~0xffffffff00000000L);
else
return "AV status: " + (int)status;
} else { // is a pilecv4j status
return "Pilecv4j status: " + (int)(status >> 32);
}
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-ffmpeg/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-ffmpeg/1.0/ai/kognition/pilecv4j/ffmpeg/MediaDataSeek.java
|
package ai.kognition.pilecv4j.ffmpeg;
/**
* This interface is used in both muxing and custom sources.
*/
@FunctionalInterface
public interface MediaDataSeek {
public long seekBuffer(long offset, int whence);
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-ffmpeg/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-ffmpeg/1.0/ai/kognition/pilecv4j/ffmpeg/Muxer.java
|
package ai.kognition.pilecv4j.ffmpeg;
import java.nio.ByteBuffer;
import java.util.function.Function;
import com.sun.jna.Pointer;
import com.sun.jna.ptr.LongByReference;
import net.dempsy.util.QuietCloseable;
import ai.kognition.pilecv4j.ffmpeg.internal.FfmpegApi;
import ai.kognition.pilecv4j.ffmpeg.internal.FfmpegApi.create_muxer_from_java_callback;
import ai.kognition.pilecv4j.ffmpeg.internal.FfmpegApi.seek_buffer_callback;
import ai.kognition.pilecv4j.ffmpeg.internal.FfmpegApi.should_close_segment_callback;
import ai.kognition.pilecv4j.ffmpeg.internal.FfmpegApi.write_buffer_callback;
/**
* Class that wraps an FFMpeg muxer. These are automatically created by all of the {@code createRemuxer} calls
* as well as explicitly created by all {@code newMuxer} calls.
*/
public class Muxer implements QuietCloseable {
final long nativeRef;
private boolean skipCloseOnceForReturn = false;
private Muxer(final long nativeRef) {
this.nativeRef = nativeRef;
}
/**
* This interface is used for custom output post-muxing
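*
* <p>
* A minimal sketch that forwards muxed MPEG-TS packets to an {@code OutputStream} (assuming
* {@code os} stays open for the life of the muxer and that the packet data starts at offset 0
* of the buffer):
* </p>
*
* <pre>
* <code>
* final Muxer muxer = Muxer.create("mpegts", (bb, len) -> {
*     final byte[] tmp = new byte[len];
*     bb.rewind();
*     bb.get(tmp, 0, len);
*     try {
*         os.write(tmp);
*     } catch(final IOException ioe) {
*         throw new RuntimeException(ioe);
*     }
* });
* </code>
* </pre>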
*/
@FunctionalInterface
public static interface WritePacket {
public void handle(ByteBuffer packet, int len);
}
@Override
public void close() {
if(skipCloseOnceForReturn) {
skipCloseOnceForReturn = false;
return;
}
if(nativeRef != 0L)
FfmpegApi.pcv4j_ffmpeg2_muxer_delete(nativeRef);
}
public Muxer returnMe() {
skipCloseOnceForReturn = true;
return this;
}
public static Muxer create(final String fmt, final String outputUri) {
try(Muxer ret = new Muxer(FfmpegApi.pcv4j_ffmpeg2_defaultMuxer_create(fmt, outputUri, null, null));) {
return ret.returnMe();
}
}
public static Muxer create(final String outputUri) {
return create(null, outputUri);
}
public static Muxer create(final String outputFormat, final WritePacket writer, final MediaDataSeek seek) {
final Wbc wbc = new Wbc(writer);
final seek_buffer_callback sbcb = seek != null ? new seek_buffer_callback() {
@Override
public long seek_buffer(final long offset, final int whence) {
return seek.seekBuffer(offset, whence);
}
} : null;
try(final var output = new CustomMuxer(FfmpegApi.pcv4j_ffmpeg2_defaultMuxer_create(outputFormat, null, wbc, sbcb), wbc, sbcb);) {
// violation of the rule that objects should be usable once the constructor returns ... oh well,
// at least it's private. The fix for this would be to have the Wbc hold the CustomOutput rather than
// the other way around but ... not right now.
wbc.bb = output.customBuffer();
return output.returnMe();
}
}
public static Muxer create(final String outputFormat, final WritePacket writer) {
return create(outputFormat, writer, null);
}
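/**
* Create a muxer that segments its output: {@code segmentSupplier} is asked for a new underlying
* muxer for each segment, and a segment is closed whenever {@code whenToSegment} returns
* {@code true} for a packet. For example, to cut a new file at every key frame (the file-name
* pattern is a hypothetical example):
*
* <pre>
* <code>
* final Muxer segmented = Muxer.create(
*     n -> Muxer.create("file:///tmp/segment-" + n + ".mp4"),
*     (mediaType, streamIndex, numBytes, isKeyFrame, pts, dts, tbNum, tbDen) -> isKeyFrame);
* </code>
* </pre>
*/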
public static Muxer create(final Function<Long, Muxer> segmentSupplier, final PacketFilter whenToSegment) {
final create_muxer_from_java_callback p1 = (final long muxerNumber, final LongByReference muxerOut) -> {
final Muxer next = segmentSupplier.apply(muxerNumber);
muxerOut.setValue(next.nativeRef);
return 0;
};
final should_close_segment_callback p2 = (final int mediaType, final int stream_index, final int packetNumBytes, final int isKeyFrame, final long pts,
final long dts, final int tbNum, final int tbDen) -> {
return whenToSegment.test(mediaType, stream_index, packetNumBytes, isKeyFrame != 0, pts, dts, tbNum, tbDen) ? 1 : 0;
};
return new SegmentedMuxer(FfmpegApi.pcv4j_ffmpeg2_segmentedMuxer_create(p1, p2), p1, p2);
}
private static class SegmentedMuxer extends Muxer {
// ======================================================================
// JNA will only hold a weak reference to the callbacks passed in
// so if we dynamically allocate them then they will be garbage collected.
// In order to prevent that, we're keeping strong references to them.
// These are not private in order to avoid any possibility that the
// JVM optimized them out since they aren't read anywhere in this code.
@SuppressWarnings("unused") public create_muxer_from_java_callback strongRefW = null;
@SuppressWarnings("unused") public should_close_segment_callback strongRefF = null;
// ======================================================================
private SegmentedMuxer(final long nativeRef, final create_muxer_from_java_callback write, final should_close_segment_callback sbcb) {
super(nativeRef);
strongRefW = write;
strongRefF = sbcb;
}
}
private static class CustomMuxer extends Muxer {
// ======================================================================
// JNA will only hold a weak reference to the callbacks passed in
// so if we dynamically allocate them then they will be garbage collected.
// In order to prevent that, we're keeping strong references to them.
// These are not private in order to avoid any possibility that the
// JVM optimized them out since they aren't read anywhere in this code.
@SuppressWarnings("unused") public write_buffer_callback strongRefW = null;
@SuppressWarnings("unused") public seek_buffer_callback strongRefS = null;
// ======================================================================
private CustomMuxer(final long nativeRef, final write_buffer_callback write, final seek_buffer_callback sbcb) {
super(nativeRef);
strongRefW = write;
strongRefS = sbcb;
}
private ByteBuffer customBuffer() {
final Pointer value = FfmpegApi.pcv4j_ffmpeg2_defaultMuxer_buffer(nativeRef);
final int bufSize = FfmpegApi.pcv4j_ffmpeg2_defaultMuxer_bufferSize(nativeRef);
return value.getByteBuffer(0, bufSize);
}
}
private static class Wbc implements write_buffer_callback {
ByteBuffer bb;
final WritePacket writer;
private Wbc(final WritePacket writer) {
this.writer = writer;
}
@Override
public long write_buffer(final int numBytesToWrite) {
writer.handle(bb, numBytesToWrite);
return 0L;
}
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-ffmpeg/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-ffmpeg/1.0/ai/kognition/pilecv4j/ffmpeg/PacketFilter.java
|
package ai.kognition.pilecv4j.ffmpeg;
/**
* Used for filtering packets on the input stream and also for deciding when to cut video segments
* in a segmenting Muxer
*/
@FunctionalInterface
public interface PacketFilter {
/**
* @return whether, given the details, this packet should be let through
*/
public boolean test(final int mediaType, final int stream_index, final int packetNumBytes, final boolean isKeyFrame, final long pts,
final long dts, final int tbNum, final int tbDen);
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-ffmpeg/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-ffmpeg/1.0/ai/kognition/pilecv4j/ffmpeg/PacketMetadata.java
|
package ai.kognition.pilecv4j.ffmpeg;
public record PacketMetadata(int mediaType, int stream_index, int packetNumBytes, boolean isKeyFrame, long pts,
long dts, int tbNum, int tbDen) {}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-ffmpeg/1.0/ai/kognition/pilecv4j/ffmpeg
|
java-sources/ai/kognition/pilecv4j/lib-ffmpeg/1.0/ai/kognition/pilecv4j/ffmpeg/internal/FfmpegApi.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.ffmpeg.internal;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import com.sun.jna.Callback;
import com.sun.jna.Native;
import com.sun.jna.NativeLibrary;
import com.sun.jna.Pointer;
import com.sun.jna.Structure;
import com.sun.jna.ptr.IntByReference;
import com.sun.jna.ptr.LongByReference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import ai.kognition.pilecv4j.image.CvMat;
import ai.kognition.pilecv4j.image.ImageAPI;
import ai.kognition.pilecv4j.util.NativeLibraryLoader;
public class FfmpegApi {
private static final Logger LOGGER = LoggerFactory.getLogger(FfmpegApi.class);
public static final AtomicBoolean inited = new AtomicBoolean(false);
public static final String LIBNAME = "ai.kognition.pilecv4j.ffmpeg";
// needs to match LogLevel enum in the C++ code.
public static final int LOG_LEVEL_TRACE = 0;
public static final int LOG_LEVEL_DEBUG = 1;
public static final int LOG_LEVEL_INFO = 2;
public static final int LOG_LEVEL_WARN = 3;
public static final int LOG_LEVEL_ERROR = 4;
public static final int LOG_LEVEL_FATAL = 5;
// needs to match the StreamContext state enum in the C++ code.
public static final int STREAM_CONTEXT_STATE_FRESH = 0;
public static final int STREAM_CONTEXT_STATE_OPEN = 1;
public static final int STREAM_CONTEXT_STATE_LOADED = 2;
public static final int STREAM_CONTEXT_STATE_PROCESSORS_SETUP = 3;
public static final int STREAM_CONTEXT_STATE_PLAYING = 4;
public static final int STREAM_CONTEXT_STATE_STOPPING = 5;
public static final int STREAM_CONTEXT_STATE_ENDED = 6;
static {
if(inited.get())
throw new IllegalStateException("Cannot initialize the Ffmpeg twice.");
CvMat.initOpenCv();
if(!inited.getAndSet(true)) {
NativeLibraryLoader.loader()
.library(LIBNAME)
.destinationDir(new File(System.getProperty("java.io.tmpdir"), LIBNAME).getAbsolutePath())
.addPreLoadCallback((dir, libname, oslibname) -> {
LOGGER.info("scanning dir:{}, libname:{}, oslibname:{}", dir, libname, oslibname);
NativeLibrary.addSearchPath(libname, dir.getAbsolutePath());
})
.load();
Native.register(LIBNAME);
pcv4j_ffmpeg2_imageMaker_set(ImageAPI.pilecv4j_image_get_im_maker());
}
}
// called from Ffmpeg2 to load the class
public static void _init() {}
// ==========================================================
// Custom IO callback declarations
// ==========================================================
public static interface fill_buffer_callback extends Callback {
public int fill_buffer(final int numBytesToWrite);
}
public static interface seek_buffer_callback extends Callback {
public long seek_buffer(final long offset, final int whence);
}
public static interface write_buffer_callback extends Callback {
public long write_buffer(final int numBytesToWrite);
}
// ==========================================================
// frame processing callback declarations
// ==========================================================
public static interface push_frame_callback extends Callback {
public long push_frame(final long val, final int isRgb, final int streamIndex);
}
// ==========================================================
// Stream selector callback declarations
// ==========================================================
public static interface select_streams_callback extends Callback {
public int select_streams(final int numStreams, Pointer selected);
}
public static interface packet_filter_callback extends Callback {
public int packet_filter(final int mediaType, final int stream_index, final int packetNumBytes, final int isKeyFrame, final long pts, final long dts,
final int tbNum, final int tbDen);
}
// ==========================================================
// Segmented Muxer callback declarations
// ==========================================================
public static interface create_muxer_from_java_callback extends Callback {
public long next_muxer(final long muxerNumber, LongByReference muxerOut);
}
public static interface should_close_segment_callback extends Callback {
public int should_close_segment(int mediaType, int stream_index, int packetNumBytes,
int isKeyFrame, long pts, long dts, int tbNum, int tbDen);
}
// ==========================================================
// Utilities
// ==========================================================
public static native void pcv4j_ffmpeg2_logging_setLogLevel(final int logLevel);
public static native Pointer pcv4j_ffmpeg2_utils_statusMessage(final long status);
public static native void pcv4j_ffmpeg2_utils_freeString(final Pointer str);
public static native void pcv4j_ffmpeg2_imageMaker_set(final long im);
// ==========================================================
// Stream Context construction/destruction
// ==========================================================
public static native long pcv4j_ffmpeg2_mediaContext_create();
public static native void pcv4j_ffmpeg2_mediaContext_delete(final long nativeRef);
public static class internal_StreamDetails extends Structure {
public int stream_index;
public int mediaType;
public int fps_num;
public int fps_den;
public int tb_num;
public int tb_den;
public int codec_id;
public String codecName;
public static class ByReference extends internal_StreamDetails implements Structure.ByReference {}
private static final List<String> fo = gfo(internal_StreamDetails.class, "stream_index", "mediaType", "fps_num", "fps_den", "tb_num", "tb_den",
"codec_id", "codecName");
public internal_StreamDetails() {}
public internal_StreamDetails(final Pointer ptr) {
super(ptr);
}
@Override
protected List<String> getFieldOrder() {
return fo;
}
@Override
public String toString() {
return "internal_StreamDetails [mediaType=" + mediaType + ", fps_num=" + fps_num + ", fps_den=" + fps_den + ", tb_num=" + tb_num + ", tb_den="
+ tb_den + ", codecName=" + codecName + "]";
}
}
public static native internal_StreamDetails.ByReference pcv4j_ffmpeg2_mediaContext_getStreamDetails(final long ctx, final IntByReference numResults,
LongByReference rc);
public static native void pcv4j_ffmpeg2_streamDetails_deleteArray(Pointer p);
// ==========================================================
// MediaDataSource lifecycle methods
// ==========================================================
public static native void pcv4j_ffmpeg2_mediaDataSource_destroy(final long vdsRef);
/**
* Get a Uri based MediaDataSource source.
*
* @return a reference to a native MediaDataSource built from a source uri.
*/
public static native long pcv4j_ffmpeg2_uriMediaDataSource_create(final String sourceUri);
public static native long pcv4j_ffmpeg2_uriMediaDataSource_create2(final String fmt, final String source);
public static native long pcv4j_ffmpeg2_customMediaDataSource_create();
public static native long pcv4j_ffmpeg2_customMediaDataSource_set(final long nativeRef, final fill_buffer_callback vds, seek_buffer_callback seek);
/**
* When running a custom data source, a constant ByteBuffer wrapping native memory
* is used to transfer the data. The size of that buffer is retrieved with this call.
*/
public static native int pcv4j_ffmpeg2_customMediaDataSource_bufferSize(final long vdsRef);
/**
* When running a custom data source, a constant ByteBuffer wrapping native memory
* is used to transfer the data. That buffer is retrieved using this call.
*/
public static native Pointer pcv4j_ffmpeg2_customMediaDataSource_buffer(final long vdsRef);
// ==========================================================
// MediaProcessor lifecycle methods
// ==========================================================
public static native void pcv4j_ffmpeg2_mediaProcessor_destroy(final long vdsRef);
public static native long pcv4j_ffmpeg2_decodedFrameProcessor_create(final push_frame_callback cb, final int maxDim, final String decoderName);
public static native void pcv4j_ffmpeg2_decodedFrameProcessor_replace(final long nativeRef, final push_frame_callback cb);
public static native long pcv4j_ffmpeg2_remuxer_create(long outputRef, final int maxRemuxErrorCount);
// ==========================================================
// Muxers methods
// ==========================================================
public static native void pcv4j_ffmpeg2_muxer_delete(final long outputRef);
public static native long pcv4j_ffmpeg2_defaultMuxer_create(final String pfmt, final String poutputUri, final write_buffer_callback callback,
seek_buffer_callback seek);
public static native Pointer pcv4j_ffmpeg2_defaultMuxer_buffer(final long ctx);
public static native int pcv4j_ffmpeg2_defaultMuxer_bufferSize(final long ctx);
public static native long pcv4j_ffmpeg2_segmentedMuxer_create(final create_muxer_from_java_callback create_muxer_callback,
final should_close_segment_callback ssc_callback);
// ==========================================================
// MediaProcessorChain methods
// ==========================================================
public static native long pcv4j_ffmpeg2_mediaProcessorChain_create();
public static native void pcv4j_ffmpeg2_mediaProcessorChain_destroy(long nativeRef);
public static native long pcv4j_ffmpeg2_mediaProcessorChain_addProcessor(long mpc, long mp);
public static native long pcv4j_ffmpeg2_mediaProcessorChain_addPacketFilter(long mpc, long pf);
// ==========================================================
// Filters and Stream selectors
// ==========================================================
public static native long pcv4j_ffmpeg2_firstVideoStreamSelector_create();
public static native long pcv4j_ffmpeg2_javaStreamSelector_create(select_streams_callback callback);
public static native long pcv4j_ffmpeg2_javaPacketFilter_create(packet_filter_callback callback);
public static native void pcv4j_ffmpeg2_packetFilter_destroy(long nativeRef);
// ==========================================================
// Stream Context setup methods
// ==========================================================
public static native long pcv4j_ffmpeg2_mediaContext_setSource(final long ctxRef, final long mediaDataSourceRef);
public static native long pcv4j_ffmpeg2_mediaContext_addProcessor(final long ctxRef, final long mediaProcessorRef);
/**
* Set an option for the ffmpeg call (e.g. rtsp_transport = tcp).
*/
public native static long pcv4j_ffmpeg2_mediaContext_addOption(final long streamCtx, final String key, final String value);
/**
* Play the stream and carry out all of the processing that should have been
* set up prior to calling this method.
*/
public static native long pcv4j_ffmpeg2_mediaContext_play(final long ctx);
/**
* Stop a playing stream. If the stream isn't in the PLAY state, then it will return an error.
* If the stream is already in a STOP state, this will do nothing and return no error.
*/
public native static long pcv4j_ffmpeg2_mediaContext_stop(final long nativeDef);
public native static int pcv4j_ffmpeg2_mediaContext_state(final long nativeDef);
public native static void pcv4j_ffmpeg2_mediaContext_sync(final long nativeDef);
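// Illustrative only: a hedged sketch of how the media-context calls above are
// typically sequenced. 'ctx' is a native media context created elsewhere in this
// API; checking the status codes returned as longs is omitted for brevity.
static void playUriSketch(final long ctx, final String uri) {
    final long src = pcv4j_ffmpeg2_uriMediaDataSource_create(uri);
    pcv4j_ffmpeg2_mediaContext_setSource(ctx, src);
    pcv4j_ffmpeg2_mediaContext_addOption(ctx, "rtsp_transport", "tcp"); // optional ffmpeg option
    pcv4j_ffmpeg2_mediaContext_play(ctx); // runs the processing configured on ctx
}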
// ==========================================================
// Encoding
// ==========================================================
public native static long pcv4j_ffmpeg2_encodingContext_create();
public native static void pcv4j_ffmpeg2_encodingContext_delete(final long nativeDef);
public native static long pcv4j_ffmpeg2_encodingContext_setMuxer(final long nativeDef, long muxerRef);
public native static long pcv4j_ffmpeg2_encodingContext_openVideoEncoder(final long encCtxRef, final String video_codec);
public native static long pcv4j_ffmpeg2_encodingContext_ready(final long encCtxRef);
public native static long pcv4j_ffmpeg2_encodingContext_stop(final long nativeDef);
public native static long pcv4j_ffmpeg2_videoEncoder_addCodecOption(final long nativeDef, final String key, final String val);
public native static long pcv4j_ffmpeg2_videoEncoder_enable(final long nativeDef, final int isRgb, final int width, final int height, final int stride,
final int dstW, final int dstH);
public native static void pcv4j_ffmpeg2_videoEncoder_delete(final long nativeDef);
public native static long pcv4j_ffmpeg2_videoEncoder_encode(final long nativeDef, final long matRef, final int isRgb);
public native static long pcv4j_ffmpeg2_videoEncoder_setFramerate(final long nativeDef, final int pfps_num, final int pfps_den);
public native static long pcv4j_ffmpeg2_videoEncoder_setOutputDims(final long nativeDef, final int width, final int height, int preserveAspectRatio,
int onlyScaleDown);
public native static long pcv4j_ffmpeg2_videoEncoder_setRcBufferSize(final long nativeDef, final int pbufferSize);
public native static long pcv4j_ffmpeg2_videoEncoder_setRcBitrate(final long nativeDef, final long pminBitrate, final long pmaxBitrate);
public native static long pcv4j_ffmpeg2_videoEncoder_setTargetBitrate(final long nativeDef, final long pbitrate);
public native static long pcv4j_ffmpeg2_videoEncoder_stop(final long nativeDef);
public native static long pcv4j_ffmpeg2_videoEncoder_streaming(final long nativeDef);
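// Illustrative only: a hedged sketch of sequencing the encoding-context calls
// above. Treating the openVideoEncoder return value as the encoder reference is
// an assumption here, as is the "libx264" codec name; status-code checking is
// omitted for brevity.
static void encodeSketch(final long muxerRef) {
    final long encCtx = pcv4j_ffmpeg2_encodingContext_create();
    pcv4j_ffmpeg2_encodingContext_setMuxer(encCtx, muxerRef);
    final long enc = pcv4j_ffmpeg2_encodingContext_openVideoEncoder(encCtx, "libx264"); // assumed: returns encoder ref
    pcv4j_ffmpeg2_videoEncoder_setFramerate(enc, 30, 1); // 30 fps
    pcv4j_ffmpeg2_encodingContext_ready(encCtx);
    // ... feed frames with pcv4j_ffmpeg2_videoEncoder_encode(enc, matRef, 1) ...
    pcv4j_ffmpeg2_videoEncoder_stop(enc);
    pcv4j_ffmpeg2_encodingContext_stop(encCtx);
    pcv4j_ffmpeg2_encodingContext_delete(encCtx);
}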
// ==========================================================
// Error codes
// ==========================================================
/**
* Get the AV Error code for EOF. Can be called at any time.
*/
public static native int pcv4j_ffmpeg_code_averror_eof();
public static native long pcv4j_ffmpeg_code_averror_eof_as_kognition_stat();
public static native long pcv4j_ffmpeg_code_averror_unknown_as_kognition_stat();
/**
* Get the SEEK_SET seek code.
*/
public static native int pcv4j_ffmpeg_code_seek_set();
/**
* Get the SEEK_CUR seek code.
*/
public static native int pcv4j_ffmpeg_code_seek_cur();
/**
* Get the SEEK_END seek code.
*/
public static native int pcv4j_ffmpeg_code_seek_end();
/**
* Get the "error: code for EAGAIN
*/
public static native int pcv4j_ffmpeg_code_eagain();
/**
* Get the FFmpeg specific seek code. This means just return
* the entire stream size or a negative number if not supported.
*/
public static native int pcv4j_ffmpeg_code_seek_size();
public static native int pcv4j_ffmpeg2_mediaType_UNKNOWN();
public static native int pcv4j_ffmpeg2_mediaType_VIDEO();
public static native int pcv4j_ffmpeg2_mediaType_AUDIO();
public static native int pcv4j_ffmpeg2_mediaType_DATA();
public static native int pcv4j_ffmpeg2_mediaType_SUBTITLE();
public static native int pcv4j_ffmpeg2_mediaType_ATTACHMENT();
public static native int pcv4j_ffmpeg2_mediaType_NB();
public static native void pcv4j_ffmpeg2_timings();
private static List<String> gfo(final Class<?> clazz, final String... fieldNames) {
try {
final ArrayList<String> ret = new ArrayList<>(fieldNames.length);
for(final String fn: fieldNames)
ret.add(clazz.getField(fn)
.getName());
return ret;
} catch(final NoSuchFieldException | SecurityException e) {
// This will only happen if the structure changes and should cause systemic
// test failures pointing to that fact.
throw new RuntimeException(e);
}
}
public static interface get_frame_callback extends Callback {
public long get_frame();
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/Closer.java
|
package ai.kognition.pilecv4j.image;
import static net.dempsy.util.Functional.uncheck;
import java.util.LinkedList;
import java.util.List;
import org.opencv.core.Mat;
import net.dempsy.util.QuietCloseable;
/**
* Manage resources from a single place
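*
* <p>
* A minimal usage sketch (illustrative): resources added to the {@link Closer} are
* closed, most recently added first, when the try block exits.
* </p>
*
* <pre>
* <code>
* try (Closer closer = new Closer()) {
*     Mat tmp = closer.addMat(new Mat());
*     // ... use tmp ...
* }
* </code>
* </pre>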
*/
public class Closer implements AutoCloseable {
private final List<AutoCloseable> toClose = new LinkedList<>();
public <T extends AutoCloseable> T add(final T mat) {
if(mat != null)
toClose.add(0, mat);
return mat;
}
public <T extends Mat> T addMat(final T mat) {
if(mat == null)
return null;
if(mat instanceof AutoCloseable)
add((AutoCloseable)mat);
else
toClose.add(0, (QuietCloseable)() -> CvMat.closeRawMat(mat));
return mat;
}
@Override
public void close() {
toClose.stream().forEach(r -> uncheck(() -> r.close()));
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/CvMat.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image;
import static ai.kognition.pilecv4j.image.ImageAPI.LOG_LEVEL_DEBUG;
import static ai.kognition.pilecv4j.image.ImageAPI.LOG_LEVEL_ERROR;
import static ai.kognition.pilecv4j.image.ImageAPI.LOG_LEVEL_FATAL;
import static ai.kognition.pilecv4j.image.ImageAPI.LOG_LEVEL_INFO;
import static ai.kognition.pilecv4j.image.ImageAPI.LOG_LEVEL_TRACE;
import static ai.kognition.pilecv4j.image.ImageAPI.LOG_LEVEL_WARN;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.function.Consumer;
import java.util.function.Function;
import com.sun.jna.Memory;
import com.sun.jna.Pointer;
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.imgproc.Imgproc;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import net.dempsy.util.QuietCloseable;
/**
* <p>
* This class is an easier (perhaps) and more efficient interface to an OpenCV
* <a href="https://docs.opencv.org/4.0.1/d3/d63/classcv_1_1Mat.html">Mat</a>
* than the one available through the official Java wrapper. It includes more efficient resource
* management as an {@link AutoCloseable} and the ability to do more <em>"zero-copy"</em>
* image manipulations than is typically available in OpenCVs default Java API.
* </p>
*
* <h2>Memory management</h2>
*
* <p>
* In OpenCV's C/C++ API, the developer is responsible for managing the resources. The
* <a href="https://docs.opencv.org/4.0.1/d3/d63/classcv_1_1Mat.html">Mat</a> class in C++
* references the underlying memory resources for the image data. When a C++ Mat is deleted,
* this memory is freed (that is, as long as other Mats aren't referring to the same
* memory, in which case when the last one is deleted, the memory is freed). This gives the
* developer using the C++ API fine grained control over the compute resources.
* </p>
*
* <p>
* However, for Java developers, it's not typical for the developer to manage memory or explicitly
* delete objects or free resources. Instead, they typically rely on garbage collection. The
* problem with doing that in OpenCV's Java API is that the Java VM and its garbage
* collector <em>can't see</em> the image memory referred to by the
* <a href="https://docs.opencv.org/4.0.1/d3/d63/classcv_1_1Mat.html">Mat</a>. This memory is
* <a href="https://stackoverflow.com/questions/6091615/difference-between-on-heap-and-off-heap"><em>off-heap</em></a>
* from the perspective of the Java VM.
* </p>
*
* <p>
* This is why, as you may have experienced if you've used OpenCV's Java API in a larger
* video system, you can rapidly run out of memory. Creating a Mat for each high resolution
* video frame but letting the JVM garbage collector decide when to delete these objects as
* you create them will eventually (sometimes rapidly) fill the available system memory since the
* garbage collector is unaware of how much of that computer memory is actually being
* utilized by these <a href="https://docs.opencv.org/4.0.1/d3/d63/classcv_1_1Mat.html">Mat</a>s.
* </p>
*
* <p>
* This class allows OpenCV's <a href="https://docs.opencv.org/4.0.1/d3/d63/classcv_1_1Mat.html">Mat</a>s
* to be managed the same way you would any other {@link AutoCloseable} in Java (since 1.7).
* That is, using a <em>"try-with-resource"</em>.
* </p>
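*
* <p>
* For example (a minimal sketch; the dimensions and type are illustrative):
* </p>
*
* <pre>
* <code>
* try (CvMat mat = new CvMat(480, 640, CvType.CV_8UC3)) {
*     // use the mat; its native memory is freed when the block exits
* }
* </code>
* </pre>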
*
* <h3>Tracking memory leaks</h3>
*
* Additionally, you can track leaks in your use of {@link CvMat} by setting the environment variable
* {@code PILECV4J_TRACK_MEMORY_LEAKS="true"} or by using the system property
* {@code -Dpilecv4j.TRACK_MEMORY_LEAKS=true}. This will tell {@link CvMat} to track the locations in
* the code where it's been instantiated so that if it's eventually deleted by the garbage collector,
* rather than {@code CvMat#close}d by the developer, a warning-level log message will be emitted
* identifying where the leaked {@link CvMat} was initially instantiated.
*/
public class CvMat extends Mat implements QuietCloseable {
private static final Logger LOGGER = LoggerFactory.getLogger(CvMat.class);
public static final boolean TRACK_MEMORY_LEAKS;
protected boolean skipCloseOnceForReturn = false;
static {
ImageAPI._init();
final String sysOpTRACKMEMLEAKS = System.getProperty("pilecv4j.TRACK_MEMORY_LEAKS");
final boolean sysOpSet = sysOpTRACKMEMLEAKS != null;
boolean track = ("".equals(sysOpTRACKMEMLEAKS) || Boolean.parseBoolean(sysOpTRACKMEMLEAKS));
if(!sysOpSet)
track = Boolean.parseBoolean(System.getenv("PILECV4J_TRACK_MEMORY_LEAKS"));
TRACK_MEMORY_LEAKS = track;
if(TRACK_MEMORY_LEAKS)
LOGGER.info("Tracking memory leaks in {} enabled.", CvMat.class.getSimpleName());
final Logger nativeLogger = LoggerFactory.getLogger(CvMat.class.getPackageName() + ".native");
// find the level
final int logLevelSet;
if(nativeLogger.isTraceEnabled())
logLevelSet = LOG_LEVEL_TRACE;
else if(nativeLogger.isDebugEnabled())
logLevelSet = LOG_LEVEL_DEBUG;
else if(nativeLogger.isInfoEnabled())
logLevelSet = LOG_LEVEL_INFO;
else if(nativeLogger.isWarnEnabled())
logLevelSet = LOG_LEVEL_WARN;
else if(nativeLogger.isErrorEnabled())
logLevelSet = LOG_LEVEL_ERROR;
else
logLevelSet = LOG_LEVEL_FATAL;
ImageAPI.pilecv4j_image_setLogLevel(logLevelSet);
}
public static void initOpenCv() {}
// This is used when there's an input matrix that can't be null but should be ignored.
public static final Mat nullMat = new Mat();
private static final Method nDelete;
private boolean deletedAlready = false;
protected final RuntimeException stackTrace;
protected RuntimeException delStackTrace = null;
static {
try {
nDelete = org.opencv.core.Mat.class.getDeclaredMethod("n_delete", long.class);
nDelete.setAccessible(true);
} catch(final NoSuchMethodException | SecurityException e) {
throw new RuntimeException(
"Got an exception trying to access Mat.n_Delete. Either the security model is too restrictive or the version of OpenCv can't be supported.",
e);
}
}
protected CvMat(final long nativeObj) {
super(nativeObj);
stackTrace = TRACK_MEMORY_LEAKS ? new RuntimeException("Here's where I was instantiated: ") : null;
}
/**
* Constructs an empty {@link CvMat}.
* This simply calls the parent class's equivalent constructor.
*/
public CvMat() {
stackTrace = TRACK_MEMORY_LEAKS ? new RuntimeException("Here's where I was instantiated: ") : null;
}
/**
* Construct a {@link CvMat} and preallocate the image space.
* This simply calls the parent class's equivalent constructor.
*
* @param rows number of rows
* @param cols number of columns
* @param type type of the {@link CvMat}. See
* <a href="https://docs.opencv.org/4.0.1/javadoc/org/opencv/core/CvType.html">CvType</a>
*/
public CvMat(final int rows, final int cols, final int type) {
super(rows, cols, type);
stackTrace = TRACK_MEMORY_LEAKS ? new RuntimeException("Here's where I was instantiated: ") : null;
}
/**
* Construct a {@link CvMat} and preallocate the image space and fill it from the {@link ByteBuffer}.
* This simply calls the parent class's equivalent constructor.
*
* @param rows number of rows
* @param cols number of columns
* @param type type of the {@link CvMat}. See
* <a href="https://docs.opencv.org/4.0.1/javadoc/org/opencv/core/CvType.html">CvType</a>
* @param data the {@link ByteBuffer} with the image data.
*/
public CvMat(final int rows, final int cols, final int type, final ByteBuffer data) {
super(rows, cols, type, data);
stackTrace = TRACK_MEMORY_LEAKS ? new RuntimeException("Here's where I was instantiated: ") : null;
}
/**
* Construct a {@link CvMat} and preallocate the multidimensional tensor space.
* This simply calls the parent class's equivalent constructor.
*
* @param sizes array of the sizes of each dimension.
* @param type type of the {@link CvMat}. See
* <a href="https://docs.opencv.org/4.0.1/javadoc/org/opencv/core/CvType.html">CvType</a>
*/
public CvMat(final int[] sizes, final int type) {
super(sizes, type);
stackTrace = TRACK_MEMORY_LEAKS ? new RuntimeException("Here's where I was instantiated: ") : null;
}
/**
* This is equivalent to getting the full size array in C++
* using:
* <p>
* <code>
* auto sz = mat.size();<br>
* ... sz[dim] ..;
* </code>
* </p>
*/
public int[] dimSizes() {
final int ndims = dims();
final int[] ret = new int[ndims];
for(int i = 0; i < ndims; i++) {
ret[i] = size(i);
}
return ret;
}
/**
* Reshape this Mat without constructing any intermediate Mat, guaranteeing the result
* uses the same memory segment.
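*
* <pre>
* <code>
* // hedged sketch ('mat' is a continuous 12x1, 1-channel CvMat): view it as 3x4 in place
* mat.inplaceReshape(1, new int[] {3, 4});
* </code>
* </pre>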
*/
public void inplaceReshape(final int cn, final int[] sizes) {
final int ndims = sizes.length;
final long numBytes = ndims * Integer.BYTES;
final Pointer ptr = new Memory(numBytes);
final ByteBuffer bb = ptr.getByteBuffer(0, numBytes).order(ByteOrder.nativeOrder());
for(final int sz: sizes)
bb.putInt(sz);
ImageAPI.pilecv4j_image_CvRaster_inplaceReshape(nativeObj, cn, ndims, ptr);
}
/**
* <p>
* This method is similar to reshape except it also allows changing the type
* without doing a conversion. That is, it simply reinterprets the raw binary buffer
* as being a different type.
* </p>
*
* <p>
* DO NOT USE THIS METHOD UNLESS YOU REALLY KNOW WHAT YOU'RE DOING!
* </p>
*
* <p>
* This method has the unfortunate side effect of unreferencing the underlying data.
* </p>
*
* <p>
* The memory representing this {@link CvMat} must be under the management of another mechanism.
* Therefore, either this must be a shallow copy of another Mat that outlives this Mat, OR
* the mat must have been constructed from a data buffer (that also must outlive this Mat).
* </p>
*
* @param maxSize is the total number of bytes that should not be exceeded. It
* should be the number of bytes that the mat's backing buffer contains and is
* used as a safety check (as in C/C++'s {@code strncpy} for example). This
* number can be greater than the number of bytes required by {@code sizes} and
* {@code type} but it cannot be less.
*/
public boolean inplaceRemake(final int[] sizes, final int type, final long maxSize) {
final int ndims = sizes.length;
final long numBytes = ndims * Integer.BYTES;
final Pointer ptr = new Memory(numBytes);
final ByteBuffer bb = ptr.getByteBuffer(0, numBytes).order(ByteOrder.nativeOrder());
for(final int sz: sizes)
bb.putInt(sz);
return ImageAPI.pilecv4j_image_CvRaster_inplaceRemake(nativeObj, ndims, ptr, type, maxSize) != 0;
}
/**
* This performs a proper matrix multiplication that returns {@code this * other}.
*
* @return a new {@link CvMat} resulting from the operation. <b>Note: The caller owns the CvMat returned</b>
*
* @see <a href=
* "https://docs.opencv.org/4.0.1/d2/de8/group__core__array.html#gacb6e64071dffe36434e1e7ee79e7cb35">cv::gemm()</a>
*/
public CvMat mm(final Mat other) {
return mm(other, 1.0D);
}
/**
* @return the transpose of the matrix. <b>Note: The caller owns the CvMat returned.</b>
*/
@Override
public CvMat t() {
return CvMat.move(super.t());
}
/**
* This performs a proper matrix multiplication and multiplies the result by a scalar. It returns:
* <p>
* {@code scale (this * other)}.
*
* @return a new {@link CvMat} resulting from the operation. <b>Note: The caller owns the CvMat returned</b>
*
* @see <a href=
* "https://docs.opencv.org/4.0.1/d2/de8/group__core__array.html#gacb6e64071dffe36434e1e7ee79e7cb35">cv::gemm()</a>
*/
public CvMat mm(final Mat other, final double scale) {
final Mat ret = new Mat(); // we don't close this because we're going to move it.
try {
Core.gemm(this, other, scale, nullMat, 0.0D, ret);
} catch(final RuntimeException rte) {
CvMat.closeRawMat(ret);
throw rte;
}
return CvMat.move(ret);
}
/**
* Apply the given {@link Consumer} to a {@link ByteBuffer} containing the raw image data for this {@link CvMat}.
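*
* <pre>
* <code>
* // hedged sketch ('mat' is any CvMat): zero the first byte of the raw image data, in place
* mat.bulkAccess(bb -> bb.put(0, (byte)0));
* </code>
* </pre>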
*
* @throws IllegalArgumentException if the underlying data can't be retrieved, either because it's
* not continuous or because the underlying mat is invalid.
* @param bulkAccessor is the {@link Consumer} to pass the {@link ByteBuffer} to.
*/
public void bulkAccess(final Consumer<ByteBuffer> bulkAccessor) {
bulkAccess(this, bulkAccessor);
}
/**
* Apply the given {@link Consumer} to a {@link ByteBuffer} containing the raw image data for the given {@link Mat}.
*
* @throws IllegalArgumentException if the underlying data can't be retrieved, either because it's
* not continuous or because the underlying mat is invalid.
* @param mat the Mat whose raw data buffer is accessed.
* @param bulkAccessor is the {@link Consumer} to pass the {@link ByteBuffer} to.
*/
public static void bulkAccess(final Mat mat, final Consumer<ByteBuffer> bulkAccessor) {
bulkAccessor.accept(_getData(mat));
}
/**
* Apply the given {@link Function} to a {@link ByteBuffer} containing the raw image data for this {@link CvMat}.
*
* @throws IllegalArgumentException if the underlying data can't be retrieved, either because it's
* not continuous or because the underlying mat is invalid.
* @param bulkAccessor is the {@link Function} to pass the {@link ByteBuffer} to.
* @return the return value of the provided {@code bulkAccessor}
*/
public <T> T bulkAccessOp(final Function<ByteBuffer, T> bulkAccessor) {
return bulkAccessOp(this, bulkAccessor);
}
/**
* Apply the given {@link Function} to a {@link ByteBuffer} containing the raw image data for the given {@link Mat}.
*
* @throws IllegalArgumentException if the underlying data can't be retrieved, either because it's
* not continuous or because the underlying mat is invalid.
* @param mat the Mat whose raw data buffer is accessed.
* @param bulkAccessor is the {@link Function} to pass the {@link ByteBuffer} to.
* @return the return value of the provided {@code bulkAccessor}
*/
public static <T> T bulkAccessOp(final Mat mat, final Function<ByteBuffer, T> bulkAccessor) {
return bulkAccessor.apply(_getData(mat));
}
/**
* Apply the given {@link Function} to a {@link CvRaster} containing the image data for this {@link CvMat}
*
* @param function is the {@link Function} to pass the {@link CvRaster} to.
* @return the return value of the provided {@code function}
* @see CvRaster
* @deprecated use {@link #bulkAccessOp(Function)} instead.
*/
@Deprecated
public <T> T rasterOp(final Function<CvRaster, T> function) {
try(final CvRaster raster = CvRaster.makeInstance(this)) {
return function.apply(raster);
}
}
/**
* Apply the given {@link Consumer} to a {@link CvRaster} containing the image data for this {@link CvMat}
*
* @param function is the {@link Consumer} to pass the {@link CvRaster} to.
* @see CvRaster
* @deprecated use {@link #bulkAccess(Consumer)} instead
*/
@Deprecated
public void rasterAp(final Consumer<CvRaster> function) {
try(final CvRaster raster = CvRaster.makeInstance(this)) {
function.accept(raster);
}
}
/**
* Helper function for applying a {@link Function} to a {@link CvRaster} built from the given
* <a href="https://docs.opencv.org/4.0.1/d3/d63/classcv_1_1Mat.html">Mat</a>
*
* @param mat <a href="https://docs.opencv.org/4.0.1/d3/d63/classcv_1_1Mat.html">Mat</a> to build the
* {@link CvRaster} from.
* @param function is the {@link Function} to pass the {@link CvRaster} to.
* @return the return value of the provided {@code function}
* @see CvRaster
* @deprecated use {@link #bulkAccessOp(Mat, Function)} instead.
*/
@Deprecated
public static <T> T rasterOp(final Mat mat, final Function<CvRaster, T> function) {
if(mat instanceof CvMat)
return ((CvMat)mat).rasterOp(function);
else {
try(final CvRaster raster = CvRaster.makeInstance(mat)) {
return function.apply(raster);
}
}
}
/**
* Helper function for applying a {@link Consumer} to a {@link CvRaster} built from the given
* <a href="https://docs.opencv.org/4.0.1/d3/d63/classcv_1_1Mat.html">Mat</a>
*
* @param mat <a href="https://docs.opencv.org/4.0.1/d3/d63/classcv_1_1Mat.html">Mat</a> to build the
* {@link CvRaster} from.
* @param function is the {@link Consumer} to pass the {@link CvRaster} to.
* @see CvRaster
* @deprecated use {@link #bulkAccess(Mat, Consumer)} instead.
*/
@Deprecated
public static void rasterAp(final Mat mat, final Consumer<CvRaster> function) {
if(mat instanceof CvMat)
((CvMat)mat).rasterAp(function);
else {
try(final CvRaster raster = CvRaster.makeInstance(mat)) {
function.accept(raster);
}
}
}
/**
* @return How many bytes constitute the image data.
*/
public long numBytes() {
return elemSize() * total();
}
/**
* The underlying data buffer pointer as a long
*/
public long getNativeAddressOfData() {
if(!isContinuous())
throw new IllegalArgumentException("Cannot retrieve the data reference of a Mat without a continuous buffer.");
return Pointer.nativeValue(ImageAPI.pilecv4j_image_CvRaster_getData(nativeObj));
}
// public GpuMat upload() {
// return new GpuMat(nativeObj);
// }
/**
* Free the resources for this {@link CvMat}. Once the {@link CvMat} is closed, it shouldn't be used and certainly
* won't contain the image data any longer.
*/
@Override
public void close() {
if(!skipCloseOnceForReturn) {
if(!deletedAlready) {
doNativeDelete();
deletedAlready = true;
if(TRACK_MEMORY_LEAKS) {
delStackTrace = new RuntimeException("Here's where I was closed");
}
} else if(TRACK_MEMORY_LEAKS) {
LOGGER.warn("TRACKING: Deleting {} again at:", this.getClass().getSimpleName(), new RuntimeException());
LOGGER.warn("TRACKING: originally closed at:", delStackTrace);
LOGGER.warn("TRACKING: create at: ", stackTrace);
}
} else
skipCloseOnceForReturn = false; // next close counts.
}
@Override
public String toString() {
return "CvMat: (" + getClass().getName() + "@" + Integer.toHexString(hashCode()) + ") " + (deletedAlready ? "" : super.toString());
}
/**
* This call should be made to manage a shallow copy of the Mat with a {@link CvMat}.
* NOTE!! Changes to the {@link CvMat} will be reflected in the {@link Mat} and
* vice versa. If you want a deep copy/clone of the original Mat then consider
* using {@link CvMat#deepCopy(Mat)}.
*/
public static CvMat shallowCopy(final Mat mat) {
final long newNativeObj = ImageAPI.pilecv4j_image_CvRaster_copy(mat.nativeObj);
if(newNativeObj == 0L) {
// let's do some checking
if(!mat.isContinuous())
LOGGER.error("Cannot shallow copy a discontinuous Mat");
else
LOGGER.error("Failed to shallow copy mat");
return null;
}
return new CvMat(newNativeObj);
}
public static CvMat flipRedBlue(final Mat mat) {
if(mat.channels() != 3 && mat.channels() != 4)
throw new IllegalArgumentException("Cannot flip the red and blue channels of a " + mat.channels() + " channel image.");
try(CvMat ret = new CvMat();) {
Imgproc.cvtColor(mat, ret, mat.channels() == 3 ? Imgproc.COLOR_RGB2BGR : Imgproc.COLOR_RGBA2BGRA);
return ret.returnMe();
}
}
/**
* This call will manage a complete deep copy of the provided {@code Mat}.
* Changes in one will not be reflected in the other.
*/
public static CvMat deepCopy(final Mat mat) {
if(mat.rows() == 0)
return new CvMat(mat.rows(), mat.cols(), mat.type());
if(mat.isContinuous())
return move(mat.clone());
try(final CvMat newMat = new CvMat(mat.rows(), mat.cols(), mat.type());) {
mat.copyTo(newMat);
return newMat.returnMe();
}
}
/**
* <p>
* This call can be made to hand management of a
* <a href="https://docs.opencv.org/4.0.1/d3/d63/classcv_1_1Mat.html">Mat</a>'s resources
* over to a new {@link CvMat}. The
* <a href="https://docs.opencv.org/4.0.1/d3/d63/classcv_1_1Mat.html">Mat</a> passed
* in <em>SHOULD NOT</em> be used after this call or, at least, it shouldn't be assumed
* to still be pointing to the same image data. When the {@link CvMat} is closed, it will
* release the data that was originally associated with the {@code Mat}. If you want
* to keep the {@code Mat} beyond the life of the {@link CvMat}, then consider using
* {@link CvMat#shallowCopy(Mat)} instead of {@link CvMat#move(Mat)}.
* </p>
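*
* <pre>
* <code>
* // hedged sketch: take ownership of a temporary Mat returned by an OpenCV call
* try (CvMat owned = CvMat.move(Mat.eye(3, 3, CvType.CV_32F))) {
*     // 'owned' now manages the native resources
* }
* </code>
* </pre>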
*
* @param mat - <a href="https://docs.opencv.org/4.0.1/d3/d63/classcv_1_1Mat.html">Mat</a>
* to take control of with the new {@link CvMat}. After this call the
* <a href="https://docs.opencv.org/4.0.1/d3/d63/classcv_1_1Mat.html">Mat</a>
* passed should not be used.
* @return a new {@link CvMat} that now manages the internal resources of the original. <b>Note: The caller owns the
* CvMat returned</b>
*/
public static CvMat move(final Mat mat) {
final var ret = new CvMat(ImageAPI.pilecv4j_image_CvRaster_move(mat.nativeObj));
// if it's a CvMat we can close it now freeing up ALL of the resources
// rather than just simply the data matrix. Otherwise it will wait for
// the gc to get around to finalizing
if(mat instanceof CvMat)
((CvMat)mat).close();
return ret;
}
/**
* <p>
* This call can be made to close a
* <a href="https://docs.opencv.org/4.0.1/d3/d63/classcv_1_1Mat.html">Mat</a> and free
* its resources. The
* <a href="https://docs.opencv.org/4.0.1/d3/d63/classcv_1_1Mat.html">Mat</a> passed
* in <em>SHOULD NOT</em> be used after this call. Doing so will result in a crash.
* </p>
*/
public static void closeRawMat(final Mat mat) {
if(mat == null)
return;
if(mat instanceof CvMat)
((CvMat)mat).close();
else
ImageAPI.pilecv4j_image_CvRaster_freeByMove(mat.nativeObj);
}
/**
* Convenience method that wraps the return value of <a href=
* "https://docs.opencv.org/4.0.1/d3/d63/classcv_1_1Mat.html#a0b57b6a326c8876d944d188a46e0f556">{@code Mat.zeros}</a>
* in a {@link CvMat}.
*
* @param rows number of rows in the resulting {@link CvMat}
* @param cols number of columns in the resulting {@link CvMat}
* @param type type of the resulting {@link CvMat}. See
* <a href="https://docs.opencv.org/4.0.1/javadoc/org/opencv/core/CvType.html">CvType</a>
* @return a new {@link CvMat} with all zeros of the given proportions and type. <b>Note: The caller owns the CvMat
* returned</b>
*/
public static CvMat zeros(final int rows, final int cols, final int type) {
return CvMat.move(Mat.zeros(rows, cols, type));
}
/**
* Convenience method that wraps the return value of
* <a href= "https://docs.opencv.org/4.0.1/d3/d63/classcv_1_1Mat.html#a69ae0402d116fc9c71908d8508dc2f09">{@code
* Mat.ones}</a> in a {@link CvMat}.
*
* @param rows number of rows of the resulting {@link CvMat}
* @param cols number of columns of the resulting {@link CvMat}
* @param type type of the resulting {@link CvMat}. See
* <a href="https://docs.opencv.org/4.0.1/javadoc/org/opencv/core/CvType.html">CvType</a>
*
* @return a new {@link CvMat} with all ones of the given proportions and type. <b>Note: The caller owns the CvMat returned</b>
*/
public static CvMat ones(final int rows, final int cols, final int type) {
return CvMat.move(Mat.ones(rows, cols, type));
}
public static CvMat eye(final Size size, final int type) {
try(final CvMat identity = CvMat.move(Mat.eye(size, type))) {
return identity.returnMe();
}
}
public static CvMat eye(final int rows, final int cols, final int type) {
try(final CvMat identity = CvMat.move(Mat.eye(rows, cols, type));) {
return identity.returnMe();
}
}
public static CvMat identity(final int rows, final int cols, final int type) {
return eye(rows, cols, type);
}
/**
* Convenience method for {@link Core#setIdentity(Mat, Scalar)} that returns an identity matrix scaled by a value.
*
* @param rows number of rows of the resulting {@link CvMat}
* @param cols number of columns of the resulting {@link CvMat}
* @param type type of the resulting {@link CvMat}. See {@link Core} for example values.
* @param value the value of the diagonal elements in the matrix.
*
* @return a new {@link CvMat} with the values = mtx(i,j) = [{@code value} if i=j; 0 otherwise]
*/
public static CvMat identity(final int rows, final int cols, final int type, final Scalar value) {
try(final CvMat identity = new CvMat(rows, cols, type)) {
Core.setIdentity(identity, value);
return identity.returnMe();
}
}
/**
* This implements the C++ {@code leftOp = rightOp}.
*/
public static void reassign(final Mat leftOp, final Mat rightOp) {
ImageAPI.pilecv4j_image_CvRaster_assign(leftOp.nativeObj, rightOp.nativeObj);
}
/**
* You can use this method to create a {@link CvMat}
* given a native pointer to the location of the raw data, and the metadata for the
* {@code Mat}. Since the data is being passed to the underlying {@code Mat}, the {@code Mat}
* will not be the "owner" of the data. That means YOU need to make sure that the native
* data buffer outlives the {@link CvMat} or you're pretty much guaranteed a core dump.
*/
public static CvMat create(final int rows, final int cols, final int type, final long pointer) {
final long nativeObj = ImageAPI.pilecv4j_image_CvRaster_makeMatFromRawDataReference(rows, cols, type, pointer);
if(nativeObj == 0)
throw new NullPointerException("Cannot create a CvMat from a null pointer data buffer.");
return CvMat.wrapNative(nativeObj);
}
/**
* You can use this method to create a {@link CvMat}
* given a native pointer to the location of the raw data, and the metadata for the
* {@code Mat}. Since the data is being passed to the underlying {@code Mat}, the {@code Mat}
* will not be the "owner" of the data. That means YOU need to make sure that the native
* data buffer outlives the {@link CvMat} or you're pretty much guaranteed a core dump.
*/
public static CvMat create(final int[] sizes, final int type, final long pointer) {
return CvMat.wrapNative(createRaw(sizes, type, pointer));
}
protected static long createRaw(final int[] sizes, final int type, final long pointer) {
final int ndims = sizes.length;
final long numBytes = ndims * Integer.BYTES;
final Pointer ptr = new Memory(numBytes);
final ByteBuffer bb = ptr.getByteBuffer(0, numBytes).order(ByteOrder.nativeOrder());
for(final int sz: sizes)
bb.putInt(sz);
final long nativeObj = ImageAPI.pilecv4j_image_CvRaster_makeMdMatFromRawDataReference(ndims, ptr, type, pointer);
if(nativeObj == 0)
throw new NullPointerException("Cannot create a CvMat from a null pointer data buffer.");
return nativeObj;
}
/**
* This method allows the developer to return a {@link CvMat} that's being managed by
* a <em>"try-with-resource"</em> without worrying about the {@link CvMat}'s resources
* being freed. As an example:
*
* <pre>
* <code>
* try (CvMat matToReturn = new CvMat(); ) {
* // do something to fill in the matToReturn
*
* return matToReturn.returnMe();
* }
* </code>
* </pre>
*
* <p>
* While it's possible to simply not use a try-with-resource and leave the {@link CvMat} unmanaged,
* you run the possibility of leaking the {@link CvMat} if an exception is thrown prior to returning
* it.
* </p>
*
* <p>
* Note: if you call {@link CvMat#returnMe()} and don't actually reassign the result to another managed
* {@link CvMat}, you will leak the CvMat.
* </p>
*/
public CvMat returnMe() {
// hacky, yet efficient.
skipCloseOnceForReturn = true;
return this;
}
/**
* <p>
* Creates a {@link CvMat} given a handle to a native C++
* <a href="https://docs.opencv.org/4.0.1/d3/d63/classcv_1_1Mat.html">Mat</a> instance.
* nativeObj needs to be a native pointer to a C++ cv::Mat object or you're likely to
* get a core dump. The management of that Mat will now be the responsibility
* of the CvMat. If something else ends up invoking the destructor on the native
* cv::Mat then there will likely be a core dump when subsequently using the {@link CvMat}
* returned. This includes even the deletion of the {@link CvMat} by the garbage collector.
* </p>
*
* <p>
* <em>With great power, comes great responsibility.</em>
* </p>
*
* @param nativeObj - pointer to a C++ cv::Mat instance. You're on your own as to how to
* obtain one of these but you will likely need to write C++ code to do it.
*/
public static CvMat wrapNative(final long nativeObj) {
return new CvMat(nativeObj);
}
protected void doNativeDelete() {
try {
nDelete.invoke(this, super.nativeObj);
} catch(final IllegalAccessException | IllegalArgumentException | InvocationTargetException e) {
throw new RuntimeException(
"Got an exception trying to call Mat.n_Delete. Either the security model is too restrictive or the version of OpenCv can't be supported.",
e);
}
}
// Prevent Mat finalize from being called
@Override
protected void finalize() throws Throwable {
if(!deletedAlready) {
LOGGER.warn("Finalizing a {} that hasn't been closed.", this.getClass().getSimpleName());
if(TRACK_MEMORY_LEAKS)
LOGGER.warn("TRACKING: Here's where I was instantiated: ", stackTrace);
close();
}
}
private static ByteBuffer _getData(final Mat mat) {
if(!mat.isContinuous())
throw new IllegalArgumentException("Cannot create a CvRaster from a Mat without a continuous buffer.");
final Pointer dataPtr = ImageAPI.pilecv4j_image_CvRaster_getData(mat.nativeObj);
if(Pointer.nativeValue(dataPtr) == 0)
throw new IllegalArgumentException("Cannot access raw data in Mat. It may be uninitialized.");
return dataPtr.getByteBuffer(0, mat.elemSize() * mat.total());
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/CvMatOfPoint2f.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image;
import static ai.kognition.pilecv4j.image.CvMat.TRACK_MEMORY_LEAKS;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class CvMatOfPoint2f extends MatOfPoint2f implements AutoCloseable {
private static final Logger LOGGER = LoggerFactory.getLogger(CvMatOfPoint2f.class);
private boolean skipCloseOnceForReturn = false;
static {
CvMat.initOpenCv();
}
private static final Method nDelete;
private boolean deletedAlready = false;
protected final RuntimeException stackTrace;
protected RuntimeException delStackTrace = null;
static {
try {
nDelete = org.opencv.core.Mat.class.getDeclaredMethod("n_delete", long.class);
nDelete.setAccessible(true);
} catch(final NoSuchMethodException | SecurityException e) {
throw new RuntimeException(
"Got an exception trying to access Mat.n_Delete. Either the security model is too restrictive or the version of OpenCv can't be supported.", e);
}
}
protected CvMatOfPoint2f(final long nativeObj) {
super(nativeObj);
this.stackTrace = TRACK_MEMORY_LEAKS ? new RuntimeException("Here's where I was instantiated: ") : null;
}
public CvMatOfPoint2f() {
this.stackTrace = TRACK_MEMORY_LEAKS ? new RuntimeException("Here's where I was instantiated: ") : null;
}
public CvMatOfPoint2f(final Point... a) {
super(a);
this.stackTrace = TRACK_MEMORY_LEAKS ? new RuntimeException("Here's where I was instantiated: ") : null;
}
public CvMatOfPoint2f(final Mat mat) {
super(mat);
this.stackTrace = TRACK_MEMORY_LEAKS ? new RuntimeException("Here's where I was instantiated: ") : null;
}
/**
* Shallow copy this as a CvMat.
*
* @param flatten the number of channels present in this corresponds to the dimensionality of the point, i.e. the shape is (rows=numPoints, cols=1,
* channels=numDimensions). If flatten is true (and transpose is not), instead return a mat with a shape of (rows=numPoints, cols=numDimensions,
* channels=1), which is a more traditional format for most mathematical operations.
* @param transpose return the mat transposed from its original shape.
*
* @return <em>The caller owns the returned Mat.</em>
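*
* <pre>
* <code>
* // hedged sketch ('points' is any CvMatOfPoint2f of N 2D points): an (N x 2), 1-channel view
* try (CvMat m = points.asCvMat(true, false)) {
*     // ... use m ...
* }
* </code>
* </pre>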
*/
public CvMat asCvMat(final boolean flatten, final boolean transpose) {
try(final var mat = CvMat.shallowCopy(this);
// shaped can be the actual mat, and to avoid closing it twice,
// returnMe is called.
final var shaped = flatten ? CvMat.move(mat.reshape(1)) : mat.returnMe();) {
if(transpose)
try(final var mat_t = shaped.t();) {
return mat_t.returnMe();
}
else
return shaped.returnMe();
}
}
public static CvMatOfPoint2f move(final MatOfPoint mat) {
return new CvMatOfPoint2f(ImageAPI.pilecv4j_image_CvRaster_move(mat.nativeObj));
}
public CvMatOfPoint2f returnMe() {
skipCloseOnceForReturn = true;
return this;
}
@Override
public void close() {
if(!skipCloseOnceForReturn) {
if(!deletedAlready) {
doNativeDelete();
deletedAlready = true;
if(TRACK_MEMORY_LEAKS) {
delStackTrace = new RuntimeException("Here's where I was closed");
}
} else if(TRACK_MEMORY_LEAKS) {
LOGGER.warn("TRACKING: Deleting {} again at:", this.getClass()
.getSimpleName(), new RuntimeException());
LOGGER.warn("TRACKING: originally closed at:", delStackTrace);
LOGGER.warn("TRACKING: create at: ", stackTrace);
}
} else
skipCloseOnceForReturn = false; // next close counts.
}
protected void doNativeDelete() {
try {
nDelete.invoke(this, super.nativeObj);
} catch(final IllegalAccessException | IllegalArgumentException | InvocationTargetException e) {
throw new RuntimeException(
"Got an exception trying to call Mat.n_Delete. Either the security model is too restrictive or the version of OpenCv can't be supported.", e);
}
}
// Prevent Mat finalize from being called
@Override
protected void finalize() throws Throwable {
if(!deletedAlready) {
LOGGER.warn("Finalizing a {} that hasn't been closed.", this.getClass()
.getSimpleName());
if(TRACK_MEMORY_LEAKS)
LOGGER.warn("TRACKING: Here's where I was instantiated: ", stackTrace);
close();
}
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/CvMatWithColorInformation.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image;
import java.awt.color.ColorSpace;
import java.awt.color.ICC_ColorSpace;
import java.awt.image.BufferedImage;
import java.awt.image.ColorModel;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
public class CvMatWithColorInformation extends CvMat {
public final boolean iCC;
public final boolean isLinearRGBspace;
public final boolean isGray;
public final boolean isSRgb;
public final int colorSpaceType;
public final int numColorModelChannels;
private static Map<Integer, String> csTypeName = new HashMap<>();
static {
csTypeName.put(ColorSpace.TYPE_XYZ, "TYPE_XYZ");
csTypeName.put(ColorSpace.TYPE_Lab, "TYPE_Lab");
csTypeName.put(ColorSpace.TYPE_Luv, "TYPE_Luv");
csTypeName.put(ColorSpace.TYPE_YCbCr, "TYPE_YCbCr");
csTypeName.put(ColorSpace.TYPE_Yxy, "TYPE_Yxy");
csTypeName.put(ColorSpace.TYPE_RGB, "TYPE_RGB");
csTypeName.put(ColorSpace.TYPE_GRAY, "TYPE_GRAY");
csTypeName.put(ColorSpace.TYPE_HSV, "TYPE_HSV");
csTypeName.put(ColorSpace.TYPE_HLS, "TYPE_HLS");
csTypeName.put(ColorSpace.TYPE_CMYK, "TYPE_CMYK");
csTypeName.put(ColorSpace.TYPE_CMY, "TYPE_CMY");
csTypeName.put(ColorSpace.TYPE_2CLR, "TYPE_2CLR");
csTypeName.put(ColorSpace.TYPE_3CLR, "TYPE_3CLR");
csTypeName.put(ColorSpace.TYPE_4CLR, "TYPE_4CLR");
csTypeName.put(ColorSpace.TYPE_5CLR, "TYPE_5CLR");
csTypeName.put(ColorSpace.TYPE_6CLR, "TYPE_6CLR");
csTypeName.put(ColorSpace.TYPE_7CLR, "TYPE_7CLR");
csTypeName.put(ColorSpace.TYPE_8CLR, "TYPE_8CLR");
csTypeName.put(ColorSpace.TYPE_9CLR, "TYPE_9CLR");
csTypeName.put(ColorSpace.TYPE_ACLR, "TYPE_ACLR");
csTypeName.put(ColorSpace.TYPE_BCLR, "TYPE_BCLR");
csTypeName.put(ColorSpace.TYPE_CCLR, "TYPE_CCLR");
csTypeName.put(ColorSpace.TYPE_DCLR, "TYPE_DCLR");
csTypeName.put(ColorSpace.TYPE_ECLR, "TYPE_ECLR");
csTypeName.put(ColorSpace.TYPE_FCLR, "TYPE_FCLR");
csTypeName.put(ColorSpace.CS_sRGB, "CS_sRGB");
csTypeName.put(ColorSpace.CS_LINEAR_RGB, "CS_LINEAR_RGB");
csTypeName.put(ColorSpace.CS_CIEXYZ, "CS_CIEXYZ");
csTypeName.put(ColorSpace.CS_PYCC, "CS_PYCC");
csTypeName.put(ColorSpace.CS_GRAY, "CS_GRAY");
}
// this is copied from ColorModel.class source code.
public static boolean isLinearRGBspace(final ColorSpace cs) {
return cs == ColorSpace.getInstance(ColorSpace.CS_LINEAR_RGB);
}
public static String colorSpaceTypeName(final int colorSpaceType) {
final String ret = csTypeName.get(colorSpaceType);
return ret == null ? "UNKNOWN" : ret;
}
CvMatWithColorInformation(final CvMat mat, final BufferedImage im) {
CvMat.reassign(this, mat);
final ColorModel cm = im.getColorModel();
final ColorSpace colorSpace = cm.getColorSpace();
isLinearRGBspace = isLinearRGBspace(colorSpace);
iCC = (ICC_ColorSpace.class.isAssignableFrom(colorSpace.getClass()));
isGray = (colorSpace.getType() == ColorSpace.TYPE_GRAY);
isSRgb = colorSpace == ColorSpace.getInstance(ColorSpace.CS_sRGB);
colorSpaceType = colorSpace.getType();
numColorModelChannels = cm.getNumColorComponents();
}
/**
* Return a displayable version of this image: channels beyond those in the color
* model are dropped, a 2-channel gray+alpha image is expanded to 4 channels, and
* 32-bit integer channels are narrowed to 16-bit. <b>Note: The caller owns the
* CvMat returned.</b>
*/
public CvMat displayable() {
try(final CvMat ret = CvMat.shallowCopy(this);
final Closer c = new Closer();) {
if(channels() > 4) {
final List<Mat> channels = new ArrayList<>(channels());
Core.split(this, channels);
channels.forEach(m -> c.addMat(m));
final List<Mat> sub = channels.subList(0, numColorModelChannels);
Core.merge(sub, ret);
} else if(channels() == 2 && isGray) {
final List<Mat> channels = new ArrayList<>(channels());
Core.split(this, channels);
channels.forEach(m -> c.addMat(m));
final List<Mat> newChannels = new ArrayList<>();
final Mat gray = channels.get(0);
for(int i = 0; i < 3; i++)
newChannels.add(c.addMat(CvMat.shallowCopy(gray)));
newChannels.add(c.addMat(CvMat.shallowCopy(channels.get(1))));
Core.merge(newChannels, ret);
}
if(ret.depth() == CvType.CV_32S) {
final List<Mat> channels = new ArrayList<>(channels());
Core.split(ret, channels);
channels.forEach(m -> c.addMat(m));
final List<Mat> newChannels = new ArrayList<>();
for(final Mat ch: channels) {
final Mat newMat = c.addMat(new Mat());
Utils.bitwiseUnsignedRightShiftAndMask(ch, newMat, 16, 16);
newMat.convertTo(newMat, CvType.makeType(CvType.CV_16S, 1));
newChannels.add(newMat);
}
Core.merge(newChannels, ret);
}
return ret.returnMe();
}
}
@Override
public String toString() {
return super.toString() + " [ ColorSpace: " + csTypeName.get(colorSpaceType) + ", is ICC: " + iCC + ", is Linear RGB: " + isLinearRGBspace
+ ", is Gray:" + isGray + ", sRGB CS:" + isSRgb + "]";
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/CvRaster.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.DoubleBuffer;
import java.nio.FloatBuffer;
import java.nio.IntBuffer;
import java.nio.ShortBuffer;
import java.util.function.Function;
import com.sun.jna.Pointer;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
/**
* <p>
* {@link CvRaster} is a utility for direct access to the underlying Mat's data
* buffer
* from java using a {@code DirectByteBuffer}. You can get access to an
* underlying
* Mat's data buffer by passing a lambda to the appropriate CvMat method.
* </p>
*
* <pre>
* <code>
* CvMat.rasterAp(mat, raster -> {
* // do something with the raster which contains a DirectByteBuffer
* // that can be retrieved using:
* ByteBuffer bb = raster.underlying();
* });
* </code>
* </pre>
*
* Alternatively you can apply a lambda to the {@link CvRaster} using one of the
* available
* methods. For example, this will add up all of the pixel values in grayscale
* byte image
* and return the result
*
* <pre>
* <code>
* GetChannelValueAsInt valueFetcher = CvRaster.channelValueFetcher(mat.type());
* final long sum = CvMat.rasterOp(mat,
* raster -> raster.reduce(Long.valueOf(0),
* (prev, pixel, row, col) -> Long.valueOf(prev.longValue() + valueFetcher.get(pixel, 0))
* )
* );
* </code>
* </pre>
*
*/
@Deprecated
public abstract class CvRaster implements AutoCloseable {
public final Mat mat;
protected final ByteBuffer currentBuffer;
private CvRaster(final Mat m) {
this.mat = m;
this.currentBuffer = getData(m);
}
/**
* return the {@code CvType} of the {@code CvRaster}'s underlying {@code Mat}.
*/
public int type() {
return mat.type();
}
/**
* return the number of channels of the {@code CvRaster}'s underlying
* {@code Mat}.
*/
public int channels() {
return CvType.channels(type());
}
/**
* return the number of rows of the {@code CvRaster}'s underlying {@code Mat}.
*/
public int rows() {
return mat.rows();
}
/**
* return the number of columns of the {@code CvRaster}'s underlying
* {@code Mat}.
*/
public int cols() {
return mat.cols();
}
/**
* return the element size of the {@code CvRaster}'s underlying {@code Mat}.
* This uses
* {@code CvType.ELEM_SIZE(type())}
*/
public int elemSize() {
return CvType.ELEM_SIZE(type());
}
/**
* Direct access to the underlying {@code Mat}'s data buffer is available, as
* long as the underlying buffer is continuous.
*/
public ByteBuffer underlying() {
if(currentBuffer == null)
throw new NullPointerException("You cannot perform this operation without opening an \"imageOp\" on the " + CvRaster.class.getSimpleName());
return currentBuffer;
}
/**
* zero out the pixel at the position (row, col)
*/
public abstract void zero(int row, int col);
/**
* get the pixel at the flattened position {@code pos}. The type will
* depend on the underlying CvType.
*/
public abstract Object get(int pos); // flat get
/**
* Set the {@code pos}ition in the raster to the provided pixel value. The
* pixel value will need to comport with the CvType or you'll get an exception.
* For example, if the CvType is {@code CvType.CV_16SC3} then the pixel
* needs to be {@code short[3]}.
*/
public abstract void set(int pos, Object pixel);
/**
* Apply the given lambda to every pixel.
*/
public abstract <T> void apply(final PixelSetter<T> pixelSetter);
/**
* Apply the given lambda to every pixel.
*/
public abstract <T> void apply(final FlatPixelSetter<T> pixelSetter);
/**
* Apply the given lambda to every pixel.
*/
public abstract <T> void forEach(final PixelConsumer<T> consumer);
/**
* Apply the given lambda to every pixel.
*/
public abstract <T> void forEach(final FlatPixelConsumer<T> consumer);
/**
* Get the pixel for the given row/col location. The pixel value will comport
* with the
* CvType of the raster. For example, if the CvType is {@code CvType.CV_16SC3}
* then the pixel
* will be {@code short[3]}.
*/
public Object get(final int row, final int col) {
return get((row * cols()) + col);
}
/**
* Set the row/col position in the raster to the provided pixel value. The
* pixel value will need to comport with the CvType or you'll get an exception.
* For example, if the CvType is {@code CvType.CV_16SC3} then the pixel
* needs to be {@code short[3]}.
*/
public void set(final int row, final int col, final Object pixel) {
set((row * cols()) + col, pixel);
}
/**
* Reduce the raster to a single value of type {@code U} by applying the
* aggregator
*/
public <U> U reduce(final U identity, final PixelAggregate<Object, U> seqOp) {
U prev = identity;
final int rows = rows();
final int cols = cols();
for(int r = 0; r < rows; r++) {
for(int c = 0; c < cols; c++) {
prev = seqOp.apply(prev, get(r, c), r, c);
}
}
return prev;
}
/**
* The total number of bytes in the raster.
*/
public int getNumBytes() {
return rows() * cols() * elemSize();
}
/**
* The underlying data buffer pointer as a long
*/
public long getNativeAddressOfData() {
if(!mat.isContinuous())
throw new IllegalArgumentException("Cannot retrieve the data reference of a Mat without a continuous buffer.");
return Pointer.nativeValue(ImageAPI.pilecv4j_image_CvRaster_getData(mat.nativeObj));
}
@Override
public void close() {
// clean up the direct byte buffer?
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + channels();
result = prime * result + cols();
result = prime * result + elemSize();
result = prime * result + rows();
result = prime * result + type();
return result;
}
@Override
public boolean equals(final Object obj) {
if(this == obj)
return true;
if(obj == null)
return false;
if(getClass() != obj.getClass())
return false;
final CvRaster other = (CvRaster)obj;
if(channels() != other.channels())
return false;
if(cols() != other.cols())
return false;
if(elemSize() != other.elemSize())
return false;
if(mat == null) {
if(other.mat != null)
return false;
} else if(other.mat == null)
return false;
if(rows() != other.rows())
return false;
if(type() != other.type())
return false;
if(mat != other.mat && !pixelsIdentical(mat, other.mat))
return false;
return true;
}
/**
* This is a helper comparator that verifies the byte-by-byte equivalence of the
* two underlying data buffers.
*/
public static boolean pixelsIdentical(final Mat m1, final Mat m2) {
if(m1.nativeObj == m2.nativeObj)
return true;
final ByteBuffer bb1 = _getData(m1);
final ByteBuffer bb2 = _getData(m2);
return bb1.compareTo(bb2) == 0;
}
/**
* Copy the entire image to a primitive array of the appropriate type.
*/
public static <T> T copyToPrimitiveArray(final CvRaster m) {
return copyToPrimitiveArray(m.mat);
}
/**
* Copy the entire image to a primitive array of the appropriate type.
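*
* <pre>
* <code>
* // hedged sketch ('mat8U' is any CV_8U Mat): the returned array type matches the Mat's depth
* byte[] data = CvRaster.copyToPrimitiveArray(mat8U);
* </code>
* </pre>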
*/
@SuppressWarnings("unchecked")
public static <T> T copyToPrimitiveArray(final Mat m) {
final int rows = m.rows();
final int cols = m.cols();
final int type = m.type();
final int channels = CvType.channels(type);
final int depth = CvType.depth(type);
switch(depth) {
case CvType.CV_8S:
case CvType.CV_8U: {
final byte[] data = new byte[rows * cols * channels];
m.get(0, 0, data);
return (T)data;
}
case CvType.CV_16U:
case CvType.CV_16S: {
final short[] data = new short[rows * cols * channels];
m.get(0, 0, data);
return (T)data;
}
case CvType.CV_32S: {
final int[] data = new int[rows * cols * channels];
m.get(0, 0, data);
return (T)data;
}
case CvType.CV_32F: {
final float[] data = new float[rows * cols * channels];
m.get(0, 0, data);
return (T)data;
}
case CvType.CV_64F: {
final double[] data = new double[rows * cols * channels];
m.get(0, 0, data);
return (T)data;
}
default:
throw new IllegalArgumentException("Can't handle CvType with value " + CvType.typeToString(type));
}
}
/**
* Copy the entire image to a primitive array of the appropriate type.
*/
public static <T> void copyToPrimitiveArray(final Mat m, final T data) {
final int type = m.type();
final int depth = CvType.depth(type);
switch(depth) {
case CvType.CV_8S:
case CvType.CV_8U: {
m.get(0, 0, (byte[])data);
return;
}
case CvType.CV_16U:
case CvType.CV_16S: {
m.get(0, 0, (short[])data);
return;
}
case CvType.CV_32S: {
m.get(0, 0, (int[])data);
return;
}
case CvType.CV_32F: {
m.get(0, 0, (float[])data);
return;
}
case CvType.CV_64F: {
m.get(0, 0, (double[])data);
return;
}
default:
throw new IllegalArgumentException("Can't handle CvType with value " + CvType.typeToString(type));
}
}
/**
* Instances of this interface will return the channel value of the given pixel
* as an int.
* You can obtain an instance of this interface for the appropriate
* {@link CvType} using
* {@link CvRaster#channelValueFetcher(int)}.
*/
@FunctionalInterface
public static interface GetChannelValueAsInt {
public int get(Object pixel, int channel);
}
/**
* Instances of this interface will set the channel value of the given pixel.
* You can obtain an instance of this interface for the appropriate
* {@link CvType} using
* {@link CvRaster#channelValuePutter(int)}.
*/
@FunctionalInterface
public static interface PutChannelValueFromInt {
public void put(Object pixel, int channel, int channelValue);
}
/**
* Create the appropriate {@link PutChannelValueFromInt} instance for the given
* type.
*
* @throws IllegalArgumentException if the type isn't an integer type.
*/
public static PutChannelValueFromInt channelValuePutter(final int type) {
switch(CvType.depth(type)) {
case CvType.CV_8S:
return (p, ch, chv) -> ((byte[])p)[ch] = (byte)((chv > Byte.MAX_VALUE) ? Byte.MAX_VALUE : chv);
case CvType.CV_8U:
return (p, ch, chv) -> ((byte[])p)[ch] = (byte)((chv > 0xFF) ? 0xFF : chv);
case CvType.CV_16S:
return (p, ch, chv) -> ((short[])p)[ch] = (short)((chv > Short.MAX_VALUE) ? Short.MAX_VALUE : chv);
case CvType.CV_16U:
return (p, ch, chv) -> ((short[])p)[ch] = (short)((chv > 0xFFFF) ? 0xFFFF : chv);
case CvType.CV_32S:
return (p, ch, chv) -> ((int[])p)[ch] = chv;
default:
throw new IllegalArgumentException("Can't handle CvType with value " + CvType.typeToString(type));
}
}
/**
* Create the appropriate {@link GetChannelValueAsInt} instance for the given
* type.
*
* @throws IllegalArgumentException if the type isn't an integer type.
*/
public static GetChannelValueAsInt channelValueFetcher(final int type) {
switch(CvType.depth(type)) {
case CvType.CV_8S:
return (p, ch) -> (int)((byte[])p)[ch];
case CvType.CV_8U:
return (p, ch) -> (((byte[])p)[ch] & 0xFF);
case CvType.CV_16S:
return (p, ch) -> (int)((short[])p)[ch];
case CvType.CV_16U:
return (p, ch) -> (((short[])p)[ch] & 0xFFFF);
case CvType.CV_32S:
return (p, ch) -> ((int[])p)[ch];
case CvType.CV_32F:
return (p, ch) -> (int)((float[])p)[ch];
default:
throw new IllegalArgumentException("Can't handle CvType with value " + CvType.typeToString(type));
}
}
/**
* Retrieves a function that will convert any *integer* based pixel to an array
* of ints that corresponds to the pixel's channel values.
*
* @param type - the CvType of the Mat you will be using the converter on
* @throws IllegalArgumentException if the type isn't an integer type.
*/
public static Function<Object, int[]> pixelToIntsConverter(final int type) {
final GetChannelValueAsInt fetcher = channelValueFetcher(type);
final int numChannels = CvType.channels(type);
final int[] ret = new int[numChannels];
return p -> {
for(int i = 0; i < numChannels; i++)
ret[i] = fetcher.get(p, i);
return ret;
};
}
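/*
* Illustrative usage (a sketch; "raster" is an assumed CvRaster over a
* CV_8UC3 Mat): widen a pixel's channel values to ints regardless of the
* underlying depth.
*
*   final Function<Object, int[]> toInts = CvRaster.pixelToIntsConverter(CvType.CV_8UC3);
*   final int[] bgr = toInts.apply(raster.get(0));
*
* Note that the returned int[] is reused across calls; copy it if you need to
* retain it.
*/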
/**
* Given the type, this method will create an empty pixel in the form of
* a primitive array of the appropriate type with a length that corresponds
* to the number of channels.
*/
public static Object makeEmptyPixel(final int type) {
final int channels = CvType.channels(type);
switch(CvType.depth(type)) {
case CvType.CV_8S:
case CvType.CV_8U:
return new byte[channels];
case CvType.CV_16S:
case CvType.CV_16U:
return new short[channels];
case CvType.CV_32S:
return new int[channels];
case CvType.CV_32F:
return new float[channels];
case CvType.CV_64F:
return new double[channels];
default:
throw new IllegalArgumentException("Can't handle CvType with value " + CvType.typeToString(type));
}
}
public static Function<int[], Object> intsToPixelConverter(final int type) {
final PutChannelValueFromInt putter = channelValuePutter(type);
final int numChannels = CvType.channels(type);
final Object pixel = makeEmptyPixel(type);
return ints -> {
for(int i = 0; i < numChannels; i++)
putter.put(pixel, i, ints[i]);
return pixel;
};
}
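/*
* Illustrative usage (a sketch; "raster" assumed as above): the inverse of
* pixelToIntsConverter. The returned pixel object is also reused across
* calls, so consume it (e.g. pass it to set(...)) before the next apply.
*
*   final Function<int[], Object> toPixel = CvRaster.intsToPixelConverter(CvType.CV_8UC3);
*   raster.set(0, toPixel.apply(new int[] {255,0,0}));
*/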
// ==================================================================
// PixelConsumer interfaces
// ==================================================================
@FunctionalInterface
public static interface PixelConsumer<T> {
public void accept(int row, int col, T pixel);
}
@FunctionalInterface
public static interface BytePixelConsumer extends PixelConsumer<byte[]> {}
@FunctionalInterface
public static interface ShortPixelConsumer extends PixelConsumer<short[]> {}
@FunctionalInterface
public static interface IntPixelConsumer extends PixelConsumer<int[]> {}
@FunctionalInterface
public static interface FloatPixelConsumer extends PixelConsumer<float[]> {}
@FunctionalInterface
public static interface DoublePixelConsumer extends PixelConsumer<double[]> {}
// ==================================================================
// FlatPixelConsumer interfaces
// ==================================================================
@FunctionalInterface
public static interface FlatPixelConsumer<T> {
public void accept(int pos, T pixel);
}
@FunctionalInterface
public static interface FlatBytePixelConsumer extends FlatPixelConsumer<byte[]> {}
@FunctionalInterface
public static interface FlatShortPixelConsumer extends FlatPixelConsumer<short[]> {}
@FunctionalInterface
public static interface FlatIntPixelConsumer extends FlatPixelConsumer<int[]> {}
@FunctionalInterface
public static interface FlatFloatPixelConsumer extends FlatPixelConsumer<float[]> {}
@FunctionalInterface
public static interface FlatDoublePixelConsumer extends FlatPixelConsumer<double[]> {}
// ==================================================================
// PixelSetter interfaces
// ==================================================================
@FunctionalInterface
public static interface PixelSetter<T> {
public T pixel(int row, int col);
}
@FunctionalInterface
public static interface BytePixelSetter extends PixelSetter<byte[]> {}
@FunctionalInterface
public static interface ShortPixelSetter extends PixelSetter<short[]> {}
@FunctionalInterface
public static interface IntPixelSetter extends PixelSetter<int[]> {}
@FunctionalInterface
public static interface FloatPixelSetter extends PixelSetter<float[]> {}
@FunctionalInterface
public static interface DoublePixelSetter extends PixelSetter<double[]> {}
// ==================================================================
// ==================================================================
// FlatPixelSetter interfaces
// ==================================================================
@FunctionalInterface
public static interface FlatPixelSetter<T> {
public T pixel(int position);
}
@FunctionalInterface
public static interface FlatBytePixelSetter extends FlatPixelSetter<byte[]> {}
@FunctionalInterface
public static interface FlatShortPixelSetter extends FlatPixelSetter<short[]> {}
@FunctionalInterface
public static interface FlatIntPixelSetter extends FlatPixelSetter<int[]> {}
@FunctionalInterface
public static interface FlatFloatPixelSetter extends FlatPixelSetter<float[]> {}
@FunctionalInterface
public static interface FlatDoublePixelSetter extends FlatPixelSetter<double[]> {}
// ==================================================================
@FunctionalInterface
public static interface PixelAggregate<P, R> {
R apply(R prev, P pixel, int row, int col);
}
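/*
* Illustrative usage of the consumer/setter interfaces above (a sketch;
* "raster" is an assumed CvRaster over a CV_8UC3 Mat):
*
*   // read every pixel; "pixel" is a reused byte[3] holding b,g,r at (row,col)
*   raster.forEach((BytePixelConsumer)(row, col, pixel) -> {
*       // inspect pixel here
*   });
*
*   // overwrite every pixel with pure red (BGR order)
*   raster.apply((BytePixelSetter)(row, col) -> new byte[] {0,0,(byte)255});
*/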
static CvRaster makeInstance(final Mat mat) {
final int type = mat.type();
final int depth = CvType.depth(type);
switch(depth) {
case CvType.CV_8S:
case CvType.CV_8U:
// we can only handle 16bit FP as raw bytes
case CvType.CV_16F:
return new CvRaster(mat) {
final byte[] zeroPixel = new byte[channels()];
ByteBuffer bb = currentBuffer;
private final int numChannels = mat.channels();
@Override
public void zero(final int row, final int col) {
set(row, col, zeroPixel);
}
@Override
public Object get(final int pos) {
final byte[] ret = new byte[channels()];
bb.position(pos * numChannels);
bb.get(ret);
return ret;
}
@Override
public void set(final int pos, final Object pixel) {
final byte[] p = (byte[])pixel;
bb.position(pos * numChannels);
bb.put(p);
}
@Override
public <T> void forEach(final PixelConsumer<T> pc) {
final BytePixelConsumer bpc = (BytePixelConsumer)pc;
final byte[] pixel = new byte[channels()];
final int channels = channels();
iterateOver((row, col, rowOffset) -> {
bb.position(rowOffset + (col * channels));
bb.get(pixel);
bpc.accept(row, col, pixel);
});
}
@Override
public <T> void forEach(final FlatPixelConsumer<T> pc) {
final FlatBytePixelConsumer bpc = (FlatBytePixelConsumer)pc;
final byte[] pixel = new byte[channels()];
iterateOver((bufPos, pixPos) -> {
bb.position(bufPos);
bb.get(pixel);
bpc.accept(pixPos, pixel);
});
}
@Override
public <T> void apply(final PixelSetter<T> ps) {
final BytePixelSetter bps = (BytePixelSetter)ps;
final int channels = channels();
iterateOver((row, col, rowOffset) -> {
bb.position(rowOffset + (col * channels));
bb.put(bps.pixel(row, col));
});
}
@Override
public <T> void apply(final FlatPixelSetter<T> ps) {
final FlatBytePixelSetter bps = (FlatBytePixelSetter)ps;
iterateOver((bufPos, pixPos) -> {
bb.position(bufPos);
bb.put(bps.pixel(pixPos));
});
}
};
case CvType.CV_16U:
case CvType.CV_16S:
return new CvRaster(mat) {
ShortBuffer sb = currentBuffer.asShortBuffer();
final short[] zeroPixel = new short[channels()]; // zeroed already
private final int numChannels = mat.channels();
@Override
public void zero(final int row, final int col) {
set(row, col, zeroPixel);
}
@Override
public Object get(final int pos) {
final short[] ret = new short[channels()];
sb.position(pos * numChannels);
sb.get(ret);
return ret;
}
@Override
public void set(final int pos, final Object pixel) {
final short[] p = (short[])pixel;
sb.position(pos * numChannels);
sb.put(p);
}
@Override
public <T> void forEach(final PixelConsumer<T> pc) {
final ShortPixelConsumer bpc = (ShortPixelConsumer)pc;
final short[] pixel = new short[channels()];
final int channels = channels();
iterateOver((row, col, rowOffset) -> {
sb.position(rowOffset + (col * channels));
sb.get(pixel);
bpc.accept(row, col, pixel);
});
}
@Override
public <T> void forEach(final FlatPixelConsumer<T> pc) {
final FlatShortPixelConsumer bpc = (FlatShortPixelConsumer)pc;
final short[] pixel = new short[channels()];
iterateOver((bufPos, pixPos) -> {
sb.position(bufPos);
sb.get(pixel);
bpc.accept(pixPos, pixel);
});
}
@Override
public <T> void apply(final PixelSetter<T> ps) {
final ShortPixelSetter bps = (ShortPixelSetter)ps;
final int channels = channels();
iterateOver((row, col, rowOffset) -> {
sb.position(rowOffset + (col * channels));
sb.put(bps.pixel(row, col));
});
}
@Override
public <T> void apply(final FlatPixelSetter<T> ps) {
final FlatShortPixelSetter bps = (FlatShortPixelSetter)ps;
iterateOver((bufPos, pixPos) -> {
sb.position(bufPos);
sb.put(bps.pixel(pixPos));
});
}
};
case CvType.CV_32S:
return new CvRaster(mat) {
IntBuffer ib = currentBuffer.asIntBuffer();
final int[] zeroPixel = new int[channels()]; // zeroed already
private final int numChannels = mat.channels();
@Override
public void zero(final int row, final int col) {
set(row, col, zeroPixel);
}
@Override
public Object get(final int pos) {
final int[] ret = new int[channels()];
ib.position(pos * numChannels);
ib.get(ret);
return ret;
}
@Override
public void set(final int pos, final Object pixel) {
final int[] p = (int[])pixel;
ib.position(pos * numChannels);
ib.put(p);
}
@Override
public <T> void forEach(final PixelConsumer<T> pc) {
final IntPixelConsumer bpc = (IntPixelConsumer)pc;
final int[] pixel = new int[channels()];
final int channels = channels();
iterateOver((row, col, rowOffset) -> {
ib.position(rowOffset + (col * channels));
ib.get(pixel);
bpc.accept(row, col, pixel);
});
}
@Override
public <T> void forEach(final FlatPixelConsumer<T> pc) {
final FlatIntPixelConsumer bpc = (FlatIntPixelConsumer)pc;
final int[] pixel = new int[channels()];
iterateOver((bufPos, pixPos) -> {
ib.position(bufPos);
ib.get(pixel);
bpc.accept(pixPos, pixel);
});
}
@Override
public <T> void apply(final PixelSetter<T> ps) {
final IntPixelSetter bps = (IntPixelSetter)ps;
final int channels = channels();
iterateOver((row, col, rowOffset) -> {
ib.position(rowOffset + (col * channels));
ib.put(bps.pixel(row, col));
});
}
@Override
public <T> void apply(final FlatPixelSetter<T> ps) {
final FlatIntPixelSetter bps = (FlatIntPixelSetter)ps;
iterateOver((bufPos, pixPos) -> {
ib.position(bufPos);
ib.put(bps.pixel(pixPos));
});
}
};
case CvType.CV_32F:
return new CvRaster(mat) {
private final FloatBuffer fb = currentBuffer.asFloatBuffer();
private final float[] zeroPixel = new float[channels()]; // zeroed already
private final int numChannels = mat.channels();
@Override
public void zero(final int row, final int col) {
set(row, col, zeroPixel);
}
@Override
public Object get(final int pos) {
final float[] ret = new float[channels()];
fb.position(pos * numChannels);
fb.get(ret);
return ret;
}
@Override
public void set(final int pos, final Object pixel) {
final float[] p = (float[])pixel;
fb.position(pos * numChannels);
fb.put(p);
}
@Override
public <T> void forEach(final PixelConsumer<T> pc) {
final FloatPixelConsumer bpc = (FloatPixelConsumer)pc;
final float[] pixel = new float[channels()];
final int channels = channels();
iterateOver((row, col, rowOffset) -> {
fb.position(rowOffset + (col * channels));
fb.get(pixel);
bpc.accept(row, col, pixel);
});
}
@Override
public <T> void forEach(final FlatPixelConsumer<T> pc) {
final FlatFloatPixelConsumer bpc = (FlatFloatPixelConsumer)pc;
final float[] pixel = new float[channels()];
iterateOver((bufPos, pixPos) -> {
fb.position(bufPos);
fb.get(pixel);
bpc.accept(pixPos, pixel);
});
}
@Override
public <T> void apply(final PixelSetter<T> ps) {
final FloatPixelSetter bps = (FloatPixelSetter)ps;
final int channels = channels();
iterateOver((row, col, rowOffset) -> {
fb.position(rowOffset + (col * channels));
fb.put(bps.pixel(row, col));
});
}
@Override
public <T> void apply(final FlatPixelSetter<T> ps) {
final FlatFloatPixelSetter bps = (FlatFloatPixelSetter)ps;
iterateOver((bufPos, pixPos) -> {
fb.position(bufPos);
fb.put(bps.pixel(pixPos));
});
}
};
case CvType.CV_64F:
return new CvRaster(mat) {
DoubleBuffer db = currentBuffer.asDoubleBuffer();
final double[] zeroPixel = new double[channels()]; // zeroed already
private final int numChannels = mat.channels();
@Override
public void zero(final int row, final int col) {
set(row, col, zeroPixel);
}
@Override
public Object get(final int pos) {
final double[] ret = new double[channels()];
db.position(pos * numChannels);
db.get(ret);
return ret;
}
@Override
public void set(final int pos, final Object pixel) {
final double[] p = (double[])pixel;
db.position(pos * numChannels);
db.put(p);
}
@Override
public <T> void forEach(final PixelConsumer<T> pc) {
final DoublePixelConsumer bpc = (DoublePixelConsumer)pc;
final double[] pixel = new double[channels()];
final int channels = channels();
iterateOver((row, col, rowOffset) -> {
db.position(rowOffset + (col * channels));
db.get(pixel);
bpc.accept(row, col, pixel);
});
}
@Override
public <T> void forEach(final FlatPixelConsumer<T> pc) {
final FlatDoublePixelConsumer bpc = (FlatDoublePixelConsumer)pc;
final double[] pixel = new double[channels()];
iterateOver((bufPos, pixPos) -> {
db.position(bufPos);
db.get(pixel);
bpc.accept(pixPos, pixel);
});
}
@Override
public <T> void apply(final PixelSetter<T> ps) {
final DoublePixelSetter bps = (DoublePixelSetter)ps;
final int channels = channels();
iterateOver((row, col, rowOffset) -> {
db.position(rowOffset + (col * channels));
db.put(bps.pixel(row, col));
});
}
@Override
public <T> void apply(final FlatPixelSetter<T> ps) {
final FlatDoublePixelSetter bps = (FlatDoublePixelSetter)ps;
iterateOver((bufPos, pixPos) -> {
db.position(bufPos);
db.put(bps.pixel(pixPos));
});
}
};
default:
throw new IllegalArgumentException("Can't handle CvType with value " + CvType.typeToString(type));
}
}
protected static interface PixelIterator {
public void accept(int row, int col, int rowOffset);
}
protected void iterateOver(final PixelIterator piter) {
final int rows = rows();
final int cols = cols();
final int channels = channels();
final int colsXchannels = cols * channels;
for(int row = 0; row < rows; row++) {
final int rowOffset = row * colsXchannels;
for(int col = 0; col < cols; col++) {
piter.accept(row, col, rowOffset);
}
}
}
protected static interface FlatPixelIterator {
public void accept(int bufPos, int pixPos);
}
protected void iterateOver(final FlatPixelIterator piter) {
final int rows = rows();
final int cols = cols();
final int channels = channels();
final int numElements = (rows * cols * channels);
int pixPos = 0;
for(int bufPos = 0; bufPos < numElements; bufPos += channels) {
piter.accept(bufPos, pixPos);
pixPos++;
}
}
private static ByteBuffer _getData(final Mat mat) {
if(!mat.isContinuous())
throw new IllegalArgumentException("Cannot create a CvRaster from a Mat without a continuous buffer.");
final Pointer dataPtr = ImageAPI.pilecv4j_image_CvRaster_getData(mat.nativeObj);
if(Pointer.nativeValue(dataPtr) == 0)
throw new IllegalArgumentException("Cannot access raw data in Mat. It may be uninitialized.");
return dataPtr.getByteBuffer(0, mat.elemSize() * mat.total());
}
private static ByteBuffer getData(final Mat mat) {
final ByteBuffer ret = _getData(mat);
// interpret the raw native buffer in the platform's native byte order
ret.order(ByteOrder.nativeOrder());
return ret;
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/ImageAPI.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;
import com.sun.jna.Callback;
import com.sun.jna.Native;
import com.sun.jna.NativeLibrary;
import com.sun.jna.Pointer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import ai.kognition.pilecv4j.util.NativeLibraryLoader;
public class ImageAPI {
private static final Logger LOGGER = LoggerFactory.getLogger(ImageAPI.class);
public static final String OCV_VERSION_PROPS = "opencv-info.version";
public static final String OCV_SHORT_VERSION_PROP_NAME = "opencv-short.version";
public static final String LIBNAME = "ai.kognition.pilecv4j.image";
static void _init() {}
// needs to match LogLevel enum in the C++ code.
public static final int LOG_LEVEL_TRACE = 0;
public static final int LOG_LEVEL_DEBUG = 1;
public static final int LOG_LEVEL_INFO = 2;
public static final int LOG_LEVEL_WARN = 3;
public static final int LOG_LEVEL_ERROR = 4;
public static final int LOG_LEVEL_FATAL = 5;
static {
// read a properties file from the classpath.
final Properties ocvVersionProps = new Properties();
try(InputStream ocvVerIs = ImageAPI.class.getClassLoader().getResourceAsStream(OCV_VERSION_PROPS)) {
ocvVersionProps.load(ocvVerIs);
} catch(final IOException e) {
throw new IllegalStateException("Problem loading the properties file \"" + OCV_VERSION_PROPS + "\" from the classpath", e);
}
final String ocvShortVersion = ocvVersionProps.getProperty(OCV_SHORT_VERSION_PROP_NAME);
if(ocvShortVersion == null)
throw new IllegalStateException("Problem reading the short version from the properties file \"" +
OCV_VERSION_PROPS + "\" from the classpath");
LOGGER.debug("Loading the library for opencv with a short version {}", ocvShortVersion);
NativeLibraryLoader.loader()
.optional("opencv_ffmpeg" + ocvShortVersion + "_64")
.optional("opencv_videoio_ffmpeg" + ocvShortVersion + "_64")
.library("opencv_java" + ocvShortVersion)
.library(LIBNAME)
.addPreLoadCallback((dir, libname, oslibname) -> {
if(LIBNAME.equals(libname))
NativeLibrary.addSearchPath(libname, dir.getAbsolutePath());
})
.load();
Native.register(LIBNAME);
}
public static native long pilecv4j_image_CvRaster_copy(long nativeMatHandle);
public static native long pilecv4j_image_CvRaster_move(long nativeMatHandle);
public static native void pilecv4j_image_CvRaster_freeByMove(long nativeMatHandle);
public static native void pilecv4j_image_CvRaster_assign(long nativeHandleDest, long nativeMatHandleSrc);
public static native Pointer pilecv4j_image_CvRaster_getData(long nativeMatHandle);
public static native long pilecv4j_image_CvRaster_makeMatFromRawDataReference(int rows, int cols, int type, long dataLong);
// "sizes" is a literal uint32_t* to match java
public static native long pilecv4j_image_CvRaster_makeMdMatFromRawDataReference(int ndims, Pointer sizes, int type, long dataLong);
public static native long pilecv4j_image_CvRaster_defaultMat();
public static native void pilecv4j_image_CvRaster_inplaceReshape(long nativeRef, int cn, int ndims, Pointer sizes);
public static native int pilecv4j_image_CvRaster_inplaceRemake(long nativeObj, int ndims, Pointer ptr, int type, long maxSize);
// ==========================================================
// Wrapped OpenCv HighGUI API.
// ALL of these need to be called from a SINGLE common thread.
public static native void pilecv4j_image_CvRaster_showImage(String name, long nativeMatHandle);
public static native void pilecv4j_image_CvRaster_updateWindow(String name, long nativeMatHandle);
public static native int pilecv4j_image_CvRaster_fetchEvent(int millisToSleep);
public static native void pilecv4j_image_CvRaster_destroyWindow(String name);
public static native boolean pilecv4j_image_CvRaster_isWindowClosed(String name);
// ==========================================================
// =========================================================
// MJPEGWriter functionality
// =========================================================
public static native int pilecv4j_image_mjpeg_initializeMJPEG(String filename);
public static native int pilecv4j_image_mjpeg_doappendFile(String filename, int width, int height);
public static native int pilecv4j_image_mjpeg_close(int fps);
public static native void pilecv4j_image_mjpeg_cleanUp();
// =========================================================
public interface AddHoughSpaceEntryContributorFunc extends Callback {
public boolean add(int orow, int ocol, int hsr, int hsc, int hscount);
}
// =========================================================
// Hough Transform functionality
// =========================================================
public static native void pilecv4j_image_Transform_houghTransformNative(final long image, final int width, final int height, final long gradientDirImage,
final byte[] mask, final int maskw, final int maskh, final int maskcr, final int maskcc,
final byte[] gradientDirMask, final int gdmaskw, final int gdmaskh, final int gdmaskcr, final int gdmaskcc,
final double gradientDirSlopDeg, final double quantFactor, short[] ret, int hswidth, int hsheight,
AddHoughSpaceEntryContributorFunc hsem, int houghThreshold, int rowstart, int rowend, int colstart, int colend,
byte EDGE);
// =========================================================
// =========================================================
// Gst bridge functionality
// =========================================================
public static native long pilecv4j_image_get_im_maker();
// =========================================================
public static native void pilecv4j_image_setLogLevel(int logLevel);
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/ImageFile.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image;
import static org.opencv.imgcodecs.Imgcodecs.IMREAD_UNCHANGED;
import javax.imageio.IIOImage;
import javax.imageio.ImageIO;
import javax.imageio.ImageReadParam;
import javax.imageio.ImageReader;
import javax.imageio.ImageWriteParam;
import javax.imageio.ImageWriter;
import javax.imageio.spi.IIORegistry;
import javax.imageio.spi.ImageReaderSpi;
import javax.imageio.stream.ImageInputStream;
import javax.imageio.stream.ImageOutputStream;
import java.awt.Graphics;
import java.awt.Image;
import java.awt.image.BufferedImage;
import java.awt.image.RenderedImage;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.commons.lang3.tuple.Pair;
import org.opencv.core.Mat;
import org.opencv.core.MatOfByte;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import net.dempsy.util.Functional;
import net.dempsy.util.MutableInt;
public class ImageFile {
private static final Logger LOGGER = LoggerFactory.getLogger(ImageFile.class);
private static List<String> readerClassPrefixOrder = List.of("com.twelvemonkeys.imageio", "com.github.jaiimageio.");
static {
CvMat.initOpenCv();
final Map<String, Long> lookupIndex = new HashMap<>();
final MutableInt count = new MutableInt(0);
for(final String prefix: readerClassPrefixOrder) {
lookupIndex.put(prefix, count.val);
count.val += 10000;
}
final var registry = IIORegistry.getDefaultInstance();
LOGGER.trace("Sorted ImageIO ImageReaderSpis:");
final var spis = Functional.iteratorAsStream(registry.getServiceProviders(ImageReaderSpi.class, true))
.map(r -> {
final String readerClassName = r.getClass().getName();
// find the string from the list that starts the classname of the reader.
final String lookup = readerClassPrefixOrder.stream()
.filter(s -> readerClassName.startsWith(s))
.findFirst()
.orElse(null);
final long rank;
if(lookup == null)
rank = count.val++;
else {
rank = lookupIndex.get(lookup);
// replace the index to compensate for more readers that have the same prefix
lookupIndex.put(lookup, rank + 1);
}
return Pair.of(rank, r);
})
.sorted((l, r) -> Long.compare(l.getLeft(), r.getLeft()))
.map(p -> p.getRight())
.peek(i -> LOGGER.trace(" {}", i))
.collect(Collectors.toList());
ImageReaderSpi prev = null;
for(final var c: spis) {
if(prev != null)
registry.setOrdering(ImageReaderSpi.class, prev, c);
prev = c;
}
}
/**
* Encode the given image using the codec implied by the given extension (e.g.
* ".jpg", ".png") and return the encoded bytes.
*/
public static byte[] encodeToImageData(final Mat mat, final String ext) {
try(Closer closer = new Closer();) {
final MatOfByte mobOut = closer.addMat(new MatOfByte());
// use the caller-supplied extension rather than assuming jpeg
Imgcodecs.imencode(ext, mat, mobOut);
return mobOut.toArray();
}
}
/**
* Given a byte array containing an encoded image, decode it into a {@link CvMat}.
* <b>Note: the caller owns the returned CvMat.</b>
*/
public static CvMat decodeImageData(final byte[] imageData) {
try(Closer closer = new Closer();) {
final MatOfByte mobOut = closer.addMat(new MatOfByte(imageData));
try(CvMat cvmat = CvMat.move(Imgcodecs.imdecode(mobOut, Imgcodecs.IMREAD_UNCHANGED));) {
return cvmat.returnMe();
}
}
}
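/*
* Illustrative round trip (a sketch; "mat" is an assumed, already-loaded Mat):
*
*   final byte[] png = ImageFile.encodeToImageData(mat, ".png");
*   try(final CvMat decoded = ImageFile.decodeImageData(png);) {
*       // use decoded; it's released when the try exits
*   }
*/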
/**
* <p>
* Read a {@link BufferedImage} from a file.
* </p>
*
* <p>
* This read method will fall back to OpenCV's codecs if the ImageIO codecs
* don't support the requested
* file.
* </p>
*
* <p>
* It should be noted that, if the routine falls back to using OpenCV to open
* the file, then the data will be copied into the {@link BufferedImage} after
* it's loaded into a {@link CvMat}.
* </p>
*
* @return a new {@link BufferedImage} constructed from the decoded file
* contents.
*/
public static BufferedImage readBufferedImageFromFile(final String filename) throws IOException {
return readBufferedImageFromFile(filename, 0);
}
/**
* See {@link ImageFile#readBufferedImageFromFile(String)}. ImageIO can handle
* file formats that allow
* multiple images in a single file such as TIFF. The default is to read the
* first image but you can
* ask for subsequent images by passing the imageNumber (starting at zero).
*
* <p>
* If the imageNumber is greater than the index of the last image in the file,
* you'll get an {@link IndexOutOfBoundsException}.
* </p>
*/
public static BufferedImage readBufferedImageFromFile(final String filename, final int imageNumber) throws IOException, IndexOutOfBoundsException {
return doReadBufferedImageFromFile(filename, true, imageNumber);
}
/**
* <p>
* Read a {@link CvMat} from a file. You should make sure this is assigned in a try-with-resource
* or the CvMat will leak.
* </p>
*
* <p>
* This read method is much more robust than the one supplied with OpenCv since it will couple
* ImageIO codecs with OpenCV's codecs to provide a much wider set of formats that can be handled.
* It should be noted that, if the routine falls back to using ImageIO to open the file, then
* the data will be copied into the {@link CvMat} after it's loaded into a {@link BufferedImage}.
* </p>
*
* @return a new {@link CvMat} constructed from the decoded file contents.
* <b>Note: The caller owns the CvMat returned</b>
*/
public static CvMat readMatFromFile(final String filename, final int mode) throws IOException {
return doReadMatFromFile(filename, true, mode);
}
public static CvMat readMatFromFile(final String filename) throws IOException {
return readMatFromFile(filename, IMREAD_UNCHANGED);
}
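/*
* Illustrative usage (a sketch; "/tmp/image.tif" is a placeholder path): the
* returned CvMat is owned by the caller, so bind it in a try-with-resources
* or it will leak native memory.
*
*   try(final CvMat mat = ImageFile.readMatFromFile("/tmp/image.tif");) {
*       // use mat here
*   }
*/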
public static void writeImageFile(final BufferedImage ri, final String filename) throws IOException {
if(!doWrite(ri, filename)) {
LOGGER.debug("Failed to write '" + filename + "' using ImageIO");
try(CvMat mat = Utils.img2CvMat(ri);) {
if(!doWrite(mat, filename, true))
throw new IllegalArgumentException("Failed to write");
}
}
}
public static void writeImageFile(final Mat ri, final String filename) throws IOException {
if(!doWrite(ri, filename, false)) {
LOGGER.debug("Failed to write '" + filename + "' using OpenCV");
final BufferedImage bi = Utils.mat2Img(ri);
if(!doWrite(bi, filename))
throw new IllegalArgumentException("Failed to write");
}
}
public static void transcode(BufferedImage bi, final ImageDestinationDefinition dest) throws IOException {
if(infile != null && infile.equalsIgnoreCase(dest.outfile))
throw new IOException("Can't overwrite original file during transcode (" + infile + ").");
if(dest.maxw != -1 || dest.maxh != -1 || dest.maxe != -1) {
final int width = bi.getWidth();
final int height = bi.getHeight();
final double scale = scale(width, height, dest);
if(scale >= 0.0) {
final int newwidth = (int)Math.round(scale * (width));
final int newheight = (int)Math.round(scale * (height));
bi = convert(bi.getScaledInstance(newwidth, newheight, Image.SCALE_DEFAULT), bi.getType());
}
}
writeImageFile(bi, dest.outfile);
}
public static class ImageDestinationDefinition {
public String outfile = null;
public int maxw = -1;
public int maxh = -1;
public int maxe = -1;
public boolean verify = false;
public void set() {}
}
public static String infile = null;
public static void main(final String[] args)
throws IOException {
final List<ImageDestinationDefinition> dests = commandLine(args);
if(dests == null || dests.size() == 0) {
usage();
return;
}
if(infile == null) {
usage();
return;
}
final BufferedImage image = readBufferedImageFromFile(infile);
for(final ImageDestinationDefinition dest: dests) {
transcode(image, dest);
if(dest.verify) {
final RenderedImage im = readBufferedImageFromFile(dest.outfile);
final int width2 = im.getWidth();
final int height2 = im.getHeight();
if(dest.maxw != width2 || dest.maxh != height2 || dest.maxe != ((width2 > height2) ? width2 : height2))
throw new IOException("Verification failed!");
}
}
}
/**
* Converts an {@link Image} to a {@link BufferedImage} image in a really hacky
* way.
*/
private static BufferedImage convert(final Image im, final int type) {
if(im instanceof BufferedImage)
return (BufferedImage)im;
final BufferedImage bi = new BufferedImage(im.getWidth(null), im.getHeight(null), type);
final Graphics bg = bi.getGraphics();
bg.drawImage(im, 0, 0, null);
bg.dispose();
return bi;
}
private synchronized static CvMat doReadMatFromFile(final String filename, final boolean tryOther, final int mode) throws IOException {
LOGGER.trace("OCV Reading CvMat from {}", filename);
final File f = new File(filename);
if(!f.exists())
throw new FileNotFoundException(filename);
final Mat omat = Imgcodecs.imread(filename, mode);
try(final CvMat mat = omat == null || omat.dataAddr() == 0 ? null : CvMat.move(omat);) {
if(tryOther && (mat == null || (mat.rows() == 0 && mat.cols() == 0))) {
LOGGER.warn("OCV Failed to read '" + filename + "' using OpenCV");
try {
return Utils.img2CvMat(doReadBufferedImageFromFile(filename, false, 0));
} catch(final IllegalArgumentException iae) { //
return null;
}
} // else {
// if(filename.endsWith(".jp2") && CvType.channels(mat.channels()) > 1)
// Imgproc.cvtColor(mat, mat, Imgproc.COLOR_RGB2BGR);
// ret = CvMat.move(mat);
// }
if(mat != null) {
LOGGER.trace("OCV Read {} from {}", mat, filename);
return mat.returnMe();
} else {
LOGGER.debug("OCV Failed to read '" + filename + "' using OpenCV");
return null;
}
}
}
private static class ReaderAndStream implements AutoCloseable {
public final ImageReader reader;
public final ImageInputStream stream;
public ReaderAndStream(final ImageReader reader, final ImageInputStream stream) {
this.reader = reader;
this.stream = stream;
reader.setInput(stream, true, true);
}
@Override
public void close() throws IOException {
stream.close();
}
}
private static ReaderAndStream getNextReaderAndStream(final File f, final int index) throws IOException {
final ImageInputStream input = ImageIO.createImageInputStream(f);
final Iterator<ImageReader> readers = ImageIO.getImageReaders(input);
int cur = 0;
while(readers.hasNext() && cur < index) {
readers.next();
cur++;
}
ImageReader reader = null;
if(readers.hasNext())
reader = readers.next();
if(reader == null)
input.close();
return reader == null ? null : new ReaderAndStream(reader, input);
}
private static BufferedImage doReadBufferedImageFromFile(final String filename, final boolean tryOther, final int imageNumber) throws IOException {
final File f = new File(filename);
if(!f.exists())
throw new FileNotFoundException(filename);
Exception lastException = null;
int cur = 0;
while(true) {
try(ReaderAndStream ras = getNextReaderAndStream(f, cur)) {
if(ras != null) {
final ImageReader reader = ras.reader;
final ImageReadParam param = reader.getDefaultReadParam();
try {
LOGGER.trace("IIO attempt {}. Using reader {} to read {} ", cur, reader, filename);
final BufferedImage image = reader.read(imageNumber, param);
return image;
} catch(final IndexOutOfBoundsException ioob) {
// TODO: distinguish between an IndexOutOfBoundsException thrown because the
// imageNumber is too high and one thrown for some other reason.
if(imageNumber == 0) { // then this is certainly NOT because the imageNumber is too high
LOGGER.debug("IIO attempt {} using reader {} failed with ", cur, reader, ioob);
lastException = ioob;
} else {
throw ioob; // for now, assume this happened because the imageNumber is too high,
// but there needs to be a better solution. Perhaps distinguish between
// IndexOutOfBoundsException and ArrayIndexOutOfBoundsException, for example.
}
} catch(final IOException | RuntimeException ioe) {
LOGGER.debug("IIO attempt {} using reader {} failed with ", cur, reader, ioe);
lastException = ioe;
} finally {
reader.dispose();
}
} else
break;
}
cur++;
}
if(cur == 0)
LOGGER.debug("IIO No ImageIO readers available for {}", filename);
else
LOGGER.debug("IIO No more ImageIO readers to try for {}", filename);
LOGGER.info("IIO Failed to read '{}' using ImageIO", filename);
if(!tryOther)
throw new IllegalArgumentException("Can't read '" + filename + "' as an image. No codec worked in ImageIO");
BufferedImage ret = null;
try(final CvMat mat = doReadMatFromFile(filename, false, IMREAD_UNCHANGED);) {
if(mat == null) {
if(lastException != null)
throw new IllegalArgumentException("Can't read '" + filename + "' as an image. No codec worked in either ImageIO or OpenCv", lastException);
else
throw new IllegalArgumentException("Can't read '" + filename + "' as an image. No codec worked in either ImageIO or OpenCv");
}
// if(filename.endsWith(".jp2") && CvType.channels(mat.channels()) > 1)
// Imgproc.cvtColor(mat, mat, Imgproc.COLOR_RGB2BGR);
ret = Utils.mat2Img(mat);
}
LOGGER.trace("IIO Read {} from {}", ret, filename);
return ret;
}
private static boolean doWrite(final BufferedImage ri, final String filename) throws IOException {
LOGGER.trace("Writing image {} to {}", ri, filename);
final int dotindex = filename.lastIndexOf(".");
if(dotindex < 0)
throw new IOException("No extension on " + filename);
final String ext = filename.substring(dotindex + 1);
final File f = new File(filename).getCanonicalFile();
final File p = f.getParentFile();
// make sure the output directory exists.
p.mkdirs();
final Iterator<ImageWriter> iter = ImageIO.getImageWritersBySuffix(ext);
boolean wrote = false;
IOException last = null;
int cur = 0;
while(iter.hasNext() && !wrote) {
final ImageWriter writer = iter.next(); // try each writer in turn until one succeeds
try {
try(final ImageOutputStream ios = ImageIO.createImageOutputStream(f);) {
final ImageWriteParam param = writer.getDefaultWriteParam();
writer.setOutput(ios);
writer.write(null, new IIOImage(ri, null, null), param);
}
wrote = true;
} catch(final IOException ioe) {
LOGGER.debug("IIO attempt {} using writer {} failed with ", cur, writer, ioe);
last = ioe;
} finally {
writer.dispose();
}
cur++;
}
// only propagate a failure if no writer succeeded
if(!wrote && last != null)
throw last;
return wrote;
}
private static boolean doWrite(final Mat ri, final String filename, final boolean canOverwrite) {
LOGGER.trace("Writing image {} to {}", ri, filename);
try(final CvMat newMat = new CvMat();) {
final Mat toWrite;
if(filename.endsWith(".jp2")) {
toWrite = (canOverwrite) ? ri : newMat;
Imgproc.cvtColor(ri, toWrite, Imgproc.COLOR_BGR2RGB);
} else
toWrite = ri;
return Imgcodecs.imwrite(filename, toWrite);
}
}
private static double scale(final int width, final int height, final ImageDestinationDefinition dest) {
double scale = -1.0;
if(dest.maxh != -1) {
if(height > dest.maxh)
// see what we need to scale to make the height the same.
scale = ((double)dest.maxh) / ((double)height);
}
if(dest.maxw != -1) {
final int adjwidth = (scale >= 0.0) ? (int)Math.round(scale * width) : width;
if(adjwidth > dest.maxw) {
scale = ((double)dest.maxw) / ((double)adjwidth);
}
}
if(dest.maxe != -1) {
final int adjedge = width > height ? (scale >= 0.0 ? (int)Math.round(scale * width) : width)
: (scale >= 0.0 ? (int)Math.round(scale * height) : height);
if(adjedge > dest.maxe) {
scale = ((double)(dest.maxe)) / ((double)adjedge);
}
}
return scale;
}
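/*
* Worked example: for a 4000x2000 source with dest.maxw = 1000 and no other
* limits, the width pass yields scale = 1000/4000 = 0.25, so the image would
* be transcoded to 1000x500. A negative return means no scaling is needed.
*/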
private static List<ImageDestinationDefinition> commandLine(final String[] args) {
final List<ImageDestinationDefinition> ret = new ArrayList<>();
ImageDestinationDefinition cur = null;
for(int i = 0; i < args.length; i++) {
final String optionArg = args[i];
// see if we are asking for help
if("help".equalsIgnoreCase(optionArg) ||
"-help".equalsIgnoreCase(optionArg)) {
usage();
return null;
}
if("-i".equalsIgnoreCase(optionArg)) {
if(infile != null) {
System.err.println("One infile only");
usage();
return null;
}
infile = args[i + 1];
i++;
}
else if("-o".equalsIgnoreCase(args[i])) {
cur = cur == null ? new ImageDestinationDefinition() : cur;
if(cur.outfile != null)
cur = push(cur, ret);
cur.outfile = args[i + 1];
i++;
} else if("-verify".equalsIgnoreCase(args[i])) {
cur = cur == null ? new ImageDestinationDefinition() : cur;
if(cur.verify == false)
cur = push(cur, ret);
cur.verify = true;
} else if("-maxw".equalsIgnoreCase(args[i])) {
cur = cur == null ? new ImageDestinationDefinition() : cur;
if(cur.maxw != -1)
cur = push(cur, ret);
cur.maxw = Integer.parseInt(args[i + 1]);
i++;
} else if("-maxh".equalsIgnoreCase(args[i])) {
cur = cur == null ? new ImageDestinationDefinition() : cur;
if(cur.maxh != -1)
cur = push(cur, ret);
cur.maxh = Integer.parseInt(args[i + 1]);
i++;
} else if("-maxe".equalsIgnoreCase(args[i])) {
cur = cur == null ? new ImageDestinationDefinition() : cur;
if(cur.maxe != -1)
cur = push(cur, ret);
cur.maxe = Integer.parseInt(args[i + 1]);
i++;
} else {
usage();
return null;
}
}
if(cur != null) {
cur.set();
ret.add(cur);
}
return ret;
}
private static ImageDestinationDefinition push(final ImageDestinationDefinition cur,
final List<ImageDestinationDefinition> ret) {
ret.add(cur);
cur.set();
return new ImageDestinationDefinition();
}
private static void usage() {
System.out.println("usage: java [javaargs] ImageFile -i infile -o outfile [-maxw width] [-maxh height] [-maxe maxEdge] [-verify]");
System.out.println(" options -o through -verify can be repeated to convert an image file");
System.out.println(" to a number of different formats and dimentions");
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/Operations.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image;
import static net.dempsy.util.BinaryUtils.byteify;
import static net.dempsy.util.BinaryUtils.intify;
import java.awt.Color;
import java.awt.image.IndexColorModel;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.imgproc.Imgproc;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import net.dempsy.util.QuietCloseable;
public class Operations {
private static final Logger LOGGER = LoggerFactory.getLogger(Operations.class);
static {
CvMat.initOpenCv();
}
public static byte EDGE = (byte)-1;
public static final byte ROVERLAY = (byte)100;
public static final byte GOVERLAY = (byte)101;
public static final byte BOVERLAY = (byte)102;
public static final byte YOVERLAY = (byte)103;
public static final byte COVERLAY = (byte)104;
public static final byte MOVERLAY = (byte)105;
public static final byte OOVERLAY = (byte)106;
public static final byte GRAYOVERLAY = (byte)107;
private static final double[] cvrtScaleDenom = new double[6];
public static final double _256Ov2Pi = (256.0 / (2.0 * Math.PI));
static {
cvrtScaleDenom[CvType.CV_16U] = (0xffff);
cvrtScaleDenom[CvType.CV_16S] = (0x7fff);
cvrtScaleDenom[CvType.CV_8U] = (0xff);
cvrtScaleDenom[CvType.CV_8S] = (0x7f);
}
public static class GradientImages implements QuietCloseable {
public final CvMat gradientDir;
public final CvMat dx;
public final CvMat dy;
private GradientImages(final CvMat gradientDir, final CvMat dx, final CvMat dy) {
this.gradientDir = gradientDir;
this.dx = dx;
this.dy = dy;
}
@Override
public void close() {
gradientDir.close();
dy.close();
dx.close();
}
}
/**
* Perform a Canny edge detection.
*
* @return A CvMat with the edge detection results. The caller owns the CvMat.
*/
public static CvMat canny(final GradientImages gis, final double tlow, final double thigh) {
try(final CvMat edgeImage = new CvMat();) {
Imgproc.Canny(gis.dx, gis.dy, edgeImage, tlow, thigh, true);
return edgeImage.returnMe();
}
}
public static GradientImages gradient(final CvMat grayImage, final int kernelSize) {
// find gradient image
try(final CvMat dx = new CvMat();
final CvMat dy = new CvMat();) {
Imgproc.Sobel(grayImage, dx, CvType.CV_16S, 1, 0, kernelSize, 1.0, 0.0, Core.BORDER_REPLICATE);
Imgproc.Sobel(grayImage, dy, CvType.CV_16S, 0, 1, kernelSize, 1.0, 0.0, Core.BORDER_REPLICATE);
final int numPixelsInGradient = dx.rows() * dx.cols();
final byte[] dirsa = new byte[numPixelsInGradient];
dx.bulkAccess(dxr -> {
final var dxsb = dxr.asShortBuffer();
dy.bulkAccess(dyr -> {
final var dysb = dyr.asShortBuffer();
for(int pos = 0; pos < numPixelsInGradient; pos++) {
// calculate the angle
final double dxv = dxsb.get(pos);
final double dyv = 0.0 - dysb.get(pos); // flip y axis.
dirsa[pos] = angle_byte(dxv, dyv);
}
});
});
// a byte raster to hold the dirs
try(final CvMat gradientDirImage = new CvMat(dx.rows(), dx.cols(), CvType.CV_8UC1);) {
gradientDirImage.put(0, 0, dirsa);
final GradientImages ret = new GradientImages(gradientDirImage.returnMe(), dx.returnMe(), dy.returnMe());
return ret;
}
}
}
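/*
* Illustrative pipeline (a sketch; "gray" is an assumed single-channel CV_8U
* CvMat, e.g. from convertToGray, and 3/50.0/150.0 are assumed kernel-size
* and hysteresis-threshold choices): compute the Sobel gradients and run
* Canny on them. Both results are caller-owned.
*
*   try(final GradientImages gis = Operations.gradient(gray, 3);
*       final CvMat edges = Operations.canny(gis, 50.0, 150.0);) {
*       // edges is the binary edge image; gis.gradientDir holds the
*       // quantized gradient directions
*   }
*/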
public static CvMat convertToGray(final CvMat src) {
final CvMat workingImage = new CvMat();
if(src.depth() != CvType.CV_8U) {
LOGGER.debug("converting image to 8-bit grayscale ... ");
src.convertTo(workingImage, CvType.CV_8U, 255.0 / cvrtScaleDenom[src.depth()]);
Imgproc.cvtColor(workingImage, workingImage, Imgproc.COLOR_BGR2GRAY);
return workingImage;
} else {
// cvtColor (re)allocates the destination, so no initial copy is needed
Imgproc.cvtColor(src, workingImage, Imgproc.COLOR_BGR2GRAY);
return workingImage;
}
}
public static IndexColorModel getOverlayCM() {
final byte[] r = new byte[256];
final byte[] g = new byte[256];
final byte[] b = new byte[256];
r[intify(EDGE)] = g[intify(EDGE)] = b[intify(EDGE)] = -1;
r[intify(ROVERLAY)] = -1;
g[intify(GOVERLAY)] = -1;
b[intify(BOVERLAY)] = -1;
r[intify(YOVERLAY)] = -1;
g[intify(YOVERLAY)] = -1;
r[intify(COVERLAY)] = byteify(Color.cyan.getRed());
g[intify(COVERLAY)] = byteify(Color.cyan.getGreen());
b[intify(COVERLAY)] = byteify(Color.cyan.getBlue());
r[intify(MOVERLAY)] = byteify(Color.magenta.getRed());
g[intify(MOVERLAY)] = byteify(Color.magenta.getGreen());
b[intify(MOVERLAY)] = byteify(Color.magenta.getBlue());
r[intify(OOVERLAY)] = byteify(Color.orange.getRed());
g[intify(OOVERLAY)] = byteify(Color.orange.getGreen());
b[intify(OOVERLAY)] = byteify(Color.orange.getBlue());
r[intify(GRAYOVERLAY)] = byteify(Color.gray.getRed());
g[intify(GRAYOVERLAY)] = byteify(Color.gray.getGreen());
b[intify(GRAYOVERLAY)] = byteify(Color.gray.getBlue());
return new IndexColorModel(8, 256, r, g, b);
}
public static byte angle_byte(final double x, final double y) {
double xu, yu, ang;
double ret;
int rret;
xu = Math.abs(x);
yu = Math.abs(y);
if((xu == 0) && (yu == 0))
return(0);
ang = Math.atan(yu / xu);
if(x >= 0) {
if(y >= 0)
ret = ang;
else
ret = (2.0 * Math.PI - ang);
} else {
if(y >= 0)
ret = (Math.PI - ang);
else
ret = (Math.PI + ang);
}
rret = (int)(0.5 + (ret * _256Ov2Pi));
if(rret >= 256)
rret = 0;
return byteify(rret);
}
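/*
* Worked example: angle_byte(1.0, 1.0) represents a 45 degree direction,
* which quantizes to round((PI/4) * (256 / (2*PI))) = 32. The full circle
* maps onto 0..255, so each byte step is 360/256 = 1.40625 degrees.
*/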
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/Utils.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image;
import static java.awt.image.BufferedImage.TYPE_3BYTE_BGR;
import static java.awt.image.BufferedImage.TYPE_4BYTE_ABGR;
import static java.awt.image.BufferedImage.TYPE_4BYTE_ABGR_PRE;
import static java.awt.image.BufferedImage.TYPE_BYTE_GRAY;
import static java.awt.image.BufferedImage.TYPE_CUSTOM;
import static java.awt.image.BufferedImage.TYPE_USHORT_GRAY;
import static net.dempsy.util.Functional.chain;
import static net.dempsy.util.Functional.uncheck;
import static org.opencv.core.CvType.CV_16S;
import static org.opencv.core.CvType.CV_16U;
import static org.opencv.core.CvType.CV_8S;
import static org.opencv.core.CvType.CV_8U;
import java.awt.Color;
import java.awt.Graphics2D;
import java.awt.color.ColorSpace;
import java.awt.color.ICC_ColorSpace;
import java.awt.image.BufferedImage;
import java.awt.image.ColorModel;
import java.awt.image.ComponentColorModel;
import java.awt.image.DataBuffer;
import java.awt.image.DataBufferByte;
import java.awt.image.DataBufferFloat;
import java.awt.image.DataBufferInt;
import java.awt.image.DataBufferShort;
import java.awt.image.DataBufferUShort;
import java.awt.image.DirectColorModel;
import java.awt.image.IndexColorModel;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.lang.reflect.Array;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.DoubleBuffer;
import java.nio.FloatBuffer;
import java.nio.IntBuffer;
import java.nio.ShortBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.apache.commons.io.IOUtils;
import org.opencv.calib3d.Calib3d;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.MatOfPoint3f;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.imgproc.Imgproc;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import net.dempsy.util.Functional;
import net.dempsy.util.MutableDouble;
import net.dempsy.util.QuietCloseable;
import ai.kognition.pilecv4j.image.CvRaster.BytePixelConsumer;
import ai.kognition.pilecv4j.image.CvRaster.FlatFloatPixelConsumer;
import ai.kognition.pilecv4j.image.CvRaster.FloatPixelConsumer;
import ai.kognition.pilecv4j.image.CvRaster.IntPixelConsumer;
import ai.kognition.pilecv4j.image.CvRaster.ShortPixelConsumer;
import ai.kognition.pilecv4j.image.geometry.PerpendicularLine;
import ai.kognition.pilecv4j.image.geometry.Point;
import ai.kognition.pilecv4j.image.geometry.SimplePoint;
@SuppressWarnings("deprecation")
public class Utils {
private static final Logger LOGGER = LoggerFactory.getLogger(Utils.class);
/**
* This is set through reflection because the source changed between OpenCV 3
* and OpenCV 4.
*/
public final static int OCV_FONT_HERSHEY_SIMPLEX;
private final static Method OCV_UNDISTORT_METHOD;
private final static Method OCV_UNDISTORT_POINTS_METHOD;
private final static CvMat bgra2abgr = Utils.toMat(new float[][] {
{0,1,0,0},
{0,0,1,0},
{0,0,0,1},
{1,0,0,0}
});
private final static CvMat abgr2bgra = Utils.toMat(new float[][] {
{0,0,0,1},
{1,0,0,0},
{0,1,0,0},
{0,0,1,0}
});
private static final int NUM_DEPTH_CONSTS = 8;
private static final int[] BITS_PER_CHANNEL_LOOKUP = new int[NUM_DEPTH_CONSTS];
public static final Scalar DEFAULT_PADDING = new Scalar(128, 128, 128);
// Dynamically determine if we're at major version 3 or 4 of OpenCV and set the
// variables appropriately.
static {
// this 3.x.x uses Core while 4.x.x uses Imgproc
OCV_FONT_HERSHEY_SIMPLEX = uncheck(() -> Integer.valueOf(getStaticField("FONT_HERSHEY_SIMPLEX", Core.class, Imgproc.class).getInt(null))).intValue();
OCV_UNDISTORT_METHOD = getStaticMethod("undistort", new Class<?>[] {Mat.class,Mat.class,Mat.class,Mat.class}, Imgproc.class, Calib3d.class);
OCV_UNDISTORT_POINTS_METHOD = getStaticMethod("undistortPoints",
new Class<?>[] {MatOfPoint2f.class,MatOfPoint2f.class,Mat.class,Mat.class,Mat.class,Mat.class},
Imgproc.class, Calib3d.class);
BITS_PER_CHANNEL_LOOKUP[CvType.CV_8S] = 8;
BITS_PER_CHANNEL_LOOKUP[CvType.CV_8U] = 8;
BITS_PER_CHANNEL_LOOKUP[CvType.CV_16S] = 16;
BITS_PER_CHANNEL_LOOKUP[CvType.CV_16U] = 16;
BITS_PER_CHANNEL_LOOKUP[CvType.CV_32S] = 32;
BITS_PER_CHANNEL_LOOKUP[CvType.CV_32F] = 32;
BITS_PER_CHANNEL_LOOKUP[CvType.CV_64F] = 64;
}
/**
* Given the CvType, how many bits-per-channel. For example
* {@code CvType.CV_8UC1}, {@code CvType.CV_8UC2}, and
* {@code CvType.CV_8UC3} will all return {@code 8}.
*/
public static int bitsPerChannel(final int type) {
final int depth = CvType.depth(type);
if(depth > (NUM_DEPTH_CONSTS - 1))
throw new IllegalStateException(
"Something in OpenCV is no longer what it used to be. Depth constants are 3 bits and so should never be greater than "
+ (NUM_DEPTH_CONSTS - 1)
+ ". However, for type " + CvType.typeToString(type) + " it seems to be " + depth);
final int ret = BITS_PER_CHANNEL_LOOKUP[depth];
if(ret <= 0)
throw new IllegalArgumentException(
"The type " + CvType.typeToString(type) + ", resulting in a depth constant of " + depth
+ " has no corresponding bit-per-channel value");
return ret;
}
/**
* This method simply proxies a call to OpenCV's
* <a href=
* "https://docs.opencv.org/4.0.1/d9/d0c/group__calib3d.html#ga69f2545a8b62a6b0fc2ee060dc30559d">undistort</a>
* method in order to provide compatibility between OpenCV 3 and OpenCV 4 when
* it was moved from
* {@code Imgproc} to {@code Calib3d}.
*/
public static void undistort(final Mat src, final Mat dst, final Mat cameraMatrix, final Mat distCoeffs) {
try {
OCV_UNDISTORT_METHOD.invoke(null, src, dst, cameraMatrix, distCoeffs);
} catch(final IllegalAccessException e) {
throw new IllegalStateException("The method " + OCV_UNDISTORT_METHOD.getName() + " isn't accessible.", e);
} catch(final InvocationTargetException e) {
throw(RuntimeException)e.getCause();
}
}
/**
* This method simply proxies a call to OpenCV's
* <a href=
* "https://docs.opencv.org/4.0.1/d9/d0c/group__calib3d.html#ga69f2545a8b62a6b0fc2ee060dc30559d">undistortPoints</a>
* method in order to provide compatibility between OpenCV 3 and OpenCV 4 when
* it was moved from
* {@code Imgproc} to {@code Calib3d}.
*/
public static void undistortPoints(final MatOfPoint2f src, final MatOfPoint2f dst, final Mat cameraMatrix, final Mat distCoeffs, final Mat R,
final Mat P) {
try {
OCV_UNDISTORT_POINTS_METHOD.invoke(null, src, dst, cameraMatrix, distCoeffs, R, P);
} catch(final IllegalAccessException e) {
throw new IllegalStateException("The method " + OCV_UNDISTORT_METHOD.getName() + " isn't accessible.", e);
} catch(final InvocationTargetException e) {
throw(RuntimeException)e.getCause();
}
}
/**
* <p>
* Convert an OpenCV
* <a href="https://docs.opencv.org/4.0.1/d3/d63/classcv_1_1Mat.html">Mat</a>
* (or a {@link CvMat}) to a {@link BufferedImage} that can be used in java
* swing and awt. Currently this can handle:
* </p>
* <ul>
* <li>A single channel grayscale image of 8 or 16 bits.</li>
* <li>A 3 channel color image of 8-bits per channel.*</li>
* <li>A 4 channel color image with an alpha channel of 8-bits per
* channel.*</li>
* </ul>
*
* <p>
* <em>* Note: the method assumes color
* <a href="https://docs.opencv.org/4.0.1/d3/d63/classcv_1_1Mat.html">Mat's</a>
* are in typical OpenCV BGR (or, for 4 channel images aBGR) format.</em>
* </p>
*
* <p>
* 8-bit per channel color images will be transformed to {@link BufferedImage}s
* of type {@link BufferedImage#TYPE_3BYTE_BGR} for 3 channel images and
* {@link BufferedImage#TYPE_4BYTE_ABGR} for 4 channel images.
* </p>
*
* TODO: 16-bit per channel color images
*
* @param in <a href=
* "https://docs.opencv.org/4.0.1/d3/d63/classcv_1_1Mat.html">Mat</a>
* to be converted
* @return new {@link BufferedImage} from the <a href=
* "https://docs.opencv.org/4.0.1/d3/d63/classcv_1_1Mat.html">Mat</a>
*/
public static BufferedImage mat2Img(final Mat in) {
final int inChannels = in.channels();
if(inChannels == 1) { // assume gray
final BufferedImage out;
switch(CvType.depth(in.type())) {
case CV_8U:
case CV_8S:
out = new BufferedImage(in.width(), in.height(), BufferedImage.TYPE_BYTE_GRAY);
in.get(0, 0, ((DataBufferByte)out.getRaster().getDataBuffer()).getData());
break;
case CV_16U:
case CV_16S:
out = new BufferedImage(in.width(), in.height(), BufferedImage.TYPE_USHORT_GRAY);
in.get(0, 0, ((DataBufferUShort)out.getRaster().getDataBuffer()).getData());
break;
default:
throw new IllegalArgumentException(
"Cannot convert a Mat with a type of " + CvType.typeToString(in.type()) + " to a BufferedImage");
}
return out;
} else if(inChannels == 3) {
final int cvDepth = CvType.depth(in.type());
if(cvDepth != CV_8U && cvDepth != CV_8S)
throw new IllegalArgumentException("Cannot convert BGR Mats with elements larger than a byte yet.");
final BufferedImage out = new BufferedImage(in.width(), in.height(), BufferedImage.TYPE_3BYTE_BGR);
in.get(0, 0, ((DataBufferByte)out.getRaster().getDataBuffer()).getData());
return out;
} else if(inChannels == 4) { // assumption here is we have a BGRA
final int cvDepth = CvType.depth(in.type());
if(cvDepth != CV_8U && cvDepth != CV_8S)
throw new IllegalArgumentException("Cannot convert aBGR Mats with elements larger than a byte yet.");
final BufferedImage out = new BufferedImage(in.width(), in.height(), BufferedImage.TYPE_4BYTE_ABGR);
final int height = in.rows();
final int width = in.cols();
// flatten so every pixel is a separate row
try(final CvMat reshaped = CvMat.move(in.reshape(1, height * width));
// type to 32F
final CvMat typed = Functional.chain(new CvMat(), m -> reshaped.convertTo(m, CvType.CV_32F));
// color transform which just reorganizes the pixels.
final CvMat xformed = typed.mm(bgra2abgr);
final CvMat xformedAndShaped = CvMat.move(xformed.reshape(4, height));
final CvMat it = Functional.chain(new CvMat(), m -> xformedAndShaped.convertTo(m, cvDepth));) {
it.get(0, 0, ((DataBufferByte)out.getRaster().getDataBuffer()).getData());
return out;
}
} else
throw new IllegalArgumentException("Can't handle an image with " + inChannels + " channels");
}
/**
* <p>
* Convert an OpenCV
* <a href="https://docs.opencv.org/4.0.1/d3/d63/classcv_1_1Mat.html">Mat</a>
* (or a {@link CvMat}) to a {@link BufferedImage} that can be used in java
* swing and awt using a specific index color model (see
* {@link IndexColorModel}).
* </p>
*
* <p>
* This is a much simpler implementation than {@link Utils#mat2Img(Mat)} in that
* it only handles a 1-channel, 8-bit image but allows you to assign colors to
* each of the 256 values. This is primarily used to generate overlays on other
* images and represents a "poor-man's" alpha channel manipulation in OpenCV,
* which doesn't really have much in the way of alpha channel handling natively.
* </p>
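*
* <p>
* For example, a minimal sketch building a "transparent except red" palette
* (the color choices and the {@code mask} Mat are hypothetical):
* </p>
*
* <pre>
* <code>
* // every index maps to transparent except 255, which maps to opaque red
* byte[] r = new byte[256], g = new byte[256], b = new byte[256], a = new byte[256];
* r[255] = (byte)255;
* a[255] = (byte)255;
* IndexColorModel cm = new IndexColorModel(8, 256, r, g, b, a);
* BufferedImage overlay = Utils.mat2Img(mask, cm); // mask is a CV_8UC1 Mat
* </code>
* </pre>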
*/
public static BufferedImage mat2Img(final Mat in, final IndexColorModel colorModel) {
BufferedImage out;
        if(in.channels() != 1 || CvType.depth(in.type()) != CvType.CV_8U)
            throw new IllegalArgumentException("Cannot convert a Mat to a BufferedImage with an IndexColorModel unless the Mat is a single channel, 8-bit image (CV_8UC1).");
out = new BufferedImage(in.cols(), in.rows(), BufferedImage.TYPE_BYTE_INDEXED, colorModel);
out.getRaster().setDataElements(0, 0, in.cols(), in.rows(), copyToPrimitiveArray(in));
return out;
}
/**
* This is a convenience method for {@link Utils#dump(Mat, PrintStream)} that
* uses {@link System#out} as the {@link PrintStream} and dumps only the given
* range of rows and columns.
*/
public static void dump(final Mat mat, final int startRow, final int numRows, final int startCol, final int numCols) {
dump(mat, System.out, startRow, numRows, startCol, numCols);
}
/**
* This is a convenience method for
* {@link Utils#dump(Mat, PrintStream, int, int)} that uses {@link System#out}
* as the {@link PrintStream} and dumps all elements of the
* <a href="https://docs.opencv.org/4.0.1/d3/d63/classcv_1_1Mat.html">Mat</a>
*/
public static void dump(final Mat mat) {
dump(mat, System.out, 0, -1, 0, -1);
}
/**
* This is a convenience method for {@link Utils#dump(Mat, PrintStream, int, int)}
* that dumps all elements of the
* <a href="https://docs.opencv.org/4.0.1/d3/d63/classcv_1_1Mat.html">Mat</a>
*/
public static void dump(final Mat mat, final PrintStream out) {
dump(mat, out, 0, -1, 0, -1);
}
/**
* <p>
* You can use this method to dump the contents of a
* <a href="https://docs.opencv.org/4.0.1/d3/d63/classcv_1_1Mat.html">Mat</a> to
* the {@link PrintStream}.
* </p>
*
* <p>
* Please note this is a really bad idea for large images but can help with debugging
* problems when you're using OpenCV for its linear-algebra/matrix capabilities.
* </p>
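*
* <p>
* For example, to dump the first 3 rows and 4 columns of a Mat to {@link System#out}:
* </p>
*
* <pre>
* <code>
* Utils.dump(mat, System.out, 3, 4);
* </code>
* </pre>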
*
* @param mat the Mat to dump to the {@link PrintStream}
* @param out the {@link PrintStream} to dump the Mat to.
* @param numRows limit the number of rows to the given number. Supply -1 for
*     all rows.
* @param numCols limit the number of columns to the given number. Supply -1 for
*     all columns.
*/
public static void dump(final Mat mat, final PrintStream out, final int numRows, final int numCols) {
dump(mat, out, 0, numRows, 0, numCols);
}
public static record LetterboxDetails(CvMat mat, double scale, int width, int height, int topPadding, int bottomPadding, int leftPadding, int rightPadding)
implements QuietCloseable {
@Override
public void close() {
if(mat != null)
mat.close();
}
}
/**
* Many deep learning models have a fixed size input and so there's a requirement for
* <em>letterboxing</em> the input image to the network size. That is, scale the input
* image to the network input dimensions while preserving the aspect ratio and filling
* in the border with a given pixel value.
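*
* <p>
* For example, a minimal usage sketch (the 640x640 network size is
* hypothetical):
* </p>
*
* <pre>
* <code>
* try(LetterboxDetails lb = Utils.letterbox(inputMat, new Size(640, 640))) {
*     Mat networkInput = lb.mat();
*     double scale = lb.scale(); // needed to map detections back to the original image
* }
* </code>
* </pre>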
*/
public static LetterboxDetails letterbox(final Mat mat, final Size networkDim) {
return letterbox(mat, networkDim, DEFAULT_PADDING, null);
}
/**
* Many deep learning models have a fixed size input and so there's a requirement for
* <em>letterboxing</em> the input image to the network size. That is, scale the input
* image to the network input dimensions while preserving the aspect ratio and filling
* in the border with a given pixel value.
*/
public static LetterboxDetails letterbox(final Mat mat, final int dim) {
return letterbox(mat, new Size(dim, dim), DEFAULT_PADDING, null);
}
/**
* Many deep learning models have a fixed size input and so there's a requirement for
* <em>letterboxing</em> the input image to the network size. That is, scale the input
* image to the network input dimensions while preserving the aspect ratio and filling
* in the border with a given pixel value.
*/
public static LetterboxDetails letterbox(final Mat mat, final int dim, final Scalar padding) {
return letterbox(mat, new Size(dim, dim), padding, null);
}
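    /**
    * Compute the mean absolute error between the flattened contents of {@code mat}
    * and the {@code expected} reference values. Primarily useful for comparing a
    * Mat against raw reference data in tests.
    */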
public static double averageError(final Mat mat, final float[] expected) {
System.out.println(mat);
final int numTotalElements = (int)(mat.total() * mat.channels());
if(expected.length != numTotalElements)
throw new IllegalArgumentException("Mat size doesn't match expected size. Mat size is " +
numTotalElements + " while the number of expected elements is " + expected.length);
try(var flat = CvMat.move(mat.reshape(1, new int[] {1,numTotalElements}));) {
double err = 0;
for(int i = 0; i < numTotalElements; i++) {
err += Math.abs(expected[i] - flat.get(0, i)[0]);
}
return err / numTotalElements;
}
}
public static double averageError(final Mat mat, final double[] expected) {
System.out.println(mat);
final int numTotalElements = (int)(mat.total() * mat.channels());
if(expected.length != numTotalElements)
throw new IllegalArgumentException("Mat size doesn't match expected size. Mat size is " +
numTotalElements + " while the number of expected elements is " + expected.length);
try(var flat = CvMat.move(mat.reshape(1, new int[] {1,numTotalElements}));) {
double err = 0;
for(int i = 0; i < numTotalElements; i++) {
err += Math.abs(expected[i] - flat.get(0, i)[0]);
}
return err / numTotalElements;
}
}
public static double averageErrorFloat(final Mat mat, final File rawDataFile) throws IOException {
final float[] expected;
try(var in = new FileInputStream(rawDataFile);) {
final ByteBuffer expectedBb = ByteBuffer.wrap(IOUtils.toByteArray(in));
expectedBb.order(ByteOrder.LITTLE_ENDIAN);
final FloatBuffer expectedFb = expectedBb.asFloatBuffer();
expected = new float[expectedFb.capacity()];
expectedFb.get(expected);
}
return averageError(mat, expected);
}
public static double averageErrorDouble(final Mat mat, final File rawDataFile) throws IOException {
final double[] expected;
try(var in = new FileInputStream(rawDataFile);) {
final ByteBuffer expectedBb = ByteBuffer.wrap(IOUtils.toByteArray(in));
expectedBb.order(ByteOrder.LITTLE_ENDIAN);
final DoubleBuffer expectedFb = expectedBb.asDoubleBuffer();
expected = new double[expectedFb.capacity()];
expectedFb.get(expected);
}
return averageError(mat, expected);
}
public static double averageErrorUint8(final Mat mat, final File rawDataFile) throws IOException {
final float[] expected;
try(var in = new FileInputStream(rawDataFile);) {
final ByteBuffer expectedBb = ByteBuffer.wrap(IOUtils.toByteArray(in));
expectedBb.order(ByteOrder.LITTLE_ENDIAN);
expected = new float[expectedBb.capacity()];
for(int i = 0; i < expected.length; i++) {
expected[i] = expectedBb.get() & 0xff;
}
}
return averageError(mat, expected);
}
/**
* Many deep learning models have a fixed size input and so there's a requirement for
* <em>letterboxing</em> the input image to the network size. That is, scale the input
* image to the network input dimensions while preserving the aspect ratio and filling
* in the border with a given pixel value.
*/
public static LetterboxDetails letterbox(final Mat mat, final Size networkDim, final Scalar padding, final MutableDouble scaleOut) {
        // border sizes that get returned in the LetterboxDetails
final int top, bottom, left, right;
final MutableDouble scale = scaleOut == null ? new MutableDouble(-1) : scaleOut;
if(!networkDim.equals(mat.size())) {
// resize the mat
final Size toResizeTo = Utils.scaleWhilePreservingAspectRatio(mat, networkDim, scale);
try(CvMat resized = new CvMat();
CvMat toUse = new CvMat();) {
Imgproc.resize(mat, resized, toResizeTo);
                // one of the dims should match exactly
if((int)(networkDim.width) > resized.width()) { // then the height dim has 0 border
top = bottom = 0;
final int diff = ((int)networkDim.width - resized.width());
right = diff / 2;
left = diff - right;
} else { // then the width dim has 0 border OR the image is exactly a square (in which case everything becomes 0 anyway).
right = left = 0;
final int diff = ((int)networkDim.height - resized.height());
top = diff / 2;
bottom = diff - top;
}
Core.copyMakeBorder(resized, toUse, top, bottom, left, right, Core.BORDER_CONSTANT, padding);
return new LetterboxDetails(toUse.returnMe(), scale.val, (int)networkDim.width, (int)networkDim.height, top, bottom, left, right);
}
} else {
top = bottom = left = right = 0;
return new LetterboxDetails(CvMat.shallowCopy(mat).returnMe(), scale.val, (int)networkDim.width, (int)networkDim.height, top, bottom, left, right);
}
}
private static String arrayToHexString(final Function<Integer, Long> valueGetter, final int length, final long mask) {
final StringBuilder sb = new StringBuilder("[");
IntStream.range(0, length - 1)
.forEach(i -> {
// sb.append(Long.toHexString(valueGetter.apply(i) & mask));
sb.append(valueGetter.apply(i) & mask);
sb.append(", ");
});
if(length > 0)
// sb.append(Long.toHexString(valueGetter.apply(length - 1) & mask));
sb.append(valueGetter.apply(length - 1) & mask);
sb.append("]");
return sb.toString();
}
private static interface PixelPrinter {
void print(int pixelPos);
}
private static PixelPrinter makePixelPrinter(final PrintStream stream, final int type, final ByteBuffer bb) {
final int channels = CvType.channels(type);
switch(CvType.depth(type)) {
case CvType.CV_8S:
case CvType.CV_8U: {
final byte[] pixelBuf = new byte[channels];
return pixelPos -> {
bb.get(pixelPos * channels, pixelBuf);
stream.print(arrayToHexString(i -> (long)pixelBuf[i], pixelBuf.length, 0xffL));
};
}
case CvType.CV_16S:
case CvType.CV_16U: {
final short[] pixelBuf = new short[channels];
final ShortBuffer buf = bb.asShortBuffer();
return pixelPos -> {
buf.get(pixelPos * channels, pixelBuf);
stream.print(arrayToHexString(i -> (long)pixelBuf[i], pixelBuf.length, 0xffffL));
};
}
case CvType.CV_32S: {
final int[] pixelBuf = new int[channels];
final IntBuffer buf = bb.asIntBuffer();
return pixelPos -> {
buf.get(pixelPos * channels, pixelBuf);
stream.print(arrayToHexString(i -> (long)pixelBuf[i], pixelBuf.length, 0xffffffffL));
};
}
case CvType.CV_32F: {
final float[] pixelBuf = new float[channels];
final FloatBuffer buf = bb.asFloatBuffer();
return pixelPos -> {
buf.get(pixelPos * channels, pixelBuf);
stream.print(Arrays.toString(pixelBuf));
};
}
case CvType.CV_64F: {
final double[] pixelBuf = new double[channels];
final DoubleBuffer buf = bb.asDoubleBuffer();
return pixelPos -> {
buf.get(pixelPos * channels, pixelBuf);
stream.print(Arrays.toString(pixelBuf));
};
}
default:
throw new IllegalArgumentException("Can't handle CvType with value " + CvType.typeToString(type));
}
}
/**
* <p>
* You can use this method to dump the contents of a
* <a href="https://docs.opencv.org/4.0.1/d3/d63/classcv_1_1Mat.html">Mat</a> to
* the {@link PrintStream}.
* </p>
*
* <p>
* Please note this is a really bad idea for large images but can help with debugging
* problems when you're using OpenCV for its linear-algebra/matrix capabilities.
* </p>
*
* @param raster the Mat to dump to the {@link PrintStream}
* @param out the {@link PrintStream} to dump the Mat to.
* @param numRows limit the number of rows to the given number. Supply -1 for
*     all rows.
* @param numCols limit the number of columns to the given number. Supply -1 for
*     all columns.
*/
private static void dump(final Mat raster, final PrintStream out, final int startRow, final int numRows, final int startCol, final int numCols) {
        final int endRow = (numRows < 0) ? (raster.rows() - 1) : Math.min((startRow + numRows - 1), raster.rows() - 1);
        final int endCol = (numCols < 0) ? (raster.cols() - 1) : Math.min((startCol + numCols - 1), raster.cols() - 1);
final int numColsInImage = raster.cols();
out.println(raster);
CvMat.bulkAccess(raster, bb -> {
final PixelPrinter pp = makePixelPrinter(out, raster.type(), bb);
for(int r = startRow; r <= endRow; r++) {
out.print("[");
for(int c = startCol; c < endCol; c++) {
out.print(" ");
pp.print(r * numColsInImage + c);
out.print(",");
}
out.print(" ");
pp.print(r * numColsInImage + endCol);
out.println("]");
}
});
}
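    /**
    * For each mask, compute how far a masked field must be right-shifted so that
    * its least-significant bit lands at bit 0. For example, a mask of 0x00ff0000
    * yields a shift of 16.
    */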
private static int[] determineShifts(final int[] masks) {
final int[] ret = new int[masks.length];
for(int i = 0; i < masks.length; i++) {
int mask = masks[i];
int shift = 0;
if(mask != 0) {
while((mask & 1) == 0) {
mask >>>= 1;
shift++;
}
}
ret[i] = shift;
}
return ret;
}
    /**
    * Set {@code toSet} to a single channel, 1x1 Mat of the given type whose only
    * element is the given mask value.
    */
private static void makeScalarMat(final int mask, final int type, final CvMat toSet) {
try(var tmp = new CvMat(1, 1, type);) {
CvMat.reassign(toSet, tmp);
}
final Object maskPixel = CvRaster.intsToPixelConverter(type).apply(new int[] {mask});
toSet.rasterAp(r -> r.set(0, maskPixel));
}
private static CvMat handleComponentColorModel(final BufferedImage bufferedImage, final ComponentColorModel cm) {
final int w = bufferedImage.getWidth();
final int h = bufferedImage.getHeight();
final int type = bufferedImage.getType();
switch(type) {
case TYPE_CUSTOM:
return handleCustomComponentColorModel(bufferedImage, cm);
case TYPE_3BYTE_BGR:
case TYPE_4BYTE_ABGR:
case TYPE_4BYTE_ABGR_PRE: {
LOGGER.trace("NORMAL COPY");
final DataBuffer dataBuffer = bufferedImage.getRaster().getDataBuffer();
if(!(dataBuffer instanceof DataBufferByte))
throw new IllegalArgumentException("BufferedImage of type \"" + type + "\" should have a " + DataBufferByte.class.getSimpleName()
+ " but instead has a " + dataBuffer.getClass().getSimpleName());
final DataBufferByte bb = (DataBufferByte)dataBuffer;
switch(type) {
case TYPE_3BYTE_BGR:
return abgrDataBufferByteToMat(bb, h, w, false);
case TYPE_4BYTE_ABGR:
case TYPE_4BYTE_ABGR_PRE:
return abgrDataBufferByteToMat(bb, h, w, true);
}
}
case TYPE_BYTE_GRAY: {
System.out.println("GRAY");
final DataBuffer dataBuffer = bufferedImage.getRaster().getDataBuffer();
if(!(dataBuffer instanceof DataBufferByte))
throw new IllegalArgumentException("BufferedImage should have a " + DataBufferByte.class.getSimpleName() + " but instead has a "
+ dataBuffer.getClass().getSimpleName());
final DataBufferByte bb = (DataBufferByte)dataBuffer;
final byte[] srcdata = bb.getData();
try(final CvMat ret = new CvMat(h, w, CvType.CV_8UC1);) {
ret.put(0, 0, srcdata);
return ret.returnMe();
}
}
case TYPE_USHORT_GRAY: {
System.out.println("GRAY 16");
final DataBuffer dataBuffer = bufferedImage.getRaster().getDataBuffer();
if(!(dataBuffer instanceof DataBufferUShort))
throw new IllegalArgumentException("BufferedImage should have a " + DataBufferUShort.class.getSimpleName() + " but instead has a "
+ dataBuffer.getClass().getSimpleName());
final DataBufferUShort bb = (DataBufferUShort)dataBuffer;
final short[] srcdata = bb.getData();
try(final CvMat ret = new CvMat(h, w, CvType.CV_16UC1);) {
ret.put(0, 0, srcdata);
return ret.returnMe();
}
}
default:
throw new IllegalArgumentException("Cannot extract pixels from a BufferedImage of type " + bufferedImage.getType());
}
}
private static final int[] icmCompSizesWithAlpha = {8,8,8,8};
private static final int[] icmCompSizesNoAlpha = {8,8,8};
private static CvMat handleIndexColorModel(final BufferedImage bufferedImage, final ColorModel cm) {
final int[] compSizes = cm.getComponentSize();
        // IndexColorModels are always in the sRGB color space.
// the compSizes should be either {8, 8, 8} or {8, 8, 8, 8}.
final boolean hasAlpha;
if(Arrays.equals(compSizes, icmCompSizesWithAlpha))
hasAlpha = true;
else if(Arrays.equals(compSizes, icmCompSizesNoAlpha))
hasAlpha = false;
else
throw new IllegalArgumentException("IndexColorModel component size (" + Arrays.toString(compSizes) + ") should be either "
+ Arrays.toString(icmCompSizesNoAlpha) + " or " + Arrays.toString(icmCompSizesWithAlpha));
final int[] shifty = hasAlpha ? new int[] {16,8,0,24} : new int[] {16,8,0};
try(CvMat ret = new CvMat(bufferedImage.getHeight(), bufferedImage.getWidth(), hasAlpha ? CvType.CV_8UC4 : CvType.CV_8UC3);) {
final byte[] tmpPixel = new byte[hasAlpha ? 4 : 3];
final int oirows = ret.rows();
final int oicols = ret.cols();
ret.bulkAccess(bb -> {
int pos = 0;
for(int r = 0; r < oirows; r++) {
for(int c = 0; c < oicols; c++) {
final int color = bufferedImage.getRGB(c, r);
tmpPixel[2] = (byte)((color >> shifty[0]) & 0xff);
tmpPixel[1] = (byte)((color >> shifty[1]) & 0xff);
tmpPixel[0] = (byte)((color >> shifty[2]) & 0xff);
if(hasAlpha)
tmpPixel[3] = (byte)((color >> shifty[3]) & 0xff);
bb.put(pos * tmpPixel.length, tmpPixel);
pos++;
}
}
});
return ret.returnMe();
}
}
    /**
    * Determine the number of bits for each channel and verify they're all the
    * same. Throws an {@link IllegalArgumentException} if they're not.
    */
    private static int[] ccmCheckBitsPerChannel(final ComponentColorModel cm) {
final int[] bitsPerChannel = cm.getComponentSize();
final int bpc = bitsPerChannel[0];
if(IntStream.range(1, bitsPerChannel.length)
.filter(i -> bitsPerChannel[i] != bpc)
.findAny()
.isPresent())
throw new IllegalArgumentException(
"Cannot handle an image with ComponentColorModel where the channels have a different number of bits per channel. They are currently: "
+ Arrays.toString(bitsPerChannel));
return bitsPerChannel;
}
private static CvMat handleCMYKColorSpace(final BufferedImage bufferedImage, final boolean kchannel) {
final int w = bufferedImage.getWidth();
final int h = bufferedImage.getHeight();
final int numDataElements = bufferedImage.getRaster().getNumDataElements();
try(final CvMat pcmykMat = putDataBufferIntoMat(bufferedImage.getData().getDataBuffer(), h, w, numDataElements);
final CvMat cmykMat = new CvMat();
final Closer closer = new Closer();) {
final int bpc = bitsPerChannel(pcmykMat.depth());
final int maxValue;
if(bpc == 16)
maxValue = 65535;
else if(bpc == 8)
maxValue = 255;
else
throw new IllegalStateException("Can only handle CMYK images that are 8 or 16 bits per channel. Not " + bpc);
final int mask = maxValue;
final Mat maskMat = chain(new CvMat(), m -> closer.addMat(m), m -> makeScalarMat(mask, CvType.makeType(CvType.CV_32S, 1), m));
// System.out.println("Original CMYK:");
// dump(pcmykMat, 1, 51);
// do the final inversion first
Core.bitwise_not(pcmykMat, cmykMat);
// System.out.println("~CMYK:");
// dump(cmykMat, 1, 51);
if(kchannel) {
try(final Closer c2 = new Closer();) {
// R = 255 x (1-C) x (1-K)
// R = (255 - C') x (1-K)
// R = (255 - C') x (255 - K')/255
//
// Our image is C'Y'M'K'
//
// so:
//
// 255 R = ~C' x ~K'
//
// R = (~C' x ~K') / 255
final CvMat cmyk32 = chain(new CvMat(), c -> c2.add(c), c -> cmykMat.convertTo(c, CvType.CV_32S));
final List<Mat> channels = new ArrayList<>(4);
Core.split(cmyk32, channels);
channels.forEach(m -> c2.addMat(m));
final List<Mat> bgrL = new ArrayList<>(3);
bgrL.add(channels.get(2));
bgrL.add(channels.get(1));
bgrL.add(channels.get(0));
final Mat k = channels.get(3);
final List<Mat> bgrXk = bgrL.stream()
.map(c -> c2.addMat(c.mul(k)))
// .peek(c -> {
// System.out.println("CxK:");
// dump(c, 1, 51);
// })
.map(c -> chain(c, m -> Core.multiply(m, new Scalar(1.0D / maxValue), m)))
// .peek(c -> {
// System.out.println("CxK/255:");
// dump(c, 1, 51);
// })
.map(c -> chain(c, m -> Core.bitwise_and(m, maskMat, m)))
// .peek(c -> {
// System.out.println("CxK/255 masked:");
// dump(c, 1, 51);
// })
.map(c -> chain(c, m -> m.convertTo(c, CvType.makeType(pcmykMat.depth(), 1))))
// .peek(c -> {
// System.out.println("CxK/255 -> 8U:");
// dump(c, 1, 51);
// })
.collect(Collectors.toList());
if(numDataElements > 4) {
// we need to add back the extra channels
final List<Mat> chs = new ArrayList<>();
Core.split(pcmykMat, chs);
for(int i = 4; i < numDataElements; i++) {
bgrXk.add(chs.get(i));
}
}
final CvMat bgr = chain(new CvMat(), m -> Core.merge(bgrXk, m), m -> c2.add(m));
return bgr.returnMe();
}
} else {
Imgproc.cvtColor(cmykMat, cmykMat, Imgproc.COLOR_RGB2BGR);
return cmykMat;
}
}
}
private static boolean doFallback(final ComponentColorModel cm) {
final ColorSpace colorSpace = cm.getColorSpace();
        if(ColorSpace.TYPE_CMYK == colorSpace.getType() || ColorSpace.TYPE_CMY == colorSpace.getType())
return ICC_ColorSpace.class.isAssignableFrom(colorSpace.getClass());
return !(CvMatWithColorInformation.isLinearRGBspace(colorSpace)
|| colorSpace.getType() == ColorSpace.TYPE_GRAY
|| colorSpace.isCS_sRGB());
}
private static CvMat fallback(final BufferedImage bufferedImage, final ColorModel cm) {
final int w = bufferedImage.getWidth();
final int h = bufferedImage.getHeight();
final int numChannels = bufferedImage.getRaster().getNumDataElements();
final boolean isAlphaPremultiplied = cm.isAlphaPremultiplied();
try(final CvMat floatMat = new CvMat();
final CvMat rawMat = putDataBufferIntoMat(bufferedImage.getRaster().getDataBuffer(), h, w, numChannels);) {
final int bpc = bitsPerChannel(rawMat.depth());
boolean skipConvert = false;
double maxValue = 255.0D;
if(bpc == 16)
maxValue = 65535.0;
else if(bpc == 32 && rawMat.depth() == CvType.CV_32F)
skipConvert = true;
else if(bpc != 8)
throw new IllegalStateException("Can only handle 8 or 16 or 32 bit channels. Not " + bpc);
// normalize, convert to float first
if(skipConvert)
CvMat.reassign(floatMat, rawMat);
else
rawMat.convertTo(floatMat, CvType.makeType(CvType.CV_32F, numChannels), 1.0D / maxValue);
// floatMat is now normalized.
final ColorSpace colorSpace = cm.getColorSpace();
try(final CvMat mappedFloatMat = new CvMat(h, w, CvType.makeType(CvType.CV_32F, numChannels));) {
floatMat.rasterAp(flRas -> {
mappedFloatMat.rasterAp(mappedFloatRas -> {
flRas.forEach((FlatFloatPixelConsumer)(pos, pixel) -> {
final float[] result = colorSpace.toRGB(pixel);
// need bgr.
final float resZero = result[0];
result[0] = result[2];
result[2] = resZero;
if(isAlphaPremultiplied) {
final float alpha = pixel[3];
result[0] = result[0] / alpha;
result[1] = result[1] / alpha;
result[2] = result[2] / alpha;
}
if(numChannels > 3) {
final float[] aug = new float[numChannels];
System.arraycopy(result, 0, aug, 0, 3);
for(int i = 3; i < numChannels; i++)
aug[i] = pixel[i];
mappedFloatRas.set(pos, aug);
} else
mappedFloatRas.set(pos, result);
});
});
});
// okay. Now we need to scale out and convert
try(CvMat ret = new CvMat()) {
mappedFloatMat.convertTo(ret, CvType.makeType(rawMat.depth(), 3), maxValue);
return ret.returnMe();
}
}
}
}
private static CvMat handleCustomComponentColorModel(final BufferedImage bufferedImage, final ComponentColorModel cm) {
        // If the bufferedImage isn't sRGB, Gray, or LinearRGB, and it's an ICC color
        // space, we should fall back to normalizing the image and then using
        // ColorSpace.toRGB(float[] pixel) for each normalized pixel.
if(doFallback(cm))
return fallback(bufferedImage, cm);
// Check the ColorSpace type. If it's TYPE_CMYK then we can handle it
// without the fallback.
if(ColorSpace.TYPE_CMYK == cm.getColorSpace().getType())
return handleCMYKColorSpace(bufferedImage, true);
else if(ColorSpace.TYPE_CMY == cm.getColorSpace().getType())
return handleCMYKColorSpace(bufferedImage, false);
final int w = bufferedImage.getWidth();
final int h = bufferedImage.getHeight();
        final int[] bitsPerChannel = ccmCheckBitsPerChannel(cm);
final int bpc = bitsPerChannel[0];
        // Now, do we have an 8-bit, a 16-bit, or a 32-bit per-channel image?
if(bpc > 8 && bpc <= 16) {
final DataBuffer dataBuffer = bufferedImage.getRaster().getDataBuffer();
// make sure the DataBuffer type is a DataBufferUShort or DataBufferShort
if(dataBuffer instanceof DataBufferUShort) {
try(CvMat ret = dataBufferUShortToMat((DataBufferUShort)dataBuffer, h, w, bufferedImage.getRaster().getNumDataElements(),
cm.getColorSpace().getType() == ColorSpace.TYPE_RGB);) {
if(bpc != 16) {
final double maxPixelValue = 65536.0D;
final double scale = ((maxPixelValue - 1) / ((1 << bpc) - 1));
final double[] scalar = new double[bitsPerChannel.length];
for(int i = 0; i < scalar.length; i++)
scalar[i] = scale;
Core.multiply(ret, new Scalar(scalar), ret);
}
return ret.returnMe();
}
} else if(dataBuffer instanceof DataBufferShort) {
try(CvMat ret = dataBufferShortToMat((DataBufferShort)dataBuffer, h, w, bufferedImage.getRaster().getNumDataElements(),
cm.getColorSpace().getType() == ColorSpace.TYPE_RGB);) {
if(bpc != 16) {
                    final double maxPixelValue = 32768.0D;
                    final double scale = ((maxPixelValue - 1) / ((1 << bpc) - 1));
final double[] scalar = new double[bitsPerChannel.length];
for(int i = 0; i < scalar.length; i++)
scalar[i] = scale;
Core.multiply(ret, new Scalar(scalar), ret);
}
return ret.returnMe();
}
} else
throw new IllegalArgumentException("For a 16-bit per channel RGB image the DataBuffer type should be a DataBufferUShort but it's a "
+ dataBuffer.getClass().getSimpleName());
} else if(bpc <= 8) {
final DataBuffer dataBuffer = bufferedImage.getRaster().getDataBuffer();
// make sure the DataBuffer type is a DataBufferByte
if(!(dataBuffer instanceof DataBufferByte))
throw new IllegalArgumentException("For a 8-bit per channel RGB image the DataBuffer type should be a DataBufferByte but it's a "
+ dataBuffer.getClass().getSimpleName());
try(CvMat ret = dataBufferByteToMat((DataBufferByte)dataBuffer, h, w, bufferedImage.getRaster().getNumDataElements(),
cm.getColorSpace().getType() == ColorSpace.TYPE_RGB);) {
if(bpc != 8) {
final double maxPixelValue = 255.0D;
final double scale = ((maxPixelValue - 1) / ((1 << bpc) - 1));
final double[] scalar = new double[bitsPerChannel.length];
for(int i = 0; i < scalar.length; i++)
scalar[i] = scale;
Core.multiply(ret, new Scalar(scalar), ret);
}
if(cm.isAlphaPremultiplied())
dePreMultiplyAlpha(ret, 255.0, CvType.CV_8U);
return ret.returnMe();
}
} else if(bpc > 16 && bpc <= 32) {
final DataBuffer dataBuffer = bufferedImage.getRaster().getDataBuffer();
            // make sure the DataBuffer type is a DataBufferInt
if(!(dataBuffer instanceof DataBufferInt)) {
if(dataBuffer instanceof DataBufferFloat && bpc == 32) {
return dataBufferFloatToMat((DataBufferFloat)dataBuffer, h, w, bufferedImage.getRaster().getNumDataElements(),
cm.getColorSpace().getType() == ColorSpace.TYPE_RGB);
} else
throw new IllegalArgumentException("For a " + bpc + "-bit per channel GRAY image the DataBuffer type should be a DataBufferInt but it's a "
+ dataBuffer.getClass().getSimpleName());
}
return dataBufferIntToMat((DataBufferInt)dataBuffer, h, w, bufferedImage.getRaster().getNumDataElements(),
cm.getColorSpace().getType() == ColorSpace.TYPE_RGB);
} else
throw new IllegalArgumentException(
"Cannot handle an image with a ComponentColorModel that has " + bpc + " bits per channel.");
}
private static CvMat handleDirectColorModel(final BufferedImage bufferedImage, final DirectColorModel cm) {
final int w = bufferedImage.getWidth();
final int h = bufferedImage.getHeight();
final boolean hasAlpha = cm.hasAlpha();
try(final CvMat rawMat = putDataBufferIntoMat(bufferedImage.getRaster().getDataBuffer(), h, w, 1);
CvMat ret = transformDirect(rawMat, hasAlpha, cm.isAlphaPremultiplied(), cm.getMasks());) {
return ret.returnMe();
}
}
/**
* <p>
* This will right shift and mask. Let's take an example. Suppose you have an
* CV_32S (32-bit signed int) Mat with a ARGB 8-bit per channel layout. You can
* extract the 'R' channel using the following call:
* {@code bitwiseUnsignedRightShiftAndMask( src, dst, 16, 8 );}.
* </p>
* <p>
* A more complicated example might be: you have an 16-bit 565/RGB layout. Here
* the most-significant 5 bits are Red, The next 6 bits are the unsigned Green
* value, the least-significant 5 bits are the Blue value. You can extract the
* Red using the following call:
* {@code bitwiseUnsignedRightShiftAndMask( src, dst, 11, 5 );}.
* </p>
* <p>
* Extracting each channel would be:
* </p>
*
* <pre>
* <code>
* bitwiseUnsignedRightShiftAndMask(src, red, 11, 5);
* bitwiseUnsignedRightShiftAndMask(src, green, 5, 6);
* bitwiseUnsignedRightShiftAndMask(src, blue, 0, 5);
* </code>
* </pre>
* <p>
* The destination {@link CvMat} will be the same {@code type} as the source so,
* continuing with the above example, if you want the final image to be CV_8UC3 you
* need to {@code convert} and {@code merge} the separate channels.
* </p>
* <p>
* For example:
* </p>
*
* <pre>
* <code>
* red.convertTo(red, CvType.CV_8U);
* green.convertTo(green, CvType.CV_8U);
* blue.convertTo(blue, CvType.CV_8U);
* Core.merge(Arrays.asList(blue, green, red), finalMat);
* </code>
* </pre>
*
* @param toShift is the source Mat to shift
* @param dst is the destination Mat
* @param shift is the number of bits to shift
* @param numBitsInField is the number of bits in the field being extracted (the width of the mask).
*/
public static void bitwiseUnsignedRightShiftAndMask(final Mat toShift, final Mat dst, final int shift, final int numBitsInField) {
if(toShift.channels() > 1)
throw new IllegalArgumentException("Cannot bitwiseUnsignedRightShiftAndMask a Mat with more than one (" + toShift.channels() + ") channels.");
final int divisor = 1 << shift; // e.g. if shift is 8, divisor is 256
final int maskLsb = divisor - 1; // e.g. if shift is 8, mask = 255 = 0x000000ff
final int type = toShift.type();
final int bitsInSrcField = bitsPerChannel(type);
if(numBitsInField + shift > bitsInSrcField)
throw new IllegalArgumentException(
"The number of bits in the field being shifted (" + numBitsInField + ") along with the amount to shift (" + shift
+ ") is greater than the size of the field itself (" + bitsInSrcField + ")");
final int msbMask = (1 << numBitsInField) - 1;
try(final CvMat maskMat = new CvMat();
final CvMat msbMaskMat = new CvMat();) {
            // First, chop off the lower bits.
if(shift > 0) {
// maskLsb is a mask that when ANDED with, KEEPS the LS bits. We need
// to CUT the LSB prior to shifting, so we negate maskLsb.
makeScalarMat(~maskLsb, type, maskMat);
// System.out.println("mask scalar mat:");
// dump(maskMat);
Core.bitwise_and(toShift, maskMat, dst); // mask out LSBs that are where we're going to shift into
// System.out.println("Mat & mask");
// dump(dst, 13, 13);
Core.multiply(dst, new Scalar(1.0D / divisor), dst); // shift all of values in the channel >> shift.
} else {
try(CvMat tmp = CvMat.deepCopy(toShift)) {
CvMat.reassign(dst, tmp);
}
}
makeScalarMat(msbMask, type, msbMaskMat);
// System.out.println("msbMask mat");
// dump(msbMaskMat);
Core.bitwise_and(dst, msbMaskMat, dst);
}
}
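    /**
    * Convert a {@link BufferedImage} to a {@link CvMatWithColorInformation},
    * dispatching on the image's {@link ColorModel}. For example, a minimal usage
    * sketch (the file name is hypothetical):
    *
    * <pre>
    * <code>
    * BufferedImage bufferedImage = ImageIO.read(new File("/tmp/in.png"));
    * try(CvMatWithColorInformation mat = Utils.img2CvMat(bufferedImage)) {
    *     // the resulting Mat is in OpenCV BGR (or BGRA) channel order
    * }
    * </code>
    * </pre>
    */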
public static CvMatWithColorInformation img2CvMat(final BufferedImage bufferedImage) {
final ColorModel colorModel = bufferedImage.getColorModel();
try(Closer closer = new Closer()) {
if(colorModel instanceof DirectColorModel) {
return new CvMatWithColorInformation(closer.add(handleDirectColorModel(bufferedImage, (DirectColorModel)colorModel)), bufferedImage);
} else if(colorModel instanceof ComponentColorModel) {
return new CvMatWithColorInformation(closer.add(handleComponentColorModel(bufferedImage, (ComponentColorModel)colorModel)), bufferedImage);
} else if(colorModel instanceof IndexColorModel) {
return new CvMatWithColorInformation(closer.add(handleIndexColorModel(bufferedImage, colorModel)), bufferedImage);
} else if(colorModel.getClass().getName().equals("com.twelvemonkeys.imageio.color.DiscreteAlphaIndexColorModel")) {
return new CvMatWithColorInformation(closer.add(handleIndexColorModel(bufferedImage, colorModel)), bufferedImage);
} else {
LOGGER.trace("There's an unknown color model: {}. (img type: {}, color space: {})", colorModel.getClass().getName(), bufferedImage.getType(),
CvMatWithColorInformation.colorSpaceTypeName(colorModel.getColorSpace().getType()));
throw new IllegalArgumentException("Can't handle a BufferedImage with a " + colorModel.getClass().getSimpleName() + " color model.");
}
}
}
public static void print(final String prefix, final Mat im) {
        System.out
            .println(prefix + " { elemSize=" + CvType.ELEM_SIZE(im.type()) + " (" + CvType.typeToString(im.type()) + "), depth=" + im.depth() + ", channels="
                + im.channels() + ", HxW=" + im.height() + "x" + im.width() + " }");
}
/**
* Find the point on the line defined by {@code perpRef} that's closest to the
* point {@code x}. Note, {@link PerpendicularLine} is poorly named.
*/
public static Point closest(final Point x, final PerpendicularLine perpRef) {
return closest(x, perpRef.x(), perpRef.y());
}
public static void drawCircle(final Point p, final Mat ti, final Color color) {
drawCircle(p, ti, color, 10);
}
public static void drawCircle(final int row, final int col, final Mat ti, final Color color) {
drawCircle(row, col, ti, color, 10);
}
    public static void drawCircle(final Point p, final Mat ti, final Color color, final int radius) {
        // Imgproc.circle takes the center of the circle, not the top-left of the bounding box.
        Imgproc.circle(ti, new org.opencv.core.Point((int)(p.getCol() + 0.5), (int)(p.getRow() + 0.5)),
            radius, new Scalar(color.getBlue(), color.getGreen(), color.getRed()));
    }
    public static void drawCircle(final int row, final int col, final Mat ti, final Color color, final int radius) {
        Imgproc.circle(ti, new org.opencv.core.Point((int)(col + 0.5), (int)(row + 0.5)),
            radius, new Scalar(color.getBlue(), color.getGreen(), color.getRed()));
    }
public static void drawCircle(final int row, final int col, final Graphics2D g, final Color color, final int radius) {
g.setColor(color);
g.drawOval(((int)(col + 0.5)) - radius,
((int)(row + 0.5)) - radius,
2 * radius, 2 * radius);
}
public static void drawCircle(final int row, final int col, final Graphics2D g, final Color color) {
drawCircle(row, col, g, color, 10);
}
public static void drawCircle(final Point p, final Graphics2D g, final Color color) {
drawCircle((int)p.getRow(), (int)p.getCol(), g, color, 10);
}
public static void drawBoundedPolarLine(final Point bound1, final Point bound2, final double r, final double c, final Mat ti, final Color color) {
drawLine(closest(bound1, c, r), closest(bound2, c, r), ti, color);
}
public static void drawLine(final Point p1, final Point p2, final Mat ti, final Color color) {
Imgproc.line(ti, new org.opencv.core.Point(p1.getCol(), p1.getRow()),
new org.opencv.core.Point(p2.getCol(), p2.getRow()),
new Scalar(color.getBlue(), color.getGreen(), color.getRed()));
}
public static void drawLine(final Point p1, final Point p2, final Graphics2D g, final Color color) {
g.setColor(color);
g.drawLine((int)(p1.getCol() + 0.5), (int)(p1.getRow() + 0.5), (int)(p2.getCol() + 0.5), (int)(p2.getRow() + 0.5));
}
public static void drawPolarLine(final double r, final double c, final Mat ti, final Color color) {
drawPolarLine(r, c, ti, color, 0, 0, ti.rows() - 1, ti.cols() - 1);
}
public static void drawPolarLine(final double r, final double c, final Mat ti, final Color color,
final int boundingr1, final int boundingc1, final int boundingr2, final int boundingc2) {
drawPolarLine(r, c, ti, color, boundingr1, boundingc1, boundingr2, boundingc2, 0, 0);
}
public static void drawPolarLine(final double r, final double c, final Mat ti, final Color color,
int boundingr1, int boundingc1, int boundingr2, int boundingc2,
final int translater, final int translatec) {
int tmpd;
if(boundingr1 > boundingr2) {
tmpd = boundingr1;
boundingr1 = boundingr2;
boundingr2 = tmpd;
}
if(boundingc1 > boundingc2) {
tmpd = boundingc1;
boundingc1 = boundingc2;
boundingc2 = tmpd;
}
// a polar line represented by r,c is a perpendicular to
// the line from the origin to the point r,c. The line
// from the origin to this point in rad,theta is given
// by:
//
// rad = sqrt(r^2 + c^2)
// theta = tan^-1(r/c)
// (where theta is measured from the top of the
// image DOWN to the point r,c)
//
// anyway - the line is represented by:
// x cos(theta) + y sin (theta) = r
final double rad = Math.sqrt((r * r) + (c * c));
// we need to find the endpoints of the line:
int r1, c1, r2, c2;
        // let's remove the simple possibilities
if(c == 0.0) {
r1 = r2 = (int)(rad + 0.5);
c1 = boundingc1;
c2 = boundingc2;
} else if(r == 0.0) {
c1 = c2 = (int)(rad + 0.5);
r1 = boundingr1;
r2 = boundingr2;
} else {
final double sintheta = r / rad;
final double costheta = c / rad;
// x cos th + y sin th = r =>
// x (xc/r) + y (yc/r) = r (by definition of sin and cos) =>
// x xc + y yc = r^2 =>
// X.Xc = r^2 - (no duh!)
// find the points at the boundaries
// where does the line intersect the left/right boundary
// bc costh + ir sinth = r =>
//
// r - bc costh
// ir = -------------
// sinth
//
            final double leftIntersectingRow = (rad - ((boundingc1) * costheta)) / sintheta;
            final double rightIntersectingRow = (rad - ((boundingc2) * costheta)) / sintheta;
            // where does the line intersect the top/bottom boundary
            // ic costh + br sinth = r =>
            //
            // r - br sinth
            // ic = -------------
            // costh
            //
            final double topIntersectingCol = (rad - ((boundingr1) * sintheta)) / costheta;
            final double botIntersectingCol = (rad - ((boundingr2) * sintheta)) / costheta;
            // now, which pair works the best
            c1 = r1 = -1;
            if(leftIntersectingRow >= boundingr1 && leftIntersectingRow <= boundingr2) {
                c1 = boundingc1;
                r1 = (int)(leftIntersectingRow + 0.5);
            } else if(topIntersectingCol >= boundingc1 && topIntersectingCol <= boundingc2) {
                r1 = boundingr1;
                c1 = (int)(topIntersectingCol + 0.5);
            } else if(rightIntersectingRow >= boundingr1 && rightIntersectingRow <= boundingr2) {
                c1 = boundingc2;
                r1 = (int)(rightIntersectingRow + 0.5);
            } else if(botIntersectingCol >= boundingc1 && botIntersectingCol <= boundingc2) {
                r1 = boundingr2;
                c1 = (int)(botIntersectingCol + 0.5);
            }
            if(c1 == -1 && r1 == -1) // no part of the line intersects the box
                return;
            // now search in the reverse direction for the other point
            c2 = r2 = -1;
            if(botIntersectingCol >= boundingc1 && botIntersectingCol <= boundingc2) {
                r2 = boundingr2;
                c2 = (int)(botIntersectingCol + 0.5);
            } else if(rightIntersectingRow >= boundingr1 && rightIntersectingRow <= boundingr2) {
                c2 = boundingc2;
                r2 = (int)(rightIntersectingRow + 0.5);
            } else if(topIntersectingCol >= boundingc1 && topIntersectingCol <= boundingc2) {
                r2 = boundingr1;
                c2 = (int)(topIntersectingCol + 0.5);
            } else if(leftIntersectingRow >= boundingr1 && leftIntersectingRow <= boundingr2) {
                c2 = boundingc1;
                r2 = (int)(leftIntersectingRow + 0.5);
            }
// now, the two points should not be the same ... but anyway
}
Imgproc.line(ti, new org.opencv.core.Point(c1 + translatec, r1 + translater),
new org.opencv.core.Point(c2 + translatec, r2 + translater),
new Scalar(color.getBlue(), color.getGreen(), color.getRed()));
}
/**
* This method will overlay the {@code overlay} image onto the {@code original}
* image by having the original image show through the overlay, everywhere the
* overlay has a pixel value of zero (or zero in all channels).
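*
* <p>
* For example, a minimal sketch assuming {@code original} and {@code overlay}
* are 8-bit BGR Mats of the same size:
* </p>
*
* <pre>
* <code>
* try(CvMat dst = new CvMat()) {
*     Utils.overlay(original, dst, overlay);
*     // dst now contains the original wherever the overlay is black
* }
* </code>
* </pre>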
*/
public static void overlay(final CvMat original, final CvMat dst, final CvMat overlay) {
try(final CvMat gray = new CvMat();
final CvMat invMask = new CvMat();
final CvMat maskedOrig = new CvMat()) {
Imgproc.cvtColor(overlay, gray, Imgproc.COLOR_BGR2GRAY);
Imgproc.threshold(gray, invMask, 1, 255, Imgproc.THRESH_BINARY_INV);
Core.bitwise_and(original, original, maskedOrig, invMask);
            Core.add(maskedOrig, overlay, dst);
}
}
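    /**
    * Copy the contents of a Mat to a 3D primitive array of shape
    * [rows][cols][channels] where the array's component type corresponds to the
    * Mat's depth. For example, for an 8-bit Mat:
    *
    * <pre>
    * <code>
    * byte[][][] pixels = Utils.toArray(mat, byte[][][].class);
    * </code>
    * </pre>
    */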
@SuppressWarnings("unchecked")
public static <T> T toArray(final Mat mat, final Class<T> clazz) {
final int rows = mat.rows();
if(rows == 0) {
Class<?> component = clazz;
while(component.isArray())
component = component.getComponentType();
return (T)Array.newInstance(component, 0, 0, 0);
}
final int type = mat.type();
final int channels = CvType.channels(type);
final int cols = mat.cols();
return CvMat.rasterOp(mat, raster -> {
final T ret;
switch(CvType.depth(type)) {
case CvType.CV_8S:
case CvType.CV_8U:
ret = (T)Array.newInstance(byte.class, rows, cols, channels);
raster.forEach((BytePixelConsumer)(r, c, p) -> System.arraycopy(p, 0, ((byte[][][])ret)[r][c], 0, channels));
break;
case CvType.CV_16S:
case CvType.CV_16U:
ret = (T)Array.newInstance(short.class, rows, cols, channels);
raster.forEach((ShortPixelConsumer)(r, c, p) -> System.arraycopy(p, 0, ((short[][][])ret)[r][c], 0, channels));
break;
case CvType.CV_32S:
ret = (T)Array.newInstance(int.class, rows, cols, channels);
raster.forEach((IntPixelConsumer)(r, c, p) -> System.arraycopy(p, 0, ((int[][][])ret)[r][c], 0, channels));
break;
case CvType.CV_32F:
ret = (T)Array.newInstance(float.class, rows, cols, channels);
raster.forEach((FloatPixelConsumer)(r, c, p) -> System.arraycopy(p, 0, ((float[][][])ret)[r][c], 0, channels));
break;
case CvType.CV_64F:
ret = (T)Array.newInstance(double.class, rows, cols, channels);
                    raster.forEach((DoublePixelConsumer)(r, c, p) -> System.arraycopy(p, 0, ((double[][][])ret)[r][c], 0, channels));
break;
default:
throw new IllegalArgumentException("Can't handle CvType with value " + CvType.typeToString(type));
}
return ret;
});
}
public static double[] toDoubleArray(final Mat mat) {
if(mat.type() != CvType.CV_64FC1)
throw new IllegalArgumentException("Cannot convert mat " + mat + " to a double[] given the type " + CvType.typeToString(mat.type())
+ " since it must be " + CvType.typeToString(CvType.CV_64FC1));
// we're going to flatten it.
final int r = mat.rows();
final int c = mat.cols();
final int len = r * c;
final double[] ret = new double[len];
try(final CvMat toCvrt = CvMat.move(mat.reshape(0, len));) {
for(int i = 0; i < len; i++)
ret[i] = toCvrt.get(i, 0)[0];
}
return ret;
}
public static double[][] to2dDoubleArray(final Mat mat) {
        // Note: unlike toDoubleArray, there's no type check here; the first channel of
        // each element is copied regardless of the Mat's type.
final int r = mat.rows();
final int c = mat.cols();
final double[][] ret = new double[r][c];
for(int i = 0; i < r; i++)
for(int j = 0; j < c; j++)
ret[i][j] = mat.get(i, j)[0];
return ret;
}
public static CvMat toMat(final double[] a, final boolean row) {
final int len = a.length;
try(final CvMat ret = new CvMat(row ? 1 : len, row ? len : 1, CvType.CV_64FC1);) {
ret.bulkAccess(bb -> {
final DoubleBuffer buffer = bb.asDoubleBuffer();
buffer.put(a);
});
return ret.returnMe();
}
}
public static CvMat toMat(final double[][] a) {
final int rows = a.length;
final int cols = a[0].length;
try(final CvMat ret = new CvMat(rows, cols, CvType.CV_64FC1);) {
ret.bulkAccess(bb -> {
final DoubleBuffer buffer = bb.asDoubleBuffer();
for(int r = 0; r < rows; r++)
buffer.put(a[r]);
});
return ret.returnMe();
}
}
public static CvMat toMat(final float[][] a) {
final int rows = a.length;
final int cols = a[0].length;
try(final CvMat ret = new CvMat(rows, cols, CvType.CV_32FC1);) {
ret.bulkAccess(bb -> {
final FloatBuffer buffer = bb.asFloatBuffer();
for(int r = 0; r < rows; r++)
buffer.put(a[r]);
});
return ret.returnMe();
}
}
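    /**
    * Convert a Mat of 2D points (anything a {@link MatOfPoint2f} can be
    * constructed from) to a 2 x N, CV_64FC1 matrix where each column is a point:
    * x in row 0 and y in row 1.
    */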
    public static CvMat pointsToColumns2D(final Mat undistortedPoints) {
        final MatOfPoint2f matOfPoints = new MatOfPoint2f(undistortedPoints);
try(final QuietCloseable destroyer = () -> CvMat.closeRawMat(matOfPoints);) {
final double[][] points = matOfPoints.toList().stream()
.map(p -> new double[] {p.x,p.y})
.toArray(double[][]::new);
try(CvMat pointsAsMat = Utils.toMat(points);) {
return pointsAsMat.t();
}
}
}
    public static CvMat pointsToColumns3D(final Mat undistortedPoints) {
        final MatOfPoint3f matOfPoints = new MatOfPoint3f(undistortedPoints);
try(final QuietCloseable destroyer = () -> CvMat.move(matOfPoints).close();) {
final double[][] points = matOfPoints.toList().stream()
.map(p -> new double[] {p.x,p.y,p.z})
.toArray(double[][]::new);
try(CvMat pointsAsMat = Utils.toMat(points);) {
return pointsAsMat.t();
}
}
}
public static Size scaleDownOrNothing(final Mat mat, final Size newSize) {
return scaleDownOrNothing(mat.size(), newSize);
}
public static Size scaleDownOrNothing(final Size originalMatSize, final Size newSize) {
// calculate the appropriate resize
final double fh = newSize.height / originalMatSize.height;
final double fw = newSize.width / originalMatSize.width;
final double scale = fw < fh ? fw : fh;
return (scale >= 1.0) ? new Size(originalMatSize.width, originalMatSize.height)
: new Size(Math.round(originalMatSize.width * scale), Math.round(originalMatSize.height * scale));
}
public static Size scaleWhilePreservingAspectRatio(final Mat mat, final Size maxSize, final MutableDouble scaleOut) {
return scaleWhilePreservingAspectRatio(mat.size(), maxSize, true, scaleOut);
}
public static Size scaleWhilePreservingAspectRatio(final Mat mat, final Size maxSize) {
return scaleWhilePreservingAspectRatio(mat.size(), maxSize, true, null);
}
public static Size scaleWhilePreservingAspectRatio(final Mat mat, final Size maxSize, final boolean round) {
return scaleWhilePreservingAspectRatio(mat.size(), maxSize, round, null);
}
public static double scaleFactorWhilePreservingAspectRatio(final Mat mat, final Size maxSize) {
return scaleFactorWhilePreservingAspectRatio(mat.size(), maxSize);
}
/**
*
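* Compute the size of a scaled image that fits within {@code maxSize} while
* preserving the aspect ratio of {@code originalMatSize}. For example, scaling
* a 1920x1080 image to fit within 640x640 yields a scale factor of 640/1920 =
* 1/3 and a result of 640x360.
*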
* @param originalMatSize The size of the original image
* @param maxSize The maximum desired size of the new image
* @return The size of the new image: it matches the height or width of {@code maxSize} such that the image does not exceed those dimensions while
*     preserving the aspect ratio.
*/
public static Size scaleWhilePreservingAspectRatio(final Size originalMatSize, final Size maxSize) {
// calculate the appropriate resize
return scaleWhilePreservingAspectRatio(originalMatSize, maxSize, true, null);
}
public static Size scaleWhilePreservingAspectRatio(final Size originalMatSize, final Size maxSize, final boolean round, final MutableDouble scaleOut) {
// calculate the appropriate resize
final double scale = scaleFactorWhilePreservingAspectRatio(originalMatSize, maxSize);
if(scaleOut != null)
scaleOut.val = scale;
return round ? new Size(Math.round(originalMatSize.width * scale), Math.round(originalMatSize.height * scale))
: new Size((long)(originalMatSize.width * scale), (long)(originalMatSize.height * scale));
}
public static double scaleFactorWhilePreservingAspectRatio(final Size originalMatSize, final Size maxSize) {
// calculate the appropriate resize
final double fh = maxSize.height / originalMatSize.height;
final double fw = maxSize.width / originalMatSize.width;
return Math.min(fw, fh);
}
/**
* Copy the entire image to a primitive array of the appropriate type.
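* For example, a CV_16UC1 Mat yields a {@code short[]} of length
* {@code rows * cols} in row-major order:
*
* <pre>
* <code>
* short[] data = Utils.copyToPrimitiveArray(mat);
* </code>
* </pre>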
*/
@SuppressWarnings("unchecked")
public static <T> T copyToPrimitiveArray(final Mat m) {
final int rows = m.rows();
final int cols = m.cols();
final int type = m.type();
final int channels = CvType.channels(type);
final int depth = CvType.depth(type);
switch(depth) {
case CvType.CV_8S:
case CvType.CV_8U: {
final byte[] data = new byte[rows * cols * channels];
m.get(0, 0, data);
return (T)data;
}
case CvType.CV_16U:
case CvType.CV_16S: {
final short[] data = new short[rows * cols * channels];
m.get(0, 0, data);
return (T)data;
}
case CvType.CV_32S: {
final int[] data = new int[rows * cols * channels];
m.get(0, 0, data);
return (T)data;
}
case CvType.CV_32F: {
final float[] data = new float[rows * cols * channels];
m.get(0, 0, data);
return (T)data;
}
case CvType.CV_64F: {
final double[] data = new double[rows * cols * channels];
m.get(0, 0, data);
return (T)data;
}
default:
throw new IllegalArgumentException("Can't handle CvType with value " + CvType.typeToString(type));
}
}
private static Point closest(final Point x, final double perpRefX, final double perpRefY) {
// Here we use the description for the perpendicularDistance.
// if we translate X0 to the origin then Xi' (defined as
// Xi translated by X0) will be at |P| - (P.X0)/|P| (which
// is the signed magnitude of the X0 - Xi where the sign will
// be positive if X0 X polar(P) is positive and negative
// otherwise (that is, if X0 is on the "lower" side of the polar
// line described by P)) along P itself. So:
//
// Xi' = (|P| - (P.X0)/|P|) Pu = (|P| - (P.X0)/|P|) P/|P|
// = (1 - (P.X0)/|P|^2) P (where Pu is the unit vector in the P direction)
//
// then we can translate it back by X0 so that gives:
//
// Xi = (1 - (P.X0)/|P|^2) P + X0 = c P + X0
// where c = (1 - (P.X0)/|P|^2)
final double Pmagsq = (perpRefX * perpRefX) + (perpRefY * perpRefY);
final double PdotX0 = (x.y() * perpRefY) + (x.x() * perpRefX);
final double c = (1.0 - (PdotX0 / Pmagsq));
return new SimplePoint((c * perpRefY) + x.y(), (c * perpRefX) + x.x());
}
    // All DirectColorModel values are stored RGBA. We want them reorganized as BGRA.
private static int[] bgraOrderDcm = {2,1,0,3};
private static void dePreMultiplyAlpha(final Mat ret, final double maxValue, final int componentDepth) {
try(Closer c = new Closer();) {
final List<Mat> channels = new ArrayList<>(4);
Core.split(ret, channels);
channels.forEach(ch -> c.addMat(ch));
            dePreMultiplyAlpha(channels, maxValue, componentDepth);
Core.merge(channels, ret);
}
}
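    /**
    * Undo alpha premultiplication channel-wise: C = Cpre * maxValue / alpha.
    * Assumes channel 3 holds the alpha channel and that alpha values range from
    * 0 to maxValue.
    */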
private static void dePreMultiplyAlpha(final List<Mat> channels, final double maxValue, final int componentDepth) {
final Mat alpha = channels.get(3);
// dump(alpha, 64, 64);
for(int ch = 0; ch < 3; ch++) {
final Mat cur = channels.get(ch);
Core.divide(cur, alpha, cur, maxValue);
cur.convertTo(cur, CvType.makeType(componentDepth, 1));
}
alpha.convertTo(alpha, CvType.makeType(componentDepth, 1));
}
// The DirectColorModel mask array is returned as R,G,B,A. This method expects
// it in that order.
private static CvMat transformDirect(final CvMat rawMat, final boolean hasAlpha, final boolean isAlphaPremultiplied, final int[] rgbaMasks) {
if(LOGGER.isTraceEnabled())
LOGGER.trace("transformDirect: {} and has alpha {}", rawMat, hasAlpha);
final int numChannels = rgbaMasks.length;
// According to the docs on DirectColorModel the type MUST be TYPE_RGB which
// means 3 channels or 4 if there's an alpha.
final int expectedNumberOfChannels = hasAlpha ? 4 : 3;
if(expectedNumberOfChannels != numChannels)
throw new IllegalArgumentException("The DirectColorModel doesn't seem to contain either 3 or 4 channels. It has " + numChannels);
// Fetch the masks and bitsPerChannel in the OCV BGRA order.
final int[] bgraMasks = new int[hasAlpha ? 4 : 3];
final int[] bitsPerChannel = new int[hasAlpha ? 4 : 3];
for(int rgbch = 0; rgbch < bgraMasks.length; rgbch++) {
final int mask = rgbaMasks[rgbch];
bgraMasks[bgraOrderDcm[rgbch]] = mask;
bitsPerChannel[bgraOrderDcm[rgbch]] = Integer.bitCount(mask);
}
final int[] shifts = determineShifts(bgraMasks);
// check if any channel has a bits-per-channel > 16
if(Arrays.stream(bitsPerChannel)
.filter(v -> v > 16)
.findAny()
.isPresent())
throw new IllegalArgumentException("The image with the DirectColorModel has a channel with more than 16 bits " + bitsPerChannel);
double maxValue = 255.0D;
int componentDepth = CV_8U;
for(int i = 0; i < bitsPerChannel.length; i++) {
final int n = bitsPerChannel[i];
if(n > 8) {
componentDepth = CV_16U;
maxValue = 65535.0D;
break;
}
}
// System.out.println("Raw Mat");
// dump(rawMat, 5, 5);
try(final CvMat remergedMat = new CvMat();
Closer closer = new Closer()) {
// we're going to separate the channels into separate Mat's by masking
final List<Mat> channels = new ArrayList<>(numChannels);
for(int ch = 0; ch < numChannels; ch++) {
try(CvMat tmpCurChannel = new CvMat();) {
bitwiseUnsignedRightShiftAndMask(rawMat, tmpCurChannel, shifts[ch], bitsPerChannel[ch]);
// if the bits don't take up the entire channel then we need to scale them.
// for example, if we have a 4/4/4 image we need to scale the results to 8 bits.
final CvMat curChannel = closer.add(new CvMat());
tmpCurChannel.convertTo(curChannel, CvType.makeType(componentDepth, 1));
// System.out.println("Channel " + ch + " pre scaled:");
// dump(curChannel, 3, 27);
// This will scale the maximum value given the field to the maximum value
// of the final field.
final double scale = maxValue / ((1 << bitsPerChannel[ch]) - 1);
Core.multiply(curChannel, new Scalar(scale), curChannel);
// System.out.println("Channel " + ch + " scaled by " + scale);
// dump(curChannel, 5, 5);
channels.add(curChannel);
}
}
if(isAlphaPremultiplied)
dePreMultiplyAlpha(channels, maxValue, componentDepth);
// now merge the channels
Core.merge(channels, remergedMat);
// System.out.println("Remerged Mat: ");
// dump(remergedMat, 5, 5);
return remergedMat.returnMe();
}
}
private static Field getStaticField(final String fieldName, final Class<?>... classes) {
final Field field = Arrays.stream(classes)
.map(c -> {
try {
return c.getDeclaredField(fieldName);
} catch(final NoSuchFieldException nsfe) {
return null;
}
})
.filter(f -> f != null)
.findFirst()
.orElse(null);
if(field == null)
throw new IllegalStateException("The version of OpenCV defined as a dependency doesn't seem to have " + fieldName
+ " defined in any of these classes: " + Arrays.toString(classes));
return field;
}
private static Method getStaticMethod(final String methodName, final Class<?>[] parameters, final Class<?>... classes) {
final Method method = Arrays.stream(classes)
.map(c -> {
try {
return c.getDeclaredMethod(methodName, parameters);
} catch(final NoSuchMethodException nsfe) {
return null;
}
})
.filter(f -> f != null)
.findFirst()
.orElse(null);
if(method == null)
throw new IllegalStateException("The version of OpenCV defined as a dependency doesn't seem to have the method " + methodName
+ " defined in any of these classes: " + Arrays.toString(classes));
return method;
}
private static CvMat abgrDataBufferByteToMat(final DataBufferByte bb, final int h, final int w, final boolean hasAlpha) {
try(final CvMat retMat = new CvMat(h, w, hasAlpha ? CvType.CV_8UC4 : CvType.CV_8UC3);) {
final byte[] inpixels = bb.getData();
retMat.put(0, 0, inpixels);
if(!hasAlpha) { // indicates a pixel compatible format since the only option is TYPE_3BYTE_BGR
return retMat.returnMe();
} else { // then it's ABGR -> BGRA
try(final CvMat reshaped = CvMat.move(retMat.reshape(1, h * w));
                // type to 32F so we can multiply it by the matrix. It would be nice if
                // there were an integer 'gemm' call on Imgproc
final CvMat typed = Functional.chain(new CvMat(), m -> reshaped.convertTo(m, CvType.CV_32F));
// color transform which just reorganizes the pixels.
final CvMat xformed = typed.mm(abgr2bgra);
// reshape it back to a 4 channel image
final CvMat xformedAndShaped = CvMat.move(xformed.reshape(4, h));
// convert the type back to CV_8UC4
final CvMat it = Functional.chain(new CvMat(), m -> xformedAndShaped.convertTo(m, CvType.depth(retMat.type())));) {
return it.returnMe();
}
}
}
}
private static CvMat putDataBufferIntoMat(final DataBuffer bb, final int h, final int w, final int numChannels) {
if(bb instanceof DataBufferByte)
return dataBufferByteToMat((DataBufferByte)bb, h, w, numChannels, false);
else if(bb instanceof DataBufferUShort)
return dataBufferUShortToMat((DataBufferUShort)bb, h, w, numChannels, false);
else if(bb instanceof DataBufferInt)
return dataBufferIntToMat((DataBufferInt)bb, h, w, numChannels, false);
else if(bb instanceof DataBufferFloat)
return dataBufferFloatToMat((DataBufferFloat)bb, h, w, numChannels, false);
else
throw new IllegalArgumentException("Can't handle a DataBuffer of type " + bb.getClass().getSimpleName());
}
private final static int[] rgb2bgr = {2,1,0};
private final static int[] argb2bgra = {3,2,1,0};
private static CvMat dataBufferFloatToMat(final DataBufferFloat bb, final int h, final int w, final int numChannels, final boolean rgbType) {
final float[][] bankdata = bb.getBankData();
if(bankdata.length == 1) {
try(final CvMat mat = new CvMat(h, w, CvType.makeType(CvType.CV_32F, numChannels));) {
final float[] inpixels = bb.getData();
mat.put(0, 0, inpixels);
if(rgbType) {
if(numChannels == 3)
Imgproc.cvtColor(mat, mat, Imgproc.COLOR_RGB2BGR);
if(numChannels == 4)
Imgproc.cvtColor(mat, mat, Imgproc.COLOR_RGBA2BGRA);
}
return mat.returnMe();
}
} else {
// bank data must correspond to the channels.
final int[] lookup = rgbType ? (numChannels == 4 ? argb2bgra : (numChannels == 3 ? rgb2bgr : null)) : null;
if(numChannels != bankdata.length)
throw new IllegalStateException("Can't handle a BufferedImage where the data is in banks but it's not 1 per channel. The number of channels is "
+ numChannels + " while the number of banks is " + bankdata.length);
try(Closer closer = new Closer();
CvMat ret = new CvMat();) {
final List<Mat> channels = new ArrayList<>(numChannels);
for(int ch = 0; ch < numChannels; ch++) {
final CvMat cur = closer.add(new CvMat(h, w, CvType.CV_32FC1));
if(lookup == null)
cur.put(0, 0, bankdata[ch]);
else
cur.put(0, 0, bankdata[lookup[ch]]);
channels.add(cur);
}
Core.merge(channels, ret);
return ret.returnMe();
}
}
}
private static CvMat dataBufferIntToMat(final DataBufferInt bb, final int h, final int w, final int numChannels, final boolean rgbType) {
final int[][] bankdata = bb.getBankData();
if(bankdata.length == 1) {
try(final Closer closer = new Closer();
final CvMat mat = new CvMat(h, w, CvType.makeType(CvType.CV_32S, numChannels));) {
final int[] inpixels = bb.getData();
mat.put(0, 0, inpixels);
if(rgbType) {
final List<Mat> channels = new ArrayList<>(numChannels);
Core.split(mat, channels);
channels.forEach(m -> closer.addMat(m));
final List<Mat> bgrChannels = new ArrayList<>();
bgrChannels.add(channels.get(2));
bgrChannels.add(channels.get(1));
bgrChannels.add(channels.get(0));
if(numChannels > 3) {
for(int i = 3; i < numChannels; i++)
bgrChannels.add(channels.get(i));
}
Core.merge(bgrChannels, mat);
}
return mat.returnMe();
}
} else {
// bank data must correspond to the channels.
final int[] lookup = rgbType ? (numChannels == 4 ? argb2bgra : (numChannels == 3 ? rgb2bgr : null)) : null;
if(numChannels != bankdata.length)
throw new IllegalStateException("Can't handle a BufferedImage where the data is in banks but it's not 1 per channel. The number of channels is "
+ numChannels + " while the number of banks is " + bankdata.length);
try(Closer closer = new Closer();
CvMat ret = new CvMat();) {
final List<Mat> channels = new ArrayList<>(numChannels);
for(int ch = 0; ch < numChannels; ch++) {
final CvMat cur = closer.add(new CvMat(h, w, CvType.CV_32SC1));
if(lookup == null)
cur.put(0, 0, bankdata[ch]);
else
cur.put(0, 0, bankdata[lookup[ch]]);
channels.add(cur);
}
Core.merge(channels, ret);
return ret.returnMe();
}
}
}
private static CvMat dataBufferUShortToMat(final DataBufferUShort bb, final int h, final int w, final int numChannels, final boolean rgbType) {
final short[][] bankdata = bb.getBankData();
return doDataBufferUShortToMat(bankdata, h, w, numChannels, rgbType);
}
private static CvMat doDataBufferUShortToMat(final short[][] bankdata, final int h, final int w, final int numChannels, final boolean rgbType) {
if(bankdata.length == 1) {
try(final CvMat mat = new CvMat(h, w, CvType.makeType(CvType.CV_16U, numChannels));) {
final short[] inpixels = bankdata[0];
mat.put(0, 0, inpixels);
if(rgbType) {
if(numChannels == 3)
Imgproc.cvtColor(mat, mat, Imgproc.COLOR_RGB2BGR);
if(numChannels == 4)
Imgproc.cvtColor(mat, mat, Imgproc.COLOR_RGBA2BGRA);
if(numChannels > 4) {
// ugh!
try(Closer c = new Closer();) {
final List<Mat> channels = new ArrayList<>();
Core.split(mat, channels);
channels.forEach(ch -> c.addMat(ch));
final ArrayList<Mat> newChannels = new ArrayList<>(channels);
newChannels.set(0, channels.get(2));
newChannels.set(2, channels.get(0));
Core.merge(newChannels, mat);
}
}
}
return mat.returnMe();
}
} else {
// bank data must correspond to the channels.
final int[] lookup = rgbType ? (numChannels == 4 ? argb2bgra : (numChannels == 3 ? rgb2bgr : null)) : null;
if(numChannels != bankdata.length)
throw new IllegalStateException("Can't handle a BufferedImage where the data is in banks but it's not 1 per channel. The number of channels is "
+ numChannels + " while the number of banks is " + bankdata.length);
try(Closer closer = new Closer();
CvMat ret = new CvMat();) {
final List<Mat> channels = new ArrayList<>(numChannels);
for(int ch = 0; ch < numChannels; ch++) {
final CvMat cur = closer.add(new CvMat(h, w, CvType.CV_16UC1));
if(lookup == null)
cur.put(0, 0, bankdata[ch]);
else
cur.put(0, 0, bankdata[lookup[ch]]);
channels.add(cur);
}
Core.merge(channels, ret);
return ret.returnMe();
}
}
}
private static CvMat dataBufferShortToMat(final DataBufferShort bb, final int h, final int w, final int numChannels, final boolean rgbType) {
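// Note: XOR-ing a signed short with Short.MIN_VALUE (0x8000) flips the sign bit,
// which maps the signed value s to the unsigned value s + 32768. That is exactly
// the offset needed to reinterpret signed short data as CV_16U, which is what the
// bitwise_xor below does in bulk.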
try(CvMat ret = doDataBufferUShortToMat(bb.getBankData(), h, w, numChannels, rgbType);
CvMat mask = new CvMat();) {
makeScalarMat(Short.MIN_VALUE, CvType.CV_16UC1, mask);
Core.bitwise_xor(ret, mask, ret);
return ret.returnMe();
}
}
private static CvMat dataBufferByteToMat(final DataBufferByte bb, final int h, final int w, final int numChannels, final boolean rgbType) {
final byte[][] bankdata = bb.getBankData();
if(bankdata.length == 1) {
try(final CvMat mat = new CvMat(h, w, CvType.makeType(CvType.CV_8U, numChannels));) {
final byte[] inpixels = bankdata[0];
mat.put(0, 0, inpixels);
if(rgbType) {
if(numChannels == 3)
Imgproc.cvtColor(mat, mat, Imgproc.COLOR_RGB2BGR);
if(numChannels == 4)
Imgproc.cvtColor(mat, mat, Imgproc.COLOR_RGBA2BGRA);
if(numChannels > 4) {
// ugh!
try(Closer c = new Closer();) {
final List<Mat> channels = new ArrayList<>();
Core.split(mat, channels);
channels.forEach(ch -> c.addMat(ch));
final ArrayList<Mat> newChannels = new ArrayList<>(channels);
newChannels.set(0, channels.get(2));
newChannels.set(2, channels.get(0));
Core.merge(newChannels, mat);
}
}
}
return mat.returnMe();
}
} else {
// bank data must correspond to the channels.
final int[] lookup = rgbType ? (numChannels == 4 ? argb2bgra : (numChannels == 3 ? rgb2bgr : null)) : null;
if(numChannels != bankdata.length)
throw new IllegalStateException("Can't handle a BufferedImage where the data is in banks but it's not 1 per channel. The number of channels is "
+ numChannels + " while the number of banks is " + bankdata.length);
try(Closer closer = new Closer();
CvMat ret = new CvMat();) {
final List<Mat> channels = new ArrayList<>(numChannels);
for(int ch = 0; ch < numChannels; ch++) {
final CvMat cur = closer.add(new CvMat(h, w, CvType.CV_8UC1));
if(lookup == null)
cur.put(0, 0, bankdata[ch]);
else
cur.put(0, 0, bankdata[lookup[ch]]);
channels.add(cur);
}
Core.merge(channels, ret);
return ret.returnMe();
}
}
}
}
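/*
 * A minimal sketch (not part of the original source) of which DataBuffer subtype
 * the dispatch in putDataBufferIntoMat() will see for common BufferedImage types;
 * the class name DataBufferProbe and the 4x4 size are placeholders.
 *
 *   import java.awt.image.BufferedImage;
 *   import java.awt.image.DataBuffer;
 *
 *   public class DataBufferProbe {
 *       public static void main(final String[] args) {
 *           // TYPE_3BYTE_BGR and TYPE_4BYTE_ABGR are backed by DataBufferByte,
 *           // TYPE_USHORT_GRAY by DataBufferUShort, and TYPE_INT_RGB by DataBufferInt.
 *           for(final int type: new int[] {BufferedImage.TYPE_3BYTE_BGR, BufferedImage.TYPE_4BYTE_ABGR,
 *               BufferedImage.TYPE_USHORT_GRAY, BufferedImage.TYPE_INT_RGB}) {
 *               final DataBuffer db = new BufferedImage(4, 4, type).getRaster().getDataBuffer();
 *               System.out.println(type + " -> " + db.getClass().getSimpleName());
 *           }
 *       }
 *   }
 */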
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/VideoFrame.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import org.opencv.imgproc.Imgproc;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import net.dempsy.util.QuietCloseable;
public class VideoFrame extends CvMat {
private static final Logger LOGGER = LoggerFactory.getLogger(VideoFrame.class);
public long decodeTimeMillis;
private final Pool pool;
private boolean isInPool = false;
private RuntimeException rtpStackTrace = null;
private boolean skipCloseOnceForReturn = false;
private long frameNumber;
public final boolean isRgb;
public VideoFrame(final long nativeObj, final long decodeTimeMillis, final long frameNumber, final boolean isRgb) {
super(nativeObj);
this.pool = null;
this.decodeTimeMillis = decodeTimeMillis;
this.frameNumber = frameNumber;
this.isRgb = isRgb;
}
public VideoFrame(final long decodeTimeMillis, final long frameNumber, final boolean isRgb) {
super();
this.pool = null;
this.decodeTimeMillis = decodeTimeMillis;
this.frameNumber = frameNumber;
this.isRgb = isRgb;
}
private VideoFrame(final Pool pool, final int h, final int w, final int type, final long decodeTimeMillis, final long frameNumber, final boolean isRgb) {
super(h, w, type);
this.pool = pool;
this.decodeTimeMillis = decodeTimeMillis;
this.frameNumber = frameNumber;
this.isRgb = isRgb;
}
public static VideoFrame create(final int rows, final int cols, final int type, final long pointer, final long decodeTimeMillis, final long frameNumber,
final boolean isRgb) {
final long nativeObj = ImageAPI.pilecv4j_image_CvRaster_makeMatFromRawDataReference(rows, cols, type, pointer);
if(nativeObj == 0)
throw new NullPointerException("Cannot create a CvMat from a null pointer data buffer.");
return VideoFrame.wrapNativeVideoFrame(nativeObj, decodeTimeMillis, frameNumber, isRgb);
}
public VideoFrame rgb(final boolean garanteeDeepCopy) {
if(!isRgb) {
if(LOGGER.isTraceEnabled())
LOGGER.trace("Converting {} from BGR to RGB. {}", VideoFrame.class.getSimpleName(), toString());
return swapBgrRgb();
}
if(LOGGER.isTraceEnabled())
LOGGER.trace("Returning {} in RGB as-is. {}", VideoFrame.class.getSimpleName(), toString());
return garanteeDeepCopy ? this.deepCopy() : this.shallowCopy();
}
public VideoFrame bgr(final boolean garanteeDeepCopy) {
if(isRgb) {
if(LOGGER.isTraceEnabled())
LOGGER.trace("Converting {} from RGB to BGR. {}", VideoFrame.class.getSimpleName(), toString());
return swapBgrRgb();
}
if(LOGGER.isTraceEnabled())
LOGGER.trace("Returning {} in BGR as-is. {}", VideoFrame.class.getSimpleName(), toString());
return garanteeDeepCopy ? this.deepCopy() : this.shallowCopy();
}
private VideoFrame swapBgrRgb() {
if(channels() != 3) {
throw new IllegalArgumentException("Can only convert a 3 channel image from RGB to BGR or vice versa.");
}
try(final VideoFrame swapped = new VideoFrame(decodeTimeMillis, frameNumber, !isRgb)) {
Imgproc.cvtColor(this, swapped, isRgb ? Imgproc.COLOR_RGB2BGR : Imgproc.COLOR_BGR2RGB);
return swapped.returnMe();
}
}
private VideoFrame leavingPool(final long decodeTimeMillis, final long frameNumber) {
this.decodeTimeMillis = decodeTimeMillis;
this.frameNumber = frameNumber;
if(TRACK_MEMORY_LEAKS) {
rtpStackTrace = null;
}
isInPool = false;
return this;
}
public static class Pool implements AutoCloseable {
public final int h;
public final int w;
public final boolean isRgb;
public final int type;
private final AtomicReference<ConcurrentLinkedQueue<VideoFrame>> resources = new AtomicReference<>(new ConcurrentLinkedQueue<>());
private boolean closed = false;
private final AtomicLong totalSize = new AtomicLong(0);
private final AtomicLong resident = new AtomicLong(0);
private Pool(final int h, final int w, final int type, final boolean isRgb) {
this.h = h;
this.w = w;
this.type = type;
this.isRgb = isRgb;
}
public VideoFrame get(final long decodeTimeMillis, final long frameNumber) {
final ConcurrentLinkedQueue<VideoFrame> lpool = getPool();
if(lpool == null) // we're closed
throw new IllegalStateException("VideoFrame Pool is shut down");
try(QuietCloseable qc = () -> resources.set(lpool)) {
final VideoFrame ret = lpool.poll();
if(ret == null) {
totalSize.incrementAndGet();
return new VideoFrame(this, h, w, type, decodeTimeMillis, frameNumber, isRgb);
}
resident.decrementAndGet();
return ret.leavingPool(decodeTimeMillis, frameNumber);
}
}
// called from VF close
private void returnToPool(final VideoFrame vf) {
final ConcurrentLinkedQueue<VideoFrame> lpool = getPool();
if(lpool == null) // we're closed
vf.reallyClose();
else {
try(final QuietCloseable qc = () -> resources.set(lpool);) {
lpool.add(vf);
vf.isInPool = true;
resident.incrementAndGet();
}
}
}
@Override
public void close() {
final ConcurrentLinkedQueue<VideoFrame> lpool = getPool();
if(lpool != null) {
closed = true;
lpool.stream().forEach(f -> f.reallyClose());
lpool.clear();
} // else, if lpool is null then another thread already closed this
}
public long totalSize() {
return totalSize.get();
}
public long numResident() {
return resident.get();
}
private ConcurrentLinkedQueue<VideoFrame> getPool() {
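// The AtomicReference is used as a hand-over-hand lock: getAndSet(null) "takes"
// the queue, and callers restore it via resources.set(lpool) when they're done.
// Spin until the queue is available, or bail out with null once closed.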
while(!closed) {
final ConcurrentLinkedQueue<VideoFrame> ret = resources.getAndSet(null);
if(ret != null)
return ret;
}
return null;
}
}
public static Pool getPool(final int h, final int w, final int type, final boolean isRgb) {
return new Pool(h, w, type, isRgb);
}
public long frameNumber() {
return frameNumber;
}
@Override
public String toString() {
return VideoFrame.class.getSimpleName() + ": (" + getClass().getName() + "@" + Integer.toHexString(hashCode()) + ")"
+ super.toString();
}
@Override
public VideoFrame returnMe() {
// hacky, yet efficient.
skipCloseOnceForReturn = true;
return this;
}
@Override
public void close() {
if(skipCloseOnceForReturn) {
skipCloseOnceForReturn = false;
return;
}
if(isInPool) {
LOGGER.warn("VideoFrame being closed twice at ", new RuntimeException());
if(TRACK_MEMORY_LEAKS) {
LOGGER.warn("TRACKING: originally returned to pool at:", rtpStackTrace);
LOGGER.warn("TRACKING: create at: ", stackTrace);
}
} else {
rtpStackTrace = TRACK_MEMORY_LEAKS ? new RuntimeException("VideoFrame Returned to pool here") : null;
if(pool == null) {
reallyClose();
} else {
pool.returnToPool(this);
}
}
}
private void reallyClose() {
super.close();
}
public VideoFrame pooledDeepCopy(final Pool ppool) {
final VideoFrame newMat = ppool.get(decodeTimeMillis, frameNumber);
if(rows() != 0)
copyTo(newMat);
return newMat;
}
public VideoFrame deepCopy() {
final VideoFrame newMat = pool == null ? new VideoFrame(decodeTimeMillis, frameNumber, isRgb) : pool.get(decodeTimeMillis, frameNumber);
if(rows() != 0)
copyTo(newMat);
return newMat;
}
public VideoFrame shallowCopy() {
return new VideoFrame(ImageAPI.pilecv4j_image_CvRaster_copy(nativeObj), decodeTimeMillis, frameNumber, isRgb);
}
public static VideoFrame wrapNativeVideoFrame(final long nativeObj, final long decodeTimeMillis, final long frameNumber, final boolean isRgb) {
return new VideoFrame(nativeObj, decodeTimeMillis, frameNumber, isRgb);
}
}
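/*
 * A minimal usage sketch (not part of the original file) of the Pool defined above.
 * Because close() on a pooled frame hands it back to the pool instead of freeing
 * the native Mat, try-with-resources recycles frames; the sizes, type, and
 * timestamps below are placeholders.
 *
 *   try(final VideoFrame.Pool pool = VideoFrame.getPool(480, 640, org.opencv.core.CvType.CV_8UC3, false)) {
 *       for(long frameNum = 0; frameNum < 10; frameNum++) {
 *           try(final VideoFrame vf = pool.get(System.currentTimeMillis(), frameNum)) {
 *               // ... process vf ...
 *           } // close() returns vf to the pool here
 *       }
 *       // pool.totalSize() counts every frame the pool ever allocated, while
 *       // pool.numResident() counts the frames currently parked in it.
 *   }
 */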
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/display/CvImageDisplay.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image.display;
import static net.dempsy.util.Functional.ignore;
import static net.dempsy.util.Functional.uncheck;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import org.opencv.core.Mat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import ai.kognition.pilecv4j.image.CvMat;
import ai.kognition.pilecv4j.image.ImageAPI;
public class CvImageDisplay extends ImageDisplay {
static {
CvMat.initOpenCv();
}
private static Logger LOGGER = LoggerFactory.getLogger(CvImageDisplay.class);
// ==============================================================
// This is basically a single-threaded executor, but we also need to poll
// cv::waitKey or nothing happens in OpenCv::HighGUI
private static ArrayBlockingQueue<Consumer<WindowsState>> commands = new ArrayBlockingQueue<>(2);
public static AtomicBoolean stillRunningEvents = new AtomicBoolean(true);
private final CountDownLatch countDown = new CountDownLatch(1);
@FunctionalInterface
private static interface CvKeyPressCallback {
public String keyPressed(int keyPressed);
}
private static class WindowsState {
final Map<String, CvImageDisplay> windows = new HashMap<>();
final Map<String, CvKeyPressCallback> callbacks = new HashMap<>();
void remove(final String n) {
callbacks.remove(n);
windows.remove(n);
}
}
static {
final WindowsState state = new WindowsState();
ImageDisplay.addEventPollingRunnable(() -> {
if(stillRunningEvents.get()) {
ImageDisplay.syncExec(() -> {
// System.out.println(state.windows);
try {
if(state.windows.size() > 0) {
// then we can check for a key press.
final int key = ImageAPI.pilecv4j_image_CvRaster_fetchEvent(1);
final Set<String> toCloseUp = state.callbacks.values().stream()
.map(cb -> cb.keyPressed(key))
.filter(n -> n != null)
.collect(Collectors.toSet());
toCloseUp.addAll(state.windows.keySet().stream()
.filter(ImageAPI::pilecv4j_image_CvRaster_isWindowClosed)
.collect(Collectors.toSet()));
toCloseUp.forEach(n -> {
// need to close the window and cleanup.
ImageAPI.pilecv4j_image_CvRaster_destroyWindow(n);
final CvImageDisplay id = state.windows.get(n);
if(id != null)
id.closeNow.set(true);
state.remove(n);
if(id != null)
id.close();
});
} else
ignore(() -> Thread.sleep(1));
// we need to check to see if there's any commands to execute.
final Consumer<WindowsState> cmd = commands.poll();
if(cmd != null) {
try {
cmd.accept(state);
} catch(final Exception e) {
LOGGER.error("OpenCv::HighGUI command \"{}\" threw an excetion.", cmd, e);
}
} else
ignore(() -> Thread.sleep(1));
} catch(final Throwable th) {
LOGGER.error("OpenCv::HighGUI CRITICAL ERROR! But yet, I persist.", th);
}
});
}
});
}
// ==============================================================
private boolean closed = false;
private final ShowKeyPressCallback callback;
private Runnable closeCallback;
private final String name;
private boolean shownYet = false;
private final AtomicBoolean closeNow = new AtomicBoolean(false);
CvImageDisplay(final Mat mat, final String name, final Runnable closeCallback, final KeyPressCallback kpCallback) {
this.closeCallback = closeCallback;
this.name = name;
// create a callback that ignores the keypress but polls the state of the closeNow
this.callback = new ShowKeyPressCallback(this, kpCallback);
if(mat != null) {
doShow(mat, name, callback);
while(!callback.shown.get())
Thread.yield();
}
}
@Override
public void setCloseCallback(final Runnable closeCallback) {
this.closeCallback = closeCallback;
}
@Override
public void waitUntilClosed() throws InterruptedException {
countDown.await();
}
@Override
public void close() {
if(!closed) {
LOGGER.trace("Closing window \"{}\"", name);
countDown.countDown();
closed = true;
closeNow.set(true);
if(closeCallback != null)
closeCallback.run();
}
}
@Override
public void update(final Mat toUpdate) {
if(closed) {
LOGGER.trace("Attempting to update a closed window with {}", toUpdate);
return;
}
if(closeNow.get() && !closed) {
close();
LOGGER.debug("Attempting to update a closed window with {}", toUpdate);
return;
}
if(!shownYet) {
synchronized(this) {
if(!shownYet)
doShow(toUpdate, name, callback);
}
}
// Shallow copy
final CvMat old = callback.update.getAndSet(CvMat.shallowCopy(toUpdate));
if(old != null)
old.close();
}
private void doShow(final Mat mat, final String name, final CvKeyPressCallback callback) {
LOGGER.trace("Showing image {} in window {} ", mat, name);
// There is a problem with resource management since the mat is being passed to another thread
final CvMat omat = CvMat.shallowCopy(mat);
uncheck(() -> commands.put(s -> {
try(CvMat lmat = omat) {
ImageAPI.pilecv4j_image_CvRaster_showImage(name, omat.nativeObj);
// if we got here, we're going to assume the window was created.
if(callback != null)
s.callbacks.put(name, callback);
s.windows.put(name, this);
}
}));
shownYet = true;
}
// a callback that ignores the keypress but polls the state of the closeNow
private static class ShowKeyPressCallback implements CvKeyPressCallback {
final AtomicBoolean shown = new AtomicBoolean(false);
final AtomicReference<CvMat> update = new AtomicReference<>(null);
final KeyPressCallback keyPressCallback;
final CvImageDisplay window;
private boolean shownSet = false;
private ShowKeyPressCallback(final CvImageDisplay window, final KeyPressCallback keyPressCallback) {
this.window = window;
this.keyPressCallback = keyPressCallback;
}
@Override
public String keyPressed(final int kp) {
// the window is shown by the time we get here.
if(!shownSet) {
shown.set(true);
shownSet = true;
}
try(final CvMat toUpdate = update.getAndSet(null);) {
if(toUpdate != null)
ImageAPI.pilecv4j_image_CvRaster_updateWindow(window.name, toUpdate.nativeObj);
}
if(keyPressCallback != null && kp >= 0) {
if(keyPressCallback.keyPressed(kp))
window.closeNow.set(true);
} else if(kp == 32) window.closeNow.set(true);
return window.closeNow.get() ? window.name : null;
}
}
}
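/*
 * The static block above funnels all HighGUI work through a single event thread:
 * callers enqueue Consumer<WindowsState> commands and the polling runnable drains
 * them between fetchEvent() checks. A stripped-down sketch of that hand-off
 * (illustrative only; the Object state type and guiState are placeholders, and the
 * InterruptedException from put() is left unhandled):
 *
 *   final ArrayBlockingQueue<Consumer<Object>> commands = new ArrayBlockingQueue<>(2);
 *   // producer thread: put() blocks when the queue is full, throttling callers
 *   commands.put(state -> System.out.println("runs on the event thread"));
 *   // event thread: poll() is non-blocking so event polling never stalls
 *   final Consumer<Object> cmd = commands.poll();
 *   if(cmd != null)
 *       cmd.accept(guiState);
 */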
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/display/ImageDisplay.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image.display;
import static net.dempsy.util.Functional.chain;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicLong;
import org.eclipse.swt.SWT;
import org.eclipse.swt.widgets.Display;
import org.eclipse.swt.widgets.FileDialog;
import org.eclipse.swt.widgets.Shell;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Point;
import org.opencv.core.Size;
import org.opencv.imgproc.Imgproc;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import net.dempsy.util.Functional;
import net.dempsy.util.QuietCloseable;
import ai.kognition.pilecv4j.image.CvMat;
import ai.kognition.pilecv4j.image.ImageFile;
import ai.kognition.pilecv4j.image.Utils;
import ai.kognition.pilecv4j.image.display.swt.SwtImageDisplay;
import ai.kognition.pilecv4j.image.display.swt.SwtImageDisplay.CanvasType;
public abstract class ImageDisplay implements QuietCloseable {
private static final Logger LOGGER = LoggerFactory.getLogger(ImageDisplay.class);
private static final AtomicLong sequence = new AtomicLong(0);
private static Thread executorThread;
private static final ExecutorService executor = Executors.newSingleThreadExecutor(
r -> chain(executorThread = new Thread(r, "ImageDisplay Main Thread"), t -> t.setDaemon(true)));
public static final String DEFAULT_WINDOWS_NAME_PREFIX = "Window";
public static final Implementation DEFAULT_IMPLEMENTATION = Implementation.HIGHGUI;
private static final List<Runnable> eventPolling = new CopyOnWriteArrayList<>();
private static Thread eventPollingEmitterLoop = null;
public static synchronized void addEventPollingRunnable(final Runnable eventPoller) {
eventPolling.add(eventPoller);
if(eventPollingEmitterLoop == null) {
eventPollingEmitterLoop = new Thread(() -> {
int curIndex = 0;
while(true) {
final int numPollingRunnables = eventPolling.size();
if(numPollingRunnables == 0)
Functional.uncheck(() -> Thread.sleep(1));
else {
if(curIndex >= eventPolling.size())
curIndex = 0;
final Runnable curRunnable = eventPolling.get(curIndex++);
try {
curRunnable.run();
} catch(final Throwable th) {
LOGGER.warn("Exception thrown by {} when polling for events. But yet, I'm continuing on.", curRunnable, th);
}
}
}
}, "ImageDisplay Event Loop Emitter");
eventPollingEmitterLoop.setDaemon(true);
eventPollingEmitterLoop.start();
}
}
public static void syncExec(final Runnable eventHandler) {
if(eventHandler == null)
throw new NullPointerException("Cannot pass a null Runnable to " + ImageDisplay.class.getSimpleName() + ".syncExec.");
if(Thread.currentThread() == executorThread) {
try {
eventHandler.run();
} catch(final RuntimeException rte) {
LOGGER.info("Exception processing {} event.", ImageDisplay.class.getSimpleName(), rte);
}
} else {
try {
final Future<?> future = executor.submit(() -> {
// can only throw a RuntimeException
eventHandler.run();
});
try {
future.get();
} catch(final ExecutionException e) {
// the eventHandler can only throw a RuntimeException
throw new RuntimeException(e.getCause());
}
} catch(final RuntimeException rte) {
LOGGER.info("Exception processing {} event.", ImageDisplay.class.getSimpleName(), rte);
} catch(final InterruptedException ie) {
throw new RuntimeException(ie);
}
}
}
public static void asyncExec(final Runnable eventHandler) {
executor.submit(eventHandler);
}
public abstract void update(final Mat toUpdate);
public abstract void waitUntilClosed() throws InterruptedException;
public abstract void setCloseCallback(Runnable closeCallback);
@FunctionalInterface
public static interface KeyPressCallback {
public boolean keyPressed(int keyPressed);
}
@FunctionalInterface
public static interface SelectCallback {
public boolean select(Point pointClicked);
}
private static class Proxy extends ImageDisplay {
protected final ImageDisplay underlying;
private Proxy(final ImageDisplay underlying) {
this.underlying = underlying;
}
@Override
public void close() {
underlying.close();
}
@Override
public void update(final Mat toUpdate) {
underlying.update(toUpdate);
}
@Override
public void waitUntilClosed() throws InterruptedException {
underlying.waitUntilClosed();
}
@Override
public void setCloseCallback(final Runnable closeCallback) {
underlying.setCloseCallback(closeCallback);
}
}
/**
* SWT defaults to SWT_SCROLLABLE
*/
public static enum Implementation {
HIGHGUI, SWT, SWT_SCROLLABLE, SWT_RESIZABLE
}
public static class Builder {
private KeyPressCallback keyPressHandler = null;
private Implementation implementation = DEFAULT_IMPLEMENTATION;
private Runnable closeCallback = null;
private Mat toShow = null;
private String windowName = DEFAULT_WINDOWS_NAME_PREFIX + "_" + sequence.incrementAndGet();
private SelectCallback selectCallback = null;
private Size screenDim = null;
private boolean preserveAspectRatio = true;
public Builder keyPressHandler(final KeyPressCallback keyPressHandler) {
this.keyPressHandler = keyPressHandler;
return this;
}
public Builder implementation(final Implementation impl) {
this.implementation = impl;
return this;
}
public Builder closeCallback(final Runnable closeCallback) {
this.closeCallback = closeCallback;
return this;
}
public Builder selectCallback(final SelectCallback selectCallback) {
this.selectCallback = selectCallback;
return this;
}
public Builder windowName(final String windowName) {
this.windowName = windowName;
return this;
}
public Builder show(final Mat toShow) {
this.toShow = toShow;
return this;
}
public Builder dim(final Size screenDim) {
return dim(screenDim, true);
}
public Builder dim(final Size screenDim, final boolean preserveAspectRatio) {
this.screenDim = screenDim;
this.preserveAspectRatio = preserveAspectRatio;
return this;
}
public ImageDisplay build() {
if(screenDim == null)
return dobuild();
else
return new Proxy(dobuild()) {
private Size adjustedSize = null;
@Override
public void update(final Mat mat) {
if(adjustedSize == null)
adjustedSize = preserveAspectRatio ? Utils.scaleDownOrNothing(mat, screenDim)
: screenDim;
try(CvMat tmat = CvMat.shallowCopy(mat);
CvMat resized = new CvMat();) {
Imgproc.resize(mat, resized, adjustedSize, 0, 0, Imgproc.INTER_NEAREST);
underlying.update(resized);
}
}
};
}
protected ImageDisplay dobuild() {
switch(implementation) {
case HIGHGUI: {
if(selectCallback != null)
LOGGER.info("The select callback will be ignored when using the HIGHGUI implementation of ImageDisplay");
return new CvImageDisplay(toShow, windowName, closeCallback, keyPressHandler);
}
case SWT:
case SWT_SCROLLABLE:
return new SwtImageDisplay(toShow, windowName, closeCallback, keyPressHandler, selectCallback, CanvasType.SCROLLABLE);
case SWT_RESIZABLE:
return new SwtImageDisplay(toShow, windowName, closeCallback, keyPressHandler, selectCallback, CanvasType.RESIZABLE);
default:
throw new IllegalStateException();
}
}
}
public static CvMat displayable(final Mat mat) {
final int type = mat.type();
final int depth = CvType.depth(type);
if(depth == CvType.CV_8U || depth == CvType.CV_8S)
return CvMat.shallowCopy(mat);
final int inChannels = mat.channels();
if(inChannels != 1 && inChannels != 3)
throw new IllegalArgumentException("Cannot handle an image of type " + CvType.typeToString(type) + " yet.");
final int newType = (inChannels == 1) ? CvType.CV_8UC1 : CvType.CV_8UC3;
try(CvMat ret = new CvMat()) {
mat.convertTo(ret, newType, 1.0 / 256.0);
return ret.returnMe();
}
}
public static void main(final String[] args) throws Exception {
try(final ImageDisplay id = new Builder()
.implementation(Implementation.SWT)
.build();) {
String string = (args.length > 0 ? args[0] : null);
if(string == null) {
final Display display = new Display();
final Shell shell = new Shell(display);
try(QuietCloseable c1 = () -> display.close();
QuietCloseable c2 = () -> shell.close()) {
final FileDialog dialog = new FileDialog(shell, SWT.OPEN);
dialog.setText("Open an image file or cancel");
string = dialog.open();
}
}
if(string != null) {
final CvMat iioimage = ImageFile.readMatFromFile(string);
id.update(iioimage);
}
id.waitUntilClosed();
}
}
}
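/*
 * A minimal usage sketch (not part of the original file) of the Builder above,
 * mirroring main(): the file path and the ESC key code (27) are placeholders,
 * and exception handling is elided.
 *
 *   try(final ImageDisplay display = new ImageDisplay.Builder()
 *       .implementation(Implementation.HIGHGUI)
 *       .windowName("demo")
 *       .keyPressHandler(key -> key == 27) // returning true closes the window
 *       .build();
 *       final CvMat img = ImageFile.readMatFromFile("/path/to/image.png")) {
 *       display.update(img);
 *       display.waitUntilClosed();
 *   }
 */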
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/display
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/display/swing/SwingImageDisplay.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image.display.swing;
/******************************************************************************
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* IBM Corporation - initial API and implementation
*****************************************************************************/
import javax.swing.JFrame;
import javax.swing.JPanel;
import javax.swing.JScrollPane;
import javax.swing.SwingUtilities;
import java.awt.BorderLayout;
import java.awt.Dimension;
import java.awt.Graphics;
/*
* Canvas example snippet: scroll an image (flicker free, no double buffering)
*
* For a list of all SWT example snippets see
* http://www.eclipse.org/swt/snippets/
*/
import java.awt.image.BufferedImage;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.concurrent.atomic.AtomicReference;
import org.eclipse.swt.SWT;
import org.eclipse.swt.widgets.Display;
import org.eclipse.swt.widgets.FileDialog;
import org.eclipse.swt.widgets.Shell;
import org.opencv.core.Mat;
import net.dempsy.util.QuietCloseable;
import ai.kognition.pilecv4j.image.ImageFile;
import ai.kognition.pilecv4j.image.display.ImageDisplay;
public class SwingImageDisplay extends ImageDisplay {
private static class Guard implements AutoCloseable {
final Object d;
public Guard(final Object d) throws Exception {
this.d = d;
}
@Override
public void close() throws Exception {
final Method closeMethod = d.getClass().getDeclaredMethod("close");
closeMethod.invoke(d);
}
}
public static void main(final String[] args) throws Exception {
Display display;
Shell shell;
BufferedImage iioimage = null;
try(Guard g = new Guard(display = new Display());
Guard g2 = new Guard(shell = new Shell(display))) {
final FileDialog dialog = new FileDialog(shell, SWT.OPEN);
dialog.setText("Open an image file or cancel");
final String string = dialog.open();
if(string != null)
iioimage = ImageFile.readBufferedImageFromFile(string);
}
if(iioimage != null)
showImage(iioimage);
}
public static QuietCloseable showImage(final BufferedImage iioimage) throws InvocationTargetException,
InterruptedException {
final AtomicReference<JFrame> frame = new AtomicReference<JFrame>(null);
SwingUtilities.invokeAndWait(() -> {
final JPanel p = new ScrollImagePanel(iioimage);
final JFrame f = new JFrame();
f.setContentPane(p);
f.setSize(iioimage.getWidth(), iioimage.getHeight());
f.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
f.setVisible(true);
frame.set(f);
});
return () -> frame.get().dispose();
}
public static class ScrollImagePanel extends JPanel {
private static final long serialVersionUID = 1L;
public ScrollImagePanel(final BufferedImage image) {
final JPanel canvas = new JPanel() {
private static final long serialVersionUID = 1L;
@Override
protected void paintComponent(final Graphics g) {
super.paintComponent(g);
g.drawImage(image, 0, 0, null);
}
};
canvas.setPreferredSize(new Dimension(image.getWidth(), image.getHeight()));
final JScrollPane sp = new JScrollPane(canvas);
setLayout(new BorderLayout());
add(sp, BorderLayout.CENTER);
}
}
@Override
public void close() {
// TODO Auto-generated method stub
}
@Override
public void update(final Mat toUpdate) {
// TODO Auto-generated method stub
}
@Override
public void waitUntilClosed() throws InterruptedException {
// TODO Auto-generated method stub
}
@Override
public void setCloseCallback(final Runnable closeCallback) {
// TODO Auto-generated method stub
}
}
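/*
 * A minimal usage sketch (not part of the original file) for the static showImage()
 * helper above; the returned QuietCloseable disposes the JFrame. The image here is
 * a placeholder and exception handling is elided.
 *
 *   final BufferedImage img = new BufferedImage(640, 480, BufferedImage.TYPE_3BYTE_BGR);
 *   try(final QuietCloseable window = SwingImageDisplay.showImage(img)) {
 *       Thread.sleep(5000); // keep the window up for five seconds
 *   }
 */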
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/display
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/display/swt/ResizableSwtCanvasImageDisplay.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image.display.swt;
import org.eclipse.swt.SWT;
import org.eclipse.swt.graphics.GC;
import org.eclipse.swt.graphics.Image;
import org.eclipse.swt.graphics.ImageData;
import org.eclipse.swt.graphics.Rectangle;
import org.eclipse.swt.layout.GridData;
import org.eclipse.swt.layout.GridLayout;
import org.eclipse.swt.layout.RowData;
import org.eclipse.swt.layout.RowLayout;
import org.eclipse.swt.widgets.Canvas;
import org.eclipse.swt.widgets.Composite;
import org.eclipse.swt.widgets.Display;
import org.eclipse.swt.widgets.Layout;
import org.opencv.core.Mat;
import org.opencv.core.Size;
import org.opencv.imgproc.Imgproc;
import net.dempsy.util.Functional;
import net.dempsy.util.QuietCloseable;
import ai.kognition.pilecv4j.image.CvMat;
import ai.kognition.pilecv4j.image.Utils;
import ai.kognition.pilecv4j.image.display.ImageDisplay;
import ai.kognition.pilecv4j.image.geometry.SimplePoint;
public class ResizableSwtCanvasImageDisplay extends SwtCanvasImageDisplay {
private Size parentBounds = null;
public ResizableSwtCanvasImageDisplay() {}
public ResizableSwtCanvasImageDisplay(final Composite parent, final Runnable closeCallback, final KeyPressCallback kpCallback,
final SelectCallback selectCallback) {
attach(parent, closeCallback, kpCallback, selectCallback);
}
public Canvas attach(final Composite parent) {
return attach(parent, null, null, null);
}
private void updateBounds() {
final Rectangle curBounds = parent.getBounds();
parentBounds = new Size(curBounds.width, curBounds.height);
}
// This can return null if there is no image yet.
protected Size getDisplayImageSize() {
try(final CvMat lcurrentImageMat = Functional.applyIfExistsAndReturnResult(currentImageRef, CvMat::shallowCopy);) {
if(lcurrentImageMat != null)
return lcurrentImageMat.size();
return null;
}
}
/**
* @return the current image to display which should be assigned to a try-with-resource managed variable
*/
protected CvMat getDisplayImage() {
return Functional.applyIfExistsAndReturnResult(currentImageRef, CvMat::shallowCopy);
}
public SimplePoint translateToImageCoords(final int canvasRow, final int canvasCol) {
final Size lcurrentImageMatSize = getDisplayImageSize();
if(lcurrentImageMatSize != null) {
final Size size = Utils.scaleDownOrNothing(lcurrentImageMatSize, parentBounds);
final double nr = canvasRow * lcurrentImageMatSize.height / size.height;
final double nc = canvasCol * lcurrentImageMatSize.width / size.width;
return new SimplePoint(nr, nc);
}
return null;
}
public Canvas attach(final Composite parentx, final Runnable closeCallback, final KeyPressCallback kpCallback,
final SelectCallback selectCallback) {
super.setup(new Canvas(parentx, SWT.NO_BACKGROUND), closeCallback, kpCallback, selectCallback);
if(parent.isVisible()) {
updateBounds();
}
final Display display = SwtUtils.getDisplay();
ImageDisplay.syncExec(() -> {
canvas.addListener(SWT.Paint, e -> {
try(final CvMat lcurrentImageMat = getDisplayImage();) {
if(lcurrentImageMat != null) {
if(parent.isVisible()) {
if(parentBounds == null) {
updateBounds();
}
final Image lcurrentImage = new Image(display, convertToDisplayableSWT(lcurrentImageMat));
try(QuietCloseable qc = () -> lcurrentImage.dispose();) {
final int x = ((int)parentBounds.width - lcurrentImage.getBounds().width) >>> 1;
final int y = ((int)parentBounds.height - lcurrentImage.getBounds().height) >>> 1;
canvas.setBounds(x, y, lcurrentImage.getBounds().width, lcurrentImage.getBounds().height);
final GC gc = e.gc;
gc.drawImage(lcurrentImage, 0, 0);
// get the bounds of the image.
final Rectangle rect = lcurrentImage.getBounds();
// get the bounds of the canvas
final Rectangle client = canvas.getClientArea();
// there may be a margin between the image and the edge of the canvas
// if the canvas is bigger than the image.
final int marginWidth = client.width - rect.width;
if(marginWidth > 0) {
gc.fillRectangle(rect.width, 0, marginWidth, client.height);
}
final int marginHeight = client.height - rect.height;
if(marginHeight > 0) {
gc.fillRectangle(0, rect.height, client.width, marginHeight);
}
// if we haven't packed the display layout since we either never have
// or the image changed size, we need to do that now.
if(!alreadySized.get()) {
final Layout layout = parent.getLayout();
if(layout instanceof GridLayout) {
final GridData layoutData = new GridData(GridData.FILL_BOTH);
layoutData.widthHint = parent.getBounds().width;
layoutData.heightHint = parent.getBounds().height;
canvas.setLayoutData(layoutData);
} else if(layout instanceof RowLayout) {
final RowData layoutData = new RowData(parent.getBounds().width, parent.getBounds().height);
canvas.setLayoutData(layoutData);
}
canvas.layout(true, true);
parent.layout(true, true);
parent.requestLayout();
// shell.pack(true);
alreadySized.set(true);
}
}
}
}
}
});
parent.addListener(SWT.Resize, e -> {
updateBounds();
});
});
return canvas;
}
@Override
public org.opencv.core.Point canvasLocationToImageLocation(final int x, final int y) {
// TODO: fix this
return new org.opencv.core.Point(x, y);
}
public ImageData convertToDisplayableSWT(final Mat image) {
try(final CvMat toDisplay = new CvMat();) {
Imgproc.resize(image, toDisplay, Utils.scaleDownOrNothing(image, parentBounds));
return SwtUtils.convertToDisplayableSWT(toDisplay);
}
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/display
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/display/swt/ScrollableSwtCanvasImageDisplay.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image.display.swt;
import org.eclipse.swt.SWT;
import org.eclipse.swt.graphics.GC;
import org.eclipse.swt.graphics.Image;
import org.eclipse.swt.graphics.ImageData;
import org.eclipse.swt.graphics.Point;
import org.eclipse.swt.graphics.Rectangle;
import org.eclipse.swt.layout.GridData;
import org.eclipse.swt.widgets.Canvas;
import org.eclipse.swt.widgets.Composite;
import org.eclipse.swt.widgets.Display;
import org.eclipse.swt.widgets.ScrollBar;
import org.opencv.core.Mat;
import net.dempsy.util.Functional;
import net.dempsy.util.QuietCloseable;
import ai.kognition.pilecv4j.image.CvMat;
import ai.kognition.pilecv4j.image.display.ImageDisplay;
public class ScrollableSwtCanvasImageDisplay extends SwtCanvasImageDisplay {
private final Point origin = new Point(0, 0);
public ScrollableSwtCanvasImageDisplay(final Composite parent, final Runnable closeCallback, final KeyPressCallback kpCallback,
final SelectCallback selectCallback) {
super.setup(new Canvas(parent, SWT.NO_BACKGROUND | SWT.NO_REDRAW_RESIZE | SWT.V_SCROLL | SWT.H_SCROLL), closeCallback, kpCallback, selectCallback);
final Display display = SwtUtils.getDisplay();
ImageDisplay.syncExec(() -> {
canvas.addListener(SWT.Paint, e -> {
try(final CvMat lcurrentImageMat = Functional.applyIfExistsAndReturnResult(currentImageRef, CvMat::shallowCopy);) {
if(lcurrentImageMat != null) {
final Image lcurrentImage = new Image(display, convertToDisplayableSWT(lcurrentImageMat));
try(QuietCloseable qc = () -> lcurrentImage.dispose();) {
// Draw the image into the current graphics context at the current position
final GC gc = e.gc;
gc.drawImage(lcurrentImage, this.origin.x, this.origin.y);
// get the bounds of the image.
final Rectangle rect = lcurrentImage.getBounds();
// get the bounds of the canvas
final Rectangle client = canvas.getClientArea();
// there may be a margin between the image and the edge of the canvas
// if the canvas is bigger than the image.
final int marginWidth = client.width - rect.width;
if(marginWidth > 0) {
gc.fillRectangle(rect.width, 0, marginWidth, client.height);
}
final int marginHeight = client.height - rect.height;
if(marginHeight > 0) {
gc.fillRectangle(0, rect.height, client.width, marginHeight);
}
// if we haven't packed the display layout since we either never have
// or the image changed size, we need to do that now.
if(!alreadySized.get()) {
final GridData gridData = new GridData(GridData.FILL_BOTH);
gridData.widthHint = lcurrentImage.getBounds().width;
gridData.heightHint = lcurrentImage.getBounds().height;
canvas.setLayoutData(gridData);
canvas.layout(true, true);
parent.layout(true, true);
parent.pack(true);
alreadySized.set(true);
}
}
}
}
});
final ScrollBar hBar = canvas.getHorizontalBar();
hBar.addListener(SWT.Selection, e -> {
try(final CvMat currentImage = Functional.applyIfExistsAndReturnResult(currentImageRef, CvMat::shallowCopy);) {
if(currentImage != null) {
final int hSelection = hBar.getSelection();
final int destX = -hSelection - origin.x;
final Rectangle rect = new Rectangle(0, 0, currentImage.width(), currentImage.height());
canvas.scroll(destX, 0, 0, 0, rect.width, rect.height, false);
origin.x = -hSelection;
}
}
});
final ScrollBar vBar = canvas.getVerticalBar();
vBar.addListener(SWT.Selection, e -> {
try(final CvMat currentImage = Functional.applyIfExistsAndReturnResult(currentImageRef, CvMat::shallowCopy);) {
if(currentImage != null) {
final int vSelection = vBar.getSelection();
final int destY = -vSelection - origin.y;
final Rectangle rect = new Rectangle(0, 0, currentImage.width(), currentImage.height());
canvas.scroll(0, destY, 0, 0, rect.width, rect.height, false);
origin.y = -vSelection;
}
}
});
canvas.addListener(SWT.Resize, e -> {
try(final CvMat currentImage = Functional.applyIfExistsAndReturnResult(currentImageRef, CvMat::shallowCopy);) {
if(currentImage != null) {
final Rectangle rect = new Rectangle(0, 0, currentImage.width(), currentImage.height());
final Rectangle client = canvas.getClientArea();
hBar.setMaximum(rect.width);
vBar.setMaximum(rect.height);
hBar.setThumb(Math.min(rect.width, client.width));
vBar.setThumb(Math.min(rect.height, client.height));
final int hPage = rect.width - client.width;
final int vPage = rect.height - client.height;
int hSelection = hBar.getSelection();
int vSelection = vBar.getSelection();
if(hSelection >= hPage) {
if(hPage <= 0)
hSelection = 0;
origin.x = -hSelection;
}
if(vSelection >= vPage) {
if(vPage <= 0)
vSelection = 0;
origin.y = -vSelection;
}
canvas.redraw();
}
}
});
});
}
@Override
public org.opencv.core.Point canvasLocationToImageLocation(final int x, final int y) {
return new org.opencv.core.Point(x - origin.x, y - origin.y);
}
public ImageData convertToDisplayableSWT(final Mat image) {
return SwtUtils.convertToDisplayableSWT(image);
}
}
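/*
 * A note on the scroll math in the resize listener above, with placeholder sizes:
 *
 *   final int imageW = 1000, clientW = 400; // image wider than the visible client area
 *   final int hPage = imageW - clientW;     // 600: the scrollable range of the bar
 *   // hBar's maximum is imageW and its thumb is clientW, so getSelection() runs
 *   // 0..hPage, and origin.x = -selection pans the drawn image left as you scroll.
 */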
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/display
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/display/swt/SwtCanvasImageDisplay.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image.display.swt;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import org.eclipse.swt.SWT;
import org.eclipse.swt.events.KeyEvent;
import org.eclipse.swt.events.KeyListener;
import org.eclipse.swt.events.MouseEvent;
import org.eclipse.swt.events.MouseListener;
import org.eclipse.swt.widgets.Canvas;
import org.eclipse.swt.widgets.Composite;
import org.eclipse.swt.widgets.Listener;
import org.opencv.core.Mat;
import org.opencv.core.Size;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import ai.kognition.pilecv4j.image.CvMat;
import ai.kognition.pilecv4j.image.display.ImageDisplay;
public abstract class SwtCanvasImageDisplay extends ImageDisplay {
private static final Logger LOGGER = LoggerFactory.getLogger(SwtCanvasImageDisplay.class);
// Event callbacks
protected KeyPressCallback callback;
protected Listener closeCallback;
protected SelectCallback selectCallback;
protected final AtomicReference<CvMat> currentImageRef = new AtomicReference<CvMat>(null);
protected final AtomicBoolean done = new AtomicBoolean(false);
public Canvas canvas;
// protected Display display;
protected Composite parent;
protected final CountDownLatch waitUntilClosedLatch = new CountDownLatch(1);
// These are for tracking changes in the bounds which require
// a repacking of the layouts.
private Size prevBounds = null;
final AtomicBoolean alreadySized = new AtomicBoolean(false);
public SwtCanvasImageDisplay() {}
@Override
public void setCloseCallback(final Runnable closeCallback) {
removeCurrentCloseCallback();
this.closeCallback = e -> closeCallback.run();
if(closeCallback != null) {
ImageDisplay.syncExec(() -> {
canvas.addListener(SWT.Dispose, this.closeCallback);
});
}
}
private void removeCurrentCloseCallback() {
if(this.closeCallback != null) {
ImageDisplay.syncExec(() -> {
canvas.removeListener(SWT.Dispose, this.closeCallback);
});
}
}
protected void setup(final Canvas canvas, final Runnable closeCallback, final KeyPressCallback kpCallback,
final SelectCallback selectCallback) {
this.canvas = canvas;
this.parent = canvas.getParent();
// this.display = canvas.getDisplay();
this.callback = kpCallback;
this.selectCallback = selectCallback;
ImageDisplay.syncExec(() -> {
if(selectCallback != null) {
canvas.addMouseListener(new MouseListener() {
@Override
public void mouseUp(final MouseEvent e) {}
@Override
public void mouseDown(final MouseEvent e) {
// origin is negative when scrolled since it's in that direction from the origin of the viewport.
if(selectCallback.select(canvasLocationToImageLocation(e.x, e.y))) {
// need to close the shell
close();
}
}
@Override
public void mouseDoubleClick(final MouseEvent e) {}
});
}
if(callback != null) {
canvas.addKeyListener(new KeyListener() {
@Override
public void keyReleased(final KeyEvent e) {}
@Override
public void keyPressed(final KeyEvent e) {
if(callback.keyPressed(e.keyCode)) {
// need to close the shell
close();
}
}
});
}
canvas.addListener(SWT.Dispose, e -> close());
});
setCloseCallback(closeCallback);
}
public abstract org.opencv.core.Point canvasLocationToImageLocation(int x, int y);
@Override
public void close() {
if(!done.get()) {
done.set(true);
ImageDisplay.syncExec(() -> {
if(canvas != null) {
canvas.dispose();
final CvMat img = currentImageRef.getAndSet(null);
if(img != null)
img.close();
}
});
waitUntilClosedLatch.countDown();
}
}
@Override
public void update(final Mat image) {
LOGGER.trace("Showing image {}", image);
ImageDisplay.syncExec(() -> {
try(final CvMat prev = currentImageRef.getAndSet(CvMat.shallowCopy(image));) {}
final Size bounds = image.size();
if(!bounds.equals(prevBounds)) {
// shell.setSize(bounds.width, bounds.height);
alreadySized.set(false);
prevBounds = bounds;
}
if(canvas != null && !canvas.isDisposed())
canvas.redraw();
});
}
@Override
public void waitUntilClosed() throws InterruptedException {
waitUntilClosedLatch.await();
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/display
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/display/swt/SwtImageDisplay.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image.display.swt;
import static net.dempsy.util.Functional.chain;
import java.util.function.Function;
import org.eclipse.swt.SWT;
import org.eclipse.swt.layout.GridLayout;
import org.eclipse.swt.widgets.Display;
import org.eclipse.swt.widgets.Shell;
import org.opencv.core.Mat;
import ai.kognition.pilecv4j.image.display.ImageDisplay;
public class SwtImageDisplay extends ImageDisplay {
public static enum CanvasType {
SCROLLABLE, RESIZABLE
}
private final String name;
private Display display = null;
private Shell shell = null;
private SwtCanvasImageDisplay canvasWriter = null;
private boolean setupCalled = false;
private final Function<Shell, SwtCanvasImageDisplay> canvasHandlerMaker;
public SwtImageDisplay(final Mat mat, final String name, final Runnable closeCallback, final KeyPressCallback kpCallback,
final SelectCallback selectCallback, final CanvasType canvasType) {
this.name = name;
switch(canvasType) {
case SCROLLABLE:
canvasHandlerMaker = s -> new ScrollableSwtCanvasImageDisplay(shell, closeCallback, kpCallback, selectCallback);
break;
case RESIZABLE:
canvasHandlerMaker = s -> new ResizableSwtCanvasImageDisplay(shell, closeCallback, kpCallback, selectCallback);
break;
default:
throw new IllegalArgumentException("Cannot create an swt canvas of type " + canvasType);
}
if(mat != null)
update(mat);
}
@Override
public void setCloseCallback(final Runnable closeCallback) {
canvasWriter.setCloseCallback(closeCallback);
}
private void setup() {
setupCalled = true;
display = SwtUtils.getDisplay();
ImageDisplay.syncExec(() -> {
shell = new Shell(display);
if(name != null) shell.setText(name);
// set the GridLayout on the shell
chain(new GridLayout(), l -> l.numColumns = 1, shell::setLayout);
canvasWriter = canvasHandlerMaker.apply(shell);
shell.addListener(SWT.Close, e -> {
if(!shell.isDisposed())
shell.dispose();
});
shell.open();
});
}
@Override
public synchronized void update(final Mat image) {
if(!setupCalled)
setup();
canvasWriter.update(image);
}
@Override
public void close() {
if(display != null) {
ImageDisplay.syncExec(() -> {
if(canvasWriter != null)
canvasWriter.close();
if(shell != null)
shell.close();
});
}
}
@Override
public void waitUntilClosed() throws InterruptedException {
canvasWriter.waitUntilClosed();
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/display
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/display/swt/SwtUtils.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image.display.swt;
import java.lang.reflect.Method;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.stream.IntStream;
import org.eclipse.swt.graphics.ImageData;
import org.eclipse.swt.graphics.PaletteData;
import org.eclipse.swt.graphics.RGB;
import org.eclipse.swt.widgets.Display;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.imgproc.Imgproc;
import ai.kognition.pilecv4j.image.CvMat;
import ai.kognition.pilecv4j.image.display.ImageDisplay;
public class SwtUtils {
private static Display theDisplay = null;
private static boolean loaded = false;
public static synchronized void loadNative() {
if(!loaded) {
// test if the jar is already on the classpath or is shadded
boolean onClasspath = false;
try {
Class.forName("org.eclipse.swt.widgets.Display");
onClasspath = true;
} catch(final ClassNotFoundException cnfe) {
onClasspath = false;
}
if(!onClasspath) {
final String osName = System.getProperty("os.name").toLowerCase();
final String osArch = System.getProperty("os.arch").toLowerCase();
final String swtFileNameOsPart = osName.contains("win") ? "win32"
: osName.contains("mac") ? "macosx" : osName.contains("linux") || osName.contains("nix") ? "linux_gtk" : ""; // throw new
// RuntimeException("Unknown
// OS name: "+osName)
final String swtFileNameArchPart = osArch.contains("64") ? "x64" : "x86";
final String swtFileName = "swt_" + swtFileNameOsPart + "_" + swtFileNameArchPart + ".jar";
try {
final URLClassLoader classLoader = (URLClassLoader)SwtUtils.class.getClassLoader();
final Method addUrlMethod = URLClassLoader.class.getDeclaredMethod("addURL", URL.class);
addUrlMethod.setAccessible(true);
final URL swtFileUrl = new URL("file:" + swtFileName);
addUrlMethod.invoke(classLoader, swtFileUrl);
} catch(final Exception e) {
throw new RuntimeException("Unable to add the SWT jar to the class path: " + swtFileName, e);
}
}
loaded = true;
}
}
public static synchronized Display getDisplay() {
if(theDisplay == null) {
startSwt();
}
return theDisplay;
}
private static synchronized void startSwt() {
ImageDisplay.syncExec(() -> {
theDisplay = new Display();
ImageDisplay.addEventPollingRunnable(() -> {
ImageDisplay.syncExec(() -> {
while(theDisplay.readAndDispatch());
});
});
});
}
public static synchronized void stopSwt() {
ImageDisplay.syncExec(() -> {
theDisplay.syncExec(() -> {
if(theDisplay != null && !theDisplay.isDisposed())
theDisplay.dispose();
});
});
}
private static RGB[] grayscalePaletteColors = new RGB[256];
static {
IntStream.range(0, 256).forEach(i -> grayscalePaletteColors[i] = new RGB(i, i, i));
}
public static ImageData convertToDisplayableSWT(final Mat toUse) {
try(CvMat displayable = ImageDisplay.displayable(toUse)) {
return convertToSWT(displayable);
}
}
public static ImageData convertToSWT(final Mat image) {
final int type = image.type();
final int inChannels = image.channels();
final int cvDepth = CvType.depth(type);
if(cvDepth != CvType.CV_8U && cvDepth != CvType.CV_8S)
throw new IllegalArgumentException("Cannot convert Mat to SWT image with elements larger than a byte yet.");
final int width = image.cols();
final int height = image.rows();
Mat toUse = image;
final PaletteData palette;
try(CvMat alt = new CvMat();) {
switch(inChannels) {
case 1:
palette = new PaletteData(grayscalePaletteColors);
break;
case 3:
palette = new PaletteData(0x0000FF, 0x00FF00, 0xFF0000);
break;
case 4:
// hack for B&W pngs
// palette = new PaletteData(0xFF0000, 0xFF0000, 0xFF0000);
Imgproc.cvtColor(image, alt, Imgproc.COLOR_BGRA2BGR);
toUse = alt;
palette = new PaletteData(0x0000FF, 0x00FF00, 0xFF0000);
break;
// throw new IllegalArgumentException("Can't handle alpha channel yet.");
default:
throw new IllegalArgumentException("Can't handle an image with " + inChannels + " channels");
}
final int elemSize = CvType.ELEM_SIZE(toUse.type());
final ImageData id = new ImageData(width, height, elemSize * 8, palette, 1, new byte[width * height * elemSize]);
CvMat.bulkAccess(toUse, raster -> raster.get(id.data));
return id;
}
}
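    // A minimal usage sketch (added for illustration, not part of the original source).
    // The file name and the use of Imgcodecs.imread here are assumptions for the example.
    //
    // try(CvMat mat = CvMat.move(org.opencv.imgcodecs.Imgcodecs.imread("image.jpg"))) {
    //     final ImageData id = SwtUtils.convertToDisplayableSWT(mat);
    //     final org.eclipse.swt.graphics.Image img = new org.eclipse.swt.graphics.Image(SwtUtils.getDisplay(), id);
    //     // ... hand 'img' to an SWT widget; dispose of it when finished.
    //     img.dispose();
    // }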
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/geometry/LineSegment.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image.geometry;
public class LineSegment {
public final Point p1;
public final Point p2;
public final Direction direction;
/**
 * This is the quantized gradient direction.
*/
public final byte gradientDirection;
/**
 * Direction indicates whether to use the right-hand rule (see http://mathworld.wolfram.com/Right-HandRule.html ),
 * meaning the perpendicular direction is given by ((p2 - p1) / | p2 - p1 |) X [0, 0, 1]. For example:
 *
 * if P1 = [0, 0]
 * P2 = [0, 1]
 *
 * then P2 - P1 = [0, 1].
 * Given the "Direction" = RIGHT, the perpendicular direction to the line would be: [0, 1, 0] X [0, 0, 1] =
 *
 * | i j k |
 * | 0 1 0 |
 * | 0 0 1 |
 *
 * which is i(1*1 - 0*0) - j(0*1 - 0*0) + k(0*0 - 1*0) =
 * 1i + 0j + 0k => (1, 0).
*
* So, given a "RIGHT" direction, if you stand on P1 and look at P2, the direction of the perpendicular
* will be from left to right. Stand on the origin, look "North" (toward P2 = [0, 1]) and the direction
* of the perpendicular will be toward [1, 0] (from your left, to your right).
*/
public static enum Direction {
    LEFT, RIGHT;
    public static final Direction FORWARD = RIGHT;
    public static final Direction REVERSE = LEFT;
}
private final Point p2Trans;
private final double p2TransMagSq;
// This will tell the distance algorithm which dimension to use to
// tell if the point being checked is off one end of the line segment.
private final boolean xbiased;
public LineSegment(final Point p1, final Point p2) {
this(p1, p2, Direction.RIGHT);
}
public LineSegment(final Point p1, final Point p2, final Direction direction) {
this.p1 = p1;
this.p2 = p2;
this.direction = direction;
p2Trans = p2.subtract(p1);
p2TransMagSq = p2Trans.magnitudeSquared();
xbiased = Math.abs(p1.x() - p2.x()) > Math.abs(p1.y() - p2.y());
final Point zcross = p2Trans.crossWithZ(direction == Direction.LEFT);
gradientDirection = zcross.quantizedDirection();
}
public Point closestPointTo(final Point x) {
final Point xTrans = x.subtract(p1);
// xTrans.p2Trans = |xTrans| |p2Trans| cos th
// so the projection of x on the line from p1 to p2 is:
// ((xTrans.p2Trans / |p2Trans|) * unit(p2Trans)) + p1 =
// ((xTrans.p2Trans / |p2Trans|) * p2Trans /|p2Trans|) + p1 =
// ((xTrans.p2Trans / |p2Trans|^2) * p2Trans) + p1
// final Point projection = p2Trans.multiply(xTrans.dot(p2Trans) / p2TransMagSq).add(p1);
// is the point off the end of the line segment
final Point closest;
if(xbiased) { // We're x-biased
if(p2Trans.x() > 0.0) { // ... and p2 is right of p1
if(x.x() > p2.x()) // ... and x.x is outside of p2.x
closest = p2;
else if(x.x() < p1.x()) // ... or x.x is outside of p1.x
closest = p1;
else // ... otherwise.
closest = p2Trans.multiply(xTrans.dot(p2Trans) / p2TransMagSq).add(p1);
} else { // ... p2 is left of p1
if(x.x() < p2.x()) // ... and x is left of p2.
closest = p2;
else if(x.x() > p1.x()) // ... and x is right of p1
closest = p1;
else // ... otherwise
closest = p2Trans.multiply(xTrans.dot(p2Trans) / p2TransMagSq).add(p1);
}
} else { // we're y-biased
if(p2Trans.y() > 0.0) { // ... and p2 is above p1
if(x.y() > p2.y()) // ... and x is above p2
closest = p2;
else if(x.y() < p1.y()) // ... or x is below p1
closest = p1;
else // ... otherwise.
closest = p2Trans.multiply(xTrans.dot(p2Trans) / p2TransMagSq).add(p1);
} else { // ... p2 is below p1
if(x.y() < p2.y()) // ... and x is below p2.
closest = p2;
else if(x.y() > p1.y()) // ... and x is above p1
closest = p1;
else // ... otherwise
closest = p2Trans.multiply(xTrans.dot(p2Trans) / p2TransMagSq).add(p1);
}
}
return closest;
}
public double distance(final Point x) {
return x.distance(closestPointTo(x));
}
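    // An illustrative sketch (values invented for this example): distance() clamps to the
    // nearest endpoint when the projection of the query point falls off the segment's ends.
    //
    // final LineSegment seg = new LineSegment(new SimplePoint(0, 0), new SimplePoint(0, 10));
    // seg.distance(new SimplePoint(5, 5));  // 5.0: perpendicular drop onto the segment
    // seg.distance(new SimplePoint(0, 15)); // 5.0: clamped to the endpoint p2 at (r=0, c=10)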
@Override
public String toString() {
return p1.toString() + "=>" + p2.toString();
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/geometry/PerpendicularLine.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image.geometry;
/**
* <p>
* A line defined in "perpendicular line coordinates" is expressed as a single point. This point
* is a reference for the line that's perpendicular to the line drawn from the origin to that point.
* </p>
*/
public class PerpendicularLine implements Point {
public final Point perpRef;
public PerpendicularLine(final Point perpRef) {
this.perpRef = perpRef;
}
public PerpendicularLine(final double r, final double c) {
perpRef = new SimplePoint(r, c);
}
@Override
public double getRow() {
return perpRef.getRow();
}
@Override
public double getCol() {
return perpRef.getCol();
}
@Override
public String toString() {
return "[" + getRow() + "," + getCol() + "]";
}
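    // An illustrative sketch (values invented for this example): the point (r=0, c=5)
    // in perpendicular line coordinates denotes the vertical line x == 5, since that line
    // is perpendicular to the vector from the origin to (5, 0).
    //
    // final PerpendicularLine vertical = new PerpendicularLine(0.0, 5.0);
    // PerpendicularLineCoordFit.perpendicularDistance(new SimplePoint(0, 8), vertical); // 3.0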
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/geometry/PerpendicularLineCoordFit.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image.geometry;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import ai.kognition.pilecv4j.nr.Minimizer;
/**
* <p>
* This class can be used to find the best line through a set of points where the
* result is a line in "perpendicular line coordinates." (yes, I made that term up)
* </p>
*
* <p>
* A line defined in "perpendicular line coordinates" is expressed as a single point. This point
* is a reference for the line that's perpendicular to the line drawn from the origin to that point.
* </p>
*/
public class PerpendicularLineCoordFit implements Minimizer.Func {
private final List<AwtPoint> points;
public Point worst = null;
public double maxErrSq;
private final boolean weighted;
private boolean awtp = false;
/**
 * This constructor takes either a list of java.awt.Point or a list of {@link AwtPoint}. Passing a list
 * of any other element type will cause the fit to fail with a ClassCastException.
 *
 * @param points
 *            is either a list of {@link AwtPoint}s or of java.awt.Points
 * @param weighted
 *            is whether or not the points are weighted, in which case they must implement {@link WeightedPoint}
 */
@SuppressWarnings("unchecked")
public PerpendicularLineCoordFit(final List<?> points, final boolean weighted) {
this.points = new ArrayList<AwtPoint>();
final Object o = points.get(0);
if(o instanceof java.awt.Point) {
for(int i = 0; i < points.size(); i++)
this.points.add(new AwtPoint((java.awt.Point)points.get(i)));
awtp = true;
} else
this.points.addAll((List<AwtPoint>)points);
this.weighted = weighted;
}
public PerpendicularLineCoordFit(final List<?> points) {
this(points, false);
}
public double getFurthestDistance() {
return Math.sqrt(maxErrSq);
}
public double getStdDev(final double sumSqError) {
return Math.sqrt(sumSqError / points.size());
}
public static PerpendicularLine interpretFinalPosition(final double[] finalPos) {
return new PerpendicularLine(finalPos[1], finalPos[0]);
}
@Override
public double func(final double[] x) {
final double xmagsq = (x[0] * x[0]) + (x[1] * x[1]);
final double xmag = Math.sqrt(xmagsq);
double ret = 0.0;
maxErrSq = -1.0;
for(final Point p: points) {
final double y1 = p.getRow();
final double x1 = p.getCol();
final double xdotxi = (y1 * x[1]) + (x1 * x[0]);
double err = (xmag - (xdotxi / xmag));
if(weighted)
err *= ((WeightedPoint)p).getWeight();
final double errSq = err * err;
if(maxErrSq < errSq) {
worst = p;
maxErrSq = errSq;
}
ret += errSq;
}
return ret;
}
public List<?> prune(final double maxDist, final double[] x) {
final double xmagsq = (x[0] * x[0]) + (x[1] * x[1]);
final double xmag = Math.sqrt(xmagsq);
final List<Object> ret = new ArrayList<Object>();
for(final Iterator<AwtPoint> iter = points.iterator(); iter.hasNext();) {
final Point p = iter.next();
final double y1 = p.getRow();
final double x1 = p.getCol();
final double xdotxi = (y1 * x[1]) + (x1 * x[0]);
final double err = Math.abs((xmag - (xdotxi / xmag)));
if(err > maxDist) {
ret.add(awtp ? (Object)(((AwtPoint)p).p) : (Object)p);
iter.remove();
}
}
return ret;
}
static public double perpendicularDistance(final Point x, final PerpendicularLine perpRef) {
return perpendicularDistance(x, perpRef.x(), perpRef.y());
}
static private double perpendicularDistance(final Point x, final double perpRefX, final double perpRefY) {
// We need to find the distance from a point X0 to the perp ref line described by P.
//
// Also define the point Xi on the line where the distance is smallest so the number
// we are looking is | X0 - Xi |. (drawing this out helps)
//
// If we project X0 onto P we can see that this projected vector will be exactly
// | X0 - Xi | longer (or shorter if X0 is on the other side of the line) than
// the length of P itself. The length of the projection of X0 onto P is:
//
// (P.X0)/|P|
//
// so the distance is:
// abs( |P| - P.X0/|P| )
final double xmagsq = (perpRefX * perpRefX) + (perpRefY * perpRefY);
final double xmag = Math.sqrt(xmagsq);
final double xdotxi = (x.getRow() * perpRefY) + (x.getCol() * perpRefX);
return Math.abs(xmag - (xdotxi / xmag));
}
public static double distance(final Point p1, final Point p2) {
final double r = p1.getRow() - p2.getRow();
final double c = p1.getCol() - p2.getCol();
return Math.sqrt((r * r) + (c * c));
}
public static class AwtPoint implements Point {
private final java.awt.Point p;
AwtPoint(final java.awt.Point p) {
this.p = p;
}
@Override
public double getRow() {
return p.y;
}
@Override
public double getCol() {
return p.x;
}
}
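    // A minimal fitting sketch (assumption: the Minimizer is used the same way it is in
    // Transform.bestFit elsewhere in this library; the points and the starting guess are
    // invented for the example). Note the starting guess must not be the origin since the
    // error function divides by the magnitude of the candidate perpendicular reference.
    //
    // final List<java.awt.Point> pts = java.util.Arrays.asList(
    //     new java.awt.Point(1, 0), new java.awt.Point(1, 5), new java.awt.Point(1, 10));
    // final PerpendicularLineCoordFit func = new PerpendicularLineCoordFit(pts);
    // final Minimizer m = new Minimizer(func);
    // m.minimize(new double[] {0.5, 0.5}); // may throw a MinimizerException
    // final PerpendicularLine line = interpretFinalPosition(m.getFinalPostion()); // ~ the line x == 1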
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/geometry/Point.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image.geometry;
public interface Point {
public static final double twoPi = Math.PI * 2.0;
public static Point ocv(final org.opencv.core.Point ocvPoint) {
return new Point() {
@Override
public double getRow() {
return ocvPoint.y;
}
@Override
public double getCol() {
return ocvPoint.x;
}
@Override
public String toString() {
return Point.toString(this);
}
};
}
default public org.opencv.core.Point toOcv() {
return new org.opencv.core.Point(x(), y());
}
public static String toString(final Point p) {
return p.getClass().getSimpleName() + "[ x=" + p.x() + ", y=" + p.y() + " ]";
}
public double getRow();
public double getCol();
default public double x() {
return getCol();
}
default public double y() {
return getRow();
}
/**
* This will return a point that's translated such that if the point passed in
 * is the same as {@code this} then the result will be [0, 0].
 *
 * It basically results in [ this - toOrigin ].
*/
default public Point subtract(final Point toOrigin) {
return new SimplePoint(y() - toOrigin.y(), x() - toOrigin.x());
}
default public Point add(final Point toOrigin) {
return new SimplePoint(y() + toOrigin.y(), x() + toOrigin.x());
}
default public double magnitudeSquared() {
final double y = y();
final double x = x();
return (y * y) + (x * x);
}
default public double magnitude() {
return Math.sqrt(magnitudeSquared());
}
default public double dot(final Point other) {
return (x() * other.x()) + (y() * other.y());
}
default public double distance(final Point other) {
final Point trans = subtract(other);
return trans.magnitude();
}
default public Point multiply(final double scalar) {
return new SimplePoint(getRow() * scalar, getCol() * scalar);
}
default public Point crossWithZ(final boolean flipZ) {
// !flipZ flipZ
// | i j k | | i j k |
// | x y 0 | | x y 0 |
// | 0 0 1 | | 0 0 -1 |
//
// !flipZ = i(y * 1) - j(x * 1) => x=y*1, y=-x*1 => r=-x*1, c=y*1
// flipZ = i(y * -1) - j(x * -1) => x=-y*1, y=x*1 => r=x*1, c=-y*1
return flipZ ? new SimplePoint(x(), -y()) : new SimplePoint(-x(), y());
}
default public byte quantizedDirection() {
final double rawang = Math.atan2(y(), x());
// angle should be between -Pi and Pi. We want it from 0 -> 2Pi
final double ang = rawang < 0.0 ? (2.0 * Math.PI) + rawang : rawang;
final int bytified = (int)Math.round((ang * 256.0) / twoPi);
return(bytified >= 256 ? 0 : (byte)(bytified & 0xff));
}
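    // Illustrative values (not from the original source): the quantization maps angles in
    // [0, 2*pi) onto the unsigned byte range [0, 256).
    //
    // new SimplePoint(/*r=*/0, /*c=*/1).quantizedDirection();  // 0: along the +x axis
    // new SimplePoint(/*r=*/1, /*c=*/0).quantizedDirection();  // 64: along the +y axis (pi/2)
    // new SimplePoint(/*r=*/0, /*c=*/-1).quantizedDirection(); // 128 (0x80, -128 as a signed Java byte)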
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/geometry/SimplePoint.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image.geometry;
public class SimplePoint implements Point {
private final double r;
private final double c;
// Serialization
@SuppressWarnings("unused")
private SimplePoint() {
r = c = -1.0;
}
public SimplePoint(final double r, final double c) {
this.r = r;
this.c = c;
}
@Override
public double getRow() {
return r;
}
@Override
public double getCol() {
return c;
}
@Override
public String toString() {
return Point.toString(this);
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(c);
result = prime * result + (int)(temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(r);
result = prime * result + (int)(temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(final Object obj) {
if(this == obj) return true;
if(obj == null) return false;
if(getClass() != obj.getClass()) return false;
final SimplePoint other = (SimplePoint)obj;
if(Double.doubleToLongBits(c) != Double.doubleToLongBits(other.c)) return false;
if(Double.doubleToLongBits(r) != Double.doubleToLongBits(other.r)) return false;
return true;
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/geometry/WeightedPoint.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image.geometry;
public interface WeightedPoint extends Point {
public double getWeight();
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/geometry/WeightedPointComparator.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image.geometry;
import java.util.Comparator;
public class WeightedPointComparator implements Comparator<WeightedPoint> {
public WeightedPointComparator() {}
@Override
public int compare(final WeightedPoint o1, final WeightedPoint o2) {
final double diff = (o2.getWeight() - o1.getWeight());
return diff > 0 ? 1 : (diff == 0.0D ? 0 : -1);
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/geometry
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/geometry/transform/AffineTransform.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image.geometry.transform;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;
import org.opencv.imgproc.Imgproc;
import ai.kognition.pilecv4j.image.CvMat;
import ai.kognition.pilecv4j.image.Utils;
public class AffineTransform implements Transform2D {
private final double tx;
private final double ty;
private final double sa;
private final double sb;
private final double sc;
private final double sd;
public AffineTransform(final ControlPoints cps) {
final Point[] src = new Point[cps.controlPoints.length];
final Point[] dst = new Point[cps.controlPoints.length];
int index = 0;
for(final ControlPoint cp: cps.controlPoints) {
src[index] = cp.originalPoint;
dst[index++] = cp.transformedPoint;
}
final double[][] transform;
try(CvMat cvmat = CvMat.move(Imgproc.getAffineTransform(new MatOfPoint2f(src), new MatOfPoint2f(dst)));) {
transform = Utils.to2dDoubleArray(cvmat);
}
sa = transform[0][0];
sb = transform[0][1];
sc = transform[1][0];
sd = transform[1][1];
tx = transform[0][2];
ty = transform[1][2];
}
@Override
public Point transform(final Point point) {
final double x = point.x;
final double y = point.y;
return new Point(x * sa + y * sb + tx, x * sc + y * sd + ty);
}
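    // A minimal usage sketch (control points invented for this example): three point
    // correspondences fully determine an affine transform; these encode a pure translation
    // by (+10, +5).
    //
    // final AffineTransform t = new AffineTransform(new ControlPoints(new ControlPoint[] {
    //     new ControlPoint(new Point(0, 0), new Point(10, 5)),
    //     new ControlPoint(new Point(1, 0), new Point(11, 5)),
    //     new ControlPoint(new Point(0, 1), new Point(10, 6))}));
    // t.transform(new Point(2, 3)); // -> (12, 8)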
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/geometry
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/geometry/transform/ControlPoint.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image.geometry.transform;
import org.opencv.core.Point;
public class ControlPoint {
public final Point originalPoint;
public final Point transformedPoint;
@SuppressWarnings("unused")
private ControlPoint() {
originalPoint = null;
transformedPoint = null;
}
public ControlPoint(final Point originalPoint, final Point transformedPoint) {
this.originalPoint = originalPoint;
this.transformedPoint = transformedPoint;
}
@Override
public String toString() {
return "ControlPoint [originalPoint=" + originalPoint + ", transformedPoint=" + transformedPoint + "]";
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/geometry
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/geometry/transform/ControlPoints.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image.geometry.transform;
public class ControlPoints {
public final ControlPoint[] controlPoints;
@SuppressWarnings("unused")
private ControlPoints() {
controlPoints = null;
}
public ControlPoints(final ControlPoint[] controlPoints) {
this.controlPoints = controlPoints;
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/geometry
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/geometry/transform/GaussianBlur.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image.geometry.transform;
import org.opencv.core.Core;
import org.opencv.core.Size;
import org.opencv.imgproc.Imgproc;
import ai.kognition.pilecv4j.image.CvMat;
/**
* A <a href="https://docs.opencv.org/3.3.1/d4/d86/group__imgproc__filter.html#gaabe8c836e97159a9193fb0b11ac52cf1">Gaussian blur</a> is a <a
* href="http://northstar-www.dartmouth.edu/doc/idl/html_6.2/Filtering_an_Imagehvr.html">low-pass filter</a>. It essentially makes changes more gradual which
* has the effect of reducing edges. It is often used in image processing to reduce detail or smooth an image through the application of a
* <a href="https://shapeofdata.wordpress.com/2013/07/23/gaussian-kernels/">Gaussian kernel</a>. This essentially weights
* the blurring of the pixels in an image using a <a href="http://mathworld.wolfram.com/GaussianFunction.html">Gaussian function</a>.
* <p>
* This class implements the {@link Imgproc} GaussianBlur method to return a CvMat that has been transformed by a Gaussian blur with the user's specifications.
*/
public class GaussianBlur {
/**
* The list of <a href="https://docs.opencv.org/3.3.1/d2/de8/group__core__array.html#ga209f2f4869e304c82d07739337eae7c5">BorderTypes</a> includes all the
* options for pixel extrapolation, which are ways of predicting pixel values, that are in this case used at image borders. This is necessary because the
* Gaussian kernel uses surrounding pixels to transform a given region.
*/
public enum BorderTypes {
    BORDER_CONSTANT(Core.BORDER_CONSTANT), BORDER_REPLICATE(Core.BORDER_REPLICATE), BORDER_REFLECT(Core.BORDER_REFLECT), BORDER_WRAP(
        Core.BORDER_WRAP), BORDER_REFLECT_101(Core.BORDER_REFLECT_101), BORDER_TRANSPARENT(
            Core.BORDER_TRANSPARENT), BORDER_REFLECT101(Core.BORDER_REFLECT101), BORDER_DEFAULT(Core.BORDER_DEFAULT), BORDER_ISOLATED(Core.BORDER_ISOLATED);
    /** The underlying OpenCV border-type constant (previously discarded by the constructor). */
    public final int value;
    private BorderTypes(final int value) {
        this.value = value;
    }
}
private final Size kernelSize;
private final double sigmaX;
private final double sigmaY;
private final int borderType;
/**
* Creates a <a href="https://docs.opencv.org/3.3.1/d4/d86/group__imgproc__filter.html#gaabe8c836e97159a9193fb0b11ac52cf1">GaussianBlur</a> using the given
* specifications to construct a <a href="https://docs.opencv.org/3.3.1/d4/d86/group__imgproc__filter.html#gac05a120c1ae92a6060dd0db190a61afa">Gaussian
* kernel</a>.
*
* @param kernelSize controls the dimensions of the Gaussian kernel. The width and height should be positive and odd
     * @param sigmaX the standard deviation of the Gaussian kernel in the x direction
     * @param sigmaY the standard deviation of the Gaussian kernel in the y direction; if zero, OpenCV uses sigmaX for both directions
* @param borderType the border extrapolation method to be used, see {@link BorderTypes}
*/
public GaussianBlur(final Size kernelSize, final double sigmaX, final double sigmaY, final int borderType) {
this.kernelSize = kernelSize;
this.sigmaX = sigmaX;
this.sigmaY = sigmaY;
this.borderType = borderType;
}
/**
 * Uses the {@link Imgproc} GaussianBlur method to smooth the image using the specifications given in the constructor and returns a CvMat of the
 * transformed image, instead of void like the Imgproc method. Note the blur is applied to the given mat in place; the returned CvMat, which the
 * caller owns, refers to the same, now blurred, image.
 *
 * @param mat CvMat of the image to be blurred (modified in place)
*
* @return a CvMat of the blurred image
*/
public CvMat gaussianBlur(final CvMat mat) {
Imgproc.GaussianBlur(mat, mat, kernelSize, sigmaX, sigmaY, borderType);
return mat.returnMe();
}
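    // A brief usage sketch (assumption: 'input' is an 8-bit CvMat obtained elsewhere;
    // the kernel size and sigmas are illustrative). A 5x5 kernel with sigma 1.5 in both
    // directions and OpenCV's default border extrapolation.
    //
    // try(CvMat blurred = new GaussianBlur(new Size(5, 5), 1.5, 1.5, Core.BORDER_DEFAULT).gaussianBlur(input)) {
    //     // 'blurred' refers to the same underlying image, blurred in place.
    // }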
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/geometry
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/geometry/transform/ScaleRotateAndTranslate.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image.geometry.transform;
import java.util.Arrays;
import org.opencv.core.Point;
import ai.kognition.pilecv4j.image.geometry.SimplePoint;
public class ScaleRotateAndTranslate implements Transform2D {
private final double tx;
private final double ty;
private final double sa;
private final double sb;
private final double sc;
private final double sd;
private static ai.kognition.pilecv4j.image.geometry.Point ocv(final Point p) {
return ai.kognition.pilecv4j.image.geometry.Point.ocv(p);
}
public ScaleRotateAndTranslate(final ControlPoint p1, final ControlPoint p2) {
final ai.kognition.pilecv4j.image.geometry.Point p1Original = ocv(p1.originalPoint);
final ai.kognition.pilecv4j.image.geometry.Point p2Original = ocv(p2.originalPoint);
final ai.kognition.pilecv4j.image.geometry.Point p1Transformed = ocv(p1.transformedPoint);
final ai.kognition.pilecv4j.image.geometry.Point p2Transformed = ocv(p2.transformedPoint);
final ai.kognition.pilecv4j.image.geometry.Point originalAtOrigin = p2Original.subtract(p1Original);
final ai.kognition.pilecv4j.image.geometry.Point transformedAtOrigin = p2Transformed.subtract(p1Transformed);
double angleRad = Math.atan2(transformedAtOrigin.y(), transformedAtOrigin.x()) - Math.atan2(originalAtOrigin.y(), originalAtOrigin.x());
if(angleRad > Math.PI)
angleRad = angleRad - (2.0 * Math.PI);
else if(angleRad < -Math.PI)
angleRad = angleRad + (2.0 * Math.PI);
final double magOriginal = originalAtOrigin.magnitude();
final double magTransformed = transformedAtOrigin.magnitude();
final double scale = magTransformed / magOriginal;
final double cos = Math.cos(angleRad);
final double sin = Math.sin(angleRad);
sa = scale * cos;
sb = scale * (-sin);
sc = scale * sin;
sd = scale * cos;
// apply the scale and rotation to original and then figure out how to translate it to get it to
// the transformed point.
final ai.kognition.pilecv4j.image.geometry.Point scaledAndRotOriginal = new SimplePoint(
/* y= */ sc * p1Original.x() + sd * p1Original.y(),
/* x= */ sa * p1Original.x() + sb * p1Original.y());
tx = p1Transformed.x() - scaledAndRotOriginal.x();
ty = p1Transformed.y() - scaledAndRotOriginal.y();
}
public ScaleRotateAndTranslate(final ControlPoints points) {
this(sanitize(points), points.controlPoints[1]);
}
@Override
public Point transform(final Point point) {
final double x = point.x;
final double y = point.y;
return new Point(x * sa + y * sb + tx, x * sc + y * sd + ty);
}
private static ControlPoint sanitize(final ControlPoints points) {
if(points == null)
throw new NullPointerException("Cannot pass null controlPoints to a " + ScaleRotateAndTranslate.class.getSimpleName());
if(points.controlPoints.length != 2)
throw new IllegalArgumentException(
"Can only instantiate a " + ScaleRotateAndTranslate.class.getSimpleName() + " with exactly 2 control points. You passed "
+ points.controlPoints.length);
if(Arrays.stream(points.controlPoints).filter(p -> p == null).findAny().isPresent())
throw new NullPointerException(
"Cannot pass a ControlPoints instance with any null controlPoints to a " + ScaleRotateAndTranslate.class.getSimpleName());
return points.controlPoints[0];
}
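    // An illustrative sketch (control points invented for this example): two point
    // correspondences encoding a 90-degree counterclockwise rotation about the origin
    // with no scaling or translation.
    //
    // final ScaleRotateAndTranslate t = new ScaleRotateAndTranslate(
    //     new ControlPoint(new Point(0, 0), new Point(0, 0)),
    //     new ControlPoint(new Point(1, 0), new Point(0, 1)));
    // t.transform(new Point(0, 1)); // -> (-1, 0)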
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/geometry
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/geometry/transform/Transform2D.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image.geometry.transform;
import org.opencv.core.Point;
import net.dempsy.util.QuietCloseable;
@FunctionalInterface
public interface Transform2D extends QuietCloseable {
public Point transform(final Point point);
@Override
default public void close() {}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/houghspace/Model.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image.houghspace;
/**
* This interface represents a 'generator' of sorts for patterns
* to be searched for in the image.
*/
public interface Model {
/**
 * This method needs to be defined to return the distance from the
 * supplied pixel position to the nearest edge of the model. This
 * method will be used to generate a mask as well as being called
 * during error minimization.
*/
default public double distance(final double ox, final double oy, final double theta, final double scale) {
// a rotation matrix to transform a point counterclockwise
// around the origin (which is intuitively 'theta' in a
// standard Cartesian world where the first quadrant
// is in the upper-right hand side of the universe)
// is given by:
//
// | cos(theta) -sin(theta) |
// | |
// | sin(theta) cos(theta) |
//
// Since theta means to rotate the entire model by that angle
// (counter clockwise around the center) then, instead, we
// can simply rotate the point around the center of the model
// in the other direction (clockwise) before measuring the
// distance - that is, we will simply negate theta
final double ang = -theta;
double rx, ry;
if(ang != 0.0) {
final double sinang = Math.sin(ang);
final double cosang = Math.cos(ang);
rx = (ox * cosang) - (oy * sinang);
ry = (ox * sinang) + (oy * cosang);
} else {
rx = ox;
ry = oy;
}
return distance(rx, ry, scale);
}
public double distance(final double rx, final double ry, final double scale);
/**
* This method should return the gradient direction expected at the provided
* pixel. If the pixel isn't on an edge it should return the gradient at the
* closest edge. The gradient should be quantized to 8-bits unsigned. In other
* words, from 0 to 255. 0 is in the direction parallel to the positive x-axis.
* 180 degrees should be parallel to the x-axis in the negative direction and
* would be quantized as:
* <code>
* int q = (int) Math.round((180 * 256)/360);
* if (q == 256) q = 0;
* return (byte)(q & 0xff);
* </code>
*/
public byte gradientDirection(double ox, double oy);
/**
* This should return the extent of the model (at a scale of 1.0)
* in pixels.
*/
public double featureWidth();
/**
* This should return the extent of the model (at a scale of 1.0)
* in pixels.
*/
public double featureHeight();
/**
* The model will get edge locations passed to the distance method
* in the coordinate system of the model (that is, translated so the
* center of the coordinate system is the center of the model). Normally
* the y axis will be in terms of the 'row' that the pixel is in, referenced
* from the upper left and counting HIGHER (+ in the Y direction) as
* the position moves DOWN the image. This is reverse of the normal
* mathematical Cartesian space. If the model expect the normal Cartesian
* coordinates then it should return 'true' for the following method.
*/
public boolean flipYAxis();
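    // A hypothetical implementation sketch (not part of the library): a circle of radius R
    // centered on the model origin. The distance is measured to the nearest point on the
    // circle and the gradient at any edge point is radial.
    //
    // public class CircleModel implements Model {
    //     private final double radius;
    //     public CircleModel(final double radius) { this.radius = radius; }
    //     @Override
    //     public double distance(final double rx, final double ry, final double scale) {
    //         return Math.abs(Math.sqrt((rx * rx) + (ry * ry)) - (radius * scale));
    //     }
    //     @Override
    //     public byte gradientDirection(final double ox, final double oy) {
    //         // radial direction, quantized to 8 bits
    //         return new ai.kognition.pilecv4j.image.geometry.SimplePoint(oy, ox).quantizedDirection();
    //     }
    //     @Override
    //     public double featureWidth() { return 2.0 * radius; }
    //     @Override
    //     public double featureHeight() { return 2.0 * radius; }
    //     @Override
    //     public boolean flipYAxis() { return false; }
    // }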
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/houghspace/SegmentModel.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image.houghspace;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.stream.Collectors;
import ai.kognition.pilecv4j.image.geometry.LineSegment;
import ai.kognition.pilecv4j.image.geometry.Point;
import ai.kognition.pilecv4j.image.geometry.SimplePoint;
public class SegmentModel implements Model {
final private LineSegment[] segments;
// final private double num;
// final private double minX;
// final private double minY;
// final private double maxX;
// final private double maxY;
final private double w;
final private double h;
final private double shiftrow;
final private double shiftcol;
public SegmentModel(final Collection<LineSegment> segments) {
if(segments == null || segments.size() == 0)
throw new IllegalArgumentException();
final LineSegment[] array = segments.stream().toArray(LineSegment[]::new);
this.segments = array;
// this.num = this.segments.length;
final List<Point> points = segments.stream()
.map(l -> Arrays.asList(l.p1, l.p2))
.flatMap(ps -> ps.stream())
.collect(Collectors.toList());
double minX = Double.POSITIVE_INFINITY;
double maxX = Double.NEGATIVE_INFINITY;
double minY = Double.POSITIVE_INFINITY;
double maxY = Double.NEGATIVE_INFINITY;
for(final Point p: points) {
final double x = p.x();
if(x < minX)
minX = x;
if(x > maxX)
maxX = x;
final double y = p.y();
if(y < minY)
minY = y;
if(y > maxY)
maxY = y;
}
this.w = maxX - minX;
this.h = maxY - minY;
// move minX to -1/2 w
final double halfw = this.w / 2.0;
this.shiftcol = 0.0 - (minX - halfw);
final double halfh = this.h / 2.0;
this.shiftrow = 0.0 - (minY - halfh);
}
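    // An illustrative sketch (coordinates invented for this example): a 10x10 square outline
    // expressed as four segments, usable as a model for the hough transform.
    //
    // final SegmentModel square = new SegmentModel(Arrays.asList(
    //     new LineSegment(new SimplePoint(0, 0), new SimplePoint(0, 10)),
    //     new LineSegment(new SimplePoint(0, 10), new SimplePoint(10, 10)),
    //     new LineSegment(new SimplePoint(10, 10), new SimplePoint(10, 0)),
    //     new LineSegment(new SimplePoint(10, 0), new SimplePoint(0, 0))));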
@Override
public double distance(final double ox, final double oy, final double scale) {
final double x = shiftcol + ox;
final double y = shiftrow + oy;
double minDist = Double.POSITIVE_INFINITY;
for(final LineSegment seg: segments) {
final double dist = seg.distance(new SimplePoint(y, x));
if(dist < minDist)
minDist = dist;
}
return minDist;
}
@Override
public byte gradientDirection(final double ox, final double oy) {
final double x = shiftcol + ox;
final double y = shiftrow + oy;
return closest(x, y, 1.0).gradientDirection;
}
@Override
public double featureWidth() {
return w;
}
@Override
public double featureHeight() {
return h;
}
@Override
public boolean flipYAxis() {
return false;
}
private LineSegment closest(final double ox, final double oy, final double scale) {
double minDist = Double.POSITIVE_INFINITY;
LineSegment nearest = null;
for(final LineSegment seg: segments) {
final double dist = seg.distance(new SimplePoint(oy, ox));
if(dist < minDist) {
minDist = dist;
nearest = seg;
}
}
return nearest;
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/houghspace/Transform.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image.houghspace;
import java.awt.Color;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import ai.kognition.pilecv4j.image.CvMat;
import ai.kognition.pilecv4j.image.CvRaster.FlatBytePixelSetter;
import ai.kognition.pilecv4j.image.ImageAPI;
import ai.kognition.pilecv4j.image.Utils;
import ai.kognition.pilecv4j.image.geometry.Point;
import ai.kognition.pilecv4j.image.geometry.WeightedPoint;
import ai.kognition.pilecv4j.image.houghspace.internal.GradientDirectionMask;
import ai.kognition.pilecv4j.image.houghspace.internal.Mask;
import ai.kognition.pilecv4j.nr.Minimizer;
import ai.kognition.pilecv4j.nr.MinimizerException;
public class Transform {
static {
CvMat.initOpenCv();
}
public final double quantFactor;
public final Mask mask;
public final GradientDirectionMask gradDirMask;
public final double gradientDirSlopDeg;
public final Model model;
public Transform(final Model model, final double quantFactor, final double scale, final double gradientDirSlopDeg) {
this.quantFactor = quantFactor;
this.mask = Mask.generateMask(model, quantFactor, scale);
this.gradDirMask = GradientDirectionMask.generateGradientMask(model, model.featureWidth(), model.featureHeight(), quantFactor);
this.gradientDirSlopDeg = gradientDirSlopDeg;
this.model = model;
}
/**
* This method assumes raster is an edge detected image. If gradient raster is supplied then it will be used to
* greatly improve the results.
*/
public HoughSpace transform(final CvMat raster, final CvMat gradientRaster, final int houghThreshold) {
final int height = raster.rows();
final int width = raster.cols();
return transform(raster, gradientRaster, houghThreshold, 0, height - 1, 0, width - 1);
}
public HoughSpace transform(final CvMat mat, final CvMat gradient, final int houghThreshold,
final int rowstartp, final int rowendp, final int colstartp, final int colendp) {
final int height = mat.rows();
final int width = mat.cols();
{
final long gradientDirImage = gradient.getNativeAddressOfData();
// the size of the hough space should be quantFactor smaller
final int htheight = (int)((height) / quantFactor) + 1;
final int htwidth = (int)((width) / quantFactor) + 1;
final short[] ret = new short[htheight * htwidth];
final HoughSpaceEntryManager hsem = new HoughSpaceEntryManager(quantFactor);
final ImageAPI.AddHoughSpaceEntryContributorFunc cb = (final int orow, final int ocol, final int hsr, final int hsc,
final int hscount) -> {
try {
hsem.addHoughSpaceEntryContributor(orow, ocol, hsr, hsc, hscount);
} catch(final RuntimeException rte) {
rte.printStackTrace(System.err);
return false;
}
return true;
};
final int rowstart = (rowstartp < 0) ? 0 : rowstartp;
final int rowend = (rowendp >= height) ? height - 1 : rowendp;
final int colstart = (colstartp < 0) ? 0 : colstartp;
final int colend = (colendp >= width) ? width - 1 : colendp;
ImageAPI.pilecv4j_image_Transform_houghTransformNative(mat.getNativeAddressOfData(), width, height, gradientDirImage,
mask.mask, mask.mwidth, mask.mheight, mask.maskcr, mask.maskcc,
gradDirMask.mask, gradDirMask.mwidth, gradDirMask.mheight, gradDirMask.maskcr, gradDirMask.maskcc,
gradientDirSlopDeg, quantFactor, ret, htwidth, htheight, cb, houghThreshold,
rowstart, rowend, colstart, colend, Mask.EDGE);
hsem.entryMap.clear(); // help the gc
return new HoughSpace(ret, htwidth, htheight, quantFactor, hsem.entries);
}
}
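    // A minimal end-to-end sketch (assumptions: 'edges' and 'gradient' are CvMats produced
    // by an upstream edge detector, and all numeric parameters are illustrative).
    //
    // final Transform xform = new Transform(model, /*quantFactor=*/2.0, /*scale=*/1.0, /*gradientDirSlopDeg=*/10.0);
    // final HoughSpace hs = xform.transform(edges, gradient, /*houghThreshold=*/100);
    // final List<Cluster> clusters = xform.cluster(hs.getSortedEntries(), /*percentModelCoverage=*/0.5);
    // final List<Fit> fits = xform.bestFit(clusters, null, (byte)0x80, (byte)0xff);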
public List<Cluster> cluster(final List<HoughSpaceEntry> houghEntries, final double percentModelCoverage) {
final List<Cluster> ret = new ArrayList<Cluster>();
final double minDist = ((mask.mwidth > mask.mheight ? mask.mheight : mask.mwidth) + 1) * percentModelCoverage;
// this is going to do rather simplistic clustering.
for(final HoughSpaceEntry cur: houghEntries) {
if(ret.size() == 0)
ret.add(new Cluster(cur));
else // see if the cur belongs within a current cluster
{
boolean done = false;
for(int i = 0; i < ret.size() && !done; i++) {
final Cluster c = ret.get(i);
if(c.distance(cur) <= minDist) {
c.add(cur);
done = true;
}
}
if(!done)
ret.add(new Cluster(cur));
}
}
return ret;
}
public List<Fit> bestFit(final List<Cluster> clusters, final CvMat ti, final byte overlayPixelValueRemovedEdge,
final byte overlayPixelValueEdge) {
return bestFit(clusters, ti, overlayPixelValueRemovedEdge, overlayPixelValueEdge, null);
}
public List<Fit> bestFit(final List<Cluster> clusters, final CvMat ti, final byte overlayPixelValueRemovedEdge,
final byte overlayPixelValueEdge, final List<java.awt.Point> savedPruned) {
return clusters.stream()
.map(c -> bestFit(c, ti, overlayPixelValueRemovedEdge, overlayPixelValueEdge, savedPruned))
.collect(Collectors.toList());
}
/**
* This method will take a Cluster and use it to minimize the sum of square error
* with the error against the model that would fit the actual edge pixels. This is
* what finds the actual feature from the cluster. The passed image and overlay
* values are for bookkeeping only. A null ti means ignore book keeping.
*/
public Fit bestFit(final Cluster cluster, final CvMat ti, final byte overlayPixelValueRemovedEdge, final byte overlayPixelValueEdge)
throws MinimizerException {
return bestFit(cluster, ti, overlayPixelValueRemovedEdge, overlayPixelValueEdge, null);
}
/**
* This method will take a Cluster and use it to minimize the sum of square error with
* the error against the model that would fit the actual edge pixels. This is what
* finds the actual feature from the cluster. The passed image and overlay values
* are for bookkeeping only. A null ti means ignore book keeping.
*/
public Fit bestFit(final Cluster cluster, final CvMat ti, final byte overlayPixelValueRemovedEdge, final byte overlayPixelValueEdge,
final List<java.awt.Point> savedPruned)
throws MinimizerException {
// need to go through the raster around the cluster using the highest
// count cluster value
// find the original pixels that contributed to this
// value.
// there is a sprocket centered at e.r, e.c so we
// need to see which pixels contribute to it
final List<java.awt.Point> edgeVals = new ArrayList<java.awt.Point>();
edgeVals.addAll(cluster.getContributingEdges());
// now edgevals contains the list of all of the edge values that contributed to
// this cluster.
double[] result = null;
boolean pruning = true;
final List<java.awt.Point> pruned = new ArrayList<java.awt.Point>();
double stdDev = -1.0;
for(boolean done = false; !done;) {
pruned.clear();
final FitSumSquaresDist func = new FitSumSquaresDist(edgeVals, model);
final Minimizer m = new Minimizer(func);
final double[] params = new double[4];
params[0] = cluster.imageCol();
params[1] = cluster.imageRow();
params[2] = 0.0;
params[3] = 1.0;
/* double sumSqErr = */ m.minimize(params);
result = m.getFinalPostion();
stdDev = func.stdDev;
if(pruning) {
pruning = func.prune(func.stdDev * 3.0, result, pruned);
// This will remove one pixel at a time until the std dev
// is below some value. It's too slow.
// if (!pruning && func.stdDev > 1.0)
// {
// pruning = true;
// func.pruneFurthest(pruned);
// }
}
// if we want to write a debug image, then do it.
if(ti != null) {
    if(pruned.size() > 0) {
        for(final java.awt.Point p: pruned)
            ti.put(p.y, p.x, overlayPixelValueRemovedEdge);
    }
}
if(savedPruned != null)
    savedPruned.addAll(pruned);
if(!pruning) // if we're no longer pruning then exit
    done = true;
}
if(ti != null) {
final byte[] overlayPixelEdge = new byte[] {overlayPixelValueEdge};
for(final java.awt.Point p: edgeVals)
ti.put(p.y, p.x, overlayPixelEdge);
}
return new Fit(result[1], result[0], result[3], result[2], cluster, stdDev, edgeVals);
}
public static void drawClusters(final List<Cluster> clusters, final Mat ti, final byte color) {
final Color colorC = new Color(color, color, color);
for(final Cluster c: clusters)
Utils.drawCircle(c.imageRow(), c.imageCol(), ti, colorC);
}
public static void drawFits(final List<Transform.Fit> fits, final Mat ti, final byte color) {
final Color colorC = new Color(color, color, color);
for(final Fit c: fits)
Utils.drawCircle((int)Math.round(c.cr), (int)Math.round(c.cc), ti, colorC);
}
public static class HoughSpaceEntryManager {
private final double quantFactor;
public Map<java.awt.Point, HoughSpaceEntry> entryMap = new HashMap<java.awt.Point, HoughSpaceEntry>();
public List<HoughSpaceEntry> entries = new ArrayList<HoughSpaceEntry>();
HoughSpaceEntryManager(final double quantFactor) {
this.quantFactor = quantFactor;
}
public void addHoughSpaceEntryContributor(final int imrow, final int imcol, final int hsr, final int hsc, final int count) {
// find the entry from the hough space position
final java.awt.Point hsrc = new java.awt.Point(hsc, hsr);
HoughSpaceEntry e = entryMap.get(hsrc);
if(e == null) {
e = new HoughSpaceEntry(hsr, hsc, count, quantFactor);
entryMap.put(hsrc, e);
entries.add(e);
}
e.addContribution(imrow, imcol);
}
}
public static class HoughSpaceEntry {
public int r;
public int c;
public int count;
public int ir;
public int ic;
public double quantFactor;
public List<java.awt.Point> contributingImagePoints = new ArrayList<java.awt.Point>();
public HoughSpaceEntry(final int r, final int c, final int count, final double quantFactor) {
this.r = r;
this.c = c;
this.quantFactor = quantFactor;
this.count = count;
this.ir = (int)((this.r + 1) * this.quantFactor);
this.ic = (int)((this.c + 1) * this.quantFactor);
}
public void addContribution(final int imr, final int imc) {
contributingImagePoints.add(new java.awt.Point(imc, imr));
}
@Override
public boolean equals(final Object o) {
    if(!(o instanceof HoughSpaceEntry))
        return false;
    final HoughSpaceEntry e = (HoughSpaceEntry)o;
    return(e.r == r && e.c == c && e.count == count);
}
@Override
public int hashCode() {
return Integer.hashCode(r) +
Integer.hashCode(c) +
Integer.hashCode(count);
}
@Override
public String toString() {
return "(" + r + "," + c + "," + count + ")->" + contributingImagePoints;
}
public static class HSEComparator implements Comparator<HoughSpaceEntry> {
@Override
public int compare(final HoughSpaceEntry o1, final HoughSpaceEntry o2) {
// reverse order
return o2.count - o1.count;
}
}
}
public static class HoughSpace {
public HoughSpace(final short[] houghSpace, final int width, final int height,
final double quantFactor, final List<HoughSpaceEntry> backMapEntries) {
this.houghSpace = houghSpace;
this.hswidth = width;
this.hsheight = height;
this.quantFactor = quantFactor;
this.backMapEntries = backMapEntries;
}
public short[] houghSpace;
public int hswidth;
public int hsheight;
public double quantFactor;
public List<HoughSpaceEntry> backMapEntries;
/**
* @return A CvMat of the Hough space for the model. The caller owns the CvMat
*/
public CvMat createTransformCvMat() {
final int width = hswidth;
final int height = hsheight;
try(final CvMat gradRaster = new CvMat(height, width, CvType.CV_8UC1);) {
int max = 0;
for(int i = 0; i < houghSpace.length; i++) {
final int count = houghSpace[i];
if(max < count)
max = count;
}
final byte[] pixel = new byte[1];
final double finalMax = max == 0 ? 1.0 : max; // guard against divide-by-zero on an empty hough space
gradRaster.rasterAp(raster -> raster.apply((FlatBytePixelSetter)pos -> {
int intVal = (int)(((houghSpace[pos]) / finalMax) * 255.0);
if(intVal < 0)
intVal = 0;
else if(intVal > 255)
intVal = 255;
pixel[0] = (byte)intVal;
return pixel;
}));
return gradRaster.returnMe();
}
}
public List<HoughSpaceEntry> getSortedEntries() {
final List<HoughSpaceEntry> sortedSet = new LinkedList<HoughSpaceEntry>();
sortedSet.addAll(backMapEntries);
Collections.sort(sortedSet, new HoughSpaceEntry.HSEComparator());
return sortedSet;
}
/**
* This method does not do much any more. Now it simply writes the inverse transform (that is,
* the edge pixels identified by the transform) back into the image for debugging purposes.
*/
public List<HoughSpaceEntry> inverseTransform(final CvMat ti, final byte overlayPixelValue,
final byte peakCircleColorValue) {
final List<HoughSpaceEntry> sortedSet = getSortedEntries();
final Color peakCircleColor = new Color(peakCircleColorValue, peakCircleColorValue, peakCircleColorValue);
if(ti != null) {
System.out.println("Constructing reverse hough transform image.");
final byte[] overlayPixel = new byte[] {overlayPixelValue};
for(final HoughSpaceEntry e: sortedSet) {
final int eir = e.ir;
final int eic = e.ic;
Utils.drawCircle(eir, eic, ti, peakCircleColor);
ti.rasterAp(raster -> {
for(final java.awt.Point p: e.contributingImagePoints)
raster.set(p.y, p.x, overlayPixel);
});
}
}
return sortedSet;
}
}
public static class Cluster implements WeightedPoint {
private double ccr;
private double ccc;
private final List<HoughSpaceEntry> choughEntries;
private boolean cisSorted = false;
private double cquantFactor;
// private int totalcount = -1;
private List<java.awt.Point> edgeVals = null;
public Cluster() {
choughEntries = new ArrayList<HoughSpaceEntry>();
}
public Cluster(final HoughSpaceEntry e) {
choughEntries = new ArrayList<HoughSpaceEntry>();
add(e);
}
public int totalCount() {
return getContributingEdges().size();
}
public int imageRow() {
return (int)((ccr + 1.0) * cquantFactor);
}
public int imageCol() {
return (int)((ccc + 1.0) * cquantFactor);
}
public double row() {
return ccr;
}
public double col() {
return ccc;
}
public void add(final HoughSpaceEntry e) {
cisSorted = false;
if(choughEntries.size() == 0) {
ccr = (e.r);
ccc = (e.c);
choughEntries.add(e);
cquantFactor = e.quantFactor;
} else {
final double n = (choughEntries.size());
// find the centroid by averaging ...
// if ccr,ccc is already an average
// of the current houghEntries
// then we can do an incremental
// average.
ccr = ((ccr * n) + (e.r)) / (n + 1.0);
ccc = ((ccc * n) + (e.c)) / (n + 1.0);
choughEntries.add(e);
}
}
public double distance(final HoughSpaceEntry e) {
final double dr = ccr - (e.r);
final double dc = ccc - (e.c);
return Math.sqrt((dr * dr) + (dc * dc));
}
@Override
public String toString() {
return "(" + imageRow() + "," + imageCol() + ")";
}
public int getMaxCount() {
sortCheck();
return choughEntries.get(0).count;
}
public List<HoughSpaceEntry> getHoughEntries() {
sortCheck();
return choughEntries;
}
public synchronized List<java.awt.Point> getContributingEdges() {
if(edgeVals == null) {
edgeVals = new ArrayList<java.awt.Point>();
final List<HoughSpaceEntry> houghEntries = getHoughEntries();
// we want to accumulate all of the edge vals that went
// into this cluster
for(int hei = 0; hei < houghEntries.size(); hei++) {
final HoughSpaceEntry e = houghEntries.get(hei);
for(final java.awt.Point p: e.contributingImagePoints) {
if(!edgeVals.contains(p))
edgeVals.add(new java.awt.Point(p.x, p.y));
}
}
}
return Collections.unmodifiableList(edgeVals);
}
private void sortCheck() {
if(!cisSorted) {
Collections.sort(choughEntries, new HoughSpaceEntry.HSEComparator());
cisSorted = true;
}
}
// Point interface
@Override
public double getRow() {
return imageRow();
}
@Override
public double getCol() {
return imageCol();
}
@Override
public double getWeight() {
return totalCount();
}
}
public static class FitSumSquaresDist implements Minimizer.Func {
private final List<java.awt.Point> edgeVals;
private final Model sm;
public java.awt.Point furthest;
public double maxdist;
public double stdDev;
private final boolean flipYAxis;
public FitSumSquaresDist(final List<java.awt.Point> edgeVals, final Model sm) {
this.edgeVals = edgeVals;
this.sm = sm;
this.flipYAxis = sm.flipYAxis();
}
public boolean prune(final double maxDist, final double[] x, final List<java.awt.Point> pruned) {
boolean ret = false;
final double cx = x[0];
final double cy = x[1];
for(int i = edgeVals.size() - 1; i >= 0; i--) {
final java.awt.Point p = edgeVals.get(i);
final double vx = p.x - cx;
final double vy = p.y - cy;
final double dist = sm.distance(vx, vy, x[2], x[3]);
if(dist >= maxDist) {
pruned.add(edgeVals.remove(i));
ret = true;
}
}
return ret;
}
public void pruneFurthest(final List<java.awt.Point> pruned) {
if(furthest != null) {
boolean done = false;
for(int i = 0; i < edgeVals.size() && !done; i++) {
if(furthest == edgeVals.get(i)) {
edgeVals.remove(i);
pruned.add(furthest);
System.out.print(".");
done = true;
}
}
}
}
@Override
public double func(final double[] x) {
final double cx = x[0];
final double cy = x[1];
maxdist = -1.0;
double ret = 0.0;
for(int i = 0; i < edgeVals.size(); i++) {
final java.awt.Point p = edgeVals.get(i);
// now, if the sprocket is centered at cx,cy -
// we need to translate the point p into the sprocket
// coords
final double vx = p.x - cx;
double vy = p.y - cy;
if(flipYAxis)
vy = -vy;
final double dist = sm.distance(vx, vy, x[2], x[3]);
if(maxdist < dist) {
maxdist = dist;
furthest = p;
}
ret += (dist * dist);
}
stdDev = Math.sqrt(ret / edgeVals.size());
return ret;
}
}
public static class Fit implements Point {
public final double cr; // center of sprocket instance row
public final double cc; // center of sprocket instance col
public final double rotation; // orientation of the sprocket instance
public final double scale; // scale of the sprocket
public final Cluster sourceCluster;
public final double stdDev;
public final List<java.awt.Point> edgeVals;
// public int rank;
public Fit(final double cr, final double cc, final double scale, final double rotation,
final Cluster sourceCluster, final double stdDev, final List<java.awt.Point> edgeVals) {
this.cr = cr;
this.cc = cc;
this.rotation = rotation;
this.scale = scale;
this.sourceCluster = sourceCluster;
this.stdDev = stdDev;
this.edgeVals = edgeVals;
}
@Override
public String toString() {
return "[(rc)=(" + cr + "," + cc + ") * " + scale + " ang(deg)=" + (rotation * (180.0 / Math.PI)) + "] sd=" +
stdDev + " " + edgeVals.size();
}
@Override
public double getRow() {
return cr;
}
@Override
public double getCol() {
return cc;
}
public int imageRow() {
return (int)(cr + 0.5);
}
public int imageCol() {
return (int)(cc + 0.5);
}
// @Override
// public double getWeight() {
// return rank;
// }
public static final Comparator<Transform.Fit> stdDeviationOrder = (o1, o2) -> o1.stdDev > o2.stdDev ? 1 : ((o1.stdDev == o2.stdDev) ? 0 : -1);
public static final Comparator<Transform.Fit> edgeCountOrder = (o1, o2) -> o2.edgeVals.size() - o1.edgeVals.size();
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/houghspace
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/houghspace/internal/GradientDirectionMask.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image.houghspace.internal;
import org.opencv.core.CvType;
import ai.kognition.pilecv4j.image.CvMat;
import ai.kognition.pilecv4j.image.CvRaster.BytePixelSetter;
import ai.kognition.pilecv4j.image.houghspace.Model;
/**
* A mask underpinned by an array of bytes that's used to hold a raster of gradient direction indications.
*/
public class GradientDirectionMask {
public int mwidth;
public int mheight;
public int maskcr;
public int maskcc;
public byte[] mask;
/**
* Instantiate a mask of the given dimensions assuming that the reference point is the center of the mask.
*/
public GradientDirectionMask(int mwidth, int mheight) {
// mwidth and mheight need to be odd
// so that the center falls exactly
// on a pixel.
mwidth += (((mwidth & 0x01) == 0) ? 1 : 0);
mheight += (((mheight & 0x01) == 0) ? 1 : 0);
this.mwidth = mwidth;
this.mheight = mheight;
this.mask = new byte[mwidth * mheight];
this.maskcr = (this.mheight + 1) / 2 - 1;
this.maskcc = (this.mwidth + 1) / 2 - 1;
}
/**
* Generate a byte image that contains a view of the mask.
*
* @return A CvMat with the byte image of the mask. The caller owns the CvMat.
*/
public CvMat getMaskRaster() {
try(final CvMat raster = new CvMat(mheight, mwidth, CvType.CV_8UC1);) {
final byte[] pixel = new byte[1];
raster.rasterAp(r -> r.apply((BytePixelSetter)(row, col) -> {
final short gradDeg = get(row, col);
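// map degrees [0, 360) onto a byte [0, 256): e.g. 0 -> 0, 90 -> 64, 180 -> 128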
int gradByte = (int)Math.round((gradDeg * 256.0) / 360.0);
if(gradByte >= 256)
gradByte = 0;
pixel[0] = (byte)(gradByte & 0xff);
return pixel;
}));
return raster.returnMe();
}
}
public static GradientDirectionMask generateGradientMask(final Model m, final double w, final double h, final double quantFactor) {
final GradientDirectionMask gradDirMask = new GradientDirectionMask((int)((w / quantFactor) + 1.5), (int)((h / quantFactor) + 1.5));
// now set the mask by sweeping the center
final double x0 = gradDirMask.maskcc; // x0,y0 is the
final double y0 = gradDirMask.maskcr; // origin of
// the mask
for(int r = 0; r < gradDirMask.mheight; r++) {
for(int c = 0; c < gradDirMask.mwidth; c++) {
// is the point r,c a possible
// center if the center of the
// mask is the point in question.
// to figure this out, translate
// r,c to the center.
// but first, find out what r,c is
// in the coordinate system of the
// mask with the origin centered.
final double y1 = gradDirMask.mheight - r - 1 - y0;
final double x1 = (c) - x0;
// now, if x1,y1 is the center
// of the sprocket hole, will
// the origin be on the sprocket?
// That means we need to check
// -x1,-y1 since that is where
// the origin will be pushed to
// upon translating x1,y1 to the
// origin.
gradDirMask.set(r, c, m.gradientDirection(-x1 * quantFactor, -y1 * quantFactor));
}
}
return gradDirMask;
}
/**
* Set the value of the mask at a location to the given quantized gradient-direction value. Entries in
* the mask are accessed by row and column (not x,y).
*/
private void set(final int r, final int c, final byte v) {
mask[(r * mwidth) + c] = v;
}
/**
* Get the gradient-direction value of the mask at a location. Entries in the mask are
* accessed by row and column (not x,y).
*/
private short get(final int r, final int c) {
return mask[(r * mwidth) + c];
}
}
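// Illustrative usage sketch, assuming some Model implementation `model` (hypothetical
// here) with a feature of `w` x `h` pixels and an arbitrary quantFactor of 1.0:
//
//   final GradientDirectionMask gdm =
//       GradientDirectionMask.generateGradientMask(model, w, h, 1.0);
//   try(final CvMat view = gdm.getMaskRaster()) {
//       // 8-bit raster where pixel values map [0, 360) degrees onto [0, 256)
//   }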
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/houghspace
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/houghspace/internal/Mask.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image.houghspace.internal;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import ai.kognition.pilecv4j.image.houghspace.Model;
/**
* <p>
* A mask underpinned by an array of bytes, each containing an indication as to whether or not
* that position is the center of the model, if the center of the mask is on an EDGE in the
* original image.
* </p>
*
* <p>
* What does that mean? If you take this mask and place it centered at an edge in the original
* image, then everywhere that this mask reads NON-zero is potentially a "center" of the model
* in the original image.
* </p>
*/
public class Mask {
public static byte EDGE = (byte)-1;
public static byte NOEDGE = (byte)0;
public final int mwidth;
public final int mheight;
/**
* Mask center, row
*/
public final int maskcr;
/**
* Mask center, column
*/
public final int maskcc;
/**
* monochrome image of the mask
*/
public final byte[] mask;
/**
* Instantiate a mask of the given dimensions assuming
* that the reference point is the center of the mask.
*/
private Mask(int mwidth, int mheight) {
// mwidth and mheight need to be odd
// so that the center falls exactly
// on a pixel.
mwidth += (((mwidth & 0x01) == 0) ? 1 : 0);
mheight += (((mheight & 0x01) == 0) ? 1 : 0);
this.mwidth = mwidth;
this.mheight = mheight;
this.mask = new byte[mwidth * mheight];
this.maskcr = (this.mheight + 1) / 2 - 1;
this.maskcc = (this.mwidth + 1) / 2 - 1;
}
/**
* Generate an OpenCV Mat image that contains a view of the mask.
*/
public Mat getMaskImage() {
final Mat m = new Mat(mheight, mwidth, CvType.CV_8UC1);
m.put(0, 0, mask);
return m;
}
/**
* Set the value of the mask at a location to
* the given value. The value should be either
* EDGE or NOEDGE. Entries in the mask are
* accessed by row and column (not x,y).
*/
private void set(final int r, final int c, final byte v) {
mask[(r * mwidth) + c] = v;
}
public static Mask generateMask(final Model m, final double quantFactor, final double scaleModel) {
final double w = m.featureWidth() * scaleModel;
final double h = m.featureHeight() * scaleModel;
// mask is 1 pixel wider than w and higher than h
// round(w/quant + 1) = (int)((w/quant) + 1.5)
final Mask mask = new Mask((int)((w / quantFactor) + 1.5), (int)((h / quantFactor) + 1.5));
// now set the mask by sweeping the center
final double x0 = mask.maskcc; // x0,y0 is the
final double y0 = mask.maskcr; // origin of
// the mask
for(int r = 0; r < mask.mheight; r++) {
for(int c = 0; c < mask.mwidth; c++) {
// is the point r,c a possible model
// center if an edge appears at the
// center of the mask?
// to figure this out, translate
// r,c to the center.
// but first, find out what r,c is
// in the coordinate system of the
// mask with the origin centered.
final double y1 = mask.mheight - r - 1 - y0;
final double x1 = (c) - x0;
// now, if x1,y1 is the center
// of the sprocket hole, will
// the origin be on the sprocket?
// That means we need to check
// -x1,-y1 since that is where
// the origin will be pushed to
// upon translating x1,y1 to the
// origin.
final double dist = m.distance(-(x1 * quantFactor), -(y1 * quantFactor), 0.0, 1.0);
// if we are within a 1/2 pixel of the
// theoretical sprocket then we're on it.
if(dist <= quantFactor / 2.0)
mask.set(r, c, EDGE);
else
mask.set(r, c, NOEDGE);
}
}
return mask;
}
public static void setEdgePixVals(final byte edgePixVal, final byte noedgePixVal) {
EDGE = edgePixVal;
NOEDGE = noedgePixVal;
}
}
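// Illustrative usage sketch, assuming some Model implementation `model` (hypothetical
// here); the quantFactor and scaleModel values are arbitrary:
//
//   final Mask mask = Mask.generateMask(model, 1.0 /* quantFactor */, 1.0 /* scaleModel */);
//   final Mat view = mask.getMaskImage(); // CV_8UC1 raster of EDGE/NOEDGE values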
|
0
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image
|
java-sources/ai/kognition/pilecv4j/lib-image/1.0/ai/kognition/pilecv4j/image/mjpeg/MJPEGWriter.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.image.mjpeg;
import java.io.File;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.opencv.core.Mat;
import org.opencv.imgcodecs.Imgcodecs;
import net.dempsy.util.CommandLineParser;
import ai.kognition.pilecv4j.image.CvMat;
import ai.kognition.pilecv4j.image.ImageAPI;
public class MJPEGWriter {
static {
CvMat.initOpenCv();
}
static public File pdir = null;
static public String avifile = "out.avi";
public static int avifps = 16;
public static void main(final String[] args) {
if(!commandLine(args))
System.exit(-1);
// assume args are file names
initializeMJPEG(avifile);
boolean working = true;
final File[] files = pdir.listFiles(
f -> {
final String fp = f.getAbsolutePath();
return f.isFile() && (fp.endsWith(".jpeg") || fp.endsWith(".JPEG") ||
fp.endsWith("jpg") || fp.endsWith("JPG"));
});
final List<File> fileList = Arrays.asList(files);
Collections.sort(fileList, (o1, o2) -> o1.getName().compareTo(o2.getName()));
for(final File f: fileList)
working = appendFile(f.getAbsolutePath());
if(working)
close(avifps);
else
System.out.println("Failed to create AVI - Who knows why!");
cleanUp();
}
public static boolean initializeMJPEG(final String filename) {
return ImageAPI.pilecv4j_image_mjpeg_initializeMJPEG(filename) == 0 ? false : true;
}
public static boolean doappendFile(final String filename, final int width, final int height) {
return ImageAPI.pilecv4j_image_mjpeg_doappendFile(filename, width, height) == 0 ? false : true;
}
public static boolean close(final int fps) {
return ImageAPI.pilecv4j_image_mjpeg_close(fps) == 0 ? false : true;
}
public static void cleanUp() {
ImageAPI.pilecv4j_image_mjpeg_cleanUp();
}
private static void usage() {
System.out.println("usage: java [javaargs] " + MJPEGWriter.class.getName() + " -pdir parentDir [-avifile out.avi] [-avifps 16]");
}
public static boolean commandLine(final String[] args) {
final CommandLineParser cl = new CommandLineParser(args);
// see if we are asking for help
if(cl.getProperty("help") != null ||
cl.getProperty("-help") != null) {
usage();
return false;
}
final String parentDir = cl.getProperty("pdir");
if(parentDir == null) {
usage();
return false;
}
pdir = new File(parentDir);
if(!pdir.isDirectory()) {
System.out.println("\"" + parentDir + "\" is not a directory.");
usage();
return false;
}
String tmps = cl.getProperty("avifile");
if(tmps != null)
avifile = tmps;
tmps = cl.getProperty("avifps");
if(tmps != null)
avifps = Integer.parseInt(tmps);
return true;
}
static private int width = -1;
static private int height = -1;
static public boolean appendFile(final String filename) {
if(height == -1) {
final Mat origImage = Imgcodecs.imread(filename);
width = origImage.cols();
height = origImage.rows();
}
return doappendFile(filename, width, height);
}
}
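// Illustrative invocation mirroring usage() above (the directory path is a placeholder):
//
//   java ai.kognition.pilecv4j.image.mjpeg.MJPEGWriter -pdir /path/to/jpegs -avifile out.avi -avifps 16
//
// The directory is scanned for .jpg/.jpeg files, which are appended in lexicographic
// name order before the AVI is closed out at the requested frame rate.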
|
0
|
java-sources/ai/kognition/pilecv4j/lib-ipc/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-ipc/1.0/ai/kognition/pilecv4j/ipc/ErrorHandling.java
|
package ai.kognition.pilecv4j.ipc;
import com.sun.jna.Pointer;
import net.dempsy.util.QuietCloseable;
import ai.kognition.pilecv4j.ipc.internal.IpcApi;
public class ErrorHandling {
public static final long OK = IpcApi.pcv4j_ipc_errHandling_getOK();
public static final long EAGAIN = IpcApi.pcv4j_ipc_errHandling_getEAGAIN();
public static long throwIfNecessary(final long code, final boolean throwOnEAGAIN) {
if(code != OK && (throwOnEAGAIN || code != EAGAIN)) {
final Pointer errMsg = IpcApi.pcv4j_ipc_errHandling_errString(code);
try(QuietCloseable qc = () -> IpcApi.pcv4j_ipc_errHandling_freeErrString(errMsg);) {
if(Pointer.nativeValue(errMsg) == 0)
throw new IpcException("Bad Error Code", code);
else
throw new IpcException(errMsg.getString(0L), code);
}
}
return code;
}
}
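// Illustrative pattern (`someNativeCall` is a hypothetical placeholder): native calls
// in this library return a status code that is routed through throwIfNecessary. With
// throwOnEAGAIN == false, EAGAIN is handed back to the caller rather than thrown:
//
//   final long rc = ErrorHandling.throwIfNecessary(someNativeCall(), false);
//   if(rc == ErrorHandling.EAGAIN) {
//       // resource busy; retry later
//   }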
|
0
|
java-sources/ai/kognition/pilecv4j/lib-ipc/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-ipc/1.0/ai/kognition/pilecv4j/ipc/IpcException.java
|
package ai.kognition.pilecv4j.ipc;
public class IpcException extends RuntimeException {
private static final long serialVersionUID = 1L;
/**
* This will hold the value of the underlying native error code if
* this exception was generated from a native library return code.
* Otherwise it will be set to {@link #NOT_NATIVE_ERROR_CODE}
*/
public final long nativeErrCode;
/**
* A {@link #nativeErrCode} set to this means the error wasn't generated
* from the native library.
*/
public static final long NOT_NATIVE_ERROR_CODE = -1;
IpcException(final String errStr, final long errCode) {
super(String.format("Error(0x%016x): %s", errCode, errStr));
this.nativeErrCode = errCode;
}
IpcException(final String msg) {
super(msg);
this.nativeErrCode = NOT_NATIVE_ERROR_CODE;
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-ipc/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-ipc/1.0/ai/kognition/pilecv4j/ipc/ShmQueue.java
|
package ai.kognition.pilecv4j.ipc;
import static ai.kognition.pilecv4j.ipc.ErrorHandling.EAGAIN;
import static ai.kognition.pilecv4j.ipc.ErrorHandling.throwIfNecessary;
import static net.dempsy.util.Functional.uncheck;
import java.nio.BufferOverflowException;
import java.nio.ByteBuffer;
import java.security.MessageDigest;
import java.util.function.Consumer;
import com.sun.jna.Pointer;
import com.sun.jna.ptr.IntByReference;
import com.sun.jna.ptr.LongByReference;
import com.sun.jna.ptr.PointerByReference;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import net.dempsy.util.QuietCloseable;
import ai.kognition.pilecv4j.image.CvMat;
import ai.kognition.pilecv4j.image.ImageAPI;
import ai.kognition.pilecv4j.ipc.internal.IpcApi;
/**
* <p>
* The class can be used to do IPC through shared memory.
* </p>
*
* <p>
* It's specifically optimized for a single writer and a single reader
* and so shouldn't be used in anything other than a one-to-one configuration, though
* it can run <em>duplex</em> for request-response cases.
* </p>
*
* <p>
* There are multiple underlying implementations. Some of these implementations
* identify a unique shared memory segment using a string (posix, windows), and some
* using an integer (system V). This is why the constructor takes both a {@code String},
* as well as an {@code int}. The underlying implementation may use one or the other
* but not typically both. Therefore, when using a ShmQueue, you should make sure
* that either both the {@code String} and the {@code int} uniquely identify the
* shared memory segment you want OR choose the native implementation that uses
* the one you care about.
* </p>
*
* <p>
* <b>NOTE:</b> This functionality is built on POSIX shared memory so if
* that's not available on your platform then it won't compile there.
* </p>
*
* <p>
* <b>NOTE:</b> This class is NOT THREAD SAFE. If you want to use this across threads then
* instantiate another one on the same shared memory segment or manage your own
* access.
* </p>
*
* <p>
* <b>NOTE:</b> This class may be (and likely is) compiled with locking disabled
* and so unless you know better, you should not rely on anything that locks
* to prevent access from the sibling process sharing data.
* </p>
*/
public class ShmQueue implements QuietCloseable {
private static final Logger LOGGER = LoggerFactory.getLogger(ShmQueue.class);
public final long nativeRef;
public final String name;
/**
* When passed as a timeout for acquiring a lock, it means don't wait at all
* and return immediately if the lock can't be acquired.
*/
public static final long TRY_LOCK = 0;
/**
* When passed in as a timeout for acquiring a lock, it means wait forever.
*/
public static final long INFINITE = -1;
private long size = -1; // until it's open this is unset
private boolean isClosed = false;
// Oddly, because we spin on these, many get instantiated stressing the memory/gc. So we're going to reuse the
// same one over and over. This class is not thread safe.
private final IntByReference intResult = new IntByReference();
private final LongByReference longResult = new LongByReference();
private final PointerByReference ptrResult = new PointerByReference();
private ByteBuffer reusedBb;
private final int[] rowCol = new int[2];
/**
* <p>
* Construct a ShmQueue uniquely identified by BOTH the {@code name} AND the {@code key}.
* </p>
*
* <p>
* As described on the class, there are multiple underlying implementations. Some of these
* implementations identify a unique shared memory segment using a string (posix, windows),
* and some using an integer (system V). This is why the constructor takes both. The underlying
* implementation may use one or the other but not typically both. Therefore, when using a
* ShmQueue, you should make sure that either both the {@code String} and the {@code int}
* uniquely identify the shared memory segment you want OR choose the native implementation
* that uses the one you care about.
* </p>
*
*/
public ShmQueue(final String name, final int key) {
this.name = name;
nativeRef = IpcApi.pilecv4j_ipc_create_shmQueue(name, key);
}
/**
* Generate the {@code int} key by using the MD5 hash of the name.
*/
public static ShmQueue createUsingMd5Hash(final String name) {
final MessageDigest md = uncheck(() -> MessageDigest.getInstance("MD5"));
md.update(name.getBytes());
final int key = ByteBuffer.wrap(md.digest()).getInt();
return new ShmQueue(name, key);
}
/**
* Cleanup the native resources associated with this ShmQueue. If this is the owner
* of the queue then the shared memory segment will also be closed.
*/
@Override
public void close() {
if(isClosed)
throw new IllegalStateException("Double close on " + this);
IpcApi.pilecv4j_ipc_destroy_shmQueue(nativeRef);
isClosed = true;
}
/**
* Create the underlying shared memory space of the given {@code size}. If {@code owner} is
* {@code true} then closing this ShmQueue will also close the underlying shared memory
* segment.
*
* @param size is the total size in bytes of the shared memory segment.
* @param owner is whether or not this ShmQueue is the owner and therefore will close the underlying
* shared memory segment when the ShmQueue is closed.
* @param numMailboxes is how many <em>posting flags</em> to create. Usually this is
* 1 for simplex communication and 2 for duplex communication.
*/
public void create(final long size, final boolean owner, final int numMailboxes) {
throwIfNecessary(IpcApi.pilecv4j_ipc_shmQueue_create(nativeRef, size, owner ? 1 : 0, numMailboxes), true);
this.size = size;
this.reusedBb = getBuffer(0);
}
/**
* Create the underlying shared memory space of the given {@code size}. If {@code owner} is
* {@code true} then closing this ShmQueue will also close the underlying shared memory
* segment. This is equivalent to calling {@code create(size, owner, 1)}
*
* @param size is the total size in bytes of the shared memory segment.
* @param owner is whether or not this ShmQueue is the owner and therefore will close the underlying
* shared memory segment when the ShmQueue is closed.
*/
public void create(final long size, final boolean owner) {
create(size, owner, 1);
}
/**
* Open an existing shared memory segment from this process. If {@code owner} is
* {@code true} then closing this ShmQueue will also close the underlying shared memory
* segment.
*
* @param owner is whether or not this ShmQueue is the owner and therefore will close the underlying
* shared memory segment when the ShmQueue is closed.
* @return true if the segment is opened. If the named shared memory segment doesn't exist
* then return false.
*/
public boolean open(final boolean owner) {
final long result = throwIfNecessary(IpcApi.pilecv4j_ipc_shmQueue_open(nativeRef, owner ? 1 : 0), false);
if(result == EAGAIN)
return false;
size = getSize();
this.reusedBb = getBuffer(0);
return true;
}
/**
* This will reset and clear the underlying shared memory segment and set all mailboxes
* to empty.
*/
public void reset() {
throwIfNecessary(IpcApi.pilecv4j_ipc_shmQueue_reset(nativeRef), true);
}
/**
* Is the shared memory segment currently opened by this {@link ShmQueue}
*/
public boolean isOpen() {
throwIfNecessary(IpcApi.pilecv4j_ipc_shmQueue_isOpen(nativeRef, intResult), true);
return intResult.getValue() == 0 ? false : true;
}
/**
* Is the shared memory segment currently owned by this {@link ShmQueue}
*/
public boolean isOwner() {
throwIfNecessary(IpcApi.pilecv4j_ipc_shmQueue_isOwner(nativeRef, intResult), true);
return intResult.getValue() == 0 ? false : true;
}
/**
* This will delete the shared memory segment. Normally this is done automatically when closed
* but it can be done explicitly. The shared memory segment will still be usable until it's
* closed. However, it will not be discoverable from another process and so can't be re-{@code open}ed.
* If another process {@code create}s one with the same name, it will not be this one.
*/
public void unlink() {
throwIfNecessary(IpcApi.pilecv4j_ipc_shmQueue_unlink(nativeRef), true);
}
/**
* Obtain access to the shared memory segment locking it if possible (the
* native code would have needed to be compiled with -DLOCKING enabled). The
* method will wait up to {@code timeoutMillis} milliseconds to obtain the lock (if
* locking is enabled in the native code) and will return false if it cannot. If it
* gets the lock (or if the native code is compiled without locking) then it will
* pass a ByteBuffer to the lambda that can then write or read the segment.
*
* @param bbconsumer is the lambda that will be passed a ByteBuffer with access to the
* shared memory if access is obtained.
* @param timeoutMillis is the time to wait in milliseconds to get the lock (if the native
* code is actually compiled for locking).
* @return true if the access was obtained and the lambda was passed the ByteBuffer access
* to the shared memory segment.
*/
public boolean access(final Consumer<ByteBuffer> bbconsumer, final long timeoutMillis) {
final boolean gotLock = lock(timeoutMillis);
if(gotLock) {
try(QuietCloseable qc = () -> unlock();) {
reusedBb.rewind();
bbconsumer.accept(reusedBb);
return true;
}
} else
return false;
}
/**
* Obtain access to the shared memory segment locking it if possible (the
* native code would have needed to be compiled with -DLOCKING enabled). The
* method will wait forever to obtain the lock (if
* locking is enabled in the native code) and will return false if it cannot. If it
* gets the lock (or if the native code is compiled without locking) then it will
* pass a ByteBuffer to the lambda that can then write or read the segment. This
* is equivalent to calling {@code access(bbconsumer, INFINITE)}
*
* @param bbconsumer is the lambda that will be passed a ByteBuffer with access to the
* shared memory if access is obtained.
* @return true if the access was obtained and the lambda was passed the ByteBuffer access
* to the shared memory segment.
*/
public boolean access(final Consumer<ByteBuffer> bbconsumer) {
return access(bbconsumer, INFINITE);
}
/**
* Obtain access to the shared memory segment locking it if possible (the
* native code would have needed to be compiled with -DLOCKING enabled). The
* method will try to obtain the lock once without waiting (if
* locking is enabled in the native code) and will return false if it cannot. If it
* gets the lock (or if the native code is compiled without locking) then it will
* pass a ByteBuffer to the lambda that can then write or read the segment. This
* is equivalent to calling {@code access(bbconsumer, TRY_LOCK)}
*
* @param bbconsumer is the lambda that will be passed a ByteBuffer with access to the
* shared memory if access is obtained.
* @return true if the access was obtained and the lambda was passed the ByteBuffer access
* to the shared memory segment.
*/
public boolean tryAccess(final Consumer<ByteBuffer> bbconsumer) {
return access(bbconsumer, TRY_LOCK);
}
/**
* Mark the data in the shared memory to be read by another process. This
* is equivalent to calling {@code post(0)} and assumes simplex communication.
*/
public void post() {
post(0);
}
/**
* Mark a particular mailbox as ready for the data in the shared memory to be read
* by another process
*/
public void post(final int mailbox) {
throwIfNecessary(IpcApi.pilecv4j_ipc_shmQueue_postMessage(nativeRef, mailbox), true);
}
/**
* Mark the data in the shared memory as having been read and so another process can write
* the next message. This is equivalent to calling {@code post(0)} and assumes simplex communication.
*/
public void unpost() {
unpost(0);
}
/**
* Mark the data in the shared memory as having been read through the given mailbox and so another process can write
* the next message.
*/
public void unpost(final int mailbox) {
throwIfNecessary(IpcApi.pilecv4j_ipc_shmQueue_unpostMessage(nativeRef, mailbox), true);
}
/**
* A {@code CvMat} that represents a shared memory segment or a portion of a shared memory segment
* that automatically manages the underlying locking mechanism (if the native code is compiled
* with -DLOCKING). If locking is enabled, the lock will be held as long as this mat hasn't been
* closed.
*/
public class ShmQueueCvMat extends CvMat {
boolean gotLock = false;
private ShmQueueCvMat(final long nativeRef) {
super(nativeRef);
}
@Override
protected void doNativeDelete() {
if(gotLock)
unlock();
}
/**
* Mark a particular mailbox as ready for the data in the shared memory to be read
* by another process. This is a convenience method for {@code ShmQueue.this.post(mailbox)}
*/
public void post(final int mailbox) {
ShmQueue.this.post(mailbox);
}
/**
* Mark the data in the shared memory to be read by another process. This
* is equivalent to calling {@code post(0)} and assumes simplex communication.
*/
public void post() {
post(0);
}
/**
* Mark the data in the shared memory as having been read through the given mailbox and so another process can write
* the next message. This is a convenience method for {@code ShmQueue.this.unpost(mailbox)}
*/
public void unpost(final int mailbox) {
ShmQueue.this.unpost(mailbox);
}
/**
* Mark the data in the shared memory as having been read and so another process can write
* the next message. This is equivalent to calling {@code post(0)} and assumes simplex communication.
*/
public void unpost() {
unpost(0);
}
}
/**
* This checks to see if a message has been posted to the given mailbox.
*/
public boolean isMessageAvailable(final int mailbox) {
throwIfNecessary(IpcApi.pilecv4j_ipc_shmQueue_isMessageAvailable(nativeRef, intResult, mailbox), true);
return intResult.getValue() == 0 ? false : true;
}
/**
* In simplex mode, when there's only one mailbox, this checks to see if a message has been
* posted to the mailbox. It's equivalent to {@code isMessageAvailable(0)}
*/
public boolean isMessageAvailable() {
return isMessageAvailable(0);
}
/**
* This checks to see if there's room to post a message to the given mailbox.
*/
public boolean canWriteMessage(final int mailbox) {
throwIfNecessary(IpcApi.pilecv4j_ipc_shmQueue_canWriteMessage(nativeRef, intResult, mailbox), true);
return intResult.getValue() == 0 ? false : true;
}
/**
* In simplex mode, when there's only one mailbox, this checks to see if there's
* room to post a message to the mailbox. It's equivalent to {@code canWriteMessage(0)}
*/
public boolean canWriteMessage() {
return canWriteMessage(0);
}
/**
* Present the shared memory wrapped in a Mat. If locking is enabled then the lock will be acquired
* if possible and held until the mat is closed. If the lock cannot be acquired in the
* time given then null will be returned.
*
* @param offset is the offset into the shared memory segment where the mat data begins
* @param sizes is the dimensions of the mat.
* @param type is the CvType of the data in the mat.
* @param millis is the milliseconds to wait to acquire the lock. This can be {@link #INFINITE}
* to wait forever or {@link #TRY_LOCK} to make one attempt and return immediately.
* @return if the lock can be obtained, a Mat representing the shared memory segment or part
* thereof. When the Mat is closed the lock will be released. If the lock cannot be obtained,
* {@code null} will be returned.
*/
public ShmQueueCvMat accessAsMat(final long offset, final int[] sizes, final int type, final long millis) {
try(CvMat ret = getUnlockedBufferAsMat(offset, sizes, type);
ShmQueueCvMat aret = shallowCopy(ret);) {
aret.gotLock = lock(millis);
if(aret.gotLock)
return (ShmQueueCvMat)aret.returnMe();
else
return null;
}
}
/**
* Convenience method for access as a 2D Mat. It's the same as calling
*
* <code>
* <pre>
* accessAsMat(offset, new int[] {rows,cols}, type, millis)
* </pre>
* </code>
*
* @see #accessAsMat(long, int[], int, long)
*/
public ShmQueueCvMat accessAsMat(final long offset, final int rows, final int cols, final int type, final long millis) {
rowCol[0] = rows;
rowCol[1] = cols;
return accessAsMat(offset, rowCol, type, millis);
}
/**
* Convenience method for access as a 2D Mat. It's the same as calling
*
* <code>
* <pre>
* accessAsMat(offset, new int[] {rows,cols}, type, INFINITE)
* </pre>
* </code>
*
* @see #accessAsMat(long, int[], int, long)
*/
public ShmQueueCvMat accessAsMat(final long offset, final int rows, final int cols, final int type) {
return accessAsMat(offset, rows, cols, type, INFINITE);
}
/**
* Convenience method. It's the same as calling
*
* <code>
* <pre>
* accessAsMat(offset, sizes, type, INFINITE)
* </pre>
* </code>
*
* @see #accessAsMat(long, int[], int, long)
*/
public ShmQueueCvMat accessAsMat(final long offset, final int[] sizes, final int type) {
return accessAsMat(offset, sizes, type, INFINITE);
}
/**
* Convenience method for access as a 2D Mat. It's the same as calling
*
* <code>
* <pre>
* accessAsMat(offset, new int[] {rows,cols}, type, TRY_LOCK)
* </pre>
* </code>
*
* @see #accessAsMat(long, int[], int, long)
*/
public ShmQueueCvMat tryAccessAsMat(final long offset, final int rows, final int cols, final int type) {
return accessAsMat(offset, rows, cols, type, TRY_LOCK);
}
/**
* Convenience method. It's the same as calling
*
* <code>
* <pre>
* accessAsMat(offset, sizes, type, TRY_LOCK)
* </pre>
* </code>
*
* @see #accessAsMat(long, int[], int, long)
*/
public ShmQueueCvMat tryAccessAsMat(final long offset, final int[] sizes, final int type) {
return accessAsMat(offset, sizes, type, TRY_LOCK);
}
/**
* Returns a pointer to the native location within the shared memory segment.
*/
public long getRawBuffer(final long offset) {
throwIfNecessary(IpcApi.pilecv4j_ipc_shmQueue_buffer(nativeRef, offset, ptrResult), true);
return Pointer.nativeValue(ptrResult.getValue());
}
/**
* This will return a ByteBuffer representing the entire shared memory segment. Each
* call will return the IDENTICAL byte buffer.
*
*/
public ByteBuffer getReusedByteBuffer() {
return reusedBb;
}
/**
* Return a ByteBuffer mapping the portion of the shared memory segment requested.
*
* @param offset is the offset in bytes into the shared memory segment where the resulting
* ByteBuffer should begin.
*/
public ByteBuffer getBuffer(final long offset) {
throwIfNecessary(IpcApi.pilecv4j_ipc_shmQueue_buffer(nativeRef, offset, ptrResult), true);
final Pointer data = ptrResult.getValue();
if(Pointer.nativeValue(data) == 0)
throw new IpcException("Null data buffer");
return data.getByteBuffer(0, size - offset);
}
/**
* Return a ByteBuffer mapping the portion of the shared memory segment requested.
*
* @param offset is the offset in bytes into the shared memory segment where the resulting
* ByteBuffer should begin.
*/
public ByteBuffer getBuffer(final long offset, final long length) {
if(length + offset > size) {
LOGGER.error("Cannot allocate a bytebuffer of size {} with offset {} when the underlying data is only {} bytes long", length, offset, size);
throw new BufferOverflowException();
}
throwIfNecessary(IpcApi.pilecv4j_ipc_shmQueue_buffer(nativeRef, offset, ptrResult), true);
final Pointer data = ptrResult.getValue();
if(Pointer.nativeValue(data) == 0)
throw new IpcException("Null data buffer");
return data.getByteBuffer(0, length);
}
/**
* Obtain the lock if possible. If the native code isn't compiled with locking enabled
* then this method will always return {@code true}.
*
* @param millis is the maximum amount of time to wait in milliseconds to obtain the lock.
* @return {@code true} if the lock was obtained, {@code false} otherwise.
*/
public boolean lock(final long millis) {
return (throwIfNecessary(IpcApi.pilecv4j_ipc_shmQueue_lock(nativeRef, millis, 0), false) == EAGAIN) ? false : true;
}
/**
* <p>
* Obtain the lock waiting forever if necessary. If the native code isn't compiled with locking enabled
* then this method will always return {@code true}.
* </p>
*
* <p>
* This is a convenience method and is the same as calling {@code lock(INFINITE)}.
* </p>
*/
public boolean lock() {
return lock(INFINITE);
}
/**
* <p>
* Obtain the lock if it's immediately available. If the native code isn't compiled with locking enabled
* then this method will always return {@code true}.
* </p>
*
* <p>
* This is a convenience method and is the same as calling {@code lock(TRY_LOCK)}.
* </p>
*/
public boolean tryLock() {
return lock(TRY_LOCK);
}
/**
* Return a Resource that can be auto-closed. It will return null if the lock cannot be acquired
* so the return value will need to be checked.
*/
public QuietCloseable lockAsResource(final long timeoutMillis) {
return lock(timeoutMillis) ? () -> unlock() : null;
}
/**
* Return a Resource that can be auto-closed. It will return null if the lock cannot be acquired
* so the return value will need to be checked. Convenience method for {@code lockAsResource(TRY_LOCK)}
*/
public QuietCloseable tryLockAsResource() {
return lockAsResource(TRY_LOCK);
}
/**
* Return a Resource that can be auto-closed. It will return null if the lock cannot be acquired
* so the return value will need to be checked. Convenience method for {@code lockAsResource(INFINITE)}
*/
public QuietCloseable lockAsResource() {
return lockAsResource(INFINITE);
}
/**
* If you're holding the lock, then release it. If the native code isn't compiled with locking enabled,
* this method will do nothing. If you're not holding the lock, this method will likely put the lock
* management in an unmanageable state.
*/
public void unlock() {
throwIfNecessary(IpcApi.pilecv4j_ipc_shmQueue_unlock(nativeRef), true);
}
/**
* This will return the size of the shared memory segment. The shared memory segment
* must be open already.
*/
public long getSize() {
throwIfNecessary(IpcApi.pilecv4j_ipc_shmQueue_bufferSize(nativeRef, longResult), true);
return longResult.getValue();
}
@Override
public String toString() {
return "ShmQueue [nativeRef=" + nativeRef + ", name=" + name + ", size=" + size + "]";
}
/**
* This will return {@code true} if the native code has been compiled to enable
* locking. Otherwise it will return {@code false}.
*/
public static boolean isLockingEnabled() {
return IpcApi.pilecv4j_ipc_locking_isLockingEnabled() == 1 ? true : false;
}
public CvMat getUnlockedBufferAsMat(final long offset, final int[] sizes, final int type) {
if(sizes == null || sizes.length == 0)
return new CvMat();
final long nativeData = getRawBuffer(offset); // this will throw an exception if it's not open so we won't
// need to worry about 'size' being set.
long matSizeBytes = CvType.ELEM_SIZE(type);
for(final int sz: sizes)
matSizeBytes *= sz;
if(matSizeBytes > size)
throw new IpcException("Can't allocate a mat with " + matSizeBytes + " bytes given a data buffer of " + size + " bytes");
try(CvMat ret = CvMat.create(sizes, type, nativeData);) {
return ret.returnMe();
}
}
private ShmQueueCvMat shallowCopy(final Mat mat) {
final long newNativeObj = ImageAPI.pilecv4j_image_CvRaster_copy(mat.nativeObj);
if(newNativeObj == 0L) {
// let's do some checking
if(!mat.isContinuous())
LOGGER.error("Cannot shallow copy a discontinuous Mat");
else
LOGGER.error("Failed to shallow copy mat");
return null;
}
return new ShmQueueCvMat(newNativeObj);
}
}
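/*
 * Illustrative round-trip sketch, assuming the native pilecv4j IPC library is
 * available and that a 1024-byte segment is large enough for the payload. Both
 * ends are shown in one process here; normally the reader is a separate process.
 */
class ShmQueueExample {
public static void main(final String[] args) {
try(ShmQueue writer = ShmQueue.createUsingMd5Hash("example-queue");
ShmQueue reader = ShmQueue.createUsingMd5Hash("example-queue");) {
writer.create(1024, true);          // owner side creates the segment
writer.access(bb -> bb.putInt(42)); // write under the (optional) lock
writer.post();                      // mark mailbox 0 as ready for the reader
if(reader.open(false) && reader.isMessageAvailable()) {
reader.access(bb -> System.out.println(bb.getInt()));
reader.unpost();                // free the mailbox for the next message
}
}
}
}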
|
0
|
java-sources/ai/kognition/pilecv4j/lib-ipc/1.0/ai/kognition/pilecv4j/ipc
|
java-sources/ai/kognition/pilecv4j/lib-ipc/1.0/ai/kognition/pilecv4j/ipc/internal/IpcApi.java
|
package ai.kognition.pilecv4j.ipc.internal;
import com.sun.jna.Native;
import com.sun.jna.NativeLibrary;
import com.sun.jna.Pointer;
import com.sun.jna.ptr.IntByReference;
import com.sun.jna.ptr.LongByReference;
import com.sun.jna.ptr.PointerByReference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import ai.kognition.pilecv4j.util.NativeLibraryLoader;
public class IpcApi {
private static final Logger LOGGER = LoggerFactory.getLogger(IpcApi.class);
public static final String LIBNAME = "ai.kognition.pilecv4j.ipc";
// needs to match LogLevel enum in the C++ code.
public static final int LOG_LEVEL_TRACE = 0;
public static final int LOG_LEVEL_DEBUG = 1;
public static final int LOG_LEVEL_INFO = 2;
public static final int LOG_LEVEL_WARN = 3;
public static final int LOG_LEVEL_ERROR = 4;
public static final int LOG_LEVEL_FATAL = 5;
public static void _init() {}
static {
NativeLibraryLoader.loader()
.library(LIBNAME)
.addPreLoadCallback((dir, libname, oslibname) -> {
if(LIBNAME.equals(libname))
NativeLibrary.addSearchPath(libname, dir.getAbsolutePath());
})
.load();
Native.register(LIBNAME);
pcv4j_ipc_logging_setLogLevel(kogLoglevel(LOGGER));
LOGGER.info("native-ipc implementation is " + pilecv4j_ipc_implementationName());
}
/**
* Lookup the int-based log level to pass to native logging calls given the
* configuration of the Logger.
*/
public static int kogLoglevel(final Logger logger) {
// find the level
final int logLevelSet;
if(logger.isTraceEnabled())
logLevelSet = LOG_LEVEL_TRACE;
else if(logger.isDebugEnabled())
logLevelSet = LOG_LEVEL_DEBUG;
else if(logger.isInfoEnabled())
logLevelSet = LOG_LEVEL_INFO;
else if(logger.isWarnEnabled())
logLevelSet = LOG_LEVEL_WARN;
else if(logger.isErrorEnabled())
logLevelSet = LOG_LEVEL_ERROR;
else
logLevelSet = LOG_LEVEL_FATAL;
return logLevelSet;
}
// ===============================================================
// Overall system management functionality
/**
* General utilities
*/
public static native void pcv4j_ipc_logging_setLogLevel(int logLevel);
public static native byte pilecv4j_ipc_locking_isLockingEnabled();
/*
* MatQueue
*/
public static native long pilecv4j_ipc_create_shmQueue(String name, int nameRep);
public static native void pilecv4j_ipc_destroy_shmQueue(long nativeRef);
public static native String pilecv4j_ipc_implementationName();
public static native long pilecv4j_ipc_shmQueue_create(long nativeRef, long size, int owner, int numMailboxes);
public static native long pilecv4j_ipc_shmQueue_open(long nativeRef, int owner);
public static native long pilecv4j_ipc_shmQueue_reset(long nativeRef);
public static native long pilecv4j_ipc_shmQueue_isOwner(long nativeRef, IntByReference ret);
public static native long pilecv4j_ipc_shmQueue_isOpen(long nativeRef, IntByReference ret);
public static native long pilecv4j_ipc_shmQueue_unlink(long nativeRef);
public static native long pilecv4j_ipc_shmQueue_buffer(long nativeRef, long offset, PointerByReference owner);
public static native long pilecv4j_ipc_shmQueue_bufferSize(long nativeRef, LongByReference bufSizeOut);
public static native long pilecv4j_ipc_shmQueue_lock(long nativeRef, long millis, int aggressive);
public static native long pilecv4j_ipc_shmQueue_unlock(long nativeRef);
public static native long pilecv4j_ipc_shmQueue_postMessage(long nativeRef, int mailbox);
public static native long pilecv4j_ipc_shmQueue_unpostMessage(long nativeRef, int mailbox);
public static native long pilecv4j_ipc_shmQueue_isMessageAvailable(long nativeRef, IntByReference result, int mailbox);
public static native long pilecv4j_ipc_shmQueue_canWriteMessage(long nativeRef, IntByReference result, int mailbox);
/*
* Error handling
*/
public static native Pointer pcv4j_ipc_errHandling_errString(long code);
public static native void pcv4j_ipc_errHandling_freeErrString(Pointer errStr);
public static native long pcv4j_ipc_errHandling_getEAGAIN();
public static native long pcv4j_ipc_errHandling_getOK();
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-nr/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-nr/1.0/ai/kognition/pilecv4j/nr/LinearRegression.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.nr;
import java.util.Arrays;
import ai.kognition.pilecv4j.nr.Minimizer.Func;
/**
* <p>
* NOTE: DO NOT USE THIS CLASS. IT'S ONLY KEPT FOR REFERENCE.
* The error term is calculated using the perpendicular distance. This is LESS robust
* for determining the slope/intercept form of a line than minimizing the vertical
* distance (i.e. the error in the 'y' term only), because the starting point cannot
* be rotated through the Y axis. As an example, if the slope of the points is approximately
* -3.0 and the slope of the starting iteration is 3.0, then RAISING the slope
* lowers the perpendicular distance. This moves the iterations in the wrong direction.
* </p>
*
* This class will do a linear regression by minimizing the squared error
* between the points provided to the constructor and the line specified
* by y = m[0]x + m[1]
*/
@Deprecated
public class LinearRegression implements Func {
private final double[] y;
private final double[] x;
public LinearRegression(final double[] x, final double[] y) {
this.x = x;
this.y = y;
}
@Override
public double func(final double[] lineDefMb) {
final double m = lineDefMb[0];
final double b = lineDefMb[1];
System.out.println(Arrays.toString(lineDefMb));
// translate the line so it goes through the origin and find a unit vector
final double t = -b;
final double yTransWhenXis1 = m;
final double tmpMag = Math.sqrt((yTransWhenXis1 * yTransWhenXis1) + 1.0);
final double xut = 1.0 / tmpMag;
final double yut = yTransWhenXis1 / tmpMag;
// [ xut, yut ] = a unit vector in the direction of the line y = mx.
// This is the line y = mx + b translated so it goes through the origin.
// now we want to translate each point the same amount (i.e., by 't')
// and measure the perpendicular distance to the unit vector [ xut, yut ]
double error2 = 0.0;
for(int i = 0; i < x.length; i++) {
final double yit = y[i] + t;
final double xi = x[i];
// dot product [ xi, yi ] with [ xut, yut ] = the length of
// the projection of [ xi, yi ] onto [ xut, yut ]
final double dot = (xi * xut) + (yit * yut);
final double projXi = xut * dot;
final double projYit = yut * dot;
// the error is the distance between [ projXi, projYit ] and [ xi, yit ]
final double diffX = projXi - xi;
final double diffY = projYit - yit;
final double curErr2 = (diffX * diffX) + (diffY * diffY);
System.out.print("" + curErr2 + " ");
// sum the squared error.
error2 += curErr2;
}
System.out.println(" = " + error2);
return error2;
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-nr/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-nr/1.0/ai/kognition/pilecv4j/nr/LinearRegressionWithKnownSlope.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.nr;
import ai.kognition.pilecv4j.nr.Minimizer.Func;
/**
* <p>
* This class will do a linear regression by minimizing the squared error
* between the points provided to the constructor and the line specified
* by y = m[0]x + m[1]
* </p>
*
* <p>
* NOTE: The error term is calculated using the vertical distance. That is
* it minimizes the error in the 'y' term only. This is more robust for determining
* the slope/intercept form of a line because the starting point cannot be rotated
* through the Y axis. In other words, if the slope of the points is approximately
* -3.0 and the slope of the starting iteration is 3.0, then RAISING the slope
* lowers the perpendicular distance. This moves the iterations in the wrong
* direction.
* </p>
*/
public class LinearRegressionWithKnownSlope implements Func {
private final double[] y;
private final double[] x;
private final double slope;
public LinearRegressionWithKnownSlope(final double slope, final double[] x, final double[] y) {
this.x = x;
this.y = y;
this.slope = slope;
}
@Override
public double func(final double[] m) {
double error2 = 0.0;
for(int i = 0; i < x.length; i++) {
final double ycur = (slope * x[i] + m[0]);
final double ecur = ycur - y[i];
error2 += (ecur * ecur);
}
return error2;
}
}
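/*
 * Illustrative sketch, assuming the native minimizer library is available. With
 * the slope fixed at 2.0 and points on y = 2x + 1, the single minimized
 * parameter m[0] should land near the intercept, 1.0.
 */
class LinearRegressionWithKnownSlopeExample {
public static void main(final String[] args) {
final double[] xs = {0.0, 1.0, 2.0, 3.0};
final double[] ys = {1.0, 3.0, 5.0, 7.0};
final Minimizer m = new Minimizer(new LinearRegressionWithKnownSlope(2.0, xs, ys));
m.minimize(new double[] {0.0}); // start the intercept search at 0
System.out.println(m.getFinalPostion()[0]); // expect ~1.0
}
}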
|
0
|
java-sources/ai/kognition/pilecv4j/lib-nr/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-nr/1.0/ai/kognition/pilecv4j/nr/Minimizer.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.nr;
import java.util.Arrays;
import java.util.concurrent.atomic.AtomicReference;
import ai.kognition.pilecv4j.util.NativePointerWrap;
/**
* <p>
* This class encapsulates the running of
* <a href="https://en.wikipedia.org/wiki/Powell's_method">Powell's Method</a> on a
* given function in order to determine a local minimum.
* </p>
*
* <p>
* The function to be minimized can have a domain of any dimension as it takes an array
* of {@code double}s and returns the value that needs to be minimized.
* </p>
*
* <p>
* For example, to minimize the function {@code (x - 2)^2 - 3} you would:
* </p>
*
* <pre>
* {
* @code
* final Minimizer m = new Minimizer(x -> ((x[0] - 2.0) * (x[0] - 2.0)) - 3.0);
* final double minVal = m.minimize(new double[] {-45.0});
* final double minParam = m.getFinalPostion()[0];
* }
* </pre>
*/
public class Minimizer {
static {
MinimizerAPI._init();
}
private final Func f;
private double[] minVec;
/**
* Interface representing the function/lambda to be minimized.
*/
@FunctionalInterface
public interface Func {
public double func(double[] x);
}
/**
* Default fractional tolerance on the function value, used as the convergence criterion.
*/
public static double ftol = 1.0e-10;
/**
* Construct the minimizer with the function to be minimized.
*/
public Minimizer(final Func f) {
this.f = f;
}
/**
* Minimize the function that the {@link Minimizer} was instantiated with, using the
* identity matrix as the initial direction set and {@code p} as the starting position.
*/
public double minimize(final double[] p)
throws MinimizerException {
final double[][] xi = newUnitMatrix(p.length);
return minimize(p, xi);
}
/**
* Minimize the function that the {@link Minimizer} was instantiated with using the
* supplied starting position {@code p} and direction-set matrix {@code xi}.
*/
public double minimize(final double[] p, final double[][] xi) throws MinimizerException {
minVec = new double[p.length];
return dominimize_jna(f, p, xi, ftol, minVec);
}
public static class FinalPosition {
public final double error;
public final double[] position;
private FinalPosition(final double error, final double[] position) {
this.error = error;
this.position = position;
}
@Override
public String toString() {
return "[ minimized error: " + error + ", minimized solution: " + Arrays.toString(position) + "]";
}
}
public static FinalPosition minimize(final Func functionToMinimize, final double[] startingPosition) {
final Minimizer minimizer = new Minimizer(functionToMinimize);
final double err = minimizer.minimize(startingPosition);
return new FinalPosition(err, minimizer.getFinalPostion());
}
private double dominimize_jna(final Func f, final double[] pd, final double[][] xi, final double jftol, final double[] minVal) {
final int n = xi.length;
// check to make sure xi is square.
final int col = xi[0] == null ? 0 : xi[0].length;
if(n != col)
throw new IllegalArgumentException("xi matrix needs to be square. It's currently " + n + " X " + col);
final double[] xiflat = new double[n * n];
for(int i = 0; i < n; i++)
System.arraycopy(xi[i], 0, xiflat, i * n, n);
final int[] status = new int[1];
status[0] = 0;
// temporary double array to hold values being passed to Func
final double[] tmp = new double[n];
// cheap mutable to detect and pass around the side exceptions thrown by Func
final AtomicReference<RuntimeException> error = new AtomicReference<>(null);
final double ret = MinimizerAPI.pilecv4j_image_dominimize((x, p_status) -> {
int xindex = 0;
for(int i = 0; i < n; i++) {
tmp[i] = x.getFloat(xindex);
xindex += Float.BYTES;
}
try {
final float retx = (float)f.func(tmp);
return retx;
} catch(final RuntimeException th) {
error.set(th);
p_status.setInt(0, 1);
return 0.0f;
}
}, pd.length, pd, xiflat, jftol, minVal, status);
if(error.get() != null)
throw new MinimizerException("Exception ocurred in function being minimized.", error.get());
if(status[0] != 0) {
try(final NativePointerWrap message = new NativePointerWrap(MinimizerAPI.pilecv4j_image_nrGetErrorMessage());) {
final String msgStr = message.ptr.getString(0, "UTF-8");
throw new MinimizerException("Powell mimimization failed with a non-zero status (" + status[0] + ") and message \"" + msgStr + "\"");
}
}
return ret;
}
/**
* Return the final domain value of the minimized solution.
*/
public double[] getFinalPostion() {
return minVec;
}
private double[][] newUnitMatrix(final int n) {
final double[][] ret = new double[n][];
for(int i = 0; i < n; i++) {
ret[i] = new double[n];
for(int j = 0; j < n; j++)
ret[i][j] = (i == j) ? 1.0 : 0.0;
}
return ret;
}
}
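/*
 * Illustrative sketch of the static convenience entry point, applied to the
 * same function as the class javadoc example; assumes the native library is
 * available. Expected result: position near {2.0}, error near -3.0.
 */
class MinimizerExample {
public static void main(final String[] args) {
final Minimizer.FinalPosition fp =
Minimizer.minimize(x -> ((x[0] - 2.0) * (x[0] - 2.0)) - 3.0, new double[] {-45.0});
System.out.println(fp); // [ minimized error: ~-3.0, minimized solution: [~2.0] ]
}
}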
|
0
|
java-sources/ai/kognition/pilecv4j/lib-nr/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-nr/1.0/ai/kognition/pilecv4j/nr/MinimizerAPI.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.nr;
import com.sun.jna.Callback;
import com.sun.jna.Native;
import com.sun.jna.NativeLibrary;
import com.sun.jna.Pointer;
import ai.kognition.pilecv4j.util.NativeLibraryLoader;
public class MinimizerAPI {
public static final String LIBNAME = "ai.kognition.pilecv4j.util";
static {
NativeLibraryLoader.loader()
.library(LIBNAME)
.addPreLoadCallback((dir, libname, oslibname) -> {
NativeLibrary.addSearchPath(libname, dir.getAbsolutePath());
})
.load();
Native.register(LIBNAME);
}
static void _init() {}
public interface EvalCallback extends Callback {
float eval(Pointer floatArrayX, Pointer status);
}
public static native double pilecv4j_image_dominimize(EvalCallback func, int n, double[] pd, double[] xi, double jftol, double[] minVal, int[] status);
public static native Pointer pilecv4j_image_nrGetErrorMessage();
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-nr/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-nr/1.0/ai/kognition/pilecv4j/nr/MinimizerException.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.nr;
public class MinimizerException extends RuntimeException {
private static final long serialVersionUID = 2687515753273397453L;
public MinimizerException() {}
public MinimizerException(final String msg) {
super(msg);
}
public MinimizerException(final String msg, final Throwable th) {
super(msg, th);
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-nr/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-nr/1.0/ai/kognition/pilecv4j/nr/SimpleLinearRegression.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.nr;
import ai.kognition.pilecv4j.nr.Minimizer.Func;
/**
* <p>
* This class will do a linear regression by minimizing the squared error
* between the points provided to the constructor and the line specified
* by y = m[0]x + m[1]
* </p>
*
* <p>
* NOTE: The error term is calculated using the vertical distance. That is,
* it minimizes the error in the 'y' term only. This is more robust for determining
* the slope/intercept form of a line because the starting point cannot be rotated
* through the Y axis. In other words, if the slope of the points is approximately
* -3.0 and the slope of the starting iteration is 3.0, then RAISING the slope
* lowers the perpendicular distance. This moves the iterations in the wrong
* direction.
* </p>
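*
* <p>
* A minimal usage sketch (the sample points are hypothetical, and it is assumed here
* that {@link Minimizer} is constructed from a {@link Minimizer.Func} and exposes a
* {@code minimize(double[])} method that takes the starting guess):
* </p>
*
* <pre>
* <code>
* // points scattered roughly around y = 2x + 1
* final double[] x = {0.0, 1.0, 2.0, 3.0};
* final double[] y = {1.1, 2.9, 5.2, 6.8};
* final Minimizer minimizer = new Minimizer(new SimpleLinearRegression(x, y));
* final double finalErr2 = minimizer.minimize(new double[] {0.0, 0.0}); // start at m=0, b=0
* final double[] mb = minimizer.getFinalPostion(); // mb[0] is the slope, mb[1] the intercept
* </code>
* </pre>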
*/
public class SimpleLinearRegression implements Func {
public final double[] y;
public final double[] x;
public SimpleLinearRegression(final double[] x, final double[] y) {
this.x = x;
this.y = y;
}
@Override
public double func(final double[] lineDefMb) {
final double m = lineDefMb[0];
final double b = lineDefMb[1];
double error2 = 0.0;
for(int i = 0; i < x.length; i++) {
final double ycur = (m * x[i] + b);
final double ecur = ycur - y[i];
error2 += (ecur * ecur);
}
return error2;
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-python/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-python/1.0/ai/kognition/pilecv4j/python/ParamBlock.java
|
package ai.kognition.pilecv4j.python;
import static ai.kognition.pilecv4j.python.PythonHandle.throwIfNecessary;
import java.util.ArrayList;
import java.util.List;
import java.util.function.LongConsumer;
import org.apache.commons.lang3.mutable.MutableBoolean;
import org.opencv.core.Mat;
import net.dempsy.util.MutableInt;
import net.dempsy.util.QuietCloseable;
import ai.kognition.pilecv4j.python.internal.PythonAPI;
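/**
* <p>
* A builder for the arguments passed to a Python function via
* {@link PythonHandle#runPythonFunction(String, String, ParamBlock)}. Positional
* arguments are collected into a Python tuple and keyword arguments into a
* Python dict; both are constructed natively when the call is actually made.
* </p>
*
* <p>
* A minimal usage sketch (the argument values and keyword names are hypothetical):
* </p>
*
* <pre>
* <code>
* final ParamBlock params = ParamBlock.builder()
*     .arg("input.png")      // positional string
*     .arg(42L)              // positional int
*     .arg("threshold", 0.5) // keyword float
*     .arg("verbose", true); // keyword boolean
* </code>
* </pre>
*/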
public class ParamBlock {
private final List<LongConsumer> dictCreator = new ArrayList<>(50);
private final List<LongConsumer> tupleCreator = new ArrayList<>(50);
public static ParamBlock builder() {
return new ParamBlock();
}
public ParamBlock arg(final String kwd, final String val) {
dictCreator.add(l2 -> throwIfNecessary(PythonAPI.pilecv4j_python_dict_putString(l2, kwd, val)));
return this;
}
public ParamBlock arg(final String kwd, final boolean val) {
dictCreator.add(l2 -> throwIfNecessary(PythonAPI.pilecv4j_python_dict_putBoolean(l2, kwd, val ? 1 : 0)));
return this;
}
public ParamBlock arg(final String kwd, final PythonHandle val) {
dictCreator.add(l2 -> throwIfNecessary(PythonAPI.pilecv4j_python_dict_putKogSys(l2, kwd, val.nativeObj)));
return this;
}
public ParamBlock arg(final String kwd, final long val) {
dictCreator.add(l2 -> throwIfNecessary(PythonAPI.pilecv4j_python_dict_putInt(l2, kwd, val)));
return this;
}
public ParamBlock arg(final String kwd, final double val) {
dictCreator.add(l2 -> throwIfNecessary(PythonAPI.pilecv4j_python_dict_putFloat(l2, kwd, val)));
return this;
}
public ParamBlock arg(final String kwd, final Mat val) {
dictCreator.add(l2 -> throwIfNecessary(PythonAPI.pilecv4j_python_dict_putMat(l2, kwd, val.nativeObj)));
return this;
}
public ParamBlock arg(final String kwd, final PyObject val) {
dictCreator.add(l2 -> throwIfNecessary(PythonAPI.pilecv4j_python_dict_putPyObject(l2, kwd, val.nativeRef)));
return this;
}
public ParamBlock arg(final String val) {
final int index = tupleCreator.size();
tupleCreator.add(l2 -> throwIfNecessary(PythonAPI.pilecv4j_python_tuple_putString(l2, index, val)));
return this;
}
public ParamBlock arg(final Mat val) {
final int index = tupleCreator.size();
tupleCreator.add(l2 -> throwIfNecessary(PythonAPI.pilecv4j_python_tuple_putMat(l2, index, val.nativeObj)));
return this;
}
public ParamBlock arg(final List<?> val) {
final int index = tupleCreator.size();
tupleCreator.add(l2 -> throwIfNecessary(PythonAPI.pilecv4j_python_tuple_putPyObject(l2, index, parseTuple(val))));
return this;
}
public ParamBlock arg(final PyObject val) {
final int index = tupleCreator.size();
tupleCreator.add(l2 -> throwIfNecessary(PythonAPI.pilecv4j_python_tuple_putPyObject(l2, index, val.nativeRef)));
return this;
}
public ParamBlock arg(final boolean val) {
final int index = tupleCreator.size();
tupleCreator.add(l2 -> throwIfNecessary(PythonAPI.pilecv4j_python_tuple_putBoolean(l2, index, val ? 1 : 0)));
return this;
}
public ParamBlock arg(final PythonHandle val) {
final int index = tupleCreator.size();
tupleCreator.add(l2 -> throwIfNecessary(PythonAPI.pilecv4j_python_tuple_putKogSys(l2, index, val.nativeObj)));
return this;
}
public ParamBlock arg(final long val) {
final int index = tupleCreator.size();
tupleCreator.add(l2 -> throwIfNecessary(PythonAPI.pilecv4j_python_tuple_putInt(l2, index, val)));
return this;
}
public ParamBlock arg(final double val) {
final int index = tupleCreator.size();
tupleCreator.add(l2 -> throwIfNecessary(PythonAPI.pilecv4j_python_tuple_putFloat(l2, index, val)));
return this;
}
static record Tuple(long tupleRef) implements QuietCloseable {
@Override
public void close() {
PythonAPI.pilecv4j_python_tuple_destroy(tupleRef);
}
}
static record Dict(long dictRef) implements QuietCloseable {
@Override
public void close() {
PythonAPI.pilecv4j_python_dict_destroy(dictRef);
}
}
Tuple buildArgs() {
final long tupleRef = PythonAPI.pilecv4j_python_tuple_create(tupleCreator.size());
if(tupleRef == 0)
throw new IllegalStateException("Failed to create a python PyTuple of size " + tupleCreator.size());
final MutableBoolean doClose = new MutableBoolean(true);
try(QuietCloseable q = () -> {
if(doClose.booleanValue())
PythonAPI.pilecv4j_python_tuple_destroy(tupleRef);
};) {
tupleCreator.forEach(c -> c.accept(tupleRef));
doClose.setFalse();
}
return new Tuple(tupleRef);
}
Dict buildKeywordArgs() {
final long dictRef = PythonAPI.pilecv4j_python_dict_create();
if(dictRef == 0)
throw new IllegalStateException("Failed to create a python PyDict");
final MutableBoolean doClose = new MutableBoolean(true);
try(QuietCloseable q = () -> {
if(doClose.booleanValue())
PythonAPI.pilecv4j_python_dict_destroy(dictRef);
};) {
dictCreator.forEach(c -> c.accept(dictRef));
doClose.setFalse();
}
return new Dict(dictRef);
}
private static long parseTuple(final List<?> val) {
final long pyList = PythonAPI.pilecv4j_python_tuple_create(val.size());
final MutableInt pyListIndex = new MutableInt(0);
val.forEach(o -> {
if(o instanceof String)
throwIfNecessary(PythonAPI.pilecv4j_python_tuple_putString(pyList, (int)pyListIndex.val++, (String)o));
else if(o instanceof Number) {
final Number p = (Number)o;
if(o instanceof Long || o instanceof Integer || o instanceof Short || o instanceof Byte)
throwIfNecessary(PythonAPI.pilecv4j_python_tuple_putInt(pyList, (int)pyListIndex.val++, p.longValue()));
else
throwIfNecessary(PythonAPI.pilecv4j_python_tuple_putFloat(pyList, (int)pyListIndex.val++, p.doubleValue()));
} else if(o instanceof Mat)
throwIfNecessary(PythonAPI.pilecv4j_python_tuple_putMat(pyList, (int)pyListIndex.val++, ((Mat)o).nativeObj));
else if(o instanceof PythonHandle)
throwIfNecessary(PythonAPI.pilecv4j_python_tuple_putKogSys(pyList, (int)pyListIndex.val++, ((PythonHandle)o).nativeObj));
else if(o instanceof List)
throwIfNecessary(PythonAPI.pilecv4j_python_tuple_putPyObject(pyList, (int)pyListIndex.val++, parseTuple((List<?>)o)));
});
return pyList;
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-python/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-python/1.0/ai/kognition/pilecv4j/python/PyObject.java
|
package ai.kognition.pilecv4j.python;
import net.dempsy.util.QuietCloseable;
import ai.kognition.pilecv4j.python.internal.PythonAPI;
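/**
* An opaque handle to an underlying Python object returned from a script. For
* managed instances the native reference count is incremented on construction
* and decremented on {@link #close()}; unmanaged instances leave the count alone.
*/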
public class PyObject implements QuietCloseable {
final long nativeRef;
private boolean closed = false;
private final boolean unmanaged;
PyObject(final long nativeRef, final boolean unmanaged) {
if(nativeRef == 0)
throw new IllegalArgumentException("Null PyObject");
this.unmanaged = unmanaged;
if(!unmanaged)
PythonAPI.pilecv4j_python_pyObject_incref(nativeRef);
this.nativeRef = nativeRef;
}
@Override
public void close() {
if(!closed && !unmanaged)
PythonAPI.pilecv4j_python_pyObject_decref(nativeRef);
closed = true;
}
public PyObject shallowCopy() {
return new PyObject(nativeRef, false);
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-python/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-python/1.0/ai/kognition/pilecv4j/python/PythonException.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.python;
public class PythonException extends RuntimeException {
private static final long serialVersionUID = 1L;
public PythonException(final String message) {
super(message);
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-python/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-python/1.0/ai/kognition/pilecv4j/python/PythonHandle.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.python;
import static ai.kognition.pilecv4j.python.internal.PythonAPI.LOG_LEVEL_DEBUG;
import static ai.kognition.pilecv4j.python.internal.PythonAPI.LOG_LEVEL_ERROR;
import static ai.kognition.pilecv4j.python.internal.PythonAPI.LOG_LEVEL_FATAL;
import static ai.kognition.pilecv4j.python.internal.PythonAPI.LOG_LEVEL_INFO;
import static ai.kognition.pilecv4j.python.internal.PythonAPI.LOG_LEVEL_TRACE;
import static ai.kognition.pilecv4j.python.internal.PythonAPI.LOG_LEVEL_WARN;
import static net.dempsy.util.Functional.chain;
import static net.dempsy.util.Functional.uncheck;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import com.sun.jna.Pointer;
import com.sun.jna.ptr.IntByReference;
import com.sun.jna.ptr.PointerByReference;
import org.apache.commons.io.FileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import net.dempsy.util.QuietCloseable;
import net.dempsy.vfs.Path;
import net.dempsy.vfs.Vfs;
import ai.kognition.pilecv4j.image.CvMat;
import ai.kognition.pilecv4j.python.ParamBlock.Dict;
import ai.kognition.pilecv4j.python.ParamBlock.Tuple;
import ai.kognition.pilecv4j.python.internal.PythonAPI;
import ai.kognition.pilecv4j.python.internal.PythonAPI.get_image_source;
/**
* <p>
* This object can be used to call functions in Python. It can be used to simply
* call a function or it can be used to set up a message exchange between
* a function running in Python in one thread, and a function running in Java
* in another thread. The more straightforward way is to just call a function.
* </p>
* <p>
* As an example of how to simply call a function, first create a {@link PythonHandle}:
*
* <pre>
* <code>
* try (final PythonHandle python = new PythonHandle();) {
* </code>
* </pre>
* </p>
*
* <p>
* You can add paths for modules. This is typical since you're probably
* running a script that's not already on the PYTHONPATH. For example:
* </p>
*
* <pre>
* <code>
* python.addModulePath("/path/to/directory/with/python_files");
* </code>
* </pre>
*
* <p>
* You can also have the PythonHandle expand python modules that are in
* jar files on the classpath. In the following example, there's a directory
* in the jar file called "python" that has scripts in it.
* </p>
*
* <pre>
* <code>
* python.unpackAndAddModule("classpath:///python");
* </code>
* </pre>
*
* <p>
* Finally, you can do this in one step while creating the python handle.
* </p>
*
* <pre>
* <code>
* try (PythonHandle python = PythonHandle.initModule("classpath:///python");) {
* </code>
* </pre>
*
* <p>
* There are two different modes in which communication with Python can operate. You
* can simply call a function in a Python *.py file synchronously (<em>SYNCHRONOUS</em>
* mode), or you can start a Python function in a separate thread and set up
* a hand-off between Java and Python that allows for passing images and retrieving
* results (<em>ASYNCHRONOUS</em> mode).
* </p>
*/
public class PythonHandle implements QuietCloseable {
private static final Logger LOGGER = LoggerFactory.getLogger(PythonHandle.class);
private static final Object pythonExpandedLock = new Object();
private static Map<String, File> pythonIsExpanded = new HashMap<>();
private static final int PyResultNONE = 0;
private static final int PyResultLONG = 1;
private static final int PyResultFLOAT = 2;
private static final int PyResultSTRING = 3;
private static final int PyResultMAT = 4;
private static final int PyResultPyObject = 5;
private static final int PyResultLIST = 6;
static {
// find the level
final int logLevelSet;
if(LOGGER.isTraceEnabled())
logLevelSet = LOG_LEVEL_TRACE;
else if(LOGGER.isDebugEnabled())
logLevelSet = LOG_LEVEL_DEBUG;
else if(LOGGER.isInfoEnabled())
logLevelSet = LOG_LEVEL_INFO;
else if(LOGGER.isWarnEnabled())
logLevelSet = LOG_LEVEL_WARN;
else if(LOGGER.isErrorEnabled())
logLevelSet = LOG_LEVEL_ERROR;
else
logLevelSet = LOG_LEVEL_FATAL;
throwIfNecessary(PythonAPI.pilecv4j_python_setLogLevel(logLevelSet));
throwIfNecessary(PythonAPI.pilecv4j_python_initPython());
}
public ImageSource imageSource = null;
long nativeObj = 0L;
private String currentModule;
private String currentFunction;
private final get_image_source callback = new get_image_source() {
@Override
public long image_source(final long ptRef) {
synchronized(PythonHandle.this) {
if(imageSource == null) imageSource = new ImageSource(PythonAPI.pilecv4j_python_imageSource_create(ptRef));
return imageSource.imageSourceRef;
}
}
};
/**
* Create a {@link PythonHandle}
*
* @throws PythonException if the underlying Python environment couldn't be instantiated
*/
public PythonHandle() throws PythonException {
nativeObj = PythonAPI.pilecv4j_python_kogSys_create(callback);
if(nativeObj == 0L)
throw new PythonException("Failed to instantiate native PyTorch instance.");
}
/**
* Run a Python function. Once you have the modules set up (see {@link PythonHandle})
* you can invoke a function from a *.py file. For example, if you have a Python file
* called "python_script.py" that has a function in it called "def func(...):" then
* you can invoke it with parameters as follows:
*
* <pre>
* <code>
* try (ResultBlock results = python.runPythonFunction("python_script", "func",
* ParamBlock.builder()
* .arg(arg1)
* .arg(arg2)
* .arg("keyword",kwdArg)
* ...);) {
* </code>
* </pre>
*
* <p>
* You can pass parameters of the following types:
* </p>
* <ul>
* <li>String</li>
* <li>A numeric type.</li>
* <li>A {@link CvMat}</li>
* <li>The PythonHandle itself. This is primarily used to set up a communication channel between
* a running Python script and the Java side.</li>
* <li>A list of any of these (including another list)</li>
* </ul>
*
* <p>
* The ResultBlock will hold the return from the function. If the function has no return
* value then the ResultBlock will be null. You can retrieve any of the following types:
* </p>
*
* <ul>
* <li>String</li>
* <li>A numeric type.</li>
* <li>A {@link CvMat} - you will get a shallow copy.</li>
* <li>A PyObject. This will be an opaque handle to an underlying Python return object.
* It can be passed back into another script.</li>
* <li>A list of any of these (including another list)</li>
* </ul>
*
*/
public ResultBlock runPythonFunction(final String module, final String function, final ParamBlock params) {
try(Tuple args = params.buildArgs();
Dict kwds = params.buildKeywordArgs();) {
final PointerByReference result = new PointerByReference();
final IntByReference resultSizeByRef = new IntByReference();
throwIfNecessary(PythonAPI.pilecv4j_python_runPythonFunction(module, function, args.tupleRef(),
kwds.dictRef(), result, resultSizeByRef));
final int resultSize = resultSizeByRef.getValue();
final Pointer p = result.getValue();
if(resultSize == 0 || p.equals(Pointer.NULL))
return null;
return new ResultBlock(p, resultSize);
}
}
/**
* When running a Python script asynchronously, this object will represent the state
* of the Python script.
*/
public static class PythonRunningState {
public final AtomicBoolean isRunning = new AtomicBoolean(false);
public final AtomicReference<RuntimeException> failed = new AtomicReference<>();
public Thread thread = null;
private final PythonHandle system;
private PythonRunningState(final PythonHandle system) {
this.system = system;
}
public boolean hasFailed() {
return failed.get() != null;
}
public boolean sourceIsInitialized() {
return system.imageSource != null;
}
public void waitUntilSourceInitialized(final long timeout) {
final long startTime = System.currentTimeMillis();
while(!sourceIsInitialized() && (System.currentTimeMillis() - startTime) < timeout && !hasFailed())
Thread.yield();
if(hasFailed())
throw new PythonException(
"The module \"" + system.currentModule + ".py\" using function \"" + system.currentFunction
+ "\" failed with the following exception before it ever initialized the source:" +
failed.get());
if(!sourceIsInitialized())
throw new PythonException(
"The module \"" + system.currentModule + ".py\" using function \"" + system.currentFunction
+ "\" never initialized the image source. Did you call runPythonFunction somewhere?");
}
}
/**
* When in ASYNCHRONOUS mode, since the typical use of calling Python in PileCV4J is to run
* a neural network, you can write the script to hand the labels (classes) back to Java.
* This is usually done on the script side before initializing the source; on the Java
* side, this method should then be called after the source is initialized
* (see {@link PythonRunningState#waitUntilSourceInitialized(long)}).
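*
* <pre>
* <code>
* // a sketch: assumes the running script set the labels before initializing the source
* state.waitUntilSourceInitialized(10000);
* final String[] labels = python.retrieveModelLabels();
* </code>
* </pre>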
*/
public String[] retrieveModelLabels() {
// how many labels does the model handle.
final int numModelLabels = numModelLabels();
// retrieve the model labels from the python side
final String[] labels = new String[numModelLabels];
for(int i = 0; i < numModelLabels; i++)
labels[i] = getModelLabel(i);
return labels;
}
/**
* Run the script asynchronously. It is assumed the python function will loop and
* communicate back with the Java side through the {@link ImageSource}. If you just want
* to call a Python function you should use {@link PythonHandle#runPythonFunction(String, String, ParamBlock)}.
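*
* <p>
* A minimal sketch ("detector" and "run" are hypothetical module/function names,
* and {@code frame} is a {@link CvMat} obtained elsewhere):
* </p>
*
* <pre>
* <code>
* final PythonRunningState state = python.runPythonFunctionAsynch("detector", "run",
*     ParamBlock.builder().arg(python));
* state.waitUntilSourceInitialized(10000);
* try (PythonResults results = python.sendMat(frame, true, null);) {
*     while(!results.hasResult() &amp;&amp; !results.isAbandoned())
*         Thread.yield();
*     try (CvMat resultMat = results.getResultMat();) {
*         // use the result
*     }
* }
* python.eos(); // tell the script we're finished
* </code>
* </pre>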
*/
public PythonRunningState runPythonFunctionAsynch(final String module, final String function, final ParamBlock pb) {
final var ret = new PythonRunningState(this);
final AtomicBoolean started = new AtomicBoolean(false);
chain(
ret.thread = new Thread(() -> {
ret.isRunning.set(true);
started.set(true);
try {
runPythonFunction(module, function, pb);
} catch(final RuntimeException rte) {
LOGGER.error("Python function call {} (from module {}) with parameters {} failed", function, module, pb, rte);
rte.printStackTrace();
throw rte;
} finally {
ret.isRunning.set(false);
}
}, "Python Thread"),
t -> t.setDaemon(true),
t -> t.start());
while(!started.get())
Thread.yield();
return ret;
}
/**
* Add a path to the Python environment where Python should search for modules (*.py files).
*/
public void addModulePath(final String dir) {
final String absDir = FileSystems.getDefault().getPath(dir).normalize().toAbsolutePath().toString();
PythonAPI.pilecv4j_python_addModulePath(absDir);
}
/**
* While running in ASYNCHRONOUS mode, you can send a {@link CvMat} via the
* image source to the running Python script. Obviously, on the Python side, you
* will have needed to write the script to read from the ImageSource.
*/
public PythonResults sendMat(final CvMat mat, final boolean isRgb, final ParamBlock params) {
if(imageSource != null)
return imageSource.send(mat, isRgb, params);
throw new IllegalStateException("There's no current image source");
}
/**
* While running in ASYNCHRONOUS mode, send an indication to the Python script that
* we're finished.
*/
public void eos() {
sendMat(null, false, null);
}
/**
* Clean up the resources. This will close the ImageSource if it's been created
* for communication in ASYNCHRONOUS mode, and also close down the Python interpreter.
*/
@Override
public void close() {
if(imageSource != null)
imageSource.close();
if(nativeObj != 0)
PythonAPI.pilecv4j_python_kogSys_destroy(nativeObj);
}
/**
* Create the {@link PythonHandle}, unpack the Python module located at
* {@code pythonModuleUri} (e.g. "classpath:///python"), and add the
* unpacked module to the Python path.
*/
public static PythonHandle initModule(final String pythonModuleUri) {
final File pythonModulePath = unpackModule(pythonModuleUri);
final PythonHandle ret = new PythonHandle();
ret.addModulePath(pythonModulePath.getAbsolutePath());
return ret;
}
/**
* This will unpack a module and add it to the path that Python searches
* for *.py modules.
*/
public void unpackAndAddModule(final String pythonModuleUri) {
final File tmpDirWithPythonModule = unpackModule(pythonModuleUri);
addModulePath(tmpDirWithPythonModule.getAbsolutePath());
}
/**
* This will unpack a module into a temp directory and return
* the path where it was unpacked.
*/
public static File unpackModule(final String pythonModuleUri) {
synchronized(pythonExpandedLock) {
final File ret = pythonIsExpanded.get(pythonModuleUri);
if(ret == null) {
try(Vfs vfs = new Vfs();) {
final Path path = vfs.toPath(uncheck(() -> new URI(pythonModuleUri)));
if(!path.exists() || !path.isDirectory())
throw new IllegalStateException("The python code isn't properly bundled in the jar file.");
final File pythonCodeDir = Files.createTempDirectory("pilecv4j-lib-python").toFile();
pythonCodeDir.deleteOnExit();
copy(path, pythonCodeDir.getAbsolutePath(), true);
pythonIsExpanded.put(pythonModuleUri, pythonCodeDir);
return pythonCodeDir;
} catch(final IOException ioe) {
throw new IllegalStateException("Failed to expand python code.", ioe);
}
}
return ret;
}
}
/**
* When communicating in ASYNCHRONOUS mode you send a Mat using {@link PythonHandle#sendMat(CvMat, boolean, ParamBlock)}
* and you'll get a {@link PythonResults} back. This acts like a Java Future. The script should eventually
* set a result Mat from the CNN operation in response to the {@code sendMat}. At that point
* {@link PythonResults#hasResult()} will return true and {@link PythonResults#getResultMat()} will
* return the results that were set from the Python script.
*/
public static class PythonResults implements QuietCloseable {
private final long nativeObj;
PythonResults(final long nativeObj) {
this.nativeObj = nativeObj;
}
/**
* Once the Python script has set the result of an operation that was started using
* {@link PythonHandle#sendMat(CvMat, boolean, ParamBlock)}, this will return those results. Until
* then it will return null. You can poll for the result using {@link PythonResults#hasResult()}.
*/
public CvMat getResultMat() {
if(nativeObj != 0L) {
final long resRef = PythonAPI.pilecv4j_python_kogMatResults_getResults(nativeObj);
if(resRef != 0L)
return CvMat.wrapNative(resRef);
else
return null;
}
throw new NullPointerException("Illegal KogMatResults. Null underlying reference.");
}
/**
* Clean up the underlying resources. The {@link PythonResults} should not be used
* after they have been closed.
*/
@Override
public void close() {
if(nativeObj != 0L)
PythonAPI.pilecv4j_python_kogMatResults_destroy(nativeObj);
}
/**
* Once the Python script has set the result of an operation that was started using
* {@link PythonHandle#sendMat(CvMat, boolean, ParamBlock)}, this will return true and
* any actual results can be retrieved using {@link PythonResults#getResultMat()}.
*/
public boolean hasResult() {
if(nativeObj != 0L)
return PythonAPI.pilecv4j_python_kogMatResults_hasResult(nativeObj) != 0;
throw new NullPointerException("Illegal KogMatResults. Null underlying reference.");
}
public boolean isAbandoned() {
if(nativeObj != 0L)
return PythonAPI.pilecv4j_python_kogMatResults_isAbandoned(nativeObj) != 0;
throw new NullPointerException("Illegal KogMatResults. Null underlying reference.");
}
}
static class ImageSource implements QuietCloseable {
private final long imageSourceRef;
ImageSource(final long imageSourceRef) {
this.imageSourceRef = imageSourceRef;
}
public PythonResults send(final CvMat mat, final boolean isRgb) {
return send(mat, isRgb, 0L);
}
public PythonResults send(final CvMat mat, final boolean isRgb, final ParamBlock params) {
if(params == null)
return send(mat, isRgb, 0L);
try(Dict kwds = params.buildKeywordArgs();) {
return send(mat, isRgb, kwds.dictRef());
}
}
public long peek() {
return PythonAPI.pilecv4j_python_imageSource_peek(imageSourceRef);
}
@Override
public void close() {
PythonAPI.pilecv4j_python_imageSource_destroy(imageSourceRef);
}
private PythonResults send(final CvMat mat, final boolean isRgb, final long dictRef) {
final long result;
if(mat != null)
result = PythonAPI.pilecv4j_python_imageSource_send(imageSourceRef, dictRef, mat.nativeObj, (isRgb ? 1 : 0));
else
result = PythonAPI.pilecv4j_python_imageSource_send(imageSourceRef, dictRef, 0L, 0);
return (result == 0L) ? null : new PythonResults(result);
}
}
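/**
* Decode a result buffer produced by the native side. The encoding is recursive
* and type-tagged: a single type byte followed by the payload (a long, a double,
* an int-length-prefixed UTF-8 string, a native Mat or PyObject reference, or an
* int count followed by that many nested results).
*/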
static Object parseResult(final ByteBuffer bb) {
final byte type = bb.get();
switch(type) {
case PyResultNONE:
return null;
case PyResultLONG:
return bb.getLong();
case PyResultFLOAT:
return bb.getDouble();
case PyResultSTRING: {
final int size = bb.getInt();
final byte[] strBytes = new byte[size];
bb.get(strBytes);
return new String(strBytes, StandardCharsets.UTF_8);
}
case PyResultMAT: {
final long nativeRef = bb.getLong();
try(var qc = new UnmanagedMat(nativeRef);) {
return qc;
}
}
case PyResultPyObject: {
final long nativeRef = bb.getLong();
final var ret = new PyObject(nativeRef, true);
return ret;
}
case PyResultLIST: {
final int size = bb.getInt();
final List<Object> ret = new ArrayList<>(size);
for(int i = 0; i < size; i++) {
ret.add(parseResult(bb));
}
return ret;
}
default:
throw new IllegalArgumentException("Can't handle result type:" + type);
}
}
static void throwIfNecessary(final int status) throws PythonException {
if(status != 0) {
final Pointer p = PythonAPI.pilecv4j_python_status_message(status);
try(final QuietCloseable qc = () -> PythonAPI.pilecv4j_python_status_freeMessage(p);) {
if(Pointer.nativeValue(p) == 0L)
throw new PythonException("Null status message. Status code:" + status);
else {
final String message = p.getString(0);
throw new PythonException(message);
}
}
}
}
private static String stripTrailingSlash(final String path) {
if(path.endsWith("/") || path.endsWith("\\"))
return path.substring(0, path.length() - 1);
else
return path;
}
private static class UnmanagedMat extends CvMat {
private UnmanagedMat(final long nativeRef) {
super(nativeRef);
}
// we're skipping the delete because this mat is actually
// managed by the result block
@Override
protected void doNativeDelete() {}
}
private static String getPath(final URI uri) {
final String pathToUse;
if("jar".equals(uri.getScheme())) {
final String uriStr = uri.toString();
int indexOfEx = uriStr.lastIndexOf('!');
if(indexOfEx < 0) {
// just cut off from the last ':'
indexOfEx = uriStr.lastIndexOf(':');
if(indexOfEx < 0)
throw new IllegalArgumentException("Cannot interpret the jar uri: " + uriStr);
}
pathToUse = uriStr.substring(indexOfEx + 1);
} else
pathToUse = uri.getPath();
return pathToUse;
}
private static void copy(final Path from, final String destDirStrX, final boolean skipThisDir) throws IOException {
final String destDirStr = stripTrailingSlash(destDirStrX);
final File destDir = new File(destDirStr);
if(!destDir.exists())
destDir.mkdirs();
if(!destDir.isDirectory())
throw new IOException("The destination \"" + destDir.getAbsolutePath() + "\" was expected to be a directory.");
// if from is a directory, we need to act recursively.
if(from.isDirectory()) {
final String newDest;
if(skipThisDir) {
newDest = destDir.getAbsolutePath();
} else {
final String relativeName = new File(getPath(from.uri())).getName();
newDest = destDir.getAbsolutePath() + "/" + relativeName;
}
for(final Path sp: from.list()) {
copy(sp, newDest, false);
}
} else {
final String filename = new File(getPath(from.uri())).getName();
try(InputStream is = from.read();) {
FileUtils.copyInputStreamToFile(is, new File(destDir, filename));
}
}
}
private String getModelLabel(final int i) {
final Pointer ml = PythonAPI.pilecv4j_python_kogSys_modelLabel(nativeObj, i);
if(Pointer.nativeValue(ml) == 0L)
return null;
else
return ml.getString(0);
}
private int numModelLabels() {
return PythonAPI.pilecv4j_python_kogSys_numModelLabels(nativeObj);
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-python/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-python/1.0/ai/kognition/pilecv4j/python/ResultBlock.java
|
package ai.kognition.pilecv4j.python;
import java.util.List;
import com.sun.jna.Pointer;
import net.dempsy.util.QuietCloseable;
import ai.kognition.pilecv4j.image.CvMat;
import ai.kognition.pilecv4j.python.internal.PythonAPI;
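/**
* Holds the raw, natively allocated result of a call to
* {@link PythonHandle#runPythonFunction(String, String, ParamBlock)} and lazily
* parses it into a Java object: a Number, a String, a {@link CvMat}, a
* {@link PyObject}, or a List of any of these.
*
* <p>
* A minimal usage sketch (module and function names are hypothetical):
* </p>
*
* <pre>
* <code>
* try (ResultBlock result = python.runPythonFunction("my_module", "my_func",
*         ParamBlock.builder().arg(1.0));) {
*     if(result != null)
*         System.out.println(result.doubleValue());
* }
* </code>
* </pre>
*/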
public class ResultBlock implements QuietCloseable {
private final Pointer results;
private final int resultSize;
private Object parsed = null;
ResultBlock(final Pointer results, final int resultSize) {
this.results = results;
this.resultSize = resultSize;
}
public Object parse() {
parseIfNecessary();
return parsed;
}
public long longValue() {
parseIfNecessary();
return ((Number)parsed).longValue();
}
public int intValue() {
parseIfNecessary();
return ((Number)parsed).intValue();
}
public short shortValue() {
parseIfNecessary();
return ((Number)parsed).shortValue();
}
public byte byteValue() {
parseIfNecessary();
return ((Number)parsed).byteValue();
}
public float floatValue() {
parseIfNecessary();
return ((Number)parsed).floatValue();
}
public double doubleValue() {
parseIfNecessary();
return ((Number)parsed).doubleValue();
}
@Override
public void close() {
PythonHandle.throwIfNecessary(PythonAPI.pilecv4j_python_freeFunctionResults(results));
}
public Object doparse() {
final var ret = PythonHandle.parseResult(results.getByteBuffer(0, resultSize));
return ret;
}
public CvMat asMat() {
parseIfNecessary();
return CvMat.shallowCopy(((CvMat)parsed));
}
public PyObject asPyObject() {
parseIfNecessary();
return parsed == null ? null : ((PyObject)parsed).shallowCopy();
}
public List<?> asList() {
parseIfNecessary();
return parsed == null ? null : ((List<?>)parsed);
}
private void parseIfNecessary() {
if(parsed == null)
parsed = doparse();
}
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-python/1.0/ai/kognition/pilecv4j/python
|
java-sources/ai/kognition/pilecv4j/lib-python/1.0/ai/kognition/pilecv4j/python/internal/PythonAPI.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.python.internal;
import com.sun.jna.Callback;
import com.sun.jna.Native;
import com.sun.jna.NativeLibrary;
import com.sun.jna.Pointer;
import com.sun.jna.ptr.IntByReference;
import com.sun.jna.ptr.PointerByReference;
import ai.kognition.pilecv4j.image.CvMat;
import ai.kognition.pilecv4j.util.NativeLibraryLoader;
public class PythonAPI {
public static final String LIBNAME = "pilecv4jpython";
public static void _init() {}
// needs to match LogLevel enum in the C++ code.
public static final int LOG_LEVEL_TRACE = 0;
public static final int LOG_LEVEL_DEBUG = 1;
public static final int LOG_LEVEL_INFO = 2;
public static final int LOG_LEVEL_WARN = 3;
public static final int LOG_LEVEL_ERROR = 4;
public static final int LOG_LEVEL_FATAL = 5;
static {
CvMat.initOpenCv();
NativeLibraryLoader.loader()
.library(LIBNAME)
.addPreLoadCallback((dir, libname, oslibname) -> {
if(LIBNAME.equals(libname))
NativeLibrary.addSearchPath(libname, dir.getAbsolutePath());
})
.load();
Native.register(LIBNAME);
}
public static interface get_image_source extends Callback {
public long image_source(long ptRef);
}
// ===================================================
// Global python calls
// ===================================================
public static native int pilecv4j_python_initPython();
public static native void pilecv4j_python_addModulePath(String absDir);
public static native int pilecv4j_python_runPythonFunction(String module, String function, long args, long dictRef,
PointerByReference result, IntByReference resultSize);
public static native int pilecv4j_python_freeFunctionResults(Pointer resultBuf);
public static native void pilecv4j_python_pyObject_decref(long nativeRef);
public static native void pilecv4j_python_pyObject_incref(long nativeRef);
// ===================================================
// KogSys lifecycle and methods
// ===================================================
public static native long pilecv4j_python_kogSys_create(get_image_source cb);
public static native int pilecv4j_python_kogSys_destroy(long kogSysRef);
public static native int pilecv4j_python_kogSys_numModelLabels(long ptRef);
public static native Pointer pilecv4j_python_kogSys_modelLabel(final long ptRef, final int index);
// ==============================================================
// ImageSource lifecycle and methods
// ==============================================================
public static native long pilecv4j_python_imageSource_create(long ptRef);
public static native void pilecv4j_python_imageSource_destroy(long imageSourceRef);
public static native long pilecv4j_python_imageSource_send(long imageSourceRef, long paramsDict, long matRef, int rgb);
public static native long pilecv4j_python_imageSource_peek(long imageSourceRef);
// ==============================================================
// KogMatResults lifecycle and methods
// ==============================================================
public static native void pilecv4j_python_kogMatResults_destroy(long nativeObj);
public static native long pilecv4j_python_kogMatResults_getResults(long nativeObj);
public static native int pilecv4j_python_kogMatResults_hasResult(long nativeObj);
public static native int pilecv4j_python_kogMatResults_isAbandoned(long nativeObj);
// ==============================================================
// Python Tuple lifecycle and methods
// ==============================================================
public static native long pilecv4j_python_tuple_create(int size);
public static native void pilecv4j_python_tuple_destroy(long tupleRef);
public static native int pilecv4j_python_tuple_putString(long tupleRef, int index, String valRaw);
public static native int pilecv4j_python_tuple_putMat(long tupleRef, int index, long valRef);
public static native int pilecv4j_python_tuple_putPyObject(long tupleRef, int index, long valRef);
public static native int pilecv4j_python_tuple_putInt(long tupleRef, int index, long valRaw);
public static native int pilecv4j_python_tuple_putFloat(long tupleRef, int index, double valRaw);
public static native int pilecv4j_python_tuple_putKogSys(long tupleRef, int index, long nativeObj);
public static native int pilecv4j_python_tuple_putBoolean(long tupleRef, int index, int i);
// ==============================================================
// Python Dict lifecycle and methods
// ==============================================================
public static native long pilecv4j_python_dict_create();
public static native void pilecv4j_python_dict_destroy(long dictRef);
public static native int pilecv4j_python_dict_putString(long dictRef, String key, String valRaw);
public static native int pilecv4j_python_dict_putMat(long dictRef, String key, long valRef);
public static native int pilecv4j_python_dict_putPyObject(long dictRef, String key, long valRef);
public static native int pilecv4j_python_dict_putInt(long dictRef, String key, long valRaw);
public static native int pilecv4j_python_dict_putFloat(long dictRef, String key, double valRaw);
public static native int pilecv4j_python_dict_putKogSys(long dictRef, String key, long nativeObj);
public static native int pilecv4j_python_dict_putBoolean(long dict, String key, int i);
// ===================================================
// Status/Error code access
// ===================================================
public static native Pointer pilecv4j_python_status_message(int status);
public static native void pilecv4j_python_status_freeMessage(Pointer pointer);
// ===================================================
// Logging
// ===================================================
public static native int pilecv4j_python_setLogLevel(int logLevelSet);
}
|
0
|
java-sources/ai/kognition/pilecv4j/lib-tf/1.0/ai/kognition/pilecv4j
|
java-sources/ai/kognition/pilecv4j/lib-tf/1.0/ai/kognition/pilecv4j/tf/TensorUtils.java
|
/*
* Copyright 2022 Jim Carroll
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.kognition.pilecv4j.tf;
import java.lang.reflect.Array;
import java.nio.ByteBuffer;
import java.util.stream.LongStream;
import com.google.protobuf.InvalidProtocolBufferException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.tensorflow.Graph;
import org.tensorflow.Tensor;
import org.tensorflow.ndarray.Shape;
import org.tensorflow.ndarray.buffer.ByteDataBuffer;
import org.tensorflow.ndarray.buffer.DataBuffers;
import org.tensorflow.proto.framework.GraphDef;
import org.tensorflow.types.TFloat32;
import org.tensorflow.types.family.TType;
import net.dempsy.util.QuietCloseable;
import ai.kognition.pilecv4j.image.CvMat;
import ai.kognition.pilecv4j.image.CvRaster;
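/**
* Utilities for moving image data between pilecv4j's {@link CvMat}/{@link CvRaster}
* and TensorFlow {@link Tensor}s, plus helpers for decoding common result shapes.
*
* <p>
* A minimal usage sketch (the graph file name and tensor type are hypothetical and
* depend on your model; loadImageSomehow() stands in for your image loading):
* </p>
*
* <pre>
* <code>
* final byte[] graphBytes = java.nio.file.Files.readAllBytes(java.nio.file.Paths.get("frozen_graph.pb"));
* try (Graph graph = TensorUtils.inflate(graphBytes);
*     CvMat image = loadImageSomehow();) {
*     final Tensor input = TensorUtils.toTensor(image, TFloat32.class);
*     // feed "input" to a Session run over the graph, then decode the output
*     // with getScalar, getVector, or getMatrix depending on its shape
* }
* </code>
* </pre>
*/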
public class TensorUtils {
public static final Logger LOGGER = LoggerFactory.getLogger(TensorUtils.class);
public static Tensor toTensor(final CvRaster raster, final Class<? extends TType> clazz) {
final Shape shape = Shape.of(new long[] {1,raster.rows(),raster.cols(),raster.channels()});
final ByteBuffer bb = raster.underlying();
bb.rewind();
try(QuietCloseable qc = () -> bb.rewind();) {
final ByteDataBuffer bdb = DataBuffers.of(bb);
return Tensor.of(clazz, shape, bdb);
}
}
public static Tensor toTensor(final CvMat mat, final Class<? extends TType> clazz) {
return mat.rasterOp(raster -> {
return TensorUtils.toTensor(raster, clazz);
});
}
public static Graph inflate(final byte[] graphBytes) throws InvalidProtocolBufferException {
final Graph graph = new Graph();
final GraphDef gd = GraphDef.parseFrom(graphBytes);
graph.importGraphDef(gd);
return graph;
}
public static float getScalar(final Tensor tensor) {
// expect a 1 dim array with 1 value.
return ((TFloat32)tensor).getFloat();
}
public static float[] getVector(final Tensor tensor) {
// expect a 2-dim tensor of shape [1, N]; return the N values of the second dimension.
final int dim1 = (int)tensor.shape().asArray()[1];
final float[][] result = new float[1][dim1];
for(long i = 0; i < result.length; i++) {
for(long j = 0; j < dim1; j++) {
result[(int)i][(int)j] = ((TFloat32)tensor).getFloat(i, j);
}
}
return result[0];
}
public static float[][] getMatrix(final Tensor tensor) {
final int[] dimensions = LongStream.of(tensor.shape().asArray())
.mapToInt(l -> (int)l)
.toArray();
final float[][][] matrix = (float[][][])Array.newInstance(float.class, dimensions);
for(long i = 0; i < dimensions[0]; i++) {
for(long j = 0; j < dimensions[1]; j++) {
for(long k = 0; k < dimensions[2]; k++) {
matrix[(int)i][(int)j][(int)k] = ((TFloat32)tensor).getFloat(i, j, k);
}
}
}
return matrix[0];
}
}
|