| index (int64) | repo_id (string) | file_path (string) | content (string) |
|---|---|---|---|
| 0 | java-sources/ai/idylnlp/idylnlp-model/1.1.0/ai/idylnlp/model/nlp | java-sources/ai/idylnlp/idylnlp-model/1.1.0/ai/idylnlp/model/nlp/translation/LanguageTranslationRequest.java |
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.model.nlp.translation;
public class LanguageTranslationRequest {
private String input;
public LanguageTranslationRequest(String input) {
this.input = input;
}
public String getInput() {
return input;
}
public void setInput(String input) {
this.input = input;
}
}
|
| 0 | java-sources/ai/idylnlp/idylnlp-model/1.1.0/ai/idylnlp/model/nlp | java-sources/ai/idylnlp/idylnlp-model/1.1.0/ai/idylnlp/model/nlp/translation/LanguageTranslationResponse.java |
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.model.nlp.translation;
public class LanguageTranslationResponse {
private String translated;
public LanguageTranslationResponse(String translated) {
this.translated = translated;
}
public String getTranslated() {
return translated;
}
public void setTranslated(String translated) {
this.translated = translated;
}
}
|
| 0 | java-sources/ai/idylnlp/idylnlp-model/1.1.0/ai/idylnlp/model/nlp | java-sources/ai/idylnlp/idylnlp-model/1.1.0/ai/idylnlp/model/nlp/translation/Translator.java |
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.model.nlp.translation;
/**
* Performs translation of natural language text.
*
* @author Mountain Fog, Inc.
*
*/
public interface Translator {
/**
* Translates the input text to a different language.
* @param request The {@link LanguageTranslationRequest request} containing the natural language text to translate.
* @return A {@link LanguageTranslationResponse} containing the translated text.
*/
public LanguageTranslationResponse translate(LanguageTranslationRequest request);
}
|
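The Translator interface above only defines the contract. As a minimal sketch, assuming a hypothetical UppercasingTranslator class that is not part of Idyl NLP, an implementation simply maps a LanguageTranslationRequest to a LanguageTranslationResponse:

package ai.idylnlp.model.nlp.translation;

// Hypothetical example implementation; a real translator would call an actual
// translation engine instead of transforming the text in place.
public class UppercasingTranslator implements Translator {

  @Override
  public LanguageTranslationResponse translate(LanguageTranslationRequest request) {
    // "Translate" by upper-casing the input text.
    final String translated = request.getInput().toUpperCase();
    return new LanguageTranslationResponse(translated);
  }

}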
| 0 | java-sources/ai/idylnlp/idylnlp-model/1.1.0/ai/idylnlp/model | java-sources/ai/idylnlp/idylnlp-model/1.1.0/ai/idylnlp/model/stats/StatsReporter.java |
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.model.stats;
import ai.idylnlp.model.entity.Entity;
import ai.idylnlp.model.manifest.ModelManifest;
import ai.idylnlp.model.manifest.StandardModelManifest;
/**
* Interface for statistics reporters. Idyl NLP does not
* prescribe how statistics are reported; any implementation
* of this interface is allowed, giving the user the flexibility
* to report statistics through whatever mechanism is available to them.
*
* @author Mountain Fog, Inc.
*
*/
public interface StatsReporter {
/**
* The count of entity extraction requests.
*/
public final String EXTRACTION_REQUESTS = "extraction.requests";
/**
* The total count of extracted entities.
*/
public final String ENTITY_COUNT = "entity.count";
/**
* Record an entity extraction.
* @param entity The extracted {@link Entity entity}.
* @param modelManifest The {@link StandardModelManifest} that extracted the entity.
*/
public void recordEntityExtraction(Entity entity, ModelManifest modelManifest);
/**
* Increment a value.
* @param metricName The name of the metric.
*/
public void increment(String metricName);
/**
* Increments a value.
* @param metricName The name of the metric.
* @param value The value.
*/
public void increment(String metricName, long value);
/**
* Report an elapsed time.
* @param metricName The name of the metric.
* @param elapsedTime The elapsed time.
*/
public void time(String metricName, long elapsedTime);
}
|
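Because StatsReporter deliberately leaves the reporting mechanism open, a trivial implementation can simply write each metric to a log. The following sketch is an assumption for illustration only (the LoggingStatsReporter class is hypothetical); it uses only the methods and constants defined by the interface:

package ai.idylnlp.model.stats;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import ai.idylnlp.model.entity.Entity;
import ai.idylnlp.model.manifest.ModelManifest;

// Hypothetical reporter that logs each metric; any other backend
// (StatsD, JMX, a database) could be substituted behind the same interface.
public class LoggingStatsReporter implements StatsReporter {

  private static final Logger LOGGER = LogManager.getLogger(LoggingStatsReporter.class);

  @Override
  public void recordEntityExtraction(Entity entity, ModelManifest modelManifest) {
    increment(ENTITY_COUNT);
    LOGGER.info("Extracted entity {} using model {}.", entity, modelManifest);
  }

  @Override
  public void increment(String metricName) {
    increment(metricName, 1);
  }

  @Override
  public void increment(String metricName, long value) {
    LOGGER.info("Metric {} incremented by {}.", metricName, value);
  }

  @Override
  public void time(String metricName, long elapsedTime) {
    LOGGER.info("Metric {} elapsed time: {}.", metricName, elapsedTime);
  }

}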
| 0 | java-sources/ai/idylnlp/idylnlp-model/1.1.0/ai/idylnlp/model | java-sources/ai/idylnlp/idylnlp-model/1.1.0/ai/idylnlp/model/training/AccuracyEvaluationResult.java |
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.model.training;
/**
* Contains evaluation results for models whose evaluations
* are based on accuracy (such as lemma and part of speech models).
* This class extends {@link EvaluationResult}.
*
* @author Mountain Fog, Inc.
*
*/
public class AccuracyEvaluationResult extends EvaluationResult {
private double wordAccuracy;
private long wordCount;
/**
* Creates a new result.
* @param wordAccuracy The word accuracy value.
* @param wordCount The count of words.
*/
public AccuracyEvaluationResult(double wordAccuracy, long wordCount) {
this.wordAccuracy = wordAccuracy;
this.wordCount = wordCount;
}
@Override
public String toString() {
return "Word Accuracy: " + wordAccuracy + "; Word Count: " + wordCount;
}
/**
* Gets the word accuracy.
* @return The word accuracy.
*/
public double getWordAccuracy() {
return wordAccuracy;
}
/**
* Sets the word accuracy.
* @param wordAccuracy The word accuracy.
*/
public void setWordAccuracy(double wordAccuracy) {
this.wordAccuracy = wordAccuracy;
}
/**
* Gets the word count.
* @return The word count.
*/
public long getWordCount() {
return wordCount;
}
/**
* Sets the word count.
* @param wordCount The word count.
*/
public void setWordCount(long wordCount) {
this.wordCount = wordCount;
}
}
|
| 0 | java-sources/ai/idylnlp/idylnlp-model/1.1.0/ai/idylnlp/model | java-sources/ai/idylnlp/idylnlp-model/1.1.0/ai/idylnlp/model/training/EvaluationResult.java |
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.model.training;
/**
* An abstract class for model evaluation results.
*
* @author Mountain Fog, Inc.
*
*/
public abstract class EvaluationResult {
}
|
| 0 | java-sources/ai/idylnlp/idylnlp-model/1.1.0/ai/idylnlp/model | java-sources/ai/idylnlp/idylnlp-model/1.1.0/ai/idylnlp/model/training/FMeasure.java |
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.model.training;
public class FMeasure {
private double precision;
private double recall;
private double fmeasure;
public FMeasure(double precision, double recall, double fmeasure) {
this.precision = precision;
this.recall = recall;
this.fmeasure = fmeasure;
}
public double getPrecision() {
return precision;
}
public double getRecall() {
return recall;
}
public double getFmeasure() {
return fmeasure;
}
@Override
public String toString() {
return String.format("Precision: %f, Recall: %f, F-Measure: %f", precision, recall, fmeasure);
}
}
|
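FMeasure is a plain value holder; the caller computes the score itself. For reference, the balanced F-measure is F1 = 2 * precision * recall / (precision + recall). A minimal sketch of populating the class, with illustrative numbers that are assumptions rather than real evaluation output:

package ai.idylnlp.model.training;

public class FMeasureExample {

  public static void main(String[] args) {

    // Illustrative values; in practice these come from a model evaluation.
    final double precision = 0.91;
    final double recall = 0.85;

    // Balanced F-measure (F1) computed from precision and recall.
    final double f1 = (2 * precision * recall) / (precision + recall);

    // Prints: Precision: 0.910000, Recall: 0.850000, F-Measure: 0.878977
    System.out.println(new FMeasure(precision, recall, f1));
  }

}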
| 0 | java-sources/ai/idylnlp/idylnlp-model/1.1.0/ai/idylnlp/model | java-sources/ai/idylnlp/idylnlp-model/1.1.0/ai/idylnlp/model/training/FMeasureModelValidationResult.java |
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.model.training;
import java.util.List;
/**
* Contains the {@link FMeasure} values resulting from
* model evaluations that produce F-measures. This class
* extends {@link EvaluationResult}.
*
* @author Mountain Fog, Inc.
*
*/
public class FMeasureModelValidationResult extends EvaluationResult {
private FMeasure fmeasure;
private List<FMeasure> fmeasures;
/**
* Creates a new result.
* @param fmeasure A {@link FMeasure}.
*/
public FMeasureModelValidationResult(FMeasure fmeasure) {
this.fmeasure = fmeasure;
}
/**
* Creates a new result.
* @param fmeasure A {@link FMeasure}.
* @param fmeasures A list of {@link FMeasure}.
*/
public FMeasureModelValidationResult(FMeasure fmeasure, List<FMeasure> fmeasures) {
this.fmeasure = fmeasure;
this.fmeasures = fmeasures;
}
/**
* Gets the F-measure.
* @return The {@link FMeasure}.
*/
public FMeasure getFmeasure() {
return fmeasure;
}
/**
* Sets the F-measure.
* @param fmeasure The {@link FMeasure}.
*/
public void setFmeasure(FMeasure fmeasure) {
this.fmeasure = fmeasure;
}
/**
* Gets the F-measures.
* @return A list of {@link FMeasure} values.
*/
public List<FMeasure> getFmeasures() {
return fmeasures;
}
/**
* Sets the F-measures.
* @param fmeasures A list of {@link FMeasure}.
*/
public void setFmeasures(List<FMeasure> fmeasures) {
this.fmeasures = fmeasures;
}
}
|
| 0 | java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning | java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning/training/DeepLearningEntityModelOperations.java |
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.models.deeplearning.training;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.deeplearning4j.api.storage.StatsStorage;
import org.deeplearning4j.earlystopping.EarlyStoppingConfiguration;
import org.deeplearning4j.earlystopping.EarlyStoppingResult;
import org.deeplearning4j.earlystopping.saver.LocalFileModelSaver;
import org.deeplearning4j.earlystopping.scorecalc.DataSetLossCalculator;
import org.deeplearning4j.earlystopping.termination.MaxTimeIterationTerminationCondition;
import org.deeplearning4j.earlystopping.termination.ScoreImprovementEpochTerminationCondition;
import org.deeplearning4j.earlystopping.trainer.EarlyStoppingTrainer;
import org.deeplearning4j.eval.Evaluation;
import org.deeplearning4j.models.embeddings.loader.WordVectorSerializer;
import org.deeplearning4j.models.embeddings.wordvectors.WordVectors;
import org.deeplearning4j.nn.api.Classifier;
import org.deeplearning4j.nn.conf.LearningRatePolicy;
import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.GravesLSTM;
import org.deeplearning4j.nn.conf.layers.RnnOutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.optimize.api.IterationListener;
import org.deeplearning4j.optimize.listeners.ScoreIterationListener;
import org.deeplearning4j.parallelism.ParallelWrapper;
import org.deeplearning4j.ui.stats.StatsListener;
import org.deeplearning4j.ui.storage.FileStatsStorage;
import org.deeplearning4j.util.ModelSerializer;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.dataset.DataSet;
import org.nd4j.linalg.dataset.api.iterator.DataSetIterator;
import org.nd4j.linalg.lossfunctions.LossFunctions;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import ai.idylnlp.model.nlp.annotation.AnnotationTypes;
import ai.idylnlp.model.nlp.subjects.CoNLL2003SubjectOfTrainingOrEvaluation;
import ai.idylnlp.model.nlp.subjects.IdylNLPSubjectOfTrainingOrEvaluation;
import ai.idylnlp.model.nlp.subjects.OpenNLPSubjectOfTrainingOrEvaluation;
import ai.idylnlp.model.nlp.subjects.SubjectOfTrainingOrEvaluation;
import ai.idylnlp.models.ObjectStreamUtils;
import ai.idylnlp.models.deeplearning.training.model.DeepLearningTrainingDefinition;
import ai.idylnlp.models.deeplearning.training.model.HyperParameters;
import ai.idylnlp.nlp.recognizer.deep.NameSampleDataSetIterator;
import opennlp.tools.namefind.NameSample;
import opennlp.tools.util.ObjectStream;
public class DeepLearningEntityModelOperations {
private static final Logger LOGGER = LogManager.getLogger(DeepLearningEntityModelOperations.class);
private Gson gson;
public DeepLearningEntityModelOperations() {
GsonBuilder gsonBuilder = new GsonBuilder();
gsonBuilder.serializeSpecialFloatingPointValues();
gson = gsonBuilder.setPrettyPrinting().create();
}
/**
* Trains a deep learning model.
* @param definition The {@link DeepLearningTrainingDefinition definition} that describes the training.
* @return An ID generated for the trained model.
* @throws IOException Thrown if the training or evaluation data cannot be read or the model cannot be serialized.
*/
public String train(DeepLearningTrainingDefinition definition) throws IOException {
LOGGER.info("Starting training.");
final String jsonString = gson.toJson(definition, DeepLearningTrainingDefinition.class);
LOGGER.debug(jsonString);
final File wordVectorsFile = new File(definition.getTrainingData().getWordVectorsFile());
final WordVectors wordVectors = WordVectorSerializer.loadStaticModel(wordVectorsFile);
final int vectorSize = wordVectors.getWordVector(wordVectors.vocab().wordAtIndex(0)).length;
final String[] labels = getLabels(definition.getEntityType());
LOGGER.debug("Using vector size: {}", vectorSize);
// Dataset for training.
final ObjectStream<NameSample> trainingSampleStream = ObjectStreamUtils.getObjectStream(getSubjectOfTraining(definition));
final DataSetIterator trainDataSetIterator = new NameSampleDataSetIterator(trainingSampleStream, wordVectors, vectorSize, definition.getHyperParameters().getWindowSize(), labels, definition.getHyperParameters().getBatchSize());
// Dataset for evaluation.
final ObjectStream<NameSample> evaluationSampleStream = ObjectStreamUtils.getObjectStream(getSubjectOfEvaluation(definition));
final DataSetIterator evaluationDataSetIterator = new NameSampleDataSetIterator(evaluationSampleStream, wordVectors, vectorSize, definition.getHyperParameters().getWindowSize(), labels, definition.getHyperParameters().getBatchSize());
// Build the networks.
final MultiLayerConfiguration multiLayerConfiguration = buildNetworkConfiguration(definition.getHyperParameters(), vectorSize);
MultiLayerNetwork multiLayerNetwork = buildNetwork(multiLayerConfiguration, definition);
// Get the early stopping parameters.
if(definition.getEarlyTermination() != null) {
LOGGER.info("Enabling early-termination training.");
EarlyStoppingConfiguration.Builder<MultiLayerNetwork> esConf = new EarlyStoppingConfiguration.Builder<MultiLayerNetwork>();
if(definition.getEarlyTermination().getMaxEpochs() != null) {
esConf.epochTerminationConditions(new ScoreImprovementEpochTerminationCondition(definition.getEarlyTermination().getMaxEpochs()));
}
if(definition.getEarlyTermination().getMaxMinutes() != null) {
esConf.iterationTerminationConditions(new MaxTimeIterationTerminationCondition(definition.getEarlyTermination().getMaxMinutes(), TimeUnit.MINUTES));
}
esConf.scoreCalculator(new DataSetLossCalculator(evaluationDataSetIterator, true));
esConf.evaluateEveryNEpochs(1);
esConf.modelSaver(new LocalFileModelSaver(System.getProperty("java.io.tmpdir")));
EarlyStoppingConfiguration<MultiLayerNetwork> earlyStoppingConfiguration = esConf.build();
EarlyStoppingTrainer trainer = new EarlyStoppingTrainer(earlyStoppingConfiguration, multiLayerNetwork, trainDataSetIterator);
EarlyStoppingResult<MultiLayerNetwork> result = trainer.fit();
multiLayerNetwork = result.getBestModel();
LOGGER.info("Termination reason: " + result.getTerminationReason());
LOGGER.info("Termination details: " + result.getTerminationDetails());
LOGGER.info("Total epochs: " + result.getTotalEpochs());
LOGGER.info("Best epoch number: " + result.getBestModelEpoch());
LOGGER.info("Score at best epoch: " + result.getBestModelScore());
} else {
if(definition.getParallelTraining() != null) {
LOGGER.info("Doing parallel training.");
final ParallelWrapper wrapper = new ParallelWrapper.Builder<Classifier>(multiLayerNetwork)
.prefetchBuffer(definition.getParallelTraining().getPrefetchBuffer())
.workers(definition.getParallelTraining().getWorkers())
.reportScoreAfterAveraging(definition.getParallelTraining().isReportScoreAfterAveraging())
.averagingFrequency(definition.getParallelTraining().getAveragingFrequency())
.useLegacyAveraging(definition.getParallelTraining().isLegacyAveraging())
.build();
for (int i = 1; i <= definition.getHyperParameters().getEpochs(); i++) {
// Do parallel training based on the ParallelTraining properties.
wrapper.fit(trainDataSetIterator);
trainDataSetIterator.reset();
LOGGER.info("Finished epoch {}", i);
Evaluation evaluation = new Evaluation();
while (evaluationDataSetIterator.hasNext()) {
DataSet t = evaluationDataSetIterator.next();
INDArray features = t.getFeatureMatrix();
INDArray labels = t.getLabels();
INDArray inMask = t.getFeaturesMaskArray();
INDArray outMask = t.getLabelsMaskArray();
INDArray predicted = multiLayerNetwork.output(features, false, inMask, outMask);
evaluation.evalTimeSeries(labels, predicted, outMask);
}
evaluationDataSetIterator.reset();
LOGGER.info("Evaluation statistics:\n{}", evaluation.stats());
}
} else {
LOGGER.info("Doing single node training.");
for (int i = 1; i <= definition.getHyperParameters().getEpochs(); i++) {
multiLayerNetwork.fit(trainDataSetIterator);
trainDataSetIterator.reset();
LOGGER.info("Finished epoch {}", i);
Evaluation evaluation = new Evaluation();
while (evaluationDataSetIterator.hasNext()) {
DataSet t = evaluationDataSetIterator.next();
INDArray features = t.getFeatureMatrix();
INDArray labels = t.getLabels();
INDArray inMask = t.getFeaturesMaskArray();
INDArray outMask = t.getLabelsMaskArray();
INDArray predicted = multiLayerNetwork.output(features, false, inMask, outMask);
evaluation.evalTimeSeries(labels, predicted, outMask);
}
evaluationDataSetIterator.reset();
LOGGER.info("Evaluation statistics:\n{}", evaluation.stats());
}
}
}
// Serialize the model to a file.
final File serializedModelFile = new File(definition.getOutput().getOutputFile());
ModelSerializer.writeModel(multiLayerNetwork, serializedModelFile, false);
LOGGER.info("Model serialized to {}", serializedModelFile.getAbsolutePath());
return UUID.randomUUID().toString();
}
public Gson getGson() {
return gson;
}
public DeepLearningTrainingDefinition deserializeDefinition(String json) throws IOException {
return gson.fromJson(json, DeepLearningTrainingDefinition.class);
}
private MultiLayerNetwork buildNetwork(MultiLayerConfiguration multiLayerConfiguration, DeepLearningTrainingDefinition definition) {
MultiLayerNetwork multiLayerNetwork = new MultiLayerNetwork(multiLayerConfiguration);
multiLayerNetwork.init();
List<IterationListener> listeners = new ArrayList<IterationListener>();
if(StringUtils.isNotEmpty(definition.getOutput().getStatsFile())) {
File statsFile = new File(definition.getOutput().getStatsFile());
StatsStorage statsStorage = new FileStatsStorage(statsFile);
listeners.add(new StatsListener(statsStorage));
}
listeners.add(new ScoreIterationListener(definition.getMonitoring().getScoreIteration()));
multiLayerNetwork.setListeners(listeners);
return multiLayerNetwork;
}
private MultiLayerConfiguration buildNetworkConfiguration(HyperParameters hp, int vectorSize) {
// https://deeplearning4j.org/neuralnet-configuration
NeuralNetConfiguration.Builder builder = new NeuralNetConfiguration.Builder();
builder.seed(hp.getSeed());
builder.biasInit(hp.getNetworkConfigurationParameters().getBiasInit());
builder.convolutionMode(hp.getConvolutionModeParam());
builder.dropOut(hp.getNetworkConfigurationParameters().getDropOut());
builder.iterations(hp.getNetworkConfigurationParameters().getIterations());
builder.regularization(hp.getNetworkConfigurationParameters().getRegularizationParameters().getRegularization());
builder.l1(hp.getNetworkConfigurationParameters().getRegularizationParameters().getL1());
builder.l1Bias(hp.getNetworkConfigurationParameters().getRegularizationParameters().getL1Bias());
builder.l2(hp.getNetworkConfigurationParameters().getRegularizationParameters().getL2());
builder.l2Bias(hp.getNetworkConfigurationParameters().getRegularizationParameters().getL2Bias());
builder.updater(hp.getNetworkConfigurationParameters().getUpdaterParameters().getUpdaterParam());
//builder.adamMeanDecay(adamMeanDecay);
//builder.adamVarDecay(adamVarDecay);
builder.useDropConnect(hp.getNetworkConfigurationParameters().isUseDropConnect());
builder.optimizationAlgo(hp.getNetworkConfigurationParameters().getOptimizationAlgorithmParam());
builder.gradientNormalization(hp.getNetworkConfigurationParameters().getGradientNormalizationParam());
builder.gradientNormalizationThreshold(hp.getNetworkConfigurationParameters().getGradientNormalizationThreshold());
builder.weightInit(hp.getNetworkConfigurationParameters().getWeightInitParam());
//builder.dist(dist);
MultiLayerConfiguration multiLayerConfiguration = builder.list()
.layer(0, new GravesLSTM.Builder()
.nIn(vectorSize)
.nOut(256)
.activation(Activation.TANH)
//.learningRate(hp.getNetworkConfigurationParameters().getLayers().getLayer1().getLearningRate())
.learningRateDecayPolicy(LearningRatePolicy.Schedule)
.learningRateSchedule(hp.getNetworkConfigurationParameters().getLayers().getLayer1().getLearningRateScheduleParam())
.biasLearningRate(hp.getNetworkConfigurationParameters().getLayers().getLayer1().getBiasLearningRate())
.build())
.layer(1, new RnnOutputLayer.Builder()
.nIn(256)
.nOut(3) // Equal to the number of labels (start, cont, other)
// The softmax function is often used in the final layer of a neural
// network-based classifier. Such networks are commonly trained under
// a log loss (or cross-entropy) regime, giving a non-linear variant
// of multinomial logistic regression.
.activation(Activation.SOFTMAX)
.lossFunction(LossFunctions.LossFunction.MCXENT)
//.learningRate(hp.getNetworkConfigurationParameters().getLayers().getLayer2().getLearningRate())
.learningRateDecayPolicy(LearningRatePolicy.Schedule)
.learningRateSchedule(hp.getNetworkConfigurationParameters().getLayers().getLayer2().getLearningRateScheduleParam())
.biasLearningRate(hp.getNetworkConfigurationParameters().getLayers().getLayer2().getBiasLearningRate())
.build())
.pretrain(hp.getNetworkConfigurationParameters().isPretrain())
.backprop(hp.getNetworkConfigurationParameters().isBackprop())
.build();
return multiLayerConfiguration;
}
private String[] getLabels(String entityType) {
return new String[] { entityType + "-start", entityType + "-cont", "other" };
}
private SubjectOfTrainingOrEvaluation getSubjectOfTraining(DeepLearningTrainingDefinition definition) {
final String trainingInputFile = definition.getTrainingData().getInputFile();
if(definition.getTrainingData().getFormat().equalsIgnoreCase(AnnotationTypes.IDYLNLP.getName())) {
return new IdylNLPSubjectOfTrainingOrEvaluation(trainingInputFile, definition.getTrainingData().getAnnotationsFile());
} else if(definition.getTrainingData().getFormat().equalsIgnoreCase(AnnotationTypes.CONLL2003.getName())) {
return new CoNLL2003SubjectOfTrainingOrEvaluation(trainingInputFile);
} else {
LOGGER.info("Defaulting to OpenNLP subject of training.");
return new OpenNLPSubjectOfTrainingOrEvaluation(trainingInputFile);
}
}
private SubjectOfTrainingOrEvaluation getSubjectOfEvaluation(DeepLearningTrainingDefinition definition) {
final String evaluationInputFile = definition.getEvaluationData().getInputFile();
if(definition.getEvaluationData().getFormat().equalsIgnoreCase(AnnotationTypes.IDYLNLP.getName())) {
return new IdylNLPSubjectOfTrainingOrEvaluation(evaluationInputFile, definition.getEvaluationData().getAnnotationsFile());
} else if(definition.getEvaluationData().getFormat().equalsIgnoreCase(AnnotationTypes.CONLL2003.getName())) {
return new CoNLL2003SubjectOfTrainingOrEvaluation(evaluationInputFile);
} else {
LOGGER.info("Defaulting to OpenNLP subject of evaluation.");
return new OpenNLPSubjectOfTrainingOrEvaluation(evaluationInputFile);
}
}
}
|
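As a usage sketch of the class above: a caller deserializes a JSON training definition and hands it to train(), which builds the network, runs the configured training loop, and serializes the model to the file named in the definition's Output. The driver class, the file name training-definition.json, and its contents are assumptions for illustration only.

package ai.idylnlp.models.deeplearning.training;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

import ai.idylnlp.models.deeplearning.training.model.DeepLearningTrainingDefinition;

// Hypothetical driver; assumes a JSON definition file exists on disk.
public class TrainingExample {

  public static void main(String[] args) throws IOException {

    // Read a JSON training definition. Its structure mirrors the @SerializedName
    // fields of DeepLearningTrainingDefinition (Output, TrainingData, EvaluationData,
    // HyperParameters, EntityType, and so on).
    final String json = new String(Files.readAllBytes(Paths.get("training-definition.json")), StandardCharsets.UTF_8);

    final DeepLearningEntityModelOperations ops = new DeepLearningEntityModelOperations();
    final DeepLearningTrainingDefinition definition = ops.deserializeDefinition(json);

    // Trains the network, writes it to the definition's output file,
    // and returns a generated model ID.
    final String modelId = ops.train(definition);

    System.out.println("Trained model " + modelId);
  }

}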
| 0 | java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning/training | java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning/training/model/DeepLearningTrainingDefinition.java |
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.models.deeplearning.training.model;
import java.io.IOException;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
public class DeepLearningTrainingDefinition {
@SerializedName("Output")
@Expose
private Output output;
@SerializedName("TrainingData")
@Expose
private TrainingData trainingData;
@SerializedName("EvaluationData")
@Expose
private EvaluationData evaluationData;
@SerializedName("HyperParameters")
@Expose
private HyperParameters hyperParameters;
@SerializedName("EarlyTermination")
@Expose
private EarlyTermination earlyTermination;
@SerializedName("EntityType")
@Expose
private String entityType;
@SerializedName("Language")
@Expose
private String language = "en";
@SerializedName("Name")
@Expose
private String name = "model";
@SerializedName("Monitoring")
@Expose
private Monitoring monitoring;
@SerializedName("ParallelTraining")
@Expose
private ParallelTraining parallelTraining;
@SerializedName("SparkTraining")
@Expose
private SparkTraining sparkTraining = new SparkTraining();
public DeepLearningTrainingDefinition() throws IOException {
monitoring = new Monitoring();
}
public DeepLearningTrainingDefinition(Output output, TrainingData trainingData,
EvaluationData evaluationData, HyperParameters hyperParameters, EarlyTermination earlyTermination,
String entityType, SparkTraining sparkTraining, Monitoring monitoring, ParallelTraining parallelTraining,
String name, String language) {
this.output = output;
this.trainingData = trainingData;
this.evaluationData = evaluationData;
this.hyperParameters = hyperParameters;
this.earlyTermination = earlyTermination;
this.entityType = entityType;
this.sparkTraining = sparkTraining;
this.monitoring = monitoring;
this.parallelTraining = parallelTraining;
this.name = name;
this.language = language;
}
@Override
public boolean equals(Object o) {
return EqualsBuilder.reflectionEquals(this, o);
}
@Override
public int hashCode() {
return HashCodeBuilder.reflectionHashCode(this);
}
public Output getOutput() {
return output;
}
public void setOutput(Output output) {
this.output = output;
}
public HyperParameters getHyperParameters() {
return hyperParameters;
}
public void setTraining(HyperParameters hyperParameters) {
this.hyperParameters = hyperParameters;
}
public TrainingData getTrainingData() {
return trainingData;
}
public void setTrainingData(TrainingData trainingData) {
this.trainingData = trainingData;
}
public EvaluationData getEvaluationData() {
return evaluationData;
}
public void setEvaluationData(EvaluationData evaluationData) {
this.evaluationData = evaluationData;
}
public void setHyperParameters(HyperParameters hyperParameters) {
this.hyperParameters = hyperParameters;
}
public EarlyTermination getEarlyTermination() {
return earlyTermination;
}
public void setEarlyTermination(EarlyTermination earlyTermination) {
this.earlyTermination = earlyTermination;
}
public String getEntityType() {
return entityType;
}
public void setEntityType(String entityType) {
this.entityType = entityType;
}
public SparkTraining getSparkTraining() {
return sparkTraining;
}
public void setSparkTraining(SparkTraining sparkTraining) {
this.sparkTraining = sparkTraining;
}
public Monitoring getMonitoring() {
return monitoring;
}
public void setMonitoring(Monitoring monitoring) {
this.monitoring = monitoring;
}
public ParallelTraining getParallelTraining() {
return parallelTraining;
}
public void setParallelTraining(ParallelTraining parallelTraining) {
this.parallelTraining = parallelTraining;
}
public String getLanguage() {
return language;
}
public void setLanguage(String language) {
this.language = language;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
|
| 0 | java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning/training | java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning/training/model/EarlyTermination.java |
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.models.deeplearning.training.model;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
public class EarlyTermination {
@SerializedName("MaxEpochs")
@Expose
private Integer maxEpochs;
@SerializedName("MaxMinutes")
@Expose
private Integer maxMinutes;
public EarlyTermination() {
}
public EarlyTermination(Integer maxEpochs, Integer maxMinutes) {
this.maxEpochs = maxEpochs;
this.maxMinutes = maxMinutes;
}
@Override
public boolean equals(Object o) {
return EqualsBuilder.reflectionEquals(this, o);
}
@Override
public int hashCode() {
return HashCodeBuilder.reflectionHashCode(this);
}
public Integer getMaxEpochs() {
return maxEpochs;
}
public void setMaxEpochs(Integer maxEpochs) {
this.maxEpochs = maxEpochs;
}
public Integer getMaxMinutes() {
return maxMinutes;
}
public void setMaxMinutes(Integer maxMinutes) {
this.maxMinutes = maxMinutes;
}
}
|
| 0 | java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning/training | java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning/training/model/EvaluationData.java |
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.models.deeplearning.training.model;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
public class EvaluationData {
@SerializedName("Format")
@Expose
private String format;
@SerializedName("InputFile")
@Expose
private String inputFile;
@SerializedName("AnnotationsFile")
@Expose
private String annotationsFile;
public EvaluationData(String format, String inputFile) {
this.format = format;
this.inputFile = inputFile;
}
public EvaluationData(String format, String inputFile, String annotationsFile) {
this.format = format;
this.inputFile = inputFile;
this.annotationsFile = annotationsFile;
}
@Override
public boolean equals(Object o) {
return EqualsBuilder.reflectionEquals(this, o);
}
@Override
public int hashCode() {
return HashCodeBuilder.reflectionHashCode(this);
}
public String getFormat() {
return format;
}
public void setFormat(String format) {
this.format = format;
}
public String getInputFile() {
return inputFile;
}
public void setInputFile(String inputFile) {
this.inputFile = inputFile;
}
public String getAnnotationsFile() {
return annotationsFile;
}
public void setAnnotationsFile(String annotationsFile) {
this.annotationsFile = annotationsFile;
}
}
|
| 0 | java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning/training | java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning/training/model/HyperParameters.java |
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.models.deeplearning.training.model;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.deeplearning4j.nn.conf.ConvolutionMode;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
public class HyperParameters {
@SerializedName("WindowSize")
@Expose
private int windowSize = 15;
@SerializedName("Seed")
@Expose
private long seed = 1497630814976308L;
@SerializedName("Epochs")
@Expose
private int epochs = 1;
@SerializedName("BatchSize")
@Expose
private int batchSize = 1;
@SerializedName("ConvolutionMode")
@Expose
private String convolutionMode = "truncate";
@SerializedName("NetworkConfigurationParameters")
@Expose
private NetworkConfigurationParameters networkConfigurationParameters;
public ConvolutionMode getConvolutionModeParam() {
if(StringUtils.equalsIgnoreCase(convolutionMode, "truncate")) {
return ConvolutionMode.Truncate;
} else if(StringUtils.equalsIgnoreCase(convolutionMode, "same")) {
return ConvolutionMode.Same;
} else if(StringUtils.equalsIgnoreCase(convolutionMode, "strict")) {
return ConvolutionMode.Strict;
} else {
return null;
}
}
@Override
public boolean equals(Object o) {
return EqualsBuilder.reflectionEquals(this, o);
}
@Override
public int hashCode() {
return HashCodeBuilder.reflectionHashCode(this);
}
public int getWindowSize() {
return windowSize;
}
public void setWindowSize(int windowSize) {
this.windowSize = windowSize;
}
public long getSeed() {
return seed;
}
public void setSeed(long seed) {
this.seed = seed;
}
public NetworkConfigurationParameters getNetworkConfigurationParameters() {
return networkConfigurationParameters;
}
public void setNetworkConfigurationParameters(NetworkConfigurationParameters networkConfigurationParameters) {
this.networkConfigurationParameters = networkConfigurationParameters;
}
public int getEpochs() {
return epochs;
}
public void setEpochs(int epochs) {
this.epochs = epochs;
}
public int getBatchSize() {
return batchSize;
}
public void setBatchSize(int batchSize) {
this.batchSize = batchSize;
}
public String getConvolutionMode() {
return convolutionMode;
}
public void setConvolutionMode(String convolutionMode) {
this.convolutionMode = convolutionMode;
}
}
|
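HyperParameters stores the convolution mode as a string (matching the JSON definition) and maps it to the DL4J enum on demand; unrecognized values yield null. A minimal sketch of that mapping, where the example class and the chosen value "same" are assumptions:

package ai.idylnlp.models.deeplearning.training.model;

import org.deeplearning4j.nn.conf.ConvolutionMode;

// Hypothetical example showing the string-to-enum conversion.
public class HyperParametersExample {

  public static void main(String[] args) {

    final HyperParameters hyperParameters = new HyperParameters();

    // Accepted values are "truncate", "same", and "strict".
    hyperParameters.setConvolutionMode("same");

    final ConvolutionMode mode = hyperParameters.getConvolutionModeParam();
    System.out.println(mode); // Same
  }

}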
| 0 | java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning/training | java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning/training/model/Layer.java |
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.models.deeplearning.training.model;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
public class Layer {
@SerializedName("LearningRate")
@Expose
private double learningRate = 1e-1;
@SerializedName("BiasLearningRate")
@Expose
private double biasLearningRate = Double.NaN;
@SerializedName("LearningRateDecayPolicy")
@Expose
private String learningRateDecayPolicy = "schedule";
@SerializedName("LearningRateSchedule")
@Expose
private Map<String, Double> learningRateSchedule;
public Layer() {
}
public Layer(double learningRate) {
this.learningRate = learningRate;
}
public Layer(double learningRate, double biasLearningRate, Map<String, Double> learningRateSchedule) {
this.learningRate = learningRate;
this.biasLearningRate = biasLearningRate;
this.learningRateSchedule = learningRateSchedule;
}
public Map<Integer, Double> getLearningRateScheduleParam() {
Map<Integer, Double> param = new HashMap<Integer, Double>();
if(learningRateSchedule != null) {
for(String key : learningRateSchedule.keySet()) {
param.put(Integer.valueOf(key), learningRateSchedule.get(key));
}
}
return param;
}
@Override
public boolean equals(Object o) {
return EqualsBuilder.reflectionEquals(this, o);
}
@Override
public int hashCode() {
return HashCodeBuilder.reflectionHashCode(this);
}
public double getLearningRate() {
return learningRate;
}
public void setLearningRate(double learningRate) {
this.learningRate = learningRate;
}
public double getBiasLearningRate() {
return biasLearningRate;
}
public void setBiasLearningRate(double biasLearningRate) {
this.biasLearningRate = biasLearningRate;
}
public Map<String, Double> getLearningRateSchedule() {
return learningRateSchedule;
}
public void setLearningRateSchedule(Map<String, Double> learningRateSchedule) {
this.learningRateSchedule = learningRateSchedule;
}
public String getLearningRateDecayPolicy() {
return learningRateDecayPolicy;
}
public void setLearningRateDecayPolicy(String learningRateDecayPolicy) {
this.learningRateDecayPolicy = learningRateDecayPolicy;
}
}
|
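Layer keeps the learning-rate schedule with string keys so it can be expressed directly in the JSON definition, and getLearningRateScheduleParam converts those keys to the Integer iteration keys the network configuration builder consumes. A short sketch, with illustrative schedule values that are assumptions:

package ai.idylnlp.models.deeplearning.training.model;

import java.util.HashMap;
import java.util.Map;

// Hypothetical example of building and converting a learning-rate schedule.
public class LayerScheduleExample {

  public static void main(String[] args) {

    // Iteration numbers kept as strings, as they would appear in the JSON definition.
    final Map<String, Double> schedule = new HashMap<>();
    schedule.put("0", 0.1);
    schedule.put("1000", 0.05);
    schedule.put("5000", 0.01);

    final Layer layer = new Layer(0.1, 0.01, schedule);

    // Converts the string keys to the Integer keys used when configuring the layer.
    final Map<Integer, Double> param = layer.getLearningRateScheduleParam();
    System.out.println(param); // e.g. {0=0.1, 1000=0.05, 5000=0.01}
  }

}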
| 0 | java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning/training | java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning/training/model/Layers.java |
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.models.deeplearning.training.model;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
public class Layers {
@SerializedName("Layer1")
@Expose
private Layer layer1;
@SerializedName("Layer2")
@Expose
private Layer layer2;
public Layers() {
}
public Layers(Layer layer1, Layer layer2) {
this.layer1 = layer1;
this.layer2 = layer2;
}
@Override
public boolean equals(Object o) {
return EqualsBuilder.reflectionEquals(this, o);
}
@Override
public int hashCode() {
return HashCodeBuilder.reflectionHashCode(this);
}
public Layer getLayer1() {
return layer1;
}
public void setLayer1(Layer layer1) {
this.layer1 = layer1;
}
public Layer getLayer2() {
return layer2;
}
public void setLayer2(Layer layer2) {
this.layer2 = layer2;
}
}
|
| 0 | java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning/training | java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning/training/model/Monitoring.java |
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.models.deeplearning.training.model;
import java.io.IOException;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
public class Monitoring {
@SerializedName("ScoreIteration")
@Expose
private int scoreIteration = 100;
public Monitoring() throws IOException {
}
public Monitoring(int scoreIteration, String statsFile) {
this.scoreIteration = scoreIteration;
}
@Override
public boolean equals(Object o) {
return EqualsBuilder.reflectionEquals(this, o);
}
@Override
public int hashCode() {
return HashCodeBuilder.reflectionHashCode(this);
}
public int getScoreIteration() {
return scoreIteration;
}
public void setScoreIteration(int scoreIteration) {
this.scoreIteration = scoreIteration;
}
}
|
| 0 | java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning/training | java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning/training/model/NetworkConfigurationParameters.java |
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.models.deeplearning.training.model;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.deeplearning4j.nn.api.OptimizationAlgorithm;
import org.deeplearning4j.nn.conf.GradientNormalization;
import org.deeplearning4j.nn.weights.WeightInit;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
public class NetworkConfigurationParameters {
@SerializedName("BiasInit")
@Expose
private double biasInit = 0.0;
@SerializedName("BiasLearningRate")
@Expose
private Integer biasLearningRate = 0;
@SerializedName("ConvolutionMode")
@Expose
private String convolutionMode;
@SerializedName("UseDropConnect")
@Expose
private boolean useDropConnect = false;
@SerializedName("DropOut")
@Expose
private double dropOut = 0.0;
@SerializedName("Iterations")
@Expose
private int iterations = 1;
@SerializedName("OptimizationAlgorithm")
@Expose
private String optimizationAlgorithm = "stochastic_gradient_descent";
@SerializedName("WeightInit")
@Expose
private String weightInit;
@SerializedName("Pretrain")
@Expose
private boolean pretrain = false;
@SerializedName("Backprop")
@Expose
private boolean backprop = true;
@SerializedName("RegularizationParameters")
@Expose
private RegularizationParameters regularizationParameters;
@SerializedName("UpdaterParameters")
@Expose
private UpdaterParameters updaterParameters;
@SerializedName("Layers")
@Expose
private Layers layers;
@SerializedName("GradientNormalization")
@Expose
private String gradientNormalization;
@SerializedName("GradientNormalizationThreshold")
@Expose
private double gradientNormalizationThreshold = 0.0;
public NetworkConfigurationParameters() {
}
public NetworkConfigurationParameters(double biasInit, int biasLearningRate, String convolutionMode, boolean useDropConnect,
int dropOut, int iterations, String optimizationAlgorithm, String weightInit, double learningRate,
boolean pretrain, boolean backprop, RegularizationParameters regularizationParameters,
UpdaterParameters updaterParameters, Layers layers, String gradientNormalization, double gradientNormalizationThreshold) {
this.biasInit = biasInit;
this.biasLearningRate = biasLearningRate;
this.convolutionMode = convolutionMode;
this.useDropConnect = useDropConnect;
this.dropOut = dropOut;
this.iterations = iterations;
this.optimizationAlgorithm = optimizationAlgorithm;
this.weightInit = weightInit;
this.pretrain = pretrain;
this.backprop = backprop;
this.regularizationParameters = regularizationParameters;
this.updaterParameters = updaterParameters;
this.layers = layers;
this.gradientNormalization = gradientNormalization;
this.gradientNormalizationThreshold = gradientNormalizationThreshold;
}
public OptimizationAlgorithm getOptimizationAlgorithmParam() {
if(StringUtils.equalsIgnoreCase(optimizationAlgorithm, "conjugate_gradient")) {
return OptimizationAlgorithm.CONJUGATE_GRADIENT;
} else if(StringUtils.equalsIgnoreCase(optimizationAlgorithm, "lbfgs")) {
return OptimizationAlgorithm.LBFGS;
} else if(StringUtils.equalsIgnoreCase(optimizationAlgorithm, "line_gradient_descent")) {
return OptimizationAlgorithm.LINE_GRADIENT_DESCENT;
} else if(StringUtils.equalsIgnoreCase(optimizationAlgorithm, "stochastic_gradient_descent")) {
return OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT;
} else {
return null;
}
}
public WeightInit getWeightInitParam() {
if(StringUtils.equalsIgnoreCase(weightInit, "xavier")) {
return WeightInit.XAVIER;
} else if(StringUtils.equalsIgnoreCase(weightInit, "distribution")) {
return WeightInit.DISTRIBUTION;
} else if(StringUtils.equalsIgnoreCase(weightInit, "relu")) {
return WeightInit.RELU;
} else if(StringUtils.equalsIgnoreCase(weightInit, "relu_uniform")) {
return WeightInit.RELU_UNIFORM;
} else if(StringUtils.equalsIgnoreCase(weightInit, "sigmoid_uniform")) {
return WeightInit.SIGMOID_UNIFORM;
} else if(StringUtils.equalsIgnoreCase(weightInit, "uniform")) {
return WeightInit.UNIFORM;
} else if(StringUtils.equalsIgnoreCase(weightInit, "xavier_fan_in")) {
return WeightInit.XAVIER_FAN_IN;
} else if(StringUtils.equalsIgnoreCase(weightInit, "xavier_legacy")) {
return WeightInit.XAVIER_LEGACY;
} else if(StringUtils.equalsIgnoreCase(weightInit, "xavier_uniform")) {
return WeightInit.XAVIER_UNIFORM;
} else if(StringUtils.equalsIgnoreCase(weightInit, "zero")) {
return WeightInit.ZERO;
} else {
return null;
}
}
public GradientNormalization getGradientNormalizationParam() {
if(StringUtils.equalsIgnoreCase(gradientNormalization, "clipelementwiseabsolutevalue")) {
return GradientNormalization.ClipElementWiseAbsoluteValue;
} else if(StringUtils.equalsIgnoreCase(gradientNormalization, "clipl2perlayer")) {
return GradientNormalization.ClipL2PerLayer;
} else if(StringUtils.equalsIgnoreCase(gradientNormalization, "clipl2perparamtype")) {
return GradientNormalization.ClipL2PerParamType;
} else if(StringUtils.equalsIgnoreCase(gradientNormalization, "none")) {
return GradientNormalization.None;
} else if(StringUtils.equalsIgnoreCase(gradientNormalization, "renormalizel2perlayer")) {
return GradientNormalization.RenormalizeL2PerLayer;
} else if(StringUtils.equalsIgnoreCase(gradientNormalization, "renormalizel2perparamtype")) {
return GradientNormalization.RenormalizeL2PerParamType;
} else {
return null;
}
}
@Override
public boolean equals(Object o) {
return EqualsBuilder.reflectionEquals(this, o);
}
@Override
public int hashCode() {
return HashCodeBuilder.reflectionHashCode(this);
}
public double getBiasInit() {
return biasInit;
}
public void setBiasInit(double biasInit) {
this.biasInit = biasInit;
}
public Integer getBiasLearningRate() {
return biasLearningRate;
}
public void setBiasLearningRate(Integer biasLearningRate) {
this.biasLearningRate = biasLearningRate;
}
public String getConvolutionMode() {
return convolutionMode;
}
public void setConvolutionMode(String convolutionMode) {
this.convolutionMode = convolutionMode;
}
public Boolean isUseDropConnect() {
return useDropConnect;
}
public void setUseDropConnect(boolean useDropConnect) {
this.useDropConnect = useDropConnect;
}
public double getDropOut() {
return dropOut;
}
public void setDropOut(double dropOut) {
this.dropOut = dropOut;
}
public int getIterations() {
return iterations;
}
public void setIterations(int iterations) {
this.iterations = iterations;
}
public String getOptimizationAlgorithm() {
return optimizationAlgorithm;
}
public void setOptimizationAlgorithm(String optimizationAlgorithm) {
this.optimizationAlgorithm = optimizationAlgorithm;
}
public String getWeightInit() {
return weightInit;
}
public void setWeightInit(String weightInit) {
this.weightInit = weightInit;
}
public boolean isPretrain() {
return pretrain;
}
public void setPretrain(boolean pretrain) {
this.pretrain = pretrain;
}
public boolean isBackprop() {
return backprop;
}
public void setBackprop(boolean backprop) {
this.backprop = backprop;
}
public RegularizationParameters getRegularizationParameters() {
return regularizationParameters;
}
public void setRegularizationParameters(RegularizationParameters regularizationParameters) {
this.regularizationParameters = regularizationParameters;
}
public UpdaterParameters getUpdaterParameters() {
return updaterParameters;
}
public void setUpdaterParameters(UpdaterParameters updaterParameters) {
this.updaterParameters = updaterParameters;
}
public Layers getLayers() {
return layers;
}
public void setLayers(Layers layers) {
this.layers = layers;
}
public String getGradientNormalization() {
return gradientNormalization;
}
public void setGradientNormalization(String gradientNormalization) {
this.gradientNormalization = gradientNormalization;
}
public double getGradientNormalizationThreshold() {
return gradientNormalizationThreshold;
}
public void setGradientNormalizationThreshold(double gradientNormalizationThreshold) {
this.gradientNormalizationThreshold = gradientNormalizationThreshold;
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning/training
|
java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning/training/model/Output.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.models.deeplearning.training.model;
import java.io.File;
import java.io.IOException;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
public class Output {
@SerializedName("OutputFile")
@Expose
private String outputFile;
@SerializedName("StatsFile")
@Expose
private String statsFile;
public Output() throws IOException {
statsFile = File.createTempFile("stats", "dl4j").getAbsolutePath();
}
public Output(String outputFile, String statsFile) {
this.outputFile = outputFile;
this.statsFile = statsFile;
}
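// Illustrative usage only; the paths below are hypothetical. The no-arg constructor
// instead creates a temporary stats file automatically.
//
//   Output output = new Output("/tmp/entity-model.bin", "/tmp/training-stats.dl4j");
//   String statsFile = output.getStatsFile();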
@Override
public boolean equals(Object o) {
return EqualsBuilder.reflectionEquals(this, o);
}
@Override
public int hashCode() {
return HashCodeBuilder.reflectionHashCode(this);
}
public String getOutputFile() {
return outputFile;
}
public void setOutputFile(String outputFile) {
this.outputFile = outputFile;
}
public String getStatsFile() {
return statsFile;
}
public void setStatsFile(String statsFile) {
this.statsFile = statsFile;
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning/training
|
java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning/training/model/ParallelTraining.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.models.deeplearning.training.model;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
public class ParallelTraining {
@SerializedName("PrefetchBuffer")
@Expose
private int prefetchBuffer = 2;
@SerializedName("Workers")
@Expose
private int workers = 2;
@SerializedName("AveragingFrequency")
@Expose
private int averagingFrequency = 3;
@SerializedName("ReportScoreAfterAveraging")
@Expose
private boolean reportScoreAfterAveraging = true;
@SerializedName("LegacyAveraging")
@Expose
private boolean legacyAveraging = false;
public ParallelTraining() {
}
public ParallelTraining(int prefetchBuffer, int workers, int averagingFrequency,
boolean reportScoreAfterAveraging, boolean legacyAveraging) {
this.prefetchBuffer = prefetchBuffer;
this.workers = workers;
this.averagingFrequency = averagingFrequency;
this.reportScoreAfterAveraging = reportScoreAfterAveraging;
this.legacyAveraging = legacyAveraging;
}
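// Illustrative usage only; the values below are arbitrary examples, while the field
// initializers above are the defaults applied by the no-arg constructor.
//
//   ParallelTraining parallelTraining = new ParallelTraining(4, 8, 5, true, false);
//   int workers = parallelTraining.getWorkers();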
@Override
public boolean equals(Object o) {
return EqualsBuilder.reflectionEquals(this, o);
}
@Override
public int hashCode() {
return HashCodeBuilder.reflectionHashCode(this);
}
public int getPrefetchBuffer() {
return prefetchBuffer;
}
public void setPrefetchBuffer(int prefetchBuffer) {
this.prefetchBuffer = prefetchBuffer;
}
public int getWorkers() {
return workers;
}
public void setWorkers(int workers) {
this.workers = workers;
}
public int getAveragingFrequency() {
return averagingFrequency;
}
public void setAveragingFrequency(int averagingFrequency) {
this.averagingFrequency = averagingFrequency;
}
public boolean isReportScoreAfterAveraging() {
return reportScoreAfterAveraging;
}
public void setReportScoreAfterAveraging(boolean reportScoreAfterAveraging) {
this.reportScoreAfterAveraging = reportScoreAfterAveraging;
}
public boolean isLegacyAveraging() {
return legacyAveraging;
}
public void setLegacyAveraging(boolean legacyAveraging) {
this.legacyAveraging = legacyAveraging;
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning/training
|
java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning/training/model/RegularizationParameters.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.models.deeplearning.training.model;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
public class RegularizationParameters {
@SerializedName("Regularization")
@Expose
private boolean regularization = false;
@SerializedName("L1")
@Expose
private Double l1 = Double.NaN;
@SerializedName("L1Bias")
@Expose
private Double l1Bias = Double.NaN;
@SerializedName("L2")
@Expose
private Double l2 = Double.NaN;
@SerializedName("L2Bias")
@Expose
private Double l2Bias = Double.NaN;
public RegularizationParameters() {
}
public RegularizationParameters(boolean regularization, Double l1, Double l1Bias, Double l2, Double l2Bias) {
this.regularization = regularization;
this.l1 = l1;
this.l1Bias = l1Bias;
this.l2 = l2;
this.l2Bias = l2Bias;
}
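// Illustrative usage only; the coefficients below are hypothetical. Unset values
// remain Double.NaN per the field initializers above.
//
//   RegularizationParameters regularization = new RegularizationParameters(true, 0.001, Double.NaN, 0.0001, Double.NaN);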
@Override
public boolean equals(Object o) {
return EqualsBuilder.reflectionEquals(this, o);
}
@Override
public int hashCode() {
return HashCodeBuilder.reflectionHashCode(this);
}
public boolean getRegularization() {
return regularization;
}
public void setRegularization(boolean regularization) {
this.regularization = regularization;
}
public Double getL1() {
return l1;
}
public void setL1(Double l1) {
this.l1 = l1;
}
public Double getL1Bias() {
return l1Bias;
}
public void setL1Bias(Double l1Bias) {
this.l1Bias = l1Bias;
}
public Double getL2() {
return l2;
}
public void setL2(Double l2) {
this.l2 = l2;
}
public Double getL2Bias() {
return l2Bias;
}
public void setL2Bias(Double l2Bias) {
this.l2Bias = l2Bias;
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning/training
|
java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning/training/model/SparkTraining.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.models.deeplearning.training.model;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
public class SparkTraining {
@SerializedName("AveragingFrequency")
@Expose
private int averagingFrequency = 5;
@SerializedName("BatchSizePerWorker")
@Expose
private int batchSizePerWorker = 32;
@SerializedName("WorkerPrefetchNumBatches")
@Expose
private int workerPrefetchNumBatches = 2;
@SerializedName("EnableSparkTraining")
@Expose
private boolean enableSparkTraining = false;
@SerializedName("Master")
@Expose
private String master = "local[*]";
public SparkTraining() {
}
public SparkTraining(int averagingFrequency, int batchSizePerWorker, int workerPrefetchNumBatches,
String master) {
this.averagingFrequency = averagingFrequency;
this.batchSizePerWorker = batchSizePerWorker;
this.workerPrefetchNumBatches = workerPrefetchNumBatches;
this.enableSparkTraining = true;
this.master = master;
}
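// Illustrative usage only; the master URL below is hypothetical. Note that this
// constructor enables Spark training implicitly.
//
//   SparkTraining sparkTraining = new SparkTraining(5, 32, 2, "spark://master:7077");
//   boolean enabled = sparkTraining.isEnableSparkTraining(); // true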
@Override
public boolean equals(Object o) {
return EqualsBuilder.reflectionEquals(this, o);
}
@Override
public int hashCode() {
return HashCodeBuilder.reflectionHashCode(this);
}
public int getAveragingFrequency() {
return averagingFrequency;
}
public void setAveragingFrequency(int averagingFrequency) {
this.averagingFrequency = averagingFrequency;
}
public int getBatchSizePerWorker() {
return batchSizePerWorker;
}
public void setBatchSizePerWorker(int batchSizePerWorker) {
this.batchSizePerWorker = batchSizePerWorker;
}
public int getWorkerPrefetchNumBatches() {
return workerPrefetchNumBatches;
}
public void setWorkerPrefetchNumBatches(int workerPrefetchNumBatches) {
this.workerPrefetchNumBatches = workerPrefetchNumBatches;
}
public boolean isEnableSparkTraining() {
return enableSparkTraining;
}
public void setEnableSparkTraining(boolean enableSparkTraining) {
this.enableSparkTraining = enableSparkTraining;
}
public String getMaster() {
return master;
}
public void setMaster(String master) {
this.master = master;
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning/training
|
java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning/training/model/TrainingData.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.models.deeplearning.training.model;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
public class TrainingData {
@SerializedName("Format")
@Expose
private String format;
@SerializedName("InputFile")
@Expose
private String inputFile;
@SerializedName("AnnotationsFile")
@Expose
private String annotationsFile;
@SerializedName("WordVectorsFile")
@Expose
private String wordVectorsFile;
public TrainingData() {
}
public TrainingData(String format, String inputFile, String wordVectorsFile) {
this.format = format;
this.inputFile = inputFile;
this.wordVectorsFile = wordVectorsFile;
}
public TrainingData(String format, String inputFile, String annotationsFile, String wordVectorsFile) {
this.format = format;
this.inputFile = inputFile;
this.annotationsFile = annotationsFile;
this.wordVectorsFile = wordVectorsFile;
}
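// Illustrative usage only; the format string and file paths below are hypothetical
// and must match what the consuming training code expects.
//
//   TrainingData trainingData = new TrainingData("opennlp", "/data/train.txt", "/data/vectors.txt");
//   String format = trainingData.getFormat();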
@Override
public boolean equals(Object o) {
return EqualsBuilder.reflectionEquals(this, o);
}
@Override
public int hashCode() {
return HashCodeBuilder.reflectionHashCode(this);
}
public String getFormat() {
return format;
}
public void setFormat(String format) {
this.format = format;
}
public String getInputFile() {
return inputFile;
}
public void setInputFile(String inputFile) {
this.inputFile = inputFile;
}
public String getAnnotationsFile() {
return annotationsFile;
}
public void setAnnotationsFile(String annotationsFile) {
this.annotationsFile = annotationsFile;
}
public String getWordVectorsFile() {
return wordVectorsFile;
}
public void setWordVectorsFile(String wordVectorsFile) {
this.wordVectorsFile = wordVectorsFile;
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning/training
|
java-sources/ai/idylnlp/idylnlp-models-deeplearning/1.1.0/ai/idylnlp/models/deeplearning/training/model/UpdaterParameters.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.models.deeplearning.training.model;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.deeplearning4j.nn.conf.Updater;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
public class UpdaterParameters {
@SerializedName("Updater")
@Expose
private String updater;
@SerializedName("Epsilon")
@Expose
private Integer epsilon;
@SerializedName("Decay")
@Expose
private Integer decay;
public UpdaterParameters() {
}
public UpdaterParameters(String updater, int epsilon, int decay, int learningRate) {
this.updater = updater;
this.epsilon = epsilon;
this.decay = decay;
}
public Updater getUpdaterParam() {
if(StringUtils.equalsIgnoreCase(updater, "adadelta")) {
return Updater.ADADELTA;
} else if(StringUtils.equalsIgnoreCase(updater, "adagrad")) {
return Updater.ADAGRAD;
} else if(StringUtils.equalsIgnoreCase(updater, "adam")) {
return Updater.ADAM;
} else if(StringUtils.equalsIgnoreCase(updater, "nesterovs")) {
return Updater.NESTEROVS;
} else if(StringUtils.equalsIgnoreCase(updater, "none")) {
return Updater.NONE;
} else if(StringUtils.equalsIgnoreCase(updater, "rmsprop")) {
return Updater.RMSPROP;
} else if(StringUtils.equalsIgnoreCase(updater, "sgd")) {
return Updater.SGD;
} else {
return Updater.NONE;
}
}
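// Illustrative usage only: the updater name is matched case-insensitively, and any
// unrecognized value falls back to Updater.NONE.
//
//   UpdaterParameters updaterParameters = new UpdaterParameters();
//   updaterParameters.setUpdater("adam");
//   Updater updater = updaterParameters.getUpdaterParam(); // Updater.ADAM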
@Override
public boolean equals(Object o) {
return EqualsBuilder.reflectionEquals(this, o);
}
@Override
public int hashCode() {
return HashCodeBuilder.reflectionHashCode(this);
}
public String getUpdater() {
return updater;
}
public void setUpdater(String updater) {
this.updater = updater;
}
public Integer getEpsilon() {
return epsilon;
}
public void setEpsilon(Integer epsilon) {
this.epsilon = epsilon;
}
public Integer getDecay() {
return decay;
}
public void setDecay(Integer decay) {
this.decay = decay;
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-models-opennlp/1.1.0/ai/idylnlp
|
java-sources/ai/idylnlp/idylnlp-models-opennlp/1.1.0/ai/idylnlp/models/ModelOperationsUtils.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.models;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import ai.idylnlp.model.nlp.annotation.AnnotationTypes;
import ai.idylnlp.model.nlp.subjects.BratSubjectOfTrainingOrEvaluation;
import ai.idylnlp.model.nlp.subjects.CoNLL2003SubjectOfTrainingOrEvaluation;
import ai.idylnlp.model.nlp.subjects.IdylNLPSubjectOfTrainingOrEvaluation;
import ai.idylnlp.model.nlp.subjects.OpenNLPSubjectOfTrainingOrEvaluation;
import ai.idylnlp.model.nlp.subjects.SubjectOfTrainingOrEvaluation;
import ai.idylnlp.training.definition.model.TrainingDefinitionReader;
public class ModelOperationsUtils {
private static final Logger LOGGER = LogManager.getLogger(ModelOperationsUtils.class);
/**
* Gets a {@link SubjectOfTrainingOrEvaluation} based on the training definition.
* @param reader A {@link TrainingDefinitionReader}.
* @return A {@link SubjectOfTrainingOrEvaluation}.
*/
public static SubjectOfTrainingOrEvaluation getSubjectOfTrainingOrEvaluation(TrainingDefinitionReader reader) {
final String inputFile = reader.getTrainingDefinition().getTrainingdata().getFile();
SubjectOfTrainingOrEvaluation subjectOfTraining = null;
// Set this based on what's in the training definition file.
if(reader.getTrainingDefinition().getTrainingdata().getFormat().equalsIgnoreCase(AnnotationTypes.IDYLNLP.getName())) {
LOGGER.debug("Using Idyl NLP data format.");
subjectOfTraining = new IdylNLPSubjectOfTrainingOrEvaluation(inputFile, reader.getTrainingDefinition().getTrainingdata().getAnnotations());
} else if(reader.getTrainingDefinition().getTrainingdata().getFormat().equalsIgnoreCase(AnnotationTypes.CONLL2003.getName())) {
LOGGER.debug("Using CoNLL2003 data format.");
subjectOfTraining = new CoNLL2003SubjectOfTrainingOrEvaluation(inputFile);
} else if(reader.getTrainingDefinition().getTrainingdata().getFormat().equalsIgnoreCase(AnnotationTypes.BRAT.getName())) {
LOGGER.debug("Using Brat data format.");
subjectOfTraining = new BratSubjectOfTrainingOrEvaluation(inputFile);
} else {
LOGGER.debug("Using OpenNLP data format.");
subjectOfTraining = new OpenNLPSubjectOfTrainingOrEvaluation(inputFile);
}
return subjectOfTraining;
}
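// Illustrative usage only, assuming a TrainingDefinitionReader named "reader"
// obtained elsewhere:
//
//   SubjectOfTrainingOrEvaluation subject = ModelOperationsUtils.getSubjectOfTrainingOrEvaluation(reader);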
}
|
0
|
java-sources/ai/idylnlp/idylnlp-models-opennlp/1.1.0/ai/idylnlp/models/opennlp
|
java-sources/ai/idylnlp/idylnlp-models-opennlp/1.1.0/ai/idylnlp/models/opennlp/training/EntityModelOperations.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.models.opennlp.training;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import ai.idylnlp.opennlp.custom.encryption.OpenNLPEncryptionFactory;
import com.neovisionaries.i18n.LanguageCode;
import ai.idylnlp.model.Constants;
import ai.idylnlp.model.nlp.subjects.SubjectOfTrainingOrEvaluation;
import ai.idylnlp.model.training.FMeasure;
import ai.idylnlp.model.training.FMeasureModelValidationResult;
import ai.idylnlp.models.ModelOperationsUtils;
import ai.idylnlp.models.ObjectStreamUtils;
import ai.idylnlp.models.opennlp.training.model.ModelCrossValidationOperations;
import ai.idylnlp.models.opennlp.training.model.ModelSeparateDataValidationOperations;
import ai.idylnlp.models.opennlp.training.model.ModelTrainingOperations;
import ai.idylnlp.models.opennlp.training.model.TrainingAlgorithm;
import ai.idylnlp.training.definition.model.TrainingDefinitionReader;
import opennlp.tools.cmdline.namefind.NameEvaluationErrorListener;
import opennlp.tools.ml.maxent.quasinewton.QNTrainer;
import opennlp.tools.namefind.BioCodec;
import opennlp.tools.namefind.NameFinderME;
import opennlp.tools.namefind.NameSample;
import opennlp.tools.namefind.TokenNameFinderCrossValidator;
import opennlp.tools.namefind.TokenNameFinderEvaluationMonitor;
import opennlp.tools.namefind.TokenNameFinderEvaluator;
import opennlp.tools.namefind.TokenNameFinderFactory;
import opennlp.tools.namefind.TokenNameFinderModel;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.SequenceCodec;
import opennlp.tools.util.TrainingParameters;
/**
* Operations for training and validating entity models.
*
* @author Mountain Fog, Inc.
*
*/
public class EntityModelOperations implements ModelTrainingOperations, ModelSeparateDataValidationOperations<FMeasureModelValidationResult>, ModelCrossValidationOperations<FMeasureModelValidationResult> {
private static final Logger LOGGER = LogManager.getLogger(EntityModelOperations.class);
private String type;
private String featureGeneratorXml;
/**
* Performs model training using a training definition file.
* @param reader A {@link TrainingDefinitionReader}.
* @return The generated model's ID.
* @throws IOException Thrown if the model creation fails.
*/
public static String train(TrainingDefinitionReader reader) throws IOException {
final String type = reader.getTrainingDefinition().getModel().getType();
final String featureGeneratorXml = reader.getFeatures();
final EntityModelOperations ops = new EntityModelOperations(type, featureGeneratorXml);
final SubjectOfTrainingOrEvaluation subjectOfTraining = ModelOperationsUtils.getSubjectOfTrainingOrEvaluation(reader);
final String modelFile = reader.getTrainingDefinition().getModel().getFile();
final String language = reader.getTrainingDefinition().getModel().getLanguage();
final String encryptionKey = reader.getTrainingDefinition().getModel().getEncryptionkey();
final int cutOff = reader.getTrainingDefinition().getAlgorithm().getCutoff().intValue();
final int iterations = reader.getTrainingDefinition().getAlgorithm().getIterations().intValue();
final int threads = reader.getTrainingDefinition().getAlgorithm().getThreads().intValue();
final String algorithm = reader.getTrainingDefinition().getAlgorithm().getName();
LanguageCode languageCode = LanguageCode.getByCodeIgnoreCase(language);
if(algorithm.equalsIgnoreCase(TrainingAlgorithm.PERCEPTRON.getName())) {
return ops.trainPerceptron(subjectOfTraining, modelFile, languageCode, encryptionKey, cutOff, iterations);
} else if(algorithm.equalsIgnoreCase(TrainingAlgorithm.MAXENT_QN.getName())) {
final double l1 = reader.getTrainingDefinition().getAlgorithm().getL1().doubleValue();
final double l2 = reader.getTrainingDefinition().getAlgorithm().getL2().doubleValue();
int m = reader.getTrainingDefinition().getAlgorithm().getM().intValue();
int max = reader.getTrainingDefinition().getAlgorithm().getMax().intValue();
return ops.trainMaxEntQN(subjectOfTraining, modelFile, languageCode, encryptionKey, cutOff, iterations, threads, l1, l2, m, max);
} else {
throw new IOException("Invalid algorithm specified in the training definition file: " + algorithm);
}
}
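// Illustrative usage only (a sketch, not part of the API), assuming a
// TrainingDefinitionReader named "reader" has been created for an existing
// training definition file:
//
//   String modelId = EntityModelOperations.train(reader);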
/**
* Performs cross-validation of an entity model.
* @param reader A {@link TrainingDefinitionReader}.
* @param folds The number of cross-validation folds.
* @return A {@link FMeasureModelValidationResult}.
* @throws IOException Thrown if the model cannot be validated.
*/
public static FMeasureModelValidationResult crossValidate(TrainingDefinitionReader reader, int folds) throws IOException {
final String language = reader.getTrainingDefinition().getModel().getLanguage();
final int iterations = reader.getTrainingDefinition().getAlgorithm().getIterations().intValue();
final int cutoff = reader.getTrainingDefinition().getAlgorithm().getCutoff().intValue();
final String featureGeneratorXml = reader.getFeatures();
final String type = reader.getTrainingDefinition().getModel().getType();
final String algorithm = reader.getTrainingDefinition().getAlgorithm().getName();
final double l1 = reader.getTrainingDefinition().getAlgorithm().getL1().doubleValue();
final double l2 = reader.getTrainingDefinition().getAlgorithm().getL2().doubleValue();
final int m = reader.getTrainingDefinition().getAlgorithm().getM().intValue();
final int max = reader.getTrainingDefinition().getAlgorithm().getMax().intValue();
final LanguageCode languageCode = LanguageCode.getByCodeIgnoreCase(language);
// Get the subject of training based on what's specified in the training definition file.
final SubjectOfTrainingOrEvaluation subjectOfTraining = ModelOperationsUtils.getSubjectOfTrainingOrEvaluation(reader);
// Now we can set up the entity model operations.
final EntityModelOperations entityModelOperations = new EntityModelOperations(type, featureGeneratorXml);
FMeasureModelValidationResult result = null;
if(StringUtils.equalsIgnoreCase(algorithm, TrainingAlgorithm.PERCEPTRON.getName())) {
result = entityModelOperations.crossValidationEvaluatePerceptron(subjectOfTraining, languageCode, iterations, cutoff, folds);
} else if(StringUtils.equalsIgnoreCase(algorithm, TrainingAlgorithm.MAXENT_QN.getName())) {
result = entityModelOperations.crossValidationEvaluateMaxEntQN(subjectOfTraining, languageCode, iterations, cutoff, folds, l1, l2, m, max);
} else {
throw new IOException("Invalid algorithm specified in the training definition file: " + algorithm);
}
return result;
}
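// Illustrative usage only: "reader" is again an assumed TrainingDefinitionReader,
// and 10 is an arbitrary fold count.
//
//   FMeasureModelValidationResult result = EntityModelOperations.crossValidate(reader, 10);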
/**
* Creates a new instance.
* @param type The entity type.
* @param featureGeneratorXml The XML of the feature generators.
*/
public EntityModelOperations(String type, String featureGeneratorXml) {
this.type = type;
this.featureGeneratorXml = featureGeneratorXml;
}
@Override
public FMeasureModelValidationResult crossValidationEvaluateMaxEntQN(SubjectOfTrainingOrEvaluation subjectOfTraining, LanguageCode language, int iterations, int cutOff, int folds, double l1, double l2, int m, int max) throws IOException {
LOGGER.info("Doing model evaluation using cross-validation with {} folds.", folds);
ObjectStream<NameSample> sampleStream = ObjectStreamUtils.getObjectStream(subjectOfTraining);
TrainingParameters trainParams = new TrainingParameters();
trainParams.put(TrainingParameters.CUTOFF_PARAM, Integer.toString(cutOff));
trainParams.put(TrainingParameters.ITERATIONS_PARAM, Integer.toString(iterations));
trainParams.put(TrainingParameters.ALGORITHM_PARAM, TrainingAlgorithm.MAXENT_QN.getAlgorithm());
trainParams.put(QNTrainer.L1COST_PARAM, String.valueOf(l1));
trainParams.put(QNTrainer.L2COST_PARAM, String.valueOf(l2));
trainParams.put(QNTrainer.M_PARAM, String.valueOf(m));
trainParams.put(QNTrainer.MAX_FCT_EVAL_PARAM, String.valueOf(max));
byte[] featureGeneratorBytes = featureGeneratorXml.getBytes(Charset.forName(Constants.ENCODING_UTF8));
Map<String, Object> resources = new HashMap<String, Object>();
TokenNameFinderEvaluationMonitor monitor = new NameEvaluationErrorListener();
TokenNameFinderCrossValidator evaluator = new TokenNameFinderCrossValidator(language.getAlpha3().toString(), type, trainParams, featureGeneratorBytes, resources, monitor);
evaluator.evaluate(sampleStream, folds);
// TODO: The code to get the F-measures is duplicated in the 3 cross-validation functions.
// Move the code somewhere so it is not duplicated.
final List<FMeasure> fmeasures = new LinkedList<FMeasure>();
for(opennlp.tools.util.eval.FMeasure f : evaluator.getFMeasures()) {
fmeasures.add(new FMeasure(f.getPrecisionScore(), f.getRecallScore(), f.getFMeasure()));
}
final FMeasure fmeasure = new FMeasure(evaluator.getFMeasure().getPrecisionScore(),
evaluator.getFMeasure().getRecallScore(), evaluator.getFMeasure().getFMeasure());
return new FMeasureModelValidationResult(fmeasure, fmeasures);
}
@Override
public FMeasureModelValidationResult crossValidationEvaluatePerceptron(SubjectOfTrainingOrEvaluation subjectOfTraining, LanguageCode language, int iterations, int cutOff, int folds) throws IOException {
LOGGER.info("Doing model evaluation using cross-validation with {} folds.", folds);
ObjectStream<NameSample> sampleStream = ObjectStreamUtils.getObjectStream(subjectOfTraining);
TrainingParameters trainParams = new TrainingParameters();
trainParams.put(TrainingParameters.CUTOFF_PARAM, Integer.toString(cutOff));
trainParams.put(TrainingParameters.ITERATIONS_PARAM, Integer.toString(iterations));
trainParams.put(TrainingParameters.ALGORITHM_PARAM, TrainingAlgorithm.PERCEPTRON.getAlgorithm());
byte[] featureGeneratorBytes = featureGeneratorXml.getBytes(Charset.forName(Constants.ENCODING_UTF8));
Map<String, Object> resources = new HashMap<String, Object>();
TokenNameFinderEvaluationMonitor monitor = new NameEvaluationErrorListener();
TokenNameFinderCrossValidator evaluator = new TokenNameFinderCrossValidator(language.getAlpha3().toString(), type, trainParams, featureGeneratorBytes, resources, monitor);
evaluator.evaluate(sampleStream, folds);
final List<FMeasure> fmeasures = new LinkedList<FMeasure>();
for(opennlp.tools.util.eval.FMeasure f : evaluator.getFMeasures()) {
fmeasures.add(new FMeasure(f.getPrecisionScore(), f.getRecallScore(), f.getFMeasure()));
}
final FMeasure fmeasure = new FMeasure(evaluator.getFMeasure().getPrecisionScore(),
evaluator.getFMeasure().getRecallScore(), evaluator.getFMeasure().getFMeasure());
return new FMeasureModelValidationResult(fmeasure, fmeasures);
}
@Override
public FMeasureModelValidationResult separateDataEvaluate(SubjectOfTrainingOrEvaluation subjectOfTraining, String modelFileName, String encryptionKey) throws IOException {
LOGGER.info("Doing model evaluation using separate training data.");
ObjectStream<NameSample> sampleStream = ObjectStreamUtils.getObjectStream(subjectOfTraining);
// Set the encryption key.
OpenNLPEncryptionFactory.getDefault().setKey(encryptionKey);
TokenNameFinderModel model = new TokenNameFinderModel(new File(modelFileName));
NameFinderME nameFinderME = new NameFinderME(model);
TokenNameFinderEvaluator evaluator = new TokenNameFinderEvaluator(nameFinderME);
evaluator.evaluate(sampleStream);
// Clear the encryption key.
OpenNLPEncryptionFactory.getDefault().clearKey();
final FMeasure fmeasure = new FMeasure(evaluator.getFMeasure().getPrecisionScore(),
evaluator.getFMeasure().getRecallScore(), evaluator.getFMeasure().getFMeasure());
return new FMeasureModelValidationResult(fmeasure);
}
@Override
public String trainPerceptron(SubjectOfTrainingOrEvaluation subjectOfTraining, String modelFile, LanguageCode language, String encryptionKey, int cutOff, int iterations) throws IOException {
LOGGER.info("Beginning entity model training. Output model will be: {}", modelFile);
ObjectStream<NameSample> sampleStream = ObjectStreamUtils.getObjectStream(subjectOfTraining);
TrainingParameters trainParams = new TrainingParameters();
trainParams.put(TrainingParameters.CUTOFF_PARAM, Integer.toString(cutOff));
trainParams.put(TrainingParameters.ITERATIONS_PARAM, Integer.toString(iterations));
trainParams.put(TrainingParameters.ALGORITHM_PARAM, TrainingAlgorithm.PERCEPTRON.getAlgorithm());
// Use the standard BIO codec for encoding name spans.
SequenceCodec<String> sequenceCodec = new BioCodec();
byte[] featureGeneratorBytes = featureGeneratorXml.getBytes(Charset.forName(Constants.ENCODING_UTF8));
Map<String, Object> resources = new HashMap<String, Object>();
TokenNameFinderFactory tokenNameFinderFactory = TokenNameFinderFactory.create(
TokenNameFinderFactory.class.getName(), featureGeneratorBytes, resources, sequenceCodec);
// Set the encryption key.
OpenNLPEncryptionFactory.getDefault().setKey(encryptionKey);
// Create the model.
TokenNameFinderModel model = NameFinderME.train(language.getAlpha3().toString(), type, sampleStream, trainParams, tokenNameFinderFactory);
BufferedOutputStream modelOut = null;
// The generated model's ID. Assigned during the training process.
String modelId = "";
try {
modelOut = new BufferedOutputStream(new FileOutputStream(modelFile));
modelId = model.serialize(modelOut);
} catch (Exception ex) {
LOGGER.error("Unable to create the model.", ex);
} finally {
if (modelOut != null) {
modelOut.close();
}
// Clear the encryption key.
OpenNLPEncryptionFactory.getDefault().clearKey();
}
return modelId;
}
@Override
public String trainMaxEntQN(SubjectOfTrainingOrEvaluation subjectOfTraining, String modelFile, LanguageCode language, String encryptionKey, int cutOff, int iterations, int threads, double l1, double l2, int m, int max) throws IOException {
LOGGER.info("Beginning entity model training with {} threads. Output model will be: {}", threads, modelFile);
ObjectStream<NameSample> sampleStream = ObjectStreamUtils.getObjectStream(subjectOfTraining);
TrainingParameters trainParams = new TrainingParameters();
trainParams.put(TrainingParameters.CUTOFF_PARAM, Integer.toString(cutOff));
trainParams.put(TrainingParameters.ITERATIONS_PARAM, Integer.toString(iterations));
trainParams.put(TrainingParameters.ALGORITHM_PARAM, TrainingAlgorithm.MAXENT_QN.getAlgorithm());
trainParams.put(TrainingParameters.THREADS_PARAM, Integer.toString(threads));
trainParams.put(QNTrainer.L1COST_PARAM, String.valueOf(l1));
trainParams.put(QNTrainer.L2COST_PARAM, String.valueOf(l2));
trainParams.put(QNTrainer.M_PARAM, String.valueOf(m));
trainParams.put(QNTrainer.MAX_FCT_EVAL_PARAM, String.valueOf(max));
// Use null to use the standard Bio codec.
SequenceCodec<String> sequenceCodec = TokenNameFinderFactory.instantiateSequenceCodec(null);
byte[] featureGeneratorBytes = featureGeneratorXml.getBytes(Charset.forName(Constants.ENCODING_UTF8));
Map<String, Object> resources = new HashMap<String, Object>();
TokenNameFinderFactory tokenNameFinderFactory = TokenNameFinderFactory.create(
TokenNameFinderFactory.class.getName(), featureGeneratorBytes, resources, sequenceCodec);
// Set the encryption key.
OpenNLPEncryptionFactory.getDefault().setKey(encryptionKey);
// Create the model.
TokenNameFinderModel model = NameFinderME.train(language.getAlpha3().toString(), type, sampleStream, trainParams, tokenNameFinderFactory);
BufferedOutputStream modelOut = null;
// The generated model's ID. Assigned during the training process.
String modelId = "";
try {
modelOut = new BufferedOutputStream(new FileOutputStream(modelFile));
modelId = model.serialize(modelOut);
} catch (Exception ex) {
LOGGER.error("Unable to create the model.", ex);
} finally {
if (modelOut != null) {
modelOut.close();
}
// Clear the encryption key.
OpenNLPEncryptionFactory.getDefault().clearKey();
}
return modelId;
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-models-opennlp/1.1.0/ai/idylnlp/models/opennlp
|
java-sources/ai/idylnlp/idylnlp-models-opennlp/1.1.0/ai/idylnlp/models/opennlp/training/LemmatizerModelOperations.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.models.opennlp.training;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import ai.idylnlp.opennlp.custom.encryption.OpenNLPEncryptionFactory;
import com.neovisionaries.i18n.LanguageCode;
import ai.idylnlp.model.Constants;
import ai.idylnlp.model.nlp.subjects.SubjectOfTrainingOrEvaluation;
import ai.idylnlp.model.training.AccuracyEvaluationResult;
import ai.idylnlp.models.ModelOperationsUtils;
import ai.idylnlp.models.opennlp.training.model.ModelSeparateDataValidationOperations;
import ai.idylnlp.models.opennlp.training.model.ModelTrainingOperations;
import ai.idylnlp.models.opennlp.training.model.TrainingAlgorithm;
import ai.idylnlp.training.definition.model.TrainingDefinitionReader;
import opennlp.tools.cmdline.lemmatizer.LemmatizerFineGrainedReportListener;
import opennlp.tools.lemmatizer.LemmaSample;
import opennlp.tools.lemmatizer.LemmaSampleStream;
import opennlp.tools.lemmatizer.LemmatizerEvaluationMonitor;
import opennlp.tools.lemmatizer.LemmatizerEvaluator;
import opennlp.tools.lemmatizer.LemmatizerFactory;
import opennlp.tools.lemmatizer.LemmatizerME;
import opennlp.tools.lemmatizer.LemmatizerModel;
import opennlp.tools.ml.maxent.quasinewton.QNTrainer;
import opennlp.tools.util.InputStreamFactory;
import opennlp.tools.util.MarkableFileInputStreamFactory;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.PlainTextByLineStream;
import opennlp.tools.util.TrainingParameters;
/**
* Operations for training and validating lemma models.
*
* @author Mountain Fog, Inc.
*
*/
public class LemmatizerModelOperations implements ModelTrainingOperations, ModelSeparateDataValidationOperations<AccuracyEvaluationResult> {
private static final Logger LOGGER = LogManager.getLogger(LemmatizerModelOperations.class);
/**
* Performs lemmatizer model training using a training definition file.
* @param reader A {@link TrainingDefinitionReader}.
* @return The generated model's ID.
* @throws IOException Thrown if the model creation fails.
*/
public static String train(TrainingDefinitionReader reader) throws IOException {
final LemmatizerModelOperations ops = new LemmatizerModelOperations();
final SubjectOfTrainingOrEvaluation subjectOfTraining = ModelOperationsUtils.getSubjectOfTrainingOrEvaluation(reader);
final String modelFile = reader.getTrainingDefinition().getModel().getFile();
final String language = reader.getTrainingDefinition().getModel().getLanguage();
final String encryptionKey = reader.getTrainingDefinition().getModel().getEncryptionkey();
final int cutOff = reader.getTrainingDefinition().getAlgorithm().getCutoff().intValue();
final int iterations = reader.getTrainingDefinition().getAlgorithm().getIterations().intValue();
final int threads = reader.getTrainingDefinition().getAlgorithm().getThreads().intValue();
final String algorithm = reader.getTrainingDefinition().getAlgorithm().getName();
final LanguageCode languageCode = LanguageCode.getByCodeIgnoreCase(language);
if(algorithm.equalsIgnoreCase(TrainingAlgorithm.PERCEPTRON.getName())) {
return ops.trainPerceptron(subjectOfTraining, modelFile, languageCode, encryptionKey, cutOff, iterations);
} else if(algorithm.equalsIgnoreCase(TrainingAlgorithm.MAXENT_QN.getName())) {
final double l1 = reader.getTrainingDefinition().getAlgorithm().getL1().doubleValue();
final double l2 = reader.getTrainingDefinition().getAlgorithm().getL2().doubleValue();
int m = reader.getTrainingDefinition().getAlgorithm().getM().intValue();
int max = reader.getTrainingDefinition().getAlgorithm().getMax().intValue();
return ops.trainMaxEntQN(subjectOfTraining, modelFile, languageCode, encryptionKey, cutOff, iterations, threads, l1, l2, m, max);
} else {
throw new IOException("Invalid algorithm specified in the training definition file: " + algorithm);
}
}
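// Illustrative usage only, with an assumed TrainingDefinitionReader pointing at a
// lemmatizer training definition:
//
//   String modelId = LemmatizerModelOperations.train(reader);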
@Override
public AccuracyEvaluationResult separateDataEvaluate(SubjectOfTrainingOrEvaluation SubjectOfTrainingOrEvaluation, String modelFileName, String encryptionKey) throws IOException {
LOGGER.info("Doing model evaluation using separate training data.");
// Set the encryption key.
OpenNLPEncryptionFactory.getDefault().setKey(encryptionKey);
InputStreamFactory inputStreamFactory = new MarkableFileInputStreamFactory(new File(SubjectOfTrainingOrEvaluation.getInputFile()));
ObjectStream<LemmaSample> sample = new LemmaSampleStream(new PlainTextByLineStream(inputStreamFactory, Constants.ENCODING_UTF8));
LemmatizerModel model = new LemmatizerModel(new File(modelFileName));
LemmatizerME lemmatizerME = new LemmatizerME(model);
LemmatizerEvaluationMonitor monitor = new LemmatizerFineGrainedReportListener();
LemmatizerEvaluator evaluator = new LemmatizerEvaluator(lemmatizerME, monitor);
evaluator.evaluate(sample);
// Clear the encryption key.
OpenNLPEncryptionFactory.getDefault().clearKey();
return new AccuracyEvaluationResult(evaluator.getWordAccuracy(), evaluator.getWordCount());
}
@Override
public String trainMaxEntQN(SubjectOfTrainingOrEvaluation SubjectOfTrainingOrEvaluation, String modelFile, LanguageCode language, String encryptionKey, int cutOff, int iterations, int threads, double l1, double l2, int m, int max) throws IOException {
LOGGER.info("Beginning tokenizer model training. Output model will be: " + modelFile);
InputStreamFactory inputStreamFactory = new MarkableFileInputStreamFactory(new File(SubjectOfTrainingOrEvaluation.getInputFile()));
ObjectStream<String> lineStream = new PlainTextByLineStream(inputStreamFactory, Constants.ENCODING_UTF8);
ObjectStream<LemmaSample> sampleStream = new LemmaSampleStream(lineStream);
TrainingParameters trainParams = new TrainingParameters();
trainParams.put(TrainingParameters.CUTOFF_PARAM, Integer.toString(cutOff));
trainParams.put(TrainingParameters.ITERATIONS_PARAM, Integer.toString(iterations));
trainParams.put(TrainingParameters.ALGORITHM_PARAM, TrainingAlgorithm.MAXENT_QN.getAlgorithm());
trainParams.put(TrainingParameters.THREADS_PARAM, Integer.toString(threads));
trainParams.put(QNTrainer.L1COST_PARAM, String.valueOf(l1));
trainParams.put(QNTrainer.L2COST_PARAM, String.valueOf(l2));
trainParams.put(QNTrainer.M_PARAM, String.valueOf(m));
trainParams.put(QNTrainer.MAX_FCT_EVAL_PARAM, String.valueOf(max));
LemmatizerFactory lemmatizerFactory = new LemmatizerFactory();
// Set the encryption key.
OpenNLPEncryptionFactory.getDefault().setKey(encryptionKey);
LemmatizerModel model = LemmatizerME.train(language.getAlpha3().toString(), sampleStream, trainParams, lemmatizerFactory);
BufferedOutputStream modelOut = null;
String modelId = "";
try {
modelOut = new BufferedOutputStream(new FileOutputStream(modelFile));
modelId = model.serialize(modelOut);
} finally {
if (modelOut != null) {
modelOut.close();
}
lineStream.close();
// Clear the encryption key.
OpenNLPEncryptionFactory.getDefault().clearKey();
}
return modelId;
}
@Override
public String trainPerceptron(SubjectOfTrainingOrEvaluation SubjectOfTrainingOrEvaluation, String modelFile, LanguageCode language, String encryptionKey, int cutOff, int iterations) throws IOException {
LOGGER.info("Beginning tokenizer model training. Output model will be: " + modelFile);
InputStreamFactory inputStreamFactory = new MarkableFileInputStreamFactory(new File(SubjectOfTrainingOrEvaluation.getInputFile()));
ObjectStream<String> lineStream = new PlainTextByLineStream(inputStreamFactory, Constants.ENCODING_UTF8);
ObjectStream<LemmaSample> sampleStream = new LemmaSampleStream(lineStream);
TrainingParameters trainParams = new TrainingParameters();
trainParams.put(TrainingParameters.CUTOFF_PARAM, Integer.toString(cutOff));
trainParams.put(TrainingParameters.ITERATIONS_PARAM, Integer.toString(iterations));
trainParams.put(TrainingParameters.ALGORITHM_PARAM, TrainingAlgorithm.PERCEPTRON.getAlgorithm());
LemmatizerFactory lemmatizerFactory = new LemmatizerFactory();
// Set the encryption key.
OpenNLPEncryptionFactory.getDefault().setKey(encryptionKey);
LemmatizerModel model = LemmatizerME.train(language.getAlpha3().toString(), sampleStream, trainParams, lemmatizerFactory);
BufferedOutputStream modelOut = null;
String modelId = "";
try {
modelOut = new BufferedOutputStream(new FileOutputStream(modelFile));
modelId = model.serialize(modelOut);
} finally {
if (modelOut != null) {
modelOut.close();
}
lineStream.close();
// Clear the encryption key.
OpenNLPEncryptionFactory.getDefault().clearKey();
}
return modelId;
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-models-opennlp/1.1.0/ai/idylnlp/models/opennlp
|
java-sources/ai/idylnlp/idylnlp-models-opennlp/1.1.0/ai/idylnlp/models/opennlp/training/PartOfSpeechModelOperations.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.models.opennlp.training;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import ai.idylnlp.opennlp.custom.encryption.OpenNLPEncryptionFactory;
import com.neovisionaries.i18n.LanguageCode;
import ai.idylnlp.model.Constants;
import ai.idylnlp.model.nlp.subjects.SubjectOfTrainingOrEvaluation;
import ai.idylnlp.model.training.AccuracyEvaluationResult;
import ai.idylnlp.models.ModelOperationsUtils;
import ai.idylnlp.models.opennlp.training.model.ModelSeparateDataValidationOperations;
import ai.idylnlp.models.opennlp.training.model.ModelTrainingOperations;
import ai.idylnlp.models.opennlp.training.model.TrainingAlgorithm;
import ai.idylnlp.training.definition.model.TrainingDefinitionReader;
import opennlp.tools.cmdline.postag.POSEvaluationErrorListener;
import opennlp.tools.cmdline.postag.POSTaggerFineGrainedReportListener;
import opennlp.tools.ml.maxent.quasinewton.QNTrainer;
import opennlp.tools.postag.POSEvaluator;
import opennlp.tools.postag.POSModel;
import opennlp.tools.postag.POSSample;
import opennlp.tools.postag.POSTaggerEvaluationMonitor;
import opennlp.tools.postag.POSTaggerFactory;
import opennlp.tools.postag.POSTaggerME;
import opennlp.tools.postag.WordTagSampleStream;
import opennlp.tools.util.InputStreamFactory;
import opennlp.tools.util.MarkableFileInputStreamFactory;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.PlainTextByLineStream;
import opennlp.tools.util.TrainingParameters;
/**
* Operations for training and validating part of speech models.
*
* @author Mountain Fog, Inc.
*
*/
public class PartOfSpeechModelOperations implements ModelTrainingOperations, ModelSeparateDataValidationOperations<AccuracyEvaluationResult> {
private static final Logger LOGGER = LogManager.getLogger(PartOfSpeechModelOperations.class);
/**
* Performs part-of-speech model training using a training definition file.
* @param reader A {@link TrainingDefinitionReader}.
* @return The generated model's ID.
* @throws IOException Thrown if the model creation fails.
*/
public static String train(TrainingDefinitionReader reader) throws IOException {
final PartOfSpeechModelOperations ops = new PartOfSpeechModelOperations();
final SubjectOfTrainingOrEvaluation subjectOfTraining = ModelOperationsUtils.getSubjectOfTrainingOrEvaluation(reader);
final String modelFile = reader.getTrainingDefinition().getModel().getFile();
final String language = reader.getTrainingDefinition().getModel().getLanguage();
final String encryptionKey = reader.getTrainingDefinition().getModel().getEncryptionkey();
final int cutOff = reader.getTrainingDefinition().getAlgorithm().getCutoff().intValue();
final int iterations = reader.getTrainingDefinition().getAlgorithm().getIterations().intValue();
final int threads = reader.getTrainingDefinition().getAlgorithm().getThreads().intValue();
final String algorithm = reader.getTrainingDefinition().getAlgorithm().getName();
final LanguageCode languageCode = LanguageCode.getByCodeIgnoreCase(language);
if(algorithm.equalsIgnoreCase(TrainingAlgorithm.PERCEPTRON.getName())) {
return ops.trainPerceptron(subjectOfTraining, modelFile, languageCode, encryptionKey, cutOff, iterations);
} else if(algorithm.equalsIgnoreCase(TrainingAlgorithm.MAXENT_QN.getName())) {
final double l1 = reader.getTrainingDefinition().getAlgorithm().getL1().doubleValue();
final double l2 = reader.getTrainingDefinition().getAlgorithm().getL2().doubleValue();
int m = reader.getTrainingDefinition().getAlgorithm().getM().intValue();
int max = reader.getTrainingDefinition().getAlgorithm().getMax().intValue();
return ops.trainMaxEntQN(subjectOfTraining, modelFile, languageCode, encryptionKey, cutOff, iterations, threads, l1, l2, m, max);
} else {
throw new IOException("Invalid algorithm specified in the training definition file: " + algorithm);
}
}
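// Illustrative usage only, with an assumed TrainingDefinitionReader pointing at a
// part-of-speech training definition:
//
//   String modelId = PartOfSpeechModelOperations.train(reader);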
@Override
public AccuracyEvaluationResult separateDataEvaluate(SubjectOfTrainingOrEvaluation SubjectOfTrainingOrEvaluation, String modelFileName, String encryptionKey) throws IOException {
LOGGER.info("Doing model evaluation using separate training data.");
// Set the encryption key.
OpenNLPEncryptionFactory.getDefault().setKey(encryptionKey);
InputStreamFactory inputStreamFactory = new MarkableFileInputStreamFactory(new File(SubjectOfTrainingOrEvaluation.getInputFile()));
ObjectStream<POSSample> sample = new WordTagSampleStream(new PlainTextByLineStream(inputStreamFactory, Constants.ENCODING_UTF8));
POSModel model = new POSModel(new File(modelFileName));
POSTaggerME posTaggerME = new POSTaggerME(model);
POSTaggerEvaluationMonitor misclassifiedListener = new POSEvaluationErrorListener();
POSTaggerFineGrainedReportListener reportListener = new POSTaggerFineGrainedReportListener(System.out);
POSEvaluator evaluator = new POSEvaluator(posTaggerME, misclassifiedListener, reportListener);
evaluator.evaluate(sample);
// Clear the encryption key.
OpenNLPEncryptionFactory.getDefault().clearKey();
return new AccuracyEvaluationResult(evaluator.getWordAccuracy(), evaluator.getWordCount());
}
@Override
public String trainMaxEntQN(SubjectOfTrainingOrEvaluation SubjectOfTrainingOrEvaluation, String modelFile, LanguageCode language, String encryptionKey, int cutOff, int iterations, int threads, double l1, double l2, int m, int max) throws IOException {
LOGGER.info("Beginning parts-of-speech model training. Output model will be: " + modelFile);
InputStreamFactory inputStreamFactory = new MarkableFileInputStreamFactory(new File(SubjectOfTrainingOrEvaluation.getInputFile()));
ObjectStream<String> lineStream = new PlainTextByLineStream(inputStreamFactory, Constants.ENCODING_UTF8);
ObjectStream<POSSample> sampleStream = new WordTagSampleStream(lineStream);
TrainingParameters trainParams = new TrainingParameters();
trainParams.put(TrainingParameters.CUTOFF_PARAM, Integer.toString(cutOff));
trainParams.put(TrainingParameters.ITERATIONS_PARAM, Integer.toString(iterations));
trainParams.put(TrainingParameters.ALGORITHM_PARAM, TrainingAlgorithm.MAXENT_QN.getAlgorithm());
trainParams.put(TrainingParameters.THREADS_PARAM, Integer.toString(threads));
trainParams.put(QNTrainer.L1COST_PARAM, String.valueOf(l1));
trainParams.put(QNTrainer.L2COST_PARAM, String.valueOf(l2));
trainParams.put(QNTrainer.M_PARAM, String.valueOf(m));
trainParams.put(QNTrainer.MAX_FCT_EVAL_PARAM, String.valueOf(max));
POSTaggerFactory posTaggerFactory = new POSTaggerFactory();
// Set the encryption key.
OpenNLPEncryptionFactory.getDefault().setKey(encryptionKey);
POSModel model = POSTaggerME.train(language.getAlpha3().toString(), sampleStream, trainParams, posTaggerFactory);
BufferedOutputStream modelOut = null;
String modelId = "";
try {
modelOut = new BufferedOutputStream(new FileOutputStream(modelFile));
modelId = model.serialize(modelOut);
} finally {
if (modelOut != null) {
modelOut.close();
}
lineStream.close();
// Clear the encryption key.
OpenNLPEncryptionFactory.getDefault().clearKey();
}
return modelId;
}
@Override
public String trainPerceptron(SubjectOfTrainingOrEvaluation SubjectOfTrainingOrEvaluation, String modelFile, LanguageCode language, String encryptionKey, int cutOff, int iterations) throws IOException {
LOGGER.info("Beginning parts-of-speech model training. Output model will be: " + modelFile);
InputStreamFactory inputStreamFactory = new MarkableFileInputStreamFactory(new File(SubjectOfTrainingOrEvaluation.getInputFile()));
ObjectStream<String> lineStream = new PlainTextByLineStream(inputStreamFactory, Constants.ENCODING_UTF8);
ObjectStream<POSSample> sampleStream = new WordTagSampleStream(lineStream);
TrainingParameters trainParams = new TrainingParameters();
trainParams.put(TrainingParameters.CUTOFF_PARAM, Integer.toString(cutOff));
trainParams.put(TrainingParameters.ITERATIONS_PARAM, Integer.toString(iterations));
trainParams.put(TrainingParameters.ALGORITHM_PARAM, TrainingAlgorithm.PERCEPTRON.getAlgorithm());
POSTaggerFactory posTaggerFactory = new POSTaggerFactory();
// Set the encryption key.
OpenNLPEncryptionFactory.getDefault().setKey(encryptionKey);
POSModel model = POSTaggerME.train(language.getAlpha3().toString(), sampleStream, trainParams, posTaggerFactory);
BufferedOutputStream modelOut = null;
String modelId = "";
try {
modelOut = new BufferedOutputStream(new FileOutputStream(modelFile));
modelId = model.serialize(modelOut);
} finally {
if (modelOut != null) {
modelOut.close();
}
lineStream.close();
// Clear the encryption key.
OpenNLPEncryptionFactory.getDefault().clearKey();
}
return modelId;
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-models-opennlp/1.1.0/ai/idylnlp/models/opennlp
|
java-sources/ai/idylnlp/idylnlp-models-opennlp/1.1.0/ai/idylnlp/models/opennlp/training/SentenceModelOperations.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.models.opennlp.training;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import org.apache.commons.lang.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import ai.idylnlp.opennlp.custom.encryption.OpenNLPEncryptionFactory;
import com.neovisionaries.i18n.LanguageCode;
import ai.idylnlp.model.Constants;
import ai.idylnlp.model.nlp.subjects.SubjectOfTrainingOrEvaluation;
import ai.idylnlp.model.training.FMeasure;
import ai.idylnlp.model.training.FMeasureModelValidationResult;
import ai.idylnlp.models.ModelOperationsUtils;
import ai.idylnlp.models.opennlp.training.model.ModelCrossValidationOperations;
import ai.idylnlp.models.opennlp.training.model.ModelSeparateDataValidationOperations;
import ai.idylnlp.models.opennlp.training.model.ModelTrainingOperations;
import ai.idylnlp.models.opennlp.training.model.TrainingAlgorithm;
import ai.idylnlp.training.definition.model.TrainingDefinitionReader;
import opennlp.tools.cmdline.sentdetect.SentenceEvaluationErrorListener;
import opennlp.tools.dictionary.Dictionary;
import opennlp.tools.ml.maxent.quasinewton.QNTrainer;
import opennlp.tools.sentdetect.SDCrossValidator;
import opennlp.tools.sentdetect.SentenceDetectorEvaluationMonitor;
import opennlp.tools.sentdetect.SentenceDetectorEvaluator;
import opennlp.tools.sentdetect.SentenceDetectorFactory;
import opennlp.tools.sentdetect.SentenceDetectorME;
import opennlp.tools.sentdetect.SentenceModel;
import opennlp.tools.sentdetect.SentenceSample;
import opennlp.tools.sentdetect.SentenceSampleStream;
import opennlp.tools.util.InputStreamFactory;
import opennlp.tools.util.MarkableFileInputStreamFactory;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.PlainTextByLineStream;
import opennlp.tools.util.TrainingParameters;
/**
* Operations for training and validating sentence models.
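*
* <p>A minimal training sketch, assuming a {@link TrainingDefinitionReader} named
* {@code reader} has already been loaded from a training definition file:</p>
* <pre>{@code
* // Train using the algorithm and settings named in the training definition.
* String modelId = SentenceModelOperations.train(reader);
* // Cross-validate the same definition with 10 folds.
* FMeasureModelValidationResult result = SentenceModelOperations.crossValidate(reader, 10);
* }</pre>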
*
* @author Mountain Fog, Inc.
*
*/
public class SentenceModelOperations implements ModelTrainingOperations, ModelSeparateDataValidationOperations<FMeasureModelValidationResult>, ModelCrossValidationOperations<FMeasureModelValidationResult> {
private static final Logger LOGGER = LogManager.getLogger(SentenceModelOperations.class);
/**
* Performs sentence model training using a training definition file.
* @param reader A {@link TrainingDefinitionReader}.
* @return The generated model's ID.
* @throws IOException Thrown if the model creation fails.
*/
public static String train(TrainingDefinitionReader reader) throws IOException {
final SentenceModelOperations ops = new SentenceModelOperations();
final SubjectOfTrainingOrEvaluation subjectOfTraining = ModelOperationsUtils.getSubjectOfTrainingOrEvaluation(reader);
final String modelFile = reader.getTrainingDefinition().getModel().getFile();
final String language = reader.getTrainingDefinition().getModel().getLanguage();
final String encryptionKey = reader.getTrainingDefinition().getModel().getEncryptionkey();
final int cutOff = reader.getTrainingDefinition().getAlgorithm().getCutoff().intValue();
final int iterations = reader.getTrainingDefinition().getAlgorithm().getIterations().intValue();
final int threads = reader.getTrainingDefinition().getAlgorithm().getThreads().intValue();
final String algorithm = reader.getTrainingDefinition().getAlgorithm().getName();
final LanguageCode languageCode = LanguageCode.getByCodeIgnoreCase(language);
if(algorithm.equalsIgnoreCase(TrainingAlgorithm.PERCEPTRON.getName())) {
return ops.trainPerceptron(subjectOfTraining, modelFile, languageCode, encryptionKey, cutOff, iterations);
} else if(algorithm.equalsIgnoreCase(TrainingAlgorithm.MAXENT_QN.getName())) {
final double l1 = reader.getTrainingDefinition().getAlgorithm().getL1().doubleValue();
final double l2 = reader.getTrainingDefinition().getAlgorithm().getL2().doubleValue();
int m = reader.getTrainingDefinition().getAlgorithm().getM().intValue();
int max = reader.getTrainingDefinition().getAlgorithm().getMax().intValue();
return ops.trainMaxEntQN(subjectOfTraining, modelFile, languageCode, encryptionKey, cutOff, iterations, threads, l1, l2, m, max);
} else {
throw new IOException("Invalid algorithm specified in the training definition file: " + algorithm);
}
}
/**
* Performs cross-validation of a sentence model.
* @param reader A {@link TrainingDefinitionReader}.
* @param folds The number of cross-validation folds.
* @return A {@link FMeasureModelValidationResult}.
* @throws IOException Thrown if the model cannot be validated.
*/
public static FMeasureModelValidationResult crossValidate(TrainingDefinitionReader reader, int folds) throws IOException {
final String language = reader.getTrainingDefinition().getModel().getLanguage();
final int iterations = reader.getTrainingDefinition().getAlgorithm().getIterations().intValue();
final int cutoff = reader.getTrainingDefinition().getAlgorithm().getCutoff().intValue();
final String algorithm = reader.getTrainingDefinition().getAlgorithm().getName();
final double l1 = reader.getTrainingDefinition().getAlgorithm().getL1().doubleValue();
final double l2 = reader.getTrainingDefinition().getAlgorithm().getL2().doubleValue();
final int m = reader.getTrainingDefinition().getAlgorithm().getM().intValue();
final int max = reader.getTrainingDefinition().getAlgorithm().getMax().intValue();
final LanguageCode languageCode = LanguageCode.getByCodeIgnoreCase(language);
// Get the subject of training based on what's specified in the training definition file.
final SubjectOfTrainingOrEvaluation subjectOfTraining = ModelOperationsUtils.getSubjectOfTrainingOrEvaluation(reader);
// Now we can set up the entity model operations.
final SentenceModelOperations sentenceModelOperations = new SentenceModelOperations();
FMeasureModelValidationResult result = null;
if(StringUtils.equalsIgnoreCase(algorithm, TrainingAlgorithm.PERCEPTRON.getName())) {
result = sentenceModelOperations.crossValidationEvaluatePerceptron(subjectOfTraining, languageCode, iterations, cutoff, folds);
} else if(StringUtils.equalsIgnoreCase(algorithm, TrainingAlgorithm.MAXENT_QN.getName())) {
result = sentenceModelOperations.crossValidationEvaluateMaxEntQN(subjectOfTraining, languageCode, iterations, cutoff, folds, l1, l2, m, max);
} else {
throw new IOException("Invalid algorithm specified in the training definition file: " + algorithm);
}
return result;
}
@Override
public FMeasureModelValidationResult crossValidationEvaluateMaxEntQN(SubjectOfTrainingOrEvaluation subjectOfTraining, LanguageCode language, int iterations, int cutOff, int folds, double l1, double l2, int m, int max) throws IOException {
LOGGER.info("Doing model evaluation using cross-validation with {} folds using input {}.", folds, subjectOfTraining.getInputFile());
InputStreamFactory inputStreamFactory = new MarkableFileInputStreamFactory(new File(subjectOfTraining.getInputFile()));
ObjectStream<String> lineStream = new PlainTextByLineStream(inputStreamFactory, Constants.ENCODING_UTF8);
ObjectStream<SentenceSample> sampleStream = new SentenceSampleStream(lineStream);
TrainingParameters trainParams = new TrainingParameters();
trainParams.put(TrainingParameters.CUTOFF_PARAM, Integer.toString(cutOff));
trainParams.put(TrainingParameters.ITERATIONS_PARAM, Integer.toString(iterations));
trainParams.put(TrainingParameters.ALGORITHM_PARAM, TrainingAlgorithm.MAXENT_QN.getAlgorithm());
trainParams.put(TrainingParameters.THREADS_PARAM, Integer.toString(1));
SentenceDetectorFactory sentenceDetectorFactory = new SentenceDetectorFactory(language.getAlpha3().toString(), true, new Dictionary(), null);
SentenceDetectorEvaluationMonitor monitor = new SentenceEvaluationErrorListener();
SDCrossValidator evaluator = new SDCrossValidator(language.getAlpha3().toString(), trainParams, sentenceDetectorFactory, monitor);
evaluator.evaluate(sampleStream, folds);
final FMeasure fmeasure = new FMeasure(evaluator.getFMeasure().getPrecisionScore(),
evaluator.getFMeasure().getRecallScore(), evaluator.getFMeasure().getFMeasure());
return new FMeasureModelValidationResult(fmeasure);
}
@Override
public FMeasureModelValidationResult crossValidationEvaluatePerceptron(SubjectOfTrainingOrEvaluation subjectOfTraining, LanguageCode language, int iterations, int cutOff, int folds) throws IOException {
LOGGER.info("Doing model evaluation using cross-validation with {} folds using input {}.", folds, subjectOfTraining.getInputFile());
InputStreamFactory inputStreamFactory = new MarkableFileInputStreamFactory(new File(subjectOfTraining.getInputFile()));
ObjectStream<String> lineStream = new PlainTextByLineStream(inputStreamFactory, Constants.ENCODING_UTF8);
ObjectStream<SentenceSample> sampleStream = new SentenceSampleStream(lineStream);
TrainingParameters trainParams = new TrainingParameters();
trainParams.put(TrainingParameters.CUTOFF_PARAM, Integer.toString(cutOff));
trainParams.put(TrainingParameters.ITERATIONS_PARAM, Integer.toString(iterations));
trainParams.put(TrainingParameters.ALGORITHM_PARAM, TrainingAlgorithm.PERCEPTRON.getAlgorithm());
trainParams.put(TrainingParameters.THREADS_PARAM, Integer.toString(1));
SentenceDetectorFactory sentenceDetectorFactory = new SentenceDetectorFactory(language.getAlpha3().toString(), true, new Dictionary(), null);
SentenceDetectorEvaluationMonitor monitor = new SentenceEvaluationErrorListener();
SDCrossValidator evaluator = new SDCrossValidator(language.getAlpha3().toString(), trainParams, sentenceDetectorFactory, monitor);
evaluator.evaluate(sampleStream, folds);
final FMeasure fmeasure = new FMeasure(evaluator.getFMeasure().getPrecisionScore(),
evaluator.getFMeasure().getRecallScore(), evaluator.getFMeasure().getFMeasure());
return new FMeasureModelValidationResult(fmeasure);
}
@Override
public FMeasureModelValidationResult separateDataEvaluate(SubjectOfTrainingOrEvaluation subjectOfTraining, String modelFileName, String encryptionKey) throws IOException {
LOGGER.info("Doing model evaluation using separate training data.");
// Set the encryption key.
OpenNLPEncryptionFactory.getDefault().setKey(encryptionKey);
InputStreamFactory inputStreamFactory = new MarkableFileInputStreamFactory(new File(subjectOfTraining.getInputFile()));
ObjectStream<SentenceSample> sample = new SentenceSampleStream(new PlainTextByLineStream(inputStreamFactory, Constants.ENCODING_UTF8));
SentenceModel model = new SentenceModel(new File(modelFileName));
SentenceDetectorME sentenceDetector = new SentenceDetectorME(model);
SentenceDetectorEvaluator evaluator = new SentenceDetectorEvaluator(sentenceDetector);
evaluator.evaluate(sample);
// Clear the encryption key.
OpenNLPEncryptionFactory.getDefault().clearKey();
final FMeasure fmeasure = new FMeasure(evaluator.getFMeasure().getPrecisionScore(),
evaluator.getFMeasure().getRecallScore(), evaluator.getFMeasure().getFMeasure());
return new FMeasureModelValidationResult(fmeasure);
}
@Override
public String trainMaxEntQN(SubjectOfTrainingOrEvaluation subjectOfTraining, String modelFile, LanguageCode language, String encryptionKey, int cutOff, int iterations, int threads, double l1, double l2, int m, int max) throws IOException {
LOGGER.info("Beginning sentence model training. Output model will be: " + modelFile);
InputStreamFactory inputStreamFactory = new MarkableFileInputStreamFactory(new File(subjectOfTraining.getInputFile()));
ObjectStream<String> lineStream = new PlainTextByLineStream(inputStreamFactory, Constants.ENCODING_UTF8);
ObjectStream<SentenceSample> sampleStream = new SentenceSampleStream(lineStream);
TrainingParameters trainParams = new TrainingParameters();
trainParams.put(TrainingParameters.CUTOFF_PARAM, Integer.toString(cutOff));
trainParams.put(TrainingParameters.ITERATIONS_PARAM, Integer.toString(iterations));
trainParams.put(TrainingParameters.ALGORITHM_PARAM, TrainingAlgorithm.MAXENT_QN.getAlgorithm());
trainParams.put(TrainingParameters.THREADS_PARAM, Integer.toString(threads));
trainParams.put(QNTrainer.L1COST_PARAM, String.valueOf(l1));
trainParams.put(QNTrainer.L2COST_PARAM, String.valueOf(l2));
trainParams.put(QNTrainer.M_PARAM, String.valueOf(m));
trainParams.put(QNTrainer.MAX_FCT_EVAL_PARAM, String.valueOf(max));
SentenceDetectorFactory sentenceDetectorFactory = new SentenceDetectorFactory(language.getAlpha3().toString(), true, new Dictionary(), null);
// Set the encryption key.
OpenNLPEncryptionFactory.getDefault().setKey(encryptionKey);
SentenceModel model = SentenceDetectorME.train(language.getAlpha3().toString(), sampleStream, sentenceDetectorFactory, trainParams);
BufferedOutputStream modelOut = null;
String modelId = "";
try {
modelOut = new BufferedOutputStream(new FileOutputStream(modelFile));
modelId = model.serialize(modelOut);
} finally {
if (modelOut != null) {
modelOut.close();
}
lineStream.close();
// Clear the encryption key.
OpenNLPEncryptionFactory.getDefault().clearKey();
}
return modelId;
}
@Override
public String trainPerceptron(SubjectOfTrainingOrEvaluation subjectOfTraining, String modelFile, LanguageCode language, String encryptionKey, int cutOff, int iterations) throws IOException {
LOGGER.info("Beginning sentence model training. Output model will be: " + modelFile);
InputStreamFactory inputStreamFactory = new MarkableFileInputStreamFactory(new File(subjectOfTraining.getInputFile()));
ObjectStream<String> lineStream = new PlainTextByLineStream(inputStreamFactory, Constants.ENCODING_UTF8);
ObjectStream<SentenceSample> sampleStream = new SentenceSampleStream(lineStream);
TrainingParameters trainParams = new TrainingParameters();
trainParams.put(TrainingParameters.CUTOFF_PARAM, Integer.toString(cutOff));
trainParams.put(TrainingParameters.ITERATIONS_PARAM, Integer.toString(iterations));
trainParams.put(TrainingParameters.ALGORITHM_PARAM, TrainingAlgorithm.PERCEPTRON.getAlgorithm());
SentenceDetectorFactory sentenceDetectorFactory = new SentenceDetectorFactory(language.getAlpha3().toString(), true, new Dictionary(), null);
// Set the encryption key.
OpenNLPEncryptionFactory.getDefault().setKey(encryptionKey);
SentenceModel model = SentenceDetectorME.train(language.getAlpha3().toString(), sampleStream, sentenceDetectorFactory, trainParams);
BufferedOutputStream modelOut = null;
String modelId = "";
try {
modelOut = new BufferedOutputStream(new FileOutputStream(modelFile));
modelId = model.serialize(modelOut);
} finally {
if (modelOut != null) {
modelOut.close();
}
lineStream.close();
// Clear the encryption key.
OpenNLPEncryptionFactory.getDefault().clearKey();
}
return modelId;
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-models-opennlp/1.1.0/ai/idylnlp/models/opennlp
|
java-sources/ai/idylnlp/idylnlp-models-opennlp/1.1.0/ai/idylnlp/models/opennlp/training/TokenModelOperations.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.models.opennlp.training;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import org.apache.commons.lang.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import ai.idylnlp.opennlp.custom.encryption.OpenNLPEncryptionFactory;
import com.neovisionaries.i18n.LanguageCode;
import ai.idylnlp.model.Constants;
import ai.idylnlp.model.nlp.subjects.SubjectOfTrainingOrEvaluation;
import ai.idylnlp.model.training.FMeasure;
import ai.idylnlp.model.training.FMeasureModelValidationResult;
import ai.idylnlp.models.ModelOperationsUtils;
import ai.idylnlp.models.opennlp.training.model.ModelCrossValidationOperations;
import ai.idylnlp.models.opennlp.training.model.ModelSeparateDataValidationOperations;
import ai.idylnlp.models.opennlp.training.model.ModelTrainingOperations;
import ai.idylnlp.models.opennlp.training.model.TrainingAlgorithm;
import ai.idylnlp.training.definition.model.TrainingDefinitionReader;
import opennlp.tools.cmdline.tokenizer.TokenEvaluationErrorListener;
import opennlp.tools.dictionary.Dictionary;
import opennlp.tools.ml.maxent.quasinewton.QNTrainer;
import opennlp.tools.tokenize.TokenSample;
import opennlp.tools.tokenize.TokenSampleStream;
import opennlp.tools.tokenize.TokenizerCrossValidator;
import opennlp.tools.tokenize.TokenizerEvaluationMonitor;
import opennlp.tools.tokenize.TokenizerEvaluator;
import opennlp.tools.tokenize.TokenizerFactory;
import opennlp.tools.tokenize.TokenizerME;
import opennlp.tools.tokenize.TokenizerModel;
import opennlp.tools.util.InputStreamFactory;
import opennlp.tools.util.MarkableFileInputStreamFactory;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.PlainTextByLineStream;
import opennlp.tools.util.TrainingParameters;
/**
* Operations for training and validating token models.
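*
* <p>A minimal sketch of perceptron training through this class; the output path and
* encryption key are hypothetical, and {@code subjectOfTraining} is assumed to be a
* {@link SubjectOfTrainingOrEvaluation} built elsewhere:</p>
* <pre>{@code
* TokenModelOperations ops = new TokenModelOperations();
* String modelId = ops.trainPerceptron(subjectOfTraining, "/tmp/token.bin",
*     LanguageCode.en, "encryption-key", 5, 100);
* }</pre>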
*
* @author Mountain Fog, Inc.
*
*/
public class TokenModelOperations implements ModelTrainingOperations, ModelSeparateDataValidationOperations<FMeasureModelValidationResult>, ModelCrossValidationOperations<FMeasureModelValidationResult> {
private static final Logger LOGGER = LogManager.getLogger(TokenModelOperations.class);
/**
* Performs token model training using a training definition file.
* @param reader A {@link TrainingDefinitionReader}.
* @return The generated model's ID.
* @throws IOException Thrown if the model creation fails.
*/
public static String train(TrainingDefinitionReader reader) throws IOException {
final TokenModelOperations ops = new TokenModelOperations();
final SubjectOfTrainingOrEvaluation subjectOfTraining = ModelOperationsUtils.getSubjectOfTrainingOrEvaluation(reader);
final String modelFile = reader.getTrainingDefinition().getModel().getFile();
final String language = reader.getTrainingDefinition().getModel().getLanguage();
final String encryptionKey = reader.getTrainingDefinition().getModel().getEncryptionkey();
final int cutOff = reader.getTrainingDefinition().getAlgorithm().getCutoff().intValue();
final int iterations = reader.getTrainingDefinition().getAlgorithm().getIterations().intValue();
final int threads = reader.getTrainingDefinition().getAlgorithm().getThreads().intValue();
final String algorithm = reader.getTrainingDefinition().getAlgorithm().getName();
final LanguageCode languageCode = LanguageCode.getByCodeIgnoreCase(language);
if(algorithm.equalsIgnoreCase(TrainingAlgorithm.PERCEPTRON.getName())) {
return ops.trainPerceptron(subjectOfTraining, modelFile, languageCode, encryptionKey, cutOff, iterations);
} else if(algorithm.equalsIgnoreCase(TrainingAlgorithm.MAXENT_QN.getName())) {
final double l1 = reader.getTrainingDefinition().getAlgorithm().getL1().doubleValue();
final double l2 = reader.getTrainingDefinition().getAlgorithm().getL2().doubleValue();
int m = reader.getTrainingDefinition().getAlgorithm().getM().intValue();
int max = reader.getTrainingDefinition().getAlgorithm().getMax().intValue();
return ops.trainMaxEntQN(subjectOfTraining, modelFile, languageCode, encryptionKey, cutOff, iterations, threads, l1, l2, m, max);
} else {
throw new IOException("Invalid algorithm specified in the training definition file: " + algorithm);
}
}
/**
* Performs cross-validation of a token model.
* @param reader A {@link TrainingDefinitionReader}.
* @param folds The number of cross-validation folds.
* @return A {@link FMeasureModelValidationResult}.
* @throws IOException Thrown if the model cannot be validated.
*/
public static FMeasureModelValidationResult crossValidate(TrainingDefinitionReader reader, int folds) throws IOException {
final String language = reader.getTrainingDefinition().getModel().getLanguage();
final int iterations = reader.getTrainingDefinition().getAlgorithm().getIterations().intValue();
final int cutoff = reader.getTrainingDefinition().getAlgorithm().getCutoff().intValue();
final String algorithm = reader.getTrainingDefinition().getAlgorithm().getName();
final double l1 = reader.getTrainingDefinition().getAlgorithm().getL1().doubleValue();
final double l2 = reader.getTrainingDefinition().getAlgorithm().getL2().doubleValue();
final int m = reader.getTrainingDefinition().getAlgorithm().getM().intValue();
final int max = reader.getTrainingDefinition().getAlgorithm().getMax().intValue();
final LanguageCode languageCode = LanguageCode.getByCodeIgnoreCase(language);
// Get the subject of training based on what's specified in the training definition file.
final SubjectOfTrainingOrEvaluation subjectOfTraining = ModelOperationsUtils.getSubjectOfTrainingOrEvaluation(reader);
// Now we can set up the entity model operations.
final TokenModelOperations tokenModelOperations = new TokenModelOperations();
FMeasureModelValidationResult result = null;
if(StringUtils.equalsIgnoreCase(algorithm, TrainingAlgorithm.PERCEPTRON.getName())) {
result = tokenModelOperations.crossValidationEvaluatePerceptron(subjectOfTraining, languageCode, iterations, cutoff, folds);
} else if(StringUtils.equalsIgnoreCase(algorithm, TrainingAlgorithm.MAXENT_QN.getName())) {
result = tokenModelOperations.crossValidationEvaluateMaxEntQN(subjectOfTraining, languageCode, iterations, cutoff, folds, l1, l2, m, max);
} else {
throw new IOException("Invalid algorithm specified in the training definition file: " + algorithm);
}
return result;
}
@Override
public FMeasureModelValidationResult crossValidationEvaluateMaxEntQN(SubjectOfTrainingOrEvaluation subjectOfTraining, LanguageCode language, int iterations, int cutOff, int folds, double l1, double l2, int m, int max) throws IOException {
LOGGER.info("Doing model evaluation using cross-validation with {} folds.", folds);
InputStreamFactory inputStreamFactory = new MarkableFileInputStreamFactory(new File(subjectOfTraining.getInputFile()));
ObjectStream<TokenSample> sample = new TokenSampleStream(new PlainTextByLineStream(inputStreamFactory, Constants.ENCODING_UTF8));
TrainingParameters trainParams = new TrainingParameters();
trainParams.put(TrainingParameters.CUTOFF_PARAM, Integer.toString(cutOff));
trainParams.put(TrainingParameters.ITERATIONS_PARAM, Integer.toString(iterations));
trainParams.put(TrainingParameters.ALGORITHM_PARAM, TrainingAlgorithm.MAXENT_QN.getAlgorithm());
trainParams.put(QNTrainer.L1COST_PARAM, String.valueOf(l1));
trainParams.put(QNTrainer.L2COST_PARAM, String.valueOf(l2));
trainParams.put(QNTrainer.M_PARAM, String.valueOf(m));
trainParams.put(QNTrainer.MAX_FCT_EVAL_PARAM, String.valueOf(max));
TokenizerFactory factory = new TokenizerFactory(language.getAlpha3().toString(), null, false, null);
TokenizerEvaluationMonitor monitor = new TokenEvaluationErrorListener();
TokenizerCrossValidator evaluator = new TokenizerCrossValidator(trainParams, factory, monitor);
evaluator.evaluate(sample, folds);
final FMeasure fmeasure = new FMeasure(evaluator.getFMeasure().getPrecisionScore(),
evaluator.getFMeasure().getRecallScore(), evaluator.getFMeasure().getFMeasure());
return new FMeasureModelValidationResult(fmeasure);
}
@Override
public FMeasureModelValidationResult crossValidationEvaluatePerceptron(SubjectOfTrainingOrEvaluation subjectOfTraining, LanguageCode language, int iterations, int cutOff, int folds) throws IOException {
LOGGER.info("Doing model evaluation using cross-validation with {} folds.", folds);
InputStreamFactory inputStreamFactory = new MarkableFileInputStreamFactory(new File(subjectOfTraining.getInputFile()));
ObjectStream<TokenSample> sample = new TokenSampleStream(new PlainTextByLineStream(inputStreamFactory, Constants.ENCODING_UTF8));
TrainingParameters trainParams = new TrainingParameters();
trainParams.put(TrainingParameters.CUTOFF_PARAM, Integer.toString(cutOff));
trainParams.put(TrainingParameters.ITERATIONS_PARAM, Integer.toString(iterations));
trainParams.put(TrainingParameters.ALGORITHM_PARAM, TrainingAlgorithm.PERCEPTRON.getAlgorithm());
TokenizerFactory factory = new TokenizerFactory(language.getAlpha3().toString(), null, false, null);
TokenizerEvaluationMonitor monitor = new TokenEvaluationErrorListener();
TokenizerCrossValidator evaluator = new TokenizerCrossValidator(trainParams, factory, monitor);
evaluator.evaluate(sample, folds);
final FMeasure fmeasure = new FMeasure(evaluator.getFMeasure().getPrecisionScore(),
evaluator.getFMeasure().getRecallScore(), evaluator.getFMeasure().getFMeasure());
return new FMeasureModelValidationResult(fmeasure);
}
@Override
public FMeasureModelValidationResult separateDataEvaluate(SubjectOfTrainingOrEvaluation subjectOfTraining, String modelFileName, String encryptionKey) throws IOException {
LOGGER.info("Doing model evaluation using separate training data.");
// Set the encryption key.
OpenNLPEncryptionFactory.getDefault().setKey(encryptionKey);
InputStreamFactory inputStreamFactory = new MarkableFileInputStreamFactory(new File(subjectOfTraining.getInputFile()));
ObjectStream<TokenSample> sample = new TokenSampleStream(new PlainTextByLineStream(inputStreamFactory, Constants.ENCODING_UTF8));
TokenizerModel model = new TokenizerModel(new File(modelFileName));
TokenizerME tokenizer = new TokenizerME(model);
TokenizerEvaluator evaluator = new TokenizerEvaluator(tokenizer);
evaluator.evaluate(sample);
// Clear the encryption key.
OpenNLPEncryptionFactory.getDefault().clearKey();
final FMeasure fmeasure = new FMeasure(evaluator.getFMeasure().getPrecisionScore(),
evaluator.getFMeasure().getRecallScore(), evaluator.getFMeasure().getFMeasure());
return new FMeasureModelValidationResult(fmeasure);
}
@Override
public String trainMaxEntQN(SubjectOfTrainingOrEvaluation subjectOfTraining, String modelFile, LanguageCode language, String encryptionKey, int cutOff, int iterations, int threads, double l1, double l2, int m, int max) throws IOException {
LOGGER.info("Beginning tokenizer model training. Output model will be: " + modelFile);
InputStreamFactory inputStreamFactory = new MarkableFileInputStreamFactory(new File(subjectOfTraining.getInputFile()));
ObjectStream<String> lineStream = new PlainTextByLineStream(inputStreamFactory, Constants.ENCODING_UTF8);
ObjectStream<TokenSample> sampleStream = new TokenSampleStream(lineStream);
TrainingParameters trainParams = new TrainingParameters();
trainParams.put(TrainingParameters.CUTOFF_PARAM, Integer.toString(cutOff));
trainParams.put(TrainingParameters.ITERATIONS_PARAM, Integer.toString(iterations));
trainParams.put(TrainingParameters.ALGORITHM_PARAM, TrainingAlgorithm.MAXENT_QN.getAlgorithm());
trainParams.put(TrainingParameters.THREADS_PARAM, Integer.toString(threads));
trainParams.put(QNTrainer.L1COST_PARAM, String.valueOf(l1));
trainParams.put(QNTrainer.L2COST_PARAM, String.valueOf(l2));
trainParams.put(QNTrainer.M_PARAM, String.valueOf(m));
trainParams.put(QNTrainer.MAX_FCT_EVAL_PARAM, String.valueOf(max));
TokenizerFactory tokenizerFactory = new TokenizerFactory(language.getAlpha3().toString(), new Dictionary(), false, null);
// Set the encryption key.
OpenNLPEncryptionFactory.getDefault().setKey(encryptionKey);
TokenizerModel model = TokenizerME.train(sampleStream, tokenizerFactory, trainParams);
BufferedOutputStream modelOut = null;
String modelId = "";
try {
modelOut = new BufferedOutputStream(new FileOutputStream(modelFile));
modelId = model.serialize(modelOut);
} finally {
if (modelOut != null) {
modelOut.close();
}
lineStream.close();
// Clear the encryption key.
OpenNLPEncryptionFactory.getDefault().clearKey();
}
return modelId;
}
@Override
public String trainPerceptron(SubjectOfTrainingOrEvaluation subjectOfTraining, String modelFile, LanguageCode language, String encryptionKey, int cutOff, int iterations) throws IOException {
LOGGER.info("Beginning tokenizer model training. Output model will be: " + modelFile);
InputStreamFactory inputStreamFactory = new MarkableFileInputStreamFactory(new File(subjectOfTraining.getInputFile()));
ObjectStream<String> lineStream = new PlainTextByLineStream(inputStreamFactory, Constants.ENCODING_UTF8);
ObjectStream<TokenSample> sampleStream = new TokenSampleStream(lineStream);
TrainingParameters trainParams = new TrainingParameters();
trainParams.put(TrainingParameters.CUTOFF_PARAM, Integer.toString(cutOff));
trainParams.put(TrainingParameters.ITERATIONS_PARAM, Integer.toString(iterations));
trainParams.put(TrainingParameters.ALGORITHM_PARAM, TrainingAlgorithm.PERCEPTRON.getAlgorithm());
TokenizerFactory tokenizerFactory = new TokenizerFactory(language.getAlpha3().toString(), new Dictionary(), false, null);
// Set the encryption key.
OpenNLPEncryptionFactory.getDefault().setKey(encryptionKey);
TokenizerModel model = TokenizerME.train(sampleStream, tokenizerFactory, trainParams);
BufferedOutputStream modelOut = null;
String modelId = "";
try {
modelOut = new BufferedOutputStream(new FileOutputStream(modelFile));
modelId = model.serialize(modelOut);
} finally {
if (modelOut != null) {
modelOut.close();
}
lineStream.close();
// Clear the encryption key.
OpenNLPEncryptionFactory.getDefault().clearKey();
}
return modelId;
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-models-opennlp/1.1.0/ai/idylnlp/models/opennlp/training
|
java-sources/ai/idylnlp/idylnlp-models-opennlp/1.1.0/ai/idylnlp/models/opennlp/training/model/ModelCrossValidationOperations.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.models.opennlp.training.model;
import java.io.IOException;
import com.neovisionaries.i18n.LanguageCode;
import ai.idylnlp.model.nlp.subjects.SubjectOfTrainingOrEvaluation;
import ai.idylnlp.model.training.EvaluationResult;
import ai.idylnlp.model.training.FMeasureModelValidationResult;
/**
* Provides model cross validation operations.
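*
* <p>A minimal sketch using the sentence model implementation; {@code subjectOfTraining}
* is assumed to be a {@link SubjectOfTrainingOrEvaluation} built elsewhere, and the
* iteration, cutoff, and fold values are illustrative:</p>
* <pre>{@code
* ModelCrossValidationOperations<FMeasureModelValidationResult> ops = new SentenceModelOperations();
* FMeasureModelValidationResult result =
*     ops.crossValidationEvaluatePerceptron(subjectOfTraining, LanguageCode.en, 100, 5, 10);
* }</pre>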
*
* @author Mountain Fog, Inc.
*/
public interface ModelCrossValidationOperations<T extends EvaluationResult> {
/**
* Performs model cross validation using the perceptron algorithm.
* @param subjectOfTraining The {@link SubjectOfTrainingOrEvaluation}.
* @param language The language of the model.
* @param iterations The number of iterations.
* @param cutOff The value of the cutoff.
* @param folds The number of cross validation folds.
* @return A {@link FMeasureModelValidationResult}.
* @throws IOException Thrown if the cross validation fails.
*/
public FMeasureModelValidationResult crossValidationEvaluatePerceptron(SubjectOfTrainingOrEvaluation subjectOfTraining, LanguageCode language, int iterations, int cutOff, int folds) throws IOException;
/**
* Performs model cross validation using the maxent QN algorithm.
* @param subjectOfTraining The {@link SubjectOfTrainingOrEvaluation}.
* @param language The language of the model.
* @param iterations The number of iterations.
* @param cutOff The value of the cutoff.
* @param folds The number of cross validation folds.
* @param l1 The L1 regularization cost.
* @param l2 The L2 regularization cost.
* @param m The number of updates kept for the L-BFGS Hessian approximation.
* @param max The maximum number of objective function evaluations.
* @return A {@link FMeasureModelValidationResult}.
* @throws IOException Thrown if the cross validation fails.
*/
public T crossValidationEvaluateMaxEntQN(SubjectOfTrainingOrEvaluation subjectOfTraining, LanguageCode language, int iterations, int cutOff, int folds, double l1, double l2, int m, int max) throws IOException;
}
|
0
|
java-sources/ai/idylnlp/idylnlp-models-opennlp/1.1.0/ai/idylnlp/models/opennlp/training
|
java-sources/ai/idylnlp/idylnlp-models-opennlp/1.1.0/ai/idylnlp/models/opennlp/training/model/ModelSeparateDataValidationOperations.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.models.opennlp.training.model;
import java.io.IOException;
import ai.idylnlp.model.nlp.subjects.SubjectOfTrainingOrEvaluation;
import ai.idylnlp.model.training.EvaluationResult;
import opennlp.tools.util.eval.FMeasure;
/**
* Provides operations for performing model validation
* using separate data.
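*
* <p>A minimal sketch using the token model implementation; the model file path and
* encryption key are hypothetical, and {@code subjectOfEvaluation} is assumed to be a
* {@link SubjectOfTrainingOrEvaluation} built elsewhere:</p>
* <pre>{@code
* ModelSeparateDataValidationOperations<FMeasureModelValidationResult> ops = new TokenModelOperations();
* FMeasureModelValidationResult result =
*     ops.separateDataEvaluate(subjectOfEvaluation, "/tmp/token.bin", "encryption-key");
* }</pre>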
*
* @author Mountain Fog, Inc.
*/
public interface ModelSeparateDataValidationOperations<T extends EvaluationResult> {
/**
* Performs model validation using separate data. This validation requires a built model file.
* @param subjectOfTraining The {@link SubjectOfTrainingOrEvaluation}.
* @param modelFileName The full path to the model file.
* @param encryptionKey The model's encryption key.
* @return The results of the validation as an {@link FMeasure}.
* @throws IOException Thrown if any of the input files cannot be read.
*/
public T separateDataEvaluate(SubjectOfTrainingOrEvaluation subjectOfTraining, String modelFileName, String encryptionKey) throws IOException;
}
|
0
|
java-sources/ai/idylnlp/idylnlp-models-opennlp/1.1.0/ai/idylnlp/models/opennlp/training
|
java-sources/ai/idylnlp/idylnlp-models-opennlp/1.1.0/ai/idylnlp/models/opennlp/training/model/ModelTrainingOperations.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.models.opennlp.training.model;
import java.io.IOException;
import com.neovisionaries.i18n.LanguageCode;
import ai.idylnlp.model.nlp.subjects.SubjectOfTrainingOrEvaluation;
/**
* Provides model training operations.
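*
* <p>A minimal sketch of maxent QN training through this interface; the output path,
* encryption key, and hyperparameter values are hypothetical, and {@code subjectOfTraining}
* is assumed to be built elsewhere:</p>
* <pre>{@code
* ModelTrainingOperations ops = new SentenceModelOperations();
* String modelId = ops.trainMaxEntQN(subjectOfTraining, "/tmp/sentence.bin", LanguageCode.en,
*     "encryption-key", 5, 100, 1, 0.1, 0.1, 15, 30000);
* }</pre>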
*
* @author Mountain Fog, Inc.
*
*/
public interface ModelTrainingOperations {
/**
* Train a maxent model using quasi-newton.
* @param subjectOfTraining The {@link SubjectOfTrainingOrEvaluation}.
* @param modelFile The output model file.
* @param language The language of the model.
* @param encryptionKey The model's encryption key.
* @param cutOff The training cutoff.
* @param iterations The training iterations.
* @param threads The number of training threads.
* @param l1 The L1 regularization cost.
* @param l2 The L2 regularization cost.
* @param m The number of updates kept for the L-BFGS Hessian approximation.
* @param max The maximum number of objective function evaluations.
* @return The generated model's ID.
* @throws IOException Thrown if the model cannot be trained.
*/
public String trainMaxEntQN(SubjectOfTrainingOrEvaluation subjectOfTraining, String modelFile, LanguageCode language, String encryptionKey, int cutOff, int iterations, int threads, double l1, double l2, int m, int max) throws IOException;
/**
* Train a model using the perceptron algorithm.
* @param subjectOfTraining The {@link SubjectOfTrainingOrEvaluation}.
* @param modelFile The output model file.
* @param language The language of the model.
* @param encryptionKey The model's encryption key.
* @param cutOff The training cutoff.
* @param iterations The training iterations.
* @return The generated model's ID.
* @throws IOException Thrown if the model cannot be trained.
*/
public String trainPerceptron(SubjectOfTrainingOrEvaluation subjectOfTraining, String modelFile, LanguageCode language, String encryptionKey, int cutOff, int iterations) throws IOException;
}
|
0
|
java-sources/ai/idylnlp/idylnlp-models-opennlp/1.1.0/ai/idylnlp/models/opennlp/training
|
java-sources/ai/idylnlp/idylnlp-models-opennlp/1.1.0/ai/idylnlp/models/opennlp/training/model/TrainingAlgorithm.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.models.opennlp.training.model;
import opennlp.tools.ml.maxent.quasinewton.QNTrainer;
import opennlp.tools.ml.perceptron.PerceptronTrainer;
/**
* A training algorithm.
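*
* <p>A minimal sketch of resolving an algorithm by name (the name shown is one of the
* values accepted by {@link #fromValue(String)}):</p>
* <pre>{@code
* TrainingAlgorithm algorithm = TrainingAlgorithm.fromValue("maxent-qn");
* // The value passed to OpenNLP as TrainingParameters.ALGORITHM_PARAM.
* String opennlpAlgorithm = algorithm.getAlgorithm();
* }</pre>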
*
* @author Mountain Fog, Inc.
*
*/
public enum TrainingAlgorithm {
/**
* Uses the maxent algorithm with L-BFGS.
*/
MAXENT_QN(QNTrainer.MAXENT_QN_VALUE, "maxent-qn"),
/**
* Uses the perceptron algorithm.
*/
PERCEPTRON(PerceptronTrainer.PERCEPTRON_VALUE, "perceptron");
private String algorithm;
private String name;
private TrainingAlgorithm(String algorithm, String name) {
this.algorithm = algorithm;
this.name = name;
}
/**
* Gets the algorithm.
* @return The algorithm.
*/
public String getAlgorithm() {
return algorithm;
}
/**
* Gets the name of the algorithm.
* @return The name of the algorithm.
*/
public String getName() {
return name;
}
/**
* Gets the default {@link TrainingAlgorithm}.
* @return The default {@link TrainingAlgorithm}.
*/
public static TrainingAlgorithm getDefaultAlgorithm() {
return PERCEPTRON;
}
@Override
public String toString() {
return algorithm;
}
/**
* Gets the {@link TrainingAlgorithm} from a string value or
* throws an {@link IllegalArgumentException} if the algorithm
* string value is not a valid algorithm.
* @param algorithm The algorithm.
* @return A {@link TrainingAlgorithm}.
*/
public static TrainingAlgorithm fromValue(String algorithm) {
if(algorithm.equalsIgnoreCase(MAXENT_QN.getName())) {
return MAXENT_QN;
} else if(algorithm.equalsIgnoreCase(PERCEPTRON.getName())) {
return PERCEPTRON;
} else {
throw new IllegalArgumentException("Invalid algorithm: " + algorithm);
}
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-models-tools/1.1.0/ai/idylnlp
|
java-sources/ai/idylnlp/idylnlp-models-tools/1.1.0/ai/idylnlp/models/ModelEvaluationUtils.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.models;
import java.util.LinkedList;
import java.util.List;
import org.apache.commons.math3.stat.inference.TestUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import ai.idylnlp.model.training.FMeasure;
import ai.idylnlp.model.training.FMeasureModelValidationResult;
/**
* Utility functions for model evaluations.
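*
* <p>A minimal sketch of comparing two sets of per-fold F-measures; the values below are
* illustrative only:</p>
* <pre>{@code
* double[] baseline = {0.81, 0.79, 0.83, 0.80, 0.82};
* double[] candidate = {0.85, 0.84, 0.86, 0.83, 0.85};
* boolean significant = ModelEvaluationUtils.performPairedTTest(baseline, candidate, 0.05);
* }</pre>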
*
* @author Mountain Fog, Inc.
*
*/
public class ModelEvaluationUtils {
private static final Logger LOGGER = LogManager.getLogger(ModelEvaluationUtils.class);
private ModelEvaluationUtils() {
// This is a utility class.
}
/**
* Perform a paired T-test on the F-Measures of two cross validation results
* to determine if the F-Measures are significantly different.
* @param result1 A {@link FMeasureModelValidationResult}.
* @param result2 A {@link FMeasureModelValidationResult}.
* @param alpha The alpha value.
* @return <code>true</code> iff the null hypothesis can be rejected with confidence <code>1 - alpha</code>.
*/
public static boolean performPairedTTest(FMeasureModelValidationResult result1, FMeasureModelValidationResult result2, double alpha) {
// Null hypothesis - the hypothesis that there is no significant difference between specified populations,
// any observed difference being due to sampling or experimental error.
final List<Double> f1 = new LinkedList<Double>();
for(FMeasure fmeasure : result1.getFmeasures()) {
LOGGER.trace("Adding F-Measure for feature set 1: {}", fmeasure.getFmeasure());
LOGGER.trace("\t{}", fmeasure.toString());
f1.add(fmeasure.getFmeasure());
}
final List<Double> f2 = new LinkedList<Double>();
for(FMeasure fmeasure : result2.getFmeasures()) {
LOGGER.trace("Adding F-Measure for feature set 2: {}", fmeasure.getFmeasure());
LOGGER.trace("\t{}", fmeasure.toString());
f2.add(fmeasure.getFmeasure());
}
final double[] pa = f1.stream().mapToDouble(Double::doubleValue).toArray();
final double[] ra = f2.stream().mapToDouble(Double::doubleValue).toArray();
return performPairedTTest(pa, ra, alpha);
}
/**
* Perform a paired T-test on the F-Measures of two sets of values.
* @param result1 The first value set.
* @param result2 The second value set.
* @param alpha The alpha value.
* @return <code>true</code> iff the null hypothesis can be rejected with confidence <code>1 - alpha</code>.
*/
public static boolean performPairedTTest(double[] result1, double[] result2, double alpha) {
// Null hypothesis - the hypothesis that there is no significant difference between specified populations,
// any observed difference being due to sampling or experimental error.
// Returns true iff the null hypothesis can be rejected with confidence 1 - alpha.
return TestUtils.pairedTTest(result1, result2, alpha);
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-models-tools/1.1.0/ai/idylnlp
|
java-sources/ai/idylnlp/idylnlp-models-tools/1.1.0/ai/idylnlp/models/ObjectStreamUtils.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.models;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import ai.idylnlp.nlp.annotation.reader.IdylNLPFileAnnotationReader;
import ai.idylnlp.opennlp.custom.formats.IdylNLPNameSampleStream;
import ai.idylnlp.model.Constants;
import ai.idylnlp.model.nlp.annotation.AnnotationReader;
import ai.idylnlp.model.nlp.subjects.BratSubjectOfTrainingOrEvaluation;
import ai.idylnlp.model.nlp.subjects.CoNLL2003SubjectOfTrainingOrEvaluation;
import ai.idylnlp.model.nlp.subjects.IdylNLPSubjectOfTrainingOrEvaluation;
import ai.idylnlp.model.nlp.subjects.SubjectOfTrainingOrEvaluation;
import opennlp.tools.formats.Conll02NameSampleStream;
import opennlp.tools.formats.Conll03NameSampleStream;
import opennlp.tools.formats.brat.AnnotationConfiguration;
import opennlp.tools.namefind.NameSample;
import opennlp.tools.namefind.NameSampleDataStream;
import opennlp.tools.util.InputStreamFactory;
import opennlp.tools.util.MarkableFileInputStreamFactory;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.PlainTextByLineStream;
public class ObjectStreamUtils {
private static final Logger LOGGER = LogManager.getLogger(ObjectStreamUtils.class);
private ObjectStreamUtils() {
// This is a utility class.
}
/**
* Gets the {@link ObjectStream} for training.
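* <p>A minimal sketch, assuming a {@link SubjectOfTrainingOrEvaluation} implementation
* has been constructed elsewhere:</p>
* <pre>{@code
* ObjectStream<NameSample> samples = ObjectStreamUtils.getObjectStream(subjectOfTraining);
* NameSample sample = samples.read();
* }</pre>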
* @param subjectOfTraining The {@link SubjectOfTrainingOrEvaluation}.
* @return An {@link ObjectStream} derived from the given {@link SubjectOfTrainingOrEvaluation}.
* @throws IOException Thrown if any of the input or annotation files cannot be read.
*/
public static ObjectStream<NameSample> getObjectStream(SubjectOfTrainingOrEvaluation subjectOfTraining) throws IOException {
ObjectStream<NameSample> sampleStream = null;
if(subjectOfTraining instanceof IdylNLPSubjectOfTrainingOrEvaluation) {
IdylNLPSubjectOfTrainingOrEvaluation nameFinderSubjectOfTraining = (IdylNLPSubjectOfTrainingOrEvaluation) subjectOfTraining;
LOGGER.info("Using Idyl NLP formatted annotations.");
final AnnotationReader annotationReader = new IdylNLPFileAnnotationReader(nameFinderSubjectOfTraining.getAnnotationsFile());
final InputStreamFactory inputStreamFactory = new MarkableFileInputStreamFactory(new File(subjectOfTraining.getInputFile()));
sampleStream = new IdylNLPNameSampleStream(new PlainTextByLineStream(inputStreamFactory, Constants.ENCODING_UTF8), annotationReader);
} else if(subjectOfTraining instanceof BratSubjectOfTrainingOrEvaluation) {
BratSubjectOfTrainingOrEvaluation nameFinderSubjectOfTraining = (BratSubjectOfTrainingOrEvaluation) subjectOfTraining;
LOGGER.info("Using Brat formatted annotations.");
Map<String, String> typeToClassMap = new HashMap<>();
typeToClassMap.put("Person", AnnotationConfiguration.ENTITY_TYPE);
typeToClassMap.put("Location", AnnotationConfiguration.ENTITY_TYPE);
typeToClassMap.put("Organization", AnnotationConfiguration.ENTITY_TYPE);
typeToClassMap.put("Date", AnnotationConfiguration.ENTITY_TYPE);
AnnotationConfiguration config = new AnnotationConfiguration(typeToClassMap);
InputStream in = ObjectStreamUtils.class.getResourceAsStream(nameFinderSubjectOfTraining.getInputFile() + ".ann");
// TODO: Return the brat annotations stream.
// sampleStream = new BratAnnotationStream(config, "idylnlp", in);
} else if(subjectOfTraining instanceof CoNLL2003SubjectOfTrainingOrEvaluation) {
CoNLL2003SubjectOfTrainingOrEvaluation nameFinderSubjectOfTraining = (CoNLL2003SubjectOfTrainingOrEvaluation) subjectOfTraining;
LOGGER.info("Using CoNLL-2003 formatted data.");
InputStreamFactory in = new MarkableFileInputStreamFactory(new File(nameFinderSubjectOfTraining.getInputFile()));
sampleStream = new Conll03NameSampleStream(Conll03NameSampleStream.LANGUAGE.EN, in, Conll02NameSampleStream.GENERATE_PERSON_ENTITIES);
} else {
LOGGER.info("Using OpenNLP formatted data.");
final InputStreamFactory inputStreamFactory = new MarkableFileInputStreamFactory(new File(subjectOfTraining.getInputFile()));
sampleStream = new NameSampleDataStream(new PlainTextByLineStream(inputStreamFactory, Constants.ENCODING_UTF8));
}
return sampleStream;
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-annotation-readers/1.1.0/ai/idylnlp/nlp/annotation
|
java-sources/ai/idylnlp/idylnlp-nlp-annotation-readers/1.1.0/ai/idylnlp/nlp/annotation/reader/IdylNLPFileAnnotationReader.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.annotation.reader;
import java.io.File;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import ai.idylnlp.model.nlp.annotation.AnnotationReader;
import ai.idylnlp.model.nlp.annotation.IdylNLPAnnotation;
/**
* Implementation of {@link AnnotationReader} that reads IdylNLP annotations
* from a file with the format: lineNumber start end type
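*
* <p>A minimal usage sketch; the annotations file path is hypothetical:</p>
* <pre>{@code
* AnnotationReader reader = new IdylNLPFileAnnotationReader("/tmp/annotations.txt");
* // Annotations covering line 1 of the corresponding training text file.
* Collection<IdylNLPAnnotation> annotations = reader.getAnnotations(1);
* }</pre>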
*
* @author Mountain Fog, Inc.
*
*/
public class IdylNLPFileAnnotationReader implements AnnotationReader {
private static final Logger LOGGER = LogManager.getLogger(IdylNLPFileAnnotationReader.class);
private Map<Integer, Collection<IdylNLPAnnotation>> annotations;
public IdylNLPFileAnnotationReader(String fileName) throws IOException {
annotations = new HashMap<Integer, Collection<IdylNLPAnnotation>>();
File file = new File(fileName);
List<String> lines = FileUtils.readLines(file);
for(String line : lines) {
if(!line.startsWith("#") && !StringUtils.isEmpty(line)) {
String[] annotation = line.split(" ");
int lineNumber = Integer.parseInt(annotation[0]);
IdylNLPAnnotation a = new IdylNLPAnnotation();
a.setLineNumber(lineNumber);
a.setTokenStart(Integer.parseInt(annotation[1]));
a.setTokenEnd(Integer.parseInt(annotation[2]));
a.setType(annotation[3]);
Collection<IdylNLPAnnotation> m = annotations.get(lineNumber);
if(m == null) {
m = new LinkedList<IdylNLPAnnotation>();
annotations.put(lineNumber, m);
}
m.add(a);
}
}
for(Integer i : annotations.keySet()) {
for(IdylNLPAnnotation annotation : annotations.get(i)) {
LOGGER.debug("{}\t{}", i, annotation.toString());
}
}
}
@Override
public Collection<IdylNLPAnnotation> getAnnotations(int lineNumber) {
Collection<IdylNLPAnnotation> a = annotations.get(lineNumber);
if(a != null) {
return a;
} else {
return Collections.emptyList();
}
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-annotation-writers/1.1.0/ai/idylnlp/nlp/annotation
|
java-sources/ai/idylnlp/idylnlp-nlp-annotation-writers/1.1.0/ai/idylnlp/nlp/annotation/writers/OpenNLPFileAnnotationWriter.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.annotation.writers;
import java.util.Collection;
import ai.idylnlp.model.entity.Entity;
import ai.idylnlp.model.nlp.AnnotationWriter;
/**
* Produces text annotated in the OpenNLP name finder training format.
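*
* <p>A minimal sketch; the entity collection is assumed to have been produced by an
* entity extraction step elsewhere:</p>
* <pre>{@code
* AnnotationWriter writer = new OpenNLPFileAnnotationWriter();
* String annotated = writer.annotateText(entities, "George Washington was the first president.");
* }</pre>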
*
* @author Mountain Fog, Inc.
*
*/
public class OpenNLPFileAnnotationWriter implements AnnotationWriter {
/**
* Creates a new {@link OpenNLPFileAnnotationWriter}.
*/
public OpenNLPFileAnnotationWriter() {
}
@Override
public String annotateText(Collection<Entity> entities, String text) {
String annotatedText = text;
for(Entity entity : entities) {
// Use a literal replacement so the entity text is not interpreted as a regular expression.
annotatedText = annotatedText.replace(entity.getText(), "<START:" + entity.getType().toLowerCase() + "> " + entity.getText() + " <END>");
}
return annotatedText;
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-document-classification-dl4j/1.1.0/ai/idylnlp/nlp/documents
|
java-sources/ai/idylnlp/idylnlp-nlp-document-classification-dl4j/1.1.0/ai/idylnlp/nlp/documents/dl4j/DeepLearningDocumentClassifier.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.documents.dl4j;
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.deeplearning4j.berkeley.Pair;
import org.deeplearning4j.models.embeddings.inmemory.InMemoryLookupTable;
import org.deeplearning4j.models.embeddings.loader.WordVectorSerializer;
import org.deeplearning4j.models.paragraphvectors.ParagraphVectors;
import org.deeplearning4j.models.word2vec.VocabWord;
import org.deeplearning4j.text.documentiterator.FileLabelAwareIterator;
import org.deeplearning4j.text.documentiterator.LabelledDocument;
import org.deeplearning4j.text.tokenization.tokenizer.preprocessor.CommonPreprocessor;
import org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory;
import org.deeplearning4j.text.tokenization.tokenizerfactory.TokenizerFactory;
import org.nd4j.linalg.api.ndarray.INDArray;
import com.neovisionaries.i18n.LanguageCode;
import ai.idylnlp.model.manifest.DocumentModelManifest;
import ai.idylnlp.model.nlp.documents.DeepLearningDocumentClassificationRequest;
import ai.idylnlp.model.nlp.documents.DocumentClassificationEvaluationRequest;
import ai.idylnlp.model.nlp.documents.DocumentClassificationEvaluationResponse;
import ai.idylnlp.model.nlp.documents.DocumentClassificationResponse;
import ai.idylnlp.model.nlp.documents.DocumentClassificationScores;
import ai.idylnlp.model.nlp.documents.DocumentClassifier;
import ai.idylnlp.model.nlp.documents.DocumentClassifierException;
import ai.idylnlp.nlp.documents.dl4j.model.DeepLearningDocumentClassifierConfiguration;
import ai.idylnlp.nlp.documents.dl4j.utils.LabelSeeker;
import ai.idylnlp.nlp.documents.dl4j.utils.MeansBuilder;
/**
* Implementation of {@link DocumentClassifier} that performs
* document classification using DeepLearning4J.
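*
* <p>A minimal classification sketch, assuming a {@link DeepLearningDocumentClassifierConfiguration}
* referencing trained paragraph vector models is available; the text and language are illustrative:</p>
* <pre>{@code
* DeepLearningDocumentClassifier classifier = new DeepLearningDocumentClassifier(configuration);
* DeepLearningDocumentClassificationRequest request =
*     new DeepLearningDocumentClassificationRequest("Some document text to classify.", LanguageCode.en);
* DocumentClassificationResponse response = classifier.classify(request);
* }</pre>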
*
* @author Mountain Fog, Inc.
*
*/
public class DeepLearningDocumentClassifier
implements DocumentClassifier<DeepLearningDocumentClassifierConfiguration, DeepLearningDocumentClassificationRequest> {
private static final Logger LOGGER = LogManager.getLogger(DeepLearningDocumentClassifier.class);
private DeepLearningDocumentClassifierConfiguration configuration;
private Map<LanguageCode, ParagraphVectors> models;
/**
* Creates a new deep learning document classifier.
* @param configuration A {@link DeepLearningDocumentClassifierConfiguration}.
* @throws DocumentClassifierException Thrown if the models cannot be preloaded.
*/
public DeepLearningDocumentClassifier(DeepLearningDocumentClassifierConfiguration configuration) throws DocumentClassifierException {
this.configuration = configuration;
models = new HashMap<>();
for(DocumentModelManifest model : configuration.getModels()) {
// If the model file cannot be read, the resulting IOException is caught and logged below.
final File modelFile = new File(model.getModelFileName());
try {
LOGGER.info("Loading model {}", modelFile.getAbsolutePath());
final ParagraphVectors paragraphVectors = WordVectorSerializer.readParagraphVectors(modelFile);
models.put(model.getLanguageCode(), paragraphVectors);
} catch (IOException ex) {
LOGGER.error("Unable to load document classification model {}. Verify the file exists.", ex, model.getModelFileName());
}
}
}
@Override
public DocumentClassificationResponse classify(DeepLearningDocumentClassificationRequest request) throws DocumentClassifierException {
try {
// TODO: Allow the user to pass in a String[] instead of a String in the request.
// The String[] would contain the pre-tokenized text.
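// Classification pipeline: tokenize the request text, average the in-vocabulary word
// vectors into a single document vector (MeansBuilder), and score that vector against
// the vector learned for each label during training using cosine similarity (LabelSeeker).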
ParagraphVectors paragraphVectors = models.get(request.getLanguageCode());
if(paragraphVectors != null) {
// Get the matching manifest for this model.
// TODO: Should the manifest be the object in the map's key? I don't think so.
Optional<DocumentModelManifest> matchingObjects = configuration.getModels().stream()
.filter(p -> p.getLanguageCode().equals(request.getLanguageCode()))
.findFirst();
// A matching manifest should always be present because a model was loaded for this language.
final DocumentModelManifest model = matchingObjects.get();
final TokenizerFactory tokenizerFactory = new DefaultTokenizerFactory();
tokenizerFactory.setTokenPreProcessor(new CommonPreprocessor());
final InMemoryLookupTable<VocabWord> tab = (InMemoryLookupTable<VocabWord>) paragraphVectors.getLookupTable();
final MeansBuilder meansBuilder = new MeansBuilder(tab, tokenizerFactory);
final LabelSeeker seeker = new LabelSeeker(model.getLabels(), tab);
final LabelledDocument document = new LabelledDocument();
document.setContent(request.getText());
final INDArray documentAsCentroid = meansBuilder.documentAsVector(document);
final List<Pair<String, Double>> scores = seeker.getScores(documentAsCentroid);
final Map<String, Double> sc = new HashMap<>();
for(Pair<String, Double> score : scores) {
sc.put(score.getFirst(), score.getSecond());
}
return new DocumentClassificationResponse(new DocumentClassificationScores(sc));
} else {
throw new DocumentClassifierException("No model for language " + request.getLanguageCode().getAlpha3().toString() + ".");
}
} catch (Exception ex) {
throw new DocumentClassifierException("Unable to classify document.", ex);
}
}
@Override
public DocumentClassificationEvaluationResponse evaluate(DocumentClassificationEvaluationRequest request) throws DocumentClassifierException {
// Maps actual class -> (predicted class -> number of times), for example:
// positive -> negative, 10
// means that documents from the positive class were classified as negative 10 times.
Map<String, Map<String, AtomicInteger>> results = new LinkedHashMap<>();
final FileLabelAwareIterator iterator = new FileLabelAwareIterator.Builder().addSourceFolder(new File(request.getDirectory())).build();
LOGGER.info("Beginning model evaluation using directory {}", request.getDirectory());
while(iterator.hasNext()) {
final LabelledDocument document = iterator.nextDocument();
final String text = document.getContent();
final DocumentClassificationResponse response = classify(new DeepLearningDocumentClassificationRequest(text, request.getLanguageCode()));
// TODO: Is it possible to not be assigned to any category?
final String actualCategory = document.getLabels().get(0);
final String predictedCategory = response.getScores().getPredictedCategory().getLeft();
// LOGGER.trace("Actual: " + actualCategory + "; Predicted: " + predictedCategory);
results.putIfAbsent(actualCategory, new HashMap<String, AtomicInteger>());
results.get(actualCategory).putIfAbsent(predictedCategory, new AtomicInteger(0));
results.get(actualCategory).get(predictedCategory).incrementAndGet();
}
return new DocumentClassificationEvaluationResponse(results);
}
}
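// Illustrative usage sketch (not part of the original source): a minimal, self-contained
// example of wiring the classifier together. It assumes a collection of DocumentModelManifest
// objects has already been loaded elsewhere; the input text and the English language code
// are placeholders.
import java.util.Collection;
import com.neovisionaries.i18n.LanguageCode;
import ai.idylnlp.model.manifest.DocumentModelManifest;
import ai.idylnlp.model.nlp.documents.DeepLearningDocumentClassificationRequest;
import ai.idylnlp.model.nlp.documents.DocumentClassificationResponse;
import ai.idylnlp.model.nlp.documents.DocumentClassifierException;
import ai.idylnlp.nlp.documents.dl4j.DeepLearningDocumentClassifier;
import ai.idylnlp.nlp.documents.dl4j.model.DeepLearningDocumentClassifierConfiguration;
class DeepLearningDocumentClassifierExample {
static String classifyEnglishText(Collection<DocumentModelManifest> manifests, String text)
throws DocumentClassifierException {
// Build the configuration from the previously loaded model manifests.
final DeepLearningDocumentClassifierConfiguration configuration =
new DeepLearningDocumentClassifierConfiguration.Builder()
.withModels(manifests)
.build();
// The classifier loads each manifest's ParagraphVectors model at construction time.
final DeepLearningDocumentClassifier classifier = new DeepLearningDocumentClassifier(configuration);
// Classify the text and return the highest-scoring category.
final DocumentClassificationResponse response = classifier.classify(
new DeepLearningDocumentClassificationRequest(text, LanguageCode.en));
return response.getScores().getPredictedCategory().getLeft();
}
}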
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-document-classification-dl4j/1.1.0/ai/idylnlp/nlp/documents
|
java-sources/ai/idylnlp/idylnlp-nlp-document-classification-dl4j/1.1.0/ai/idylnlp/nlp/documents/dl4j/DeepLearningDocumentModelOperations.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.documents.dl4j;
import java.io.File;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.deeplearning4j.models.embeddings.loader.WordVectorSerializer;
import org.deeplearning4j.models.paragraphvectors.ParagraphVectors;
import org.deeplearning4j.text.documentiterator.FileLabelAwareIterator;
import org.deeplearning4j.text.tokenization.tokenizer.preprocessor.CommonPreprocessor;
import org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory;
import org.deeplearning4j.text.tokenization.tokenizerfactory.TokenizerFactory;
import org.nd4j.linalg.factory.Nd4j;
import ai.idylnlp.model.nlp.documents.DeepLearningDocumentClassifierTrainingRequest;
import ai.idylnlp.model.nlp.documents.DocumentClassificationFile;
import ai.idylnlp.model.nlp.documents.DocumentClassificationTrainingResponse;
import ai.idylnlp.model.nlp.documents.DocumentClassifier;
import ai.idylnlp.model.nlp.documents.DocumentClassifierModelOperations;
import ai.idylnlp.model.nlp.documents.DocumentModelTrainingException;
/**
* Implementation of {@link DocumentClassifierModelOperations} that trains
* document classification models using DeepLearning4J.
*
* @author Mountain Fog, Inc.
*
*/
public class DeepLearningDocumentModelOperations implements DocumentClassifierModelOperations<DeepLearningDocumentClassifierTrainingRequest> {
private static final Logger LOGGER = LogManager.getLogger(DeepLearningDocumentModelOperations.class);
@Override
public DocumentClassificationTrainingResponse train(DeepLearningDocumentClassifierTrainingRequest request) throws DocumentModelTrainingException {
// https://deeplearning4j.org/workspaces
Nd4j.getMemoryManager().setAutoGcWindow(10000);
try {
LOGGER.info("Loading training iterator...");
FileLabelAwareIterator.Builder builder = new FileLabelAwareIterator.Builder();
for(String directory : request.getDirectories()) {
final File d = new File(directory);
// Make sure the directory exists and is a directory.
if(d.exists() && d.isDirectory()) {
LOGGER.info("Adding training directory {}", d.getAbsolutePath());
builder.addSourceFolder(d);
} else {
LOGGER.warn("Training directory {} does not exist and will be skipped.", directory);
}
}
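// FileLabelAwareIterator derives the labels from the subdirectory names: each subdirectory
// of a source folder is a label, and the files it contains are that label's training documents.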
final FileLabelAwareIterator iterator = builder.build();
TokenizerFactory tokenizerFactory = new DefaultTokenizerFactory();
tokenizerFactory.setTokenPreProcessor(new CommonPreprocessor());
final ParagraphVectors paragraphVectors = new ParagraphVectors.Builder()
.learningRate(request.getLearningRate())
.minLearningRate(request.getMinLearningRate())
.minWordFrequency(request.getMinWordFrequency())
.layerSize(request.getLayerSize())
.batchSize(request.getBatchSize())
.epochs(request.getEpochs())
.iterations(5)
.iterate(iterator)
.tokenizerFactory(tokenizerFactory)
.sampling(0)
.windowSize(5)
.build();
LOGGER.info("Starting training...");
paragraphVectors.fit();
final File serializedModelFile = File.createTempFile("model", ".bin");
WordVectorSerializer.writeParagraphVectors(paragraphVectors, serializedModelFile);
LOGGER.info("Model serialized to {}", serializedModelFile.getAbsolutePath());
Map<DocumentClassificationFile, File> files = new HashMap<>();
files.put(DocumentClassificationFile.MODEL_FILE, serializedModelFile);
final String modelId = UUID.randomUUID().toString();
return new DocumentClassificationTrainingResponse(modelId, files, iterator.getLabelsSource().getLabels());
} catch(Exception ex) {
throw new DocumentModelTrainingException("Unable to train document classification model.", ex);
}
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-document-classification-dl4j/1.1.0/ai/idylnlp/nlp/documents/dl4j
|
java-sources/ai/idylnlp/idylnlp-nlp-document-classification-dl4j/1.1.0/ai/idylnlp/nlp/documents/dl4j/model/DeepLearningDocumentClassifierConfiguration.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.documents.dl4j.model;
import java.util.Collection;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import ai.idylnlp.model.manifest.DocumentModelManifest;
import ai.idylnlp.model.nlp.documents.AbstractDocumentClassifierConfiguration;
/**
* Configuration for the DeepLearning4J document classifier.
*
* @author Mountain Fog, Inc.
*
*/
public class DeepLearningDocumentClassifierConfiguration extends AbstractDocumentClassifierConfiguration {
private static final Logger LOGGER = LogManager.getLogger(DeepLearningDocumentClassifierConfiguration.class);
private Collection<DocumentModelManifest> models;
private DeepLearningDocumentClassifierConfiguration(Collection<DocumentModelManifest> models) {
this.models = models;
}
/**
* Builder class to construct {@link DeepLearningDocumentClassifierConfiguration}.
*
* @author Mountain Fog, Inc.
*
*/
public static class Builder {
private Collection<DocumentModelManifest> models;
public Builder withModels(Collection<DocumentModelManifest> models) {
this.models = models;
return this;
}
/**
* Creates a configured {@link DeepLearningDocumentClassifierConfiguration}.
* @return A configured {@link DeepLearningDocumentClassifierConfiguration}.
*/
public DeepLearningDocumentClassifierConfiguration build() {
return new DeepLearningDocumentClassifierConfiguration(models);
}
}
public Collection<DocumentModelManifest> getModels() {
return models;
}
}
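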
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-document-classification-dl4j/1.1.0/ai/idylnlp/nlp/documents/dl4j
|
java-sources/ai/idylnlp/idylnlp-nlp-document-classification-dl4j/1.1.0/ai/idylnlp/nlp/documents/dl4j/utils/LabelSeeker.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.documents.dl4j.utils;
import org.deeplearning4j.berkeley.Pair;
import org.deeplearning4j.models.embeddings.inmemory.InMemoryLookupTable;
import org.deeplearning4j.models.word2vec.VocabWord;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.ops.transforms.Transforms;
import java.util.ArrayList;
import java.util.List;
/**
* A primitive seeker for the nearest labels.
* It is used instead of the basic wordsNearest method because, for ParagraphVectors,
* only labels should be taken into account, not individual words.
*
* @author raver119@gmail.com
*/
public class LabelSeeker {
private List<String> labelsUsed;
private InMemoryLookupTable<VocabWord> lookupTable;
public LabelSeeker(List<String> labelsUsed, InMemoryLookupTable<VocabWord> lookupTable) {
if(labelsUsed.isEmpty()) {
throw new IllegalStateException("You can't have 0 labels used for ParagraphVectors");
}
this.lookupTable = lookupTable;
this.labelsUsed = labelsUsed;
}
/**
* Accepts a vector that represents a document and returns the cosine similarity
* between that document and each previously trained category label.
* @param vector The document vector.
* @return A list of (label, cosine similarity) pairs, one pair per label.
*/
public List<Pair<String, Double>> getScores(INDArray vector) {
List<Pair<String, Double>> result = new ArrayList<>();
for(String label : labelsUsed) {
INDArray vecLabel = lookupTable.vector(label);
if(vecLabel == null) {
throw new IllegalStateException("Label '" + label + "' has no known vector!");
}
double sim = Transforms.cosineSim(vector, vecLabel);
result.add(new Pair<String, Double>(label, sim));
}
return result;
}
}
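// Illustrative helper (not part of the original source): given the label/similarity pairs
// returned by getScores(), select the label with the highest cosine similarity. A minimal
// sketch; the class and method names below are hypothetical.
import java.util.Comparator;
import java.util.List;
import org.deeplearning4j.berkeley.Pair;
class NearestLabelExample {
static String nearestLabel(List<Pair<String, Double>> scores) {
// Each pair holds (label, cosine similarity); the most similar label wins.
return scores.stream()
.max(Comparator.comparingDouble(pair -> pair.getSecond()))
.map(pair -> pair.getFirst())
.orElseThrow(() -> new IllegalStateException("No label scores were provided."));
}
}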
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-document-classification-dl4j/1.1.0/ai/idylnlp/nlp/documents/dl4j
|
java-sources/ai/idylnlp/idylnlp-nlp-document-classification-dl4j/1.1.0/ai/idylnlp/nlp/documents/dl4j/utils/MeansBuilder.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.documents.dl4j.utils;
import org.deeplearning4j.models.embeddings.inmemory.InMemoryLookupTable;
import org.deeplearning4j.models.word2vec.VocabWord;
import org.deeplearning4j.models.word2vec.wordstore.VocabCache;
import org.deeplearning4j.text.documentiterator.LabelledDocument;
import org.deeplearning4j.text.tokenization.tokenizerfactory.TokenizerFactory;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
public class MeansBuilder {
private VocabCache<VocabWord> vocabCache;
private InMemoryLookupTable<VocabWord> lookupTable;
private TokenizerFactory tokenizerFactory;
public MeansBuilder(InMemoryLookupTable<VocabWord> lookupTable, TokenizerFactory tokenizerFactory) {
this.lookupTable = lookupTable;
this.vocabCache = lookupTable.getVocab();
this.tokenizerFactory = tokenizerFactory;
}
/**
* Returns the centroid (mean word vector) for a document.
*
* @param document The {@link LabelledDocument} to vectorize.
* @return The mean of the word vectors of the document's in-vocabulary tokens.
*/
public INDArray documentAsVector(LabelledDocument document) {
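// Two passes over the tokens: first count how many are in the model's vocabulary so the
// matrix can be sized, then stack each in-vocabulary word vector as a row and average the
// rows into a single centroid vector for the document.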
List<String> documentAsTokens = tokenizerFactory.create(document.getContent()).getTokens();
AtomicInteger cnt = new AtomicInteger(0);
for(String word : documentAsTokens) {
if(vocabCache.containsWord(word)) {
cnt.incrementAndGet();
}
}
INDArray allWords = Nd4j.create(cnt.get(), lookupTable.layerSize());
cnt.set(0);
for(String word: documentAsTokens) {
if(vocabCache.containsWord(word)) {
allWords.putRow(cnt.getAndIncrement(), lookupTable.vector(word));
}
}
INDArray mean = allWords.mean(0);
return mean;
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-document-classification-opennlp/1.1.0/ai/idylnlp/nlp/documents
|
java-sources/ai/idylnlp/idylnlp-nlp-document-classification-opennlp/1.1.0/ai/idylnlp/nlp/documents/opennlp/OpenNLPDocumentClassifier.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.documents.opennlp;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.io.IOUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import com.neovisionaries.i18n.LanguageCode;
import ai.idylnlp.model.nlp.documents.DocumentClassificationEvaluationRequest;
import ai.idylnlp.model.nlp.documents.DocumentClassificationEvaluationResponse;
import ai.idylnlp.model.nlp.documents.DocumentClassificationResponse;
import ai.idylnlp.model.nlp.documents.DocumentClassificationScores;
import ai.idylnlp.model.nlp.documents.DocumentClassifier;
import ai.idylnlp.model.nlp.documents.DocumentClassifierException;
import ai.idylnlp.model.nlp.documents.OpenNLPDocumentClassificationRequest;
import ai.idylnlp.nlp.documents.opennlp.model.OpenNLPDocumentClassifierConfiguration;
import opennlp.tools.doccat.DoccatModel;
import opennlp.tools.doccat.DocumentCategorizerME;
import opennlp.tools.tokenize.WhitespaceTokenizer;
/**
* Implementation of {@link DocumentClassifier} that performs document classification
* using OpenNLP.
*
* @author Mountain Fog, Inc.
*
*/
public class OpenNLPDocumentClassifier
implements DocumentClassifier<OpenNLPDocumentClassifierConfiguration, OpenNLPDocumentClassificationRequest> {
private static final Logger LOGGER = LogManager.getLogger(OpenNLPDocumentClassifier.class);
private OpenNLPDocumentClassifierConfiguration configuration;
private Map<LanguageCode, DocumentCategorizerME> doccatModelCache;
/**
* Creates a new OpenNLP document classifier.
* @param configuration A {@link OpenNLPDocumentClassifierConfiguration}.
* @throws DocumentClassifierException Thrown if the models cannot be preloaded.
*/
public OpenNLPDocumentClassifier(OpenNLPDocumentClassifierConfiguration configuration) throws DocumentClassifierException {
this.configuration = configuration;
this.doccatModelCache = new HashMap<LanguageCode, DocumentCategorizerME>();
if(configuration.isPreloadModels()) {
LOGGER.info("Preloading the document classification models.");
// Preload the models.
for(LanguageCode languageCode : configuration.getDoccatModels().keySet()) {
try {
getDocumentCategorizer(languageCode);
} catch (FileNotFoundException ex) {
final String fileName = configuration.getDoccatModels().get(languageCode).getAbsolutePath();
LOGGER.error("The model file {} was not found.", ex, fileName);
}
}
}
}
@Override
public DocumentClassificationResponse classify(OpenNLPDocumentClassificationRequest request) throws DocumentClassifierException {
try {
DocumentCategorizerME categorizer = getDocumentCategorizer(request.getLanguageCode());
// TODO: Use a proper language-aware tokenizer instead of simple whitespace tokenization.
final String tokens[] = WhitespaceTokenizer.INSTANCE.tokenize(request.getText());
final double[] outcomes = categorizer.categorize(tokens);
Map<String, Double> scores = new HashMap<>();
for(int i = 0; i < outcomes.length; i++) {
scores.put(categorizer.getCategory(i), outcomes[i]);
}
return new DocumentClassificationResponse(new DocumentClassificationScores(scores));
} catch (Exception ex) {
throw new DocumentClassifierException("Unable to classify document.", ex);
}
}
private DocumentCategorizerME getDocumentCategorizer(LanguageCode languageCode) throws DocumentClassifierException, FileNotFoundException {
LOGGER.info("Loading document classification model for language {}.", languageCode.getAlpha3().toString());
// Has this model been loaded before?
DocumentCategorizerME documentCategorizer = doccatModelCache.get(languageCode);
if(documentCategorizer == null) {
final File file = configuration.getDoccatModels().get(languageCode);
if(file != null) {
if(file.exists()) {
// The model has not been loaded so we will load it now.
final InputStream is = new FileInputStream(file);
try {
final DoccatModel doccatModel = new DoccatModel(is);
documentCategorizer = new DocumentCategorizerME(doccatModel);
doccatModelCache.put(languageCode, documentCategorizer);
} catch (IOException ex) {
LOGGER.error("Unable to perform document classification.", ex);
throw new DocumentClassifierException("Unable to perform document classification.", ex);
} finally {
IOUtils.closeQuietly(is);
}
} else {
throw new DocumentClassifierException("The model file for language " + languageCode.getAlpha3().toString() + " does not exist.");
}
} else {
throw new DocumentClassifierException("No model file for language " + languageCode.getAlpha3().toString() + ".");
}
}
return documentCategorizer;
}
@Override
public DocumentClassificationEvaluationResponse evaluate(DocumentClassificationEvaluationRequest request)
throws DocumentClassifierException {
// TODO: Implement this.
return null;
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-document-classification-opennlp/1.1.0/ai/idylnlp/nlp/documents
|
java-sources/ai/idylnlp/idylnlp-nlp-document-classification-opennlp/1.1.0/ai/idylnlp/nlp/documents/opennlp/OpenNLPDocumentModelOperations.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.documents.opennlp;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import ai.idylnlp.opennlp.custom.encryption.OpenNLPEncryptionFactory;
import ai.idylnlp.model.Constants;
import ai.idylnlp.model.nlp.documents.DocumentClassificationFile;
import ai.idylnlp.model.nlp.documents.DocumentClassificationTrainingResponse;
import ai.idylnlp.model.nlp.documents.DocumentClassifier;
import ai.idylnlp.model.nlp.documents.DocumentClassifierModelOperations;
import ai.idylnlp.model.nlp.documents.DocumentModelTrainingException;
import ai.idylnlp.model.nlp.documents.OpenNLPDocumentClassifierTrainingRequest;
import opennlp.tools.doccat.DoccatFactory;
import opennlp.tools.doccat.DoccatModel;
import opennlp.tools.doccat.DocumentCategorizerME;
import opennlp.tools.doccat.DocumentSample;
import opennlp.tools.doccat.DocumentSampleStream;
import opennlp.tools.util.InputStreamFactory;
import opennlp.tools.util.MarkableFileInputStreamFactory;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.PlainTextByLineStream;
import opennlp.tools.util.TrainingParameters;
/**
* Implementation of {@link DocumentClassifierModelOperations} that trains document
* classification models using OpenNLP.
*
* @author Mountain Fog, Inc.
*
*/
public class OpenNLPDocumentModelOperations implements DocumentClassifierModelOperations<OpenNLPDocumentClassifierTrainingRequest> {
private static final Logger LOGGER = LogManager.getLogger(OpenNLPDocumentModelOperations.class);
@Override
public DocumentClassificationTrainingResponse train(OpenNLPDocumentClassifierTrainingRequest request) throws DocumentModelTrainingException {
try {
InputStreamFactory inputStreamFactory = new MarkableFileInputStreamFactory(request.getTrainingFile());
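// OpenNLP's DocumentSampleStream expects one training sample per line, with the category
// as the first whitespace-separated token followed by the document text.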
ObjectStream<DocumentSample> sample = new DocumentSampleStream(new PlainTextByLineStream(inputStreamFactory, Constants.ENCODING_UTF8));
final String language = request.getLanguageCode().getAlpha3().toString();
DoccatModel model = DocumentCategorizerME.train(language, sample, TrainingParameters.defaultParams(), new DoccatFactory());
BufferedOutputStream modelOut = null;
// Set the encryption key.
OpenNLPEncryptionFactory.getDefault().setKey(request.getEncryptionKey());
// The generated model's ID. Assigned during the training process.
String modelId = "";
File modelFile = File.createTempFile("model", ".bin");
try {
modelOut = new BufferedOutputStream(new FileOutputStream(modelFile));
modelId = model.serialize(modelOut);
} catch (Exception ex) {
LOGGER.error("Unable to create the model.", ex);
} finally {
if (modelOut != null) {
modelOut.close();
}
// Clear the encryption key.
OpenNLPEncryptionFactory.getDefault().clearKey();
}
final Map<DocumentClassificationFile, File> files = new HashMap<>();
files.put(DocumentClassificationFile.MODEL_FILE, modelFile);
// TODO: Get the categories and return them.
return new DocumentClassificationTrainingResponse(modelId, files, Collections.emptyList());
} catch (IOException ex) {
throw new DocumentModelTrainingException("Unable to train document classification model.", ex);
}
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-document-classification-opennlp/1.1.0/ai/idylnlp/nlp/documents/opennlp
|
java-sources/ai/idylnlp/idylnlp-nlp-document-classification-opennlp/1.1.0/ai/idylnlp/nlp/documents/opennlp/model/OpenNLPDocumentClassifierConfiguration.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.documents.opennlp.model;
import java.io.File;
import java.util.Map;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import com.neovisionaries.i18n.LanguageCode;
import ai.idylnlp.model.nlp.documents.AbstractDocumentClassifierConfiguration;
/**
* Configuration for the OpenNLP document classifier.
*
* @author Mountain Fog, Inc.
*
*/
public class OpenNLPDocumentClassifierConfiguration extends AbstractDocumentClassifierConfiguration {
private static final Logger LOGGER = LogManager.getLogger(OpenNLPDocumentClassifierConfiguration.class);
private Map<LanguageCode, File> doccatModels;
private boolean preloadModels;
private OpenNLPDocumentClassifierConfiguration(Map<LanguageCode, File> doccatModels, boolean preloadModels) {
this.doccatModels = doccatModels;
this.preloadModels = preloadModels;
}
/**
* Builder class to construct {@link OpenNLPDocumentClassifierConfiguration}.
*
* @author Mountain Fog, Inc.
*
*/
public static class Builder {
private Map<LanguageCode, File> doccatModels;
private boolean preloadModels;
public Builder withDoccatModels(Map<LanguageCode, File> doccatModels) {
this.doccatModels = doccatModels;
return this;
}
public Builder withPreloadModels(boolean preloadModels) {
this.preloadModels = preloadModels;
return this;
}
/**
* Creates a configured {@link OpenNLPDocumentClassifierConfiguration}.
* @return A configured {@link OpenNLPDocumentClassifierConfiguration}.
*/
public OpenNLPDocumentClassifierConfiguration build() {
return new OpenNLPDocumentClassifierConfiguration(doccatModels, preloadModels);
}
}
public Map<LanguageCode, File> getDoccatModels() {
return doccatModels;
}
/**
* Gets whether or not to preload the document classification models.
* @return Whether or not to preload the document classification models.
*/
public boolean isPreloadModels() {
return preloadModels;
}
}
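// Illustrative usage sketch (not part of the original source): building a configuration
// with a single English doccat model and preloading it. The model path is hypothetical.
import java.io.File;
import java.util.HashMap;
import java.util.Map;
import com.neovisionaries.i18n.LanguageCode;
import ai.idylnlp.nlp.documents.opennlp.model.OpenNLPDocumentClassifierConfiguration;
class OpenNLPDocumentClassifierConfigurationExample {
static OpenNLPDocumentClassifierConfiguration build() {
// Map each supported language to its serialized doccat model file.
final Map<LanguageCode, File> doccatModels = new HashMap<>();
doccatModels.put(LanguageCode.en, new File("/models/en-doccat.bin"));
// Preloading reads (and therefore validates) the models at construction time
// instead of on the first classification request.
return new OpenNLPDocumentClassifierConfiguration.Builder()
.withDoccatModels(doccatModels)
.withPreloadModels(true)
.build();
}
}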
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-deeplearning/1.1.0/ai/idylnlp/nlp
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-deeplearning/1.1.0/ai/idylnlp/nlp/recognizer/DeepLearningEntityRecognizer.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.recognizer;
import java.io.File;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.deeplearning4j.models.embeddings.loader.WordVectorSerializer;
import org.deeplearning4j.models.embeddings.wordvectors.WordVectors;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.util.ModelSerializer;
import ai.idylnlp.model.entity.Entity;
import ai.idylnlp.model.exceptions.EntityFinderException;
import ai.idylnlp.model.exceptions.ModelLoaderException;
import ai.idylnlp.model.manifest.SecondGenModelManifest;
import ai.idylnlp.model.nlp.AbstractEntityRecognizer;
import ai.idylnlp.model.nlp.SentenceSanitizer;
import ai.idylnlp.model.nlp.ner.EntityExtractionRequest;
import ai.idylnlp.model.nlp.ner.EntityExtractionResponse;
import ai.idylnlp.model.nlp.ner.EntityRecognizer;
import ai.idylnlp.nlp.sentence.sanitizers.DefaultSentenceSanitizer;
import com.neovisionaries.i18n.LanguageCode;
import ai.idylnlp.nlp.recognizer.configuration.DeepLearningEntityRecognizerConfiguration;
import ai.idylnlp.nlp.recognizer.deep.DeepLearningTokenNameFinder;
import opennlp.tools.namefind.TokenNameFinder;
/**
* An {@link EntityRecognizer} that is powered by the
* deeplearning4j framework. It uses a neural network
* to perform entity extraction.
*
* @author Mountain Fog, Inc.
*
*/
public class DeepLearningEntityRecognizer extends AbstractEntityRecognizer<DeepLearningEntityRecognizerConfiguration> implements EntityRecognizer {
private static final Logger LOGGER = LogManager.getLogger(DeepLearningEntityRecognizer.class);
// Language -> (Type, [Network, Vectors])
private Map<LanguageCode, Map<String, ImmutablePair<MultiLayerNetwork, WordVectors>>> loadedModels;
public DeepLearningEntityRecognizer(DeepLearningEntityRecognizerConfiguration configuration) {
super(configuration);
loadedModels = new HashMap<LanguageCode, Map<String, ImmutablePair<MultiLayerNetwork, WordVectors>>>();
for(String type : configuration.getEntityModels().keySet()) {
Map<LanguageCode, Set<SecondGenModelManifest>> types = configuration.getEntityModels().get(type);
for(LanguageCode language : types.keySet()) {
for(SecondGenModelManifest modelManifest : types.get(language)) {
if(!configuration.getBlacklistedModelIDs().contains(modelManifest.getModelId())) {
try {
final String modelFileName = new File(configuration.getEntityModelDirectory(), modelManifest.getModelFileName()).getAbsolutePath();
// Load the network from the model file.
LOGGER.info("Loading {} {} model from file: {}", language.getAlpha3().toString(), type, modelFileName);
final File modelFile = new File(modelFileName);
final MultiLayerNetwork multiLayerNetwork = ModelSerializer.restoreMultiLayerNetwork(modelFile.getAbsolutePath());
final String vectorsFileName = new File(configuration.getEntityModelDirectory(), modelManifest.getVectorsFileName()).getAbsolutePath();
// Verify the vectors file exists.
final File vectorsFile = new File(vectorsFileName);
// Load the word vectors from the file.
LOGGER.info("Loading vectors from file: {}", vectorsFileName);
final WordVectors wordVectors = WordVectorSerializer.loadStaticModel(vectorsFile);
// Register the network and vectors under (language, type) without overwriting
// models of other types that were already loaded for this language.
loadedModels.computeIfAbsent(language, k -> new HashMap<>())
.put(type, new ImmutablePair<MultiLayerNetwork, WordVectors>(multiLayerNetwork, wordVectors));
} catch (Exception ex) {
LOGGER.error("Unable to load model: " + modelManifest.getModelFileName(), ex);
getConfiguration().getBlacklistedModelIDs().add(modelManifest.getModelId());
LOGGER.warn("Model {} is blacklisted. Loading will not be attempted until restart.", modelManifest.getModelFileName());
// TODO: This should probably be made visible to the user somehow - maybe through the API?
}
} else {
LOGGER.info("Model {} is blacklisted. Loading will not be attempted until restart.", modelManifest.getModelFileName());
}
}
}
}
}
@Override
public EntityExtractionResponse extractEntities(EntityExtractionRequest request)
throws EntityFinderException, ModelLoaderException {
if(request.getText().length == 0) {
throw new IllegalArgumentException("Input text cannot be empty.");
}
if(request.getConfidenceThreshold() < 0 || request.getConfidenceThreshold() > 100) {
throw new IllegalArgumentException("Confidence threshold must be an integer between 0 and 100.");
}
SentenceSanitizer sentenceSanitizer = new DefaultSentenceSanitizer.Builder().lowerCase().removePunctuation().consolidateSpaces().build();
// All of the extracted entities.
Set<Entity> entities = new LinkedHashSet<Entity>();
// Keep track of the extraction time.
long startTime = System.currentTimeMillis();
String types[] = {};
if(!StringUtils.isEmpty(request.getType())) {
types = request.getType().split(",");
}
for(String type : getConfiguration().getEntityModels().keySet()) {
if(types.length == 0 || ArrayUtils.contains(types, type)) {
LOGGER.trace("Processing entity class {}.", type);
LanguageCode language = request.getLanguage();
// The manifests of the models that will be used for this extraction.
Set<SecondGenModelManifest> modelManifests = new HashSet<SecondGenModelManifest>();
if(request.getLanguage() == null) {
// TODO: Run all languages to support multilingual documents.
Set<LanguageCode> languages = getConfiguration().getEntityModels().get(type).keySet();
for(LanguageCode l : languages) {
modelManifests.addAll(getConfiguration().getEntityModels().get(type).get(l));
}
} else {
// We are doing a single language.
Map<LanguageCode, Set<SecondGenModelManifest>> models = getConfiguration().getEntityModels().get(type);
// If there are no models for this entity type, models will be null.
if(models != null) {
Set<SecondGenModelManifest> manifests = models.get(language);
// If manifests is not null, add those manifests to the set.
if(manifests != null) {
modelManifests.addAll(manifests);
}
}
}
if(CollectionUtils.isNotEmpty(modelManifests)) {
for(SecondGenModelManifest modelManifest : modelManifests) {
LOGGER.debug("{} has {} entity models.", type, modelManifests.size());
String t = modelManifest.getType();
// Get the network and word vectors for this language.
LOGGER.info("Getting model for type {}, language {}", modelManifest.getLanguageCode().getAlpha3().toString(), t);
ImmutablePair<MultiLayerNetwork, WordVectors> pair = loadedModels.get(modelManifest.getLanguageCode()).get(t);
MultiLayerNetwork multiLayerNetwork = pair.getLeft();
WordVectors wordVectors = pair.getRight();
// Get the nameFinder for this model if it exists.
TokenNameFinder nameFinder = nameFinders.get(modelManifest);
if(nameFinder == null) {
// Create a new namefinder and put it in the map.
nameFinder = new DeepLearningTokenNameFinder(multiLayerNetwork, wordVectors,
modelManifest.getWindowSize(), getLabels(request.getType()));
nameFinders.put(modelManifest, nameFinder);
}
Collection<Entity> extractedEntities = findEntities(nameFinder, request, modelManifest, sentenceSanitizer);
entities.addAll(extractedEntities);
}
} else {
LOGGER.warn("No entity models available for language {}.", language.getAlpha3().toString());
}
}
}
long extractionTime = (System.currentTimeMillis() - startTime);
// Create the response with the extracted entities and the time it took to extract them.
EntityExtractionResponse response = new EntityExtractionResponse(entities, extractionTime, true);
return response;
}
private String[] getLabels(String entityType) {
return new String[] { entityType + "-start", entityType + "-cont", "other" };
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-deeplearning/1.1.0/ai/idylnlp/nlp/recognizer
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-deeplearning/1.1.0/ai/idylnlp/nlp/recognizer/configuration/DeepLearningEntityRecognizerConfiguration.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.recognizer.configuration;
import java.io.File;
import java.util.LinkedHashSet;
import java.util.Set;
import ai.idylnlp.model.manifest.SecondGenModelManifest;
import ai.idylnlp.model.nlp.ConfidenceFilter;
import ai.idylnlp.model.nlp.configuration.AbstractEntityRecognizerConfiguration;
import ai.idylnlp.nlp.filters.confidence.SimpleConfidenceFilter;
import ai.idylnlp.nlp.recognizer.DeepLearningEntityRecognizer;
/**
* Configuration for a {@link DeepLearningEntityRecognizer}.
*
* @author Mountain Fog, Inc.
*
*/
public class DeepLearningEntityRecognizerConfiguration extends AbstractEntityRecognizerConfiguration<SecondGenModelManifest> {
private String entityModelDirectory;
public static class Builder {
private ConfidenceFilter confidenceFilter;
private Set<String> blacklistedModelIDs;
public Builder withConfidenceFilter(ConfidenceFilter confidenceFilter) {
this.confidenceFilter = confidenceFilter;
return this;
}
public Builder withBlacklistedModelIDs(Set<String> blacklistedModelIDs) {
this.blacklistedModelIDs = blacklistedModelIDs;
return this;
}
/**
* Creates the configuration.
* @param entityModelDirectory The full path to the models directory.
* @return A configured {@link DeepLearningEntityRecognizerConfiguration}.
*/
public DeepLearningEntityRecognizerConfiguration build(String entityModelDirectory) {
if(!entityModelDirectory.endsWith(File.separator)) {
entityModelDirectory = entityModelDirectory + File.separator;
}
if(confidenceFilter == null) {
confidenceFilter = new SimpleConfidenceFilter();
}
if(blacklistedModelIDs == null) {
blacklistedModelIDs = new LinkedHashSet<String>();
}
return new DeepLearningEntityRecognizerConfiguration(entityModelDirectory, confidenceFilter, blacklistedModelIDs);
}
}
private DeepLearningEntityRecognizerConfiguration(
String entityModelDirectory,
ConfidenceFilter confidenceFilter,
Set<String> blacklistedModelIDs) {
super(blacklistedModelIDs);
this.entityModelDirectory = entityModelDirectory;
this.blacklistedModelIDs = blacklistedModelIDs;
this.confidenceFilter = confidenceFilter;
}
public String getEntityModelDirectory() {
return entityModelDirectory;
}
}
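// Illustrative usage sketch (not part of the original source): building a configuration for
// the deep learning entity recognizer. The model directory is hypothetical, and the entity
// model manifests are assumed to be registered on the configuration elsewhere.
import ai.idylnlp.nlp.filters.confidence.SimpleConfidenceFilter;
import ai.idylnlp.nlp.recognizer.configuration.DeepLearningEntityRecognizerConfiguration;
class DeepLearningEntityRecognizerConfigurationExample {
static DeepLearningEntityRecognizerConfiguration build() {
return new DeepLearningEntityRecognizerConfiguration.Builder()
// An explicit confidence filter; if omitted, a SimpleConfidenceFilter is used by default.
.withConfidenceFilter(new SimpleConfidenceFilter())
.build("/models/entities");
}
}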
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-deeplearning/1.1.0/ai/idylnlp/nlp/recognizer
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-deeplearning/1.1.0/ai/idylnlp/nlp/recognizer/deep/DeepLearningTokenNameFinder.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.recognizer.deep;
import java.util.Arrays;
import java.util.List;
import org.deeplearning4j.models.embeddings.wordvectors.WordVectors;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.indexing.NDArrayIndex;
import opennlp.tools.namefind.BioCodec;
import opennlp.tools.namefind.TokenNameFinder;
import opennlp.tools.util.Span;
/**
* An implementation of OpenNLP's {@link TokenNameFinder} that
* performs entity extraction via a deeplearning4j neural
* network.
*
* @author Mountain Fog, Inc.
*
*/
public class DeepLearningTokenNameFinder implements TokenNameFinder {
private final MultiLayerNetwork network;
private final WordVectors wordVectors;
private int windowSize;
private String[] labels;
/**
* Creates a new token name finder.
* @param network The neural {@link MultiLayerNetwork network}.
* @param wordVectors The word {@link WordVectors vectors}.
* @param windowSize The size of the window.
* @param labels An array of outcome labels.
*/
public DeepLearningTokenNameFinder(MultiLayerNetwork network, WordVectors wordVectors,
int windowSize, String[] labels) {
this.network = network;
this.wordVectors = wordVectors;
this.windowSize = windowSize;
this.labels = labels;
}
@Override
public Span[] find(String[] tokens) {
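// Each token is classified independently: its feature matrix (a window of word vectors
// centered on the token) is fed through the network, and the highest-scoring label (for
// example person-start, person-cont, or other) becomes the token's outcome. The outcomes
// are then repaired and decoded into entity spans using OpenNLP's BioCodec.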
List<INDArray> featureMatrices = DeepLearningUtils.mapToFeatureMatrices(wordVectors, tokens, windowSize);
String[] outcomes = new String[tokens.length];
for (int i = 0; i < tokens.length; i++) {
INDArray predictionMatrix = network.output(featureMatrices.get(i), false);
INDArray outcomeVector = predictionMatrix.get(NDArrayIndex.point(0), NDArrayIndex.all(),
NDArrayIndex.point(windowSize - 1));
outcomes[i] = labels[max(outcomeVector)];
}
// Delete invalid spans ...
for (int i = 0; i < outcomes.length; i++) {
if (outcomes[i].endsWith("cont") && (i == 0 || "other".equals(outcomes[i - 1]))) {
outcomes[i] = "other";
}
}
return new BioCodec().decode(Arrays.asList(outcomes));
}
@Override
public void clearAdaptiveData() {
// There is nothing to clear.
}
/**
* Finds the index of the largest element in the {@link INDArray}.
* @param array The {@link INDArray}.
* @return The index of the largest element in the array.
*/
// TODO: This function needs to be tested.
private int max(INDArray array) {
int best = 0;
for (int i = 0; i < array.size(0); i++) {
if (array.getDouble(i) > array.getDouble(best)) {
best = i;
}
}
return best;
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-deeplearning/1.1.0/ai/idylnlp/nlp/recognizer
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-deeplearning/1.1.0/ai/idylnlp/nlp/recognizer/deep/DeepLearningUtils.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.recognizer.deep;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.deeplearning4j.models.embeddings.wordvectors.WordVectors;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.indexing.INDArrayIndex;
import org.nd4j.linalg.indexing.NDArrayIndex;
import opennlp.tools.namefind.BioCodec;
import opennlp.tools.namefind.NameSample;
/**
* Utility functions for deep learning model training and evaluation.
*
* Note: The functions in this class are not thread-safe.
*
* @author Mountain Fog, Inc.
*
*/
public class DeepLearningUtils {
public synchronized static List<INDArray> mapToLabelVectors(NameSample sample, int windowSize, String[] labelStrings) {
Map<String, Integer> labelToIndex = IntStream.range(0, labelStrings.length).boxed()
.collect(Collectors.toMap(i -> labelStrings[i], i -> i));
List<INDArray> vectors = new ArrayList<INDArray>();
// encode the outcome as one-hot-representation
String outcomes[] = new BioCodec().encode(sample.getNames(), sample.getSentence().length);
for (int i = 0; i < sample.getSentence().length; i++) {
INDArray labels = Nd4j.create(1, labelStrings.length, windowSize);
labels.putScalar(new int[] { 0, labelToIndex.get(outcomes[i]), windowSize - 1 }, 1.0d);
vectors.add(labels);
}
return vectors;
}
public synchronized static List<INDArray> mapToFeatureMatrices(WordVectors wordVectors, String[] tokens, int windowSize) {
List<INDArray> matrices = new ArrayList<>();
final int vectorSize = wordVectors.getWordVector(wordVectors.vocab().wordAtIndex(0)).length;
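// For every token, build a (1 x vectorSize x windowSize) feature matrix containing the word
// vectors of the tokens in a window centered on it. Window positions that fall outside the
// sentence, or whose token is out of vocabulary, are left as zero vectors.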
for (int i = 0; i < tokens.length; i++) {
INDArray features = Nd4j.create(1, vectorSize, windowSize);
for(int vectorIndex = 0; vectorIndex < windowSize; vectorIndex++) {
int tokenIndex = i + vectorIndex - ((windowSize - 1) / 2);
if (tokenIndex >= 0 && tokenIndex < tokens.length) {
String token = tokens[tokenIndex];
if (wordVectors.hasWord(token)) {
INDArray vector = wordVectors.getWordVectorMatrix(token);
features.put(new INDArrayIndex[] { NDArrayIndex.point(0), NDArrayIndex.all(),
NDArrayIndex.point(vectorIndex) }, vector);
}
}
}
matrices.add(features);
}
return matrices;
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-deeplearning/1.1.0/ai/idylnlp/nlp/recognizer
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-deeplearning/1.1.0/ai/idylnlp/nlp/recognizer/deep/NameSampleDataSetIterator.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.recognizer.deep;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.NoSuchElementException;
import org.deeplearning4j.models.embeddings.wordvectors.WordVectors;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.dataset.DataSet;
import org.nd4j.linalg.dataset.api.DataSetPreProcessor;
import org.nd4j.linalg.dataset.api.iterator.DataSetIterator;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.indexing.INDArrayIndex;
import org.nd4j.linalg.indexing.NDArrayIndex;
import opennlp.tools.namefind.NameSample;
import opennlp.tools.util.ObjectStream;
public class NameSampleDataSetIterator implements DataSetIterator {
private static final long serialVersionUID = 1L;
private final int windowSize;
private final String[] labels;
private final int batchSize;
private final int vectorSize;
private final int totalSamples;
private int cursor = 0;
private final ObjectStream<DataSet> samples;
public NameSampleDataSetIterator(ObjectStream<NameSample> samples, WordVectors wordVectors, int vectorSize,
int windowSize, String labels[], int batchSize) throws IOException {
this.windowSize = windowSize;
this.labels = labels;
this.vectorSize = vectorSize;
this.batchSize = batchSize;
this.samples = new NameSampleToDataSetStream(samples, wordVectors, windowSize, vectorSize, labels);
int total = 0;
DataSet sample;
while ((sample = this.samples.read()) != null) {
total++;
}
totalSamples = total;
samples.reset();
}
@Override
public DataSet next(int num) {
if (cursor >= totalExamples()) {
throw new NoSuchElementException();
}
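// Build a padded mini-batch: the feature and label arrays hold up to num samples, and the
// mask arrays flag which positions contain real data. The label mask only marks the last
// position of the window, since that is where the outcome is encoded.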
INDArray features = Nd4j.create(num, vectorSize, windowSize);
INDArray featuresMask = Nd4j.zeros(num, windowSize);
INDArray labels = Nd4j.create(num, 3, windowSize);
INDArray labelsMask = Nd4j.zeros(num, windowSize);
// iterate stream and copy to arrays
for (int i = 0; i < num; i++) {
DataSet sample;
try {
sample = samples.read();
} catch (IOException e) {
throw new RuntimeException(e);
}
if (sample != null) {
INDArray feature = sample.getFeatureMatrix();
features.put(new INDArrayIndex[] { NDArrayIndex.point(i) }, feature.get(NDArrayIndex.point(0)));
feature.get(new INDArrayIndex[] { NDArrayIndex.point(0), NDArrayIndex.all(), NDArrayIndex.point(0) });
for (int j = 0; j < windowSize; j++) {
featuresMask.putScalar(new int[] { i, j }, 1.0);
}
INDArray label = sample.getLabels();
labels.put(new INDArrayIndex[] { NDArrayIndex.point(i) }, label.get(NDArrayIndex.point(0)));
labelsMask.putScalar(new int[] { i, windowSize - 1 }, 1.0);
}
cursor++;
}
return new DataSet(features, labels, featuresMask, labelsMask);
}
@Override
public int totalExamples() {
return totalSamples;
}
@Override
public int inputColumns() {
return vectorSize;
}
@Override
public int totalOutcomes() {
return getLabels().size();
}
@Override
public boolean resetSupported() {
return true;
}
@Override
public boolean asyncSupported() {
return false;
}
@Override
public void reset() {
cursor = 0;
try {
samples.reset();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
public int batch() {
return batchSize;
}
@Override
public int cursor() {
return cursor;
}
@Override
public int numExamples() {
return totalExamples();
}
@Override
public void setPreProcessor(DataSetPreProcessor dataSetPreProcessor) {
throw new UnsupportedOperationException();
}
@Override
public DataSetPreProcessor getPreProcessor() {
throw new UnsupportedOperationException();
}
@Override
public List<String> getLabels() {
return Arrays.asList("start", "cont", "other");
}
@Override
public boolean hasNext() {
return cursor < numExamples();
}
@Override
public DataSet next() {
return next(batchSize);
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-deeplearning/1.1.0/ai/idylnlp/nlp/recognizer
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-deeplearning/1.1.0/ai/idylnlp/nlp/recognizer/deep/NameSampleToDataSetStream.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.recognizer.deep;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import org.deeplearning4j.models.embeddings.wordvectors.WordVectors;
import org.deeplearning4j.text.tokenization.tokenizer.preprocessor.CommonPreprocessor;
import org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory;
import org.deeplearning4j.text.tokenization.tokenizerfactory.TokenizerFactory;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.dataset.DataSet;
import opennlp.tools.namefind.NameSample;
import opennlp.tools.util.FilterObjectStream;
import opennlp.tools.util.ObjectStream;
public class NameSampleToDataSetStream extends FilterObjectStream<NameSample, DataSet> {
private final WordVectors wordVectors;
private final String[] labels;
private int windowSize;
private int vectorSize;
private Iterator<DataSet> dataSets = Collections.emptyListIterator();
public NameSampleToDataSetStream(ObjectStream<NameSample> samples, WordVectors wordVectors, int windowSize, int vectorSize, String[] labels) {
super(samples);
this.wordVectors = wordVectors;
this.windowSize = windowSize;
this.vectorSize = vectorSize;
this.labels = labels;
}
@Override
public final DataSet read() throws IOException {
if(dataSets.hasNext()) {
return dataSets.next();
} else {
NameSample sample;
while (!dataSets.hasNext() && (sample = samples.read()) != null) {
dataSets = createDataSets(sample);
}
if(dataSets.hasNext()) {
return read();
}
}
return null;
}
private Iterator<DataSet> createDataSets(NameSample sample) {
TokenizerFactory tokenizerFactory = new DefaultTokenizerFactory();
tokenizerFactory.setTokenPreProcessor(new CommonPreprocessor());
String s = String.join(" ", sample.getSentence());
List<String> tokens = tokenizerFactory.create(s).getTokens();
String[] t = tokens.toArray(new String[tokens.size()]);
// Note: t and sample.getSentence() can contain different tokens at this point because
// the tokenizer's preprocessor removes punctuation, which may misalign features and labels.
List<INDArray> features = DeepLearningUtils.mapToFeatureMatrices(wordVectors, t, windowSize);
List<INDArray> labels = DeepLearningUtils.mapToLabelVectors(sample, windowSize, this.labels);
List<DataSet> dataSetList = new ArrayList<>();
for (int i = 0; i < features.size(); i++) {
dataSetList.add(new DataSet(features.get(i), labels.get(i)));
}
return dataSetList.iterator();
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-dictionary/1.1.0/ai/idylnlp/nlp
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-dictionary/1.1.0/ai/idylnlp/nlp/recognizer/DictionaryEntityRecognizer.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.recognizer;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.Set;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import ai.idylnlp.model.entity.Entity;
import ai.idylnlp.model.exceptions.EntityFinderException;
import ai.idylnlp.model.nlp.ner.EntityExtractionRequest;
import ai.idylnlp.model.nlp.ner.EntityExtractionResponse;
import ai.idylnlp.model.nlp.ner.EntityRecognizer;
import ai.idylnlp.nlp.utils.ngrams.NgramUtils;
import com.google.common.hash.BloomFilter;
import com.google.common.hash.Funnels;
import com.neovisionaries.i18n.LanguageCode;
/**
* Implementation of {@link EntityRecognizer} that uses a dictionary.
*
* @author Mountain Fog, Inc.
*
*/
public class DictionaryEntityRecognizer implements EntityRecognizer {
private static final Logger LOGGER = LogManager.getLogger(DictionaryEntityRecognizer.class);
private LanguageCode languageCode;
private Set<String> dictionary;
private String type;
private double fpp = 0.1;
private boolean caseSensitive;
/**
 * Creates a new dictionary entity recognizer.
 * @param languageCode The {@link LanguageCode language} of the text.
 * @param dictionary The set of dictionary entries. When case-insensitive matching
 * is used the entries should be lowercase.
 * @param type The type of entity being extracted. There is a one-to-one relationship
 * between dictionary and entity type.
 * @param fpp The desired false positive probability of the underlying Bloom filter. Use this to tune the performance.
 * @param caseSensitive Whether dictionary lookups are case sensitive.
 */
public DictionaryEntityRecognizer(LanguageCode languageCode, Set<String> dictionary,
String type, double fpp, boolean caseSensitive) {
this.languageCode = languageCode;
this.dictionary = dictionary;
this.type = type;
this.fpp = fpp;
this.caseSensitive = caseSensitive;
}
/**
 * Creates a new dictionary entity recognizer.
 * @param languageCode The {@link LanguageCode language} of the text.
 * @param dictionaryFile The {@link File file} defining the dictionary, one entry per line.
 * Lines starting with <code>#</code> are treated as comments and ignored.
 * @param type The type of entity being extracted. There is a one-to-one relationship
 * between dictionary and entity type.
 * @param fpp The desired false positive probability of the underlying Bloom filter. Use this to tune the performance.
 * @param caseSensitive Whether dictionary lookups are case sensitive.
 * @throws IOException Thrown if the dictionary file cannot be accessed.
 */
public DictionaryEntityRecognizer(LanguageCode languageCode, File dictionaryFile, String type,
double fpp, boolean caseSensitive) throws IOException {
this.languageCode = languageCode;
this.type = type;
this.fpp = fpp;
this.caseSensitive = caseSensitive;
// Initialize the dictionary before populating it from the file.
this.dictionary = new LinkedHashSet<String>();
try(BufferedReader br = Files.newBufferedReader(dictionaryFile.toPath(), StandardCharsets.UTF_8)) {
for(String line = null; (line = br.readLine()) != null;) {
if(!line.startsWith("#")) {
if(!caseSensitive) {
dictionary.add(line.toLowerCase());
} else {
dictionary.add(line);
}
}
}
}
}
@Override
public EntityExtractionResponse extractEntities(EntityExtractionRequest request) throws EntityFinderException {
final Set<Entity> entities = new LinkedHashSet<Entity>();
long startTime = System.currentTimeMillis();
final String[] tokens = request.getText();
try {
final BloomFilter<String> filter = BloomFilter.create(
Funnels.stringFunnel(Charset.defaultCharset()), dictionary.size(), fpp);
for(String entry : dictionary) {
if(!caseSensitive) {
filter.put(entry.toLowerCase());
} else {
filter.put(entry);
}
}
// Break the tokens into n-grams because some dictionary entries
// may be more than one token.
final String[] ngrams = NgramUtils.getNgrams(tokens);
for(String ngram : ngrams) {
boolean mightContain;
if(!caseSensitive) {
mightContain = filter.mightContain(ngram.toLowerCase());
} else {
mightContain = filter.mightContain(ngram);
}
if(mightContain) {
// Make sure it does exist in the dictionary.
boolean contains;
if(!caseSensitive) {
contains = dictionary.contains(ngram.toLowerCase());
} else {
contains = dictionary.contains(ngram);
}
if(contains) {
// Find the token-based span for this entity by locating its tokens in the input tokens.
String[] d = ngram.split(" ");
int start = Collections.indexOfSubList(Arrays.asList(tokens), Arrays.asList(d));
// Create a new entity object.
final Entity entity = new Entity(ngram, 100.0, type, languageCode.getAlpha3().toString());
entity.setSpan(new ai.idylnlp.model.entity.Span(start, start + d.length - 1));
entity.setContext(request.getContext());
entity.setExtractionDate(System.currentTimeMillis());
LOGGER.debug("Found entity with text: {}", ngram);
entities.add(entity);
}
}
}
final long extractionTime = (System.currentTimeMillis() - startTime);
return new EntityExtractionResponse(entities, extractionTime, true);
} catch (Exception ex) {
LOGGER.error("Unable to find entities with the DictionaryEntityRecognizer.", ex);
throw new EntityFinderException("Unable to find entities with the DictionaryEntityRecognizer.", ex);
}
}
}
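/*
 * Usage sketch: a small in-memory dictionary of lowercase place names with
 * case-insensitive matching. How the EntityExtractionRequest is built (tokens,
 * context, etc.) depends on the caller and is assumed here.
 */
class DictionaryEntityRecognizerExample {
  static EntityExtractionResponse run(EntityExtractionRequest request) throws Exception {
    final Set<String> dictionary = new LinkedHashSet<String>(Arrays.asList("new york", "london"));
    final DictionaryEntityRecognizer recognizer =
        new DictionaryEntityRecognizer(LanguageCode.en, dictionary, "place", 0.01, false);
    // Each dictionary hit is returned as an entity of type "place" with 100% confidence.
    return recognizer.extractEntities(request);
  }
}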
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-model/1.1.0/ai/idylnlp/model
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-model/1.1.0/ai/idylnlp/model/nlp/AbstractEntityRecognizer.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.model.nlp;
import java.math.BigDecimal;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.apache.commons.math3.util.Precision;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import ai.idylnlp.model.entity.Entity;
import ai.idylnlp.model.exceptions.EntityFinderException;
import ai.idylnlp.model.manifest.ModelManifest;
import ai.idylnlp.model.nlp.configuration.AbstractEntityRecognizerConfiguration;
import ai.idylnlp.model.nlp.ner.EntityExtractionRequest;
import ai.idylnlp.model.nlp.ner.EntityRecognizer;
import ai.idylnlp.model.nlp.sentiment.Sentiment;
import ai.idylnlp.model.nlp.sentiment.SentimentAnalysisException;
import ai.idylnlp.model.nlp.sentiment.SentimentAnalysisRequest;
import ai.idylnlp.model.nlp.sentiment.SentimentAnalyzer;
import opennlp.tools.namefind.NameFinderME;
import opennlp.tools.namefind.TokenNameFinder;
/**
* Base class for entity recognizers.
*
* @author Mountain Fog, Inc.
*
*/
public abstract class AbstractEntityRecognizer<T extends AbstractEntityRecognizerConfiguration> implements EntityRecognizer {
private static final Logger LOGGER = LogManager.getLogger(AbstractEntityRecognizer.class);
/**
* The key for the model filename in the entity metadata.
*/
public static final String METADATA_MODEL_FILENAME_KEY = "x-model-filename";
public T configuration;
// A map that contains the name finders to prevent reinstantiating them for every request.
protected Map<ModelManifest, TokenNameFinder> nameFinders;
public AbstractEntityRecognizer(T configuration) {
this.configuration = configuration;
this.nameFinders = new HashMap<ModelManifest, TokenNameFinder>();
}
/**
 * Finds entities.
 * @param nameFinder The {@link TokenNameFinder} for entity extraction.
 * @param entityExtractionRequest The {@link EntityExtractionRequest entity extraction request}.
 * @param modelManifest The entity model's {@link ModelManifest manifest}.
 * @param sentenceSanitizer The {@link SentenceSanitizer} applied to each extracted entity's text.
 * @return A collection of {@link Entity entities}.
 * @throws EntityFinderException Thrown if the entity extraction encounters an error.
 */
protected Collection<Entity> findEntities(TokenNameFinder nameFinder, EntityExtractionRequest entityExtractionRequest,
ModelManifest modelManifest, SentenceSanitizer sentenceSanitizer) throws EntityFinderException {
LOGGER.trace("Identifying entities of type {} with confidence limit {}.", modelManifest.getType(),
entityExtractionRequest.getConfidenceThreshold());
Collection<Entity> entities = new LinkedList<Entity>();
final String tokens[] = entityExtractionRequest.getText();
try {
// Find the entities in the tokenized text.
// The values in these Spans are NOT string character offsets; they are indices into the 'tokens' array.
opennlp.tools.util.Span names[] = nameFinder.find(tokens);
// Simple way to drop intersecting spans, otherwise the NameSample is invalid
opennlp.tools.util.Span reducedNames[] = NameFinderME.dropOverlappingSpans(names);
// Get the text of the entities.
String[] extractedEntities = opennlp.tools.util.Span.spansToStrings(reducedNames, tokens);
double[] probabilities;
if(nameFinder instanceof NameFinderME) {
probabilities = ((NameFinderME) nameFinder).probs(reducedNames);
} else {
// All probabilities are 1.0 since a dictionary or regex name finder was used.
probabilities = new double[reducedNames.length];
Arrays.fill(probabilities, 1.0);
}
double normalizedConfidenceThreshold = ConfidenceNormalization.normalizeConfidence(entityExtractionRequest.getConfidenceThreshold());
// Index for looping over the spans returned by OpenNLP.
int x = 0;
//for each name that got found, create our corresponding occurrence
for (opennlp.tools.util.Span name : reducedNames) {
// Check the confidence threshold for extraction.
if(configuration.getConfidenceFilter().test(
modelManifest.getModelId(), probabilities[x], normalizedConfidenceThreshold)) {
String entityText = extractedEntities[x];
// Sanitize the entity.
entityText = sentenceSanitizer.sanitize(entityText);
// Round the confidence value.
double roundedConfidence = Precision.round(probabilities[x], 2, BigDecimal.ROUND_HALF_DOWN);
// Create a new entity object.
Entity entity = new Entity(entityText, roundedConfidence, modelManifest.getType(), modelManifest.getLanguageCode().getAlpha3().toString());
// TODO: Remove last two arguments.
entity.setSpan(new ai.idylnlp.model.entity.Span(name.getStart(), name.getEnd()));
entity.setContext(entityExtractionRequest.getContext());
entity.setExtractionDate(System.currentTimeMillis());
if(entityExtractionRequest.isIncludeModelFileNameInMetadata()) {
// Put the model filename in the entity metadata.
entity.getMetadata().put(METADATA_MODEL_FILENAME_KEY, modelManifest.getModelFileName());
}
LOGGER.debug("Found entity with text: " + entityText + "; confidence: " + probabilities[x] + "; language: " + modelManifest.getLanguageCode());
// Process the statistics for the entity.
if(configuration.getStatsReporter() != null) {
configuration.getStatsReporter().recordEntityExtraction(entity, modelManifest);
}
entities.add(entity);
}
}
} catch (Exception ex) {
LOGGER.error("Unable to find entities.", ex);
throw new EntityFinderException("Unable to find entities.", ex);
}
LOGGER.trace("Returning {} entities.", entities.size());
return entities;
}
protected Map<String, String> getSentiments(String text, List<SentimentAnalyzer> sentimentAnalyzers) {
Map<String, String> sentiments = new LinkedHashMap<String, String>();
// Run sentiment analysis on the sentence.
for(SentimentAnalyzer sentimentAnalyzer : sentimentAnalyzers) {
try {
Sentiment sentiment = sentimentAnalyzer.analyze(new SentimentAnalysisRequest(text));
sentiments.put("Sentiment", String.valueOf(sentiment.getSentimentValue()));
} catch (SentimentAnalysisException ex) {
LOGGER.error("Unable to run sentiment analysis using analyzer: " + sentimentAnalyzer.getName(), ex);
}
}
return sentiments;
}
/**
* Gets the configuration.
* @return The configuration.
*/
protected T getConfiguration() {
return configuration;
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-model/1.1.0/ai/idylnlp/model
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-model/1.1.0/ai/idylnlp/model/nlp/ConfidenceNormalization.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.model.nlp;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
public class ConfidenceNormalization {
private static final Logger LOGGER = LogManager.getLogger(ConfidenceNormalization.class);
/**
* Normalizes the confidence from an integer between 0 and 100
* to a decimal value between 0 and 1.
* @param confidence The confidence to normalize.
* @return The normalized confidence.
*/
public static double normalizeConfidence(int confidence) {
// The confidence threshold comes in as an integer between 0 and 100. We need to divide it by 100 to make it between 0 and 1.
double normalizedConfidenceThreshold = 0;
if(confidence > 0) {
normalizedConfidenceThreshold = ((double) confidence / 100);
}
LOGGER.debug("Normalized confidence from {} to {}", confidence, normalizedConfidenceThreshold);
return normalizedConfidenceThreshold;
}
}
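/*
 * Usage sketch: confidence thresholds arrive as whole-number percentages and are
 * scaled to the 0-1 range used by the confidence filters.
 */
class ConfidenceNormalizationExample {
  public static void main(String[] args) {
    System.out.println(ConfidenceNormalization.normalizeConfidence(75)); // 0.75
    System.out.println(ConfidenceNormalization.normalizeConfidence(0));  // 0.0
  }
}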
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-model/1.1.0/ai/idylnlp/model/nlp
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-model/1.1.0/ai/idylnlp/model/nlp/configuration/AbstractEntityRecognizerConfiguration.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.model.nlp.configuration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import ai.idylnlp.model.manifest.ModelManifest;
import ai.idylnlp.model.manifest.StandardModelManifest;
import ai.idylnlp.model.stats.StatsReporter;
import com.neovisionaries.i18n.LanguageCode;
import ai.idylnlp.model.nlp.ConfidenceFilter;
public abstract class AbstractEntityRecognizerConfiguration<T extends ModelManifest> {
private static final Logger LOGGER = LogManager.getLogger(AbstractEntityRecognizerConfiguration.class);
protected boolean preloadModels = true;
protected Set<String> blacklistedModelIDs;
protected ConfidenceFilter confidenceFilter;
protected StatsReporter statsReporter;
protected Map<String, Map<LanguageCode, Set<T>>> entityModels = new HashMap<>();
public AbstractEntityRecognizerConfiguration(Set<String> blacklistedModelIDs) {
this.blacklistedModelIDs = blacklistedModelIDs;
}
/**
 * Adds a model to the map of entity models to use during extraction.
 * @param entityType The entity type (class) of the model.
 * @param language The {@link LanguageCode language} supported by the model.
 * @param modelManifest The {@link ModelManifest manifest} of the model.
 */
public void addEntityModel(String entityType, LanguageCode language, T modelManifest) {
if(entityModels.containsKey(entityType)) {
// There already exists an entity class so add this model to it.
Map<LanguageCode, Set<T>> m = entityModels.get(entityType);
if(m.containsKey(language)) {
LOGGER.trace("Adding manifest for model {}.", modelManifest.getModelId());
entityModels.get(entityType).get(language).add(modelManifest);
} else {
Set<T> manifests = new HashSet<T>();
manifests.add(modelManifest);
m.put(language, manifests);
entityModels.put(entityType, m);
}
} else {
// This entity class does not exist so just add it.
Map<LanguageCode, Set<T>> m = new HashMap<>();
Set<T> modelManifests = new HashSet<T>();
modelManifests.add(modelManifest);
m.put(language, modelManifests);
LOGGER.trace("Adding manifest for model {}.", modelManifest.getModelId());
entityModels.put(entityType, m);
}
}
/**
* Gets the {@link ConfidenceFilter}.
* @return The {@link ConfidenceFilter}.
*/
public ConfidenceFilter getConfidenceFilter() {
return confidenceFilter;
}
/**
* Sets the {@link ConfidenceFilter}.
* @param confidenceFilter The {@link ConfidenceFilter}.
*/
public void setConfidenceFilter(ConfidenceFilter confidenceFilter) {
this.confidenceFilter = confidenceFilter;
}
/**
* Gets the {@link StatsReporter}.
* @return The {@link StatsReporter}.
*/
public StatsReporter getStatsReporter() {
return statsReporter;
}
/**
 * Sets the {@link StatsReporter}.
 * @param statsReporter The {@link StatsReporter}.
 */
public void setStatsReporter(StatsReporter statsReporter) {
this.statsReporter = statsReporter;
}
/**
 * Gets a boolean indicating if the entity models will be preloaded.
 * @return A boolean indicating if the entity models will be preloaded.
 */
public boolean isPreloadModels() {
return preloadModels;
}
/**
 * Sets whether the entity models will be preloaded.
 * @param preloadModels Set to true to enable model preloading.
 */
public void setPreloadModels(boolean preloadModels) {
this.preloadModels = preloadModels;
}
/**
 * Gets the entity models used during entity extraction.
 * @return A map of entity types to their model manifests, keyed by language.
 */
public Map<String, Map<LanguageCode, Set<T>>> getEntityModels() {
return entityModels;
}
/**
 * Sets the entity models used during entity extraction.
 * @param entityModels A map of entity types to their model manifests, keyed by language.
 */
public void setEntityModels(Map<String, Map<LanguageCode, Set<T>>> entityModels) {
this.entityModels = entityModels;
}
/**
* Gets the set of blacklisted model IDs.
* @return The set of blacklisted model IDs.
*/
public Set<String> getBlacklistedModelIDs() {
return blacklistedModelIDs;
}
/**
* Sets the set of blacklisted model IDs.
* @param blacklistedModelIDs The set of blacklisted model IDs.
*/
public void setBlacklistedModelIDs(Set<String> blacklistedModelIDs) {
this.blacklistedModelIDs = blacklistedModelIDs;
}
}
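/*
 * Usage sketch (assumptions: `configuration` is an instance of a concrete subclass and
 * `manifest` is a ModelManifest for an English person model; manifest construction is
 * deployment-specific and not shown):
 *
 *   configuration.addEntityModel("person", LanguageCode.en, manifest);
 *   // getEntityModels() now maps "person" -> { en -> { manifest } }.
 *   configuration.setConfidenceFilter(confidenceFilter);
 */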
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-opennlp/1.1.0/ai/idylnlp/nlp
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-opennlp/1.1.0/ai/idylnlp/nlp/recognizer/OpenNLPEntityRecognizer.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.recognizer;
import java.util.Collection;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import ai.idylnlp.model.entity.Entity;
import com.neovisionaries.i18n.LanguageCode;
import ai.idylnlp.model.exceptions.EntityFinderException;
import ai.idylnlp.model.exceptions.ModelLoaderException;
import ai.idylnlp.model.manifest.StandardModelManifest;
import ai.idylnlp.model.nlp.AbstractEntityRecognizer;
import ai.idylnlp.model.nlp.SentenceSanitizer;
import ai.idylnlp.model.nlp.Tokenizer;
import ai.idylnlp.model.nlp.ner.EntityExtractionRequest;
import ai.idylnlp.model.nlp.ner.EntityExtractionResponse;
import ai.idylnlp.model.nlp.ner.EntityRecognizer;
import ai.idylnlp.nlp.recognizer.configuration.OpenNLPEntityRecognizerConfiguration;
import ai.idylnlp.nlp.sentence.sanitizers.DefaultSentenceSanitizer;
import opennlp.tools.namefind.NameFinderME;
import opennlp.tools.namefind.TokenNameFinder;
import opennlp.tools.namefind.TokenNameFinderModel;
/**
* Implementation of {@link EntityRecognizer} for performing
* named-entity recognition for natural language text using OpenNLP.
*
* Some code to get the character-based indexes for the spans
* was adapted from CLAVIN (https://github.com/Berico-Technologies/CLAVIN)
* and used under the Apache Software License, version 2.0.
*
* @author Mountain Fog, Inc.
*
*/
public class OpenNLPEntityRecognizer extends AbstractEntityRecognizer<OpenNLPEntityRecognizerConfiguration> implements EntityRecognizer {
private static final Logger LOGGER = LogManager.getLogger(OpenNLPEntityRecognizer.class);
/**
 * Creates a new {@link OpenNLPEntityRecognizer} that is configured
 * per the {@link OpenNLPEntityRecognizerConfiguration}.
 * @param configuration The {@link OpenNLPEntityRecognizerConfiguration configuration}.
 */
public OpenNLPEntityRecognizer(OpenNLPEntityRecognizerConfiguration configuration) {
super(configuration);
if(configuration.isPreloadModels() && configuration.getEntityModels().size() > 0) {
// Load the models into memory.
LOGGER.debug("Preloading the models.");
// Preload the entity models by looping over all of the entity model types.
for(String type : configuration.getEntityModels().keySet()) {
LOGGER.debug("Preloading models for entity type {}.", type);
// Loop over all languages for this entity type.
for(LanguageCode language : configuration.getEntityModels().get(type).keySet()) {
// Get the model file name for this entity type for this language.
Set<StandardModelManifest> modelManifests = configuration.getEntityModels().get(type).get(language);
LOGGER.debug("There are {} model manifests to preload for entity type {}.", modelManifests.size(), type);
for(StandardModelManifest modelManifest : modelManifests) {
if(!configuration.getBlacklistedModelIDs().contains(modelManifest.getModelId())) {
LOGGER.debug("Preloading model file {}.", modelManifest.getModelFileName());
try {
configuration.getEntityModelLoader().getModel(modelManifest, TokenNameFinderModel.class);
} catch (ModelLoaderException ex) {
LOGGER.error("Unable to load model: " + modelManifest.getModelFileName(), ex);
LOGGER.warn("Model {} is blacklisted. Loading will not be attempted until restart.", modelManifest.getModelFileName());
// Automatically blacklist this model.
configuration.getBlacklistedModelIDs().add(modelManifest.getModelId());
}
}
}
}
}
} else {
if(configuration.getEntityModels().size() > 0) {
LOGGER.info("Model preloading is disabled.");
} else {
// No entity models were specified so there is nothing to preload.
LOGGER.warn("No entity models were specified.");
}
}
}
/**
* {@inheritDoc}
*/
@Override
public EntityExtractionResponse extractEntities(EntityExtractionRequest request) throws EntityFinderException, ModelLoaderException {
if(request.getText().length == 0) {
throw new IllegalArgumentException("Input text cannot be empty.");
}
if(request.getConfidenceThreshold() < 0 || request.getConfidenceThreshold() > 100) {
throw new IllegalArgumentException("Confidence threshold must be an integer between 0 and 100.");
}
// The sanitizer without any properties set does not do anything.
SentenceSanitizer sentenceSanitizer = new DefaultSentenceSanitizer.Builder().build();
// All of the extracted entities.
Set<Entity> entities = new LinkedHashSet<Entity>();
// Keep track of the extraction time.
long startTime = System.currentTimeMillis();
String types[] = {};
if(!StringUtils.isEmpty(request.getType())) {
types = request.getType().split(",");
}
for(String type : getConfiguration().getEntityModels().keySet()) {
if(types.length == 0 || ArrayUtils.contains(types, type)) {
LOGGER.trace("Processing entity class {}.", type);
LanguageCode language = request.getLanguage();
// The manifests of the models that will be used for this extraction.
Set<StandardModelManifest> modelManifests = new HashSet<StandardModelManifest>();
if(request.getLanguage() == null) {
// TODO: Run all languages to support multilingual documents.
Set<LanguageCode> languages = getConfiguration().getEntityModels().get(type).keySet();
for(LanguageCode l : languages) {
modelManifests.addAll(getConfiguration().getEntityModels().get(type).get(l));
}
} else {
// We are doing a single language.
Map<LanguageCode, Set<StandardModelManifest>> models = getConfiguration().getEntityModels().get(type);
// If there are not any models for this entity type <code>models</code> will be null.
if(models != null) {
Set<StandardModelManifest> manifests = models.get(language);
// If <code>manifests</code> is not null add those manifests to the set.
if(manifests != null) {
modelManifests.addAll(manifests);
}
}
}
if(CollectionUtils.isNotEmpty(modelManifests)) {
LOGGER.trace("{} has {} entity models.", type, modelManifests.size());
for(StandardModelManifest modelManifest : modelManifests) {
if(!configuration.getBlacklistedModelIDs().contains(modelManifest.getModelId())) {
// Create the token name finder model.
final TokenNameFinderModel tokenNameFinderModel = getConfiguration().getEntityModelLoader().getModel(modelManifest, TokenNameFinderModel.class);
// The tokenNameFinderModel can be null in cases in which model validation failed.
if(tokenNameFinderModel != null) {
// Get the nameFinder for this model if it exists.
TokenNameFinder nameFinderMe = nameFinders.get(modelManifest);
if(nameFinderMe == null) {
// Create a new namefinder and put it in the map of model manifests to name finders.
nameFinderMe = new NameFinderME(tokenNameFinderModel);
nameFinders.put(modelManifest, nameFinderMe);
}
// Extract the entities.
final Collection<Entity> extractedEntities = findEntities(nameFinderMe, request, modelManifest, sentenceSanitizer);
// Clear the adaptive data after each extraction so context from one request
// does not carry over to the next. The cached NameFinderME instances are not
// thread-safe, so sharing this recognizer across threads requires external synchronization.
nameFinderMe.clearAdaptiveData();
// Want to return all entities.
entities.addAll(extractedEntities);
}
} else {
LOGGER.warn("Entity model {} is blacklisted. Reload will not be attempted until restart.", modelManifest.getModelFileName());
}
}
} else {
LOGGER.warn("No entity models available for language {}.", language.getAlpha3().toString());
}
}
}
long extractionTime = (System.currentTimeMillis() - startTime);
// Create the response with the extracted entities and the time it took to extract them.
EntityExtractionResponse response = new EntityExtractionResponse(entities, extractionTime, true);
return response;
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-opennlp/1.1.0/ai/idylnlp/nlp/recognizer
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-opennlp/1.1.0/ai/idylnlp/nlp/recognizer/configuration/OpenNLPEntityRecognizerConfiguration.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.recognizer.configuration;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;
import ai.idylnlp.opennlp.custom.modelloader.ModelLoader;
import com.neovisionaries.i18n.LanguageCode;
import ai.idylnlp.model.manifest.StandardModelManifest;
import ai.idylnlp.model.nlp.ConfidenceFilter;
import ai.idylnlp.model.nlp.configuration.AbstractEntityRecognizerConfiguration;
import ai.idylnlp.nlp.filters.confidence.SimpleConfidenceFilter;
import ai.idylnlp.nlp.recognizer.OpenNLPEntityRecognizer;
import opennlp.tools.namefind.TokenNameFinderModel;
/**
* Configuration that provides the required parameters to
* initialize the {@link OpenNLPEntityRecognizer}.
*
* @author Mountain Fog, Inc.
*
*/
public class OpenNLPEntityRecognizerConfiguration extends AbstractEntityRecognizerConfiguration<StandardModelManifest> {
private ModelLoader<TokenNameFinderModel> entityModelLoader;
private OpenNLPEntityRecognizerConfiguration(
ModelLoader<TokenNameFinderModel> entityModelLoader,
ConfidenceFilter confidenceFilter,
Map<String, Map<LanguageCode, Set<StandardModelManifest>>> entityModels,
Set<String> blacklistedModelIDs) {
super(blacklistedModelIDs);
this.entityModelLoader = entityModelLoader;
this.confidenceFilter = confidenceFilter;
this.entityModels = entityModels;
}
public static class Builder {
private ModelLoader<TokenNameFinderModel> entityModelLoader;
private ConfidenceFilter confidenceFilter;
private Map<String, Map<LanguageCode, Set<StandardModelManifest>>> entityModels;
private Set<String> blacklistedModelIDs;
public Builder withEntityModels(Map<String, Map<LanguageCode, Set<StandardModelManifest>>> entityModels) {
this.entityModels = entityModels;
return this;
}
public Builder withEntityModelLoader(ModelLoader<TokenNameFinderModel> entityModelLoader) {
this.entityModelLoader = entityModelLoader;
return this;
}
public Builder withConfidenceFilter(ConfidenceFilter confidenceFilter) {
this.confidenceFilter = confidenceFilter;
return this;
}
public Builder withBlacklistedModelIDs(Set<String> blacklistedModelIDs) {
this.blacklistedModelIDs = blacklistedModelIDs;
return this;
}
public OpenNLPEntityRecognizerConfiguration build() {
if(confidenceFilter == null) {
confidenceFilter = new SimpleConfidenceFilter();
}
if(entityModels == null) {
entityModels = new HashMap<String, Map<LanguageCode, Set<StandardModelManifest>>>();
}
if(blacklistedModelIDs == null) {
blacklistedModelIDs = new LinkedHashSet<String>();
}
return new OpenNLPEntityRecognizerConfiguration(
entityModelLoader, confidenceFilter, entityModels, blacklistedModelIDs
);
}
}
/**
* Gets the entity model loader.
* @return A {@link ModelLoader} for a {@link TokenNameFinderModel}.
*/
public ModelLoader<TokenNameFinderModel> getEntityModelLoader() {
return entityModelLoader;
}
}
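/*
 * Usage sketch: building a configuration with the fluent builder. The ModelLoader is
 * assumed to be constructed elsewhere (its setup is deployment-specific), and the
 * entity model map is assumed to be populated via addEntityModel before extraction.
 */
class OpenNLPEntityRecognizerConfigurationExample {
  static OpenNLPEntityRecognizer build(ModelLoader<TokenNameFinderModel> modelLoader) {
    final OpenNLPEntityRecognizerConfiguration configuration =
        new OpenNLPEntityRecognizerConfiguration.Builder()
            .withEntityModelLoader(modelLoader)
            .withConfidenceFilter(new SimpleConfidenceFilter())
            .build();
    return new OpenNLPEntityRecognizer(configuration);
  }
}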
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-standard/1.1.0/ai/idylnlp/nlp
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-standard/1.1.0/ai/idylnlp/nlp/recognizer/DateEntityRecognizer.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.recognizer;
import java.util.Date;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import com.joestelmach.natty.DateGroup;
import com.joestelmach.natty.Parser;
import ai.idylnlp.model.entity.Entity;
import ai.idylnlp.model.exceptions.EntityFinderException;
import ai.idylnlp.model.nlp.ner.EntityExtractionRequest;
import ai.idylnlp.model.nlp.ner.EntityExtractionResponse;
import ai.idylnlp.model.nlp.ner.EntityRecognizer;
import ai.idylnlp.nlp.utils.SpanUtils;
import com.neovisionaries.i18n.LanguageCode;
import opennlp.tools.tokenize.SimpleTokenizer;
import opennlp.tools.util.Span;
/**
* An {@link EntityRecognizer} that identifies dates.
* This only works for English text.
*
* @author Mountain Fog, Inc.
*
*/
public class DateEntityRecognizer implements EntityRecognizer {
private static final Logger LOGGER = LogManager.getLogger(DateEntityRecognizer.class);
private static final String ENTITY_TYPE = "date";
/**
* {@inheritDoc}
*/
@Override
public EntityExtractionResponse extractEntities(EntityExtractionRequest entityExtractionRequest) throws EntityFinderException {
Set<Entity> entities = new LinkedHashSet<>();
long startTime = System.currentTimeMillis();
Parser parser = new Parser();
final String text = StringUtils.join(entityExtractionRequest.getText(), " ");
List<DateGroup> groups = parser.parse(text);
for (DateGroup group : groups) {
List<Date> dates = group.getDates();
for(Date date : dates) {
final String entityText = date.toString() + " (" + group.getText() + ")";
Entity entity = new Entity(entityText);
entity.setConfidence(100);
entity.setType(ENTITY_TYPE);
entity.getMetadata().put("time", String.valueOf(date.getTime()));
entity.setContext(entityExtractionRequest.getContext());
entity.setExtractionDate(System.currentTimeMillis());
entity.setLanguageCode(LanguageCode.en.getAlpha3().toString());
// TODO: Set the token-based span correctly.
Span span = SpanUtils.getSpan(SimpleTokenizer.INSTANCE, group.getText(), text);
entity.setSpan(new ai.idylnlp.model.entity.Span(span.getStart(), span.getEnd()));
entities.add(entity);
}
}
long elapsedTime = System.currentTimeMillis() - startTime;
return new EntityExtractionResponse(entities, elapsedTime, true);
}
}
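/*
 * Usage sketch: the recognizer is stateless, so a single instance can be reused.
 * How the EntityExtractionRequest is built from tokenized English text is assumed
 * here; each natural-language date found by Natty is returned as an entity of type "date".
 */
class DateEntityRecognizerExample {
  static EntityExtractionResponse run(EntityExtractionRequest request) throws EntityFinderException {
    final DateEntityRecognizer recognizer = new DateEntityRecognizer();
    return recognizer.extractEntities(request);
  }
}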
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-standard/1.1.0/ai/idylnlp/nlp
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-standard/1.1.0/ai/idylnlp/nlp/recognizer/PhoneNumberEntityRecognizer.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.recognizer;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import org.apache.commons.collections4.IteratorUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import com.google.i18n.phonenumbers.PhoneNumberMatch;
import com.google.i18n.phonenumbers.PhoneNumberUtil;
import ai.idylnlp.model.entity.Entity;
import ai.idylnlp.model.nlp.ner.EntityExtractionRequest;
import ai.idylnlp.model.nlp.ner.EntityExtractionResponse;
import ai.idylnlp.model.nlp.ner.EntityRecognizer;
public class PhoneNumberEntityRecognizer implements EntityRecognizer {
private static final Logger LOGGER = LogManager.getLogger(PhoneNumberEntityRecognizer.class);
private static final String ENTITY_TYPE = "phone";
@Override
public EntityExtractionResponse extractEntities(EntityExtractionRequest request) {
LOGGER.trace("Finding entities with the phone number entity recognizer.");
Set<Entity> entities = new LinkedHashSet<>();
Set<String> regions = PhoneNumberUtil.getInstance().getSupportedRegions();
long startTime = System.currentTimeMillis();
final String text = StringUtils.join(request.getText(), " ");
for(String region : regions) {
Iterable<PhoneNumberMatch> iterable = PhoneNumberUtil.getInstance().findNumbers(text, region);
List<PhoneNumberMatch> numbers = IteratorUtils.toList(iterable.iterator());
for(PhoneNumberMatch phoneNumberMatch : numbers) {
String phoneNumber = String.valueOf(phoneNumberMatch.number().getNationalNumber());
Entity entity = new Entity();
entity.setText(phoneNumber);
entity.setType(ENTITY_TYPE);
entity.setConfidence(100.0);
entity.setContext(request.getContext());
entity.setExtractionDate(System.currentTimeMillis());
// TODO: Set the token-based span correctly.
entity.setSpan(new ai.idylnlp.model.entity.Span(0, 0));
entities.add(entity);
}
}
long extractionTime = (System.currentTimeMillis() - startTime);
EntityExtractionResponse response = new EntityExtractionResponse(entities, extractionTime, true);
return response;
}
}
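/*
 * Usage sketch: phone numbers are searched for in every region supported by
 * libphonenumber. How the EntityExtractionRequest is built from tokenized text
 * is assumed here.
 */
class PhoneNumberEntityRecognizerExample {
  static EntityExtractionResponse run(EntityExtractionRequest request) {
    final PhoneNumberEntityRecognizer recognizer = new PhoneNumberEntityRecognizer();
    return recognizer.extractEntities(request);
  }
}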
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-standard/1.1.0/ai/idylnlp/nlp
|
java-sources/ai/idylnlp/idylnlp-nlp-entity-recognizers-standard/1.1.0/ai/idylnlp/nlp/recognizer/RegularExpressionEntityRecognizer.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.recognizer;
import java.util.LinkedHashSet;
import java.util.Set;
import java.util.regex.Pattern;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import ai.idylnlp.model.entity.Entity;
import ai.idylnlp.model.exceptions.EntityFinderException;
import ai.idylnlp.model.nlp.ner.EntityExtractionRequest;
import ai.idylnlp.model.nlp.ner.EntityExtractionResponse;
import ai.idylnlp.model.nlp.ner.EntityRecognizer;
import com.neovisionaries.i18n.LanguageCode;
import opennlp.tools.namefind.RegexNameFinder;
import opennlp.tools.tokenize.Tokenizer;
import opennlp.tools.tokenize.WhitespaceTokenizer;
import opennlp.tools.util.Span;
/**
* An {@link EntityRecognizer} that identifies entities based
* on regular expressions.
*
* @author Mountain Fog, Inc.
*
*/
public class RegularExpressionEntityRecognizer implements EntityRecognizer {
private static final Logger LOGGER = LogManager.getLogger(RegularExpressionEntityRecognizer.class);
private Pattern pattern;
private String type;
/**
 * Creates a regular expression entity recognizer.
 * @param pattern The regular expression {@link Pattern pattern}. The pattern should
 * contain a capturing group around the text to extract.
 * @param type The type (class) of the entities to identify.
 */
public RegularExpressionEntityRecognizer(Pattern pattern, String type) {
this.pattern = pattern;
this.type = type;
}
/**
* {@inheritDoc}
*/
@Override
public EntityExtractionResponse extractEntities(EntityExtractionRequest request) throws EntityFinderException {
long startTime = System.currentTimeMillis();
Set<Entity> entities = new LinkedHashSet<>();
try {
// Pad each match with spaces so the whitespace tokenizer treats it as a separate token.
// This assumes the pattern contains a capturing group for the matched text.
final String text = StringUtils.join(request.getText(), " ").replaceAll(pattern.pattern(), " $1 ");
Pattern[] patterns = {pattern};
// This recognizer must use the WhitespaceTokenizer so the token offsets align with the padded text.
Tokenizer tokenizer = WhitespaceTokenizer.INSTANCE;
// tokenize the text into the required OpenNLP format
String[] tokens = tokenizer.tokenize(text);
//the values used in these Spans are string character offsets of each token from the sentence beginning
Span[] tokenPositionsWithinSentence = tokenizer.tokenizePos(text);
// Find the pattern matches in the tokenized text.
// The values in these Spans are NOT string character offsets; they are indices into the 'tokens' array.
RegexNameFinder regexNameFinder = new RegexNameFinder(patterns);
Span names[] = regexNameFinder.find(tokens);
//for each name that got found, create our corresponding occurrence
for (Span name : names) {
//find offsets relative to the start of the sentence
int beginningOfFirstWord = tokenPositionsWithinSentence[name.getStart()].getStart();
// -1 because the high end of a Span is noninclusive
int endOfLastWord = tokenPositionsWithinSentence[name.getEnd() - 1].getEnd();
//to get offsets relative to the document as a whole, just add the offset for the sentence itself
//int startOffsetInDoc = sentenceSpan.getStart() + beginningOfFirstWord;
//int endOffsetInDoc = sentenceSpan.getStart() + endOfLastWord;
//look back into the original input string to figure out what the text is that I got a hit on
String nameInDocument = text.substring(beginningOfFirstWord, endOfLastWord);
// Create a new entity object.
Entity entity = new Entity(nameInDocument, 100.0, type, LanguageCode.undefined.getAlpha3().toString());
entity.setSpan(new ai.idylnlp.model.entity.Span(name.getStart(), name.getEnd()));
entity.setContext(request.getContext());
entity.setExtractionDate(System.currentTimeMillis());
LOGGER.debug("Found entity with text: {}", nameInDocument);
// Add the entity to the list.
entities.add(entity);
LOGGER.trace("Found entity [{}] as a {} with span {}.", nameInDocument, type, name.toString());
}
long extractionTime = (System.currentTimeMillis() - startTime);
EntityExtractionResponse response = new EntityExtractionResponse(entities, extractionTime, true);
return response;
} catch (Exception ex) {
LOGGER.error("Unable to find entities with the RegularExpressionEntityRecognizer.", ex);
throw new EntityFinderException("Unable to find entities with the RegularExpressionEntityRecognizer.", ex);
}
}
}
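/*
 * Usage sketch: a pattern with a single capturing group (here, a simple US ZIP code
 * pattern chosen for illustration) extracts matches as entities of the given type.
 * How the EntityExtractionRequest is built is assumed here.
 */
class RegularExpressionEntityRecognizerExample {
  static EntityExtractionResponse run(EntityExtractionRequest request) throws EntityFinderException {
    final Pattern zipCode = Pattern.compile("(\\b\\d{5}(?:-\\d{4})?\\b)");
    final RegularExpressionEntityRecognizer recognizer =
        new RegularExpressionEntityRecognizer(zipCode, "zip-code");
    return recognizer.extractEntities(request);
  }
}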
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-features/1.1.0/ai/idylnlp/nlp
|
java-sources/ai/idylnlp/idylnlp-nlp-features/1.1.0/ai/idylnlp/nlp/features/BagOfWords.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.features;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.stream.IntStream;
import org.apache.commons.collections4.Bag;
import org.apache.commons.collections4.bag.HashBag;
import ai.idylnlp.nlp.utils.ngrams.NgramUtils;
/**
* A bag of words for a single document.
*
* @author Mountain Fog, Inc.
*
*/
public class BagOfWords {
private Bag<String> bag;
/**
* Creates a new bag of words from the given tokens. For
* best performance, the tokens should be pre-processed to
* be lowercase and free of stopwords.
* @param tokens The tokens.
*/
public BagOfWords(String[] tokens) {
bag = new HashBag<>();
for(String token : tokens) {
bag.add(token);
}
}
/**
* Creates a new bag of words from the given tokens. For
* best performance, the tokens should be pre-processed to
* be lowercase and free of stopwords.
* @param tokens The tokens.
* @param cutoff The minimum number of times a token
* must appear in order to be included in the bag.
*/
public BagOfWords(String[] tokens, int cutoff) {
bag = new HashBag<>();
for(String token : tokens) {
bag.add(token);
}
removeBelowMinimum(cutoff);
}
/**
* Creates a new bag of words from n-grams generated from
* the given tokens. For best performance, the tokens should
* be pre-processed to be lowercase and free of stopwords.
* @param tokens The tokens.
* @param cutoff The minimum number of times a token
* must appear in order to be included in the bag.
* @param ngramsLength The length of the n-grams. Must be
* greater than or equal to 2.
*/
public BagOfWords(String[] tokens, int cutoff, int ngramsLength) {
if(ngramsLength < 2) {
throw new IllegalArgumentException("Length of n-grams must be at least 2.");
}
bag = new HashBag<>();
final String[] ngrams = NgramUtils.getNgrams(tokens, ngramsLength);
for(String token : ngrams) {
bag.add(token);
}
removeBelowMinimum(cutoff);
}
/**
 * Normalizes a set of bags. For each unique token across all of the bags, the
 * token's per-bag counts are scaled to values between 0 and 1 that sum to 1.
 * @param bags A set of {@link BagOfWords bags}.
 * @return A map of tokens to their normalized per-bag values.
 */
public static Map<String, double[]> normalize(Set<BagOfWords> bags) {
Map<String, double[]> tokens = new HashMap<>();
Set<String> unique = new HashSet<>();
for(BagOfWords bag : bags) {
unique.addAll(bag.uniqueSet());
}
for(String token : unique) {
// Make an array the size of the number of bags.
int[] counts = new int[bags.size()];
int x = 0;
// Get the count of this token for each bag.
for(BagOfWords bag : bags) {
counts[x++] = bag.getCount(token);
}
int sum = IntStream.range(0, counts.length).map(i -> counts[i]).sum();
// Normalize the counts to sum to 1.
double[] normalized = new double[bags.size()];
IntStream.range(0, counts.length).forEach(i -> normalized[i] = counts[i] / (double) sum);
tokens.put(token, normalized);
}
return tokens;
}
public Set<String> uniqueSet() {
return bag.uniqueSet();
}
public int getCount(String token) {
return bag.getCount(token);
}
public int size() {
return bag.size();
}
public Iterator<String> iterator() {
return bag.iterator();
}
public boolean isEmpty() {
return bag.isEmpty();
}
public boolean contains(String token) {
return bag.contains(token);
}
public void clear() {
bag.clear();
}
private void removeBelowMinimum(int cutoff) {
bag.removeIf(item -> bag.getCount(item) < cutoff);
}
}
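/*
 * Usage sketch: two small documents are turned into bags and their per-token counts
 * are normalized across the bags. The tokens below are illustrative and already
 * lowercased, as the class documentation recommends.
 */
class BagOfWordsExample {
  public static void main(String[] args) {
    final BagOfWords first = new BagOfWords(new String[] {"the", "cat", "sat"});
    final BagOfWords second = new BagOfWords(new String[] {"the", "dog"});
    final Set<BagOfWords> bags = new HashSet<>();
    bags.add(first);
    bags.add(second);
    // "the" appears once in each bag, so its normalized values are {0.5, 0.5};
    // "cat" appears in only one bag, so its values are 1.0 and 0.0 (the column
    // order follows the set's iteration order).
    final Map<String, double[]> normalized = BagOfWords.normalize(bags);
    System.out.println(normalized.get("the")[0] + " " + normalized.get("the")[1]);
  }
}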
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-features/1.1.0/ai/idylnlp/nlp
|
java-sources/ai/idylnlp/idylnlp-nlp-features/1.1.0/ai/idylnlp/nlp/features/TFIDF.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.features;
import java.util.List;
/**
* Implementation of TF-IDF.
*
* Term Frequency - Inverse Document Frequency is a measure of how important a word is to a given
* document in relation to a collection of documents.
*
* Adapted from https://guendouz.wordpress.com/2015/02/17/implementation-of-tf-idf-in-java/.
*
* @author Mountain Fog, Inc.
*
*/
public class TFIDF {
/**
* Calculate the TF-IDF value for a given term.
*
* @param doc The document in question.
* @param docs A list of all documents.
* @param term The term.
* @return A value indicating the term's importance
* to the document.
*/
public double tfIdf(String[] doc, List<String[]> docs, String term) {
return tf(doc, term) * idf(docs, term);
}
private double tf(String[] doc, String term) {
double result = 0;
for (String word : doc) {
if (term.equalsIgnoreCase(word)) {
result++;
}
}
return result / doc.length;
}
private double idf(List<String[]> docs, String term) {
  double n = 0;
  for (String[] doc : docs) {
    for (String word : doc) {
      if (term.equalsIgnoreCase(word)) {
        n++;
        break;
      }
    }
  }
  // Guard against division by zero when the term does not appear in any document.
  if (n == 0) {
    return 0;
  }
  return Math.log(docs.size() / n);
}
}
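/*
 * Usage sketch with two tiny documents. For "cat": tf = 1/3 in the first document
 * and idf = ln(2/1) ≈ 0.693, so the TF-IDF score is approximately 0.231.
 */
class TFIDFExample {
  public static void main(String[] args) {
    final List<String[]> docs = new java.util.ArrayList<>();
    docs.add(new String[] {"the", "cat", "sat"});
    docs.add(new String[] {"the", "dog", "ran"});
    final TFIDF tfidf = new TFIDF();
    System.out.println(tfidf.tfIdf(docs.get(0), docs, "cat")); // ≈ 0.231
    System.out.println(tfidf.tfIdf(docs.get(0), docs, "the")); // 0.0 because ln(2/2) = 0
  }
}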
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-filters/1.1.0/ai/idylnlp/nlp/filters
|
java-sources/ai/idylnlp/idylnlp-nlp-filters/1.1.0/ai/idylnlp/nlp/filters/confidence/HeuristicConfidenceFilter.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.filters.confidence;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.math3.stat.descriptive.SynchronizedSummaryStatistics;
import org.apache.commons.math3.stat.inference.TTest;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import ai.idylnlp.model.nlp.ConfidenceFilter;
import ai.idylnlp.model.nlp.ConfidenceFilterSerializer;
/**
* Implementation of {@link ConfidenceFilter} that uses a T-test to
* determine if an entity should be filtered or not.
*
* @author Mountain Fog, Inc.
*
*/
public class HeuristicConfidenceFilter implements ConfidenceFilter {
private static final Logger LOGGER = LogManager.getLogger(HeuristicConfidenceFilter.class);
protected Map<String, SynchronizedSummaryStatistics> statistics = new HashMap<String, SynchronizedSummaryStatistics>();
private TTest ttest = new TTest();
private ConfidenceFilterSerializer serializer;
private int minSampleSize = 50;
private double alpha = 0.05;
private boolean dirty = false;
/**
 * Creates a new filter with the default minimum sample size and alpha.
 * @param serializer The {@link ConfidenceFilterSerializer} used to persist the confidence statistics.
 */
public HeuristicConfidenceFilter(ConfidenceFilterSerializer serializer) {
this.serializer = serializer;
}
/**
 * Creates a new filter.
 * @param serializer The {@link ConfidenceFilterSerializer} used to persist the confidence statistics.
 * @param minSampleSize The minimum sample size before the t-test is used
 * to filter the entities.
 * @param alpha The significance level for the test. 0.05 for 95% confidence is recommended.
 */
public HeuristicConfidenceFilter(ConfidenceFilterSerializer serializer, int minSampleSize, double alpha) {
this.serializer = serializer;
this.minSampleSize = minSampleSize;
this.alpha = alpha;
ttest = new TTest();
}
@Override
public boolean test(String modelId, double entityConfidence, double confidenceThreshold) {
SynchronizedSummaryStatistics confidences = statistics.get(modelId);
if(confidences == null) {
confidences = new SynchronizedSummaryStatistics();
statistics.put(modelId, confidences);
}
boolean filter = false;
if(entityConfidence >= confidenceThreshold) {
// If the entity's confidence meets or exceeds the threshold,
// always return the entity.
filter = true;
} else {
if(confidences.getN() >= minSampleSize) {
  // Null hypothesis: the entity's confidence is consistent with the model's
  // historical confidences (i.e. the population mean equals the entity's confidence).
  // tTest performs a two-sided test and returns true iff the null hypothesis can be
  // rejected with confidence 1 - alpha. To perform a one-sided test, use alpha * 2.
  // If the null hypothesis is rejected the entity is filtered out (false is returned);
  // otherwise the entity passes the filter (true is returned).
  filter = !ttest.tTest(entityConfidence, confidences, alpha * 2);
} else {
  // Not enough samples yet to make a statistical decision so return the entity.
  filter = true;
}
}
// Add this value to the statistics after doing the T-test.
confidences.addValue(entityConfidence);
// Mark it as dirty.
dirty = true;
return filter;
}
@Override
public int serialize() throws Exception {
dirty = false;
return serializer.serialize(statistics);
}
@Override
public int deserialize() throws Exception {
dirty = false;
return serializer.deserialize(statistics);
}
@Override
public void resetAll() {
statistics.clear();
}
@Override
public void reset(String modelId) {
final SynchronizedSummaryStatistics modelStatistics = statistics.get(modelId);
if(modelStatistics != null) {
  modelStatistics.clear();
}
}
@Override
public boolean isDirty() {
return dirty;
}
}
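/*
 * Usage sketch: the serializer is assumed to be provided by the caller (for example,
 * the LocalConfidenceFilterSerializer from the idylnlp-nlp-filters module). Until
 * minSampleSize confidences have been observed for a model, every entity passes;
 * after that, below-threshold entities are kept only when the t-test does not
 * reject them as out of line with the model's historical confidences.
 */
class HeuristicConfidenceFilterExample {
  static void demo(ConfidenceFilterSerializer serializer) throws Exception {
    final HeuristicConfidenceFilter filter = new HeuristicConfidenceFilter(serializer, 50, 0.05);
    final boolean keep = filter.test("model-1", 0.92, 0.80);
    System.out.println(keep); // true: the confidence meets the threshold.
    if (filter.isDirty()) {
      filter.serialize();
    }
  }
}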
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-filters/1.1.0/ai/idylnlp/nlp/filters
|
java-sources/ai/idylnlp/idylnlp-nlp-filters/1.1.0/ai/idylnlp/nlp/filters/confidence/SimpleConfidenceFilter.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.filters.confidence;
import ai.idylnlp.model.nlp.ConfidenceFilter;
/**
* An implementation of {@link ConfidenceFilter} that simply
* compares the entity confidence with the confidence threshold.
*
* @author Mountain Fog, Inc.
*
*/
public class SimpleConfidenceFilter implements ConfidenceFilter {
@Override
public boolean test(String modelId, double entityConfidence, double confidenceThreshold) {
  // The entity passes the filter when its confidence meets or exceeds the threshold.
  return entityConfidence >= confidenceThreshold;
}
@Override
public int serialize() throws Exception {
return 0;
}
@Override
public int deserialize() throws Exception {
return 0;
}
@Override
public void resetAll() {
}
@Override
public void reset(String modelId) {
}
@Override
public boolean isDirty() {
return false;
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-filters/1.1.0/ai/idylnlp/nlp/filters/confidence
|
java-sources/ai/idylnlp/idylnlp-nlp-filters/1.1.0/ai/idylnlp/nlp/filters/confidence/serializers/LocalConfidenceFilterSerializer.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.filters.confidence.serializers;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.Map;
import org.apache.commons.math3.stat.descriptive.SynchronizedSummaryStatistics;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import ai.idylnlp.model.nlp.ConfidenceFilterSerializer;
/**
* An implementation of {@link ConfidenceFilterSerializer} that serializes
* confidence values to the local disk.
*
* @author Mountain Fog, Inc.
*
*/
public class LocalConfidenceFilterSerializer implements ConfidenceFilterSerializer {
private static final Logger LOGGER = LogManager.getLogger(LocalConfidenceFilterSerializer.class);
private File serializedFile;
/**
* Creates a new {@link LocalConfidenceFilterSerializer} and sets
* the serialized filename to <code>confidences.dat</code>.
*/
public LocalConfidenceFilterSerializer() {
this.serializedFile = new File("confidences.dat");
}
/**
* Creates a new {@link LocalConfidenceFilterSerializer}.
* @param serializedFile The {@link File} to hold the serialized
* confidence values.
*/
public LocalConfidenceFilterSerializer(File serializedFile) {
this.serializedFile = serializedFile;
}
@Override
public int serialize(Map<String, SynchronizedSummaryStatistics> statistics) throws Exception {
serializedFile.createNewFile();
FileOutputStream fos = new FileOutputStream(serializedFile.getAbsolutePath());
ObjectOutputStream oos = new ObjectOutputStream(fos);
oos.writeObject(statistics);
oos.close();
fos.close();
LOGGER.info("Serialized confidence values for {} entity models to {}.", statistics.size(),
serializedFile.getAbsolutePath());
return statistics.size();
}
@SuppressWarnings("unchecked")
@Override
public int deserialize(Map<String, SynchronizedSummaryStatistics> statistics) throws Exception {
if(serializedFile.exists()) {
FileInputStream fis = new FileInputStream(serializedFile.getAbsolutePath());
ObjectInputStream ois = new ObjectInputStream(fis);
// Populate the caller's map; reassigning the parameter reference would not be visible to the caller.
final Map<String, SynchronizedSummaryStatistics> deserialized = (Map<String, SynchronizedSummaryStatistics>) ois.readObject();
statistics.clear();
statistics.putAll(deserialized);
ois.close();
fis.close();
LOGGER.info("Deserialized confidence values for {} entity models from {}.", statistics.size(),
serializedFile.getAbsolutePath());
return statistics.size();
} else {
return 0;
}
}
}
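// Example usage (a minimal sketch; "model-1" is an arbitrary model identifier and the
// file name matches the no-argument constructor's default):
//
// Map<String, SynchronizedSummaryStatistics> statistics = new java.util.HashMap<>();
// statistics.put("model-1", new SynchronizedSummaryStatistics());
// ConfidenceFilterSerializer serializer = new LocalConfidenceFilterSerializer(new File("confidences.dat"));
// serializer.serialize(statistics);
// serializer.deserialize(statistics);  // repopulates the map from confidences.dat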
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-language-detection-opennlp/1.1.0/ai/idylnlp/nlp/language
|
java-sources/ai/idylnlp/idylnlp-nlp-language-detection-opennlp/1.1.0/ai/idylnlp/nlp/language/opennlp/OpenNLPLanguageDetector.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.language.opennlp;
import java.io.IOException;
import java.io.InputStream;
import java.util.Collections;
import java.util.Comparator;
import java.util.LinkedList;
import java.util.List;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import ai.idylnlp.model.nlp.language.LanguageDetectionException;
import ai.idylnlp.model.nlp.language.LanguageDetectionResponse;
import ai.idylnlp.model.nlp.language.LanguageDetector;
import opennlp.tools.langdetect.Language;
import opennlp.tools.langdetect.LanguageDetectorME;
import opennlp.tools.langdetect.LanguageDetectorModel;
/**
* An implementation of {@link LanguageDetector} that
* uses Apache OpenNLP's language detector.
*
* @author Mountain Fog, Inc.
*
*/
public class OpenNLPLanguageDetector implements LanguageDetector {
private static final Logger LOGGER = LogManager.getLogger(OpenNLPLanguageDetector.class);
private opennlp.tools.langdetect.LanguageDetector detector;
public OpenNLPLanguageDetector() throws IOException {
InputStream in = ClassLoader.getSystemResourceAsStream("langdetect-183.bin");
LanguageDetectorModel m = new LanguageDetectorModel(in);
detector = new LanguageDetectorME(m);
in.close();
}
public OpenNLPLanguageDetector(InputStream in) throws IOException {
LanguageDetectorModel m = new LanguageDetectorModel(in);
detector = new LanguageDetectorME(m);
}
@Override
public LanguageDetectionResponse detectLanguage(String text, int limit) throws LanguageDetectionException {
List<Pair<String, Double>> pairs = new LinkedList<Pair<String, Double>>();
int count = 0;
for(Language language : detector.predictLanguages(text)) {
pairs.add(new ImmutablePair<String, Double>(language.getLang(), language.getConfidence()));
count++;
if(count == limit) {
break;
}
}
Collections.sort(pairs, new Comparator<Pair<String, Double>>() {
  @Override
  public int compare(Pair<String, Double> arg0, Pair<String, Double> arg1) {
    return Double.compare(arg1.getRight(), arg0.getRight());
  }
});
return new LanguageDetectionResponse(pairs);
}
}
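// Example usage (a minimal sketch; the no-argument constructor requires the
// langdetect-183.bin model to be available on the classpath):
//
// LanguageDetector detector = new OpenNLPLanguageDetector();
// LanguageDetectionResponse response = detector.detectLanguage("This is some English text.", 3);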
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-language-detection-tika/1.1.0/ai/idylnlp/nlp/language
|
java-sources/ai/idylnlp/idylnlp-nlp-language-detection-tika/1.1.0/ai/idylnlp/nlp/language/tika/TikaLanguageDetector.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.language.tika;
import java.util.LinkedList;
import java.util.List;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.tika.langdetect.OptimaizeLangDetector;
import org.apache.tika.language.detect.LanguageConfidence;
import org.apache.tika.language.detect.LanguageResult;
import ai.idylnlp.model.nlp.language.LanguageDetectionException;
import ai.idylnlp.model.nlp.language.LanguageDetectionResponse;
import ai.idylnlp.model.nlp.language.LanguageDetector;
/**
* An implementation of {@link LanguageDetector} that
* uses Apache Tika's language detector.
*
* @author Mountain Fog, Inc.
*
*/
public class TikaLanguageDetector implements LanguageDetector {
private static final Logger LOGGER = LogManager.getLogger(TikaLanguageDetector.class);
@Override
public LanguageDetectionResponse detectLanguage(String text, int limit) throws LanguageDetectionException {
List<Pair<String, Double>> pairs = new LinkedList<Pair<String, Double>>();
try {
org.apache.tika.language.detect.LanguageDetector languageDetector = new OptimaizeLangDetector().loadModels();
List<LanguageResult> languageResults = languageDetector.detectAll(text);
int x = 0;
for(LanguageResult languageResult : languageResults) {
final String code = languageResult.getLanguage();
double confidence = 0;
if(languageResult.getConfidence() == LanguageConfidence.HIGH) {
confidence = 0.9;
} else if(languageResult.getConfidence() == LanguageConfidence.MEDIUM) {
confidence = 0.6;
} else if(languageResult.getConfidence() == LanguageConfidence.LOW) {
confidence = 0.3;
} else if(languageResult.getConfidence() == LanguageConfidence.NONE) {
confidence = 0;
}
pairs.add(new ImmutablePair<String, Double>(code, confidence));
x++;
if(x == limit) {
break;
}
}
} catch (Exception ex) {
LOGGER.error("Unable to detect language for input: " + text, ex);
throw new LanguageDetectionException("Unable to detect language.");
}
return new LanguageDetectionResponse(pairs);
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-lemmatization/1.1.0/ai/idylnlp/nlp
|
java-sources/ai/idylnlp/idylnlp-nlp-lemmatization/1.1.0/ai/idylnlp/nlp/stemming/DefaultStemmer.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.stemming;
import ai.idylnlp.model.nlp.Stemmer;
import opennlp.tools.stemmer.snowball.SnowballStemmer;
import opennlp.tools.stemmer.snowball.SnowballStemmer.ALGORITHM;
public class DefaultStemmer implements Stemmer {
private opennlp.tools.stemmer.Stemmer stemmer;
public DefaultStemmer() {
stemmer = new SnowballStemmer(ALGORITHM.ENGLISH);
}
@Override
public String stem(String text) {
return stemmer.stem(text).toString();
}
}
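// Example usage (a minimal sketch):
//
// Stemmer stemmer = new DefaultStemmer();
// stemmer.stem("running");  // "run" with the English Snowball stemmer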
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-sanitizers/1.1.0/ai/idylnlp/nlp/entity
|
java-sources/ai/idylnlp/idylnlp-nlp-sanitizers/1.1.0/ai/idylnlp/nlp/entity/sanitizers/DefaultEntitySanitizer.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.entity.sanitizers;
import java.util.Set;
import ai.idylnlp.model.entity.Entity;
import ai.idylnlp.model.nlp.EntitySanitizer;
public class DefaultEntitySanitizer implements EntitySanitizer {
/**
 * Sanitizes the entities by removing punctuation from the entity text.
*
* @param entities A collection of {@link Entity} objects.
* @return A collection of sanitized {@link Entity} objects. This collection
* will be equal in size to the input collection.
*/
@Override
public Set<Entity> sanitizeEntities(Set<Entity> entities) {
for(Entity entity : entities) {
if(entity.getText().endsWith(",") || entity.getText().endsWith(".")) {
entity.setText(entity.getText().substring(0, entity.getText().length() - 1));
}
// Replace all punctuation.
entity.setText(entity.getText().replaceAll("\\p{P}", ""));
}
return entities;
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-sanitizers/1.1.0/ai/idylnlp/nlp/sentence
|
java-sources/ai/idylnlp/idylnlp-nlp-sanitizers/1.1.0/ai/idylnlp/nlp/sentence/sanitizers/DefaultSentenceSanitizer.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.sentence.sanitizers;
import org.apache.commons.lang3.StringUtils;
import ai.idylnlp.model.nlp.SentenceSanitizer;
/**
* Default implementation of {@link SentenceSanitizer}.
*
* @author Mountain Fog, Inc.
*
*/
public class DefaultSentenceSanitizer implements SentenceSanitizer {
private boolean removePunctuation = false;
private boolean lowerCase = false;
private boolean consolidateSpaces = false;
/**
* Use the builder.
*/
private DefaultSentenceSanitizer() {
}
public static class Builder {
private boolean removePunctuation = false;
private boolean lowerCase = false;
private boolean consolidateSpaces = false;
/**
 * Configures the sanitizer to remove all punctuation from the text.
 * @return This {@link Builder}.
*/
public Builder removePunctuation() {
this.removePunctuation = true;
return this;
}
/**
 * Configures the sanitizer to lowercase the text.
 * @return This {@link Builder}.
*/
public Builder lowerCase() {
this.lowerCase = true;
return this;
}
/**
 * Configures the sanitizer to replace all consecutive spaces with a single space.
 * @return This {@link Builder}.
*/
public Builder consolidateSpaces() {
this.consolidateSpaces = true;
return this;
}
/**
* Builds the sentence sanitizer.
* @return A configured {@link SentenceSanitizer}.
*/
public SentenceSanitizer build() {
DefaultSentenceSanitizer sanitizer = new DefaultSentenceSanitizer();
sanitizer.removePunctuation = removePunctuation;
sanitizer.lowerCase = lowerCase;
sanitizer.consolidateSpaces = consolidateSpaces;
return sanitizer;
}
}
@Override
public String sanitize(String sentence) {
if(lowerCase) {
sentence = sentence.toLowerCase();
}
if(removePunctuation) {
sentence = sentence.replaceAll("\\p{Punct}+", "");
}
if(consolidateSpaces) {
sentence = StringUtils.normalizeSpace(sentence);
}
return sentence;
}
}
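// Example usage (a minimal sketch; the input string is arbitrary sample text):
//
// SentenceSanitizer sanitizer = new DefaultSentenceSanitizer.Builder()
//     .removePunctuation()
//     .lowerCase()
//     .consolidateSpaces()
//     .build();
// sanitizer.sanitize("Hello,   World!");  // "hello world"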
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-sentence-detection/1.1.0/ai/idylnlp/nlp
|
java-sources/ai/idylnlp/idylnlp-nlp-sentence-detection/1.1.0/ai/idylnlp/nlp/sentence/BreakIteratorSentenceDetector.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.sentence;
import java.text.BreakIterator;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
import com.neovisionaries.i18n.LanguageCode;
import ai.idylnlp.model.nlp.SentenceDetector;
import ai.idylnlp.model.nlp.Span;
public class BreakIteratorSentenceDetector implements SentenceDetector {
private BreakIterator breakIterator;
public BreakIteratorSentenceDetector(String languageCode) {
Locale locale = new Locale.Builder().setLanguage(languageCode).build();
breakIterator = BreakIterator.getSentenceInstance(locale);
}
public BreakIteratorSentenceDetector(LanguageCode languageCode) {
breakIterator = BreakIterator.getSentenceInstance(languageCode.toLocale());
}
/**
* Creates a sentence detector.
*
* @param locale
* The {@link Locale} for the sentence detector.
*/
public BreakIteratorSentenceDetector(Locale locale) {
breakIterator = BreakIterator.getSentenceInstance(locale);
}
@Override
public List<String> getLanguageCodes() {
List<String> languageCodes = new LinkedList<>();
for(Locale locale : BreakIterator.getAvailableLocales()) {
languageCodes.add(LanguageCode.getByLocale(locale).getAlpha3().toString());
}
return languageCodes;
}
@Override
public String[] sentDetect(String s) {
return Span.spansToStrings(sentPosDetect(s), s);
}
@Override
public Span[] sentPosDetect(String s) {
List<Span> sentences = new ArrayList<>();
breakIterator.setText(s);
int lastIndex = breakIterator.first();
while (lastIndex != BreakIterator.DONE) {
int firstIndex = lastIndex;
lastIndex = breakIterator.next();
if (lastIndex != BreakIterator.DONE) {
sentences.add(new Span(firstIndex, lastIndex));
}
}
return sentences.toArray(new Span[sentences.size()]);
}
}
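// Example usage (a minimal sketch):
//
// SentenceDetector detector = new BreakIteratorSentenceDetector(Locale.ENGLISH);
// String[] sentences = detector.sentDetect("This is one sentence. This is another.");
// // yields the two sentences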
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-sentence-detection/1.1.0/ai/idylnlp/nlp
|
java-sources/ai/idylnlp/idylnlp-nlp-sentence-detection/1.1.0/ai/idylnlp/nlp/sentence/ModelSentenceDetector.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.sentence;
import java.util.Arrays;
import java.util.List;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import ai.idylnlp.opennlp.custom.utils.SpansToSpans;
import com.neovisionaries.i18n.LanguageCode;
import ai.idylnlp.model.exceptions.ModelLoaderException;
import ai.idylnlp.model.nlp.SentenceDetector;
import ai.idylnlp.model.nlp.Span;
import opennlp.tools.sentdetect.SentenceDetectorME;
import opennlp.tools.sentdetect.SentenceModel;
/**
* An implementation of a {@link SentenceDetector}.
*
* @author Mountain Fog, Inc.
*
*/
public class ModelSentenceDetector implements SentenceDetector {
private static final Logger LOGGER = LogManager.getLogger(ModelSentenceDetector.class);
private SentenceDetectorME sentenceDetector;
private LanguageCode languageCode;
/**
* Creates a new sentence detector.
* @param modelLoader A {@link SentenceDetectorModelLoader}.
 * @throws ModelLoaderException Thrown if the model cannot be loaded or read.
*/
public ModelSentenceDetector(SentenceDetectorModelLoader modelLoader) throws ModelLoaderException {
LOGGER.debug("Using sentence model directory: " + modelLoader.getModelDirectory());
LOGGER.debug("Using sentence model file: " + modelLoader.getModelManifest().getModelFileName());
// Load the model.
SentenceModel model = modelLoader.getModel(modelLoader.getModelManifest(), SentenceModel.class);
this.sentenceDetector = new SentenceDetectorME(model);
this.languageCode = modelLoader.getModelManifest().getLanguageCode();
}
public ModelSentenceDetector(SentenceModel sentenceModel, LanguageCode languageCode) throws ModelLoaderException {
this.sentenceDetector = new SentenceDetectorME(sentenceModel);
this.languageCode = languageCode;
}
@Override
public List<String> getLanguageCodes() {
return Arrays.asList(languageCode.getAlpha3().toString());
}
@Override
public Span[] sentPosDetect(String text) {
opennlp.tools.util.Span[] sentenceSpans = sentenceDetector.sentPosDetect(text);
return SpansToSpans.toSpans(sentenceSpans);
}
@Override
public String[] sentDetect(String text) {
String[] sentences = sentenceDetector.sentDetect(text);
return sentences;
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-sentence-detection/1.1.0/ai/idylnlp/nlp
|
java-sources/ai/idylnlp/idylnlp-nlp-sentence-detection/1.1.0/ai/idylnlp/nlp/sentence/SegmentedSentenceDetector.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.sentence;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.neovisionaries.i18n.LanguageCode;
import ai.idylnlp.model.nlp.SentenceDetector;
import ai.idylnlp.model.nlp.Span;
import net.loomchild.segment.TextIterator;
import net.loomchild.segment.srx.SrxDocument;
import net.loomchild.segment.srx.SrxParser;
import net.loomchild.segment.srx.SrxTextIterator;
import net.loomchild.segment.srx.io.Srx2SaxParser;
/**
* An implementation of {@link SentenceDetector} that performs
* sentence detection using segmentation.
*
* @author Mountain Fog, Inc.
*
*/
public class SegmentedSentenceDetector implements SentenceDetector {
private LanguageCode languageCode;
private SrxDocument srxDocument;
public SegmentedSentenceDetector(String srx, LanguageCode languageCode) throws UnsupportedEncodingException {
this.languageCode = languageCode;
final InputStream inputStream = new ByteArrayInputStream(srx.getBytes(StandardCharsets.UTF_8));
BufferedReader srxReader = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8));
Map<String, Object> parserParameters = new HashMap<>();
parserParameters.put(Srx2SaxParser.VALIDATE_PARAMETER, true);
SrxParser srxParser = new Srx2SaxParser(parserParameters);
srxDocument = srxParser.parse(srxReader);
}
@Override
public List<String> getLanguageCodes() {
return Arrays.asList(languageCode.getAlpha3().toString());
}
@Override
public String[] sentDetect(String s) {
return Span.spansToStrings(sentPosDetect(s), s);
}
@Override
public Span[] sentPosDetect(String s) {
List<Span> spans = new ArrayList<>();
List<String> sentences = tokenize(s);
for(String sentence : sentences) {
String trimmedSentence = sentence.trim();
final int start = s.indexOf(trimmedSentence);
Span span = new Span(start, start + trimmedSentence.length());
spans.add(span);
}
return spans.toArray(new Span[spans.size()]);
}
private List<String> tokenize(String text) {
List<String> segments = new ArrayList<>();
TextIterator textIterator = new SrxTextIterator(srxDocument, languageCode.getAlpha3().toString(), text);
while(textIterator.hasNext()) {
segments.add(textIterator.next().trim());
}
return segments;
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-sentence-detection/1.1.0/ai/idylnlp/nlp
|
java-sources/ai/idylnlp/idylnlp-nlp-sentence-detection/1.1.0/ai/idylnlp/nlp/sentence/SentenceDetectorModelLoader.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.sentence;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import ai.idylnlp.opennlp.custom.modelloader.ModelLoader;
import ai.idylnlp.zoo.IdylNLPModelZoo;
import ai.idylnlp.model.ModelValidator;
import ai.idylnlp.model.manifest.StandardModelManifest;
import opennlp.tools.sentdetect.SentenceModel;
/**
* Model loader for sentence detection models.
*
* @author Mountain Fog, Inc.
*
*/
public class SentenceDetectorModelLoader extends ModelLoader<SentenceModel> {
private static final Logger LOGGER = LogManager.getLogger(SentenceDetectorModelLoader.class);
private StandardModelManifest modelManifest;
/**
* Create a new {@link SentenceDetectorModelLoader}.
* @param modelValidator A {@link ModelValidator} to validate the model.
* @param modelDirectory The full path to the directory containing the model.
* @param modelManifest The model's {@link StandardModelManifest}.
*/
public SentenceDetectorModelLoader(ModelValidator modelValidator, String modelDirectory, StandardModelManifest modelManifest) {
super(modelValidator);
super.setModelDirectory(modelDirectory);
this.modelManifest = modelManifest;
}
/**
* Create a new {@link SentenceDetectorModelLoader}.
* @param modelValidator A {@link ModelValidator} to validate the model.
* @param modelDirectory The full path to the directory containing the model.
* @param modelManifest The model's {@link StandardModelManifest}.
* @param idylNlpModelZoo A {@link IdylNLPModelZoo} client.
*/
public SentenceDetectorModelLoader(ModelValidator modelValidator, String modelDirectory, StandardModelManifest modelManifest,
IdylNLPModelZoo idylNlpModelZoo) {
super(modelValidator);
super.setModelDirectory(modelDirectory);
super.setIdylNLPModelZoo(idylNlpModelZoo);
this.modelManifest = modelManifest;
}
/**
* Gets the full path to the model.
* @return The full path (directory and filename) of the model.
*/
public String getFullModelPath() {
return super.getModelDirectory() + modelManifest.getModelFileName();
}
/**
* Gets the model's manifest.
 * @return The model's {@link StandardModelManifest}.
*/
public StandardModelManifest getModelManifest() {
return modelManifest;
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-sentence-detection/1.1.0/ai/idylnlp/nlp
|
java-sources/ai/idylnlp/idylnlp-nlp-sentence-detection/1.1.0/ai/idylnlp/nlp/sentence/SimpleSentenceDetector.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.sentence;
import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import org.apache.commons.collections4.CollectionUtils;
import ai.idylnlp.model.nlp.SentenceDetector;
import ai.idylnlp.model.nlp.Span;
/**
* An implementation of {@link SentenceDetector} that identifies
* sentences based on the presence of periods. Usage is not
* generally recommended.
*
* @author Mountain Fog, Inc.
*
*/
public class SimpleSentenceDetector implements SentenceDetector {
@Override
public List<String> getLanguageCodes() {
return Collections.emptyList();
}
@Override
public Span[] sentPosDetect(String text) {
List<Span> spans = new LinkedList<Span>();
// Split on literal periods; an unescaped "." is a regular expression that matches any character.
List<String> sentences = Arrays.asList(text.split("\\."));
if(CollectionUtils.isEmpty(sentences)) {
  spans.add(new Span(0, text.length()));
} else {
  int start = 0;
  for(String sentence : sentences) {
    // The split consumes the trailing period, so extend the span to cover it when present.
    int end = Math.min(start + sentence.length() + 1, text.length());
    spans.add(new Span(start, end));
    start = end;
  }
}
return spans.toArray(new Span[spans.size()]);
}
@Override
public String[] sentDetect(String text) {
return Span.spansToStrings(sentPosDetect(text), text);
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-tokenizers/1.1.0/ai/idylnlp/nlp
|
java-sources/ai/idylnlp/idylnlp-nlp-tokenizers/1.1.0/ai/idylnlp/nlp/tokenizers/BreakIteratorTokenizer.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.tokenizers;
import java.text.BreakIterator;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
import org.apache.commons.lang3.NotImplementedException;
import com.neovisionaries.i18n.LanguageCode;
import ai.idylnlp.model.nlp.Span;
import ai.idylnlp.model.nlp.Stemmer;
import ai.idylnlp.model.nlp.Tokenizer;
/**
* A {@link Tokenizer} that uses a {@link BreakIterator}.
*
* @author Mountain Fog, Inc.
*
*/
public class BreakIteratorTokenizer implements Tokenizer {
private BreakIterator breakIterator;
public BreakIteratorTokenizer(String languageCode) {
Locale locale = new Locale.Builder().setLanguage(languageCode).build();
breakIterator = BreakIterator.getWordInstance(locale);
}
public BreakIteratorTokenizer(LanguageCode languageCode) {
breakIterator = BreakIterator.getWordInstance(languageCode.toLocale());
}
/**
* Creates a tokenizer.
*
* @param locale The {@link Locale} for the tokenizer.
*/
public BreakIteratorTokenizer(Locale locale) {
breakIterator = BreakIterator.getWordInstance(locale);
}
@Override
public List<String> getLanguageCodes() {
List<String> languageCodes = new LinkedList<>();
for(Locale locale : BreakIterator.getAvailableLocales()) {
languageCodes.add(LanguageCode.getByLocale(locale).getAlpha3().toString());
}
return languageCodes;
}
@Override
public String[] tokenize(String s) {
return Span.spansToStrings(tokenizePos(s), s);
}
@Override
public Span[] tokenizePos(String d) {
List<Span> tokens = new ArrayList<>();
breakIterator.setText(d);
int lastIndex = breakIterator.first();
while (lastIndex != BreakIterator.DONE) {
int firstIndex = lastIndex;
lastIndex = breakIterator.next();
if (lastIndex != BreakIterator.DONE
&& Character.isLetterOrDigit(d.charAt(firstIndex))) {
tokens.add(new Span(firstIndex, lastIndex));
}
}
return tokens.toArray(new Span[tokens.size()]);
}
@Override
public String[] tokenize(String s, Stemmer stemmer) {
// TODO: Implement this.
throw new NotImplementedException("Not yet implemented.");
}
}
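// Example usage (a minimal sketch):
//
// Tokenizer tokenizer = new BreakIteratorTokenizer(Locale.ENGLISH);
// tokenizer.tokenize("Hello world.");  // typically ["Hello", "world"] - punctuation-only tokens are skipped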
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-tokenizers/1.1.0/ai/idylnlp/nlp
|
java-sources/ai/idylnlp/idylnlp-nlp-tokenizers/1.1.0/ai/idylnlp/nlp/tokenizers/ModelTokenizer.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.tokenizers;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;
import java.util.List;
import com.neovisionaries.i18n.LanguageCode;
import ai.idylnlp.model.exceptions.ModelLoaderException;
import ai.idylnlp.model.nlp.Span;
import ai.idylnlp.model.nlp.Stemmer;
import ai.idylnlp.model.nlp.Tokenizer;
import ai.idylnlp.opennlp.custom.utils.SpansToSpans;
import opennlp.tools.tokenize.TokenizerME;
import opennlp.tools.tokenize.TokenizerModel;
/**
* A tokenizer that tokenizes using a maximum entropy trained model.
* This tokenizer is a pass-through to OpenNLP's {@link TokenizerME}.
*
 * The {@link TokenizerME}, and hence this class, is not thread-safe.
*
* @author Mountain Fog, Inc.
*
*/
public class ModelTokenizer implements Tokenizer {
private TokenizerME tokenizer;
private LanguageCode languageCode;
/**
* Creates a new model tokenizer.
* @param modelInputStream The {@link InputStream stream} containing the model.
* @param languageCode The {@link LanguageCode} for this tokenizer.
* @throws IOException Thrown if the token model cannot be loaded.
* @throws ModelLoaderException
*/
public ModelTokenizer(InputStream modelInputStream, LanguageCode languageCode) throws ModelLoaderException {
this.languageCode = languageCode;
try {
final TokenizerModel tokenModel = new TokenizerModel(modelInputStream);
tokenizer = new TokenizerME(tokenModel);
modelInputStream.close();
} catch (IOException ex) {
throw new ModelLoaderException("Unable to load token model.", ex);
}
}
/**
* Creates a new model tokenizer.
* @param tokenModel A {@link TokenizerModel} for this tokenizer.
* @param languageCode The {@link LanguageCode} for this tokenizer.
*/
public ModelTokenizer(TokenizerModel tokenModel, LanguageCode languageCode) {
this.languageCode = languageCode;
this.tokenizer = new TokenizerME(tokenModel);
}
@Override
public List<String> getLanguageCodes() {
return Arrays.asList(languageCode.getAlpha3().toString());
}
@Override
public String[] tokenize(String s) {
return tokenizer.tokenize(s);
}
@Override
public Span[] tokenizePos(String s) {
opennlp.tools.util.Span[] tokenSpans = tokenizer.tokenizePos(s);
return SpansToSpans.toSpans(tokenSpans);
}
@Override
public String[] tokenize(String s, Stemmer stemmer) {
String[] tokens = tokenizer.tokenize(s);
for (int i = 0; i < tokens.length; i++) {
tokens[i] = stemmer.stem(tokens[i]);
}
return tokens;
}
}
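// Example usage (a minimal sketch; assumes "en-token.bin" is an OpenNLP English
// tokenizer model available on disk):
//
// Tokenizer tokenizer = new ModelTokenizer(new FileInputStream("en-token.bin"), LanguageCode.en);
// String[] tokens = tokenizer.tokenize("The quick brown fox jumps over the lazy dog.");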
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-tokenizers/1.1.0/ai/idylnlp/nlp
|
java-sources/ai/idylnlp/idylnlp-nlp-tokenizers/1.1.0/ai/idylnlp/nlp/tokenizers/WhitespaceTokenizer.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.tokenizers;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import ai.idylnlp.model.nlp.Span;
import ai.idylnlp.model.nlp.Stemmer;
import ai.idylnlp.model.nlp.Tokenizer;
import opennlp.tools.util.StringUtil;
/**
* This tokenizer uses whitespace to tokenize the input text.
*
* To obtain an instance of this tokenizer use the static final
* <code>INSTANCE</code> field.
*/
public class WhitespaceTokenizer implements Tokenizer {
public static final WhitespaceTokenizer INSTANCE = new WhitespaceTokenizer();
private WhitespaceTokenizer() {
}
@Override
public List<String> getLanguageCodes() {
// This tokenizer is not language-dependent so return an empty list.
return Collections.emptyList();
}
@Override
public String[] tokenize(String s) {
return Span.spansToStrings(tokenizePos(s), s);
}
@Override
public String[] tokenize(String s, Stemmer stemmer) {
String[] tokens = tokenize(s);
for (int i = 0; i < tokens.length; i++) {
tokens[i] = stemmer.stem(tokens[i]);
}
return tokens;
}
@Override
public Span[] tokenizePos(String d) {
int tokStart = -1;
List<Span> tokens = new ArrayList<>();
boolean inTok = false;
// gather up potential tokens
int end = d.length();
for (int i = 0; i < end; i++) {
if (StringUtil.isWhitespace(d.charAt(i))) {
if (inTok) {
tokens.add(new Span(tokStart, i));
inTok = false;
tokStart = -1;
}
} else {
if (!inTok) {
tokStart = i;
inTok = true;
}
}
}
if (inTok) {
tokens.add(new Span(tokStart, end));
}
return tokens.toArray(new Span[tokens.size()]);
}
}
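// Example usage (a minimal sketch):
//
// WhitespaceTokenizer.INSTANCE.tokenize("The quick  brown fox");  // ["The", "quick", "brown", "fox"]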
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-translation-joshua/1.1.0/ai/idylnlp/nlp
|
java-sources/ai/idylnlp/idylnlp-nlp-translation-joshua/1.1.0/ai/idylnlp/nlp/translation/JoshuaTranslator.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.translation;
import java.io.IOException;
import java.util.List;
import org.apache.joshua.decoder.Decoder;
import org.apache.joshua.decoder.JoshuaConfiguration;
import org.apache.joshua.decoder.StructuredTranslation;
import org.apache.joshua.decoder.Translation;
import org.apache.joshua.decoder.segment_file.Sentence;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import ai.idylnlp.model.nlp.translation.LanguageTranslationRequest;
import ai.idylnlp.model.nlp.translation.LanguageTranslationResponse;
import ai.idylnlp.model.nlp.translation.Translator;
/**
* Implementation of {@link Translator} that uses Apache Joshua
* to perform translation of natural language text.
*
* @author Mountain Fog, Inc.
*
*/
public class JoshuaTranslator implements Translator {
private static final Logger LOGGER = LogManager.getLogger(JoshuaTranslator.class);
private Decoder decoder;
private int counter = 0;
/**
* Creates a new translator.
* @param joshuaLanguagePackPath The full path to the Apache Joshua language pack.
* The joshua.config file is expected to be located in this path.
* @throws IOException Thrown if the language pack cannot be loaded.
*/
public JoshuaTranslator(final String joshuaLanguagePackPath) throws IOException {
LOGGER.info("Initialize Apache Joshua translator from {}.", joshuaLanguagePackPath);
String deEnJoshuaConfigFile = joshuaLanguagePackPath + "/joshua.config";
JoshuaConfiguration deEnConf = new JoshuaConfiguration();
deEnConf.readConfigFile(deEnJoshuaConfigFile);
deEnConf.use_structured_output = true;
deEnConf.modelRootPath = joshuaLanguagePackPath;
decoder = new Decoder(deEnConf, deEnJoshuaConfigFile);
}
@Override
public LanguageTranslationResponse translate(LanguageTranslationRequest request) {
final String input = request.getInput();
Sentence sentence = new Sentence(input, counter++, decoder.getJoshuaConfiguration());
Translation translation = decoder.decode(sentence);
List<StructuredTranslation> structuredTranslations = translation.getStructuredTranslations();
StringBuilder sb = new StringBuilder();
for (StructuredTranslation st : structuredTranslations) {
sb.append(st.getTranslationString());
}
return new LanguageTranslationResponse(sb.toString());
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-utils/1.1.0/ai/idylnlp/nlp
|
java-sources/ai/idylnlp/idylnlp-nlp-utils/1.1.0/ai/idylnlp/nlp/utils/EnglishStopWordRemover.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.utils;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Set;
import ai.idylnlp.model.nlp.language.StopWordRemover;
/**
* Class to remove English stop words from a given text.
*
* @author Mountain Fog, Inc.
*
*/
public class EnglishStopWordRemover implements StopWordRemover {
private Set<String> words;
public EnglishStopWordRemover() {
// List linked from Wikipedia article on stop words: http://www.textfixer.com/resources/common-english-words.txt
String stopWords = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,new,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your";
words = new HashSet<String>(Arrays.asList(stopWords.split(",")));
}
/**
* {@inheritDoc}
*/
@Override
public boolean isStopWord(String input) {
return words.contains(input.trim().toLowerCase());
}
/**
* {@inheritDoc}
*/
@Override
public Collection<String> removeStopWords(Collection<String> input) {
Collection<String> stemmedWords = new LinkedList<String>();
for(String word : input) {
if(words.contains(word.toLowerCase()) == false) {
stemmedWords.add(word.replace(".", "").replace(",", ""));
}
}
return stemmedWords;
}
}
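// Example usage (a minimal sketch):
//
// StopWordRemover remover = new EnglishStopWordRemover();
// remover.isStopWord("The");  // true
// remover.removeStopWords(Arrays.asList("the", "quick", "brown", "fox"));  // ["quick", "brown", "fox"]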
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-utils/1.1.0/ai/idylnlp/nlp
|
java-sources/ai/idylnlp/idylnlp-nlp-utils/1.1.0/ai/idylnlp/nlp/utils/SpanUtils.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.utils;
import java.util.Arrays;
import java.util.Collections;
import opennlp.tools.tokenize.Tokenizer;
import opennlp.tools.util.Span;
/**
* Utility functions for OpenNLP's {@link Span}.
*
* @author Mountain Fog, Inc.
*
*/
public class SpanUtils {
private SpanUtils() {
// This is a utility class.
}
/**
* Gets a {@link Span span} for an entity in a text.
* @param tokenizer The {@link Tokenizer tokenizer}.
* @param entity The text of the entity.
* @param text The text containing the entity.
* @return A {@link Span span} of the entity in the text, or <code>null</code> if no span is found.
*/
public static Span getSpan(Tokenizer tokenizer, String entity, String text) {
// TODO: If the entity appears more than once in the text only the first one will be found.
// Tokenize the entity.
final String entityTokenizerLine[] = tokenizer.tokenize(entity);
// Tokenize the text.
final String whitespaceTokenizerLine[] = tokenizer.tokenize(text);
// Find the entity tokens in the text tokens.
final int start = Collections.indexOfSubList(Arrays.asList(whitespaceTokenizerLine), Arrays.asList(entityTokenizerLine));
if(start > -1) {
int end = start + entityTokenizerLine.length;
return new Span(start, end);
} else {
return null;
}
}
}
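// Example usage (a minimal sketch using OpenNLP's whitespace tokenizer; note that the
// returned span holds token indices, not character offsets):
//
// Span span = SpanUtils.getSpan(opennlp.tools.tokenize.WhitespaceTokenizer.INSTANCE,
//     "John Smith", "My name is John Smith .");
// // span = [3..5) - the entity starts at token 3 and ends before token 5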
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-utils/1.1.0/ai/idylnlp/nlp/utils
|
java-sources/ai/idylnlp/idylnlp-nlp-utils/1.1.0/ai/idylnlp/nlp/utils/distance/CosineDistance.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.utils.distance;
import ai.idylnlp.model.nlp.strings.Similarity;
/**
* Calculates cosine distance.
*
* @author Mountain Fog, Inc.
*
*/
public class CosineDistance implements Similarity {
public static Similarity INSTANCE() {
return new CosineDistance();
}
@Override
public double calculate(CharSequence s, CharSequence t) {
org.apache.commons.text.similarity.CosineDistance distance = new org.apache.commons.text.similarity.CosineDistance();
return distance.apply(s, t);
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-utils/1.1.0/ai/idylnlp/nlp/utils
|
java-sources/ai/idylnlp/idylnlp-nlp-utils/1.1.0/ai/idylnlp/nlp/utils/distance/FuzzyScoreDistance.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.utils.distance;
import java.util.Locale;
import org.apache.commons.text.similarity.FuzzyScore;
import ai.idylnlp.model.nlp.strings.Distance;
/**
* Calculates fuzzy score distances.
*
* @author Mountain Fog, Inc.
*
*/
public class FuzzyScoreDistance implements Distance {
private Locale locale;
public static Distance INSTANCE(Locale locale) {
return new FuzzyScoreDistance(locale);
}
/**
* Creates a new instance.
 * @param locale The {@link Locale} (used to normalize strings to lower case).
*/
public FuzzyScoreDistance(Locale locale) {
this.locale = locale;
}
@Override
public double calculate(CharSequence s, CharSequence t) {
FuzzyScore distance = new FuzzyScore(locale);
return distance.fuzzyScore(s, t);
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-utils/1.1.0/ai/idylnlp/nlp/utils
|
java-sources/ai/idylnlp/idylnlp-nlp-utils/1.1.0/ai/idylnlp/nlp/utils/distance/JaccardDistance.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.utils.distance;
import ai.idylnlp.model.nlp.strings.Distance;
/**
* Calculates Jaccard distance.
*
* @author Mountain Fog, Inc.
*
*/
public class JaccardDistance implements Distance {
public static Distance INSTANCE() {
return new JaccardDistance();
}
@Override
public double calculate(CharSequence s, CharSequence t) {
org.apache.commons.text.similarity.JaccardDistance distance = new org.apache.commons.text.similarity.JaccardDistance();
return distance.apply(s, t);
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-utils/1.1.0/ai/idylnlp/nlp/utils
|
java-sources/ai/idylnlp/idylnlp-nlp-utils/1.1.0/ai/idylnlp/nlp/utils/distance/JaccardSimilarity.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.utils.distance;
import ai.idylnlp.model.nlp.strings.Similarity;
/**
* Calculates Jaccard similarity.
*
* @author Mountain Fog, Inc.
*
*/
public class JaccardSimilarity implements Similarity {
public static Similarity INSTANCE() {
return new JaccardSimilarity();
}
@Override
public double calculate(CharSequence s, CharSequence t) {
org.apache.commons.text.similarity.JaccardSimilarity similarity = new org.apache.commons.text.similarity.JaccardSimilarity();
return similarity.apply(s, t);
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-utils/1.1.0/ai/idylnlp/nlp/utils
|
java-sources/ai/idylnlp/idylnlp-nlp-utils/1.1.0/ai/idylnlp/nlp/utils/distance/LevenshteinDistance.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.utils.distance;
import ai.idylnlp.model.nlp.strings.Distance;
/**
* Calculates Levenshtein distance.
*
* @author Mountain Fog, Inc.
*
*/
public class LevenshteinDistance implements Distance {
public static Distance INSTANCE() {
return new LevenshteinDistance();
}
@Override
public double calculate(CharSequence s, CharSequence t) {
org.apache.commons.text.similarity.LevenshteinDistance distance = new org.apache.commons.text.similarity.LevenshteinDistance();
return distance.apply(s, t);
}
}
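// Example usage (a minimal sketch; the same pattern applies to the other Distance
// and Similarity wrappers in this package):
//
// Distance distance = LevenshteinDistance.INSTANCE();
// distance.calculate("kitten", "sitting");  // 3.0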
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-utils/1.1.0/ai/idylnlp/nlp/utils
|
java-sources/ai/idylnlp/idylnlp-nlp-utils/1.1.0/ai/idylnlp/nlp/utils/ngrams/NgramIterator.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.utils.ngrams;
import java.util.Iterator;
/**
* An implementation of {@link Iterator} that produces N-Grams.
*
* @author Mountain Fog, Inc.
*
*/
public class NgramIterator implements Iterator<String> {
private String[] tokens;
private int pos = 0, n;
/**
* Creates a new N-gram iterator.
* @param tokens The tokens.
* @param n The size of the n-grams.
*/
public NgramIterator(String[] tokens, int n) {
this.tokens = tokens;
this.n = n;
}
@Override
public boolean hasNext() {
return pos < tokens.length - n + 1;
}
@Override
public String next() {
StringBuilder sb = new StringBuilder();
for (int i = pos; i < pos + n; i++) {
sb.append((i > pos ? " " : "") + tokens[i]);
}
pos++;
return sb.toString();
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
}
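// Example usage (a minimal sketch):
//
// NgramIterator iterator = new NgramIterator(new String[] {"the", "quick", "brown", "fox"}, 2);
// while(iterator.hasNext()) {
//   iterator.next();  // "the quick", then "quick brown", then "brown fox"
// }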
|
0
|
java-sources/ai/idylnlp/idylnlp-nlp-utils/1.1.0/ai/idylnlp/nlp/utils
|
java-sources/ai/idylnlp/idylnlp-nlp-utils/1.1.0/ai/idylnlp/nlp/utils/ngrams/NgramUtils.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.nlp.utils.ngrams;
import java.util.LinkedList;
import java.util.List;
/**
* Utility functions for N-Grams.
*
* @author Mountain Fog, Inc.
*
*/
public class NgramUtils {
private NgramUtils() {
// This is a utility class.
}
/**
* Returns the N-Grams for a string of a given length.
* @param tokens An array of tokens.
* @param len The length of the n-grams.
* @return A collection of N-Grams for the input string.
*/
public static String[] getNgrams(String[] tokens, int len) {
final List<String> ngrams = new LinkedList<>();
for(int i = 0; i < tokens.length - len + 1; i++) {
StringBuilder sb = new StringBuilder();
for(int k = 0; k < len; k++) {
if(k > 0) sb.append(' ');
sb.append(tokens[i+k]);
}
ngrams.add(sb.toString());
}
final String[] n = new String[ngrams.size()];
return ngrams.toArray(n);
}
/**
* Gets all n-grams for a given set of tokens.
* @param tokens The tokens.
* @return All n-grams for a given set of tokens.
*/
public static String[] getNgrams(String[] tokens) {
final List<String> ngrams = new LinkedList<>();
for(int len = 1; len <= tokens.length; len++) {
for(int i = 0; i < tokens.length - len + 1; i++) {
StringBuilder sb = new StringBuilder();
for(int k = 0; k < len; k++) {
if(k > 0) sb.append(' ');
sb.append(tokens[i+k]);
}
ngrams.add(sb.toString());
}
}
final String[] n = new String[ngrams.size()];
return ngrams.toArray(n);
}
}
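// Example usage (a minimal sketch):
//
// NgramUtils.getNgrams(new String[] {"a", "b", "c"}, 2);  // ["a b", "b c"]
// NgramUtils.getNgrams(new String[] {"a", "b", "c"});     // ["a", "b", "c", "a b", "b c", "a b c"]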
|
0
|
java-sources/ai/idylnlp/idylnlp-opennlp-custom/1.1.0/ai/idylnlp/custom
|
java-sources/ai/idylnlp/idylnlp-opennlp-custom/1.1.0/ai/idylnlp/custom/encryption/AbstractEncryption.java
|
package ai.idylnlp.custom.encryption;
import static java.lang.Character.digit;
import ai.idylnlp.opennlp.custom.encryption.OpenNLPEncryption;
/**
* Base class for OpenNLP model encryption implementations of {@link OpenNLPEncryption}.
*
* @author Mountain Fog, Inc.
*
*/
public abstract class AbstractEncryption implements OpenNLPEncryption {
/**
 * Converts a hexadecimal string to a byte array.
 * @param input The hexadecimal string to convert to bytes.
 * @return The decoded bytes of the hexadecimal string.
 */
protected byte[] stringToBytes(String input) {
/*
 * String.getBytes() encodes the string's characters using the platform's default
 * charset, which is not what is needed here. Instead, each pair of hexadecimal
 * characters (0-9 and A-F, e.g. "1A", "99") must be converted into its
 * corresponding numerical (byte) value, e.g. "FF" -> the byte -1.
 * http://stackoverflow.com/questions/14368374/how-to-turn-64-character-string-into-key-for-256-aes-encryption
 */
int length = input.length();
byte[] output = new byte[length / 2];
for (int i = 0; i < length; i += 2) {
output[i / 2] = (byte) ((digit(input.charAt(i), 16) << 4) | digit(input.charAt(i+1), 16));
}
return output;
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-opennlp-custom/1.1.0/ai/idylnlp/opennlp
|
java-sources/ai/idylnlp/idylnlp-opennlp-custom/1.1.0/ai/idylnlp/opennlp/custom/EncryptedDataOutputStream.java
|
package ai.idylnlp.opennlp.custom;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.commons.lang3.StringUtils;
import ai.idylnlp.opennlp.custom.encryption.OpenNLPEncryptionFactory;
/**
* A {@link DataOutputStream} that encrypts strings written via
* {@link #writeEncryptedUTF(String)} when an encryption key has been set on the
* default encryption obtained from {@link OpenNLPEncryptionFactory}.
*/
public class EncryptedDataOutputStream extends DataOutputStream {
public EncryptedDataOutputStream(OutputStream out) {
super(out);
}
public void writeEncryptedUTF(String s) throws IOException {
if(StringUtils.isNotEmpty(OpenNLPEncryptionFactory.getDefault().getKey())) {
try {
// Encrypt the input.
s = OpenNLPEncryptionFactory.getDefault().encrypt(s);
} catch (Exception ex) {
throw new RuntimeException("Unable to write encrypted model.", ex);
}
}
writeUTF(s);
}
}
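// Usage sketch (not part of the original source): writing an encrypted string, assuming an
// encryption key has been set on the default OpenNLPEncryption. The key value and class
// name below are illustrative only.
class EncryptedDataOutputStreamUsageSketch {
public static void main(String[] args) throws Exception {
OpenNLPEncryptionFactory.getDefault().setKey("example-key");
try (java.io.ByteArrayOutputStream buffer = new java.io.ByteArrayOutputStream();
EncryptedDataOutputStream out = new EncryptedDataOutputStream(buffer)) {
// Because a key is set, the string is encrypted before being written.
out.writeEncryptedUTF("model contents");
}
OpenNLPEncryptionFactory.getDefault().clearKey();
}
}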
|
0
|
java-sources/ai/idylnlp/idylnlp-opennlp-custom/1.1.0/ai/idylnlp/opennlp/custom
|
java-sources/ai/idylnlp/idylnlp-opennlp-custom/1.1.0/ai/idylnlp/opennlp/custom/encryption/OpenNLP183Encryption.java
|
package ai.idylnlp.opennlp.custom.encryption;
import javax.crypto.Cipher;
import javax.crypto.spec.SecretKeySpec;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.codec.digest.DigestUtils;
import ai.idylnlp.custom.encryption.AbstractEncryption;
/**
* Model encryption for OpenNLP 1.8.3. This is a separate implementation but is fully
* compatible with the {@link OpenNLP160Encryption} implementation; a test in
* OpenNLP170EncryptionTest verifies the compatibility.
* <p>
* IMPORTANT NOTE:
* <p>
* If you get an error like "java.security.InvalidKeyException: Illegal key size or default parameters" when trying
* to load and decrypt a model, make sure the Java Cryptography Extension (JCE) Unlimited Strength Jurisdiction Policy Files
* are installed for your JDK. Refer to: http://stackoverflow.com/a/6481658 and http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html
* <p>
* This error does not occur when using OpenJDK.
*
* @author Mountain Fog, Inc.
*
*/
public class OpenNLP183Encryption extends AbstractEncryption implements OpenNLPEncryption {
private String encryptionKey;
@Override
public String encrypt(String strToEncrypt) throws Exception {
SecretKeySpec key = generateKey(encryptionKey);
Cipher cipher = Cipher.getInstance("AES");
cipher.init(Cipher.ENCRYPT_MODE, key);
return Base64.encodeBase64String(cipher.doFinal(strToEncrypt.getBytes("UTF-8")));
}
@Override
public String decrypt(String strToDecrypt) throws Exception {
return decrypt(strToDecrypt, encryptionKey);
}
private SecretKeySpec generateKey(String key) throws Exception {
String encryptionKey = DigestUtils.sha256Hex(key + "uGrClE0GW1Sm7DRsiavg");
// Generate the encryption key based on the string value.
return new SecretKeySpec(stringToBytes(encryptionKey), "AES");
}
@Override
public void setKey(String encryptionKey) {
this.encryptionKey = encryptionKey;
}
@Override
public String getKey() {
return encryptionKey;
}
@Override
public void clearKey() {
this.encryptionKey = null;
}
@Override
public String decrypt(String text, String encryptionKey) throws Exception {
SecretKeySpec key = generateKey(encryptionKey);
Cipher cipher = Cipher.getInstance("AES");
cipher.init(Cipher.DECRYPT_MODE, key);
// Decode with UTF-8 to match the charset used by encrypt().
return new String(cipher.doFinal(Base64.decodeBase64(text)), "UTF-8");
}
}
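// Usage sketch (not part of the original source): a simple encrypt/decrypt round trip.
// The key value and class name are illustrative only. On older Oracle JDKs this may
// require the JCE unlimited strength policy files (see the class javadoc above).
class OpenNLP183EncryptionUsageSketch {
public static void main(String[] args) throws Exception {
OpenNLP183Encryption encryption = new OpenNLP183Encryption();
encryption.setKey("example-key");
String ciphertext = encryption.encrypt("some model text");
// Decrypts using the key that was set above.
String plaintext = encryption.decrypt(ciphertext);
System.out.println(plaintext);
}
}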
|
0
|
java-sources/ai/idylnlp/idylnlp-opennlp-custom/1.1.0/ai/idylnlp/opennlp/custom
|
java-sources/ai/idylnlp/idylnlp-opennlp-custom/1.1.0/ai/idylnlp/opennlp/custom/encryption/OpenNLPEncryption.java
|
package ai.idylnlp.opennlp.custom.encryption;
/**
* Interface for encryption methods used by OpenNLP model encryption.
*
* @author Mountain Fog, Inc.
*
*/
public interface OpenNLPEncryption {
/**
* Encrypt the input text.
* @param text The text to encrypt.
* @return The encrypted text.
* @throws Exception Thrown if the text cannot be encrypted.
*/
public String encrypt(String text) throws Exception;
/**
* Decrypt the input text.
* @param text The text to decrypt.
* @return The decrypted text.
* @throws Exception Thrown if the text cannot be decrypted.
*/
public String decrypt(String text) throws Exception;
/**
* Decrypt the input text using the given encryption key.
* @param text The text to decrypt.
* @param encryptionKey The encryption key.
* @return The decrypted text.
* @throws Exception Thrown if the text cannot be decrypted.
*/
public String decrypt(String text, String encryptionKey) throws Exception;
/**
* Sets the encryption key.
* @param encryptionKey The encryption key.
*/
public void setKey(String encryptionKey);
/**
* Gets the encryption key.
* @return The encryption key, or <code>null</code> if no key has been set.
*/
public String getKey();
/**
* Clears the encryption key.
*/
public void clearKey();
}
|
0
|
java-sources/ai/idylnlp/idylnlp-opennlp-custom/1.1.0/ai/idylnlp/opennlp/custom
|
java-sources/ai/idylnlp/idylnlp-opennlp-custom/1.1.0/ai/idylnlp/opennlp/custom/encryption/OpenNLPEncryptionFactory.java
|
package ai.idylnlp.opennlp.custom.encryption;
public class OpenNLPEncryptionFactory {
private static OpenNLPEncryption openNLPEncryption;
private OpenNLPEncryptionFactory() {
// This is a factory class.
}
public static OpenNLPEncryption getDefault() {
if(openNLPEncryption == null) {
openNLPEncryption = new OpenNLP183Encryption();
}
return openNLPEncryption;
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/ai/idylnlp/opennlp/custom
|
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/ai/idylnlp/opennlp/custom/features/SpecialCharacterFeatureGenerator.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.opennlp.custom.features;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import opennlp.tools.util.featuregen.AdaptiveFeatureGenerator;
/**
* Generates features for tokens that contain special (non-alphanumeric) characters.
*
* @author Mountain Fog, Inc.
*
*/
public class SpecialCharacterFeatureGenerator implements AdaptiveFeatureGenerator {
private Pattern p;
public SpecialCharacterFeatureGenerator() {
p = Pattern.compile("[^a-z0-9 ]", Pattern.CASE_INSENSITIVE);
}
@Override
public void createFeatures(List<String> features, String[] tokens, int index, String[] previousOutcomes) {
Matcher m = p.matcher(tokens[index]);
boolean containsSpecialCharacters = m.find();
if(containsSpecialCharacters) {
features.add("specchar=" + tokens[index]);
}
}
}
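// Usage sketch (not part of the original source): generating features for a single token.
// The tokens and the null previousOutcomes argument are illustrative; the generator does
// not use previousOutcomes.
class SpecialCharacterFeatureGeneratorUsageSketch {
public static void main(String[] args) {
SpecialCharacterFeatureGenerator generator = new SpecialCharacterFeatureGenerator();
List<String> features = new java.util.ArrayList<>();
String[] tokens = {"IL-2", "protein"};
generator.createFeatures(features, tokens, 0, null);
// Prints [specchar=IL-2] because "IL-2" contains a non-alphanumeric character.
System.out.println(features);
}
}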
|
0
|
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/ai/idylnlp/opennlp/custom
|
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/ai/idylnlp/opennlp/custom/features/SpecialCharacterFeatureGeneratorFactory.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.opennlp.custom.features;
import java.util.Map;
import org.w3c.dom.Element;
import opennlp.tools.util.InvalidFormatException;
import opennlp.tools.util.featuregen.AdaptiveFeatureGenerator;
import opennlp.tools.util.featuregen.FeatureGeneratorResourceProvider;
import opennlp.tools.util.featuregen.GeneratorFactory.XmlFeatureGeneratorFactory;
/**
* Factory for {@link SpecialCharacterFeatureGenerator}.
*
* @author Mountain Fog, Inc.
*
*/
public class SpecialCharacterFeatureGeneratorFactory implements XmlFeatureGeneratorFactory {
private static final String ELEMENT_NAME = "specchar";
@Override
public AdaptiveFeatureGenerator create(Element generatorElement, FeatureGeneratorResourceProvider resourceManager)
throws InvalidFormatException {
return new SpecialCharacterFeatureGenerator();
}
public static void register(Map<String, XmlFeatureGeneratorFactory> factoryMap) {
factoryMap.put(ELEMENT_NAME, new SpecialCharacterFeatureGeneratorFactory());
}
}
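// Usage sketch (not part of the original source): registering the factory so that a
// "specchar" element in a feature generator descriptor resolves to this generator.
// The map below is illustrative; in practice it is supplied by OpenNLP's GeneratorFactory.
class SpecialCharacterFeatureGeneratorFactoryUsageSketch {
public static void main(String[] args) {
Map<String, XmlFeatureGeneratorFactory> factories = new java.util.HashMap<>();
SpecialCharacterFeatureGeneratorFactory.register(factories);
System.out.println(factories.keySet()); // [specchar]
}
}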
|
0
|
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/ai/idylnlp/opennlp/custom
|
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/ai/idylnlp/opennlp/custom/features/TokenPartOfSpeechFeatureGenerator.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.opennlp.custom.features;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang3.StringUtils;
import ai.idylnlp.model.nlp.pos.PartsOfSpeechTagger;
import ai.idylnlp.model.nlp.pos.PartsOfSpeechToken;
import opennlp.tools.util.featuregen.AdaptiveFeatureGenerator;
/**
* Generates features for tokens based on the token's part of speech.
*
* @author Mountain Fog, Inc.
*
*/
public class TokenPartOfSpeechFeatureGenerator implements AdaptiveFeatureGenerator {
private static final String POS_PREFIX = "tpos";
private PartsOfSpeechTagger tagger;
private Map<String, String> tokPosMap;
public TokenPartOfSpeechFeatureGenerator(PartsOfSpeechTagger tagger) {
this.tagger = tagger;
tokPosMap = new HashMap<String, String>();
}
@Override
public void createFeatures(List<String> features, String[] tokens, int index, String[] previousOutcomes) {
String[] postags = getPostags(tokens);
features.add(POS_PREFIX + "=" + postags[index]);
}
private String[] getPostags(String[] tokens) {
String text = StringUtils.join(tokens, " ");
if (tokPosMap.containsKey(text)) {
return tokPosMap.get(text).split(" ");
} else {
List<PartsOfSpeechToken> partsOfSpeechTokens = tagger.tag(tokens);
String[] tags = PartsOfSpeechToken.getTokens(partsOfSpeechTokens);
tokPosMap.put(text, StringUtils.join(tags, " "));
return tags;
}
}
}
|
0
|
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/ai/idylnlp/opennlp/custom
|
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/ai/idylnlp/opennlp/custom/features/TokenPartOfSpeechFeatureGeneratorFactory.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.opennlp.custom.features;
import java.util.Map;
import org.w3c.dom.Element;
import ai.idylnlp.model.manifest.ModelManifest;
import ai.idylnlp.model.manifest.ModelManifestUtils;
import ai.idylnlp.model.manifest.StandardModelManifest;
import ai.idylnlp.model.nlp.pos.PartsOfSpeechTagger;
import ai.idylnlp.opennlp.custom.nlp.pos.DefaultPartsOfSpeechTagger;
import ai.idylnlp.opennlp.custom.validators.TrueModelValidator;
import opennlp.tools.util.InvalidFormatException;
import opennlp.tools.util.featuregen.AdaptiveFeatureGenerator;
import opennlp.tools.util.featuregen.FeatureGeneratorResourceProvider;
import opennlp.tools.util.featuregen.GeneratorFactory.XmlFeatureGeneratorFactory;
/**
* Factory for {@link TokenPartOfSpeechFeatureGenerator}.
*
* @author Mountain Fog, Inc.
*
*/
public class TokenPartOfSpeechFeatureGeneratorFactory implements XmlFeatureGeneratorFactory {
private static final String ELEMENT_NAME = "tokenpos";
@Override
public AdaptiveFeatureGenerator create(Element generatorElement, FeatureGeneratorResourceProvider resourceManager)
throws InvalidFormatException {
final String modelPath = generatorElement.getAttribute("modelPath");
final String modelManifestPath = generatorElement.getAttribute("modelManifest");
try {
ModelManifest modelManifest = ModelManifestUtils.readManifest(modelPath + modelManifestPath);
StandardModelManifest standardModelManifest = (StandardModelManifest) modelManifest;
PartsOfSpeechTagger tagger = new DefaultPartsOfSpeechTagger(modelPath, standardModelManifest, new TrueModelValidator());
return new TokenPartOfSpeechFeatureGenerator(tagger);
} catch (Exception ex) {
throw new InvalidFormatException("Unable to load the parts-of-speech model.", ex);
}
}
public static void register(Map<String, XmlFeatureGeneratorFactory> factoryMap) {
factoryMap.put(ELEMENT_NAME, new TokenPartOfSpeechFeatureGeneratorFactory());
}
}
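// Descriptor sketch (not part of the original source): this factory handles a "tokenpos"
// element whose modelPath and modelManifest attributes locate a parts-of-speech model and
// its manifest. The paths below are illustrative only, for example:
//
// <tokenpos modelPath="/models/" modelManifest="pos-manifest.xml"/>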
|
0
|
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/ai/idylnlp/opennlp/custom
|
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/ai/idylnlp/opennlp/custom/features/WordNormalizationFeatureGenerator.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.opennlp.custom.features;
import java.util.List;
import ai.idylnlp.model.nlp.lemma.Lemmatizer;
import ai.idylnlp.model.nlp.pos.PartsOfSpeechTagger;
import ai.idylnlp.model.nlp.pos.PartsOfSpeechToken;
import opennlp.tools.util.featuregen.AdaptiveFeatureGenerator;
/**
* Generates features for normalized words.
* For example, 'IL' is normalized to 'AA', 'IL-2' is normalized to 'AA-0'
* and 'IL-8' is also normalized to 'AA-0'.
*
* @author Mountain Fog, Inc.
*
*/
public class WordNormalizationFeatureGenerator implements AdaptiveFeatureGenerator {
private Lemmatizer modelLemmatizer;
private Lemmatizer dictionaryLemmatizer;
private PartsOfSpeechTagger partsOfSpeechTagger;
public WordNormalizationFeatureGenerator(PartsOfSpeechTagger partsOfSpeechTagger, Lemmatizer modelLemmatizer, Lemmatizer dictionaryLemmatizer) {
this.modelLemmatizer = modelLemmatizer;
this.dictionaryLemmatizer = dictionaryLemmatizer;
this.partsOfSpeechTagger = partsOfSpeechTagger;
}
@Override
public void createFeatures(List<String> features, String[] tokens, int index, String[] previousOutcomes) {
// A partsOfSpeechTagger is required for both lemmatizers.
if(partsOfSpeechTagger != null) {
List<PartsOfSpeechToken> partsOfSpeechTokens = partsOfSpeechTagger.tag(tokens);
String[] tags = PartsOfSpeechToken.getTokens(partsOfSpeechTokens);
if(modelLemmatizer != null) {
tokens = modelLemmatizer.lemmatize(tokens, tags);
}
if(dictionaryLemmatizer != null) {
tokens = dictionaryLemmatizer.lemmatize(tokens, tags);
}
}
features.add("wnormal=" + normalize(tokens[index]));
}
private String normalize(String token) {
String normalizedToken = token.replaceAll("([A-Z])", "A");
normalizedToken = normalizedToken.replaceAll("([a-z])", "a");
normalizedToken = normalizedToken.replaceAll("([0-9])", "0");
return normalizedToken;
}
}
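// Usage sketch (not part of the original source): the normalization applied by this
// generator, shown without lemmatization (all constructor arguments may be null).
// The null previousOutcomes argument is illustrative; it is not used.
class WordNormalizationFeatureGeneratorUsageSketch {
public static void main(String[] args) {
WordNormalizationFeatureGenerator generator = new WordNormalizationFeatureGenerator(null, null, null);
List<String> features = new java.util.ArrayList<>();
generator.createFeatures(features, new String[] {"IL-2"}, 0, null);
// Prints [wnormal=AA-0]: upper case -> 'A', lower case -> 'a', digits -> '0'.
System.out.println(features);
}
}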
|
0
|
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/ai/idylnlp/opennlp/custom
|
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/ai/idylnlp/opennlp/custom/features/WordNormalizationFeatureGeneratorFactory.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.opennlp.custom.features;
import java.util.Map;
import org.apache.commons.lang3.StringUtils;
import org.w3c.dom.Element;
import ai.idylnlp.model.ModelValidator;
import ai.idylnlp.model.manifest.ModelManifest;
import ai.idylnlp.model.manifest.ModelManifestUtils;
import ai.idylnlp.model.manifest.StandardModelManifest;
import ai.idylnlp.model.nlp.lemma.Lemmatizer;
import ai.idylnlp.model.nlp.pos.PartsOfSpeechTagger;
import ai.idylnlp.opennlp.custom.nlp.lemmatization.DefaultLemmatizer;
import ai.idylnlp.opennlp.custom.nlp.pos.DefaultPartsOfSpeechTagger;
import ai.idylnlp.opennlp.custom.validators.TrueModelValidator;
import opennlp.tools.util.InvalidFormatException;
import opennlp.tools.util.featuregen.AdaptiveFeatureGenerator;
import opennlp.tools.util.featuregen.FeatureGeneratorResourceProvider;
import opennlp.tools.util.featuregen.GeneratorFactory.XmlFeatureGeneratorFactory;
/**
* Factory for {@link WordNormalizationFeatureGenerator}.
*
* @author Mountain Fog, Inc.
*
*/
public class WordNormalizationFeatureGeneratorFactory implements XmlFeatureGeneratorFactory {
private static final String ELEMENT_NAME = "wordnormalization";
private Lemmatizer modelLemmatizer;
private Lemmatizer dictionaryLemmatizer;
private PartsOfSpeechTagger partsOfSpeechTagger;
private ModelValidator validator;
@Override
public AdaptiveFeatureGenerator create(Element generatorElement, FeatureGeneratorResourceProvider resourceManager)
throws InvalidFormatException {
validator = new TrueModelValidator();
try {
loadLemmatizers(generatorElement);
loadPartsOfSpeechTagger(generatorElement);
return new WordNormalizationFeatureGenerator(partsOfSpeechTagger, modelLemmatizer, dictionaryLemmatizer);
} catch (Exception ex) {
throw new InvalidFormatException("Unable to load lemmatizer or parts-of-speech model.", ex);
}
}
public static void register(Map<String, XmlFeatureGeneratorFactory> factoryMap) {
factoryMap.put(ELEMENT_NAME, new WordNormalizationFeatureGeneratorFactory());
}
private void loadLemmatizers(Element generatorElement) throws Exception {
final String lemmaModelPath = generatorElement.getAttribute("modelPath");
final String lemmaModelManifest = generatorElement.getAttribute("modelManifest");
final String lemmaDictionary = generatorElement.getAttribute("dictionary");
if(StringUtils.isNotEmpty(lemmaModelPath) && StringUtils.isNotEmpty(lemmaModelManifest)) {
// Only read the manifest when a model-based lemmatizer is actually configured.
ModelManifest modelManifest = ModelManifestUtils.readManifest(lemmaModelPath + lemmaModelManifest);
StandardModelManifest standardModelManifest = (StandardModelManifest) modelManifest;
modelLemmatizer = new DefaultLemmatizer(lemmaModelPath, standardModelManifest, validator);
}
if(StringUtils.isNotEmpty(lemmaDictionary)) {
dictionaryLemmatizer = new DefaultLemmatizer(lemmaDictionary);
}
}
private void loadPartsOfSpeechTagger(Element generatorElement) throws Exception {
final String posModelPath = generatorElement.getAttribute("modelPath");
final String posModelManifest = generatorElement.getAttribute("modelManifest");
ModelManifest modelManifest = ModelManifestUtils.readManifest(posModelPath + posModelManifest);
StandardModelManifest standardModelManifest = (StandardModelManifest) modelManifest;
// TODO: Get a validator in here.
partsOfSpeechTagger = new DefaultPartsOfSpeechTagger(posModelPath, standardModelManifest, validator);
}
}
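// Descriptor sketch (not part of the original source): this factory handles a
// "wordnormalization" element; its modelPath and modelManifest attributes configure both
// the parts-of-speech tagger and the model-based lemmatizer, and the optional dictionary
// attribute configures a dictionary-based lemmatizer. The paths below are illustrative only:
//
// <wordnormalization modelPath="/models/" modelManifest="lemma-manifest.xml" dictionary="/models/lemma-dictionary.txt"/>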
|
0
|
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/ai/idylnlp/opennlp/custom
|
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/ai/idylnlp/opennlp/custom/formats/IdylNLPNameSampleStream.java
|
/*******************************************************************************
* Copyright 2018 Mountain Fog, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package ai.idylnlp.opennlp.custom.formats;
import java.io.IOException;
import java.util.Collection;
import java.util.LinkedList;
import java.util.List;
import opennlp.tools.namefind.NameSample;
import opennlp.tools.tokenize.WhitespaceTokenizer;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.Span;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import ai.idylnlp.model.nlp.annotation.AnnotationReader;
import ai.idylnlp.model.nlp.annotation.IdylNLPAnnotation;
/**
* Implementation of {@link ObjectStream} that reads text that is annotated
* in the Idyl NLP format.
*
* @author Mountain Fog, Inc.
*
*/
public class IdylNLPNameSampleStream implements ObjectStream<NameSample> {
private final ObjectStream<String> lineStream;
private final AnnotationReader annotationReader;
private int lineNumber = 1;
public IdylNLPNameSampleStream(ObjectStream<String> lineStream, AnnotationReader annotationReader) {
this.lineStream = lineStream;
this.annotationReader = annotationReader;
}
@Override
public NameSample read() throws IOException {
final List<String> sentences = new LinkedList<>();
final String line = lineStream.read();
lineNumber++;
if(line != null && !StringUtils.isEmpty(line.trim())) {
// TODO: Should this tokenizer be customizable?
for(String token : WhitespaceTokenizer.INSTANCE.tokenize(line)) {
sentences.add(token);
}
}
if (sentences.size() > 0) {
final List<Span> names = new LinkedList<>();
// It is lineNumber - 1 here because we have already incremented the line number above.
Collection<IdylNLPAnnotation> annotations = annotationReader.getAnnotations(lineNumber - 1);
if(CollectionUtils.isNotEmpty(annotations)) {
for(IdylNLPAnnotation annotation : annotations) {
Span span = new Span(annotation.getTokenStart(), annotation.getTokenEnd(), annotation.getType());
names.add(span);
}
}
return new NameSample(sentences.toArray(new String[sentences.size()]), names.toArray(new Span[names.size()]), true);
} else if (line != null) {
// The line was empty; filter it out by reading the next sample.
return read();
}
else {
// The source stream is not returning any more lines.
return null;
}
}
@Override
public void reset() throws IOException, UnsupportedOperationException {
lineStream.reset();
lineNumber = 1;
}
@Override
public void close() throws IOException {
lineStream.close();
}
}
|