index
int64
repo_id
string
file_path
string
content
string
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/namefind/DictionaryNameFinder.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.namefind; import java.util.LinkedList; import java.util.List; import java.util.Objects; import opennlp.tools.dictionary.Dictionary; import opennlp.tools.util.Span; import opennlp.tools.util.StringList; /** * This is a dictionary based name finder, it scans text * for names inside a dictionary. */ public class DictionaryNameFinder implements TokenNameFinder { private static final String DEFAULT_TYPE = "default"; private Dictionary mDictionary; private final String type; /** * Initialized the current instance with he provided dictionary * and a type. * * @param dictionary * @param type the name type used for the produced spans */ public DictionaryNameFinder(Dictionary dictionary, String type) { mDictionary = Objects.requireNonNull(dictionary, "dictionary must not be null"); this.type = Objects.requireNonNull(type, "type must not be null"); } /** * Initializes the current instance with the provided dictionary. 
* * @param dictionary */ public DictionaryNameFinder(Dictionary dictionary) { this(dictionary, DEFAULT_TYPE); } public Span[] find(String[] textTokenized) { List<Span> namesFound = new LinkedList<>(); for (int offsetFrom = 0; offsetFrom < textTokenized.length; offsetFrom++) { Span nameFound = null; String[] tokensSearching; for (int offsetTo = offsetFrom; offsetTo < textTokenized.length; offsetTo++) { int lengthSearching = offsetTo - offsetFrom + 1; if (lengthSearching > mDictionary.getMaxTokenCount()) { break; } else { tokensSearching = new String[lengthSearching]; System.arraycopy(textTokenized, offsetFrom, tokensSearching, 0, lengthSearching); StringList entryForSearch = new StringList(tokensSearching); if (mDictionary.contains(entryForSearch)) { nameFound = new Span(offsetFrom, offsetTo + 1, type); } } } if (nameFound != null) { namesFound.add(nameFound); // skip over the found tokens for the next search offsetFrom += nameFound.length() - 1; } } return namesFound.toArray(new Span[namesFound.size()]); } public void clearAdaptiveData() { // nothing to clear } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/namefind/DocumentNameFinder.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.namefind;

import opennlp.tools.util.Span;

/**
 * Name finding interface which processes an entire document allowing the name finder to use context
 * from the entire document.
 *
 * <strong>EXPERIMENTAL</strong>.
 * This interface has been added as part of a work in progress and might change without notice.
 */
public interface DocumentNameFinder {

  /**
   * Returns token spans for the specified document of sentences and their tokens.
   * Span start and end indices are relative to the sentence they are in.
   * For example, a span identifying a name consisting of the first and second word
   * of the second sentence would be 0..2 and be referenced as spans[1][0].
   *
   * @param document An array of tokens for each sentence of a document.
   * @return The token spans for each sentence of the specified document.
   */
  Span[][] find(String[][] document);
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/namefind/NameContextGenerator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.namefind;

import opennlp.tools.util.BeamSearchContextGenerator;
import opennlp.tools.util.featuregen.AdaptiveFeatureGenerator;

/**
 * Interface for generating the context for a name finder by specifying a set of feature generators.
 */
public interface NameContextGenerator extends BeamSearchContextGenerator<String> {

  /**
   * Adds a feature generator to this set of feature generators.
   *
   * @param generator The feature generator to add.
   */
  void addFeatureGenerator(AdaptiveFeatureGenerator generator);

  /**
   * Informs all the feature generators for a name finder that the specified tokens have
   * been classified with the corresponding set of specified outcomes.
   *
   * @param tokens The tokens of the sentence or other text unit which has been processed.
   * @param outcomes The outcomes associated with the specified tokens.
   */
  void updateAdaptiveData(String[] tokens, String[] outcomes);

  /**
   * Informs all the feature generators for a name finder that the context of the adaptive
   * data (typically a document) is no longer valid.
   */
  void clearAdaptiveData();
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/namefind/NameFinderEventStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.namefind; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; import opennlp.tools.ml.model.Event; import opennlp.tools.util.ObjectStream; import opennlp.tools.util.SequenceCodec; import opennlp.tools.util.Span; import opennlp.tools.util.featuregen.AdditionalContextFeatureGenerator; import opennlp.tools.util.featuregen.WindowFeatureGenerator; /** * Class for creating an event stream out of data files for training an name * finder. */ public class NameFinderEventStream extends opennlp.tools.util.AbstractEventStream<NameSample> { private NameContextGenerator contextGenerator; private AdditionalContextFeatureGenerator additionalContextFeatureGenerator = new AdditionalContextFeatureGenerator(); private SequenceCodec<String> codec; private final String defaultType; /** * Creates a new name finder event stream using the specified data stream and context generator. * @param dataStream The data stream of events. * @param type null or overrides the type parameter in the provided samples * @param contextGenerator The context generator used to generate features for the event stream. 
*/ public NameFinderEventStream(ObjectStream<NameSample> dataStream, String type, NameContextGenerator contextGenerator, SequenceCodec<String> codec) { super(dataStream); this.codec = codec; if (codec == null) { this.codec = new BioCodec(); } this.contextGenerator = contextGenerator; this.contextGenerator.addFeatureGenerator( new WindowFeatureGenerator(additionalContextFeatureGenerator, 8, 8)); this.defaultType = type; } public NameFinderEventStream(ObjectStream<NameSample> dataStream) { this(dataStream, null, new DefaultNameContextGenerator(), null); } /** * Generates the name tag outcomes (start, continue, other) for each token in a sentence * with the specified length using the specified name spans. * @param names Token spans for each of the names. * @param type null or overrides the type parameter in the provided samples * @param length The length of the sentence. * @return An array of start, continue, other outcomes based on the specified names and sentence length. * * @deprecated use the BioCodec implementation of the SequenceValidator instead! 
*/ @Deprecated public static String[] generateOutcomes(Span[] names, String type, int length) { String[] outcomes = new String[length]; for (int i = 0; i < outcomes.length; i++) { outcomes[i] = NameFinderME.OTHER; } for (Span name : names) { if (name.getType() == null) { outcomes[name.getStart()] = type + "-" + NameFinderME.START; } else { outcomes[name.getStart()] = name.getType() + "-" + NameFinderME.START; } // now iterate from begin + 1 till end for (int i = name.getStart() + 1; i < name.getEnd(); i++) { if (name.getType() == null) { outcomes[i] = type + "-" + NameFinderME.CONTINUE; } else { outcomes[i] = name.getType() + "-" + NameFinderME.CONTINUE; } } } return outcomes; } public static List<Event> generateEvents(String[] sentence, String[] outcomes, NameContextGenerator cg) { List<Event> events = new ArrayList<>(outcomes.length); for (int i = 0; i < outcomes.length; i++) { events.add(new Event(outcomes[i], cg.getContext(i, sentence, outcomes,null))); } cg.updateAdaptiveData(sentence, outcomes); return events; } @Override protected Iterator<Event> createEvents(NameSample sample) { if (sample.isClearAdaptiveDataSet()) { contextGenerator.clearAdaptiveData(); } Span[] names = sample.getNames(); if (!Objects.isNull(this.defaultType)) { overrideType(names); } String[] outcomes = codec.encode(names, sample.getSentence().length); // String outcomes[] = generateOutcomes(sample.getNames(), type, sample.getSentence().length); additionalContextFeatureGenerator.setCurrentContext(sample.getAdditionalContext()); String[] tokens = new String[sample.getSentence().length]; for (int i = 0; i < sample.getSentence().length; i++) { tokens[i] = sample.getSentence()[i]; } return generateEvents(tokens, outcomes, contextGenerator).iterator(); } private void overrideType(Span[] names) { for (int i = 0; i < names.length; i++) { Span n = names[i]; names[i] = new Span(n.getStart(), n.getEnd(), this.defaultType, n.getProb()); } } /** * Generated previous decision features for each token 
based on contents of the specified map. * @param tokens The token for which the context is generated. * @param prevMap A mapping of tokens to their previous decisions. * @return An additional context array with features for each token. */ public static String[][] additionalContext(String[] tokens, Map<String, String> prevMap) { String[][] ac = new String[tokens.length][1]; for (int ti = 0; ti < tokens.length; ti++) { String pt = prevMap.get(tokens[ti]); ac[ti][0] = "pd=" + pt; } return ac; } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/namefind/NameFinderME.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.namefind; import java.io.ByteArrayInputStream; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.regex.Matcher; import java.util.regex.Pattern; import opennlp.tools.ml.BeamSearch; import opennlp.tools.ml.EventModelSequenceTrainer; import opennlp.tools.ml.EventTrainer; import opennlp.tools.ml.SequenceTrainer; import opennlp.tools.ml.TrainerFactory; import opennlp.tools.ml.TrainerFactory.TrainerType; import opennlp.tools.ml.model.Event; import opennlp.tools.ml.model.MaxentModel; import opennlp.tools.ml.model.SequenceClassificationModel; import opennlp.tools.ml.perceptron.PerceptronTrainer; import opennlp.tools.util.ObjectStream; import opennlp.tools.util.Sequence; import opennlp.tools.util.SequenceCodec; import opennlp.tools.util.SequenceValidator; import opennlp.tools.util.Span; import opennlp.tools.util.TrainingParameters; import opennlp.tools.util.featuregen.AdaptiveFeatureGenerator; import opennlp.tools.util.featuregen.AdditionalContextFeatureGenerator; import opennlp.tools.util.featuregen.GeneratorFactory; import 
opennlp.tools.util.featuregen.WindowFeatureGenerator; /** * Class for creating a maximum-entropy-based name finder. */ public class NameFinderME implements TokenNameFinder { private static String[][] EMPTY = new String[0][0]; public static final int DEFAULT_BEAM_SIZE = 3; private static final Pattern typedOutcomePattern = Pattern.compile("(.+)-\\w+"); public static final String START = "start"; public static final String CONTINUE = "cont"; public static final String OTHER = "other"; private SequenceCodec<String> seqCodec = new BioCodec(); protected SequenceClassificationModel<String> model; protected NameContextGenerator contextGenerator; private Sequence bestSequence; private AdditionalContextFeatureGenerator additionalContextFeatureGenerator = new AdditionalContextFeatureGenerator(); private SequenceValidator<String> sequenceValidator; public NameFinderME(TokenNameFinderModel model) { TokenNameFinderFactory factory = model.getFactory(); seqCodec = factory.createSequenceCodec(); sequenceValidator = seqCodec.createSequenceValidator(); this.model = model.getNameFinderSequenceModel(); contextGenerator = factory.createContextGenerator(); // TODO: We should deprecate this. And come up with a better solution! contextGenerator.addFeatureGenerator( new WindowFeatureGenerator(additionalContextFeatureGenerator, 8, 8)); } private static AdaptiveFeatureGenerator createFeatureGenerator( byte[] generatorDescriptor, final Map<String, Object> resources) throws IOException { AdaptiveFeatureGenerator featureGenerator; if (generatorDescriptor != null) { featureGenerator = GeneratorFactory.create(new ByteArrayInputStream( generatorDescriptor), key -> { if (resources != null) { return resources.get(key); } return null; }); } else { featureGenerator = null; } return featureGenerator; } public Span[] find(String[] tokens) { return find(tokens, EMPTY); } /** * Generates name tags for the given sequence, typically a sentence, returning * token spans for any identified names. 
* * @param tokens an array of the tokens or words of the sequence, typically a sentence. * @param additionalContext features which are based on context outside of the * sentence but which should also be used. * * @return an array of spans for each of the names identified. */ public Span[] find(String[] tokens, String[][] additionalContext) { additionalContextFeatureGenerator.setCurrentContext(additionalContext); bestSequence = model.bestSequence(tokens, additionalContext, contextGenerator, sequenceValidator); List<String> c = bestSequence.getOutcomes(); contextGenerator.updateAdaptiveData(tokens, c.toArray(new String[c.size()])); Span[] spans = seqCodec.decode(c); spans = setProbs(spans); return spans; } /** * Forgets all adaptive data which was collected during previous calls to one * of the find methods. * * This method is typical called at the end of a document. */ public void clearAdaptiveData() { contextGenerator.clearAdaptiveData(); } /** * Populates the specified array with the probabilities of the last decoded * sequence. The sequence was determined based on the previous call to * <code>chunk</code>. The specified array should be at least as large as the * number of tokens in the previous call to <code>chunk</code>. * * @param probs An array used to hold the probabilities of the last decoded * sequence. */ public void probs(double[] probs) { bestSequence.getProbs(probs); } /** * Returns an array with the probabilities of the last decoded sequence. The * sequence was determined based on the previous call to <code>chunk</code>. * * @return An array with the same number of probabilities as tokens were sent * to <code>chunk</code> when it was last called. 
*/ public double[] probs() { return bestSequence.getProbs(); } /** * sets the probs for the spans * * @param spans * @return */ private Span[] setProbs(Span[] spans) { double[] probs = probs(spans); if (probs != null) { for (int i = 0; i < probs.length; i++) { double prob = probs[i]; spans[i] = new Span(spans[i], prob); } } return spans; } /** * Returns an array of probabilities for each of the specified spans which is * the arithmetic mean of the probabilities for each of the outcomes which * make up the span. * * @param spans The spans of the names for which probabilities are desired. * * @return an array of probabilities for each of the specified spans. */ public double[] probs(Span[] spans) { double[] sprobs = new double[spans.length]; double[] probs = bestSequence.getProbs(); for (int si = 0; si < spans.length; si++) { double p = 0; for (int oi = spans[si].getStart(); oi < spans[si].getEnd(); oi++) { p += probs[oi]; } p /= spans[si].length(); sprobs[si] = p; } return sprobs; } public static TokenNameFinderModel train(String languageCode, String type, ObjectStream<NameSample> samples, TrainingParameters trainParams, TokenNameFinderFactory factory) throws IOException { trainParams.putIfAbsent(TrainingParameters.ALGORITHM_PARAM, PerceptronTrainer.PERCEPTRON_VALUE); trainParams.putIfAbsent(TrainingParameters.CUTOFF_PARAM, 0); trainParams.putIfAbsent(TrainingParameters.ITERATIONS_PARAM, 300); int beamSize = trainParams.getIntParameter(BeamSearch.BEAM_SIZE_PARAMETER, NameFinderME.DEFAULT_BEAM_SIZE); Map<String, String> manifestInfoEntries = new HashMap<>(); MaxentModel nameFinderModel = null; SequenceClassificationModel<String> seqModel = null; TrainerType trainerType = TrainerFactory.getTrainerType(trainParams); if (TrainerType.EVENT_MODEL_TRAINER.equals(trainerType)) { ObjectStream<Event> eventStream = new NameFinderEventStream(samples, type, factory.createContextGenerator(), factory.createSequenceCodec()); EventTrainer trainer = 
TrainerFactory.getEventTrainer(trainParams, manifestInfoEntries); nameFinderModel = trainer.train(eventStream); } // TODO: Maybe it is not a good idea, that these two don't use the context generator ?! // These also don't use the sequence codec ?! else if (TrainerType.EVENT_MODEL_SEQUENCE_TRAINER.equals(trainerType)) { NameSampleSequenceStream ss = new NameSampleSequenceStream(samples, factory.createContextGenerator()); EventModelSequenceTrainer trainer = TrainerFactory.getEventModelSequenceTrainer( trainParams, manifestInfoEntries); nameFinderModel = trainer.train(ss); } else if (TrainerType.SEQUENCE_TRAINER.equals(trainerType)) { SequenceTrainer trainer = TrainerFactory.getSequenceModelTrainer( trainParams, manifestInfoEntries); NameSampleSequenceStream ss = new NameSampleSequenceStream(samples, factory.createContextGenerator(), false); seqModel = trainer.train(ss); } else { throw new IllegalStateException("Unexpected trainer type!"); } if (seqModel != null) { return new TokenNameFinderModel(languageCode, seqModel, factory.getFeatureGenerator(), factory.getResources(), manifestInfoEntries, factory.getSequenceCodec(), factory); } else { return new TokenNameFinderModel(languageCode, nameFinderModel, beamSize, factory.getFeatureGenerator(), factory.getResources(), manifestInfoEntries, factory.getSequenceCodec(), factory); } } /** * Gets the name type from the outcome * * @param outcome the outcome * @return the name type, or null if not set */ static String extractNameType(String outcome) { Matcher matcher = typedOutcomePattern.matcher(outcome); if (matcher.matches()) { return matcher.group(1); } return null; } /** * Removes spans with are intersecting or crossing in anyway. 
* * <p> * The following rules are used to remove the spans:<br> * Identical spans: The first span in the array after sorting it remains<br> * Intersecting spans: The first span after sorting remains<br> * Contained spans: All spans which are contained by another are removed<br> * * @param spans * * @return non-overlapping spans */ public static Span[] dropOverlappingSpans(Span[] spans) { List<Span> sortedSpans = new ArrayList<>(spans.length); Collections.addAll(sortedSpans, spans); Collections.sort(sortedSpans); Iterator<Span> it = sortedSpans.iterator(); Span lastSpan = null; while (it.hasNext()) { Span span = it.next(); if (lastSpan != null) { if (lastSpan.intersects(span)) { it.remove(); span = lastSpan; } } lastSpan = span; } return sortedSpans.toArray(new Span[sortedSpans.size()]); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/namefind/NameFinderSequenceValidator.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.namefind; import opennlp.tools.util.SequenceValidator; /** * This class is created by the {@link BioCodec}. */ public class NameFinderSequenceValidator implements SequenceValidator<String> { public boolean validSequence(int i, String[] inputSequence, String[] outcomesSequence, String outcome) { // outcome is formatted like "cont" or "sometype-cont", so we // can check if it ends with "cont". if (outcome.endsWith(BioCodec.CONTINUE)) { int li = outcomesSequence.length - 1; if (li == -1) { return false; } else if (outcomesSequence[li].endsWith(BioCodec.OTHER)) { return false; } else if (outcomesSequence[li].endsWith(BioCodec.CONTINUE) || outcomesSequence[li].endsWith(BioCodec.START)) { // if it is continue or start, we have to check if previous match was of the same type String previousNameType = NameFinderME.extractNameType(outcomesSequence[li]); String nameType = NameFinderME.extractNameType(outcome); if (previousNameType != null || nameType != null ) { if (nameType != null ) { if (nameType.equals(previousNameType)) { return true; } } return false; // outcomes types are not equal } } } return true; } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/namefind/NameSample.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.namefind; import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Objects; import java.util.regex.Matcher; import java.util.regex.Pattern; import opennlp.tools.tokenize.WhitespaceTokenizer; import opennlp.tools.util.Span; /** * Class for holding names for a single unit of text. */ public class NameSample implements Serializable { private final String id; private final List<String> sentence; private final List<Span> names; private final String[][] additionalContext; private final boolean isClearAdaptiveData; /** The a default type value when there is no type in training data. 
*/ public static final String DEFAULT_TYPE = "default"; public NameSample(String id, String[] sentence, Span[] names, String[][] additionalContext, boolean clearAdaptiveData) { this.id = id; Objects.requireNonNull(sentence, "sentence must not be null"); if (names == null) { names = new Span[0]; } this.sentence = Collections.unmodifiableList(new ArrayList<>(Arrays.asList(sentence))); List<Span> namesList = Arrays.asList(names); Collections.sort(namesList); this.names = Collections.unmodifiableList(namesList); if (additionalContext != null) { this.additionalContext = new String[additionalContext.length][]; for (int i = 0; i < additionalContext.length; i++) { this.additionalContext[i] = new String[additionalContext[i].length]; System.arraycopy(additionalContext[i], 0, this.additionalContext[i], 0, additionalContext[i].length); } } else { this.additionalContext = null; } isClearAdaptiveData = clearAdaptiveData; // Check that name spans are not overlapping, otherwise throw exception if (this.names.size() > 1) { for (int i = 1; i < this.names.size(); i++) { if (this.names.get(i).getStart() < this.names.get(i - 1).getEnd()) { throw new RuntimeException(String.format("name spans %s and %s are overlapped", this.names.get(i - 1), this.names.get(i))); } } } } /** * Initializes the current instance. 
* * @param sentence training sentence * @param names * @param additionalContext * @param clearAdaptiveData if true the adaptive data of the * feature generators is cleared */ public NameSample(String[] sentence, Span[] names, String[][] additionalContext, boolean clearAdaptiveData) { this(null, sentence, names, additionalContext, clearAdaptiveData); } public NameSample(String[] sentence, Span[] names, boolean clearAdaptiveData) { this(sentence, names, null, clearAdaptiveData); } public String getId() { return id; } public String[] getSentence() { return sentence.toArray(new String[sentence.size()]); } public Span[] getNames() { return names.toArray(new Span[names.size()]); } public String[][] getAdditionalContext() { return additionalContext; } public boolean isClearAdaptiveDataSet() { return isClearAdaptiveData; } @Override public int hashCode() { return Objects.hash(Arrays.hashCode(getSentence()), Arrays.hashCode(getNames()), Arrays.hashCode(getAdditionalContext()), isClearAdaptiveDataSet()); } @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj instanceof NameSample) { NameSample a = (NameSample) obj; return Arrays.equals(getSentence(), a.getSentence()) && Arrays.equals(getNames(), a.getNames()) && Arrays.equals(getAdditionalContext(), a.getAdditionalContext()) && isClearAdaptiveDataSet() == a.isClearAdaptiveDataSet(); } return false; } @Override public String toString() { StringBuilder result = new StringBuilder(); // If adaptive data must be cleared insert an empty line // before the sample sentence line if (isClearAdaptiveDataSet()) result.append("\n"); for (int tokenIndex = 0; tokenIndex < sentence.size(); tokenIndex++) { // token for (Span name : names) { if (name.getStart() == tokenIndex) { // check if nameTypes is null, or if the nameType for this specific // entity is empty. If it is, we leave the nameType blank. 
if (name.getType() == null) { result.append(NameSampleDataStream.START_TAG).append(' '); } else { result.append(NameSampleDataStream.START_TAG_PREFIX).append(name.getType()).append("> "); } } if (name.getEnd() == tokenIndex) { result.append(NameSampleDataStream.END_TAG).append(' '); } } result.append(sentence.get(tokenIndex)).append(' '); } if (sentence.size() > 1) result.setLength(result.length() - 1); for (Span name : names) { if (name.getEnd() == sentence.size()) { result.append(' ').append(NameSampleDataStream.END_TAG); } } return result.toString(); } private static String errorTokenWithContext(String[] sentence, int index) { StringBuilder errorString = new StringBuilder(); // two token before if (index > 1) errorString.append(sentence[index - 2]).append(" "); if (index > 0) errorString.append(sentence[index - 1]).append(" "); // token itself errorString.append("###"); errorString.append(sentence[index]); errorString.append("###").append(" "); // two token after if (index + 1 < sentence.length) errorString.append(sentence[index + 1]).append(" "); if (index + 2 < sentence.length) errorString.append(sentence[index + 2]); return errorString.toString(); } private static final Pattern START_TAG_PATTERN = Pattern.compile("<START(:([^:>\\s]*))?>"); public static NameSample parse(String taggedTokens, boolean isClearAdaptiveData) throws IOException { return parse(taggedTokens, DEFAULT_TYPE, isClearAdaptiveData); } public static NameSample parse(String taggedTokens, String defaultType, boolean isClearAdaptiveData) throws IOException { // TODO: Should throw another exception, and then convert it into an IOException in the stream String[] parts = WhitespaceTokenizer.INSTANCE.tokenize(taggedTokens); List<String> tokenList = new ArrayList<>(parts.length); List<Span> nameList = new ArrayList<>(); String nameType = defaultType; int startIndex = -1; int wordIndex = 0; // we check if at least one name has the a type. 
If no one has, we will // leave the NameType property of NameSample null. boolean catchingName = false; for (int pi = 0; pi < parts.length; pi++) { Matcher startMatcher = START_TAG_PATTERN.matcher(parts[pi]); if (startMatcher.matches()) { if (catchingName) { throw new IOException("Found unexpected annotation" + " while handling a name sequence: " + errorTokenWithContext(parts, pi)); } catchingName = true; startIndex = wordIndex; String nameTypeFromSample = startMatcher.group(2); if (nameTypeFromSample != null) { if (nameTypeFromSample.length() == 0) { throw new IOException("Missing a name type: " + errorTokenWithContext(parts, pi)); } nameType = nameTypeFromSample; } } else if (parts[pi].equals(NameSampleDataStream.END_TAG)) { if (!catchingName) { throw new IOException("Found unexpected annotation: " + errorTokenWithContext(parts, pi)); } catchingName = false; // create name nameList.add(new Span(startIndex, wordIndex, nameType)); } else { tokenList.add(parts[pi]); wordIndex++; } } String[] sentence = tokenList.toArray(new String[tokenList.size()]); Span[] names = nameList.toArray(new Span[nameList.size()]); return new NameSample(sentence, names, isClearAdaptiveData); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/namefind/NameSampleDataStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.namefind; import java.io.IOException; import opennlp.tools.ml.maxent.DataStream; import opennlp.tools.util.FilterObjectStream; import opennlp.tools.util.ObjectStream; /** * The {@link NameSampleDataStream} class converts tagged {@link String}s * provided by a {@link DataStream} to {@link NameSample} objects. * It uses text that is is one-sentence per line and tokenized * with names identified by <code>&lt;START&gt;</code> and <code>&lt;END&gt;</code> tags. 
*/ public class NameSampleDataStream extends FilterObjectStream<String, NameSample> { public static final String START_TAG_PREFIX = "<START:"; public static final String START_TAG = "<START>"; public static final String END_TAG = "<END>"; public NameSampleDataStream(ObjectStream<String> in) { super(in); } public NameSample read() throws IOException { String token = samples.read(); boolean isClearAdaptiveData = false; // An empty line indicates the begin of a new article // for which the adaptive data in the feature generators // must be cleared while (token != null && token.trim().length() == 0) { isClearAdaptiveData = true; token = samples.read(); } if (token != null) { return NameSample.parse(token, isClearAdaptiveData); } else { return null; } } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/namefind/NameSampleSequenceStream.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.namefind;

import java.io.IOException;
import java.util.Collections;

import opennlp.tools.ml.model.AbstractModel;
import opennlp.tools.ml.model.Event;
import opennlp.tools.ml.model.Sequence;
import opennlp.tools.ml.model.SequenceStream;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.SequenceCodec;
import opennlp.tools.util.featuregen.AdaptiveFeatureGenerator;

/**
 * Adapts an {@link ObjectStream} of {@link NameSample}s to the
 * {@link SequenceStream} interface used by sequence-based trainers:
 * each sample is turned into a {@link Sequence} of outcome/context
 * {@link Event}s via the configured {@link NameContextGenerator}
 * and {@link SequenceCodec}.
 */
public class NameSampleSequenceStream implements SequenceStream {

  // Context generator producing the feature strings for each token.
  private NameContextGenerator pcg;
  // When false, previously predicted outcomes are withheld from the
  // context generator (tags argument passed as null in read()).
  private final boolean useOutcomes;
  // Source of the name samples being adapted.
  private ObjectStream<NameSample> psi;
  // Codec translating Span names <-> per-token outcome tags.
  private SequenceCodec<String> seqCodec;

  /**
   * Creates a stream with a default context generator, outcomes enabled
   * and the BIO codec.
   *
   * @param psi the underlying sample stream
   */
  public NameSampleSequenceStream(ObjectStream<NameSample> psi) throws IOException {
    this(psi, new DefaultNameContextGenerator((AdaptiveFeatureGenerator) null), true);
  }

  /**
   * Creates a stream with the given feature generator, outcomes enabled
   * and the BIO codec.
   */
  public NameSampleSequenceStream(ObjectStream<NameSample> psi, AdaptiveFeatureGenerator featureGen)
      throws IOException {
    this(psi, new DefaultNameContextGenerator(featureGen), true);
  }

  /**
   * Creates a stream with the given feature generator and outcome setting,
   * using the BIO codec.
   */
  public NameSampleSequenceStream(ObjectStream<NameSample> psi, AdaptiveFeatureGenerator featureGen,
      boolean useOutcomes) throws IOException {
    this(psi, new DefaultNameContextGenerator(featureGen), useOutcomes);
  }

  /**
   * Creates a stream with the given context generator, outcomes enabled
   * and the BIO codec.
   */
  public NameSampleSequenceStream(ObjectStream<NameSample> psi, NameContextGenerator pcg)
      throws IOException {
    this(psi, pcg, true);
  }

  /**
   * Creates a stream with the given context generator and outcome setting,
   * using the BIO codec.
   */
  public NameSampleSequenceStream(ObjectStream<NameSample> psi, NameContextGenerator pcg,
      boolean useOutcomes) throws IOException {
    this(psi, pcg, useOutcomes, new BioCodec());
  }

  /**
   * Fully-parameterized constructor; all other constructors delegate here.
   *
   * @param psi the underlying sample stream
   * @param pcg the context generator
   * @param useOutcomes whether predicted outcomes are fed back as context
   * @param seqCodec codec translating names to per-token tags
   */
  public NameSampleSequenceStream(ObjectStream<NameSample> psi, NameContextGenerator pcg,
      boolean useOutcomes, SequenceCodec<String> seqCodec) throws IOException {
    this.psi = psi;
    this.useOutcomes = useOutcomes;
    this.pcg = pcg;
    this.seqCodec = seqCodec;
  }

  /**
   * Re-tags the sequence's sentence with a name finder built from the given
   * model and regenerates the training events from the predicted tags.
   */
  @SuppressWarnings("unchecked")
  public Event[] updateContext(Sequence sequence, AbstractModel model) {
    TokenNameFinder tagger = new NameFinderME(new TokenNameFinderModel(
        "x-unspecified", model, Collections.emptyMap(), null));
    String[] sentence = ((Sequence<NameSample>) sequence).getSource().getSentence();
    String[] tags = seqCodec.encode(tagger.find(sentence), sentence.length);
    Event[] events = new Event[sentence.length];

    NameFinderEventStream.generateEvents(sentence,tags,pcg).toArray(events);

    return events;
  }

  /**
   * Reads the next sample and converts it into a {@link Sequence} of events,
   * one event per token.
   *
   * @return the next sequence, or {@code null} when the stream is exhausted
   */
  @Override
  public Sequence read() throws IOException {
    NameSample sample = psi.read();
    if (sample != null) {
      String[] sentence = sample.getSentence();
      String[] tags = seqCodec.encode(sample.getNames(), sentence.length);
      Event[] events = new Event[sentence.length];

      for (int i = 0; i < sentence.length; i++) {

        // it is safe to pass the tags as previous tags because
        // the context generator does not look for non predicted tags
        String[] context;
        if (useOutcomes) {
          context = pcg.getContext(i, sentence, tags, null);
        }
        else {
          context = pcg.getContext(i, sentence, null, null);
        }

        events[i] = new Event(tags[i], context);
      }
      return new Sequence<>(events,sample);
    }
    else {
      return null;
    }
  }

  @Override
  public void reset() throws IOException, UnsupportedOperationException {
    psi.reset();
  }

  @Override
  public void close() throws IOException {
    psi.close();
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/namefind/NameSampleTypeFilter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.namefind; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; import opennlp.tools.util.FilterObjectStream; import opennlp.tools.util.ObjectStream; import opennlp.tools.util.Span; /** * A stream which removes Name Samples which do not have a certain type. 
*/
public class NameSampleTypeFilter extends FilterObjectStream<NameSample, NameSample> {

  private final Set<String> types;

  public NameSampleTypeFilter(String[] types, ObjectStream<NameSample> samples) {
    super(samples);
    this.types = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(types)));
  }

  public NameSampleTypeFilter(Set<String> types, ObjectStream<NameSample> samples) {
    super(samples);
    this.types = Collections.unmodifiableSet(new HashSet<>(types));
  }

  /**
   * Reads the next sample and drops every name span whose type is not in
   * the configured type set.
   *
   * @return the filtered sample, or {@code null} if the stream is exhausted
   * @throws IOException if reading from the underlying stream fails
   */
  public NameSample read() throws IOException {
    NameSample sample = samples.read();

    if (sample == null) {
      return null;
    }

    List<Span> retained = new ArrayList<>();
    for (Span candidate : sample.getNames()) {
      if (types.contains(candidate.getType())) {
        retained.add(candidate);
      }
    }

    // additionalContext is intentionally passed as null, matching the
    // behavior of the unfiltered stream.
    return new NameSample(sample.getId(), sample.getSentence(),
        retained.toArray(new Span[retained.size()]), null, sample.isClearAdaptiveDataSet());
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/namefind/RegexNameFinder.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.namefind; import java.util.Collection; import java.util.HashMap; import java.util.LinkedList; import java.util.Map; import java.util.Objects; import java.util.regex.Matcher; import java.util.regex.Pattern; import opennlp.tools.util.Span; /** * Name finder based on a series of regular expressions. 
*/ public final class RegexNameFinder implements TokenNameFinder { private Pattern[] mPatterns; private String sType; private Map<String, Pattern[]> regexMap; public RegexNameFinder(Map<String, Pattern[]> regexMap) { this.regexMap = Objects.requireNonNull(regexMap, "regexMap must not be null"); } public RegexNameFinder(Pattern[] patterns, String type) { if (patterns == null || patterns.length == 0) { throw new IllegalArgumentException("patterns must not be null or empty!"); } mPatterns = patterns; sType = type; } /** * use constructor {@link #RegexNameFinder(Pattern[], String)} * for single types, and/or constructor * {@link #RegexNameFinder(Map)} */ @Deprecated public RegexNameFinder(Pattern[] patterns) { if (patterns == null || patterns.length == 0) { throw new IllegalArgumentException("patterns must not be null or empty!"); } mPatterns = patterns; sType = null; } @Override public Span[] find(String[] tokens) { Map<Integer, Integer> sentencePosTokenMap = new HashMap<>(); StringBuilder sentenceString = new StringBuilder(tokens.length * 10); for (int i = 0; i < tokens.length; i++) { int startIndex = sentenceString.length(); sentencePosTokenMap.put(startIndex, i); sentenceString.append(tokens[i]); int endIndex = sentenceString.length(); sentencePosTokenMap.put(endIndex, i + 1); if (i < tokens.length - 1) { sentenceString.append(' '); } } Collection<Span> annotations = new LinkedList<>(); if (regexMap != null) { for (Map.Entry<String, Pattern[]> entry : regexMap.entrySet()) { for (Pattern mPattern : entry.getValue()) { Matcher matcher = mPattern.matcher(sentenceString); while (matcher.find()) { Integer tokenStartIndex = sentencePosTokenMap.get(matcher.start()); Integer tokenEndIndex = sentencePosTokenMap.get(matcher.end()); if (tokenStartIndex != null && tokenEndIndex != null) { Span annotation = new Span(tokenStartIndex, tokenEndIndex, entry.getKey()); annotations.add(annotation); } } } } } else { for (Pattern mPattern : mPatterns) { Matcher matcher = 
mPattern.matcher(sentenceString); while (matcher.find()) { Integer tokenStartIndex = sentencePosTokenMap.get(matcher.start()); Integer tokenEndIndex = sentencePosTokenMap.get(matcher.end()); if (tokenStartIndex != null && tokenEndIndex != null) { Span annotation = new Span(tokenStartIndex, tokenEndIndex, sType); annotations.add(annotation); } } } } return annotations.toArray( new Span[annotations.size()]); } /** * NEW. This method removes the need for tokenization, but returns the Span * with character indices, rather than word. * * @param text * @return */ public Span[] find(String text) { return getAnnotations(text); } private Span[] getAnnotations(String text) { Collection<Span> annotations = new LinkedList<>(); if (regexMap != null) { for (Map.Entry<String, Pattern[]> entry : regexMap.entrySet()) { for (Pattern mPattern : entry.getValue()) { Matcher matcher = mPattern.matcher(text); while (matcher.find()) { Integer tokenStartIndex = matcher.start(); Integer tokenEndIndex = matcher.end(); Span annotation = new Span(tokenStartIndex, tokenEndIndex, entry.getKey()); annotations.add(annotation); } } } } else { for (Pattern mPattern : mPatterns) { Matcher matcher = mPattern.matcher(text); while (matcher.find()) { Integer tokenStartIndex = matcher.start(); Integer tokenEndIndex = matcher.end(); Span annotation = new Span(tokenStartIndex, tokenEndIndex, sType); annotations.add(annotation); } } } return annotations.toArray( new Span[annotations.size()]); } @Override public void clearAdaptiveData() { // nothing to clear } public Pattern[] getmPatterns() { return mPatterns; } public void setmPatterns(Pattern[] mPatterns) { this.mPatterns = mPatterns; } public String getsType() { return sType; } public void setsType(String sType) { this.sType = sType; } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/namefind/RegexNameFinderFactory.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.namefind;

import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.regex.Pattern;

/**
 *
 * Returns a RegexNameFinder based on A selection of
 * defaults or a configuration and a selection of defaults
 */
public class RegexNameFinderFactory {

  /**
   * Allows for use of selected Defaults as well as regexes from external
   * configuration
   *
   * @param config a map where the key is a type, and the value is a
   *               Pattern[]. If the keys clash with default keys, the config
   *               map will win
   * @param defaults the OpenNLP default regexes
   * @return {@link RegexNameFinder}
   */
  public static synchronized RegexNameFinder getDefaultRegexNameFinders(
      Map<String, Pattern[]> config, DEFAULT_REGEX_NAME_FINDER... defaults) {
    Objects.requireNonNull(config, "config must not be null");
    Map<String, Pattern[]> defaultsToMap = new HashMap<>();
    if (defaults != null) {
      defaultsToMap = defaultsToMap(defaults);
    }
    // config entries overwrite any clashing default keys
    defaultsToMap.putAll(config);
    return new RegexNameFinder(defaultsToMap);
  }

  /**
   * Returns a RegexNamefinder that will utilize specified default regexes.
   *
   * @param defaults the OpenNLP default regexes
   * @return {@link RegexNameFinder}
   */
  public static synchronized RegexNameFinder getDefaultRegexNameFinders(
      DEFAULT_REGEX_NAME_FINDER... defaults) {
    Objects.requireNonNull(defaults, "defaults must not be null");
    return new RegexNameFinder(defaultsToMap(defaults));
  }

  // Flattens the selected defaults into a single type -> patterns map.
  // Later defaults overwrite earlier ones when their type keys clash.
  private synchronized static Map<String, Pattern[]> defaultsToMap(
      DEFAULT_REGEX_NAME_FINDER... defaults) {
    Map<String, Pattern[]> regexMap = new HashMap<>();
    for (DEFAULT_REGEX_NAME_FINDER def : defaults) {
      regexMap.putAll(def.getRegexMap());
    }
    return regexMap;
  }

  /** Contract for enum members that can supply their own regex map. */
  public interface RegexAble {

    Map<String, Pattern[]> getRegexMap();

    String getType();
  }

  /**
   * Built-in regex providers. Each member returns a single-entry map from
   * its type name to the patterns that recognize that entity.
   */
  public enum DEFAULT_REGEX_NAME_FINDER implements RegexAble {

    // NOTE(review): the phone pattern only covers the North American
    // 3-3-4 format (optionally parenthesized area code); international
    // formats will not match.
    USA_PHONE_NUM {
      @Override
      public Map<String, Pattern[]> getRegexMap() {
        Pattern[] p = new Pattern[1];
        // p[0] = Pattern.compile("([\\+(]?(\\d){2,}[)]?[- \\.]?(\\d){2,}[- \\.]?(\\d){2,}[- \\.]?
        // (\\d){2,}[- \\.]?(\\d){2,})|([\\+(]?(\\d){2,}[)]?[- \\.]?(\\d){2,}[- \\.]?(\\d){2,}[-
        // \\.]?(\\d){2,})|([\\+(]?(\\d){2,}[)]?[- \\.]?(\\d){2,}[- \\.]?(\\d){2,})",
        // Pattern.CASE_INSENSITIVE);
        p[0] = Pattern.compile("((\\(\\d{3}\\) ?)|(\\d{3}-))?\\d{3}-\\d{4}");
        Map<String, Pattern[]> regexMap = new HashMap<>();
        regexMap.put(getType(), p);
        return regexMap;
      }

      @Override
      public String getType() {
        return "PHONE_NUM";
      }
    },
    EMAIL {
      @Override
      public Map<String, Pattern[]> getRegexMap() {
        Pattern[] p = new Pattern[1];
        // RFC-5322-style address pattern, including quoted local parts
        // and bracketed IP-literal domains.
        p[0] = Pattern.compile("([a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*"
            + "|\"([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x21\\x23-\\x5b\\x5d-\\x7f]|\\\\[\\x01-\\x09"
            + "\\x0b\\x0c\\x0e-\\x7f])*\")@(?:(?:[a-z0-9]([a-z0-9-]*[a-z0-9])?\\.)+[a-z0-9](?:[a-z0-9-]"
            + "*[a-z0-9])?|\\[((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]"
            + "?[0-9][0-9]?|[a-z0-9-]*[a-z0-9]:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x21-\\x5a\\x53-\\x7f]"
            + "|\\\\[\\x01-\\x09\\x0b\\x0c\\x0e-\\x7f])+)\\])", Pattern.CASE_INSENSITIVE);
        Map<String, Pattern[]> regexMap = new HashMap<>();
        regexMap.put(getType(), p);
        return regexMap;
      }

      @Override
      public String getType() {
        return "EMAIL";
      }
    },
    URL {
      @Override
      public Map<String, Pattern[]> getRegexMap() {
        Pattern[] p = new Pattern[1];
        // Matches http/https/ftp URLs, www-prefixed hosts and absolute
        // paths, with optional port, path, query and fragment.
        p[0] = Pattern.compile("\\b(((ht|f)tp(s?)\\:\\/\\/|~\\/|\\/)|www.)"
            + "(\\w+:\\w+@)?(([-\\w]+\\.)+(com|org|net|gov"
            + "|mil|biz|info|mobi|name|aero|jobs|museum"
            + "|travel|[a-z]{2}))(:[\\d]{1,5})?"
            + "(((\\/([-\\w~!$+|.,=]|%[a-f\\d]{2})+)+|\\/)+|\\?|#)?"
            + "((\\?([-\\w~!$+|.,*:]|%[a-f\\d{2}])+=?"
            + "([-\\w~!$+|.,*:=]|%[a-f\\d]{2})*)"
            + "(&(?:[-\\w~!$+|.,*:]|%[a-f\\d{2}])+=?"
            + "([-\\w~!$+|.,*:=]|%[a-f\\d]{2})*)*)*"
            + "(#([-\\w~!$+|.,*:=]|%[a-f\\d]{2})*)?\\b", Pattern.CASE_INSENSITIVE);
        Map<String, Pattern[]> regexMap = new HashMap<>();
        regexMap.put(getType(), p);
        return regexMap;
      }

      @Override
      public String getType() {
        return "URL";
      }
    },
    MGRS {
      @Override
      public Map<String, Pattern[]> getRegexMap() {
        Pattern[] p = new Pattern[1];
        // Military Grid Reference System coordinate: zone, band, 100km
        // square identifier, then easting/northing digit groups.
        p[0] = Pattern.compile("\\d{1,2}[A-Za-z]\\s*[A-Za-z]{2}\\s*\\d{1,5}\\s*\\d{1,5}",
            Pattern.CASE_INSENSITIVE);
        Map<String, Pattern[]> regexMap = new HashMap<>();
        regexMap.put(getType(), p);
        return regexMap;
      }

      @Override
      public String getType() {
        return "MGRS";
      }
    },
    DEGREES_MIN_SEC_LAT_LON {
      @Override
      public Map<String, Pattern[]> getRegexMap() {
        Pattern[] p = new Pattern[1];
        // Latitude/longitude in degrees-minutes-seconds with optional
        // hemisphere letters and unicode degree/quote characters.
        p[0] = Pattern.compile("([-|\\+]?\\d{1,3}[d|D|\\u00B0|\\s](\\s*\\d{1,2}['|\\u2019|\\s])"
            + "?(\\s*\\d{1,2}[\\\"|\\u201d])?\\s*[N|n|S|s]?)(\\s*|,|,\\s*)([-|\\+]?\\d{1,3}[d|D|\\u00B0|"
            + "\\s](\\s*\\d{1,2}['|\\u2019|\\s])?(\\s*\\d{1,2}[\\\"|\\u201d])?\\s*[E|e|W|w]?)",
            Pattern.CASE_INSENSITIVE);
        Map<String, Pattern[]> regexMap = new HashMap<>();
        regexMap.put(getType(), p);
        return regexMap;
      }

      @Override
      public String getType() {
        return "DEGREES_MIN_SEC_LAT_LON";
      }
    }
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/namefind/TokenNameFinder.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.namefind;

import opennlp.tools.util.Span;

/**
 * The interface for name finders which provide name tags for a sequence of tokens.
 */
public interface TokenNameFinder {

  /**
   * Generates name tags for the given sequence, typically a sentence,
   * returning token spans for any identified names.
   *
   * @param tokens an array of the tokens or words of the sequence, typically a sentence.
   * @return an array of spans for each of the names identified.
   */
  Span[] find(String[] tokens);

  /**
   * Forgets all adaptive data which was collected during previous
   * calls to one of the find methods.
   *
   * This method is typically called at the end of a document.
   */
  void clearAdaptiveData();
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/namefind/TokenNameFinderCrossValidator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.namefind;

import java.io.IOException;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

import opennlp.tools.util.FilterObjectStream;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.SequenceCodec;
import opennlp.tools.util.TrainingParameters;
import opennlp.tools.util.eval.CrossValidationPartitioner;
import opennlp.tools.util.eval.FMeasure;

/**
 * Performs n-fold cross validation of a name finder: the samples are
 * partitioned on a document basis, a model is trained on each training
 * partition, and the held-out partition is evaluated with a
 * {@link TokenNameFinderEvaluator}.
 */
public class TokenNameFinderCrossValidator {

  /** Groups the consecutive {@link NameSample}s of one document. */
  private static class DocumentSample implements Serializable {

    private NameSample[] samples;

    DocumentSample(NameSample[] samples) {
      this.samples = samples;
    }

    private NameSample[] getSamples() {
      return samples;
    }
  }

  /**
   * Reads Name Samples to group them as a document based on the clear adaptive data flag.
   */
  private static class NameToDocumentSampleStream
      extends FilterObjectStream<NameSample, DocumentSample> {

    // First sample of the next document, read ahead while scanning for
    // the document boundary of the current one.
    private NameSample beginSample;

    protected NameToDocumentSampleStream(ObjectStream<NameSample> samples) {
      super(samples);
    }

    public DocumentSample read() throws IOException {
      List<NameSample> document = new ArrayList<>();

      if (beginSample == null) {
        // Assume that the clear flag is set
        beginSample = samples.read();
      }

      // Underlying stream is exhausted!
      if (beginSample == null) {
        return null;
      }

      document.add(beginSample);

      // Collect samples until the next clear-adaptive-data flag, which
      // marks the first sample of the following document.
      NameSample sample;
      while ((sample = samples.read()) != null) {
        if (sample.isClearAdaptiveDataSet()) {
          beginSample = sample;
          break;
        }
        document.add(sample);
      }

      // Underlying stream is exhausted,
      // next call must return null
      if (sample == null) {
        beginSample = null;
      }

      return new DocumentSample(document.toArray(new NameSample[document.size()]));
    }

    @Override
    public void reset() throws IOException, UnsupportedOperationException {
      super.reset();
      beginSample = null;
    }
  }

  /**
   * Splits DocumentSample into NameSamples.
   */
  private static class DocumentToNameSampleStream
      extends FilterObjectStream<DocumentSample, NameSample> {

    protected DocumentToNameSampleStream(ObjectStream<DocumentSample> samples) {
      super(samples);
    }

    private Iterator<NameSample> documentSamples = Collections.<NameSample>emptyList().iterator();

    public NameSample read() throws IOException {
      // Note: Empty document samples should be skipped
      if (documentSamples.hasNext()) {
        return documentSamples.next();
      } else {
        DocumentSample docSample = samples.read();
        if (docSample != null) {
          documentSamples = Arrays.asList(docSample.getSamples()).iterator();
          return read();
        } else {
          return null;
        }
      }
    }
  }

  private final String languageCode;
  private final TrainingParameters params;
  private final String type;
  private byte[] featureGeneratorBytes;
  private Map<String, Object> resources;
  private TokenNameFinderEvaluationMonitor[] listeners;
  // Codec used when building the factory in evaluate(); null when a
  // pre-built factory is supplied instead.
  private SequenceCodec<String> codec;

  // Aggregate F-measure merged across all folds.
  private FMeasure fmeasure = new FMeasure();
  private TokenNameFinderFactory factory;
  // Per-fold F-measures, in fold order.
  private List<FMeasure> fmeasures = new LinkedList<>();

  /**
   * Name finder cross validator
   *
   * @param languageCode
   *          the language of the training data
   * @param type
   *          null or an override type for all types in the training data
   * @param trainParams
   *          machine learning train parameters
   * @param featureGeneratorBytes
   *          descriptor to configure the feature generation or null
   * @param listeners
   *          a list of listeners
   * @param resources
   *          the resources for the name finder or null if none
   */
  public TokenNameFinderCrossValidator(String languageCode, String type,
      TrainingParameters trainParams, byte[] featureGeneratorBytes,
      Map<String, Object> resources, SequenceCodec<String> codec,
      TokenNameFinderEvaluationMonitor... listeners) {
    this.languageCode = languageCode;
    this.type = type;
    this.featureGeneratorBytes = featureGeneratorBytes;
    this.resources = resources;
    this.params = trainParams;
    // BUGFIX: the codec parameter was previously ignored and evaluate()
    // always used a BioCodec; now the supplied codec is honored.
    this.codec = codec;
    this.listeners = listeners;
  }

  public TokenNameFinderCrossValidator(String languageCode, String type,
      TrainingParameters trainParams, byte[] featureGeneratorBytes,
      Map<String, Object> resources,
      TokenNameFinderEvaluationMonitor... listeners) {
    this(languageCode, type, trainParams, featureGeneratorBytes, resources,
        new BioCodec(), listeners);
  }

  public TokenNameFinderCrossValidator(String languageCode, String type,
      TrainingParameters trainParams, TokenNameFinderFactory factory,
      TokenNameFinderEvaluationMonitor... listeners) {
    this.languageCode = languageCode;
    this.type = type;
    this.params = trainParams;
    this.factory = factory;
    this.listeners = listeners;
  }

  /**
   * Starts the evaluation.
   *
   * @param samples
   *          the data to train and test
   * @param nFolds
   *          number of folds
   * @throws IOException
   */
  public void evaluate(ObjectStream<NameSample> samples, int nFolds) throws IOException {

    // Note: The name samples need to be grouped on a document basis.
    CrossValidationPartitioner<DocumentSample> partitioner = new CrossValidationPartitioner<>(
        new NameToDocumentSampleStream(samples), nFolds);

    while (partitioner.hasNext()) {

      CrossValidationPartitioner.TrainingSampleStream<DocumentSample> trainingSampleStream =
          partitioner.next();

      TokenNameFinderModel model;
      if (factory != null) {
        model = NameFinderME.train(languageCode, type,
            new DocumentToNameSampleStream(trainingSampleStream), params, factory);
      } else {
        // Fall back to BioCodec only when no codec was configured.
        SequenceCodec<String> foldCodec = codec != null ? codec : new BioCodec();
        model = NameFinderME.train(languageCode, type,
            new DocumentToNameSampleStream(trainingSampleStream), params,
            TokenNameFinderFactory.create(null, featureGeneratorBytes, resources, foldCodec));
      }

      // do testing
      TokenNameFinderEvaluator evaluator = new TokenNameFinderEvaluator(
          new NameFinderME(model), listeners);

      evaluator.evaluate(new DocumentToNameSampleStream(trainingSampleStream.getTestSampleStream()));

      fmeasure.mergeInto(evaluator.getFMeasure());
      // BUGFIX: record this fold's own score; previously the same
      // cumulative fmeasure object was added once per fold, so
      // getFMeasures() returned N references to one aggregate.
      fmeasures.add(evaluator.getFMeasure());
    }
  }

  /** @return the F-measure aggregated over all folds */
  public FMeasure getFMeasure() {
    return fmeasure;
  }

  /** @return the individual per-fold F-measures */
  public List<FMeasure> getFMeasures() {
    return fmeasures;
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/namefind/TokenNameFinderEvaluationMonitor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.namefind;

import opennlp.tools.util.eval.EvaluationMonitor;

/**
 * Marker interface for listeners that observe {@link NameSample}
 * evaluation results, e.g. during cross validation. No methods are added
 * beyond those inherited from {@link EvaluationMonitor}.
 */
public interface TokenNameFinderEvaluationMonitor extends EvaluationMonitor<NameSample> {
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/namefind/TokenNameFinderEvaluator.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.namefind; import opennlp.tools.util.Span; import opennlp.tools.util.eval.Evaluator; import opennlp.tools.util.eval.FMeasure; /** * The {@link TokenNameFinderEvaluator} measures the performance * of the given {@link TokenNameFinder} with the provided * reference {@link NameSample}s. * * @see Evaluator * @see TokenNameFinder * @see NameSample */ public class TokenNameFinderEvaluator extends Evaluator<NameSample> { private FMeasure fmeasure = new FMeasure(); /** * The {@link TokenNameFinder} used to create the predicted * {@link NameSample} objects. */ private TokenNameFinder nameFinder; /** * Initializes the current instance with the given * {@link TokenNameFinder}. * * @param nameFinder the {@link TokenNameFinder} to evaluate. * @param listeners evaluation sample listeners */ public TokenNameFinderEvaluator(TokenNameFinder nameFinder, TokenNameFinderEvaluationMonitor ... listeners) { super(listeners); this.nameFinder = nameFinder; } /** * Evaluates the given reference {@link NameSample} object. * * This is done by finding the names with the * {@link TokenNameFinder} in the sentence from the reference * {@link NameSample}. 
The found names are then used to * calculate and update the scores. * * @param reference the reference {@link NameSample}. * * @return the predicted {@link NameSample}. */ @Override protected NameSample processSample(NameSample reference) { if (reference.isClearAdaptiveDataSet()) { nameFinder.clearAdaptiveData(); } Span[] predictedNames = nameFinder.find(reference.getSentence()); Span[] references = reference.getNames(); // OPENNLP-396 When evaluating with a file in the old format // the type of the span is null, but must be set to default to match // the output of the name finder. for (int i = 0; i < references.length; i++) { if (references[i].getType() == null) { references[i] = new Span(references[i].getStart(), references[i].getEnd(), "default"); } } fmeasure.updateScores(references, predictedNames); return new NameSample(reference.getSentence(), predictedNames, reference.isClearAdaptiveDataSet()); } public FMeasure getFMeasure() { return fmeasure; } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/namefind/TokenNameFinderFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.namefind; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.util.Map; import opennlp.tools.namefind.TokenNameFinderModel.FeatureGeneratorCreationError; import opennlp.tools.util.BaseToolFactory; import opennlp.tools.util.InvalidFormatException; import opennlp.tools.util.SequenceCodec; import opennlp.tools.util.ext.ExtensionLoader; import opennlp.tools.util.featuregen.AdaptiveFeatureGenerator; import opennlp.tools.util.featuregen.AggregatedFeatureGenerator; import opennlp.tools.util.featuregen.BigramNameFeatureGenerator; import opennlp.tools.util.featuregen.CachedFeatureGenerator; import opennlp.tools.util.featuregen.GeneratorFactory; import opennlp.tools.util.featuregen.OutcomePriorFeatureGenerator; import opennlp.tools.util.featuregen.PreviousMapFeatureGenerator; import opennlp.tools.util.featuregen.SentenceFeatureGenerator; import opennlp.tools.util.featuregen.TokenClassFeatureGenerator; import opennlp.tools.util.featuregen.TokenFeatureGenerator; import opennlp.tools.util.featuregen.WindowFeatureGenerator; // Idea of this factory is that most resources/impls used by the name finder // 
can be modified through this class! // That only works if that's the central class used for training/runtime public class TokenNameFinderFactory extends BaseToolFactory { private byte[] featureGeneratorBytes; private Map<String, Object> resources; private SequenceCodec<String> seqCodec; /** * Creates a {@link TokenNameFinderFactory} that provides the default implementation * of the resources. */ public TokenNameFinderFactory() { this.seqCodec = new BioCodec(); } public TokenNameFinderFactory(byte[] featureGeneratorBytes, final Map<String, Object> resources, SequenceCodec<String> seqCodec) { init(featureGeneratorBytes, resources, seqCodec); } void init(byte[] featureGeneratorBytes, final Map<String, Object> resources, SequenceCodec<String> seqCodec) { this.featureGeneratorBytes = featureGeneratorBytes; this.resources = resources; this.seqCodec = seqCodec; } private static byte[] loadDefaultFeatureGeneratorBytes() { ByteArrayOutputStream bytes = new ByteArrayOutputStream(); try (InputStream in = TokenNameFinderFactory.class.getResourceAsStream( "/opennlp/tools/namefind/ner-default-features.xml")) { if (in == null) { throw new IllegalStateException("Classpath must contain ner-default-features.xml file!"); } byte[] buf = new byte[1024]; int len; while ((len = in.read(buf)) > 0) { bytes.write(buf, 0, len); } } catch (IOException e) { throw new IllegalStateException("Failed reading from ner-default-features.xml file on classpath!"); } return bytes.toByteArray(); } protected SequenceCodec<String> getSequenceCodec() { return seqCodec; } protected Map<String, Object> getResources() { return resources; } protected byte[] getFeatureGenerator() { return featureGeneratorBytes; } public static TokenNameFinderFactory create(String subclassName, byte[] featureGeneratorBytes, final Map<String, Object> resources, SequenceCodec<String> seqCodec) throws InvalidFormatException { TokenNameFinderFactory theFactory; if (subclassName == null) { // will create the default factory theFactory 
= new TokenNameFinderFactory(); } else { try { theFactory = ExtensionLoader.instantiateExtension( TokenNameFinderFactory.class, subclassName); } catch (Exception e) { String msg = "Could not instantiate the " + subclassName + ". The initialization throw an exception."; System.err.println(msg); e.printStackTrace(); throw new InvalidFormatException(msg, e); } } theFactory.init(featureGeneratorBytes, resources, seqCodec); return theFactory; } @Override public void validateArtifactMap() throws InvalidFormatException { // no additional artifacts } public SequenceCodec<String> createSequenceCodec() { if (artifactProvider != null) { String sequeceCodecImplName = artifactProvider.getManifestProperty( TokenNameFinderModel.SEQUENCE_CODEC_CLASS_NAME_PARAMETER); return instantiateSequenceCodec(sequeceCodecImplName); } else { return seqCodec; } } public NameContextGenerator createContextGenerator() { AdaptiveFeatureGenerator featureGenerator = createFeatureGenerators(); if (featureGenerator == null) { featureGenerator = new CachedFeatureGenerator( new WindowFeatureGenerator(new TokenFeatureGenerator(), 2, 2), new WindowFeatureGenerator(new TokenClassFeatureGenerator(true), 2, 2), new OutcomePriorFeatureGenerator(), new PreviousMapFeatureGenerator(), new BigramNameFeatureGenerator(), new SentenceFeatureGenerator(true, false)); } return new DefaultNameContextGenerator(featureGenerator); } /** * Creates the {@link AdaptiveFeatureGenerator}. Usually this * is a set of generators contained in the {@link AggregatedFeatureGenerator}. * * Note: * The generators are created on every call to this method. 
* * @return the feature generator or null if there is no descriptor in the model */ public AdaptiveFeatureGenerator createFeatureGenerators() { if (featureGeneratorBytes == null && artifactProvider != null) { featureGeneratorBytes = artifactProvider.getArtifact( TokenNameFinderModel.GENERATOR_DESCRIPTOR_ENTRY_NAME); } if (featureGeneratorBytes == null) { featureGeneratorBytes = loadDefaultFeatureGeneratorBytes(); } InputStream descriptorIn = new ByteArrayInputStream(featureGeneratorBytes); AdaptiveFeatureGenerator generator; try { generator = GeneratorFactory.create(descriptorIn, key -> { if (artifactProvider != null) { return artifactProvider.getArtifact(key); } else { return resources.get(key); } }); } catch (InvalidFormatException e) { // It is assumed that the creation of the feature generation does not // fail after it succeeded once during model loading. // But it might still be possible that such an exception is thrown, // in this case the caller should not be forced to handle the exception // and a Runtime Exception is thrown instead. // If the re-creation of the feature generation fails it is assumed // that this can only be caused by a programming mistake and therefore // throwing a Runtime Exception is reasonable throw new FeatureGeneratorCreationError(e); } catch (IOException e) { throw new IllegalStateException("Reading from mem cannot result in an I/O error", e); } return generator; } public static SequenceCodec<String> instantiateSequenceCodec( String sequenceCodecImplName) { if (sequenceCodecImplName != null) { return ExtensionLoader.instantiateExtension( SequenceCodec.class, sequenceCodecImplName); } else { // If nothing is specified return old default! return new BioCodec(); } } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/namefind/TokenNameFinderModel.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.namefind;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.nio.file.Path;
import java.util.Map;
import java.util.Properties;

import opennlp.tools.ml.BeamSearch;
import opennlp.tools.ml.model.MaxentModel;
import opennlp.tools.ml.model.SequenceClassificationModel;
import opennlp.tools.util.BaseToolFactory;
import opennlp.tools.util.InvalidFormatException;
import opennlp.tools.util.SequenceCodec;
import opennlp.tools.util.featuregen.BrownCluster;
import opennlp.tools.util.featuregen.WordClusterDictionary;
import opennlp.tools.util.model.ArtifactSerializer;
import opennlp.tools.util.model.BaseModel;
import opennlp.tools.util.model.ByteArraySerializer;

/**
 * The {@link TokenNameFinderModel} is the model used
 * by a learnable {@link TokenNameFinder}.
 *
 * @see NameFinderME
 */
// TODO: Fix the model validation, on loading via constructors and input streams
public class TokenNameFinderModel extends BaseModel {

  /**
   * Unchecked wrapper thrown when re-creating the feature generators from an
   * already-loaded model fails (treated as a programming error, see
   * TokenNameFinderFactory#createFeatureGenerators).
   */
  public static class FeatureGeneratorCreationError extends RuntimeException {
    FeatureGeneratorCreationError(Throwable t) {
      super(t);
    }
  }

  private static final String COMPONENT_NAME = "NameFinderME";
  // Artifact-map entry under which the classification model is stored.
  private static final String MAXENT_MODEL_ENTRY_NAME = "nameFinder.model";

  static final String GENERATOR_DESCRIPTOR_ENTRY_NAME = "generator.featuregen";

  static final String SEQUENCE_CODEC_CLASS_NAME_PARAMETER = "sequenceCodecImplName";

  /**
   * Creates a model around an already-trained sequence classification model.
   * Fails fast if the model outcomes are not understood by the codec.
   */
  public TokenNameFinderModel(String languageCode,
      SequenceClassificationModel<String> nameFinderModel, byte[] generatorDescriptor,
      Map<String, Object> resources, Map<String, String> manifestInfoEntries,
      SequenceCodec<String> seqCodec, TokenNameFinderFactory factory) {
    super(COMPONENT_NAME, languageCode, manifestInfoEntries, factory);

    init(nameFinderModel, generatorDescriptor, resources, manifestInfoEntries, seqCodec);

    if (!seqCodec.areOutcomesCompatible(nameFinderModel.getOutcomes())) {
      throw new IllegalArgumentException("Model not compatible with name finder!");
    }
  }

  /**
   * Creates a model around a maxent model; the beam size is recorded in the
   * manifest so decoding can reconstruct the same beam search.
   */
  public TokenNameFinderModel(String languageCode, MaxentModel nameFinderModel, int beamSize,
      byte[] generatorDescriptor, Map<String, Object> resources,
      Map<String, String> manifestInfoEntries,
      SequenceCodec<String> seqCodec, TokenNameFinderFactory factory) {
    super(COMPONENT_NAME, languageCode, manifestInfoEntries, factory);

    Properties manifest = (Properties) artifactMap.get(MANIFEST_ENTRY);
    manifest.put(BeamSearch.BEAM_SIZE_PARAMETER, Integer.toString(beamSize));

    init(nameFinderModel, generatorDescriptor, resources, manifestInfoEntries, seqCodec);

    if (!isModelValid(nameFinderModel)) {
      throw new IllegalArgumentException("Model not compatible with name finder!");
    }
  }

  // TODO: Extend this one with beam size!
  public TokenNameFinderModel(String languageCode, MaxentModel nameFinderModel,
      byte[] generatorDescriptor, Map<String, Object> resources,
      Map<String, String> manifestInfoEntries) {
    this(languageCode, nameFinderModel, NameFinderME.DEFAULT_BEAM_SIZE,
        generatorDescriptor, resources, manifestInfoEntries, new BioCodec(),
        new TokenNameFinderFactory());
  }

  public TokenNameFinderModel(String languageCode, MaxentModel nameFinderModel,
      Map<String, Object> resources, Map<String, String> manifestInfoEntries) {
    this(languageCode, nameFinderModel, null, resources, manifestInfoEntries);
  }

  public TokenNameFinderModel(InputStream in) throws IOException {
    super(COMPONENT_NAME, in);
  }

  public TokenNameFinderModel(File modelFile) throws IOException {
    super(COMPONENT_NAME, modelFile);
  }

  public TokenNameFinderModel(Path modelPath) throws IOException {
    this(modelPath.toFile());
  }

  public TokenNameFinderModel(URL modelURL) throws IOException {
    super(COMPONENT_NAME, modelURL);
  }

  /**
   * Populates the artifact map with the model, optional generator descriptor
   * and additional resources, then validates the map.
   */
  private void init(Object nameFinderModel, byte[] generatorDescriptor,
      Map<String, Object> resources, Map<String, String> manifestInfoEntries,
      SequenceCodec<String> seqCodec) {

    Properties manifest = (Properties) artifactMap.get(MANIFEST_ENTRY);
    manifest.put(SEQUENCE_CODEC_CLASS_NAME_PARAMETER, seqCodec.getClass().getName());

    artifactMap.put(MAXENT_MODEL_ENTRY_NAME, nameFinderModel);

    if (generatorDescriptor != null && generatorDescriptor.length > 0)
      artifactMap.put(GENERATOR_DESCRIPTOR_ENTRY_NAME, generatorDescriptor);

    if (resources != null) {
      // The resource map must not contain key which are already taken
      // like the name finder maxent model name
      if (resources.containsKey(MAXENT_MODEL_ENTRY_NAME)
          || resources.containsKey(GENERATOR_DESCRIPTOR_ENTRY_NAME)) {
        // NOTE(review): thrown without a message — callers only see a bare
        // IllegalArgumentException for a reserved-key collision.
        throw new IllegalArgumentException();
      }

      // TODO: Add checks to not put resources where no serializer exists,
      // make that case fail here, should be done in the BaseModel
      artifactMap.putAll(resources);
    }

    checkArtifactMap();
  }

  /**
   * Returns the sequence model for decoding: a maxent model is wrapped in a
   * {@link BeamSearch} (beam size from the manifest, default otherwise), a
   * sequence model is returned directly, anything else yields null.
   */
  public SequenceClassificationModel<String> getNameFinderSequenceModel() {

    Properties manifest = (Properties) artifactMap.get(MANIFEST_ENTRY);

    if (artifactMap.get(MAXENT_MODEL_ENTRY_NAME) instanceof MaxentModel) {
      String beamSizeString = manifest.getProperty(BeamSearch.BEAM_SIZE_PARAMETER);

      int beamSize = NameFinderME.DEFAULT_BEAM_SIZE;
      if (beamSizeString != null) {
        beamSize = Integer.parseInt(beamSizeString);
      }

      return new BeamSearch<>(beamSize,
          (MaxentModel) artifactMap.get(MAXENT_MODEL_ENTRY_NAME));
    }
    else if (artifactMap.get(MAXENT_MODEL_ENTRY_NAME) instanceof SequenceClassificationModel) {
      return (SequenceClassificationModel) artifactMap.get(MAXENT_MODEL_ENTRY_NAME);
    }
    else {
      return null;
    }
  }

  @Override
  protected Class<? extends BaseToolFactory> getDefaultFactory() {
    return TokenNameFinderFactory.class;
  }

  public SequenceCodec<String> getSequenceCodec() {
    return this.getFactory().getSequenceCodec();
  }

  public TokenNameFinderFactory getFactory() {
    return (TokenNameFinderFactory) this.toolFactory;
  }

  @Override
  protected void createArtifactSerializers(Map<String, ArtifactSerializer> serializers) {
    super.createArtifactSerializers(serializers);

    serializers.put("featuregen", new ByteArraySerializer());
  }

  /**
   * Create the artifact serializers. Currently for serializers related to
   * features that require external resources, such as {@code W2VClassesDictionary}
   * objects, the convention is to add its element tag name as key of the serializer map.
   * For example, the element tag name for the {@code WordClusterFeatureGenerator} which
   * uses {@code W2VClassesDictionary} objects serialized by the
   * {@code W2VClassesDictionarySerializer} is 'wordcluster', which is the key used to
   * add the serializer to the map.
   *
   * @return the map containing the added serializers
   */
  public static Map<String, ArtifactSerializer> createArtifactSerializers() {

    // TODO: Not so nice, because code cannot really be reused by the other create serializer method
    //       Has to be redesigned, we need static access to default serializers
    //       and these should be able to extend during runtime ?!
    //
    //       The XML feature generator factory should provide these mappings.
    //       Usually the feature generators should know what type of resource they expect.

    Map<String, ArtifactSerializer> serializers = BaseModel.createArtifactSerializers();

    serializers.put("featuregen", new ByteArraySerializer());
    serializers.put("wordcluster", new WordClusterDictionary.WordClusterDictionarySerializer());
    serializers.put("brownclustertoken", new BrownCluster.BrownClusterSerializer());
    serializers.put("brownclustertokenclass", new BrownCluster.BrownClusterSerializer());
    serializers.put("brownclusterbigram", new BrownCluster.BrownClusterSerializer());

    return serializers;
  }

  // A maxent model is valid when its outcome labels can be decoded by the
  // sequence codec configured in the factory.
  private boolean isModelValid(MaxentModel model) {

    String[] outcomes = new String[model.getNumOutcomes()];

    for (int i = 0; i < model.getNumOutcomes(); i++) {
      outcomes[i] = model.getOutcome(i);
    }

    return getFactory().createSequenceCodec().areOutcomesCompatible(outcomes);
  }

  @Override
  protected void validateArtifactMap() throws InvalidFormatException {
    super.validateArtifactMap();

    if (!(artifactMap.get(MAXENT_MODEL_ENTRY_NAME) instanceof MaxentModel)
        && !(artifactMap.get(MAXENT_MODEL_ENTRY_NAME) instanceof SequenceClassificationModel)) {
      throw new InvalidFormatException("Token Name Finder model is incomplete!");
    }
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/namefind/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Package related to finding proper names and numeric amounts. */ package opennlp.tools.namefind;
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/ngram/NGramGenerator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.ngram;

import java.util.ArrayList;
import java.util.List;
import java.util.StringJoiner;

/**
 * Generates an nGram, with optional separator, and returns the grams as a list
 * of strings.
 */
public class NGramGenerator {

  /**
   * Creates an ngram separated by the separator param value,
   * i.e. a,b,c,d with n = 3 and separator = "-" would return a-b-c,b-c-d.
   *
   * @param input the input tokens the output ngrams will be derived from
   * @param n the number of tokens as the sliding window
   * @param separator each string in each gram will be separated by this value
   *     if desired. Pass in empty string if no separator is desired
   * @return the list of ngrams, empty when the input has fewer than n tokens
   * @throws IllegalArgumentException if n is less than 1
   */
  public static List<String> generate(List<String> input, int n, String separator) {
    // FIX: the previous outer bound (i < size - (n - 2)) overshot and relied
    // on a redundant inner guard; also n < 1 caused a
    // StringIndexOutOfBoundsException when stripping the trailing separator.
    if (n < 1) {
      throw new IllegalArgumentException("n must be at least 1, was " + n);
    }

    List<String> outGrams = new ArrayList<>();
    // Slide a window of n tokens over the input; i + n <= size() is the
    // natural, exact bound.
    for (int i = 0; i + n <= input.size(); i++) {
      outGrams.add(String.join(separator, input.subList(i, i + n)));
    }
    return outGrams;
  }

  /**
   * Generates an nGram based on a char[] input.
   *
   * @param input the array of chars to convert to nGram
   * @param n the number of grams (chars) that each output gram will consist of
   * @param separator each char in each gram will be separated by this value
   *     if desired. Pass in empty string if no separator is desired
   * @return the list of ngrams, empty when the input has fewer than n chars
   * @throws IllegalArgumentException if n is less than 1
   */
  public static List<String> generate(char[] input, int n, String separator) {
    if (n < 1) {
      throw new IllegalArgumentException("n must be at least 1, was " + n);
    }

    List<String> outGrams = new ArrayList<>();
    for (int i = 0; i + n <= input.length; i++) {
      // StringJoiner places the separator only between elements, which is
      // exactly the strip-the-trailing-separator behavior of the old code.
      StringJoiner gram = new StringJoiner(separator);
      for (int x = i; x < i + n; x++) {
        gram.add(String.valueOf(input[x]));
      }
      outGrams.add(gram.toString());
    }
    return outGrams;
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/ngram/NGramModel.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.ngram;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.NoSuchElementException;

import opennlp.tools.dictionary.Dictionary;
import opennlp.tools.dictionary.serializer.Attributes;
import opennlp.tools.dictionary.serializer.DictionaryEntryPersistor;
import opennlp.tools.dictionary.serializer.Entry;
import opennlp.tools.util.InvalidFormatException;
import opennlp.tools.util.StringList;
import opennlp.tools.util.StringUtil;

/**
 * The {@link NGramModel} can be used to create ngrams and character ngrams.
 *
 * @see StringList
 */
public class NGramModel implements Iterable<StringList> {

  // Name of the serialized entry attribute that stores an ngram's count.
  protected static final String COUNT = "count";

  // Maps each ngram to its occurrence count.
  private Map<StringList, Integer> mNGrams = new HashMap<>();

  /**
   * Initializes an empty instance.
   */
  public NGramModel() {
  }

  /**
   * Initializes the current instance from a serialized model stream.
   *
   * @param in the serialized model stream
   * @throws IOException if the stream cannot be read or an entry is missing
   *     a valid count attribute
   */
  public NGramModel(InputStream in) throws IOException {
    DictionaryEntryPersistor.create(in, entry -> {

      int count;
      String countValueString = null;

      try {
        countValueString = entry.getAttributes().getValue(COUNT);

        if (countValueString == null) {
          throw new InvalidFormatException(
              "The count attribute must be set!");
        }

        count = Integer.parseInt(countValueString);
      } catch (NumberFormatException e) {
        throw new InvalidFormatException("The count attribute '" + countValueString
            + "' must be a number!", e);
      }

      // Insert first (count 1), then overwrite with the persisted count;
      // setCount requires the ngram to already exist.
      add(entry.getTokens());
      setCount(entry.getTokens(), count);
    });
  }

  /**
   * Retrieves the count of the given ngram.
   *
   * @param ngram an ngram
   * @return count of the ngram or 0 if it is not contained
   */
  public int getCount(StringList ngram) {

    Integer count = mNGrams.get(ngram);

    if (count == null) {
      return 0;
    }

    return count;
  }

  /**
   * Sets the count of an existing ngram.
   *
   * @param ngram the ngram whose count is replaced; must already be contained
   * @param count the new count
   * @throws NoSuchElementException if the ngram is not contained (the
   *     accidental insertion performed by put is rolled back first)
   */
  public void setCount(StringList ngram, int count) {

    Integer oldCount = mNGrams.put(ngram, count);

    if (oldCount == null) {
      mNGrams.remove(ngram);
      throw new NoSuchElementException();
    }
  }

  /**
   * Adds one NGram, if it already exists the count increase by one.
   *
   * @param ngram the ngram to add or whose count to increment
   */
  public void add(StringList ngram) {
    if (contains(ngram)) {
      setCount(ngram, getCount(ngram) + 1);
    } else {
      mNGrams.put(ngram, 1);
    }
  }

  /**
   * Adds NGrams up to the specified length to the current instance.
   *
   * @param ngram the tokens to build the uni-grams, bi-grams, tri-grams, ..
   *     from.
   * @param minLength - minimal length
   * @param maxLength - maximal length
   * @throws IllegalArgumentException if either length is below 1 or
   *     minLength exceeds maxLength
   */
  public void add(StringList ngram, int minLength, int maxLength) {

    if (minLength < 1 || maxLength < 1)
      throw new IllegalArgumentException("minLength and maxLength param must be at least 1. "
          + "minLength=" + minLength + ", maxLength= " + maxLength);

    if (minLength > maxLength)
      throw new IllegalArgumentException("minLength param must not be larger than "
          + "maxLength param. minLength=" + minLength + ", maxLength= " + maxLength);

    // For every window length in [minLength, maxLength], slide over the
    // tokens and record each window as its own ngram.
    for (int lengthIndex = minLength; lengthIndex < maxLength + 1; lengthIndex++) {
      for (int textIndex = 0;
          textIndex + lengthIndex - 1 < ngram.size(); textIndex++) {

        String[] grams = new String[lengthIndex];

        for (int i = textIndex; i < textIndex + lengthIndex; i++) {
          grams[i - textIndex] = ngram.getToken(i);
        }

        add(new StringList(grams));
      }
    }
  }

  /**
   * Adds character NGrams to the current instance.
   *
   * @param chars the character sequence to window over
   * @param minLength minimal gram length
   * @param maxLength maximal gram length
   */
  public void add(CharSequence chars, int minLength, int maxLength) {

    for (int lengthIndex = minLength; lengthIndex < maxLength + 1; lengthIndex++) {
      for (int textIndex = 0;
          textIndex + lengthIndex - 1 < chars.length(); textIndex++) {

        // Character grams are normalized to lower case before counting.
        String gram = StringUtil.toLowerCase(
            chars.subSequence(textIndex, textIndex + lengthIndex));

        add(new StringList(new String[]{gram}));
      }
    }
  }

  /**
   * Removes the specified tokens form the NGram model, they are just dropped.
   *
   * @param tokens the ngram to remove
   */
  public void remove(StringList tokens) {
    mNGrams.remove(tokens);
  }

  /**
   * Checks if the given tokens are contained by the current instance.
   *
   * @param tokens the ngram to look up
   *
   * @return true if the ngram is contained
   */
  public boolean contains(StringList tokens) {
    return mNGrams.containsKey(tokens);
  }

  /**
   * Retrieves the number of {@link StringList} entries in the current instance.
   *
   * @return number of different grams
   */
  public int size() {
    return mNGrams.size();
  }

  /**
   * Retrieves an {@link Iterator} over all {@link StringList} entries.
   *
   * @return iterator over all grams
   */
  @Override
  public Iterator<StringList> iterator() {
    return mNGrams.keySet().iterator();
  }

  /**
   * Retrieves the total count of all Ngrams.
   *
   * @return total count of all ngrams
   */
  public int numberOfGrams() {

    int counter = 0;

    for (StringList ngram : this) {
      counter += getCount(ngram);
    }

    return counter;
  }

  /**
   * Deletes all ngram which do appear less than the cutoffUnder value
   * and more often than the cutoffOver value.
   *
   * @param cutoffUnder ngrams with a smaller count are removed
   * @param cutoffOver ngrams with a larger count are removed
   */
  public void cutoff(int cutoffUnder, int cutoffOver) {

    if (cutoffUnder > 0 || cutoffOver < Integer.MAX_VALUE) {

      for (Iterator<StringList> it = iterator(); it.hasNext(); ) {

        StringList ngram = it.next();

        int count = getCount(ngram);

        if (count < cutoffUnder
            || count > cutoffOver) {
          it.remove();
        }
      }
    }
  }

  /**
   * Creates a dictionary which contain all {@link StringList} which
   * are in the current {@link NGramModel}.
   *
   * Entries which are only different in the case are merged into one.
   *
   * Calling this method is the same as calling {@link #toDictionary(boolean)}
   * with false.
   *
   * @return a dictionary of the ngrams
   */
  public Dictionary toDictionary() {
    return toDictionary(false);
  }

  /**
   * Creates a dictionary which contains all {@link StringList}s which
   * are in the current {@link NGramModel}.
   *
   * @param caseSensitive Specifies whether case distinctions should be kept
   *     in the creation of the dictionary.
   *
   * @return a dictionary of the ngrams
   */
  public Dictionary toDictionary(boolean caseSensitive) {

    Dictionary dict = new Dictionary(caseSensitive);

    for (StringList stringList : this) {
      dict.put(stringList);
    }

    return dict;
  }

  /**
   * Writes the ngram instance to the given {@link OutputStream}.
   *
   * @param out the target stream
   *
   * @throws IOException if an I/O Error during writing occurs
   */
  public void serialize(OutputStream out) throws IOException {
    // Adapts the ngram iterator to the Entry iterator expected by the
    // persistor; each entry carries its count as an attribute.
    Iterator<Entry> entryIterator = new Iterator<Entry>() {
      private Iterator<StringList> mDictionaryIterator = NGramModel.this.iterator();

      @Override
      public boolean hasNext() {
        return mDictionaryIterator.hasNext();
      }

      @Override
      public Entry next() {

        StringList tokens = mDictionaryIterator.next();

        Attributes attributes = new Attributes();

        attributes.setValue(COUNT, Integer.toString(getCount(tokens)));

        return new Entry(tokens, attributes);
      }

      @Override
      public void remove() {
        throw new UnsupportedOperationException();
      }
    };

    DictionaryEntryPersistor.serialize(out, entryIterator, false);
  }

  @Override
  public boolean equals(Object obj) {
    boolean result;

    if (obj == this) {
      result = true;
    }
    else if (obj instanceof NGramModel) {
      NGramModel model = (NGramModel) obj;

      result = mNGrams.equals(model.mNGrams);
    }
    else {
      result = false;
    }

    return result;
  }

  @Override
  public String toString() {
    return "Size: " + size();
  }

  @Override
  public int hashCode() {
    return mNGrams.hashCode();
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/ngram/NGramUtils.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.ngram; import java.util.Collection; import java.util.HashSet; import java.util.LinkedList; import opennlp.tools.util.StringList; /** * Utility class for ngrams. * Some methods apply specifically to certain 'n' values, for e.g. tri/bi/uni-grams. 
*/ public class NGramUtils { /** * calculate the probability of a ngram in a vocabulary using Laplace smoothing algorithm * * @param ngram the ngram to get the probability for * @param set the vocabulary * @param k the smoothing factor * @return the Laplace smoothing probability * @see <a href="https://en.wikipedia.org/wiki/Additive_smoothing">Additive Smoothing</a> */ public static double calculateLaplaceSmoothingProbability(StringList ngram, Iterable<StringList> set, Double k) { return (count(ngram, set) + k) / (count(getNMinusOneTokenFirst(ngram), set) + k * 1); } /** * calculate the probability of a unigram in a vocabulary using maximum likelihood estimation * * @param word the only word in the unigram * @param set the vocabulary * @return the maximum likelihood probability */ public static double calculateUnigramMLProbability(String word, Collection<StringList> set) { double vocSize = 0d; for (StringList s : set) { vocSize += s.size(); } return count(new StringList(word), set) / vocSize; } /** * calculate the probability of a bigram in a vocabulary using maximum likelihood estimation * * @param x0 first word in the bigram * @param x1 second word in the bigram * @param set the vocabulary * @return the maximum likelihood probability */ public static double calculateBigramMLProbability(String x0, String x1, Collection<StringList> set) { return calculateNgramMLProbability(new StringList(x0, x1), set); } /** * calculate the probability of a trigram in a vocabulary using maximum likelihood estimation * * @param x0 first word in the trigram * @param x1 second word in the trigram * @param x2 third word in the trigram * @param set the vocabulary * @return the maximum likelihood probability */ public static double calculateTrigramMLProbability(String x0, String x1, String x2, Iterable<StringList> set) { return calculateNgramMLProbability(new StringList(x0, x1, x2), set); } /** * calculate the probability of a ngram in a vocabulary using maximum likelihood estimation * * 
@param ngram a ngram * @param set the vocabulary * @return the maximum likelihood probability */ public static double calculateNgramMLProbability(StringList ngram, Iterable<StringList> set) { StringList ngramMinusOne = getNMinusOneTokenFirst(ngram); return count(ngram, set) / count(ngramMinusOne, set); } /** * calculate the probability of a bigram in a vocabulary using prior Laplace smoothing algorithm * * @param x0 the first word in the bigram * @param x1 the second word in the bigram * @param set the vocabulary * @param k the smoothing factor * @return the prior Laplace smoothiing probability */ public static double calculateBigramPriorSmoothingProbability(String x0, String x1, Collection<StringList> set, Double k) { return (count(new StringList(x0, x1), set) + k * calculateUnigramMLProbability(x1, set)) / (count(new StringList(x0), set) + k * set.size()); } /** * calculate the probability of a trigram in a vocabulary using a linear interpolation algorithm * * @param x0 the first word in the trigram * @param x1 the second word in the trigram * @param x2 the third word in the trigram * @param set the vocabulary * @param lambda1 trigram interpolation factor * @param lambda2 bigram interpolation factor * @param lambda3 unigram interpolation factor * @return the linear interpolation probability */ public static double calculateTrigramLinearInterpolationProbability(String x0, String x1, String x2, Collection<StringList> set, Double lambda1, Double lambda2, Double lambda3) { assert lambda1 + lambda2 + lambda3 == 1 : "lambdas sum should be equals to 1"; assert lambda1 > 0 && lambda2 > 0 && lambda3 > 0 : "lambdas should all be greater than 0"; return lambda1 * calculateTrigramMLProbability(x0, x1, x2, set) + lambda2 * calculateBigramMLProbability(x1, x2, set) + lambda3 * calculateUnigramMLProbability(x2, set); } /** * calculate the probability of a ngram in a vocabulary using the missing probability mass algorithm * * @param ngram the ngram * @param discount discount 
factor * @param set the vocabulary * @return the probability */ public static double calculateMissingNgramProbabilityMass(StringList ngram, Double discount, Iterable<StringList> set) { Double missingMass = 0d; Double countWord = count(ngram, set); for (String word : flatSet(set)) { missingMass += (count(getNPlusOneNgram(ngram, word), set) - discount) / countWord; } return 1 - missingMass; } /** * get the (n-1)th ngram of a given ngram, that is the same ngram except the last word in the ngram * * @param ngram a ngram * @return a ngram */ public static StringList getNMinusOneTokenFirst(StringList ngram) { String[] tokens = new String[ngram.size() - 1]; for (int i = 0; i < ngram.size() - 1; i++) { tokens[i] = ngram.getToken(i); } return tokens.length > 0 ? new StringList(tokens) : null; } /** * get the (n-1)th ngram of a given ngram, that is the same ngram except the first word in the ngram * * @param ngram a ngram * @return a ngram */ public static StringList getNMinusOneTokenLast(StringList ngram) { String[] tokens = new String[ngram.size() - 1]; for (int i = 1; i < ngram.size(); i++) { tokens[i - 1] = ngram.getToken(i); } return tokens.length > 0 ? 
new StringList(tokens) : null; } private static StringList getNPlusOneNgram(StringList ngram, String word) { String[] tokens = new String[ngram.size() + 1]; for (int i = 0; i < ngram.size(); i++) { tokens[i] = ngram.getToken(i); } tokens[tokens.length - 1] = word; return new StringList(tokens); } private static Double count(StringList ngram, Iterable<StringList> sentences) { Double count = 0d; for (StringList sentence : sentences) { int idx0 = indexOf(sentence, ngram.getToken(0)); if (idx0 >= 0 && sentence.size() >= idx0 + ngram.size()) { boolean match = true; for (int i = 1; i < ngram.size(); i++) { String sentenceToken = sentence.getToken(idx0 + i); String ngramToken = ngram.getToken(i); match &= sentenceToken.equals(ngramToken); } if (match) { count++; } } } return count; } private static int indexOf(StringList sentence, String token) { for (int i = 0; i < sentence.size(); i++) { if (token.equals(sentence.getToken(i))) { return i; } } return -1; } private static Collection<String> flatSet(Iterable<StringList> set) { Collection<String> flatSet = new HashSet<>(); for (StringList sentence : set) { for (String word : sentence) { flatSet.add(word); } } return flatSet; } /** * get the ngrams of dimension n of a certain input sequence of tokens * * @param sequence a sequence of tokens * @param size the size of the resulting ngrmams * @return all the possible ngrams of the given size derivable from the input sequence */ public static Collection<StringList> getNGrams(StringList sequence, int size) { Collection<StringList> ngrams = new LinkedList<>(); if (size == -1 || size >= sequence.size()) { ngrams.add(sequence); } else { String[] ngram = new String[size]; for (int i = 0; i < sequence.size() - size + 1; i++) { ngram[0] = sequence.getToken(i); for (int j = 1; j < size; j++) { ngram[j] = sequence.getToken(i + j); } ngrams.add(new StringList(ngram)); } } return ngrams; } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/ngram/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Package related to computing and storing n-gram frequencies. */ package opennlp.tools.ngram;
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/AbstractBottomUpParser.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.parser;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;

import opennlp.tools.chunker.Chunker;
import opennlp.tools.dictionary.Dictionary;
import opennlp.tools.ngram.NGramModel;
import opennlp.tools.parser.chunking.ParserEventStream;
import opennlp.tools.postag.POSTagger;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.Sequence;
import opennlp.tools.util.Span;
import opennlp.tools.util.StringList;
import opennlp.tools.util.TrainingParameters;

/**
 * Abstract class which contains code to tag and chunk parses for bottom up parsing and
 * leaves implementation of advancing parses and completing parses to extend class.
 * <p>
 * <b>Note:</b> <br> The nodes within the returned parses are shared with other parses
 * and therefore their parent node references will not be consistent with their child
 * node reference. {@link #setParents setParents} can be used to make the parents consistent
 * with a particular parse, but subsequent calls to <code>setParents</code> can invalidate
 * the results of earlier calls.<br>
 */
public abstract class AbstractBottomUpParser implements Parser {

  /**
   * The maximum number of parses advanced from all preceding
   * parses at each derivation step.
   */
  protected int M;

  /**
   * The maximum number of parses to advance from a single preceding parse.
   */
  protected int K;

  /**
   * The minimum total probability mass of advanced outcomes.
   */
  protected double Q;

  /**
   * The default beam size used if no beam size is given.
   */
  public static final int defaultBeamSize = 20;

  /**
   * The default amount of probability mass required of advanced outcomes.
   */
  public static final double defaultAdvancePercentage = 0.95;

  /**
   * Completed parses.
   */
  private SortedSet<Parse> completeParses;

  /**
   * Incomplete parses which will be advanced.
   */
  private SortedSet<Parse> odh;

  /**
   * Incomplete parses which have been advanced.
   */
  private SortedSet<Parse> ndh;

  /**
   * The head rules for the parser.
   */
  protected HeadRules headRules;

  /**
   * The set strings which are considered punctuation for the parser.
   * Punctuation is not attached, but floats to the top of the parse as attachment
   * decisions are made about its non-punctuation sister nodes.
   */
  protected Set<String> punctSet;

  /**
   * The label for the top node.
   */
  public static final String TOP_NODE = "TOP";

  /**
   * The label for the top if an incomplete node.
   */
  public static final String INC_NODE = "INC";

  /**
   * The label for a token node.
   */
  public static final String TOK_NODE = "TK";

  /**
   * The integer 0.
   */
  public static final Integer ZERO = 0;

  /**
   * Prefix for outcomes starting a constituent.
   */
  public static final String START = "S-";

  /**
   * Prefix for outcomes continuing a constituent.
   */
  public static final String CONT = "C-";

  /**
   * Outcome for token which is not contained in a basal constituent.
   */
  public static final String OTHER = "O";

  /**
   * Outcome used when a constituent is complete.
   */
  public static final String COMPLETE = "c";

  /**
   * Outcome used when a constituent is incomplete.
   */
  public static final String INCOMPLETE = "i";

  /**
   * The pos-tagger that the parser uses.
   */
  protected POSTagger tagger;

  /**
   * The chunker that the parser uses to chunk non-recursive structures.
   */
  protected Chunker chunker;

  /**
   * Specifies whether failed parses should be reported to standard error.
   */
  protected boolean reportFailedParse;

  /**
   * Specifies whether a derivation string should be created during parsing.
   * This is useful for debugging.
   */
  protected boolean createDerivationString = false;

  /**
   * Turns debug print on or off.
   */
  protected boolean debugOn = false;

  public AbstractBottomUpParser(POSTagger tagger, Chunker chunker, HeadRules headRules,
      int beamSize, double advancePercentage) {
    this.tagger = tagger;
    this.chunker = chunker;
    // the same beam size is used for both the global beam (M) and the per-parse beam (K)
    this.M = beamSize;
    this.K = beamSize;
    this.Q = advancePercentage;
    reportFailedParse = true;
    this.headRules = headRules;
    this.punctSet = headRules.getPunctuationTags();
    odh = new TreeSet<>();
    ndh = new TreeSet<>();
    completeParses = new TreeSet<>();
  }

  /**
   * Specifies whether the parser should report when it was unable to find a parse for
   * a particular sentence.
   * @param errorReporting If true then un-parsed sentences are reported, false otherwise.
   */
  public void setErrorReporting(boolean errorReporting) {
    this.reportFailedParse = errorReporting;
  }

  /**
   * Assigns parent references for the specified parse so that they
   * are consistent with the children references.
   * @param p The parse whose parent references need to be assigned.
   */
  public static void setParents(Parse p) {
    // recursively point every child back at its parent
    Parse[] children = p.getChildren();
    for (int ci = 0; ci < children.length; ci++) {
      children[ci].setParent(p);
      setParents(children[ci]);
    }
  }

  /**
   * Removes the punctuation from the specified set of chunks, adds it to the parses
   * adjacent to the punctuation is specified, and returns a new array of parses with
   * the punctuation removed.
   *
   * @param chunks A set of parses.
   * @param punctSet The set of punctuation which is to be removed.
   * @return An array of parses which is a subset of chunks with punctuation removed.
   */
  public static Parse[] collapsePunctuation(Parse[] chunks, Set<String> punctSet) {
    List<Parse> collapsedParses = new ArrayList<>(chunks.length);
    int lastNonPunct = -1;
    int nextNonPunct;
    for (int ci = 0, cn = chunks.length; ci < cn; ci++) {
      if (punctSet.contains(chunks[ci].getType())) {
        // attach the punctuation to the previous non-punctuation chunk, if any
        if (lastNonPunct >= 0) {
          chunks[lastNonPunct].addNextPunctuation(chunks[ci]);
        }
        // and to the next non-punctuation chunk, if any
        for (nextNonPunct = ci + 1; nextNonPunct < cn; nextNonPunct++) {
          if (!punctSet.contains(chunks[nextNonPunct].getType())) {
            break;
          }
        }
        if (nextNonPunct < cn) {
          chunks[nextNonPunct].addPreviousPunctuation(chunks[ci]);
        }
      }
      else {
        collapsedParses.add(chunks[ci]);
        lastNonPunct = ci;
      }
    }
    if (collapsedParses.size() == chunks.length) {
      // nothing was punctuation; return the original array unchanged
      return chunks;
    }
    //System.err.println("collapsedPunctuation: collapsedParses"+collapsedParses);
    return collapsedParses.toArray(new Parse[collapsedParses.size()]);
  }

  /**
   * Advances the specified parse and returns the an array advanced parses whose
   * probability accounts for more than the specified amount of probability mass.
   *
   * @param p The parse to advance.
   * @param probMass The amount of probability mass that should be accounted for
   *     by the advanced parses.
   */
  protected abstract Parse[] advanceParses(final Parse p, double probMass);

  /**
   * Adds the "TOP" node to the specified parse.
   * @param p The complete parse.
   */
  protected abstract void advanceTop(Parse p);

  public Parse[] parse(Parse tokens, int numParses) {
    if (createDerivationString) tokens.setDerivation(new StringBuffer(100));
    odh.clear();
    ndh.clear();
    completeParses.clear();
    // beam search over derivations:
    // stage 0 = POS tagging, stage 1 = chunking, stages >= 2 = parser build/check steps
    int derivationStage = 0; //derivation length
    int maxDerivationLength = 2 * tokens.getChildCount() + 3;
    odh.add(tokens);
    Parse guess = null;
    double minComplete = 2;
    double bestComplete = -100000; //approximating -infinity/0 in ln domain
    while (odh.size() > 0 && (completeParses.size() < M || (odh.first()).getProb() < minComplete)
        && derivationStage < maxDerivationLength) {
      ndh = new TreeSet<>();

      int derivationRank = 0;
      for (Iterator<Parse> pi = odh.iterator(); pi.hasNext() && derivationRank < K;
          derivationRank++) { // foreach derivation
        Parse tp = pi.next();
        //TODO: Need to look at this for K-best parsing cases
        /*
         if (tp.getProb() < bestComplete) { //this parse and the ones which follow will never win, stop advancing.
           break;
         }
        */
        // remember the best chunked parse so something can be returned if parsing fails
        if (guess == null && derivationStage == 2) {
          guess = tp;
        }
        if (debugOn) {
          System.out.print(derivationStage + " " + derivationRank + " " + tp.getProb());
          tp.show();
          System.out.println();
        }
        Parse[] nd;
        if (0 == derivationStage) {
          nd = advanceTags(tp);
        }
        else if (1 == derivationStage) {
          if (ndh.size() < K) {
            //System.err.println("advancing ts "+j+" "+ndh.size()+" < "+K);
            nd = advanceChunks(tp,bestComplete);
          }
          else {
            //System.err.println("advancing ts "+j+" prob="+((Parse) ndh.last()).getProb());
            nd = advanceChunks(tp,(ndh.last()).getProb());
          }
        }
        else { // i > 1
          nd = advanceParses(tp, Q);
        }
        if (nd != null) {
          for (int k = 0, kl = nd.length; k < kl; k++) {
            if (nd[k].complete()) {
              advanceTop(nd[k]);
              if (nd[k].getProb() > bestComplete) {
                bestComplete = nd[k].getProb();
              }
              if (nd[k].getProb() < minComplete) {
                minComplete = nd[k].getProb();
              }
              completeParses.add(nd[k]);
            }
            else {
              ndh.add(nd[k]);
            }
          }
        }
        else {
          //if (reportFailedParse) {
          //  System.err.println("Couldn't advance parse "+derivationStage+" stage "+derivationRank+"!\n");
          //}
          advanceTop(tp);
          completeParses.add(tp);
        }
      }
      derivationStage++;
      odh = ndh;
    }
    if (completeParses.size() == 0) {
      // if (reportFailedParse) System.err.println("Couldn't find parse for: " + tokens);
      //Parse r = (Parse) odh.first();
      //r.show();
      //System.out.println();
      // NOTE(review): guess may still be null here (e.g. failure before stage 2), in which
      // case the returned array contains a null element - callers must handle that.
      return new Parse[] {guess};
    }
    else if (numParses == 1) {
      return new Parse[] {completeParses.first()};
    }
    else {
      // drain the highest-probability parses from the sorted set, best first
      List<Parse> topParses = new ArrayList<>(numParses);
      while (!completeParses.isEmpty() && topParses.size() < numParses) {
        Parse tp = completeParses.last();
        completeParses.remove(tp);
        topParses.add(tp);
        //parses.remove(tp);
      }
      return topParses.toArray(new Parse[topParses.size()]);
    }
  }

  public Parse parse(Parse tokens) {
    if (tokens.getChildCount() > 0) {
      Parse p = parse(tokens,1)[0];
      setParents(p);
      return p;
    }
    else {
      // nothing to parse; return the (empty) input unchanged
      return tokens;
    }
  }

  /**
   * Returns the top chunk sequences for the specified parse.
   * @param p A pos-tag assigned parse.
   * @param minChunkScore A minimum score below which chunks should not be advanced.
   * @return The top chunk assignments to the specified parse.
   */
  protected Parse[] advanceChunks(final Parse p, double minChunkScore) {
    // chunk
    Parse[] children = p.getChildren();
    String[] words = new String[children.length];
    String[] ptags = new String[words.length];
    double[] probs = new double[words.length];
    for (int i = 0, il = children.length; i < il; i++) {
      Parse sp = children[i];
      words[i] = sp.getHead().getCoveredText();
      ptags[i] = sp.getType();
    }
    //System.err.println("adjusted mcs = "+(minChunkScore-p.getProb()));
    Sequence[] cs = chunker.topKSequences(words, ptags,minChunkScore - p.getProb());
    Parse[] newParses = new Parse[cs.length];
    for (int si = 0, sl = cs.length; si < sl; si++) {
      newParses[si] = (Parse) p.clone(); //copies top level
      if (createDerivationString) newParses[si].getDerivation().append(si).append(".");
      String[] tags = cs[si].getOutcomes().toArray(new String[words.length]);
      cs[si].getProbs(probs);
      int start = -1;
      int end = 0;
      String type = null;
      //System.err.print("sequence "+si+" ");
      // iterate one past the last tag so a constituent ending at the sentence end is emitted
      for (int j = 0; j <= tags.length; j++) {
        // if (j != tags.length) {System.err.println(words[j]+" "+ptags[j]+" "+tags[j]+" "+probs.get(j));}
        if (j != tags.length) {
          newParses[si].addProb(Math.log(probs[j]));
        }
        // if continue just update end chunking tag don't use contTypeMap
        if (j != tags.length && tags[j].startsWith(CONT)) {
          end = j;
        }
        else { //make previous constituent if it exists
          if (type != null) {
            //System.err.println("inserting tag "+tags[j]);
            Parse p1 = p.getChildren()[start];
            Parse p2 = p.getChildren()[end];
            // System.err.println("Putting "+type+" at "+start+","+end+" for "+j+" "+newParses[si].getProb());
            Parse[] cons = new Parse[end - start + 1];
            cons[0] = p1;
            //cons[0].label="Start-"+type;
            if (end - start != 0) {
              cons[end - start] = p2;
              //cons[end-start].label="Cont-"+type;
              for (int ci = 1; ci < end - start; ci++) {
                cons[ci] = p.getChildren()[ci + start];
                //cons[ci].label="Cont-"+type;
              }
            }
            Parse chunk = new Parse(p1.getText(), new Span(p1.getSpan().getStart(),
                p2.getSpan().getEnd()), type, 1, headRules.getHead(cons, type));
            chunk.isChunk(true);
            newParses[si].insert(chunk);
          }
          if (j != tags.length) { //update for new constituent
            if (tags[j].startsWith(START)) { // don't use startTypeMap these are chunk tags
              type = tags[j].substring(START.length());
              start = j;
              end = j;
            }
            else { // other
              type = null;
            }
          }
        }
      }
      //newParses[si].show();System.out.println();
    }
    return newParses;
  }

  /**
   * Advances the parse by assigning it POS tags and returns multiple tag sequences.
   * @param p The parse to be tagged.
   * @return Parses with different POS-tag sequence assignments.
   */
  protected Parse[] advanceTags(final Parse p) {
    Parse[] children = p.getChildren();
    String[] words = new String[children.length];
    double[] probs = new double[words.length];
    for (int i = 0,il = children.length; i < il; i++) {
      words[i] = children[i].getCoveredText();
    }
    Sequence[] ts = tagger.topKSequences(words);
    Parse[] newParses = new Parse[ts.length];
    for (int i = 0; i < ts.length; i++) {
      String[] tags = ts[i].getOutcomes().toArray(new String[words.length]);
      ts[i].getProbs(probs);
      newParses[i] = (Parse) p.clone(); //copies top level
      if (createDerivationString) newParses[i].getDerivation().append(i).append(".");
      for (int j = 0; j < words.length; j++) {
        Parse word = children[j];
        //System.err.println("inserting tag "+tags[j]);
        double prob = probs[j];
        newParses[i].insert(new Parse(word.getText(), word.getSpan(), tags[j], prob,j));
        // accumulate the parse probability in log space
        newParses[i].addProb(Math.log(prob));
      }
    }
    return newParses;
  }

  /**
   * Determines the mapping between the specified index into the specified parses without
   * punctuation to the corresponding index into the specified parses.
   * @param index An index into the parses without punctuation.
   * @param nonPunctParses The parses without punctuation.
   * @param parses The parses with punctuation.
   * @return An index into the specified parses which corresponds to the same node as the
   *     specified index into the parses without punctuation.
   */
  protected int mapParseIndex(int index, Parse[] nonPunctParses, Parse[] parses) {
    int parseIndex = index;
    // scan forward past the punctuation entries until the same node object is found
    while (parses[parseIndex] != nonPunctParses[index]) {
      parseIndex++;
    }
    return parseIndex;
  }

  // Returns true when child is the last non-punctuation child of parent.
  private static boolean lastChild(Parse child, Parse parent, Set<String> punctSet) {
    if (parent == null) {
      return false;
    }
    Parse[] kids = collapsePunctuation(parent.getChildren(), punctSet);
    return (kids[kids.length - 1] == child);
  }

  /**
   * Creates a n-gram dictionary from the specified data stream using the specified
   * head rule and specified cut-off.
   *
   * @param data The data stream of parses.
   * @param rules The head rules for the parses.
   * @param params can contain a cutoff, the minimum number of entries required for the
   *     n-gram to be saved as part of the dictionary.
   * @return A dictionary object.
   */
  public static Dictionary buildDictionary(ObjectStream<Parse> data, HeadRules rules,
      TrainingParameters params) throws IOException {

    int cutoff = params.getIntParameter("dict", TrainingParameters.CUTOFF_PARAM, 5);

    NGramModel mdict = new NGramModel();
    Parse p;
    while ((p = data.read()) != null) {
      p.updateHeads(rules);
      Parse[] pwords = p.getTagNodes();
      String[] words = new String[pwords.length];
      //add all uni-grams
      for (int wi = 0;wi < words.length; wi++) {
        words[wi] = pwords[wi].getCoveredText();
      }

      mdict.add(new StringList(words), 1, 1);
      //add tri-grams and bi-grams for inital sequence
      Parse[] chunks = collapsePunctuation(ParserEventStream.getInitialChunks(p),
          rules.getPunctuationTags());
      String[] cwords = new String[chunks.length];
      for (int wi = 0; wi < cwords.length; wi++) {
        cwords[wi] = chunks[wi].getHead().getCoveredText();
      }
      mdict.add(new StringList(cwords), 2, 3);

      //emulate reductions to produce additional n-grams
      int ci = 0;
      while (ci < chunks.length) {
        // System.err.println("chunks["+ci+"]="+chunks[ci].getHead().getCoveredText()
        //     +" chunks.length="+chunks.length + " " + chunks[ci].getParent());

        if (chunks[ci].getParent() == null) {
          chunks[ci].show();
        }
        if (lastChild(chunks[ci], chunks[ci].getParent(),rules.getPunctuationTags())) {
          //perform reduce
          int reduceStart = ci;
          // find the first sibling with the same parent
          while (reduceStart >= 0 && chunks[reduceStart].getParent() == chunks[ci].getParent()) {
            reduceStart--;
          }
          reduceStart++;
          chunks = ParserEventStream.reduceChunks(chunks,ci,chunks[ci].getParent());
          ci = reduceStart;
          if (chunks.length != 0) {
            // record n-grams from a window of up to 2 chunks on either side of the reduction
            String[] window = new String[5];
            int wi = 0;
            if (ci - 2 >= 0) window[wi++] = chunks[ci - 2].getHead().getCoveredText();
            if (ci - 1 >= 0) window[wi++] = chunks[ci - 1].getHead().getCoveredText();
            window[wi++] = chunks[ci].getHead().getCoveredText();
            if (ci + 1 < chunks.length) window[wi++] = chunks[ci + 1].getHead().getCoveredText();
            if (ci + 2 < chunks.length) window[wi++] = chunks[ci + 2].getHead().getCoveredText();
            if (wi < 5) {
              String[] subWindow = new String[wi];
              System.arraycopy(window, 0, subWindow, 0, wi);
              window = subWindow;
            }
            if (window.length >= 3) {
              mdict.add(new StringList(window), 2, 3);
            }
            else if (window.length == 2) {
              mdict.add(new StringList(window), 2, 2);
            }
          }
          ci = reduceStart - 1; //ci will be incremented at end of loop
        }
        ci++;
      }
    }
    //System.err.println("gas,and="+mdict.getCount((new TokenList(new String[] {"gas","and"}))));
    mdict.cutoff(cutoff, Integer.MAX_VALUE);
    return mdict.toDictionary(true);
  }

  /**
   * Creates a n-gram dictionary from the specified data stream using the specified
   * head rule and specified cut-off.
   *
   * @param data The data stream of parses.
   * @param rules The head rules for the parses.
   * @param cutoff The minimum number of entries required for the n-gram to be
   *     saved as part of the dictionary.
   * @return A dictionary object.
   */
  public static Dictionary buildDictionary(ObjectStream<Parse> data, HeadRules rules, int cutoff)
      throws IOException {
    TrainingParameters params = new TrainingParameters();
    params.put("dict", TrainingParameters.CUTOFF_PARAM, cutoff);

    return buildDictionary(data, rules, params);
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/AbstractContextGenerator.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.parser; import java.util.Collection; import java.util.Iterator; import java.util.List; import java.util.Set; /** * Abstract class containing many of the methods used to generate contexts for parsing. */ public abstract class AbstractContextGenerator { protected static final String EOS = "eos"; protected boolean zeroBackOff; /** Set of punctuation to be used in generating features. */ protected Set<String> punctSet; protected boolean useLabel; /** * Creates punctuation feature for the specified punctuation at the specified index * based on the punctuation mark. * @param punct The punctuation which is in context. * @param i The index of the punctuation with relative to the parse. * @return Punctuation feature for the specified parse and the specified punctuation at the specfied index. */ protected String punct(Parse punct, int i) { return String.valueOf(i) + "=" + punct.getCoveredText(); } /** * Creates punctuation feature for the specified punctuation at the specfied index * based on the punctuation's tag. * @param punct The punctuation which is in context. * @param i The index of the punctuation relative to the parse. 
* @return Punctuation feature for the specified parse and the specified punctuation at the specfied index. */ protected String punctbo(Parse punct, int i) { return String.valueOf(i) + "=" + punct.getType(); } protected String cons(Parse p, int i) { StringBuilder feat = new StringBuilder(20); feat.append(i).append("="); if (p != null) { if (useLabel && i < 0) { feat.append(p.getLabel()).append("|"); } feat.append(p.getType()).append("|").append(p.getHead().getCoveredText()); } else { feat.append(EOS); } return feat.toString(); } protected String consbo(Parse p, int i) { //cons back-off StringBuilder feat = new StringBuilder(20); feat.append(i).append("*="); if (p != null) { if (useLabel && i < 0) { feat.append(p.getLabel()).append("|"); } feat.append(p.getType()); } else { feat.append(EOS); } return feat.toString(); } /** * Generates a string representing the grammar rule production that the specified parse * is starting. The rule is of the form p.type -&gt; c.children[0..n].type. * @param p The parse which stats teh production. * @param includePunctuation Whether punctuation should be included in the production. * @return a string representing the grammar rule production that the specified parse * is starting. 
*/ protected String production(Parse p, boolean includePunctuation) { StringBuilder production = new StringBuilder(20); production.append(p.getType()).append("->"); Parse[] children = AbstractBottomUpParser.collapsePunctuation(p.getChildren(),punctSet); for (int ci = 0; ci < children.length; ci++) { production.append(children[ci].getType()); if (ci + 1 != children.length) { production.append(","); Collection<Parse> nextPunct = children[ci].getNextPunctuationSet(); if (includePunctuation && nextPunct != null) { //TODO: make sure multiple punctuation comes out the same for (Iterator<Parse> pit = nextPunct.iterator(); pit.hasNext();) { Parse punct = pit.next(); production.append(punct.getType()).append(","); } } } } return production.toString(); } protected void cons2(List<String> features, Cons c0, Cons c1, Collection<Parse> punct1s, boolean bigram) { if (punct1s != null) { for (Iterator<Parse> pi = punct1s.iterator();pi.hasNext();) { Parse p = pi.next(); String punctbo = punctbo(p,c1.index <= 0 ? 
c1.index - 1 : c1.index); //punctbo(1); features.add(punctbo); if (c0.index == 0) { //TODO look at removing case //cons(0)punctbo(1) if (c0.unigram) features.add(c0.cons + "," + punctbo); features.add(c0.consbo + "," + punctbo); } if (c1.index == 0) { //TODO look at removing case //punctbo(1)cons(1) if (c1.unigram) features.add(punctbo + "," + c1.cons); features.add(punctbo + "," + c1.consbo); } //cons(0)punctbo(1)cons(1) if (bigram) features.add(c0.cons + "," + punctbo + "," + c1.cons); if (c1.unigram) features.add(c0.consbo + "," + punctbo + "," + c1.cons); if (c0.unigram) features.add(c0.cons + "," + punctbo + "," + c1.consbo); features.add(c0.consbo + "," + punctbo + "," + c1.consbo); } } else { //cons(0),cons(1) if (bigram) features.add(c0.cons + "," + c1.cons); if (c1.unigram) features.add(c0.consbo + "," + c1.cons); if (c0.unigram) features.add(c0.cons + "," + c1.consbo); features.add(c0.consbo + "," + c1.consbo); } } /** * Creates cons features involving the 3 specified nodes and adds them to the specified feature list. * @param features The list of features. * @param c0 The first node. * @param c1 The second node. * @param c2 The third node. * @param punct1s The punctuation between the first and second node. * @param punct2s The punctuation between the second and third node. * @param trigram Specifies whether lexical tri-gram features between these nodes should be generated. * @param bigram1 Specifies whether lexical bi-gram features between the first and second * node should be generated. * @param bigram2 Specifies whether lexical bi-gram features between the second and third * node should be generated. 
*/
  protected void cons3(List<String> features, Cons c0, Cons c1, Cons c2,
      Collection<Parse> punct1s, Collection<Parse> punct2s,
      boolean trigram, boolean bigram1, boolean bigram2) {
    // Emits joined features over the three nodes c0, c1, c2, optionally interleaved
    // with the punctuation observed between them (punct1s between c0/c1, punct2s
    // between c1/c2).  Each node carries a full (cons) and backed-off (consbo)
    // representation; the boolean flags gate which joined combinations are added,
    // and the fully backed-off combination is always added last.
    // features.add("stage=cons(0),cons(1),cons(2)");
    if (punct1s != null) {
      if (c0.index == -2) {
        for (Iterator<Parse> pi = punct1s.iterator(); pi.hasNext();) {
          Parse p = pi.next();
          // String punct = punct(p,c1.index);
          String punctbo = punctbo(p,c1.index <= 0 ? c1.index - 1 : c1.index);
          //punct(-2)
          //TODO consider changing
          //features.add(punct);
          //punctbo(-2)
          features.add(punctbo);
        }
      }
    }
    if (punct2s != null) {
      if (c2.index == 2) {
        for (Iterator<Parse> pi = punct2s.iterator(); pi.hasNext();) {
          Parse p = pi.next();
          // String punct = punct(p,c2.index);
          String punctbo = punctbo(p, c2.index);
          //punct(2)
          //TODO consider changing
          //features.add(punct);
          //punctbo(2)
          features.add(punctbo);
        }
      }
      if (punct1s != null) {
        //cons(0),punctbo(1),cons(1),punctbo(2),cons(2)
        // Punctuation on both sides: cross-product of the two punctuation sets.
        for (Iterator<Parse> pi2 = punct2s.iterator(); pi2.hasNext();) {
          String punctbo2 = punctbo(pi2.next(),c2.index <= 0 ? c2.index - 1 : c2.index);
          for (Iterator<Parse> pi1 = punct1s.iterator(); pi1.hasNext();) {
            String punctbo1 = punctbo(pi1.next(),c1.index <= 0 ? c1.index - 1 : c1.index);
            if (trigram) features.add(c0.cons + "," + punctbo1 + "," + c1.cons + "," + punctbo2 + "," + c2.cons);
            if (bigram2) features.add(c0.consbo + "," + punctbo1 + "," + c1.cons + "," + punctbo2 + "," + c2.cons);
            if (c0.unigram && c2.unigram) features.add(c0.cons + "," + punctbo1 + "," + c1.consbo + "," + punctbo2 + "," + c2.cons);
            if (bigram1) features.add(c0.cons + "," + punctbo1 + "," + c1.cons + "," + punctbo2 + "," + c2.consbo);
            if (c2.unigram) features.add(c0.consbo + "," + punctbo1 + "," + c1.consbo + "," + punctbo2 + "," + c2.cons);
            if (c1.unigram) features.add(c0.consbo + "," + punctbo1 + "," + c1.cons + "," + punctbo2 + "," + c2.consbo);
            if (c0.unigram) features.add(c0.cons + "," + punctbo1 + "," + c1.consbo + "," + punctbo2 + "," + c2.consbo);
            // Fully backed-off combination is emitted unconditionally.
            features.add(c0.consbo + "," + punctbo1 + "," + c1.consbo + "," + punctbo2 + "," + c2.consbo);
            if (zeroBackOff) {
              // Variants that drop c2 entirely.
              if (bigram1) features.add(c0.cons + "," + punctbo1 + "," + c1.cons + "," + punctbo2);
              if (c1.unigram) features.add(c0.consbo + "," + punctbo1 + "," + c1.cons + "," + punctbo2);
              if (c0.unigram) features.add(c0.cons + "," + punctbo1 + "," + c1.consbo + "," + punctbo2);
              features.add(c0.consbo + "," + punctbo1 + "," + c1.consbo + "," + punctbo2);
            }
          }
        }
      }
      else { //punct1s == null
        //cons(0),cons(1),punctbo(2),cons(2)
        for (Iterator<Parse> pi2 = punct2s.iterator(); pi2.hasNext();) {
          String punctbo2 = punctbo(pi2.next(),c2.index <= 0 ? c2.index - 1 : c2.index);
          if (trigram) features.add(c0.cons + "," + c1.cons + "," + punctbo2 + "," + c2.cons);
          if (bigram2) features.add(c0.consbo + "," + c1.cons + "," + punctbo2 + "," + c2.cons);
          if (c0.unigram && c2.unigram) features.add(c0.cons + "," + c1.consbo + "," + punctbo2 + "," + c2.cons);
          if (bigram1) features.add(c0.cons + "," + c1.cons + "," + punctbo2 + "," + c2.consbo);
          if (c2.unigram) features.add(c0.consbo + "," + c1.consbo + "," + punctbo2 + "," + c2.cons);
          if (c1.unigram) features.add(c0.consbo + "," + c1.cons + "," + punctbo2 + "," + c2.consbo);
          if (c0.unigram) features.add(c0.cons + "," + c1.consbo + "," + punctbo2 + "," + c2.consbo);
          // Fully backed-off combination is emitted unconditionally.
          features.add(c0.consbo + "," + c1.consbo + "," + punctbo2 + "," + c2.consbo);
          if (zeroBackOff) {
            // Variants that drop c2 entirely.
            if (bigram1) features.add(c0.cons + "," + c1.cons + "," + punctbo2);
            if (c1.unigram) features.add(c0.consbo + "," + c1.cons + "," + punctbo2);
            if (c0.unigram) features.add(c0.cons + "," + c1.consbo + "," + punctbo2);
            features.add(c0.consbo + "," + c1.consbo + "," + punctbo2);
          }
        }
      }
    }
    else {
      if (punct1s != null) {
        //cons(0),punctbo(1),cons(1),cons(2)
        for (Iterator<Parse> pi1 = punct1s.iterator(); pi1.hasNext();) {
          String punctbo1 = punctbo(pi1.next(), c1.index <= 0 ? c1.index - 1 : c1.index);
          if (trigram) features.add(c0.cons + "," + punctbo1 + "," + c1.cons + "," + c2.cons);
          if (bigram2) features.add(c0.consbo + "," + punctbo1 + "," + c1.cons + "," + c2.cons);
          if (c0.unigram && c2.unigram) features.add(c0.cons + "," + punctbo1 + "," + c1.consbo + "," + c2.cons);
          if (bigram1) features.add(c0.cons + "," + punctbo1 + "," + c1.cons + "," + c2.consbo);
          if (c2.unigram) features.add(c0.consbo + "," + punctbo1 + "," + c1.consbo + "," + c2.cons);
          if (c1.unigram) features.add(c0.consbo + "," + punctbo1 + "," + c1.cons + "," + c2.consbo);
          if (c0.unigram) features.add(c0.cons + "," + punctbo1 + "," + c1.consbo + "," + c2.consbo);
          // Fully backed-off combination is emitted unconditionally.
          features.add(c0.consbo + "," + punctbo1 + "," + c1.consbo + "," + c2.consbo);
          //zero backoff case covered by cons(0)cons(1)
        }
      }
      else {
        //cons(0),cons(1),cons(2)
        if (trigram) features.add(c0.cons + "," + c1.cons + "," + c2.cons);
        if (bigram2) features.add(c0.consbo + "," + c1.cons + "," + c2.cons);
        if (c0.unigram && c2.unigram) features.add(c0.cons + "," + c1.consbo + "," + c2.cons);
        if (bigram1) features.add(c0.cons + "," + c1.cons + "," + c2.consbo);
        if (c2.unigram) features.add(c0.consbo + "," + c1.consbo + "," + c2.cons);
        if (c1.unigram) features.add(c0.consbo + "," + c1.cons + "," + c2.consbo);
        if (c0.unigram) features.add(c0.cons + "," + c1.consbo + "," + c2.consbo);
        // Fully backed-off combination is emitted unconditionally.
        features.add(c0.consbo + "," + c1.consbo + "," + c2.consbo);
      }
    }
  }

  /**
   * Generates features for nodes surrounding a completed node of the specified type.
   * @param node A surrounding node.
   * @param i The index of the surrounding node with respect to the completed node.
   * @param type The type of the completed node.
   * @param punctuation The punctuation adjacent and between the specified surrounding node.
   * @param features A list to which features are added.
*/
  protected void surround(Parse node, int i, String type, Collection<Parse> punctuation,
      List<String> features) {
    // Builds "s<i>=" (lexicalized: head word + type + node type) and "s<i>*="
    // (backed-off) features for the node at relative position i.  When punctuation
    // is supplied, one set of features is produced per punctuation parse, each
    // suffixed with the punctuation tag; EOS stands in when node is null.
    StringBuilder feat = new StringBuilder(20);
    feat.append("s").append(i).append("=");
    if (punctuation != null) {
      for (Iterator<Parse> pi = punctuation.iterator(); pi.hasNext();) {
        Parse punct = pi.next();
        if (node != null) {
          feat.append(node.getHead().getCoveredText()).append("|").append(type)
              .append("|").append(node.getType()).append("|").append(punct.getType());
        }
        else {
          feat.append(type).append("|").append(EOS).append("|").append(punct.getType());
        }
        features.add(feat.toString());
        feat.setLength(0);
        feat.append("s").append(i).append("*=");
        if (node != null) {
          feat.append(type).append("|").append(node.getType()).append("|").append(punct.getType());
        }
        else {
          feat.append(type).append("|").append(EOS).append("|").append(punct.getType());
        }
        features.add(feat.toString());
        feat.setLength(0);
        feat.append("s").append(i).append("*=");
        feat.append(type).append("|").append(punct.getType());
        features.add(feat.toString());
        // NOTE(review): feat is NOT reset at the end of the iteration, so for a
        // second and later punctuation element the next feature is appended onto the
        // previous "s<i>*=" content.  This looks unintended, but changing it would
        // alter emitted feature strings (and thus model behavior) -- confirm against
        // the reference implementation before fixing.
      }
    }
    else {
      if (node != null) {
        feat.append(node.getHead().getCoveredText()).append("|").append(type)
            .append("|").append(node.getType());
      }
      else {
        feat.append(type).append("|").append(EOS);
      }
      features.add(feat.toString());
      feat.setLength(0);
      feat.append("s").append(i).append("*=");
      if (node != null) {
        feat.append(type).append("|").append(node.getType());
      }
      else {
        feat.append(type).append("|").append(EOS);
      }
      features.add(feat.toString());
    }
  }

  /**
   * Produces features to determine whether the specified child node is part of
   * a complete constituent of the specified type and adds those features to the
   * specified list.
   * @param child The parse node to consider.
   * @param i A string indicating the position of the child node.
   * @param type The type of constituent being built.
   * @param features List to add features to.
*/ protected void checkcons(Parse child, String i, String type, List<String> features) { StringBuilder feat = new StringBuilder(20); feat.append("c").append(i).append("=").append(child.getType()).append("|") .append(child.getHead().getCoveredText()).append("|").append(type); features.add(feat.toString()); feat.setLength(0); feat.append("c").append(i).append("*=").append(child.getType()).append("|").append(type); features.add(feat.toString()); } protected void checkcons(Parse p1, Parse p2, String type, List<String> features) { StringBuilder feat = new StringBuilder(20); feat.append("cil=").append(type).append(",").append(p1.getType()).append("|") .append(p1.getHead().getCoveredText()).append(",").append(p2.getType()) .append("|").append(p2.getHead().getCoveredText()); features.add(feat.toString()); feat.setLength(0); feat.append("ci*l=").append(type).append(",").append(p1.getType()).append(",") .append(p2.getType()).append("|").append(p2.getHead().getCoveredText()); features.add(feat.toString()); feat.setLength(0); feat.append("cil*=").append(type).append(",").append(p1.getType()).append("|") .append(p1.getHead().getCoveredText()).append(",").append(p2.getType()); features.add(feat.toString()); feat.setLength(0); feat.append("ci*l*=").append(type).append(",").append(p1.getType()) .append(",").append(p2.getType()); features.add(feat.toString()); } /** * Populates specified nodes array with left-most right frontier * node with a unique head. If the right frontier doesn't contain * enough nodes, then nulls are placed in the array elements. * @param rf The current right frontier. * @param nodes The array to be populated. 
*/
  protected void getFrontierNodes(List<Parse> rf, Parse[] nodes) {
    // Walk the right frontier, keeping the left-most node for each distinct head
    // index, then pad any remaining slots of the output array with nulls.
    int filled = 0;
    int lastHeadIndex = -1;
    for (Parse frontierNode : rf) {
      if (filled == nodes.length) {
        break;
      }
      int headIndex = frontierNode.getHeadIndex();
      if (headIndex != lastHeadIndex) {
        nodes[filled] = frontierNode;
        filled++;
        lastHeadIndex = headIndex;
      }
    }
    while (filled < nodes.length) {
      nodes[filled] = null;
      filled++;
    }
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/AbstractParserEventStream.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.parser;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Set;

import opennlp.tools.chunker.ChunkerContextGenerator;
import opennlp.tools.dictionary.Dictionary;
import opennlp.tools.ml.model.Event;
import opennlp.tools.parser.chunking.Parser;
import opennlp.tools.postag.DefaultPOSContextGenerator;
import opennlp.tools.postag.POSContextGenerator;
import opennlp.tools.util.ObjectStream;

/**
 * Abstract class extended by parser event streams which perform tagging and chunking.
 */
public abstract class AbstractParserEventStream extends
    opennlp.tools.util.AbstractEventStream<Parse> {

  // Only one of these two generators is created, depending on etype.
  private ChunkerContextGenerator chunkerContextGenerator;
  private POSContextGenerator tagContextGenerator;

  protected HeadRules rules;

  /** Punctuation tags from the head rules; attachment to these is not modeled. */
  protected Set<String> punctSet;

  /**
   * The type of events being generated by this event stream.
   */
  protected ParserEventTypeEnum etype;

  /** Whether possessives are fixed up before event generation; set by {@link #init()}. */
  protected boolean fixPossesives;

  protected Dictionary dict;

  /**
   * Initializes the stream over the given parse samples.
   *
   * @param d The stream of parse samples.
   * @param rules Head rules used to update heads and identify punctuation tags.
   * @param etype The kind of events to generate (TAG, CHUNK, or parse events).
   * @param dict An optional dictionary, may be {@code null}.
   */
  public AbstractParserEventStream(ObjectStream<Parse> d, HeadRules rules,
      ParserEventTypeEnum etype, Dictionary dict) {
    super(d);
    this.dict = dict;
    if (etype == ParserEventTypeEnum.CHUNK) {
      this.chunkerContextGenerator = new ChunkContextGenerator();
    }
    else if (etype == ParserEventTypeEnum.TAG) {
      this.tagContextGenerator = new DefaultPOSContextGenerator(null);
    }
    this.rules = rules;
    punctSet = rules.getPunctuationTags();
    this.etype = etype;
    init();
  }

  public AbstractParserEventStream(ObjectStream<Parse> d, HeadRules rules,
      ParserEventTypeEnum etype) {
    this(d, rules, etype, null);
  }

  /** Subclass hook invoked from the constructor; the default disables possessive fixing. */
  protected void init() {
    fixPossesives = false;
  }

  @Override
  protected Iterator<Event> createEvents(Parse sample) {
    List<Event> newEvents = new ArrayList<>();

    Parse.pruneParse(sample);
    if (fixPossesives) {
      Parse.fixPossesives(sample);
    }
    sample.updateHeads(rules);
    Parse[] chunks = getInitialChunks(sample);
    if (etype == ParserEventTypeEnum.TAG) {
      addTagEvents(newEvents, chunks);
    }
    else if (etype == ParserEventTypeEnum.CHUNK) {
      addChunkEvents(newEvents, chunks);
    }
    else {
      addParseEvents(newEvents, Parser.collapsePunctuation(chunks, punctSet));
    }

    return newEvents.iterator();
  }

  /**
   * Returns the lowest constituents of the parse which are either POS tags or
   * have only POS-tag children ("initial chunks").
   */
  public static Parse[] getInitialChunks(Parse p) {
    List<Parse> chunks = new ArrayList<>();
    getInitialChunks(p, chunks);
    return chunks.toArray(new Parse[chunks.size()]);
  }

  private static void getInitialChunks(Parse p, List<Parse> ichunks) {
    if (p.isPosTag()) {
      ichunks.add(p);
    }
    else {
      Parse[] kids = p.getChildren();
      boolean allKidsAreTags = true;
      for (Parse kid : kids) {
        if (!kid.isPosTag()) {
          allKidsAreTags = false;
          break;
        }
      }
      if (allKidsAreTags) {
        ichunks.add(p);
      }
      else {
        for (Parse kid : kids) {
          getInitialChunks(kid, ichunks);
        }
      }
    }
  }

  /**
   * Produces all events for the specified sentence chunks
   * and adds them to the specified list.
   *
   * @param newEvents A list of events to be added to.
   * @param chunks Pre-chunked constituents of a sentence.
   */
  protected abstract void addParseEvents(List<Event> newEvents, Parse[] chunks);

  private void addChunkEvents(List<Event> chunkEvents, Parse[] chunks) {
    List<String> toks = new ArrayList<>();
    List<String> tags = new ArrayList<>();
    List<String> preds = new ArrayList<>();
    for (Parse c : chunks) {
      if (c.isPosTag()) {
        toks.add(c.getCoveredText());
        tags.add(c.getType());
        preds.add(Parser.OTHER);
      }
      else {
        boolean start = true;
        String ctype = c.getType();
        for (Parse tok : c.getChildren()) {
          toks.add(tok.getCoveredText());
          tags.add(tok.getType());
          if (start) {
            preds.add(Parser.START + ctype);
            start = false;
          }
          else {
            preds.add(Parser.CONT + ctype);
          }
        }
      }
    }
    // Fix: convert the lists once, not on every loop iteration (was accidentally
    // O(n^2) in allocations).  Passing the same arrays each call also lets the
    // context generator's identity-keyed cache take effect.
    String[] tokArray = toks.toArray(new String[toks.size()]);
    String[] tagArray = tags.toArray(new String[tags.size()]);
    String[] predArray = preds.toArray(new String[preds.size()]);
    for (int ti = 0; ti < tokArray.length; ti++) {
      chunkEvents.add(new Event(predArray[ti],
          chunkerContextGenerator.getContext(ti, tokArray, tagArray, predArray)));
    }
  }

  private void addTagEvents(List<Event> tagEvents, Parse[] chunks) {
    List<String> toks = new ArrayList<>();
    List<String> preds = new ArrayList<>();
    for (Parse c : chunks) {
      if (c.isPosTag()) {
        toks.add(c.getCoveredText());
        preds.add(c.getType());
      }
      else {
        for (Parse tok : c.getChildren()) {
          toks.add(tok.getCoveredText());
          preds.add(tok.getType());
        }
      }
    }
    // Fix: hoist the per-iteration toArray conversions out of the loop (see
    // addChunkEvents for rationale).
    String[] tokArray = toks.toArray(new String[toks.size()]);
    String[] predArray = preds.toArray(new String[preds.size()]);
    for (int ti = 0; ti < tokArray.length; ti++) {
      tagEvents.add(new Event(predArray[ti],
          tagContextGenerator.getContext(ti, tokArray, predArray, null)));
    }
  }

  /**
   * Returns true if the specified child is the last child of the specified parent.
   *
   * @param child The child parse.
   * @param parent The parent parse.
   * @return true if the specified child is the last child of the specified parent;
   *     false otherwise.
   */
  protected boolean lastChild(Parse child, Parse parent) {
    Parse[] kids = AbstractBottomUpParser.collapsePunctuation(parent.getChildren(), punctSet);
    return kids[kids.length - 1] == child;
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/ChunkContextGenerator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.parser;

import java.util.ArrayList;
import java.util.List;

import opennlp.tools.chunker.ChunkerContextGenerator;
import opennlp.tools.util.Cache;
import opennlp.tools.util.TokenTag;

/**
 * Creates predictive context for the pre-chunking phases of parsing.
 */
public class ChunkContextGenerator implements ChunkerContextGenerator {

  /** Sentinel used for positions before the start or after the end of the sentence. */
  private static final String EOS = "eos";

  // Optional feature cache; only allocated when a positive cache size is given.
  private Cache<String, String[]> contextsCache;
  // Identity of the token array the cache entries were computed for; the cache is
  // flushed whenever a different sentence (array instance) is seen.
  private Object wordsKey;

  public ChunkContextGenerator() {
    this(0);
  }

  public ChunkContextGenerator(int cacheSize) {
    super();
    if (cacheSize > 0) {
      contextsCache = new Cache<>(cacheSize);
    }
  }

  @Deprecated
  public String[] getContext(Object o) {
    Object[] data = (Object[]) o;
    return getContext((Integer) data[0], (String[]) data[1], (String[]) data[2], (String[]) data[3]);
  }

  @Deprecated
  public String[] getContext(int i, String[] words, String[] prevDecisions, Object[] ac) {
    return getContext(i, words, (String[]) ac[0], prevDecisions);
  }

  /**
   * Returns the contextual features for deciding the chunk tag of token {@code i}.
   *
   * @param i The index of the token being decided.
   * @param words The tokens of the sentence.
   * @param tags The POS tags of the tokens.
   * @param preds The chunk decisions made so far (used for positions before i).
   * @return The features for position i.
   */
  public String[] getContext(int i, String[] words, String[] tags, String[] preds) {
    // 19 = 1 default + 10 unigram + 8 bigram features added below.
    List<String> features = new ArrayList<>(19);
    int x_2 = i - 2;
    int x_1 = i - 1;
    int x2 = i + 2;
    int x1 = i + 1;

    String w_2, w_1, w0, w1, w2;
    String t_2, t_1, t0, t1, t2;
    String p_2, p_1;

    // chunkandpostag(-2)
    if (x_2 >= 0) {
      t_2 = tags[x_2];
      p_2 = preds[x_2];
      w_2 = words[x_2];
    }
    else {
      t_2 = EOS;
      p_2 = EOS;
      w_2 = EOS;
    }

    // chunkandpostag(-1)
    if (x_1 >= 0) {
      t_1 = tags[x_1];
      p_1 = preds[x_1];
      w_1 = words[x_1];
    }
    else {
      t_1 = EOS;
      p_1 = EOS;
      w_1 = EOS;
    }

    // chunkandpostag(0)
    t0 = tags[i];
    w0 = words[i];

    // chunkandpostag(1)
    if (x1 < tags.length) {
      t1 = tags[x1];
      w1 = words[x1];
    }
    else {
      t1 = EOS;
      w1 = EOS;
    }

    // chunkandpostag(2)
    if (x2 < tags.length) {
      t2 = tags[x2];
      w2 = words[x2];
    }
    else {
      t2 = EOS;
      w2 = EOS;
    }

    // Fix: the key previously concatenated t1 twice and omitted t_1
    // (i + t_2 + t1 + t0 + t1 + t2 + ...), so two contexts differing only in the
    // tag at position i-1 collided and the cache could return stale features.
    String cacheKey = i + t_2 + t_1 + t0 + t1 + t2 + p_2 + p_1;
    if (contextsCache != null) {
      if (wordsKey == words) {
        String[] contexts = contextsCache.get(cacheKey);
        if (contexts != null) {
          return contexts;
        }
      }
      else {
        contextsCache.clear();
        wordsKey = words;
      }
    }

    String ct_2 = chunkandpostag(-2, w_2, t_2, p_2);
    String ctbo_2 = chunkandpostagbo(-2, t_2, p_2);
    String ct_1 = chunkandpostag(-1, w_1, t_1, p_1);
    String ctbo_1 = chunkandpostagbo(-1, t_1, p_1);
    String ct0 = chunkandpostag(0, w0, t0, null);
    String ctbo0 = chunkandpostagbo(0, t0, null);
    String ct1 = chunkandpostag(1, w1, t1, null);
    String ctbo1 = chunkandpostagbo(1, t1, null);
    String ct2 = chunkandpostag(2, w2, t2, null);
    String ctbo2 = chunkandpostagbo(2, t2, null);

    features.add("default");
    features.add(ct_2);
    features.add(ctbo_2);
    features.add(ct_1);
    features.add(ctbo_1);
    features.add(ct0);
    features.add(ctbo0);
    features.add(ct1);
    features.add(ctbo1);
    features.add(ct2);
    features.add(ctbo2);

    //chunkandpostag(-1,0)
    features.add(ct_1 + "," + ct0);
    features.add(ctbo_1 + "," + ct0);
    features.add(ct_1 + "," + ctbo0);
    features.add(ctbo_1 + "," + ctbo0);

    //chunkandpostag(0,1)
    features.add(ct0 + "," + ct1);
    features.add(ctbo0 + "," + ct1);
    features.add(ct0 + "," + ctbo1);
    features.add(ctbo0 + "," + ctbo1);

    String[] contexts = features.toArray(new String[features.size()]);
    if (contextsCache != null) {
      contextsCache.put(cacheKey, contexts);
    }
    return contexts;
  }

  /** Lexicalized feature "i=tok|tag[|chunk]"; the chunk is only known for i < 0. */
  private String chunkandpostag(int i, String tok, String tag, String chunk) {
    StringBuilder feat = new StringBuilder(20);
    feat.append(i).append("=").append(tok).append("|").append(tag);
    if (i < 0) {
      feat.append("|").append(chunk);
    }
    return feat.toString();
  }

  /** Backed-off feature "i*=tag[|chunk]" without the token itself. */
  private String chunkandpostagbo(int i, String tag, String chunk) {
    StringBuilder feat = new StringBuilder(20);
    feat.append(i).append("*=").append(tag);
    if (i < 0) {
      feat.append("|").append(chunk);
    }
    return feat.toString();
  }

  @Override
  public String[] getContext(int index, TokenTag[] sequence, String[] priorDecisions,
      Object[] additionalContext) {
    String[] token = TokenTag.extractTokens(sequence);
    String[] tags = TokenTag.extractTags(sequence);
    return getContext(index, token, tags, priorDecisions);
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/ChunkSampleStream.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.parser;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import opennlp.tools.chunker.ChunkSample;
import opennlp.tools.parser.chunking.Parser;
import opennlp.tools.util.FilterObjectStream;
import opennlp.tools.util.ObjectStream;

/**
 * Converts full parses into chunking samples by flattening each parse to its
 * initial chunks and emitting the corresponding token/tag/chunk-tag triples.
 */
public class ChunkSampleStream extends FilterObjectStream<Parse, ChunkSample> {

  public ChunkSampleStream(ObjectStream<Parse> in) {
    super(in);
  }

  /**
   * Collects into {@code ichunks} the lowest constituents that are either POS
   * tags themselves or whose children are all POS tags.
   */
  private static void getInitialChunks(Parse p, List<Parse> ichunks) {
    if (p.isPosTag()) {
      ichunks.add(p);
      return;
    }
    Parse[] kids = p.getChildren();
    boolean allKidsAreTags = true;
    for (Parse kid : kids) {
      if (!kid.isPosTag()) {
        allKidsAreTags = false;
        break;
      }
    }
    if (allKidsAreTags) {
      ichunks.add(p);
    }
    else {
      for (Parse kid : kids) {
        getInitialChunks(kid, ichunks);
      }
    }
  }

  /**
   * Returns the initial chunks of the given parse as an array.
   */
  public static Parse[] getInitialChunks(Parse p) {
    List<Parse> chunks = new ArrayList<>();
    getInitialChunks(p, chunks);
    return chunks.toArray(new Parse[chunks.size()]);
  }

  /**
   * Reads the next parse and converts it to a {@link ChunkSample}, or returns
   * null when the underlying stream is exhausted.
   */
  public ChunkSample read() throws IOException {
    Parse parse = samples.read();
    if (parse == null) {
      return null;
    }

    List<String> tokens = new ArrayList<>();
    List<String> tags = new ArrayList<>();
    List<String> preds = new ArrayList<>();
    for (Parse chunk : getInitialChunks(parse)) {
      if (chunk.isPosTag()) {
        // A bare POS tag is outside any chunk.
        tokens.add(chunk.getCoveredText());
        tags.add(chunk.getType());
        preds.add(Parser.OTHER);
      }
      else {
        // First token of a chunk gets a START label, the rest CONT labels.
        String chunkType = chunk.getType();
        boolean first = true;
        for (Parse token : chunk.getChildren()) {
          tokens.add(token.getCoveredText());
          tags.add(token.getType());
          preds.add((first ? Parser.START : Parser.CONT) + chunkType);
          first = false;
        }
      }
    }
    return new ChunkSample(tokens.toArray(new String[tokens.size()]),
        tags.toArray(new String[tags.size()]),
        preds.toArray(new String[preds.size()]));
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/Cons.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.parser; /** * Class to hold feature information about a specific parse node. */ public class Cons { final String cons; final String consbo; final int index; final boolean unigram; public Cons(String cons, String consbo, int index, boolean unigram) { this.cons = cons; this.consbo = consbo; this.index = index; this.unigram = unigram; } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/Constituent.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.parser;

import opennlp.tools.util.Span;

/**
 * Class used to hold constituents when reading parses.
 */
public class Constituent {

  // Mutable: parse readers relabel constituents in place via setLabel.
  private String label;

  // The span never changes after construction (no setter existed), so it is final.
  private final Span span;

  public Constituent(String label, Span span) {
    this.label = label;
    this.span = span;
  }

  /**
   * Returns the label of the constituent.
   *
   * @return the label of the constituent.
   */
  public String getLabel() {
    return label;
  }

  /**
   * Assigns the label to the constituent.
   *
   * @param label The label to set.
   */
  public void setLabel(String label) {
    this.label = label;
  }

  /**
   * Returns the span of the constituent.
   *
   * @return the span of the constituent.
   */
  public Span getSpan() {
    return span;
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/GapLabeler.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.parser;

import java.util.Stack;

/**
 * Interface for labeling nodes which contain traces so that these traces can be predicted
 * by the parser.
 */
public interface GapLabeler {

  /**
   * Labels the constituents found in the stack with gap labels if appropriate.
   * Implementations may mutate the labels of the stacked constituents in place.
   *
   * @param stack The stack of un-completed constituents.
   */
  void labelGaps(Stack<Constituent> stack);
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/HeadRules.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.parser;

import java.util.Set;

/**
 * Interface for encoding the head rules associated with parsing.
 */
public interface HeadRules {

  /**
   * Returns the head constituent for the specified constituents of the specified type.
   *
   * @param constituents The constituents which make up a constituent of the specified type.
   * @param type The type of a constituent which is made up of the specified constituents.
   * @return The constituent which is the head.
   */
  Parse getHead(Parse[] constituents, String type);

  /**
   * Returns the set of punctuation tags.  Attachment decisions for these tags
   * will not be modeled.
   *
   * @return the set of punctuation tags.
   */
  Set<String> getPunctuationTags();
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/Parse.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.parser; import java.util.Arrays; import java.util.Collection; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Objects; import java.util.Set; import java.util.Stack; import java.util.TreeSet; import java.util.regex.Matcher; import java.util.regex.Pattern; import opennlp.tools.util.Span; /** * Data structure for holding parse constituents. */ public class Parse implements Cloneable, Comparable<Parse> { public static final String BRACKET_LRB = "("; public static final String BRACKET_RRB = ")"; public static final String BRACKET_LCB = "{"; public static final String BRACKET_RCB = "}"; public static final String BRACKET_LSB = "["; public static final String BRACKET_RSB = "]"; /** * The text string on which this parse is based. * This object is shared among all parses for the same sentence. */ private String text; /** * The character offsets into the text for this constituent. */ private Span span; /** * The syntactic type of this parse. */ private String type; /** * The sub-constituents of this parse. */ private List<Parse> parts; /** * The head parse of this parse. A parse can be its own head. 
*/ private Parse head; /** * A string used during parse construction to specify which * stage of parsing has been performed on this node. */ private String label; /** * Index in the sentence of the head of this constituent. */ private int headIndex; /** * The parent parse of this parse. */ private Parse parent; /** * The probability associated with the syntactic type * assigned to this parse. */ private double prob; /** * The string buffer used to track the derivation of this parse. */ private StringBuffer derivation; /** * Specifies whether this constituent was built during the chunking phase. */ private boolean isChunk; /** * The pattern used to find the base constituent label of a * Penn Treebank labeled constituent. */ private static Pattern typePattern = Pattern.compile("^([^ =-]+)"); /** * The pattern used to find the function tags. */ private static Pattern funTypePattern = Pattern.compile("^[^ =-]+-([^ =-]+)"); /** * The patter used to identify tokens in Penn Treebank labeled constituents. */ private static Pattern tokenPattern = Pattern.compile("^[^ ()]+ ([^ ()]+)\\s*\\)"); /** * The set of punctuation parses which are between this parse and the previous parse. */ private Collection<Parse> prevPunctSet; /** * The set of punctuation parses which are between this parse and * the subsequent parse. */ private Collection<Parse> nextPunctSet; /** * Specifies whether constituent labels should include parts specified * after minus character. */ private static boolean useFunctionTags; /** * Creates a new parse node for this specified text and span of the specified type * with the specified probability and the specified head index. * * @param text The text of the sentence for which this node is a part of. * @param span The character offsets for this node within the specified text. * @param type The constituent label of this node. * @param p The probability of this parse. * @param index The token index of the head of this parse. 
*/ public Parse(String text, Span span, String type, double p, int index) { this.text = text; this.span = span; this.type = type; this.prob = p; this.head = this; this.headIndex = index; this.parts = new LinkedList<>(); this.label = null; this.parent = null; } /** * Creates a new parse node for this specified text and span of the specified type with * the specified probability and the specified head and head index. * * @param text The text of the sentence for which this node is a part of. * @param span The character offsets for this node within the specified text. * @param type The constituent label of this node. * @param p The probability of this parse. * @param h The head token of this parse. */ public Parse(String text, Span span, String type, double p, Parse h) { this(text, span, type, p, 0); if (h != null) { this.head = h; this.headIndex = h.headIndex; } } @Override public Object clone() { Parse p = new Parse(this.text, this.span, this.type, this.prob, this.head); p.parts = new LinkedList<>(); p.parts.addAll(this.parts); if (derivation != null) { p.derivation = new StringBuffer(100); p.derivation.append(this.derivation.toString()); } p.label = this.label; return (p); } /** * Clones the right frontier of parse up to the specified node. * * @param node The last node in the right frontier of the parse tree which should be cloned. * @return A clone of this parse and its right frontier up to and including the specified node. */ public Parse clone(Parse node) { if (this == node) { return (Parse) this.clone(); } else { Parse c = (Parse) this.clone(); Parse lc = c.parts.get(parts.size() - 1); c.parts.set(parts.size() - 1,lc.clone(node)); return c; } } /** * Clones the right frontier of this root parse up to and including the specified node. * * @param node The last node in the right frontier of the parse tree which should be cloned. * @param parseIndex The child index of the parse for this root node. 
* @return A clone of this root parse and its right frontier up to and including the specified node. */ public Parse cloneRoot(Parse node, int parseIndex) { Parse c = (Parse) this.clone(); Parse fc = c.parts.get(parseIndex); c.parts.set(parseIndex,fc.clone(node)); return c; } /** * Specifies whether function tags should be included as part of the constituent type. * * @param uft true is they should be included; false otherwise. */ public static void useFunctionTags(boolean uft) { useFunctionTags = uft; } /** * Set the type of this constituent to the specified type. * * @param type The type of this constituent. */ public void setType(String type) { this.type = type; } /** * Returns the constituent label for this node of the parse. * * @return The constituent label for this node of the parse. */ public String getType() { return type; } /** * Returns the set of punctuation parses that occur immediately before this parse. * * @return the set of punctuation parses that occur immediately before this parse. */ public Collection<Parse> getPreviousPunctuationSet() { return prevPunctSet; } /** * Designates that the specified punctuation should is prior to this parse. * * @param punct The punctuation. */ public void addPreviousPunctuation(Parse punct) { if (this.prevPunctSet == null) { this.prevPunctSet = new TreeSet<>(); } prevPunctSet.add(punct); } /** * Returns the set of punctuation parses that occur immediately after this parse. * * @return the set of punctuation parses that occur immediately after this parse. */ public Collection<Parse> getNextPunctuationSet() { return nextPunctSet; } /** * Designates that the specified punctuation follows this parse. * * @param punct The punctuation set. */ public void addNextPunctuation(Parse punct) { if (this.nextPunctSet == null) { this.nextPunctSet = new TreeSet<>(); } nextPunctSet.add(punct); } /** * Sets the set of punctuation tags which follow this parse. * * @param punctSet The set of punctuation tags which follow this parse. 
*/ public void setNextPunctuation(Collection<Parse> punctSet) { this.nextPunctSet = punctSet; } /** * Sets the set of punctuation tags which preceed this parse. * * @param punctSet The set of punctuation tags which preceed this parse. */ public void setPrevPunctuation(Collection<Parse> punctSet) { this.prevPunctSet = punctSet; } /** * Inserts the specified constituent into this parse based on its text span.This * method assumes that the specified constituent can be inserted into this parse. * * @param constituent The constituent to be inserted. */ public void insert(final Parse constituent) { Span ic = constituent.span; if (span.contains(ic)) { //double oprob=c.prob; int pi = 0; int pn = parts.size(); for (; pi < pn; pi++) { Parse subPart = parts.get(pi); //System.err.println("Parse.insert:con="+constituent+" sp["+pi+"] "+subPart+" "+subPart.getType()); Span sp = subPart.span; if (sp.getStart() >= ic.getEnd()) { break; } // constituent contains subPart else if (ic.contains(sp)) { //System.err.println("Parse.insert:con contains subPart"); parts.remove(pi); pi--; constituent.parts.add(subPart); subPart.setParent(constituent); //System.err.println("Parse.insert: "+subPart.hashCode()+" -> "+subPart.getParent().hashCode()); pn = parts.size(); } else if (sp.contains(ic)) { //System.err.println("Parse.insert:subPart contains con"); subPart.insert(constituent); return; } } //System.err.println("Parse.insert:adding con="+constituent+" to "+this); parts.add(pi, constituent); constituent.setParent(this); // System.err.println("Parse.insert: "+constituent.hashCode()+" -> " // +constituent.getParent().hashCode()); } else { throw new IllegalArgumentException("Inserting constituent not contained in the sentence!"); } } /** * Appends the specified string buffer with a string representation of this parse. * * @param sb A string buffer into which the parse string can be appended. 
*/ public void show(StringBuffer sb) { int start; start = span.getStart(); if (!type.equals(AbstractBottomUpParser.TOK_NODE)) { sb.append("("); sb.append(type).append(" "); //System.out.print(label+" "); //System.out.print(head+" "); //System.out.print(df.format(prob)+" "); } for (Iterator<Parse> i = parts.iterator(); i.hasNext();) { Parse c = i.next(); Span s = c.span; if (start < s.getStart()) { //System.out.println("pre "+start+" "+s.getStart()); sb.append(encodeToken(text.substring(start, s.getStart()))); } c.show(sb); start = s.getEnd(); } if (start < span.getEnd()) { sb.append(encodeToken(text.substring(start, span.getEnd()))); } if (!type.equals(AbstractBottomUpParser.TOK_NODE)) { sb.append(")"); } } /** * Displays this parse using Penn Treebank-style formatting. */ public void show() { StringBuffer sb = new StringBuffer(text.length() * 4); show(sb); System.out.println(sb); } /** * Returns the probability associated with the pos-tag sequence assigned to this parse. * * @return The probability associated with the pos-tag sequence assigned to this parse. */ public double getTagSequenceProb() { //System.err.println("Parse.getTagSequenceProb: "+type+" "+this); if (parts.size() == 1 && (parts.get(0)).type.equals(AbstractBottomUpParser.TOK_NODE)) { //System.err.println(this+" "+prob); return (Math.log(prob)); } else if (parts.size() == 0) { System.err.println("Parse.getTagSequenceProb: Wrong base case!"); return (0.0); } else { double sum = 0.0; for (Iterator<Parse> pi = parts.iterator(); pi.hasNext();) { sum += pi.next().getTagSequenceProb(); } return sum; } } /** * Returns whether this parse is complete. * * @return Returns true if the parse contains a single top-most node. */ public boolean complete() { return (parts.size() == 1); } public String getCoveredText() { return text.substring(span.getStart(), span.getEnd()); } /** * Represents this parse in a human readable way. 
*/ @Override public String toString() { // TODO: Use the commented code in next bigger release, // change probably breaks backward compatibility in some // applications //StringBuffer buffer = new StringBuffer(); //show(buffer); //return buffer.toString(); return getCoveredText(); } /** * Returns the text of the sentence over which this parse was formed. * * @return The text of the sentence over which this parse was formed. */ public String getText() { return text; } /** * Returns the character offsets for this constituent. * * @return The character offsets for this constituent. */ public Span getSpan() { return span; } /** * Returns the log of the product of the probability associated with all the * decisions which formed this constituent. * * @return The log of the product of the probability associated with all the * decisions which formed this constituent. */ public double getProb() { return prob; } /** * Adds the specified probability log to this current log for this parse. * * @param logProb The probability of an action performed on this parse. */ public void addProb(double logProb) { this.prob += logProb; } /** * Returns the child constituents of this constituent * . * @return The child constituents of this constituent. */ public Parse[] getChildren() { return parts.toArray(new Parse[parts.size()]); } /** * Replaces the child at the specified index with a new child with the specified label. * * @param index The index of the child to be replaced. * @param label The label to be assigned to the new child. 
*/ public void setChild(int index, String label) { Parse newChild = (Parse) (parts.get(index)).clone(); newChild.setLabel(label); parts.set(index,newChild); } public void add(Parse daughter, HeadRules rules) { if (daughter.prevPunctSet != null) { parts.addAll(daughter.prevPunctSet); } parts.add(daughter); this.span = new Span(span.getStart(),daughter.getSpan().getEnd()); this.head = rules.getHead(getChildren(),type); this.headIndex = head.headIndex; } public void remove(int index) { parts.remove(index); if (! parts.isEmpty()) { if (index == 0 || index == parts.size()) { //size is orig last element span = new Span((parts.get(0)).span.getStart(),(parts.get(parts.size() - 1)).span.getEnd()); } } } public Parse adjoinRoot(Parse node, HeadRules rules, int parseIndex) { Parse lastChild = parts.get(parseIndex); Parse adjNode = new Parse(this.text,new Span(lastChild.getSpan().getStart(), node.getSpan().getEnd()),lastChild.getType(),1, rules.getHead(new Parse[]{lastChild,node},lastChild.getType())); adjNode.parts.add(lastChild); if (node.prevPunctSet != null) { adjNode.parts.addAll(node.prevPunctSet); } adjNode.parts.add(node); parts.set(parseIndex,adjNode); return adjNode; } /** * Sister adjoins this node's last child and the specified sister node and returns their * new parent node. The new parent node replace this nodes last child. * * @param sister The node to be adjoined. * @param rules The head rules for the parser. * @return The new parent node of this node and the specified sister node. 
*/ public Parse adjoin(Parse sister, HeadRules rules) { Parse lastChild = parts.get(parts.size() - 1); Parse adjNode = new Parse(this.text,new Span(lastChild.getSpan().getStart(),sister.getSpan().getEnd()), lastChild.getType(),1,rules.getHead(new Parse[]{lastChild,sister},lastChild.getType())); adjNode.parts.add(lastChild); if (sister.prevPunctSet != null) { adjNode.parts.addAll(sister.prevPunctSet); } adjNode.parts.add(sister); parts.set(parts.size() - 1, adjNode); this.span = new Span(span.getStart(),sister.getSpan().getEnd()); this.head = rules.getHead(getChildren(),type); this.headIndex = head.headIndex; return adjNode; } public void expandTopNode(Parse root) { boolean beforeRoot = true; //System.err.println("expandTopNode: parts="+parts); for (int pi = 0, ai = 0; pi < parts.size(); pi++,ai++) { Parse node = parts.get(pi); if (node == root) { beforeRoot = false; } else if (beforeRoot) { root.parts.add(ai,node); parts.remove(pi); pi--; } else { root.parts.add(node); parts.remove(pi); pi--; } } root.updateSpan(); } /** * Returns the number of children for this parse node. * * @return the number of children for this parse node. */ public int getChildCount() { return parts.size(); } /** * Returns the index of this specified child. * * @param child A child of this parse. * * @return the index of this specified child or -1 if the specified child is not a child of this parse. */ public int indexOf(Parse child) { return parts.indexOf(child); } /** * Returns the head constituent associated with this constituent. * * @return The head constituent associated with this constituent. */ public Parse getHead() { return head; } /** * Returns the index within a sentence of the head token for this parse. * * @return The index within a sentence of the head token for this parse. */ public int getHeadIndex() { return headIndex; } /** * Returns the label assigned to this parse node during parsing * which specifies how this node will be formed into a constituent. 
* * @return The outcome label assigned to this node during parsing. */ public String getLabel() { return label; } /** * Assigns this parse the specified label. This is used by parsing schemes to * tag parsing nodes while building. * * @param label A label indicating something about the stage of building for this parse node. */ public void setLabel(String label) { this.label = label; } private static String getType(String rest) { if (rest.startsWith("-LCB-")) { return "-LCB-"; } else if (rest.startsWith("-RCB-")) { return "-RCB-"; } else if (rest.startsWith("-LRB-")) { return "-LRB-"; } else if (rest.startsWith("-RRB-")) { return "-RRB-"; } else if (rest.startsWith("-RSB-")) { return "-RSB-"; } else if (rest.startsWith("-LSB-")) { return "-LSB-"; } else if (rest.startsWith("-NONE-")) { return "-NONE-"; } else { Matcher typeMatcher = typePattern.matcher(rest); if (typeMatcher.find()) { String type = typeMatcher.group(1); if (useFunctionTags) { Matcher funMatcher = funTypePattern.matcher(rest); if (funMatcher.find()) { String ftag = funMatcher.group(1); type = type + "-" + ftag; } } return type; } } return null; } private static String encodeToken(String token) { if (BRACKET_LRB.equals(token)) { return "-LRB-"; } else if (BRACKET_RRB.equals(token)) { return "-RRB-"; } else if (BRACKET_LCB.equals(token)) { return "-LCB-"; } else if (BRACKET_RCB.equals(token)) { return "-RCB-"; } else if (BRACKET_LSB.equals(token)) { return "-LSB-"; } else if (BRACKET_RSB.equals(token)) { return "-RSB-"; } return token; } private static String decodeToken(String token) { if ("-LRB-".equals(token)) { return BRACKET_LRB; } else if ("-RRB-".equals(token)) { return BRACKET_RRB; } else if ("-LCB-".equals(token)) { return BRACKET_LCB; } else if ("-RCB-".equals(token)) { return BRACKET_RCB; } else if ("-LSB-".equals(token)) { return BRACKET_LSB; } else if ("-RSB-".equals(token)) { return BRACKET_RSB; } return token; } /** * Returns the string containing the token for the specified portion of 
the parse string or * null if the portion of the parse string does not represent a token. * * @param rest The portion of the parse string remaining to be processed. * * @return The string containing the token for the specified portion of the parse string or * null if the portion of the parse string does not represent a token. */ private static String getToken(String rest) { Matcher tokenMatcher = tokenPattern.matcher(rest); if (tokenMatcher.find()) { return decodeToken(tokenMatcher.group(1)); } return null; } /** * Computes the head parses for this parse and its sub-parses and stores this information * in the parse data structure. * * @param rules The head rules which determine how the head of the parse is computed. */ public void updateHeads(HeadRules rules) { if (parts != null && parts.size() != 0) { for (int pi = 0, pn = parts.size(); pi < pn; pi++) { Parse c = parts.get(pi); c.updateHeads(rules); } this.head = rules.getHead(parts.toArray(new Parse[parts.size()]), type); if (head == null) { head = this; } else { this.headIndex = head.headIndex; } } else { this.head = this; } } public void updateSpan() { span = new Span((parts.get(0)).span.getStart(),(parts.get(parts.size() - 1)).span.getEnd()); } /** * Prune the specified sentence parse of vacuous productions. 
* * @param parse */ public static void pruneParse(Parse parse) { List<Parse> nodes = new LinkedList<>(); nodes.add(parse); while (nodes.size() != 0) { Parse node = nodes.remove(0); Parse[] children = node.getChildren(); if (children.length == 1 && node.getType().equals(children[0].getType())) { int index = node.getParent().parts.indexOf(node); children[0].setParent(node.getParent()); node.getParent().parts.set(index,children[0]); node.parent = null; node.parts = null; } nodes.addAll(Arrays.asList(children)); } } public static void fixPossesives(Parse parse) { Parse[] tags = parse.getTagNodes(); for (int ti = 0; ti < tags.length; ti++) { if (tags[ti].getType().equals("POS")) { if (ti + 1 < tags.length && tags[ti + 1].getParent() == tags[ti].getParent().getParent()) { int start = tags[ti + 1].getSpan().getStart(); int end = tags[ti + 1].getSpan().getEnd(); for (int npi = ti + 2; npi < tags.length; npi++) { if (tags[npi].getParent() == tags[npi - 1].getParent()) { end = tags[npi].getSpan().getEnd(); } else { break; } } Parse npPos = new Parse(parse.getText(), new Span(start,end), "NP", 1 , tags[ti + 1]); parse.insert(npPos); } } } } /** * Parses the specified tree-bank style parse string and return a Parse structure for that string. * * @param parse A tree-bank style parse string. * * @return a Parse structure for the specified tree-bank style parse string. */ public static Parse parseParse(String parse) { return parseParse(parse,null); } /** * Parses the specified tree-bank style parse string and return a Parse structure * for that string. * * @param parse A tree-bank style parse string. * @param gl The gap labeler. * * @return a Parse structure for the specified tree-bank style parse string. 
*/ public static Parse parseParse(String parse, GapLabeler gl) { StringBuilder text = new StringBuilder(); int offset = 0; Stack<Constituent> stack = new Stack<>(); List<Constituent> cons = new LinkedList<>(); for (int ci = 0, cl = parse.length(); ci < cl; ci++) { char c = parse.charAt(ci); if (c == '(') { String rest = parse.substring(ci + 1); String type = getType(rest); if (type == null) { System.err.println("null type for: " + rest); } String token = getToken(rest); stack.push(new Constituent(type, new Span(offset,offset))); if (token != null) { if (Objects.equals(type, "-NONE-") && gl != null) { //System.err.println("stack.size="+stack.size()); gl.labelGaps(stack); } else { cons.add(new Constituent(AbstractBottomUpParser.TOK_NODE, new Span(offset, offset + token.length()))); text.append(token).append(" "); offset += token.length() + 1; } } } else if (c == ')') { Constituent con = stack.pop(); int start = con.getSpan().getStart(); if (start < offset) { cons.add(new Constituent(con.getLabel(), new Span(start, offset - 1))); } } } String txt = text.toString(); int tokenIndex = -1; Parse p = new Parse(txt, new Span(0, txt.length()), AbstractBottomUpParser.TOP_NODE, 1,0); for (int ci = 0; ci < cons.size(); ci++) { Constituent con = cons.get(ci); String type = con.getLabel(); if (!type.equals(AbstractBottomUpParser.TOP_NODE)) { if (AbstractBottomUpParser.TOK_NODE.equals(type)) { tokenIndex++; } Parse c = new Parse(txt, con.getSpan(), type, 1,tokenIndex); //System.err.println("insert["+ci+"] "+type+" "+c.toString()+" "+c.hashCode()); p.insert(c); //codeTree(p); } } return p; } /** * Returns the parent parse node of this constituent. * * @return The parent parse node of this constituent. */ public Parse getParent() { return parent; } /** * Specifies the parent parse node for this constituent. * * @param parent The parent parse node for this constituent. 
*/ public void setParent(Parse parent) { this.parent = parent; } /** * Indicates whether this parse node is a pos-tag. * * @return true if this node is a pos-tag, false otherwise. */ public boolean isPosTag() { return (parts.size() == 1 && (parts.get(0)).getType().equals(AbstractBottomUpParser.TOK_NODE)); } /** * Returns true if this constituent contains no sub-constituents. * * @return true if this constituent contains no sub-constituents; false otherwise. */ public boolean isFlat() { boolean flat = true; for (int ci = 0; ci < parts.size(); ci++) { flat &= (parts.get(ci)).isPosTag(); } return flat; } public void isChunk(boolean ic) { this.isChunk = ic; } public boolean isChunk() { return isChunk; } /** * Returns the parse nodes which are children of this node and which are pos tags. * * @return the parse nodes which are children of this node and which are pos tags. */ public Parse[] getTagNodes() { List<Parse> tags = new LinkedList<>(); List<Parse> nodes = new LinkedList<>(); nodes.addAll(this.parts); while (nodes.size() != 0) { Parse p = nodes.remove(0); if (p.isPosTag()) { tags.add(p); } else { nodes.addAll(0,p.parts); } } return tags.toArray(new Parse[tags.size()]); } public Parse[] getTokenNodes() { List<Parse> tokens = new LinkedList<>(); List<Parse> nodes = new LinkedList<>(); nodes.addAll(this.parts); while (nodes.size() != 0) { Parse p = nodes.remove(0); if (p.getType().equals(AbstractBottomUpParser.TOK_NODE)) { tokens.add(p); } else { nodes.addAll(0, p.parts); } } return tokens.toArray(new Parse[tokens.size()]); } /** * Returns the deepest shared parent of this node and the specified node. * If the nodes are identical then their parent is returned. * If one node is the parent of the other then the parent node is returned. * * @param node The node from which parents are compared to this node's parents. * * @return the deepest shared parent of this node and the specified node. 
*/ public Parse getCommonParent(Parse node) { if (this == node) { return parent; } Set<Parse> parents = new HashSet<>(); Parse cparent = this; while (cparent != null) { parents.add(cparent); cparent = cparent.getParent(); } while (node != null) { if (parents.contains(node)) { return node; } node = node.getParent(); } return null; } @Override public boolean equals(Object obj) { if (obj == this) { return true; } if (obj instanceof Parse) { Parse p = (Parse) obj; return Objects.equals(label, p.label) && span.equals(p.span) && text.equals(p.text) && parts.equals(p.parts); } return false; } @Override public int hashCode() { // Note: label is missing here! return Objects.hash(span, text); } public int compareTo(Parse p) { return Double.compare(p.getProb(), this.getProb()); } /** * Returns the derivation string for this parse if one has been created. * * @return the derivation string for this parse or null if no derivation string has been created. */ public StringBuffer getDerivation() { return derivation; } /** * Specifies the derivation string to be associated with this parse. * * @param derivation The derivation string to be associated with this parse. 
*/ public void setDerivation(StringBuffer derivation) { this.derivation = derivation; } private void codeTree(Parse p,int[] levels) { Parse[] kids = p.getChildren(); StringBuilder levelsBuff = new StringBuilder(); levelsBuff.append("["); int[] nlevels = new int[levels.length + 1]; for (int li = 0; li < levels.length; li++) { nlevels[li] = levels[li]; levelsBuff.append(levels[li]).append("."); } for (int ki = 0; ki < kids.length; ki++) { nlevels[levels.length] = ki; System.out.println(levelsBuff.toString() + ki + "] " + kids[ki].getType() + " " + kids[ki].hashCode() + " -> " + kids[ki].getParent().hashCode() + " " + kids[ki].getParent().getType() + " " + kids[ki].getCoveredText()); codeTree(kids[ki],nlevels); } } /** * Prints to standard out a representation of the specified parse which * contains hash codes so that parent/child relationships can be explicitly seen. */ public void showCodeTree() { codeTree(this,new int[0]); } /** * Utility method to inserts named entities. * * @param tag * @param names * @param tokens */ public static void addNames(String tag, Span[] names, Parse[] tokens) { for (Span nameTokenSpan : names) { Parse startToken = tokens[nameTokenSpan.getStart()]; Parse endToken = tokens[nameTokenSpan.getEnd() - 1]; Parse commonParent = startToken.getCommonParent(endToken); //System.err.println("addNames: "+startToken+" .. 
"+endToken+" commonParent = "+commonParent); if (commonParent != null) { Span nameSpan = new Span(startToken.getSpan().getStart(), endToken.getSpan().getEnd()); if (nameSpan.equals(commonParent.getSpan())) { commonParent.insert(new Parse(commonParent.getText(), nameSpan, tag, 1.0, endToken.getHeadIndex())); } else { Parse[] kids = commonParent.getChildren(); boolean crossingKids = false; for (Parse kid : kids) { if (nameSpan.crosses(kid.getSpan())) { crossingKids = true; } } if (!crossingKids) { commonParent.insert(new Parse(commonParent.getText(), nameSpan, tag, 1.0, endToken.getHeadIndex())); } else { if (commonParent.getType().equals("NP")) { Parse[] grandKids = kids[0].getChildren(); if (grandKids.length > 1 && nameSpan.contains(grandKids[grandKids.length - 1].getSpan())) { commonParent.insert(new Parse(commonParent.getText(), commonParent.getSpan(), tag, 1.0, commonParent.getHeadIndex())); } } } } } } } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/ParseSampleStream.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.parser;

import java.io.IOException;

import opennlp.tools.util.FilterObjectStream;
import opennlp.tools.util.ObjectStream;

/**
 * An {@link ObjectStream} that converts tree-bank style parse strings read
 * from an underlying {@code ObjectStream<String>} into {@link Parse} objects
 * via {@link Parse#parseParse(String)}. Blank (whitespace-only) entries are
 * skipped silently.
 */
public class ParseSampleStream extends FilterObjectStream<String, Parse> {

  /**
   * Creates a new sample stream over the given string stream.
   *
   * @param in the stream of tree-bank style parse strings
   */
  public ParseSampleStream(ObjectStream<String> in) {
    super(in);
  }

  /**
   * Reads the next non-blank parse string and converts it to a {@link Parse}.
   *
   * @return the next parse, or {@code null} if the underlying stream is exhausted
   * @throws IOException if reading from the underlying stream fails
   */
  public Parse read() throws IOException {
    // BUG FIX: the original implementation called read() recursively for
    // every blank entry; a long run of empty lines could overflow the
    // stack. Iterate instead.
    String parse;
    while ((parse = samples.read()) != null) {
      if (!parse.trim().isEmpty()) {
        return Parse.parseParse(parse);
      }
    }
    return null;
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/Parser.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.parser;

/**
 * Interface for full-syntactic parsers.
 */
public interface Parser {

  /**
   * Returns the specified number of parses or fewer for the specified tokens. <br>
   * <b>Note:</b> The nodes within
   * the returned parses are shared with other parses and therefore their parent node references
   * will not be consistent with their child node reference. {@link Parse#setParent(Parse)}
   * can be used to make the parents consistent with a particular parse, but subsequent calls
   * to <code>setParents</code> can invalidate the results of earlier calls.<br>
   *
   * @param tokens A parse containing the tokens with a single parent node.
   * @param numParses The maximum number of parses desired.
   * @return the specified number of parses (or fewer) for the specified tokens.
   */
  Parse[] parse(Parse tokens, int numParses);

  /**
   * Returns a parse for the specified parse of tokens.
   *
   * @param tokens The root node of a flat parse containing only tokens.
   * @return A full parse of the specified tokens, or the flat chunks of the tokens if a
   *     full parse could not be found.
   */
  Parse parse(Parse tokens);
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/ParserChunkerFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.parser; import opennlp.tools.chunker.ChunkerContextGenerator; import opennlp.tools.chunker.ChunkerFactory; import opennlp.tools.chunker.ChunkerME; import opennlp.tools.ml.model.MaxentModel; import opennlp.tools.util.SequenceValidator; import opennlp.tools.util.TokenTag; public class ParserChunkerFactory extends ChunkerFactory { @Override public ChunkerContextGenerator getContextGenerator() { return new ChunkContextGenerator(ChunkerME.DEFAULT_BEAM_SIZE); } @Override public SequenceValidator<TokenTag> getSequenceValidator() { MaxentModel model = artifactProvider.getArtifact("chunker.model"); String[] outcomes = new String[model.getNumOutcomes()]; for (int i = 0; i < outcomes.length; i++) { outcomes[i] = model.getOutcome(i); } return new ParserChunkerSequenceValidator(outcomes); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/ParserChunkerSequenceValidator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.parser;

import java.util.HashMap;
import java.util.Map;

import opennlp.tools.parser.chunking.Parser;
import opennlp.tools.util.SequenceValidator;
import opennlp.tools.util.TokenTag;

/**
 * Validates chunker outcome sequences for the parser: a "continue" chunk
 * outcome is only valid when it directly follows the same continue outcome
 * or the corresponding "start" outcome. All other outcomes are always valid.
 */
public class ParserChunkerSequenceValidator implements SequenceValidator<TokenTag> {

  // Maps each continue outcome (Parser.CONT prefix) to its matching start
  // outcome (Parser.START prefix); outcomes not in this map are not
  // continue tags.
  private final Map<String, String> continueStartMap;

  /**
   * Creates a validator for the given set of chunker outcome labels.
   *
   * @param outcomes all outcome labels the chunker model can emit
   */
  public ParserChunkerSequenceValidator(String[] outcomes) {
    continueStartMap = new HashMap<>(outcomes.length);
    for (String outcome : outcomes) {
      if (outcome.startsWith(Parser.CONT)) {
        continueStartMap.put(outcome,
            Parser.START + outcome.substring(Parser.CONT.length()));
      }
    }
  }

  /**
   * Checks whether {@code outcome} is a valid continuation of {@code tagList}.
   *
   * @return true if the outcome is not a continue tag, or if it directly
   *     follows the same continue tag or its matching start tag
   */
  public boolean validSequence(int i, String[] inputSequence, String[] tagList, String outcome) {
    String startTag = continueStartMap.get(outcome);
    if (startTag == null) {
      // Not a continue outcome: always valid.
      return true;
    }
    if (tagList.length == 0) {
      // A continue outcome may not begin a sequence.
      return false;
    }
    // Valid only directly after the same continue tag or its start tag.
    // (The original code contained a redundant special case for
    // Parser.OTHER that returned false, identical to the fall-through,
    // plus a second map lookup; both are folded away here.)
    String lastTag = tagList[tagList.length - 1];
    return lastTag.equals(outcome) || lastTag.equals(startTag);
  }

  @Override
  public boolean validSequence(int i, TokenTag[] inputTuples, String[] outcomesSequence,
      String outcome) {
    String[] inputSequence = TokenTag.extractTokens(inputTuples);
    return validSequence(i, inputSequence, outcomesSequence, outcome);
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/ParserCrossValidator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.parser;

import java.io.IOException;

import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.TrainingParameters;
import opennlp.tools.util.eval.CrossValidationPartitioner;
import opennlp.tools.util.eval.FMeasure;

/**
 * Performs n-fold cross-validation of a parser: for each fold a parser model
 * is trained on the fold's training partition and evaluated on its test
 * partition, and the per-fold F-measures are merged into a single result.
 */
public class ParserCrossValidator {

  private final String languageCode;
  private final TrainingParameters params;
  private final HeadRules rules;
  private final FMeasure fmeasure = new FMeasure();
  private final ParserType parserType;
  private final ParserEvaluationMonitor[] monitors;

  /**
   * Creates a cross-validator.
   *
   * @param languageCode the ISO language code of the training data
   * @param params the training parameters
   * @param rules the head rules used for training
   * @param parserType the parser variant to train (CHUNKING or TREEINSERT)
   * @param monitors optional listeners notified during evaluation
   */
  public ParserCrossValidator(String languageCode, TrainingParameters params, HeadRules rules,
      ParserType parserType, ParserEvaluationMonitor... monitors) {
    this.languageCode = languageCode;
    this.params = params;
    this.rules = rules;
    this.parserType = parserType;
    // BUG FIX: the monitors field was previously never assigned, so
    // caller-supplied evaluation monitors were silently ignored.
    this.monitors = monitors;
  }

  /**
   * Runs the cross-validation over the given samples.
   *
   * @param samples the full parse sample stream to partition into folds
   * @param nFolds the number of folds
   * @throws IOException if reading samples or training fails on I/O
   * @throws IllegalStateException if the parser type is not supported
   */
  public void evaluate(ObjectStream<Parse> samples, int nFolds) throws IOException {
    CrossValidationPartitioner<Parse> partitioner =
        new CrossValidationPartitioner<>(samples, nFolds);

    while (partitioner.hasNext()) {
      CrossValidationPartitioner.TrainingSampleStream<Parse> trainingSampleStream =
          partitioner.next();

      ParserModel model;

      // BUG FIX: train on the fold's training partition. The previous code
      // trained on the raw `samples` stream, which both ignores the
      // cross-validation split and exhausts the stream the partitioner wraps.
      if (ParserType.CHUNKING.equals(parserType)) {
        model = opennlp.tools.parser.chunking.Parser.train(
            languageCode, trainingSampleStream, rules, params);
      } else if (ParserType.TREEINSERT.equals(parserType)) {
        model = opennlp.tools.parser.treeinsert.Parser.train(
            languageCode, trainingSampleStream, rules, params);
      } else {
        throw new IllegalStateException("Unexpected parser type: " + parserType);
      }

      ParserEvaluator evaluator = new ParserEvaluator(ParserFactory.create(model), monitors);
      evaluator.evaluate(trainingSampleStream.getTestSampleStream());

      fmeasure.mergeInto(evaluator.getFMeasure());
    }
  }

  /**
   * Returns the F-measure accumulated over all folds evaluated so far.
   *
   * @return the merged F-measure
   */
  public FMeasure getFMeasure() {
    return fmeasure;
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/ParserEvaluationMonitor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.parser;

import opennlp.tools.util.eval.EvaluationMonitor;

/**
 * Marker interface for monitors that observe {@link Parse} samples during
 * parser evaluation. It adds no methods of its own; it only fixes the
 * {@link EvaluationMonitor} type parameter to {@code Parse}.
 */
public interface ParserEvaluationMonitor extends EvaluationMonitor<Parse> {
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/ParserEvaluator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.parser;

import java.util.ArrayList;
import java.util.List;
import java.util.Stack;

import opennlp.tools.cmdline.parser.ParserTool;
import opennlp.tools.util.Span;
import opennlp.tools.util.eval.Evaluator;
import opennlp.tools.util.eval.FMeasure;

/**
 * Class for ParserEvaluator.
 * This ParserEvaluator behaves like EVALB with no exceptions, e.g.,
 * without removing punctuation tags, or equality between ADVP and PRT
 * (as in COLLINS convention). To follow parsing evaluation conventions
 * (Bikel, Collins, Charniak, etc.) as in EVALB, options are to be added
 * to the {@code ParserEvaluatorTool}.
 */
public class ParserEvaluator extends Evaluator<Parse> {

  /**
   * F-measure accumulated over all processed samples.
   */
  private FMeasure fmeasure = new FMeasure();

  /**
   * The parser to evaluate.
   */
  private final Parser parser;

  /**
   * Construct an evaluator with some evaluation monitors.
   *
   * @param aParser the parser whose predictions are scored
   * @param monitors the evaluation monitors
   */
  public ParserEvaluator(final Parser aParser, final ParserEvaluationMonitor... monitors) {
    super(monitors);
    this.parser = aParser;
  }

  /**
   * Obtain {@code Span}s for every non-POS constituent in the parse.
   * The tree is walked iteratively with an explicit stack; POS-tag leaves
   * are excluded so only phrasal constituents are scored.
   *
   * @param parse the parse from which to obtain the spans
   * @return an array containing every constituent span of the parse
   */
  private static Span[] getConstituencySpans(final Parse parse) {

    Stack<Parse> stack = new Stack<>();

    // Seed the traversal with the root's children (the root itself is not scored).
    if (parse.getChildCount() > 0) {
      for (Parse child : parse.getChildren()) {
        stack.push(child);
      }
    }
    List<Span> consts = new ArrayList<>();

    while (!stack.isEmpty()) {

      Parse constSpan = stack.pop();

      if (!constSpan.isPosTag()) {
        Span span = constSpan.getSpan();
        // Re-wrap the span with the constituent type so type labels take
        // part in span equality during scoring.
        consts.add(new Span(span.getStart(), span.getEnd(), constSpan.getType()));

        for (Parse child : constSpan.getChildren()) {
          stack.push(child);
        }
      }
    }

    return consts.toArray(new Span[consts.size()]);
  }

  /**
   * Re-parses the reference sentence's tokens with the parser under test and
   * updates the F-measure against the reference constituents.
   *
   * @param reference the gold parse
   * @return the predicted parse, or {@code null} if the parser produced none
   */
  @Override
  protected final Parse processSample(final Parse reference) {

    List<String> tokens = new ArrayList<>();
    for (Parse token : reference.getTokenNodes()) {
      tokens.add(token.getSpan().getCoveredText(reference.getText()).toString());
    }

    Parse[] predictions = ParserTool.parseLine(String.join(" ", tokens), parser, 1);

    Parse prediction = null;
    if (predictions.length > 0) {
      prediction = predictions[0];

      fmeasure.updateScores(getConstituencySpans(reference), getConstituencySpans(prediction));
    }

    return prediction;
  }

  /**
   * It returns the fmeasure result.
   *
   * @return the fmeasure value
   */
  public final FMeasure getFMeasure() {
    return fmeasure;
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/ParserEventTypeEnum.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.parser; /** * Enumerated type of event types for the parser. */ public enum ParserEventTypeEnum { BUILD, CHECK, @Deprecated CHUNK, @Deprecated TAG, ATTACH }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/ParserFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.parser; public class ParserFactory { private ParserFactory() { } public static Parser create(ParserModel model, int beamSize, double advancePercentage) { if (ParserType.CHUNKING.equals(model.getParserType())) { return new opennlp.tools.parser.chunking.Parser(model, beamSize, advancePercentage); } else if (ParserType.TREEINSERT.equals(model.getParserType())) { return new opennlp.tools.parser.treeinsert.Parser(model, beamSize, advancePercentage); } else { throw new IllegalStateException("Unexpected ParserType: " + model.getParserType().name()); } } public static Parser create(ParserModel model) { return create(model, AbstractBottomUpParser.defaultBeamSize, AbstractBottomUpParser.defaultAdvancePercentage); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/ParserModel.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.parser;

import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.util.Map;
import java.util.Objects;

import opennlp.tools.chunker.ChunkerModel;
import opennlp.tools.ml.model.AbstractModel;
import opennlp.tools.ml.model.MaxentModel;
import opennlp.tools.postag.POSModel;
import opennlp.tools.util.InvalidFormatException;
import opennlp.tools.util.model.ArtifactSerializer;
import opennlp.tools.util.model.BaseModel;
import opennlp.tools.util.model.ChunkerModelSerializer;
import opennlp.tools.util.model.POSModelSerializer;

/**
 * The {@link BaseModel}-based model for a parser. Bundles the build, check
 * and (for tree-insert parsers) attach maxent models together with the POS
 * tagger model, chunker model and head rules in one artifact map.
 */
// TODO: Model should validate the artifact map
public class ParserModel extends BaseModel {

  // Serializer for the legacy (1.5.x) English head-rules artifact format.
  private static class HeadRulesSerializer implements
      ArtifactSerializer<opennlp.tools.parser.lang.en.HeadRules> {

    public opennlp.tools.parser.lang.en.HeadRules create(InputStream in)
        throws IOException, InvalidFormatException {
      return new opennlp.tools.parser.lang.en.HeadRules(new BufferedReader(
          new InputStreamReader(in, StandardCharsets.UTF_8)));
    }

    public void serialize(opennlp.tools.parser.lang.en.HeadRules artifact, OutputStream out)
        throws IOException {
      artifact.serialize(new OutputStreamWriter(out, StandardCharsets.UTF_8));
    }
  }

  private static final String COMPONENT_NAME = "Parser";

  // Artifact-map entry names. NOTE: "parsertager" spelling is part of the
  // on-disk model format and must not be corrected.
  private static final String BUILD_MODEL_ENTRY_NAME = "build.model";

  private static final String CHECK_MODEL_ENTRY_NAME = "check.model";

  private static final String ATTACH_MODEL_ENTRY_NAME = "attach.model";

  private static final String PARSER_TAGGER_MODEL_ENTRY_NAME = "parsertager.postagger";

  private static final String CHUNKER_TAGGER_MODEL_ENTRY_NAME = "parserchunker.chunker";

  private static final String HEAD_RULES_MODEL_ENTRY_NAME = "head-rules.headrules";

  // Manifest property carrying the ParserType name.
  private static final String PARSER_TYPE = "parser-type";

  /**
   * Initializes a parser model with all component models.
   *
   * @param languageCode the ISO language code
   * @param buildModel the build maxent model
   * @param checkModel the check maxent model
   * @param attachModel the attach maxent model; must be {@code null} for
   *     {@link ParserType#CHUNKING} and non-null for {@link ParserType#TREEINSERT}
   * @param parserTagger the POS tagger model used by the parser
   * @param chunkerTagger the chunker model used by the parser
   * @param headRules the head rules
   * @param modelType the type of parser this model is for
   * @param manifestInfoEntries additional manifest entries, may be {@code null}
   */
  public ParserModel(String languageCode, MaxentModel buildModel, MaxentModel checkModel,
      MaxentModel attachModel, POSModel parserTagger,
      ChunkerModel chunkerTagger, opennlp.tools.parser.HeadRules headRules,
      ParserType modelType, Map<String, String> manifestInfoEntries) {
    super(COMPONENT_NAME, languageCode, manifestInfoEntries);

    setManifestProperty(PARSER_TYPE, modelType.name());

    artifactMap.put(BUILD_MODEL_ENTRY_NAME, buildModel);

    artifactMap.put(CHECK_MODEL_ENTRY_NAME, checkModel);

    // The attach model only exists for the tree-insert parser.
    if (ParserType.CHUNKING.equals(modelType)) {
      if (attachModel != null)
        throw new IllegalArgumentException("attachModel must be null for chunking parser!");
    } else if (ParserType.TREEINSERT.equals(modelType)) {
      Objects.requireNonNull(attachModel, "attachModel must not be null");

      artifactMap.put(ATTACH_MODEL_ENTRY_NAME, attachModel);
    } else {
      throw new IllegalStateException("Unknown ParserType '" + modelType + "'!");
    }

    artifactMap.put(PARSER_TAGGER_MODEL_ENTRY_NAME, parserTagger);

    artifactMap.put(CHUNKER_TAGGER_MODEL_ENTRY_NAME, chunkerTagger);

    artifactMap.put(HEAD_RULES_MODEL_ENTRY_NAME, headRules);

    checkArtifactMap();
  }

  /**
   * Convenience constructor without extra manifest entries.
   */
  public ParserModel(String languageCode, MaxentModel buildModel, MaxentModel checkModel,
      MaxentModel attachModel, POSModel parserTagger,
      ChunkerModel chunkerTagger, opennlp.tools.parser.HeadRules headRules,
      ParserType modelType) {
    this (languageCode, buildModel, checkModel, attachModel, parserTagger,
        chunkerTagger, headRules, modelType, null);
  }

  /**
   * Convenience constructor for a chunking parser (no attach model).
   */
  public ParserModel(String languageCode, MaxentModel buildModel, MaxentModel checkModel,
      POSModel parserTagger, ChunkerModel chunkerTagger,
      opennlp.tools.parser.HeadRules headRules, ParserType type,
      Map<String, String> manifestInfoEntries) {
    this (languageCode, buildModel, checkModel, null, parserTagger,
        chunkerTagger, headRules, type, manifestInfoEntries);
  }

  /**
   * Loads a parser model from the given stream.
   *
   * @param in the stream containing the serialized model
   * @throws IOException if reading or validating the model fails
   */
  public ParserModel(InputStream in) throws IOException {
    super(COMPONENT_NAME, in);
  }

  /**
   * Loads a parser model from the given file.
   */
  public ParserModel(File modelFile) throws IOException {
    super(COMPONENT_NAME, modelFile);
  }

  /**
   * Loads a parser model from the given path.
   */
  public ParserModel(Path modelPath) throws IOException {
    this(modelPath.toFile());
  }

  /**
   * Loads a parser model from the given URL.
   */
  public ParserModel(URL modelURL) throws IOException {
    super(COMPONENT_NAME, modelURL);
  }

  @Override
  protected void createArtifactSerializers(
      Map<String, ArtifactSerializer> serializers) {
    super.createArtifactSerializers(serializers);

    // In 1.6.x the head-rules artifact is serialized with the new API
    // which uses the Serializable interface.
    // This change is not backward compatible with the 1.5.x models.
    // In order to load a 1.5.x model the English head-rules serializer must be
    // put on the serializer map.
    if (getVersion().getMajor() == 1 && getVersion().getMinor() == 5) {
      serializers.put("headrules", new HeadRulesSerializer());
    }

    serializers.put("postagger", new POSModelSerializer());
    serializers.put("chunker", new ChunkerModelSerializer());
  }

  /**
   * Returns the parser type stored in the manifest, or {@code null} if the
   * manifest property is missing or unknown.
   */
  public ParserType getParserType() {
    return ParserType.parse(getManifestProperty(PARSER_TYPE));
  }

  public MaxentModel getBuildModel() {
    return (MaxentModel) artifactMap.get(BUILD_MODEL_ENTRY_NAME);
  }

  public MaxentModel getCheckModel() {
    return (MaxentModel) artifactMap.get(CHECK_MODEL_ENTRY_NAME);
  }

  /**
   * Returns the attach model, or {@code null} for a chunking parser model.
   */
  public MaxentModel getAttachModel() {
    return (MaxentModel) artifactMap.get(ATTACH_MODEL_ENTRY_NAME);
  }

  public POSModel getParserTaggerModel() {
    return (POSModel) artifactMap.get(PARSER_TAGGER_MODEL_ENTRY_NAME);
  }

  public ChunkerModel getParserChunkerModel() {
    return (ChunkerModel) artifactMap.get(CHUNKER_TAGGER_MODEL_ENTRY_NAME);
  }

  public opennlp.tools.parser.HeadRules getHeadRules() {
    return (opennlp.tools.parser.HeadRules)
        artifactMap.get(HEAD_RULES_MODEL_ENTRY_NAME);
  }

  // TODO: Update model methods should make sure properties are copied correctly ...

  /**
   * Returns a copy of this model with the build model replaced.
   * NOTE(review): extra manifest entries are not copied by the update methods
   * (see TODO above).
   */
  public ParserModel updateBuildModel(MaxentModel buildModel) {
    return new ParserModel(getLanguage(), buildModel, getCheckModel(), getAttachModel(),
        getParserTaggerModel(), getParserChunkerModel(), getHeadRules(), getParserType());
  }

  /**
   * Returns a copy of this model with the check model replaced.
   */
  public ParserModel updateCheckModel(MaxentModel checkModel) {
    return new ParserModel(getLanguage(), getBuildModel(), checkModel,
        getAttachModel(), getParserTaggerModel(), getParserChunkerModel(),
        getHeadRules(), getParserType());
  }

  /**
   * Returns a copy of this model with the POS tagger model replaced.
   */
  public ParserModel updateTaggerModel(POSModel taggerModel) {
    return new ParserModel(getLanguage(), getBuildModel(), getCheckModel(), getAttachModel(),
        taggerModel, getParserChunkerModel(), getHeadRules(), getParserType());
  }

  /**
   * Returns a copy of this model with the chunker model replaced.
   */
  public ParserModel updateChunkerModel(ChunkerModel chunkModel) {
    return new ParserModel(getLanguage(), getBuildModel(), getCheckModel(), getAttachModel(),
        getParserTaggerModel(), chunkModel, getHeadRules(), getParserType());
  }

  @Override
  protected void validateArtifactMap() throws InvalidFormatException {
    super.validateArtifactMap();

    if (!(artifactMap.get(BUILD_MODEL_ENTRY_NAME)  instanceof AbstractModel)) {
      throw new InvalidFormatException("Missing the build model!");
    }

    ParserType modelType = getParserType();

    if (modelType != null) {
      if (ParserType.CHUNKING.equals(modelType)) {
        if (artifactMap.get(ATTACH_MODEL_ENTRY_NAME) != null)
            throw new InvalidFormatException("attachModel must be null for chunking parser!");
      } else if (ParserType.TREEINSERT.equals(modelType)) {
        if (!(artifactMap.get(ATTACH_MODEL_ENTRY_NAME) instanceof AbstractModel))
          throw new InvalidFormatException("attachModel must not be null!");
      } else {
        throw new InvalidFormatException("Unknown ParserType '" + modelType + "'!");
      }
    }
    else {
      throw new InvalidFormatException("Missing the parser type property!");
    }

    if (!(artifactMap.get(CHECK_MODEL_ENTRY_NAME)  instanceof AbstractModel)) {
      throw new InvalidFormatException("Missing the check model!");
    }

    if (!(artifactMap.get(PARSER_TAGGER_MODEL_ENTRY_NAME)  instanceof POSModel)) {
      throw new InvalidFormatException("Missing the tagger model!");
    }

    if (!(artifactMap.get(CHUNKER_TAGGER_MODEL_ENTRY_NAME)  instanceof ChunkerModel)) {
      throw new InvalidFormatException("Missing the chunker model!");
    }

    if (!(artifactMap.get(HEAD_RULES_MODEL_ENTRY_NAME) instanceof HeadRules)) {
      throw new InvalidFormatException("Missing the head rules!");
    }
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/ParserType.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.parser; public enum ParserType { CHUNKING, TREEINSERT; public static ParserType parse(String type) { if (ParserType.CHUNKING.name().equals(type)) { return ParserType.CHUNKING; } else if (ParserType.TREEINSERT.name().equals(type)) { return ParserType.TREEINSERT; } else { return null; } } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/PosSampleStream.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.parser;

import java.io.IOException;

import opennlp.tools.postag.POSSample;
import opennlp.tools.util.FilterObjectStream;
import opennlp.tools.util.ObjectStream;

/**
 * A filtering stream that converts each {@link Parse} into a
 * {@link POSSample} by reading the token text and POS-tag type off the
 * parse's tag nodes.
 */
public class PosSampleStream extends FilterObjectStream<Parse, POSSample> {

  public PosSampleStream(ObjectStream<Parse> in) {
    super(in);
  }

  /**
   * Reads the next parse and converts it, or returns {@code null} when the
   * underlying stream is exhausted.
   *
   * @return the converted sample, or {@code null} at end of stream
   * @throws IOException if the underlying stream fails
   */
  public POSSample read() throws IOException {

    Parse parse = samples.read();

    if (parse == null) {
      return null;
    }

    Parse[] tagNodes = parse.getTagNodes();

    String[] tokens = new String[tagNodes.length];
    String[] tags = new String[tagNodes.length];

    // Each tag node covers one token; its type is the POS tag.
    for (int i = 0; i < tagNodes.length; i++) {
      tokens[i] = tagNodes[i].getCoveredText();
      tags[i] = tagNodes[i].getType();
    }

    return new POSSample(tokens, tags);
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Package containing common code for performing full syntactic parsing. */ package opennlp.tools.parser;
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/chunking/BuildContextGenerator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.parser.chunking;

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

import opennlp.tools.dictionary.Dictionary;
import opennlp.tools.parser.AbstractContextGenerator;
import opennlp.tools.parser.Cons;
import opennlp.tools.parser.Parse;
import opennlp.tools.util.StringList;

/**
 * Class to generate predictive contexts for deciding how constituents should be combined together.
 */
public class BuildContextGenerator extends AbstractContextGenerator {

  // Optional head-word dictionary; when present, n-gram membership gates
  // which lexicalized features are emitted.
  private Dictionary dict;
  // Reusable scratch arrays for dictionary lookups (1-, 2- and 3-grams).
  private String[] unigram;
  private String[] bigram;
  private String[] trigram;

  /**
   * Creates a new context generator for making decisions about combining constituents together.
   */
  public BuildContextGenerator() {
    super();
    zeroBackOff = false;
    useLabel = true;
  }

  /**
   * Creates a context generator that restricts lexicalized features to head-word
   * n-grams contained in the given dictionary.
   *
   * @param dict the head-word n-gram dictionary
   */
  public BuildContextGenerator(Dictionary dict) {
    this();
    this.dict = dict;
    unigram = new String[1];
    bigram = new String[2];
    trigram = new String[3];
  }

  public String[] getContext(Object o) {
    Object[] params = (Object[]) o;
    return getContext((Parse[]) params[0], (Integer) params[1]);
  }

  /**
   * Returns the predictive context used to determine how the constituent at the specified index
   * should be combined with other constituents.
   *
   * @param constituents The constituents which have yet to be combined into new constituents.
   * @param index The index of the constituent which is being considered.
   * @return the context for building constituents at the specified index.
   */
  public String[] getContext(Parse[] constituents, int index) {
    List<String> features = new ArrayList<>(100);
    int ps = constituents.length;

    // Window of constituents around index: cons(-2), cons(-1), cons(0), cons(1), cons(2),
    // plus the punctuation sets between neighbors.
    Collection<Parse> punct2s = null;
    Collection<Parse> punct_2s = null;

    Parse p_2 = null;
    if (index - 2 >= 0) {
      p_2 = constituents[index - 2];
    }

    Parse p_1 = null;
    if (index - 1 >= 0) {
      p_1 = constituents[index - 1];
      punct_2s = p_1.getPreviousPunctuationSet();
    }

    Parse p0 = constituents[index];
    Collection<Parse> punct_1s = p0.getPreviousPunctuationSet();
    Collection<Parse> punct1s = p0.getNextPunctuationSet();

    Parse p1 = null;
    if (index + 1 < ps) {
      p1 = constituents[index + 1];
      punct2s = p1.getNextPunctuationSet();
    }

    Parse p2 = null;
    if (index + 2 < ps) {
      p2 = constituents[index + 2];
    }

    // Dictionary membership flags: uX for unigrams, bXY for bigrams, tXYZ for
    // trigrams over the head words of the window. All default to true (no
    // dictionary means every lexicalized feature is allowed).
    boolean u_2 = true;
    boolean u_1 = true;
    boolean u0 = true;
    boolean u1 = true;
    boolean u2 = true;

    boolean b_2_1 = true;
    boolean b_10 = true;
    boolean b01 = true;
    boolean b12 = true;

    boolean t_2_10 = true;
    boolean t_101 = true;
    boolean t012 = true;

    if (dict != null) {

      if (p_2 != null) {
        unigram[0] = p_2.getHead().getCoveredText();
        u_2 = dict.contains(new StringList(unigram));
      }
      if (p2 != null) {
        unigram[0] = p2.getHead().getCoveredText();
        u2 = dict.contains(new StringList(unigram));
      }
      unigram[0] = p0.getHead().getCoveredText();
      u0 = dict.contains(new StringList(unigram));
      if (p_2 != null && p_1 != null) {
        bigram[0] = p_2.getHead().getCoveredText();
        bigram[1] = p_1.getHead().getCoveredText();
        b_2_1 = dict.contains(new StringList(bigram));

        trigram[0] = p_2.getHead().getCoveredText();
        trigram[1] = p_1.getHead().getCoveredText();
        trigram[2] = p0.getHead().getCoveredText();
        t_2_10 = dict.contains(new StringList(trigram));
      }
      if (p_1 != null && p1 != null) {
        trigram[0] = p_1.getHead().getCoveredText();
        trigram[1] = p0.getHead().getCoveredText();
        trigram[2] = p1.getHead().getCoveredText();
        t_101 = dict.contains(new StringList(trigram));
      }
      if (p_1 != null) {
        unigram[0] = p_1.getHead().getCoveredText();
        u_1 = dict.contains(new StringList(unigram));

        //extra check for 2==null case
        // NOTE(review): mixed '&'/'&&' below is preserved from the original;
        // all operands are plain booleans so the result is the same either way.
        b_2_1 = b_2_1 && u_1 & u_2;
        t_2_10 = t_2_10 && u_1 & u_2 & u0;
        t_101 = t_101 && u_1 & u0 && u1;

        bigram[0] = p_1.getHead().getCoveredText();
        bigram[1] = p0.getHead().getCoveredText();
        b_10 = dict.contains(new StringList(bigram)) && u_1 && u0;
      }
      if (p1 != null && p2 != null) {
        bigram[0] = p1.getHead().getCoveredText();
        bigram[1] = p2.getHead().getCoveredText();
        b12 = dict.contains(new StringList(bigram));

        trigram[0] = p0.getHead().getCoveredText();
        trigram[1] = p1.getHead().getCoveredText();
        trigram[2] = p2.getHead().getCoveredText();
        t012 = dict.contains(new StringList(trigram));
      }
      if (p1 != null) {
        unigram[0] = p1.getHead().getCoveredText();
        u1 = dict.contains(new StringList(unigram));

        //extra check for 2==null case
        b12 = b12 && u1 && u2;
        t012 = t012 && u1 && u2 && u0;
        t_101 = t_101 && u0 && u_1 && u1;

        bigram[0] = p0.getHead().getCoveredText();
        bigram[1] = p1.getHead().getCoveredText();
        b01 = dict.contains(new StringList(bigram));
        b01 = b01 && u0 && u1;
      }
    }

    // Lexicalized (cons) and backed-off (consbo) representations of each
    // window position; cons/consbo are inherited from AbstractContextGenerator.
    String consp_2 = cons(p_2, -2);
    String consp_1 = cons(p_1, -1);
    String consp0 = cons(p0, 0);
    String consp1 = cons(p1, 1);
    String consp2 = cons(p2, 2);

    String consbop_2 = consbo(p_2, -2);
    String consbop_1 = consbo(p_1, -1);
    String consbop0 = consbo(p0, 0);
    String consbop1 = consbo(p1, 1);
    String consbop2 = consbo(p2, 2);

    Cons c_2 = new Cons(consp_2,consbop_2,-2,u_2);
    Cons c_1 = new Cons(consp_1,consbop_1,-1,u_1);
    Cons c0 = new Cons(consp0,consbop0,0,u0);
    Cons c1 = new Cons(consp1,consbop1,1,u1);
    Cons c2 = new Cons(consp2,consbop2,2,u2);

    //default
    features.add("default");
    //first constituent label
    //features.add("fl="+constituents[0].getLabel());

    // features.add("stage=cons(i)");
    // Unigram features: lexicalized form only when the head word is in the
    // dictionary, backed-off form always.
    // cons(-2), cons(-1), cons(0), cons(1), cons(2)
    if (u0) features.add(consp0);
    features.add(consbop0);
    if (u_2) features.add(consp_2);
    features.add(consbop_2);
    if (u_1) features.add(consp_1);
    features.add(consbop_1);
    if (u1) features.add(consp1);
    features.add(consbop1);
    if (u2) features.add(consp2);
    features.add(consbop2);

    // Bigram and trigram window features (order of these calls is significant
    // for feature naming/training consistency).
    //cons(0),cons(1)
    cons2(features,c0,c1,punct1s,b01);
    //cons(-1),cons(0)
    cons2(features,c_1,c0,punct_1s,b_10);
    //features.add("stage=cons(0),cons(1),cons(2)");
    cons3(features,c0,c1,c2,punct1s,punct2s,t012,b01,b12);
    cons3(features,c_2,c_1,c0,punct_2s,punct_1s,t_2_10,b_2_1,b_10);
    cons3(features,c_1,c0,c1,punct_1s,punct1s,t_101,b_10,b01);
    //features.add("stage=other");

    // Punctuation-matching features: scan left until the start of the current
    // constituent (label with Parser.START prefix) looking for the opener
    // that pairs with the current token.
    String p0Tag = p0.getType();
    if (p0Tag.equals("-RRB-")) {
      for (int pi = index - 1; pi >= 0; pi--) {
        Parse p = constituents[pi];
        if (p.getType().equals("-LRB-")) {
          features.add("bracketsmatch");
          break;
        }
        if (p.getLabel().startsWith(Parser.START)) {
          break;
        }
      }
    }
    if (p0Tag.equals("-RCB-")) {
      for (int pi = index - 1; pi >= 0; pi--) {
        Parse p = constituents[pi];
        if (p.getType().equals("-LCB-")) {
          features.add("bracketsmatch");
          break;
        }
        if (p.getLabel().startsWith(Parser.START)) {
          break;
        }
      }
    }
    if (p0Tag.equals("''")) {
      for (int pi = index - 1; pi >= 0; pi--) {
        Parse p = constituents[pi];
        if (p.getType().equals("``")) {
          features.add("quotesmatch");
          break;
        }
        if (p.getLabel().startsWith(Parser.START)) {
          break;
        }
      }
    }
    if (p0Tag.equals("'")) {
      for (int pi = index - 1; pi >= 0; pi--) {
        Parse p = constituents[pi];
        if (p.getType().equals("`")) {
          features.add("quotesmatch");
          break;
        }
        if (p.getLabel().startsWith(Parser.START)) {
          break;
        }
      }
    }
    if (p0Tag.equals(",")) {
      for (int pi = index - 1; pi >= 0; pi--) {
        Parse p = constituents[pi];
        if (p.getType().equals(",")) {
          features.add("iscomma");
          break;
        }
        if (p.getLabel().startsWith(Parser.START)) {
          break;
        }
      }
    }
    // Sentence-final period that closes a constituent reaching back to the
    // first token marks end-of-sentence.
    if (p0Tag.equals(".") && index == ps - 1) {
      for (int pi = index - 1; pi >= 0; pi--) {
        Parse p = constituents[pi];
        if (p.getLabel().startsWith(Parser.START)) {
          if (pi == 0) {
            features.add("endofsentence");
          }
          break;
        }
      }
    }
    return features.toArray(new String[features.size()]);
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/chunking/CheckContextGenerator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.parser.chunking;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;

import opennlp.tools.parser.AbstractContextGenerator;
import opennlp.tools.parser.Parse;

/**
 * Class for generating predictive context for deciding when a constituent is complete.
 */
public class CheckContextGenerator extends AbstractContextGenerator {

  /**
   * Creates a new context generator for generating predictive context for deciding
   * when a constituent is complete.
   */
  public CheckContextGenerator() {
    super();
  }

  public String[] getContext(Object o) {
    Object[] params = (Object[]) o;
    return getContext((Parse[]) params[0], (String) params[1], (Integer) params[2],
        (Integer) params[3]);
  }

  /**
   * Returns predictive context for deciding whether the specified constituents between the
   * specified start and end index can be combined to form a new constituent of the specified type.
   *
   * @param constituents The constituents which have yet to be combined into new constituents.
   * @param type The type of the new constituent proposed.
   * @param start The first constituent of the proposed constituent.
   * @param end The last constituent of the proposed constituent.
   * @return The predictive context for deciding whether a new constituent should be created.
   */
  public String[] getContext(Parse[] constituents, String type, int start, int end) {
    int ps = constituents.length;
    List<String> features = new ArrayList<>(100);

    //default
    features.add("default");
    //first constituent label
    features.add("fl=" + constituents[0].getLabel());
    Parse pstart = constituents[start];
    Parse pend = constituents[end];
    // Boundary features for the first and last constituent of the proposal;
    // checkcons is inherited from AbstractContextGenerator.
    checkcons(pstart, "begin", type, features);
    checkcons(pend, "last", type, features);
    // Production features: "p=" is the plain type sequence of the proposed
    // constituent, "pp=" additionally interleaves following punctuation.
    StringBuilder production = new StringBuilder(20);
    StringBuilder punctProduction = new StringBuilder(20);
    production.append("p=").append(type).append("->");
    punctProduction.append("pp=").append(type).append("->");
    for (int pi = start; pi < end; pi++) {
      Parse p = constituents[pi];
      checkcons(p, pend, type, features);
      production.append(p.getType()).append(",");
      punctProduction.append(p.getType()).append(",");
      Collection<Parse> nextPunct = p.getNextPunctuationSet();
      if (nextPunct != null) {
        for (Iterator<Parse> pit = nextPunct.iterator(); pit.hasNext();) {
          Parse punct = pit.next();
          punctProduction.append(punct.getType()).append(",");
        }
      }
    }
    production.append(pend.getType());
    punctProduction.append(pend.getType());
    features.add(production.toString());
    features.add(punctProduction.toString());
    // Surrounding context: up to two constituents on each side of the
    // proposed span, with the adjacent punctuation sets.
    Parse p_2 = null;
    Parse p_1 = null;
    Parse p1 = null;
    Parse p2 = null;
    Collection<Parse> p1s = constituents[end].getNextPunctuationSet();
    Collection<Parse> p2s = null;
    Collection<Parse> p_1s = constituents[start].getPreviousPunctuationSet();
    Collection<Parse> p_2s = null;
    if (start - 2 >= 0) {
      p_2 = constituents[start - 2];
    }
    if (start - 1 >= 0) {
      p_1 = constituents[start - 1];
      p_2s = p_1.getPreviousPunctuationSet();
    }
    if (end + 1 < ps) {
      p1 = constituents[end + 1];
      p2s = p1.getNextPunctuationSet();
    }
    if (end + 2 < ps) {
      p2 = constituents[end + 2];
    }
    surround(p_1, -1, type, p_1s, features);
    surround(p_2, -2, type, p_2s, features);
    surround(p1, 1, type, p1s, features);
    surround(p2, 2, type, p2s, features);
    return features.toArray(new String[features.size()]);
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/chunking/Parser.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.parser.chunking;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import opennlp.tools.chunker.Chunker;
import opennlp.tools.chunker.ChunkerME;
import opennlp.tools.chunker.ChunkerModel;
import opennlp.tools.dictionary.Dictionary;
import opennlp.tools.ml.BeamSearch;
import opennlp.tools.ml.EventTrainer;
import opennlp.tools.ml.TrainerFactory;
import opennlp.tools.ml.model.Event;
import opennlp.tools.ml.model.MaxentModel;
import opennlp.tools.parser.AbstractBottomUpParser;
import opennlp.tools.parser.ChunkSampleStream;
import opennlp.tools.parser.HeadRules;
import opennlp.tools.parser.Parse;
import opennlp.tools.parser.ParserChunkerFactory;
import opennlp.tools.parser.ParserEventTypeEnum;
import opennlp.tools.parser.ParserModel;
import opennlp.tools.parser.ParserType;
import opennlp.tools.parser.PosSampleStream;
import opennlp.tools.postag.POSModel;
import opennlp.tools.postag.POSTagger;
import opennlp.tools.postag.POSTaggerFactory;
import opennlp.tools.postag.POSTaggerME;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.Span;
import opennlp.tools.util.TrainingParameters;

/**
 * Class for a shift reduce style parser based on Adwait Ratnaparkhi's 1998 thesis.
 */
public class Parser extends AbstractBottomUpParser {

  // Maxent model deciding the next build outcome (start/continue a constituent of some type).
  private MaxentModel buildModel;
  // Maxent model deciding whether the current constituent is complete.
  private MaxentModel checkModel;

  private BuildContextGenerator buildContextGenerator;
  private CheckContextGenerator checkContextGenerator;

  // Scratch arrays reused across eval calls; sized to the model outcome counts.
  // NOTE(review): reuse makes instances of this class not thread-safe — confirm callers
  // do not share a Parser across threads.
  private double[] bprobs;
  private double[] cprobs;

  private static final String TOP_START = START + TOP_NODE;

  // Index of the S-TOP outcome in the build model; only valid once the parse is complete.
  private int topStartIndex;
  // Maps a start outcome (e.g. "S-NP") to its constituent type (e.g. "NP").
  private Map<String, String> startTypeMap;
  // Maps a continue outcome (e.g. "C-NP") to its constituent type (e.g. "NP").
  private Map<String, String> contTypeMap;

  private int completeIndex;
  private int incompleteIndex;

  /**
   * Creates a parser from the models packaged in the given {@link ParserModel}
   * using the specified beam size and advance percentage.
   */
  public Parser(ParserModel model, int beamSize, double advancePercentage) {
    this(model.getBuildModel(), model.getCheckModel(),
        new POSTaggerME(model.getParserTaggerModel()),
        new ChunkerME(model.getParserChunkerModel()),
        model.getHeadRules(), beamSize, advancePercentage);
  }

  /**
   * Creates a parser from the given model with the default beam size and advance percentage.
   */
  public Parser(ParserModel model) {
    this(model, defaultBeamSize, defaultAdvancePercentage);
  }

  /**
   * Creates a new parser using the specified models and head rules using the specified beam
   * size and advance percentage.
   * @param buildModel The model to assign constituent labels.
   * @param checkModel The model to determine a constituent is complete.
   * @param tagger The model to assign pos-tags.
   * @param chunker The model to assign flat constituent labels.
   * @param headRules The head rules for head word perculation.
   * @param beamSize The number of different parses kept during parsing.
   * @param advancePercentage The minimal amount of probability mass which advanced outcomes
   *     must represent. Only outcomes which contribute to the top "advancePercentage"
   *     will be explored.
   */
  private Parser(MaxentModel buildModel, MaxentModel checkModel, POSTagger tagger,
      Chunker chunker, HeadRules headRules, int beamSize, double advancePercentage) {
    super(tagger, chunker, headRules, beamSize, advancePercentage);
    this.buildModel = buildModel;
    this.checkModel = checkModel;
    bprobs = new double[buildModel.getNumOutcomes()];
    cprobs = new double[checkModel.getNumOutcomes()];
    this.buildContextGenerator = new BuildContextGenerator();
    this.checkContextGenerator = new CheckContextGenerator();
    startTypeMap = new HashMap<>();
    contTypeMap = new HashMap<>();
    // Pre-index the build model's outcomes by their START/CONT prefix so advanceParses
    // can classify an outcome with a single map lookup.
    for (int boi = 0, bon = buildModel.getNumOutcomes(); boi < bon; boi++) {
      String outcome = buildModel.getOutcome(boi);
      if (outcome.startsWith(START)) {
        //System.err.println("startMap "+outcome+"->"+outcome.substring(START.length()));
        startTypeMap.put(outcome, outcome.substring(START.length()));
      } else if (outcome.startsWith(CONT)) {
        //System.err.println("contMap "+outcome+"->"+outcome.substring(CONT.length()));
        contTypeMap.put(outcome, outcome.substring(CONT.length()));
      }
    }
    topStartIndex = buildModel.getIndex(TOP_START);
    completeIndex = checkModel.getIndex(COMPLETE);
    incompleteIndex = checkModel.getIndex(INCOMPLETE);
  }

  /**
   * Advances the parse to the TOP node: scores the S-TOP build outcome and the
   * COMPLETE check outcome, folds both log-probabilities into the parse, and
   * marks the parse as TOP.
   */
  @Override
  protected void advanceTop(Parse p) {
    buildModel.eval(buildContextGenerator.getContext(p.getChildren(), 0), bprobs);
    p.addProb(Math.log(bprobs[topStartIndex]));
    checkModel.eval(checkContextGenerator.getContext(p.getChildren(), TOP_NODE, 0, 0), cprobs);
    p.addProb(Math.log(cprobs[completeIndex]));
    p.setType(TOP_NODE);
  }

  /**
   * Produces the next set of candidate parses from {@code p} by labeling the first
   * unlabeled node with each sufficiently likely build outcome and, for each, deciding
   * (via the check model) whether to reduce the open constituent, shift, or both.
   * Outcomes are explored greedily (largest probability first) until {@code probMass}
   * of the build distribution has been covered.
   */
  @Override
  protected Parse[] advanceParses(final Parse p, double probMass) {
    // Complement of the advance mass: a check outcome must beat q to be pursued.
    double q = 1 - probMass;
    /* The closest previous node which has been labeled as a start node. */
    Parse lastStartNode = null;
    /* The index of the closest previous node which has been labeled as a start node. */
    int lastStartIndex = -1;
    /* The type of the closest previous node which has been labeled as a start node. */
    String lastStartType = null;
    /* The index of the node which will be labeled in this iteration of advancing the parse. */
    int advanceNodeIndex;
    /* The node which will be labeled in this iteration of advancing the parse. */
    Parse advanceNode = null;
    Parse[] originalChildren = p.getChildren();
    Parse[] children = collapsePunctuation(originalChildren,punctSet);
    int numNodes = children.length;
    if (numNodes == 0) {
      return null;
    }
    //determines which node needs to be labeled and prior labels.
    for (advanceNodeIndex = 0; advanceNodeIndex < numNodes; advanceNodeIndex++) {
      advanceNode = children[advanceNodeIndex];
      if (advanceNode.getLabel() == null) {
        break;
      } else if (startTypeMap.containsKey(advanceNode.getLabel())) {
        lastStartType = startTypeMap.get(advanceNode.getLabel());
        lastStartNode = advanceNode;
        lastStartIndex = advanceNodeIndex;
        //System.err.println("lastStart "+i+" "+lastStart.label+" "+lastStart.prob);
      }
    }
    // Map the index back into the uncollapsed children so setChild targets the right node.
    int originalAdvanceIndex = mapParseIndex(advanceNodeIndex,children,originalChildren);
    List<Parse> newParsesList = new ArrayList<>(buildModel.getNumOutcomes());
    //call build
    buildModel.eval(buildContextGenerator.getContext(children, advanceNodeIndex), bprobs);
    double bprobSum = 0;
    while (bprobSum < probMass) {
      // The largest unadvanced labeling.
      int max = 0;
      for (int pi = 1; pi < bprobs.length; pi++) { //for each build outcome
        if (bprobs[pi] > bprobs[max]) {
          max = pi;
        }
      }
      if (bprobs[max] == 0) {
        break;
      }
      double bprob = bprobs[max];
      bprobs[max] = 0; //zero out so new max can be found
      bprobSum += bprob;
      String tag = buildModel.getOutcome(max);
      //System.out.println("trying "+tag+" "+bprobSum+" lst="+lst);
      if (max == topStartIndex) { // can't have top until complete
        continue;
      }
      //System.err.println(i+" "+tag+" "+bprob);
      if (startTypeMap.containsKey(tag)) { //update last start
        lastStartIndex = advanceNodeIndex;
        lastStartNode = advanceNode;
        lastStartType = startTypeMap.get(tag);
      } else if (contTypeMap.containsKey(tag)) {
        if (lastStartNode == null || !lastStartType.equals(contTypeMap.get(tag))) {
          continue; //Cont must match previous start or continue
        }
      }
      Parse newParse1 = (Parse) p.clone(); //clone parse
      if (createDerivationString) newParse1.getDerivation().append(max).append("-");
      //replace constituent being labeled to create new derivation
      newParse1.setChild(originalAdvanceIndex,tag);
      newParse1.addProb(Math.log(bprob));
      //check
      //String[] context = checkContextGenerator.getContext(newParse1.getChildren(), lastStartType,
      //    lastStartIndex, advanceNodeIndex);
      checkModel.eval(checkContextGenerator.getContext(
          collapsePunctuation(newParse1.getChildren(),punctSet),
          lastStartType, lastStartIndex, advanceNodeIndex), cprobs);
      //System.out.println("check "+lastStartType+" "+cprobs[completeIndex]+" "+cprobs[incompleteIndex]
      //    +" "+tag+" "+java.util.Arrays.asList(context));
      Parse newParse2;
      if (cprobs[completeIndex] > q) { //make sure a reduce is likely
        newParse2 = (Parse) newParse1.clone();
        if (createDerivationString) newParse2.getDerivation().append(1).append(".");
        newParse2.addProb(Math.log(cprobs[completeIndex]));
        // Collect the constituents spanned by [lastStartIndex, advanceNodeIndex];
        // flat tracks whether they are all pos-tags (flat chunks belong to the chunker).
        Parse[] cons = new Parse[advanceNodeIndex - lastStartIndex + 1];
        boolean flat = true;
        //first
        cons[0] = lastStartNode;
        flat &= cons[0].isPosTag();
        //last
        cons[advanceNodeIndex - lastStartIndex] = advanceNode;
        flat &= cons[advanceNodeIndex - lastStartIndex].isPosTag();
        //middle
        for (int ci = 1; ci < advanceNodeIndex - lastStartIndex; ci++) {
          cons[ci] = children[ci + lastStartIndex];
          flat &= cons[ci].isPosTag();
        }
        if (!flat) { //flat chunks are done by chunker
          //check for top node to include end and begining punctuation
          if (lastStartIndex == 0 && advanceNodeIndex == numNodes - 1) {
            //System.err.println("ParserME.advanceParses: reducing entire span: "
            //    +new Span(lastStartNode.getSpan().getStart(), advanceNode.getSpan().getEnd())+" "
            //    +lastStartType+" "+java.util.Arrays.asList(children));
            newParse2.insert(new Parse(p.getText(), p.getSpan(), lastStartType, cprobs[1],
                headRules.getHead(cons, lastStartType)));
          } else {
            newParse2.insert(new Parse(p.getText(), new Span(lastStartNode.getSpan().getStart(),
                advanceNode.getSpan().getEnd()), lastStartType, cprobs[1],
                headRules.getHead(cons, lastStartType)));
          }
          newParsesList.add(newParse2);
        }
      }
      if (cprobs[incompleteIndex] > q) { //make sure a shift is likely
        if (createDerivationString) newParse1.getDerivation().append(0).append(".");
        if (advanceNodeIndex != numNodes - 1) { //can't shift last element
          newParse1.addProb(Math.log(cprobs[incompleteIndex]));
          newParsesList.add(newParse1);
        }
      }
    }
    Parse[] newParses = new Parse[newParsesList.size()];
    newParsesList.toArray(newParses);
    return newParses;
  }

  /**
   * Copies every entry of {@code report} into {@code manifest}, prefixing each key
   * with {@code namespace + "."}.
   */
  public static void mergeReportIntoManifest(Map<String, String> manifest,
      Map<String, String> report, String namespace) {
    for (Map.Entry<String, String> entry : report.entrySet()) {
      manifest.put(namespace + "." + entry.getKey(), entry.getValue());
    }
  }

  /**
   * Trains the four sub-models of a chunking parser (build, tagger, chunker, check)
   * from the given parse samples, resetting the sample stream between passes, and
   * packages them into a {@link ParserModel}.
   *
   * @param languageCode the ISO language code for the resulting model
   * @param parseSamples a resettable stream of training parses
   * @param rules the head rules used for head-word percolation
   * @param mlParams training parameters; the "build", "tagger", "chunker" and "check"
   *     sub-namespaces configure the respective sub-models
   * @throws IOException if reading the samples or training fails
   */
  public static ParserModel train(String languageCode, ObjectStream<Parse> parseSamples,
      HeadRules rules, TrainingParameters mlParams) throws IOException {
    System.err.println("Building dictionary");
    Dictionary mdict = buildDictionary(parseSamples, rules, mlParams);
    parseSamples.reset();
    Map<String, String> manifestInfoEntries = new HashMap<>();
    // build
    System.err.println("Training builder");
    ObjectStream<Event> bes = new ParserEventStream(parseSamples, rules,
        ParserEventTypeEnum.BUILD, mdict);
    Map<String, String> buildReportMap = new HashMap<>();
    EventTrainer buildTrainer = TrainerFactory.getEventTrainer(mlParams.getParameters("build"),
        buildReportMap);
    MaxentModel buildModel = buildTrainer.train(bes);
    mergeReportIntoManifest(manifestInfoEntries, buildReportMap, "build");
    parseSamples.reset();
    // tag
    // Ensure the tagger has a beam size configured; default to 10 when absent.
    TrainingParameters posTaggerParams = mlParams.getParameters("tagger");
    if (!posTaggerParams.getObjectSettings().containsKey(BeamSearch.BEAM_SIZE_PARAMETER)) {
      mlParams.put("tagger", BeamSearch.BEAM_SIZE_PARAMETER, 10);
    }
    POSModel posModel = POSTaggerME.train(languageCode, new PosSampleStream(parseSamples),
        mlParams.getParameters("tagger"), new POSTaggerFactory());
    parseSamples.reset();
    // chunk
    ChunkerModel chunkModel = ChunkerME.train(languageCode, new ChunkSampleStream(parseSamples),
        mlParams.getParameters("chunker"), new ParserChunkerFactory());
    parseSamples.reset();
    // check
    System.err.println("Training checker");
    ObjectStream<Event> kes = new ParserEventStream(parseSamples, rules, ParserEventTypeEnum.CHECK);
    Map<String, String> checkReportMap = new HashMap<>();
    EventTrainer checkTrainer = TrainerFactory.getEventTrainer(mlParams.getParameters("check"),
        checkReportMap);
    MaxentModel checkModel = checkTrainer.train(kes);
    mergeReportIntoManifest(manifestInfoEntries, checkReportMap, "check");
    // TODO: Remove cast for HeadRules
    return new ParserModel(languageCode, buildModel, checkModel, posModel, chunkModel,
        rules, ParserType.CHUNKING, manifestInfoEntries);
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/chunking/ParserEventStream.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.parser.chunking;

import java.util.List;

import opennlp.tools.dictionary.Dictionary;
import opennlp.tools.ml.model.Event;
import opennlp.tools.parser.AbstractBottomUpParser;
import opennlp.tools.parser.AbstractParserEventStream;
import opennlp.tools.parser.HeadRules;
import opennlp.tools.parser.Parse;
import opennlp.tools.parser.ParserEventTypeEnum;
import opennlp.tools.util.ObjectStream;

/**
 * Wrapper class for one of four parser event streams.  The particular event stream is specified
 * at construction.
 */
public class ParserEventStream extends AbstractParserEventStream {

  // Context generator for BUILD events; only initialized when etype == BUILD.
  protected BuildContextGenerator bcg;
  // Context generator for CHECK events; only initialized when etype == CHECK.
  protected CheckContextGenerator kcg;

  /**
   * Create an event stream based on the specified data stream of the specified type using
   * the specified head rules.
   * @param d A 1-parse-per-line Penn Treebank Style parse.
   * @param rules The head rules.
   * @param etype The type of events desired (tag, chunk, build, or check).
   * @param dict A tri-gram dictionary to reduce feature generation.
   */
  public ParserEventStream(ObjectStream<Parse> d, HeadRules rules, ParserEventTypeEnum etype,
      Dictionary dict) {
    super(d,rules,etype,dict);
  }

  /** Initializes the context generator matching the requested event type. */
  @Override
  protected void init() {
    if (etype == ParserEventTypeEnum.BUILD) {
      this.bcg = new BuildContextGenerator(dict);
    } else if (etype == ParserEventTypeEnum.CHECK) {
      this.kcg = new CheckContextGenerator();
    }
  }

  /**
   * Create an event stream without a dictionary; equivalent to passing a null dictionary
   * to the four-argument constructor.
   */
  public ParserEventStream(ObjectStream<Parse> d, HeadRules rules, ParserEventTypeEnum etype) {
    this (d,rules,etype,null);
  }

  /**
   * Returns true if the specified child is the first child of the specified parent.
   * @param child The child parse.
   * @param parent The parent parse.
   * @return true if the specified child is the first child of the specified parent;
   *     false otherwise.
   */
  protected boolean firstChild(Parse child, Parse parent) {
    return AbstractBottomUpParser.collapsePunctuation(parent.getChildren(), punctSet)[0] == child;
  }

  /**
   * Replaces the run of chunks around index {@code ci} that share {@code parent} with the
   * single parent node, returning the reduced array.  When the parent is the TOP node an
   * empty array is returned instead (parsing is finished).
   *
   * @param chunks the current sequence of (partially reduced) parses
   * @param ci the index of the last child of {@code parent} within {@code chunks}
   * @param parent the completed constituent replacing its children
   * @return the reduced chunk array, or an empty array for the TOP node
   */
  public static Parse[] reduceChunks(Parse[] chunks, int ci, Parse parent) {
    String type = parent.getType();
    // perform reduce
    int reduceStart = ci;
    int reduceEnd = ci;
    // scan left to find the first chunk belonging to this parent
    while (reduceStart >= 0 && chunks[reduceStart].getParent() == parent) {
      reduceStart--;
    }
    reduceStart++;
    Parse[] reducedChunks;
    if (!type.equals(AbstractBottomUpParser.TOP_NODE)) {
      //total - num_removed + 1 (for new node)
      reducedChunks = new Parse[chunks.length - (reduceEnd - reduceStart + 1) + 1];
      //insert nodes before reduction
      System.arraycopy(chunks, 0, reducedChunks, 0, reduceStart);
      //insert reduced node
      reducedChunks[reduceStart] = parent;
      //propagate punctuation sets
      parent.setPrevPunctuation(chunks[reduceStart].getPreviousPunctuationSet());
      parent.setNextPunctuation(chunks[reduceEnd].getNextPunctuationSet());
      //insert nodes after reduction
      int ri = reduceStart + 1;
      for (int rci = reduceEnd + 1; rci < chunks.length; rci++) {
        reducedChunks[ri] = chunks[rci];
        ri++;
      }
      // NOTE(review): this assignment to the parameter has no effect on the caller
      // (Java is pass-by-value); callers recompute the index themselves.
      ci = reduceStart - 1; //ci will be incremented at end of loop
    } else {
      reducedChunks = new Parse[0];
    }
    return reducedChunks;
  }

  /**
   * Adds events for parsing (post tagging and chunking) to the specified list of events for
   * the specified parse chunks.
   * @param parseEvents The events for the specified chunks.
   * @param chunks The incomplete parses to be parsed.
   */
  @Override
  protected void addParseEvents(List<Event> parseEvents, Parse[] chunks) {
    int ci = 0;
    while (ci < chunks.length) {
      //System.err.println("parserEventStream.addParseEvents: chunks="+Arrays.asList(chunks));
      Parse c = chunks[ci];
      Parse parent = c.getParent();
      if (parent != null) {
        String type = parent.getType();
        String outcome;
        // Label as START for the first child of its parent, CONT otherwise.
        if (firstChild(c, parent)) {
          outcome = AbstractBottomUpParser.START + type;
        } else {
          outcome = AbstractBottomUpParser.CONT + type;
        }
        // System.err.println("parserEventStream.addParseEvents: chunks["+ci+"]="+c+" label="
        //     +outcome+" bcg="+bcg);
        c.setLabel(outcome);
        if (etype == ParserEventTypeEnum.BUILD) {
          parseEvents.add(new Event(outcome, bcg.getContext(chunks, ci)));
        }
        // start+1 is the index of the first sibling sharing this parent.
        int start = ci - 1;
        while (start >= 0 && chunks[start].getParent() == parent) {
          start--;
        }
        if (lastChild(c, parent)) {
          if (etype == ParserEventTypeEnum.CHECK) {
            parseEvents.add(new Event(Parser.COMPLETE, kcg.getContext( chunks, type,
                start + 1, ci)));
          }
          //perform reduce
          int reduceStart = ci;
          while (reduceStart >= 0 && chunks[reduceStart].getParent() == parent) {
            reduceStart--;
          }
          reduceStart++;
          chunks = reduceChunks(chunks,ci,parent);
          ci = reduceStart - 1; //ci will be incremented at end of loop
        } else {
          if (etype == ParserEventTypeEnum.CHECK) {
            parseEvents.add(new Event(Parser.INCOMPLETE, kcg.getContext(chunks, type,
                start + 1, ci)));
          }
        }
      }
      ci++;
    }
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/chunking/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Package containing code for performing full syntactic parsing using shift/reduce-style decisions. */ package opennlp.tools.parser.chunking;
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/lang
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/lang/en/HeadRules.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.parser.lang.en;

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Reader;
import java.io.Writer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import java.util.Stack;
import java.util.StringTokenizer;

import opennlp.tools.parser.Constituent;
import opennlp.tools.parser.GapLabeler;
import opennlp.tools.parser.Parse;
import opennlp.tools.parser.chunking.Parser;
import opennlp.tools.util.model.ArtifactSerializer;
import opennlp.tools.util.model.SerializableArtifact;

/**
 * Class for storing the English head rules associated with parsing.
 */
public class HeadRules implements opennlp.tools.parser.HeadRules, GapLabeler,
    SerializableArtifact {

  /** Serializer that reads/writes head rules as UTF-8 text for model packaging. */
  public static class HeadRulesSerializer implements ArtifactSerializer<HeadRules> {

    public HeadRules create(InputStream in) throws IOException {
      return new HeadRules(new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8)));
    }

    public void serialize(opennlp.tools.parser.lang.en.HeadRules artifact, OutputStream out)
        throws IOException {
      artifact.serialize(new OutputStreamWriter(out, StandardCharsets.UTF_8));
    }
  }

  /** A single head rule: a search direction plus an ordered preference list of tags. */
  private static class HeadRule {
    // true: scan constituents left-to-right for each tag; false: right-to-left.
    public boolean leftToRight;
    public String[] tags;

    public HeadRule(boolean l2r, String[] tags) {
      leftToRight = l2r;
      for (String tag : tags) {
        Objects.requireNonNull(tag, "tags must not contain null values");
      }
      this.tags = tags;
    }

    @Override
    public int hashCode() {
      return Objects.hash(leftToRight, Arrays.hashCode(tags));
    }

    @Override
    public boolean equals(Object obj) {
      if (obj == this) {
        return true;
      }
      if (obj instanceof HeadRule) {
        HeadRule rule = (HeadRule) obj;
        return rule.leftToRight == leftToRight && Arrays.equals(rule.tags, tags);
      }
      return false;
    }
  }

  // Head rules keyed by constituent type (e.g. "VP"); populated by readHeadRules.
  private Map<String, HeadRule> headRules;
  // Tags treated as punctuation when collapsing/percolating.
  private Set<String> punctSet;

  /**
   * Creates a new set of head rules based on the specified head rules file.
   *
   * NOTE(review): uses the platform default charset via FileReader and never closes the
   * reader, unlike the UTF-8 serializer path — confirm whether callers rely on this.
   *
   * @param ruleFile the head rules file.
   *
   * @throws IOException if the head rules file can not be read.
   */
  @Deprecated
  public HeadRules(String ruleFile) throws IOException {
    this(new BufferedReader(new FileReader(ruleFile)));
  }

  /**
   * Creates a new set of head rules based on the specified reader.
   *
   * @param rulesReader the head rules reader.
   *
   * @throws IOException if the head rules reader can not be read.
   */
  public HeadRules(Reader rulesReader) throws IOException {
    BufferedReader in = new BufferedReader(rulesReader);
    readHeadRules(in);
    punctSet = new HashSet<>();
    punctSet.add(".");
    punctSet.add(",");
    punctSet.add("``");
    punctSet.add("''");
    //punctSet.add(":");
  }

  /** Returns the set of tags treated as punctuation. */
  public Set<String> getPunctuationTags() {
    return punctSet;
  }

  /**
   * Returns the head constituent among {@code constituents} for a proposed constituent of
   * {@code type}.  NP/NX use a hard-coded cascade of preference lists (Collins-style);
   * all other types consult the loaded rule table; unmatched types fall back to the
   * last constituent's head.
   */
  public Parse getHead(Parse[] constituents, String type) {
    // Token-level nodes have no head.
    if (Parser.TOK_NODE.equals(constituents[0].getType())) {
      return null;
    }
    HeadRule hr;
    if (type.equals("NP") || type.equals("NX")) {
      // Cascade 1: rightmost nominal-like tag.
      String[] tags1 = { "NN", "NNP", "NNPS", "NNS", "NX", "JJR", "POS" };
      for (int ci = constituents.length - 1; ci >= 0; ci--) {
        for (int ti = tags1.length - 1; ti >= 0; ti--) {
          if (constituents[ci].getType().equals(tags1[ti])) {
            return constituents[ci].getHead();
          }
        }
      }
      // Cascade 2: leftmost NP.
      for (int ci = 0; ci < constituents.length; ci++) {
        if (constituents[ci].getType().equals("NP")) {
          return constituents[ci].getHead();
        }
      }
      // Cascade 3: rightmost $ / ADJP / PRN.
      String[] tags2 = { "$", "ADJP", "PRN" };
      for (int ci = constituents.length - 1; ci >= 0; ci--) {
        for (int ti = tags2.length - 1; ti >= 0; ti--) {
          if (constituents[ci].getType().equals(tags2[ti])) {
            return constituents[ci].getHead();
          }
        }
      }
      // Cascade 4: rightmost adjective/adverb-like tag.
      String[] tags3 = { "JJ", "JJS", "RB", "QP" };
      for (int ci = constituents.length - 1; ci >= 0; ci--) {
        for (int ti = tags3.length - 1; ti >= 0; ti--) {
          if (constituents[ci].getType().equals(tags3[ti])) {
            return constituents[ci].getHead();
          }
        }
      }
      // Default: last constituent.
      return constituents[constituents.length - 1].getHead();
    } else if ((hr = headRules.get(type)) != null) {
      String[] tags = hr.tags;
      int cl = constituents.length;
      int tl = tags.length;
      if (hr.leftToRight) {
        // Tags are in priority order; for each tag scan the constituents left-to-right.
        for (int ti = 0; ti < tl; ti++) {
          for (int ci = 0; ci < cl; ci++) {
            if (constituents[ci].getType().equals(tags[ti])) {
              return constituents[ci].getHead();
            }
          }
        }
        return constituents[0].getHead();
      } else {
        // Right-to-left scan per tag.
        for (int ti = 0; ti < tl; ti++) {
          for (int ci = cl - 1; ci >= 0; ci--) {
            if (constituents[ci].getType().equals(tags[ti])) {
              return constituents[ci].getHead();
            }
          }
        }
        return constituents[cl - 1].getHead();
      }
    }
    return constituents[constituents.length - 1].getHead();
  }

  /**
   * Parses the head rules file: each line is "<count> <type> <dir> <tag>...", where
   * count equals the number of tags plus two and dir "1" means left-to-right.
   * NOTE(review): a malformed line (count mismatch, non-numeric count) will throw an
   * unchecked exception — confirm input is always well-formed.
   */
  private void readHeadRules(BufferedReader str) throws IOException {
    String line;
    headRules = new HashMap<>(30);
    while ((line = str.readLine()) != null) {
      StringTokenizer st = new StringTokenizer(line);
      String num = st.nextToken();
      String type = st.nextToken();
      String dir = st.nextToken();
      // The count field includes the type and direction tokens, hence "- 2".
      String[] tags = new String[Integer.parseInt(num) - 2];
      int ti = 0;
      while (st.hasMoreTokens()) {
        tags[ti] = st.nextToken();
        ti++;
      }
      headRules.put(type, new HeadRule(dir.equals("1"), tags));
    }
  }

  /**
   * Marks the constituents involved in subject/object extraction with a "-G" (gap)
   * suffix, inspecting the top five entries of the constituent stack.
   */
  public void labelGaps(Stack<Constituent> stack) {
    if (stack.size() > 4) {
      //Constituent con0 = (Constituent) stack.get(stack.size()-1);
      Constituent con1 = stack.get(stack.size() - 2);
      Constituent con2 = stack.get(stack.size() - 3);
      Constituent con3 = stack.get(stack.size() - 4);
      Constituent con4 = stack.get(stack.size() - 5);
      // System.err.println("con0="+con0.label+" con1="+con1.label+" con2="
      //     +con2.label+" con3="+con3.label+" con4="+con4.label);
      //subject extraction
      if (con1.getLabel().equals("NP") && con2.getLabel().equals("S")
          && con3.getLabel().equals("SBAR")) {
        con1.setLabel(con1.getLabel() + "-G");
        con2.setLabel(con2.getLabel() + "-G");
        con3.setLabel(con3.getLabel() + "-G");
      }
      //object extraction
      else if (con1.getLabel().equals("NP") && con2.getLabel().equals("VP")
          && con3.getLabel().equals("S") && con4.getLabel().equals("SBAR")) {
        con1.setLabel(con1.getLabel() + "-G");
        con2.setLabel(con2.getLabel() + "-G");
        con3.setLabel(con3.getLabel() + "-G");
        con4.setLabel(con4.getLabel() + "-G");
      }
    }
  }

  /**
   * Writes the head rules to the writer in a format suitable for loading
   * the head rules again with the constructor. The encoding must be
   * taken into account while working with the writer and reader.
   * <p>
   * After the entries have been written, the writer is flushed.
   * The writer remains open after this method returns.
   *
   * @param writer the destination for the serialized rules
   * @throws IOException if writing fails
   */
  public void serialize(Writer writer) throws IOException {

    for (Entry<String, HeadRule> entry : headRules.entrySet()) {

      String type = entry.getKey();
      HeadRule headRule = entry.getValue();

      // write num of tags (count includes the type and direction fields)
      writer.write(Integer.toString(headRule.tags.length + 2));
      writer.write(' ');

      // write type
      writer.write(type);
      writer.write(' ');

      // write l2r true == 1
      if (headRule.leftToRight)
        writer.write("1");
      else
        writer.write("0");

      // write tags
      for (String tag : headRule.tags) {
        writer.write(' ');
        writer.write(tag);
      }

      writer.write('\n');
    }

    writer.flush();
  }

  @Override
  public int hashCode() {
    return Objects.hash(headRules, punctSet);
  }

  @Override
  public boolean equals(Object obj) {
    if (obj == this) {
      return true;
    }
    if (obj instanceof HeadRules) {
      HeadRules rules = (HeadRules) obj;
      return rules.headRules.equals(headRules) && rules.punctSet.equals(punctSet);
    }
    return false;
  }

  @Override
  public Class<?> getArtifactSerializerClass() {
    return HeadRulesSerializer.class;
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/lang
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/lang/es/AncoraSpanishHeadRules.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.parser.lang.es;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Reader;
import java.io.Writer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import java.util.Stack;
import java.util.StringTokenizer;

import opennlp.tools.parser.Constituent;
import opennlp.tools.parser.GapLabeler;
import opennlp.tools.parser.HeadRules;
import opennlp.tools.parser.Parse;
import opennlp.tools.parser.chunking.Parser;
import opennlp.tools.util.model.ArtifactSerializer;
import opennlp.tools.util.model.SerializableArtifact;

/**
 * Class for storing the Ancora Spanish head rules associated with parsing. In this class
 * headrules for noun phrases are specified. The rest of the rules are
 * in opennlp-tools/lang/es/parser/es-head-rules
 *
 * NOTE: This class has been adapted from opennlp.tools.parser.lang.en.HeadRules
 *
 * The main change is the constituents search direction in the first for loop.
 *
 * Note also the change in the return of the getHead() method:
 * In the lang.en.HeadRules class: return constituents[ci].getHead();
 * Now: return constituents[ci];
 *
 * Other changes include removal of deprecated methods.
 *
 */
public class AncoraSpanishHeadRules implements HeadRules, GapLabeler, SerializableArtifact {

  /** Serializer that reads/writes these head rules as UTF-8 text for model packaging. */
  public static class HeadRulesSerializer implements
      ArtifactSerializer<AncoraSpanishHeadRules> {

    public AncoraSpanishHeadRules create(InputStream in) throws IOException {
      return new AncoraSpanishHeadRules(new BufferedReader(
          new InputStreamReader(in, StandardCharsets.UTF_8)));
    }

    public void serialize(opennlp.tools.parser.lang.es.AncoraSpanishHeadRules artifact,
        OutputStream out) throws IOException {
      artifact.serialize(new OutputStreamWriter(out, StandardCharsets.UTF_8));
    }
  }

  /** A single head rule: a search direction plus an ordered preference list of tag patterns. */
  private static class HeadRule {
    // true: scan constituents left-to-right for each tag pattern; false: right-to-left.
    public boolean leftToRight;
    // Entries are regular expressions matched against constituent types (see getHead).
    public String[] tags;

    public HeadRule(boolean l2r, String[] tags) {
      leftToRight = l2r;

      for (String tag : tags) {
        Objects.requireNonNull(tag, "tags must not contain null values!");
      }

      this.tags = tags;
    }

    @Override
    public int hashCode() {
      return Objects.hash(leftToRight, Arrays.hashCode(tags));
    }

    @Override
    public boolean equals(Object obj) {
      if (obj == this) {
        return true;
      }
      if (obj instanceof HeadRule) {
        HeadRule rule = (HeadRule) obj;

        return (rule.leftToRight == leftToRight) && Arrays.equals(rule.tags, tags);
      }
      return false;
    }
  }

  // Head rules keyed by constituent type; populated by readHeadRules.
  private Map<String, HeadRule> headRules;
  // Tags treated as punctuation when collapsing/percolating.
  private Set<String> punctSet;

  /**
   * Creates a new set of head rules based on the specified reader.
   *
   * @param rulesReader the head rules reader.
   *
   * @throws IOException if the head rules reader can not be read.
   */
  public AncoraSpanishHeadRules(Reader rulesReader) throws IOException {
    BufferedReader in = new BufferedReader(rulesReader);
    readHeadRules(in);

    punctSet = new HashSet<>();
    punctSet.add(".");
    punctSet.add(",");
    punctSet.add("``");
    punctSet.add("''");
    //punctSet.add(":");
  }

  /** Returns the set of tags treated as punctuation. */
  public Set<String> getPunctuationTags() {
    return punctSet;
  }

  /**
   * Returns the head constituent among {@code constituents} for a proposed constituent of
   * {@code type}.  SN/GRUP.NOM use a hard-coded cascade of regex preference lists;
   * all other types consult the loaded rule table (also regex-matched); unmatched
   * types fall back to the last constituent's head.  Note: unlike the English rules,
   * most branches return the matched constituent itself, not its head.
   */
  public Parse getHead(Parse[] constituents, String type) {
    // Token-level nodes have no head.
    if (Parser.TOK_NODE.equals(constituents[0].getType())) {
      return null;
    }
    HeadRule hr;
    if (type.equals("SN") || type.equals("GRUP.NOM")) {
      // Cascade 1: leftmost nominal-like pattern (direction differs from the English rules).
      String[] tags1 = {"AQA.*","AQC.*","GRUP\\.A","S\\.A","NC.*S.*",
          "NP.*","NC.*P.*", "GRUP\\.NOM"};
      for (int i = 0; i < constituents.length; i++) {
        for (int t = tags1.length - 1; t >= 0; t--) {
          if (constituents[i].getType().matches(tags1[t])) {
            return constituents[i];
          }
        }
      }
      // Cascade 2: leftmost SN or GRUP.NOM.
      for (int ci = 0; ci < constituents.length; ci++) {
        if (constituents[ci].getType().equals("SN")
            || constituents[ci].getType().equals("GRUP.NOM")) {
          return constituents[ci];
        }
      }
      // Cascade 3: rightmost $ / GRUP.A / SA.
      String[] tags2 = {"\\$","GRUP\\.A","SA"};
      for (int ci = constituents.length - 1; ci >= 0; ci--) {
        for (int ti = tags2.length - 1; ti >= 0; ti--) {
          if (constituents[ci].getType().matches(tags2[ti])) {
            return constituents[ci];
          }
        }
      }
      // Cascade 4: rightmost adjective/adverb-like pattern.
      String[] tags3 = {"AQ0.*", "AQ[AC].*","AO.*","GRUP\\.A","S\\.A","RG","RN","GRUP\\.NOM"};
      for (int ci = constituents.length - 1; ci >= 0; ci--) {
        for (int ti = tags3.length - 1; ti >= 0; ti--) {
          if (constituents[ci].getType().matches(tags3[ti])) {
            return constituents[ci];
          }
        }
      }
      // Default: last constituent's head.
      return constituents[constituents.length - 1].getHead();
    } else if ((hr = headRules.get(type)) != null) {
      String[] tags = hr.tags;
      int cl = constituents.length;
      int tl = tags.length;
      if (hr.leftToRight) {
        // Patterns are in priority order; for each pattern scan left-to-right.
        for (int ti = 0; ti < tl; ti++) {
          for (int ci = 0; ci < cl; ci++) {
            if (constituents[ci].getType().matches(tags[ti])) {
              return constituents[ci];
            }
          }
        }
        return constituents[0].getHead();
      } else {
        // Right-to-left scan per pattern.
        for (int ti = 0; ti < tl; ti++) {
          for (int ci = cl - 1; ci >= 0; ci--) {
            if (constituents[ci].getType().matches(tags[ti])) {
              return constituents[ci];
            }
          }
        }
        return constituents[cl - 1].getHead();
      }
    }
    return constituents[constituents.length - 1].getHead();
  }

  /**
   * Parses the head rules file: each line is "<count> <type> <dir> <pattern>...", where
   * count equals the number of patterns plus two and dir "1" means left-to-right.
   * NOTE(review): a malformed line (count mismatch, non-numeric count) will throw an
   * unchecked exception — confirm input is always well-formed.
   */
  private void readHeadRules(BufferedReader str) throws IOException {
    String line;
    headRules = new HashMap<>(60);
    while ((line = str.readLine()) != null) {
      StringTokenizer st = new StringTokenizer(line);
      String num = st.nextToken();
      String type = st.nextToken();
      String dir = st.nextToken();
      // The count field includes the type and direction tokens, hence "- 2".
      String[] tags = new String[Integer.parseInt(num) - 2];
      int ti = 0;
      while (st.hasMoreTokens()) {
        tags[ti] = st.nextToken();
        ti++;
      }
      headRules.put(type, new HeadRule(dir.equals("1"), tags));
    }
  }

  /**
   * Marks the constituents involved in subject/object extraction with a "-G" (gap)
   * suffix, inspecting the top five entries of the constituent stack
   * (Spanish labels: SN, GRUP.VERB, S, GRUP.NOM).
   */
  public void labelGaps(Stack<Constituent> stack) {
    if (stack.size() > 4) {
      //Constituent con0 = (Constituent) stack.get(stack.size()-1);
      Constituent con1 = stack.get(stack.size() - 2);
      Constituent con2 = stack.get(stack.size() - 3);
      Constituent con3 = stack.get(stack.size() - 4);
      Constituent con4 = stack.get(stack.size() - 5);
      //subject extraction
      if (con1.getLabel().equals("SN") && con2.getLabel().equals("S")
          && con3.getLabel().equals("GRUP.NOM")) {
        con1.setLabel(con1.getLabel() + "-G");
        con2.setLabel(con2.getLabel() + "-G");
        con3.setLabel(con3.getLabel() + "-G");
      }
      //object extraction
      else if (con1.getLabel().equals("SN") && con2.getLabel().equals("GRUP.VERB")
          && con3.getLabel().equals("S") && con4.getLabel().equals("GRUP.NOM")) {
        con1.setLabel(con1.getLabel() + "-G");
        con2.setLabel(con2.getLabel() + "-G");
        con3.setLabel(con3.getLabel() + "-G");
        con4.setLabel(con4.getLabel() + "-G");
      }
    }
  }

  /**
   * Writes the head rules to the writer in a format suitable for loading
   * the head rules again with the constructor. The encoding must be
   * taken into account while working with the writer and reader.
   * <p>
   * After the entries have been written, the writer is flushed.
   * The writer remains open after this method returns.
   *
   * @param writer the destination for the serialized rules
   * @throws IOException if writing fails
   */
  public void serialize(Writer writer) throws IOException {

    for (Entry<String, HeadRule> entry : headRules.entrySet()) {

      String type = entry.getKey();
      HeadRule headRule = entry.getValue();

      // write num of tags (count includes the type and direction fields)
      writer.write(Integer.toString(headRule.tags.length + 2));
      writer.write(' ');

      // write type
      writer.write(type);
      writer.write(' ');

      // write l2r true == 1
      if (headRule.leftToRight)
        writer.write("1");
      else
        writer.write("0");

      // write tags
      for (String tag : headRule.tags) {
        writer.write(' ');
        writer.write(tag);
      }

      writer.write('\n');
    }

    writer.flush();
  }

  @Override
  public int hashCode() {
    return Objects.hash(headRules, punctSet);
  }

  @Override
  public boolean equals(Object obj) {
    if (obj == this) {
      return true;
    }
    if (obj instanceof AncoraSpanishHeadRules) {
      AncoraSpanishHeadRules rules = (AncoraSpanishHeadRules) obj;

      return rules.headRules.equals(headRules) && rules.punctSet.equals(punctSet);
    }
    return false;
  }

  @Override
  public Class<?> getArtifactSerializerClass() {
    return HeadRulesSerializer.class;
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/treeinsert/AttachContextGenerator.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.parser.treeinsert; import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Set; import opennlp.tools.parser.AbstractContextGenerator; import opennlp.tools.parser.Cons; import opennlp.tools.parser.Parse; public class AttachContextGenerator extends AbstractContextGenerator { public AttachContextGenerator(Set<String> punctSet) { this.punctSet = punctSet; } public String[] getContext(Object o) { Object[] parts = (Object[]) o; return getContext((Parse[]) parts[0], (Integer) parts[1],(List<Parse>) parts[2], (Integer) parts[3]); } private boolean containsPunct(Collection<Parse> puncts, String punct) { if (puncts != null) { for (Parse p : puncts) { if (p.getType().equals(punct)) { return true; } } } return false; } /** * * @param constituents The constituents as they have been constructed so far. * @param index The constituent index of the node being attached. * @param rightFrontier The nodes which have been not attach to so far. * @return A set of contextual features about this attachment. 
*/ public String[] getContext(Parse[] constituents, int index, List<Parse> rightFrontier, int rfi) { List<String> features = new ArrayList<>(100); Parse fn = rightFrontier.get(rfi); Parse fp = null; if (rfi + 1 < rightFrontier.size()) { fp = rightFrontier.get(rfi + 1); } Parse p_1 = null; if (rightFrontier.size() > 0) { p_1 = rightFrontier.get(0); } Parse p0 = constituents[index]; Parse p1 = null; if (index + 1 < constituents.length) { p1 = constituents[index + 1]; } Collection<Parse> punct_1fs = fn.getPreviousPunctuationSet(); Collection<Parse> punct_1s = p0.getPreviousPunctuationSet(); Collection<Parse> punct1s = p0.getNextPunctuationSet(); String consfp = cons(fp, -3); String consf = cons(fn, -2); String consp_1 = cons(p_1, -1); String consp0 = cons(p0, 0); String consp1 = cons(p1, 1); String consbofp = consbo(fp, -3); String consbof = consbo(fn, -2); String consbop_1 = consbo(p_1, -1); String consbop0 = consbo(p0, 0); String consbop1 = consbo(p1, 1); Cons cfp = new Cons(consfp,consbofp,-3,true); Cons cf = new Cons(consf,consbof,-2,true); Cons c_1 = new Cons(consp_1,consbop_1,-1,true); Cons c0 = new Cons(consp0,consbop0,0,true); Cons c1 = new Cons(consp1,consbop1,1,true); //default features.add("default"); //unigrams features.add(consfp); features.add(consbofp); features.add(consf); features.add(consbof); features.add(consp_1); features.add(consbop_1); features.add(consp0); features.add(consbop0); features.add(consp1); features.add(consbop1); //productions String prod = production(fn,false); //String punctProd = production(fn,true,punctSet); features.add("pn=" + prod); features.add("pd=" + prod + "," + p0.getType()); features.add("ps=" + fn.getType() + "->" + fn.getType() + "," + p0.getType()); if (punct_1s != null) { StringBuilder punctBuf = new StringBuilder(5); for (Parse punct : punct_1s) { punctBuf.append(punct.getType()).append(","); } //features.add("ppd="+punctProd+","+punctBuf.toString()+p0.getType()); 
//features.add("pps="+fn.getType()+"->"+fn.getType()+","+punctBuf.toString()+p0.getType()); } //bi-grams //cons(fn),cons(0) cons2(features,cfp,c0,punct_1s,true); cons2(features,cf,c0,punct_1s,true); cons2(features,c_1,c0,punct_1s,true); cons2(features,c0,c1,punct1s,true); cons3(features,cf,c_1,c0,null,punct_1s,true,true,true); cons3(features,cf,c0,c1,punct_1s,punct1s,true,true,true); cons3(features,cfp,cf,c0,null,punct_1s,true,true,true); /* for (int ri=0;ri<rfi;ri++) { Parse jn = (Parse) rightFrontier.get(ri); features.add("jn="+jn.getType()); } */ int headDistance = (p0.getHeadIndex() - fn.getHeadIndex()); features.add("hd=" + headDistance); features.add("nd=" + rfi); features.add("nd=" + p0.getType() + "." + rfi); features.add("hd=" + p0.getType() + "." + headDistance); //features.add("fs="+rightFrontier.size()); //paired punct features if (containsPunct(punct_1s,"''")) { if (containsPunct(punct_1fs,"``")) { features.add("quotematch");//? not generating feature correctly } } return features.toArray(new String[features.size()]); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/treeinsert/BuildContextGenerator.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.parser.treeinsert; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Set; import opennlp.tools.parser.AbstractContextGenerator; import opennlp.tools.parser.Cons; import opennlp.tools.parser.Parse; /** * Creates the features or contexts for the building phase of parsing. * This phase builds constituents from the left-most node of these * constituents. */ public class BuildContextGenerator extends AbstractContextGenerator { private Parse[] leftNodes; public BuildContextGenerator() { super(); leftNodes = new Parse[2]; } public String[] getContext(Object o) { Object[] parts = (Object[]) o; return getContext((Parse[]) parts[0], (Integer) parts[1]); } /** * Returns the contexts/features for the decision to build a new constituent for the specified parse * at the specified index. * @param constituents The constituents of the parse so far. * @param index The index of the constituent where a build decision is being made. * @return the contexts/features for the decision to build a new constituent. 
*/ public String[] getContext(Parse[] constituents, int index) { int ps = constituents.length; Parse p0 = constituents[index]; Parse p1 = null; if (index + 1 < ps) { p1 = constituents[index + 1]; } Parse p2 = null; if (index + 2 < ps) { p2 = constituents[index + 2]; } Collection<Parse> punct_1s = p0.getPreviousPunctuationSet(); Collection<Parse> punct1s = p0.getNextPunctuationSet(); Collection<Parse> punct2s = null; if (p1 != null) { punct2s = p1.getNextPunctuationSet(); } List<Parse> rf; if (index == 0) { rf = Collections.emptyList(); } else { //this isn't a root node so, punctSet won't be used and can be passed as empty. Set<String> emptyPunctSet = Collections.emptySet(); rf = Parser.getRightFrontier(constituents[0], emptyPunctSet); } getFrontierNodes(rf,leftNodes); Parse p_1 = leftNodes[0]; Parse p_2 = leftNodes[1]; Collection<Parse> punct_2s = null; if (p_1 != null) { punct_2s = p_1.getPreviousPunctuationSet(); } String consp_2 = cons(p_2, -2); String consp_1 = cons(p_1, -1); String consp0 = cons(p0, 0); String consp1 = cons(p1, 1); String consp2 = cons(p2, 2); String consbop_2 = consbo(p_2, -2); String consbop_1 = consbo(p_1, -1); String consbop0 = consbo(p0, 0); String consbop1 = consbo(p1, 1); String consbop2 = consbo(p2, 2); Cons c_2 = new Cons(consp_2,consbop_2,-2,true); Cons c_1 = new Cons(consp_1,consbop_1,-1,true); Cons c0 = new Cons(consp0,consbop0,0,true); Cons c1 = new Cons(consp1,consbop1,1,true); Cons c2 = new Cons(consp2,consbop2,2,true); List<String> features = new ArrayList<>(); features.add("default"); //unigrams features.add(consp_2); features.add(consbop_2); features.add(consp_1); features.add(consbop_1); features.add(consp0); features.add(consbop0); features.add(consp1); features.add(consbop1); features.add(consp2); features.add(consbop2); //cons(0),cons(1) cons2(features,c0,c1,punct1s,true); //cons(-1),cons(0) cons2(features,c_1,c0,punct_1s,true); //features.add("stage=cons(0),cons(1),cons(2)"); 
cons3(features,c0,c1,c2,punct1s,punct2s,true,true,true); cons3(features,c_2,c_1,c0,punct_2s,punct_1s,true,true,true); cons3(features,c_1,c0,c1,punct_1s,punct_1s,true,true,true); if (rf.isEmpty()) { features.add(EOS + "," + consp0); features.add(EOS + "," + consbop0); } return features.toArray(new String[features.size()]); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/treeinsert/CheckContextGenerator.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.parser.treeinsert; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Set; import opennlp.tools.parser.AbstractContextGenerator; import opennlp.tools.parser.Parse; public class CheckContextGenerator extends AbstractContextGenerator { private Parse[] leftNodes; public CheckContextGenerator(Set<String> punctSet) { this.punctSet = punctSet; leftNodes = new Parse[2]; } public String[] getContext(Object arg0) { // TODO Auto-generated method stub return null; } public String[] getContext(Parse parent, Parse[] constituents, int index, boolean trimFrontier) { List<String> features = new ArrayList<>(100); //default features.add("default"); Parse[] children = Parser.collapsePunctuation(parent.getChildren(),punctSet); Parse pstart = children[0]; Parse pend = children[children.length - 1]; String type = parent.getType(); checkcons(pstart, "begin", type, features); checkcons(pend, "last", type, features); String production = "p=" + production(parent,false); String punctProduction = "pp=" + production(parent,true); features.add(production); features.add(punctProduction); Parse p1 = null; Parse p2 = null; 
Collection<Parse> p1s = constituents[index].getNextPunctuationSet(); Collection<Parse> p2s = null; Collection<Parse> p_1s = constituents[index].getPreviousPunctuationSet(); Collection<Parse> p_2s = null; List<Parse> rf; if (index == 0) { rf = Collections.emptyList(); } else { rf = Parser.getRightFrontier(constituents[0], punctSet); if (trimFrontier) { int pi = rf.indexOf(parent); if (pi == -1) { throw new RuntimeException("Parent not found in right frontier:" + parent + " rf=" + rf); } else { for (int ri = 0; ri <= pi; ri++) { rf.remove(0); } } } } getFrontierNodes(rf,leftNodes); Parse p_1 = leftNodes[0]; Parse p_2 = leftNodes[1]; int ps = constituents.length; if (p_1 != null) { p_2s = p_1.getPreviousPunctuationSet(); } if (index + 1 < ps) { p1 = constituents[index + 1]; p2s = p1.getNextPunctuationSet(); } if (index + 2 < ps) { p2 = constituents[index + 2]; } surround(p_1, -1, type, p_1s, features); surround(p_2, -2, type, p_2s, features); surround(p1, 1, type, p1s, features); surround(p2, 2, type, p2s, features); return features.toArray(new String[features.size()]); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/treeinsert/Parser.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.parser.treeinsert; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; import opennlp.tools.chunker.Chunker; import opennlp.tools.chunker.ChunkerME; import opennlp.tools.chunker.ChunkerModel; import opennlp.tools.dictionary.Dictionary; import opennlp.tools.ml.EventTrainer; import opennlp.tools.ml.TrainerFactory; import opennlp.tools.ml.model.Event; import opennlp.tools.ml.model.MaxentModel; import opennlp.tools.parser.AbstractBottomUpParser; import opennlp.tools.parser.ChunkSampleStream; import opennlp.tools.parser.HeadRules; import opennlp.tools.parser.Parse; import opennlp.tools.parser.ParserChunkerFactory; import opennlp.tools.parser.ParserEventTypeEnum; import opennlp.tools.parser.ParserModel; import opennlp.tools.parser.ParserType; import opennlp.tools.parser.PosSampleStream; import opennlp.tools.postag.POSModel; import opennlp.tools.postag.POSTagger; import opennlp.tools.postag.POSTaggerFactory; import opennlp.tools.postag.POSTaggerME; import opennlp.tools.util.ObjectStream; import opennlp.tools.util.TrainingParameters; /** * Built/attach parser. 
Nodes are built when their left-most * child is encountered. Subsequent children are attached as * daughters. Attachment is based on node in the right-frontier * of the tree. After each attachment or building, nodes are * assesed as either complete or incomplete. Complete nodes * are no longer elligable for daughter attachment. * Complex modifiers which produce additional node * levels of the same type are attached with sister-adjunction. * Attachment can not take place higher in the right-frontier * than an incomplete node. */ public class Parser extends AbstractBottomUpParser { /** Outcome used when a constituent needs an no additional parent node/building. */ public static final String DONE = "d"; /** Outcome used when a node should be attached as a sister to another node. */ public static final String ATTACH_SISTER = "s"; /** Outcome used when a node should be attached as a daughter to another node. */ public static final String ATTACH_DAUGHTER = "d"; /** Outcome used when a node should not be attached to another node. */ public static final String NON_ATTACH = "n"; /** Label used to distinguish build nodes from non-built nodes. 
*/ public static final String BUILT = "built"; private MaxentModel buildModel; private MaxentModel attachModel; private MaxentModel checkModel; static boolean checkComplete = false; private BuildContextGenerator buildContextGenerator; private AttachContextGenerator attachContextGenerator; private CheckContextGenerator checkContextGenerator; private double[] bprobs; private double[] aprobs; private double[] cprobs; private int doneIndex; private int sisterAttachIndex; private int daughterAttachIndex; private int nonAttachIndex; private int completeIndex; private int[] attachments; public Parser(ParserModel model, int beamSize, double advancePercentage) { this(model.getBuildModel(), model.getAttachModel(), model.getCheckModel(), new POSTaggerME(model.getParserTaggerModel()), new ChunkerME(model.getParserChunkerModel()), model.getHeadRules(), beamSize, advancePercentage); } public Parser(ParserModel model) { this(model, defaultBeamSize, defaultAdvancePercentage); } private Parser(MaxentModel buildModel, MaxentModel attachModel, MaxentModel checkModel, POSTagger tagger, Chunker chunker, HeadRules headRules, int beamSize, double advancePercentage) { super(tagger,chunker,headRules,beamSize,advancePercentage); this.buildModel = buildModel; this.attachModel = attachModel; this.checkModel = checkModel; this.buildContextGenerator = new BuildContextGenerator(); this.attachContextGenerator = new AttachContextGenerator(punctSet); this.checkContextGenerator = new CheckContextGenerator(punctSet); this.bprobs = new double[buildModel.getNumOutcomes()]; this.aprobs = new double[attachModel.getNumOutcomes()]; this.cprobs = new double[checkModel.getNumOutcomes()]; this.doneIndex = buildModel.getIndex(DONE); this.sisterAttachIndex = attachModel.getIndex(ATTACH_SISTER); this.daughterAttachIndex = attachModel.getIndex(ATTACH_DAUGHTER); this.nonAttachIndex = attachModel.getIndex(NON_ATTACH); attachments = new int[] {daughterAttachIndex,sisterAttachIndex}; this.completeIndex = 
checkModel.getIndex(Parser.COMPLETE); } /** * Returns the right frontier of the specified parse tree with nodes ordered from deepest * to shallowest. * @param root The root of the parse tree. * @return The right frontier of the specified parse tree. */ public static List<Parse> getRightFrontier(Parse root,Set<String> punctSet) { List<Parse> rf = new LinkedList<>(); Parse top; if (AbstractBottomUpParser.TOP_NODE.equals(root.getType()) || AbstractBottomUpParser.INC_NODE.equals(root.getType())) { top = collapsePunctuation(root.getChildren(),punctSet)[0]; } else { top = root; } while (!top.isPosTag()) { rf.add(0,top); Parse[] kids = top.getChildren(); top = kids[kids.length - 1]; } return new ArrayList<>(rf); } private void setBuilt(Parse p) { String l = p.getLabel(); if (l == null) { p.setLabel(Parser.BUILT); } else { if (isComplete(p)) { p.setLabel(Parser.BUILT + "." + Parser.COMPLETE); } else { p.setLabel(Parser.BUILT + "." + Parser.INCOMPLETE); } } } private void setComplete(Parse p) { String l = p.getLabel(); if (!isBuilt(p)) { p.setLabel(Parser.COMPLETE); } else { p.setLabel(Parser.BUILT + "." + Parser.COMPLETE); } } private void setIncomplete(Parse p) { if (!isBuilt(p)) { p.setLabel(Parser.INCOMPLETE); } else { p.setLabel(Parser.BUILT + "." + Parser.INCOMPLETE); } } private boolean isBuilt(Parse p) { String l = p.getLabel(); return l != null && l.startsWith(Parser.BUILT); } private boolean isComplete(Parse p) { String l = p.getLabel(); return l != null && l.endsWith(Parser.COMPLETE); } @Override protected Parse[] advanceChunks(Parse p, double minChunkScore) { Parse[] parses = super.advanceChunks(p, minChunkScore); for (Parse parse : parses) { Parse[] chunks = parse.getChildren(); for (int ci = 0; ci < chunks.length; ci++) { setComplete(chunks[ci]); } } return parses; } @Override protected Parse[] advanceParses(Parse p, double probMass) { double q = 1 - probMass; /* The index of the node which will be labeled in this iteration of advancing the parse. 
*/ int advanceNodeIndex; /* The node which will be labeled in this iteration of advancing the parse. */ Parse advanceNode = null; Parse[] originalChildren = p.getChildren(); Parse[] children = collapsePunctuation(originalChildren,punctSet); int numNodes = children.length; if (numNodes == 0) { return null; } else if (numNodes == 1) { //put sentence initial and final punct in top node if (children[0].isPosTag()) { return null; } else { p.expandTopNode(children[0]); return new Parse[] { p }; } } //determines which node needs to adanced. for (advanceNodeIndex = 0; advanceNodeIndex < numNodes; advanceNodeIndex++) { advanceNode = children[advanceNodeIndex]; if (!isBuilt(advanceNode)) { break; } } int originalZeroIndex = mapParseIndex(0,children,originalChildren); int originalAdvanceIndex = mapParseIndex(advanceNodeIndex,children,originalChildren); List<Parse> newParsesList = new ArrayList<>(); //call build model buildModel.eval(buildContextGenerator.getContext(children, advanceNodeIndex), bprobs); double doneProb = bprobs[doneIndex]; if (debugOn) System.out.println("adi=" + advanceNodeIndex + " " + advanceNode.getType() + "." + advanceNode.getLabel() + " " + advanceNode + " choose build=" + (1 - doneProb) + " attach=" + doneProb); if (1 - doneProb > q) { double bprobSum = 0; while (bprobSum < probMass) { /* The largest unadvanced labeling. 
*/ int max = 0; for (int pi = 1; pi < bprobs.length; pi++) { //for each build outcome if (bprobs[pi] > bprobs[max]) { max = pi; } } if (bprobs[max] == 0) { break; } double bprob = bprobs[max]; bprobs[max] = 0; //zero out so new max can be found bprobSum += bprob; String tag = buildModel.getOutcome(max); if (!tag.equals(DONE)) { Parse newParse1 = (Parse) p.clone(); Parse newNode = new Parse(p.getText(),advanceNode.getSpan(),tag,bprob,advanceNode.getHead()); newParse1.insert(newNode); newParse1.addProb(Math.log(bprob)); newParsesList.add(newParse1); if (checkComplete) { cprobs = checkModel.eval(checkContextGenerator.getContext(newNode, children, advanceNodeIndex,false)); if (debugOn) System.out.println("building " + tag + " " + bprob + " c=" + cprobs[completeIndex]); if (cprobs[completeIndex] > probMass) { //just complete advances setComplete(newNode); newParse1.addProb(Math.log(cprobs[completeIndex])); if (debugOn) System.out.println("Only advancing complete node"); } else if (1 - cprobs[completeIndex] > probMass) { //just incomplete advances setIncomplete(newNode); newParse1.addProb(Math.log(1 - cprobs[completeIndex])); if (debugOn) System.out.println("Only advancing incomplete node"); } else { //both complete and incomplete advance if (debugOn) System.out.println("Advancing both complete and incomplete nodes"); setComplete(newNode); newParse1.addProb(Math.log(cprobs[completeIndex])); Parse newParse2 = (Parse) p.clone(); Parse newNode2 = new Parse(p.getText(),advanceNode.getSpan(),tag,bprob,advanceNode.getHead()); newParse2.insert(newNode2); newParse2.addProb(Math.log(bprob)); newParsesList.add(newParse2); newParse2.addProb(Math.log(1 - cprobs[completeIndex])); setIncomplete(newNode2); //set incomplete for non-clone } } else { if (debugOn) System.out.println("building " + tag + " " + bprob); } } } } //advance attaches if (doneProb > q) { Parse newParse1 = (Parse) p.clone(); //clone parse //mark nodes as built if (checkComplete) { if (isComplete(advanceNode)) { 
//replace constituent being labeled to create new derivation newParse1.setChild(originalAdvanceIndex,Parser.BUILT + "." + Parser.COMPLETE); } else { //replace constituent being labeled to create new derivation newParse1.setChild(originalAdvanceIndex,Parser.BUILT + "." + Parser.INCOMPLETE); } } else { //replace constituent being labeled to create new derivation newParse1.setChild(originalAdvanceIndex,Parser.BUILT); } newParse1.addProb(Math.log(doneProb)); if (advanceNodeIndex == 0) { //no attach if first node. newParsesList.add(newParse1); } else { List<Parse> rf = getRightFrontier(p,punctSet); for (int fi = 0,fs = rf.size(); fi < fs; fi++) { Parse fn = rf.get(fi); attachModel.eval(attachContextGenerator.getContext(children, advanceNodeIndex, rf, fi), aprobs); if (debugOn) { // List cs = java.util.Arrays.asList(attachContextGenerator.getContext(children, // advanceNodeIndex,rf,fi,punctSet)); System.out.println("Frontier node(" + fi + "): " + fn.getType() + "." + fn.getLabel() + " " + fn + " <- " + advanceNode.getType() + " " + advanceNode + " d=" + aprobs[daughterAttachIndex] + " s=" + aprobs[sisterAttachIndex] + " "); } for (int ai = 0; ai < attachments.length; ai++) { double prob = aprobs[attachments[ai]]; //should we try an attach if p > threshold and // if !checkComplete then prevent daughter attaching to chunk // if checkComplete then prevent daughter attacing to complete node or // sister attaching to an incomplete node if (prob > q && ( (!checkComplete && (attachments[ai] != daughterAttachIndex || !isComplete(fn))) || (checkComplete && ((attachments[ai] == daughterAttachIndex && !isComplete(fn)) || (attachments[ai] == sisterAttachIndex && isComplete(fn)))))) { Parse newParse2 = newParse1.cloneRoot(fn,originalZeroIndex); Parse[] newKids = Parser.collapsePunctuation(newParse2.getChildren(),punctSet); //remove node from top level since were going to attach it (including punct) for (int ri = originalZeroIndex + 1; ri <= originalAdvanceIndex; ri++) { 
//System.out.println(at"-removing "+(originalZeroIndex+1)+" " // +newParse2.getChildren()[originalZeroIndex+1]); newParse2.remove(originalZeroIndex + 1); } List<Parse> crf = getRightFrontier(newParse2,punctSet); Parse updatedNode; if (attachments[ai] == daughterAttachIndex) { //attach daughter updatedNode = crf.get(fi); updatedNode.add(advanceNode,headRules); } else { //attach sister Parse psite; if (fi + 1 < crf.size()) { psite = crf.get(fi + 1); updatedNode = psite.adjoin(advanceNode,headRules); } else { psite = newParse2; updatedNode = psite.adjoinRoot(advanceNode,headRules,originalZeroIndex); newKids[0] = updatedNode; } } //update spans affected by attachment for (int ni = fi + 1; ni < crf.size(); ni++) { Parse node = crf.get(ni); node.updateSpan(); } //if (debugOn) {System.out.print(ai+"-result: ");newParse2.show();System.out.println();} newParse2.addProb(Math.log(prob)); newParsesList.add(newParse2); if (checkComplete) { cprobs = checkModel.eval( checkContextGenerator.getContext(updatedNode,newKids,advanceNodeIndex,true)); if (cprobs[completeIndex] > probMass) { setComplete(updatedNode); newParse2.addProb(Math.log(cprobs[completeIndex])); if (debugOn) System.out.println("Only advancing complete node"); } else if (1 - cprobs[completeIndex] > probMass) { setIncomplete(updatedNode); newParse2.addProb(Math.log(1 - cprobs[completeIndex])); if (debugOn) System.out.println("Only advancing incomplete node"); } else { setComplete(updatedNode); Parse newParse3 = newParse2.cloneRoot(updatedNode,originalZeroIndex); newParse3.addProb(Math.log(cprobs[completeIndex])); newParsesList.add(newParse3); setIncomplete(updatedNode); newParse2.addProb(Math.log(1 - cprobs[completeIndex])); if (debugOn) System.out.println("Advancing both complete and incomplete nodes; c=" + cprobs[completeIndex]); } } } else { if (debugOn) System.out.println("Skipping " + fn.getType() + "." 
+ fn.getLabel() + " " + fn + " daughter=" + (attachments[ai] == daughterAttachIndex) + " complete=" + isComplete(fn) + " prob=" + prob); } } if (checkComplete && !isComplete(fn)) { if (debugOn) System.out.println("Stopping at incomplete node(" + fi + "): " + fn.getType() + "." + fn.getLabel() + " " + fn); break; } } } } Parse[] newParses = new Parse[newParsesList.size()]; newParsesList.toArray(newParses); return newParses; } @Override protected void advanceTop(Parse p) { p.setType(TOP_NODE); } public static ParserModel train(String languageCode, ObjectStream<Parse> parseSamples, HeadRules rules, TrainingParameters mlParams) throws IOException { Map<String, String> manifestInfoEntries = new HashMap<>(); System.err.println("Building dictionary"); Dictionary mdict = buildDictionary(parseSamples, rules, mlParams); parseSamples.reset(); // tag POSModel posModel = POSTaggerME.train(languageCode, new PosSampleStream( parseSamples), mlParams.getParameters("tagger"), new POSTaggerFactory()); parseSamples.reset(); // chunk ChunkerModel chunkModel = ChunkerME.train(languageCode, new ChunkSampleStream( parseSamples), mlParams.getParameters("chunker"), new ParserChunkerFactory()); parseSamples.reset(); // build System.err.println("Training builder"); ObjectStream<Event> bes = new ParserEventStream(parseSamples, rules, ParserEventTypeEnum.BUILD, mdict); Map<String, String> buildReportMap = new HashMap<>(); EventTrainer buildTrainer = TrainerFactory.getEventTrainer( mlParams.getParameters("build"), buildReportMap); MaxentModel buildModel = buildTrainer.train(bes); opennlp.tools.parser.chunking.Parser.mergeReportIntoManifest( manifestInfoEntries, buildReportMap, "build"); parseSamples.reset(); // check System.err.println("Training checker"); ObjectStream<Event> kes = new ParserEventStream(parseSamples, rules, ParserEventTypeEnum.CHECK); Map<String, String> checkReportMap = new HashMap<>(); EventTrainer checkTrainer = TrainerFactory.getEventTrainer( mlParams.getParameters("check"), 
checkReportMap); MaxentModel checkModel = checkTrainer.train(kes); opennlp.tools.parser.chunking.Parser.mergeReportIntoManifest( manifestInfoEntries, checkReportMap, "check"); parseSamples.reset(); // attach System.err.println("Training attacher"); ObjectStream<Event> attachEvents = new ParserEventStream(parseSamples, rules, ParserEventTypeEnum.ATTACH); Map<String, String> attachReportMap = new HashMap<>(); EventTrainer attachTrainer = TrainerFactory.getEventTrainer( mlParams.getParameters("attach"), attachReportMap); MaxentModel attachModel = attachTrainer.train(attachEvents); opennlp.tools.parser.chunking.Parser.mergeReportIntoManifest( manifestInfoEntries, attachReportMap, "attach"); // TODO: Remove cast for HeadRules return new ParserModel(languageCode, buildModel, checkModel, attachModel, posModel, chunkModel, rules, ParserType.TREEINSERT, manifestInfoEntries); } public static ParserModel train(String languageCode, ObjectStream<Parse> parseSamples, HeadRules rules, int iterations, int cut) throws IOException { TrainingParameters params = new TrainingParameters(); params.put("dict", TrainingParameters.CUTOFF_PARAM, cut); params.put("tagger", TrainingParameters.CUTOFF_PARAM, cut); params.put("tagger", TrainingParameters.ITERATIONS_PARAM, iterations); params.put("chunker", TrainingParameters.CUTOFF_PARAM, cut); params.put("chunker", TrainingParameters.ITERATIONS_PARAM, iterations); params.put("check", TrainingParameters.CUTOFF_PARAM, cut); params.put("check", TrainingParameters.ITERATIONS_PARAM, iterations); params.put("build", TrainingParameters.CUTOFF_PARAM, cut); params.put("build", TrainingParameters.ITERATIONS_PARAM, iterations); return train(languageCode, parseSamples, rules, params); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/treeinsert/ParserEventStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.parser.treeinsert; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import opennlp.tools.dictionary.Dictionary; import opennlp.tools.ml.model.Event; import opennlp.tools.parser.AbstractBottomUpParser; import opennlp.tools.parser.AbstractParserEventStream; import opennlp.tools.parser.HeadRules; import opennlp.tools.parser.Parse; import opennlp.tools.parser.ParserEventTypeEnum; import opennlp.tools.util.ObjectStream; import opennlp.tools.util.Span; public class ParserEventStream extends AbstractParserEventStream { protected AttachContextGenerator attachContextGenerator; protected BuildContextGenerator buildContextGenerator; protected CheckContextGenerator checkContextGenerator; private static final boolean debug = false; public ParserEventStream(ObjectStream<Parse> d, HeadRules rules, ParserEventTypeEnum etype, Dictionary dict) { super(d, rules, etype, dict); } @Override public void init() { buildContextGenerator = new BuildContextGenerator(); attachContextGenerator = new AttachContextGenerator(punctSet); checkContextGenerator = new CheckContextGenerator(punctSet); } public ParserEventStream(ObjectStream<Parse> d, HeadRules rules, 
ParserEventTypeEnum etype) { super(d, rules, etype); } /** * Returns a set of parent nodes which consist of the immediate * parent of the specified node and any of its parent which * share the same syntactic type. * @param node The node whose parents are to be returned. * @return a set of parent nodes. */ private Map<Parse, Integer> getNonAdjoinedParent(Parse node) { Map<Parse, Integer> parents = new HashMap<>(); Parse parent = node.getParent(); int index = indexOf(node,parent); parents.put(parent, index); while (parent.getType().equals(node.getType())) { node = parent; parent = parent.getParent(); index = indexOf(node,parent); parents.put(parent, index); } return parents; } private int indexOf(Parse child, Parse parent) { Parse[] kids = Parser.collapsePunctuation(parent.getChildren(),punctSet); for (int ki = 0; ki < kids.length; ki++) { if (child == kids[ki]) { return ki; } } return -1; } private int nonPunctChildCount(Parse node) { return Parser.collapsePunctuation(node.getChildren(),punctSet).length; } /* private Set getNonAdjoinedParent(Parse node) { Set parents = new HashSet(); Parse parent = node.getParent(); do { parents.add(parent); parent = parent.getParent(); } while(parent.getType().equals(node.getType())); return parents; } */ @Override protected boolean lastChild(Parse child, Parse parent) { boolean lc = super.lastChild(child, parent); while (!lc) { Parse cp = child.getParent(); if (cp != parent && cp.getType().equals(child.getType())) { lc = super.lastChild(cp,parent); child = cp; } else { break; } } return lc; } @Override protected void addParseEvents(List<Event> parseEvents, Parse[] chunks) { /* Frontier nodes built from node in a completed parse. Specifically, * they have all their children regardless of the stage of parsing.*/ List<Parse> rightFrontier = new ArrayList<>(); List<Parse> builtNodes = new ArrayList<>(); /* Nodes which characterize what the parse looks like to the parser as its being built. 
* Specifically, these nodes don't have all their children attached like the parents of * the chunk nodes do.*/ Parse[] currentChunks = new Parse[chunks.length]; for (int ci = 0; ci < chunks.length; ci++) { currentChunks[ci] = (Parse) chunks[ci].clone(); currentChunks[ci].setPrevPunctuation(chunks[ci].getPreviousPunctuationSet()); currentChunks[ci].setNextPunctuation(chunks[ci].getNextPunctuationSet()); currentChunks[ci].setLabel(Parser.COMPLETE); chunks[ci].setLabel(Parser.COMPLETE); } for (int ci = 0; ci < chunks.length; ci++) { //System.err.println("parserEventStream.addParseEvents: chunks="+Arrays.asList(chunks)); Parse parent = chunks[ci].getParent(); Parse prevParent = chunks[ci]; int off = 0; //build un-built parents if (!chunks[ci].isPosTag()) { builtNodes.add(off++,chunks[ci]); } //perform build stages while (!parent.getType().equals(AbstractBottomUpParser.TOP_NODE) && parent.getLabel() == null) { if (parent.getLabel() == null && !prevParent.getType().equals(parent.getType())) { //build level if (debug) System.err.println("Build: " + parent.getType() + " for: " + currentChunks[ci]); if (etype == ParserEventTypeEnum.BUILD) { parseEvents.add(new Event(parent.getType(), buildContextGenerator.getContext(currentChunks, ci))); } builtNodes.add(off++,parent); Parse newParent = new Parse(currentChunks[ci].getText(), currentChunks[ci].getSpan(),parent.getType(),1,0); newParent.add(currentChunks[ci],rules); newParent.setPrevPunctuation(currentChunks[ci].getPreviousPunctuationSet()); newParent.setNextPunctuation(currentChunks[ci].getNextPunctuationSet()); currentChunks[ci].setParent(newParent); currentChunks[ci] = newParent; newParent.setLabel(Parser.BUILT); //see if chunk is complete if (lastChild(chunks[ci], parent)) { if (etype == ParserEventTypeEnum.CHECK) { parseEvents.add(new Event(Parser.COMPLETE, checkContextGenerator.getContext(currentChunks[ci],currentChunks, ci,false))); } currentChunks[ci].setLabel(Parser.COMPLETE); parent.setLabel(Parser.COMPLETE); } else 
{ if (etype == ParserEventTypeEnum.CHECK) { parseEvents.add(new Event(Parser.INCOMPLETE, checkContextGenerator.getContext(currentChunks[ci],currentChunks,ci,false))); } currentChunks[ci].setLabel(Parser.INCOMPLETE); parent.setLabel(Parser.COMPLETE); } chunks[ci] = parent; //System.err.println("build: "+newParent+" for "+parent); } //TODO: Consider whether we need to set this label or train parses at all. parent.setLabel(Parser.BUILT); prevParent = parent; parent = parent.getParent(); } //decide to attach if (etype == ParserEventTypeEnum.BUILD) { parseEvents.add(new Event(Parser.DONE, buildContextGenerator.getContext(currentChunks, ci))); } //attach node String attachType = null; /* Node selected for attachment. */ Parse attachNode = null; int attachNodeIndex = -1; if (ci == 0) { Parse top = new Parse(currentChunks[ci].getText(), new Span(0,currentChunks[ci].getText().length()),AbstractBottomUpParser.TOP_NODE,1,0); top.insert(currentChunks[ci]); } else { /* Right frontier consisting of partially-built nodes based on current state of the parse.*/ List<Parse> currentRightFrontier = Parser.getRightFrontier(currentChunks[0],punctSet); if (currentRightFrontier.size() != rightFrontier.size()) { System.err.println("fontiers mis-aligned: " + currentRightFrontier.size() + " != " + rightFrontier.size() + " " + currentRightFrontier + " " + rightFrontier); System.exit(1); } Map<Parse, Integer> parents = getNonAdjoinedParent(chunks[ci]); //try daughters first. 
for (int cfi = 0; cfi < currentRightFrontier.size(); cfi++) { Parse frontierNode = rightFrontier.get(cfi); Parse cfn = currentRightFrontier.get(cfi); if (!Parser.checkComplete || !Parser.COMPLETE.equals(cfn.getLabel())) { Integer i = parents.get(frontierNode); if (debug) System.err.println("Looking at attachment site (" + cfi + "): " + cfn.getType() + " ci=" + i + " cs=" + nonPunctChildCount(cfn) + ", " + cfn + " :for " + currentChunks[ci].getType() + " " + currentChunks[ci] + " -> " + parents); if (attachNode == null && i != null && i == nonPunctChildCount(cfn)) { attachType = Parser.ATTACH_DAUGHTER; attachNodeIndex = cfi; attachNode = cfn; if (etype == ParserEventTypeEnum.ATTACH) { parseEvents.add(new Event(attachType, attachContextGenerator.getContext(currentChunks, ci, currentRightFrontier, attachNodeIndex))); } //System.err.println("daughter attach "+attachNode+" at "+fi); } } else { if (debug) System.err.println("Skipping (" + cfi + "): " + cfn.getType() + "," + cfn.getPreviousPunctuationSet() + " " + cfn + " :for " + currentChunks[ci].getType() + " " + currentChunks[ci] + " -> " + parents); } // Can't attach past first incomplete node. if (Parser.checkComplete && cfn.getLabel().equals(Parser.INCOMPLETE)) { if (debug) System.err.println("breaking on incomplete:" + cfn.getType() + " " + cfn); break; } } //try sisters, and generate non-attach events. 
for (int cfi = 0; cfi < currentRightFrontier.size(); cfi++) { Parse frontierNode = rightFrontier.get(cfi); Parse cfn = currentRightFrontier.get(cfi); if (attachNode == null && parents.containsKey(frontierNode.getParent()) && frontierNode.getType().equals(frontierNode.getParent().getType()) ) { //&& frontierNode.getParent().getLabel() == null) { attachType = Parser.ATTACH_SISTER; attachNode = cfn; attachNodeIndex = cfi; if (etype == ParserEventTypeEnum.ATTACH) { parseEvents.add(new Event(Parser.ATTACH_SISTER, attachContextGenerator.getContext(currentChunks, ci, currentRightFrontier, cfi))); } chunks[ci].getParent().setLabel(Parser.BUILT); //System.err.println("in search sister attach "+attachNode+" at "+cfi); } else if (cfi == attachNodeIndex) { //skip over previously attached daughter. } else { if (etype == ParserEventTypeEnum.ATTACH) { parseEvents.add(new Event(Parser.NON_ATTACH, attachContextGenerator.getContext(currentChunks, ci, currentRightFrontier, cfi))); } } //Can't attach past first incomplete node. 
if (Parser.checkComplete && cfn.getLabel().equals(Parser.INCOMPLETE)) { if (debug) System.err.println("breaking on incomplete:" + cfn.getType() + " " + cfn); break; } } //attach Node if (attachNode != null) { if (Parser.ATTACH_DAUGHTER.equals(attachType)) { Parse daughter = currentChunks[ci]; if (debug) System.err.println("daughter attach a=" + attachNode.getType() + ":" + attachNode + " d=" + daughter + " com=" + lastChild(chunks[ci], rightFrontier.get(attachNodeIndex))); attachNode.add(daughter,rules); daughter.setParent(attachNode); if (lastChild(chunks[ci], rightFrontier.get(attachNodeIndex))) { if (etype == ParserEventTypeEnum.CHECK) { parseEvents.add(new Event(Parser.COMPLETE, checkContextGenerator.getContext(attachNode,currentChunks,ci,true))); } attachNode.setLabel(Parser.COMPLETE); } else { if (etype == ParserEventTypeEnum.CHECK) { parseEvents.add(new Event(Parser.INCOMPLETE, checkContextGenerator.getContext(attachNode,currentChunks,ci,true))); } } } else if (Parser.ATTACH_SISTER.equals(attachType)) { Parse frontierNode = rightFrontier.get(attachNodeIndex); rightFrontier.set(attachNodeIndex,frontierNode.getParent()); Parse sister = currentChunks[ci]; if (debug) System.err.println("sister attach a=" + attachNode.getType() + ":" + attachNode + " s=" + sister + " ap=" + attachNode.getParent() + " com=" + lastChild(chunks[ci], rightFrontier.get(attachNodeIndex))); Parse newParent = attachNode.getParent().adjoin(sister,rules); newParent.setParent(attachNode.getParent()); attachNode.setParent(newParent); sister.setParent(newParent); if (attachNode == currentChunks[0]) { currentChunks[0] = newParent; } if (lastChild(chunks[ci], rightFrontier.get(attachNodeIndex))) { if (etype == ParserEventTypeEnum.CHECK) { parseEvents.add(new Event(Parser.COMPLETE, checkContextGenerator.getContext(newParent,currentChunks,ci,true))); } newParent.setLabel(Parser.COMPLETE); } else { if (etype == ParserEventTypeEnum.CHECK) { parseEvents.add(new Event(Parser.INCOMPLETE, 
checkContextGenerator.getContext(newParent,currentChunks,ci,true))); } newParent.setLabel(Parser.INCOMPLETE); } } //update right frontier for (int ni = 0; ni < attachNodeIndex; ni++) { //System.err.println("removing: "+rightFrontier.get(0)); rightFrontier.remove(0); } } else { //System.err.println("No attachment!"); throw new RuntimeException("No Attachment: " + chunks[ci]); } } rightFrontier.addAll(0,builtNodes); builtNodes.clear(); } } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/parser/treeinsert/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Package containing experimental code for performing full syntactic * parsing using attachment decisions. */ package opennlp.tools.parser.treeinsert;
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/postag/ConfigurablePOSContextGenerator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.postag;

import java.util.ArrayList;
import java.util.List;
import java.util.Objects;

import opennlp.tools.util.Cache;
import opennlp.tools.util.featuregen.AdaptiveFeatureGenerator;

/**
 * A context generator for the POS Tagger whose features are produced by a
 * configurable {@link AdaptiveFeatureGenerator}. Generated contexts are
 * optionally cached per token array.
 */
public class ConfigurablePOSContextGenerator implements POSContextGenerator {

  // Cache of previously computed contexts; null when caching is disabled.
  private Cache<String, String[]> contextsCache;
  // Identity of the token array the cache entries belong to; the cache is
  // cleared whenever a different sentence (array instance) is seen.
  private Object wordsKey;

  private final AdaptiveFeatureGenerator featureGenerator;

  /**
   * Initializes the current instance.
   *
   * @param cacheSize size of the context cache; caching is disabled if
   *     this value is not positive
   * @param featureGenerator the generator producing the features, must not be null
   */
  public ConfigurablePOSContextGenerator(int cacheSize,
      AdaptiveFeatureGenerator featureGenerator) {
    this.featureGenerator = Objects.requireNonNull(featureGenerator,
        "featureGenerator must not be null");
    if (cacheSize > 0) {
      contextsCache = new Cache<>(cacheSize);
    }
  }

  /**
   * Initializes the current instance without caching.
   *
   * @param featureGenerator the generator producing the features, must not be null
   */
  public ConfigurablePOSContextGenerator(AdaptiveFeatureGenerator featureGenerator) {
    this(0, featureGenerator);
  }

  /**
   * Returns the context for making a pos tag decision at the specified token index
   * given the specified tokens and previous tags.
   *
   * @param index The index of the token for which the context is provided.
   * @param tokens The tokens in the sentence.
   * @param tags The tags assigned to the previous words in the sentence.
   * @param additionalContext Unused by this implementation.
   * @return The context for making a pos tag decision at the specified token index
   *     given the specified tokens and previous tags.
   */
  public String[] getContext(int index, String[] tokens, String[] tags,
      Object[] additionalContext) {
    String tagprev = null;
    String tagprevprev = null;

    if (index - 1 >= 0) {
      tagprev = tags[index - 1];

      if (index - 2 >= 0) {
        tagprevprev = tags[index - 2];
      }
    }

    // Bug fix: join the key parts with a delimiter. Plain concatenation made
    // distinct (index, tagprev, tagprevprev) triples collide, e.g.
    // 1 + "2a" and 12 + "a" both produced "12a", returning cached features
    // computed for a different token position.
    String cacheKey = index + "|" + tagprev + "|" + tagprevprev;
    if (contextsCache != null) {
      if (wordsKey == tokens) {
        String[] cachedContexts = contextsCache.get(cacheKey);
        if (cachedContexts != null) {
          return cachedContexts;
        }
      }
      else {
        // new sentence: cached entries belong to a different token array
        contextsCache.clear();
        wordsKey = tokens;
      }
    }

    List<String> e = new ArrayList<>();

    featureGenerator.createFeatures(e, tokens, index, tags);

    String[] contexts = e.toArray(new String[e.size()]);

    if (contextsCache != null) {
      contextsCache.put(cacheKey, contexts);
    }
    return contexts;
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/postag/DefaultPOSContextGenerator.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.postag; import java.util.ArrayList; import java.util.List; import java.util.regex.Pattern; import opennlp.tools.dictionary.Dictionary; import opennlp.tools.util.Cache; import opennlp.tools.util.StringList; /** * A context generator for the POS Tagger. */ public class DefaultPOSContextGenerator implements POSContextGenerator { protected final String SE = "*SE*"; protected final String SB = "*SB*"; private static final int PREFIX_LENGTH = 4; private static final int SUFFIX_LENGTH = 4; private static Pattern hasCap = Pattern.compile("[A-Z]"); private static Pattern hasNum = Pattern.compile("[0-9]"); private Cache<String, String[]> contextsCache; private Object wordsKey; private Dictionary dict; private String[] dictGram; /** * Initializes the current instance. * * @param dict */ public DefaultPOSContextGenerator(Dictionary dict) { this(0,dict); } /** * Initializes the current instance. 
* * @param cacheSize * @param dict */ public DefaultPOSContextGenerator(int cacheSize, Dictionary dict) { this.dict = dict; dictGram = new String[1]; if (cacheSize > 0) { contextsCache = new Cache<>(cacheSize); } } protected static String[] getPrefixes(String lex) { String[] prefs = new String[PREFIX_LENGTH]; for (int li = 0; li < PREFIX_LENGTH; li++) { prefs[li] = lex.substring(0, Math.min(li + 1, lex.length())); } return prefs; } protected static String[] getSuffixes(String lex) { String[] suffs = new String[SUFFIX_LENGTH]; for (int li = 0; li < SUFFIX_LENGTH; li++) { suffs[li] = lex.substring(Math.max(lex.length() - li - 1, 0)); } return suffs; } public String[] getContext(int index, String[] sequence, String[] priorDecisions, Object[] additionalContext) { return getContext(index,sequence,priorDecisions); } /** * Returns the context for making a pos tag decision at the specified token index * given the specified tokens and previous tags. * @param index The index of the token for which the context is provided. * @param tokens The tokens in the sentence. * @param tags The tags assigned to the previous words in the sentence. * @return The context for making a pos tag decision at the specified token index * given the specified tokens and previous tags. 
*/ public String[] getContext(int index, Object[] tokens, String[] tags) { String next, nextnext = null, lex, prev, prevprev = null; String tagprev, tagprevprev; tagprev = tagprevprev = null; lex = tokens[index].toString(); if (tokens.length > index + 1) { next = tokens[index + 1].toString(); if (tokens.length > index + 2) nextnext = tokens[index + 2].toString(); else nextnext = SE; // Sentence End } else { next = SE; // Sentence End } if (index - 1 >= 0) { prev = tokens[index - 1].toString(); tagprev = tags[index - 1]; if (index - 2 >= 0) { prevprev = tokens[index - 2].toString(); tagprevprev = tags[index - 2]; } else { prevprev = SB; // Sentence Beginning } } else { prev = SB; // Sentence Beginning } String cacheKey = index + tagprev + tagprevprev; if (contextsCache != null) { if (wordsKey == tokens) { String[] cachedContexts = contextsCache.get(cacheKey); if (cachedContexts != null) { return cachedContexts; } } else { contextsCache.clear(); wordsKey = tokens; } } List<String> e = new ArrayList<>(); e.add("default"); // add the word itself e.add("w=" + lex); dictGram[0] = lex; if (dict == null || !dict.contains(new StringList(dictGram))) { // do some basic suffix analysis String[] suffs = getSuffixes(lex); for (int i = 0; i < suffs.length; i++) { e.add("suf=" + suffs[i]); } String[] prefs = getPrefixes(lex); for (int i = 0; i < prefs.length; i++) { e.add("pre=" + prefs[i]); } // see if the word has any special characters if (lex.indexOf('-') != -1) { e.add("h"); } if (hasCap.matcher(lex).find()) { e.add("c"); } if (hasNum.matcher(lex).find()) { e.add("d"); } } // add the words and pos's of the surrounding context if (prev != null) { e.add("p=" + prev); if (tagprev != null) { e.add("t=" + tagprev); } if (prevprev != null) { e.add("pp=" + prevprev); if (tagprevprev != null) { e.add("t2=" + tagprevprev + "," + tagprev); } } } if (next != null) { e.add("n=" + next); if (nextnext != null) { e.add("nn=" + nextnext); } } String[] contexts = e.toArray(new 
String[e.size()]); if (contextsCache != null) { contextsCache.put(cacheKey,contexts); } return contexts; } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/postag/DefaultPOSSequenceValidator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.postag;

import java.util.Arrays;

import opennlp.tools.util.SequenceValidator;

/**
 * Validates a POS tag sequence against an optional {@link TagDictionary}:
 * an outcome is valid when the dictionary is absent, has no entry for the
 * token, or lists the outcome among the token's permitted tags.
 */
public class DefaultPOSSequenceValidator implements SequenceValidator<String> {

  private final TagDictionary tagDictionary;

  public DefaultPOSSequenceValidator(TagDictionary tagDictionary) {
    this.tagDictionary = tagDictionary;
  }

  public boolean validSequence(int i, String[] inputSequence,
      String[] outcomesSequence, String outcome) {
    // No dictionary configured: every outcome is allowed.
    if (tagDictionary == null) {
      return true;
    }

    String[] allowedTags = tagDictionary.getTags(inputSequence[i]);

    // Unknown token: the dictionary places no restriction on it.
    if (allowedTags == null) {
      return true;
    }

    return Arrays.asList(allowedTags).contains(outcome);
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/postag/MutableTagDictionary.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.postag;

/**
 * Interface that allows {@link TagDictionary} entries to be added and removed.
 * This can be used to induce the dictionary from training data.
 */
public interface MutableTagDictionary extends TagDictionary {

  /**
   * Associates the specified tags with the specified word. If the dictionary
   * previously contained keys for the word, the old tags are replaced by the
   * specified tags.
   *
   * @param word
   *          word with which the specified tags is to be associated
   * @param tags
   *          tags to be associated with the specified word
   *
   * @return the previous tags associated with the word, or null if there was no
   *         mapping for word.
   */
  String[] put(String word, String... tags);

  /**
   * Reports whether the dictionary matches words case-sensitively.
   *
   * @return true if the dictionary is case sensitive
   */
  // TODO: move to TagDictionary, can't do it now because of backward
  // compatibility.
  boolean isCaseSensitive();

}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/postag/POSContextGenerator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.postag;

import opennlp.tools.util.BeamSearchContextGenerator;

/**
 * The interface for a context generator for the POS Tagger.
 */
public interface POSContextGenerator extends BeamSearchContextGenerator<String> {

  /**
   * Returns the context (features) for making a POS tag decision at the
   * specified token position.
   *
   * @param pos the index of the token for which the context is generated
   * @param tokens the tokens of the sentence
   * @param prevTags the tags assigned to the tokens preceding {@code pos}
   * @param ac additional context; semantics are implementation-specific
   * @return the features used to predict the tag at {@code pos}
   */
  String[] getContext(int pos, String[] tokens, String[] prevTags, Object[] ac);
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/postag/POSDictionary.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.postag;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Objects;

import opennlp.tools.dictionary.serializer.Attributes;
import opennlp.tools.dictionary.serializer.DictionaryEntryPersistor;
import opennlp.tools.dictionary.serializer.Entry;
import opennlp.tools.util.InvalidFormatException;
import opennlp.tools.util.StringList;
import opennlp.tools.util.StringUtil;
import opennlp.tools.util.model.SerializableArtifact;

/**
 * Provides a means of determining which tags are valid for a particular word
 * based on a tag dictionary read from a file.
 */
public class POSDictionary implements Iterable<String>, MutableTagDictionary, SerializableArtifact {

  // Maps a word to its valid tags. Keys are lower-cased whenever
  // caseSensitive == false (see put(..) and create(..)).
  private Map<String, String[]> dictionary;

  private boolean caseSensitive = true;

  /**
   * Initializes an empty case sensitive {@link POSDictionary}.
   */
  public POSDictionary() {
    this(true);
  }

  /**
   * Initializes an empty {@link POSDictionary}.
   *
   * @param caseSensitive the {@link POSDictionary} case sensitivity
   */
  public POSDictionary(boolean caseSensitive) {
    dictionary = new HashMap<>();
    this.caseSensitive = caseSensitive;
  }

  /**
   * Returns a list of valid tags for the specified word.
   *
   * @param word the word
   *
   * @return a list of valid tags for the specified word or {@code null} if
   *         no information is available for that word
   */
  public String[] getTags(String word) {
    if (caseSensitive) {
      return dictionary.get(word);
    }
    else {
      return dictionary.get(StringUtil.toLowerCase(word));
    }
  }

  /**
   * Associates the specified tags with the specified word. If the dictionary
   * previously contained the word, the old tags are replaced by the specified
   * ones.
   *
   * @param word the word to be added to the dictionary
   * @param tags the set of tags associated with the specified word
   *
   * @deprecated Use {@link #put(String, String...)} instead
   */
  @Deprecated
  void addTags(String word, String... tags) {
    put(word, tags);
  }

  /**
   * Retrieves an iterator over all words in the dictionary.
   */
  @Override
  public Iterator<String> iterator() {
    return dictionary.keySet().iterator();
  }

  // Joins the tags into a single space-separated string. String.join
  // produces the same result as the previous manual StringBuilder loop,
  // including "" for an empty array.
  private static String tagsToString(String[] tags) {
    return String.join(" ", tags);
  }

  /**
   * Writes the {@link POSDictionary} to the given {@link OutputStream}.
   *
   * After the serialization is finished the provided {@link OutputStream}
   * remains open.
   *
   * @param out the {@link OutputStream} to write the dictionary into
   *
   * @throws IOException if writing to the {@link OutputStream} fails
   */
  public void serialize(OutputStream out) throws IOException {
    Iterator<Entry> entries = new Iterator<Entry>() {

      Iterator<String> iterator = dictionary.keySet().iterator();

      public boolean hasNext() {
        return iterator.hasNext();
      }

      public Entry next() {
        String word = iterator.next();

        Attributes tagAttribute = new Attributes();
        tagAttribute.setValue("tags", tagsToString(getTags(word)));

        return new Entry(new StringList(word), tagAttribute);
      }

      public void remove() {
        throw new UnsupportedOperationException();
      }
    };

    DictionaryEntryPersistor.serialize(out, entries, caseSensitive);
  }

  @Override
  public int hashCode() {
    // Hash keys and values independently and sort the hashes so the result
    // does not depend on the iteration order of the backing HashMap.
    int[] keyHashes = new int[dictionary.size()];
    int[] valueHashes = new int[dictionary.size()];

    int i = 0;
    for (String word : this) {
      keyHashes[i] = word.hashCode();
      valueHashes[i] = Arrays.hashCode(getTags(word));
      i++;
    }

    Arrays.sort(keyHashes);
    Arrays.sort(valueHashes);

    return Objects.hash(Arrays.hashCode(keyHashes), Arrays.hashCode(valueHashes));
  }

  @Override
  public boolean equals(Object obj) {
    if (obj == this) {
      return true;
    }

    if (obj instanceof POSDictionary) {
      POSDictionary posDictionary = (POSDictionary) obj;

      // Equal size plus identical tags per word implies identical key sets.
      if (this.dictionary.size() == posDictionary.dictionary.size()) {
        for (String word : this) {
          if (!Arrays.equals(getTags(word), posDictionary.getTags(word))) {
            return false;
          }
        }
        return true;
      }
    }

    return false;
  }

  @Override
  public String toString() {
    // It is time consuming to output the dictionary entries; output something
    // meaningful for debugging instead, like:
    // POSDictionary{size=100, caseSensitive=true}
    return "POSDictionary{size=" + dictionary.size() + ", caseSensitive="
        + this.caseSensitive + "}";
  }

  /**
   * Creates a new {@link POSDictionary} from a provided {@link InputStream}.
   *
   * After creation is finished the provided {@link InputStream} is closed.
   *
   * @param in the stream to read the dictionary from
   *
   * @return the pos dictionary
   *
   * @throws IOException if reading from the stream fails
   * @throws InvalidFormatException if an entry does not have exactly one token
   */
  public static POSDictionary create(InputStream in) throws IOException {

    final POSDictionary newPosDict = new POSDictionary();

    boolean isCaseSensitive = DictionaryEntryPersistor.create(in, entry -> {
      String tagString = entry.getAttributes().getValue("tags");
      String[] tags = tagString.split(" ");
      StringList word = entry.getTokens();
      if (word.size() != 1)
        throw new InvalidFormatException("Each entry must have exactly one token! " + word);
      newPosDict.dictionary.put(word.getToken(0), tags);
    });

    newPosDict.caseSensitive = isCaseSensitive;

    // TODO: The dictionary API needs to be improved to do this better!
    // Entries were inserted before the case sensitivity of the stream was
    // known, so re-key them lower-cased now if necessary.
    if (!isCaseSensitive) {
      Map<String, String[]> lowerCasedDictionary = new HashMap<>();

      for (Map.Entry<String, String[]> entry : newPosDict.dictionary.entrySet()) {
        lowerCasedDictionary.put(StringUtil.toLowerCase(entry.getKey()), entry.getValue());
      }

      newPosDict.dictionary = lowerCasedDictionary;
    }

    return newPosDict;
  }

  @Override
  public String[] put(String word, String... tags) {
    if (this.caseSensitive) {
      return dictionary.put(word, tags);
    } else {
      return dictionary.put(StringUtil.toLowerCase(word), tags);
    }
  }

  @Override
  public boolean isCaseSensitive() {
    return this.caseSensitive;
  }

  @Override
  public Class<?> getArtifactSerializerClass() {
    return POSTaggerFactory.POSDictionarySerializer.class;
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/postag/POSEvaluator.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.postag; import opennlp.tools.util.eval.Evaluator; import opennlp.tools.util.eval.Mean; /** * The {@link POSEvaluator} measures the performance of * the given {@link POSTagger} with the provided reference * {@link POSSample}s. */ public class POSEvaluator extends Evaluator<POSSample> { private POSTagger tagger; private Mean wordAccuracy = new Mean(); /** * Initializes the current instance. * * @param tagger * @param listeners an array of evaluation listeners */ public POSEvaluator(POSTagger tagger, POSTaggerEvaluationMonitor ... listeners) { super(listeners); this.tagger = tagger; } /** * Evaluates the given reference {@link POSSample} object. * * This is done by tagging the sentence from the reference * {@link POSSample} with the {@link POSTagger}. The * tags are then used to update the word accuracy score. * * @param reference the reference {@link POSSample}. * * @return the predicted {@link POSSample}. 
*/ @Override protected POSSample processSample(POSSample reference) { String[] predictedTags = tagger.tag(reference.getSentence(), reference.getAddictionalContext()); String[] referenceTags = reference.getTags(); for (int i = 0; i < referenceTags.length; i++) { if (referenceTags[i].equals(predictedTags[i])) { wordAccuracy.add(1); } else { wordAccuracy.add(0); } } return new POSSample(reference.getSentence(), predictedTags); } /** * Retrieves the word accuracy. * * This is defined as: * word accuracy = correctly detected tags / total words * * @return the word accuracy */ public double getWordAccuracy() { return wordAccuracy.mean(); } /** * Retrieves the total number of words considered * in the evaluation. * * @return the word count */ public long getWordCount() { return wordAccuracy.count(); } /** * Represents this objects as human readable {@link String}. */ @Override public String toString() { return "Accuracy:" + wordAccuracy.mean() + " Number of Samples: " + wordAccuracy.count(); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/postag/POSModel.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.postag;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.nio.file.Path;
import java.util.Map;
import java.util.Objects;
import java.util.Properties;

import opennlp.tools.dictionary.Dictionary;
import opennlp.tools.ml.BeamSearch;
import opennlp.tools.ml.model.MaxentModel;
import opennlp.tools.ml.model.SequenceClassificationModel;
import opennlp.tools.util.BaseToolFactory;
import opennlp.tools.util.InvalidFormatException;
import opennlp.tools.util.model.ArtifactSerializer;
import opennlp.tools.util.model.BaseModel;
import opennlp.tools.util.model.ByteArraySerializer;
import opennlp.tools.util.model.POSModelSerializer;
import opennlp.tools.util.model.SerializableArtifact;

/**
 * The {@link POSModel} is the model used
 * by a learnable {@link POSTagger}.
 *
 * @see POSTaggerME
 */
public final class POSModel extends BaseModel implements SerializableArtifact {

  private static final String COMPONENT_NAME = "POSTaggerME";

  // Keys under which the trained model and the feature generator descriptor
  // are stored in the inherited artifact map.
  static final String POS_MODEL_ENTRY_NAME = "pos.model";
  static final String GENERATOR_DESCRIPTOR_ENTRY_NAME = "generator.featuregen";

  /**
   * Creates a {@link POSModel} from an already trained sequence
   * classification model, copying the feature generator descriptor and all
   * resources of the given factory into the artifact map.
   *
   * @param languageCode the ISO language code of the model
   * @param posModel the trained sequence model; must not be null
   * @param manifestInfoEntries additional manifest properties; may be null
   * @param posFactory the factory providing feature generator and resources
   */
  public POSModel(String languageCode, SequenceClassificationModel<String> posModel,
      Map<String, String> manifestInfoEntries, POSTaggerFactory posFactory) {

    super(COMPONENT_NAME, languageCode, manifestInfoEntries, posFactory);

    artifactMap.put(POS_MODEL_ENTRY_NAME,
        Objects.requireNonNull(posModel, "posModel must not be null"));

    artifactMap.put(GENERATOR_DESCRIPTOR_ENTRY_NAME, posFactory.getFeatureGenerator());

    // Register every factory-provided resource as an artifact so it is
    // serialized together with the model.
    for (Map.Entry<String, Object> resource : posFactory.getResources().entrySet()) {
      artifactMap.put(resource.getKey(), resource.getValue());
    }

    // TODO: This fails probably for the sequence model ... ?!
    // checkArtifactMap();
  }

  /**
   * Creates a {@link POSModel} from a maxent model using the default beam
   * size.
   *
   * @param languageCode the ISO language code of the model
   * @param posModel the trained maxent model; must not be null
   * @param manifestInfoEntries additional manifest properties; may be null
   * @param posFactory the factory providing feature generator and resources
   */
  public POSModel(String languageCode, MaxentModel posModel,
      Map<String, String> manifestInfoEntries, POSTaggerFactory posFactory) {
    this(languageCode, posModel, POSTaggerME.DEFAULT_BEAM_SIZE,
        manifestInfoEntries, posFactory);
  }

  /**
   * Creates a {@link POSModel} from a maxent model with an explicit beam
   * size, which is recorded in the model manifest for later decoding.
   *
   * @param languageCode the ISO language code of the model
   * @param posModel the trained maxent model; must not be null
   * @param beamSize beam size to record in the manifest
   * @param manifestInfoEntries additional manifest properties; may be null
   * @param posFactory the factory providing feature generator and resources
   */
  public POSModel(String languageCode, MaxentModel posModel, int beamSize,
      Map<String, String> manifestInfoEntries, POSTaggerFactory posFactory) {

    super(COMPONENT_NAME, languageCode, manifestInfoEntries, posFactory);

    Objects.requireNonNull(posModel, "posModel must not be null");

    // Persist the beam size in the manifest so getPosSequenceModel() can
    // rebuild the beam search with the same width.
    Properties manifest = (Properties) artifactMap.get(MANIFEST_ENTRY);
    manifest.setProperty(BeamSearch.BEAM_SIZE_PARAMETER, Integer.toString(beamSize));

    artifactMap.put(POS_MODEL_ENTRY_NAME, posModel);

    artifactMap.put(GENERATOR_DESCRIPTOR_ENTRY_NAME, posFactory.getFeatureGenerator());

    for (Map.Entry<String, Object> resource : posFactory.getResources().entrySet()) {
      artifactMap.put(resource.getKey(), resource.getValue());
    }

    checkArtifactMap();
  }

  /**
   * Loads a {@link POSModel} from the given stream.
   *
   * @param in the stream to read the model from
   * @throws IOException if reading or parsing the model fails
   */
  public POSModel(InputStream in) throws IOException {
    super(COMPONENT_NAME, in);
  }

  /**
   * Loads a {@link POSModel} from the given file.
   *
   * @param modelFile the file to read the model from
   * @throws IOException if reading or parsing the model fails
   */
  public POSModel(File modelFile) throws IOException {
    super(COMPONENT_NAME, modelFile);
  }

  /**
   * Loads a {@link POSModel} from the given path.
   *
   * @param modelPath the path to read the model from
   * @throws IOException if reading or parsing the model fails
   */
  public POSModel(Path modelPath) throws IOException {
    this(modelPath.toFile());
  }

  /**
   * Loads a {@link POSModel} from the given URL.
   *
   * @param modelURL the URL to read the model from
   * @throws IOException if reading or parsing the model fails
   */
  public POSModel(URL modelURL) throws IOException {
    super(COMPONENT_NAME, modelURL);
  }

  @Override
  protected Class<? extends BaseToolFactory> getDefaultFactory() {
    return POSTaggerFactory.class;
  }

  @Override
  protected void validateArtifactMap() throws InvalidFormatException {
    super.validateArtifactMap();

    // NOTE(review): this accepts only MaxentModel entries and would reject a
    // SequenceClassificationModel, even though getPosSequenceModel() handles
    // both. Confirm whether sequence models are ever validated through here
    // (see the commented-out checkArtifactMap() in the sequence constructor).
    if (!(artifactMap.get(POS_MODEL_ENTRY_NAME) instanceof MaxentModel)) {
      throw new InvalidFormatException("POS model is incomplete!");
    }
  }

  /**
   * @deprecated use getPosSequenceModel instead. This method will be removed soon.
   *     Only required for Parser 1.5.x backward compatibility. Newer models don't need this anymore.
   */
  @Deprecated
  public MaxentModel getPosModel() {
    if (artifactMap.get(POS_MODEL_ENTRY_NAME) instanceof MaxentModel) {
      return (MaxentModel) artifactMap.get(POS_MODEL_ENTRY_NAME);
    }
    else {
      return null;
    }
  }

  /**
   * Retrieves the tagging model as a sequence classification model. A plain
   * maxent model is wrapped in a {@link BeamSearch} using the beam size
   * recorded in the manifest (or the default if none was recorded).
   *
   * @return the sequence model, or null if the artifact has an unknown type
   */
  public SequenceClassificationModel<String> getPosSequenceModel() {

    Properties manifest = (Properties) artifactMap.get(MANIFEST_ENTRY);

    if (artifactMap.get(POS_MODEL_ENTRY_NAME) instanceof MaxentModel) {
      String beamSizeString = manifest.getProperty(BeamSearch.BEAM_SIZE_PARAMETER);

      int beamSize = POSTaggerME.DEFAULT_BEAM_SIZE;
      if (beamSizeString != null) {
        beamSize = Integer.parseInt(beamSizeString);
      }

      return new BeamSearch<>(beamSize, (MaxentModel) artifactMap.get(POS_MODEL_ENTRY_NAME));
    }
    else if (artifactMap.get(POS_MODEL_ENTRY_NAME) instanceof SequenceClassificationModel) {
      return (SequenceClassificationModel) artifactMap.get(POS_MODEL_ENTRY_NAME);
    }
    else {
      return null;
    }
  }

  /**
   * Retrieves the {@link POSTaggerFactory} this model was created with.
   */
  public POSTaggerFactory getFactory() {
    return (POSTaggerFactory) this.toolFactory;
  }

  @Override
  protected void createArtifactSerializers(Map<String, ArtifactSerializer> serializers) {
    super.createArtifactSerializers(serializers);

    // The feature generator descriptor is stored as raw bytes.
    serializers.put("featuregen", new ByteArraySerializer());
  }

  /**
   * Retrieves the ngram dictionary.
   *
   * @return ngram dictionary or null if not used
   */
  public Dictionary getNgramDictionary() {
    if (getFactory() != null)
      return getFactory().getDictionary();
    return null;
  }

  @Override
  public Class<POSModelSerializer> getArtifactSerializerClass() {
    return POSModelSerializer.class;
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/postag/POSSample.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.postag;

import java.io.Serializable;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Objects;

import opennlp.tools.tokenize.WhitespaceTokenizer;
import opennlp.tools.util.InvalidFormatException;

/**
 * Represents a pos-tagged sentence.
 */
public class POSSample implements Serializable {

  private List<String> sentence;

  private List<String> tags;

  // Optional per-token additional context; null when not provided.
  // The constructor stores a defensive copy of the caller's array.
  private final String[][] additionalContext;

  public POSSample(String[] sentence, String[] tags) {
    this(sentence, tags, null);
  }

  public POSSample(List<String> sentence, List<String> tags) {
    this(sentence, tags, null);
  }

  public POSSample(List<String> sentence, List<String> tags,
      String[][] additionalContext) {
    this.sentence = Collections.unmodifiableList(sentence);
    this.tags = Collections.unmodifiableList(tags);

    checkArguments();

    String[][] ac;
    if (additionalContext != null) {
      // Defensive copy so later mutation of the caller's array cannot
      // change this sample.
      ac = new String[additionalContext.length][];

      for (int i = 0; i < additionalContext.length; i++) {
        ac[i] = new String[additionalContext[i].length];
        System.arraycopy(additionalContext[i], 0, ac[i], 0,
            additionalContext[i].length);
      }
    } else {
      ac = null;
    }
    this.additionalContext = ac;
  }

  public POSSample(String[] sentence, String[] tags,
      String[][] additionalContext) {
    this(Arrays.asList(sentence), Arrays.asList(tags), additionalContext);
  }

  /**
   * Validates that tokens and tags line up one-to-one and contain no nulls.
   *
   * @throws IllegalArgumentException if sizes differ or a null element occurs
   */
  private void checkArguments() {
    if (sentence.size() != tags.size()) {
      throw new IllegalArgumentException(
          "There must be exactly one tag for each token. tokens: "
          + sentence.size() + ", tags: " + tags.size());
    }

    if (sentence.contains(null)) {
      throw new IllegalArgumentException("null elements are not allowed in sentence tokens!");
    }
    if (tags.contains(null)) {
      throw new IllegalArgumentException("null elements are not allowed in tags!");
    }
  }

  /** Returns a fresh array copy of the sentence tokens. */
  public String[] getSentence() {
    return sentence.toArray(new String[sentence.size()]);
  }

  /** Returns a fresh array copy of the tags. */
  public String[] getTags() {
    return tags.toArray(new String[tags.size()]);
  }

  /**
   * Retrieves the additional context, or {@code null} if none was provided.
   * Note: the internal array is returned without copying.
   */
  public String[][] getAdditionalContext() {
    return this.additionalContext;
  }

  /**
   * @deprecated misspelled accessor kept for backward compatibility;
   *     use {@link #getAdditionalContext()} instead.
   */
  @Deprecated
  public String[][] getAddictionalContext() {
    return getAdditionalContext();
  }

  @Override
  public String toString() {

    // Cache the arrays once; getSentence()/getTags() copy on every call,
    // which previously made this loop accidentally quadratic.
    String[] tokens = getSentence();
    String[] tagArray = getTags();

    StringBuilder result = new StringBuilder();

    for (int i = 0; i < tokens.length; i++) {
      result.append(tokens[i]);
      result.append('_');
      result.append(tagArray[i]);
      result.append(' ');
    }

    if (result.length() > 0) {
      // get rid of last space
      result.setLength(result.length() - 1);
    }

    return result.toString();
  }

  /**
   * Parses a whitespace separated list of {@code token_tag} pairs.
   *
   * @param sentenceString the string to parse
   * @return the parsed sample
   * @throws InvalidFormatException if a token contains no underscore
   */
  public static POSSample parse(String sentenceString) throws InvalidFormatException {

    String[] tokenTags = WhitespaceTokenizer.INSTANCE.tokenize(sentenceString);

    String[] sentence = new String[tokenTags.length];
    String[] tags = new String[tokenTags.length];

    for (int i = 0; i < tokenTags.length; i++) {
      // Split on the LAST underscore so tokens containing '_' still parse.
      int split = tokenTags[i].lastIndexOf("_");

      if (split == -1) {
        throw new InvalidFormatException("Cannot find \"_\" inside token '"
            + tokenTags[i] + "'!");
      }

      sentence[i] = tokenTags[i].substring(0, split);
      tags[i] = tokenTags[i].substring(split + 1);
    }

    return new POSSample(sentence, tags);
  }

  @Override
  public int hashCode() {
    return Objects.hash(Arrays.hashCode(getSentence()), Arrays.hashCode(getTags()));
  }

  @Override
  public boolean equals(Object obj) {
    if (obj == this) {
      return true;
    }

    if (obj instanceof POSSample) {
      POSSample a = (POSSample) obj;

      return Arrays.equals(getSentence(), a.getSentence())
          && Arrays.equals(getTags(), a.getTags());
    }

    return this == obj;
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/postag/POSSampleEventStream.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.postag;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

import opennlp.tools.ml.model.Event;
import opennlp.tools.util.AbstractEventStream;
import opennlp.tools.util.ObjectStream;

/**
 * Reads {@link POSSample}s from the given stream and converts them into
 * {@link Event}s which can be used by the maxent library for training.
 */
public class POSSampleEventStream extends AbstractEventStream<POSSample> {

  /** The {@link POSContextGenerator} used to create the training events. */
  private POSContextGenerator cg;

  /**
   * Initializes the current instance with the given samples and the
   * given {@link POSContextGenerator}.
   *
   * @param samples the sample stream
   * @param cg the context generator to build features with
   */
  public POSSampleEventStream(ObjectStream<POSSample> samples, POSContextGenerator cg) {
    super(samples);
    this.cg = cg;
  }

  /**
   * Initializes the current instance with given samples and a
   * {@link DefaultPOSContextGenerator}.
   *
   * @param samples the sample stream
   */
  public POSSampleEventStream(ObjectStream<POSSample> samples) {
    this(samples, new DefaultPOSContextGenerator(null));
  }

  @Override
  protected Iterator<Event> createEvents(POSSample sample) {
    return generateEvents(sample.getSentence(), sample.getTags(),
        sample.getAddictionalContext(), cg).iterator();
  }

  /**
   * Creates one training event per token of the sentence.
   *
   * @param sentence the sentence tokens
   * @param tags the reference tag for each token
   * @param additionalContext extra per-token context; may be null
   * @param cg the context generator
   * @return the events, one per token
   */
  public static List<Event> generateEvents(String[] sentence, String[] tags,
      Object[] additionalContext, POSContextGenerator cg) {
    List<Event> events = new ArrayList<>(sentence.length);

    for (int index = 0; index < sentence.length; index++) {
      // It is safe to pass the reference tags as previous tags because the
      // context generator does not look at not-yet-predicted positions.
      String[] context = cg.getContext(index, sentence, tags, additionalContext);
      events.add(new Event(tags[index], context));
    }

    return events;
  }

  /**
   * Convenience overload of
   * {@link #generateEvents(String[], String[], Object[], POSContextGenerator)}
   * without additional context.
   */
  public static List<Event> generateEvents(String[] sentence, String[] tags,
      POSContextGenerator cg) {
    return generateEvents(sentence, tags, null, cg);
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/postag/POSSampleSequenceStream.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.postag;

import java.io.IOException;

import opennlp.tools.ml.model.AbstractModel;
import opennlp.tools.ml.model.Event;
import opennlp.tools.ml.model.Sequence;
import opennlp.tools.ml.model.SequenceStream;
import opennlp.tools.util.ObjectStream;

/**
 * Adapts a stream of {@link POSSample}s to the {@link SequenceStream}
 * interface used by sequence-based trainers (e.g. perceptron sequence
 * training): each sample becomes one {@link Sequence} of per-token events.
 */
public class POSSampleSequenceStream implements SequenceStream {

  // Context generator used to build the features for each token.
  private POSContextGenerator pcg;
  // Underlying sample stream.
  private ObjectStream<POSSample> psi;

  /**
   * Creates a sequence stream over the given samples using a
   * {@link DefaultPOSContextGenerator}.
   *
   * @param psi the sample stream
   * @throws IOException declared for API compatibility
   */
  public POSSampleSequenceStream(ObjectStream<POSSample> psi) throws IOException {
    this(psi, new DefaultPOSContextGenerator(null));
  }

  /**
   * Creates a sequence stream over the given samples.
   *
   * @param psi the sample stream
   * @param pcg the context generator to build features with
   * @throws IOException declared for API compatibility
   */
  public POSSampleSequenceStream(ObjectStream<POSSample> psi, POSContextGenerator pcg)
      throws IOException {
    this.psi = psi;
    this.pcg = pcg;
  }

  /**
   * Re-creates the events of a sequence by tagging its sentence with the
   * given (intermediate) model, so the trainer can compare predicted tags
   * against the reference events.
   */
  @SuppressWarnings("unchecked")
  public Event[] updateContext(Sequence sequence, AbstractModel model) {
    // Unchecked: the raw Sequence is known to carry POSSample sources here.
    Sequence<POSSample> pss = sequence;

    // Wrap the intermediate model in a throwaway tagger for this update.
    POSTagger tagger = new POSTaggerME(
        new POSModel("x-unspecified", model, null, new POSTaggerFactory()));

    String[] sentence = pss.getSource().getSentence();
    Object[] ac = pss.getSource().getAddictionalContext();
    String[] tags = tagger.tag(pss.getSource().getSentence());
    Event[] events = new Event[sentence.length];

    POSSampleEventStream.generateEvents(sentence, tags, ac, pcg)
        .toArray(events);

    return events;
  }

  /**
   * Reads the next sample and converts it into a {@link Sequence} of events,
   * or returns {@code null} when the underlying stream is exhausted.
   */
  @Override
  public Sequence read() throws IOException {
    POSSample sample = psi.read();

    if (sample != null) {
      String[] sentence = sample.getSentence();
      String[] tags = sample.getTags();
      Event[] events = new Event[sentence.length];

      for (int i = 0; i < sentence.length; i++) {
        // it is safe to pass the tags as previous tags because
        // the context generator does not look for non predicted tags
        String[] context = pcg.getContext(i, sentence, tags, null);
        events[i] = new Event(tags[i], context);
      }
      Sequence<POSSample> sequence = new Sequence<POSSample>(events,sample);
      return sequence;
    }

    return null;
  }

  @Override
  public void reset() throws IOException, UnsupportedOperationException {
    psi.reset();
  }

  @Override
  public void close() throws IOException {
    psi.close();
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/postag/POSTagger.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.postag;

import opennlp.tools.util.Sequence;

/**
 * The interface for part of speech taggers.
 */
public interface POSTagger {

  /**
   * Assigns the sentence of tokens pos tags.
   *
   * @param sentence the sentence of tokens to be tagged
   * @return an array of pos tags for each token provided in sentence
   */
  String[] tag(String[] sentence);

  /**
   * Assigns the sentence of tokens pos tags, taking additional per-token
   * context into account.
   *
   * @param sentence the sentence of tokens to be tagged
   * @param additionalContext additional per-token context; may be {@code null}
   * @return an array of pos tags for each token provided in sentence
   */
  String[] tag(String[] sentence, Object[] additionalContext);

  /**
   * Returns the top scoring tag sequences for the given sentence.
   *
   * @param sentence the sentence of tokens to be tagged
   * @return the best scoring tag sequences
   */
  Sequence[] topKSequences(String[] sentence);

  /**
   * Returns the top scoring tag sequences for the given sentence, taking
   * additional per-token context into account.
   *
   * @param sentence the sentence of tokens to be tagged
   * @param additionalContext additional per-token context; may be {@code null}
   * @return the best scoring tag sequences
   */
  Sequence[] topKSequences(String[] sentence, Object[] additionalContext);
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/postag/POSTaggerCrossValidator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.postag;

import java.io.File;
import java.io.IOException;
import java.util.Map;

import opennlp.tools.dictionary.Dictionary;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.TrainingParameters;
import opennlp.tools.util.eval.CrossValidationPartitioner;
import opennlp.tools.util.eval.Mean;

/**
 * Performs n-fold cross validation of a POS tagger: for each fold a model is
 * trained on the training partition and evaluated on the held-out partition,
 * accumulating the word accuracy over all folds.
 */
public class POSTaggerCrossValidator {

  private final String languageCode;

  private final TrainingParameters params;

  private byte[] featureGeneratorBytes;
  private Map<String, Object> resources;

  private Mean wordAccuracy = new Mean();
  private POSTaggerEvaluationMonitor[] listeners;

  /* this will be used to load the factory after the ngram dictionary was created */
  private String factoryClassName;
  /* user can also send a ready to use factory */
  private POSTaggerFactory factory;

  private Integer tagdicCutoff = null;
  private File tagDictionaryFile;

  /**
   * Creates a {@link POSTaggerCrossValidator} that builds a ngram dictionary
   * dynamically. It instantiates a sub-class of {@link POSTaggerFactory} using
   * the tag and the ngram dictionaries.
   */
  public POSTaggerCrossValidator(String languageCode,
      TrainingParameters trainParam, File tagDictionary,
      byte[] featureGeneratorBytes, Map<String, Object> resources,
      Integer tagdicCutoff, String factoryClass,
      POSTaggerEvaluationMonitor... listeners) {
    this.languageCode = languageCode;
    this.params = trainParam;
    this.featureGeneratorBytes = featureGeneratorBytes;
    this.resources = resources;
    this.listeners = listeners;
    this.factoryClassName = factoryClass;
    this.tagdicCutoff = tagdicCutoff;
    this.tagDictionaryFile = tagDictionary;
  }

  /**
   * Creates a {@link POSTaggerCrossValidator} using the given
   * {@link POSTaggerFactory}.
   */
  public POSTaggerCrossValidator(String languageCode,
      TrainingParameters trainParam, POSTaggerFactory factory,
      POSTaggerEvaluationMonitor... listeners) {
    this.languageCode = languageCode;
    this.params = trainParam;
    this.listeners = listeners;
    this.factory = factory;
    this.tagdicCutoff = null;
  }

  /**
   * Starts the evaluation.
   *
   * @param samples the data to train and test
   * @param nFolds number of folds
   *
   * @throws IOException if reading the samples or training fails
   */
  public void evaluate(ObjectStream<POSSample> samples, int nFolds) throws IOException {

    CrossValidationPartitioner<POSSample> partitioner = new CrossValidationPartitioner<>(
        samples, nFolds);

    while (partitioner.hasNext()) {

      CrossValidationPartitioner.TrainingSampleStream<POSSample> trainingSampleStream = partitioner
          .next();

      // BUGFIX: create the factory BEFORE it is used below. The original
      // code dereferenced this.factory first and only performed this null
      // check afterwards, which threw a NullPointerException when the
      // cross validator was constructed with a factory class name instead
      // of a ready-to-use factory.
      if (this.factory == null) {
        this.factory = POSTaggerFactory.create(this.factoryClassName, null, null);
      }

      if (this.tagDictionaryFile != null
          && this.factory.getTagDictionary() == null) {
        this.factory.setTagDictionary(this.factory
            .createTagDictionary(tagDictionaryFile));
      }

      TagDictionary dict = null;
      if (this.tagdicCutoff != null) {
        dict = this.factory.getTagDictionary();
        if (dict == null) {
          dict = this.factory.createEmptyTagDictionary();
        }
        if (dict instanceof MutableTagDictionary) {
          POSTaggerME.populatePOSDictionary(trainingSampleStream,
              (MutableTagDictionary)dict, this.tagdicCutoff);
        } else {
          throw new IllegalArgumentException(
              "Can't extend a TagDictionary that does not implement MutableTagDictionary.");
        }
        // Rewind the stream so the dictionary-population pass does not
        // consume the training samples.
        trainingSampleStream.reset();
      }

      factory.init(featureGeneratorBytes, resources, dict);

      POSModel model = POSTaggerME.train(languageCode, trainingSampleStream,
          params, this.factory);

      POSEvaluator evaluator = new POSEvaluator(new POSTaggerME(model), listeners);

      evaluator.evaluate(trainingSampleStream.getTestSampleStream());

      wordAccuracy.add(evaluator.getWordAccuracy(), evaluator.getWordCount());

      if (this.tagdicCutoff != null) {
        // Drop the per-fold induced dictionary so it is rebuilt next fold.
        this.factory.setTagDictionary(null);
      }
    }
  }

  /**
   * Retrieves the accuracy for all iterations.
   *
   * @return the word accuracy
   */
  public double getWordAccuracy() {
    return wordAccuracy.mean();
  }

  /**
   * Retrieves the number of words which were validated over all iterations.
   * The result is the amount of folds multiplied by the total number of words.
   *
   * @return the word count
   */
  public long getWordCount() {
    return wordAccuracy.count();
  }

  // Unused helper kept for source compatibility.
  private static POSTaggerFactory create(Dictionary ngram, TagDictionary pos) {
    return new POSTaggerFactory(ngram, pos);
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/postag/POSTaggerEvaluationMonitor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.postag;

import opennlp.tools.util.eval.EvaluationMonitor;

/**
 * Marker interface for listeners that observe the evaluation of
 * {@link POSSample}s, e.g. during cross validation. Implementations
 * receive the correctly and incorrectly classified samples via the
 * callbacks inherited from {@link EvaluationMonitor}.
 */
public interface POSTaggerEvaluationMonitor extends EvaluationMonitor<POSSample> {
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/postag/POSTaggerFactory.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.postag;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Properties;
import java.util.Set;

import opennlp.tools.dictionary.Dictionary;
import opennlp.tools.ml.model.AbstractModel;
import opennlp.tools.namefind.TokenNameFinderFactory;
import opennlp.tools.util.BaseToolFactory;
import opennlp.tools.util.InvalidFormatException;
import opennlp.tools.util.SequenceValidator;
import opennlp.tools.util.Version;
import opennlp.tools.util.ext.ExtensionLoader;
import opennlp.tools.util.featuregen.AdaptiveFeatureGenerator;
import opennlp.tools.util.featuregen.AggregatedFeatureGenerator;
import opennlp.tools.util.featuregen.GeneratorFactory;
import opennlp.tools.util.model.ArtifactSerializer;
import opennlp.tools.util.model.UncloseableInputStream;

/**
 * The factory that provides POS Tagger default implementations and resources.
 */
public class POSTaggerFactory extends BaseToolFactory {

  private static final String TAG_DICTIONARY_ENTRY_NAME = "tags.tagdict";
  private static final String NGRAM_DICTIONARY_ENTRY_NAME = "ngram.dictionary";

  // Only used by pre-1.8 models; retained for backward compatibility.
  protected Dictionary ngramDictionary;

  // Serialized feature generation descriptor (XML); lazily filled from the
  // artifact provider or from the default descriptor on the classpath.
  private byte[] featureGeneratorBytes;

  // Resources referenced by the feature generation descriptor, keyed by name.
  private Map<String, Object> resources;

  protected TagDictionary posDictionary;

  /**
   * Creates a {@link POSTaggerFactory} that provides the default implementation
   * of the resources.
   */
  public POSTaggerFactory() {
  }

  /**
   * Creates a {@link POSTaggerFactory}. Use this constructor to
   * programmatically create a factory.
   *
   * @param ngramDictionary the ngram dictionary, may be null
   * @param posDictionary the tag dictionary, may be null
   *
   * @deprecated this constructor is here for backward compatibility and
   *     is not functional anymore in the training of 1.8.x series models
   */
  @Deprecated
  public POSTaggerFactory(Dictionary ngramDictionary,
      TagDictionary posDictionary) {
    this.init(ngramDictionary, posDictionary);

    // TODO: This could be made functional by creating some default feature generation
    // which uses the dictionary ...
  }

  /**
   * Creates a {@link POSTaggerFactory} with a custom feature generation
   * descriptor. When {@code featureGeneratorBytes} is null, the default
   * descriptor bundled on the classpath is used.
   *
   * @param featureGeneratorBytes serialized feature generation descriptor, may be null
   * @param resources resources referenced by the descriptor, may be null
   * @param posDictionary the tag dictionary, may be null
   */
  public POSTaggerFactory(byte[] featureGeneratorBytes,
      final Map<String, Object> resources, TagDictionary posDictionary) {
    this.featureGeneratorBytes = featureGeneratorBytes;
    if (this.featureGeneratorBytes == null) {
      this.featureGeneratorBytes = loadDefaultFeatureGeneratorBytes();
    }
    this.resources = resources;
    this.posDictionary = posDictionary;
  }

  @Deprecated // will be removed when only 8 series models are supported
  protected void init(Dictionary ngramDictionary, TagDictionary posDictionary) {
    this.ngramDictionary = ngramDictionary;
    this.posDictionary = posDictionary;
  }

  protected void init(byte[] featureGeneratorBytes,
      final Map<String, Object> resources, TagDictionary posDictionary) {
    this.featureGeneratorBytes = featureGeneratorBytes;
    this.resources = resources;
    this.posDictionary = posDictionary;
  }

  /**
   * Loads the default feature generation descriptor from the classpath.
   *
   * @throws IllegalStateException if the descriptor is missing or unreadable
   */
  private static byte[] loadDefaultFeatureGeneratorBytes() {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    try (InputStream in = TokenNameFinderFactory.class.getResourceAsStream(
        "/opennlp/tools/postag/pos-default-features.xml")) {

      if (in == null) {
        throw new IllegalStateException(
            "Classpath must contain pos-default-features.xml file!");
      }

      byte[] buf = new byte[1024];
      int len;
      while ((len = in.read(buf)) > 0) {
        bytes.write(buf, 0, len);
      }
    }
    catch (IOException e) {
      // FIX: chain the cause so the original I/O failure is not lost
      throw new IllegalStateException(
          "Failed reading from pos-default-features.xml file on classpath!", e);
    }

    return bytes.toByteArray();
  }

  /**
   * Creates the {@link AdaptiveFeatureGenerator}. Usually this
   * is a set of generators contained in the {@link AggregatedFeatureGenerator}.
   *
   * Note:
   * The generators are created on every call to this method.
   *
   * @return the feature generator or null if there is no descriptor in the model
   */
  public AdaptiveFeatureGenerator createFeatureGenerators() {

    if (featureGeneratorBytes == null && artifactProvider != null) {
      featureGeneratorBytes = artifactProvider.getArtifact(
          POSModel.GENERATOR_DESCRIPTOR_ENTRY_NAME);
    }

    if (featureGeneratorBytes == null) {
      featureGeneratorBytes = loadDefaultFeatureGeneratorBytes();
    }

    InputStream descriptorIn = new ByteArrayInputStream(featureGeneratorBytes);

    AdaptiveFeatureGenerator generator;
    try {
      generator = GeneratorFactory.create(descriptorIn, key -> {
        if (artifactProvider != null) {
          return artifactProvider.getArtifact(key);
        }
        else {
          return resources.get(key);
        }
      });
    } catch (InvalidFormatException e) {
      // It is assumed that the creation of the feature generation does not
      // fail after it succeeded once during model loading.

      // But it might still be possible that such an exception is thrown,
      // in this case the caller should not be forced to handle the exception
      // and a Runtime Exception is thrown instead.

      // If the re-creation of the feature generation fails it is assumed
      // that this can only be caused by a programming mistake and therefore
      // throwing a Runtime Exception is reasonable

      // FIX: preserve message and cause instead of throwing a bare exception
      throw new IllegalStateException(
          "Failed creating the feature generators from the descriptor!", e);
    } catch (IOException e) {
      throw new IllegalStateException("Reading from mem cannot result in an I/O error", e);
    }

    return generator;
  }

  @Override
  @SuppressWarnings("rawtypes")
  public Map<String, ArtifactSerializer> createArtifactSerializersMap() {
    Map<String, ArtifactSerializer> serializers = super.createArtifactSerializersMap();

    // NOTE: This is only needed for old models and this if can be removed if support is dropped
    POSDictionarySerializer.register(serializers);

    return serializers;
  }

  @Override
  public Map<String, Object> createArtifactMap() {
    Map<String, Object> artifactMap = super.createArtifactMap();

    if (posDictionary != null)
      artifactMap.put(TAG_DICTIONARY_ENTRY_NAME, posDictionary);

    if (ngramDictionary != null)
      artifactMap.put(NGRAM_DICTIONARY_ENTRY_NAME, ngramDictionary);

    return artifactMap;
  }

  /**
   * Creates a {@link TagDictionary} from the given file.
   *
   * @param dictionary the dictionary file
   * @throws IOException if the file cannot be read or parsed
   */
  public TagDictionary createTagDictionary(File dictionary)
      throws IOException {
    // FIX: close the stream even when parsing fails (was leaked on exception)
    try (InputStream in = new FileInputStream(dictionary)) {
      return createTagDictionary(in);
    }
  }

  public TagDictionary createTagDictionary(InputStream in)
      throws IOException {
    return POSDictionary.create(in);
  }

  /**
   * Sets the tag dictionary. Only valid before a model was loaded.
   *
   * @throws IllegalStateException if this factory is backed by an artifact provider
   */
  public void setTagDictionary(TagDictionary dictionary) {
    if (artifactProvider != null) {
      throw new IllegalStateException(
          "Can not set tag dictionary while using artifact provider.");
    }
    this.posDictionary = dictionary;
  }

  protected Map<String, Object> getResources() {
    if (resources != null) {
      return resources;
    }
    return Collections.emptyMap();
  }

  protected byte[] getFeatureGenerator() {
    return featureGeneratorBytes;
  }

  public TagDictionary getTagDictionary() {
    if (this.posDictionary == null && artifactProvider != null)
      this.posDictionary = artifactProvider.getArtifact(TAG_DICTIONARY_ENTRY_NAME);

    return this.posDictionary;
  }

  /**
   * @deprecated this will be reduced in visibility and later removed
   */
  @Deprecated
  public Dictionary getDictionary() {
    if (this.ngramDictionary == null && artifactProvider != null)
      this.ngramDictionary = artifactProvider.getArtifact(NGRAM_DICTIONARY_ENTRY_NAME);

    return this.ngramDictionary;
  }

  @Deprecated
  public void setDictionary(Dictionary ngramDict) {
    if (artifactProvider != null) {
      throw new IllegalStateException(
          "Can not set ngram dictionary while using artifact provider.");
    }
    this.ngramDictionary = ngramDict;
  }

  public POSContextGenerator getPOSContextGenerator() {
    return getPOSContextGenerator(0);
  }

  /**
   * Creates the context generator. Pre-1.8 models get the legacy
   * dictionary-based generator; newer models use the configurable one.
   *
   * @param cacheSize size of the feature cache, 0 disables caching
   */
  public POSContextGenerator getPOSContextGenerator(int cacheSize) {
    if (artifactProvider != null) {
      Properties manifest = (Properties) artifactProvider.getArtifact("manifest.properties");
      String version = manifest.getProperty("OpenNLP-Version");
      if (Version.parse(version).getMinor() < 8) {
        return new DefaultPOSContextGenerator(cacheSize, getDictionary());
      }
    }

    return new ConfigurablePOSContextGenerator(cacheSize, createFeatureGenerators());
  }

  public SequenceValidator<String> getSequenceValidator() {
    return new DefaultPOSSequenceValidator(getTagDictionary());
  }

  // TODO: This should not be done anymore for 8 models, they can just
  // use the SerializableArtifact interface
  public static class POSDictionarySerializer implements ArtifactSerializer<POSDictionary> {

    public POSDictionary create(InputStream in) throws IOException {
      return POSDictionary.create(new UncloseableInputStream(in));
    }

    public void serialize(POSDictionary artifact, OutputStream out) throws IOException {
      artifact.serialize(out);
    }

    @SuppressWarnings("rawtypes")
    static void register(Map<String, ArtifactSerializer> factories) {
      factories.put("tagdict", new POSDictionarySerializer());
    }
  }

  /**
   * Verifies that every tag used by the dictionary is known to the model.
   *
   * @throws InvalidFormatException if the dictionary contains unknown tags
   */
  protected void validatePOSDictionary(POSDictionary posDict, AbstractModel posModel)
      throws InvalidFormatException {
    Set<String> dictTags = new HashSet<>();

    for (String word : posDict) {
      Collections.addAll(dictTags, posDict.getTags(word));
    }

    Set<String> modelTags = new HashSet<>();

    for (int i = 0; i < posModel.getNumOutcomes(); i++) {
      modelTags.add(posModel.getOutcome(i));
    }

    if (!modelTags.containsAll(dictTags)) {
      StringBuilder unknownTag = new StringBuilder();
      for (String d : dictTags) {
        if (!modelTags.contains(d)) {
          unknownTag.append(d).append(" ");
        }
      }
      throw new InvalidFormatException("Tag dictionary contains tags "
          + "which are unknown by the model! The unknown tags are: "
          + unknownTag.toString());
    }
  }

  @Override
  public void validateArtifactMap() throws InvalidFormatException {

    // Ensure that the tag dictionary is compatible with the model
    Object tagdictEntry = this.artifactProvider
        .getArtifact(TAG_DICTIONARY_ENTRY_NAME);

    if (tagdictEntry != null) {
      if (tagdictEntry instanceof POSDictionary) {
        if (!this.artifactProvider.isLoadedFromSerialized()) {
          AbstractModel posModel = this.artifactProvider
              .getArtifact(POSModel.POS_MODEL_ENTRY_NAME);
          POSDictionary posDict = (POSDictionary) tagdictEntry;
          validatePOSDictionary(posDict, posModel);
        }
      } else {
        throw new InvalidFormatException(
            "POSTag dictionary has wrong type!");
      }
    }

    Object ngramDictEntry = this.artifactProvider
        .getArtifact(NGRAM_DICTIONARY_ENTRY_NAME);

    if (ngramDictEntry != null && !(ngramDictEntry instanceof Dictionary)) {
      throw new InvalidFormatException("NGram dictionary has wrong type!");
    }
  }

  @Deprecated
  public static POSTaggerFactory create(String subclassName,
      Dictionary ngramDictionary, TagDictionary posDictionary)
      throws InvalidFormatException {
    if (subclassName == null) {
      // will create the default factory
      return new POSTaggerFactory(ngramDictionary, posDictionary);
    }
    try {
      POSTaggerFactory theFactory = ExtensionLoader.instantiateExtension(
          POSTaggerFactory.class, subclassName);
      theFactory.init(ngramDictionary, posDictionary);
      return theFactory;
    } catch (Exception e) {
      // FIX: grammar in error message ("throw" -> "threw")
      String msg = "Could not instantiate the " + subclassName
          + ". The initialization threw an exception.";
      throw new InvalidFormatException(msg, e);
    }
  }

  public static POSTaggerFactory create(String subclassName, byte[] featureGeneratorBytes,
      Map<String, Object> resources, TagDictionary posDictionary)
      throws InvalidFormatException {
    POSTaggerFactory theFactory;

    if (subclassName == null) {
      // will create the default factory
      theFactory = new POSTaggerFactory(null, posDictionary);
    }
    else {
      try {
        theFactory = ExtensionLoader.instantiateExtension(
            POSTaggerFactory.class, subclassName);
      } catch (Exception e) {
        // FIX: grammar in error message ("throw" -> "threw")
        String msg = "Could not instantiate the " + subclassName
            + ". The initialization threw an exception.";
        throw new InvalidFormatException(msg, e);
      }
    }

    theFactory.init(featureGeneratorBytes, resources, posDictionary);

    return theFactory;
  }

  /**
   * Creates a new empty, mutable, case-sensitive tag dictionary and installs
   * it as this factory's dictionary.
   */
  public TagDictionary createEmptyTagDictionary() {
    this.posDictionary = new POSDictionary(true);
    return this.posDictionary;
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/postag/POSTaggerME.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.postag;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.atomic.AtomicInteger;

import opennlp.tools.dictionary.Dictionary;
import opennlp.tools.ml.BeamSearch;
import opennlp.tools.ml.EventModelSequenceTrainer;
import opennlp.tools.ml.EventTrainer;
import opennlp.tools.ml.SequenceTrainer;
import opennlp.tools.ml.TrainerFactory;
import opennlp.tools.ml.TrainerFactory.TrainerType;
import opennlp.tools.ml.model.Event;
import opennlp.tools.ml.model.MaxentModel;
import opennlp.tools.ml.model.SequenceClassificationModel;
import opennlp.tools.ngram.NGramModel;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.Sequence;
import opennlp.tools.util.SequenceValidator;
import opennlp.tools.util.StringList;
import opennlp.tools.util.StringUtil;
import opennlp.tools.util.TrainingParameters;
import opennlp.tools.util.featuregen.StringPattern;

/**
 * A part-of-speech tagger that uses maximum entropy. Tries to predict whether
 * words are nouns, verbs, or any of 70 other POS tags depending on their
 * surrounding context.
 */
public class POSTaggerME implements POSTagger {

  public static final int DEFAULT_BEAM_SIZE = 3;

  private POSModel modelPackage;

  /**
   * The feature context generator.
   */
  protected POSContextGenerator contextGen;

  /**
   * Tag dictionary used for restricting words to a fixed set of tags.
   */
  protected TagDictionary tagDictionary;

  protected Dictionary ngramDictionary;

  /**
   * Says whether a filter should be used to check whether a tag assignment
   * is to a word outside of a closed class.
   */
  protected boolean useClosedClassTagsFilter = false;

  /**
   * The size of the beam to be used in determining the best sequence of pos tags.
   */
  protected int size;

  // Best tag sequence of the most recently tagged sentence; feeds probs().
  private Sequence bestSequence;

  private SequenceClassificationModel<String> model;

  private SequenceValidator<String> sequenceValidator;

  /**
   * Initializes the current instance with the provided model.
   *
   * @param model the trained POS model
   */
  public POSTaggerME(POSModel model) {
    POSTaggerFactory factory = model.getFactory();

    int beamSize = POSTaggerME.DEFAULT_BEAM_SIZE;

    String beamSizeString = model.getManifestProperty(BeamSearch.BEAM_SIZE_PARAMETER);

    if (beamSizeString != null) {
      beamSize = Integer.parseInt(beamSizeString);
    }

    modelPackage = model;

    contextGen = factory.getPOSContextGenerator(beamSize);
    tagDictionary = factory.getTagDictionary();
    size = beamSize;

    sequenceValidator = factory.getSequenceValidator();

    if (model.getPosSequenceModel() != null) {
      this.model = model.getPosSequenceModel();
    }
    else {
      this.model = new opennlp.tools.ml.BeamSearch<>(beamSize,
          model.getPosModel(), 0);
    }
  }

  /**
   * Retrieves an array of all possible part-of-speech tags from the
   * tagger.
   *
   * @return String[]
   */
  public String[] getAllPosTags() {
    return model.getOutcomes();
  }

  public String[] tag(String[] sentence) {
    return this.tag(sentence, null);
  }

  /**
   * Tags the sentence, optionally using additional context.
   *
   * @param sentence the tokens of the sentence
   * @param additionalContext extra context passed to the context generator, may be null
   */
  public String[] tag(String[] sentence, Object[] additionalContext) {
    // FIX: parameter renamed from misspelled "additionaContext"
    bestSequence = model.bestSequence(sentence, additionalContext, contextGen, sequenceValidator);
    List<String> t = bestSequence.getOutcomes();
    return t.toArray(new String[t.size()]);
  }

  /**
   * Returns at most the specified number of taggings for the specified sentence.
   *
   * @param numTaggings The number of tagging to be returned.
   * @param sentence An array of tokens which make up a sentence.
   *
   * @return At most the specified number of taggings for the specified sentence.
   */
  public String[][] tag(int numTaggings, String[] sentence) {
    Sequence[] bestSequences = model.bestSequences(numTaggings, sentence, null,
        contextGen, sequenceValidator);
    String[][] tags = new String[bestSequences.length][];
    for (int si = 0; si < tags.length; si++) {
      List<String> t = bestSequences[si].getOutcomes();
      tags[si] = t.toArray(new String[t.size()]);
    }
    return tags;
  }

  public Sequence[] topKSequences(String[] sentence) {
    return this.topKSequences(sentence, null);
  }

  public Sequence[] topKSequences(String[] sentence, Object[] additionalContext) {
    return model.bestSequences(size, sentence, additionalContext, contextGen, sequenceValidator);
  }

  /**
   * Populates the specified array with the probabilities for each tag of the last tagged sentence.
   *
   * @param probs An array to put the probabilities into.
   */
  public void probs(double[] probs) {
    bestSequence.getProbs(probs);
  }

  /**
   * Returns an array with the probabilities for each tag of the last tagged sentence.
   *
   * @return an array with the probabilities for each tag of the last tagged sentence.
   */
  public double[] probs() {
    return bestSequence.getProbs();
  }

  public String[] getOrderedTags(List<String> words, List<String> tags, int index) {
    return getOrderedTags(words, tags, index, null);
  }

  /**
   * Returns the tags for the word at {@code index} ordered by decreasing
   * probability, optionally filling {@code tprobs} with the probabilities.
   * Only supported when the underlying model is an event model.
   *
   * @throws UnsupportedOperationException if the model is not an event model
   */
  public String[] getOrderedTags(List<String> words, List<String> tags,
      int index, double[] tprobs) {

    if (modelPackage.getPosModel() != null) {

      MaxentModel posModel = modelPackage.getPosModel();

      double[] probs = posModel.eval(contextGen.getContext(index,
          words.toArray(new String[words.size()]),
          tags.toArray(new String[tags.size()]), null));

      // Selection sort over probs: repeatedly pick the current maximum,
      // record its outcome, then zero it out so it is not picked again.
      String[] orderedTags = new String[probs.length];
      for (int i = 0; i < probs.length; i++) {
        int max = 0;
        for (int ti = 1; ti < probs.length; ti++) {
          if (probs[ti] > probs[max]) {
            max = ti;
          }
        }
        orderedTags[i] = posModel.getOutcome(max);
        if (tprobs != null) {
          tprobs[i] = probs[max];
        }
        probs[max] = 0;
      }

      return orderedTags;
    }
    else {
      // FIX: spelling in message ("classifcation" -> "classification")
      throw new UnsupportedOperationException("This method can only be called if the "
          + "classification model is an event model!");
    }
  }

  /**
   * Trains a new POS model.
   *
   * @param languageCode ISO language code of the training data
   * @param samples stream of training samples
   * @param trainParams training parameters, including the trainer type and beam size
   * @param posFactory factory providing the context generator and resources
   *
   * @throws IOException if reading the sample stream fails
   * @throws IllegalArgumentException if the configured trainer type is unsupported
   */
  public static POSModel train(String languageCode,
      ObjectStream<POSSample> samples, TrainingParameters trainParams,
      POSTaggerFactory posFactory) throws IOException {

    int beamSize = trainParams.getIntParameter(BeamSearch.BEAM_SIZE_PARAMETER,
        POSTaggerME.DEFAULT_BEAM_SIZE);

    POSContextGenerator contextGenerator = posFactory.getPOSContextGenerator();

    Map<String, String> manifestInfoEntries = new HashMap<>();

    TrainerType trainerType = TrainerFactory.getTrainerType(trainParams);

    MaxentModel posModel = null;
    SequenceClassificationModel<String> seqPosModel = null;
    if (TrainerType.EVENT_MODEL_TRAINER.equals(trainerType)) {
      ObjectStream<Event> es = new POSSampleEventStream(samples, contextGenerator);

      EventTrainer trainer = TrainerFactory.getEventTrainer(trainParams, manifestInfoEntries);
      posModel = trainer.train(es);
    }
    else if (TrainerType.EVENT_MODEL_SEQUENCE_TRAINER.equals(trainerType)) {
      POSSampleSequenceStream ss = new POSSampleSequenceStream(samples, contextGenerator);
      EventModelSequenceTrainer trainer =
          TrainerFactory.getEventModelSequenceTrainer(trainParams, manifestInfoEntries);
      posModel = trainer.train(ss);
    }
    else if (TrainerType.SEQUENCE_TRAINER.equals(trainerType)) {
      SequenceTrainer trainer = TrainerFactory.getSequenceModelTrainer(
          trainParams, manifestInfoEntries);

      // TODO: This will probably cause issue, since the feature generator uses the outcomes array

      POSSampleSequenceStream ss = new POSSampleSequenceStream(samples, contextGenerator);
      seqPosModel = trainer.train(ss);
    }
    else {
      throw new IllegalArgumentException("Trainer type is not supported: " + trainerType);
    }

    if (posModel != null) {
      return new POSModel(languageCode, posModel, beamSize, manifestInfoEntries, posFactory);
    }
    else {
      return new POSModel(languageCode, seqPosModel, manifestInfoEntries, posFactory);
    }
  }

  /**
   * Builds an ngram dictionary over the words of the sample stream, keeping
   * only unigrams that occur at least {@code cutoff} times.
   */
  public static Dictionary buildNGramDictionary(ObjectStream<POSSample> samples, int cutoff)
      throws IOException {

    NGramModel ngramModel = new NGramModel();

    POSSample sample;
    while ((sample = samples.read()) != null) {
      String[] words = sample.getSentence();

      if (words.length > 0) {
        ngramModel.add(new StringList(words), 1, 1);
      }
    }

    ngramModel.cutoff(cutoff, Integer.MAX_VALUE);

    return ngramModel.toDictionary(true);
  }

  /**
   * Expands the given mutable tag dictionary with word/tag pairs observed in
   * the samples at least {@code cutoff} times. Words containing digits are
   * skipped; tags already present in the dictionary are always retained
   * (they start counting at the cutoff).
   */
  public static void populatePOSDictionary(ObjectStream<POSSample> samples,
      MutableTagDictionary dict, int cutoff) throws IOException {
    // NOTE(review): progress reporting via System.out; a logger would be
    // preferable, but changing it would alter observable output.
    System.out.println("Expanding POS Dictionary ...");
    long start = System.nanoTime();

    // the data structure will store the word, the tag, and the number of
    // occurrences
    Map<String, Map<String, AtomicInteger>> newEntries = new HashMap<>();
    POSSample sample;
    while ((sample = samples.read()) != null) {
      String[] words = sample.getSentence();
      String[] tags = sample.getTags();

      for (int i = 0; i < words.length; i++) {
        // only store words
        if (!StringPattern.recognize(words[i]).containsDigit()) {
          String word;
          if (dict.isCaseSensitive()) {
            word = words[i];
          }
          else {
            word = StringUtil.toLowerCase(words[i]);
          }

          if (!newEntries.containsKey(word)) {
            newEntries.put(word, new HashMap<>());
          }

          String[] dictTags = dict.getTags(word);
          if (dictTags != null) {
            for (String tag : dictTags) {
              // for this tags we start with the cutoff
              Map<String, AtomicInteger> value = newEntries.get(word);
              if (!value.containsKey(tag)) {
                value.put(tag, new AtomicInteger(cutoff));
              }
            }
          }

          if (!newEntries.get(word).containsKey(tags[i])) {
            newEntries.get(word).put(tags[i], new AtomicInteger(1));
          } else {
            newEntries.get(word).get(tags[i]).incrementAndGet();
          }
        }
      }
    }

    // now we check if the word + tag pairs have enough occurrences, if yes we
    // add it to the dictionary
    for (Entry<String, Map<String, AtomicInteger>> wordEntry : newEntries
        .entrySet()) {
      List<String> tagsForWord = new ArrayList<>();
      for (Entry<String, AtomicInteger> entry : wordEntry.getValue().entrySet()) {
        if (entry.getValue().get() >= cutoff) {
          tagsForWord.add(entry.getKey());
        }
      }
      if (tagsForWord.size() > 0) {
        dict.put(wordEntry.getKey(),
            tagsForWord.toArray(new String[tagsForWord.size()]));
      }
    }

    System.out.println("... finished expanding POS Dictionary. ["
        + (System.nanoTime() - start) / 1000000 + "ms]");
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/postag/TagDictionary.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.postag; /** * Interface to determine which tags are valid for a particular word * based on a tag dictionary. */ public interface TagDictionary { /** * Returns a list of valid tags for the specified word. * * @param word The word. * @return A list of valid tags for the specified word or null if no information * is available for that word. */ String[] getTags(String word); }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/postag/WordTagSampleStream.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.postag;

import java.io.IOException;

import opennlp.tools.util.FilterObjectStream;
import opennlp.tools.util.InvalidFormatException;
import opennlp.tools.util.ObjectStream;

/**
 * A stream filter which reads a sentence per line which contains
 * words and tags in word_tag format and outputs a {@link POSSample} objects.
 */
public class WordTagSampleStream extends FilterObjectStream<String, POSSample> {

  /**
   * Initializes the current instance.
   *
   * @param sentences the sentences
   */
  public WordTagSampleStream(ObjectStream<String> sentences) {
    super(sentences);
  }

  /**
   * Parses the next sentence and return the next
   * {@link POSSample} object.
   *
   * If an error occurs an empty {@link POSSample} object is returned
   * and a warning message is logged. Usually it does not matter if one
   * of many sentences is ignored.
   *
   * TODO: An exception in error case should be thrown.
   */
  @Override
  public POSSample read() throws IOException {

    String sentence = samples.read();

    if (sentence == null) {
      // sentences stream is exhausted
      return null;
    }

    try {
      return POSSample.parse(sentence);
    } catch (InvalidFormatException e) {
      // FIX: warnings belong on stderr, not stdout (stdout may carry data)
      System.err.println("Error during parsing, ignoring sentence: " + sentence);
      return new POSSample(new String[]{}, new String[]{});
    }
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/postag/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Package related to part-of-speech tagging. */ package opennlp.tools.postag;
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/sentdetect/DefaultEndOfSentenceScanner.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.sentdetect;

import java.util.ArrayList;
import java.util.List;

/**
 * Default implementation of the {@link EndOfSentenceScanner}.
 * It uses a character array with possible end of sentence chars
 * to identify potential sentence endings.
 */
public class DefaultEndOfSentenceScanner implements EndOfSentenceScanner {

  // FIX: never reassigned after construction, so declared final
  private final char[] eosCharacters;

  /**
   * Initializes the current instance.
   *
   * @param eosCharacters the characters that may end a sentence
   */
  public DefaultEndOfSentenceScanner(char[] eosCharacters) {
    this.eosCharacters = eosCharacters;
  }

  public List<Integer> getPositions(String s) {
    return getPositions(s.toCharArray());
  }

  public List<Integer> getPositions(StringBuffer buf) {
    return getPositions(buf.toString().toCharArray());
  }

  /**
   * Scans the buffer and returns the indices of all characters that match
   * one of the end-of-sentence characters, in ascending order.
   */
  public List<Integer> getPositions(char[] cbuf) {
    List<Integer> positions = new ArrayList<>();
    // go through getEndOfSentenceCharacters() so subclass overrides are honored
    char[] eosChars = getEndOfSentenceCharacters();

    for (int i = 0; i < cbuf.length; i++) {
      for (char eosCharacter : eosChars) {
        if (cbuf[i] == eosCharacter) {
          positions.add(i);
          break;
        }
      }
    }
    return positions;
  }

  public char[] getEndOfSentenceCharacters() {
    // NOTE(review): exposes the internal array; callers could mutate it.
    // A defensive copy would change the documented identity semantics, so
    // the existing behavior is kept.
    return eosCharacters;
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/sentdetect/DefaultSDContextGenerator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.sentdetect;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;

import opennlp.tools.util.StringUtil;

/**
 * Generate event contexts for maxent decisions for sentence detection.
 *
 * <p><b>Note:</b> this class is stateful ({@code buf} and {@code collectFeats}
 * are reused across calls) and therefore not thread safe.
 */
public class DefaultSDContextGenerator implements SDContextGenerator {

  /**
   * String buffer for generating features.
   */
  protected StringBuffer buf;

  /**
   * List for holding features as they are generated.
   */
  protected List<String> collectFeats;

  // Abbreviations (e.g. "Mr.") learned from the training data; tokens ending
  // in one of these are less likely to be sentence boundaries.
  private Set<String> inducedAbbreviations;

  // Candidate end-of-sentence characters, e.g. '.', '!', '?'.
  private char[] eosCharacters;

  /**
   * Creates a new <code>SDContextGenerator</code> instance with
   * no induced abbreviations.
   *
   * @param eosCharacters the candidate end-of-sentence characters
   */
  public DefaultSDContextGenerator(char[] eosCharacters) {
    this(Collections.emptySet(), eosCharacters);
  }

  /**
   * Creates a new <code>SDContextGenerator</code> instance which uses
   * the set of induced abbreviations.
   *
   * @param inducedAbbreviations a <code>Set</code> of Strings
   *     representing induced abbreviations in the training data.
   *     Example: &quot;Mr.&quot;
   *
   * @param eosCharacters the candidate end-of-sentence characters
   */
  public DefaultSDContextGenerator(Set<String> inducedAbbreviations, char[] eosCharacters) {
    this.inducedAbbreviations = inducedAbbreviations;
    this.eosCharacters = eosCharacters;
    buf = new StringBuffer();
    collectFeats = new ArrayList<>();
  }

  // Makes line-break characters printable in feature strings so that '\n'
  // and '\r' produce distinct, readable "eos=" features.
  private static String escapeChar(Character c) {
    if (c == '\n') {
      return "<LF>";
    }

    if (c == '\r') {
      return "<CR>";
    }

    return new String(new char[]{c});
  }

  /* (non-Javadoc)
   * @see opennlp.tools.sentdetect.SDContextGenerator#getContext(java.lang.StringBuffer, int)
   */
  public String[] getContext(CharSequence sb, int position) {

    /*
     * String preceding the eos character in the eos token.
     */
    String prefix;

    /*
     * Space delimited token preceding token containing eos character.
     */
    String previous;

    /*
     * String following the eos character in the eos token.
     */
    String suffix;

    /*
     * Space delimited token following token containing eos character.
     */
    String next;

    int lastIndex = sb.length() - 1;
    { // compute space previous and space next features.
      if (position > 0 && StringUtil.isWhitespace(sb.charAt(position - 1)))
        collectFeats.add("sp");
      if (position < lastIndex && StringUtil.isWhitespace(sb.charAt(position + 1)))
        collectFeats.add("sn");
      collectFeats.add("eos=" + escapeChar(sb.charAt(position)));
    }
    int prefixStart = previousSpaceIndex(sb, position);

    int c = position;
    { ///assign prefix, stop if you run into a period though otherwise stop at space
      while (--c > prefixStart) {
        for (int eci = 0, ecl = eosCharacters.length; eci < ecl; eci++) {
          if (sb.charAt(c) == eosCharacters[eci]) {
            prefixStart = c;
            // after the increment, the next "--c" restores c == prefixStart,
            // making the while condition false and exiting the outer loop
            c++; // this gets us out of while loop.
            break;
          }
        }
      }
      prefix = String.valueOf(sb.subSequence(prefixStart, position)).trim();
    }
    int prevStart = previousSpaceIndex(sb, prefixStart);
    previous = String.valueOf(sb.subSequence(prevStart, prefixStart)).trim();

    int suffixEnd = nextSpaceIndex(sb, position, lastIndex);
    {
      c = position;
      // mirror of the prefix scan: shrink the suffix if another eos
      // character occurs before the next whitespace
      while (++c < suffixEnd) {
        for (int eci = 0, ecl = eosCharacters.length; eci < ecl; eci++) {
          if (sb.charAt(c) == eosCharacters[eci]) {
            suffixEnd = c;
            // after the decrement, the next "++c" restores c == suffixEnd,
            // making the while condition false and exiting the outer loop
            c--; // this gets us out of while loop.
            break;
          }
        }
      }
    }
    int nextEnd = nextSpaceIndex(sb, suffixEnd + 1, lastIndex + 1);
    if (position == lastIndex) {
      suffix = "";
      next = "";
    } else {
      suffix = String.valueOf(sb.subSequence(position + 1, suffixEnd)).trim();
      next = String.valueOf(sb.subSequence(suffixEnd + 1, nextEnd)).trim();
    }

    collectFeatures(prefix,suffix,previous,next, sb.charAt(position));

    String[] context = new String[collectFeats.size()];
    context = collectFeats.toArray(context);
    // collectFeats is reused between calls, so it must be cleared here
    collectFeats.clear();

    return context;
  }

  /**
   * Determines some of the features for the sentence detector and adds them to list features.
   *
   * @param prefix String preceding the eos character in the eos token.
   * @param suffix String following the eos character in the eos token.
   * @param previous Space delimited token preceding token containing eos character.
   * @param next Space delimited token following token containing eos character.
   *
   * @deprecated use {@link #collectFeatures(String, String, String, String, Character)} instead.
   */
  protected void collectFeatures(String prefix, String suffix, String previous,
      String next) {
    collectFeatures(prefix, suffix, previous, next, null);
  }

  /**
   * Determines some of the features for the sentence detector and adds them to list features.
   *
   * @param prefix String preceding the eos character in the eos token.
   * @param suffix String following the eos character in the eos token.
   * @param previous Space delimited token preceding token containing eos character.
   * @param next Space delimited token following token containing eos character.
   * @param eosChar the EOS character been analyzed
   */
  protected void collectFeatures(String prefix, String suffix, String previous,
      String next, Character eosChar) {
    // "x" features: the token fragment before the eos character
    buf.append("x=");
    buf.append(prefix);
    collectFeats.add(buf.toString());
    buf.setLength(0);
    if (!prefix.equals("")) {
      collectFeats.add(Integer.toString(prefix.length()));
      if (isFirstUpper(prefix)) {
        collectFeats.add("xcap");
      }
      // prefix + eos char (e.g. "Mr" + '.') matching a known abbreviation
      if (eosChar != null && inducedAbbreviations.contains(prefix + eosChar)) {
        collectFeats.add("xabbrev");
      }
    }

    // "v" features: the whitespace-delimited token before the eos token
    buf.append("v=");
    buf.append(previous);
    collectFeats.add(buf.toString());
    buf.setLength(0);
    if (!previous.equals("")) {
      if (isFirstUpper(previous)) {
        collectFeats.add("vcap");
      }
      if (inducedAbbreviations.contains(previous)) {
        collectFeats.add("vabbrev");
      }
    }

    // "s" features: the token fragment after the eos character
    buf.append("s=");
    buf.append(suffix);
    collectFeats.add(buf.toString());
    buf.setLength(0);
    if (!suffix.equals("")) {
      if (isFirstUpper(suffix)) {
        collectFeats.add("scap");
      }
      if (inducedAbbreviations.contains(suffix)) {
        collectFeats.add("sabbrev");
      }
    }

    // "n" features: the whitespace-delimited token after the eos token
    buf.append("n=");
    buf.append(next);
    collectFeats.add(buf.toString());
    buf.setLength(0);
    if (!next.equals("")) {
      if (isFirstUpper(next)) {
        collectFeats.add("ncap");
      }
      if (inducedAbbreviations.contains(next)) {
        collectFeats.add("nabbrev");
      }
    }
  }

  // NOTE(review): throws StringIndexOutOfBoundsException for an empty string;
  // callers only pass non-empty, trimmed tokens.
  private static boolean isFirstUpper(String s) {
    return Character.isUpperCase(s.charAt(0));
  }

  /**
   * Finds the index of the nearest space before a specified index which is not itself preceded by a space.
   *
   * @param sb The string buffer which contains the text being examined.
   * @param seek The index to begin searching from.
   * @return The index which contains the nearest space.
   */
  private static int previousSpaceIndex(CharSequence sb, int seek) {
    seek--;
    // scan left to the first whitespace character
    while (seek > 0 && !StringUtil.isWhitespace(sb.charAt(seek))) {
      seek--;
    }

    if (seek > 0 && StringUtil.isWhitespace(sb.charAt(seek))) {
      // skip over a run of whitespace to its left-most character
      while (seek > 0 && StringUtil.isWhitespace(sb.charAt(seek - 1)))
        seek--;
      return seek;
    }

    // no whitespace found; fall back to the start of the sequence
    return 0;
  }

  /**
   * Finds the index of the nearest space after a specified index.
   *
   * @param sb The string buffer which contains the text being examined.
   * @param seek The index to begin searching from.
   * @param lastIndex The highest index of the StringBuffer sb.
   * @return The index which contains the nearest space.
   */
  private static int nextSpaceIndex(CharSequence sb, int seek, int lastIndex) {
    seek++;
    char c;
    while (seek < lastIndex) {
      c = sb.charAt(seek);
      if (StringUtil.isWhitespace(c)) {
        // skip to the right-most character of a whitespace run
        while (sb.length() > seek + 1 && StringUtil.isWhitespace(sb.charAt(seek + 1)))
          seek++;
        return seek;
      }
      seek++;
    }
    // no whitespace found; fall back to the end of the searched range
    return lastIndex;
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/sentdetect/EmptyLinePreprocessorStream.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.sentdetect;

import java.io.IOException;

import opennlp.tools.util.FilterObjectStream;
import opennlp.tools.util.ObjectStream;

/**
 * Stream to to clean up empty lines for empty line separated document streams.<br>
 *
 * - Skips empty line at training data start<br>
 * - Transforms multiple empty lines in a row into one <br>
 * - Replaces white space lines with empty lines <br>
 * - TODO: Terminates last document with empty line if it is missing<br>
 * <br>
 * This stream should be used by the components that mark empty lines to mark document boundaries.
 * <p>
 * <b>Note:</b>
 * This class is not thread safe. <br>
 * Do not use this class, internal use only!
 */
public class EmptyLinePreprocessorStream extends FilterObjectStream<String, String> {

  // Starts true so leading empty lines in the underlying stream are skipped.
  private boolean lastLineWasEmpty = true;

  public EmptyLinePreprocessorStream(ObjectStream<String> in) {
    super(in);
  }

  // A line counts as empty when it contains nothing but whitespace.
  private static boolean isBlankLine(String line) {
    return line.trim().length() == 0;
  }

  public String read() throws IOException {
    String line = samples.read();

    if (lastLineWasEmpty) {
      lastLineWasEmpty = false;
      // collapse the run of empty lines that followed the previous boundary
      while (line != null && isBlankLine(line)) {
        line = samples.read();
      }
    }

    if (line == null) {
      return null;
    }

    if (isBlankLine(line)) {
      // normalize whitespace-only lines to a true empty line and remember
      // the boundary so following empty lines are collapsed
      lastLineWasEmpty = true;
      return "";
    }

    return line;
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/sentdetect/EndOfSentenceScanner.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.sentdetect; import java.util.List; /** * Scans Strings, StringBuffers, and char[] arrays for the offsets of * sentence ending characters. * * <p>Implementations of this interface can use regular expressions, * hand-coded DFAs, and other scanning techniques to locate end of * sentence offsets.</p> */ public interface EndOfSentenceScanner { /** * Returns an array of character which can indicate the end of a sentence. * @return an array of character which can indicate the end of a sentence. */ char[] getEndOfSentenceCharacters(); /** * The receiver scans the specified string for sentence ending characters and * returns their offsets. * * @param s a <code>String</code> value * @return a <code>List</code> of Integer objects. */ List<Integer> getPositions(String s); /** * The receiver scans `buf' for sentence ending characters and * returns their offsets. * * @param buf a <code>StringBuffer</code> value * @return a <code>List</code> of Integer objects. */ List<Integer> getPositions(StringBuffer buf); /** * The receiver scans `cbuf' for sentence ending characters and * returns their offsets. * * @param cbuf a <code>char[]</code> value * @return a <code>List</code> of Integer objects. 
*/ List<Integer> getPositions(char[] cbuf); }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/sentdetect/NewlineSentenceDetector.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.sentdetect;

import java.util.ArrayList;
import java.util.List;

import opennlp.tools.util.Span;

/**
 * The Newline Sentence Detector assumes that sentences are line delimited and
 * recognizes one sentence per non-empty line.
 */
public class NewlineSentenceDetector implements SentenceDetector {

  public String[] sentDetect(String s) {
    return Span.spansToStrings(sentPosDetect(s), s);
  }

  public Span[] sentPosDetect(String s) {
    List<Span> sentences = new ArrayList<>();

    int start = 0;

    for (int i = 0; i < s.length(); i++) {
      char c = s.charAt(i);

      if (c == '\n' || c == '\r') {
        // emit the segment since the last line break, if it is non-empty
        if (i - start > 0) {
          Span span = new Span(start, i).trim(s);

          if (span.length() > 0) {
            sentences.add(span);
          }
        }

        // BUGFIX: advance past the line break unconditionally. Previously this
        // only happened when the segment was non-empty, so after an empty line
        // the already-consumed newline characters were re-included in the next
        // candidate span and correctness relied on trim() stripping them.
        start = i + 1;
      }
    }

    // emit the trailing segment after the last line break, if any
    if (s.length() - start > 0) {
      Span span = new Span(start, s.length()).trim(s);

      if (span.length() > 0) {
        sentences.add(span);
      }
    }

    return sentences.toArray(new Span[sentences.size()]);
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/sentdetect/SDContextGenerator.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.sentdetect; /** * Interface for {@link SentenceDetectorME} context generators. */ public interface SDContextGenerator { /** * Returns an array of contextual features for the potential sentence boundary at the * specified position within the specified string buffer. * * @param s The {@link String} for which sentences are being determined. * @param position An index into the specified string buffer when a sentence boundary may occur. * * @return an array of contextual features for the potential sentence boundary at the * specified position within the specified string buffer. */ String[] getContext(CharSequence s, int position); }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/sentdetect/SDCrossValidator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.sentdetect;

import java.io.IOException;

import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.TrainingParameters;
import opennlp.tools.util.eval.CrossValidationPartitioner;
import opennlp.tools.util.eval.FMeasure;
import opennlp.tools.util.model.ModelUtil;

/**
 * A cross validator for the sentence detector.
 */
public class SDCrossValidator {

  private final String languageCode;

  private final TrainingParameters params;

  // Accumulates the merged F-measure across all folds.
  private final FMeasure fmeasure = new FMeasure();

  private final SentenceDetectorEvaluationMonitor[] listeners;

  private final SentenceDetectorFactory sdFactory;

  /**
   * Initializes the cross validator.
   *
   * @param languageCode the ISO language code of the training data
   * @param params the training parameters
   * @param sdFactory the factory providing detector resources
   * @param listeners optional evaluation sample listeners
   */
  public SDCrossValidator(String languageCode, TrainingParameters params,
      SentenceDetectorFactory sdFactory,
      SentenceDetectorEvaluationMonitor... listeners) {
    this.languageCode = languageCode;
    this.params = params;
    this.listeners = listeners;
    this.sdFactory = sdFactory;
  }

  /**
   * @deprecated Use
   *     {@link #SDCrossValidator(String, TrainingParameters,
   *     SentenceDetectorFactory, SentenceDetectorEvaluationMonitor...)}
   *     and pass in a {@link SentenceDetectorFactory}.
   */
  @Deprecated
  public SDCrossValidator(String languageCode, TrainingParameters params) {
    this(languageCode, params, new SentenceDetectorFactory(languageCode, true,
        null, null));
  }

  /**
   * @deprecated Use
   *     {@link #SDCrossValidator(String, TrainingParameters, SentenceDetectorFactory,
   *     SentenceDetectorEvaluationMonitor...)}
   *     instead and pass in a {@link SentenceDetectorFactory}.
   */
  @Deprecated
  public SDCrossValidator(String languageCode, TrainingParameters params,
      SentenceDetectorEvaluationMonitor... listeners) {
    this(languageCode, params, new SentenceDetectorFactory(languageCode, true,
        null, null), listeners);
  }

  /**
   * @deprecated Use {@link #SDCrossValidator(String, TrainingParameters,
   *     SentenceDetectorFactory, SentenceDetectorEvaluationMonitor...)}
   *     instead and pass in a TrainingParameters object.
   */
  @Deprecated
  public SDCrossValidator(String languageCode) {
    this(languageCode, ModelUtil.createDefaultTrainingParameters());
  }

  /**
   * Starts the evaluation.
   *
   * @param samples
   *          the data to train and test
   * @param nFolds
   *          number of folds
   *
   * @throws IOException if reading from the sample stream fails
   */
  public void evaluate(ObjectStream<SentenceSample> samples, int nFolds)
      throws IOException {

    CrossValidationPartitioner<SentenceSample> partitioner =
        new CrossValidationPartitioner<>(samples, nFolds);

    while (partitioner.hasNext()) {

      CrossValidationPartitioner.TrainingSampleStream<SentenceSample> trainingSampleStream =
          partitioner.next();

      SentenceModel model;

      // train on this fold's training partition
      model = SentenceDetectorME.train(languageCode, trainingSampleStream,
          sdFactory, params);

      // do testing on the held-out partition
      SentenceDetectorEvaluator evaluator = new SentenceDetectorEvaluator(
          new SentenceDetectorME(model), listeners);

      evaluator.evaluate(trainingSampleStream.getTestSampleStream());

      fmeasure.mergeInto(evaluator.getFMeasure());
    }
  }

  /**
   * @return the F-measure merged over all evaluated folds
   */
  public FMeasure getFMeasure() {
    return fmeasure;
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/sentdetect/SDEventStream.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.sentdetect;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;

import opennlp.tools.ml.model.Event;
import opennlp.tools.util.AbstractEventStream;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.Span;

public class SDEventStream extends AbstractEventStream<SentenceSample> {

  // Produces the contextual features for each end-of-sentence candidate.
  private SDContextGenerator cg;

  // Locates candidate end-of-sentence offsets within a sentence.
  private EndOfSentenceScanner scanner;

  /**
   * Initializes the current instance.
   *
   * @param samples the sentence samples to turn into training events
   * @param cg the context generator used for feature extraction
   * @param scanner the scanner locating end-of-sentence candidates
   */
  public SDEventStream(ObjectStream<SentenceSample> samples, SDContextGenerator cg,
      EndOfSentenceScanner scanner) {
    super(samples);

    this.cg = cg;
    this.scanner = scanner;
  }

  @Override
  protected Iterator<Event> createEvents(SentenceSample sample) {

    Collection<Event> events = new ArrayList<>();

    for (Span sentence : sample.getSentences()) {

      String coveredText = sentence.getCoveredText(sample.getDocument()).toString();

      Iterator<Integer> candidates = scanner.getPositions(coveredText).iterator();

      while (candidates.hasNext()) {
        int eosOffset = candidates.next();

        // only the final candidate inside a sentence is a real boundary;
        // all earlier ones are negative (no-split) examples
        String outcome = candidates.hasNext()
            ? SentenceDetectorME.NO_SPLIT
            : SentenceDetectorME.SPLIT;

        events.add(new Event(outcome,
            cg.getContext(sample.getDocument(), sentence.getStart() + eosOffset)));
      }
    }

    return events.iterator();
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/sentdetect/SentenceDetector.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.sentdetect;

import opennlp.tools.util.Span;

/**
 * The interface for sentence detectors, which find the sentence boundaries in
 * a text.
 */
public interface SentenceDetector {

  /**
   * Sentence detect a string.
   *
   * @param s The string to be sentence detected.
   * @return The String[] with the individual sentences as the array
   *     elements.
   */
  String[] sentDetect(String s);

  /**
   * Sentence detect a string.
   *
   * @param s The string to be sentence detected.
   *
   * @return The Span[] with the spans (offsets into s) for each
   *     detected sentence as the individuals array elements.
   */
  Span[] sentPosDetect(String s);
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/sentdetect/SentenceDetectorEvaluationMonitor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.sentdetect;

import opennlp.tools.util.eval.EvaluationMonitor;

/**
 * Marker interface for listeners that observe {@link SentenceSample}
 * evaluation outcomes during sentence detector evaluation.
 */
public interface SentenceDetectorEvaluationMonitor extends
    EvaluationMonitor<SentenceSample> {
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/sentdetect/SentenceDetectorEvaluator.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.sentdetect; import opennlp.tools.util.Span; import opennlp.tools.util.eval.Evaluator; import opennlp.tools.util.eval.FMeasure; /** * The {@link SentenceDetectorEvaluator} measures the performance of * the given {@link SentenceDetector} with the provided reference * {@link SentenceSample}s. * * @see Evaluator * @see SentenceDetector * @see SentenceSample */ public class SentenceDetectorEvaluator extends Evaluator<SentenceSample> { private FMeasure fmeasure = new FMeasure(); /** * The {@link SentenceDetector} used to predict sentences. */ private SentenceDetector sentenceDetector; /** * Initializes the current instance. * * @param sentenceDetector * @param listeners evaluation sample listeners */ public SentenceDetectorEvaluator(SentenceDetector sentenceDetector, SentenceDetectorEvaluationMonitor... 
listeners) { super(listeners); this.sentenceDetector = sentenceDetector; } private Span[] trimSpans(String document, Span[] spans) { Span[] trimedSpans = new Span[spans.length]; for (int i = 0; i < spans.length; i++) { trimedSpans[i] = spans[i].trim(document); } return trimedSpans; } @Override protected SentenceSample processSample(SentenceSample sample) { Span[] predictions = trimSpans(sample.getDocument(), sentenceDetector.sentPosDetect(sample.getDocument())); Span[] references = trimSpans(sample.getDocument(), sample.getSentences()); fmeasure.updateScores(references, predictions); return new SentenceSample(sample.getDocument(), predictions); } public FMeasure getFMeasure() { return fmeasure; } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/sentdetect/SentenceDetectorFactory.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.sentdetect;

import java.util.Collections;
import java.util.Map;
import java.util.Set;

import opennlp.tools.dictionary.Dictionary;
import opennlp.tools.sentdetect.lang.Factory;
import opennlp.tools.util.BaseToolFactory;
import opennlp.tools.util.InvalidFormatException;
import opennlp.tools.util.ext.ExtensionLoader;

/**
 * The factory that provides SentenceDetecor default implementations and
 * resources.
 *
 * <p>Fields are resolved lazily: when a value was not supplied
 * programmatically, the getters fall back to the model's artifact provider
 * (when loading from a model) or to language-dependent defaults.
 */
public class SentenceDetectorFactory extends BaseToolFactory {

  private String languageCode;
  private char[] eosCharacters;
  private Dictionary abbreviationDictionary;
  // Boxed Boolean: null means "not yet resolved from the manifest".
  private Boolean useTokenEnd = null;

  // Artifact/manifest keys used when (de)serializing a model.
  private static final String ABBREVIATIONS_ENTRY_NAME = "abbreviations.dictionary";

  private static final String EOS_CHARACTERS_PROPERTY = "eosCharacters";

  private static final String TOKEN_END_PROPERTY = "useTokenEnd";

  /**
   * Creates a {@link SentenceDetectorFactory} that provides the default
   * implementation of the resources.
   */
  public SentenceDetectorFactory() {
  }

  /**
   * Creates a {@link SentenceDetectorFactory}. Use this constructor to
   * programmatically create a factory.
   *
   * @param languageCode the ISO language code
   * @param useTokenEnd whether the token end marks the sentence boundary
   * @param abbreviationDictionary abbreviation dictionary, may be null
   * @param eosCharacters end-of-sentence characters, may be null
   */
  public SentenceDetectorFactory(String languageCode, boolean useTokenEnd,
      Dictionary abbreviationDictionary, char[] eosCharacters) {
    this.init(languageCode, useTokenEnd, abbreviationDictionary, eosCharacters);
  }

  protected void init(String languageCode, boolean useTokenEnd,
      Dictionary abbreviationDictionary, char[] eosCharacters) {
    this.languageCode = languageCode;
    this.useTokenEnd = useTokenEnd;
    this.eosCharacters = eosCharacters;
    this.abbreviationDictionary = abbreviationDictionary;
  }

  @Override
  public void validateArtifactMap() throws InvalidFormatException {

    // the useTokenEnd manifest property is required for a valid model
    if (this.artifactProvider.getManifestProperty(TOKEN_END_PROPERTY) == null)
      throw new InvalidFormatException(TOKEN_END_PROPERTY
          + " is a mandatory property!");

    // abbreviation dictionary is optional, but must have the right type
    Object abbreviationsEntry = this.artifactProvider
        .getArtifact(ABBREVIATIONS_ENTRY_NAME);

    if (abbreviationsEntry != null && !(abbreviationsEntry instanceof Dictionary)) {
      throw new InvalidFormatException(
          "Abbreviations dictionary '" + abbreviationsEntry +
              "' has wrong type, needs to be of type Dictionary!");
    }
  }

  @Override
  public Map<String, Object> createArtifactMap() {
    Map<String, Object> artifactMap = super.createArtifactMap();

    // Abbreviations are optional
    if (abbreviationDictionary != null)
      artifactMap.put(ABBREVIATIONS_ENTRY_NAME, abbreviationDictionary);

    return artifactMap;
  }

  @Override
  public Map<String, String> createManifestEntries() {
    Map<String, String> manifestEntries = super.createManifestEntries();

    manifestEntries.put(TOKEN_END_PROPERTY, Boolean.toString(isUseTokenEnd()));

    // EOS characters are optional
    if (getEOSCharacters() != null)
      manifestEntries.put(EOS_CHARACTERS_PROPERTY,
          eosCharArrayToString(getEOSCharacters()));

    return manifestEntries;
  }

  /**
   * Factory method that instantiates either the default factory or the
   * given subclass (loaded via the extension mechanism).
   *
   * @param subclassName fully-qualified subclass name, or null for the default
   * @param languageCode the ISO language code
   * @param useTokenEnd whether the token end marks the sentence boundary
   * @param abbreviationDictionary abbreviation dictionary, may be null
   * @param eosCharacters end-of-sentence characters, may be null
   *
   * @throws InvalidFormatException if the subclass cannot be instantiated
   */
  public static SentenceDetectorFactory create(String subclassName,
      String languageCode, boolean useTokenEnd,
      Dictionary abbreviationDictionary, char[] eosCharacters)
      throws InvalidFormatException {
    if (subclassName == null) {
      // will create the default factory
      return new SentenceDetectorFactory(languageCode, useTokenEnd,
          abbreviationDictionary, eosCharacters);
    }
    try {
      SentenceDetectorFactory theFactory = ExtensionLoader
          .instantiateExtension(SentenceDetectorFactory.class, subclassName);
      theFactory.init(languageCode, useTokenEnd, abbreviationDictionary,
          eosCharacters);
      return theFactory;
    } catch (Exception e) {
      String msg = "Could not instantiate the " + subclassName
          + ". The initialization throw an exception.";
      System.err.println(msg);
      e.printStackTrace();
      throw new InvalidFormatException(msg, e);
    }
  }

  /**
   * Resolution order: programmatic value, then the model manifest,
   * then the language-dependent default.
   */
  public char[] getEOSCharacters() {
    if (this.eosCharacters == null) {
      if (artifactProvider != null) {
        String prop = this.artifactProvider
            .getManifestProperty(EOS_CHARACTERS_PROPERTY);
        if (prop != null) {
          this.eosCharacters = eosStringToCharArray(prop);
        }
      } else {
        // get from language dependent factory
        Factory f = new Factory();
        this.eosCharacters = f.getEOSCharacters(languageCode);
      }
    }
    return this.eosCharacters;
  }

  public boolean isUseTokenEnd() {
    // lazily resolved from the manifest when loading from a model
    if (this.useTokenEnd == null && artifactProvider != null) {
      this.useTokenEnd = Boolean.valueOf(artifactProvider
          .getManifestProperty(TOKEN_END_PROPERTY));
    }
    return this.useTokenEnd;
  }

  public Dictionary getAbbreviationDictionary() {
    // lazily resolved from the model's artifacts; stays null when absent
    if (this.abbreviationDictionary == null && artifactProvider != null) {
      this.abbreviationDictionary = artifactProvider
          .getArtifact(ABBREVIATIONS_ENTRY_NAME);
    }
    return this.abbreviationDictionary;
  }

  public String getLanguageCode() {
    // lazily resolved from the model's language when loading from a model
    if (this.languageCode == null && artifactProvider != null) {
      this.languageCode = this.artifactProvider.getLanguage();
    }
    return this.languageCode;
  }

  public EndOfSentenceScanner getEndOfSentenceScanner() {
    Factory f = new Factory();
    char[] eosChars = getEOSCharacters();
    // prefer explicit EOS characters; otherwise fall back to the
    // language-dependent scanner
    if (eosChars != null && eosChars.length > 0) {
      return f.createEndOfSentenceScanner(eosChars);
    } else {
      return f.createEndOfSentenceScanner(this.languageCode);
    }
  }

  public SDContextGenerator getSDContextGenerator() {
    Factory f = new Factory();
    char[] eosChars = getEOSCharacters();
    Set<String> abbs;
    Dictionary abbDict = getAbbreviationDictionary();
    if (abbDict != null) {
      abbs = abbDict.asStringSet();
    } else {
      abbs = Collections.emptySet();
    }
    // prefer explicit EOS characters; otherwise fall back to the
    // language-dependent context generator
    if (eosChars != null && eosChars.length > 0) {
      return f.createSentenceContextGenerator(abbs, eosChars);
    } else {
      return f.createSentenceContextGenerator(this.languageCode, abbs);
    }
  }

  // Serializes the EOS character array into a manifest property value.
  private String eosCharArrayToString(char[] eosCharacters) {
    return String.valueOf(eosCharacters);
  }

  // Deserializes the manifest property value back into a char array.
  private char[] eosStringToCharArray(String eosCharacters) {
    return eosCharacters.toCharArray();
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/sentdetect/SentenceDetectorME.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.sentdetect;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

import opennlp.tools.dictionary.Dictionary;
import opennlp.tools.ml.EventTrainer;
import opennlp.tools.ml.TrainerFactory;
import opennlp.tools.ml.model.Event;
import opennlp.tools.ml.model.MaxentModel;
import opennlp.tools.sentdetect.lang.Factory;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.Span;
import opennlp.tools.util.StringUtil;
import opennlp.tools.util.TrainingParameters;
import opennlp.tools.util.model.ModelUtil;

/**
 * A sentence detector for splitting up raw text into sentences.
 * <p>
 * A maximum entropy model is used to evaluate end-of-sentence characters in a
 * string to determine if they signify the end of a sentence.
 * <p>
 * NOTE(review): this class keeps per-call state in {@code sentProbs}, so a
 * single instance is not safe for concurrent use by multiple threads.
 */
public class SentenceDetectorME implements SentenceDetector {

  /**
   * Constant indicates a sentence split.
   */
  public static final String SPLIT = "s";

  /**
   * Constant indicates no sentence split.
   */
  public static final String NO_SPLIT = "n";

  /**
   * The maximum entropy model to use to evaluate contexts.
   */
  private MaxentModel model;

  /**
   * The feature context generator.
   */
  private final SDContextGenerator cgen;

  /**
   * The {@link EndOfSentenceScanner} to use when scanning for end of sentence offsets.
   */
  private final EndOfSentenceScanner scanner;

  /**
   * The list of probabilities associated with each decision.
   * Rewritten by every call to {@link #sentPosDetect(String)}.
   */
  private List<Double> sentProbs = new ArrayList<>();

  // if true, a sentence boundary is placed after the trailing whitespace of
  // the end-of-sentence token rather than directly after the token
  protected boolean useTokenEnd;

  /**
   * Initializes the current instance.
   *
   * @param model the {@link SentenceModel}
   */
  public SentenceDetectorME(SentenceModel model) {
    SentenceDetectorFactory sdFactory = model.getFactory();
    this.model = model.getMaxentModel();
    cgen = sdFactory.getSDContextGenerator();
    scanner = sdFactory.getEndOfSentenceScanner();
    useTokenEnd = sdFactory.isUseTokenEnd();
  }

  /**
   * @deprecated Use a {@link SentenceDetectorFactory} to extend
   *     SentenceDetector functionality.
   */
  public SentenceDetectorME(SentenceModel model, Factory factory) {
    this.model = model.getMaxentModel();
    // if the model has custom EOS characters set, use this to get the context
    // generator and the EOS scanner; otherwise use language-specific defaults
    char[] customEOSCharacters = model.getEosCharacters();
    if (customEOSCharacters == null) {
      cgen = factory.createSentenceContextGenerator(model.getLanguage(),
          getAbbreviations(model.getAbbreviations()));
      scanner = factory.createEndOfSentenceScanner(model.getLanguage());
    } else {
      cgen = factory.createSentenceContextGenerator(
          getAbbreviations(model.getAbbreviations()), customEOSCharacters);
      scanner = factory.createEndOfSentenceScanner(customEOSCharacters);
    }
    useTokenEnd = model.useTokenEnd();
  }

  // Null-safe conversion of an abbreviation dictionary to a string set.
  private static Set<String> getAbbreviations(Dictionary abbreviations) {
    if (abbreviations == null) {
      return Collections.emptySet();
    }
    return abbreviations.asStringSet();
  }

  /**
   * Detect sentences in a String.
   *
   * @param s The string to be processed.
   *
   * @return A string array containing individual sentences as elements.
   */
  public String[] sentDetect(String s) {
    Span[] spans = sentPosDetect(s);
    String[] sentences;
    if (spans.length != 0) {
      sentences = new String[spans.length];
      for (int si = 0; si < spans.length; si++) {
        sentences[si] = spans[si].getCoveredText(s).toString();
      }
    } else {
      sentences = new String[] {};
    }
    return sentences;
  }

  // Returns the index of the first whitespace character at or after pos,
  // or s.length() if there is none.
  private int getFirstWS(String s, int pos) {
    while (pos < s.length() && !StringUtil.isWhitespace(s.charAt(pos)))
      pos++;
    return pos;
  }

  // Returns the index of the first non-whitespace character at or after pos,
  // or s.length() if there is none.
  private int getFirstNonWS(String s, int pos) {
    while (pos < s.length() && StringUtil.isWhitespace(s.charAt(pos)))
      pos++;
    return pos;
  }

  /**
   * Detect the position of the first words of sentences in a String.
   *
   * @param s The string to be processed.
   * @return A {@link Span} array; each span covers one detected sentence and
   *     carries the probability of the split decision.
   */
  public Span[] sentPosDetect(String s) {
    sentProbs.clear();
    StringBuffer sb = new StringBuffer(s);
    // candidate end-of-sentence character offsets
    List<Integer> enders = scanner.getPositions(s);
    // start offsets of sentences accepted by the model
    List<Integer> positions = new ArrayList<>(enders.size());

    // index tracks the start of the sentence currently being evaluated
    for (int i = 0, end = enders.size(), index = 0; i < end; i++) {
      int cint = enders.get(i);
      // skip over the leading parts of non-token final delimiters
      int fws = getFirstWS(s,cint + 1);
      if (i + 1 < end && enders.get(i + 1) < fws) {
        continue;
      }
      // skip candidates that fall before an already-committed sentence start
      if (positions.size() > 0 && cint < positions.get(positions.size() - 1)) continue;

      double[] probs = model.eval(cgen.getContext(sb, cint));
      String bestOutcome = model.getBestOutcome(probs);

      if (bestOutcome.equals(SPLIT) && isAcceptableBreak(s, index, cint)) {
        if (index != cint) {
          if (useTokenEnd) {
            positions.add(getFirstNonWS(s, getFirstWS(s,cint + 1)));
          } else {
            positions.add(getFirstNonWS(s, cint + 1));
          }
          sentProbs.add(probs[model.getIndex(bestOutcome)]);
        }
        index = cint + 1;
      }
    }

    int[] starts = new int[positions.size()];
    for (int i = 0; i < starts.length; i++) {
      starts[i] = positions.get(i);
    }

    // string does not contain sentence end positions
    if (starts.length == 0) {
      // remove leading and trailing whitespace
      int start = 0;
      int end = s.length();

      while (start < s.length() && StringUtil.isWhitespace(s.charAt(start)))
        start++;

      while (end > 0 && StringUtil.isWhitespace(s.charAt(end - 1)))
        end--;

      if (end - start > 0) {
        sentProbs.add(1d);
        return new Span[] {new Span(start, end)};
      }
      else
        return new Span[0];
    }

    // Convert the sentence end indexes to spans
    boolean leftover = starts[starts.length - 1] != s.length();
    Span[] spans = new Span[leftover ? starts.length + 1 : starts.length];

    for (int si = 0; si < starts.length; si++) {
      int start;

      if (si == 0) {
        start = 0;
      }
      else {
        start = starts[si - 1];
      }

      // A span might contain only white spaces, in this case the length of
      // the span will be zero after trimming and should be ignored.
      Span span = new Span(start, starts[si]).trim(s);
      if (span.length() > 0) {
        spans[si] = span;
      }
      else {
        sentProbs.remove(si);
      }
    }

    if (leftover) {
      Span span = new Span(starts[starts.length - 1], s.length()).trim(s);
      if (span.length() > 0) {
        spans[spans.length - 1] = span;
        sentProbs.add(1d);
      }
    }
    /*
     * set the prob for each span
     *
     * NOTE(review): a whitespace-only span leaves a null slot in spans[]
     * while sentProbs.remove(si) shifts the probability list; this loop then
     * dereferences spans[i] — looks like it could NPE / mis-align in that
     * corner case. Preserved as-is; confirm against upstream behavior.
     */
    for (int i = 0; i < spans.length; i++) {
      double prob = sentProbs.get(i);
      spans[i] = new Span(spans[i], prob);
    }

    return spans;
  }

  /**
   * Returns the probabilities associated with the most recent
   * calls to sentDetect().
   *
   * @return probability for each sentence returned for the most recent
   *     call to sentDetect. If not applicable an empty array is returned.
   */
  public double[] getSentenceProbabilities() {
    double[] sentProbArray = new double[sentProbs.size()];
    for (int i = 0; i < sentProbArray.length; i++) {
      sentProbArray[i] = sentProbs.get(i);
    }
    return sentProbArray;
  }

  /**
   * Allows subclasses to check an overzealous (read: poorly
   * trained) model from flagging obvious non-breaks as breaks based
   * on some boolean determination of a break's acceptability.
   *
   * <p>The implementation here always returns true, which means
   * that the MaxentModel's outcome is taken as is.</p>
   *
   * @param s the string in which the break occurred.
   * @param fromIndex the start of the segment currently being evaluated
   * @param candidateIndex the index of the candidate sentence ending
   * @return true if the break is acceptable
   */
  protected boolean isAcceptableBreak(String s, int fromIndex, int candidateIndex) {
    return true;
  }

  /**
   * @deprecated Use
   *     {@link #train(String, ObjectStream, SentenceDetectorFactory, TrainingParameters)}
   *     and pass in a {@link SentenceDetectorFactory}.
   */
  public static SentenceModel train(String languageCode,
      ObjectStream<SentenceSample> samples, boolean useTokenEnd,
      Dictionary abbreviations, TrainingParameters mlParams) throws IOException {
    SentenceDetectorFactory sdFactory = new SentenceDetectorFactory(
        languageCode, useTokenEnd, abbreviations, null);
    return train(languageCode, samples, sdFactory, mlParams);
  }

  /**
   * Trains a new sentence detection model.
   *
   * @param languageCode ISO language code of the training material
   * @param samples stream of training sentences
   * @param sdFactory supplies the context generator and EOS scanner
   * @param mlParams machine-learning training parameters
   * @return the trained {@link SentenceModel}
   * @throws IOException if reading the sample stream fails
   */
  public static SentenceModel train(String languageCode,
      ObjectStream<SentenceSample> samples, SentenceDetectorFactory sdFactory,
      TrainingParameters mlParams) throws IOException {
    Map<String, String> manifestInfoEntries = new HashMap<>();

    // TODO: Fix the EventStream to throw exceptions when training goes wrong
    ObjectStream<Event> eventStream = new SDEventStream(samples,
        sdFactory.getSDContextGenerator(), sdFactory.getEndOfSentenceScanner());

    EventTrainer trainer = TrainerFactory.getEventTrainer(mlParams, manifestInfoEntries);

    MaxentModel sentModel = trainer.train(eventStream);

    return new SentenceModel(languageCode, sentModel, manifestInfoEntries, sdFactory);
  }

  /**
   * @deprecated Use
   *     {@link #train(String, ObjectStream, SentenceDetectorFactory, TrainingParameters)}
   *     and pass in a {@link SentenceDetectorFactory}.
   */
  @Deprecated
  public static SentenceModel train(String languageCode,
      ObjectStream<SentenceSample> samples, boolean useTokenEnd,
      Dictionary abbreviations) throws IOException {
    return train(languageCode, samples, useTokenEnd, abbreviations,
        ModelUtil.createDefaultTrainingParameters());
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/sentdetect/SentenceModel.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.sentdetect;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.nio.file.Path;
import java.util.Map;

import opennlp.tools.dictionary.Dictionary;
import opennlp.tools.ml.model.MaxentModel;
import opennlp.tools.util.BaseToolFactory;
import opennlp.tools.util.InvalidFormatException;
import opennlp.tools.util.model.BaseModel;
import opennlp.tools.util.model.ModelUtil;

/**
 * The {@link SentenceModel} is the model used
 * by a learnable {@link SentenceDetector}.
 *
 * @see SentenceDetectorME
 */
public class SentenceModel extends BaseModel {

  private static final String COMPONENT_NAME = "SentenceDetectorME";

  // name of the artifact holding the maxent model inside the model package
  private static final String MAXENT_MODEL_ENTRY_NAME = "sent.model";

  /**
   * Creates a model from a trained maxent model and the factory used to
   * train it. This is the preferred constructor.
   */
  public SentenceModel(String languageCode, MaxentModel sentModel,
      Map<String, String> manifestInfoEntries, SentenceDetectorFactory sdFactory) {
    super(COMPONENT_NAME, languageCode, manifestInfoEntries, sdFactory);
    artifactMap.put(MAXENT_MODEL_ENTRY_NAME, sentModel);
    checkArtifactMap();
  }

  /**
   * TODO: was added in 1.5.3 -&gt; remove
   * @deprecated Use
   *     {@link #SentenceModel(String, MaxentModel, Map, SentenceDetectorFactory)}
   *     instead and pass in a {@link SentenceDetectorFactory}
   */
  public SentenceModel(String languageCode, MaxentModel sentModel,
      boolean useTokenEnd, Dictionary abbreviations, char[] eosCharacters,
      Map<String, String> manifestInfoEntries) {
    this(languageCode, sentModel, manifestInfoEntries,
        new SentenceDetectorFactory(languageCode, useTokenEnd, abbreviations,
            eosCharacters));
  }

  /**
   * TODO: was added in 1.5.3 -&gt; remove
   *
   * @deprecated Use
   *     {@link #SentenceModel(String, MaxentModel, Map, SentenceDetectorFactory)}
   *     instead and pass in a {@link SentenceDetectorFactory}
   */
  public SentenceModel(String languageCode, MaxentModel sentModel,
      boolean useTokenEnd, Dictionary abbreviations, char[] eosCharacters) {
    this(languageCode, sentModel, useTokenEnd, abbreviations, eosCharacters,
        null);
  }

  // Convenience constructor: default EOS characters.
  public SentenceModel(String languageCode, MaxentModel sentModel,
      boolean useTokenEnd, Dictionary abbreviations,
      Map<String, String> manifestInfoEntries) {
    this(languageCode, sentModel, useTokenEnd, abbreviations, null,
        manifestInfoEntries);
  }

  // Convenience constructor: default EOS characters, no manifest entries.
  public SentenceModel(String languageCode, MaxentModel sentModel,
      boolean useTokenEnd, Dictionary abbreviations) {
    this (languageCode, sentModel, useTokenEnd, abbreviations, null, null);
  }

  /**
   * Loads a serialized model from the given stream.
   *
   * @throws IOException if the model cannot be read or is invalid
   */
  public SentenceModel(InputStream in) throws IOException {
    super(COMPONENT_NAME, in);
  }

  /** Loads a serialized model from the given file. */
  public SentenceModel(File modelFile) throws IOException {
    super(COMPONENT_NAME, modelFile);
  }

  /** Loads a serialized model from the given path. */
  public SentenceModel(Path modelPath) throws IOException {
    this(modelPath.toFile());
  }

  /** Loads a serialized model from the given URL. */
  public SentenceModel(URL modelURL) throws IOException {
    super(COMPONENT_NAME, modelURL);
  }

  // Verifies the artifact map contains a maxent model whose outcomes are
  // compatible with SentenceDetectorME (SPLIT / NO_SPLIT).
  @Override
  protected void validateArtifactMap() throws InvalidFormatException {
    super.validateArtifactMap();

    if (!(artifactMap.get(MAXENT_MODEL_ENTRY_NAME) instanceof MaxentModel)) {
      throw new InvalidFormatException("Unable to find " + MAXENT_MODEL_ENTRY_NAME +
          " maxent model!");
    }

    if (!ModelUtil.validateOutcomes(getMaxentModel(), SentenceDetectorME.SPLIT,
        SentenceDetectorME.NO_SPLIT)) {
      throw new InvalidFormatException("The maxent model is not compatible " +
          "with the sentence detector!");
    }
  }

  public SentenceDetectorFactory getFactory() {
    return (SentenceDetectorFactory) this.toolFactory;
  }

  @Override
  protected Class<? extends BaseToolFactory> getDefaultFactory() {
    return SentenceDetectorFactory.class;
  }

  public MaxentModel getMaxentModel() {
    return (MaxentModel) artifactMap.get(MAXENT_MODEL_ENTRY_NAME);
  }

  /** @return the abbreviation dictionary, or {@code null} if there is no factory */
  public Dictionary getAbbreviations() {
    if (getFactory() != null) {
      return getFactory().getAbbreviationDictionary();
    }
    return null;
  }

  /** @return true when there is no factory; otherwise the factory's setting */
  public boolean useTokenEnd() {
    return getFactory() == null || getFactory().isUseTokenEnd();
  }

  /** @return the custom EOS characters, or {@code null} if there is no factory */
  public char[] getEosCharacters() {
    if (getFactory() != null) {
      return getFactory().getEOSCharacters();
    }
    return null;
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/sentdetect/SentenceSample.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.sentdetect;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Objects;

import opennlp.tools.tokenize.Detokenizer;
import opennlp.tools.util.Span;

/**
 * A {@link SentenceSample} contains a document together with the character
 * spans of the individual sentences inside it.
 */
public class SentenceSample implements Serializable {

  private final String document;

  private final List<Span> sentences;

  /**
   * Creates a sample from a document and the spans of its sentences.
   *
   * @param document the raw document text
   * @param sentences the span of each sentence; every span must lie inside
   *                  the document text
   * @throws IllegalArgumentException if a span reaches past the document end
   */
  public SentenceSample(CharSequence document, Span... sentences) {
    this.document = document.toString();
    this.sentences =
        Collections.unmodifiableList(new ArrayList<>(Arrays.asList(sentences)));

    // reject spans that point outside of the document text
    for (Span sentence : sentences) {
      if (sentence.getEnd() > document.length()) {
        throw new IllegalArgumentException(
            String.format("Sentence span is outside of document text [len %d] and span %s",
            document.length(), sentence));
      }
    }
  }

  /**
   * Creates a sample by detokenizing pre-tokenized sentences into one
   * document string, recording the span each sentence occupies.
   */
  public SentenceSample(Detokenizer detokenizer, String[][] sentences) {
    List<Span> sentenceSpans = new ArrayList<>(sentences.length);
    StringBuilder docText = new StringBuilder();

    for (String[] tokens : sentences) {
      String detokenized = detokenizer.detokenize(tokens, null);

      int start = docText.length();
      docText.append(detokenized);
      sentenceSpans.add(new Span(start, docText.length()));
    }

    this.document = docText.toString();
    this.sentences = Collections.unmodifiableList(sentenceSpans);
  }

  /**
   * Retrieves the document.
   *
   * @return the document text
   */
  public String getDocument() {
    return document;
  }

  /**
   * Retrieves the sentences.
   *
   * @return the spans of the sentences in the document
   */
  public Span[] getSentences() {
    return sentences.toArray(new Span[sentences.size()]);
  }

  // TODO: This one must output the tags!
  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();

    for (Span sentSpan : sentences) {
      // escape literal line breaks so each sentence stays on one line
      String text = sentSpan.getCoveredText(document).toString();
      sb.append(text.replace("\r", "<CR>").replace("\n", "<LF>"));
      sb.append("\n");
    }

    return sb.toString();
  }

  @Override
  public int hashCode() {
    return Objects.hash(getDocument(), Arrays.hashCode(getSentences()));
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (!(obj instanceof SentenceSample)) {
      return false;
    }

    SentenceSample other = (SentenceSample) obj;
    return getDocument().equals(other.getDocument())
        && Arrays.equals(getSentences(), other.getSentences());
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/sentdetect/SentenceSampleStream.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.sentdetect;

import java.io.IOException;
import java.util.LinkedList;
import java.util.List;

import opennlp.tools.util.FilterObjectStream;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.Span;

/**
 * This class is a stream filter which reads sentence-per-line samples from
 * a <code>Reader</code> and converts them into {@link SentenceSample}
 * objects. An empty line indicates the begin of a new document.
 */
public class SentenceSampleStream extends FilterObjectStream<String, SentenceSample> {

  public SentenceSampleStream(ObjectStream<String> sentences) {
    super(new EmptyLinePreprocessorStream(sentences));
  }

  /**
   * Restores literal line breaks that were escaped as {@code <CR>} and
   * {@code <LF>} tags in the training material.
   */
  public static String replaceNewLineEscapeTags(String s) {
    return s.replace("<CR>", "\r").replace("<LF>", "\n");
  }

  /**
   * Reads the lines of one document (up to the next empty line) and turns
   * them into a single {@link SentenceSample}.
   *
   * @return the next sample, or {@code null} when the stream is exhausted
   */
  public SentenceSample read() throws IOException {
    StringBuilder documentText = new StringBuilder();
    List<Span> spans = new LinkedList<>();

    for (String line = samples.read(); line != null && !line.isEmpty();
        line = samples.read()) {
      int start = documentText.length();

      // one sentence per line: normalize whitespace and unescape line breaks
      documentText.append(replaceNewLineEscapeTags(line.trim()));
      spans.add(new Span(start, documentText.length()));

      // separate adjacent sentences with a single space
      documentText.append(' ');
    }

    if (!spans.isEmpty()) {
      return new SentenceSample(documentText.toString(),
          spans.toArray(new Span[spans.size()]));
    }

    return null;
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/sentdetect/package-info.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Package related to identifying sentence boundaries.
 */
package opennlp.tools.sentdetect;
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/sentdetect
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/sentdetect/lang/Factory.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.sentdetect.lang;

import java.util.Collections;
import java.util.Set;

import opennlp.tools.sentdetect.DefaultEndOfSentenceScanner;
import opennlp.tools.sentdetect.DefaultSDContextGenerator;
import opennlp.tools.sentdetect.EndOfSentenceScanner;
import opennlp.tools.sentdetect.SDContextGenerator;
import opennlp.tools.sentdetect.lang.th.SentenceContextGenerator;

/**
 * Supplies language-dependent end-of-sentence characters, scanners and
 * context generators for the sentence detector.
 */
public class Factory {

  public static final char[] ptEosCharacters = new char[] { '.', '?', '!', ';',
      ':', '(', ')', '«', '»', '\'', '"' };

  public static final char[] defaultEosCharacters = new char[] { '.', '!', '?' };

  public static final char[] thEosCharacters = new char[] { ' ','\n' };

  public static final char[] jpEosCharacters = new char[] {'。', '!', '?'};

  // Null-safe check whether languageCode is one of the two ISO codes.
  private static boolean matchesLanguage(String languageCode, String iso1, String iso3) {
    return iso1.equals(languageCode) || iso3.equals(languageCode);
  }

  /** Creates a scanner over the language-specific EOS characters. */
  public EndOfSentenceScanner createEndOfSentenceScanner(String languageCode) {
    return new DefaultEndOfSentenceScanner(getEOSCharacters(languageCode));
  }

  /** Creates a scanner over a caller-supplied EOS character set. */
  public EndOfSentenceScanner createEndOfSentenceScanner(
      char[] customEOSCharacters) {
    return new DefaultEndOfSentenceScanner(customEOSCharacters);
  }

  /**
   * Creates a language-specific context generator using the given
   * abbreviation set.
   */
  public SDContextGenerator createSentenceContextGenerator(String languageCode,
      Set<String> abbreviations) {
    if (matchesLanguage(languageCode, "th", "tha")) {
      return new SentenceContextGenerator();
    }
    if (matchesLanguage(languageCode, "pt", "por")) {
      return new DefaultSDContextGenerator(abbreviations, ptEosCharacters);
    }
    // NOTE(review): Japanese falls through to the default EOS characters here
    // even though getEOSCharacters knows jpEosCharacters; preserved as-is for
    // behavior compatibility — confirm whether this asymmetry is intended.
    return new DefaultSDContextGenerator(abbreviations, defaultEosCharacters);
  }

  /** Creates a context generator over a caller-supplied EOS character set. */
  public SDContextGenerator createSentenceContextGenerator(
      Set<String> abbreviations, char[] customEOSCharacters) {
    return new DefaultSDContextGenerator(abbreviations, customEOSCharacters);
  }

  /** Creates a language-specific context generator with no abbreviations. */
  public SDContextGenerator createSentenceContextGenerator(String languageCode) {
    return createSentenceContextGenerator(languageCode, Collections.emptySet());
  }

  /**
   * Returns the end-of-sentence characters for the given language code,
   * falling back to the default set for unknown codes.
   */
  public char[] getEOSCharacters(String languageCode) {
    if (matchesLanguage(languageCode, "th", "tha")) {
      return thEosCharacters;
    }
    if (matchesLanguage(languageCode, "pt", "por")) {
      return ptEosCharacters;
    }
    if (matchesLanguage(languageCode, "jp", "jpn")) {
      return jpEosCharacters;
    }
    return defaultEosCharacters;
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/sentdetect/lang
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/sentdetect/lang/th/SentenceContextGenerator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.sentdetect.lang.th;

import opennlp.tools.sentdetect.DefaultSDContextGenerator;

/**
 * Creates contexts/features for end-of-sentence detection in Thai text.
 */
public class SentenceContextGenerator extends DefaultSDContextGenerator {

  public static final char[] eosCharacters = {' ','\n'};

  public SentenceContextGenerator() {
    super(eosCharacters);
  }

  /**
   * Emits the full prefix and suffix plus character n-grams (length 1..7)
   * taken from the end of the prefix and the start of the suffix.
   */
  @Override
  protected void collectFeatures(String prefix, String suffix, String previous,
      String next) {
    // whole prefix feature
    buf.append("p=");
    buf.append(prefix);
    collectFeats.add(buf.toString());
    buf.setLength(0);

    // whole suffix feature
    buf.append("s=");
    buf.append(suffix);
    collectFeats.add(buf.toString());
    buf.setLength(0);

    // p1..p7: last n characters of the prefix (clamped at the string start)
    for (int n = 1; n <= 7; n++) {
      collectFeats.add("p" + n + "="
          + prefix.substring(Math.max(prefix.length() - n, 0)));
    }

    // n1..n7: first n characters of the suffix (clamped at the string end)
    for (int n = 1; n <= 7; n++) {
      collectFeats.add("n" + n + "="
          + suffix.substring(0, Math.min(n, suffix.length())));
    }
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/stemmer/PorterStemmer.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Porter stemmer in Java. The original paper is in Porter, 1980, An algorithm for suffix stripping, Program, Vol. 14, no. 3, pp 130-137, See also http://www.tartarus.org/~martin/PorterStemmer/index.html Bug 1 (reported by Gonzalo Parra 16/10/99) fixed as marked below. Tthe words 'aed', 'eed', 'oed' leave k at 'a' for step 3, and b[k-1] is then out outside the bounds of b. Similarly, Bug 2 (reported by Steve Dyrdahl 22/2/00) fixed as marked below. 'ion' by itself leaves j = -1 in the test for 'ion' in step 5, and b[j] is then outside the bounds of b. Release 3. [ This version is derived from Release 3, modified by Brian Goetz to optimize for fewer object creations. ] */ package opennlp.tools.stemmer; /** * * Stemmer, implementing the Porter Stemming Algorithm * * The Stemmer class transforms a word into its root form. The input * word can be provided a character at time (by calling add()), or at once * by calling one of the various stem(something) methods. 
*/ // CHECKSTYLE:OFF public class PorterStemmer implements Stemmer { private char[] b; private int i, /* offset into b */ j, k, k0; private boolean dirty = false; private static final int INC = 50; public PorterStemmer() { b = new char[INC]; i = 0; } /** * reset() resets the stemmer so it can stem another word. If you invoke * the stemmer by calling add(char) and then stem(), you must call reset() * before starting another word. */ public void reset() { i = 0; dirty = false; } /** * Add a character to the word being stemmed. When you are finished * adding characters, you can call stem(void) to process the word. */ public void add(char ch) { if (b.length == i) { char[] new_b = new char[i+INC]; System.arraycopy(b, 0, new_b, 0, i); { b = new_b; } } b[i++] = ch; } /** * After a word has been stemmed, it can be retrieved by toString(), * or a reference to the internal buffer can be retrieved by getResultBuffer * and getResultLength (which is generally more efficient.) */ @Override public String toString() { return new String(b,0,i); } /** * Returns the length of the word resulting from the stemming process. */ public int getResultLength() { return i; } /** * Returns a reference to a character buffer containing the results of * the stemming process. You also need to consult getResultLength() * to determine the length of the result. */ public char[] getResultBuffer() { return b; } /* cons(i) is true <=> b[i] is a consonant. */ private boolean cons(int i) { switch (b[i]) { case 'a': case 'e': case 'i': case 'o': case 'u': return false; case 'y': return (i == k0) || !cons(i - 1); default: return true; } } /* m() measures the number of consonant sequences between k0 and j. if c is a consonant sequence and v a vowel sequence, and <..> indicates arbitrary presence, <c><v> gives 0 <c>vc<v> gives 1 <c>vcvc<v> gives 2 <c>vcvcvc<v> gives 3 .... */ private int m() { int n = 0; int i = k0; while(true) { if (i > j) return n; if (! 
cons(i)) break; i++; } i++; while(true) { while(true) { if (i > j) return n; if (cons(i)) break; i++; } i++; n++; while(true) { if (i > j) return n; if (! cons(i)) break; i++; } i++; } } /* vowelinstem() is true <=> k0,...j contains a vowel */ private boolean vowelinstem() { int i; for (i = k0; i <= j; i++) if (! cons(i)) return true; return false; } /* doublec(j) is true <=> j,(j-1) contain a double consonant. */ private boolean doublec(int j) { return j >= k0 + 1 && b[j] == b[j - 1] && cons(j); } /* cvc(i) is true <=> i-2,i-1,i has the form consonant - vowel - consonant and also if the second c is not w,x or y. this is used when trying to restore an e at the end of a short word. e.g. cav(e), lov(e), hop(e), crim(e), but snow, box, tray. */ private boolean cvc(int i) { if (i < k0+2 || !cons(i) || cons(i-1) || !cons(i-2)) return false; else { int ch = b[i]; if (ch == 'w' || ch == 'x' || ch == 'y') return false; } return true; } private boolean ends(String s) { int l = s.length(); int o = k-l+1; if (o < k0) return false; for (int i = 0; i < l; i++) if (b[o+i] != s.charAt(i)) return false; j = k-l; return true; } /* setto(s) sets (j+1),...k to the characters in the string s, readjusting k. */ void setto(String s) { int l = s.length(); int o = j+1; for (int i = 0; i < l; i++) b[o+i] = s.charAt(i); k = j+l; dirty = true; } /* r(s) is used further down. */ void r(String s) { if (m() > 0) setto(s); } /* step1() gets rid of plurals and -ed or -ing. e.g. 
caresses -> caress ponies -> poni ties -> ti caress -> caress cats -> cat feed -> feed agreed -> agree disabled -> disable matting -> mat mating -> mate meeting -> meet milling -> mill messing -> mess meetings -> meet */ private void step1() { if (b[k] == 's') { if (ends("sses")) k -= 2; else if (ends("ies")) setto("i"); else if (b[k-1] != 's') k--; } if (ends("eed")) { if (m() > 0) k--; } else if ((ends("ed") || ends("ing")) && vowelinstem()) { k = j; if (ends("at")) setto("ate"); else if (ends("bl")) setto("ble"); else if (ends("iz")) setto("ize"); else if (doublec(k)) { int ch = b[k--]; if (ch == 'l' || ch == 's' || ch == 'z') k++; } else if (m() == 1 && cvc(k)) setto("e"); } } /* step2() turns terminal y to i when there is another vowel in the stem. */ private void step2() { if (ends("y") && vowelinstem()) { b[k] = 'i'; dirty = true; } } /* step3() maps double suffices to single ones. so -ization ( = -ize plus -ation) maps to -ize etc. note that the string before the suffix must give m() > 0. 
*/ private void step3() { if (k == k0) return; /* For Bug 1 */ switch (b[k-1]) { case 'a': if (ends("ational")) { r("ate"); break; } if (ends("tional")) { r("tion"); break; } break; case 'c': if (ends("enci")) { r("ence"); break; } if (ends("anci")) { r("ance"); break; } break; case 'e': if (ends("izer")) { r("ize"); break; } break; case 'l': if (ends("bli")) { r("ble"); break; } if (ends("alli")) { r("al"); break; } if (ends("entli")) { r("ent"); break; } if (ends("eli")) { r("e"); break; } if (ends("ousli")) { r("ous"); break; } break; case 'o': if (ends("ization")) { r("ize"); break; } if (ends("ation")) { r("ate"); break; } if (ends("ator")) { r("ate"); break; } break; case 's': if (ends("alism")) { r("al"); break; } if (ends("iveness")) { r("ive"); break; } if (ends("fulness")) { r("ful"); break; } if (ends("ousness")) { r("ous"); break; } break; case 't': if (ends("aliti")) { r("al"); break; } if (ends("iviti")) { r("ive"); break; } if (ends("biliti")) { r("ble"); break; } break; case 'g': if (ends("logi")) { r("log"); break; } } } /* step4() deals with -ic-, -full, -ness etc. similar strategy to step3. */ private void step4() { switch (b[k]) { case 'e': if (ends("icate")) { r("ic"); break; } if (ends("ative")) { r(""); break; } if (ends("alize")) { r("al"); break; } break; case 'i': if (ends("iciti")) { r("ic"); break; } break; case 'l': if (ends("ical")) { r("ic"); break; } if (ends("ful")) { r(""); break; } break; case 's': if (ends("ness")) { r(""); break; } break; } } /* step5() takes off -ant, -ence etc., in context <c>vcvc<v>. 
*/ private void step5() { if (k == k0) return; /* for Bug 1 */ switch (b[k-1]) { case 'a': if (ends("al")) break; return; case 'c': if (ends("ance")) break; if (ends("ence")) break; return; case 'e': if (ends("er")) break; return; case 'i': if (ends("ic")) break; return; case 'l': if (ends("able")) break; if (ends("ible")) break; return; case 'n': if (ends("ant")) break; if (ends("ement")) break; if (ends("ment")) break; /* element etc. not stripped before the m */ if (ends("ent")) break; return; case 'o': if (ends("ion") && j >= 0 && (b[j] == 's' || b[j] == 't')) break; /* j >= 0 fixes Bug 2 */ if (ends("ou")) break; return; /* takes care of -ous */ case 's': if (ends("ism")) break; return; case 't': if (ends("ate")) break; if (ends("iti")) break; return; case 'u': if (ends("ous")) break; return; case 'v': if (ends("ive")) break; return; case 'z': if (ends("ize")) break; return; default: return; } if (m() > 1) k = j; } /* step6() removes a final -e if m() > 1. */ private void step6() { j = k; if (b[k] == 'e') { int a = m(); if (a > 1 || a == 1 && !cvc(k-1)) k--; } if (b[k] == 'l' && doublec(k) && m() > 1) k--; } /** * Stem a word provided as a String. Returns the result as a String. */ public String stem(String s) { if (stem(s.toCharArray(), s.length())) return toString(); else return s; } /** * Stem a word provided as a CharSequence. * Returns the result as a CharSequence. */ public CharSequence stem(CharSequence word) { return stem(word.toString()); } /** Stem a word contained in a char[]. Returns true if the stemming process * resulted in a word different from the input. You can retrieve the * result with getResultLength()/getResultBuffer() or toString(). */ public boolean stem(char[] word) { return stem(word, word.length); } /** Stem a word contained in a portion of a char[] array. Returns * true if the stemming process resulted in a word different from * the input. You can retrieve the result with * getResultLength()/getResultBuffer() or toString(). 
*/ public boolean stem(char[] wordBuffer, int offset, int wordLen) { reset(); if (b.length < wordLen) { b = new char[wordLen - offset]; } System.arraycopy(wordBuffer, offset, b, 0, wordLen); i = wordLen; return stem(0); } /** Stem a word contained in a leading portion of a char[] array. * Returns true if the stemming process resulted in a word different * from the input. You can retrieve the result with * getResultLength()/getResultBuffer() or toString(). */ public boolean stem(char[] word, int wordLen) { return stem(word, 0, wordLen); } /** Stem the word placed into the Stemmer buffer through calls to add(). * Returns true if the stemming process resulted in a word different * from the input. You can retrieve the result with * getResultLength()/getResultBuffer() or toString(). */ public boolean stem() { return stem(0); } public boolean stem(int i0) { k = i - 1; k0 = i0; if (k > k0+1) { step1(); step2(); step3(); step4(); step5(); step6(); } // Also, a word is considered dirty if we lopped off letters // Thanks to Ifigenia Vairelles for pointing this out. if (i != k+1) dirty = true; i = k+1; return dirty; } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/stemmer/Stemmer.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.stemmer; /** * The stemmer is reducing a word to its stem. */ public interface Stemmer { CharSequence stem(CharSequence word); }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/stemmer
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/stemmer/snowball/AbstractSnowballStemmer.java
// CHECKSTYLE:OFF /* Copyright (c) 2001, Dr Martin Porter Copyright (c) 2002, Richard Boulton All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * Neither the name of the copyright holders nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ package opennlp.tools.stemmer.snowball; abstract class AbstractSnowballStemmer extends SnowballProgram { public abstract boolean stem(); }