index: int64
repo_id: string
file_path: string
content: string
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/ad/ADChunkSampleStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.ad; import java.io.IOException; import java.io.UnsupportedEncodingException; import java.util.ArrayList; import java.util.List; import opennlp.tools.chunker.ChunkSample; import opennlp.tools.formats.ad.ADSentenceStream.Sentence; import opennlp.tools.formats.ad.ADSentenceStream.SentenceParser.Leaf; import opennlp.tools.formats.ad.ADSentenceStream.SentenceParser.Node; import opennlp.tools.formats.ad.ADSentenceStream.SentenceParser.TreeElement; import opennlp.tools.namefind.NameSample; import opennlp.tools.util.InputStreamFactory; import opennlp.tools.util.ObjectStream; import opennlp.tools.util.PlainTextByLineStream; import opennlp.tools.util.StringUtil; /** * Parser for Floresta Sita(c)tica Arvores Deitadas corpus, output to for the * Portuguese Chunker training. * <p> * The heuristic to extract chunks where based o paper 'A Machine Learning * Approach to Portuguese Clause Identification', (Eraldo Fernandes, Cicero * Santos and Ruy Milidiú).<br> * <p> * Data can be found on this web site:<br> * http://www.linguateca.pt/floresta/corpus.html * <p> * Information about the format:<br> * Susana Afonso. * "Árvores deitadas: Descrição do formato e das opções de análise na Floresta Sintáctica" * .<br> * 12 de Fevereiro de 2006. * http://www.linguateca.pt/documentos/Afonso2006ArvoresDeitadas.pdf * <p> * Detailed info about the NER tagset: * http://beta.visl.sdu.dk/visl/pt/info/portsymbol.html#semtags_names * <p> * <b>Note:</b> Do not use this class, internal use only! */ public class ADChunkSampleStream implements ObjectStream<ChunkSample> { protected final ObjectStream<ADSentenceStream.Sentence> adSentenceStream; private int start = -1; private int end = -1; private int index = 0; public static final String OTHER = "O"; /** * Creates a new {@link NameSample} stream from a line stream, i.e. * {@link ObjectStream}&lt;{@link String}&gt;, that could be a * {@link PlainTextByLineStream} object. 
* * @param lineStream * a stream of lines as {@link String} */ public ADChunkSampleStream(ObjectStream<String> lineStream) { this.adSentenceStream = new ADSentenceStream(lineStream); } public ADChunkSampleStream(InputStreamFactory in, String charsetName) throws IOException { try { this.adSentenceStream = new ADSentenceStream(new PlainTextByLineStream( in, charsetName)); } catch (UnsupportedEncodingException e) { // UTF-8 is available on all JVMs, will never happen throw new IllegalStateException(e); } } public ChunkSample read() throws IOException { Sentence paragraph; while ((paragraph = this.adSentenceStream.read()) != null) { if (end > -1 && index >= end) { // leave return null; } if (start > -1 && index < start) { index++; // skip this one } else { Node root = paragraph.getRoot(); List<String> sentence = new ArrayList<>(); List<String> tags = new ArrayList<>(); List<String> target = new ArrayList<>(); processRoot(root, sentence, tags, target); if (sentence.size() > 0) { index++; return new ChunkSample(sentence, tags, target); } } } return null; } protected void processRoot(Node root, List<String> sentence, List<String> tags, List<String> target) { if (root != null) { TreeElement[] elements = root.getElements(); for (int i = 0; i < elements.length; i++) { if (elements[i].isLeaf()) { processLeaf((Leaf) elements[i], false, OTHER, sentence, tags, target); } else { processNode((Node) elements[i], sentence, tags, target, null); } } } } private void processNode(Node node, List<String> sentence, List<String> tags, List<String> target, String inheritedTag) { String phraseTag = getChunkTag(node); boolean inherited = false; if (phraseTag.equals(OTHER) && inheritedTag != null) { phraseTag = inheritedTag; inherited = true; } TreeElement[] elements = node.getElements(); for (int i = 0; i < elements.length; i++) { if (elements[i].isLeaf()) { boolean isIntermediate = false; String tag = phraseTag; Leaf leaf = (Leaf) elements[i]; String localChunk = getChunkTag(leaf); if (localChunk != null && !tag.equals(localChunk)) { tag = localChunk; } if (isIntermediate(tags, target, tag) && (inherited || i > 0)) { isIntermediate = true; } if (!isIncludePunctuations() && leaf.getFunctionalTag() == null && ( !( i + 1 < elements.length && elements[i + 1].isLeaf() ) || !( i > 0 && elements[i - 1].isLeaf() ) ) ) { isIntermediate = false; tag = OTHER; } processLeaf(leaf, isIntermediate, tag, sentence, tags, target); } else { int before = target.size(); processNode((Node) elements[i], sentence, tags, target, phraseTag); // if the child node was of a different type we should break the chunk sequence for (int j = target.size() - 1; j >= before; j--) { if (!target.get(j).endsWith("-" + phraseTag)) { phraseTag = OTHER; break; } } } } } protected void processLeaf(Leaf leaf, boolean isIntermediate, String phraseTag, List<String> sentence, List<String> tags, List<String> target) { String chunkTag; if (leaf.getFunctionalTag() != null && phraseTag.equals(OTHER)) { phraseTag = getPhraseTagFromPosTag(leaf.getFunctionalTag()); } if (!phraseTag.equals(OTHER)) { if (isIntermediate) { chunkTag = "I-" + phraseTag; } else { chunkTag = "B-" + phraseTag; } } else { chunkTag = phraseTag; } sentence.add(leaf.getLexeme()); if (leaf.getSyntacticTag() == null) { tags.add(leaf.getLexeme()); } else { tags.add(ADChunkSampleStream.convertFuncTag(leaf.getFunctionalTag(), false)); } target.add(chunkTag); } protected String getPhraseTagFromPosTag(String functionalTag) { if (functionalTag.equals("v-fin")) { return "VP"; } else if 
(functionalTag.equals("n")) { return "NP"; } return OTHER; } public static String convertFuncTag(String t, boolean useCGTags) { if (useCGTags) { if ("art".equals(t) || "pron-det".equals(t) || "pron-indef".equals(t)) { t = "det"; } } return t; } protected String getChunkTag(Leaf leaf) { String tag = leaf.getSyntacticTag(); if ("P".equals(tag)) { return "VP"; } return null; } protected String getChunkTag(Node node) { String tag = node.getSyntacticTag(); String phraseTag = tag.substring(tag.lastIndexOf(":") + 1); while (phraseTag.endsWith("-")) { phraseTag = phraseTag.substring(0, phraseTag.length() - 1); } // maybe we should use only np, vp and pp, but will keep ap and advp. if (phraseTag.equals("np") || phraseTag.equals("vp") || phraseTag.equals("pp") || phraseTag.equals("ap") || phraseTag.equals("advp") || phraseTag.equals("adjp")) { phraseTag = StringUtil.toUpperCase(phraseTag); } else { phraseTag = OTHER; } return phraseTag; } public void setStart(int aStart) { this.start = aStart; } public void setEnd(int aEnd) { this.end = aEnd; } public void reset() throws IOException, UnsupportedOperationException { adSentenceStream.reset(); } public void close() throws IOException { adSentenceStream.close(); } protected boolean isIncludePunctuations() { return false; } protected boolean isIntermediate(List<String> tags, List<String> target, String phraseTag) { return target.size() > 0 && target.get(target.size() - 1).endsWith("-" + phraseTag); } }
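ADChunkSampleStream above turns Arvores Deitadas parse trees into ChunkSample objects with B-/I-/O chunk tags, optionally restricted to a sentence window via setStart/setEnd. A minimal usage sketch, assuming a local corpus file and ISO-8859-1 encoding (both are illustrative placeholders, not taken from the source):

import java.io.File;
import java.io.IOException;

import opennlp.tools.chunker.ChunkSample;
import opennlp.tools.formats.ad.ADChunkSampleStream;
import opennlp.tools.util.MarkableFileInputStreamFactory;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.PlainTextByLineStream;

public class ADChunkExample {
  public static void main(String[] args) throws IOException {
    // Hypothetical corpus location and encoding; adjust to the local Floresta copy.
    ObjectStream<String> lines = new PlainTextByLineStream(
        new MarkableFileInputStreamFactory(new File("corpus/FlorestaVirgem_CF_3.0_ad.txt")),
        "ISO-8859-1");
    ADChunkSampleStream samples = new ADChunkSampleStream(lines);
    samples.setStart(0);    // optional: first sentence index to emit
    samples.setEnd(100);    // optional: stop after this sentence index
    ChunkSample sample;
    while ((sample = samples.read()) != null) {
      System.out.println(sample);   // tokens, POS tags, and B-/I-/O chunk tags
    }
    samples.close();
  }
}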
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/ad/ADChunkSampleStreamFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.ad; import java.io.File; import java.io.IOException; import java.nio.charset.Charset; import opennlp.tools.chunker.ChunkSample; import opennlp.tools.cmdline.ArgumentParser; import opennlp.tools.cmdline.ArgumentParser.OptionalParameter; import opennlp.tools.cmdline.ArgumentParser.ParameterDescription; import opennlp.tools.cmdline.CmdLineUtil; import opennlp.tools.cmdline.StreamFactoryRegistry; import opennlp.tools.formats.LanguageSampleStreamFactory; import opennlp.tools.util.InputStreamFactory; import opennlp.tools.util.ObjectStream; import opennlp.tools.util.PlainTextByLineStream; /** * A Factory to create a Arvores Deitadas ChunkStream from the command line * utility. * <p> * <b>Note:</b> Do not use this class, internal use only! */ public class ADChunkSampleStreamFactory extends LanguageSampleStreamFactory<ChunkSample> { interface Parameters { //all have to be repeated, because encoding is not optional, //according to the check if (encoding == null) { below (now removed) @ParameterDescription(valueName = "charsetName", description = "encoding for reading and writing text, if absent the system default is used.") Charset getEncoding(); @ParameterDescription(valueName = "sampleData", description = "data to be used, usually a file name.") File getData(); @ParameterDescription(valueName = "language", description = "language which is being processed.") String getLang(); @ParameterDescription(valueName = "start", description = "index of first sentence") @OptionalParameter Integer getStart(); @ParameterDescription(valueName = "end", description = "index of last sentence") @OptionalParameter Integer getEnd(); } public static void registerFactory() { StreamFactoryRegistry.registerFactory(ChunkSample.class, "ad", new ADChunkSampleStreamFactory(Parameters.class)); } protected <P> ADChunkSampleStreamFactory(Class<P> params) { super(params); } public ObjectStream<ChunkSample> create(String[] args) { Parameters params = ArgumentParser.parse(args, Parameters.class); language = params.getLang(); InputStreamFactory sampleDataIn = CmdLineUtil.createInputStreamFactory(params.getData()); ObjectStream<String> lineStream = null; try { lineStream = new PlainTextByLineStream(sampleDataIn, params.getEncoding()); } catch (IOException ex) { CmdLineUtil.handleCreateObjectStreamError(ex); } ADChunkSampleStream sampleStream = new ADChunkSampleStream(lineStream); if (params.getStart() != null && params.getStart() > -1) { sampleStream.setStart(params.getStart()); } if (params.getEnd() != null && params.getEnd() > -1) { sampleStream.setEnd(params.getEnd()); } return sampleStream; } }
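The factory registers ADChunkSampleStream under the "ad" format name so the command-line tools can build it from string arguments. A hedged sketch of programmatic use; the flag spellings are assumed to follow ArgumentParser's getter-name convention and the corpus path is a placeholder:

import java.io.IOException;

import opennlp.tools.chunker.ChunkSample;
import opennlp.tools.cmdline.StreamFactoryRegistry;
import opennlp.tools.formats.ad.ADChunkSampleStreamFactory;
import opennlp.tools.util.ObjectStream;

public class ADChunkFactoryExample {
  public static void main(String[] args) throws IOException {
    ADChunkSampleStreamFactory.registerFactory();
    // Flag names assumed from the Parameters getters; -start/-end are optional.
    String[] factoryArgs = {
        "-lang", "pt",
        "-encoding", "ISO-8859-1",
        "-data", "corpus/FlorestaVirgem_CF_3.0_ad.txt",
        "-start", "0",
        "-end", "100"
    };
    ObjectStream<ChunkSample> samples =
        StreamFactoryRegistry.getFactory(ChunkSample.class, "ad").create(factoryArgs);
    ChunkSample sample;
    while ((sample = samples.read()) != null) {
      System.out.println(sample);
    }
    samples.close();
  }
}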
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/ad/ADNameSampleStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.ad; import java.io.IOException; import java.io.InputStream; import java.io.UnsupportedEncodingException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.regex.Matcher; import java.util.regex.Pattern; import opennlp.tools.formats.ad.ADSentenceStream.Sentence; import opennlp.tools.formats.ad.ADSentenceStream.SentenceParser.Leaf; import opennlp.tools.formats.ad.ADSentenceStream.SentenceParser.Node; import opennlp.tools.formats.ad.ADSentenceStream.SentenceParser.TreeElement; import opennlp.tools.namefind.NameSample; import opennlp.tools.util.InputStreamFactory; import opennlp.tools.util.ObjectStream; import opennlp.tools.util.PlainTextByLineStream; import opennlp.tools.util.Span; /** * Parser for Floresta Sita(c)tica Arvores Deitadas corpus, output to for the * Portuguese NER training. * <p> * The data contains four named entity types: Person, Organization, Group, * Place, Event, ArtProd, Abstract, Thing, Time and Numeric.<br> * <p> * Data can be found on this web site:<br> * http://www.linguateca.pt/floresta/corpus.html * <p> * Information about the format:<br> * Susana Afonso. * "Árvores deitadas: Descrição do formato e das opções de análise na Floresta Sintáctica" * .<br> * 12 de Fevereiro de 2006. * http://www.linguateca.pt/documentos/Afonso2006ArvoresDeitadas.pdf * <p> * Detailed info about the NER tagset: * http://beta.visl.sdu.dk/visl/pt/info/portsymbol.html#semtags_names * <p> * <b>Note:</b> Do not use this class, internal use only! */ public class ADNameSampleStream implements ObjectStream<NameSample> { /** * Pattern of a NER tag in Arvores Deitadas */ private static final Pattern tagPattern = Pattern.compile("<(NER:)?(.*?)>"); private static final Pattern whitespacePattern = Pattern.compile("\\s+"); private static final Pattern underlinePattern = Pattern.compile("[_]+"); private static final Pattern hyphenPattern = Pattern.compile("((\\p{L}+)-$)|(^-(\\p{L}+)(.*))|((\\p{L}+)-(\\p{L}+)(.*))"); private static final Pattern alphanumericPattern = Pattern.compile("^[\\p{L}\\p{Nd}]+$"); /** * Map to the Arvores Deitadas types to our types. It is read-only. 
*/ private static final Map<String, String> HAREM; static { Map<String, String> harem = new HashMap<>(); final String person = "person"; harem.put("hum", person); harem.put("official", person); harem.put("member", person); final String organization = "organization"; harem.put("admin", organization); harem.put("org", organization); harem.put("inst", organization); harem.put("media", organization); harem.put("party", organization); harem.put("suborg", organization); final String group = "group"; harem.put("groupind", group); harem.put("groupofficial", group); final String place = "place"; harem.put("top", place); harem.put("civ", place); harem.put("address", place); harem.put("site", place); harem.put("virtual", place); harem.put("astro", place); final String event = "event"; harem.put("occ", event); harem.put("event", event); harem.put("history", event); final String artprod = "artprod"; harem.put("tit", artprod); harem.put("pub", artprod); harem.put("product", artprod); harem.put("V", artprod); harem.put("artwork", artprod); final String _abstract = "abstract"; harem.put("brand", _abstract); harem.put("genre", _abstract); harem.put("school", _abstract); harem.put("idea", _abstract); harem.put("plan", _abstract); harem.put("author", _abstract); harem.put("absname", _abstract); harem.put("disease", _abstract); final String thing = "thing"; harem.put("object", thing); harem.put("common", thing); harem.put("mat", thing); harem.put("class", thing); harem.put("plant", thing); harem.put("currency", thing); final String time = "time"; harem.put("date", time); harem.put("hour", time); harem.put("period", time); harem.put("cyclic", time); final String numeric = "numeric"; harem.put("quantity", numeric); harem.put("prednum", numeric); harem.put("currency", numeric); HAREM = Collections.unmodifiableMap(harem); } private final ObjectStream<ADSentenceStream.Sentence> adSentenceStream; /** * To keep the last left contraction part */ private String leftContractionPart = null; private final boolean splitHyphenatedTokens; /** * Creates a new {@link NameSample} stream from a line stream, i.e. * {@link ObjectStream}&lt;{@link String}&gt;, that could be a * {@link PlainTextByLineStream} object. * * @param lineStream * a stream of lines as {@link String} * @param splitHyphenatedTokens * if true hyphenated tokens will be separated: "carros-monstro" &gt; * "carros" "-" "monstro" */ public ADNameSampleStream(ObjectStream<String> lineStream, boolean splitHyphenatedTokens) { this.adSentenceStream = new ADSentenceStream(lineStream); this.splitHyphenatedTokens = splitHyphenatedTokens; } /** * Creates a new {@link NameSample} stream from a {@link InputStream} * * @param in * the Corpus {@link InputStream} * @param charsetName * the charset of the Arvores Deitadas Corpus * @param splitHyphenatedTokens * if true hyphenated tokens will be separated: "carros-monstro" &gt; * "carros" "-" "monstro" */ @Deprecated public ADNameSampleStream(InputStreamFactory in, String charsetName, boolean splitHyphenatedTokens) throws IOException { try { this.adSentenceStream = new ADSentenceStream(new PlainTextByLineStream( in, charsetName)); this.splitHyphenatedTokens = splitHyphenatedTokens; } catch (UnsupportedEncodingException e) { // UTF-8 is available on all JVMs, will never happen throw new IllegalStateException(e); } } private int textID = -1; public NameSample read() throws IOException { Sentence paragraph; // we should look for text here. 
while ((paragraph = this.adSentenceStream.read()) != null) { int currentTextID = getTextID(paragraph); boolean clearData = false; if (currentTextID != textID) { clearData = true; textID = currentTextID; } Node root = paragraph.getRoot(); List<String> sentence = new ArrayList<>(); List<Span> names = new ArrayList<>(); process(root, sentence, names); return new NameSample(sentence.toArray(new String[sentence.size()]), names.toArray(new Span[names.size()]), clearData); } return null; } /** * Recursive method to process a node in Arvores Deitadas format. * * @param node * the node to be processed * @param sentence * the sentence tokens we got so far * @param names * the names we got so far */ private void process(Node node, List<String> sentence, List<Span> names) { if (node != null) { for (TreeElement element : node.getElements()) { if (element.isLeaf()) { processLeaf((Leaf) element, sentence, names); } else { process((Node) element, sentence, names); } } } } /** * Process a Leaf of Arvores Detaitadas format * * @param leaf * the leaf to be processed * @param sentence * the sentence tokens we got so far * @param names * the names we got so far */ private void processLeaf(Leaf leaf, List<String> sentence, List<Span> names) { boolean alreadyAdded = false; if (leftContractionPart != null) { // will handle the contraction String right = leaf.getLexeme(); String c = PortugueseContractionUtility.toContraction( leftContractionPart, right); if (c != null) { String[] parts = whitespacePattern.split(c); sentence.addAll(Arrays.asList(parts)); alreadyAdded = true; } else { // contraction was missing! why? sentence.add(leftContractionPart); // keep alreadyAdded false. } leftContractionPart = null; } String namedEntityTag = null; int startOfNamedEntity = -1; String leafTag = leaf.getSecondaryTag(); boolean expandLastNER = false; // used when we find a <NER2> tag if (leafTag != null) { if (leafTag.contains("<sam->") && !alreadyAdded) { String[] lexemes = underlinePattern.split(leaf.getLexeme()); if (lexemes.length > 1) { sentence.addAll(Arrays.asList(lexemes).subList(0, lexemes.length - 1)); } leftContractionPart = lexemes[lexemes.length - 1]; return; } if (leafTag.contains("<NER2>")) { // this one an be part of the last name expandLastNER = true; } namedEntityTag = getNER(leafTag); } if (namedEntityTag != null) { startOfNamedEntity = sentence.size(); } if (!alreadyAdded) { sentence.addAll(processLexeme(leaf.getLexeme())); } if (namedEntityTag != null) { names .add(new Span(startOfNamedEntity, sentence.size(), namedEntityTag)); } if (expandLastNER) { // if the current leaf has the tag <NER2>, it can be the continuation of // a NER. 
// we check if it is true, and expand the last NER int lastIndex = names.size() - 1; if (names.size() > 0) { Span last = names.get(lastIndex); if (last.getEnd() == sentence.size() - 1) { names.set(lastIndex, new Span(last.getStart(), sentence.size(), last.getType())); } } } } private List<String> processLexeme(String lexemeStr) { List<String> out = new ArrayList<>(); String[] parts = underlinePattern.split(lexemeStr); for (String tok : parts) { if (tok.length() > 1 && !alphanumericPattern.matcher(tok).matches()) { out.addAll(processTok(tok)); } else { out.add(tok); } } return out; } private List<String> processTok(String tok) { boolean tokAdded = false; String original = tok; List<String> out = new ArrayList<>(); LinkedList<String> suffix = new LinkedList<>(); char first = tok.charAt(0); if (first == '«') { out.add(Character.toString(first)); tok = tok.substring(1); } char last = tok.charAt(tok.length() - 1); if (last == '»' || last == ':' || last == ',' || last == '!' ) { suffix.add(Character.toString(last)); tok = tok.substring(0, tok.length() - 1); } // lets split all hyphens if (this.splitHyphenatedTokens && tok.contains("-") && tok.length() > 1) { Matcher matcher = hyphenPattern.matcher(tok); String firstTok = null; String hyphen = "-"; String secondTok = null; String rest = null; if (matcher.matches()) { if (matcher.group(1) != null) { firstTok = matcher.group(2); } else if (matcher.group(3) != null) { secondTok = matcher.group(4); rest = matcher.group(5); } else if (matcher.group(6) != null) { firstTok = matcher.group(7); secondTok = matcher.group(8); rest = matcher.group(9); } addIfNotEmpty(firstTok, out); addIfNotEmpty(hyphen, out); addIfNotEmpty(secondTok, out); addIfNotEmpty(rest, out); tokAdded = true; } } if (!tokAdded) { if (!original.equals(tok) && tok.length() > 1 && !alphanumericPattern.matcher(tok).matches()) { out.addAll(processTok(tok)); } else { out.add(tok); } } out.addAll(suffix); return out; } private void addIfNotEmpty(String firstTok, List<String> out) { if (firstTok != null && firstTok.length() > 0) { out.addAll(processTok(firstTok)); } } /** * Parse a NER tag in Arvores Deitadas format. 
* * @param tags * the NER tag in Arvores Deitadas format * @return the NER tag, or null if not a NER tag in Arvores Deitadas format */ private static String getNER(String tags) { if (tags.contains("<NER2>")) { return null; } String[] tag = tags.split("\\s+"); for (String t : tag) { Matcher matcher = tagPattern.matcher(t); if (matcher.matches()) { String ner = matcher.group(2); if (HAREM.containsKey(ner)) { return HAREM.get(ner); } } } return null; } public void reset() throws IOException, UnsupportedOperationException { adSentenceStream.reset(); } public void close() throws IOException { adSentenceStream.close(); } enum Type { ama, cie, lit } private Type corpusType = null; private Pattern metaPattern; // works for Amazonia // private static final Pattern meta1 = Pattern // .compile("^(?:[a-zA-Z\\-]*(\\d+)).*?p=(\\d+).*"); // // // works for selva cie // private static final Pattern meta2 = Pattern // .compile("^(?:[a-zA-Z\\-]*(\\d+)).*?p=(\\d+).*"); private int textIdMeta2 = -1; private String textMeta2 = ""; private int getTextID(Sentence paragraph) { String meta = paragraph.getMetadata(); if (corpusType == null) { if (meta.startsWith("LIT")) { corpusType = Type.lit; metaPattern = Pattern.compile("^([a-zA-Z\\-]+)(\\d+).*?p=(\\d+).*"); } else if (meta.startsWith("CIE")) { corpusType = Type.cie; metaPattern = Pattern.compile("^.*?source=\"(.*?)\".*"); } else { // ama corpusType = Type.ama; metaPattern = Pattern.compile("^(?:[a-zA-Z\\-]*(\\d+)).*?p=(\\d+).*"); } } if (corpusType.equals(Type.lit)) { Matcher m2 = metaPattern.matcher(meta); if (m2.matches()) { String textId = m2.group(1); if (!textId.equals(textMeta2)) { textIdMeta2++; textMeta2 = textId; } return textIdMeta2; } else { throw new RuntimeException("Invalid metadata: " + meta); } } else if (corpusType.equals(Type.cie)) { Matcher m2 = metaPattern.matcher(meta); if (m2.matches()) { String textId = m2.group(1); if (!textId.equals(textMeta2)) { textIdMeta2++; textMeta2 = textId; } return textIdMeta2; } else { throw new RuntimeException("Invalid metadata: " + meta); } } else if (corpusType.equals(Type.ama)) { Matcher m2 = metaPattern.matcher(meta); if (m2.matches()) { return Integer.parseInt(m2.group(1)); // currentPara = Integer.parseInt(m.group(2)); } else { throw new RuntimeException("Invalid metadata: " + meta); } } return 0; } }
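ADNameSampleStream maps the corpus NER tags through the HAREM table above and emits NameSample objects whose names are typed Spans over the token sequence. A sketch of direct use, with a placeholder corpus path and encoding:

import java.io.File;
import java.io.IOException;
import java.util.Arrays;

import opennlp.tools.formats.ad.ADNameSampleStream;
import opennlp.tools.namefind.NameSample;
import opennlp.tools.util.MarkableFileInputStreamFactory;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.PlainTextByLineStream;
import opennlp.tools.util.Span;

public class ADNameExample {
  public static void main(String[] args) throws IOException {
    ObjectStream<String> lines = new PlainTextByLineStream(
        new MarkableFileInputStreamFactory(new File("corpus/amazonia.ad")), "ISO-8859-1");
    // true: split hyphenated tokens, e.g. "carros-monstro" -> "carros" "-" "monstro"
    ADNameSampleStream samples = new ADNameSampleStream(lines, true);
    NameSample sample;
    while ((sample = samples.read()) != null) {
      String[] tokens = sample.getSentence();
      for (Span name : sample.getNames()) {
        System.out.println(name.getType() + ": " + String.join(" ",
            Arrays.copyOfRange(tokens, name.getStart(), name.getEnd())));
      }
    }
    samples.close();
  }
}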
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/ad/ADNameSampleStreamFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.ad; import java.io.File; import java.io.IOException; import java.nio.charset.Charset; import opennlp.tools.cmdline.ArgumentParser; import opennlp.tools.cmdline.ArgumentParser.OptionalParameter; import opennlp.tools.cmdline.ArgumentParser.ParameterDescription; import opennlp.tools.cmdline.CmdLineUtil; import opennlp.tools.cmdline.StreamFactoryRegistry; import opennlp.tools.formats.LanguageSampleStreamFactory; import opennlp.tools.namefind.NameSample; import opennlp.tools.util.InputStreamFactory; import opennlp.tools.util.ObjectStream; import opennlp.tools.util.PlainTextByLineStream; /** * A Factory to create a Arvores Deitadas NameSampleDataStream from the command line * utility. * <p> * <b>Note:</b> Do not use this class, internal use only! */ public class ADNameSampleStreamFactory extends LanguageSampleStreamFactory<NameSample> { interface Parameters { //all have to be repeated, because encoding is not optional, //according to the check if (encoding == null) { below (now removed) @ParameterDescription(valueName = "charsetName", description = "encoding for reading and writing text, if absent the system default is used.") Charset getEncoding(); @ParameterDescription(valueName = "sampleData", description = "data to be used, usually a file name.") File getData(); @ParameterDescription(valueName = "split", description = "if true all hyphenated tokens will be separated (default true)") @OptionalParameter(defaultValue = "true") Boolean getSplitHyphenatedTokens(); @ParameterDescription(valueName = "language", description = "language which is being processed.") String getLang(); } public static void registerFactory() { StreamFactoryRegistry.registerFactory(NameSample.class, "ad", new ADNameSampleStreamFactory(Parameters.class)); } protected <P> ADNameSampleStreamFactory(Class<P> params) { super(params); } public ObjectStream<NameSample> create(String[] args) { Parameters params = ArgumentParser.parse(args, Parameters.class); language = params.getLang(); InputStreamFactory sampleDataIn = CmdLineUtil.createInputStreamFactory(params.getData()); ObjectStream<String> lineStream = null; try { lineStream = new PlainTextByLineStream(sampleDataIn, params.getEncoding()); } catch (IOException ex) { CmdLineUtil.handleCreateObjectStreamError(ex); } return new ADNameSampleStream(lineStream, params.getSplitHyphenatedTokens()); } }
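The name-sample factory is registered under the same "ad" format name, keyed by sample class in the registry. A sketch; argument spellings are assumed from the getters, the optional split flag is left at its default (true), and the data path is a placeholder:

import java.io.IOException;

import opennlp.tools.cmdline.StreamFactoryRegistry;
import opennlp.tools.formats.ad.ADNameSampleStreamFactory;
import opennlp.tools.namefind.NameSample;
import opennlp.tools.util.ObjectStream;

public class ADNameFactoryExample {
  public static void main(String[] args) throws IOException {
    ADNameSampleStreamFactory.registerFactory();
    ObjectStream<NameSample> samples = StreamFactoryRegistry
        .getFactory(NameSample.class, "ad")
        .create(new String[] {"-lang", "pt", "-encoding", "ISO-8859-1",
            "-data", "corpus/amazonia.ad"});
    NameSample sample;
    while ((sample = samples.read()) != null) {
      System.out.println(sample);
    }
    samples.close();
  }
}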
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/ad/ADPOSSampleStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.ad; import java.io.IOException; import java.io.InputStream; import java.io.UnsupportedEncodingException; import java.util.ArrayList; import java.util.List; import java.util.StringTokenizer; import opennlp.tools.formats.ad.ADSentenceStream.Sentence; import opennlp.tools.formats.ad.ADSentenceStream.SentenceParser.Leaf; import opennlp.tools.formats.ad.ADSentenceStream.SentenceParser.Node; import opennlp.tools.formats.ad.ADSentenceStream.SentenceParser.TreeElement; import opennlp.tools.postag.POSSample; import opennlp.tools.util.InputStreamFactory; import opennlp.tools.util.ObjectStream; import opennlp.tools.util.PlainTextByLineStream; /** * <b>Note:</b> Do not use this class, internal use only! */ public class ADPOSSampleStream implements ObjectStream<POSSample> { private final ObjectStream<ADSentenceStream.Sentence> adSentenceStream; private boolean expandME; private boolean isIncludeFeatures; /** * Creates a new {@link POSSample} stream from a line stream, i.e. * {@link ObjectStream}&lt;{@link String}&gt;, that could be a * {@link PlainTextByLineStream} object. 
* * @param lineStream * a stream of lines as {@link String} * @param expandME * if true will expand the multiword expressions, each word of the * expression will have the POS Tag that was attributed to the * expression plus the prefix B- or I- (CONLL convention) * @param includeFeatures * if true will combine the POS Tag with the feature tags */ public ADPOSSampleStream(ObjectStream<String> lineStream, boolean expandME, boolean includeFeatures) { this.adSentenceStream = new ADSentenceStream(lineStream); this.expandME = expandME; this.isIncludeFeatures = includeFeatures; } /** * Creates a new {@link POSSample} stream from a {@link InputStream} * * @param in * the Corpus {@link InputStream} * @param charsetName * the charset of the Arvores Deitadas Corpus * @param expandME * if true will expand the multiword expressions, each word of the * expression will have the POS Tag that was attributed to the * expression plus the prefix B- or I- (CONLL convention) * @param includeFeatures * if true will combine the POS Tag with the feature tags */ public ADPOSSampleStream(InputStreamFactory in, String charsetName, boolean expandME, boolean includeFeatures) throws IOException { try { this.adSentenceStream = new ADSentenceStream(new PlainTextByLineStream(in, charsetName)); this.expandME = expandME; this.isIncludeFeatures = includeFeatures; } catch (UnsupportedEncodingException e) { // UTF-8 is available on all JVMs, will never happen throw new IllegalStateException(e); } } public POSSample read() throws IOException { Sentence paragraph; while ((paragraph = this.adSentenceStream.read()) != null) { Node root = paragraph.getRoot(); List<String> sentence = new ArrayList<>(); List<String> tags = new ArrayList<>(); process(root, sentence, tags); return new POSSample(sentence, tags); } return null; } private void process(Node node, List<String> sentence, List<String> tags) { if (node != null) { for (TreeElement element : node.getElements()) { if (element.isLeaf()) { processLeaf((Leaf) element, sentence, tags); } else { process((Node) element, sentence, tags); } } } } private void processLeaf(Leaf leaf, List<String> sentence, List<String> tags) { if (leaf != null) { String lexeme = leaf.getLexeme(); String tag = leaf.getFunctionalTag(); if (tag == null) { tag = leaf.getLexeme(); } if (isIncludeFeatures && leaf.getMorphologicalTag() != null) { tag += " " + leaf.getMorphologicalTag(); } tag = tag.replaceAll("\\s+", "="); if (tag == null) tag = lexeme; if (expandME && lexeme.contains("_")) { StringTokenizer tokenizer = new StringTokenizer(lexeme, "_"); if (tokenizer.countTokens() > 0) { List<String> toks = new ArrayList<>(tokenizer.countTokens()); List<String> tagsWithCont = new ArrayList<>( tokenizer.countTokens()); toks.add(tokenizer.nextToken()); tagsWithCont.add("B-" + tag); while (tokenizer.hasMoreTokens()) { toks.add(tokenizer.nextToken()); tagsWithCont.add("I-" + tag); } sentence.addAll(toks); tags.addAll(tagsWithCont); } else { sentence.add(lexeme); tags.add(tag); } } else { sentence.add(lexeme); tags.add(tag); } } } public void reset() throws IOException, UnsupportedOperationException { adSentenceStream.reset(); } public void close() throws IOException { adSentenceStream.close(); } }
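ADPOSSampleStream emits POSSample objects; expandME splits multiword expressions into B-/I- prefixed tokens, and includeFeatures appends the morphological tag to the POS tag. A sketch with a placeholder path and encoding:

import java.io.File;
import java.io.IOException;

import opennlp.tools.formats.ad.ADPOSSampleStream;
import opennlp.tools.postag.POSSample;
import opennlp.tools.util.MarkableFileInputStreamFactory;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.PlainTextByLineStream;

public class ADPOSExample {
  public static void main(String[] args) throws IOException {
    ObjectStream<String> lines = new PlainTextByLineStream(
        new MarkableFileInputStreamFactory(new File("corpus/bosque.ad")), "ISO-8859-1");
    // expandME = true: split multiword expressions into B-/I- tagged tokens;
    // includeFeatures = false: keep only the functional tag, drop morphology.
    ADPOSSampleStream samples = new ADPOSSampleStream(lines, true, false);
    POSSample sample;
    while ((sample = samples.read()) != null) {
      System.out.println(sample);   // token_tag pairs, one sentence per sample
    }
    samples.close();
  }
}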
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/ad/ADPOSSampleStreamFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.ad; import java.io.File; import java.io.IOException; import java.nio.charset.Charset; import opennlp.tools.cmdline.ArgumentParser; import opennlp.tools.cmdline.ArgumentParser.OptionalParameter; import opennlp.tools.cmdline.ArgumentParser.ParameterDescription; import opennlp.tools.cmdline.CmdLineUtil; import opennlp.tools.cmdline.StreamFactoryRegistry; import opennlp.tools.formats.LanguageSampleStreamFactory; import opennlp.tools.postag.POSSample; import opennlp.tools.util.InputStreamFactory; import opennlp.tools.util.ObjectStream; import opennlp.tools.util.PlainTextByLineStream; /** * <b>Note:</b> Do not use this class, internal use only! */ public class ADPOSSampleStreamFactory extends LanguageSampleStreamFactory<POSSample> { interface Parameters { @ParameterDescription(valueName = "charsetName", description = "encoding for reading and writing text, if absent the system default is used.") Charset getEncoding(); @ParameterDescription(valueName = "sampleData", description = "data to be used, usually a file name.") File getData(); @ParameterDescription(valueName = "language", description = "language which is being processed.") String getLang(); @ParameterDescription(valueName = "expandME", description = "expand multiword expressions.") @OptionalParameter(defaultValue = "false") Boolean getExpandME(); @ParameterDescription(valueName = "includeFeatures", description = "combine POS Tags with word features, like number and gender.") @OptionalParameter(defaultValue = "false") Boolean getIncludeFeatures(); } public static void registerFactory() { StreamFactoryRegistry.registerFactory(POSSample.class, "ad", new ADPOSSampleStreamFactory(Parameters.class)); } protected <P> ADPOSSampleStreamFactory(Class<P> params) { super(params); } public ObjectStream<POSSample> create(String[] args) { Parameters params = ArgumentParser.parse(args, Parameters.class); language = params.getLang(); InputStreamFactory sampleDataIn = CmdLineUtil.createInputStreamFactory(params.getData()); ObjectStream<String> lineStream = null; try { lineStream = new PlainTextByLineStream(sampleDataIn, params.getEncoding()); } catch (IOException ex) { CmdLineUtil.handleCreateObjectStreamError(ex); } return new ADPOSSampleStream(lineStream, params.getExpandME(), params.getIncludeFeatures()); } }
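The POS factory exposes the same stream through the registry, with the optional expandME and includeFeatures parameters. A sketch; the boolean flag spellings and value format are assumptions based on ArgumentParser conventions:

import java.io.IOException;

import opennlp.tools.cmdline.StreamFactoryRegistry;
import opennlp.tools.formats.ad.ADPOSSampleStreamFactory;
import opennlp.tools.postag.POSSample;
import opennlp.tools.util.ObjectStream;

public class ADPOSFactoryExample {
  public static void main(String[] args) throws IOException {
    ADPOSSampleStreamFactory.registerFactory();
    // Optional flags default to false; spellings assumed from the getter names.
    ObjectStream<POSSample> samples = StreamFactoryRegistry
        .getFactory(POSSample.class, "ad")
        .create(new String[] {"-lang", "pt", "-encoding", "ISO-8859-1",
            "-data", "corpus/bosque.ad", "-expandME", "true",
            "-includeFeatures", "false"});
    POSSample sample;
    while ((sample = samples.read()) != null) {
      System.out.println(sample);
    }
    samples.close();
  }
}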
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/ad/ADSentenceSampleStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.ad; import java.io.FileInputStream; import java.io.IOException; import java.io.UnsupportedEncodingException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.regex.Matcher; import java.util.regex.Pattern; import opennlp.tools.formats.ad.ADSentenceStream.Sentence; import opennlp.tools.sentdetect.SentenceSample; import opennlp.tools.sentdetect.lang.Factory; import opennlp.tools.util.InputStreamFactory; import opennlp.tools.util.ObjectStream; import opennlp.tools.util.PlainTextByLineStream; import opennlp.tools.util.Span; /** * <b>Note:</b> Do not use this class, internal use only! */ public class ADSentenceSampleStream implements ObjectStream<SentenceSample> { private final ObjectStream<ADSentenceStream.Sentence> adSentenceStream; private int text = -1; private int para = -1; private boolean isSameText; private boolean isSamePara; private Sentence sent; private boolean isIncludeTitles = true; private boolean isTitle; private final char[] ptEosCharacters; /** * Creates a new {@link SentenceSample} stream from a line stream, i.e. * {@link ObjectStream}&lt;{@link String}&gt;, that could be a * {@link PlainTextByLineStream} object. * * @param lineStream * a stream of lines as {@link String} * @param includeHeadlines * if true will output the sentences marked as news headlines */ public ADSentenceSampleStream(ObjectStream<String> lineStream, boolean includeHeadlines) { this.adSentenceStream = new ADSentenceStream(lineStream); ptEosCharacters = Factory.ptEosCharacters; Arrays.sort(ptEosCharacters); this.isIncludeTitles = includeHeadlines; } /** * Creates a new {@link SentenceSample} stream from a {@link FileInputStream} * * @param in * input stream from the corpus * @param charsetName * the charset to use while reading the corpus * @param includeHeadlines * if true will output the sentences marked as news headlines */ public ADSentenceSampleStream(InputStreamFactory in, String charsetName, boolean includeHeadlines) throws IOException { try { this.adSentenceStream = new ADSentenceStream(new PlainTextByLineStream( in, charsetName)); } catch (UnsupportedEncodingException e) { // UTF-8 is available on all JVMs, will never happen throw new IllegalStateException(e); } ptEosCharacters = Factory.ptEosCharacters; Arrays.sort(ptEosCharacters); this.isIncludeTitles = includeHeadlines; } // The Arvores Deitadas Corpus has information about texts and paragraphs. 
public SentenceSample read() throws IOException { if (sent == null) { sent = this.adSentenceStream.read(); updateMeta(); if (sent == null) { return null; } } StringBuilder document = new StringBuilder(); List<Span> sentences = new ArrayList<>(); do { do { if (!isTitle || (isTitle && isIncludeTitles)) { if (hasPunctuation(sent.getText())) { int start = document.length(); document.append(sent.getText()); sentences.add(new Span(start, document.length())); document.append(" "); } } sent = this.adSentenceStream.read(); updateMeta(); } while (isSamePara); // break; // got one paragraph! } while (isSameText); String doc; if (document.length() > 0) { doc = document.substring(0, document.length() - 1); } else { doc = document.toString(); } return new SentenceSample(doc, sentences.toArray(new Span[sentences.size()])); } private boolean hasPunctuation(String text) { text = text.trim(); if (text.length() > 0) { char lastChar = text.charAt(text.length() - 1); if (Arrays.binarySearch(ptEosCharacters, lastChar) >= 0) { return true; } } return false; } // there are some different types of metadata depending on the corpus. // todo: merge this patterns private Pattern meta1 = Pattern .compile("^(?:[a-zA-Z\\-]*(\\d+)).*?p=(\\d+).*"); private void updateMeta() { if (this.sent != null) { String meta = this.sent.getMetadata(); Matcher m = meta1.matcher(meta); int currentText; int currentPara; if (m.matches()) { currentText = Integer.parseInt(m.group(1)); currentPara = Integer.parseInt(m.group(2)); } else { throw new RuntimeException("Invalid metadata: " + meta); } isSamePara = isSameText = false; if (currentText == text) isSameText = true; if (isSameText && currentPara == para) isSamePara = true; isTitle = meta.contains("title"); text = currentText; para = currentPara; } else { this.isSamePara = this.isSameText = false; } } public void reset() throws IOException, UnsupportedOperationException { adSentenceStream.reset(); } public void close() throws IOException { adSentenceStream.close(); } }
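ADSentenceSampleStream aggregates all paragraphs of one corpus text into a single SentenceSample, recording each sentence as a Span over the rebuilt document string. A sketch, with placeholder path and encoding; includeHeadlines = false skips sentences marked as titles:

import java.io.File;
import java.io.IOException;

import opennlp.tools.formats.ad.ADSentenceSampleStream;
import opennlp.tools.sentdetect.SentenceSample;
import opennlp.tools.util.MarkableFileInputStreamFactory;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.PlainTextByLineStream;
import opennlp.tools.util.Span;

public class ADSentenceSampleExample {
  public static void main(String[] args) throws IOException {
    ObjectStream<String> lines = new PlainTextByLineStream(
        new MarkableFileInputStreamFactory(new File("corpus/amazonia.ad")), "ISO-8859-1");
    ADSentenceSampleStream samples = new ADSentenceSampleStream(lines, false);
    SentenceSample sample;
    while ((sample = samples.read()) != null) {
      // One sample per text; sentence boundaries are spans over the document.
      for (Span sentence : sample.getSentences()) {
        System.out.println(sentence.getCoveredText(sample.getDocument()));
      }
    }
    samples.close();
  }
}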
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/ad/ADSentenceSampleStreamFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.ad; import java.io.File; import java.io.IOException; import java.nio.charset.Charset; import opennlp.tools.cmdline.ArgumentParser; import opennlp.tools.cmdline.ArgumentParser.OptionalParameter; import opennlp.tools.cmdline.ArgumentParser.ParameterDescription; import opennlp.tools.cmdline.CmdLineUtil; import opennlp.tools.cmdline.StreamFactoryRegistry; import opennlp.tools.formats.LanguageSampleStreamFactory; import opennlp.tools.sentdetect.SentenceSample; import opennlp.tools.util.InputStreamFactory; import opennlp.tools.util.ObjectStream; import opennlp.tools.util.PlainTextByLineStream; /** * <b>Note:</b> Do not use this class, internal use only! */ public class ADSentenceSampleStreamFactory extends LanguageSampleStreamFactory<SentenceSample> { interface Parameters { @ParameterDescription(valueName = "charsetName", description = "encoding for reading and writing text.") Charset getEncoding(); @ParameterDescription(valueName = "sampleData", description = "data to be used, usually a file name.") File getData(); @ParameterDescription(valueName = "language", description = "language which is being processed.") String getLang(); @ParameterDescription(valueName = "includeTitles", description = "if true will include sentences marked as headlines.") @OptionalParameter(defaultValue = "false") Boolean getIncludeTitles(); } public static void registerFactory() { StreamFactoryRegistry.registerFactory(SentenceSample.class, "ad", new ADSentenceSampleStreamFactory(Parameters.class)); } protected <P> ADSentenceSampleStreamFactory(Class<P> params) { super(params); } public ObjectStream<SentenceSample> create(String[] args) { Parameters params = ArgumentParser.parse(args, Parameters.class); language = params.getLang(); boolean includeTitle = params.getIncludeTitles(); InputStreamFactory sampleDataIn = CmdLineUtil.createInputStreamFactory(params.getData()); ObjectStream<String> lineStream = null; try { lineStream = new PlainTextByLineStream(sampleDataIn, params.getEncoding()); } catch (IOException ex) { CmdLineUtil.handleCreateObjectStreamError(ex); } return new ADSentenceSampleStream(lineStream, includeTitle); } }
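The sentence-sample factory follows the same registry pattern, adding the optional includeTitles switch. A sketch; flag spellings and the corpus path are assumptions:

import java.io.IOException;

import opennlp.tools.cmdline.StreamFactoryRegistry;
import opennlp.tools.formats.ad.ADSentenceSampleStreamFactory;
import opennlp.tools.sentdetect.SentenceSample;
import opennlp.tools.util.ObjectStream;

public class ADSentenceFactoryExample {
  public static void main(String[] args) throws IOException {
    ADSentenceSampleStreamFactory.registerFactory();
    // "-includeTitles true" keeps sentences marked as headlines (default false).
    ObjectStream<SentenceSample> samples = StreamFactoryRegistry
        .getFactory(SentenceSample.class, "ad")
        .create(new String[] {"-lang", "pt", "-encoding", "ISO-8859-1",
            "-data", "corpus/amazonia.ad", "-includeTitles", "true"});
    SentenceSample sample;
    while ((sample = samples.read()) != null) {
      System.out.println(sample.getSentences().length + " sentences");
    }
    samples.close();
  }
}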
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/ad/ADSentenceStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.ad; import java.io.BufferedReader; import java.io.IOException; import java.io.StringReader; import java.util.ArrayList; import java.util.List; import java.util.Stack; import java.util.regex.Matcher; import java.util.regex.Pattern; import opennlp.tools.formats.ad.ADSentenceStream.SentenceParser.Node; import opennlp.tools.util.FilterObjectStream; import opennlp.tools.util.ObjectStream; /** * Stream filter which merges text lines into sentences, following the Arvores * Deitadas syntax. * <p> * Information about the format:<br> * Susana Afonso. * "Árvores deitadas: Descrição do formato e das opções de análise na Floresta Sintáctica" * .<br> * 12 de Fevereiro de 2006. * http://www.linguateca.pt/documentos/Afonso2006ArvoresDeitadas.pdf * <p> * <b>Note:</b> Do not use this class, internal use only! */ public class ADSentenceStream extends FilterObjectStream<String, ADSentenceStream.Sentence> { public static class Sentence { private String text; private Node root; private String metadata; public static final String META_LABEL_FINAL = "final"; public String getText() { return text; } public void setText(String text) { this.text = text; } public Node getRoot() { return root; } public void setRoot(Node root) { this.root = root; } public void setMetadata(String metadata) { this.metadata = metadata; } public String getMetadata() { return metadata; } } /** * Parses a sample of AD corpus. A sentence in AD corpus is represented by a * Tree. In this class we declare some types to represent that tree. Today we get only * the first alternative (A1). */ public static class SentenceParser { private Pattern nodePattern = Pattern .compile("([=-]*)([^:=]+:[^\\(\\s]+)(\\(([^\\)]+)\\))?\\s*(?:(\\((<.+>)\\))*)\\s*$"); private Pattern leafPattern = Pattern .compile("^([=-]*)([^:=]+):([^\\(\\s]+)\\([\"'](.+)[\"']\\s*((?:<.+>)*)\\s*([^\\)]+)?\\)\\s+(.+)"); private Pattern bizarreLeafPattern = Pattern .compile("^([=-]*)([^:=]+=[^\\(\\s]+)\\(([\"'].+[\"'])?\\s*([^\\)]+)?\\)\\s+(.+)"); private Pattern punctuationPattern = Pattern.compile("^(=*)(\\W+)$"); private String text,meta; /** * Parse the sentence */ public Sentence parse(String sentenceString, int para, boolean isTitle, boolean isBox) { BufferedReader reader = new BufferedReader(new StringReader(sentenceString)); Sentence sentence = new Sentence(); Node root = new Node(); try { // first line is <s ...> String line = reader.readLine(); boolean useSameTextAndMeta = false; // to handle cases where there are diff sug of parse (&&) // should find the source source while (!line.startsWith("SOURCE")) { if (line.equals("&&")) { // same sentence again! 
useSameTextAndMeta = true; break; } line = reader.readLine(); if (line == null) { return null; } } if (!useSameTextAndMeta) { // got source, get the metadata String metaFromSource = line.substring(7); line = reader.readLine(); // we should have the plain sentence // we remove the first token int start = line.indexOf(" "); text = line.substring(start + 1).trim(); text = fixPunctuation(text); String titleTag = ""; if (isTitle) titleTag = " title"; String boxTag = ""; if (isBox) boxTag = " box"; if (start > 0) { meta = line.substring(0, start) + " p=" + para + titleTag + boxTag + metaFromSource; } } sentence.setText(text); sentence.setMetadata(meta); // now we look for the root node // skip lines starting with ### line = reader.readLine(); while (line != null && line.startsWith("###")) { line = reader.readLine(); } // got the root. Add it to the stack Stack<Node> nodeStack = new Stack<>(); root.setSyntacticTag("ROOT"); root.setLevel(0); nodeStack.add(root); /* now we have to take care of the lastLevel. Every time it raises, we will add the leaf to the node at the top. If it decreases, we remove the top. */ while (line != null && line.length() != 0 && !line.startsWith("</s>") && !line.equals("&&")) { TreeElement element = this.getElement(line); if (element != null) { // The idea here is to keep a stack of nodes that are candidates for // parenting the following elements (nodes and leafs). // 1) When we get a new element, we check its level and remove from // the top of the stack nodes that are brothers or nephews. while (!nodeStack.isEmpty() && element.getLevel() > 0 && element.getLevel() <= nodeStack.peek().getLevel()) { Node nephew = nodeStack.pop(); } if (element.isLeaf() ) { // 2a) If the element is a leaf and there is no parent candidate, // add it as a daughter of the root. if (nodeStack.isEmpty()) { root.addElement(element); } else { // 2b) There are parent candidates. // look for the node with the correct level Node peek = nodeStack.peek(); if (element.level == 0) { // add to the root nodeStack.firstElement().addElement(element); } else { Node parent = null; int index = nodeStack.size() - 1; while (parent == null) { if (peek.getLevel() < element.getLevel()) { parent = peek; } else { index--; if (index > -1) { peek = nodeStack.get(index); } else { parent = nodeStack.firstElement(); } } } parent.addElement(element); } } } else { // 3) Check if the element that is at the top of the stack is this // node parent, if yes add it as a son if (!nodeStack.isEmpty() && nodeStack.peek().getLevel() < element.getLevel()) { nodeStack.peek().addElement(element); } else { System.err.println("should not happen!"); } // 4) Add it to the stack so it is a parent candidate. nodeStack.push((Node) element); } } line = reader.readLine(); } } catch (Exception e) { System.err.println(sentenceString); e.printStackTrace(); return sentence; } // second line should be SOURCE sentence.setRoot(root); return sentence; } private String fixPunctuation(String text) { text = text.replaceAll("\\»\\s+\\.", "»."); text = text.replaceAll("\\»\\s+\\,", "»,"); return text; } /** * Parse a tree element from a AD line * * @param line * the AD line * @return the tree element */ public TreeElement getElement(String line) { // Note: all levels are higher than 1, because 0 is reserved for the root. 
// try node Matcher nodeMatcher = nodePattern.matcher(line); if (nodeMatcher.matches()) { int level = nodeMatcher.group(1).length() + 1; String syntacticTag = nodeMatcher.group(2); Node node = new Node(); node.setLevel(level); node.setSyntacticTag(syntacticTag); return node; } Matcher leafMatcher = leafPattern.matcher(line); if (leafMatcher.matches()) { int level = leafMatcher.group(1).length() + 1; String syntacticTag = leafMatcher.group(2); String funcTag = leafMatcher.group(3); String lemma = leafMatcher.group(4); String secondaryTag = leafMatcher.group(5); String morphologicalTag = leafMatcher.group(6); String lexeme = leafMatcher.group(7); Leaf leaf = new Leaf(); leaf.setLevel(level); leaf.setSyntacticTag(syntacticTag); leaf.setFunctionalTag(funcTag); leaf.setSecondaryTag(secondaryTag); leaf.setMorphologicalTag(morphologicalTag); leaf.setLexeme(lexeme); leaf.setLemma(lemma); return leaf; } Matcher punctuationMatcher = punctuationPattern.matcher(line); if (punctuationMatcher.matches()) { int level = punctuationMatcher.group(1).length() + 1; String lexeme = punctuationMatcher.group(2); Leaf leaf = new Leaf(); leaf.setLevel(level); leaf.setLexeme(lexeme); return leaf; } // process the bizarre cases if (line.equals("_") || line.startsWith("<lixo") || line.startsWith("pause")) { return null; } if (line.startsWith("=")) { Matcher bizarreLeafMatcher = bizarreLeafPattern.matcher(line); if (bizarreLeafMatcher.matches()) { int level = bizarreLeafMatcher.group(1).length() + 1; String syntacticTag = bizarreLeafMatcher.group(2); String lemma = bizarreLeafMatcher.group(3); String morphologicalTag = bizarreLeafMatcher.group(4); String lexeme = bizarreLeafMatcher.group(5); Leaf leaf = new Leaf(); leaf.setLevel(level); leaf.setSyntacticTag(syntacticTag); leaf.setMorphologicalTag(morphologicalTag); leaf.setLexeme(lexeme); if (lemma != null) { if (lemma.length() > 2) { lemma = lemma.substring(1, lemma.length() - 1); } leaf.setLemma(lemma); } return leaf; } else { int level = line.lastIndexOf("=") + 1; String lexeme = line.substring(level + 1); if (lexeme.matches("\\w.*?[\\.<>].*")) { return null; } Leaf leaf = new Leaf(); leaf.setLevel(level + 1); leaf.setSyntacticTag(""); leaf.setMorphologicalTag(""); leaf.setLexeme(lexeme); return leaf; } } System.err.println("Couldn't parse leaf: " + line); Leaf leaf = new Leaf(); leaf.setLevel(1); leaf.setSyntacticTag(""); leaf.setMorphologicalTag(""); leaf.setLexeme(line); return leaf; } /** Represents a tree element, Node or Leaf */ public abstract class TreeElement { private String syntacticTag; private String morphologicalTag; private int level; public boolean isLeaf() { return false; } public void setSyntacticTag(String syntacticTag) { this.syntacticTag = syntacticTag; } public String getSyntacticTag() { return syntacticTag; } public void setLevel(int level) { this.level = level; } public int getLevel() { return level; } public void setMorphologicalTag(String morphologicalTag) { this.morphologicalTag = morphologicalTag; } public String getMorphologicalTag() { return morphologicalTag; } } /** Represents the AD node */ public class Node extends TreeElement { private List<TreeElement> elems = new ArrayList<>(); public void addElement(TreeElement element) { elems.add(element); } public TreeElement[] getElements() { return elems.toArray(new TreeElement[elems.size()]); } @Override public String toString() { StringBuilder sb = new StringBuilder(); // print itself and its children for (int i = 0; i < this.getLevel(); i++) { sb.append("="); } 
sb.append(this.getSyntacticTag()); if (this.getMorphologicalTag() != null) { sb.append(this.getMorphologicalTag()); } sb.append("\n"); for (TreeElement element : elems) { sb.append(element.toString()); } return sb.toString(); } } /** Represents the AD leaf */ public class Leaf extends TreeElement { private String word; private String lemma; private String secondaryTag; private String functionalTag; @Override public boolean isLeaf() { return true; } public void setFunctionalTag(String funcTag) { this.functionalTag = funcTag; } public String getFunctionalTag() { return this.functionalTag; } public void setSecondaryTag(String secondaryTag) { this.secondaryTag = secondaryTag; } public String getSecondaryTag() { return this.secondaryTag; } public void setLexeme(String lexeme) { this.word = lexeme; } public String getLexeme() { return word; } private String emptyOrString(String value, String prefix, String suffix) { if (value == null) return ""; return prefix + value + suffix; } @Override public String toString() { StringBuilder sb = new StringBuilder(); // print itself and its children for (int i = 0; i < this.getLevel(); i++) { sb.append("="); } if (this.getSyntacticTag() != null) { sb.append(this.getSyntacticTag()).append(":") .append(getFunctionalTag()).append("(") .append(emptyOrString(getLemma(), "'", "' ")) .append(emptyOrString(getSecondaryTag(), "", " ")) .append(this.getMorphologicalTag()).append(") "); } sb.append(this.word).append("\n"); return sb.toString(); } public void setLemma(String lemma) { this.lemma = lemma; } public String getLemma() { return lemma; } } } /** * The start sentence pattern */ private static final Pattern sentStart = Pattern.compile("<s[^>]*>"); /** * The end sentence pattern */ private static final Pattern sentEnd = Pattern.compile("</s>"); private static final Pattern extEnd = Pattern.compile("</ext>"); /** * The start sentence pattern */ private static final Pattern titleStart = Pattern.compile("<t[^>]*>"); /** * The end sentence pattern */ private static final Pattern titleEnd = Pattern.compile("</t>"); /** * The start sentence pattern */ private static final Pattern boxStart = Pattern.compile("<caixa[^>]*>"); /** * The end sentence pattern */ private static final Pattern boxEnd = Pattern.compile("</caixa>"); /** * The start sentence pattern */ private static final Pattern paraStart = Pattern.compile("<p[^>]*>"); /** * The start sentence pattern */ private static final Pattern textStart = Pattern.compile("<ext[^>]*>"); private SentenceParser parser; private int paraID = 0; private boolean isTitle = false; private boolean isBox = false; public ADSentenceStream(ObjectStream<String> lineStream) { super(lineStream); parser = new SentenceParser(); } public Sentence read() throws IOException { StringBuilder sentence = new StringBuilder(); boolean sentenceStarted = false; while (true) { String line = samples.read(); if (line != null) { if (sentenceStarted) { if (sentEnd.matcher(line).matches() || extEnd.matcher(line).matches()) { sentenceStarted = false; } else if (!line.startsWith("A1")) { sentence.append(line).append('\n'); } } else { if (sentStart.matcher(line).matches()) { sentenceStarted = true; } else if (paraStart.matcher(line).matches()) { paraID++; } else if (titleStart.matcher(line).matches()) { isTitle = true; } else if (titleEnd.matcher(line).matches()) { isTitle = false; } else if (textStart.matcher(line).matches()) { paraID = 0; } else if (boxStart.matcher(line).matches()) { isBox = true; } else if (boxEnd.matcher(line).matches()) { isBox = false; } 
} if (!sentenceStarted && sentence.length() > 0) { return parser.parse(sentence.toString(), paraID, isTitle, isBox); } } else { // handle end of file if (sentenceStarted) { if (sentence.length() > 0) { return parser.parse(sentence.toString(), paraID, isTitle, isBox); } } else { return null; } } } } }
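A minimal usage sketch of the sentence stream above (not part of the library source): the corpus file name and the ISO-8859-1 charset are assumptions about how the Floresta Sintá(c)tica files are commonly distributed, and MarkableFileInputStreamFactory is simply one convenient way to obtain an InputStreamFactory.

import java.io.File;

import opennlp.tools.formats.ad.ADSentenceStream;
import opennlp.tools.formats.ad.ADSentenceStream.Sentence;
import opennlp.tools.util.MarkableFileInputStreamFactory;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.PlainTextByLineStream;

public class ADSentenceStreamExample {

  public static void main(String[] args) throws Exception {
    // Placeholder corpus file; the charset is an assumption, not something the reader enforces.
    ObjectStream<String> lines = new PlainTextByLineStream(
        new MarkableFileInputStreamFactory(new File("FlorestaVirgem_CF_3.0_ad.txt")), "ISO-8859-1");

    ObjectStream<Sentence> sentences = new ADSentenceStream(lines);

    // Each read() returns one parsed <s>...</s> block as a Sentence tree.
    Sentence sentence;
    int count = 0;
    while ((sentence = sentences.read()) != null) {
      if (sentence.getRoot() != null) {
        count++;
      }
    }
    System.out.println("Parsed " + count + " sentences");

    sentences.close();
  }
}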
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/ad/ADTokenSampleStreamFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.ad; import opennlp.tools.cmdline.ArgumentParser; import opennlp.tools.cmdline.StreamFactoryRegistry; import opennlp.tools.cmdline.params.DetokenizerParameter; import opennlp.tools.formats.DetokenizerSampleStreamFactory; import opennlp.tools.formats.convert.NameToTokenSampleStream; import opennlp.tools.namefind.NameSample; import opennlp.tools.tokenize.TokenSample; import opennlp.tools.util.ObjectStream; /** * <b>Note:</b> Do not use this class, internal use only! */ public class ADTokenSampleStreamFactory extends DetokenizerSampleStreamFactory<TokenSample> { interface Parameters extends ADNameSampleStreamFactory.Parameters, DetokenizerParameter { } public static void registerFactory() { StreamFactoryRegistry.registerFactory(TokenSample.class, "ad", new ADTokenSampleStreamFactory(Parameters.class)); } protected <P> ADTokenSampleStreamFactory(Class<P> params) { super(params); } public ObjectStream<TokenSample> create(String[] args) { Parameters params = ArgumentParser.parse(args, Parameters.class); ObjectStream<NameSample> samples = StreamFactoryRegistry.getFactory( NameSample.class, "ad").create( ArgumentParser.filter(args, ADNameSampleStreamFactory.Parameters.class)); return new NameToTokenSampleStream(createDetokenizer(params), samples); } }
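A hedged sketch of how this token-sample factory is typically reached through the stream factory registry; the args array is only a pass-through placeholder for the flags declared by ADNameSampleStreamFactory.Parameters and DetokenizerParameter, and it assumes the corresponding "ad" NameSample factory is registered as well (the registry normally takes care of the built-in formats).

import opennlp.tools.cmdline.StreamFactoryRegistry;
import opennlp.tools.formats.ad.ADTokenSampleStreamFactory;
import opennlp.tools.tokenize.TokenSample;
import opennlp.tools.util.ObjectStream;

public class ADTokenSampleStreamFactoryExample {

  // args is forwarded unchanged, so no command-line flags are hard-coded here.
  public static ObjectStream<TokenSample> createAdTokenStream(String[] args) {
    ADTokenSampleStreamFactory.registerFactory();
    return StreamFactoryRegistry.getFactory(TokenSample.class, "ad").create(args);
  }
}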
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/ad/PortugueseContractionUtility.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.ad; import java.util.Collections; import java.util.HashMap; import java.util.Map; import opennlp.tools.util.StringUtil; /** * Utility class to handle Portuguese contractions. * <p> * Some Corpora splits contractions in its parts, for example, "da" &gt; "de" + * "a", but according to the fase of language processing, NER for instance, we * can't decide if to split a contraction or not, specially because contractions * inside names are not separated, but outside are. * * <p> * <b>Note:</b> Do not use this class, internal use only! */ public class PortugueseContractionUtility { protected static final Map<String, String> CONTRACTIONS; static { Map<String, String> elems = new HashMap<>(); // 103 CONTRACTIONS. elems.put("a+a", "\u00e0"); elems.put("a+as", "\u00e0s"); elems.put("a+aquele", "\u00e0quele"); elems.put("a+aqueles", "\u00e0queles"); elems.put("a+aquela", "\u00e0quela"); elems.put("a+aquelas", "\u00e0quelas"); elems.put("a+aquilo", "\u00e0quilo"); elems.put("a+o", "ao"); elems.put("a+os", "aos"); elems.put("com+mim", "comigo"); elems.put("com+n\u00f2s", "conosco"); elems.put("com+si", "consigo"); elems.put("com+ti", "contigo"); elems.put("com+v\u00f2s", "convosco"); elems.put("de+a\u00ed", "da\u00ed"); elems.put("de+algu\u00e9m", "dalgu\u00e9m"); elems.put("de+algum", "dalgum"); elems.put("de+alguma", "dalguma"); elems.put("de+alguns", "dalguns"); elems.put("de+algumas", "dalgumas"); elems.put("de+ali", "dali"); elems.put("de+aqu\u00e9m", "daqu\u00e9m"); elems.put("de+aquele", "daquele"); elems.put("de+aquela", "daquela"); elems.put("de+aqueles", "daqueles"); elems.put("de+aquelas", "daquelas"); elems.put("de+aqui", "daqui"); elems.put("de+aquilo", "daquilo"); elems.put("de+ele", "dele"); elems.put("de+ela", "dela"); elems.put("de+eles", "deles"); elems.put("de+elas", "delas"); elems.put("de+entre", "dentre"); elems.put("de+esse", "desse"); elems.put("de+essa", "dessa"); elems.put("de+esses", "desses"); elems.put("de+essas", "dessas"); elems.put("de+este", "deste"); elems.put("de+esta", "desta"); elems.put("de+estes", "destes"); elems.put("de+estas", "destas"); elems.put("de+isso", "disso"); elems.put("de+isto", "disto"); elems.put("de+o", "do"); elems.put("de+a", "da"); elems.put("de+os", "dos"); elems.put("de+as", "das"); elems.put("de+outrem", "doutrem"); elems.put("de+outro", "doutro"); elems.put("de+outra", "doutra"); elems.put("de+outros", "doutros"); elems.put("de+outras", "doutras"); elems.put("de+um", "dum"); elems.put("de+uma", "duma"); elems.put("de+uns", "duns"); elems.put("de+umas", "dumas"); elems.put("esse+outro", "essoutro"); elems.put("essa+outra", "essoutra"); elems.put("este+outro", "estoutro"); elems.put("este+outra", "estoutra"); elems.put("ele+o", "lho"); 
elems.put("ele+a", "lha"); elems.put("ele+os", "lhos"); elems.put("ele+as", "lhas"); elems.put("em+algum", "nalgum"); elems.put("em+alguma", "nalguma"); elems.put("em+alguns", "nalguns"); elems.put("em+algumas", "nalgumas"); elems.put("em+aquele", "naquele"); elems.put("em+aquela", "naquela"); elems.put("em+aqueles", "naqueles"); elems.put("em+aquelas", "naquelas"); elems.put("em+aquilo", "naquilo"); elems.put("em+ele", "nele"); elems.put("em+ela", "nela"); elems.put("em+eles", "neles"); elems.put("em+elas", "nelas"); elems.put("em+esse", "nesse"); elems.put("em+essa", "nessa"); elems.put("em+esses", "nesses"); elems.put("em+essas", "nessas"); elems.put("em+este", "neste"); elems.put("em+esta", "nesta"); elems.put("em+estes", "nestes"); elems.put("em+estas", "nestas"); elems.put("em+isso", "nisso"); elems.put("em+isto", "nisto"); elems.put("em+o", "no"); elems.put("em+a", "na"); elems.put("em+os", "nos"); elems.put("em+as", "nas"); elems.put("em+outro", "noutro"); elems.put("em+outra", "noutra"); elems.put("em+outros", "noutros"); elems.put("em+outras", "noutras"); elems.put("em+um", "num"); elems.put("em+uma", "numa"); elems.put("em+uns", "nuns"); elems.put("em+umas", "numas"); elems.put("por+o", "pelo"); elems.put("por+a", "pela"); elems.put("por+os", "pelos"); elems.put("por+as", "pelas"); elems.put("para+a", "pra"); elems.put("para+o", "pro"); elems.put("para+as", "pras"); elems.put("para+os", "pros"); CONTRACTIONS = Collections.unmodifiableMap(elems); } /** * Merges a contraction * * @param left * the left component * @param right * the right component * @return the merged contraction */ public static String toContraction(String left, String right) { String key = left + "+" + right; if (CONTRACTIONS.containsKey(key)) { return CONTRACTIONS.get(key); } else { StringBuilder sb = new StringBuilder(); String[] parts = left.split("_"); for (int i = 0; i < parts.length - 1; i++) { sb.append(parts[i]).append(" "); } key = parts[parts.length - 1] + "+" + right; if (CONTRACTIONS.containsKey(key)) { sb.append(CONTRACTIONS.get(key)); return sb.toString(); } if (right.contains("_")) { parts = right.split("_"); key = left + "+" + parts[0]; if (CONTRACTIONS.containsKey(key)) { sb.append(CONTRACTIONS.get(key)).append(" "); for (int i = 1; i < parts.length; i++) { sb.append(parts[i]).append(" "); } return sb.toString(); } } String leftLower = StringUtil.toLowerCase(parts[parts.length - 1]); key = leftLower + "+" + right; if (CONTRACTIONS.containsKey(key)) { String r = CONTRACTIONS.get(key); String firstChar = r.substring(0, 1); r = StringUtil.toUpperCase(firstChar) + r.substring(1); sb.append(r); return sb.toString(); } } return null; } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/brat/AnnotationConfiguration.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.brat; import java.io.BufferedInputStream; import java.io.BufferedReader; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.nio.charset.StandardCharsets; import java.util.Collections; import java.util.HashMap; import java.util.Map; import opennlp.tools.tokenize.WhitespaceTokenizer; public class AnnotationConfiguration { public static final String SPAN_TYPE = "Span"; public static final String ENTITY_TYPE = "Entity"; public static final String RELATION_TYPE = "Relation"; public static final String ATTRIBUTE_TYPE = "Attribute"; public static final String EVENT_TYPE = "Event"; private final Map<String, String> typeToClassMap; public AnnotationConfiguration(Map<String, String> typeToClassMap) { this.typeToClassMap = Collections.unmodifiableMap(new HashMap<>(typeToClassMap)); } public String getTypeClass(String type) { return typeToClassMap.get(type); } public static AnnotationConfiguration parse(InputStream in) throws IOException { Map<String, String> typeToClassMap = new HashMap<>(); BufferedReader reader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8)); // Note: This only supports entities and relations section String line; String sectionType = null; while ((line = reader.readLine()) != null) { line = line.trim(); if (!line.isEmpty()) { if (!line.startsWith("#")) { if (line.startsWith("[") && line.endsWith("]")) { sectionType = line.substring(line.indexOf('[') + 1, line.indexOf(']')); } else { String typeName = WhitespaceTokenizer.INSTANCE.tokenize(line)[0]; switch (sectionType) { case "entities": typeToClassMap.put(typeName, AnnotationConfiguration.ENTITY_TYPE); break; case "relations": typeToClassMap.put(typeName, AnnotationConfiguration.RELATION_TYPE); break; case "attributes": typeToClassMap.put(typeName, AnnotationConfiguration.ATTRIBUTE_TYPE); break; case "events": typeToClassMap.put(typeName, AnnotationConfiguration.EVENT_TYPE); break; default: break; } } } } } return new AnnotationConfiguration(typeToClassMap); } public static AnnotationConfiguration parse(File annConfigFile) throws IOException { try (InputStream in = new BufferedInputStream(new FileInputStream(annConfigFile))) { return parse(in); } } }
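A brief sketch of loading a brat annotation.conf and querying it; the file path and the "Person" type are placeholders, not values required by the class.

import java.io.File;
import java.io.IOException;

import opennlp.tools.formats.brat.AnnotationConfiguration;

public class AnnotationConfigurationExample {

  public static void main(String[] args) throws IOException {
    AnnotationConfiguration config =
        AnnotationConfiguration.parse(new File("annotation.conf"));

    // Prints e.g. "Entity" if "Person" is listed under [entities], or null if the
    // type is not declared in the configuration file.
    System.out.println(config.getTypeClass("Person"));
  }
}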
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/brat/AttributeAnnotation.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.brat; public class AttributeAnnotation extends BratAnnotation { private final String attachedTo; private final String value; protected AttributeAnnotation(String id, String type, String attachedTo, String value) { super(id, type); this.attachedTo = attachedTo; this.value = value; } public String getAttachedTo() { return attachedTo; } public String getValue() { return value; } @Override public String toString() { return super.toString() + " " + attachedTo + (value != null ? " " + value : ""); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/brat/BratAnnotation.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.brat; import java.util.Objects; public abstract class BratAnnotation { private final String id; private final String type; protected BratAnnotation(String id, String type) { this.id = Objects.requireNonNull(id); this.type = Objects.requireNonNull(type); } public String getId() { return id; } public String getType() { return type; } @Override public String toString() { return id + " " + type; } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/brat/BratAnnotationStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.brat; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.nio.charset.StandardCharsets; import java.util.HashMap; import java.util.Map; import opennlp.tools.tokenize.WhitespaceTokenizer; import opennlp.tools.util.InvalidFormatException; import opennlp.tools.util.ObjectStream; import opennlp.tools.util.Span; /** * Reads the annotations from the brat .ann annotation file. */ public class BratAnnotationStream implements ObjectStream<BratAnnotation> { static abstract class BratAnnotationParser { static final int ID_OFFSET = 0; static final int TYPE_OFFSET = 1; BratAnnotation parse(Span[] tokens, CharSequence line) throws IOException { return null; } protected int parseInt(String intString) throws InvalidFormatException { try { return Integer.parseInt(intString); } catch (NumberFormatException e) { throw new InvalidFormatException(e); } } } static class SpanAnnotationParser extends BratAnnotationParser { private static final int BEGIN_OFFSET = 2; private static final int END_OFFSET = 3; @Override BratAnnotation parse(Span[] values, CharSequence line) throws IOException { if (values.length > 4) { String type = values[BratAnnotationParser.TYPE_OFFSET].getCoveredText(line).toString(); int endOffset = -1; int firstTextTokenIndex = -1; for (int i = END_OFFSET; i < values.length; i++) { if (!values[i].getCoveredText(line).toString().contains(";")) { endOffset = parseInt(values[i].getCoveredText(line).toString()); firstTextTokenIndex = i + 1; break; } } String id = values[BratAnnotationParser.ID_OFFSET].getCoveredText(line).toString(); String coveredText = line.subSequence(values[firstTextTokenIndex].getStart(), values[values.length - 1].getEnd()).toString(); try { return new SpanAnnotation(id, type, new Span(parseInt(values[BEGIN_OFFSET] .getCoveredText(line).toString()), endOffset, type), coveredText); } catch (IllegalArgumentException e) { throw new InvalidFormatException(e); } } else { throw new InvalidFormatException("Line must have at least 5 fields"); } } } static class RelationAnnotationParser extends BratAnnotationParser { private static final int ARG1_OFFSET = 2; private static final int ARG2_OFFSET = 3; private String parseArg(String arg) throws InvalidFormatException { if (arg.length() > 4) { return arg.substring(5).trim(); } else { throw new InvalidFormatException("Failed to parse argument: " + arg); } } @Override BratAnnotation parse(Span[] tokens, CharSequence line) throws IOException { return new RelationAnnotation(tokens[BratAnnotationParser.ID_OFFSET].getCoveredText(line).toString(), tokens[BratAnnotationParser.TYPE_OFFSET].getCoveredText(line).toString(), 
parseArg(tokens[ARG1_OFFSET].getCoveredText(line).toString()), parseArg(tokens[ARG2_OFFSET].getCoveredText(line).toString())); } } static class EventAnnotationParser extends BratAnnotationParser { @Override BratAnnotation parse(Span[] tokens, CharSequence line) throws IOException { String[] typeParts = tokens[TYPE_OFFSET].getCoveredText(line).toString().split(":"); if (typeParts.length != 2) { throw new InvalidFormatException(String.format( "Failed to parse [%s], type part must be in the format type:trigger", line)); } String type = typeParts[0]; String eventTrigger = typeParts[1]; Map<String, String> arguments = new HashMap<>(); for (int i = TYPE_OFFSET + 1; i < tokens.length; i++) { String[] parts = tokens[i].getCoveredText(line).toString().split(":"); if (parts.length != 2) { throw new InvalidFormatException(String.format( "Failed to parse [%s], argument parts must be in form argument:value", line)); } arguments.put(parts[0], parts[1]); } return new EventAnnotation(tokens[ID_OFFSET].getCoveredText(line).toString(),type, eventTrigger, arguments); } } static class AttributeAnnotationParser extends BratAnnotationParser { private static final int ATTACHED_TO_OFFSET = 2; private static final int VALUE_OFFSET = 3; @Override BratAnnotation parse(Span[] values, CharSequence line) throws IOException { if (values.length == 3 || values.length == 4) { String value = null; if (values.length == 4) { value = values[VALUE_OFFSET].getCoveredText(line).toString(); } return new AttributeAnnotation(values[ID_OFFSET].getCoveredText(line).toString(), values[TYPE_OFFSET].getCoveredText(line).toString(), values[ATTACHED_TO_OFFSET].getCoveredText(line).toString(), value); } else { throw new InvalidFormatException("Line must have 3 or 4 fields"); } } } private final AnnotationConfiguration config; private final BufferedReader reader; private final String id; public BratAnnotationStream(AnnotationConfiguration config, String id, InputStream in) { this.config = config; this.id = id; reader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8)); } public BratAnnotation read() throws IOException { String line = reader.readLine(); if (line != null) { Span[] tokens = WhitespaceTokenizer.INSTANCE.tokenizePos(line); if (tokens.length > 2) { String annId = tokens[BratAnnotationParser.ID_OFFSET].getCoveredText(line).toString(); if (annId.length() == 0) { throw new InvalidFormatException("annotation id is empty"); } // The first leter of the annotation id marks the annotation type final BratAnnotationParser parser; switch (annId.charAt(0)) { case 'T': parser = new SpanAnnotationParser(); break; case 'R': parser = new RelationAnnotationParser(); break; case 'A': parser = new AttributeAnnotationParser(); break; case 'E': parser = new EventAnnotationParser(); break; default: // Skip it, do that for everything unsupported (e.g. "*" id) return read(); } try { return parser.parse(tokens, line); } catch (IOException e) { throw new IOException(String.format("Failed to parse ann document with id [%s.ann]", id), e); } } } return null; } public void reset() throws IOException, UnsupportedOperationException { reader.reset(); } public void close() throws IOException { reader.close(); } }
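A usage sketch for the annotation stream; the .ann file name and the document id are placeholders, and the configuration is assumed to have been parsed from the project's annotation.conf as in the example above.

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

import opennlp.tools.formats.brat.AnnotationConfiguration;
import opennlp.tools.formats.brat.BratAnnotation;
import opennlp.tools.formats.brat.BratAnnotationStream;

public class BratAnnotationStreamExample {

  public static void printAnnotations(AnnotationConfiguration config) throws IOException {
    try (InputStream annIn = new FileInputStream("doc-0001.ann")) {
      BratAnnotationStream annotations =
          new BratAnnotationStream(config, "doc-0001", annIn);

      // T*, R*, A* and E* lines are parsed; unsupported ids (e.g. "*") are skipped.
      BratAnnotation ann;
      while ((ann = annotations.read()) != null) {
        System.out.println(ann.getId() + "\t" + ann.getType());
      }
    }
  }
}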
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/brat/BratDocument.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.brat; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.Reader; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Map; import opennlp.tools.util.ObjectStream; public class BratDocument { private final AnnotationConfiguration config; private final String id; private final String text; private final Map<String, BratAnnotation> annotationMap; public BratDocument(AnnotationConfiguration config, String id, String text, Collection<BratAnnotation> annotations) { this.config = config; this.id = id; this.text = text; Map<String, BratAnnotation> annMap = new HashMap<>(); for (BratAnnotation annotation : annotations) { annMap.put(annotation.getId(), annotation); } annotationMap = Collections.unmodifiableMap(annMap); } public AnnotationConfiguration getConfig() { return config; } public String getId() { return id; } public String getText() { return text; } public BratAnnotation getAnnotation(String id) { return annotationMap.get(id); } public Collection<BratAnnotation> getAnnotations() { return annotationMap.values(); } public static BratDocument parseDocument(AnnotationConfiguration config, String id, InputStream txtIn, InputStream annIn) throws IOException { Reader txtReader = new InputStreamReader(txtIn, StandardCharsets.UTF_8); StringBuilder text = new StringBuilder(); char[] cbuf = new char[1024]; int len; while ((len = txtReader.read(cbuf)) > 0) { text.append(cbuf, 0, len); } Collection<BratAnnotation> annotations = new ArrayList<>(); ObjectStream<BratAnnotation> annStream = new BratAnnotationStream(config, id, annIn); BratAnnotation ann; while ((ann = annStream.read()) != null) { annotations.add(ann); } annStream.close(); return new BratDocument(config, id, text.toString(), annotations); } }
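A sketch of loading a text/annotation pair into a single BratDocument; the file names and the id are placeholders.

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

import opennlp.tools.formats.brat.AnnotationConfiguration;
import opennlp.tools.formats.brat.BratDocument;

public class BratDocumentExample {

  public static BratDocument load(AnnotationConfiguration config) throws IOException {
    try (InputStream txtIn = new FileInputStream("doc-0001.txt");
        InputStream annIn = new FileInputStream("doc-0001.ann")) {
      BratDocument doc = BratDocument.parseDocument(config, "doc-0001", txtIn, annIn);
      System.out.println(doc.getId() + ": " + doc.getAnnotations().size() + " annotations");
      return doc;
    }
  }
}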
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/brat/BratDocumentParser.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.brat; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import opennlp.tools.namefind.NameSample; import opennlp.tools.sentdetect.SentenceDetector; import opennlp.tools.tokenize.Tokenizer; import opennlp.tools.util.Span; public class BratDocumentParser { private SentenceDetector sentDetector; private Tokenizer tokenizer; public BratDocumentParser(SentenceDetector sentenceDetector, Tokenizer tokenizer) { this.sentDetector = sentenceDetector; this.tokenizer = tokenizer; } public List<NameSample> parse(BratDocument sample) { // Note: Some entities might not match sentence boundaries, // to be able to print warning a set of entities id must be maintained // to check if all entities have been used up after the matching is done Set<String> entityIdSet = new HashSet<>(); Map<Integer, Span> coveredIndexes = new HashMap<>(); for (BratAnnotation ann : sample.getAnnotations()) { if (ann instanceof SpanAnnotation) { entityIdSet.add(ann.getId()); Span span = ((SpanAnnotation) ann).getSpan(); for (int i = span.getStart(); i < span.getEnd(); i++) { coveredIndexes.put(i, span); } } } List<Span> sentences = new ArrayList<>(); for (Span sentence : sentDetector.sentPosDetect(sample.getText())) { Span conflictingName = coveredIndexes.get(sentence.getStart()); if (sentences.size() > 0 && conflictingName != null && conflictingName.getStart() < sentence.getStart()) { Span lastSentence = sentences.remove(sentences.size() - 1); sentences.add(new Span(lastSentence.getStart(), sentence.getEnd())); System.out.println("Correcting sentence segmentation in document " + sample.getId()); } else { sentences.add(sentence); } } // TODO: Token breaks should be enforced on name span boundaries // a) Just split tokens // b) Implement a custom token split validator which can be injected into the Tokenizer // Currently we are missing all List<NameSample> samples = new ArrayList<>(sentences.size()); for (Span sentence : sentences) { String sentenceText = sentence.getCoveredText( sample.getText()).toString(); Span[] tokens = tokenizer.tokenizePos(sentenceText); // Note: // A begin and end token index can be identical, but map to different // tokens, to distinguish between between the two begin indexes are // stored with a negative sign, and end indexes are stored with a positive sign // in the tokenIndexMap. // The tokenIndexMap maps to the sentence local token index. 
Map<Integer, Integer> tokenIndexMap = new HashMap<>(); for (int i = 0; i < tokens.length; i++) { tokenIndexMap.put(-(sentence.getStart() + tokens[i].getStart()), i); tokenIndexMap.put(sentence.getStart() + tokens[i].getEnd(), i + 1); } List<Span> names = new ArrayList<>(); for (BratAnnotation ann : sample.getAnnotations()) { if (ann instanceof SpanAnnotation) { SpanAnnotation entity = (SpanAnnotation) ann; Span entitySpan = entity.getSpan(); if (sentence.contains(entitySpan)) { entityIdSet.remove(ann.getId()); entitySpan = entitySpan.trim(sample.getText()); Integer nameBeginIndex = tokenIndexMap.get(-entitySpan.getStart()); Integer nameEndIndex = tokenIndexMap.get(entitySpan.getEnd()); if (nameBeginIndex != null && nameEndIndex != null) { names.add(new Span(nameBeginIndex, nameEndIndex, entity.getType())); } else { System.err.println("Dropped entity " + entity.getId() + " (" + entitySpan.getCoveredText(sample.getText()) + ") " + " in document " + sample.getId() + ", it is not matching tokenization!"); } } } } samples.add(new NameSample(sample.getId(), Span.spansToStrings(tokens, sentenceText), names.toArray(new Span[names.size()]), null, samples.size() == 0)); } for (String id : entityIdSet) { System.err.println("Dropped entity " + id + " in document " + sample.getId() + ", is not matching sentence segmentation!"); } return samples; } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/brat/BratDocumentStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.brat; import java.io.BufferedInputStream; import java.io.File; import java.io.FileFilter; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Stack; import opennlp.tools.util.ObjectStream; public class BratDocumentStream implements ObjectStream<BratDocument> { private AnnotationConfiguration config; private List<String> documentIds = new LinkedList<>(); private Iterator<String> documentIdIterator; /** * Creates a BratDocumentStream which reads the documents from the given input directory. * * @param config the annotation.conf from the brat project as an Annotation Configuration object * @param bratCorpusDirectory the directory containing all the brat training data files * @param searchRecursive specifies if the corpus directory should be traversed recursively * to find training data files. * @param fileFilter a custom file filter to filter out certain files or null to accept all files * * @throws IOException if reading from the brat directory fails in anyway */ public BratDocumentStream(AnnotationConfiguration config, File bratCorpusDirectory, boolean searchRecursive, FileFilter fileFilter) throws IOException { if (!bratCorpusDirectory.isDirectory()) { throw new IOException("Input corpus directory must be a directory " + "according to File.isDirectory()!"); } this.config = config; Stack<File> directoryStack = new Stack<>(); directoryStack.add(bratCorpusDirectory); while (!directoryStack.isEmpty()) { for (File file : directoryStack.pop().listFiles(fileFilter)) { if (file.isFile()) { String annFilePath = file.getAbsolutePath(); if (annFilePath.endsWith(".ann")) { // cutoff last 4 chars ... String documentId = annFilePath.substring(0, annFilePath.length() - 4); File txtFile = new File(documentId + ".txt"); if (txtFile.exists() && txtFile.isFile()) { documentIds.add(documentId); } } } else if (searchRecursive && file.isDirectory()) { directoryStack.push(file); } } } reset(); } public BratDocument read() throws IOException { BratDocument doc = null; if (documentIdIterator.hasNext()) { String id = documentIdIterator.next(); try (InputStream txtIn = new BufferedInputStream(new FileInputStream(id + ".txt")); InputStream annIn = new BufferedInputStream(new FileInputStream(id + ".ann"))) { doc = BratDocument.parseDocument(config, id, txtIn, annIn); } } return doc; } public void reset() { documentIdIterator = documentIds.iterator(); } public void close() { // No longer needed, make the object unusable documentIds = null; documentIdIterator = null; } }
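A sketch of streaming a whole brat corpus directory; the directory name is a placeholder and no file filter is applied.

import java.io.File;
import java.io.IOException;

import opennlp.tools.formats.brat.AnnotationConfiguration;
import opennlp.tools.formats.brat.BratDocument;
import opennlp.tools.formats.brat.BratDocumentStream;

public class BratDocumentStreamExample {

  public static void readCorpus(AnnotationConfiguration config) throws IOException {
    // Recursively collects every .ann file that has a matching .txt file next to it.
    BratDocumentStream docs =
        new BratDocumentStream(config, new File("brat-corpus"), true, null);

    BratDocument doc;
    while ((doc = docs.read()) != null) {
      System.out.println("Read document " + doc.getId());
    }
    docs.close();
  }
}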
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/brat/BratNameSampleStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.brat; import java.io.IOException; import java.util.List; import opennlp.tools.namefind.NameSample; import opennlp.tools.sentdetect.SentenceDetector; import opennlp.tools.sentdetect.SentenceDetectorME; import opennlp.tools.sentdetect.SentenceModel; import opennlp.tools.tokenize.Tokenizer; import opennlp.tools.tokenize.TokenizerME; import opennlp.tools.tokenize.TokenizerModel; import opennlp.tools.util.ObjectStream; /** * Generates Name Sample objects for a Brat Document object. */ public class BratNameSampleStream extends SegmenterObjectStream<BratDocument, NameSample> { private final BratDocumentParser parser; public BratNameSampleStream(SentenceDetector sentDetector, Tokenizer tokenizer, ObjectStream<BratDocument> samples) { super(samples); this.parser = new BratDocumentParser(sentDetector, tokenizer); } public BratNameSampleStream(SentenceModel sentModel, TokenizerModel tokenModel, ObjectStream<BratDocument> samples) { super(samples); // TODO: We can pass in custom validators here ... this.parser = new BratDocumentParser(new SentenceDetectorME(sentModel), new TokenizerME(tokenModel)); } @Override protected List<NameSample> read(BratDocument sample) throws IOException { return parser.parse(sample); } }
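A sketch that chains a document stream into a NameSample stream; the rule-based segmenters are again placeholders for trained sentence and tokenizer models.

import java.io.IOException;

import opennlp.tools.formats.brat.BratDocument;
import opennlp.tools.formats.brat.BratNameSampleStream;
import opennlp.tools.namefind.NameSample;
import opennlp.tools.sentdetect.NewlineSentenceDetector;
import opennlp.tools.tokenize.WhitespaceTokenizer;
import opennlp.tools.util.ObjectStream;

public class BratNameSampleStreamExample {

  public static void printSamples(ObjectStream<BratDocument> docs) throws IOException {
    BratNameSampleStream samples = new BratNameSampleStream(
        new NewlineSentenceDetector(), WhitespaceTokenizer.INSTANCE, docs);

    NameSample sample;
    while ((sample = samples.read()) != null) {
      System.out.println(sample);
    }
  }
}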
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/brat/BratNameSampleStreamFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.brat; import java.io.File; import java.io.IOException; import opennlp.tools.cmdline.ArgumentParser; import opennlp.tools.cmdline.ArgumentParser.OptionalParameter; import opennlp.tools.cmdline.ArgumentParser.ParameterDescription; import opennlp.tools.cmdline.StreamFactoryRegistry; import opennlp.tools.cmdline.TerminateToolException; import opennlp.tools.formats.AbstractSampleStreamFactory; import opennlp.tools.namefind.NameSample; import opennlp.tools.sentdetect.NewlineSentenceDetector; import opennlp.tools.sentdetect.SentenceDetector; import opennlp.tools.sentdetect.SentenceDetectorME; import opennlp.tools.sentdetect.SentenceModel; import opennlp.tools.tokenize.SimpleTokenizer; import opennlp.tools.tokenize.Tokenizer; import opennlp.tools.tokenize.TokenizerME; import opennlp.tools.tokenize.TokenizerModel; import opennlp.tools.tokenize.WhitespaceTokenizer; import opennlp.tools.util.ObjectStream; public class BratNameSampleStreamFactory extends AbstractSampleStreamFactory<NameSample> { interface Parameters { @ParameterDescription(valueName = "bratDataDir", description = "location of brat data dir") File getBratDataDir(); @ParameterDescription(valueName = "annConfFile") File getAnnotationConfig(); @ParameterDescription(valueName = "modelFile") @OptionalParameter File getSentenceDetectorModel(); @ParameterDescription(valueName = "modelFile") @OptionalParameter File getTokenizerModel(); @ParameterDescription(valueName = "name") @OptionalParameter String getRuleBasedTokenizer(); @ParameterDescription(valueName = "value") @OptionalParameter(defaultValue = "false") Boolean getRecursive(); } protected BratNameSampleStreamFactory() { super(Parameters.class); } /** * Checks that non of the passed values are null. * * @param objects * @return true or false */ private boolean notNull(Object... objects) { for (Object obj : objects) { if (obj == null) return false; } return true; } public ObjectStream<NameSample> create(String[] args) { Parameters params = ArgumentParser.parse(args, Parameters.class); if (notNull(params.getRuleBasedTokenizer(), params.getTokenizerModel())) { throw new TerminateToolException(-1, "Either use rule based or statistical tokenizer!"); } // TODO: Provide the file name to the annotation.conf file and implement the parser ... AnnotationConfiguration annConfig; try { annConfig = AnnotationConfiguration.parse(params.getAnnotationConfig()); } catch (IOException e) { throw new TerminateToolException(1, "Failed to parse annotation.conf file!"); } // TODO: Add an optional parameter to search recursive // TODO: How to handle the error here ? terminate the tool? not nice if used by API! 
ObjectStream<BratDocument> samples; try { samples = new BratDocumentStream(annConfig, params.getBratDataDir(), params.getRecursive(), null); } catch (IOException e) { throw new TerminateToolException(-1, e.getMessage()); } SentenceDetector sentDetector; if (params.getSentenceDetectorModel() != null) { try { sentDetector = new SentenceDetectorME(new SentenceModel(params.getSentenceDetectorModel())); } catch (IOException e) { throw new TerminateToolException(-1, "Failed to load sentence detector model!", e); } } else { sentDetector = new NewlineSentenceDetector(); } Tokenizer tokenizer = WhitespaceTokenizer.INSTANCE; if (params.getTokenizerModel() != null) { try { tokenizer = new TokenizerME(new TokenizerModel(params.getTokenizerModel())); } catch (IOException e) { throw new TerminateToolException(-1, "Failed to load tokenizer model!", e); } } else if (params.getRuleBasedTokenizer() != null) { String tokenizerName = params.getRuleBasedTokenizer(); if ("simple".equals(tokenizerName)) { tokenizer = SimpleTokenizer.INSTANCE; } else if ("whitespace".equals(tokenizerName)) { tokenizer = WhitespaceTokenizer.INSTANCE; } else { throw new TerminateToolException(-1, "Unkown tokenizer: " + tokenizerName); } } return new BratNameSampleStream(sentDetector, tokenizer, samples); } public static void registerFactory() { StreamFactoryRegistry.registerFactory(NameSample.class, "brat", new BratNameSampleStreamFactory()); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/brat/EventAnnotation.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.brat; import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Objects; public class EventAnnotation extends BratAnnotation { private final String eventTrigger; private final Map<String, String> arguments; protected EventAnnotation(String id, String type, String eventTrigger, Map<String, String> arguments) { super(id, type); this.eventTrigger = Objects.requireNonNull(eventTrigger); this.arguments = Collections.unmodifiableMap(new HashMap<>(arguments)); } public String getEventTrigger() { return eventTrigger; } public Map<String, String> getArguments() { return arguments; } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/brat/RelationAnnotation.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.brat; public class RelationAnnotation extends BratAnnotation { private final String arg1; private final String arg2; protected RelationAnnotation(String id, String type, String arg1, String arg2) { super(id, type); this.arg1 = arg1; this.arg2 = arg2; } public String getArg1() { return arg1; } public String getArg2() { return arg2; } @Override public String toString() { return super.toString() + " arg1:" + getArg1() + " arg2:" + getArg2(); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/brat/SegmenterObjectStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.brat; import java.io.IOException; import java.util.Collections; import java.util.Iterator; import java.util.List; import opennlp.tools.util.FilterObjectStream; import opennlp.tools.util.ObjectStream; public abstract class SegmenterObjectStream<S, T> extends FilterObjectStream<S, T> { private Iterator<T> sampleIt = Collections.<T>emptySet().iterator(); public SegmenterObjectStream(ObjectStream<S> in) { super(in); } protected abstract List<T> read(S sample) throws IOException; public final T read() throws IOException { if (sampleIt.hasNext()) { return sampleIt.next(); } else { S inSample = samples.read(); if (inSample != null) { List<T> outSamples = read(inSample); if (outSamples != null) { sampleIt = outSamples.iterator(); } return read(); } } return null; } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/brat/SpanAnnotation.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.brat; import opennlp.tools.util.Span; public class SpanAnnotation extends BratAnnotation { private final Span span; private final String coveredText; SpanAnnotation(String id, String type, Span span, String coveredText) { super(id, type); this.span = span; this.coveredText = coveredText; } public Span getSpan() { return span; } public String getCoveredText() { return coveredText; } @Override public String toString() { return super.toString() + " " + span.getStart() + " " + span.getEnd() + " " + getCoveredText(); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/conllu/ConlluLemmaSampleStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.conllu; import java.io.IOException; import java.util.ArrayList; import java.util.List; import opennlp.tools.lemmatizer.LemmaSample; import opennlp.tools.util.FilterObjectStream; import opennlp.tools.util.ObjectStream; public class ConlluLemmaSampleStream extends FilterObjectStream<ConlluSentence, LemmaSample> { private final ConlluTagset tagset; public ConlluLemmaSampleStream(ObjectStream<ConlluSentence> samples, ConlluTagset tagset) { super(samples); this.tagset = tagset; } @Override public LemmaSample read() throws IOException { ConlluSentence sentence = samples.read(); if (sentence != null) { List<String> tokens = new ArrayList<>(); List<String> tags = new ArrayList<>(); List<String> lemmas = new ArrayList<>(); for (ConlluWordLine line : sentence.getWordLines()) { tokens.add(line.getForm()); tags.add(line.getPosTag(tagset)); lemmas.add(line.getLemma()); } return new LemmaSample(tokens, tags, lemmas); } return null; } }
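A sketch of producing lemma samples from a CoNLL-U file with universal POS tags; the file name is a placeholder and MarkableFileInputStreamFactory is just one way to obtain an InputStreamFactory.

import java.io.File;
import java.io.IOException;

import opennlp.tools.formats.conllu.ConlluLemmaSampleStream;
import opennlp.tools.formats.conllu.ConlluSentence;
import opennlp.tools.formats.conllu.ConlluStream;
import opennlp.tools.formats.conllu.ConlluTagset;
import opennlp.tools.lemmatizer.LemmaSample;
import opennlp.tools.util.MarkableFileInputStreamFactory;
import opennlp.tools.util.ObjectStream;

public class ConlluLemmaSampleStreamExample {

  public static void main(String[] args) throws IOException {
    ObjectStream<ConlluSentence> sentences =
        new ConlluStream(new MarkableFileInputStreamFactory(new File("train.conllu")));

    ConlluLemmaSampleStream lemmaSamples =
        new ConlluLemmaSampleStream(sentences, ConlluTagset.U);

    // Each sample pairs the tokens of one sentence with their POS tags and lemmas.
    LemmaSample sample;
    while ((sample = lemmaSamples.read()) != null) {
      System.out.println(sample);
    }
  }
}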
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/conllu/ConlluLemmaSampleStreamFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.conllu; import java.io.IOException; import opennlp.tools.cmdline.ArgumentParser; import opennlp.tools.cmdline.CmdLineUtil; import opennlp.tools.cmdline.StreamFactoryRegistry; import opennlp.tools.cmdline.TerminateToolException; import opennlp.tools.cmdline.params.BasicFormatParams; import opennlp.tools.formats.AbstractSampleStreamFactory; import opennlp.tools.lemmatizer.LemmaSample; import opennlp.tools.util.InputStreamFactory; import opennlp.tools.util.ObjectStream; /** * <b>Note:</b> Do not use this class, internal use only! */ public class ConlluLemmaSampleStreamFactory extends AbstractSampleStreamFactory<LemmaSample> { interface Parameters extends BasicFormatParams { @ArgumentParser.ParameterDescription(valueName = "tagset", description = "u|x u for unified tags and x for language-specific part-of-speech tags") @ArgumentParser.OptionalParameter(defaultValue = "u") String getTagset(); } public static void registerFactory() { StreamFactoryRegistry.registerFactory(LemmaSample.class, ConlluPOSSampleStreamFactory.CONLLU_FORMAT, new ConlluLemmaSampleStreamFactory(Parameters.class)); } protected <P> ConlluLemmaSampleStreamFactory(Class<P> params) { super(params); } public ObjectStream<LemmaSample> create(String[] args) { Parameters params = ArgumentParser.parse(args, Parameters.class); ConlluTagset tagset; switch (params.getTagset()) { case "u": tagset = ConlluTagset.U; break; case "x": tagset = ConlluTagset.X; break; default: throw new TerminateToolException(-1, "Unknown tagset parameter: " + params.getTagset()); } InputStreamFactory inFactory = CmdLineUtil.createInputStreamFactory(params.getData()); try { return new ConlluLemmaSampleStream(new ConlluStream(inFactory), tagset); } catch (IOException e) { // That will throw an exception CmdLineUtil.handleCreateObjectStreamError(e); } return null; } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/conllu/ConlluPOSSampleStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.conllu; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Objects; import opennlp.tools.postag.POSSample; import opennlp.tools.util.FilterObjectStream; import opennlp.tools.util.ObjectStream; public class ConlluPOSSampleStream extends FilterObjectStream<ConlluSentence, POSSample> { private final ConlluTagset tagset; ConlluPOSSampleStream(ObjectStream<ConlluSentence> samples, ConlluTagset tagset) { super(samples); this.tagset = Objects.requireNonNull(tagset); } @Override public POSSample read() throws IOException { ConlluSentence sentence = samples.read(); if (sentence != null) { List<String> tokens = new ArrayList<>(); List<String> tags = new ArrayList<>(); for (ConlluWordLine line : sentence.getWordLines()) { tokens.add(line.getForm()); tags.add(line.getPosTag(tagset)); } return new POSSample(tokens, tags); } return null; } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/conllu/ConlluPOSSampleStreamFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.conllu; import java.io.IOException; import opennlp.tools.cmdline.ArgumentParser; import opennlp.tools.cmdline.CmdLineUtil; import opennlp.tools.cmdline.StreamFactoryRegistry; import opennlp.tools.cmdline.TerminateToolException; import opennlp.tools.cmdline.params.BasicFormatParams; import opennlp.tools.formats.AbstractSampleStreamFactory; import opennlp.tools.postag.POSSample; import opennlp.tools.util.InputStreamFactory; import opennlp.tools.util.ObjectStream; /** * <b>Note:</b> Do not use this class, internal use only! */ public class ConlluPOSSampleStreamFactory extends AbstractSampleStreamFactory<POSSample> { public static final String CONLLU_FORMAT = "conllu"; interface Parameters extends BasicFormatParams { @ArgumentParser.ParameterDescription(valueName = "tagset", description = "u|x u for unified tags and x for language-specific part-of-speech tags") @ArgumentParser.OptionalParameter(defaultValue = "u") String getTagset(); } public static void registerFactory() { StreamFactoryRegistry.registerFactory(POSSample.class, CONLLU_FORMAT, new ConlluPOSSampleStreamFactory(Parameters.class)); } protected <P> ConlluPOSSampleStreamFactory(Class<P> params) { super(params); } public ObjectStream<POSSample> create(String[] args) { Parameters params = ArgumentParser.parse(args, Parameters.class); ConlluTagset tagset; switch (params.getTagset()) { case "u": tagset = ConlluTagset.U; break; case "x": tagset = ConlluTagset.X; break; default: throw new TerminateToolException(-1, "Unknown tagset parameter: " + params.getTagset()); } InputStreamFactory inFactory = CmdLineUtil.createInputStreamFactory(params.getData()); try { return new ConlluPOSSampleStream(new ConlluStream(inFactory), tagset); } catch (IOException e) { // That will throw an exception CmdLineUtil.handleCreateObjectStreamError(e); } return null; } }
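Since ConlluPOSSampleStream's constructor is package-private, outside code normally goes through this factory; below is a hedged CLI-style sketch with the flag names inferred from the parameter getters and a placeholder data path.

import opennlp.tools.cmdline.StreamFactoryRegistry;
import opennlp.tools.formats.conllu.ConlluPOSSampleStreamFactory;
import opennlp.tools.postag.POSSample;
import opennlp.tools.util.ObjectStream;

public class ConlluPOSSampleStreamFactoryExample {

  public static ObjectStream<POSSample> createPosStream() {
    ConlluPOSSampleStreamFactory.registerFactory();

    // "-tagset x" selects language-specific tags; "u" (the default) selects universal tags.
    return StreamFactoryRegistry
        .getFactory(POSSample.class, ConlluPOSSampleStreamFactory.CONLLU_FORMAT)
        .create(new String[] {"-data", "train.conllu", "-tagset", "x"});
  }
}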
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/conllu/ConlluSentence.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.conllu; import java.util.List; public class ConlluSentence { private List<ConlluWordLine> wordLines; private String sentenceIdComment; private String textComment; ConlluSentence(List<ConlluWordLine> wordLines, String sentenceIdComment, String textComment) { this.wordLines = wordLines; this.sentenceIdComment = sentenceIdComment; this.textComment = textComment; } public List<ConlluWordLine> getWordLines() { return wordLines; } public String getSentenceIdComment() { return sentenceIdComment; } public String getTextComment() { return textComment; } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/conllu/ConlluSentenceSampleStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.conllu; import java.io.IOException; import java.util.ArrayList; import java.util.List; import opennlp.tools.sentdetect.SentenceSample; import opennlp.tools.util.FilterObjectStream; import opennlp.tools.util.ObjectStream; import opennlp.tools.util.Span; public class ConlluSentenceSampleStream extends FilterObjectStream<ConlluSentence, SentenceSample> { private final int sentencesPerSample; public ConlluSentenceSampleStream(ObjectStream<ConlluSentence> samples, int sentencesPerSample) { super(samples); this.sentencesPerSample = sentencesPerSample; } @Override public SentenceSample read() throws IOException { StringBuilder documentText = new StringBuilder(); List<Span> sentenceSpans = new ArrayList<>(); ConlluSentence sentence; for (int i = 0; i < sentencesPerSample && (sentence = samples.read()) != null; i++) { int startIndex = documentText.length(); documentText.append(sentence.getTextComment()).append(' '); sentenceSpans.add(new Span(startIndex, documentText.length() - 1)); } if (documentText.length() > 0) { documentText.setLength(documentText.length() - 1); return new SentenceSample(documentText, sentenceSpans.toArray(new Span[sentenceSpans.size()])); } return null; } }
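ConlluSentenceSampleStream above concatenates the "# text" comment of up to sentencesPerSample sentences into one document string and records a Span per sentence. A minimal sketch, assuming a UD treebank file at the placeholder path corpus.conllu:

import java.io.File;
import opennlp.tools.formats.conllu.ConlluSentence;
import opennlp.tools.formats.conllu.ConlluSentenceSampleStream;
import opennlp.tools.formats.conllu.ConlluStream;
import opennlp.tools.sentdetect.SentenceSample;
import opennlp.tools.util.MarkableFileInputStreamFactory;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.Span;

public class ConlluSentenceSampleDemo {

  public static void main(String[] args) throws Exception {
    ObjectStream<ConlluSentence> sentences =
        new ConlluStream(new MarkableFileInputStreamFactory(new File("corpus.conllu")));

    // Pack five CoNLL-U sentences into each sentence detector sample.
    ObjectStream<SentenceSample> samples = new ConlluSentenceSampleStream(sentences, 5);

    SentenceSample sample;
    while ((sample = samples.read()) != null) {
      for (Span sentenceSpan : sample.getSentences()) {
        System.out.println(sentenceSpan.getCoveredText(sample.getDocument()));
      }
    }
    samples.close();
  }
}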
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/conllu/ConlluSentenceSampleStreamFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.conllu; import java.io.IOException; import opennlp.tools.cmdline.ArgumentParser; import opennlp.tools.cmdline.CmdLineUtil; import opennlp.tools.cmdline.StreamFactoryRegistry; import opennlp.tools.cmdline.params.BasicFormatParams; import opennlp.tools.formats.AbstractSampleStreamFactory; import opennlp.tools.sentdetect.SentenceSample; import opennlp.tools.util.InputStreamFactory; import opennlp.tools.util.ObjectStream; public class ConlluSentenceSampleStreamFactory extends AbstractSampleStreamFactory<SentenceSample> { interface Parameters extends BasicFormatParams { @ArgumentParser.ParameterDescription(valueName = "sentencesPerSample", description = "number of sentences per sample") String getSentencesPerSample(); } public static void registerFactory() { StreamFactoryRegistry.registerFactory(SentenceSample.class, ConlluPOSSampleStreamFactory.CONLLU_FORMAT, new ConlluSentenceSampleStreamFactory(ConlluSentenceSampleStreamFactory.Parameters.class)); } protected <P> ConlluSentenceSampleStreamFactory(Class<P> params) { super(params); } @Override public ObjectStream<SentenceSample> create(String[] args) { Parameters params = ArgumentParser.parse(args, Parameters.class); InputStreamFactory inFactory = CmdLineUtil.createInputStreamFactory(params.getData()); try { return new ConlluSentenceSampleStream(new ConlluStream(inFactory), Integer.parseInt(params.getSentencesPerSample())); } catch (IOException e) { // That will throw an exception CmdLineUtil.handleCreateObjectStreamError(e); } return null; } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/conllu/ConlluStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.conllu; import java.io.BufferedReader; import java.io.IOException; import java.io.StringReader; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.stream.Collectors; import opennlp.tools.util.InputStreamFactory; import opennlp.tools.util.ObjectStream; import opennlp.tools.util.ParagraphStream; import opennlp.tools.util.PlainTextByLineStream; /** * The CoNLL-U Format is specified here: * http://universaldependencies.org/format.html */ public class ConlluStream implements ObjectStream<ConlluSentence> { private final ObjectStream<String> sentenceStream; public ConlluStream(InputStreamFactory in) throws IOException { this.sentenceStream = new ParagraphStream(new PlainTextByLineStream(in, StandardCharsets.UTF_8)); } @Override public ConlluSentence read() throws IOException { String sentence = sentenceStream.read(); if (sentence != null) { List<ConlluWordLine> wordLines = new ArrayList<>(); BufferedReader reader = new BufferedReader(new StringReader(sentence)); String sentenceId = null; String text = null; String line; while ((line = reader.readLine()) != null) { // # indicates a comment line and contains additional data if (line.trim().startsWith("#")) { String commentLine = line.trim().substring(1); int separator = commentLine.indexOf('='); if (separator != -1) { String firstPart = commentLine.substring(0, separator).trim(); String secondPart = commentLine.substring(separator + 1, commentLine.length()).trim(); if (!secondPart.isEmpty()) { switch (firstPart) { case "sent_id": sentenceId = secondPart; break; case "text": text = secondPart; break; } } } } else { wordLines.add(new ConlluWordLine(line)); } } wordLines = postProcessContractions(wordLines); return new ConlluSentence(wordLines, sentenceId, text); } return null; } private List<ConlluWordLine> postProcessContractions(List<ConlluWordLine> lines) { // 1. Find contractions Map<String, Integer> index = new HashMap<>(); Map<String, List<String>> contractions = new HashMap<>(); List<String> linesToDelete = new ArrayList<>(); for (int i = 0; i < lines.size(); i++) { ConlluWordLine line = lines.get(i); index.put(line.getId(), i); if (line.getId().contains("-")) { List<String> expandedContractions = new ArrayList<>(); String[] ids = line.getId().split("-"); int start = Integer.parseInt(ids[0]); int end = Integer.parseInt(ids[1]); for (int j = start; j <= end; j++) { String js = Integer.toString(j); expandedContractions.add(js); linesToDelete.add(js); } contractions.put(line.getId(), expandedContractions); } } // 2. Merge annotation for (Entry<String, List<String>> entry : contractions.entrySet()) { final String contractionId = entry.getKey(); final List<String> expandedContractions = entry.getValue(); int contractionIndex = index.get(contractionId); ConlluWordLine contraction = lines.get(contractionIndex); List<ConlluWordLine> expandedParts = new ArrayList<>(); for (String id : expandedContractions) { expandedParts.add(lines.get(index.get(id))); } ConlluWordLine merged = mergeAnnotation(contraction, expandedParts); lines.set(contractionIndex, merged); } // 3. Delete the expanded parts for (int i = linesToDelete.size() - 1; i >= 0; i--) { lines.remove(index.get(linesToDelete.get(i)).intValue()); } return lines; } /** * Merges token-level annotations * @param contraction the line that receives the annotation * @param expandedParts the lines the annotation is taken from * @return the merged line */ private ConlluWordLine mergeAnnotation(ConlluWordLine contraction, List<ConlluWordLine> expandedParts) { String id = contraction.getId(); String form = contraction.getForm(); String lemma = expandedParts.stream() .filter(p -> !"_".equals(p.getLemma())) .map(p -> p.getLemma()) .collect(Collectors.joining("+")); String uPosTag = expandedParts.stream() .filter(p -> !"_".equals(p.getPosTag(ConlluTagset.U))) .map(p -> p.getPosTag(ConlluTagset.U)) .collect(Collectors.joining("+")); String xPosTag = expandedParts.stream() .filter(p -> !"_".equals(p.getPosTag(ConlluTagset.X))) .map(p -> p.getPosTag(ConlluTagset.X)) .collect(Collectors.joining("+")); String feats = expandedParts.stream() .filter(p -> !"_".equals(p.getFeats())) .map(p -> p.getFeats()) .collect(Collectors.joining("+")); String head = contraction.getHead(); String deprel = contraction.getDeprel(); String deps = contraction.getDeps(); String misc = contraction.getMisc(); return new ConlluWordLine(id, form, lemma, uPosTag, xPosTag, feats, head, deprel, deps, misc); } @Override public void close() throws IOException { sentenceStream.close(); } @Override public void reset() throws IOException, UnsupportedOperationException { sentenceStream.reset(); } }
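A self-contained sketch of ConlluStream: the invented Portuguese fragment below includes a multiword token (id range 1-2), so the contraction post-processing above can be observed merging the lemma ("em+o") and the universal POS ("ADP+DET") onto the contraction line while the expanded part lines are dropped.

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import opennlp.tools.formats.conllu.ConlluSentence;
import opennlp.tools.formats.conllu.ConlluStream;
import opennlp.tools.formats.conllu.ConlluTagset;
import opennlp.tools.formats.conllu.ConlluWordLine;
import opennlp.tools.util.InputStreamFactory;
import opennlp.tools.util.ObjectStream;

public class ConlluStreamDemo {

  public static void main(String[] args) throws Exception {
    // Tiny invented fragment: "no" is a contraction of "em" + "o".
    String conllu =
        "# sent_id = demo-1\n"
        + "# text = no carro\n"
        + "1-2\tno\t_\t_\t_\t_\t_\t_\t_\t_\n"
        + "1\tem\tem\tADP\t_\t_\t3\tcase\t_\t_\n"
        + "2\to\to\tDET\t_\t_\t3\tdet\t_\t_\n"
        + "3\tcarro\tcarro\tNOUN\t_\t_\t0\troot\t_\t_\n\n";

    InputStreamFactory in =
        () -> new ByteArrayInputStream(conllu.getBytes(StandardCharsets.UTF_8));

    ObjectStream<ConlluSentence> sentences = new ConlluStream(in);
    ConlluSentence sentence;
    while ((sentence = sentences.read()) != null) {
      for (ConlluWordLine line : sentence.getWordLines()) {
        // After postProcessContractions the "1-2" line carries lemma "em+o" and UPOS "ADP+DET".
        System.out.println(line.getId() + "\t" + line.getForm() + "\t"
            + line.getLemma() + "\t" + line.getPosTag(ConlluTagset.U));
      }
    }
    sentences.close();
  }
}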
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/conllu/ConlluTagset.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.conllu; public enum ConlluTagset { U, X }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/conllu/ConlluTokenSampleStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.conllu; import java.io.IOException; import opennlp.tools.tokenize.TokenSample; import opennlp.tools.util.FilterObjectStream; import opennlp.tools.util.ObjectStream; import opennlp.tools.util.StringUtil; public class ConlluTokenSampleStream extends FilterObjectStream<ConlluSentence, TokenSample> { public ConlluTokenSampleStream(ObjectStream<ConlluSentence> samples) { super(samples); } @Override public TokenSample read() throws IOException { ConlluSentence sentence = samples.read(); if (sentence != null) { if (sentence.getTextComment() != null) { StringBuilder text = new StringBuilder(sentence.getTextComment()); int searchIndex = 0; for (ConlluWordLine wordLine : sentence.getWordLines()) { // skip over inserted words which are not in the source text if (wordLine.getId().contains(".")) { continue; } String token = wordLine.getForm(); int tokenIndex = text.indexOf(token, searchIndex); if (tokenIndex == -1) { throw new IOException(String.format("Failed to match token [%s] in sentence [%s] with text [%s]", token, sentence.getSentenceIdComment(), text)); } searchIndex = tokenIndex + token.length(); if (searchIndex < text.length()) { if (!StringUtil.isWhitespace(text.charAt(searchIndex))) { text.insert(searchIndex, TokenSample.DEFAULT_SEPARATOR_CHARS); } } } return TokenSample.parse(text.toString(), TokenSample.DEFAULT_SEPARATOR_CHARS); } else { throw new IOException("Sentence is missing raw text sample!"); } } return null; } }
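ConlluTokenSampleStream above rebuilds tokenizer training data by locating each surface form inside the sentence's "# text" comment and inserting a split marker wherever the following character is not whitespace. A minimal sketch, again assuming a placeholder corpus.conllu:

import java.io.File;
import opennlp.tools.formats.conllu.ConlluStream;
import opennlp.tools.formats.conllu.ConlluTokenSampleStream;
import opennlp.tools.tokenize.TokenSample;
import opennlp.tools.util.MarkableFileInputStreamFactory;
import opennlp.tools.util.ObjectStream;

public class ConlluTokenSampleDemo {

  public static void main(String[] args) throws Exception {
    ObjectStream<TokenSample> samples = new ConlluTokenSampleStream(
        new ConlluStream(new MarkableFileInputStreamFactory(new File("corpus.conllu"))));

    TokenSample sample;
    while ((sample = samples.read()) != null) {
      // getText() is the raw sentence; getTokenSpans() are the recovered token boundaries.
      System.out.println(sample.getText() + " -> " + sample.getTokenSpans().length + " tokens");
    }
    samples.close();
  }
}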
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/conllu/ConlluTokenSampleStreamFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.conllu; import java.io.IOException; import opennlp.tools.cmdline.ArgumentParser; import opennlp.tools.cmdline.CmdLineUtil; import opennlp.tools.cmdline.StreamFactoryRegistry; import opennlp.tools.cmdline.params.BasicFormatParams; import opennlp.tools.formats.AbstractSampleStreamFactory; import opennlp.tools.tokenize.TokenSample; import opennlp.tools.util.InputStreamFactory; import opennlp.tools.util.ObjectStream; public class ConlluTokenSampleStreamFactory extends AbstractSampleStreamFactory<TokenSample> { interface Parameters extends BasicFormatParams { } public static void registerFactory() { StreamFactoryRegistry.registerFactory(TokenSample.class, ConlluPOSSampleStreamFactory.CONLLU_FORMAT, new ConlluTokenSampleStreamFactory(ConlluTokenSampleStreamFactory.Parameters.class)); } protected <P> ConlluTokenSampleStreamFactory(Class<P> params) { super(params); } @Override public ObjectStream<TokenSample> create(String[] args) { Parameters params = ArgumentParser.parse(args, Parameters.class); InputStreamFactory inFactory = CmdLineUtil.createInputStreamFactory(params.getData()); try { return new ConlluTokenSampleStream(new ConlluStream(inFactory)); } catch (IOException e) { // That will throw an exception CmdLineUtil.handleCreateObjectStreamError(e); } return null; } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/conllu/ConlluWordLine.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.conllu; import opennlp.tools.util.InvalidFormatException; public class ConlluWordLine { private final String id; private final String form; private final String lemma; private final String uPosTag; private final String xPosTag; private final String feats; private final String head; private final String deprel; private final String deps; private final String misc; ConlluWordLine(String id, String form, String lemma, String uPosTag, String xPosTag, String feats, String head, String deprel, String deps, String misc) { this.id = id; this.form = form; this.lemma = lemma; this.uPosTag = uPosTag; this.xPosTag = xPosTag; this.feats = feats; this.head = head; this.deprel = deprel; this.deps = deps; this.misc = misc; } ConlluWordLine(String line) throws InvalidFormatException { String[] fields = line.split("\t"); if (fields.length != 10) { throw new InvalidFormatException("Line must have exactly 10 fields"); } id = fields[0]; form = fields[1]; lemma = fields[2]; uPosTag = fields[3]; xPosTag = fields[4]; feats = fields[5]; head = fields[6]; deprel = fields[7]; deps = fields[8]; misc = fields[9]; } /** * Retrieves the word index. An Integer starting at 1 for each new sentence; * may be a range for multiword tokens; may be a decimal number for empty nodes. */ public String getId() { return id; } /** * Retrieve the word form or punctuation symbol. */ public String getForm() { return form; } /** * Retrieve the lemma or stem of the word form. */ public String getLemma() { return lemma; } /** * Retrieve the Universal part-of-speech tag or the language-specific part-of-speech tag; * underscore if not available. * * @param tagset the type of tag to retrieve, either universal (u) or language-specific (x) */ public String getPosTag(ConlluTagset tagset) { switch (tagset) { case U: return uPosTag; case X: return xPosTag; default: throw new IllegalStateException("Unexpected tagset value: " + tagset); } } /** * Retrieve list of morphological features from the universal feature inventory or from a * defined language-specific extension; underscore if not available. */ public String getFeats() { return feats; } /** * Head of the current word, which is either a value of ID or zero (0). */ public String getHead() { return head; } /** * Universal dependency relation to the HEAD (root iff HEAD = 0) or a * defined language-specific subtype of one. */ public String getDeprel() { return deprel; } /** * Enhanced dependency graph in the form of a list of head-deprel pairs. */ public String getDeps() { return deps; } /** * Retrieve any other annotation. */ public String getMisc() { return misc; } }
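ConlluWordLine maps the ten tab-separated CoNLL-U columns to accessors; its constructors are package private, so the sketch below only splits an invented line to show the column order the parser expects.

public class ConlluColumnsDemo {

  public static void main(String[] args) {
    // Invented example line; columns follow http://universaldependencies.org/format.html
    String line = "1\tcats\tcat\tNOUN\tNNS\tNumber=Plur\t2\tnsubj\t_\t_";
    String[] names = {"ID", "FORM", "LEMMA", "UPOS", "XPOS",
        "FEATS", "HEAD", "DEPREL", "DEPS", "MISC"};
    String[] fields = line.split("\t");
    for (int i = 0; i < fields.length; i++) {
      System.out.println(names[i] + " = " + fields[i]);
    }
  }
}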
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/convert/AbstractToSentenceSampleStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.convert; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Objects; import opennlp.tools.sentdetect.SentenceSample; import opennlp.tools.tokenize.Detokenizer; import opennlp.tools.util.FilterObjectStream; import opennlp.tools.util.ObjectStream; public abstract class AbstractToSentenceSampleStream<T> extends FilterObjectStream<T, SentenceSample> { private final Detokenizer detokenizer; private final int chunkSize; AbstractToSentenceSampleStream(Detokenizer detokenizer, ObjectStream<T> samples, int chunkSize) { super(samples); this.detokenizer = Objects.requireNonNull(detokenizer, "detokenizer must not be null"); if (chunkSize < 0) { throw new IllegalArgumentException("chunkSize must be zero or larger but was " + chunkSize + "!"); } if (chunkSize > 0) { this.chunkSize = chunkSize; } else { this.chunkSize = Integer.MAX_VALUE; } } protected abstract String[] toSentence(T sample); public SentenceSample read() throws IOException { List<String[]> sentences = new ArrayList<>(); T posSample; int chunks = 0; while ((posSample = samples.read()) != null && chunks < chunkSize) { sentences.add(toSentence(posSample)); chunks++; } if (sentences.size() > 0) { return new SentenceSample(detokenizer, sentences.toArray(new String[sentences.size()][])); } else if (posSample != null) { return read(); // filter out empty line } return null; // last sample was read } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/convert/FileToByteArraySampleStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.convert; import java.io.BufferedInputStream; import java.io.ByteArrayOutputStream; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import opennlp.tools.util.FilterObjectStream; import opennlp.tools.util.ObjectStream; public class FileToByteArraySampleStream extends FilterObjectStream<File, byte[]> { public FileToByteArraySampleStream(ObjectStream<File> samples) { super(samples); } private static byte[] readFile(File file) throws IOException { ByteArrayOutputStream bytes = new ByteArrayOutputStream(); try (InputStream in = new BufferedInputStream(new FileInputStream(file))) { byte[] buffer = new byte[1024]; int length; while ((length = in.read(buffer, 0, buffer.length)) > 0) { bytes.write(buffer, 0, length); } } return bytes.toByteArray(); } public byte[] read() throws IOException { File sampleFile = samples.read(); if (sampleFile != null) { return readFile(sampleFile); } else { return null; } } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/convert/FileToStringSampleStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.convert; import java.io.BufferedReader; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStreamReader; import java.io.Reader; import java.nio.charset.Charset; import opennlp.tools.util.FilterObjectStream; import opennlp.tools.util.ObjectStream; /** * Provides the ability to read the contents of files * contained in an object stream of files. * */ public class FileToStringSampleStream extends FilterObjectStream<File, String> { private final Charset encoding; /** * Creates a new file-to-string sample stream. * @param samples The {@link ObjectStream} containing the files. * @param encoding The {@link Charset} encoding of the files. */ public FileToStringSampleStream(ObjectStream<File> samples, Charset encoding) { super(samples); this.encoding = encoding; } /** * Reads the contents of a file to a string. * @param textFile The {@link File} to read. * @param encoding The {@link Charset} for the file. * @return The string contents of the file. * @throws IOException Thrown if the file cannot be read. */ private static String readFile(File textFile, Charset encoding) throws IOException { Reader in = new BufferedReader(new InputStreamReader(new FileInputStream(textFile), encoding)); StringBuilder text = new StringBuilder(); try { char[] buffer = new char[1024]; int length; while ((length = in.read(buffer, 0, buffer.length)) > 0) { text.append(buffer, 0, length); } } finally { try { in.close(); } catch (IOException e) { // sorry that this can fail! } } return text.toString(); } @Override public String read() throws IOException { File sampleFile = samples.read(); if (sampleFile != null) { return readFile(sampleFile, encoding); } else { return null; } } }
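FileToStringSampleStream is typically chained behind a DirectorySampleStream. A minimal sketch, assuming a placeholder directory text-dir that contains UTF-8 text files:

import java.io.File;
import java.nio.charset.StandardCharsets;
import opennlp.tools.formats.DirectorySampleStream;
import opennlp.tools.formats.convert.FileToStringSampleStream;
import opennlp.tools.util.ObjectStream;

public class FileToStringDemo {

  public static void main(String[] args) throws Exception {
    // Every file found in text-dir is read into one String sample.
    ObjectStream<String> docs = new FileToStringSampleStream(
        new DirectorySampleStream(new File("text-dir"), null, false),
        StandardCharsets.UTF_8);

    String doc;
    while ((doc = docs.read()) != null) {
      System.out.println(doc.length() + " characters");
    }
    docs.close();
  }
}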
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/convert/NameToSentenceSampleStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.convert; import opennlp.tools.namefind.NameSample; import opennlp.tools.tokenize.Detokenizer; import opennlp.tools.util.ObjectStream; /** * <b>Note:</b> Do not use this class, internal use only! */ public class NameToSentenceSampleStream extends AbstractToSentenceSampleStream<NameSample> { public NameToSentenceSampleStream(Detokenizer detokenizer, ObjectStream<NameSample> samples, int chunkSize) { super(detokenizer, samples, chunkSize); } @Override protected String[] toSentence(NameSample sample) { return sample.getSentence(); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/convert/NameToSentenceSampleStreamFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.convert; import opennlp.tools.cmdline.ArgumentParser; import opennlp.tools.cmdline.StreamFactoryRegistry; import opennlp.tools.cmdline.params.DetokenizerParameter; import opennlp.tools.formats.DetokenizerSampleStreamFactory; import opennlp.tools.formats.NameSampleDataStreamFactory; import opennlp.tools.namefind.NameSample; import opennlp.tools.sentdetect.SentenceSample; import opennlp.tools.util.ObjectStream; /** * <b>Note:</b> Do not use this class, internal use only! */ public class NameToSentenceSampleStreamFactory extends DetokenizerSampleStreamFactory<SentenceSample> { interface Parameters extends NameSampleDataStreamFactory.Parameters, DetokenizerParameter { } public static void registerFactory() { StreamFactoryRegistry.registerFactory(SentenceSample.class, "namefinder", new NameToSentenceSampleStreamFactory(Parameters.class)); } protected <P> NameToSentenceSampleStreamFactory(Class<P> params) { super(params); } public ObjectStream<SentenceSample> create(String[] args) { Parameters params = ArgumentParser.parse(args, Parameters.class); ObjectStream<NameSample> nameSampleStream = StreamFactoryRegistry.getFactory( NameSample.class, StreamFactoryRegistry.DEFAULT_FORMAT).create( ArgumentParser.filter(args, NameSampleDataStreamFactory.Parameters.class)); return new NameToSentenceSampleStream(createDetokenizer(params), nameSampleStream, 30); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/convert/NameToTokenSampleStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.convert; import java.io.IOException; import opennlp.tools.namefind.NameSample; import opennlp.tools.tokenize.Detokenizer; import opennlp.tools.tokenize.TokenSample; import opennlp.tools.util.FilterObjectStream; import opennlp.tools.util.ObjectStream; /** * <b>Note:</b> Do not use this class, internal use only! */ public class NameToTokenSampleStream extends FilterObjectStream<NameSample, TokenSample> { private final Detokenizer detokenizer; public NameToTokenSampleStream(Detokenizer detokenizer, ObjectStream<NameSample> samples) { super(samples); this.detokenizer = detokenizer; } public TokenSample read() throws IOException { NameSample nameSample = samples.read(); TokenSample tokenSample = null; if (nameSample != null ) { tokenSample = new TokenSample(detokenizer, nameSample.getSentence()); } return tokenSample; } }
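A minimal sketch of NameToTokenSampleStream, using a hand-built DictionaryDetokenizer and a single in-memory NameSample; the detokenization rules and the sentence are invented for illustration (real conversions load a detokenizer dictionary via the -detokenizer parameter).

import opennlp.tools.formats.convert.NameToTokenSampleStream;
import opennlp.tools.namefind.NameSample;
import opennlp.tools.tokenize.DetokenizationDictionary;
import opennlp.tools.tokenize.DetokenizationDictionary.Operation;
import opennlp.tools.tokenize.Detokenizer;
import opennlp.tools.tokenize.DictionaryDetokenizer;
import opennlp.tools.tokenize.TokenSample;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.ObjectStreamUtils;
import opennlp.tools.util.Span;

public class NameToTokenDemo {

  public static void main(String[] args) throws Exception {
    // Attach "." and "," to the token on their left when detokenizing.
    Detokenizer detokenizer = new DictionaryDetokenizer(new DetokenizationDictionary(
        new String[] {".", ","}, new Operation[] {Operation.MOVE_LEFT, Operation.MOVE_LEFT}));

    NameSample nameSample = new NameSample(
        new String[] {"Pierre", "Vinken", "is", "61", "years", "old", "."},
        new Span[] {new Span(0, 2, "person")}, true);

    ObjectStream<TokenSample> samples = new NameToTokenSampleStream(
        detokenizer, ObjectStreamUtils.createObjectStream(nameSample));

    TokenSample tokenSample = samples.read();
    // Prints the detokenized sentence, e.g. "Pierre Vinken is 61 years old."
    System.out.println(tokenSample.getText());
    samples.close();
  }
}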
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/convert/NameToTokenSampleStreamFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.convert; import opennlp.tools.cmdline.ArgumentParser; import opennlp.tools.cmdline.StreamFactoryRegistry; import opennlp.tools.cmdline.params.DetokenizerParameter; import opennlp.tools.formats.DetokenizerSampleStreamFactory; import opennlp.tools.formats.NameSampleDataStreamFactory; import opennlp.tools.namefind.NameSample; import opennlp.tools.tokenize.TokenSample; import opennlp.tools.util.ObjectStream; /** * <b>Note:</b> Do not use this class, internal use only! */ public class NameToTokenSampleStreamFactory extends DetokenizerSampleStreamFactory<TokenSample> { interface Parameters extends NameSampleDataStreamFactory.Parameters, DetokenizerParameter { } public static void registerFactory() { StreamFactoryRegistry.registerFactory(TokenSample.class, "namefinder", new NameToTokenSampleStreamFactory(Parameters.class)); } protected <P> NameToTokenSampleStreamFactory(Class<P> params) { super(params); } public ObjectStream<TokenSample> create(String[] args) { Parameters params = ArgumentParser.parse(args, Parameters.class); ObjectStream<NameSample> nameSampleStream = StreamFactoryRegistry.getFactory( NameSample.class, StreamFactoryRegistry.DEFAULT_FORMAT).create( ArgumentParser.filter(args, NameSampleDataStreamFactory.Parameters.class)); return new NameToTokenSampleStream(createDetokenizer(params), nameSampleStream); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/convert/POSToSentenceSampleStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.convert; import opennlp.tools.postag.POSSample; import opennlp.tools.tokenize.Detokenizer; import opennlp.tools.util.ObjectStream; /** * <b>Note:</b> Do not use this class, internal use only! */ public class POSToSentenceSampleStream extends AbstractToSentenceSampleStream<POSSample> { public POSToSentenceSampleStream(Detokenizer detokenizer, ObjectStream<POSSample> samples, int chunkSize) { super(detokenizer, samples, chunkSize); } @Override protected String[] toSentence(POSSample sample) { return sample.getSentence(); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/convert/POSToSentenceSampleStreamFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.convert; import opennlp.tools.cmdline.ArgumentParser; import opennlp.tools.cmdline.StreamFactoryRegistry; import opennlp.tools.cmdline.params.DetokenizerParameter; import opennlp.tools.formats.DetokenizerSampleStreamFactory; import opennlp.tools.formats.WordTagSampleStreamFactory; import opennlp.tools.postag.POSSample; import opennlp.tools.sentdetect.SentenceSample; import opennlp.tools.util.ObjectStream; /** * <b>Note:</b> Do not use this class, internal use only! */ public class POSToSentenceSampleStreamFactory extends DetokenizerSampleStreamFactory<SentenceSample> { interface Parameters extends WordTagSampleStreamFactory.Parameters, DetokenizerParameter { } public static void registerFactory() { StreamFactoryRegistry.registerFactory(SentenceSample.class, "pos", new POSToSentenceSampleStreamFactory(Parameters.class)); } protected <P> POSToSentenceSampleStreamFactory(Class<P> params) { super(params); } public ObjectStream<SentenceSample> create(String[] args) { Parameters params = ArgumentParser.parse(args, Parameters.class); ObjectStream<POSSample> posSampleStream = StreamFactoryRegistry.getFactory(POSSample.class, StreamFactoryRegistry.DEFAULT_FORMAT).create( ArgumentParser.filter(args, WordTagSampleStreamFactory.Parameters.class)); return new POSToSentenceSampleStream(createDetokenizer(params), posSampleStream, 30); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/convert/POSToTokenSampleStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.convert; import java.io.IOException; import java.util.Objects; import opennlp.tools.postag.POSSample; import opennlp.tools.tokenize.Detokenizer; import opennlp.tools.tokenize.TokenSample; import opennlp.tools.util.FilterObjectStream; import opennlp.tools.util.ObjectStream; /** * <b>Note:</b> Do not use this class, internal use only! */ public class POSToTokenSampleStream extends FilterObjectStream<POSSample, TokenSample> { private final Detokenizer detokenizer; public POSToTokenSampleStream(Detokenizer detokenizer, ObjectStream<POSSample> samples) { super(samples); this.detokenizer = Objects.requireNonNull(detokenizer, "detokenizer must not be null!"); } public TokenSample read() throws IOException { POSSample posSample = samples.read(); TokenSample tokenSample = null; if (posSample != null ) { tokenSample = new TokenSample(detokenizer, posSample.getSentence()); } return tokenSample; } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/convert/POSToTokenSampleStreamFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.convert; import opennlp.tools.cmdline.ArgumentParser; import opennlp.tools.cmdline.StreamFactoryRegistry; import opennlp.tools.cmdline.params.DetokenizerParameter; import opennlp.tools.formats.DetokenizerSampleStreamFactory; import opennlp.tools.formats.WordTagSampleStreamFactory; import opennlp.tools.postag.POSSample; import opennlp.tools.tokenize.TokenSample; import opennlp.tools.util.ObjectStream; /** * <b>Note:</b> Do not use this class, internal use only! */ public class POSToTokenSampleStreamFactory extends DetokenizerSampleStreamFactory<TokenSample> { interface Parameters extends WordTagSampleStreamFactory.Parameters, DetokenizerParameter { } public static void registerFactory() { StreamFactoryRegistry.registerFactory(TokenSample.class, "pos", new POSToTokenSampleStreamFactory(Parameters.class)); } protected <P> POSToTokenSampleStreamFactory(Class<P> params) { super(params); } public ObjectStream<TokenSample> create(String[] args) { Parameters params = ArgumentParser.parse(args, Parameters.class); ObjectStream<POSSample> posSampleStream = StreamFactoryRegistry.getFactory(POSSample.class, StreamFactoryRegistry.DEFAULT_FORMAT).create( ArgumentParser.filter(args, WordTagSampleStreamFactory.Parameters.class)); return new POSToTokenSampleStream(createDetokenizer(params), posSampleStream); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/convert/ParseToPOSSampleStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.convert; import java.io.IOException; import java.util.ArrayList; import java.util.List; import opennlp.tools.parser.Parse; import opennlp.tools.postag.POSSample; import opennlp.tools.util.FilterObjectStream; import opennlp.tools.util.ObjectStream; /** * <b>Note:</b> Do not use this class, internal use only! */ public class ParseToPOSSampleStream extends FilterObjectStream<Parse, POSSample> { public ParseToPOSSampleStream(ObjectStream<Parse> samples) { super(samples); } public POSSample read() throws IOException { Parse parse = samples.read(); if (parse != null) { List<String> sentence = new ArrayList<>(); List<String> tags = new ArrayList<>(); for (Parse tagNode : parse.getTagNodes()) { sentence.add(tagNode.getCoveredText()); tags.add(tagNode.getType()); } return new POSSample(sentence, tags); } else { return null; } } }
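A minimal sketch of ParseToPOSSampleStream, feeding it a single bracketed parse created with Parse.parseParse; the sentence is invented.

import opennlp.tools.formats.convert.ParseToPOSSampleStream;
import opennlp.tools.parser.Parse;
import opennlp.tools.postag.POSSample;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.ObjectStreamUtils;

public class ParseToPosDemo {

  public static void main(String[] args) throws Exception {
    Parse parse = Parse.parseParse(
        "(TOP (S (NP (DT The) (NN cat)) (VP (VBZ sleeps)) (. .)))");

    ObjectStream<POSSample> samples =
        new ParseToPOSSampleStream(ObjectStreamUtils.createObjectStream(parse));

    POSSample posSample = samples.read();
    System.out.println(posSample); // The_DT cat_NN sleeps_VBZ ._.
    samples.close();
  }
}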
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/convert/ParseToPOSSampleStreamFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.convert; import opennlp.tools.cmdline.ArgumentParser; import opennlp.tools.cmdline.StreamFactoryRegistry; import opennlp.tools.formats.LanguageSampleStreamFactory; import opennlp.tools.formats.ParseSampleStreamFactory; import opennlp.tools.parser.Parse; import opennlp.tools.postag.POSSample; import opennlp.tools.util.ObjectStream; /** * <b>Note:</b> Do not use this class, internal use only! */ public class ParseToPOSSampleStreamFactory extends LanguageSampleStreamFactory<POSSample> { private ParseToPOSSampleStreamFactory() { super(ParseSampleStreamFactory.Parameters.class); } public ObjectStream<POSSample> create(String[] args) { ParseSampleStreamFactory.Parameters params = ArgumentParser.parse(args, ParseSampleStreamFactory.Parameters.class); ObjectStream<Parse> parseSampleStream = StreamFactoryRegistry.getFactory(Parse.class, StreamFactoryRegistry.DEFAULT_FORMAT).create( ArgumentParser.filter(args, ParseSampleStreamFactory.Parameters.class)); return new ParseToPOSSampleStream(parseSampleStream); } public static void registerFactory() { StreamFactoryRegistry.registerFactory(POSSample.class, "parse", new ParseToPOSSampleStreamFactory()); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/convert/ParseToSentenceSampleStreamFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.convert; import opennlp.tools.cmdline.ArgumentParser; import opennlp.tools.cmdline.StreamFactoryRegistry; import opennlp.tools.cmdline.params.DetokenizerParameter; import opennlp.tools.formats.DetokenizerSampleStreamFactory; import opennlp.tools.formats.ParseSampleStreamFactory; import opennlp.tools.parser.Parse; import opennlp.tools.sentdetect.SentenceSample; import opennlp.tools.util.ObjectStream; /** * <b>Note:</b> Do not use this class, internal use only! */ public class ParseToSentenceSampleStreamFactory extends DetokenizerSampleStreamFactory<SentenceSample> { interface Parameters extends ParseSampleStreamFactory.Parameters, DetokenizerParameter { } private ParseToSentenceSampleStreamFactory() { super(Parameters.class); } public ObjectStream<SentenceSample> create(String[] args) { Parameters params = ArgumentParser.parse(args, Parameters.class); ObjectStream<Parse> parseSampleStream = StreamFactoryRegistry.getFactory(Parse.class, StreamFactoryRegistry.DEFAULT_FORMAT).create( ArgumentParser.filter(args, ParseSampleStreamFactory.Parameters.class)); return new POSToSentenceSampleStream(createDetokenizer(params), new ParseToPOSSampleStream(parseSampleStream), 30); } public static void registerFactory() { StreamFactoryRegistry.registerFactory(SentenceSample.class, "parse", new ParseToSentenceSampleStreamFactory()); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/convert/ParseToTokenSampleStreamFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.convert; import opennlp.tools.cmdline.ArgumentParser; import opennlp.tools.cmdline.StreamFactoryRegistry; import opennlp.tools.cmdline.params.DetokenizerParameter; import opennlp.tools.formats.DetokenizerSampleStreamFactory; import opennlp.tools.formats.ParseSampleStreamFactory; import opennlp.tools.formats.WordTagSampleStreamFactory; import opennlp.tools.parser.Parse; import opennlp.tools.tokenize.TokenSample; import opennlp.tools.util.ObjectStream; /** * <b>Note:</b> Do not use this class, internal use only! */ public class ParseToTokenSampleStreamFactory extends DetokenizerSampleStreamFactory<TokenSample> { interface Parameters extends ParseSampleStreamFactory.Parameters, DetokenizerParameter { } private ParseToTokenSampleStreamFactory() { super(Parameters.class); } public ObjectStream<TokenSample> create(String[] args) { Parameters params = ArgumentParser.parse(args, Parameters.class); ObjectStream<Parse> parseSampleStream = StreamFactoryRegistry.getFactory(Parse.class, StreamFactoryRegistry.DEFAULT_FORMAT).create( ArgumentParser.filter(args, WordTagSampleStreamFactory.Parameters.class)); return new POSToTokenSampleStream(createDetokenizer(params), new ParseToPOSSampleStream(parseSampleStream)); } public static void registerFactory() { StreamFactoryRegistry.registerFactory(TokenSample.class, "parse", new ParseToTokenSampleStreamFactory()); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/frenchtreebank/ConstitDocumentHandler.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.frenchtreebank; import java.util.LinkedList; import java.util.List; import java.util.Stack; import org.xml.sax.Attributes; import org.xml.sax.SAXException; import org.xml.sax.helpers.DefaultHandler; import opennlp.tools.parser.AbstractBottomUpParser; import opennlp.tools.parser.Constituent; import opennlp.tools.parser.Parse; import opennlp.tools.util.Span; class ConstitDocumentHandler extends DefaultHandler { private static final String SENT_ELEMENT_NAME = "SENT"; private static final String WORD_ELEMENT_NAME = "w"; private static final String SENT_TYPE_NAME = "S"; private final List<Parse> parses; private boolean insideSentenceElement; /** * A token buffer, a token might be built up by multiple * {@link #characters(char[], int, int)} calls. */ private final StringBuilder tokenBuffer = new StringBuilder(); private final StringBuilder text = new StringBuilder(); private int offset; private final Stack<Constituent> stack = new Stack<>(); private final List<Constituent> cons = new LinkedList<>(); ConstitDocumentHandler(List<Parse> parses) { this.parses = parses; } private String cat; private String subcat; @Override public void startElement(String uri, String localName, String qName, Attributes attributes) throws SAXException { String type = qName; if (SENT_ELEMENT_NAME.equals(qName)) { // Clear everything to be ready for the next sentence text.setLength(0); offset = 0; stack.clear(); cons.clear(); type = SENT_TYPE_NAME; insideSentenceElement = true; } else if (WORD_ELEMENT_NAME.equals(qName)) { // Note: // If there are compound words they are represented in a couple // of ways in the training data. // Many of them are marked with the compound attribute, but not // all of them. That's why it is not used in the code to detect // a compound word. // Compounds are detected by the fact that a w tag appears // inside a w tag. // // The type of a compound word can be encoded either as the cat of the compound // plus the catint of each word of the compound, // or all compound words have the cat plus subcat of the compound; in this // case they have an empty cat attribute. // // This implementation hopefully decodes these cases correctly! String newCat = attributes.getValue("cat"); if (newCat != null && newCat.length() > 0) { cat = newCat; } String newSubcat = attributes.getValue("subcat"); if (newSubcat != null && newSubcat.length() > 0) { subcat = newSubcat; } if (cat != null) { type = cat + (subcat != null ? subcat : ""); } else { String catint = attributes.getValue("catint"); if (catint != null) { type = cat + catint; } else { type = cat + subcat; } } } stack.push(new Constituent(type, new Span(offset, offset))); tokenBuffer.setLength(0); } @Override public void characters(char[] ch, int start, int length) throws SAXException { tokenBuffer.append(ch, start, length); } @Override public void endElement(String uri, String localName, String qName) throws SAXException { boolean isCreateConstituent = true; if (insideSentenceElement) { if (WORD_ELEMENT_NAME.equals(qName)) { String token = tokenBuffer.toString().trim(); if (token.length() > 0) { cons.add(new Constituent(AbstractBottomUpParser.TOK_NODE, new Span(offset, offset + token.length()))); text.append(token).append(" "); offset += token.length() + 1; } else { isCreateConstituent = false; } } Constituent unfinishedCon = stack.pop(); if (isCreateConstituent) { int start = unfinishedCon.getSpan().getStart(); if (start < offset) { cons.add(new Constituent(unfinishedCon.getLabel(), new Span(start, offset - 1))); } } if (SENT_ELEMENT_NAME.equals(qName)) { // Finished parsing sentence, now put everything together and create // a Parse object String txt = text.toString(); int tokenIndex = -1; Parse p = new Parse(txt, new Span(0, txt.length()), AbstractBottomUpParser.TOP_NODE, 1, 0); for (int ci = 0; ci < cons.size(); ci++) { Constituent con = cons.get(ci); String type = con.getLabel(); if (!type.equals(AbstractBottomUpParser.TOP_NODE)) { if (AbstractBottomUpParser.TOK_NODE.equals(type)) { tokenIndex++; } Parse c = new Parse(txt, con.getSpan(), type, 1, tokenIndex); p.insert(c); } } parses.add(p); insideSentenceElement = false; } tokenBuffer.setLength(0); } } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/frenchtreebank/ConstitParseSampleStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.frenchtreebank; import java.io.ByteArrayInputStream; import java.io.IOException; import java.util.ArrayList; import java.util.List; import javax.xml.parsers.SAXParser; import org.xml.sax.SAXException; import opennlp.tools.parser.Parse; import opennlp.tools.util.FilterObjectStream; import opennlp.tools.util.ObjectStream; import opennlp.tools.util.XmlUtil; public class ConstitParseSampleStream extends FilterObjectStream<byte[], Parse> { private SAXParser saxParser; private List<Parse> parses = new ArrayList<>(); protected ConstitParseSampleStream(ObjectStream<byte[]> samples) { super(samples); saxParser = XmlUtil.createSaxParser(); } public Parse read() throws IOException { if (parses.isEmpty()) { byte[] xmlbytes = samples.read(); if (xmlbytes != null) { List<Parse> producedParses = new ArrayList<>(); try { saxParser.parse(new ByteArrayInputStream(xmlbytes), new ConstitDocumentHandler(producedParses)); } catch (SAXException e) { //TODO update after Java6 upgrade throw new IOException(e.getMessage(), e); } parses.addAll(producedParses); } } if (parses.size() > 0) { return parses.remove(0); } return null; } }
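// ---------------------------------------------------------------------------
// Hedged usage sketch (a separate companion class, not part of the stream
// source above): it feeds a single in-memory XML document through
// ConstitParseSampleStream. The XML fragment is an assumption for
// illustration; in practice the bytes come from a FileToByteArraySampleStream,
// as the accompanying stream factory wires up.
// ---------------------------------------------------------------------------
package opennlp.tools.formats.frenchtreebank;

import java.nio.charset.StandardCharsets;

import opennlp.tools.parser.Parse;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.ObjectStreamUtils;

class ConstitParseSampleStreamSketch {

  public static void main(String[] args) throws Exception {
    byte[] document =
        "<SENT><NP><w cat=\"N\">exemple</w></NP></SENT>".getBytes(StandardCharsets.UTF_8);

    // Wrap the document into an ObjectStream<byte[]> and decode it into parses.
    ObjectStream<Parse> samples = new ConstitParseSampleStream(
        ObjectStreamUtils.createObjectStream(new byte[][] {document}));

    Parse p;
    while ((p = samples.read()) != null) {
      p.show();
    }
    samples.close();
  }
}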
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/frenchtreebank/ConstitParseSampleStreamFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.frenchtreebank; import opennlp.tools.cmdline.ArgumentParser; import opennlp.tools.cmdline.StreamFactoryRegistry; import opennlp.tools.cmdline.params.BasicFormatParams; import opennlp.tools.formats.AbstractSampleStreamFactory; import opennlp.tools.formats.DirectorySampleStream; import opennlp.tools.formats.convert.FileToByteArraySampleStream; import opennlp.tools.parser.Parse; import opennlp.tools.util.ObjectStream; public class ConstitParseSampleStreamFactory extends AbstractSampleStreamFactory<Parse> { // TODO: The parameters have an encoding, but the data is in xml interface Parameters extends BasicFormatParams { } private ConstitParseSampleStreamFactory() { super(Parameters.class); } public ObjectStream<Parse> create(String[] args) { Parameters params = ArgumentParser.parse(args, Parameters.class); return new ConstitParseSampleStream(new FileToByteArraySampleStream( new DirectorySampleStream(params.getData(), null, false))); } public static void registerFactory() { StreamFactoryRegistry.registerFactory(Parse.class, "frenchtreebank", new ConstitParseSampleStreamFactory()); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/irishsentencebank/IrishSentenceBankDocument.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.irishsentencebank; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.lang.StringBuilder; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; import javax.xml.parsers.DocumentBuilder; import org.w3c.dom.Document; import org.w3c.dom.Node; import org.w3c.dom.NodeList; import org.xml.sax.SAXException; import opennlp.tools.tokenize.TokenSample; import opennlp.tools.util.Span; import opennlp.tools.util.XmlUtil; /** * A structure to hold an Irish Sentence Bank document, which is a collection * of tokenized sentences. * <p> * The sentence bank can be downloaded from, and is described * <a href="http://www.lexiconista.com/datasets/sentencebank-ga/">here</a> */ public class IrishSentenceBankDocument { public static class IrishSentenceBankFlex { String surface; String[] flex; public String getSurface() { return surface; } public String[] getFlex() { return flex; } public IrishSentenceBankFlex(String sf, String[] fl) { this.surface = sf; this.flex = fl; } } public static class IrishSentenceBankSentence { private String source; private String translation; private String original; private Span[] tokens; private IrishSentenceBankFlex[] flex; public String getSource() { return source; } public String getTranslation() { return translation; } public String getOriginal() { return original; } public Span[] getTokens() { return tokens; } public IrishSentenceBankFlex[] getFlex() { return flex; } public TokenSample getTokenSample() { return new TokenSample(original, tokens); } public IrishSentenceBankSentence(String src, String trans, String orig, Span[] toks, IrishSentenceBankFlex[] flx) { this.source = src; this.translation = trans; this.original = orig; this.tokens = toks; this.flex = flx; } } private List<IrishSentenceBankSentence> sentences; public IrishSentenceBankDocument() { sentences = new ArrayList<IrishSentenceBankSentence>(); } public void add(IrishSentenceBankSentence sent) { this.sentences.add(sent); } public List<IrishSentenceBankSentence> getSentences() { return Collections.unmodifiableList(sentences); } /** * Helper to adjust the span of punctuation tokens: ignores spaces to the left of the string * @param s the string to check * @param start the offset of the start of the string * @return the offset adjusted to ignore spaces to the left */ private static int advanceLeft(String s, int start) { int ret = start; for (char c : s.toCharArray()) { if (c == ' ') { ret++; } else { return ret; } } return ret; } /** * Helper to adjust the span of punctuation tokens: ignores spaces to the right of the string * @param s the string to 
check * @param start the offset of the start of the string * @return the offset of the end of the string, adjusted to ignore spaces to the right */ private static int advanceRight(String s, int start) { int end = s.length() - 1; int ret = start + end + 1; for (int i = end; i > 0; i--) { if (s.charAt(i) == ' ') { ret--; } else { return ret; } } return ret; } public static IrishSentenceBankDocument parse(InputStream is) throws IOException { IrishSentenceBankDocument document = new IrishSentenceBankDocument(); try { DocumentBuilder docBuilder = XmlUtil.createDocumentBuilder(); Document doc = docBuilder.parse(is); String root = doc.getDocumentElement().getNodeName(); if (!root.equalsIgnoreCase("sentences")) { throw new IOException("Expected root node " + root); } NodeList nl = doc.getDocumentElement().getChildNodes(); for (int i = 0; i < nl.getLength(); i++) { Node sentnode = nl.item(i); if (sentnode.getNodeName().equals("sentence")) { String src = sentnode.getAttributes().getNamedItem("source").getNodeValue(); String trans = ""; Map<Integer, String> toks = new HashMap<>(); Map<Integer, List<String>> flx = new HashMap<>(); List<Span> spans = new ArrayList<>(); NodeList sentnl = sentnode.getChildNodes(); int flexes = 1; StringBuilder orig = new StringBuilder(); for (int j = 0; j < sentnl.getLength(); j++) { final String name = sentnl.item(j).getNodeName(); switch (name) { case "flex": String slottmpa = sentnl.item(j).getAttributes().getNamedItem("slot").getNodeValue(); Integer flexslot = Integer.parseInt(slottmpa); if (flexslot > flexes) { flexes = flexslot; } flx.computeIfAbsent(flexslot, k -> new ArrayList<>()); String tkn = sentnl.item(j).getAttributes().getNamedItem("lemma").getNodeValue(); flx.get(flexslot).add(tkn); break; case "translation": trans = sentnl.item(j).getFirstChild().getTextContent(); break; case "original": int last = 0; NodeList orignl = sentnl.item(j).getChildNodes(); for (int k = 0; k < orignl.getLength(); k++) { switch (orignl.item(k).getNodeName()) { case "token": String tmptok = orignl.item(k).getFirstChild().getTextContent(); spans.add(new Span(last, last + tmptok.length())); String slottmpb = orignl.item(k).getAttributes().getNamedItem("slot").getNodeValue(); Integer tokslot = Integer.parseInt(slottmpb); if (tokslot > flexes) { flexes = tokslot; } toks.put(tokslot, tmptok); orig.append(tmptok); last += tmptok.length(); break; case "#text": String tmptxt = orignl.item(k).getTextContent(); orig.append(tmptxt); if (!" 
".equals(tmptxt)) { spans.add(new Span(advanceLeft(tmptxt, last), advanceRight(tmptxt, last))); } last += tmptxt.length(); break; default: throw new IOException("Unexpected node: " + orignl.item(k).getNodeName()); } } break; case "#text": case "#comment": break; default: throw new IOException("Unexpected node: " + name); } } IrishSentenceBankFlex[] flexa = new IrishSentenceBankFlex[flexes]; for (Entry<Integer, String> entry : toks.entrySet()) { final Integer flexidx = entry.getKey(); final String left = entry.getValue(); if (flx.get(flexidx) == null) { flexa = null; break; } int rsize = flx.get(flexidx).size(); String[] right = new String[rsize]; right = flx.get(flexidx).toArray(right); flexa[flexidx - 1] = new IrishSentenceBankFlex(left, right); } Span[] spanout = new Span[spans.size()]; spanout = spans.toArray(spanout); document.add(new IrishSentenceBankSentence(src, trans, orig.toString(), spanout, flexa)); } else if (!sentnode.getNodeName().equals("#text") && !sentnode.getNodeName().equals("#comment")) { throw new IOException("Unexpected node: " + sentnode.getNodeName()); } } return document; } catch (SAXException e) { throw new IOException("Failed to parse IrishSentenceBank document", e); } } static IrishSentenceBankDocument parse(File file) throws IOException { try (InputStream in = new FileInputStream(file)) { return parse(in); } } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/irishsentencebank/IrishSentenceBankSentenceStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.irishsentencebank; import java.io.IOException; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import opennlp.tools.sentdetect.SentenceSample; import opennlp.tools.util.ObjectStream; import opennlp.tools.util.Span; class IrishSentenceBankSentenceStream implements ObjectStream<SentenceSample> { private final IrishSentenceBankDocument source; private Iterator<IrishSentenceBankDocument.IrishSentenceBankSentence> sentenceIt; IrishSentenceBankSentenceStream(IrishSentenceBankDocument source) { this.source = source; reset(); } @Override public SentenceSample read() throws IOException { StringBuilder sentencesString = new StringBuilder(); List<Span> sentenceSpans = new LinkedList<>(); while (sentenceIt.hasNext()) { IrishSentenceBankDocument.IrishSentenceBankSentence sentence = sentenceIt.next(); int begin = sentencesString.length(); if (sentence.getOriginal() != null) { sentencesString.append(sentence.getOriginal()); } sentenceSpans.add(new Span(begin, sentencesString.length())); sentencesString.append(' '); } // end of stream is reached, indicate that with null return value if (sentenceSpans.size() == 0) { return null; } return new SentenceSample(sentencesString.toString(), sentenceSpans.toArray(new Span[sentenceSpans.size()])); } @Override public void reset() { sentenceIt = source.getSentences().iterator(); } }
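// ---------------------------------------------------------------------------
// Hedged usage sketch (a separate companion class, not part of the stream
// source above): it parses a one-sentence document in memory and wraps it in
// the sentence stream. The XML fragment is an illustration-only assumption.
// ---------------------------------------------------------------------------
package opennlp.tools.formats.irishsentencebank;

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

import opennlp.tools.sentdetect.SentenceSample;

class IrishSentenceBankSentenceStreamSketch {

  public static void main(String[] args) throws Exception {
    String xml = "<sentences><sentence source='example'>"
        + "<original><token slot='1'>Dia</token> <token slot='2'>duit</token></original>"
        + "</sentence></sentences>";

    IrishSentenceBankDocument doc = IrishSentenceBankDocument.parse(
        new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8)));

    // The stream aggregates all sentences of the document into a single sample.
    IrishSentenceBankSentenceStream stream = new IrishSentenceBankSentenceStream(doc);
    SentenceSample sample;
    while ((sample = stream.read()) != null) {
      System.out.println(sample.getSentences().length + " sentence span(s): " + sample.getDocument());
    }
  }
}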
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/irishsentencebank/IrishSentenceBankSentenceStreamFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.irishsentencebank; import java.io.IOException; import opennlp.tools.cmdline.ArgumentParser; import opennlp.tools.cmdline.CmdLineUtil; import opennlp.tools.cmdline.StreamFactoryRegistry; import opennlp.tools.cmdline.params.BasicFormatParams; import opennlp.tools.formats.AbstractSampleStreamFactory; import opennlp.tools.sentdetect.SentenceSample; import opennlp.tools.util.ObjectStream; public class IrishSentenceBankSentenceStreamFactory extends AbstractSampleStreamFactory<SentenceSample> { interface Parameters extends BasicFormatParams { } public static void registerFactory() { StreamFactoryRegistry.registerFactory(SentenceSample.class, "irishsentencebank", new IrishSentenceBankSentenceStreamFactory( IrishSentenceBankSentenceStreamFactory.Parameters.class)); } protected <P> IrishSentenceBankSentenceStreamFactory(Class<P> params) { super(params); } @Override public ObjectStream<SentenceSample> create(String[] args) { Parameters params = ArgumentParser.parse(args, Parameters.class); CmdLineUtil.checkInputFile("Data", params.getData()); IrishSentenceBankDocument isbDoc = null; try { isbDoc = IrishSentenceBankDocument.parse(params.getData()); } catch (IOException ex) { CmdLineUtil.handleCreateObjectStreamError(ex); } return new IrishSentenceBankSentenceStream(isbDoc); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/irishsentencebank/IrishSentenceBankTokenSampleStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.irishsentencebank; import java.io.IOException; import java.util.Iterator; import opennlp.tools.tokenize.TokenSample; import opennlp.tools.util.ObjectStream; class IrishSentenceBankTokenSampleStream implements ObjectStream<TokenSample> { private final IrishSentenceBankDocument source; private Iterator<IrishSentenceBankDocument.IrishSentenceBankSentence> sentenceIt; IrishSentenceBankTokenSampleStream(IrishSentenceBankDocument source) { this.source = source; reset(); } @Override public TokenSample read() throws IOException { if (sentenceIt.hasNext()) { IrishSentenceBankDocument.IrishSentenceBankSentence sentence = sentenceIt.next(); return sentence.getTokenSample(); } else { return null; } } @Override public void reset() { sentenceIt = source.getSentences().iterator(); } }
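// ---------------------------------------------------------------------------
// Hedged usage sketch (a separate companion class, not part of the stream
// source above): it parses a one-sentence document in memory and prints the
// TokenSample produced for each sentence. The XML fragment is an
// illustration-only assumption.
// ---------------------------------------------------------------------------
package opennlp.tools.formats.irishsentencebank;

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

import opennlp.tools.tokenize.TokenSample;

class IrishSentenceBankTokenSampleStreamSketch {

  public static void main(String[] args) throws Exception {
    String xml = "<sentences><sentence source='example'>"
        + "<original><token slot='1'>Slán</token> <token slot='2'>leat</token></original>"
        + "</sentence></sentences>";

    IrishSentenceBankDocument doc = IrishSentenceBankDocument.parse(
        new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8)));

    IrishSentenceBankTokenSampleStream stream = new IrishSentenceBankTokenSampleStream(doc);
    TokenSample sample;
    while ((sample = stream.read()) != null) {
      System.out.println(sample.getText());
    }
  }
}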
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/irishsentencebank/IrishSentenceBankTokenSampleStreamFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.irishsentencebank; import java.io.IOException; import opennlp.tools.cmdline.ArgumentParser; import opennlp.tools.cmdline.CmdLineUtil; import opennlp.tools.cmdline.StreamFactoryRegistry; import opennlp.tools.cmdline.params.BasicFormatParams; import opennlp.tools.formats.DetokenizerSampleStreamFactory; import opennlp.tools.tokenize.TokenSample; import opennlp.tools.util.ObjectStream; public class IrishSentenceBankTokenSampleStreamFactory extends DetokenizerSampleStreamFactory<TokenSample> { interface Parameters extends BasicFormatParams { } public static void registerFactory() { StreamFactoryRegistry.registerFactory(TokenSample.class, "irishsentencebank", new IrishSentenceBankTokenSampleStreamFactory( IrishSentenceBankTokenSampleStreamFactory.Parameters.class)); } protected <P> IrishSentenceBankTokenSampleStreamFactory(Class<P> params) { super(params); } public ObjectStream<TokenSample> create(String[] args) { Parameters params = ArgumentParser.parse(args, Parameters.class); CmdLineUtil.checkInputFile("Data", params.getData()); IrishSentenceBankDocument isbDoc = null; try { isbDoc = IrishSentenceBankDocument.parse(params.getData()); } catch (IOException ex) { CmdLineUtil.handleCreateObjectStreamError(ex); } return new IrishSentenceBankTokenSampleStream(isbDoc); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/leipzig/LeipzigLanguageSampleStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.leipzig; import java.io.File; import java.io.FileFilter; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Random; import java.util.Set; import java.util.stream.Collectors; import java.util.stream.IntStream; import opennlp.tools.langdetect.Language; import opennlp.tools.langdetect.LanguageSample; import opennlp.tools.util.InvalidFormatException; import opennlp.tools.util.MarkableFileInputStreamFactory; import opennlp.tools.util.ObjectStream; import opennlp.tools.util.PlainTextByLineStream; public class LeipzigLanguageSampleStream implements ObjectStream<LanguageSample> { private class LeipzigSentencesStream implements ObjectStream<LanguageSample> { private final String lang; private Iterator<String> lineIterator; LeipzigSentencesStream(String lang, File sentencesFile, int sentencesPerSample, int numberOfSamples) throws IOException { this.lang = lang; // The file name contains the number of lines, but to make this more stable // the file is once scanned for the count even tough this is slower int totalLineCount = (int) Files.lines(sentencesFile.toPath()).count(); int requiredLines = sentencesPerSample * numberOfSamples; if (totalLineCount < requiredLines) throw new InvalidFormatException( String.format("%s does not contain enough lines (%d lines < %d required lines).", sentencesFile.getPath(), totalLineCount, requiredLines)); List<Integer> indexes = IntStream.range(0, totalLineCount) .boxed().collect(Collectors.toList()); Collections.shuffle(indexes, random); Set<Integer> selectedLines = new HashSet<>(indexes.subList(0, requiredLines)); List<String> sentences = new ArrayList<>(); try (ObjectStream<String> lineStream = new PlainTextByLineStream( new MarkableFileInputStreamFactory(sentencesFile), StandardCharsets.UTF_8)) { int lineIndex = 0; String line; while ((line = lineStream.read()) != null) { int tabIndex = line.indexOf('\t'); if (tabIndex != -1) { if (selectedLines.contains(lineIndex)) { sentences.add(line); } } lineIndex++; } } Collections.shuffle(sentences, random); lineIterator = sentences.iterator(); } @Override public LanguageSample read() throws IOException { StringBuilder sampleString = new StringBuilder(); int count = 0; while (count < sentencesPerSample && lineIterator.hasNext()) { String line = lineIterator.next(); int textStart = line.indexOf('\t') + 1; sampleString.append(line.substring(textStart) + " "); count++; } if (sampleString.length() > 0) { return new LanguageSample(new Language(lang), sampleString); } return null; } } private 
final int sentencesPerSample; private Map<String, Integer> langSampleCounts; private File[] sentencesFiles; private Iterator<File> sentencesFilesIt; private ObjectStream<LanguageSample> sampleStream; private final Random random; public LeipzigLanguageSampleStream(File leipzigFolder, final int sentencesPerSample, final int samplesPerLanguage) throws IOException { this.sentencesPerSample = sentencesPerSample; sentencesFiles = leipzigFolder.listFiles(new FileFilter() { @Override public boolean accept(File pathname) { return !pathname.isHidden() && pathname.isFile() && pathname.getName().length() >= 3 && pathname.getName().substring(0,3).matches("[a-z]+"); } }); Arrays.sort(sentencesFiles); Map<String, Integer> langCounts = Arrays.stream(sentencesFiles) .map(file -> file.getName().substring(0, 3)) .collect(Collectors.groupingBy(String::toString, Collectors.summingInt(v -> 1))); langSampleCounts = langCounts.entrySet().stream() .collect(Collectors.toMap(Map.Entry::getKey, e -> samplesPerLanguage / e.getValue())); random = new Random(23); reset(); } public LanguageSample read() throws IOException { LanguageSample sample; if (sampleStream != null && (sample = sampleStream.read()) != null) { return sample; } else { if (sentencesFilesIt.hasNext()) { File sentencesFile = sentencesFilesIt.next(); String lang = sentencesFile.getName().substring(0, 3); sampleStream = new LeipzigSentencesStream(lang, sentencesFile, sentencesPerSample, langSampleCounts.get(lang)); return read(); } } return null; } @Override public void reset() throws IOException { sentencesFilesIt = Arrays.asList(sentencesFiles).iterator(); sampleStream = null; } }
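// ---------------------------------------------------------------------------
// Hedged usage sketch (a separate companion class, not part of the stream
// source above): it builds the stream over a local Leipzig corpus folder and
// counts samples per language. The folder path "leipzig-sentences" is an
// assumption; its files are expected to be tab-separated "<id>\t<sentence>"
// lines whose names start with a three-letter language code.
// ---------------------------------------------------------------------------
package opennlp.tools.formats.leipzig;

import java.io.File;
import java.util.HashMap;
import java.util.Map;

import opennlp.tools.langdetect.LanguageSample;
import opennlp.tools.util.ObjectStream;

class LeipzigLanguageSampleStreamSketch {

  public static void main(String[] args) throws Exception {
    File leipzigFolder = new File("leipzig-sentences"); // assumed directory

    // 5 sentences per sample, 100 samples per language; the constructor splits
    // that per-language quota evenly across the files of each language.
    ObjectStream<LanguageSample> samples =
        new LeipzigLanguageSampleStream(leipzigFolder, 5, 100);

    Map<String, Integer> counts = new HashMap<>();
    LanguageSample sample;
    while ((sample = samples.read()) != null) {
      counts.merge(sample.getLanguage().getLang(), 1, Integer::sum);
    }
    samples.close();
    System.out.println(counts);
  }
}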
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/leipzig/LeipzigLanguageSampleStreamFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.leipzig; import java.io.File; import java.io.IOException; import opennlp.tools.cmdline.ArgumentParser; import opennlp.tools.cmdline.ArgumentParser.OptionalParameter; import opennlp.tools.cmdline.ArgumentParser.ParameterDescription; import opennlp.tools.cmdline.StreamFactoryRegistry; import opennlp.tools.cmdline.TerminateToolException; import opennlp.tools.cmdline.params.EncodingParameter; import opennlp.tools.formats.AbstractSampleStreamFactory; import opennlp.tools.langdetect.LanguageSample; import opennlp.tools.util.ObjectStream; /** * <b>Note:</b> Do not use this class, internal use only! */ public class LeipzigLanguageSampleStreamFactory extends AbstractSampleStreamFactory<LanguageSample> { interface Parameters extends EncodingParameter { @ParameterDescription(valueName = "sentencesDir", description = "dir with Leipig sentences to be used") File getSentencesDir(); @ParameterDescription(valueName = "sentencesPerSample", description = "number of sentences per sample") String getSentencesPerSample(); @ParameterDescription(valueName = "samplesPerLanguage", description = "number of samples per language") String getSamplesPerLanguage(); @ParameterDescription(valueName = "samplesToSkip", description = "number of samples to skip before returning") @OptionalParameter(defaultValue = "0") String getSamplesToSkip(); } protected <P> LeipzigLanguageSampleStreamFactory(Class<P> params) { super(params); } public static void registerFactory() { StreamFactoryRegistry.registerFactory(LanguageSample.class, "leipzig", new LeipzigLanguageSampleStreamFactory(Parameters.class)); } public ObjectStream<LanguageSample> create(String[] args) { Parameters params = ArgumentParser.parse(args, Parameters.class); File sentencesFileDir = params.getSentencesDir(); try { return new SampleSkipStream(new SampleShuffleStream( new LeipzigLanguageSampleStream(sentencesFileDir, Integer.parseInt(params.getSentencesPerSample()), Integer.parseInt(params.getSamplesPerLanguage()) + Integer.parseInt(params.getSamplesToSkip()))), Integer.parseInt(params.getSamplesToSkip())); } catch (IOException e) { throw new TerminateToolException(-1, "IO error while opening sample data.", e); } } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/leipzig/SampleShuffleStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.leipzig; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Random; import opennlp.tools.util.ObjectStream; class SampleShuffleStream<T> implements ObjectStream<T> { private List<T> bufferedSamples = new ArrayList<>(); private Iterator<T> sampleIt; SampleShuffleStream(ObjectStream<T> samples) throws IOException { T sample; while ((sample = samples.read()) != null) { bufferedSamples.add(sample); } Collections.shuffle(bufferedSamples, new Random(23)); reset(); } @Override public T read() throws IOException { if (sampleIt.hasNext()) { return sampleIt.next(); } return null; } @Override public void reset() throws IOException, UnsupportedOperationException { sampleIt = bufferedSamples.iterator(); } }
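// ---------------------------------------------------------------------------
// Hedged usage sketch (a separate companion class, not part of the stream
// source above): it shows the buffering and shuffling behaviour on a small
// in-memory stream. The String elements are arbitrary illustration data.
// ---------------------------------------------------------------------------
package opennlp.tools.formats.leipzig;

import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.ObjectStreamUtils;

class SampleShuffleStreamSketch {

  public static void main(String[] args) throws Exception {
    ObjectStream<String> source =
        ObjectStreamUtils.createObjectStream("a", "b", "c", "d", "e");

    // The constructor drains the source completely and shuffles with a fixed
    // seed, so the order below is scrambled but reproducible across runs.
    ObjectStream<String> shuffled = new SampleShuffleStream<>(source);

    String sample;
    while ((sample = shuffled.read()) != null) {
      System.out.print(sample + " ");
    }
    System.out.println();
  }
}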
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/leipzig/SampleSkipStream.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.formats.leipzig;

import java.io.IOException;

import opennlp.tools.util.ObjectStream;

class SampleSkipStream<T> implements ObjectStream<T> {

  private final ObjectStream<T> samples;
  private final int samplesToSkip;

  SampleSkipStream(ObjectStream<T> samples, int samplesToSkip) throws IOException {
    this.samples = samples;
    this.samplesToSkip = samplesToSkip;

    skipSamples();
  }

  @Override
  public T read() throws IOException {
    return samples.read();
  }

  @Override
  public void reset() throws IOException, UnsupportedOperationException {
    this.samples.reset();
    skipSamples();
  }

  private void skipSamples() throws IOException {
    int i = 0;
    while (i < samplesToSkip && (samples.read()) != null) {
      i++;
    }
  }
}
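// ---------------------------------------------------------------------------
// Hedged usage sketch (a separate companion class, not part of the stream
// source above): it skips the first two elements of a small in-memory stream.
// The String elements are arbitrary illustration data.
// ---------------------------------------------------------------------------
package opennlp.tools.formats.leipzig;

import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.ObjectStreamUtils;

class SampleSkipStreamSketch {

  public static void main(String[] args) throws Exception {
    ObjectStream<String> source =
        ObjectStreamUtils.createObjectStream("a", "b", "c", "d");

    // Skips "a" and "b" eagerly in the constructor; read() then starts at "c".
    ObjectStream<String> skipped = new SampleSkipStream<>(source, 2);

    String sample;
    while ((sample = skipped.read()) != null) {
      System.out.println(sample);
    }
  }
}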
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/letsmt/DetokenizeSentenceSampleStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.letsmt; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Objects; import opennlp.tools.sentdetect.SentenceSample; import opennlp.tools.tokenize.Detokenizer; import opennlp.tools.tokenize.WhitespaceTokenizer; import opennlp.tools.util.FilterObjectStream; import opennlp.tools.util.ObjectStream; import opennlp.tools.util.Span; public class DetokenizeSentenceSampleStream extends FilterObjectStream<SentenceSample, SentenceSample> { private final Detokenizer detokenizer; public DetokenizeSentenceSampleStream(Detokenizer detokenizer, ObjectStream<SentenceSample> samples) { super(samples); this.detokenizer = Objects.requireNonNull(detokenizer); } @Override public SentenceSample read() throws IOException { SentenceSample sample = samples.read(); if (sample != null) { List<String> sentenceTexts = new ArrayList<>(); for (Span sentenceSpan : sample.getSentences()) { sentenceTexts.add(sample.getDocument().substring(sentenceSpan.getStart(), sentenceSpan.getEnd())); } StringBuilder documentText = new StringBuilder(); List<Span> newSentenceSpans = new ArrayList<>(); for (String sentenceText : sentenceTexts) { String[] tokens = WhitespaceTokenizer.INSTANCE.tokenize(sentenceText); int begin = documentText.length(); documentText.append(detokenizer.detokenize(tokens, null)); newSentenceSpans.add(new Span(begin, documentText.length())); documentText.append(' '); } return new SentenceSample(documentText, newSentenceSpans.toArray(new Span[newSentenceSpans.size()])); } return null; } }
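// ---------------------------------------------------------------------------
// Hedged usage sketch (a separate companion class, not part of the stream
// source above): it runs one tokenized SentenceSample through the
// detokenizing filter. The empty detokenization dictionary and the sample
// text are illustration-only assumptions; a real run would load dictionary
// rules from a file, as the letsmt stream factory below does.
// ---------------------------------------------------------------------------
package opennlp.tools.formats.letsmt;

import opennlp.tools.sentdetect.SentenceSample;
import opennlp.tools.tokenize.DetokenizationDictionary;
import opennlp.tools.tokenize.DictionaryDetokenizer;
import opennlp.tools.util.ObjectStreamUtils;
import opennlp.tools.util.Span;

class DetokenizeSentenceSampleStreamSketch {

  public static void main(String[] args) throws Exception {
    // One document with two whitespace-tokenized sentences.
    String doc = "This is sentence one . And sentence two .";
    SentenceSample sample = new SentenceSample(doc,
        new Span(0, 22), new Span(23, doc.length()));

    DetokenizeSentenceSampleStream stream = new DetokenizeSentenceSampleStream(
        new DictionaryDetokenizer(new DetokenizationDictionary(
            new String[0], new DetokenizationDictionary.Operation[0])),
        ObjectStreamUtils.createObjectStream(sample));

    SentenceSample detokenized = stream.read();
    System.out.println(detokenized.getDocument());
  }
}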
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/letsmt/LetsmtDocument.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.letsmt; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import javax.xml.parsers.SAXParser; import org.xml.sax.InputSource; import org.xml.sax.SAXException; import org.xml.sax.XMLReader; import org.xml.sax.helpers.DefaultHandler; import opennlp.tools.util.XmlUtil; /** * A structure to hold the letsmt document. The documents contains sentences and depending on the * source it either contains tokenized text (words) or an un-tokenized sentence string. * <p> * The format specification can be found * <a href="http://project.letsmt.eu/uploads/Deliverables/D2.1%20%20Specification%20of%20data%20formats%20v1%20final.pdf">here</a>. */ public class LetsmtDocument { public static class LetsmtSentence { private String nonTokenizedText; private String[] tokens; public String getNonTokenizedText() { return nonTokenizedText; } public String[] getTokens() { if (tokens != null) { return Arrays.copyOf(tokens, tokens.length); } return null; } } // define a content handler to receive the sax events ... 
public static class LetsmtDocumentHandler extends DefaultHandler { private List<LetsmtSentence> sentences = new ArrayList<>(); private StringBuilder chars = new StringBuilder(); private List<String> tokens = new ArrayList<>(); @Override public void characters(char[] ch, int start, int length) throws SAXException { chars.append(ch, start, length); } @Override public void endElement(String uri, String localName, String qName) throws SAXException { super.endElement(uri, localName, qName); // Note: // words are optional in sentences, if there are no words just the chars have to be captured switch (qName) { case "w": tokens.add(chars.toString().trim()); chars.setLength(0); break; // TODO: The sentence should contain the id, so it can be tracked back to the // place it came from case "s": LetsmtSentence sentence = new LetsmtSentence(); if (tokens.size() > 0) { sentence.tokens = tokens.toArray(new String[tokens.size()]); tokens = new ArrayList<>(); } else { sentence.nonTokenizedText = chars.toString().trim(); } sentences.add(sentence); chars.setLength(0); } } } private List<LetsmtSentence> sentences = new ArrayList<>(); private LetsmtDocument(List<LetsmtSentence> sentences) { this.sentences = sentences; } public List<LetsmtSentence> getSentences() { return Collections.unmodifiableList(sentences); } static LetsmtDocument parse(InputStream letsmtXmlIn) throws IOException { SAXParser saxParser = XmlUtil.createSaxParser(); try { XMLReader xmlReader = saxParser.getXMLReader(); LetsmtDocumentHandler docHandler = new LetsmtDocumentHandler(); xmlReader.setContentHandler(docHandler); xmlReader.parse(new InputSource(letsmtXmlIn)); return new LetsmtDocument(docHandler.sentences); } catch (SAXException e) { throw new IOException("Failed to parse letsmt xml!", e); } } static LetsmtDocument parse(File file) throws IOException { try (InputStream in = new FileInputStream(file)) { return parse(in); } } }
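// ---------------------------------------------------------------------------
// Hedged usage sketch (a separate companion class, not part of the document
// parser above): it parses a tiny, hypothetical LetsMT! fragment and prints
// whether each sentence arrived tokenized or as plain text. Element names
// other than <s> and <w> are not interpreted by the handler above.
// ---------------------------------------------------------------------------
package opennlp.tools.formats.letsmt;

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

class LetsmtDocumentSketch {

  public static void main(String[] args) throws Exception {
    String xml = "<letsmt>"
        + "<s id=\"1\"><w>Hello</w><w>world</w><w>!</w></s>"
        + "<s id=\"2\">An untokenized sentence.</s>"
        + "</letsmt>";

    LetsmtDocument doc = LetsmtDocument.parse(
        new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8)));

    for (LetsmtDocument.LetsmtSentence sentence : doc.getSentences()) {
      if (sentence.getTokens() != null) {
        System.out.println("tokenized:  " + Arrays.toString(sentence.getTokens()));
      } else {
        System.out.println("plain text: " + sentence.getNonTokenizedText());
      }
    }
  }
}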
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/letsmt/LetsmtSentenceStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.letsmt; import java.io.IOException; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import opennlp.tools.sentdetect.SentenceSample; import opennlp.tools.util.ObjectStream; import opennlp.tools.util.Span; class LetsmtSentenceStream implements ObjectStream<SentenceSample> { private final LetsmtDocument source; private Iterator<LetsmtDocument.LetsmtSentence> sentenceIt; LetsmtSentenceStream(LetsmtDocument source) { this.source = source; reset(); } @Override public SentenceSample read() throws IOException { StringBuilder sentencesString = new StringBuilder(); List<Span> sentenceSpans = new LinkedList<>(); for (int i = 0; sentenceIt.hasNext() && i < 25 ; i++) { LetsmtDocument.LetsmtSentence sentence = sentenceIt.next(); int begin = sentencesString.length(); if (sentence.getTokens() != null) { sentencesString.append(String.join(" ", sentence.getTokens())); } else if (sentence.getNonTokenizedText() != null) { sentencesString.append(sentence.getNonTokenizedText()); } sentenceSpans.add(new Span(begin, sentencesString.length())); sentencesString.append(' '); } // end of stream is reached, indicate that with null return value if (sentenceSpans.size() == 0) { return null; } return new SentenceSample(sentencesString.toString(), sentenceSpans.toArray(new Span[sentenceSpans.size()])); } @Override public void reset() { sentenceIt = source.getSentences().iterator(); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/letsmt/LetsmtSentenceStreamFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.letsmt; import java.io.File; import java.io.IOException; import opennlp.tools.cmdline.ArgumentParser; import opennlp.tools.cmdline.CmdLineUtil; import opennlp.tools.cmdline.StreamFactoryRegistry; import opennlp.tools.cmdline.TerminateToolException; import opennlp.tools.cmdline.params.BasicFormatParams; import opennlp.tools.formats.AbstractSampleStreamFactory; import opennlp.tools.sentdetect.SentenceSample; import opennlp.tools.tokenize.DetokenizationDictionary; import opennlp.tools.tokenize.Detokenizer; import opennlp.tools.tokenize.DictionaryDetokenizer; import opennlp.tools.util.ObjectStream; public class LetsmtSentenceStreamFactory extends AbstractSampleStreamFactory<SentenceSample> { interface Parameters extends BasicFormatParams { @ArgumentParser.ParameterDescription(valueName = "dictionary", description = "specifies the file with detokenizer dictionary.") @ArgumentParser.OptionalParameter File getDetokenizer(); } public static void registerFactory() { StreamFactoryRegistry.registerFactory(SentenceSample.class, "letsmt", new LetsmtSentenceStreamFactory( LetsmtSentenceStreamFactory.Parameters.class)); } protected <P> LetsmtSentenceStreamFactory(Class<P> params) { super(params); } @Override public ObjectStream<SentenceSample> create(String[] args) { Parameters params = ArgumentParser.parse(args, Parameters.class); CmdLineUtil.checkInputFile("Data", params.getData()); LetsmtDocument letsmtDoc = null; try { letsmtDoc = LetsmtDocument.parse(params.getData()); } catch (IOException ex) { CmdLineUtil.handleCreateObjectStreamError(ex); } // TODO: // Implement a filter stream to remove splits which are not at an eos char ObjectStream<SentenceSample> samples = new LetsmtSentenceStream(letsmtDoc); if (params.getDetokenizer() != null) { try { Detokenizer detokenizer = new DictionaryDetokenizer( new DetokenizationDictionary(params.getDetokenizer())); samples = new DetokenizeSentenceSampleStream(detokenizer, samples); } catch (IOException e) { throw new TerminateToolException(-1, "Failed to load detokenizer rules!", e); } } return samples; } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/moses/MosesSentenceSampleStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.moses; import java.io.IOException; import java.util.LinkedList; import java.util.List; import opennlp.tools.sentdetect.EmptyLinePreprocessorStream; import opennlp.tools.sentdetect.SentenceSample; import opennlp.tools.util.FilterObjectStream; import opennlp.tools.util.ObjectStream; import opennlp.tools.util.Span; public class MosesSentenceSampleStream extends FilterObjectStream<String, SentenceSample> { public MosesSentenceSampleStream(ObjectStream<String> sentences) { super(new EmptyLinePreprocessorStream(sentences)); } public SentenceSample read() throws IOException { StringBuilder sentencesString = new StringBuilder(); List<Span> sentenceSpans = new LinkedList<>(); String sentence; for (int i = 0; i < 25 && (sentence = samples.read()) != null; i++) { int begin = sentencesString.length(); sentence = sentence.trim(); sentencesString.append(sentence); int end = sentencesString.length(); sentenceSpans.add(new Span(begin, end)); sentencesString.append(' '); } if (sentenceSpans.size() > 0) { return new SentenceSample(sentencesString.toString(), sentenceSpans.toArray(new Span[sentenceSpans.size()])); } return null; } }
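// ---------------------------------------------------------------------------
// Hedged usage sketch (a separate companion class, not part of the stream
// source above): it groups a few in-memory Moses-style lines (one sentence per
// line) into a single SentenceSample. The sentences are illustration data.
// ---------------------------------------------------------------------------
package opennlp.tools.formats.moses;

import java.util.Arrays;

import opennlp.tools.sentdetect.SentenceSample;
import opennlp.tools.util.ObjectStreamUtils;

class MosesSentenceSampleStreamSketch {

  public static void main(String[] args) throws Exception {
    MosesSentenceSampleStream stream = new MosesSentenceSampleStream(
        ObjectStreamUtils.createObjectStream(
            "This is the first sentence .",
            "This is the second sentence ."));

    // Up to 25 input lines are merged into one sample.
    SentenceSample sample = stream.read();
    System.out.println(sample.getDocument());
    System.out.println(Arrays.toString(sample.getSentences()));
  }
}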
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/moses/MosesSentenceSampleStreamFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.moses; import java.io.IOException; import opennlp.tools.cmdline.ArgumentParser; import opennlp.tools.cmdline.CmdLineUtil; import opennlp.tools.cmdline.StreamFactoryRegistry; import opennlp.tools.cmdline.params.BasicFormatParams; import opennlp.tools.formats.AbstractSampleStreamFactory; import opennlp.tools.sentdetect.SentenceSample; import opennlp.tools.util.InputStreamFactory; import opennlp.tools.util.ObjectStream; import opennlp.tools.util.PlainTextByLineStream; /** * Factory producing OpenNLP {@link MosesSentenceSampleStream}s. */ public class MosesSentenceSampleStreamFactory extends AbstractSampleStreamFactory<SentenceSample> { interface Parameters extends BasicFormatParams { } public static void registerFactory() { StreamFactoryRegistry.registerFactory(SentenceSample.class, "moses", new MosesSentenceSampleStreamFactory(Parameters.class)); } protected <P> MosesSentenceSampleStreamFactory(Class<P> params) { super(params); } public ObjectStream<SentenceSample> create(String[] args) { Parameters params = ArgumentParser.parse(args, Parameters.class); CmdLineUtil.checkInputFile("Data", params.getData()); InputStreamFactory sampleDataIn = CmdLineUtil.createInputStreamFactory(params.getData()); ObjectStream<String> lineStream = null; try { lineStream = new PlainTextByLineStream(sampleDataIn, params.getEncoding()); } catch (IOException ex) { CmdLineUtil.handleCreateObjectStreamError(ex); } return new MosesSentenceSampleStream(lineStream); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/muc/DocumentSplitterStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.muc; import java.io.IOException; import java.util.ArrayList; import java.util.List; import opennlp.tools.util.FilterObjectStream; import opennlp.tools.util.InvalidFormatException; import opennlp.tools.util.ObjectStream; class DocumentSplitterStream extends FilterObjectStream<String, String> { private static final String DOC_START_ELEMENT = "<DOC>"; private static final String DOC_END_ELEMENT = "</DOC>"; private List<String> docs = new ArrayList<>(); DocumentSplitterStream(ObjectStream<String> samples) { super(samples); } public String read() throws IOException { if (docs.isEmpty()) { String newDocs = samples.read(); if (newDocs != null) { int docStartOffset = 0; while (true) { int startDocElement = newDocs.indexOf(DOC_START_ELEMENT, docStartOffset); int endDocElement = newDocs.indexOf(DOC_END_ELEMENT, docStartOffset); if (startDocElement != -1 && endDocElement != -1) { if (startDocElement < endDocElement) { docs.add(newDocs.substring(startDocElement, endDocElement + DOC_END_ELEMENT.length())); docStartOffset = endDocElement + DOC_END_ELEMENT.length(); } else { throw new InvalidFormatException("<DOC> element is not closed!"); } } else if (startDocElement != endDocElement) { throw new InvalidFormatException("Missing <DOC> or </DOC> element!"); } else { break; } } } } if (docs.size() > 0) { return docs.remove(0); } else { return null; } } }
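// ---------------------------------------------------------------------------
// Hedged usage sketch (a separate companion class, not part of the stream
// source above): it splits a string holding two concatenated <DOC> elements
// into individual documents. The document bodies are illustration data.
// ---------------------------------------------------------------------------
package opennlp.tools.formats.muc;

import opennlp.tools.util.ObjectStreamUtils;

class DocumentSplitterStreamSketch {

  public static void main(String[] args) throws Exception {
    String twoDocs = "<DOC>first document</DOC>\n<DOC>second document</DOC>";

    DocumentSplitterStream docs = new DocumentSplitterStream(
        ObjectStreamUtils.createObjectStream(twoDocs));

    String doc;
    while ((doc = docs.read()) != null) {
      System.out.println(doc);
    }
  }
}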
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/muc/Muc6NameSampleStreamFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.muc; import java.io.File; import java.nio.charset.StandardCharsets; import opennlp.tools.cmdline.ArgumentParser; import opennlp.tools.cmdline.ArgumentParser.ParameterDescription; import opennlp.tools.cmdline.StreamFactoryRegistry; import opennlp.tools.cmdline.params.BasicFormatParams; import opennlp.tools.cmdline.tokenizer.TokenizerModelLoader; import opennlp.tools.formats.AbstractSampleStreamFactory; import opennlp.tools.formats.DirectorySampleStream; import opennlp.tools.formats.convert.FileToStringSampleStream; import opennlp.tools.namefind.NameSample; import opennlp.tools.tokenize.Tokenizer; import opennlp.tools.tokenize.TokenizerME; import opennlp.tools.tokenize.TokenizerModel; import opennlp.tools.util.ObjectStream; import opennlp.tools.util.StringUtil; public class Muc6NameSampleStreamFactory extends AbstractSampleStreamFactory<NameSample> { protected Muc6NameSampleStreamFactory() { super(Parameters.class); } public static void registerFactory() { StreamFactoryRegistry.registerFactory(NameSample.class, "muc6", new Muc6NameSampleStreamFactory()); } public ObjectStream<NameSample> create(String[] args) { Parameters params = ArgumentParser.parse(args, Parameters.class); TokenizerModel tokenizerModel = new TokenizerModelLoader().load(params.getTokenizerModel()); Tokenizer tokenizer = new TokenizerME(tokenizerModel); ObjectStream<String> mucDocStream = new FileToStringSampleStream( new DirectorySampleStream(params.getData(), file -> StringUtil.toLowerCase(file.getName()).endsWith(".sgm"), false), StandardCharsets.UTF_8); return new MucNameSampleStream(tokenizer, mucDocStream); } interface Parameters extends BasicFormatParams { @ParameterDescription(valueName = "modelFile") File getTokenizerModel(); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/muc/MucElementNames.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.formats.muc;

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

class MucElementNames {

  static final String DOC_ELEMENT = "DOC";
  static final String HEADLINE_ELEMENT = "HL";
  static final String DATELINE_ELEMENT = "DATELINE";
  static final String DD_ELEMENT = "DD";
  static final String SENTENCE_ELEMENT = "s";

  static final Set<String> CONTENT_ELEMENTS;

  static {
    Set<String> contentElementNames = new HashSet<>();
    contentElementNames.add(MucElementNames.HEADLINE_ELEMENT);
    contentElementNames.add(MucElementNames.DATELINE_ELEMENT);
    contentElementNames.add(MucElementNames.DD_ELEMENT);
    contentElementNames.add(MucElementNames.SENTENCE_ELEMENT);
    CONTENT_ELEMENTS = Collections.unmodifiableSet(contentElementNames);
  }

  private MucElementNames() {
  }
}
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/muc/MucNameContentHandler.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.muc; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.Stack; import opennlp.tools.namefind.NameSample; import opennlp.tools.tokenize.Tokenizer; import opennlp.tools.util.InvalidFormatException; import opennlp.tools.util.Span; public class MucNameContentHandler extends SgmlParser.ContentHandler { private static final String ENTITY_ELEMENT_NAME = "ENAMEX"; private static final String TIME_ELEMENT_NAME = "TIMEX"; private static final String NUM_ELEMENT_NAME = "NUMEX"; private static final Set<String> NAME_ELEMENT_NAMES; private static final Set<String> EXPECTED_TYPES; static { Set<String> types = new HashSet<>(); types.add("PERSON"); types.add("ORGANIZATION"); types.add("LOCATION"); types.add("DATE"); types.add("TIME"); types.add("MONEY"); types.add("PERCENT"); EXPECTED_TYPES = Collections.unmodifiableSet(types); Set<String> nameElements = new HashSet<>(); nameElements.add(ENTITY_ELEMENT_NAME); nameElements.add(TIME_ELEMENT_NAME); nameElements.add(NUM_ELEMENT_NAME); NAME_ELEMENT_NAMES = Collections.unmodifiableSet(nameElements); } private final Tokenizer tokenizer; private final List<NameSample> storedSamples; private boolean isInsideContentElement = false; private final List<String> text = new ArrayList<>(); private boolean isClearAdaptiveData = false; private final Stack<Span> incompleteNames = new Stack<>(); private List<Span> names = new ArrayList<>(); public MucNameContentHandler(Tokenizer tokenizer, List<NameSample> storedSamples) { this.tokenizer = tokenizer; this.storedSamples = storedSamples; } @Override public void startElement(String name, Map<String, String> attributes) throws InvalidFormatException { if (MucElementNames.DOC_ELEMENT.equals(name)) { isClearAdaptiveData = true; } if (MucElementNames.CONTENT_ELEMENTS.contains(name)) { isInsideContentElement = true; } if (NAME_ELEMENT_NAMES.contains(name)) { String nameType = attributes.get("TYPE"); if (!EXPECTED_TYPES.contains(nameType)) { throw new InvalidFormatException("Unknown timex, numex or namex type: " + nameType + ", expected one of " + EXPECTED_TYPES); } incompleteNames.add(new Span(text.size(), text.size(), nameType.toLowerCase(Locale.ENGLISH))); } } @Override public void characters(CharSequence chars) { if (isInsideContentElement) { String[] tokens = tokenizer.tokenize(chars.toString()); text.addAll(Arrays.asList(tokens)); } } @Override public void endElement(String name) { if (NAME_ELEMENT_NAMES.contains(name)) { Span nameSpan = incompleteNames.pop(); nameSpan = new Span(nameSpan.getStart(), text.size(), nameSpan.getType()); names.add(nameSpan); } if 
(MucElementNames.CONTENT_ELEMENTS.contains(name)) { storedSamples.add(new NameSample(text.toArray(new String[text.size()]), names.toArray(new Span[names.size()]), isClearAdaptiveData)); if (isClearAdaptiveData) { isClearAdaptiveData = false; } text.clear(); names.clear(); isInsideContentElement = false; } } }
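The handler above is normally driven by the SgmlParser defined later in this package: the parser streams a raw MUC document through the handler, which tokenizes the text of each content element and collects one NameSample per element. Below is a minimal sketch, assuming the sentence element <s> is among MucElementNames.CONTENT_ELEMENTS; the document string is an invented placeholder, not real MUC data.

import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

import opennlp.tools.formats.muc.MucNameContentHandler;
import opennlp.tools.formats.muc.SgmlParser;
import opennlp.tools.namefind.NameSample;
import opennlp.tools.tokenize.SimpleTokenizer;

public class MucNameContentHandlerSketch {

  public static void main(String[] args) throws IOException {
    // Invented MUC-style fragment; <s> is assumed to be a recognized content element.
    String doc = "<DOC><s>John Smith works for "
        + "<ENAMEX TYPE=\"ORGANIZATION\">ACME Corp</ENAMEX> .</s></DOC>";

    List<NameSample> samples = new ArrayList<>();
    new SgmlParser().parse(new StringReader(doc),
        new MucNameContentHandler(SimpleTokenizer.INSTANCE, samples));

    // One NameSample per content element, with the ENAMEX span typed "organization"
    for (NameSample sample : samples) {
      System.out.println(sample);
    }
  }
}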
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/muc/MucNameSampleStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.muc; import java.io.IOException; import java.io.StringReader; import java.util.ArrayList; import java.util.List; import opennlp.tools.namefind.NameSample; import opennlp.tools.tokenize.Tokenizer; import opennlp.tools.util.FilterObjectStream; import opennlp.tools.util.ObjectStream; public class MucNameSampleStream extends FilterObjectStream<String, NameSample> { private final Tokenizer tokenizer; private List<NameSample> storedSamples = new ArrayList<>(); protected MucNameSampleStream(Tokenizer tokenizer, ObjectStream<String> samples) { super(samples); this.tokenizer = tokenizer; } public NameSample read() throws IOException { if (storedSamples.isEmpty()) { String document = samples.read(); if (document != null) { // Note: This is a hack to fix invalid formatting in // some MUC files ... document = document.replace(">>", ">"); new SgmlParser().parse(new StringReader(document), new MucNameContentHandler(tokenizer, storedSamples)); } } if (storedSamples.size() > 0) { return storedSamples.remove(0); } else { return null; } } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/muc/SgmlParser.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.muc; import java.io.IOException; import java.io.Reader; import java.util.HashMap; import java.util.Map; import opennlp.tools.util.InvalidFormatException; import opennlp.tools.util.StringUtil; /** * SAX style SGML parser. * <p> * Note:<br> * The implementation is very limited, but good enough to * parse the MUC corpora. Its must very likely be extended/improved/fixed to parse * a different SGML corpora. */ public class SgmlParser { public static abstract class ContentHandler { public void startElement(String name, Map<String, String> attributes) throws InvalidFormatException { } public void characters(CharSequence chars) throws InvalidFormatException{ } public void endElement(String name) throws InvalidFormatException { } } private static String extractTagName(CharSequence tagChars) throws InvalidFormatException { int fromOffset = 1; if (tagChars.length() > 1 && tagChars.charAt(1) == '/') { fromOffset = 2; } for (int ci = 1; ci < tagChars.length(); ci++) { if (tagChars.charAt(ci) == '>' || StringUtil.isWhitespace(tagChars.charAt(ci))) { return tagChars.subSequence(fromOffset, ci).toString(); } } throw new InvalidFormatException("Failed to extract tag name!"); } private static Map<String, String> getAttributes(CharSequence tagChars) { // format: // space // key // = // " <- begin // value chars // " <- end Map<String, String> attributes = new HashMap<>(); StringBuilder key = new StringBuilder(); StringBuilder value = new StringBuilder(); boolean extractKey = false; boolean extractValue = false; for (int i = 0; i < tagChars.length(); i++) { // White space indicates begin of new key name if (StringUtil.isWhitespace(tagChars.charAt(i)) && !extractValue) { extractKey = true; } // Equals sign indicated end of key name else if (extractKey && ('=' == tagChars.charAt(i) || StringUtil.isWhitespace(tagChars.charAt(i)))) { extractKey = false; } // Inside key name, extract all chars else if (extractKey) { key.append(tagChars.charAt(i)); } // " Indicates begin or end of value chars else if ('"' == tagChars.charAt(i)) { if (extractValue) { attributes.put(key.toString(), value.toString()); // clear key and value buffers key.setLength(0); value.setLength(0); } extractValue = !extractValue; } // Inside value, extract all chars else if (extractValue) { value.append(tagChars.charAt(i)); } } return attributes; } public void parse(Reader in, ContentHandler handler) throws IOException { StringBuilder buffer = new StringBuilder(); boolean isInsideTag = false; boolean isStartTag = true; int lastChar = -1; int c; while ((c = in.read()) != -1) { if ('<' == c) { if (isInsideTag) { throw new InvalidFormatException("Did not expect < char!"); } if (buffer.toString().trim().length() > 0) { 
handler.characters(buffer.toString().trim()); } buffer.setLength(0); isInsideTag = true; isStartTag = true; } buffer.appendCodePoint(c); if ('/' == c && lastChar == '<') { isStartTag = false; } if ('>' == c) { if (!isInsideTag) { throw new InvalidFormatException("Did not expect > char!"); } if (isStartTag) { handler.startElement(extractTagName(buffer), getAttributes(buffer)); } else { handler.endElement(extractTagName(buffer)); } buffer.setLength(0); isInsideTag = false; } lastChar = c; } if (isInsideTag) { throw new InvalidFormatException("Did not find matching > char!"); } } }
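A short usage sketch of the parser's SAX-style callbacks, independent of the MUC handler: an anonymous ContentHandler simply prints what the parser reports. The element and attribute names below are invented for illustration.

import java.io.IOException;
import java.io.StringReader;
import java.util.Map;

import opennlp.tools.formats.muc.SgmlParser;

public class SgmlParserSketch {

  public static void main(String[] args) throws IOException {
    // Invented SGML fragment, just to exercise the start/characters/end callbacks
    String doc = "<DOC><HL KIND=\"title\">A small headline</HL></DOC>";

    new SgmlParser().parse(new StringReader(doc), new SgmlParser.ContentHandler() {
      @Override
      public void startElement(String name, Map<String, String> attributes) {
        System.out.println("start: " + name + " " + attributes);
      }

      @Override
      public void characters(CharSequence chars) {
        System.out.println("chars: '" + chars + "'");
      }

      @Override
      public void endElement(String name) {
        System.out.println("end: " + name);
      }
    });
  }
}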
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/ontonotes/DocumentToLineStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.ontonotes; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import opennlp.tools.formats.brat.SegmenterObjectStream; import opennlp.tools.util.ObjectStream; /** * Reads a plain text file and returns each line as a <code>String</code> object. */ public class DocumentToLineStream extends SegmenterObjectStream<String, String> { public DocumentToLineStream(ObjectStream<String> samples) { super(samples); } @Override protected List<String> read(String sample) throws IOException { List<String> lines = Arrays.asList(sample.split("\n")); // documents must be empty line terminated if (!lines.get(lines.size() - 1).trim().isEmpty()) { lines = new ArrayList<>(lines); lines.add(""); } return lines; } }
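A minimal sketch of splitting in-memory documents into lines; it assumes ObjectStreamUtils.createObjectStream(...) from opennlp.tools.util is available for building a stream from an array, and the document strings are made up.

import java.io.IOException;

import opennlp.tools.formats.ontonotes.DocumentToLineStream;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.ObjectStreamUtils;

public class DocumentToLineStreamSketch {

  public static void main(String[] args) throws IOException {
    // Two in-memory "documents"; each read() of the wrapped stream yields one line
    ObjectStream<String> documents = ObjectStreamUtils.createObjectStream(
        "first line\nsecond line", "only line of second document");

    ObjectStream<String> lines = new DocumentToLineStream(documents);
    String line;
    while ((line = lines.read()) != null) {
      System.out.println("[" + line + "]"); // empty lines mark document boundaries
    }
  }
}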
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/ontonotes/OntoNotesFormatParameters.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.ontonotes; import opennlp.tools.cmdline.ArgumentParser.ParameterDescription; public interface OntoNotesFormatParameters { @ParameterDescription(valueName = "OntoNotes 4.0 corpus directory") String getOntoNotesDir(); }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/ontonotes/OntoNotesNameSampleStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.ontonotes; import java.io.BufferedReader; import java.io.IOException; import java.io.StringReader; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import opennlp.tools.namefind.NameSample; import opennlp.tools.tokenize.WhitespaceTokenizer; import opennlp.tools.util.FilterObjectStream; import opennlp.tools.util.ObjectStream; import opennlp.tools.util.Span; import opennlp.tools.util.StringUtil; /** * Name Sample Stream parser for the OntoNotes 4.0 corpus. */ public class OntoNotesNameSampleStream extends FilterObjectStream<String, NameSample> { private final Map<String, String> tokenConversionMap; private List<NameSample> nameSamples = new LinkedList<>(); public OntoNotesNameSampleStream(ObjectStream<String> samples) { super(samples); Map<String, String> tokenConversionMap = new HashMap<>(); tokenConversionMap.put("-LRB-", "("); tokenConversionMap.put("-RRB-", ")"); tokenConversionMap.put("-LSB-", "["); tokenConversionMap.put("-RSB-", "]"); tokenConversionMap.put("-LCB-", "{"); tokenConversionMap.put("-RCB-", "}"); tokenConversionMap.put("-AMP-", "&"); this.tokenConversionMap = Collections.unmodifiableMap(tokenConversionMap); } private String convertToken(String token) { StringBuilder convertedToken = new StringBuilder(token); int startTagEndIndex = convertedToken.indexOf(">"); if (token.contains("=\"") && startTagEndIndex != -1) { convertedToken.delete(0, startTagEndIndex + 1); } int endTagBeginIndex = convertedToken.indexOf("<"); int endTagEndIndex = convertedToken.indexOf(">"); if (endTagBeginIndex != -1 && endTagEndIndex != -1) { convertedToken.delete(endTagBeginIndex, endTagEndIndex + 1); } String cleanedToken = convertedToken.toString(); if (tokenConversionMap.get(cleanedToken) != null) { cleanedToken = tokenConversionMap.get(cleanedToken); } return cleanedToken; } public NameSample read() throws IOException { if (nameSamples.isEmpty()) { String doc = samples.read(); if (doc != null) { BufferedReader docIn = new BufferedReader(new StringReader(doc)); boolean clearAdaptiveData = true; String line; while ((line = docIn.readLine()) != null) { if (line.startsWith("<DOC")) { continue; } if (line.equals("</DOC>")) { break; } String[] tokens = WhitespaceTokenizer.INSTANCE.tokenize(line); List<Span> entities = new LinkedList<>(); List<String> cleanedTokens = new ArrayList<>(tokens.length); int tokenIndex = 0; int entityBeginIndex = -1; String entityType = null; boolean insideStartEnmaxTag = false; for (String token : tokens) { // Split here, next part of tag is in new token if (token.startsWith("<ENAMEX")) { insideStartEnmaxTag = true; continue; } if (insideStartEnmaxTag) { String typeBegin 
= "TYPE=\""; if (token.startsWith(typeBegin)) { int typeEnd = token.indexOf("\"", typeBegin.length()); entityType = StringUtil.toLowerCase(token.substring(typeBegin.length(), typeEnd)); } if (token.contains(">")) { entityBeginIndex = tokenIndex; insideStartEnmaxTag = false; } else { continue; } } if (token.endsWith("</ENAMEX>")) { entities.add(new Span(entityBeginIndex, tokenIndex + 1, entityType)); entityBeginIndex = -1; } cleanedTokens.add(convertToken(token)); tokenIndex++; } nameSamples.add(new NameSample(cleanedTokens .toArray(new String[cleanedTokens.size()]), entities .toArray(new Span[entities.size()]), clearAdaptiveData)); clearAdaptiveData = false; } } } if (!nameSamples.isEmpty()) { return nameSamples.remove(0); } else { return null; } } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/ontonotes/OntoNotesNameSampleStreamFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.ontonotes; import java.io.File; import java.nio.charset.StandardCharsets; import opennlp.tools.cmdline.ArgumentParser; import opennlp.tools.cmdline.StreamFactoryRegistry; import opennlp.tools.formats.AbstractSampleStreamFactory; import opennlp.tools.formats.DirectorySampleStream; import opennlp.tools.formats.convert.FileToStringSampleStream; import opennlp.tools.namefind.NameSample; import opennlp.tools.util.ObjectStream; public class OntoNotesNameSampleStreamFactory extends AbstractSampleStreamFactory<NameSample> { public OntoNotesNameSampleStreamFactory() { super(OntoNotesFormatParameters.class); } public ObjectStream<NameSample> create(String[] args) { OntoNotesFormatParameters params = ArgumentParser.parse(args, OntoNotesFormatParameters.class); ObjectStream<File> documentStream = new DirectorySampleStream(new File( params.getOntoNotesDir()), file -> { if (file.isFile()) { return file.getName().endsWith(".name"); } return file.isDirectory(); }, true); return new OntoNotesNameSampleStream( new FileToStringSampleStream(documentStream, StandardCharsets.UTF_8)); } public static void registerFactory() { StreamFactoryRegistry.registerFactory(NameSample.class, "ontonotes", new OntoNotesNameSampleStreamFactory()); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/ontonotes/OntoNotesPOSSampleStreamFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.ontonotes; import opennlp.tools.cmdline.StreamFactoryRegistry; import opennlp.tools.formats.AbstractSampleStreamFactory; import opennlp.tools.formats.convert.ParseToPOSSampleStream; import opennlp.tools.parser.Parse; import opennlp.tools.postag.POSSample; import opennlp.tools.util.ObjectStream; public class OntoNotesPOSSampleStreamFactory extends AbstractSampleStreamFactory<POSSample> { private OntoNotesParseSampleStreamFactory parseSampleStreamFactory = new OntoNotesParseSampleStreamFactory(); protected OntoNotesPOSSampleStreamFactory() { super(OntoNotesFormatParameters.class); } public ObjectStream<POSSample> create(String[] args) { ObjectStream<Parse> parseSampleStream = parseSampleStreamFactory.create(args); return new ParseToPOSSampleStream(parseSampleStream); } public static void registerFactory() { StreamFactoryRegistry.registerFactory(POSSample.class, "ontonotes", new OntoNotesPOSSampleStreamFactory()); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/ontonotes/OntoNotesParseSampleStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.ontonotes; import java.io.IOException; import opennlp.tools.parser.Parse; import opennlp.tools.util.FilterObjectStream; import opennlp.tools.util.ObjectStream; // Should be possible with this one, to train the parser and pos tagger! public class OntoNotesParseSampleStream extends FilterObjectStream<String, Parse> { public OntoNotesParseSampleStream(ObjectStream<String> samples) { super(samples); } public Parse read() throws IOException { StringBuilder parseString = new StringBuilder(); while (true) { String parse = samples.read(); if (parse != null) { parse = parse.trim(); } if (parse == null || parse.isEmpty()) { if (parseString.length() > 0) { return Parse.parseParse(parseString.toString()); } else { return null; } } parseString.append(parse).append(" "); } } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/formats/ontonotes/OntoNotesParseSampleStreamFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.formats.ontonotes; import java.io.File; import java.nio.charset.StandardCharsets; import opennlp.tools.cmdline.ArgumentParser; import opennlp.tools.cmdline.StreamFactoryRegistry; import opennlp.tools.formats.AbstractSampleStreamFactory; import opennlp.tools.formats.DirectorySampleStream; import opennlp.tools.formats.convert.FileToStringSampleStream; import opennlp.tools.parser.Parse; import opennlp.tools.util.ObjectStream; public class OntoNotesParseSampleStreamFactory extends AbstractSampleStreamFactory<Parse> { protected OntoNotesParseSampleStreamFactory() { super(OntoNotesFormatParameters.class); } public ObjectStream<Parse> create(String[] args) { OntoNotesFormatParameters params = ArgumentParser.parse(args, OntoNotesFormatParameters.class); ObjectStream<File> documentStream = new DirectorySampleStream(new File( params.getOntoNotesDir()), file -> { if (file.isFile()) { return file.getName().endsWith(".parse"); } return file.isDirectory(); }, true); // We need file to line here ... and that is probably best doen with the plain text stream // lets copy it over here, refactor it, and then at some point we replace the current version // with the refactored version return new OntoNotesParseSampleStream(new DocumentToLineStream(new FileToStringSampleStream( documentStream, StandardCharsets.UTF_8))); } public static void registerFactory() { StreamFactoryRegistry.registerFactory(Parse.class, "ontonotes", new OntoNotesParseSampleStreamFactory()); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/langdetect/DefaultLanguageDetectorContextGenerator.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.langdetect; import java.util.ArrayList; import java.util.Collection; import opennlp.tools.ngram.NGramModel; import opennlp.tools.util.StringList; import opennlp.tools.util.normalizer.AggregateCharSequenceNormalizer; import opennlp.tools.util.normalizer.CharSequenceNormalizer; /** * A context generator for language detector. */ public class DefaultLanguageDetectorContextGenerator implements LanguageDetectorContextGenerator { protected final int minLength; protected final int maxLength; protected final CharSequenceNormalizer normalizer; /** * Creates a customizable @{@link DefaultLanguageDetectorContextGenerator} that computes ngrams from text * @param minLength min ngrams chars * @param maxLength max ngrams chars * @param normalizers zero or more normalizers to * be applied in to the text before extracting ngrams */ public DefaultLanguageDetectorContextGenerator(int minLength, int maxLength, CharSequenceNormalizer... normalizers) { this.minLength = minLength; this.maxLength = maxLength; this.normalizer = new AggregateCharSequenceNormalizer(normalizers); } /** * Generates the context for a document using character ngrams. * @param document document to extract context from * @return the generated context */ @Override public String[] getContext(CharSequence document) { Collection<String> context = new ArrayList<>(); NGramModel model = new NGramModel(); model.add(normalizer.normalize(document), minLength, maxLength); for (StringList tokenList : model) { if (tokenList.size() > 0) { context.add(tokenList.getToken(0)); } } return context.toArray(new String[context.size()]); } }
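A small sketch of the character n-gram features this generator produces; the normalizer choice is arbitrary and the input text is made up.

import opennlp.tools.langdetect.DefaultLanguageDetectorContextGenerator;
import opennlp.tools.util.normalizer.ShrinkCharSequenceNormalizer;

public class LangDetectContextSketch {

  public static void main(String[] args) {
    // Character n-grams of length 1 to 3, after shrinking repeated characters
    DefaultLanguageDetectorContextGenerator cg =
        new DefaultLanguageDetectorContextGenerator(1, 3,
            ShrinkCharSequenceNormalizer.getInstance());

    for (String feature : cg.getContext("A tiny text sample")) {
      System.out.println(feature);
    }
  }
}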
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/langdetect/Language.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.langdetect; import java.io.Serializable; import java.util.Objects; /** * Class for holding the document language and its confidence */ public class Language implements Serializable { private final String lang; private final double confidence; public Language(String lang) { this(lang, 0); } public Language(String lang, double confidence) { Objects.requireNonNull(lang, "lang must not be null"); this.lang = lang; this.confidence = confidence; } public String getLang() { return lang; } public double getConfidence() { return confidence; } @Override public String toString() { return getLang() + " (" + this.confidence + ")"; } @Override public int hashCode() { return Objects.hash(getLang(), getConfidence()); } @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj instanceof Language) { Language a = (Language) obj; return getLang().equals(a.getLang()); } return false; } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/langdetect/LanguageDetector.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.langdetect; /** * The interface for LanguageDetector which provides the @{@link Language} according to the context. */ public interface LanguageDetector { Language[] predictLanguages(CharSequence content); Language predictLanguage(CharSequence content); String[] getSupportedLanguages(); }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/langdetect/LanguageDetectorContextGenerator.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.langdetect; /** * A context generator interface for language detector. */ public interface LanguageDetectorContextGenerator { String[] getContext(CharSequence document); }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/langdetect/LanguageDetectorCrossValidator.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.langdetect; import java.io.IOException; import opennlp.tools.doccat.FeatureGenerator; import opennlp.tools.util.ObjectStream; import opennlp.tools.util.TrainingParameters; import opennlp.tools.util.eval.CrossValidationPartitioner; import opennlp.tools.util.eval.Mean; /** * Cross validator for language detector */ public class LanguageDetectorCrossValidator { private final TrainingParameters params; private Mean documentAccuracy = new Mean(); private LanguageDetectorEvaluationMonitor[] listeners; private LanguageDetectorFactory factory; /** * Creates a {@link LanguageDetectorCrossValidator} with the given * {@link FeatureGenerator}s. */ public LanguageDetectorCrossValidator(TrainingParameters mlParams, LanguageDetectorFactory factory, LanguageDetectorEvaluationMonitor ... listeners) { this.params = mlParams; this.listeners = listeners; this.factory = factory; } /** * Starts the evaluation. * * @param samples * the data to train and test * @param nFolds * number of folds * * @throws IOException */ public void evaluate(ObjectStream<LanguageSample> samples, int nFolds) throws IOException { CrossValidationPartitioner<LanguageSample> partitioner = new CrossValidationPartitioner<>(samples, nFolds); while (partitioner.hasNext()) { CrossValidationPartitioner.TrainingSampleStream<LanguageSample> trainingSampleStream = partitioner.next(); LanguageDetectorModel model = LanguageDetectorME.train( trainingSampleStream, params, factory); LanguageDetectorEvaluator evaluator = new LanguageDetectorEvaluator( new LanguageDetectorME(model), listeners); evaluator.evaluate(trainingSampleStream.getTestSampleStream()); documentAccuracy.add(evaluator.getAccuracy(), evaluator.getDocumentCount()); } } /** * Retrieves the accuracy for all iterations. * * @return the word accuracy */ public double getDocumentAccuracy() { return documentAccuracy.mean(); } /** * Retrieves the number of words which where validated over all iterations. * The result is the amount of folds multiplied by the total number of words. * * @return the word count */ public long getDocumentCount() { return documentAccuracy.count(); } }
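A hedged sketch of running the cross validator over an existing, resettable sample stream (cross validation re-reads the data once per fold); default training parameters and the default factory are assumed here.

import java.io.IOException;

import opennlp.tools.langdetect.LanguageDetectorCrossValidator;
import opennlp.tools.langdetect.LanguageDetectorFactory;
import opennlp.tools.langdetect.LanguageSample;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.TrainingParameters;

public class LangDetectCrossValidationSketch {

  // "samples" must support reset(), e.g. a LanguageDetectorSampleStream over a file
  static void crossValidate(ObjectStream<LanguageSample> samples) throws IOException {
    LanguageDetectorCrossValidator cv = new LanguageDetectorCrossValidator(
        TrainingParameters.defaultParams(), LanguageDetectorFactory.create(null));

    cv.evaluate(samples, 10); // 10 folds: train on 9/10, evaluate on the held-out 1/10

    System.out.println("accuracy: " + cv.getDocumentAccuracy()
        + " over " + cv.getDocumentCount() + " documents");
  }
}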
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/langdetect/LanguageDetectorEvaluationMonitor.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.langdetect; import opennlp.tools.util.eval.EvaluationMonitor; /** * {@link EvaluationMonitor} for Language Detector. */ public interface LanguageDetectorEvaluationMonitor extends EvaluationMonitor<LanguageSample> { }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/langdetect/LanguageDetectorEvaluator.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.langdetect; import opennlp.tools.doccat.DocumentCategorizer; import opennlp.tools.util.eval.Evaluator; import opennlp.tools.util.eval.Mean; /** * The {@link LanguageDetectorEvaluator} measures the performance of * the given {@link LanguageDetector} with the provided reference * {@link LanguageSample}s. * * @see LanguageDetector * @see LanguageSample */ public class LanguageDetectorEvaluator extends Evaluator<LanguageSample> { private LanguageDetector languageDetector; private Mean accuracy = new Mean(); /** * Initializes the current instance. * * @param langDetect the language detector instance */ public LanguageDetectorEvaluator(LanguageDetector langDetect, LanguageDetectorEvaluationMonitor ... listeners) { super(listeners); this.languageDetector = langDetect; } /** * Evaluates the given reference {@link LanguageSample} object. * * This is done by categorizing the document from the provided * {@link LanguageSample}. The detected language is then used * to calculate and update the score. * * @param sample the reference {@link LanguageSample}. */ public LanguageSample processSample(LanguageSample sample) { CharSequence document = sample.getContext(); Language predicted = languageDetector.predictLanguage(document); if (sample.getLanguage().getLang().equals(predicted.getLang())) { accuracy.add(1); } else { accuracy.add(0); } return new LanguageSample(predicted, sample.getContext()); } /** * Retrieves the accuracy of provided {@link DocumentCategorizer}. * * accuracy = correctly categorized documents / total documents * * @return the accuracy */ public double getAccuracy() { return accuracy.mean(); } public long getDocumentCount() { return accuracy.count(); } /** * Represents this objects as human readable {@link String}. */ @Override public String toString() { return "Accuracy: " + accuracy.mean() + "\n" + "Number of documents: " + accuracy.count(); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/langdetect/LanguageDetectorEventStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.langdetect; import java.util.Iterator; import opennlp.tools.ml.model.Event; import opennlp.tools.util.AbstractEventStream; import opennlp.tools.util.ObjectStream; /** * Iterator-like class for modeling language detector events. */ public class LanguageDetectorEventStream extends AbstractEventStream<LanguageSample> { private LanguageDetectorContextGenerator mContextGenerator; /** * Initializes the current instance via samples and feature generators. * * @param data {@link ObjectStream} of {@link LanguageSample}s */ public LanguageDetectorEventStream(ObjectStream<LanguageSample> data, LanguageDetectorContextGenerator contextGenerator) { super(data); mContextGenerator = contextGenerator; } @Override protected Iterator<Event> createEvents(final LanguageSample sample) { return new Iterator<Event>() { private boolean isVirgin = true; public boolean hasNext() { return isVirgin; } public Event next() { isVirgin = false; return new Event(sample.getLanguage().getLang(), mContextGenerator.getContext(sample.getContext().toString())); } public void remove() { throw new UnsupportedOperationException(); } }; } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/langdetect/LanguageDetectorFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.langdetect; import opennlp.tools.util.BaseToolFactory; import opennlp.tools.util.InvalidFormatException; import opennlp.tools.util.ext.ExtensionLoader; import opennlp.tools.util.normalizer.EmojiCharSequenceNormalizer; import opennlp.tools.util.normalizer.NumberCharSequenceNormalizer; import opennlp.tools.util.normalizer.ShrinkCharSequenceNormalizer; import opennlp.tools.util.normalizer.TwitterCharSequenceNormalizer; import opennlp.tools.util.normalizer.UrlCharSequenceNormalizer; /** * <p>Default factory used by Language Detector. Extend this class to change the Language Detector * behaviour, such as the {@link LanguageDetectorContextGenerator}.</p> * <p>The default {@link DefaultLanguageDetectorContextGenerator} will use char n-grams of * size 1 to 3 and the following normalizers: * <ul> * <li> {@link EmojiCharSequenceNormalizer} * <li> {@link UrlCharSequenceNormalizer} * <li> {@link TwitterCharSequenceNormalizer} * <li> {@link NumberCharSequenceNormalizer} * <li> {@link ShrinkCharSequenceNormalizer} * </ul> * </p> */ public class LanguageDetectorFactory extends BaseToolFactory { public LanguageDetectorContextGenerator getContextGenerator() { return new DefaultLanguageDetectorContextGenerator(1, 3, EmojiCharSequenceNormalizer.getInstance(), UrlCharSequenceNormalizer.getInstance(), TwitterCharSequenceNormalizer.getInstance(), NumberCharSequenceNormalizer.getInstance(), ShrinkCharSequenceNormalizer.getInstance()); } public static LanguageDetectorFactory create(String subclassName) throws InvalidFormatException { if (subclassName == null) { // will create the default factory return new LanguageDetectorFactory(); } try { LanguageDetectorFactory theFactory = ExtensionLoader.instantiateExtension( LanguageDetectorFactory.class, subclassName); theFactory.init(); return theFactory; } catch (Exception e) { String msg = "Could not instantiate the " + subclassName + ". The initialization throw an exception."; throw new InvalidFormatException(msg, e); } } public void init() { // nothing to do } @Override public void validateArtifactMap() throws InvalidFormatException { // nothing to validate } }
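Customisation happens by subclassing the factory; a hypothetical subclass that only widens the n-gram window to 1..5 characters and drops most normalizers could look like the sketch below (the class name and parameter choices are illustrative, not part of the library).

import opennlp.tools.langdetect.DefaultLanguageDetectorContextGenerator;
import opennlp.tools.langdetect.LanguageDetectorContextGenerator;
import opennlp.tools.langdetect.LanguageDetectorFactory;
import opennlp.tools.util.normalizer.EmojiCharSequenceNormalizer;
import opennlp.tools.util.normalizer.ShrinkCharSequenceNormalizer;

// Hypothetical subclass: wider character n-grams, fewer normalizers
public class WideNGramLanguageDetectorFactory extends LanguageDetectorFactory {

  @Override
  public LanguageDetectorContextGenerator getContextGenerator() {
    return new DefaultLanguageDetectorContextGenerator(1, 5,
        EmojiCharSequenceNormalizer.getInstance(),
        ShrinkCharSequenceNormalizer.getInstance());
  }
}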
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/langdetect/LanguageDetectorME.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.langdetect; import java.io.IOException; import java.util.Arrays; import java.util.HashMap; import java.util.Map; import opennlp.tools.ml.AbstractEventTrainer; import opennlp.tools.ml.EventTrainer; import opennlp.tools.ml.TrainerFactory; import opennlp.tools.ml.model.MaxentModel; import opennlp.tools.util.ObjectStream; import opennlp.tools.util.TrainingParameters; /** * Implements learnable Language Detector */ public class LanguageDetectorME implements LanguageDetector { private LanguageDetectorModel model; private LanguageDetectorContextGenerator mContextGenerator; /** * Initializes the current instance with a language detector model. Default feature * generation is used. * * @param model the language detector model */ public LanguageDetectorME(LanguageDetectorModel model) { this.model = model; this.mContextGenerator = model.getFactory().getContextGenerator(); } @Override public Language[] predictLanguages(CharSequence content) { double[] eval = model.getMaxentModel().eval(mContextGenerator.getContext(content.toString())); Language[] arr = new Language[eval.length]; for (int i = 0; i < eval.length; i++) { arr[i] = new Language(model.getMaxentModel().getOutcome(i), eval[i]); } Arrays.sort(arr, (o1, o2) -> Double.compare(o2.getConfidence(), o1.getConfidence())); return arr; } @Override public Language predictLanguage(CharSequence content) { return predictLanguages(content)[0]; } @Override public String[] getSupportedLanguages() { int numberLanguages = model.getMaxentModel().getNumOutcomes(); String[] languages = new String[numberLanguages]; for (int i = 0; i < numberLanguages; i++) { languages[i] = model.getMaxentModel().getOutcome(i); } return languages; } public static LanguageDetectorModel train(ObjectStream<LanguageSample> samples, TrainingParameters mlParams, LanguageDetectorFactory factory) throws IOException { Map<String, String> manifestInfoEntries = new HashMap<>(); mlParams.putIfAbsent(AbstractEventTrainer.DATA_INDEXER_PARAM, AbstractEventTrainer.DATA_INDEXER_ONE_PASS_VALUE); EventTrainer trainer = TrainerFactory.getEventTrainer( mlParams, manifestInfoEntries); MaxentModel model = trainer.train( new LanguageDetectorEventStream(samples, factory.getContextGenerator())); return new LanguageDetectorModel(model, manifestInfoEntries, factory); } }
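A sketch of training and using the maxent detector, given an existing sample stream (building one from a file is sketched under LanguageDetectorSampleStream below); the French test sentence is only an example input.

import java.io.IOException;

import opennlp.tools.langdetect.Language;
import opennlp.tools.langdetect.LanguageDetectorFactory;
import opennlp.tools.langdetect.LanguageDetectorME;
import opennlp.tools.langdetect.LanguageDetectorModel;
import opennlp.tools.langdetect.LanguageSample;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.TrainingParameters;

public class LangDetectTrainSketch {

  static void trainAndPredict(ObjectStream<LanguageSample> samples) throws IOException {
    // Train with default parameters and the default factory
    LanguageDetectorModel model = LanguageDetectorME.train(samples,
        TrainingParameters.defaultParams(), LanguageDetectorFactory.create(null));

    LanguageDetectorME detector = new LanguageDetectorME(model);
    Language best = detector.predictLanguage("Ceci est un petit texte en français.");
    System.out.println(best.getLang() + " (" + best.getConfidence() + ")");
  }
}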
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/langdetect/LanguageDetectorModel.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.langdetect; import java.io.File; import java.io.IOException; import java.io.InputStream; import java.net.URL; import java.util.Map; import opennlp.tools.ml.model.AbstractModel; import opennlp.tools.ml.model.MaxentModel; import opennlp.tools.util.BaseToolFactory; import opennlp.tools.util.InvalidFormatException; import opennlp.tools.util.model.BaseModel; /** * A model for language detection */ public class LanguageDetectorModel extends BaseModel { private static final String COMPONENT_NAME = "LanguageDetectorME"; private static final String LANGDETECT_MODEL_ENTRY_NAME = "langdetect.model"; public LanguageDetectorModel(MaxentModel langdetectModel, Map<String, String> manifestInfoEntries, LanguageDetectorFactory factory) { super(COMPONENT_NAME, "und", manifestInfoEntries, factory); artifactMap.put(LANGDETECT_MODEL_ENTRY_NAME, langdetectModel); checkArtifactMap(); } public LanguageDetectorModel(InputStream in) throws IOException { super(COMPONENT_NAME, in); } public LanguageDetectorModel(File modelFile) throws IOException { super(COMPONENT_NAME, modelFile); } public LanguageDetectorModel(URL modelURL) throws IOException { super(COMPONENT_NAME, modelURL); } @Override protected void validateArtifactMap() throws InvalidFormatException { super.validateArtifactMap(); if (!(artifactMap.get(LANGDETECT_MODEL_ENTRY_NAME) instanceof AbstractModel)) { throw new InvalidFormatException("Language detector model is incomplete!"); } } public LanguageDetectorFactory getFactory() { return (LanguageDetectorFactory) this.toolFactory; } @Override protected Class<? extends BaseToolFactory> getDefaultFactory() { return LanguageDetectorFactory.class; } public MaxentModel getMaxentModel() { return (MaxentModel) artifactMap.get(LANGDETECT_MODEL_ENTRY_NAME); } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/langdetect/LanguageDetectorSampleStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.langdetect; import java.io.IOException; import opennlp.tools.util.FilterObjectStream; import opennlp.tools.util.ObjectStream; /** * This class reads in string encoded training samples, parses them and * outputs {@link LanguageSample} objects. * <p> * Format:<br> * Each line contains one sample document.<br> * The language is the first string in the line followed by a tab and the document content.<br> * Sample line: category-string tab-char document line-break-char(s)<br> */ public class LanguageDetectorSampleStream extends FilterObjectStream<String, LanguageSample> { public LanguageDetectorSampleStream(ObjectStream<String> samples) { super(samples); } public LanguageSample read() throws IOException { String sampleString; while ((sampleString = samples.read()) != null) { int tabIndex = sampleString.indexOf("\t"); if (tabIndex > 0) { String lang = sampleString.substring(0, tabIndex); String context = sampleString.substring(tabIndex + 1); return new LanguageSample(new Language(lang), context); } } return null; } }
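A sketch of building the stream from a tab-separated training file; the file name is a placeholder and each line is expected to look like "eng<TAB>This is an English document.".

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import opennlp.tools.langdetect.LanguageDetectorSampleStream;
import opennlp.tools.langdetect.LanguageSample;
import opennlp.tools.util.MarkableFileInputStreamFactory;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.PlainTextByLineStream;

public class LangDetectSampleStreamSketch {

  public static void main(String[] args) throws IOException {
    // Placeholder file; format per line: <language code> TAB <document text>
    ObjectStream<LanguageSample> samples = new LanguageDetectorSampleStream(
        new PlainTextByLineStream(
            new MarkableFileInputStreamFactory(new File("langdetect-train.txt")),
            StandardCharsets.UTF_8));

    LanguageSample sample;
    while ((sample = samples.read()) != null) {
      System.out.println(sample.getLanguage().getLang());
    }
    samples.close();
  }
}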
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/langdetect/LanguageSample.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.langdetect; import java.io.Serializable; import java.util.Objects; /** * Class which holds a classified document and its @{@link Language}. */ public class LanguageSample implements Serializable { private final Language language; private final CharSequence context; public LanguageSample(Language language, CharSequence context) { this.language = Objects.requireNonNull(language, "language must not be null"); this.context = Objects.requireNonNull(context, "context must not be null"); } public Language getLanguage() { return language; } public CharSequence getContext() { return context; } @Override public String toString() { return language.getLang() + '\t' + context; } @Override public int hashCode() { return Objects.hash(getContext(), getLanguage()); } @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj instanceof LanguageSample) { LanguageSample a = (LanguageSample) obj; return getLanguage().equals(a.getLanguage()) && getContext().equals(a.getContext()); } return false; } }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/languagemodel/LanguageModel.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.languagemodel; import opennlp.tools.util.StringList; /** * A language model can calculate the probability <i>p</i> (between 0 and 1) of a * certain {@link opennlp.tools.util.StringList sequence of tokens}, given its underlying vocabulary. */ public interface LanguageModel { /** * Calculate the probability of a series of tokens (e.g. a sentence), given a vocabulary * * @param tokens the text tokens to calculate the probability for * @return the probability of the given text tokens in the vocabulary */ double calculateProbability(StringList tokens); /** * Predict the most probable output sequence of tokens, given an input sequence of tokens * * @param tokens a sequence of tokens * @return the most probable subsequent token sequence */ StringList predictNextTokens(StringList tokens); }
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/languagemodel/NGramLanguageModel.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.tools.languagemodel; import java.io.IOException; import java.io.InputStream; import opennlp.tools.ngram.NGramModel; import opennlp.tools.ngram.NGramUtils; import opennlp.tools.util.StringList; /** * A {@link opennlp.tools.languagemodel.LanguageModel} based on a {@link opennlp.tools.ngram.NGramModel} * using Stupid Backoff to get the probabilities of the ngrams. */ public class NGramLanguageModel extends NGramModel implements LanguageModel { private static final int DEFAULT_N = 3; private final int n; public NGramLanguageModel() { this(DEFAULT_N); } public NGramLanguageModel(int n) { this.n = n; } public NGramLanguageModel(InputStream in) throws IOException { this(in, DEFAULT_N); } public NGramLanguageModel(InputStream in, int n) throws IOException { super(in); this.n = n; } @Override public double calculateProbability(StringList sample) { double probability = 0d; if (size() > 0) { for (StringList ngram : NGramUtils.getNGrams(sample, n)) { double score = stupidBackoff(ngram); probability += Math.log(score); if (Double.isNaN(probability)) { probability = 0d; } } probability = Math.exp(probability); } return probability; } @Override public StringList predictNextTokens(StringList tokens) { double maxProb = Double.NEGATIVE_INFINITY; StringList token = null; for (StringList ngram : this) { String[] sequence = new String[ngram.size() + tokens.size()]; for (int i = 0; i < tokens.size(); i++) { sequence[i] = tokens.getToken(i); } for (int i = 0; i < ngram.size(); i++) { sequence[i + tokens.size()] = ngram.getToken(i); } StringList sample = new StringList(sequence); double v = calculateProbability(sample); if (v > maxProb) { maxProb = v; token = ngram; } } return token; } private double stupidBackoff(StringList ngram) { int count = getCount(ngram); StringList nMinusOneToken = NGramUtils.getNMinusOneTokenFirst(ngram); if (nMinusOneToken == null || nMinusOneToken.size() == 0) { return (double) count / (double) size(); } else if (count > 0) { double countM1 = getCount(nMinusOneToken); if (countM1 == 0d) { countM1 = size(); // to avoid Infinite if n-1grams do not exist } return (double) count / countM1; } else { return 0.4 * stupidBackoff(NGramUtils.getNMinusOneTokenLast(ngram)); } } }
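A small sketch of scoring and prediction with the model above; add(StringList, min, max) is inherited from NGramModel, and the toy sentences are made up.

import opennlp.tools.languagemodel.NGramLanguageModel;
import opennlp.tools.util.StringList;

public class NGramLanguageModelSketch {

  public static void main(String[] args) {
    NGramLanguageModel lm = new NGramLanguageModel(3);

    // Collect 1- to 3-grams from two toy token sequences (inherited NGramModel.add)
    lm.add(new StringList("the", "cat", "sat", "on", "the", "mat"), 1, 3);
    lm.add(new StringList("the", "dog", "sat", "on", "the", "rug"), 1, 3);

    double p = lm.calculateProbability(new StringList("the", "cat", "sat"));
    StringList next = lm.predictNextTokens(new StringList("the", "cat"));

    System.out.println("p(the cat sat) = " + p + ", predicted continuation: " + next);
  }
}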
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/languagemodel/package-info.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Package related to language models
 */
package opennlp.tools.languagemodel;
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/lemmatizer/DefaultLemmatizerContextGenerator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.lemmatizer;

import java.util.ArrayList;
import java.util.List;
import java.util.regex.Pattern;

/**
 * Simple feature generator for learning statistical lemmatizers.
 * Features based on Grzegorz Chrupała. 2008. Towards a Machine-Learning
 * Architecture for Lexical Functional Grammar Parsing. PhD dissertation,
 * Dublin City University
 * @version 2016-02-15
 */
public class DefaultLemmatizerContextGenerator implements LemmatizerContextGenerator {

  private static final int PREFIX_LENGTH = 5;
  private static final int SUFFIX_LENGTH = 7;

  private static Pattern hasCap = Pattern.compile("[A-Z]");
  private static Pattern hasNum = Pattern.compile("[0-9]");

  public DefaultLemmatizerContextGenerator() {
  }

  protected static String[] getPrefixes(String lex) {
    String[] prefs = new String[PREFIX_LENGTH];
    for (int li = 1; li < PREFIX_LENGTH; li++) {
      prefs[li] = lex.substring(0, Math.min(li + 1, lex.length()));
    }
    return prefs;
  }

  protected static String[] getSuffixes(String lex) {
    String[] suffs = new String[SUFFIX_LENGTH];
    for (int li = 1; li < SUFFIX_LENGTH; li++) {
      suffs[li] = lex.substring(Math.max(lex.length() - li - 1, 0));
    }
    return suffs;
  }

  public String[] getContext(int index, String[] sequence, String[] priorDecisions,
      Object[] additionalContext) {
    return getContext(index, sequence, (String[]) additionalContext[0], priorDecisions);
  }

  public String[] getContext(int index, String[] toks, String[] tags, String[] preds) {
    // Word
    String w0;
    // Tag
    String t0;
    // Previous prediction
    String p_1;

    String lex = toks[index];
    if (index < 1) {
      p_1 = "p_1=bos";
    } else {
      p_1 = "p_1=" + preds[index - 1];
    }

    w0 = "w0=" + toks[index];
    t0 = "t0=" + tags[index];

    List<String> features = new ArrayList<>();

    features.add(w0);
    features.add(t0);
    features.add(p_1);
    features.add(p_1 + t0);
    features.add(p_1 + w0);

    // do some basic suffix analysis
    String[] suffs = getSuffixes(lex);
    for (int i = 0; i < suffs.length; i++) {
      features.add("suf=" + suffs[i]);
    }

    String[] prefs = getPrefixes(lex);
    for (int i = 0; i < prefs.length; i++) {
      features.add("pre=" + prefs[i]);
    }

    // see if the word has any special characters
    if (lex.indexOf('-') != -1) {
      features.add("h");
    }

    if (hasCap.matcher(lex).find()) {
      features.add("c");
    }

    if (hasNum.matcher(lex).find()) {
      features.add("d");
    }

    return features.toArray(new String[features.size()]);
  }
}
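As a hedged illustration of the feature strings this context generator produces, the sketch below calls the public getContext(int, String[], String[], String[]) method shown above on a made-up token/tag/lemma triple; the driver class name and the sample data are invented for this example and are not part of the dumped sources.

import opennlp.tools.lemmatizer.DefaultLemmatizerContextGenerator;

public class LemmatizerContextSketch {

  public static void main(String[] args) {
    DefaultLemmatizerContextGenerator cg = new DefaultLemmatizerContextGenerator();

    String[] toks = {"The", "dogs", "barked"};
    String[] tags = {"DT", "NNS", "VBD"};
    String[] preds = {"the", "dog", "bark"}; // previous lemma decisions

    // Features for the token at index 1 ("dogs"): w0=..., t0=..., p_1=...,
    // the combined p_1+t0 and p_1+w0 features, the prefix/suffix features,
    // and the h/c/d flags for hyphen, capital letter, and digit.
    for (String feature : cg.getContext(1, toks, tags, preds)) {
      System.out.println(feature);
    }
  }
}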
0
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools
java-sources/ai/idylnlp/idylnlp-opennlp-tools-1.8.3/1.1.0/opennlp/tools/lemmatizer/DefaultLemmatizerSequenceValidator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package opennlp.tools.lemmatizer;

import opennlp.tools.util.SequenceValidator;

public class DefaultLemmatizerSequenceValidator implements SequenceValidator<String> {

  //TODO implement this
  public boolean validSequence(int i, String[] sequence, String[] s, String outcome) {
    return true;
  }

}