|
| 1 | +package edu.stanford.nlp.semgraph.semgrex.ssurgeon; |
| 2 | + |
| 3 | +import java.util.*; |
| 4 | +import java.util.regex.Matcher; |
| 5 | +import java.util.regex.Pattern; |
| 6 | +import java.io.*; |
| 7 | + |
| 8 | +import edu.stanford.nlp.ling.IndexedWord; |
| 9 | +import edu.stanford.nlp.semgraph.semgrex.SemgrexMatcher; |
| 10 | +import edu.stanford.nlp.semgraph.SemanticGraph; |
| 11 | +import edu.stanford.nlp.trees.GrammaticalRelation; |
| 12 | + |
| 13 | +/** |
| 14 | + * Split a word into pieces based on the regex expressions provided by the -regex arguments |
| 15 | + * <br> |
| 16 | + * As an example of where this is useful, a tokenization dataset had a "
| 17 | + * stuck to each of the words. We can separate that out by using two
| 18 | + * regexes: one which captures the " in a group, and one which captures the
| 19 | + * rest of the word without the "
| 20 | + * |
| 21 | + * @author John Bauer |
| 22 | + */ |
| 23 | +public class SplitWord extends SsurgeonEdit { |
| 24 | + public static final String LABEL = "splitWord"; |
| 25 | + |
| 26 | + final String node; |
| 27 | + final List<Pattern> nodeRegex; |
| 28 | + final int headIndex; |
| 29 | + final GrammaticalRelation relation; |
| 30 | + |
| 31 | + public SplitWord(String node, List<String> nodeRegex, Integer headIndex, GrammaticalRelation relation) { |
| 32 | + if (node == null) { |
| 33 | + throw new SsurgeonParseException("SplitWord expected -node with the name of the matched node to split"); |
| 34 | + } |
| 35 | + this.node = node; |
| 36 | + |
| 37 | + if (nodeRegex == null || nodeRegex.size() == 0) { |
| 38 | + throw new SsurgeonParseException("SplitWord expected -regex with regex to determine which pieces to split the word into"); |
| 39 | + } |
| 40 | + if (nodeRegex.size() == 1) { |
| 41 | + throw new SsurgeonParseException("SplitWord expected at least two -regex"); |
| 42 | + } |
| 43 | + this.nodeRegex = new ArrayList<>(); |
| 44 | + for (int i = 0; i < nodeRegex.size(); ++i) { |
| 45 | + this.nodeRegex.add(Pattern.compile(nodeRegex.get(i))); |
| 46 | + } |
| 47 | + |
| 48 | + if (headIndex == null) { |
| 49 | + throw new SsurgeonParseException("SplitWord expected a -headIndex, 0-indexed for the word piece to use when chopping up the word"); |
| 50 | + } |
| 51 | + this.headIndex = headIndex; |
| 52 | + |
| 53 | + if (relation == null) { |
| 54 | + throw new SsurgeonParseException("SplitWord expected a -reln to represent the dependency to use for the new words"); |
| 55 | + } |
| 56 | + this.relation = relation; |
| 57 | + } |
| 58 | + |
| 59 | + @Override |
| 60 | + public String toEditString() { |
| 61 | + StringWriter buf = new StringWriter(); |
| 62 | + buf.write(LABEL); |
| 63 | + buf.write("\t"); |
| 64 | + buf.write("-node " + node + "\t"); |
| 65 | + for (Pattern regex : nodeRegex) { |
| 66 | + buf.write("-regex " + regex + "\t"); |
| 67 | + } |
| 68 | + buf.write("-reln " + relation.toString() + "\t"); |
| 69 | + buf.write("-headIndex " + headIndex); |
| 70 | + return buf.toString(); |
| 71 | + } |
| 72 | + |
| 73 | + @Override |
| 74 | + public boolean evaluate(SemanticGraph sg, SemgrexMatcher sm) { |
| 75 | + IndexedWord matchedNode = sm.getNode(node); |
| 76 | + String origWord = matchedNode.word(); |
| 77 | + |
| 78 | + // first, iterate over the regex patterns we had at creation time |
| 79 | + // |
| 80 | + // each new word created will be the concatenation of all of the |
| 81 | + // matching groups from this pattern |
| 82 | + List<String> words = new ArrayList<>(); |
| 83 | + for (int i = 0; i < nodeRegex.size(); ++i) { |
| 84 | + Matcher regexMatcher = nodeRegex.get(i).matcher(origWord); |
| 85 | + if (!regexMatcher.matches()) { |
| 86 | + return false; |
| 87 | + } |
| 88 | + |
| 89 | + StringBuilder newWordBuilder = new StringBuilder(); |
| 90 | + for (int j = 0; j < regexMatcher.groupCount(); ++j) { |
| 91 | + newWordBuilder.append(regexMatcher.group(j+1)); |
| 92 | + } |
| 93 | + String newWord = newWordBuilder.toString(); |
| 94 | + if (newWord.length() == 0) { |
| 95 | + return false; |
| 96 | + } |
| 97 | + words.add(newWord); |
| 98 | + } |
| 99 | + |
| 100 | + int matchedIndex = matchedNode.index(); |
| 101 | + |
| 102 | + // at this point, we can make new words out of each of the patterns |
| 103 | + |
| 104 | + // move all words down by nodeRegex.size() - 1 |
| 105 | + // then move the original word down by headIndex |
| 106 | + AddDep.moveNodes(sg, sm, x -> (x > matchedIndex), x -> x+nodeRegex.size() - 1, true); |
| 107 | + // the head node has its word replaced, and its index & links need |
| 108 | + // to be rearranged, but none of the links are added or removed |
| 109 | + if (headIndex > 0) { |
| 110 | + AddDep.moveNode(sg, sm, matchedNode, matchedIndex + headIndex); |
| 111 | + } |
| 112 | + matchedNode = sm.getNode(node); |
| 113 | + matchedNode.setWord(words.get(headIndex)); |
| 114 | + matchedNode.setValue(words.get(headIndex)); |
| 115 | + |
| 116 | + for (int i = 0; i < nodeRegex.size(); ++i) { |
| 117 | + if (i == headIndex) |
| 118 | + continue; |
| 119 | + |
| 120 | + // otherwise, add a word with the appropriate index, |
| 121 | + // then connect it to matchedNode |
| 122 | + // TODO: add the ability to set more values, such as POS? |
| 123 | + IndexedWord newNode = new IndexedWord(); |
| 124 | + newNode.setDocID(matchedNode.docID()); |
| 125 | + newNode.setIndex(matchedIndex + i); |
| 126 | + newNode.setSentIndex(matchedNode.sentIndex()); |
| 127 | + newNode.setWord(words.get(i)); |
| 128 | + newNode.setValue(words.get(i)); |
| 129 | + |
| 130 | + sg.addVertex(newNode); |
| 131 | + sg.addEdge(matchedNode, newNode, relation, 0.0, false); |
| 132 | + } |
| 133 | + return true; |
| 134 | + } |
| 135 | +} |
0 commit comments