Using guaranteed-unique field names for the duplicates index.
author Jan Lahoda <jlahoda@netbeans.org>
Fri, 22 Jun 2012 22:58:14 +0200
changeset 8044d8a57f89afd
parent 803 09425fee64d8
child 805 e502b71f2040
Using guaranteed-unique field names for the duplicates index.
duplicates/ide/impl/src/org/netbeans/modules/jackpot30/impl/duplicates/ComputeDuplicates.java
duplicates/ide/impl/src/org/netbeans/modules/jackpot30/impl/duplicates/indexing/DuplicatesIndex.java
     1.1 --- a/duplicates/ide/impl/src/org/netbeans/modules/jackpot30/impl/duplicates/ComputeDuplicates.java	Fri Jun 22 22:30:08 2012 +0200
     1.2 +++ b/duplicates/ide/impl/src/org/netbeans/modules/jackpot30/impl/duplicates/ComputeDuplicates.java	Fri Jun 22 22:58:14 2012 +0200
     1.3 @@ -155,7 +155,7 @@
     1.4  
     1.5          MultiReader r = new MultiReader(readers2Roots.keySet().toArray(new IndexReader[0]));
     1.6  
     1.7 -        List<String> dd = new ArrayList<String>(getDuplicatedValues(r, "generalized", cancel));
     1.8 +        List<String> dd = new ArrayList<String>(getDuplicatedValues(r, "duplicatesGeneralized", cancel));
     1.9  
    1.10          sortHashes(dd);
    1.11  
    1.12 @@ -196,7 +196,7 @@
    1.13              String longest = duplicateCandidates.next();
    1.14              List<Span> foundDuplicates = new LinkedList<Span>();
    1.15  
    1.16 -            Query query = new TermQuery(new Term("generalized", longest));
    1.17 +            Query query = new TermQuery(new Term("duplicatesGeneralized", longest));
    1.18  
    1.19              for (Entry<IndexReader, FileObject> e : readers2Roots.entrySet()) {
    1.20                  Searcher s = new IndexSearcher(e.getKey());
    1.21 @@ -207,15 +207,14 @@
    1.22  
    1.23                  for (int docNum = matchingDocuments.nextSetBit(0); docNum >= 0; docNum = matchingDocuments.nextSetBit(docNum + 1)) {
    1.24                      final Document doc = e.getKey().document(docNum);
    1.25 -                    int pos = Arrays.binarySearch(doc.getValues("generalized"), longest);
    1.26 +                    int pos = Arrays.binarySearch(doc.getValues("duplicatesGeneralized"), longest);
    1.27  
    1.28                      if (pos < 0) {
    1.29 -                        System.err.println("FOOBAR=" + pos);
    1.30                          continue;
    1.31                      }
    1.32                      
    1.33 -                    String spanSpec = doc.getValues("positions")[pos];
    1.34 -                    String relPath = doc.getField("path").stringValue();
    1.35 +                    String spanSpec = doc.getValues("duplicatesPositions")[pos];
    1.36 +                    String relPath = doc.getField("duplicatesPath").stringValue();
    1.37  
    1.38                      for (String spanPart : spanSpec.split(";")) {
    1.39                          Span span = Span.of(e.getValue().getFileObject(relPath), spanPart);
     2.1 --- a/duplicates/ide/impl/src/org/netbeans/modules/jackpot30/impl/duplicates/indexing/DuplicatesIndex.java	Fri Jun 22 22:30:08 2012 +0200
     2.2 +++ b/duplicates/ide/impl/src/org/netbeans/modules/jackpot30/impl/duplicates/indexing/DuplicatesIndex.java	Fri Jun 22 22:58:14 2012 +0200
     2.3 @@ -55,10 +55,8 @@
     2.4  import org.netbeans.api.java.source.CompilationInfo;
     2.5  import org.netbeans.modules.jackpot30.common.api.IndexAccess;
     2.6  import org.netbeans.modules.jackpot30.impl.duplicates.ComputeDuplicates;
     2.7 -import org.netbeans.modules.java.source.JavaSourceAccessor;
     2.8  import org.netbeans.modules.parsing.spi.indexing.Indexable;
     2.9  import org.openide.filesystems.FileObject;
    2.10 -import org.openide.filesystems.FileUtil;
    2.11  import org.openide.util.Lookup;
    2.12  
    2.13  /**
    2.14 @@ -83,12 +81,12 @@
    2.15          try {
    2.16              final Document doc = new Document();
    2.17  
    2.18 -            doc.add(new Field("path", relative, Field.Store.YES, Field.Index.NOT_ANALYZED));
    2.19 +            doc.add(new Field("duplicatesPath", relative, Field.Store.YES, Field.Index.NOT_ANALYZED));
    2.20  
    2.21              final Map<String, long[]> positions = ComputeDuplicates.encodeGeneralized(trees, cut);
    2.22  
    2.23              for (Entry<String, long[]> e : positions.entrySet()) {
    2.24 -                doc.add(new Field("generalized", e.getKey(), Store.YES, Index.NOT_ANALYZED));
    2.25 +                doc.add(new Field("duplicatesGeneralized", e.getKey(), Store.YES, Index.NOT_ANALYZED));
    2.26  
    2.27                  StringBuilder positionsSpec = new StringBuilder();
    2.28  
    2.29 @@ -97,7 +95,7 @@
    2.30                      positionsSpec.append(e.getValue()[i]).append(':').append(e.getValue()[i + 1] - e.getValue()[i]);
    2.31                  }
    2.32  
    2.33 -                doc.add(new Field("positions", positionsSpec.toString(), Store.YES, Index.NO));
    2.34 +                doc.add(new Field("duplicatesPositions", positionsSpec.toString(), Store.YES, Index.NO));
    2.35              }
    2.36  
    2.37              luceneWriter.addDocument(doc);
    2.38 @@ -109,7 +107,7 @@
    2.39      }
    2.40  
    2.41      public void remove(String relativePath) throws IOException {
    2.42 -        luceneWriter.deleteDocuments(new Term("path", relativePath));
    2.43 +        luceneWriter.deleteDocuments(new Term("duplicatesPath", relativePath));
    2.44      }
    2.45  
    2.46      public void close() throws IOException {