commit a575963da9
Author: Jo Shields
Date:   2014-08-13 10:39:27 +01:00

    Imported Upstream version 3.6.0

    Former-commit-id: da6be194a6b1221998fc28233f2503bd61dd9d14

50588 changed files with 8155799 additions and 0 deletions

@@ -0,0 +1,98 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using Lucene.Net.Analysis.AR;
using Lucene.Net.Test.Analysis;
using NUnit.Framework;
using Version = Lucene.Net.Util.Version;
namespace Lucene.Net.Analyzers.AR
{
/*
* Test the Arabic Analyzer
*
*/
[TestFixture]
public class TestArabicAnalyzer : BaseTokenStreamTestCase
{
/* This test fails with NPE when the
* stopwords file is missing in classpath */
[Test]
public void TestResourcesAvailable()
{
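// Constructing the analyzer loads the bundled stopword resources; a missing resource would throw here.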
new ArabicAnalyzer(Version.LUCENE_CURRENT);
}
/*
* Some simple tests showing some features of the analyzer, how some regular forms will conflate
*/
[Test]
public void TestBasicFeatures()
{
ArabicAnalyzer a = new ArabicAnalyzer(Version.LUCENE_CURRENT);
AssertAnalyzesTo(a, "كبير", new String[] { "كبير" });
AssertAnalyzesTo(a, "كبيرة", new String[] { "كبير" }); // feminine marker
AssertAnalyzesTo(a, "مشروب", new String[] { "مشروب" });
AssertAnalyzesTo(a, "مشروبات", new String[] { "مشروب" }); // plural -at
AssertAnalyzesTo(a, "أمريكيين", new String[] { "امريك" }); // plural -in
AssertAnalyzesTo(a, "امريكي", new String[] { "امريك" }); // singular with bare alif
AssertAnalyzesTo(a, "كتاب", new String[] { "كتاب" });
AssertAnalyzesTo(a, "الكتاب", new String[] { "كتاب" }); // definite article
AssertAnalyzesTo(a, "ما ملكت أيمانكم", new String[] { "ملكت", "ايمانكم" });
AssertAnalyzesTo(a, "الذين ملكت أيمانكم", new String[] { "ملكت", "ايمانكم" }); // stopwords
}
/*
* Simple tests to show things are getting reset correctly, etc.
*/
[Test]
public void TestReusableTokenStream()
{
ArabicAnalyzer a = new ArabicAnalyzer(Version.LUCENE_CURRENT);
AssertAnalyzesToReuse(a, "كبير", new String[] { "كبير" });
AssertAnalyzesToReuse(a, "كبيرة", new String[] { "كبير" }); // feminine marker
}
/*
* Non-Arabic text gets treated in a similar way to SimpleAnalyzer.
*/
[Test]
public void TestEnglishInput()
{
AssertAnalyzesTo(new ArabicAnalyzer(Version.LUCENE_CURRENT), "English text.", new String[] {
"english", "text" });
}
/*
* Test that custom stopwords work, and are not case-sensitive.
*/
[Test]
public void TestCustomStopwords()
{
ArabicAnalyzer a = new ArabicAnalyzer(Version.LUCENE_CURRENT, new String[] { "the", "and", "a" });
AssertAnalyzesTo(a, "The quick brown fox.", new String[] { "quick", "brown", "fox" });
}
}
}


@@ -0,0 +1,128 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.IO;
using Lucene.Net.Analysis.AR;
using Lucene.Net.Test.Analysis;
using NUnit.Framework;
namespace Lucene.Net.Analyzers.AR
{
/*
* Test the Arabic Normalization Filter
*
*/
[TestFixture]
public class TestArabicNormalizationFilter : BaseTokenStreamTestCase
{
[Test]
public void TestAlifMadda()
{
Check("آجن", "اجن");
}
[Test]
public void TestAlifHamzaAbove()
{
Check("أحمد", "احمد");
}
[Test]
public void TestAlifHamzaBelow()
{
Check("إعاذ", "اعاذ");
}
[Test]
public void TestAlifMaksura()
{
Check("بنى", "بني");
}
[Test]
public void TestTehMarbuta()
{
Check("فاطمة", "فاطمه");
}
[Test]
public void TestTatweel()
{
Check("روبرـــــت", "روبرت");
}
[Test]
public void TestFatha()
{
Check("مَبنا", "مبنا");
}
[Test]
public void TestKasra()
{
Check("علِي", "علي");
}
[Test]
public void TestDamma()
{
Check("بُوات", "بوات");
}
[Test]
public void TestFathatan()
{
Check("ولداً", "ولدا");
}
[Test]
public void TestKasratan()
{
Check("ولدٍ", "ولد");
}
[Test]
public void TestDammatan()
{
Check("ولدٌ", "ولد");
}
[Test]
public void TestSukun()
{
Check("نلْسون", "نلسون");
}
[Test]
public void TestShaddah()
{
Check("هتميّ", "هتمي");
}
private void Check(string input, string expected)
{
ArabicLetterTokenizer tokenStream = new ArabicLetterTokenizer(new StringReader(input));
ArabicNormalizationFilter filter = new ArabicNormalizationFilter(tokenStream);
AssertTokenStreamContents(filter, new String[] { expected });
}
}
}


@@ -0,0 +1,169 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.IO;
using Lucene.Net.Analysis.AR;
using Lucene.Net.Test.Analysis;
using NUnit.Framework;
namespace Lucene.Net.Analyzers.AR
{
/*
* Test the Arabic Stem Filter
*
*/
[TestFixture]
public class TestArabicStemFilter : BaseTokenStreamTestCase
{
[Test]
public void TestAlPrefix()
{
Check("الحسن", "حسن");
}
[Test]
public void TestWalPrefix()
{
Check("والحسن", "حسن");
}
[Test]
public void TestBalPrefix()
{
Check("بالحسن", "حسن");
}
[Test]
public void TestKalPrefix()
{
Check("كالحسن", "حسن");
}
[Test]
public void TestFalPrefix()
{
Check("فالحسن", "حسن");
}
[Test]
public void TestLlPrefix()
{
Check("للاخر", "اخر");
}
[Test]
public void TestWaPrefix()
{
Check("وحسن", "حسن");
}
[Test]
public void TestAhSuffix()
{
Check("زوجها", "زوج");
}
[Test]
public void TestAnSuffix()
{
Check("ساهدان", "ساهد");
}
[Test]
public void TestAtSuffix()
{
Check("ساهدات", "ساهد");
}
[Test]
public void TestWnSuffix()
{
Check("ساهدون", "ساهد");
}
[Test]
public void TestYnSuffix()
{
Check("ساهدين", "ساهد");
}
[Test]
public void TestYhSuffix()
{
Check("ساهديه", "ساهد");
}
[Test]
public void TestYpSuffix()
{
Check("ساهدية", "ساهد");
}
[Test]
public void TestHSuffix()
{
Check("ساهده", "ساهد");
}
[Test]
public void TestPSuffix()
{
Check("ساهدة", "ساهد");
}
[Test]
public void TestYSuffix()
{
Check("ساهدي", "ساهد");
}
[Test]
public void TestComboPrefSuf()
{
Check("وساهدون", "ساهد");
}
[Test]
public void TestComboSuf()
{
Check("ساهدهات", "ساهد");
}
[Test]
public void TestShouldntStem()
{
Check("الو", "الو");
}
[Test]
public void TestNonArabic()
{
Check("English", "English");
}
private void Check(string input, string expected)
{
ArabicLetterTokenizer tokenStream = new ArabicLetterTokenizer(new StringReader(input));
ArabicStemFilter filter = new ArabicStemFilter(tokenStream);
AssertTokenStreamContents(filter, new String[] { expected });
}
}
}


@@ -0,0 +1,179 @@
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Lucene.Net.Analysis;
using Lucene.Net.Analysis.BR;
using Lucene.Net.Test.Analysis;
using NUnit.Framework;
using Version = Lucene.Net.Util.Version;
namespace Lucene.Net.Analyzers.Br
{
/*
* Test the Brazilian Stem Filter, which only modifies the term text.
*
* It is very similar to the Snowball Portuguese algorithm, but not exactly the same.
*
*/
[TestFixture]
public class TestBrazilianStemmer : BaseTokenStreamTestCase
{
[Test]
public void TestWithSnowballExamples()
{
Check("boa", "boa");
Check("boainain", "boainain");
Check("boas", "boas");
Check("bôas", "boas"); // removes diacritic: different from snowball portugese
Check("boassu", "boassu");
Check("boataria", "boat");
Check("boate", "boat");
Check("boates", "boat");
Check("boatos", "boat");
Check("bob", "bob");
Check("boba", "bob");
Check("bobagem", "bobag");
Check("bobagens", "bobagens");
Check("bobalhões", "bobalho"); // removes diacritic: different from snowball portugese
Check("bobear", "bob");
Check("bobeira", "bobeir");
Check("bobinho", "bobinh");
Check("bobinhos", "bobinh");
Check("bobo", "bob");
Check("bobs", "bobs");
Check("boca", "boc");
Check("bocadas", "boc");
Check("bocadinho", "bocadinh");
Check("bocado", "boc");
Check("bocaiúva", "bocaiuv"); // removes diacritic: different from snowball portuguese
Check("boçal", "bocal"); // removes diacritic: different from snowball portuguese
Check("bocarra", "bocarr");
Check("bocas", "boc");
Check("bode", "bod");
Check("bodoque", "bodoqu");
Check("body", "body");
Check("boeing", "boeing");
Check("boem", "boem");
Check("boemia", "boem");
Check("boêmio", "boemi"); // removes diacritic: different from snowball portuguese
Check("bogotá", "bogot");
Check("boi", "boi");
Check("bóia", "boi"); // removes diacritic: different from snowball portuguese
Check("boiando", "boi");
Check("quiabo", "quiab");
Check("quicaram", "quic");
Check("quickly", "quickly");
Check("quieto", "quiet");
Check("quietos", "quiet");
Check("quilate", "quilat");
Check("quilates", "quilat");
Check("quilinhos", "quilinh");
Check("quilo", "quil");
Check("quilombo", "quilomb");
Check("quilométricas", "quilometr"); // removes diacritic: different from snowball portuguese
Check("quilométricos", "quilometr"); // removes diacritic: different from snowball portuguese
Check("quilômetro", "quilometr"); // removes diacritic: different from snowball portoguese
Check("quilômetros", "quilometr"); // removes diacritic: different from snowball portoguese
Check("quilos", "quil");
Check("quimica", "quimic");
Check("quilos", "quil");
Check("quimica", "quimic");
Check("quimicas", "quimic");
Check("quimico", "quimic");
Check("quimicos", "quimic");
Check("quimioterapia", "quimioterap");
Check("quimioterápicos", "quimioterap"); // removes diacritic: different from snowball portoguese
Check("quimono", "quimon");
Check("quincas", "quinc");
Check("quinhão", "quinha"); // removes diacritic: different from snowball portoguese
Check("quinhentos", "quinhent");
Check("quinn", "quinn");
Check("quino", "quin");
Check("quinta", "quint");
Check("quintal", "quintal");
Check("quintana", "quintan");
Check("quintanilha", "quintanilh");
Check("quintão", "quinta"); // removes diacritic: different from snowball portoguese
Check("quintessência", "quintessente"); // versus snowball portuguese 'quintessent'
Check("quintino", "quintin");
Check("quinto", "quint");
Check("quintos", "quint");
Check("quintuplicou", "quintuplic");
Check("quinze", "quinz");
Check("quinzena", "quinzen");
Check("quiosque", "quiosqu");
}
[Test]
public void TestNormalization()
{
Check("Brasil", "brasil"); // lowercase by default
Check("Brasília", "brasil"); // remove diacritics
Check("quimio5terápicos", "quimio5terapicos"); // contains non-letter, diacritic will still be removed
Check("áá", "áá"); // token is too short: diacritics are not removed
Check("ááá", "aaa"); // normally, diacritics are removed
}
[Test]
public void TestReusableTokenStream()
{
Analyzer a = new BrazilianAnalyzer(Version.LUCENE_CURRENT);
CheckReuse(a, "boa", "boa");
CheckReuse(a, "boainain", "boainain");
CheckReuse(a, "boas", "boas");
CheckReuse(a, "bôas", "boas"); // removes diacritic: different from snowball portugese
}
[Test]
public void TestStemExclusionTable()
{
BrazilianAnalyzer a = new BrazilianAnalyzer(Version.LUCENE_CURRENT);
a.SetStemExclusionTable(new String[] { "quintessência" });
CheckReuse(a, "quintessência", "quintessência"); // excluded words will be completely unchanged.
}
/*
* Test that changes to the exclusion table are applied immediately
* when using reusable token streams.
*/
[Test]
public void TestExclusionTableReuse()
{
BrazilianAnalyzer a = new BrazilianAnalyzer(Version.LUCENE_CURRENT);
CheckReuse(a, "quintessência", "quintessente");
a.SetStemExclusionTable(new String[] { "quintessência" });
CheckReuse(a, "quintessência", "quintessência");
}
private void Check(String input, String expected)
{
CheckOneTerm(new BrazilianAnalyzer(Version.LUCENE_CURRENT), input, expected);
}
private void CheckReuse(Analyzer a, String input, String expected)
{
CheckOneTermReuse(a, input, expected);
}
}
}


@@ -0,0 +1,327 @@
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Lucene.Net.Analysis;
using Lucene.Net.Analysis.CJK;
using Lucene.Net.Test.Analysis;
using NUnit.Framework;
using Version = Lucene.Net.Util.Version;
namespace Lucene.Net.Analyzers.Cjk
{
[TestFixture]
public class TestCJKTokenizer : BaseTokenStreamTestCase
{
public class TestToken
{
protected internal String termText;
protected internal int start;
protected internal int end;
protected internal String type;
}
public TestToken NewToken(String termText, int start, int end, int type)
{
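// Build an expected token, mapping the numeric type index to CJKTokenizer.TOKEN_TYPE_NAMES.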
TestToken token = new TestToken();
token.termText = termText;
token.type = CJKTokenizer.TOKEN_TYPE_NAMES[type];
token.start = start;
token.end = end;
return token;
}
public void CheckCjkToken(String str, TestToken[] out_tokens)
{
Analyzer analyzer = new CJKAnalyzer(Version.LUCENE_CURRENT);
String[] terms = new String[out_tokens.Length];
int[] startOffsets = new int[out_tokens.Length];
int[] endOffsets = new int[out_tokens.Length];
String[] types = new String[out_tokens.Length];
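// Flatten the expected tokens into the parallel arrays that AssertAnalyzesTo consumes.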
for (int i = 0; i < out_tokens.Length; i++)
{
terms[i] = out_tokens[i].termText;
startOffsets[i] = out_tokens[i].start;
endOffsets[i] = out_tokens[i].end;
types[i] = out_tokens[i].type;
}
AssertAnalyzesTo(analyzer, str, terms, startOffsets, endOffsets, types, null);
}
public void CheckCjkTokenReusable(Analyzer a, String str, TestToken[] out_tokens)
{
String[] terms = new String[out_tokens.Length];
int[] startOffsets = new int[out_tokens.Length];
int[] endOffsets = new int[out_tokens.Length];
String[] types = new String[out_tokens.Length];
for (int i = 0; i < out_tokens.Length; i++)
{
terms[i] = out_tokens[i].termText;
startOffsets[i] = out_tokens[i].start;
endOffsets[i] = out_tokens[i].end;
types[i] = out_tokens[i].type;
}
AssertAnalyzesToReuse(a, str, terms, startOffsets, endOffsets, types, null);
}
[Test]
public void TestJa1()
{
String str = "\u4e00\u4e8c\u4e09\u56db\u4e94\u516d\u4e03\u516b\u4e5d\u5341";
TestToken[] out_tokens = {
NewToken("\u4e00\u4e8c", 0, 2, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u4e8c\u4e09", 1, 3, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u4e09\u56db", 2, 4, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u56db\u4e94", 3, 5, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u4e94\u516d", 4, 6, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u516d\u4e03", 5, 7, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u4e03\u516b", 6, 8, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u516b\u4e5d", 7, 9, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u4e5d\u5341", 8, 10, CJKTokenizer.DOUBLE_TOKEN_TYPE)
};
CheckCjkToken(str, out_tokens);
}
[Test]
public void TestJa2()
{
String str = "\u4e00 \u4e8c\u4e09\u56db \u4e94\u516d\u4e03\u516b\u4e5d \u5341";
TestToken[] out_tokens = {
NewToken("\u4e00", 0, 1, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u4e8c\u4e09", 2, 4, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u4e09\u56db", 3, 5, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u4e94\u516d", 6, 8, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u516d\u4e03", 7, 9, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u4e03\u516b", 8, 10, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u516b\u4e5d", 9, 11, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u5341", 12, 13, CJKTokenizer.DOUBLE_TOKEN_TYPE)
};
CheckCjkToken(str, out_tokens);
}
[Test]
public void TestC()
{
String str = "abc defgh ijklmn opqrstu vwxy z";
TestToken[] out_tokens = {
NewToken("abc", 0, 3, CJKTokenizer.SINGLE_TOKEN_TYPE),
NewToken("defgh", 4, 9, CJKTokenizer.SINGLE_TOKEN_TYPE),
NewToken("ijklmn", 10, 16, CJKTokenizer.SINGLE_TOKEN_TYPE),
NewToken("opqrstu", 17, 24, CJKTokenizer.SINGLE_TOKEN_TYPE),
NewToken("vwxy", 25, 29, CJKTokenizer.SINGLE_TOKEN_TYPE),
NewToken("z", 30, 31, CJKTokenizer.SINGLE_TOKEN_TYPE),
};
CheckCjkToken(str, out_tokens);
}
[Test]
public void TestMix()
{
String str = "\u3042\u3044\u3046\u3048\u304aabc\u304b\u304d\u304f\u3051\u3053";
TestToken[] out_tokens = {
NewToken("\u3042\u3044", 0, 2, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u3044\u3046", 1, 3, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u3046\u3048", 2, 4, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u3048\u304a", 3, 5, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("abc", 5, 8, CJKTokenizer.SINGLE_TOKEN_TYPE),
NewToken("\u304b\u304d", 8, 10, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u304d\u304f", 9, 11, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u304f\u3051", 10, 12, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u3051\u3053", 11, 13, CJKTokenizer.DOUBLE_TOKEN_TYPE)
};
CheckCjkToken(str, out_tokens);
}
[Test]
public void TestMix2()
{
String str = "\u3042\u3044\u3046\u3048\u304aab\u3093c\u304b\u304d\u304f\u3051 \u3053";
TestToken[] out_tokens = {
NewToken("\u3042\u3044", 0, 2, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u3044\u3046", 1, 3, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u3046\u3048", 2, 4, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u3048\u304a", 3, 5, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("ab", 5, 7, CJKTokenizer.SINGLE_TOKEN_TYPE),
NewToken("\u3093", 7, 8, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("c", 8, 9, CJKTokenizer.SINGLE_TOKEN_TYPE),
NewToken("\u304b\u304d", 9, 11, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u304d\u304f", 10, 12, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u304f\u3051", 11, 13, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u3053", 14, 15, CJKTokenizer.DOUBLE_TOKEN_TYPE)
};
CheckCjkToken(str, out_tokens);
}
[Test]
public void TestSingleChar()
{
String str = "\u4e00";
TestToken[] out_tokens = {
NewToken("\u4e00", 0, 1, CJKTokenizer.DOUBLE_TOKEN_TYPE),
};
CheckCjkToken(str, out_tokens);
}
/*
* Full-width text is normalized to half-width
*/
[Test]
public void TestFullWidth()
{
String str = "Test 1234";
TestToken[] out_tokens = {
NewToken("test", 0, 4, CJKTokenizer.SINGLE_TOKEN_TYPE),
NewToken("1234", 5, 9, CJKTokenizer.SINGLE_TOKEN_TYPE)
};
CheckCjkToken(str, out_tokens);
}
/*
* Non-English text (not just CJK) is treated the same way as CJK: C1C2 C2C3 bigrams
*/
[Test]
public void TestNonIdeographic()
{
String str = "\u4e00 روبرت موير";
TestToken[] out_tokens = {
NewToken("\u4e00", 0, 1, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("رو", 2, 4, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("وب", 3, 5, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("بر", 4, 6, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("رت", 5, 7, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("مو", 8, 10, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("وي", 9, 11, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("ير", 10, 12, CJKTokenizer.DOUBLE_TOKEN_TYPE)
};
CheckCjkToken(str, out_tokens);
}
/*
* Non-English text with non-letters (non-spacing marks, etc.) is treated as C1C2 C2C3 bigrams,
* except that words are split around the non-letters.
*/
[Test]
public void TestNonIdeographicNonLetter()
{
String str = "\u4e00 رُوبرت موير";
TestToken[] out_tokens = {
NewToken("\u4e00", 0, 1, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("ر", 2, 3, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("وب", 4, 6, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("بر", 5, 7, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("رت", 6, 8, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("مو", 9, 11, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("وي", 10, 12, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("ير", 11, 13, CJKTokenizer.DOUBLE_TOKEN_TYPE)
};
CheckCjkToken(str, out_tokens);
}
[Test]
public void TestTokenStream()
{
Analyzer analyzer = new CJKAnalyzer(Version.LUCENE_CURRENT);
AssertAnalyzesTo(analyzer, "\u4e00\u4e01\u4e02",
new String[] {"\u4e00\u4e01", "\u4e01\u4e02"});
}
[Test]
public void TestReusableTokenStream()
{
Analyzer analyzer = new CJKAnalyzer(Version.LUCENE_CURRENT);
String str = "\u3042\u3044\u3046\u3048\u304aabc\u304b\u304d\u304f\u3051\u3053";
TestToken[] out_tokens = {
NewToken("\u3042\u3044", 0, 2, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u3044\u3046", 1, 3, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u3046\u3048", 2, 4, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u3048\u304a", 3, 5, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("abc", 5, 8, CJKTokenizer.SINGLE_TOKEN_TYPE),
NewToken("\u304b\u304d", 8, 10, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u304d\u304f", 9, 11, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u304f\u3051", 10, 12, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u3051\u3053", 11, 13, CJKTokenizer.DOUBLE_TOKEN_TYPE)
};
CheckCjkTokenReusable(analyzer, str, out_tokens);
str = "\u3042\u3044\u3046\u3048\u304aab\u3093c\u304b\u304d\u304f\u3051 \u3053";
TestToken[] out_tokens2 = {
NewToken("\u3042\u3044", 0, 2, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u3044\u3046", 1, 3, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u3046\u3048", 2, 4, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u3048\u304a", 3, 5, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("ab", 5, 7, CJKTokenizer.SINGLE_TOKEN_TYPE),
NewToken("\u3093", 7, 8, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("c", 8, 9, CJKTokenizer.SINGLE_TOKEN_TYPE),
NewToken("\u304b\u304d", 9, 11, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u304d\u304f", 10, 12, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u304f\u3051", 11, 13, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("\u3053", 14, 15, CJKTokenizer.DOUBLE_TOKEN_TYPE)
};
CheckCjkTokenReusable(analyzer, str, out_tokens2);
}
/*
* LUCENE-2207: wrong offset calculated by end()
*/
[Test]
public void TestFinalOffset()
{
CheckCjkToken("あい", new TestToken[]
{
NewToken("あい", 0, 2, CJKTokenizer.DOUBLE_TOKEN_TYPE)
});
CheckCjkToken("あい ", new TestToken[]
{
NewToken("あい", 0, 2, CJKTokenizer.DOUBLE_TOKEN_TYPE)
});
CheckCjkToken("test", new TestToken[]
{
NewToken("test", 0, 4, CJKTokenizer.SINGLE_TOKEN_TYPE)
});
CheckCjkToken("test ", new TestToken[]
{
NewToken("test", 0, 4, CJKTokenizer.SINGLE_TOKEN_TYPE)
});
CheckCjkToken("あいtest", new TestToken[]
{
NewToken("あい", 0, 2, CJKTokenizer.DOUBLE_TOKEN_TYPE),
NewToken("test", 2, 6, CJKTokenizer.SINGLE_TOKEN_TYPE)
});
CheckCjkToken("testあい ", new TestToken[]
{
NewToken("test", 0, 4, CJKTokenizer.SINGLE_TOKEN_TYPE),
NewToken("あい", 4, 6, CJKTokenizer.DOUBLE_TOKEN_TYPE)
});
}
}
}


@@ -0,0 +1,131 @@
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using Lucene.Net.Analysis;
using Lucene.Net.Analysis.Cn;
using Lucene.Net.Analysis.Tokenattributes;
using Lucene.Net.Test.Analysis;
using NUnit.Framework;
namespace Lucene.Net.Analyzers.Cn
{
[TestFixture]
public class TestChineseTokenizer : BaseTokenStreamTestCase
{
[Test]
public void TestOtherLetterOffset()
{
String s = "a天b";
ChineseTokenizer tokenizer = new ChineseTokenizer(new StringReader(s));
int correctStartOffset = 0;
int correctEndOffset = 1;
IOffsetAttribute offsetAtt = tokenizer.GetAttribute<IOffsetAttribute>();
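// ChineseTokenizer emits one token per character, so both offsets advance by one on each iteration.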
while (tokenizer.IncrementToken())
{
Assert.AreEqual(correctStartOffset, offsetAtt.StartOffset);
Assert.AreEqual(correctEndOffset, offsetAtt.EndOffset);
correctStartOffset++;
correctEndOffset++;
}
}
[Test]
public void TestReusableTokenStream()
{
Analyzer a = new ChineseAnalyzer();
AssertAnalyzesToReuse(a, "中华人民共和国",
new String[] { "中", "华", "人", "民", "共", "和", "国" },
new int[] { 0, 1, 2, 3, 4, 5, 6 },
new int[] { 1, 2, 3, 4, 5, 6, 7 });
AssertAnalyzesToReuse(a, "北京市",
new String[] { "北", "京", "市" },
new int[] { 0, 1, 2 },
new int[] { 1, 2, 3 });
}
/*
* Analyzer that just uses ChineseTokenizer, not ChineseFilter.
* Convenience to show the behavior of the tokenizer.
*/
private class JustChineseTokenizerAnalyzer : Analyzer
{
public override TokenStream TokenStream(String fieldName, TextReader reader)
{
return new ChineseTokenizer(reader);
}
}
/*
* Analyzer that just uses ChineseFilter, not ChineseTokenizer.
* Convenience to show the behavior of the filter.
*/
private class JustChineseFilterAnalyzer : Analyzer
{
public override TokenStream TokenStream(String fieldName, TextReader reader)
{
return new ChineseFilter(new WhitespaceTokenizer(reader));
}
}
/*
* ChineseTokenizer tokenizes numbers as one token, but they are filtered by ChineseFilter
*/
[Test]
public void TestNumerics()
{
Analyzer justTokenizer = new JustChineseTokenizerAnalyzer();
AssertAnalyzesTo(justTokenizer, "中1234", new String[] {"中", "1234"});
// in this case the ChineseAnalyzer (which applies ChineseFilter) will remove the numeric token.
Analyzer a = new ChineseAnalyzer();
AssertAnalyzesTo(a, "中1234", new String[] {"中"});
}
/*
* ChineseTokenizer tokenizes English similarly to SimpleAnalyzer:
* it lowercases terms automatically.
*
* ChineseFilter has an English stopword list; it also removes any single-character tokens.
* The stopword list is case-sensitive.
*/
[Test]
public void TestEnglish()
{
Analyzer chinese = new ChineseAnalyzer();
AssertAnalyzesTo(chinese, "This is a Test. b c d",
new String[] {"test"});
Analyzer justTokenizer = new JustChineseTokenizerAnalyzer();
AssertAnalyzesTo(justTokenizer, "This is a Test. b c d",
new String[] {"this", "is", "a", "test", "b", "c", "d"});
Analyzer justFilter = new JustChineseFilterAnalyzer();
AssertAnalyzesTo(justFilter, "This is a Test. b c d",
new String[] {"This", "Test."});
}
}
}


@@ -0,0 +1,39 @@
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using NUnit.Framework;
namespace Lucene.Net.Analyzers.Compound
{
[TestFixture]
public class TestCompoundWordTokenFilter
{
[Test]
public void TestIgnore()
{
Assert.Ignore("Needs to be ported");
}
}
}


@@ -0,0 +1,245 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<Project ToolsVersion="4.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
<Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
<ProductVersion>9.0.21022</ProductVersion>
<SchemaVersion>2.0</SchemaVersion>
<ProjectGuid>{67D27628-F1D5-4499-9818-B669731925C8}</ProjectGuid>
<AppDesignerFolder>Properties</AppDesignerFolder>
<RootNamespace>Lucene.Net.Analysis.Test</RootNamespace>
<AssemblyName>Lucene.Net.Contrib.Analyzers.Test</AssemblyName>
<FileAlignment>512</FileAlignment>
<FileUpgradeFlags>
</FileUpgradeFlags>
<OldToolsVersion>3.5</OldToolsVersion>
<UpgradeBackupLocation />
<PublishUrl>publish\</PublishUrl>
<Install>true</Install>
<InstallFrom>Disk</InstallFrom>
<UpdateEnabled>false</UpdateEnabled>
<UpdateMode>Foreground</UpdateMode>
<UpdateInterval>7</UpdateInterval>
<UpdateIntervalUnits>Days</UpdateIntervalUnits>
<UpdatePeriodically>false</UpdatePeriodically>
<UpdateRequired>false</UpdateRequired>
<MapFileExtensions>true</MapFileExtensions>
<ApplicationRevision>0</ApplicationRevision>
<ApplicationVersion>1.0.0.%2a</ApplicationVersion>
<IsWebBootstrapper>false</IsWebBootstrapper>
<UseApplicationTrust>false</UseApplicationTrust>
<BootstrapperEnabled>true</BootstrapperEnabled>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
<TargetFrameworkVersion>v4.0</TargetFrameworkVersion>
<Framework>$(TargetFrameworkVersion.Replace("v", "NET").Replace(".", ""))</Framework>
<DebugSymbols>true</DebugSymbols>
<DebugType>full</DebugType>
<Optimize>false</Optimize>
<OutputPath>..\..\..\build\bin\contrib\Analyzers\$(Configuration.Replace("35", ""))\$(Framework)\</OutputPath>
<DefineConstants>DEBUG;TRACE;$(Framework)</DefineConstants>
<ErrorReport>prompt</ErrorReport>
<WarningLevel>4</WarningLevel>
<NoWarn>618</NoWarn>
<OutputType>Library</OutputType>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug35|AnyCPU' ">
<TargetFrameworkVersion>v3.5</TargetFrameworkVersion>
<Framework>$(TargetFrameworkVersion.Replace("v", "NET").Replace(".", ""))</Framework>
<DebugSymbols>true</DebugSymbols>
<DebugType>full</DebugType>
<Optimize>false</Optimize>
<OutputPath>..\..\..\build\bin\contrib\Analyzers\$(Configuration.Replace("35", ""))\$(Framework)\</OutputPath>
<DefineConstants>DEBUG;TRACE;$(Framework)</DefineConstants>
<ErrorReport>prompt</ErrorReport>
<WarningLevel>4</WarningLevel>
<NoWarn>618</NoWarn>
<OutputType>Library</OutputType>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
<TargetFrameworkVersion>v4.0</TargetFrameworkVersion>
<Framework>$(TargetFrameworkVersion.Replace("v", "NET").Replace(".", ""))</Framework>
<DebugType>pdbonly</DebugType>
<Optimize>true</Optimize>
<OutputPath>..\..\..\build\bin\contrib\Analyzers\$(Configuration.Replace("35", ""))\$(Framework)\</OutputPath>
<DefineConstants>TRACE;$(Framework)</DefineConstants>
<ErrorReport>prompt</ErrorReport>
<WarningLevel>4</WarningLevel>
<NoWarn>618</NoWarn>
<DebugSymbols>true</DebugSymbols>
<OutputType>Library</OutputType>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release35|AnyCPU' ">
<TargetFrameworkVersion>v3.5</TargetFrameworkVersion>
<Framework>$(TargetFrameworkVersion.Replace("v", "NET").Replace(".", ""))</Framework>
<DebugType>pdbonly</DebugType>
<Optimize>true</Optimize>
<OutputPath>..\..\..\build\bin\contrib\Analyzers\$(Configuration.Replace("35", ""))\$(Framework)\</OutputPath>
<DefineConstants>TRACE;$(Framework)</DefineConstants>
<ErrorReport>prompt</ErrorReport>
<WarningLevel>4</WarningLevel>
<NoWarn>618</NoWarn>
<DebugSymbols>true</DebugSymbols>
<OutputType>Library</OutputType>
</PropertyGroup>
<PropertyGroup>
<SignAssembly>true</SignAssembly>
</PropertyGroup>
<PropertyGroup>
<AssemblyOriginatorKeyFile>Lucene.Net.snk</AssemblyOriginatorKeyFile>
</PropertyGroup>
<ItemGroup>
<Reference Include="nunit.framework">
<HintPath>..\..\..\lib\NUnit.org\NUnit\2.5.9\bin\net-2.0\framework\nunit.framework.dll</HintPath>
</Reference>
<Reference Include="System" />
<Reference Condition="'$(Framework)' == 'NET35'" Include="System.Core" />
</ItemGroup>
<ItemGroup>
<Compile Include="AR\TestArabicAnalyzer.cs" />
<Compile Include="AR\TestArabicNormalizationFilter.cs" />
<Compile Include="AR\TestArabicStemFilter.cs" />
<Compile Include="Br\TestBrazilianStemmer.cs" />
<Compile Include="Cjk\TestCJKTokenizer.cs" />
<Compile Include="Cn\TestChineseTokenizer.cs" />
<Compile Include="Compound\TestCompoundWordTokenFilter.cs" />
<Compile Include="Cz\TestCzechAnalyzer.cs" />
<Compile Include="De\TestGermanStemFilter.cs" />
<Compile Include="El\GreekAnalyzerTest.cs" />
<Compile Include="Fa\TestPersianAnalyzer.cs" />
<Compile Include="Fr\TestElision.cs" />
<Compile Include="Fr\TestFrenchAnalyzer.cs" />
<Compile Include="Hunspell\HunspellDictionaryLoader.cs" />
<Compile Include="Hunspell\TestHunspellDictionary.cs" />
<Compile Include="Hunspell\TestHunspellStemFilter.cs" />
<Compile Include="Hunspell\TestHunspellStemmer.cs" />
<Compile Include="NGram\TestEdgeNGramTokenFilter.cs" />
<Compile Include="NGram\TestEdgeNGramTokenizer.cs" />
<Compile Include="Miscellaneous\PatternAnalyzerTest.cs" />
<Compile Include="Miscellaneous\TestEmptyTokenStream.cs" />
<Compile Include="Miscellaneous\TestPrefixAndSuffixAwareTokenFilter.cs" />
<Compile Include="Miscellaneous\TestPrefixAwareTokenFilter.cs" />
<Compile Include="Miscellaneous\TestSingleTokenTokenFilter.cs" />
<Compile Include="NGram\TestNGramTokenFilter.cs" />
<Compile Include="NGram\TestNGramTokenizer.cs" />
<Compile Include="Nl\TestDutchStemmer.cs" />
<Compile Include="Payloads\DelimitedPayloadTokenFilterTest.cs" />
<Compile Include="Payloads\NumericPayloadTokenFilterTest.cs" />
<Compile Include="Payloads\TokenOffsetPayloadTokenFilterTest.cs" />
<Compile Include="Payloads\TypeAsPayloadTokenFilterTest.cs" />
<Compile Include="Position\PositionFilterTest.cs" />
<Compile Include="Properties\AssemblyInfo.cs" />
<Compile Include="Query\QueryAutoStopWordAnalyzerTest.cs" />
<Compile Include="Reverse\TestReverseStringFilter.cs" />
<Compile Include="Ru\TestRussianAnalyzer.cs" />
<Compile Include="Ru\TestRussianStem.cs" />
<Compile Include="Shingle\ShingleAnalyzerWrapperTest.cs" />
<Compile Include="Shingle\ShingleFilterTest.cs" />
<Compile Include="Shingle\TestShingleMatrixFilter.cs" />
<Compile Include="Sinks\DateRecognizerSinkTokenizerTest.cs" />
<Compile Include="Sinks\TokenRangeSinkTokenizerTest.cs" />
<Compile Include="Sinks\TokenTypeSinkTokenizerTest.cs" />
<Compile Include="Th\TestThaiAnalyzer.cs" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\..\..\src\contrib\Analyzers\Contrib.Analyzers.csproj">
<Project>{4286E961-9143-4821-B46D-3D39D3736386}</Project>
<Name>Contrib.Analyzers</Name>
</ProjectReference>
<ProjectReference Include="..\..\..\src\core\Lucene.Net.csproj">
<Project>{5D4AD9BE-1FFB-41AB-9943-25737971BF57}</Project>
<Name>Lucene.Net</Name>
</ProjectReference>
<ProjectReference Include="..\..\core\Lucene.Net.Test.csproj">
<Project>{AAF68BCF-F781-45FC-98B3-2B9CEE411E01}</Project>
<Name>Lucene.Net.Test</Name>
</ProjectReference>
</ItemGroup>
<ItemGroup>
<BootstrapperPackage Include=".NETFramework,Version=v4.0">
<Visible>False</Visible>
<ProductName>Microsoft .NET Framework 4 %28x86 and x64%29</ProductName>
<Install>true</Install>
</BootstrapperPackage>
<BootstrapperPackage Include="Microsoft.Net.Client.3.5">
<Visible>False</Visible>
<ProductName>.NET Framework 3.5 SP1 Client Profile</ProductName>
<Install>false</Install>
</BootstrapperPackage>
<BootstrapperPackage Include="Microsoft.Net.Framework.3.5.SP1">
<Visible>False</Visible>
<ProductName>.NET Framework 3.5 SP1</ProductName>
<Install>false</Install>
</BootstrapperPackage>
<BootstrapperPackage Include="Microsoft.Windows.Installer.3.1">
<Visible>False</Visible>
<ProductName>Windows Installer 3.1</ProductName>
<Install>true</Install>
</BootstrapperPackage>
</ItemGroup>
<ItemGroup>
<Content Include="Cz\customStopWordFile.txt">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</Content>
<Content Include="De\data.txt">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</Content>
<Compile Include="Fa\TestPersianNormalizationFilter.cs" />
<Content Include="De\data_din2.txt">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</Content>
<Content Include="Nl\customStemDict.txt">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</Content>
<Content Include="Ru\resUTF8.txt">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</Content>
<Content Include="Ru\stemsUTF8.txt">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</Content>
<Content Include="Ru\testUTF8.txt">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</Content>
<Content Include="Ru\wordsUTF8.txt">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</Content>
</ItemGroup>
<ItemGroup>
<EmbeddedResource Include="Hunspell\Dictionaries\en_US.aff" />
<EmbeddedResource Include="Hunspell\Dictionaries\en_US.dic" />
<EmbeddedResource Include="Hunspell\Dictionaries\fr-moderne.aff" />
<EmbeddedResource Include="Hunspell\Dictionaries\fr-moderne.dic" />
<EmbeddedResource Include="Hunspell\Dictionaries\nl_NL.aff" />
<EmbeddedResource Include="Hunspell\Dictionaries\nl_NL.dic" />
<None Include="Lucene.Net.snk" />
</ItemGroup>
<ItemGroup />
<Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
<!-- To modify your build process, add your task inside one of the targets below and uncomment it.
Other similar extension points exist, see Microsoft.Common.targets.
<Target Name="BeforeBuild">
</Target>
<Target Name="AfterBuild">
</Target>
-->
</Project>


@@ -0,0 +1,102 @@
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using Lucene.Net.Analysis;
using Lucene.Net.Analysis.Cz;
using Lucene.Net.Test.Analysis;
using NUnit.Framework;
using Version = Lucene.Net.Util.Version;
namespace Lucene.Net.Analyzers.Cz
{
/*
* Test the CzechAnalyzer
*
* CzechAnalyzer is like a StandardAnalyzer with a custom stopword list.
*
*/
[TestFixture]
public class TestCzechAnalyzer : BaseTokenStreamTestCase
{
string customStopFile = @"Cz\customStopWordFile.txt";
[Test]
public void TestStopWord()
{
AssertAnalyzesTo(new CzechAnalyzer(Version.LUCENE_CURRENT), "Pokud mluvime o volnem", new String[] { "mluvime", "volnem" });
}
[Test]
public void TestReusableTokenStream()
{
Analyzer analyzer = new CzechAnalyzer(Version.LUCENE_CURRENT);
AssertAnalyzesToReuse(analyzer, "Pokud mluvime o volnem", new String[] { "mluvime", "volnem" });
AssertAnalyzesToReuse(analyzer, "Česká Republika", new String[] { "česká", "republika" });
}
/*
* An input stream that always throws IOException for testing.
*/
private class UnreliableInputStream : MemoryStream
{
public override int Read(byte[] buffer, int offset, int count)
{
throw new IOException();
}
}
/*
* The LoadStopWords method does not throw IOException on error;
* previously it set the stop table to null (rather than empty),
* which would cause an NPE when the StopFilter was created.
*/
[Test]
public void TestInvalidStopWordFile()
{
CzechAnalyzer cz = new CzechAnalyzer(Version.LUCENE_CURRENT);
cz.LoadStopWords(new UnreliableInputStream(), Encoding.UTF8);
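// Loading failed, so the stop set should now be empty (not null): no words are removed below.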
AssertAnalyzesTo(cz, "Pokud mluvime o volnem",
new String[] { "pokud", "mluvime", "o", "volnem" });
}
/*
* Test that changes to the stop table via loadStopWords are applied immediately
* when using reusable token streams.
*/
[Test]
public void TestStopWordFileReuse()
{
CzechAnalyzer cz = new CzechAnalyzer(Version.LUCENE_CURRENT);
AssertAnalyzesToReuse(cz, "Česká Republika",
new String[] { "česká", "republika" });
Stream stopwords = new FileStream(customStopFile, FileMode.Open, FileAccess.Read);
cz.LoadStopWords(stopwords, Encoding.UTF8);
AssertAnalyzesToReuse(cz, "Česká Republika", new String[] { "česká" });
}
}
}


@@ -0,0 +1,3 @@
examplestopword
anotherexamplestopword
republika


@@ -0,0 +1,144 @@
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
using System;
using System.IO;
using System.Text;
using Lucene.Net.Analysis;
using Lucene.Net.Analysis.De;
using Lucene.Net.Test.Analysis;
using NUnit.Framework;
using Version=Lucene.Net.Util.Version;
namespace Lucene.Net.Analyzers.De
{
/*
* Test the German stemmer. The stemming algorithm is known to work less
* than perfectly, as it doesn't use any word lists with exceptions. We
* also check some of the cases where the algorithm is wrong.
*
*/
[TestFixture]
public class TestGermanStemFilter : BaseTokenStreamTestCase
{
const string TestFile = @"De\data.txt";
const string TestFileDin2 = @"De\data_din2.txt";
[Test]
public void TestDin1Stemming()
{
// read test cases from external file:
using (var fis = new FileStream(TestFile, FileMode.Open, FileAccess.Read, FileShare.Read))
using (var breader = new StreamReader(fis, Encoding.GetEncoding("iso-8859-1")))
{
while (true)
{
String line = breader.ReadLine();
if (line == null)
break;
line = line.Trim();
if (line.StartsWith("#") || string.IsNullOrEmpty(line))
continue; // ignore comments and empty lines
String[] parts = line.Split(';');
//Console.WriteLine(parts[0] + " -- " + parts[1]);
Check(parts[0], parts[1], false);
}
}
}
[Test]
public void TestDin2Stemming()
{
// read test cases from external file(s):
foreach (var file in new[] { TestFile, TestFileDin2 })
{
using (var fis = new FileStream(file, FileMode.Open, FileAccess.Read, FileShare.Read))
using (var breader = new StreamReader(fis, Encoding.GetEncoding("iso-8859-1")))
{
string line;
while ((line = breader.ReadLine()) != null)
{
line = line.Trim();
if (line.StartsWith("#") || string.IsNullOrEmpty(line))
continue; // ignore comments and empty lines
var parts = line.Split(';');
Check(parts[0], parts[1], true);
}
}
}
}
[Test]
public void TestReusableTokenStream()
{
Analyzer a = new GermanAnalyzer(Version.LUCENE_CURRENT);
CheckReuse(a, "Tisch", "tisch");
CheckReuse(a, "Tische", "tisch");
CheckReuse(a, "Tischen", "tisch");
}
/*
* subclass that acts just like whitespace analyzer for testing
*/
private sealed class GermanSubclassAnalyzer : GermanAnalyzer
{
public GermanSubclassAnalyzer(Version matchVersion)
: base(matchVersion)
{
}
public override TokenStream TokenStream(String fieldName, TextReader reader)
{
return new WhitespaceTokenizer(reader);
}
}
[Test]
public void TestLucene1678BwComp()
{
CheckReuse(new GermanSubclassAnalyzer(Version.LUCENE_CURRENT), "Tischen", "Tischen");
}
/*
* Test that changes to the exclusion table are applied immediately
* when using reusable token streams.
*/
[Test]
public void TestExclusionTableReuse()
{
var a = new GermanAnalyzer(Version.LUCENE_CURRENT);
CheckReuse(a, "tischen", "tisch");
a.SetStemExclusionTable(new[] { "tischen" });
CheckReuse(a, "tischen", "tischen");
}
private void Check(String input, String expected, bool useDin2)
{
CheckOneTerm(new GermanAnalyzer(Version.LUCENE_CURRENT, useDin2), input, expected);
}
private void CheckReuse(Analyzer a, String input, String expected)
{
CheckOneTermReuse(a, input, expected);
}
}
}


@@ -0,0 +1,51 @@
# German special characters are replaced:
häufig;haufig
üor;uor
björk;bjork
# here the stemmer works okay, it maps related words to the same stem:
abschließen;abschliess
abschließender;abschliess
abschließendes;abschliess
abschließenden;abschliess
Tisch;tisch
Tische;tisch
Tischen;tisch
geheimtür;geheimtur
Haus;hau
Hauses;hau
Häuser;hau
Häusern;hau
# here's a case where overstemming occurs, i.e. a word is
# mapped to the same stem as unrelated words:
hauen;hau
# here's a case where understemming occurs, i.e. two related words
# are not mapped to the same stem. This is the case with basically
# all irregular forms:
Drama;drama
Dramen;dram
# replace "ß" with 'ss':
Ausmaß;ausmass
# fake words to test if suffixes are cut off:
xxxxxe;xxxxx
xxxxxs;xxxxx
xxxxxn;xxxxx
xxxxxt;xxxxx
xxxxxem;xxxxx
xxxxxer;xxxxx
xxxxxnd;xxxxx
# the suffixes are also removed when combined:
xxxxxetende;xxxxx
# words that are shorter than four characters are not changed:
xxe;xxe
# -em and -er are not removed from words shorter than five characters:
xxem;xxem
xxer;xxer
# -nd is not removed from words shorter than six characters:
xxxnd;xxxnd


@@ -0,0 +1,8 @@
# Test cases for words with ae, ue, or oe in them
Haus;hau
Hauses;hau
Haeuser;hau
Haeusern;hau
steuer;steur
rueckwaerts;ruckwar
geheimtuer;geheimtur


@@ -0,0 +1,122 @@
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Lucene.Net.Analysis;
using Lucene.Net.Analysis.El;
using Lucene.Net.Test.Analysis;
using NUnit.Framework;
using Version=Lucene.Net.Util.Version;
namespace Lucene.Net.Analyzers.El
{
/*
* A unit test class for verifying the correct operation of the GreekAnalyzer.
*
*/
[TestFixture]
public class GreekAnalyzerTest : BaseTokenStreamTestCase
{
/*
* Test the analysis of various Greek strings.
*
* @throws Exception in case an error occurs
*/
[Test]
public void TestAnalyzer()
{
Analyzer a = new GreekAnalyzer(Version.LUCENE_CURRENT);
// Verify the correct analysis of capitals and small accented letters
AssertAnalyzesTo(a,
"\u039c\u03af\u03b1 \u03b5\u03be\u03b1\u03b9\u03c1\u03b5\u03c4\u03b9\u03ba\u03ac \u03ba\u03b1\u03bb\u03ae \u03ba\u03b1\u03b9 \u03c0\u03bb\u03bf\u03cd\u03c3\u03b9\u03b1 \u03c3\u03b5\u03b9\u03c1\u03ac \u03c7\u03b1\u03c1\u03b1\u03ba\u03c4\u03ae\u03c1\u03c9\u03bd \u03c4\u03b7\u03c2 \u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ae\u03c2 \u03b3\u03bb\u03ce\u03c3\u03c3\u03b1\u03c2",
new String[]
{
"\u03bc\u03b9\u03b1", "\u03b5\u03be\u03b1\u03b9\u03c1\u03b5\u03c4\u03b9\u03ba\u03b1",
"\u03ba\u03b1\u03bb\u03b7", "\u03c0\u03bb\u03bf\u03c5\u03c3\u03b9\u03b1",
"\u03c3\u03b5\u03b9\u03c1\u03b1",
"\u03c7\u03b1\u03c1\u03b1\u03ba\u03c4\u03b7\u03c1\u03c9\u03bd",
"\u03b5\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03b7\u03c3",
"\u03b3\u03bb\u03c9\u03c3\u03c3\u03b1\u03c3"
});
// Verify the correct analysis of small letters with diaeresis and the elimination
// of punctuation marks
AssertAnalyzesTo(a,
"\u03a0\u03c1\u03bf\u03ca\u03cc\u03bd\u03c4\u03b1 (\u03ba\u03b1\u03b9) [\u03c0\u03bf\u03bb\u03bb\u03b1\u03c0\u03bb\u03ad\u03c2] - \u0391\u039d\u0391\u0393\u039a\u0395\u03a3",
new String[]
{
"\u03c0\u03c1\u03bf\u03b9\u03bf\u03bd\u03c4\u03b1",
"\u03c0\u03bf\u03bb\u03bb\u03b1\u03c0\u03bb\u03b5\u03c3",
"\u03b1\u03bd\u03b1\u03b3\u03ba\u03b5\u03c3"
});
// Verify the correct analysis of capital accented letters and capital letters with diaeresis,
// as well as the elimination of stop words
AssertAnalyzesTo(a,
"\u03a0\u03a1\u039f\u03ab\u03a0\u039f\u0398\u0395\u03a3\u0395\u0399\u03a3 \u0386\u03c8\u03bf\u03b3\u03bf\u03c2, \u03bf \u03bc\u03b5\u03c3\u03c4\u03cc\u03c2 \u03ba\u03b1\u03b9 \u03bf\u03b9 \u03ac\u03bb\u03bb\u03bf\u03b9",
new String[]
{
"\u03c0\u03c1\u03bf\u03c5\u03c0\u03bf\u03b8\u03b5\u03c3\u03b5\u03b9\u03c3",
"\u03b1\u03c8\u03bf\u03b3\u03bf\u03c3", "\u03bc\u03b5\u03c3\u03c4\u03bf\u03c3",
"\u03b1\u03bb\u03bb\u03bf\u03b9"
});
}
[Test]
public void TestReusableTokenStream()
{
Analyzer a = new GreekAnalyzer(Version.LUCENE_CURRENT);
// Verify the correct analysis of capitals and small accented letters
AssertAnalyzesToReuse(a,
"\u039c\u03af\u03b1 \u03b5\u03be\u03b1\u03b9\u03c1\u03b5\u03c4\u03b9\u03ba\u03ac \u03ba\u03b1\u03bb\u03ae \u03ba\u03b1\u03b9 \u03c0\u03bb\u03bf\u03cd\u03c3\u03b9\u03b1 \u03c3\u03b5\u03b9\u03c1\u03ac \u03c7\u03b1\u03c1\u03b1\u03ba\u03c4\u03ae\u03c1\u03c9\u03bd \u03c4\u03b7\u03c2 \u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ae\u03c2 \u03b3\u03bb\u03ce\u03c3\u03c3\u03b1\u03c2",
new String[]
{
"\u03bc\u03b9\u03b1",
"\u03b5\u03be\u03b1\u03b9\u03c1\u03b5\u03c4\u03b9\u03ba\u03b1",
"\u03ba\u03b1\u03bb\u03b7", "\u03c0\u03bb\u03bf\u03c5\u03c3\u03b9\u03b1",
"\u03c3\u03b5\u03b9\u03c1\u03b1",
"\u03c7\u03b1\u03c1\u03b1\u03ba\u03c4\u03b7\u03c1\u03c9\u03bd",
"\u03b5\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03b7\u03c3",
"\u03b3\u03bb\u03c9\u03c3\u03c3\u03b1\u03c3"
});
// Verify the correct analysis of small letters with diaeresis and the elimination
// of punctuation marks
AssertAnalyzesToReuse(a,
"\u03a0\u03c1\u03bf\u03ca\u03cc\u03bd\u03c4\u03b1 (\u03ba\u03b1\u03b9) [\u03c0\u03bf\u03bb\u03bb\u03b1\u03c0\u03bb\u03ad\u03c2] - \u0391\u039d\u0391\u0393\u039a\u0395\u03a3",
new String[]
{
"\u03c0\u03c1\u03bf\u03b9\u03bf\u03bd\u03c4\u03b1",
"\u03c0\u03bf\u03bb\u03bb\u03b1\u03c0\u03bb\u03b5\u03c3",
"\u03b1\u03bd\u03b1\u03b3\u03ba\u03b5\u03c3"
});
// Verify the correct analysis of capital accented letters and capital letters with diaeresis,
// as well as the elimination of stop words
AssertAnalyzesToReuse(a,
"\u03a0\u03a1\u039f\u03ab\u03a0\u039f\u0398\u0395\u03a3\u0395\u0399\u03a3 \u0386\u03c8\u03bf\u03b3\u03bf\u03c2, \u03bf \u03bc\u03b5\u03c3\u03c4\u03cc\u03c2 \u03ba\u03b1\u03b9 \u03bf\u03b9 \u03ac\u03bb\u03bb\u03bf\u03b9",
new String[]
{
"\u03c0\u03c1\u03bf\u03c5\u03c0\u03bf\u03b8\u03b5\u03c3\u03b5\u03b9\u03c3",
"\u03b1\u03c8\u03bf\u03b3\u03bf\u03c3", "\u03bc\u03b5\u03c3\u03c4\u03bf\u03c3",
"\u03b1\u03bb\u03bb\u03bf\u03b9"
});
}
}
}


@@ -0,0 +1,235 @@
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Lucene.Net.Analysis;
using Lucene.Net.Analysis.Fa;
using Lucene.Net.Test.Analysis;
using NUnit.Framework;
using Version = Lucene.Net.Util.Version;
namespace Lucene.Net.Analyzers.Fa
{
/*
* Test the Persian Analyzer
*
*/
[TestFixture]
public class TestPersianAnalyzer : BaseTokenStreamTestCase
{
/*
* This test fails with NPE when the stopwords file is missing in classpath
*/
[Test]
public void TestResourcesAvailable()
{
new PersianAnalyzer(Version.LUCENE_CURRENT);
}
/*
* This test shows how the combination of tokenization (breaking on zero-width
* non-joiner), normalization (such as treating Arabic YEH and Farsi YEH the
* same), and stopwords creates a light-stemming effect for verbs.
*
* These verb forms are from http://en.wikipedia.org/wiki/Persian_grammar
*/
[Test]
public void TestBehaviorVerbs()
{
Analyzer a = new PersianAnalyzer(Version.LUCENE_CURRENT);
// active present indicative
AssertAnalyzesTo(a, "می‌خورد", new String[] { "خورد" });
// active preterite indicative
AssertAnalyzesTo(a, "خورد", new String[] { "خورد" });
// active imperfective preterite indicative
AssertAnalyzesTo(a, "می‌خورد", new String[] { "خورد" });
// active future indicative
AssertAnalyzesTo(a, "خواهد خورد", new String[] { "خورد" });
// active present progressive indicative
AssertAnalyzesTo(a, "دارد می‌خورد", new String[] { "خورد" });
// active preterite progressive indicative
AssertAnalyzesTo(a, "داشت می‌خورد", new String[] { "خورد" });
// active perfect indicative
AssertAnalyzesTo(a, "خورده‌است", new String[] { "خورده" });
// active imperfective perfect indicative
AssertAnalyzesTo(a, "می‌خورده‌است", new String[] { "خورده" });
// active pluperfect indicative
AssertAnalyzesTo(a, "خورده بود", new String[] { "خورده" });
// active imperfective pluperfect indicative
AssertAnalyzesTo(a, "می‌خورده بود", new String[] { "خورده" });
// active preterite subjunctive
AssertAnalyzesTo(a, "خورده باشد", new String[] { "خورده" });
// active imperfective preterite subjunctive
AssertAnalyzesTo(a, "می‌خورده باشد", new String[] { "خورده" });
// active pluperfect subjunctive
AssertAnalyzesTo(a, "خورده بوده باشد", new String[] { "خورده" });
// active imperfective pluperfect subjunctive
AssertAnalyzesTo(a, "می‌خورده بوده باشد", new String[] { "خورده" });
// passive present indicative
AssertAnalyzesTo(a, "خورده می‌شود", new String[] { "خورده" });
// passive preterite indicative
AssertAnalyzesTo(a, "خورده شد", new String[] { "خورده" });
// passive imperfective preterite indicative
AssertAnalyzesTo(a, "خورده می‌شد", new String[] { "خورده" });
// passive perfect indicative
AssertAnalyzesTo(a, "خورده شده‌است", new String[] { "خورده" });
// passive imperfective perfect indicative
AssertAnalyzesTo(a, "خورده می‌شده‌است", new String[] { "خورده" });
// passive pluperfect indicative
AssertAnalyzesTo(a, "خورده شده بود", new String[] { "خورده" });
// passive imperfective pluperfect indicative
AssertAnalyzesTo(a, "خورده می‌شده بود", new String[] { "خورده" });
// passive future indicative
AssertAnalyzesTo(a, "خورده خواهد شد", new String[] { "خورده" });
// passive present progressive indicative
AssertAnalyzesTo(a, "دارد خورده می‌شود", new String[] { "خورده" });
// passive preterite progressive indicative
AssertAnalyzesTo(a, "داشت خورده می‌شد", new String[] { "خورده" });
// passive present subjunctive
AssertAnalyzesTo(a, "خورده شود", new String[] { "خورده" });
// passive preterite subjunctive
AssertAnalyzesTo(a, "خورده شده باشد", new String[] { "خورده" });
// passive imperfective preterite subjunctive
AssertAnalyzesTo(a, "خورده می‌شده باشد", new String[] { "خورده" });
// passive pluperfect subjunctive
AssertAnalyzesTo(a, "خورده شده بوده باشد", new String[] { "خورده" });
// passive imperfective pluperfect subjunctive
AssertAnalyzesTo(a, "خورده می‌شده بوده باشد", new String[] { "خورده" });
// active present subjunctive
AssertAnalyzesTo(a, "بخورد", new String[] { "بخورد" });
}
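/*
 * Illustrative sketch (not part of the original tests): the same conflation
 * can be observed by draining the analyzer's token stream by hand, using the
 * ITermAttribute API that other tests in this import rely on. The field name
 * "content" and the method name are arbitrary placeholders.
 */
[Test]
public void TestTokenInspectionSketch()
{
    Analyzer a = new PersianAnalyzer(Version.LUCENE_CURRENT);
    // "می‌خورد" contains a zero-width non-joiner, so the tokenizer splits it,
    // and the stopword list then drops the "می" prefix token.
    TokenStream ts = a.TokenStream("content", new System.IO.StringReader("می‌خورد"));
    Lucene.Net.Analysis.Tokenattributes.ITermAttribute term =
        ts.GetAttribute<Lucene.Net.Analysis.Tokenattributes.ITermAttribute>();
    List<String> tokens = new List<String>();
    while (ts.IncrementToken())
        tokens.Add(term.Term);
    Assert.AreEqual(1, tokens.Count);
    Assert.AreEqual("خورد", tokens[0]);
}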
/*
* This test shows how the combination of tokenization and stopwords creates a
* light-stemming effect for verbs.
*
* In this case, these forms are presented with alternative orthography, using
* arabic yeh and whitespace. This yeh phenomenon is common for legacy text
* due to some previous bugs in Microsoft Windows.
*
* These verb forms are from http://en.wikipedia.org/wiki/Persian_grammar
*/
[Test]
public void TestBehaviorVerbsDefective()
{
Analyzer a = new PersianAnalyzer(Version.LUCENE_CURRENT);
// active present indicative
AssertAnalyzesTo(a, "مي خورد", new String[] { "خورد" });
// active preterite indicative
AssertAnalyzesTo(a, "خورد", new String[] { "خورد" });
// active imperfective preterite indicative
AssertAnalyzesTo(a, "مي خورد", new String[] { "خورد" });
// active future indicative
AssertAnalyzesTo(a, "خواهد خورد", new String[] { "خورد" });
// active present progressive indicative
AssertAnalyzesTo(a, "دارد مي خورد", new String[] { "خورد" });
// active preterite progressive indicative
AssertAnalyzesTo(a, "داشت مي خورد", new String[] { "خورد" });
// active perfect indicative
AssertAnalyzesTo(a, "خورده است", new String[] { "خورده" });
// active imperfective perfect indicative
AssertAnalyzesTo(a, "مي خورده است", new String[] { "خورده" });
// active pluperfect indicative
AssertAnalyzesTo(a, "خورده بود", new String[] { "خورده" });
// active imperfective pluperfect indicative
AssertAnalyzesTo(a, "مي خورده بود", new String[] { "خورده" });
// active preterite subjunctive
AssertAnalyzesTo(a, "خورده باشد", new String[] { "خورده" });
// active imperfective preterite subjunctive
AssertAnalyzesTo(a, "مي خورده باشد", new String[] { "خورده" });
// active pluperfect subjunctive
AssertAnalyzesTo(a, "خورده بوده باشد", new String[] { "خورده" });
// active imperfective pluperfect subjunctive
AssertAnalyzesTo(a, "مي خورده بوده باشد", new String[] { "خورده" });
// passive present indicative
AssertAnalyzesTo(a, "خورده مي شود", new String[] { "خورده" });
// passive preterite indicative
AssertAnalyzesTo(a, "خورده شد", new String[] { "خورده" });
// passive imperfective preterite indicative
AssertAnalyzesTo(a, "خورده مي شد", new String[] { "خورده" });
// passive perfect indicative
AssertAnalyzesTo(a, "خورده شده است", new String[] { "خورده" });
// passive imperfective perfect indicative
AssertAnalyzesTo(a, "خورده مي شده است", new String[] { "خورده" });
// passive pluperfect indicative
AssertAnalyzesTo(a, "خورده شده بود", new String[] { "خورده" });
// passive imperfective pluperfect indicative
AssertAnalyzesTo(a, "خورده مي شده بود", new String[] { "خورده" });
// passive future indicative
AssertAnalyzesTo(a, "خورده خواهد شد", new String[] { "خورده" });
// passive present progressive indicative
AssertAnalyzesTo(a, "دارد خورده مي شود", new String[] { "خورده" });
// passive preterite progressive indicative
AssertAnalyzesTo(a, "داشت خورده مي شد", new String[] { "خورده" });
// passive present subjunctive
AssertAnalyzesTo(a, "خورده شود", new String[] { "خورده" });
// passive preterite subjunctive
AssertAnalyzesTo(a, "خورده شده باشد", new String[] { "خورده" });
// passive imperfective preterite subjunctive
AssertAnalyzesTo(a, "خورده مي شده باشد", new String[] { "خورده" });
// passive pluperfect subjunctive
AssertAnalyzesTo(a, "خورده شده بوده باشد", new String[] { "خورده" });
// passive imperfective pluperfect subjunctive
AssertAnalyzesTo(a, "خورده مي شده بوده باشد", new String[] { "خورده" });
// active present subjunctive
AssertAnalyzesTo(a, "بخورد", new String[] { "بخورد" });
}
/*
* This test shows how the combination of tokenization (breaking on zero-width
* non-joiner or space) and stopwords creates a light-stemming effect for
* nouns, removing the plural -ha.
*/
[Test]
public void TestBehaviorNouns()
{
Analyzer a = new PersianAnalyzer(Version.LUCENE_CURRENT);
AssertAnalyzesTo(a, "برگ ها", new String[] { "برگ" });
AssertAnalyzesTo(a, "برگ‌ها", new String[] { "برگ" });
}
/*
* Test showing that non-persian text is treated very much like SimpleAnalyzer
* (lowercased, etc)
*/
[Test]
public void TestBehaviorNonPersian()
{
Analyzer a = new PersianAnalyzer(Version.LUCENE_CURRENT);
AssertAnalyzesTo(a, "English test.", new String[] { "english", "test" });
}
/*
* Basic test ensuring that reusableTokenStream works correctly.
*/
[Test]
public void TestReusableTokenStream()
{
Analyzer a = new PersianAnalyzer(Version.LUCENE_CURRENT);
AssertAnalyzesToReuse(a, "خورده مي شده بوده باشد", new String[] { "خورده" });
AssertAnalyzesToReuse(a, "برگ‌ها", new String[] { "برگ" });
}
/*
* Test that custom stopwords work, and are not case-sensitive.
*/
[Test]
public void TestCustomStopwords()
{
PersianAnalyzer a = new PersianAnalyzer(Version.LUCENE_CURRENT, new String[] { "the", "and", "a" });
AssertAnalyzesTo(a, "The quick brown fox.", new String[] { "quick",
"brown", "fox" });
}
}
}

View File

@@ -0,0 +1,87 @@
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using Lucene.Net.Analysis;
using Lucene.Net.Analysis.AR;
using Lucene.Net.Analysis.Fa;
using Lucene.Net.Test.Analysis;
using NUnit.Framework;
namespace Lucene.Net.Analyzers.Fa
{
/*
* Test the Persian Normalization Filter
*
*/
[TestFixture]
public class TestPersianNormalizationFilter : BaseTokenStreamTestCase
{
[Test]
public void TestFarsiYeh()
{
Check("های", "هاي");
}
[Test]
public void TestYehBarree()
{
Check("هاے", "هاي");
}
[Test]
public void TestKeheh()
{
Check("کشاندن", "كشاندن");
}
[Test]
public void TestHehYeh()
{
Check("كتابۀ", "كتابه");
}
[Test]
public void TestHehHamzaAbove()
{
Check("كتابهٔ", "كتابه");
}
[Test]
public void TestHehGoal()
{
Check("زادہ", "زاده");
}
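/*
 * Helper: tokenizes the input with ArabicLetterTokenizer, runs it through
 * PersianNormalizationFilter, and asserts that the stream yields exactly
 * the expected normalized token.
 */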
private void Check(String input, String expected)
{
ArabicLetterTokenizer tokenStream = new ArabicLetterTokenizer(
new StringReader(input));
PersianNormalizationFilter filter = new PersianNormalizationFilter(
tokenStream);
AssertTokenStreamContents(filter, new String[] { expected });
}
}
}

View File

@@ -0,0 +1,218 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Lucene.Net.Documents;
using Lucene.Net.Index;
using Lucene.Net.Search;
using Lucene.Net.Store;
using Lucene.Net.Analysis;
using Lucene.Net.Util;
using NUnit.Framework;
namespace Lucene.Net.Analysis
{
[TestFixture]
public class ChainedFilterTest : Lucene.Net.TestCase
{
public static int MAX = 500;
private RAMDirectory directory;
private IndexSearcher searcher;
private Query query;
// private DateFilter dateFilter; DateFilter was deprecated and removed
private TermRangeFilter dateFilter;
private QueryWrapperFilter bobFilter;
private QueryWrapperFilter sueFilter;
[SetUp]
public void SetUp()
{
directory = new RAMDirectory();
IndexWriter writer =
new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
// 1041397200000L is the Java test's epoch millis for 2003 January 01; reused
// verbatim as .NET ticks the absolute date differs, but only the relative
// millisecond steps matter below
DateTime cal = new DateTime(1041397200000L * TimeSpan.TicksPerMillisecond);
for (int i = 0; i < MAX; i++)
{
Document doc = new Document();
doc.Add(new Field("key", "" + (i + 1), Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.Add(new Field("owner", (i < MAX / 2) ? "bob" : "sue", Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.Add(new Field("date", (cal.Ticks / TimeSpan.TicksPerMillisecond).ToString(), Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.AddDocument(doc);
cal = cal.AddMilliseconds(1); // DateTime is immutable; the returned value must be reassigned
}
writer.Close();
searcher = new IndexSearcher(directory, true);
// query for everything to make life easier
BooleanQuery bq = new BooleanQuery();
bq.Add(new TermQuery(new Term("owner", "bob")), BooleanClause.Occur.SHOULD);
bq.Add(new TermQuery(new Term("owner", "sue")), BooleanClause.Occur.SHOULD);
query = bq;
// date filter matches everything too
//Date pastTheEnd = parseDate("2099 Jan 1");
// dateFilter = DateFilter.Before("date", pastTheEnd);
// just treat dates as strings and select the whole range for now...
dateFilter = new TermRangeFilter("date", "", "ZZZZ", true, true);
bobFilter = new QueryWrapperFilter(
new TermQuery(new Term("owner", "bob")));
sueFilter = new QueryWrapperFilter(
new TermQuery(new Term("owner", "sue")));
}
private ChainedFilter GetChainedFilter(Filter[] chain, ChainedFilter.Logic[] logic)
{
if (logic == null)
{
return new ChainedFilter(chain);
}
else
{
return new ChainedFilter(chain, logic);
}
}
private ChainedFilter GetChainedFilter(Filter[] chain, ChainedFilter.Logic logic)
{
return new ChainedFilter(chain, logic);
}
[Test]
public void TestSingleFilter()
{
ChainedFilter chain = GetChainedFilter(new Filter[] { dateFilter }, null);
int numHits = searcher.Search(query, chain, 1000).TotalHits;
Assert.AreEqual(MAX, numHits);
chain = new ChainedFilter(new Filter[] { bobFilter });
numHits = searcher.Search(query, chain, 1000).TotalHits;
Assert.AreEqual(MAX / 2, numHits);
chain = GetChainedFilter(new Filter[] { bobFilter }, new ChainedFilter.Logic[] { ChainedFilter.Logic.AND });
TopDocs hits = searcher.Search(query, chain, 1000);
numHits = hits.TotalHits;
Assert.AreEqual(MAX / 2, numHits);
Assert.AreEqual("bob", searcher.Doc(hits.ScoreDocs[0].doc).Get("owner"));
chain = GetChainedFilter(new Filter[] { bobFilter }, new ChainedFilter.Logic[] { ChainedFilter.Logic.ANDNOT });
hits = searcher.Search(query, chain, 1000);
numHits = hits.TotalHits;
Assert.AreEqual(MAX / 2, numHits);
Assert.AreEqual("sue", searcher.Doc(hits.ScoreDocs[0].doc).Get("owner"));
}
[Test]
public void TestOR()
{
ChainedFilter chain = GetChainedFilter(
new Filter[] { sueFilter, bobFilter }, null);
int numHits = searcher.Search(query, chain, 1000).TotalHits;
Assert.AreEqual(MAX, numHits, "OR matches all");
}
[Test]
public void TestAND()
{
ChainedFilter chain = GetChainedFilter(
new Filter[] { dateFilter, bobFilter }, ChainedFilter.Logic.AND);
TopDocs hits = searcher.Search(query, chain, 1000);
Assert.AreEqual(MAX / 2, hits.TotalHits, "AND matches just bob");
Assert.AreEqual("bob", searcher.Doc(hits.ScoreDocs[0].doc).Get("owner"));
}
[Test]
public void TestXOR()
{
ChainedFilter chain = GetChainedFilter(
new Filter[] { dateFilter, bobFilter }, ChainedFilter.Logic.XOR);
TopDocs hits = searcher.Search(query, chain, 1000);
Assert.AreEqual(MAX / 2, hits.TotalHits, "XOR matches sue");
Assert.AreEqual("sue", searcher.Doc(hits.ScoreDocs[0].doc).Get("owner"));
}
[Test]
public void TestANDNOT()
{
ChainedFilter chain = GetChainedFilter(
new Filter[] { dateFilter, sueFilter },
new ChainedFilter.Logic[] { ChainedFilter.Logic.AND, ChainedFilter.Logic.ANDNOT });
TopDocs hits = searcher.Search(query, chain, 1000);
Assert.AreEqual(MAX / 2, hits.TotalHits, "ANDNOT matches just bob");
Assert.AreEqual("bob", searcher.Doc(hits.ScoreDocs[0].doc).Get("owner"));
chain = GetChainedFilter(
new Filter[] { bobFilter, bobFilter },
new ChainedFilter.Logic[] { ChainedFilter.Logic.ANDNOT, ChainedFilter.Logic.ANDNOT });
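// Hedged reading of ChainedFilter's first-filter behavior: the leading ANDNOT
// inverts bobFilter against the full doc set (leaving sue's half), and the
// second ANDNOT subtracts bob's docs again, a no-op, hence MAX / 2 sues.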
hits = searcher.Search(query, chain, 1000);
Assert.AreEqual(MAX / 2, hits.TotalHits, "ANDNOT bob ANDNOT bob matches all sues");
Assert.AreEqual("sue", searcher.Doc(hits.ScoreDocs[0].doc).Get("owner"));
}
/*
private Date parseDate(String s) throws ParseException {
return new SimpleDateFormat("yyyy MMM dd", Locale.US).parse(s);
}
*/
[Test]
public void TestWithCachingFilter()
{
Directory dir = new RAMDirectory();
Analyzer analyzer = new WhitespaceAnalyzer();
IndexWriter writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
writer.Close();
Searcher searcher = new IndexSearcher(dir, true);
Query query = new TermQuery(new Term("none", "none"));
QueryWrapperFilter queryFilter = new QueryWrapperFilter(query);
CachingWrapperFilter cachingFilter = new CachingWrapperFilter(queryFilter);
searcher.Search(query, cachingFilter, 1);
CachingWrapperFilter cachingFilter2 = new CachingWrapperFilter(queryFilter);
Filter[] chain = new Filter[2];
chain[0] = cachingFilter;
chain[1] = cachingFilter2;
ChainedFilter cf = new ChainedFilter(chain);
// throws java.lang.ClassCastException: org.apache.lucene.util.OpenBitSet cannot be cast to java.util.BitSet
searcher.Search(new MatchAllDocsQuery(), cf, 1);
}
}
}

View File

@@ -0,0 +1,70 @@
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using Lucene.Net.Analysis;
using Lucene.Net.Analysis.Fr;
using Lucene.Net.Analysis.Standard;
using Lucene.Net.Analysis.Tokenattributes;
using Lucene.Net.Index;
using Lucene.Net.Test.Analysis;
using NUnit.Framework;
using Version=Lucene.Net.Util.Version;
namespace Lucene.Net.Analyzers.Fr
{
/*
*
*/
[TestFixture]
public class TestElision : BaseTokenStreamTestCase
{
[Test]
public void TestElision2()
{
String test = "Plop, juste pour voir l'embrouille avec O'brian. M'enfin.";
Tokenizer tokenizer = new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(test));
HashSet<String> articles = new HashSet<String>();
articles.Add("l");
articles.Add("M");
TokenFilter filter = new ElisionFilter(tokenizer, articles);
List<string> tas = Filtre(filter);
Assert.AreEqual("embrouille", tas[4]);
Assert.AreEqual("O'brian", tas[6]);
Assert.AreEqual("enfin", tas[7]);
}
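/*
 * Helper: drains the token filter and collects the term text of every token.
 */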
private List<string> Filtre(TokenFilter filter)
{
List<string> tas = new List<string>();
ITermAttribute termAtt = filter.GetAttribute<ITermAttribute>();
while (filter.IncrementToken())
{
tas.Add(termAtt.Term);
}
return tas;
}
}
}

View File

@@ -0,0 +1,179 @@
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Lucene.Net.Analysis;
using Lucene.Net.Analysis.Fr;
using Lucene.Net.Test.Analysis;
using NUnit.Framework;
using Version = Lucene.Net.Util.Version;
namespace Lucene.Net.Analyzers.Fr
{
/*
* Test case for FrenchAnalyzer.
*
* @version $version$
*/
[TestFixture]
public class TestFrenchAnalyzer : BaseTokenStreamTestCase
{
[Test]
public void TestAnalyzer()
{
FrenchAnalyzer fa = new FrenchAnalyzer(Version.LUCENE_CURRENT);
AssertAnalyzesTo(fa, "", new String[0]);
AssertAnalyzesTo(
fa,
"chien chat cheval",
new String[] {"chien", "chat", "cheval"});
AssertAnalyzesTo(
fa,
"chien CHAT CHEVAL",
new String[] {"chien", "chat", "cheval"});
AssertAnalyzesTo(
fa,
" chien ,? + = - CHAT /: > CHEVAL",
new String[] {"chien", "chat", "cheval"});
AssertAnalyzesTo(fa, "chien++", new String[] {"chien"});
AssertAnalyzesTo(
fa,
"mot \"entreguillemet\"",
new String[] {"mot", "entreguillemet"});
// let's do some french specific tests now
/* 1. couldn't resist:
   I would expect this to stay one term, as in French the hyphen
   is often used to compose words */
AssertAnalyzesTo(
fa,
"Jean-François",
new String[] {"jean", "françois"});
// 2. stopwords
AssertAnalyzesTo(
fa,
"le la chien les aux chat du des à cheval",
new String[] {"chien", "chat", "cheval"});
// some nouns and adjectives
AssertAnalyzesTo(
fa,
"lances chismes habitable chiste éléments captifs",
new String[]
{
"lanc",
"chism",
"habit",
"chist",
"élément",
"captif"
});
// some verbs
AssertAnalyzesTo(
fa,
"finissions souffrirent rugissante",
new String[] {"fin", "souffr", "rug"});
// some everything else
// aujourd'hui stays one term which is OK
AssertAnalyzesTo(
fa,
"C3PO aujourd'hui oeuf ïâöûàä anticonstitutionnellement Java++ ",
new String[]
{
"c3po",
"aujourd'hui",
"oeuf",
"ïâöûàä",
"anticonstitutionnel",
"jav"
});
// some more everything else
// here 1940-1945 stays as one term while 1940:1945 does not. Why?
AssertAnalyzesTo(
fa,
"33Bis 1940-1945 1940:1945 (---i+++)*",
new String[] {"33bis", "1940-1945", "1940", "1945", "i"});
AssertAnalyzesTo(fa, "abbeaux abdication abdications abondamment marieuses pageaux", new[]
{
"abbeau",
"abdiqu",
"abdiqu",
"abond",
"marieux",
"pageau"
});
}
[Test]
public void TestReusableTokenStream()
{
FrenchAnalyzer fa = new FrenchAnalyzer(Version.LUCENE_CURRENT);
// stopwords
AssertAnalyzesToReuse(
fa,
"le la chien les aux chat du des à cheval",
new String[] {"chien", "chat", "cheval"});
// some nouns and adjectives
AssertAnalyzesToReuse(
fa,
"lances chismes habitable chiste éléments captifs",
new String[]
{
"lanc",
"chism",
"habit",
"chist",
"élément",
"captif"
});
}
/*
* Test that changes to the exclusion table are applied immediately
* when using reusable token streams.
*/
[Test]
public void TestExclusionTableReuse()
{
FrenchAnalyzer fa = new FrenchAnalyzer(Version.LUCENE_CURRENT);
AssertAnalyzesToReuse(fa, "habitable", new String[] { "habit" });
fa.SetStemExclusionTable(new String[] { "habitable" });
AssertAnalyzesToReuse(fa, "habitable", new String[] { "habitable" });
}
}
}

View File

@@ -0,0 +1,207 @@
# testcomment
# Alles so schön! (German: "everything so nice!")
SET ISO8859-1
TRY esianrtolcdugmphbyfvkwzESIANRTOLCDUGMPHBYFVKWZ'
NOSUGGEST !
# ordinal numbers
COMPOUNDMIN 1
# only in compounds: 1th, 2th, 3th
ONLYINCOMPOUND c
# compound rules:
# 1. [0-9]*1[0-9]th (10th, 11th, 12th, 56714th, etc.)
# 2. [0-9]*[02-9](1st|2nd|3rd|[4-9]th) (21st, 22nd, 123rd, 1234th, etc.)
COMPOUNDRULE 2
COMPOUNDRULE n*1t
COMPOUNDRULE n*mp
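# worked illustration (hedged: assumes the companion .dic flags digits with
# n/m, the digit 1 also with 1, and the ordinal endings with t/p, as in the
# stock en_US test files):
#   11th  = 1(1) + 1th(t)          -> matches COMPOUNDRULE n*1t
#   121st = 1(n) + 2(m) + 1st(p)   -> matches COMPOUNDRULE n*mp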
WORDCHARS 0123456789
PFX A Y 1
PFX A 0 re .
PFX I Y 1
PFX I 0 in .
PFX U Y 1
PFX U 0 un .
PFX C Y 1
PFX C 0 de .
PFX E Y 1
PFX E 0 dis .
PFX F Y 1
PFX F 0 con .
PFX K Y 1
PFX K 0 pro .
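# reading a prefix line, e.g. "PFX A 0 re .": strip nothing ("0"), prepend
# "re", condition "." matches any stem; a hypothetical entry enter/A would
# then also accept "reenter".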
SFX V N 2
SFX V e ive e
SFX V 0 ive [^e]
SFX N Y 3
SFX N e ion e
SFX N y ication y
SFX N 0 en [^ey]
SFX X Y 3
SFX X e ions e
SFX X y ications y
SFX X 0 ens [^ey]
SFX H N 2
SFX H y ieth y
SFX H 0 th [^y]
SFX Y Y 1
SFX Y 0 ly .
SFX G Y 2
SFX G e ing e
SFX G 0 ing [^e]
SFX J Y 2
SFX J e ings e
SFX J 0 ings [^e]
SFX D Y 4
SFX D 0 d e
SFX D y ied [^aeiou]y
SFX D 0 ed [^ey]
SFX D 0 ed [aeiou]y
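# reading a suffix line, e.g. "SFX D y ied [^aeiou]y": for stems ending in
# consonant + y, strip the "y" and append "ied"; a hypothetical entry
# carry/D would then also accept "carried".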
SFX T N 4
SFX T 0 st e
SFX T y iest [^aeiou]y
SFX T 0 est [aeiou]y
SFX T 0 est [^ey]
SFX R Y 4
SFX R 0 r e
SFX R y ier [^aeiou]y
SFX R 0 er [aeiou]y
SFX R 0 er [^ey]
SFX Z Y 4
SFX Z 0 rs e
SFX Z y iers [^aeiou]y
SFX Z 0 ers [aeiou]y
SFX Z 0 ers [^ey]
SFX S Y 4
SFX S y ies [^aeiou]y
SFX S 0 s [aeiou]y
SFX S 0 es [sxzh]
SFX S 0 s [^sxzhy]
SFX P Y 3
SFX P y iness [^aeiou]y
SFX P 0 ness [aeiou]y
SFX P 0 ness [^y]
SFX M Y 1
SFX M 0 's .
SFX B Y 3
SFX B 0 able [^aeiou]
SFX B 0 able ee
SFX B e able [^aeiou]e
SFX L Y 1
SFX L 0 ment .
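# REP pairs drive spelling suggestions: each "REP a b" lets the suggester
# try substituting "b" for "a" in a misspelling, e.g. "REP shun tion" maps
# "nashun" toward "nation".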
REP 88
REP a ei
REP ei a
REP a ey
REP ey a
REP ai ie
REP ie ai
REP are air
REP are ear
REP are eir
REP air are
REP air ere
REP ere air
REP ere ear
REP ere eir
REP ear are
REP ear air
REP ear ere
REP eir are
REP eir ere
REP ch te
REP te ch
REP ch ti
REP ti ch
REP ch tu
REP tu ch
REP ch s
REP s ch
REP ch k
REP k ch
REP f ph
REP ph f
REP gh f
REP f gh
REP i igh
REP igh i
REP i uy
REP uy i
REP i ee
REP ee i
REP j di
REP di j
REP j gg
REP gg j
REP j ge
REP ge j
REP s ti
REP ti s
REP s ci
REP ci s
REP k cc
REP cc k
REP k qu
REP qu k
REP kw qu
REP o eau
REP eau o
REP o ew
REP ew o
REP oo ew
REP ew oo
REP ew ui
REP ui ew
REP oo ui
REP ui oo
REP ew u
REP u ew
REP oo u
REP u oo
REP u oe
REP oe u
REP u ieu
REP ieu u
REP ue ew
REP ew ue
REP uff ough
REP oo ieu
REP ieu oo
REP ier ear
REP ear ier
REP ear air
REP air ear
REP w qu
REP qu w
REP z ss
REP ss z
REP shun tion
REP shun sion
REP shun cion

Some files were not shown because too many files have changed in this diff.