
A lucene.net tokenizer interface built with ICTCLAS30

April 22, 2013

I have been tinkering with search-engine technology and am only just getting started with word segmentation. After comparing several well-known Chinese segmenters, I decided to go with the one from the Chinese Academy of Sciences (ICTCLAS). There is an open-source C# port, and a lucene.net tokenizer interface based on SharpICTCLAS already exists online, but I wanted to try a stronger segmenter, so I chose the 2009 shared edition. My programming skills are still quite rough, so please point out anything I got wrong.

 

The tokenizer interface code is adapted from these two blog posts: http://ythzjk.javaeye.com/blog/334194 and http://www.cnblogs.com/birdshover/archive/2009/04/03/1122305.html#1494633

 

The code is neither long nor difficult; after a careful read it is easy to plug your own segmenter into lucene. As far as I can tell, only the Next() method of the Tokenizer needs to be changed. However, I think their code has one problem: the segmenter is initialized in the Tokenizer's constructor, which hurts efficiency, because the dictionary is reloaded every time a piece of text (a title, a body) is tokenized. So I made the dictionary-loading part static.
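As a rough sketch of that change (the class name here is made up for illustration), a static field is initialized by the CLR type initializer exactly once per process, so constructing many tokenizers no longer reloads the dictionary:

using System;

// Hypothetical sketch: the dictionary is loaded once by the static initializer,
// not in the instance constructor, so creating many tokenizers stays cheap.
class SegmenterHost
{
    // Runs once, the first time the type is used.
    private static readonly bool Initialized =
        ICTCLAS.ICTCLAS_Init(@"E:/MyCsProj/ttt/bin/Debug");

    public SegmenterHost()
    {
        // The constructor only checks the flag; no per-instance dictionary load.
        if (!Initialized)
            throw new InvalidOperationException("Init ICTCLAS failed!");
    }
}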

 

The wrapper for calling ICTCLAS30.dll; in fact only a handful of its methods are actually used:

using System;
using System.Collections.Generic;
using System.Runtime.InteropServices;
namespace TestLucene
{
    // Per-token result structure filled by ICTCLAS_ParagraphProcessAW.
    [StructLayout(LayoutKind.Explicit)]
    public struct result_t
    {
        [FieldOffset(0)]
        public int start;
        [FieldOffset(4)]
        public int length;
        [FieldOffset(8)]
        public int sPos;
        [FieldOffset(12)]
        public int sPosLow;
        [FieldOffset(16)]
        public int POS_id;
        [FieldOffset(20)]
        public int word_ID;
        [FieldOffset(24)]
        public int word_type;
        [FieldOffset(28)]
        public int weight;
    }
    /// <summary>
    /// P/Invoke wrapper for the ICTCLAS30.dll segmentation API.
    /// </summary>
    public class ICTCLAS
    {
        const string path = @"ICTCLAS30.dll";
        [DllImport(path, CharSet = CharSet.Ansi, EntryPoint = "ICTCLAS_Init")]
        public static extern bool ICTCLAS_Init(String sInitDirPath);
        [DllImport(path, CharSet = CharSet.Ansi, EntryPoint = "ICTCLAS_ParagraphProcess")]
        public static extern String ICTCLAS_ParagraphProcess(String sParagraph, int bPOStagged);
        [DllImport(path, CharSet = CharSet.Ansi, EntryPoint = "ICTCLAS_Exit")]
        public static extern bool ICTCLAS_Exit();
        [DllImport(path, CharSet = CharSet.Ansi, EntryPoint = "ICTCLAS_ImportUserDict")]
        public static extern int ICTCLAS_ImportUserDict(String sFilename);
        [DllImport(path, CharSet = CharSet.Ansi, EntryPoint = "ICTCLAS_FileProcess")]
        public static extern bool ICTCLAS_FileProcess(String sSrcFilename, String sDestFilename, int bPOStagged);
        [DllImport(path, CharSet = CharSet.Ansi, EntryPoint = "ICTCLAS_FileProcessEx")]
        public static extern bool ICTCLAS_FileProcessEx(String sSrcFilename, String sDestFilename);
        [DllImport(path, CharSet = CharSet.Ansi, EntryPoint = "ICTCLAS_GetParagraphProcessAWordCount")]
        public static extern int ICTCLAS_GetParagraphProcessAWordCount(String sParagraph);
        //ICTCLAS_GetParagraphProcessAWordCount
        [DllImport(path, CharSet = CharSet.Ansi, EntryPoint = "ICTCLAS_ParagraphProcessAW")]
        public static extern void ICTCLAS_ParagraphProcessAW(int nCount, [Out, MarshalAs(UnmanagedType.LPArray)] result_t[] result);
        [DllImport(path, CharSet = CharSet.Ansi, EntryPoint = "ICTCLAS_AddUserWord")]
        public static extern int ICTCLAS_AddUserWord(String sWord);
        [DllImport(path, CharSet = CharSet.Ansi, EntryPoint = "ICTCLAS_SaveTheUsrDic")]
        public static extern int ICTCLAS_SaveTheUsrDic();
        [DllImport(path, CharSet = CharSet.Ansi, EntryPoint = "ICTCLAS_DelUsrWord")]
        public static extern int ICTCLAS_DelUsrWord(String sWord);
    }
}
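Before wiring it into lucene.net, the wrapper can be smoke-tested on its own. This is a hypothetical snippet (the data path and sample sentence are made up) that uses only the entry points declared above:

using System;

namespace TestLucene
{
    // Hypothetical smoke test: assumes ICTCLAS30.dll and its Data directory
    // sit next to the executable.
    class IctclasDemo
    {
        static void Main()
        {
            if (!ICTCLAS.ICTCLAS_Init(@"."))                // load the dictionaries once
            {
                Console.WriteLine("Init ICTCLAS failed!");
                return;
            }
            ICTCLAS.ICTCLAS_ImportUserDict("userdict.txt"); // optional user dictionary

            // bPOStagged = 1 appends a part-of-speech tag to every word.
            string tagged = ICTCLAS.ICTCLAS_ParagraphProcess("我爱北京天安门", 1);
            Console.WriteLine(tagged);

            ICTCLAS.ICTCLAS_Exit();                         // release the dictionaries
        }
    }
}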

ICTCLASAnalyzer:

using System;
using System.Collections.Generic;
using System.Text;
using System.IO;
using Lucene.Net.Analysis;
using Lucene.Net.Analysis.Standard;
using System.Collections;

namespace TestLucene
{

    public class ICTCLASAnalyzer : Analyzer
    {
        // Stop words to filter out
        public string[] CHINESE_ENGLISH_STOP_WORDS;
        public string StopPath = @"E:/MyCsProj/TestLucene/Stopwords.txt";

        public ICTCLASAnalyzer()
        {
            ArrayList StopWords = new ArrayList();
            StreamReader reader = new StreamReader(StopPath, System.Text.Encoding.UTF8);
            string noise = reader.ReadLine();
            int i = 0;
            while (!string.IsNullOrEmpty(noise))
            {
                StopWords.Add(noise);
                noise = reader.ReadLine();
                i++;
            }
            reader.Close();   // release the file handle once the stop-word list is loaded
            CHINESE_ENGLISH_STOP_WORDS = new String[i];

            while (i > 0)
            {
                i--;
                CHINESE_ENGLISH_STOP_WORDS[i] = (string)StopWords[i];
            }
            StopWords.Clear();
        }

        /// <summary>
        /// Constructs an ICTCLASTokenizer filtered by a StandardFilter,
        /// a LowerCaseFilter and a StopFilter.
        /// </summary>
        public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
        {

            TokenStream result = new ICTCLASTokenizer(reader);
            result = new StandardFilter(result);
            result = new LowerCaseFilter(result);
            result = new StopFilter(result, CHINESE_ENGLISH_STOP_WORDS);
            return result;
        }

    }
}
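With the analyzer in place, indexing works the same way as with any other Lucene analyzer. A hypothetical indexing sketch, assuming the Lucene.Net 2.x-era API (the index path and field names are made up; Field.Index.TOKENIZED was renamed in later versions):

using Lucene.Net.Documents;
using Lucene.Net.Index;

namespace TestLucene
{
    // Hypothetical indexing sketch for the analyzer above.
    static class IndexingDemo
    {
        public static void BuildIndex(string title, string body)
        {
            IndexWriter writer = new IndexWriter(@"E:/MyCsProj/TestLucene/index",
                                                 new ICTCLASAnalyzer(), true);
            Document doc = new Document();
            doc.Add(new Field("title", title, Field.Store.YES, Field.Index.TOKENIZED));
            doc.Add(new Field("body", body, Field.Store.NO, Field.Index.TOKENIZED));
            writer.AddDocument(doc);   // the text is segmented by ICTCLASTokenizer here
            writer.Optimize();
            writer.Close();
        }
    }
}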

 

Tokenizer:

using System;
using System.Collections.Generic;
using System.Text;
using System.Text.RegularExpressions;
using Lucene.Net.Analysis;
using System.IO;

namespace TestLucene
{
    class ICTCLASTokenizer : Tokenizer
    {
        //result_t[] result; // I originally wanted to use this struct directly, but Chinese and
        // English characters have different byte lengths, so the reported positions no longer
        // matched the corresponding words in the original sentence.
        int startIndex = 0;
        int endIndex = 0;
        int i = 0;
        string[] pResult;
        /// <summary>
        /// The sentence to be segmented.
        /// </summary>
        private string sentence;

        // Dictionary loading is static so it runs only once per process,
        // not every time a tokenizer is constructed.
        public static bool flag = ICTCLAS.ICTCLAS_Init(@"E:/MyCsProj/ttt/bin/Debug");

        public static int userdic = ICTCLAS.ICTCLAS_ImportUserDict("userdict.txt");

        /// <summary>
        /// Constructs a tokenizer for this Reader.
        /// </summary>
        public ICTCLASTokenizer(System.IO.TextReader reader)
        {
            this.input = reader;

            sentence = input.ReadToEnd();
           

            if(!flag)
            {
                System.Console.WriteLine("Init ICTCLAS failed!");
                return;
            }

            // Segment the sentence with POS tagging enabled (bPOStagged = 1),
            // then split the tagged result string into individual "word/pos" items.
            string sResult = ICTCLAS.ICTCLAS_ParagraphProcess(sentence, 1);
            pResult = Regex.Split(sResult, @"(?<=\w) ");
            //Console.WriteLine(sResult);
        }

        /// <summary>
        /// Returns the next token in the stream, or null when the stream is exhausted.
        /// </summary>
        public override Token Next()
        {
            Token token = null;
            // Stop before the last split element, which is the leftover tail of the output.
            while (i < pResult.Length - 1)
            {
                // Strip the POS tag; each pResult item looks like "word/pos".
                string word = pResult[i].Split('/')[0];
                // Count whitespace inside the item so the offsets into the
                // original sentence stay aligned.
                MatchCollection rw = Regex.Matches(word, @"\s");
                int space = rw.Count;

                // Locate the word in the original sentence, starting from the end
                // of the previous match, to recover its character offsets.
                startIndex = sentence.IndexOf(word, endIndex);
                endIndex = startIndex + word.Length;

                token = new Token(sentence.Substring(startIndex + space, word.Length - space), startIndex + space, endIndex);

                i++;
                // Console.WriteLine("word: {0},({1},{2})", sentence.Substring(startIndex + space, word.Length - space), startIndex + space, endIndex);
                return token;
            }
            return null;
        }
    }
}
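To check what the tokenizer actually produces, a hypothetical test loop like the one below (again assuming the Lucene.Net 2.x Token API with TermText/StartOffset/EndOffset) prints every token with its offsets:

using System;
using System.IO;
using Lucene.Net.Analysis;

namespace TestLucene
{
    // Hypothetical check of the tokenizer output: print each token and its offsets.
    static class TokenizerDemo
    {
        public static void PrintTokens(string text)
        {
            TokenStream stream = new ICTCLASAnalyzer().TokenStream("content", new StringReader(text));
            Token token = stream.Next();
            while (token != null)
            {
                Console.WriteLine("{0} ({1},{2})", token.TermText(), token.StartOffset(), token.EndOffset());
                token = stream.Next();
            }
        }
    }
}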

 

 
