
NLP Practice - Task 1


Task link: https://wx.zsxq.com/dweb/#/index/222248424811
Full code: https://github.com/gaussic/text-classification-cnn-rnn
Reference on character-level features for text classification: https://www.jianshu.com/p/dc00a5d597ed
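For comparison with the word-level pipeline below: character-level models treat every Chinese character as a token, so no word segmentation (and typically no stop-word removal) is needed. A minimal sketch of such a tokenizer (hypothetical, not part of the script that follows):

# Character-level tokenization: every non-whitespace character is one
# token, so jieba segmentation is unnecessary (sketch only, unused below)
def char_tokenize(text):
    return [ch for ch in text if not ch.isspace()]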
The data-processing script below tokenizes the dataset, removes stop words, and extracts features.

import jieba
import pandas as pd
from gensim.models import Word2Vec
from sklearn.feature_extraction.text import CountVectorizer


# Read the stop-word list (one word per line)
def read_stopword(filename):
    stopword = []
    with open(filename, 'r', encoding='utf-8') as fp:  # Chinese stop-word lists are usually UTF-8
        for line in fp:
            stopword.append(line.rstrip('\n'))
    return stopword


# Tokenize each document with jieba and drop stop words
def cut_data(data, stopword):
    stopword = set(stopword)  # set membership tests are O(1)
    words = []
    for content in data['content']:
        # jieba.cut yields tokens lazily; keep only non-stop words
        words.append([w for w in jieba.cut(content) if w not in stopword])
    data['content'] = words
    return data


# Flatten all documents of one split into a single token list
def word_list(data):
    all_word = []
    for word in data['content']:
        all_word.extend(word)
    return all_word


# Extract features: train Word2Vec on all splits, then map each document
# to the stacked vectors of its words
def feature(train_data, test_data, val_data):
    content = pd.concat([train_data['content'], test_data['content'], val_data['content']], ignore_index=True)
    # Bag-of-words alternative; note that CountVectorizer expects raw
    # strings, so the pre-tokenized lists would need re-joining first:
    # count_vec = CountVectorizer(max_features=300, min_df=2)
    # count_vec.fit(content.apply(' '.join))
    # train_fea = count_vec.transform(train_data['content'].apply(' '.join)).toarray()
    # test_fea = count_vec.transform(test_data['content'].apply(' '.join)).toarray()
    # val_fea = count_vec.transform(val_data['content'].apply(' '.join)).toarray()
    # gensim >= 4.0 renamed size -> vector_size and iter -> epochs
    model = Word2Vec(content, vector_size=100, min_count=1, window=10, epochs=10)
    # model.wv[tokens] looks up one 100-d vector per token,
    # giving a (len(tokens), 100) array per document
    train_fea = train_data['content'].apply(lambda x: model.wv[x])
    test_fea = test_data['content'].apply(lambda x: model.wv[x])
    val_fea = val_data['content'].apply(lambda x: model.wv[x])
    return train_fea, test_fea, val_fea


if __name__ == '__main__':
    # Each line of the cnews files is '<category label>\t<news text>'
    train_data = pd.read_csv('./data/task1/cnews/cnews.train.txt', names=['title', 'content'], sep='\t')  # (50000, 2)
    test_data = pd.read_csv('./data/task1/cnews/cnews.test.txt', names=['title', 'content'], sep='\t')  # (10000, 2)
    val_data = pd.read_csv('./data/task1/cnews/cnews.val.txt', names=['title', 'content'], sep='\t')  # (5000, 2)

    # Use a small sample of each split so the demo runs quickly
    train_data = train_data.head(50)
    test_data = test_data.head(50)
    val_data = val_data.head(50)

    stopword = read_stopword('./data/stopword.txt')
    train_data = cut_data(train_data, stopword)
    test_data = cut_data(test_data, stopword)
    val_data = cut_data(val_data, stopword)

    train_fea, test_fea, val_fea = feature(train_data, test_data, val_data)
    print(train_fea)
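
    # feature() returns one (num_tokens, 100) array per document, but most
    # classifiers expect a single fixed-length vector per document.
    # Mean-pooling the word vectors is one simple way to get that
    # (an illustrative choice, not prescribed by the task):
    import numpy as np
    train_X = np.stack(train_fea.apply(lambda m: m.mean(axis=0)).tolist())
    print(train_X.shape)  # (50, 100)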

    # Deduplicated vocabulary over all three splits
    all_word = []
    all_word.extend(word_list(train_data))
    all_word.extend(word_list(test_data))
    all_word.extend(word_list(val_data))
    all_word = list(set(all_word))
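
The set() above yields an unordered, deduplicated vocabulary. The linked repo (text-classification-cnn-rnn) instead ranks words by frequency and keeps only the most common ones; a minimal sketch of that variant, reusing word_list() from the script (the 5000-word cutoff is this sketch's assumption):

from collections import Counter

# Frequency-ranked vocabulary: count every token occurrence across all
# splits and keep the top-N words (N = 5000 assumed here)
counter = Counter(word_list(train_data) + word_list(test_data) + word_list(val_data))
vocab = [w for w, _ in counter.most_common(5000)]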

 
