Data
30,000 text samples, split into train/val/test at a 6:2:2 ratio.
Tools & Methods
XGBoost, scikit-learn, and gensim's Word2Vec.
Word2Vec embeds each word; the word vectors of a sentence are then directly averaged (mean pooling) to obtain a "sentence vector", and XGBoost classifies these sentence vectors.
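The pooling step is just an element-wise mean over a sentence's word vectors; a minimal sketch of the idea, assuming w2vec is a trained gensim KeyedVectors object and sen is a jieba-tokenized sentence (both are built in the full code below):

import numpy as np

# stack the (num_words, emb_size) word vectors, then average over the word axis
sen_v = np.vstack([w2vec[w] for w in sen]).mean(axis=0)  # shape: (emb_size,)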
Code
import jieba
import xgboost as xgb
from sklearn.model_selection import train_test_split
import numpy as np
from gensim.models import Word2Vec

# read one file per label and segment each line into words with jieba
def get_split_sentences(file_path):
    res_sen = []
    with open(file_path) as f:
        for line in f:
            res_sen.append(jieba.lcut(line.strip()))
    return res_sen

label0_sentences = get_split_sentences('label0.csv')
label1_sentences = get_split_sentences('label1.csv')
label2_sentences = get_split_sentences('label2.csv')

all_sentences = []
all_sentences.extend(label0_sentences)
all_sentences.extend(label1_sentences)
all_sentences.extend(label2_sentences)

# train word2vec on the whole corpus
emb_size = 128
win = 3
w2v_model = Word2Vec(sentences=all_sentences, vector_size=emb_size, window=win, min_count=1)

# retrieve word embeddings
w2vec = w2v_model.wv

# assemble sentence embeddings: sentence vector = mean of its word vectors
def assemble_x(w2vec, sentences):
    sen_vs = []
    for sen in sentences:
        v = np.vstack([w2vec[w] for w in sen])
        sen_vs.append(v.mean(axis=0))
    return np.array(sen_vs)

# ready the data for training; labels follow the extension order above:
# 13k label-0, 13k label-1, 4k label-2
x = assemble_x(w2vec, all_sentences)
y = np.array([0] * 13000 + [1] * 13000 + [2] * 4000)
# 6:2:2 split: take 60% for train, then halve the remaining 40% into val/test
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.6, shuffle=True)
x_val, x_test, y_val, y_test = train_test_split(x_test, y_test, train_size=0.5, shuffle=True)

dtrain = xgb.DMatrix(x_train, y_train)
dval = xgb.DMatrix(x_val, y_val)
dtest = xgb.DMatrix(x_test, y_test)

params = {
    'booster': 'gbtree',
    'objective': 'multi:softmax',
    'num_class': 3,
    'max_depth': 20,
}
evals = [(dtrain, 'train'), (dval, 'valid')]
bst = xgb.train(params, dtrain=dtrain, evals=evals)
preds = bst.predict(dtest)

def get_scores(preds, gt):
    from sklearn import metrics
    # AUC needs probability scores; multi:softmax outputs hard labels
    # print('AUC: %.4f' % metrics.roc_auc_score(gt, preds))
    print('ACC: %.4f' % metrics.accuracy_score(gt, preds))
    print('macro:')
    print('Recall: %.4f' % metrics.recall_score(gt, preds, average='macro'))
    print('F1-score: %.4f' % metrics.f1_score(gt, preds, average='macro'))
    print('Precision: %.4f' % metrics.precision_score(gt, preds, average='macro'))
    print('\nmicro:')
    print('Recall: %.4f' % metrics.recall_score(gt, preds, average='micro'))
    print('F1-score: %.4f' % metrics.f1_score(gt, preds, average='micro'))
    print('Precision: %.4f' % metrics.precision_score(gt, preds, average='micro'))

get_scores(preds, y_test)
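One caveat: min_count=1 keeps every word in the vocabulary, so the w2vec[w] lookup above never fails. If min_count is raised, or new sentences contain unseen words at inference time, that lookup raises a KeyError. A hedged variant of assemble_x that skips out-of-vocabulary words (the zero-vector fallback for fully-OOV sentences is my assumption, not part of the original):

def assemble_x_safe(w2vec, sentences, emb_size=128):
    # like assemble_x, but tolerant of words missing from the vocabulary
    sen_vs = []
    for sen in sentences:
        vs = [w2vec[w] for w in sen if w in w2vec]  # skip OOV words
        if vs:
            sen_vs.append(np.vstack(vs).mean(axis=0))
        else:
            # assumption: fall back to a zero vector when no word is in vocabulary
            sen_vs.append(np.zeros(emb_size, dtype=np.float32))
    return np.array(sen_vs)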
Results
ACC: 0.9402
macro:
Recall: 0.9330
F1-score: 0.9391
Precision: 0.9459
micro:
Recall: 0.9402
F1-score: 0.9402
Precision: 0.9402
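Since label 2 has only 4,000 samples versus 13,000 each for labels 0 and 1, a per-class breakdown is worth checking alongside these aggregate numbers; a minimal sketch using scikit-learn, reusing preds and y_test from the code above:

from sklearn.metrics import classification_report, confusion_matrix

# per-class precision/recall/F1, plus the raw confusion matrix
print(classification_report(y_test, preds, digits=4))
print(confusion_matrix(y_test, preds))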
Summary
Even sentence vectors built by very crudely averaging word embeddings reach roughly 94% accuracy. This is partly because the task itself is simple, and partly because XGBoost's boosting works well.