
NLP - Korean Language Text Analysis with RNN

# Korean-language data
import sys
import os
import numpy as np
import nltk
import konlpy
import pandas as pd
import re
import random
import itertools
import warnings
warnings.filterwarnings(action='ignore')
from sklearn.metrics import classification_report,f1_score,precision_score,recall_score
random.seed(1)

1. Data Load

train_data=pd.read_csv('final_train_data.csv')
test_data=pd.read_csv('final_test_data.csv')
# 384 duplicates in content, 240 in title
print(train_data.describe())
       category                                            content  title
count     10686                                              10686  10686
unique        7                                              10302  10124
top        정치개혁  개인회생 36개월 단축소급 전국 적용을 위해 춘천지방법원의 법원에 바란다에 글을 올...   경남제약
freq       3094                                                 16     21
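For reference, the duplicate counts quoted in the comment above can be reproduced with pandas' duplicated(); a quick check using the column names shown by describe():

print(train_data['content'].duplicated().sum())  # 10686 rows - 10302 unique = 384 duplicated content entries
print(train_data['title'].duplicated().sum())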

2. Preprocessing

2.1. Remove duplicates

train_data = train_data.drop_duplicates(['content'], keep='first')
train_data.duplicated().sum()
0
All duplicate rows have been removed.
# concatenate the content and title columns into a single 'document' field
train_data['document'] = train_data.iloc[:,1] + train_data.iloc[:,2]
train_data['document']
test_data['document'] = test_data.iloc[:,1] + test_data.iloc[:,2]

2.2. Regular Expression Filtering for Korean Text

# keep only Hangul (single jamo and complete syllables) and spaces
train_data['document'] = train_data['document'].str.replace("[^ㄱ-ㅎㅏ-ㅣ가-힣 ]", "", regex=True)
test_data['document'] = test_data['document'].str.replace("[^ㄱ-ㅎㅏ-ㅣ가-힣 ]", "", regex=True)
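The character classes cover single jamo (ㄱ-ㅎ, ㅏ-ㅣ) and complete syllables (가-힣), so everything except Korean text and spaces is stripped. A quick illustration on a made-up string:

import re
print(re.sub("[^ㄱ-ㅎㅏ-ㅣ가-힣 ]", "", "국민청원 Petition 2021!"))  # Latin letters, digits and punctuation are removed; Hangul and spaces remain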

3. Tokenizing with KoNLPy's Okt

# tokenize
from konlpy.tag import Okt
okt = Okt()  # morphological analyzer
tokenized_data = []
stopwords = ['의','가','이','은','들','는','좀','잘','걍','과','도','를','으로','자','에','와','한','하다']
for sentence in train_data['document']:
    temp_X = okt.morphs(sentence, norm=True, stem=True)  # extract morphemes (tokenize)
    temp_X = [word for word in temp_X if not word in stopwords]  # remove stopwords
    tokenized_data.append(temp_X)
x_train = tokenized_data
okt = Okt()  # morphological analyzer
tokenized_data = []
stopwords = ['의','가','이','은','들','는','좀','잘','걍','과','도','를','으로','자','에','와','한','하다']
for sentence in test_data['document']:
    temp_X = okt.morphs(sentence, norm=True, stem=True)  # extract morphemes (tokenize); norm=True normalizes variant spellings
    temp_X = [word for word in temp_X if not word in stopwords]  # remove stopwords; list based on https://www.ranks.nl/stopwords/korean
    tokenized_data.append(temp_X)
x_test = tokenized_data
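For reference, norm=True normalizes variant spellings and stem=True reduces inflected forms to their dictionary form (e.g. conjugated forms of 하다 come back as 하다). A small illustration; the exact segmentation depends on the konlpy/Okt version:

print(okt.morphs("보건복지 예산을 늘려 주시기 바랍니다", norm=True, stem=True))
print(okt.morphs("보건복지 예산을 늘려 주시기 바랍니다"))  # without stemming, the inflected endings are kept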
  • Save and re-import the processed data
import pickle
with open('nlp_final_x_tr.data', 'wb') as f:
    pickle.dump(x_train, f)
with open('nlp_final_x_te.data', 'wb') as f:
    pickle.dump(x_test, f)
import pickle
with open('nlp_final_x_tr.data', 'rb') as f:
    x_train = pickle.load(f)
with open('nlp_final_x_te.data', 'rb') as f:
    x_test = pickle.load(f)
from tensorflow.keras.preprocessing.text import Tokenizer
tokenizer = Tokenizer()
tokenizer.fit_on_texts(x_train)
threshold = 3
total_cnt = len(tokenizer.word_index)  # number of distinct words
rare_cnt = 0    # number of words that appear fewer than `threshold` times
total_freq = 0  # total number of word occurrences in the training data
rare_freq = 0   # total number of occurrences of words that appear fewer than `threshold` times

# iterate over each (word, frequency) pair
for key, value in tokenizer.word_counts.items():
    total_freq = total_freq + value

    # if the word appears fewer than `threshold` times
    if(value < threshold):
        rare_cnt = rare_cnt + 1
        rare_freq = rare_freq + value

print('Vocabulary size :', total_cnt)
print('Number of rare words appearing %s times or fewer: %s' % (threshold - 1, rare_cnt))
print("Share of rare words in the vocabulary:", (rare_cnt / total_cnt)*100)
print("Share of rare-word occurrences among all occurrences:", (rare_freq / total_freq)*100)
Vocabulary size : 34537
Number of rare words appearing 2 times or fewer: 15483
Share of rare words in the vocabulary: 44.830182123519705
Share of rare-word occurrences among all occurrences: 1.1132793250941202
vocab_size = total_cnt - rare_cnt + 1  # drop words appearing 2 times or fewer; +1 for the index-0 padding token
print('Vocabulary size :', vocab_size)
Vocabulary size : 19055
tokenizer = Tokenizer(vocab_size) 
tokenizer.fit_on_texts(x_train)
X_train = tokenizer.texts_to_sequences(x_train)
X_test = tokenizer.texts_to_sequences(x_test)
y_train=np.array(train_data.category)
y_test=np.array(test_data.category)
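Because num_words=vocab_size makes texts_to_sequences keep only the vocab_size - 1 most frequent word indices, a document made up entirely of rare words maps to an empty sequence; those are detected and dropped next. A toy illustration of the cutoff (made-up tokens, not the project corpus):

toy = Tokenizer(num_words=3)
toy.fit_on_texts([['정책', '정책', '예산', '예산', '희귀어']])
print(toy.texts_to_sequences([['정책', '예산', '희귀어']]))  # -> [[1, 2]]: the rare word falls outside the top num_words-1 indices
print(toy.texts_to_sequences([['희귀어']]))                 # -> [[]]: a document of only rare words becomes empty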
drop_train = [index for index, sentence in enumerate(X_train) if len(sentence) < 1]
drop_test = [index for index, sentence in enumerate(X_test) if len(sentence) < 1]
X_train = np.delete(X_train, drop_train, axis=0)
y_train = np.delete(y_train, drop_train, axis=0)
print(len(X_train))
print(len(y_train))
10301
10301
X_test = np.delete(X_test, drop_test, axis=0)
y_test = np.delete(y_test, drop_test, axis=0)
print(len(X_train))
print(len(X_test))
print(len(y_train))
print(len(y_test))
10301
1158
10301
1158
import matplotlib.pyplot as plt
print('Maximum document length :', max(len(l) for l in X_train))
print('Average document length :', sum(map(len, X_train))/len(X_train))
plt.hist([len(s) for s in X_train], bins=50)
plt.xlabel('length of samples')
plt.ylabel('number of samples')
plt.show()
Maximum document length : 9032
Average document length : 170.45791670711583

[Figure: histogram of document lengths (length of samples vs. number of samples)]

def below_threshold_len(max_len, nested_list):
    cnt = 0
    for s in nested_list:
        if(len(s) <= max_len):
            cnt = cnt + 1
    print('Share of samples with length <= %s: %s' % (max_len, (cnt / len(nested_list))*100))
max_len = 600
below_threshold_len(max_len, X_train)
Share of samples with length <= 600: 96.02951169789341
from tensorflow.keras.preprocessing.sequence import pad_sequences
X_train = pad_sequences(X_train, maxlen = max_len)
X_test = pad_sequences(X_test, maxlen = max_len)
# re-derive the labels from the raw data and drop the same empty-sequence indices as before
y_train = np.array(train_data.category)
y_test = np.array(test_data.category)
y_train = np.delete(y_train, drop_train, axis=0)
y_test = np.delete(y_test, drop_test, axis=0)
print(len(y_train), len(y_test))
10301 1158
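A note on the pad_sequences defaults used here: shorter sequences are left-padded with zeros and longer ones keep only their last max_len tokens (padding='pre', truncating='pre'). A toy illustration:

print(pad_sequences([[1, 2, 3]], maxlen=5))           # -> [[0 0 1 2 3]]
print(pad_sequences([[1, 2, 3, 4, 5, 6]], maxlen=5))  # -> [[2 3 4 5 6]]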

4. Train/Test Data Preparation

# map each category name to an integer 1-7, then one-hot encode the labels
t = {'경제민주화': 1, '교통/건축/국토': 2, '보건복지': 3, '육아/교육': 4, '인권/성평등': 5, '일자리': 6, '정치개혁': 7}
print(t['보건복지'])
index1 = np.zeros([10301, 7])  # one row per training sample, one column per class
for i in range(len(y_train)):
    index1[i][t[y_train[i]] - 1] = 1
y_train = index1

index1 = np.zeros([1158, 7])   # one row per test sample
for i in range(len(y_test)):
    index1[i][t[y_test[i]] - 1] = 1
y_test = index1

print(y_train.shape, y_test.shape)
3
(10301, 7) (1158, 7)
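The manual one-hot loops above are equivalent to mapping each label through t and calling keras' to_categorical; a sketch of that alternative, where y_train_labels and y_test_labels are hypothetical names for the string labels as they were before the loops:

from tensorflow.keras.utils import to_categorical
# hypothetical: y_train_labels / y_test_labels hold the original string categories
y_train_alt = to_categorical([t[label] - 1 for label in y_train_labels], num_classes=7)
y_test_alt = to_categorical([t[label] - 1 for label in y_test_labels], num_classes=7)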
X_train=np.array(X_train)
X_test=np.array(X_test)
y_train=np.array(y_train)
y_test=np.array(y_test)
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
print(vocab_size)
(10301, 600)
(1158, 600)
(10301, 7)
(1158, 7)
19055

5. LSTM Model

from tensorflow.keras.layers import Embedding, Dense, LSTM
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import load_model
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
import tensorflow_addons as tfa
f1 = tfa.metrics.F1Score(num_classes=7, threshold=0.5)  # per-class F1 (average=None by default)
from tensorflow import keras
keras.__version__
# '2.6.0'
model = Sequential()
model.add(Embedding(vocab_size, 128))  # 128-dimensional word embeddings
model.add(LSTM(128))
model.add(Dense(7, activation='sigmoid'))  # 7 class scores (softmax is the more usual choice for single-label classification)
es = EarlyStopping(monitor='val_loss', mode='min', verbose=2, patience=4)
# note: ModelCheckpoint's monitor expects a logged metric *name* string; passing the metric object
# makes Keras skip saving on every epoch (see the warnings in the training log below)
mc = ModelCheckpoint('best_model.h5', monitor=f1, mode='max', verbose=2, save_best_only=True)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['acc', f1])
model.fit(X_train, y_train, epochs=15, callbacks=[es, mc], batch_size=512, validation_split=0.2)
Epoch 1/15
17/17 [==============================] - 39s 2s/step - loss: 1.8842 - acc: 0.2830 - f1_score: 0.2583 - val_loss: 1.7326 - val_acc: 0.3081 - val_f1_score: 0.2726
WARNING:tensorflow:Can save best model only with <tensorflow_addons.metrics.f_scores.F1Score object at 0x7f1bd3f444f0> available, skipping.
Epoch 2/15
17/17 [==============================] - 38s 2s/step - loss: 1.6177 - acc: 0.4615 - f1_score: 0.3981 - val_loss: 1.5555 - val_acc: 0.4639 - val_f1_score: 0.3776
WARNING:tensorflow:Can save best model only with <tensorflow_addons.metrics.f_scores.F1Score object at 0x7f1bd3f444f0> available, skipping.
Epoch 3/15
17/17 [==============================] - 38s 2s/step - loss: 1.3590 - acc: 0.5495 - f1_score: 0.4018 - val_loss: 1.3879 - val_acc: 0.5075 - val_f1_score: 0.4232
WARNING:tensorflow:Can save best model only with <tensorflow_addons.metrics.f_scores.F1Score object at 0x7f1bd3f444f0> available, skipping.
Epoch 4/15
17/17 [==============================] - 38s 2s/step - loss: 1.1364 - acc: 0.6211 - f1_score: 0.4351 - val_loss: 1.3140 - val_acc: 0.5303 - val_f1_score: 0.4173
WARNING:tensorflow:Can save best model only with <tensorflow_addons.metrics.f_scores.F1Score object at 0x7f1bd3f444f0> available, skipping.
Epoch 5/15
17/17 [==============================] - 38s 2s/step - loss: 0.9084 - acc: 0.7212 - f1_score: 0.4893 - val_loss: 1.1852 - val_acc: 0.6487 - val_f1_score: 0.4386
WARNING:tensorflow:Can save best model only with <tensorflow_addons.metrics.f_scores.F1Score object at 0x7f1bd3f444f0> available, skipping.
Epoch 6/15
17/17 [==============================] - 38s 2s/step - loss: 0.7336 - acc: 0.7920 - f1_score: 0.5183 - val_loss: 1.1071 - val_acc: 0.6473 - val_f1_score: 0.4536
WARNING:tensorflow:Can save best model only with <tensorflow_addons.metrics.f_scores.F1Score object at 0x7f1bd3f444f0> available, skipping.
Epoch 7/15
17/17 [==============================] - 39s 2s/step - loss: 0.5796 - acc: 0.8381 - f1_score: 0.5395 - val_loss: 1.0900 - val_acc: 0.6458 - val_f1_score: 0.4837
WARNING:tensorflow:Can save best model only with <tensorflow_addons.metrics.f_scores.F1Score object at 0x7f1bd3f444f0> available, skipping.
Epoch 8/15
17/17 [==============================] - 38s 2s/step - loss: 0.4641 - acc: 0.8757 - f1_score: 0.5575 - val_loss: 0.9854 - val_acc: 0.6982 - val_f1_score: 0.4781
WARNING:tensorflow:Can save best model only with <tensorflow_addons.metrics.f_scores.F1Score object at 0x7f1bd3f444f0> available, skipping.
Epoch 9/15
17/17 [==============================] - 38s 2s/step - loss: 0.3588 - acc: 0.9039 - f1_score: 0.5759 - val_loss: 1.2205 - val_acc: 0.6458 - val_f1_score: 0.4993
WARNING:tensorflow:Can save best model only with <tensorflow_addons.metrics.f_scores.F1Score object at 0x7f1bd3f444f0> available, skipping.
Epoch 10/15
17/17 [==============================] - 38s 2s/step - loss: 0.3082 - acc: 0.9181 - f1_score: 0.5908 - val_loss: 1.0990 - val_acc: 0.6870 - val_f1_score: 0.5031
WARNING:tensorflow:Can save best model only with <tensorflow_addons.metrics.f_scores.F1Score object at 0x7f1bd3f444f0> available, skipping.
Epoch 11/15
17/17 [==============================] - 39s 2s/step - loss: 0.2331 - acc: 0.9408 - f1_score: 0.6076 - val_loss: 1.8883 - val_acc: 0.5793 - val_f1_score: 0.4935
WARNING:tensorflow:Can save best model only with <tensorflow_addons.metrics.f_scores.F1Score object at 0x7f1bd3f444f0> available, skipping.
Epoch 12/15
17/17 [==============================] - 39s 2s/step - loss: 0.2625 - acc: 0.9380 - f1_score: 0.6111 - val_loss: 1.3714 - val_acc: 0.6642 - val_f1_score: 0.5215
WARNING:tensorflow:Can save best model only with <tensorflow_addons.metrics.f_scores.F1Score object at 0x7f1bd3f444f0> available, skipping.
Epoch 00012: early stopping

<keras.callbacks.History at 0x7f1bf1402a60>
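The repeated "Can save best model only with ... available, skipping" warnings mean the checkpoint never wrote best_model.h5: monitor was given the metric object instead of a logged metric name, and the per-class F1 is a vector that Keras cannot compare in any case. A sketch of a configuration that would checkpoint on validation macro-F1, assuming tensorflow_addons:

f1_macro = tfa.metrics.F1Score(num_classes=7, average='macro', threshold=0.5, name='f1_macro')
mc = ModelCheckpoint('best_model.h5', monitor='val_f1_macro', mode='max', verbose=2, save_best_only=True)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['acc', f1_macro])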
# evaluation helper: per-class precision / recall / F1 via sklearn's classification_report
def evaluate(test_x, test_y, model):
    predictions = model.predict(test_x)
    y_pred = predictions.argmax(axis=1)  # predicted class index per sample
    y_true = test_y.argmax(axis=1)       # true class index from the one-hot labels
    print(classification_report(y_true, y_pred))
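With the argmax fix above, the helper can be called directly for the per-class report:

evaluate(X_test, y_test, model)  # prints precision / recall / F1 for each of the 7 classes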
model.evaluate(X_test, y_test)
37/37 [==============================] - 6s 149ms/step - loss: 1.1846 - acc: 0.6986 - f1_score: 0.5419

[1.1846439838409424,
 0.6986182928085327,
 array([0.5124555 , 0.59907836, 0.48712873, 0.45633796, 0.5854922 ,
        0.4855967 , 0.6674057 ], dtype=float32)]
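The f1_score of 0.5419 shown above equals the mean of the per-class values in the returned array; a quick check:

per_class_f1 = np.array([0.5124555, 0.59907836, 0.48712873, 0.45633796, 0.5854922, 0.4855967, 0.6674057])
print(per_class_f1.mean())  # ≈ 0.5419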
# from the saved best model
# note: because the checkpoint callback above never saved (see the warnings during training),
# best_model.h5 may only exist if it was written by an earlier run
loaded_model = load_model('best_model.h5')
loaded_model.evaluate(X_test, y_test)