上一篇博客主要介紹了決策樹的原理,這篇主要介紹它的實現,代碼環境python 3.4,實現的是ID3算法,首先為了后面matplotlib的繪圖方便,我把原來的中文數據集變成了英文。
原始數據集:
變化后的數據集在程序代碼中體現,這就不截圖了
構建決策樹的代碼如下:
# coding: utf-8
"""
2017.6.25
author: Erin
function: "decision tree" ID3

ID3 decision-tree construction on the classic 14-row "buys computer"
toy data set.  The tree is represented as nested dicts:
{feature_name: {feature_value: subtree_or_class_label}}.
"""
import numpy as np   # kept from the original script (not used below)
import pandas as pd  # kept from the original script (not used below)
from math import log
import operator


def load_data():
    """Return (data, features).

    data: 14 samples; columns are age, input, student, level, and the
          class label ('yes'/'no') in the last position.
    features: the four feature names matching the first four columns.
    """
    data = [['teenager',    'high',   'no',  'same', 'no'],
            ['teenager',    'high',   'no',  'good', 'no'],
            ['middle_aged', 'high',   'no',  'same', 'yes'],
            ['old_aged',    'middle', 'no',  'same', 'yes'],
            ['old_aged',    'low',    'yes', 'same', 'yes'],
            ['old_aged',    'low',    'yes', 'good', 'no'],
            ['middle_aged', 'low',    'yes', 'good', 'yes'],
            ['teenager',    'middle', 'no',  'same', 'no'],
            ['teenager',    'low',    'yes', 'same', 'yes'],
            ['old_aged',    'middle', 'yes', 'same', 'yes'],
            ['teenager',    'middle', 'yes', 'good', 'yes'],
            ['middle_aged', 'middle', 'no',  'good', 'yes'],
            ['middle_aged', 'high',   'yes', 'same', 'yes'],
            ['old_aged',    'middle', 'no',  'good', 'no']]
    features = ['age', 'input', 'student', 'level']
    return data, features


def cal_entropy(dataSet):
    """Shannon entropy (base 2) of the label column (last element of each row).

    For the full toy data set the counts are {'yes': 9, 'no': 5} and the
    entropy is 0.9402859586706309.
    """
    numEntries = len(dataSet)
    labelCounts = {}
    for featVec in dataSet:
        label = featVec[-1]
        labelCounts[label] = labelCounts.get(label, 0) + 1
    entropy = 0.0
    for count in labelCounts.values():
        p_i = count / float(numEntries)
        entropy -= p_i * log(p_i, 2)  # log(x, 2): base-2 logarithm
    return entropy


def split_data(data, feature_index, value):
    """Select the rows whose column `feature_index` equals `value`,
    returning them with that column removed (it has been "used up").

    feature_index: column used for the split, e.g. 0 for 'age'.
    value: the attribute value to keep, e.g. 'teenager'.
    """
    data_split = []
    for row in data:
        if row[feature_index] == value:
            reduced = row[:feature_index] + row[feature_index + 1:]
            data_split.append(reduced)
    return data_split


def choose_best_to_split(data):
    """Return the index of the feature with the largest information gain,
    or -1 if no split improves on zero gain.
    """
    count_feature = len(data[0]) - 1   # number of features (last col = label)
    base_entropy = cal_entropy(data)   # entropy before any split
    max_info_gain = 0.0
    split_fea_index = -1
    for i in range(count_feature):
        # Distinct values of feature i define the candidate branches.
        unique_values = set(row[i] for row in data)
        cond_entropy = 0.0  # weighted entropy after splitting on feature i
        for value in unique_values:
            sub_data = split_data(data, i, value)
            prob = len(sub_data) / float(len(data))
            cond_entropy += prob * cal_entropy(sub_data)
        info_gain = base_entropy - cond_entropy
        if info_gain > max_info_gain:
            max_info_gain = info_gain
            split_fea_index = i
    return split_fea_index


def most_occur_label(labels):
    """Return the most frequent class label (majority vote).

    BUG FIX: the original only incremented the count in the `else`
    branch, so the first occurrence of every label was never counted
    (each count came out one too low).  Now every occurrence counts.
    """
    label_count = {}
    for label in labels:
        if label not in label_count:
            label_count[label] = 0
        label_count[label] += 1
    sorted_label_count = sorted(label_count.items(),
                                key=operator.itemgetter(1), reverse=True)
    return sorted_label_count[0][0]


def build_decesion_tree(dataSet, featnames):
    """Recursively build an ID3 tree.

    Returns either a class label (leaf) or a nested dict keyed by the
    chosen feature name, whose values map each attribute value to a
    subtree built on the corresponding subset.
    """
    featname = featnames[:]  # copy so the caller's feature list survives
    classlist = [featvec[-1] for featvec in dataSet]  # labels at this node
    if classlist.count(classlist[0]) == len(classlist):
        return classlist[0]  # pure node: all samples share one class
    if len(dataSet[0]) == 1:
        # No features left to split on -> majority vote.
        # BUG FIX: the original called the undefined name Vote(), which
        # raised NameError whenever this branch was reached.
        return most_occur_label(classlist)
    # Split on the feature with maximal information gain.
    bestFeat = choose_best_to_split(dataSet)
    bestFeatname = featname[bestFeat]
    del featname[bestFeat]  # keep indices aligned with the reduced rows
    DecisionTree = {bestFeatname: {}}
    # One branch per observed value of the chosen feature.
    allvalue = [vec[bestFeat] for vec in dataSet]
    specvalue = sorted(set(allvalue))  # deterministic branch order
    for v in specvalue:
        copyfeatname = featname[:]
        DecisionTree[bestFeatname][v] = build_decesion_tree(
            split_data(dataSet, bestFeat, v), copyfeatname)
    return DecisionTree
新聞熱點
疑難解答