# coding: utf-8
'''Google Wide & Deep model, written in Keras.'''
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Merge
from sklearn.preprocessing import MinMaxScaler

# all columns in the data
COLUMNS = ["age", "workclass", "fnlwgt", "education", "education_num",
           "marital_status", "occupation", "relationship", "race", "gender",
           "capital_gain", "capital_loss", "hours_per_week", "native_country",
           "income_bracket"]
# label column
LABEL_COLUMN = "label"
# categorical feature columns
CATEGORICAL_COLUMNS = ["workclass", "education", "marital_status", "occupation",
                       "relationship", "race", "gender", "native_country"]
# continuous feature columns
CONTINUOUS_COLUMNS = ["age", "education_num", "capital_gain", "capital_loss",
                      "hours_per_week"]

# load a data file
def load(filename):
    with open(filename, 'r') as f:
        # adult.test carries an extra first line that must be skipped
        skiprows = 1 if 'test' in filename else 0
        df = pd.read_csv(f, names=COLUMNS, skipinitialspace=True,
                         skiprows=skiprows, engine='python')
        # drop rows with missing values
        df = df.dropna(how='any', axis=0)
    return df

# preprocessing
def preprocess(df):
    # binary label: 1 if income > 50K, else 0
    df[LABEL_COLUMN] = df['income_bracket'].apply(lambda x: ">50K" in x).astype(int)
    df.pop("income_bracket")
    y = df[LABEL_COLUMN].values
    df.pop(LABEL_COLUMN)
    # one-hot encode the categorical columns
    df = pd.get_dummies(df, columns=[x for x in CATEGORICAL_COLUMNS])
    # TODO: select features to make the network more efficient
    # TODO: feature engineering, e.g. add crossed / combined features
    # from sklearn.preprocessing import PolynomialFeatures
    # X = PolynomialFeatures(degree=2, interaction_only=True,
    #                        include_bias=False).fit_transform(X)
    # scale all features to [0, 1]
    df = pd.DataFrame(MinMaxScaler().fit_transform(df), columns=df.columns)
    X = df.values
    return X, y

def main():
    df_train = load('adult.data')
    df_test = load('adult.test')
    # concatenate so train and test share the same dummy columns
    df = pd.concat([df_train, df_test])
    train_len = len(df_train)
    X, y = preprocess(df)
    X_train, y_train = X[:train_len], y[:train_len]
    X_test, y_test = X[train_len:], y[train_len:]

    # wide part: a single linear unit over all features
    wide = Sequential()
    wide.add(Dense(1, input_dim=X_train.shape[1]))

    # deep part: stacked fully connected layers
    deep = Sequential()
    # TODO: add an embedding layer
    deep.add(Dense(100, input_dim=X_train.shape[1], activation='relu'))
    # deep.add(Dense(100, activation='relu'))
    deep.add(Dense(32, activation='relu'))
    # deep.add(Dense(50, activation='relu'))
    deep.add(Dense(8))
    deep.add(Dense(1, activation='sigmoid'))

    # combine wide and deep: build both sides, then concatenate them (Keras 1.x Merge layer)
    model = Sequential()
    model.add(Merge([wide, deep], mode='concat', concat_axis=1))
    model.add(Dense(1, activation='sigmoid'))

    # compile the model
    model.compile(optimizer='rmsprop', loss='binary_crossentropy',
                  metrics=['accuracy'])

    # train the model (both branches read the same input matrix)
    model.fit([X_train, X_train], y_train, nb_epoch=10, batch_size=32)

    # evaluate loss and accuracy on the test split
    loss, accuracy = model.evaluate([X_test, X_test], y_test)
    print('\n', 'test accuracy:', accuracy)

if __name__ == '__main__':
    main()
# Error: model.add(merge([wide, deep], mode='concat', concat_axis=1))
# TypeError: 'module' object is not callable
# wide_and_deep_model_keras fails with the error above: `merge` (lowercase) is a module in Keras 2, not a callable layer, and the `Merge` layer used in the script only exists in Keras 1.x.
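A possible fix, sketched under the assumption that Keras 2.x is installed (where the Merge layer was removed and keras.layers.merge is a module): rebuild the combine step with the functional API and concatenate. The layer sizes mirror the Sequential version above; the function name build_wide_and_deep and the variable names are only illustrative.

from keras.models import Model
from keras.layers import Input, Dense, concatenate

def build_wide_and_deep(input_dim):
    # a single input feeds both branches, so fit()/evaluate() take one array
    inputs = Input(shape=(input_dim,))
    # wide part: one linear unit
    wide = Dense(1)(inputs)
    # deep part: the same stack of fully connected layers as above
    deep = Dense(100, activation='relu')(inputs)
    deep = Dense(32, activation='relu')(deep)
    deep = Dense(8)(deep)
    deep = Dense(1, activation='sigmoid')(deep)
    # concatenate the two branches instead of Merge(..., mode='concat')
    both = concatenate([wide, deep], axis=1)
    output = Dense(1, activation='sigmoid')(both)
    model = Model(inputs=inputs, outputs=output)
    model.compile(optimizer='rmsprop', loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model

# usage, reusing the arrays produced by preprocess() above:
# model = build_wide_and_deep(X_train.shape[1])
# model.fit(X_train, y_train, epochs=10, batch_size=32)   # epochs, not nb_epoch, in Keras 2
# loss, accuracy = model.evaluate(X_test, y_test)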