@@ -81,7 +81,8 @@
def main(_):
  # UCI Statlog (Heart) dataset.
  csv_file = tf.keras.utils.get_file(
-      'heart.csv', 'http://storage.googleapis.com/applied-dl/heart.csv')
+      'heart.csv',
+      'http://storage.googleapis.com/download.tensorflow.org/data/heart.csv')
  training_data_df = pd.read_csv(csv_file).sample(
      frac=1.0, random_state=41).reset_index(drop=True)
@@ -126,8 +127,8 @@ def main(_):
  age_embedding = keras.layers.Embedding(
      input_dim=10,
      output_dim=len(lattice_sizes_for_embedding),
-      embeddings_initializer=keras.initializers.RandomNormal(seed=1)
-  )(age_input)
+      embeddings_initializer=keras.initializers.RandomNormal(seed=1))(
+          age_input)
  # Flatten to get rid of redundant tensor dimension created by embedding layer.
  age_embedding = keras.layers.Flatten()(age_embedding)
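(Aside, not part of the change above.) The Flatten call in this hunk is needed because a Keras Embedding applied to a (batch_size, 1) input returns a rank-3 tensor of shape (batch_size, 1, output_dim); the extra middle dimension has to be squeezed out before the result can be combined with the rank-2 calibrator outputs. A minimal shape-only sketch, with illustrative sizes rather than the ones used in the example:

import tensorflow as tf
from tensorflow import keras

age_input = keras.layers.Input(shape=(1,), dtype=tf.int32)  # bucketized age index
emb = keras.layers.Embedding(input_dim=10, output_dim=2)(age_input)
print(emb.shape)   # (None, 1, 2) -- redundant middle dimension from the embedding
flat = keras.layers.Flatten()(emb)
print(flat.shape)  # (None, 2) -- two lattice dimensions, ready to join the other inputs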
@@ -140,8 +141,7 @@ def main(_):
      # will not collapse as result of multiplication.
      shape=(1, 2))
  age_ranged = keras.layers.multiply(
-      [keras.activations.sigmoid(age_embedding),
-       embedding_lattice_input_range])
+      [keras.activations.sigmoid(age_embedding), embedding_lattice_input_range])
  lattice_inputs.append(age_ranged)

  # ############### sex ###############
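A note on the age_ranged line just reformatted: sigmoid squashes each embedding coordinate into (0, 1), and the elementwise multiply by the (1, 2)-shaped range constant then stretches each coordinate onto [0, lattice_size - 1], the domain a lattice dimension expects. A rough standalone sketch of the same idea using plain TF ops in place of the keras.layers.multiply call; the embedding values and per-dimension ranges below are made up:

import tensorflow as tf

# Pretend embedding output for a batch of three examples, two coordinates each.
age_embedding = tf.constant([[-2.0, 0.5], [0.0, 3.0], [1.5, -1.0]])
# Upper bounds of the lattice input range per coordinate (illustrative sizes 2 and 3).
embedding_lattice_input_range = tf.constant([[1.0, 2.0]])
# The leading dimension of size 1 broadcasts over the batch, exactly as in the example.
age_ranged = tf.sigmoid(age_embedding) * embedding_lattice_input_range
print(age_ranged.numpy())  # every entry now lies in (0, size - 1)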
@@ -156,7 +156,8 @@ def main(_):
      output_max=lattice_sizes[2] - 1.0,
      # Initializes all outputs to (output_min + output_max) / 2.0.
      kernel_initializer='constant',
-  )(sex_input)
+  )(
+      sex_input)
  lattice_inputs.append(sex_calibrator)

  # ############### cp ###############
@@ -171,8 +172,8 @@ def main(_):
      output_max=lattice_sizes[3] - 1.0,
      monotonicity='increasing',
      # You can specify TFL regularizers as tuple ('regularizer name', l1, l2).
-      kernel_regularizer=('hessian', 0.0, 1e-4)
-  )(cp_input)
+      kernel_regularizer=('hessian', 0.0, 1e-4))(
+          cp_input)
  lattice_inputs.append(cp_calibrator)

  # ############### trestbps ###############
@@ -182,8 +183,8 @@ def main(_):
  trestbps_calibrator = tfl.layers.PWLCalibration(
      # Alternatively to uniform keypoints you might want to use quantiles as
      # keypoints.
-      input_keypoints=np.quantile(
-          training_data_df['trestbps'], np.linspace(0.0, 1.0, num=5)),
+      input_keypoints=np.quantile(training_data_df['trestbps'],
+                                  np.linspace(0.0, 1.0, num=5)),
      dtype=tf.float32,
      # Together with quantile keypoints you might want to initialize piecewise
      # linear function to have 'equal_slopes' in order for output of layer
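For reference on the keypoint line in this hunk: np.quantile with np.linspace(0.0, 1.0, num=5) places the calibrator keypoints at the empirical 0th, 25th, 50th, 75th and 100th percentiles of the training column, rather than on a uniform grid. A tiny illustration with made-up blood-pressure values:

import numpy as np

trestbps = np.array([110.0, 120.0, 125.0, 130.0, 140.0, 160.0, 180.0])
keypoints = np.quantile(trestbps, np.linspace(0.0, 1.0, num=5))
print(keypoints)  # [110.  122.5 130.  150.  180. ] -- min, lower quartile, median, upper quartile, max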
@@ -196,7 +197,8 @@ def main(_):
      clamp_min=True,
      clamp_max=True,
      monotonicity='increasing',
-  )(trestbps_input)
+  )(
+      trestbps_input)
  lattice_inputs.append(trestbps_calibrator)

  # ############### chol ###############
@@ -219,8 +221,8 @@ def main(_):
      # You can specify list of regularizers. You are not limited to TFL
      # regularizers. Feel free to use any :)
      kernel_regularizer=[('laplacian', 0.0, 1e-4),
-                          keras.regularizers.l1_l2(l1=0.001)]
-  )(chol_input)
+                          keras.regularizers.l1_l2(l1=0.001)])(
+                              chol_input)
  lattice_inputs.append(chol_calibrator)

  # ############### fbs ###############
@@ -242,7 +244,8 @@ def main(_):
      # seed in order to simplify experimentation.
      kernel_initializer=keras.initializers.RandomUniform(
          minval=0.0, maxval=lattice_sizes[5] - 1.0, seed=1),
-  )(fbs_input)
+  )(
+      fbs_input)
  lattice_inputs.append(fbs_calibrator)

  # ############### restecg ###############
@@ -258,7 +261,8 @@ def main(_):
      # Categorical calibration layer supports standard Keras regularizers.
      kernel_regularizer=keras.regularizers.l1_l2(l1=0.001),
      kernel_initializer='constant',
-  )(restecg_input)
+  )(
+      restecg_input)
  lattice_inputs.append(restecg_calibrator)

  # Lattice inputs must be either list of d tensors of rank (batch_size, 1) or
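The comment this hunk ends on (cut off by the hunk boundary) describes the two input formats tfl.layers.Lattice accepts: a list of d tensors of shape (batch_size, 1), or a single tensor of shape (batch_size, d). A minimal sketch of the single-tensor form, independent of this example; the 2 x 2 lattice, feature names and concatenate call below are illustrative assumptions, not code from the change:

import tensorflow_lattice as tfl
from tensorflow import keras

x1 = keras.layers.Input(shape=(1,))
x2 = keras.layers.Input(shape=(1,))
# Stack the per-feature (batch_size, 1) tensors into one (batch_size, 2) tensor.
lattice_inputs_tensor = keras.layers.concatenate([x1, x2], axis=1)
output = tfl.layers.Lattice(
    lattice_sizes=[2, 2],
    monotonicities=['increasing', 'none'],
    output_min=0.0,
    output_max=1.0)(
        lattice_inputs_tensor)
model = keras.models.Model(inputs=[x1, x2], outputs=output)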
@@ -274,22 +278,25 @@ def main(_):
  # Note that making embedding inputs monotonic does not make sense.
  lattice = tfl.layers.Lattice(
      lattice_sizes=lattice_sizes,
-      monotonicities=['none', 'none', 'none', 'increasing', 'increasing',
-                      'increasing', 'increasing', 'increasing'],
+      monotonicities=[
+          'none', 'none', 'none', 'increasing', 'increasing', 'increasing',
+          'increasing', 'increasing'
+      ],
      output_min=0.0,
      output_max=1.0,
-  )(lattice_inputs_tensor)
+  )(
+      lattice_inputs_tensor)

-  model = keras.models.Model(
-      inputs=model_inputs,
-      outputs=lattice)
-  model.compile(loss=keras.losses.mean_squared_error,
-                optimizer=keras.optimizers.Adagrad(learning_rate=1.0))
+  model = keras.models.Model(inputs=model_inputs, outputs=lattice)
+  model.compile(
+      loss=keras.losses.mean_squared_error,
+      optimizer=keras.optimizers.Adagrad(learning_rate=1.0))

  feature_names = ['age', 'sex', 'cp', 'trestbps', 'chol', 'fbs', 'restecg']
-  features = np.split(training_data_df[feature_names].values.astype(np.float32),
-                      indices_or_sections=len(feature_names),
-                      axis=1)
+  features = np.split(
+      training_data_df[feature_names].values.astype(np.float32),
+      indices_or_sections=len(feature_names),
+      axis=1)
  target = training_data_df[['target']].values.astype(np.float32)

  # Bucketize input for embedding.
@@ -302,12 +309,13 @@ def main(_):
  embedding_bins[-1] += 1.0
  features[0] = np.digitize(features[0], bins=embedding_bins)

-  model.fit(features,
-            target,
-            batch_size=32,
-            epochs=FLAGS.num_epochs,
-            validation_split=0.2,
-            shuffle=False)
+  model.fit(
+      features,
+      target,
+      batch_size=32,
+      epochs=FLAGS.num_epochs,
+      validation_split=0.2,
+      shuffle=False)


if __name__ == '__main__':
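On the bucketizing lines at the top of the last hunk: np.digitize turns each raw age into the index of the bin it falls in, which is the integer the Embedding layer consumes. Because np.digitize assigns values greater than or equal to the last edge to an extra out-of-range index, nudging embedding_bins[-1] up by 1.0 keeps the largest observed value inside the final bucket (assuming the edges were built so the last one sits at the column maximum, which is not shown in these hunks). A small illustration with made-up ages and edges:

import numpy as np

ages = np.array([[29.0], [54.0], [77.0]])
embedding_bins = np.array([29.0, 45.0, 61.0, 77.0])
print(np.digitize(ages, bins=embedding_bins))  # indices 1, 2, 4 -- 77 spills past the last edge
embedding_bins[-1] += 1.0
print(np.digitize(ages, bins=embedding_bins))  # indices 1, 2, 3 -- 77 stays in the last bucket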