diff --git a/Main/Main Stage 2/main2.py b/Main/Main Stage 2/main2.py
index e66e740a4adead6851222ddc5a76936653e64f73..d652c7bd475303d281ce40e0c27d0828dee4c29a 100644
--- a/Main/Main Stage 2/main2.py	
+++ b/Main/Main Stage 2/main2.py	
@@ -7,7 +7,7 @@ import tensorflow as tf
 df = pd.read_csv("data-input.csv") # read the data
 df.to_csv("data.csv", index=False) # this is also just data-input as it wasn't combined
 df['legendary'] = df['legendary'].astype(int)
-df = df.fillna({"type2": "No Secondary"}) # clean the empty, type 2 with 'None' so it its just fire type its Fire, None
+df = df.fillna({"type2": "No Secondary"}) # fill empty type2 with 'No Secondary', so a pure Fire type becomes Fire, No Secondary
 df.to_csv("data-cleaned.csv", index=False)
 print("--Done Cleaning--")
 print(df.head())
diff --git a/Main/Main Stage 4/main4.py b/Main/Main Stage 4/main4.py
index dba9cd4b83a22633b4225187b81a05351a250dcb..0880d8a338e56b46eca2bf92b95781bb2ce023cb 100644
--- a/Main/Main Stage 4/main4.py	
+++ b/Main/Main Stage 4/main4.py	
@@ -10,11 +10,6 @@ df['legendary'] = df['legendary'].astype(int)
 df = df.fillna({"type2": "No Secondary"}) # clean the empty, type 2 with 'None' so it its just fire type its Fire, None
 df = df.drop(columns=["name"]) # dont need
 df = df.drop(columns=["generation"]) # dont need
-# df.to_csv("data-cleaned.csv", index=False)
-# print("--Done Cleaning--")
-# print(df.head())
-# drop name and drop generation they should have no effect
-# df = pd.read_csv("data-cleaned.csv")
 
 numeric_feature_names = ["total", "hp", "attack", "defense", "sp_attack", "sp_defense", "speed"]
 categorical_feature_names = ["type1", "type2"]
diff --git a/Part 1/MNIST-Complete98+.py b/Part 1/MNIST-Complete98+.py
index fdee7e8be5f4ccd2c43efe3fd7959a83219293c9..7fb78e6799f8f4e058aa637698ab0fb4e3fabd91 100644
--- a/Part 1/MNIST-Complete98+.py	
+++ b/Part 1/MNIST-Complete98+.py	
@@ -16,7 +16,6 @@ model = tf.keras.models.Sequential([
     tf.keras.layers.BatchNormalization(),
     tf.keras.layers.MaxPooling2D((2, 2)),
 
-    # added kernel_regularizer to help
     tf.keras.layers.Conv2D(32, (3, 3), activation='relu', padding='same', strides=(2, 2)),
     tf.keras.layers.BatchNormalization(),
     tf.keras.layers.MaxPooling2D((2, 2)),
diff --git a/Part 2/nonMNIST-Complete.py b/Part 2/nonMNIST-Complete.py
index 83cd4d60767ad1cf307228414deb986b316c85c9..2eead6368253d24de891ae8f4e8885ab72e48688 100644
--- a/Part 2/nonMNIST-Complete.py	
+++ b/Part 2/nonMNIST-Complete.py	
@@ -24,7 +24,6 @@ model = tf.keras.models.Sequential([
     tf.keras.layers.BatchNormalization(),
     tf.keras.layers.MaxPooling2D((2, 2)),
 
-    # added kernel_regularizer to help
     tf.keras.layers.Conv2D(32, (3, 3), activation='relu', padding='same', strides=(2, 2)),
     tf.keras.layers.BatchNormalization(),
     tf.keras.layers.MaxPooling2D((2, 2)),
diff --git a/Part 2/nonMNIST-Complete95+.py b/Part 2/nonMNIST-Complete95+.py
index 8fe1ed3f31fb639d69cb46e496d951f82f5e523b..eae42db00100a834de6030c463ba219734a96815 100644
--- a/Part 2/nonMNIST-Complete95+.py	
+++ b/Part 2/nonMNIST-Complete95+.py	
@@ -24,7 +24,6 @@ model = tf.keras.models.Sequential([
     tf.keras.layers.BatchNormalization(),
     tf.keras.layers.MaxPooling2D((2, 2)),
 
-    # added kernel_regularizer to help
     tf.keras.layers.Conv2D(64, (3, 3), activation='relu', padding='same', strides=(2, 2)),
     tf.keras.layers.BatchNormalization(),
     tf.keras.layers.MaxPooling2D((2, 2)),
diff --git a/Part 3/CHDModel.py b/Part 3/CHDModel.py
index 1d5e9f0dcd6d59ea05541f0f3aeda7fe3ccae10b..60e43829db53d056336f94c1eadf7d41d014f5ac 100644
--- a/Part 3/CHDModel.py	
+++ b/Part 3/CHDModel.py	
@@ -64,6 +64,7 @@ model = tf.keras.Sequential([
     tf.keras.layers.Dense(1, activation='sigmoid')
 ])
 
+# EarlyStopping (per Keras docs): stop training once val_loss has not improved for 10 epochs, restoring the best weights seen
 early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)
 
 num_runs = 1  # setting it to 1 for right now, Was used to debug changes/improvements in the model