diff --git a/README.md b/README.md
index 30e2621..274e950 100644
--- a/README.md
+++ b/README.md
@@ -30,11 +30,10 @@
 
 - [x] Box-Plot of Avg. Distance of clones from parent
 
-- [ ] Search subspace between two fixpoints by linage(10**-5), check were they end up
+- [x] Search subspace between two fixpoints by lineage (10**-5), check where they end up
+
+- [x] How are basins / "attractor areas" shaped?
 
-- [ ] How are basins / "attractor areas" shaped?
-      - Weired.... tbc...
-      -
 
 # Future Todos:
 
diff --git a/experiments/soup_exp.py b/experiments/soup_exp.py
index 678b772..3b58ec0 100644
--- a/experiments/soup_exp.py
+++ b/experiments/soup_exp.py
@@ -63,6 +63,23 @@ class SoupExperiment:
             net = Net(self.net_input_size, self.net_hidden_size, self.net_out_size, net_name)
             self.population.append(net)
 
+    def population_self_train(self):
+        #  Self-training each network in the population
+        for j in range(self.population_size):
+            net = self.population[j]
+
+            for _ in range(self.ST_steps):
+                net.self_train(1, self.log_step_size, self.net_learning_rate)
+
+    def population_attack(self):
+        # With probability self.attack_chance (in percent), one random network attacks another
+        if random.randint(1, 100) <= self.attack_chance:
+            rand_idx1, rand_idx2 = random.sample(range(self.population_size), 2)
+            random_net1 = self.population[rand_idx1]
+            random_net2 = self.population[rand_idx2]
+            print(f"\n Attack: {random_net1.name} -> {random_net2.name}")
+            random_net1.attack(random_net2)
+
     def evolve(self):
         """ Evolving consists of attacking & self-training. """
 
@@ -71,19 +88,10 @@ class SoupExperiment:
             loop_epochs.set_description("Evolving soup %s" % i)
 
             # A network attacking another network with a given percentage
-            if random.randint(1, 100) <= self.attack_chance:
-                random_net1, random_net2 = random.sample(range(self.population_size), 2)
-                random_net1 = self.population[random_net1]
-                random_net2 = self.population[random_net2]
-                print(f"\n Attack: {random_net1.name} -> {random_net2.name}")
-                random_net1.attack(random_net2)
+            self.population_attack()
 
             #  Self-training each network in the population
-            for j in range(self.population_size):
-                net = self.population[j]
-                
-                for _ in range(self.ST_steps):
-                    net.self_train(1, self.log_step_size, self.net_learning_rate)
+            self.population_self_train()
 
             # Testing for fixpoints after each batch of ST steps to see relevant data
             if i % self.ST_steps == 0:
diff --git a/experiments/soup_melt_exp.py b/experiments/soup_melt_exp.py
new file mode 100644
index 0000000..ed080d4
--- /dev/null
+++ b/experiments/soup_melt_exp.py
@@ -0,0 +1,50 @@
+import random
+
+from tqdm import tqdm
+
+from experiments.soup_exp import SoupExperiment
+from functionalities_test import test_for_fixpoints
+
+
+class MeltingSoupExperiment(SoupExperiment):
+
+    def __init__(self, melt_chance, *args, keep_population_size=True, **kwargs):
+        super(MeltingSoupExperiment, self).__init__(*args, **kwargs)
+        self.keep_population_size = keep_population_size
+        self.melt_chance = melt_chance
+
+    def population_melt(self):
+        # With probability self.melt_chance (in percent), melt two random networks into a new one
+        if random.randint(1, 100) <= self.melt_chance:
+            random_net1_idx, random_net2_idx, destroy_idx = random.sample(range(self.population_size), 3)
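+            # random.sample draws three distinct indices, so the destroyed net is never one of the two parents.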
+            random_net1 = self.population[random_net1_idx]
+            random_net2 = self.population[random_net2_idx]
+            print(f"\n Melt: {random_net1.name} -> {random_net2.name}")
+            melted_network = random_net1.melt(random_net2)
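+            # Optionally retire a random third network so the soup keeps a constant size.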
+            if self.keep_population_size:
+                del self.population[destroy_idx]
+            else:
+                # The soup grew: keep population_size in sync so the new network
+                #  is included in future sampling and self-training.
+                self.population_size += 1
+            self.population.append(melted_network)
+
+    def evolve(self):
+        """ Evolving consists of attacking, melting & self-training. """
+
+        loop_epochs = tqdm(range(self.epochs))
+        for i in loop_epochs:
+            loop_epochs.set_description("Evolving soup %s" % i)
+
+            self.population_attack()
+
+            self.population_melt()
+
+            self.population_self_train()
+
+            # Testing for fixpoints after each batch of ST steps to see relevant data
+            if i % self.ST_steps == 0:
+                test_for_fixpoints(self.fixpoint_counters, self.population)
+                fixpoints_percentage = round(self.fixpoint_counters["identity_func"] / self.population_size, 1)
+                self.fixpoint_counters_history.append(fixpoints_percentage)
+
+            # Resetting the fixpoint counter. Last iteration not to be reset -
+            #  it is important for the bar_chart_fixpoints().
+            if i < self.epochs - 1:
+                self.reset_fixpoint_counters()
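+
+
+# Usage sketch: apart from melt_chance and keep_population_size, the constructor
+# arguments are inherited from SoupExperiment (see experiments/soup_exp.py) and
+# are elided here:
+#   exp = MeltingSoupExperiment(10, ...)
+#   exp.evolve()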
diff --git a/journal_basins.py b/journal_basins.py
index 191779e..68a72d0 100644
--- a/journal_basins.py
+++ b/journal_basins.py
@@ -262,7 +262,7 @@ if __name__ == "__main__":
     # Countplot of all fixpoint clone after training per class. Uncomment and manually adjust xticklabels if x-ax size gets too small.
     ax = sns.catplot(kind="count", data=df, x="noise", hue="class", height=5.27, aspect=11.7/5.27)
     ax.set_axis_labels("Noise Levels", "Clone Fixpoints After Training Count ", fontsize=15)
-    #ax.set_xticklabels(labels=('10e-10', '10e-9', '10e-8', '10e-7', '10e-6', '10e-5', '10e-4', '10e-3', '10e-2', '10e-1'), fontsize=15)
+    ax.set_xticklabels(labels=('$10^{-10}$', '$10^{-9}$', '$10^{-8}$', '$10^{-7}$', '$10^{-6}$', '$10^{-5}$', '$10^{-4}$', '$10^{-3}$', '$10^{-2}$', '$10^{-1}$'), fontsize=15)
     plt.savefig(f"{directory}/clone_status_after_countplot_{ST_name_hash}.png")
     plt.clf()
 
@@ -274,5 +274,6 @@ if __name__ == "__main__":
     ax.map(sns.boxplot, "State", "Distance", "noise", linewidth=0.8, order=["MAE_pre", "MAE_post"], whis=[0, 100])
     ax.set_axis_labels("", "Manhattan Distance To Parent Weights", fontsize=15)
     ax.set_xticklabels(labels=('after noise application', 'after training'), fontsize=15)
+    plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))  # the x-axis is categorical; only y is numeric
     plt.savefig(f"{directory}/before_after_distance_catplot_{ST_name_hash}.png")
     plt.clf()
diff --git a/journal_robustness.py b/journal_robustness.py
index 8eb87c3..acce534 100644
--- a/journal_robustness.py
+++ b/journal_robustness.py
@@ -125,7 +127,7 @@ class RobustnessComparisonExperiment:
         #   or multi network settings with a singlee seed
 
         df = pd.DataFrame(columns=['setting', 'Noise Level', 'Self Train Steps', 'absolute_loss',
-                                   'Time to vergence', 'Time as fixpoint'])
+                                   'Time to convergence', 'Time as fixpoint'])
         with tqdm(total=max(len(self.id_functions), seeds)) as pbar:
             for i, fixpoint in enumerate(self.id_functions):  # 1 / n
                 row_headers.append(fixpoint.name)
@@ -157,7 +159,7 @@ class RobustnessComparisonExperiment:
                                 # When this raises a Type Error, we found a second order fixpoint!
                             steps += 1
 
-                            df.loc[df.shape[0]] = [setting, f'10e-{noise_level}', steps, absolute_loss,
+                            df.loc[df.shape[0]] = [setting, f'$10^{{-{noise_level}}}$', steps, absolute_loss,
                                                    time_to_vergence[setting][noise_level],
                                                    time_as_fixpoint[setting][noise_level]]
                     pbar.update(1)
@@ -165,14 +167,19 @@ class RobustnessComparisonExperiment:
         # Get the measuremts at the highest time_time_to_vergence
         df_sorted = df.sort_values('Self Train Steps', ascending=False).drop_duplicates(['setting', 'Noise Level'])
         df_melted = df_sorted.reset_index().melt(id_vars=['setting', 'Noise Level', 'Self Train Steps'],
-                                                 value_vars=['Time to vergence', 'Time as fixpoint'],
+                                                 value_vars=['Time to convergence', 'Time as fixpoint'],
                                                  var_name="Measurement",
                                                  value_name="Steps").sort_values('Noise Level')
         # Plotting
         sns.set(style='whitegrid', font_scale=2)
+        plt.rcParams.update({
+            "text.usetex": True,
+            "font.family": "sans-serif",
+            "font.size": 12,
+            "font.weight": "bold",
+            "font.sans-serif": ["Helvetica"]})
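+        # Note: sns.set() overrides font-related rcParams, so the settings above must
+        #  be applied after it; text.usetex also requires a local LaTeX installation.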
         bf = sns.boxplot(data=df_melted, y='Steps', x='Noise Level', hue='Measurement', palette=PALETTE)
         synthetic = 'synthetic' if self.is_synthetic else 'natural'
-        # bf.set_title(f'Robustness as self application steps per noise level for {synthetic} fixpoints.')
         plt.tight_layout()
 
         # sns.set(rc={'figure.figsize': (10, 50)})
@@ -206,7 +213,6 @@ class RobustnessComparisonExperiment:
         plot_loss(self.loss_history, self.directory)
 
 
-
 if __name__ == "__main__":
     NET_INPUT_SIZE = 4
     NET_OUT_SIZE = 1
@@ -214,12 +220,11 @@ if __name__ == "__main__":
     ST_steps = 1000
     ST_epochs = 5
     ST_log_step_size = 10
-    ST_population_size = 500
+    ST_population_size = 1000
     ST_net_hidden_size = 2
     ST_net_learning_rate = 0.004
     ST_name_hash = random.getrandbits(32)
-    ST_synthetic = True
-
+    ST_synthetic = False
 
     print(f"Running the robustness comparison experiment:")
     exp = RobustnessComparisonExperiment(
diff --git a/journal_soup_robustness.py b/journal_soup_robustness.py
index 0ba3e50..a2c5208 100644
--- a/journal_soup_robustness.py
+++ b/journal_soup_robustness.py
@@ -158,8 +159,10 @@ class SoupRobustnessExperiment:
         df = df.replace([np.inf, -np.inf], np.nan)
         df = df.dropna()
         # sns.set(rc={'figure.figsize': (10, 50)})
+        sns.set_theme(style="ticks")
         bx = sns.catplot(data=df[df['absolute_loss'] < 1], y='absolute_loss', x='application_step', kind='box',
                          col='noise_level', col_wrap=3, showfliers=False)
+
         directory = Path('output') / 'robustness'
         filename = f"absolute_loss_perapplication_boxplot_grid.png"
         filepath = directory / filename
@@ -167,7 +170,7 @@ class SoupRobustnessExperiment:
         plt.savefig(str(filepath))
 
         if print_it:
-            col_headers = [str(f"10e-{d}") for d in range(noise_levels)]
+            col_headers = [str(f"10-{d}") for d in range(noise_levels)]
 
             print(f"\nAppplications steps until divergence / zero: ")
             print(tabulate(avg_time_to_vergence, showindex=row_headers, headers=col_headers, tablefmt='orgtbl'))
@@ -221,7 +224,7 @@ if __name__ == "__main__":
     # soup_SA_steps = 10
 
     # Define number of networks & their architecture
-    soup_population_size = 20
+    soup_population_size = 4
     soup_net_hidden_size = 2
     soup_net_learning_rate = 0.04
 
diff --git a/network.py b/network.py
index 337ae8f..9ec060a 100644
--- a/network.py
+++ b/network.py
@@ -167,18 +168,30 @@ class Net(nn.Module):
             """ See after how many steps of SA is the output not changing anymore: """
             # print(f"Self-app. step {i+1}: {Experiment.changing_rate(output2, output)}")
 
-            self = self.apply_weights(output)
+            _ = self.apply_weights(output)
 
         return self
 
     def attack(self, other_net):
         other_net_weights = other_net.input_weight_matrix()
         my_evaluation = self(other_net_weights)
-
-        SA_steps = 1
-
         return other_net.apply_weights(my_evaluation)
 
+    def melt(self, other_net):
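+        """ Return a new Net whose weights are a random 50/50 mix of self's and other_net's weights. """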
+        try:
+            melted_name = self.name + other_net.name
+        except AttributeError:
+            melted_name = None
+        melted_weights = self.create_target_weights(other_net.input_weight_matrix())
+        self_weights = self.create_target_weights(self.input_weight_matrix())
+        weight_indxs = list(range(len(self_weights)))
+        random.shuffle(weight_indxs)
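+        # Overwrite a random half of the other net's weight vector with self's values.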
+        for weight_idx in weight_indxs[:len(melted_weights) // 2]:
+            melted_weights[weight_idx] = self_weights[weight_idx]
+        melted_net = Net(i_size=self.input_size, h_size=self.hidden_size, o_size=self.out_size, name=melted_name)
+        melted_net.apply_weights(melted_weights)
+        return melted_net
+
     def apply_noise(self, noise_size: float):
         """ Changing the weights of a network to values + noise """
         for layer_id, layer_name in enumerate(self.state_dict()):