readme updated

README.md
@@ -1,6 +1,11 @@
# self-rep NN paper - ALIFE journal edition

- [x] Plateau / Pillar size: What happens to the fixpoints after noise introduction and retraining? Options being: Same Fixpoint, Similar Fixpoint (Basin), Different Fixpoint? Do they do the clustering thingy?
- [x] Plateau / Pillar size: What happens to the fixpoints after noise introduction and retraining? Options being: Same Fixpoint, Similar Fixpoint (Basin),
    - Different Fixpoint?
      Yes, we did not find the same one (10-5).
    - Do they do the clustering thingy?
      Kind of: small movement towards the parent fixpoint (MIM distance getting smaller).
      Small movement for everyone? -> Distribution

    - see `journal_basins.py` for the "train -> spawn with noise -> train again and see where they end up" functionality. Noise application follows the `vary` function that was used in the paper's robustness test, with `+- prng() * eps`. Change if desired.

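A minimal sketch of the "spawn with noise" step described in the item above. The helper name and usage are hypothetical; it only assumes the `+- prng() * eps` scheme and is not the repository's actual `vary` implementation:

```python
import random


def spawn_clone_with_noise(parent_weights, eps=1e-3, prng=random.random):
    """Return a noisy copy of a flat weight vector: each weight is shifted
    by +- prng() * eps with a random sign, mirroring the vary-style perturbation."""
    return [w + random.choice((-1.0, 1.0)) * prng() * eps for w in parent_weights]


# Hypothetical usage: spawn one clone of a tiny parent weight vector.
parent = [0.5, -0.25, 0.1]
clone = spawn_clone_with_noise(parent, eps=1e-4)
```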
@@ -9,6 +14,9 @@

- [ ] Same thing with Soup interaction. We would expect the same behaviour... Influence of interaction with near and far-away particles.

- [ ] How are basins / "attractor areas" shaped?
      - Weird... tbc...

- [x] Robustness test with a trained network: training for high-quality fixpoints, compare with the "perfect" fixpoint. Average loss per application step.

    - see `journal_robustness.py` for the robustness test modeled after Cristian's robustness experiment (with the exception that we put noise on the weights). Has a `synthetic` bool to switch to a hand-modeled perfect fixpoint instead of naturally trained ones.
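
A rough sketch of the robustness measurement idea above (hypothetical names and threshold, not the actual `journal_robustness.py` code): after putting noise on the weights, count how many self-application steps the network still behaves like a fixpoint.

```python
def steps_as_fixpoint(apply_net, weights, tolerance=1e-5, max_steps=100):
    """Count self-application steps for which the (noisy) weight vector stays
    within `tolerance` of the network's output for it, i.e. still acts as a fixpoint.

    `apply_net(weights)` is assumed to map a flat weight vector to the network's
    outputs when it is fed its own weights as input.
    """
    current = list(weights)
    for step in range(max_steps):
        output = apply_net(current)
        drift = max(abs(o - w) for o, w in zip(output, current))
        if drift > tolerance:
            return step
        current = output
    return max_steps
```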
@@ -19,7 +27,7 @@

- [ ] Adjust self-training so that it favors second-order fixpoints -> second-order test implementation (?)

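For reference, a second-order fixpoint would reproduce its weights only after two self-applications. A sketch of such a test, under the same hypothetical `apply_net` convention as above (not an existing repo function):

```python
def is_second_order_fixpoint(apply_net, weights, eps=1e-5):
    """Period-2 check: applying the network to its own weights twice gets back
    to the original weights (within eps), while a single application does not."""
    def close(a, b):
        return max(abs(x - y) for x, y in zip(a, b)) < eps

    once = apply_net(weights)
    twice = apply_net(once)
    return close(twice, weights) and not close(once, weights)
```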
- [ ] Barplot over clones -> how many become a fixpoint vs how many diverge per noise level
- [x] Barplot over clones -> how many become a fixpoint vs how many diverge per noise level

- [ ] Box plot of avg. distance of clones from parent

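A plotting sketch for the barplot item above. The counts are placeholders for illustration only, not experiment results:

```python
import matplotlib.pyplot as plt

# Placeholder counts per noise level: clones that became a fixpoint vs. diverged.
noise_levels = ["1e-5", "1e-4", "1e-3"]
fixpoint_counts = [9, 6, 2]
diverged_counts = [1, 4, 8]

x = range(len(noise_levels))
plt.bar([i - 0.2 for i in x], fixpoint_counts, width=0.4, label="fixpoint")
plt.bar([i + 0.2 for i in x], diverged_counts, width=0.4, label="diverged")
plt.xticks(list(x), noise_levels)
plt.xlabel("noise level (eps)")
plt.ylabel("number of clones")
plt.legend()
plt.savefig("clone_fixpoint_barplot.png")
```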

@@ -55,8 +55,6 @@ class SelfTrainExperiment:
            net = Net(self.net_input_size, self.net_hidden_size, self.net_out_size, net_name)

            for _ in range(self.epochs):
              input_data = net.input_weight_matrix()
              target_data = net.create_target_weights(input_data)
              net.self_train(1, self.log_step_size, self.net_learning_rate)

            print(f"\nLast weight matrix (epoch: {self.epochs}):\n{net.input_weight_matrix()}\nLossHistory: {net.loss_history[-10:]}")
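
For context, a minimal conceptual sketch of one self-training step (the network is trained to output its own weights). The shapes and the tiny example network are assumptions; the repository's `Net` builds its inputs and targets from `input_weight_matrix()` / `create_target_weights()` as seen in the hunk above.

```python
import torch
import torch.nn as nn


def self_train_step(model: nn.Module, learning_rate: float = 0.04) -> float:
    """One illustrative self-training step: feed the network its own flattened
    weights (one value per row) and train it to reproduce those values."""
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    weights = torch.cat([p.detach().flatten() for p in model.parameters()])
    inputs = weights.unsqueeze(-1)    # assumed 1-d input per weight
    targets = weights.unsqueeze(-1)   # the network should reproduce its own weights
    loss = nn.functional.mse_loss(model(inputs), targets)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()


# Hypothetical usage with a tiny 1-in / 1-out network.
net = nn.Sequential(nn.Linear(1, 2), nn.Linear(2, 1))
for _ in range(10):
    last_loss = self_train_step(net)
```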
@@ -113,5 +111,6 @@ def run_ST_experiment(population_size, batch_size, net_input_size, net_hidden_si
    summary_fixpoint_experiment(runs, population_size, epochs, experiments, net_learning_rate, summary_directory_name,
                                summary_pre_title)


if __name__ == '__main__':
    raise NotImplementedError('Test this here!!!')

@@ -195,7 +195,6 @@ class RobustnessComparisonExperiment:

            print(f"\nTime as fixpoint: ")
            # print(tabulate(time_as_fixpoint, showindex=row_headers, headers=col_headers, tablefmt='orgtbl'))

        return time_as_fixpoint, time_to_vergence

    def count_fixpoints(self):