<!doctype html>
<meta charset="utf-8">
<script src="website/template.js"></script>
<script async src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
<!-- Global site tag (gtag.js) - Google Analytics -->
<script async src="https://www.googletagmanager.com/gtag/js?id=G-68SGYK1XFT"></script>
<script>
// Google Analytics (gtag.js) bootstrap — standard GA4 snippet.
// Commands are queued on window.dataLayer; the async gtag.js library
// (loaded above) drains the queue once it arrives.
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date()); // record page-load timestamp
gtag('config', 'G-68SGYK1XFT'); // GA4 measurement ID for this site
</script>
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.13.18/dist/katex.min.css" integrity="sha384-zTROYFVGOfTw7JV7KUu8udsvW2fx4lWOsCEDqhBreBwlHI4ioVRtmIvEThzJHGET" crossorigin="anonymous">
<script defer src="https://cdn.jsdelivr.net/npm/katex@0.13.18/dist/katex.min.js" integrity="sha384-GxNFqL3r9uRJQhR+47eDxuPoNE7yLftQM8LcxzgS4HT73tp970WS/wV5p8UzCOmb" crossorigin="anonymous"></script>
<script defer src="https://cdn.jsdelivr.net/npm/katex@0.13.18/dist/contrib/auto-render.min.js" integrity="sha384-vZTG03m+2yp6N6BNi5iM4rW4oIwk5DfcNdFfxkk9ZWpDriOkXX8voJBFrAO7MpVl" crossorigin="anonymous"></script>
<script>
// Typeset all LaTeX math on the page with KaTeX auto-render.
// renderMathInElement is provided by the deferred auto-render.min.js above;
// deferred scripts run before DOMContentLoaded, so it is in scope here.
document.addEventListener("DOMContentLoaded", function() {
renderMathInElement(document.body, {
// customised options
// • auto-render specific keys, e.g.:
// delimiters are matched in order, so $$…$$ must precede $…$
delimiters: [
{left: '$$', right: '$$', display: true},
{left: '$', right: '$', display: false},
{left: '\\(', right: '\\)', display: false},
{left: '\\[', right: '\\]', display: true}
],
// • rendering keys, e.g.:
// render malformed TeX as-is instead of throwing
throwOnError : false
});
});
</script>
<script type="text/front-matter">
title: "Stopping Aging in Neural Cellular Automata"
description: "Description of the post"
authors:
- Lorenzo Cavuoti: https://github.com/LetteraUnica
- Francesco Sacco: http://github.com/Francesco215
affiliations:
- University of Trieste: http://units.it
- University of Pisa: http://unipi.it
</script>
<dt-article>
<h1>Stopping Aging in Neural Cellular Automata</h1>
<h2>
In this paper we are going to discuss the conditions necessary to make a mortal
neural cellular automaton immortal
</h2>
<dt-byline></dt-byline>
<p>
Most living organisms age and eventually die because of it, but a few don’t, and some of
them are incredibly hard to kill, even if you cut their head off, it just regrows
<dt-cite key="handberg2008stem"></dt-cite>.
Neural Cellular Automata are a very good representation of living organisms: scientists managed
to create cellular automata that are able to represent any image; some of them decay after a while,
others manage to maintain the final state, and some
of them are even able to regenerate damage.
<dt-cite key="mordvintsev2020growing"></dt-cite>
</p>
<p>
Throughout this paper we are going to use different terms with a capital letter to refer to different types of cellular automata:
<ul>
<li>With “Growing” we indicate a neural cellular automaton that grows into a final state and then decays, this represents growing and aging biological organisms</li>
<li>With “Persistent” we indicate a cellular automaton that grows into a final state and then keeps it for an infinite amount of time, some biological organisms like lobsters have this property.</li>
<li>Finally with “Regenerating” we refer to persistent organisms that are even able to regenerate damage and have an infinite lifespan, biological examples of these are the planarians.</li>
</ul>
</p>
<figure class="l-body">
<video loop="" autoplay="" playsinline="" muted="" width="100%" height="100%">
<source src="pytorch_ca/Presentation_videos/firework_grow_persist_regen.mp4" type="video/mp4" id="unstable">
Your browser does not support the video tag.
</video>
<div style="position:relative; height:30px; display:flex">
<div style="width:33%; top:0px; text-align:center">Growing</div>
<div style="width:33%; top:0px; left:275px; text-align:center">Persistent</div>
<div style="width:33%; top:0px; left:550px; text-align:center">Regenerating</div>
</div>
<figcaption style="align-self: center; margin-top: 20px; margin-bottom: 25px; width: 100%" >
Video 1: Example of the three types of Cellular Automata
</figcaption>
</figure>
<p>
Biological organisms oftentimes blur the line between these categories, for example most aging organisms like ourselves
are able to heal wounds, and some like salamanders are even able to regenerate missing limbs,
for practical reasons, however, we are going to stick with these 3 categories
</p>
<p>Sometimes we will abbreviate Cellular Automata with CA to make the descriptions less verbose</p>
<p>
In this Article we are going to ask the following questions:
<ul>
<li>Is it possible to make a Growing neural CA Persistent or Regenerating?</li>
<li>What is the minimum amount of effort to be able to do so?</li>
</ul>
</p>
<dt-byline></dt-byline>
<h2 style="font-size: 40px;">Switching the Rule</h2>
<p>
First of all we wanted to know if it is possible to take a Growing cellular automaton
and after an arbitrary number $n$ of steps
<dt-fn> By arbitrary we mean any number of steps before the cellular automata decays</dt-fn>
change the update rule in order to make it Regenerating. We can write it mathematically like this
$$
\begin{cases}
x_{new}=f_1(x_{old})\quad \textrm{ if }\quad n_{iter} \le n\\
x_{new}=f_2(x_{old})\quad \textrm{ if }\quad n_{iter} > n
\end{cases}
$$
</p>
<figure class="l-middle side" id="boom">
<video loop="" autoplay="" playsinline="" muted="" width="100%" height="100%">
<source src="pytorch_ca/Presentation_videos/rswitch40.mp4" type="video/mp4">
Your browser does not support the video tag.
</video>
<figcaption>
Video 2: Evolution when we use a $f_1 = $Growing CA for the first 40 steps then
switch to a $f_2 = $Regenerating CA for the next iterations
</figcaption>
</figure>
<p>
Where $f_1$ is a growing rule and $f_2$ is a rule that we trained to make the organism Regenerating
<dt-fn>$n$ in this case should be considered a parameter, that is, if we change $n$ we don't need to change $f_2$,
so the switch could happen at any iteration</dt-fn>
</p>
<p>
To make a parallel with a biological system, this is equivalent to asking:
Is it possible to make an organism immortal if we have the ability to change its DNA in every single cell
at once?
<dt-fn>
This is not entirely a correct statement since there are more ways to change the behaviour of
a biological system, for example you can do so by changing the electric potential <dt-cite key="TransmembraneVoltagePotential"></dt-cite>.
There is also the possibility that some changes to the update rule are impossible to encode in the DNA by itself,
so the best way to view the switch is by considering it as a change in the way the cells operate.
</dt-fn>
</p>
<p>
Since we had already trained a Regenerating CA we tried to substitute it directly into $f_2$, the result
however was a complete failure (<a href="#boom">Video 2</a>) where the switch happens at $n=40$
</p>
<div class="body" style="margin-top: 70px;">
<details>
<summary>
<div class="body" style="display: flex; width: 100%; margin-left: 20px; margin-top: -35px">
<div style="position: relative; margin-top:-px; width:60%">
<h3 style="margin-top: 0em;">Why does it happen?</h3>
</div>
<div style="font-size: small; line-height: 120%; width:40%">
This section can be skipped if you are only interested in the results
</div>
</div>
</summary>
<p>
This outcome at first sight might be surprising, because as can be seen in the first video the visible
RGBA channels of the cell evolve in a similar fashion in both the growing and regenerating CA. However, the hidden
channels, which are the ones used by the cells to transfer information between them, are
in general completely different, which makes the two CAs incompatible.
</p>
<p>
We can show this behaviour empirically by visualizing the hidden states of two CAs after they reached
the target state (<a href="#80%">Figure 1</a>).
We can notice that the CA on the left uses this hidden state to encode something similar to a $x$ coordinate,
while the CA on the right uses the same hidden state to represent a sort of distance from the center.
</p>
<figure id="80%">
<img src="pytorch_ca/Presentation_images/regenerating2_channel_6.png" style='height: 50%; width: 49%; object-fit: contain'>
<img src="pytorch_ca/Presentation_images/regenerating_channel_6.png" style='height: 50%; width: 49%; object-fit: contain'>
<figcaption style="align-self: center; margin-top: 20px; margin-bottom: 25px; width: 100%" >
Figure 1: Encodings of the 6th channel after 300 iterations of two independently trained Regenerating CAs.
</figcaption>
</figure>
</details>
</div>
<h3>How to fix it</h3>
<figure class="l-middle side" id="switch" style="margin-top: -40px;">
<video loop="" autoplay="" playsinline="" muted="" width="100%" height="100%">
<source src="pytorch_ca/Presentation_videos/switch_visuals/switch60.mp4" type="video/mp4">
Your browser does not support the video tag.
</video>
<figcaption>
Video 3: The rule switches at the 60th step, you can clearly see when the switch happens
</figcaption>
</figure>
<p>
To make the model work as intended we must train $f_2$ with the goal of making the organism
regenerating, we will call the trained $f_2$ rule Switch.
The only difference between training a Regenerating CA and Switch is the training pool (<a href="#pool">Figure 2</a>): instead
of starting with a single dot on the canvas, the pool is initialized by having each element be the output
of $n$ steps of $f_1$ (in our case the Growing CA). That way Switch learns to start from a state of a Growing CA and reach the
state of a Regenerating one.
<dt-fn>
The training seemed to be highly dependent on the starting parameters, in particular if we
started from a Regenerating CA the net seemed to converge faster, instead
if we started from a random initialization often times it didn't converge
</dt-fn>
</p>
<figure class="l-middle outset" id="pool">
<img src="pytorch_ca/Presentation_images/collage switch.png" alt="training pool" width="100%" height="100%">
<figcaption>
Figure 2: Sample of 15 images from the training pool used to train the Switch CA
</figcaption>
</figure>
<p>
So, does it work if we train Switch with this method?
The answer is yes. <a href="#switch">Video 3</a> shows you what happens.
</p>
<div class="body" style="margin-top: 70px;">
<details>
<summary>
<div class="body" style="display: flex; width: 100%; margin-left: 20px; margin-top: -35px">
<div style="position: relative; margin-top:-px; width:60%">
<h3 style="margin-top: 0em;">Some considerations</h3>
</div>
<div style="font-size: small; line-height: 120%; width:40%">
This section can be skipped
</div>
</div>
</summary>
<p>
As already explained above, different CA rules will use different encodings to store the information of
the organisms, so here are some other things that you can’t do:
<ul>
<li>
You can’t use a Switch CA for a Growing one which has not been trained for
<dt-fn>The Switch rule is tailor-made to be able to understand the specific encoding of the Growing rule</dt-fn>
</li>
<li>You cannot just swap the Growing rule with another Regenerating (or Persist) rule
<dt-fn>For the same reason of the one above and because the regenerating (and the persist) are trained with different starting points </dt-fn>
</li>
<li>You cannot salvage a Growing rule that has decayed
<dt-fn>We are not trying to resuscitate the dead, and so we didn’t train for it</dt-fn>
</li>
</ul>
</p>
</details>
</div>
<dt-byline></dt-byline>
<h2 style="font-size: 40px;">Virus with fixed mask</h2>
<p>
Switch has some relevant problems that need to be addressed.
</p>
<p>
First of all during the transition from one rule to the next the system undergoes a major restructuring.
<dt-fn>
You can see it from Video 2: when the switch happens the organism becomes amorphous.
</dt-fn>
If this was a living organism that needs its organs to function 24h a day it would probably die before
the transition is completed.
</p>
<p>
The second problem is that we need to change every single cell of the organism in order to make it immortal,
in practice, however, we aren’t going to have the precision necessary
to be able to influence every single cell of an organism without missing even one, and,
as you can see from <a href="#mask">Video 4</a>, if some of the cells (marked in blue) don’t transition from $f_1$
to $f_2$ the organism decays quickly.
</p>
<figure class='l-middle' id="mask">
<img src="pytorch_ca/Presentation_images/mask_switch.png" style='height: 45%; width: 45%; object-fit: contain'>
<video loop="" autoplay="" playsinline="" muted="" style='height: 45%; width: 45%; object-fit: contain'>
<source src="pytorch_ca/Presentation_videos/switch40_99.7.mp4" type="video/mp4">
Your browser does not support the video tag.
</video>
<figcaption style="align-self: center; margin-top: 20px; margin-bottom: 25px; width: 100%" >
Video 4: On the left we plotted the cell mask, where the yellow cells evolve according to $f_2$ and the blue ones to $f_1$,
while on the right you can see the evolution video.
We can see that leaving only a couple of cells with the old update rule can be catastrophic for the organism
</figcaption>
</figure>
<p>
Randazzo et al.<dt-cite key="randazzo2021adversarial"></dt-cite> managed to change the
global properties of a CA by adding some cells that follow a different rule, so the next thing
that we tried to do was to train a neural CA that would be able to make a Growing into a Persist even
if not all the cells follow $f_2$ after the switch has happened. We will call this new CA Mask.
The training proceeds as before, however we switch the update rule of only a given percentage of the cells.
</p>
<p>
<a href="#80%mask">Video 5</a> shows what happens with a CA rule trained to switch 80% of the cells. With this technique
we made Mask more flexible than Switch and at the same time we managed to avoid the restructuring phase,
without training for it.
<dt-fn>
This is because by having some of the old cells still around, the new rule has to learn to
collaborate and influence the old cells to be able to reach a stationary state.
In section 4 we will provide more explanations for this behaviour
</dt-fn>
</p>
<figure class='l-body' id="80%mask">
<video loop="" autoplay="" playsinline="" muted="" width="100%" height="100%">
<source src="pytorch_ca/Presentation_videos/mask_80.mp4" type="video/mp4">
Your browser does not support the video tag.
</video>
<figcaption style="align-self: center; margin-top: 20px; margin-bottom: 25px; width: 100%" >
Video 5: On the left we plotted the evolution of the cell mask, with yellow being the regenerating cells and blue the growing cells.
The rule switches at the 40th step with 80% of new cells, here the transition is not even noticeable
</figcaption>
</figure>
<p>
</p>
<figure class='l-page side' id="grafico_loss">
<img src="pytorch_ca/Presentation_images/mask_by_percentage.svg" style='height: 100%; width: 100%; object-fit: contain'>
<figcaption style="align-self: center; margin-top: 20px; margin-bottom: 25px; width: 100%" >
Figure 3: Evaluation loss as the percentage of cells substituted changes, to have an image that resembles the target
the loss should be less than $10^{-2}$, we can see that after 300 iterations most of the Mask CAs decay,
while only the ones with more than 95% of cells substituted survive until the end
</figcaption>
</figure>
<h3>Effectiveness</h3>
<p>
One of the problems is that the effectiveness of Mask decreases as you increase the number of steps
and decrease the percentage of switched cells,
however it still significantly increases the lifespan of the CA.
</p>
<p>
In particular, <a href="#grafico_loss">Figure 3</a> shows the loss as a function of the number of steps and the percentage of cells substituted with Mask.
We notice that we need to switch at least 70% of cells to increase the lifespan
of the Growing CA by an order of magnitude, and at least 95% of cells to be able to make the Growing CA persist indefinitely.
</p>
<p>
In order to better understand <a href="#grafico_loss">Figure 3</a>, <a href="#images_loss">Figure 4</a> shows some images and the corresponding losses
</p>
<figure class='l-body' id="images_loss">
<img src="pytorch_ca/Presentation_images/loss_5e-3.png" style='height: 32%; width: 32%; object-fit: contain'>
<img src="pytorch_ca/Presentation_images/loss_1e-2.png" style='height: 32%; width: 32%; object-fit: contain'>
<img src="pytorch_ca/Presentation_images/loss_5e-2.png" style='height: 32%; width: 32%; object-fit: contain'>
<div style="position:relative; height:30px; display:flex">
<div style="width:33%; top:0px; text-align:center">$5\times 10^{-3}$</div>
<div style="width:33%; top:0px; text-align:center">$1\times 10^{-2}$</div>
<div style="width:33%; top:0px; text-align:center">$5\times 10^{-2}$</div>
</div>
<figcaption style="align-self: center; margin-top: 20px; margin-bottom: 25px; width: 100%" >
Figure 4: Images as the loss changes, we can see that for losses $\approx 5\times 10^{-2}$
the image starts to deteriorate significantly
</figcaption>
</figure>
<div class="body">
<details>
<summary>
<div class="body" style="display: flex; width: 100%; margin-left: 20px; margin-top: -35px">
<div style="position: relative; margin-top:-px; width:60%">
<h3 style="margin-top: 0em;">Robustness</h3>
</div>
<div style="font-size: small; line-height: 120%; width:40%">
This section can be skipped
</div>
</div>
</summary>
<p>
After training Mask we noticed that it got some nice properties without training for them:
<ul>
<li>
The density of new cells does not need to be uniform, it just needs to be locally equal to or higher than the minimum percentage,
in fact, if the percentage of cells increases Mask would only benefit from it
</li>
<li>
The density of new cells can change multiple times during the evolution, and,
as long as the density remains higher than the minimum percentage, the CA will persist
</li>
<li>
If we train Mask to switch 80% of the cells, then use it in a case where we switch only 70% of cells,
it performs in a similar way to a Mask that was trained explicitly for 70% of adversarial cells<dt-fn>Da spiegare qua</dt-fn>
</li>
</ul>
</p>
<p>
<a href="#mask_80_100_chekered">Video 6</a> illustrates these points, it seems that Mask has learned a rule that works well for a wide range of cell densities,
furthermore it manages to communicate effectively with regions with different concentration of cells
</p>
<div class="body">
<figure id="mask_80_100_chekered">
<video loop="" autoplay="" playsinline="" muted="" width="100%" height="100%">
<source src="pytorch_ca/Presentation_videos/mask_80_100_chekered.mp4" type="video/mp4">
Your browser does not support the video tag.
</video>
<figcaption style="align-self: center; margin-top: 20px; margin-bottom: 25px; width: 100%" >
Video 6: The rule switches two times: the first at the 40th step with 70% of cells on the left side, and 100% of cells on the right side,
the second at the 120th step with a checkered pattern which maintains the minimum density of cells higher than 70%.
This Switch was trained to switch always 80% of the cells.
</figcaption>
</figure>
</div>
</details>
</div>
<dt-byline></dt-byline>
<h1 id="realistic simulation of adversarials">Realistic simulation of adversarials</h1>
<p>
One of the limitations of using a fixed mask is that it can’t model what happens when a kind of cell
overtakes the other. This is important because having the new cells overtake the old ones would be both more biologically plausible
<dt-fn>because in biological organisms cells die and are replaced by new ones</dt-fn> and could
reduce the minimum percentage of initial cells that need to be switched.
In an ideal case, we would only substitute a small percentage of cells, then these would gradually overtake the entire organism,
making it immortal (<a href="#illutration_replacement">Figure 7</a>).
<dt-fn>
The minimum percentage required depends on the speed of the aging process, if it's slow
you can start with fewer cells since they will have plenty of time to overtake the old ones
</dt-fn>
</p>
<figure class="l-body" id="illutration_replacement">
<img src="pytorch_ca/images/Evoluzione.png" style='height: 100%; width: 100%; object-fit: contain'>
<figcaption style="align-self: center; margin-top: 20px; margin-bottom: 25px; width: 100%" >
Figure 7: The idea is that the new cells learn to overtake the old ones and then make the organism immortal,
so you need to inject fewer cells in the new organism
</figcaption>
</figure>
<p>
To make this work we first have to ask ourselves how do we model the evolution of the mask?
</p>
<h3 id="model realistic simulation of adversarials">The Model</h3>
<figure class="l-middle side" id="evolving_mask">
<img src="pytorch_ca/images/Canali.svg" style='height: 300px; width: 100%; object-fit: contain'>
<figcaption style="margin-top: 20px; margin-bottom: 25px; width: 100%" >
Figure 9: The vector on the left represents the old representation of the CA state, and the one
on the right is the new representation. This new representation of the cells has an additional
$\alpha$ channel and to make the code simpler the alpha channels are now the last two components
</figcaption>
</figure>
<p>
For simplicity we are going to restrict ourselves to the case where we are going to have 2 rules ($f_1$ and $f_2$).
</p>
<p>
Before, a cell state was represented by a state vector having the first 4 components representing
the RGBA of the pixel and the remaining were hidden channels that helped the CA pass
information between its cells. If the $\alpha$ channel (transparency) is >0.1 it means
that the cell is alive, otherwise it’s dead
</p>
<p>
If we are going to have two different types of cells we are going to need two alpha channels.
Since a cell cannot be of both kinds at the same time we choose that if $\alpha_1$ is $>0.1$, then
$\alpha_2$ must be 0, and the cell update follows $f_1$ and vice versa.
</p>
<p>
At the same time, when both alphas are below $0.1$, the cell evolves with the average of both updates (<a href="#evolving_mask">Figure 8,9</a>).
</p>
<figure class="l-middle" id="evolving_mask" style="display: flex;">
<div style='height: 50%; width: 50%;' id="grid_container">
<div id="grid" style='position: relative; z-index: 1; height: 308px; width: 100%; object-fit: contain'></div>
<figcaption id="white">
Figure 8: The color of the cells in the image on the left represents the kind of cell.
Different kinds will have different evolution rules. Pass the mouse over the cells to see further details
</figcaption>
<figcaption id="red" style="display: none;">
The red cells have $\alpha_1>0.1$ and $\alpha_2=0$, and follow the rule $f_1$. They represent the cells that are alive and of kind 1.
</figcaption>
<figcaption id="yellow" style="display: none;">
The yellow cells have $\alpha_2>0.1$ and $\alpha_1=0$, and follow the rule $f_2$. They represent the cells that are alive and of kind 2.
</figcaption>
<figcaption id="light_red" style="display: none;">
The light red cells have $\alpha_1 \le 0.1$ and $\alpha_2=0$. They occupy the cells that are neighboring to the red cells, but do not neighbor the yellow cells. They follow $f_1$
</figcaption>
<figcaption id="light_yellow" style="display: none;">
The light yellow cells have $\alpha_2 \le 0.1$ and $\alpha_1=0$. They occupy the cells that are neighboring to the yellow cells, but do not neighbor the red cells. They follow $f_2$
</figcaption>
<figcaption id="orange" style="display: none;">
The light orange cells have $\alpha_1 \le 0.1$ and $\alpha_2 \le 0.1$. They occupy the cells that are neighboring to both red and yellow cells but are not alive in either. They follow the average of both rules
</figcaption>
<div id="frame" style="position:absolute; width: 2000px; height: 1000px; margin-left: -300px; margin-top:-600px;"></div>
</div>
<div style='float:right; height: 50%; width: 50%; position: relative; color: grey; font-size: 13px; line-height: 1.5em; font-family: sans-serif; margin-top: 0.5em;'>
<p id="explanation 1" style="font-family: inherit; margin-top: inherit; margin-bottom: inherit;">
A cell can be alive in only one channel, you can’t have a live cell that behaves like both, this means that no cell must have both $\alpha_\textrm s$ greater than $0.1$. This is because if we interpret the two cells as having different DNA, then they must have different rules and there is no in-between
</p>
<p id="explanation 2" style="font-family: inherit; margin-top: inherit; margin-bottom: inherit;">
We impose that new cells can only grow near mature ones of the same type, example: cells of type 2 can only grow near cells of type 2 </p>
<p id="explanation 3" style="font-family: inherit; margin-top: inherit; margin-bottom: inherit;">
Unless the space is already occupied by another cell
</p>
<p id="explanation 4" style="font-family: inherit; margin-top: inherit; margin-bottom: inherit;">
If we do the same reasoning for the red cells we get this
</p>
<p id="explanation 5" style="font-family: inherit; margin-top: inherit; margin-bottom: inherit;">
If you look closely there are some orange squares, they are a combination of light yellow and light red, this means that these squares neighbors both the kind of live cells, but are not alive.
These orange squares have both $\alpha$ channels below 0.1 and they follow the average of both rules
</p>
</div>
</figure>
<p>
Keep in mind that neither update rule can directly influence the neighboring cells, so the adversarial
is forced to rely on changing its internal state in such a way that the other cell's rule makes it kill itself
$$
f_{1,2}: \mathbb{R}^{9\times (n_\textrm{channels}-1)} \to \mathbb{R}^{n_\textrm{channels}-1}
$$
</p>
<div class="body">
<details>
<summary>
<div class="body" style="display: flex; width: 100%; margin-left: 20px; margin-top: -35px">
<div style="position: relative; margin-top:-px; width:60%">
<h3 style="margin-top: 0em;">Training technique</h3>
</div>
<div style="font-size: small; line-height: 120%; width:40%">
This section can be skipped if you are only interested in the results
</div>
</div>
</summary>
<p>
One of the problems we first encountered was that the new cells never tried to kill the old ones.
This meant that if they were severely outnumbered they wouldn't be able to salvage the CA.
</p>
<p>
One way to try to solve this problem would be to make a loss function like so:
$$
L=L_\textrm{target} + \lambda N_\textrm{old}
$$
Where $L_\textrm{target}$ is the distance to the target image, $N_\textrm{old}$
is number of old cells.
</p>
<p>
This however has the flaw that when we have just introduced the new cells the loss is bound to be high.
This means that the training algorithm will sacrifice some of the image quality in favor of having a higher speed of substitution of cells.
</p>
<p>
On the other hand, if we give the CA plenty of time before evaluating the loss, it might destroy the CA at the start and then rebuild it in time before the evaluation (like in figure <a href="#switch">3</a>)
</p>
<p>
To address this problem we made a custom loss that depended on the number of steps<dt-fn>This doesn't increase the computation cost per epoch by any measurable amount, and it decreases the total number of epochs</dt-fn>
$$
L=\int_{t_\textrm{start}}^{t_\textrm{end}}
\lambda_1(t)L_\textrm{target}(t)+
\lambda_2(t)N_\textrm{old}(t) dt
$$
Where $\lambda_1$ and $\lambda_2$ are hyperparameters that depend on the time (or equivalently number of steps),
and the loss is normalized such that <dt-fn>We do this because doubling the total loss is equivalent to doubling the learning rate</dt-fn>
$$
\int_{t_\textrm{start}}^{t_\textrm{end}}\lambda_1(t)+\lambda_2(t)\,\, dt=1
$$
In principle you can make $\lambda_{1,2}$ dependent on the number of epochs, we used this to first teach the CA to kill the old cells, and then to learn to be Persistent.
</p>
<div class="body">
<div style="position:relative; width:100%; height:100%;">
<svg id="loss_graph" style="width:100%; height:100%;"></svg>
<div style="position:absolute; bottom:0%; right: 10%">$t$</div>
<div style="position:absolute; top:-10%; left: 5%">$\lambda_{1,2}$</div>
</div>
<div style="margin-left: 40px;">
<input type="range" min="0" max="200" step="1" value="0" id="EpochLoss">
<div id="displayEpochLoss">epoch=0</div>
</div>
</div>
<p>
We used this same technique to train a Growing CA that dies.
</p>
<div class="body">
<div style="position:relative; width:100%; height:100%;">
<div id="loss_graph_growing" style="width:100%; height:100%;"></div>
<div style="position:absolute; bottom:0%; right: 10%">$t$</div>
<div style="position:absolute; top:-10%; left: 5%">$\lambda_{1,2}$</div>
</div>
</div>
</details>
</div>
<h3> Results for Expanding the lifespan</h3>
<figure class='l-body side'>
<video loop="" autoplay="" playsinline="" muted="" width="100%" height="100%">
<source src="pytorch_ca/Presentation_videos/dying_firework.mp4" type="video/mp4">
Your browser does not support the video tag.
</video>
<figcaption>
Growing rule that dies
</figcaption>
</figure>
<p>
We have found that if the Growing CA evolves such that all its cells are dead at the end, we can
salvage it with a much smaller percentage of initial adversarial cells — in the case of the butterfly only 5% CAMBIARE.
We instead need a much higher number of adversaries if the CA degenerates and fills the whole space;
however, this number is still far lower than in the case of the fixed mask (50% vs 80%).
</p>
<!-- INSERIRE VIDEO FARFALLA, SOSTITUIRE EVOLUZIONE DELLA CA MORENTE CON QUELLA CHE ESPLODE -->
<p>
Keep in mind that we don't lose any generality by imposing this condition, since all biological
organisms eventually vanish for $t\to\infty$.
</p>
<p>
As you can see from the videos below, it is possible to salvage the Growing rule with far
fewer new cells compared to a fixed mask.
</p>
<figure class="l-body" style="display:flex">
<div style=" width:49%; height:49%;">
<video loop="" autoplay="" playsinline="" muted="" width="100%" height="100%">
<source src="pytorch_ca/Presentation_videos/virus_50.mp4" type="video/mp4">
Your browser does not support the video tag.
</video>
<figcaption style="width:90%">
Video 7: Evolution of a CA with 50% cells substituted
</figcaption>
</div>
<div style="width: 1%;"></div>
<div style=" width:49%; height:49%;">
<video loop="" autoplay="" playsinline="" muted="" width="100%" height="100%">
<source src="pytorch_ca/Presentation_videos/virus_50_alpha.mp4" type="video/mp4">
Your browser does not support the video tag.
</video>
<figcaption style="width:90%">
Video 8: Alpha channel representation of the same CA
</figcaption>
</div>
</figure>
<h3> Results for changing Global Properties</h3>
<figure class='l-page side' id="grafico_loss">
<img src="pytorch_ca/Presentation_images/lizard_green->red.png" style='height: 100%; width: 100%; object-fit: contain'>
<figcaption style="align-self: center; margin-top: 20px; margin-bottom: 25px; width: 100%" >
Transformation of the CA from green to red
</figcaption>
</figure>
<p>
This technique can be used to change other properties of the CA, such as its color.
In <dt-cite key="randazzo2021adversarial"></dt-cite> it was shown that a high concentration of (fixed) adversarial cells was needed
to turn the green lizard into a red one.
</p>
<p>
With our technique we were able to change the color of the CA by switching 2% of the original cells to the new rule.
</p>
<figure class="l-body" style="display:flex">
<div style=" width:49%; height:49%;">
<video loop="" autoplay="" playsinline="" muted="" width="100%" height="100%">
<source src="pytorch_ca/Presentation_videos/green_to_red_2_virus_RGBA.mp4" type="video/mp4">
Your browser does not support the video tag.
</video>
<figcaption style="width:90%">
Video 9: Evolution of a CA with 2.5% of cells substituted
</figcaption>
</div>
<div style="width: 1%;"></div>
<div style=" width:49%; height:49%;">
<video loop="" autoplay="" playsinline="" muted="" width="100%" height="100%">
<source src="pytorch_ca/Presentation_videos/green_to_red_2_virus_alpha.mp4" type="video/mp4">
Your browser does not support the video tag.
</video>
<figcaption style="width:90%">
Video 10: Alpha channel representation of the same CA
</figcaption>
</div>
</figure>
<p>
It's also possible to train the adversarial to achieve this with just a randomly placed starting seed.
</p>
<figure class="l-body" style="display:flex">
<div style=" width:49%; height:49%;">
<video loop="" autoplay="" playsinline="" muted="" width="100%" height="100%">
<source src="pytorch_ca/Presentation_videos/single_cell_RGB.mp4" type="video/mp4">
Your browser does not support the video tag.
</video>
<figcaption style="width:90%">
Video 11: Evolution of a CA with an added 3x3 seed
</figcaption>
</div>
<div style="width: 1%;"></div>
<div style=" width:49%; height:49%;">
<video loop="" autoplay="" playsinline="" muted="" width="100%" height="100%">
<source src="pytorch_ca/Presentation_videos/single_cell_alpha.mp4" type="video/mp4">
Your browser does not support the video tag.
</video>
<figcaption style="width:90%">
Video 12: Alpha channel representation of the same CA
</figcaption>
</div>
</figure>
<dt-byline></dt-byline>
<h2 style="font-size: 40px;">Adding a perturbation</h2>
<p>
In this final section we wanted to know if we can add a small perturbation $\Delta w$ to the weights of a Growing cellular automaton
in order to make it a persistent one. This is very interesting from a biological perspective
because we can answer the question:
</p>
<p>
How much do we need to change the rule (or DNA) in order to have a newborn non-aging organism?
</p>
<h3>The Model</h3>
<p>
In practice we keep the weights of the growing CA $w_\textrm{growing}$ fixed and generate another CA with weights $w_\textrm{new}$ which are equal to
$$
w_\textrm{new} = w_\textrm{growing} + \Delta w
$$
To be sure that $\Delta w$ will be as small as possible, we added an additional
term in the loss, that penalizes the $\mathbb L^2$ norm of the perturbation $|\Delta w|^2$.
So the total loss will be:
$$
L = L_\textrm{target} + \lambda |\Delta w|^2
$$
Where $L_\textrm{target}$ is the distance squared to the target image and $\lambda$ is a hyperparameter.
We will call the new CA trained in this way Perturbation.
</p>
<h3>Results</h3>
<figure class="l-middle side" id="perturbation">
<video loop="" autoplay="" playsinline="" muted="" width="100%" height="100%">
<source src="pytorch_ca/Presentation_videos/perturbation_l=10.mp4" type="video/mp4">
Your browser does not support the video tag.
</video>
<figcaption style="align-self: center; margin-top: 20px; margin-bottom: 25px; width: 704px" >
Video 9: Perturbation CA trained with $\lambda=0.01$
</figcaption>
</figure>
<p>
It turns out that a very small perturbation can turn the Growing CA into a Persistent or even
a Regenerating one, suggesting that a growing and a regenerating organism aren't so different.
Below we show some data on how different Growing and Perturbation actually are, and Video 9 shows the Perturbation CA evolution.
</p>
<div style="height:3px"></div>
<p>
$$
\frac{|\Delta w|^2}{|w_\textrm{growing}|^2}=0.17
$$
$$
\frac{\langle w_\textrm{new}, w_\textrm{growing}\rangle}{|w_\textrm{growing}||w_{\textrm{new}}|}=0.914
$$
</p>
<div style="height: 13px;"></div>
<p>
We wanted to know if Perturbation has learned the same encoding as Growing,
so we tried to evolve both Growing and Perturbation together.
It turns out that they are able to communicate with each other and
coordinate the growth of the organism.
<p>
Even more interesting is the fact that Perturbation works as a switch even though we didn't explicitly train for it!
</p>
<p>
Finally we noticed that we can also use Perturbation as a Mask; however, it needs a high percentage of cells to work well (99%+).
Video <a href="#regen_near_grow">10</a> shows all the above points in action.
</p>
<figure class="l-body" id="regen_near_grow">
<video loop="" autoplay="" playsinline="" muted="" width="100%" height="100%">
<source src="pytorch_ca/Presentation_videos/perturbation_chekered.mp4" type="video/mp4" id="regen_near_grow">
Your browser does not support the video tag.
</video>
<figcaption>
Video 10: On the left you can see the cell mask, with yellow being the regenerating cells and blue the growing cells.
For the first 20 steps the CA evolves with a random mix of 20% regenerating cells and 80% of growing cells.
Then for the next 30 iterations it uses a checkered pattern.
Finally for the remaining 150 iterations we use only the regenerating cells.
</figcaption>
</figure>
<h3>Putting it all together</h3>
<p>
In order to train an adversarial whose weights are as close as possible to those of the original CA, we need to
use the following loss
</p>
<p>
$$
L = L_\textrm{target} +\lambda_1 N_\textrm{old} + \lambda_2 |\Delta w|^2
$$
</p>
<p>
And the forward propagation is the same as the one described in the section about the <a href="#model realistic simulation of adversarials">realistic simulation of adversarials</a>
</p>
<p>INSERIRE RISULTATI</p>
<dt-byline></dt-byline>
<h2 style="font-size: 40px;">Conclusion</h2>
<p>
We showed that it is possible to program adversarial cells to take over the old cells in order to modify global properties of the CA.
We also showed that the Persist rule is also close to the Growing rule, this might mean that only a small modification to the DNA should do the trick.
</p>
<!--
<h3><a class="marker" href="#section-5" id="section-5">5</a> Does any of this apply to Biology?</h2>
<p>
Shorth answer: Probably yes.
</p>
<p>
To be able to answer this we need to know what the differences are between our model and a real biological system.
</p>
<p>
First of all the cells in a organism are not in a grid, but it would be foolish to think that this model works only on a 2-D grid.
</p>
<p>
Secondly, and most important, the parameters that govern the update rule of the cells are the genes in the DNA.
This
</p>
-->
</dt-article>
<dt-appendix>
<h3>Glossary</h3>
<ul>
<li>Growing: CA trained to reach the target image in a finite number of steps, but then it can do whatever it wants</li>
<li>Persist: CA trained to reach the target image in a finite number of steps and then keep its shape </li>
<li>Regenerating: CA trained to reach the target image in a finite number of steps, keep its shape and be able to resist damage</li>
<li>Switch</li>
<li>Mask</li>
<li>Virus</li>
<li>Perturbation</li>
</ul>
</dt-appendix>
<!--Bibliography-->
<script type="text/bibliography">
@article{mordvintsev2020growing,
author = {Mordvintsev, Alexander and Randazzo, Ettore and Niklasson, Eyvind and Levin, Michael},
title = {Growing Neural Cellular Automata},
journal = {Distill},
year = {2020},
url = {https://distill.pub/2020/growing-ca},
}
@article{randazzo2021adversarial,
author = {Randazzo, Ettore and Mordvintsev, Alexander and Niklasson, Eyvind and Levin, Michael},
title = {Adversarial Reprogramming of Neural Cellular Automata},
journal = {Distill},
year = {2021},
url = {https://distill.pub/selforg/2021/adversarial},
doi = {10.23915/distill.00027.004}
}
@article{handberg2008stem,
title={Stem cells and regeneration in planarians},
author={Handberg-Thorsager, Mette and Fernandez, Enrique and Salo, Emili},
journal={Front Biosci},
volume={13},
pages={6374--6394},
year={2008},
url ={https://www.fbscience.com/Landmark/articles/pdf/Landmark3160.pdf}
}
@article{TransmembraneVoltagePotential,
author = {Pai, Vaibhav P. and Aw, Sherry and Shomrat, Tal and Lemire, Joan M. and Levin, Michael},
title = {Transmembrane voltage potential controls embryonic eye patterning in Xenopus laevis},
journal = {Development},
volume = {139},
number = {2},
pages = {313-323},
year = {2012},
month = {01},
issn = {0950-1991},
doi = {10.1242/dev.073759},
url = {https://doi.org/10.1242/dev.073759},
eprint = {https://journals.biologists.com/dev/article-pdf/139/2/313/1160014/313.pdf}
}
</script>
<script src="https://d3js.org/d3.v4.js"></script>
<script src="website/grid_value.js" type="text/javascript"></script>
<script src="website/grid_yellow_value.js" type="text/javascript"></script>
<script src="website/grid.js" type="text/javascript"></script>
<script src="website/epoch_loss.js"></script>
<script src="website/epoch_loss_growing.js"></script>
<script>
// Re-run both graph layout routines whenever the window is resized.
const handleWindowResize = () => {
    onresize1();
    onresize2();
};
window.addEventListener("resize", handleWindowResize);
</script>