% ann_1.aux (forked from Magnulas/ANN13): LaTeX auxiliary file, 99 lines, 8.81 KB
\relax
\providecommand\HyperFirstAtBeginDocument{\AtBeginDocument}
\HyperFirstAtBeginDocument{\ifx\hyper@anchor\@undefined
\global\let\oldcontentsline\contentsline
\gdef\contentsline#1#2#3#4{\oldcontentsline{#1}{#2}{#3}}
\global\let\oldnewlabel\newlabel
\gdef\newlabel#1#2{\newlabelxx{#1}#2}
\gdef\newlabelxx#1#2#3#4#5#6{\oldnewlabel{#1}{{#2}{#3}}}
\AtEndDocument{\ifx\hyper@anchor\@undefined
\let\contentsline\oldcontentsline
\let\newlabel\oldnewlabel
\fi}
\fi}
\global\let\hyper@last\relax
\gdef\HyperFirstAtBeginDocument#1{#1}
\providecommand\HyField@AuxAddToFields[1]{}
\select@language{english}
\@writefile{toc}{\select@language{english}}
\@writefile{lof}{\select@language{english}}
\@writefile{lot}{\select@language{english}}
\@writefile{toc}{\contentsline {section}{\numberline {1}One-layer neural network with the delta rule}{1}{section.1}}
\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces The one-layer neural network layout and its matrix transformation.}}{1}{figure.1}}
\newlabel{fig:linear1:sub1}{{2(a)}{2}{Subfigure 2(a)\relax }{subfigure.2.1}{}}
\newlabel{sub@fig:linear1:sub1}{{(a)}{2}{Subfigure 2(a)\relax }{subfigure.2.1}{}}
\newlabel{fig:linear1:sub2}{{2(b)}{2}{Subfigure 2(b)\relax }{subfigure.2.2}{}}
\newlabel{sub@fig:linear1:sub2}{{(b)}{2}{Subfigure 2(b)\relax }{subfigure.2.2}{}}
\newlabel{fig:linear1:sub3}{{2(c)}{2}{Subfigure 2(c)\relax }{subfigure.2.3}{}}
\newlabel{sub@fig:linear1:sub3}{{(c)}{2}{Subfigure 2(c)\relax }{subfigure.2.3}{}}
\newlabel{fig:linear1:sub4}{{2(d)}{2}{Subfigure 2(d)\relax }{subfigure.2.4}{}}
\newlabel{sub@fig:linear1:sub4}{{(d)}{2}{Subfigure 2(d)\relax }{subfigure.2.4}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {2}{\ignorespaces One-layer neural network test runs}}{2}{figure.2}}
\@writefile{lof}{\contentsline {subfigure}{\numberline{(a)}{\ignorespaces {Separable data}}}{2}{figure.2}}
\@writefile{lof}{\contentsline {subfigure}{\numberline{(b)}{\ignorespaces {Separating line between the data}}}{2}{figure.2}}
\@writefile{lof}{\contentsline {subfigure}{\numberline{(c)}{\ignorespaces {The movement of the separating line to its final position.}}}{2}{figure.2}}
\@writefile{lof}{\contentsline {subfigure}{\numberline{(d)}{\ignorespaces {The error function we're minimizing with the delta rule.}}}{2}{figure.2}}
\newlabel{fig:linear2:sub1}{{3(a)}{3}{Subfigure 3(a)\relax }{subfigure.3.1}{}}
\newlabel{sub@fig:linear2:sub1}{{(a)}{3}{Subfigure 3(a)\relax }{subfigure.3.1}{}}
\newlabel{fig:linear2:sub2}{{3(b)}{3}{Subfigure 3(b)\relax }{subfigure.3.2}{}}
\newlabel{sub@fig:linear2:sub2}{{(b)}{3}{Subfigure 3(b)\relax }{subfigure.3.2}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {3}{\ignorespaces Larger eta values cause the weights to oscillate}}{3}{figure.3}}
\@writefile{lof}{\contentsline {subfigure}{\numberline{(a)}{\ignorespaces {The movement of the separating line to its final position.}}}{3}{figure.3}}
\@writefile{lof}{\contentsline {subfigure}{\numberline{(b)}{\ignorespaces {The error function we're minimizing with the delta rule.}}}{3}{figure.3}}
\newlabel{fig:linear3:sub1}{{4(a)}{3}{Subfigure 4(a)\relax }{subfigure.4.1}{}}
\newlabel{sub@fig:linear3:sub1}{{(a)}{3}{Subfigure 4(a)\relax }{subfigure.4.1}{}}
\newlabel{fig:linear3:sub2}{{4(b)}{3}{Subfigure 4(b)\relax }{subfigure.4.2}{}}
\newlabel{sub@fig:linear3:sub2}{{(b)}{3}{Subfigure 4(b)\relax }{subfigure.4.2}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {4}{\ignorespaces With non-separable data the weights still converge, but we don't get a good classifier.}}{3}{figure.4}}
\@writefile{lof}{\contentsline {subfigure}{\numberline{(a)}{\ignorespaces {The movement of the separating line to its final position.}}}{3}{figure.4}}
\@writefile{lof}{\contentsline {subfigure}{\numberline{(b)}{\ignorespaces {The error function we're minimizing with the delta rule.}}}{3}{figure.4}}
\@writefile{toc}{\contentsline {section}{\numberline {2}Two-layer neural network with backprop}{4}{section.2}}
\newlabel{fig:twolay1}{{2}{4}{Two-layer neural network with backprop\relax }{section.2}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {5}{\ignorespaces The two-layer neural network layout}}{4}{figure.5}}
\newlabel{fig:twolay2:sub1}{{6(a)}{5}{Subfigure 6(a)\relax }{subfigure.6.1}{}}
\newlabel{sub@fig:twolay2:sub1}{{(a)}{5}{Subfigure 6(a)\relax }{subfigure.6.1}{}}
\newlabel{fig:twolay2:sub2}{{6(b)}{5}{Subfigure 6(b)\relax }{subfigure.6.2}{}}
\newlabel{sub@fig:twolay2:sub2}{{(b)}{5}{Subfigure 6(b)\relax }{subfigure.6.2}{}}
\newlabel{fig:twolay2:sub3}{{6(c)}{5}{Subfigure 6(c)\relax }{subfigure.6.3}{}}
\newlabel{sub@fig:twolay2:sub3}{{(c)}{5}{Subfigure 6(c)\relax }{subfigure.6.3}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {6}{\ignorespaces Two-layer network with varying numbers of hidden nodes}}{5}{figure.6}}
\@writefile{lof}{\contentsline {subfigure}{\numberline{(a)}{\ignorespaces {Non-separable data to run the two-layer network on.}}}{5}{figure.6}}
\@writefile{lof}{\contentsline {subfigure}{\numberline{(b)}{\ignorespaces {Euclidean error for different numbers of nodes.}}}{5}{figure.6}}
\@writefile{lof}{\contentsline {subfigure}{\numberline{(c)}{\ignorespaces {Misclassified instances for different numbers of nodes.}}}{5}{figure.6}}
\newlabel{fig:twolay3:sub1}{{7(a)}{6}{Subfigure 7(a)\relax }{subfigure.7.1}{}}
\newlabel{sub@fig:twolay3:sub1}{{(a)}{6}{Subfigure 7(a)\relax }{subfigure.7.1}{}}
\newlabel{fig:twolay3:sub2}{{7(b)}{6}{Subfigure 7(b)\relax }{subfigure.7.2}{}}
\newlabel{sub@fig:twolay3:sub2}{{(b)}{6}{Subfigure 7(b)\relax }{subfigure.7.2}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {7}{\ignorespaces With non-separable data the weights still converge, but we don't get a good classifier.}}{6}{figure.7}}
\@writefile{lof}{\contentsline {subfigure}{\numberline{(a)}{\ignorespaces {Eta change for the two-layer network.}}}{6}{figure.7}}
\@writefile{lof}{\contentsline {subfigure}{\numberline{(b)}{\ignorespaces {The effect of momentum on the two-layer network.}}}{6}{figure.7}}
\@writefile{toc}{\contentsline {section}{\numberline {3}Two-layer neural network used for compression}{7}{section.3}}
\newlabel{fig:compress}{{3}{7}{Two-layer neural network used for compression\relax }{section.3}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {8}{\ignorespaces The two-layer neural network layout for the compression network}}{7}{figure.8}}
\newlabel{fig:compress2}{{3}{7}{Two-layer neural network used for compression\relax }{lstnumber.-1.15}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {9}{\ignorespaces Results from compression network}}{7}{figure.9}}
\@writefile{toc}{\contentsline {section}{\numberline {4}Two-layer neural network used for approximation}{8}{section.4}}
\newlabel{fig:gauss1:sub1}{{10(a)}{8}{Subfigure 10(a)\relax }{subfigure.10.1}{}}
\newlabel{sub@fig:gauss1:sub1}{{(a)}{8}{Subfigure 10(a)\relax }{subfigure.10.1}{}}
\newlabel{fig:gauss1:sub2}{{10(b)}{8}{Subfigure 10(b)\relax }{subfigure.10.2}{}}
\newlabel{sub@fig:gauss1:sub2}{{(b)}{8}{Subfigure 10(b)\relax }{subfigure.10.2}{}}
\newlabel{fig:gauss1:sub3}{{10(c)}{8}{Subfigure 10(c)\relax }{subfigure.10.3}{}}
\newlabel{sub@fig:gauss1:sub3}{{(c)}{8}{Subfigure 10(c)\relax }{subfigure.10.3}{}}
\newlabel{fig:gauss1}{{4}{8}{Two-layer neural network used for approximation\relax }{subfigure.10.3}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {10}{\ignorespaces Two-layer approximation of the Gaussian function.}}{8}{figure.10}}
\@writefile{lof}{\contentsline {subfigure}{\numberline{(a)}{\ignorespaces {The approximated Gaussian function.}}}{8}{figure.10}}
\@writefile{lof}{\contentsline {subfigure}{\numberline{(b)}{\ignorespaces {The real Gaussian function.}}}{8}{figure.10}}
\@writefile{lof}{\contentsline {subfigure}{\numberline{(c)}{\ignorespaces {Non-separable data to run the two-layer network on.}}}{8}{figure.10}}
\@writefile{toc}{\contentsline {section}{\numberline {5}Two-layer neural network used for generalisation}{9}{section.5}}
\newlabel{fig:gauss2:sub1}{{11(a)}{9}{Subfigure 11(a)\relax }{subfigure.11.1}{}}
\newlabel{sub@fig:gauss2:sub1}{{(a)}{9}{Subfigure 11(a)\relax }{subfigure.11.1}{}}
\newlabel{fig:gauss2:sub2}{{11(b)}{9}{Subfigure 11(b)\relax }{subfigure.11.2}{}}
\newlabel{sub@fig:gauss2:sub2}{{(b)}{9}{Subfigure 11(b)\relax }{subfigure.11.2}{}}
\newlabel{fig:gauss2}{{5}{9}{Two-layer neural network used for generalisation\relax }{subfigure.11.2}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {11}{\ignorespaces Two-layer approximation of the Gaussian function.}}{9}{figure.11}}
\@writefile{lof}{\contentsline {subfigure}{\numberline{(a)}{\ignorespaces {Generalization depending on training data size.}}}{9}{figure.11}}
\@writefile{lof}{\contentsline {subfigure}{\numberline{(b)}{\ignorespaces {Generalization depending on the number of hidden nodes.}}}{9}{figure.11}}