% [Removed: GitHub page chrome and line-number gutter left over from a web scrape; the document source begins below.]
\def\module{M3P8 Algebra III}
\def\lecturer{Dr David Helm}
\def\term{Autumn 2018}
\def\cover{
$$
\begin{tikzpicture}
\draw [fill=lightgray, opacity=0.1, very thick] (0, 2) circle (7.5);
\draw (0, 9) node{Modules};
\draw [fill=lightgray, opacity=0.1, very thick] (0, -1.25) ellipse (5 and 4.25);
\draw (0, -5) node{Noetherian modules};
\draw [fill=lightgray, opacity=0.1, very thick] (0, 2) circle (6.5);
\draw (0, 8) node{Rings};
\draw (0, -4) node{Noetherian rings};
\draw [fill=lightgray, opacity=0.1, very thick] (0, 2) circle (5.5);
\draw (0, 6.5) node{Integral domains};
\draw (0, -3) node{Noetherian domains};
\draw [fill=lightgray, opacity=0.1, very thick] (0, 1.75) ellipse (4.75 and 4.25);
\draw (0, 5) node{Integrally closed domains};
\draw [fill=lightgray, opacity=0.1, very thick] (0, 1.75) ellipse (4 and 2.75);
\draw (0, 3.5) node{Unique factorisation domains};
\draw [fill=gray, opacity=0.1, very thick] (0, 0.25) ellipse (4 and 2.75);
\draw (0, -2) node{Dedekind domains};
\draw (0, 2.5) node{Principal ideal domains};
\draw [fill=gray, opacity=0.1, very thick] (0, 0.5) ellipse (2.5 and 1.5);
\draw (0, 1.5) node{Euclidean domains};
\draw [fill=gray, opacity=0.1, very thick] (0, 0) circle (1);
\draw (0, 0) node{Fields};
\end{tikzpicture}
$$
}
\def\syllabus{Rings. Homomorphisms, ideals, and quotients. Factorisation. The Chinese remainder theorem. Fields and field extensions. Finite fields. $ R $-modules. Noetherian rings and modules. Polynomial rings in several variables. Integral extensions and algebraic integers. Dedekind domains. Integers in number fields. Introduction to algebraic geometry.}
\def\thm{subsection}
\input{header}
\begin{document}
\input{cover}
\section{Introduction}
\lecture{1}{Friday}{05/10/18}
This course is an introduction to ring theory. The topics covered will include ideals, factorisation, the theory of field extensions, finite fields, polynomial rings in several variables, and the theory of modules. In addition to the lecture notes, the following will cover much of the material we will be studying.
\begin{itemize}
\item M Artin, Algebra, 1991
\end{itemize}
Rings are contexts in which it makes sense to add and multiply. For example,
$$ \ZZ, \qquad \QQ, \qquad \RR, \qquad \CC, \qquad \text{polynomials}, \qquad \cbr{0, 1} \to \RR, \qquad \ZZ / n\ZZ $$
are rings. The goals of this course include
\begin{itemize}
\item to unify arguments that apply in all of the above contexts, and
\item to study relationships between different rings.
\end{itemize}
The applications of rings include
\begin{itemize}
\item number theory, by studying extensions of $ \ZZ $ in which particular Diophantine equations have solutions, such as $ n = x^2 + y^2 = \br{x + iy}\br{x - iy} $, to study solutions in $ \ZZ\sbr{i} $ and pass to result about $ \ZZ $,
\item algebraic geometry, by the study of zero sets of polynomials in several variables via rings of functions, and
\item topology, by the cohomology classes of topological spaces.
\end{itemize}
\begin{note*}
The official notes are integrated in these unofficial notes.
\end{note*}
\pagebreak
\section{Basic definitions and examples}
\subsection{Rings}
Recall the definition of a commutative ring.
\begin{definition}
A \textbf{commutative ring with identity} $ R $ is a set together with two binary operations
$$ +_R : R \times R \to R, \qquad \cdot_R : R \times R \to R, $$
\textbf{addition} and \textbf{multiplication}, and two distinguished elements $ 0_R $ and $ 1_R $, such that the following holds.
\begin{itemize}
\item The operation $ +_R $ makes $ R $ into an abelian group with identity $ 0_R $, that is
\begin{itemize}
\item for all $ r \in R $, $ 0_R +_R r = r +_R 0_R = r $,
\item for all $ r, s, t \in R $, $ \br{r +_R s} +_R t = r +_R \br{s +_R t} $,
\item for all $ r, s \in R $, $ r +_R s = s +_R r $, and
\item for all $ r \in R $, there exists $ -r \in R $ such that $ r +_R \br{-r} = \br{-r} +_R r = 0_R $.
\end{itemize}
\item The operation $ \cdot_R $ is associative and commutative with identity $ 1_R $. That is,
\begin{itemize}
\item for all $ r \in R $, $ 1_R \cdot_R r = r \cdot_R 1_R = r $,
\item for all $ r, s, t \in R $, $ \br{r \cdot_R s} \cdot_R t = r \cdot_R \br{s \cdot_R t} $, and
\item for all $ r, s \in R $, $ r \cdot_R s = s \cdot_R r $.
\end{itemize}
\item Multiplication distributes over addition. That is,
\begin{itemize}
\item for all $ r, s, t \in R $, $ r \cdot_R \br{s +_R t} = r \cdot_R s +_R r \cdot_R t $, and
\item for all $ r, s, t \in R $, $ \br{s +_R t} \cdot_R r = s \cdot_R r +_R t \cdot_R r $.
\end{itemize}
\end{itemize}
\end{definition}
There is some redundancy here, of course. I have written things this way so that one obtains the definition of a \textbf{noncommutative ring} simply by removing the condition that multiplication is commutative. In this course, however, all rings will be commutative. When it is clear from the context what ring we are working with, we will write $ 0_R $ and $ 1_R $ as $ 0 $ and $ 1 $, $ a +_R b $ as $ a + b $, and $ a \cdot_R b $ as $ ab $.
\begin{proposition}
Let $ R $ be a ring. Then for all $ r \in R $, $ r \cdot_R 0_R = 0_R $.
\end{proposition}
\begin{proof}
$ r \cdot_R 0_R = r \cdot_R \br{0_R +_R 0_R} = r \cdot_R 0_R +_R r \cdot_R 0_R $, so
$$ 0_R = -\br{r \cdot_R 0_R} +_R \br{r \cdot_R 0_R} = -\br{r \cdot_R 0_R} +_R \br{r \cdot_R 0_R +_R r \cdot_R 0_R} = r \cdot_R 0_R. $$
\end{proof}
\begin{note*}
Some definitions of rings require $ 1_R \ne 0_R $ in $ R $. We will not do this.
\end{note*}
\begin{proposition}
If $ 0_R = 1_R $, then $ R $ is the one-element ring $ \cbr{0_R} $.
\end{proposition}
\begin{proof}
We certainly have $ r = 1_R \cdot_R r = 0_R \cdot_R r $. On the other hand
$$ 0_R \cdot_R r = \br{0_R +_R 0_R} \cdot_R r = 0_R \cdot_R r +_R 0_R \cdot_R r, $$
and subtracting $ 0_R \cdot_R r $ from both sides we find that $ 0_R \cdot_R r = 0_R $.
\end{proof}
\begin{definition}
A ring $ R $ is a \textbf{field} if $ R \ne \cbr{0_R} $ and every nonzero element of $ R $ has a multiplicative inverse. That is, for every $ r \in R \setminus \cbr{0_R} $ there exists $ r^{-1} \in R $ such that
$$ rr^{-1} = r^{-1}r = 1_R. $$
\end{definition}
We do not consider the zero ring $ \cbr{0_R} $ to be a field. We have seen many examples of rings at this point.
\pagebreak
\begin{example*}
The sets $ \ZZ, \QQ, \RR, \CC $ are all rings with their usual notion of addition and multiplication. All of them but $ \ZZ $ are in fact fields. We have the ring $ \ZZ / n\ZZ $ of integers modulo $ n $. Let $ n \in \ZZ_{> 0} $, and recall that $ a $ and $ b $ are said to be \textbf{congruent modulo $ n $} if $ a - b $ is divisible by $ n $. It is easy to check that this is an equivalence relation on $ \ZZ $. Moreover, since any $ a \in \ZZ $ can uniquely be written as $ qn + r $ with $ q, r \in \ZZ $ and $ 0 \le r < n $, the set
$$ \cbr{\sbr{0}_n, \dots, \sbr{n - 1}_n} $$
is a complete list of the equivalence classes under this relation, where $ \sbr{a}_n $ denotes the set of all integers congruent to $ a \mod n $. We denote this $ n $-element set by $ \ZZ / n\ZZ $, and we can define addition and multiplication in $ \ZZ / n\ZZ $ by setting
$$ \sbr{a}_n + \sbr{b}_n = \sbr{a + b}_n, \qquad \sbr{a}_n\sbr{b}_n = \sbr{ab}_n. $$
This defines a ring structure on $ \ZZ / n\ZZ $, once one checks that it is well-defined. This is the first example of a general construction we will see more of later, the quotient of a ring by an ideal.
\end{example*}
\subsection{Polynomial rings}
\lecture{2}{Monday}{08/10/18}
A very important class of rings that we will study are the polynomial rings. Let $ R $ be any ring. Then we can form a new ring $ R\sbr{X} $, called the \textbf{ring of polynomials in $ X $ with coefficients in $ R $}. Informally, a polynomial in $ R\sbr{X} $ is a finite sum of the form
$$ r_0 + \dots + r_nX^n, \qquad n \in \ZZ_{\ge 0}, \qquad r_0, \dots, r_n \in R. $$
If $ n > m $, we consider $ r_0 + \dots + r_nX^n $ to represent the same polynomial of $ R\sbr{X} $ as $ s_0 + \dots + s_mX^m $ if $ r_i = s_i $ for $ i \le m $ and $ r_i = 0_R $ for $ i > m $. That is, you can pad out polynomials with terms of the form $ 0_RX^i $ without changing it. From a formal standpoint, it is better to define a polynomial to be an infinite sum
$$ \sum_{i = 0}^\infty r_iX^i = r_0 + r_1X + \dots, \qquad r_i \in R, $$
in which all but finitely many $ r_i $ are zero. This makes it easier to define addition and multiplication. The \textbf{degree} of such an expression is the largest $ i $ such that $ r_i $ is nonzero. We add and multiply in $ R\sbr{X} $ just as we would any other polynomials,
$$ \sum_{i = 0}^\infty r_iX^i +_{R\sbr{X}} \sum_{i = 0}^\infty s_iX^i = \sum_{i = 0}^\infty \br{r_i +_R s_i} X^i, \qquad \sum_{i = 0}^\infty r_iX^i \cdot_{R\sbr{X}} \sum_{i = 0}^\infty s_iX^i = \sum_{i = 0}^\infty \sum_{j = 0}^i \br{r_j \cdot_R s_{i - j}} X^i. $$
What about polynomial rings in more than one variable? Since the construction of polynomial rings takes an arbitrary ring as input, one can iterate it. Start with a ring $ R $, and consider first the ring $ R\sbr{X} $ and then the ring $ \br{R\sbr{X}}\sbr{Y} $. A polynomial of this has the form
$$ \sum_{i = 0}^\infty \sum_{j = 0}^\infty r_{ij}X^jY^i, \qquad r_{ij} \in R. $$
On the other hand, we can consider the ring $ \br{R\sbr{Y}}\sbr{X} $, whose polynomials have the form
$$ \sum_{i = 0}^\infty \sum_{j = 0}^\infty r_{ij}Y^jX^i, \qquad r_{ij} \in R. $$
Alternatively, we could consider the ring $ R\sbr{X, Y} $ whose polynomials are formal expressions of the form
$$ \sum_{i, j = 0}^\infty r_{ij}X^iY^j, \qquad r_{ij} \in R. $$
It is not hard to see that all three approaches yield the same ring. If we identify these elements, we see that addition and multiplication in any of these three rings gives the same answer. We will therefore primarily use notation like $ R\sbr{X, Y} $ for polynomial rings in multiple variables, but we will occasionally need to know that this is the same as $ \br{R\sbr{X}}\sbr{Y} $ or $ \br{R\sbr{Y}}\sbr{X} $. The identifications we have made here are an example of isomorphisms of rings, a notion we will make precise later.
\pagebreak
\subsection{Subrings and extensions}
\begin{definition}
Let $ R $ be a ring. A subset $ S $ of $ R $ is a \textbf{subring} of $ R $ if
\begin{itemize}
\item $ 0_R, 1_R, -1_R \in S $, and
\item $ S $ is closed under $ +_R $ and $ \cdot_R $, so if $ r, s \in S $, then so are $ r +_R s $ and $ r \cdot_R s $.
\end{itemize}
\end{definition}
\begin{example*}
$ \ZZ $ is a subring of $ \RR $, which is itself a subring of $ \CC $.
\end{example*}
Subrings inherit the additive and multiplicative structures from the ring that contains them, and are thus themselves rings. It is easy to see that the intersection of two subrings of $ R $, or even an arbitrary collection of subrings of $ R $, is also a subring of $ R $.
\begin{definition}
Now let $ S \subseteq R $ be a subring of a ring $ R $, and let $ \alpha $ be an element of $ R $. We can then form a subring $ S\sbr{\alpha} $ of $ R $, called the \textbf{subring of $ R $ generated by $ \alpha $ over $ S $}, as follows. An element of $ R $ lies in $ S\sbr{\alpha} $ if and only if it can be expressed in the form
$$ r_0 + \dots + r_n\alpha^n, \qquad n \in \ZZ_{\ge 0}, \qquad r_0, \dots, r_n \in S. $$
This operation is known as \textbf{adjoining} the element $ \alpha $ to the ring $ S $.
\end{definition}
\begin{example*}
Let $ i $ denote a square root of $ -1 $ in $ \CC $, and consider the subring $ \ZZ\sbr{i} $ of $ \CC $ formed by $ \ZZ \subseteq \CC $ and $ i $. This consists of all complex numbers that can be expressed as polynomials in $ i $ with integer coefficients. Note that such an expression need not be unique. For instance the element $ 1 + i $ of $ \ZZ\sbr{i} $ can also be written as $ 2 + i + i^2 $, and $ -1 = i^2 = i^6 = i + i^3 + i^{10} $.
\end{example*}
Indeed, since $ i^2 = -1 $, the following holds.
\begin{proposition}
We can uniquely express any element $ a_0 + \dots + a_ni^n $ of $ \ZZ\sbr{i} $ as $ a + bi $ for $ a, b \in \ZZ $.
\end{proposition}
\begin{proof}
Given $ \sum_{n = 0}^\infty a_ni^n $ with only finitely many $ a_n $ nonzero, set $ a = a_0 - a_2 + \dots \in \ZZ $ and $ b = a_1 - a_3 + \dots \in \ZZ $. Then
$$ \sum_{n = 0}^\infty a_ni^n = a + bi. $$
This expression is clearly unique, as if $ a + bi = c + di $ in $ \CC $ for $ a, b, c, d \in \ZZ $, then $ a = c $ and $ b = d $.
\end{proof}
If $ \alpha $ is more complicated, then the elements of $ R\sbr{\alpha} $ may well be harder to describe, and indeed, a nice description might not exist at all.
\begin{example*}
\hfill
\begin{itemize}
\item If $ \alpha $ is the real cube root of $ 2 $, then every element of $ \ZZ\sbr{\alpha} $ can be uniquely expressed as $ a + b\alpha + c\alpha^2 $, where $ a, b, c \in \ZZ $.
\item In $ \ZZ\sbr{\pi} $, any element has a unique expression in the form $ \sum_{n = 0}^\infty a_n\pi^n $, where all but finitely many $ a_n $ are zero. Suppose $ \sum_{n = 0}^\infty a_n\pi^n = \sum_{n = 0}^\infty b_n\pi^n $, then
$$ 0 = \sum_{n = 0}^\infty \br{a_n - b_n}\pi^n. $$
Since $ \pi $ is transcendental, this polynomial must be zero. Thus each $ a_n = b_n $.
\item The elements of $ \ZZ\sbr{\tfrac{1}{2}} $ can be expressed uniquely as $ a / b $, where $ b $ is a power of $ 2 $ and $ a $ is odd unless $ b = 1 $. If $ \alpha $ is a root of the polynomial $ x^2 - x / 2 + 1 $ then $ \alpha^2 \in \ZZ\sbr{\alpha} $ and $ \alpha^2 = \alpha / 2 - 1 $. Can show that every element of $ \ZZ\sbr{\alpha} $ can be uniquely expressed as $ a + b\alpha $, where $ a $ and $ b $ lie in $ \ZZ\sbr{\tfrac{1}{2}} $, but there are pairs $ a $ and $ b $ such that $ a + b\alpha $ does not lie in $ \ZZ\sbr{\alpha} $.
\end{itemize}
\end{example*}
\begin{exercise*}
For which pairs $ a $ and $ b $ of elements of $ \ZZ\sbr{\tfrac{1}{2}} $ does $ a + b\alpha $ lie in $ \ZZ\sbr{\alpha} $?
\end{exercise*}
An alternative way of defining the ring $ S\sbr{\alpha} $ is to note that it is the smallest subring of $ R $ containing $ S $ and $ \alpha $. In one direction, any such subring contains every expression of the form $ r_0 + \dots + r_n\alpha^n $, with $ r_i \in S $, so any subring of $ R $ containing $ S $ and $ \alpha $ contains $ S\sbr{\alpha} $. One can thus construct $ S\sbr{\alpha} $ as the intersection of every subring of $ R $ containing $ S $ and $ \alpha $. Since the intersection of any collection of subrings of $ R $ is a subring of $ R $ it is clear that this intersection is equal to $ S\sbr{\alpha} $ as defined above.
\pagebreak
\subsection{Integral domains and rings of fractions}
\lecture{3}{Wednesday}{10/10/18}
\begin{definition}
A \textbf{zero divisor} in a ring $ R $ is a nonzero element $ r $ of $ R $ such that there exists a nonzero $ s \in R $ with $ rs = 0 $. A ring $ R $ in which there are no zero divisors is called an \textbf{integral domain}.
\end{definition}
\begin{example*}
$ \ZZ $ is an integral domain and any subring of a field is an integral domain, but $ \ZZ / 6\ZZ $ is not an integral domain, as $ \sbr{2}\sbr{3} $ is $ 0 \mod 6 $ even though neither $ \sbr{2} $ nor $ \sbr{3} $ is $ 0 \mod 6 $.
\end{example*}
If $ R $ is an integral domain, then we can form the field of fractions of $ R $ in analogy to the way we build $ \QQ $ from $ \ZZ $.
\begin{definition}
Let $ R $ be an integral domain. The \textbf{field of fractions} $ K\br{R} $ is the set of equivalence classes of expressions of the form $ a / b $, where $ a $ and $ b $ are elements of $ R $ with $ b $ nonzero, and $ a / b $ is equivalent to $ a' / b' $ if and only if $ ab' = a'b $. We add and multiply elements of $ K\br{R} $ just as we do for fractions,
$$ \dfrac{a}{b} + \dfrac{a'}{b'} = \dfrac{ab' + ba'}{bb'}, \qquad \dfrac{a}{b} \cdot \dfrac{a'}{b'} = \dfrac{aa'}{bb'}. $$
Then $ K\br{R} $ is a field, and it contains $ R $ in a natural way as a subring if we identify $ r $ with $ r / 1_R \in K\br{R} $.
$$ 0_{K\br{R}} = \dfrac{0_R}{1_R}, \qquad 1_{K\br{R}} = \dfrac{1_R}{1_R}. $$
If $ a \ne 0 $ in $ R $, then $ b / a \in K\br{R} $, so
$$ \dfrac{a}{b} \cdot \dfrac{b}{a} = \dfrac{ab}{ba} \sim \dfrac{1}{1}. $$
\end{definition}
The field $ K\br{R} $ is in some sense the smallest field containing $ R $ as a subring. When we talk about homomorphisms and isomorphisms, we will be able to state this more precisely. More generally, let the \textbf{multiplicative system} $ S $ be a subset of $ R $ that contains $ 1_R $, does not contain $ 0_R $ and is closed under multiplication. That is, if $ a $ and $ b $ are in $ S $ then so is $ ab $. For any integral domain $ R $ and any multiplicative system $ S $, we can define $ S^{-1}R $ to be the subring of $ K\br{R} $ consisting of all fractions of the form $ a / b $ with $ b \in S $. It is easy to see that this is closed under addition and multiplication, and defines a ring in between $ R $ and $ K\br{R} $.
\begin{example*}
If $ R = \ZZ $ and $ S $ is the set of powers of $ 2 $, then $ S^{-1}R = \ZZ\sbr{\tfrac{1}{2}} $. On the other hand, if $ S $ is the set of odd integers, then $ S^{-1}R $ is the set of all rational numbers of the form $ a / b $ with $ b $ odd.
\end{example*}
In general $ S^{-1}R $ is the smallest subring of $ K\br{R} $ containing $ R $ in which every element of $ S $ has a multiplicative inverse, that is $ 1 / b \in S^{-1}R $ for all $ b \in S $. The process of obtaining $ S^{-1}R $ from $ R $ is called \textbf{localisation} and is an extremely powerful tool. One can even make sense of it when $ R $ is not an integral domain, but one has to be more careful. The equivalence relation on fractions is trickier, for example. We will not discuss this in this course but it will be quite useful in future courses.
\pagebreak
\section{Homomorphisms, ideals, and quotients}
\subsection{Homomorphisms}
Let $ R $ and $ S $ be rings. A ring homomorphism from $ R $ to $ S $ is, roughly, a way of interpreting elements of $ R $ as elements of $ S $, in a way that is compatible with the addition and multiplication laws on $ R $ and $ S $. More precisely, we have the following.
\begin{definition}
A function $ f : R \to S $ is a \textbf{ring homomorphism} if
\begin{enumerate}
\item $ f\br{1_R} = 1_S $,
\item for all $ r, r' \in R $, $ f\br{r +_R r'} = f\br{r} +_S f\br{r'} $,
\item for all $ r, r' \in R $, $ f\br{r \cdot_R r'} = f\br{r} \cdot_S f\br{r'} $.
\end{enumerate}
\end{definition}
\begin{note*}
If $ f $ is a homomorphism then $ f\br{0_R} = 0_S $. This is because $ f\br{0_R} = f\br{0_R + 0_R} = f\br{0_R} +_S f\br{0_R} $. Adding the additive inverse, in $ S $, of $ f\br{0_R} $ to both sides gives $ 0_S = f\br{0_R} $. Thus we do not need to require this as an axiom. On the other hand we do need to require $ f\br{1_R} = 1_S $. For certain $ R $ and $ S $ one can construct examples of maps $ f : R \to S $ that satisfy properties $ 2 $ and $ 3 $ of the definition without satisfying property $ 1 $.
\end{note*}
\begin{definition}
A bijective homomorphism $ f : R \to S $ is called an \textbf{isomorphism}. Write $ S \cong R $ to mean that $ S $ is isomorphic to $ R $. In this case one verifies easily that the inverse map $ f^{-1} : S \to R $ is also a bijective homomorphism.
\end{definition}
\begin{example*}
\hfill
\begin{itemize}
\item If $ R $ is a subring of $ S $, then the inclusion of $ R $ into $ S $ is a homomorphism. This is just a fancy way of saying that the addition and multiplication on $ R $ are induced from the corresponding operations on $ S $. In particular the inclusions $ \ZZ \subset \QQ \subset \RR \subset \CC $ are all homomorphisms.
\item The composition of two homomorphisms is a homomorphism, as is easily checked from the definitions.
\item The map $ \ZZ \to \ZZ / n\ZZ $ that takes $ m \in \ZZ $ into its congruence class modulo $ n $ is a ring homomorphism.
\end{itemize}
\end{example*}
In fact, this is a special case of the following construction.
\begin{proposition}
Let $ R $ be any ring. Then there is a unique ring homomorphism $ f : \ZZ \to R $ such that
$$ f\br{n} =
\begin{cases}
1_R + \dots + 1_R & n > 0 \\
0_R & n = 0 \\
-\br{1_R + \dots + 1_R} & n < 0
\end{cases}.
$$
\end{proposition}
\begin{proof}
Let $ f : \ZZ \to R $ be a homomorphism. Then, directly from the definition, we have $ f\br{0} = 0_R $ and $ f\br{1} = 1_R $. In particular for all $ n > 0 $,
$$ f\br{n} = f\br{1 + \dots + 1} = 1_R + \dots + 1_R, $$
where there are $ n $ copies of $ 1_R $ in the sum. Moreover, since
$$ 0_R = f\br{n + \br{-n}} = f\br{n} + f\br{-n}, $$
we find that $ f\br{-n} $ is the additive inverse of $ 1_R + \dots + 1_R $. Thus $ f\br{n} $ is determined, for all $ n $, completely by the fact that $ f $ is a homomorphism. In the converse direction, it is not hard to check that the map defined above is in fact a homomorphism.
\end{proof}
Thus, for any ring $ R $, we can regard an integer as an element of $ R $ via this homomorphism.
\pagebreak
\subsection{Evaluation homomorphisms}
Let $ R $ be a ring, and consider the ring $ R\sbr{X} $ of polynomials in $ X $ with coefficients in $ R $. If $ s $ is an element of $ R $, then we can define a homomorphism $ R\sbr{X} \to R $ by \textbf{evaluation at $ s $}. More precisely, given an element of $ R\sbr{X} $ of the form
$$ P\br{X} = r_0 + \dots + r_nX^n, \qquad n \in \ZZ_{\ge 0}, \qquad r_i \in R. $$
Then $ P\br{s} $ for $ s \in R $ is defined to be
$$ P\br{s} = r_0 + \dots + r_ns^n \in R. $$
Consider the map
$$ \function[\phi_s]{R\sbr{X}}{R}{P\br{X}}{P\br{s}}. $$
In effect, it substitutes $ s $ for $ X $. It is easy to check that this is in fact a ring homomorphism. More generally, if $ R $ and $ S $ are rings and $ f : R \to S $ is a homomorphism, and $ s $ is an element of $ S $, then we can define a map
$$ \function[\phi_{s, f}]{R\sbr{X}}{S}{r_0 + \dots + r_nX^n}{f\br{r_0} + \dots + f\br{r_n}s^n}. $$
That is, by applying $ f $ to the coefficients and substituting $ s $ for $ X $. Again, this is clearly a homomorphism. The evaluation homomorphisms $ \phi_{s, f} $ are a fundamental property of polynomial rings. In some sense, they are the reason polynomial rings are worth studying. In fact, the ring $ R\sbr{X} $ is uniquely characterised by the fact that homomorphisms from $ R\sbr{X} $ to $ S $ are in bijection with pairs $ \br{s, f} $, where $ f : R \to S $ is a homomorphism and $ s $ is an element of $ S $.
\subsection{Images, kernels, and ideals}
\begin{definition}
Let $ f : R \to S $ be a homomorphism. The \textbf{image} of $ f $ is
$$ \Im f = \cbr{f\br{r} \st r \in R} \subseteq S. $$
The \textbf{kernel} of $ f $ is
$$ \Ker f = \cbr{r \in R \st f\br{r} = 0} \subseteq R. $$
\end{definition}
The image of a homomorphism $ f : R \to S $ is easily seen to be a subring of $ S $.
\begin{example*}
If $ R $ is a subring of $ S $, $ f : R \to S $ is the inclusion and $ s $ lies in $ S $, then the image of the map $ \phi_{s, f} : R\sbr{X} \to S $ is precisely the subring $ R\sbr{s} $ of $ S $.
\end{example*}
By contrast, the kernel of a homomorphism $ f $ is almost never a subring of $ R $. For instance, subrings contain the identity. However, we have the following.
\lecture{4}{Friday}{12/10/18}
\begin{definition}
A nonempty subset $ I $ of $ R $ is an \textbf{ideal} of $ R $ if $ I $ is closed under addition, that is for all elements $ i $ and $ j $ of $ I $, $ i + j $ is an element of $ I $, and for all elements $ i $ of $ I $ and $ r $ of $ R $, $ ri $ is an element of $ I $.
\end{definition}
Then one can verify, directly from the definition, that the kernel of any homomorphism $ f : R \to S $ is an ideal of $ R $. Any ideal of $ R $ contains $ 0_R $, and conversely the subset $ \cbr{0_R} $ of $ R $ is an ideal, called the \textbf{zero ideal}.
\begin{note*}
A homomorphism $ f : R \to S $ is injective if and only if its kernel is the zero ideal. Forward direction is easy. Conversely, if $ f\br{x} = f\br{y} $, then $ f\br{x - y} = 0 $, so $ x - y \in \Ker f $. If $ \Ker f = \cbr{0} $, $ x = y $.
\end{note*}
The kernel of the homomorphism $ \ZZ \to R $ is either the zero ideal, or the ideal of multiples of $ n $ in $ \ZZ $ for some $ n > 0 $. We say that $ R $ has \textbf{characteristic zero} or \textbf{characteristic $ n $}, respectively. If not zero, the characteristic of $ R $ is the smallest $ n $ such that the sum of $ n $ copies of $ 1_R $ is equal to zero.
\pagebreak
\subsection{Ideals: examples and basic operations}
If $ r $ is an element of $ R $, then any ideal containing $ r $ contains any multiple $ sr $ of $ r $, for any $ s $ in $ R $. Conversely, one checks easily that the set
$$ \cbr{sr \st s \in R} $$
is an ideal of $ R $. It is known as the ideal of $ R $ generated by $ r $, and denoted $ \abr{r} $. An ideal generated by one element in this way is called a \textbf{principal ideal}.
\begin{note*}
The ideal generated by $ 1_R $, or more generally by any element of $ R $ with a multiplicative inverse, is all of $ R $. This ideal is called the \textbf{unit ideal} of $ R $.
\end{note*}
\begin{proposition}
$ R $ is a field if and only if the only ideals of $ R $ are the zero ideal $ \cbr{0} $ and unit ideal $ R $.
\end{proposition}
\begin{proof}
If $ R $ is a field, let $ I \subseteq R $ be a nonzero ideal. There exists a nonzero $ r \in I $. Then for all $ s \in R $, $ \br{sr^{-1}}\br{r} \in I $, so $ s \in I $ for all $ s \in R $. Conversely, if $ R $ has only the zero ideal and the unit ideal, let $ r \in R $ be nonzero, and let $ I = \cbr{sr \st s \in R} $. This is an ideal that is not the zero ideal, so it is all of $ R $. In particular, $ 1 \in I $, so there exists $ s \in R $ such that $ sr = 1 $.
\end{proof}
More generally, we have the following.
\begin{definition}
If $ S $ is a subset of elements of $ R $, then any ideal containing $ S $ contains all elements of $ R $ of the form
$$ r_0s_0 + \dots + r_ns_n, \qquad n \in \ZZ_{\ge 0}, \qquad r_i \in R, \qquad s_i \in S. $$
The set of all elements of this form is an ideal of $ R $, known as the \textbf{ideal of $ R $ generated by $ S $}, and denoted $ \abr{S} $. It is the intersection of all the ideals of $ R $ containing $ S $. It is also the smallest ideal of $ R $ containing $ S $.
\end{definition}
If $ S $ has one element, $ \abr{S} $ is a principal ideal. We will show soon that any ideal of $ \ZZ $ is a principal ideal, as is any ideal of the ring $ K\sbr{X} $ for any field $ K $. You may well have seen this in last year's
algebra course. On the other hand, there are rings in which not every ideal is principal. For example, the ideal $ \abr{X, Y} $ of $ K\sbr{X, Y} $ is not a principal ideal. Given ideals $ I $ and $ J $ there are several ways to create new ideals.
\begin{example*}
\hfill
\begin{itemize}
\item If $ I $ and $ J $ are ideals, then the intersection $ I \cap J $ is an ideal. Note that if $ I $ and $ J $ are given by generators, it might be hard to find generators for the intersection. Certainly it is not enough to intersect the generating sets.
\item If $ I $ and $ J $ are ideals, then the union of ideals is not usually an ideal. Taking $ R = \ZZ $, $ \abr{3} \cup \abr{5} $ contains $ 3 $ and $ 5 $ but not $ 3 + 5 $. The sum $ I + J = \cbr{i + j \st i \in I, j \in J} $ is an ideal. It is the smallest ideal containing both $ I $ and $ J $, or equivalently the ideal generated by $ I \cup J $.
\item If $ I $ and $ J $ are ideals, the product $ IJ $ is the ideal generated by $ \cbr{ij \st i \in I, \ j \in J} $. This may be strictly larger than the set of such products. For example, consider the product of the ideals $ I = \abr{X, Y} $ and $ J = \abr{Z, W} $ in $ R = K\sbr{X, Y, Z, W} $ for $ K $ a field. The product $ IJ = \abr{XZ, XW, YZ, YW} $ contains $ XZ + YW $, but the latter is not a product of an element in $ I $ with an element in $ J $.
\item If $ I $ and $ J $ are general ideals, the product of ideals $ I $ and $ J $ is always contained in the intersection of $ I $ and $ J $, but the two need not be equal, even in simple rings like $ \ZZ $, since $ \abr{3} \cdot \abr{3} = \abr{9} $ and $ \abr{3} \cap \abr{3} = \abr{3} $.
\end{itemize}
\end{example*}
\subsection{Quotients}
Let $ R $ be a ring and let $ I $ be an ideal of $ R $. If $ x $ and $ y $ are elements of $ R $, we say that $ x $ is congruent to $ y \mod I $ if $ x - y $ is in $ I $. This is an equivalence relation on $ R $. We denote the equivalence class of $ r $ by $ r + I $, or as the alternative notations $ \sbr{r}_I $ and $ \overline{r} $. It is the set
$$ \cbr{r + s \st s \in I}. $$
Let $ R / I $ denote the set of equivalence classes on $ R \mod I $. This set has the natural structure of a ring. The additive and multiplicative identities are $ 0_R + I $ and $ 1_R + I $, respectively, and addition and multiplication are defined by
$$ \br{r + I} + \br{s + I} = \br{r + s} + I, \qquad \br{r + I} \cdot \br{s + I} = \br{rs} + I $$
respectively. One has to check that these are well-defined, but this is not difficult.
\pagebreak
\begin{example*}
If $ R = \ZZ $ and $ I $ is the ideal generated by $ n $, then $ R / I $ is the ring $ \ZZ / n\ZZ $ that we have already seen.
\end{example*}
The ring $ R / I $ is called the \textbf{quotient} of $ R $ by the ideal $ I $. There is a natural quotient homomorphism, \textbf{reduction modulo $ I $},
$$ \function{R}{R / I}{r}{r + I}. $$
This homomorphism is surjective with kernel $ I $. We then have the following.
\begin{proposition}[Universal property of the quotient]
\label{prop:2.5.1}
Let $ I \subseteq R $ be an ideal and let $ f : R \to S $ be a homomorphism, and suppose that the kernel of $ f $ contains $ I $. Then there is a unique homomorphism
$$ \overline{f} : R / I \to S, $$
such that for all $ r \in R $, $ f\br{r} = \overline{f}\br{r + I} $.
\end{proposition}
\begin{proof}
Note that $ \overline{f} $ is necessarily unique, as every element of $ R / I $ has the form $ r + I $ for some $ r $. We must thus show that it is well-defined and gives a homomorphism. If $ r + I = r' + I $, then $ r $ and $ r' $ differ by an element of $ I $, so $ f\br{r - r'} = 0 $, so $ f\br{r} = f\br{r'} $ since $ I $ is contained in the kernel of $ f $. Thus $ \overline{f} $ is well-defined. Checking that it is a homomorphism follows from the fact that $ f $ is a homomorphism.
\end{proof}
\begin{note*}
The kernel of $ \overline{f} $ in Proposition \ref{prop:2.5.1} above is just the image of the kernel of $ f $ in $ R / I $. If the kernel of $ f $ is equal to $ I $, this image is the zero ideal and $ \overline{f} $ is injective. In particular, any homomorphism of $ R $ to $ S $ can be thought of as an isomorphism of some quotient of $ R $ with a subring of $ S $.
\end{note*}
\begin{example*}
Let $ R \subseteq S $ be a subring, $ \alpha \in S $, and $ \iota : R \to S $ be the inclusion map. Recall that we have an evaluation at $ \alpha $ by $ \phi_{\iota, \alpha} : R\sbr{X} \to S $. The image of this map is $ R\sbr{\alpha} $. Let $ I = \Ker \phi_{\iota, \alpha} $. Then $ \phi_{\iota, \alpha} $ descends to a map $ R\sbr{X} / I \to S $ that is injective with image $ R\sbr{\alpha} $. So $ R\sbr{\alpha} $ is isomorphic to a quotient of $ R\sbr{X} $.
\end{example*}
\subsection{Prime and maximal ideals}
\lecture{5}{Monday}{15/10/18}
\begin{definition}
\label{def:2.6.1}
An ideal $ I $ of $ R $ is \textbf{prime} if the quotient $ R / I $ is an integral domain. It is \textbf{maximal} if $ R / I $ is a field.
\end{definition}
\begin{note*}
As fields are integral domains, every maximal ideal is prime. The converse need not hold, of course. The zero ideal in $ \ZZ $ is prime but not maximal.
\end{note*}
\begin{lemma}
An ideal $ I $ is prime if and only if for every pair of elements $ r $ and $ s $ in $ R $ such that $ rs $ is in $ I $, either $ r $ is in $ I $ or $ s $ is in $ I $.
\end{lemma}
\begin{proof}
This is just a restatement of Definition \ref{def:2.6.1}. $ R / I $ is an integral domain if and only if whenever two elements $ r + I $ and $ s + I $ in $ R / I $ satisfy $ \br{r + I}\br{s + I} = 0 + I $ in $ R / I $, either $ r + I = 0 + I $ or $ s + I = 0 + I $ in $ R / I $. This is the same as saying $ rs $ lies in $ I $ if and only if either $ r $ or $ s $ lies in $ I $.
\end{proof}
\begin{lemma}
An ideal $ I $ is maximal if and only if the only ideals of $ R $ containing $ I $ are $ I $ and the unit ideal $ R $.
\end{lemma}
This justifies the name maximal for such ideals.
\begin{proof}
First suppose that $ R / I $ is a field. Recall that $ R / I $ is a field if and only if the only ideals of $ R / I $ are $ \cbr{0} $ and $ R / I $. Given an ideal $ J \subseteq R / I $, let $ \widetilde{J} $ be the preimage of $ J $ under $ R \to R / I $. Then $ \widetilde{J} $ is an ideal containing $ I $ and contained in $ R $, so $ J $ is either the zero ideal of $ R / I $, in which case $ \widetilde{J} $ is contained in, and thus equal to, $ I $, or $ J $ is all of $ R / I $, in which case $ \widetilde{J} $ contains $ I $ and an element of $ 1_R + I $, so $ \widetilde{J} $ contains $ 1_R $ and is thus the unit ideal of $ R $. Conversely, if the only ideals of $ R $ containing $ I $ are $ I $ and the unit ideal, then for any $ r $ in $ R \setminus I $, the ideal of $ R $ generated by $ I $ and $ r $ contains $ 1_R $. We can thus write $ 1_R = rs + i $, where $ i \in I $ and $ s \in R $. This means that $ s + I $ and $ r + I $ are multiplicative inverses of each other in $ R / I $, so $ R / I $ is a field.
\end{proof}
\pagebreak
\section{Factorisation}
In these notes $ R $ always denotes an integral domain.
\subsection{Divisibility, units, associates, and irreducibles}
\begin{definition}
Let $ r $ and $ s $ be elements of $ R $. We say $ r $ \textbf{divides} $ s $, denoted $ r \mid s $, if there exists $ r' \in R $ with $ rr' = s $, or, equivalently, $ s $ lies in the principal ideal $ \abr{r} $ generated by $ r $. An element $ r $ that divides $ 1_R $ is called a \textbf{unit} of $ R $, or, equivalently, $ \abr{r} = R $. The set of units in $ R $ forms a group under multiplication denoted $ R^\times $.
\end{definition}
For any element $ r \in R $ and any unit $ u $ of $ R $, both $ u $ and $ ur $ divide $ r $.
\begin{definition}
The elements of $ R $ of the form $ ur $, with $ u \in R^\times $, are called \textbf{associates} of $ r $.
\end{definition}
That is, $ r $ and $ r' $ are associates if $ r = ur' $ for a unit $ u \in R^\times $. This implies $ r \mid r' $, that is there exists $ u' $ with $ u'u = 1 $ and $ u'r = r' $.
\begin{note*}
The principal ideals $ \abr{r} $ and $ \abr{r'} $ are equal if and only if $ r $ and $ r' $ are associates.
\end{note*}
\begin{definition}
A nonzero element $ r $ of $ R $ is called \textbf{irreducible} if $ r $ is not a unit and the only elements of $ R $ that divide $ r $ are the units and the associates of $ r $.
\end{definition}
\subsection{Unique factorisation domains}
An interesting question is: when do elements of rings admit unique factorisations into irreducibles? To that end we define the following.
\begin{definition}
A \textbf{unique factorisation domain (UFD)} is a ring $ R $ in which
\begin{enumerate}
\item every nonzero, nonunit element $ r $ of $ R $ admits a factorisation as a finite product of irreducibles in $ R $, and
\item if $ r = p_1 \dots p_n = q_1 \dots q_m \in R $ are two factorisations of $ r $ as products of irreducibles $ p_i $ and $ q_i $, then $ n = m $ and, after permuting the $ q_i $, each $ q_i $ is an associate of $ p_i $.
\end{enumerate}
\end{definition}
Both conditions can fail.
\begin{example*}
\hfill
\begin{itemize}
\item There are certainly domains in which $ 1 $ can fail, although they are somewhat exotic. One example is to take the rational polynomial ring $ R = \CC\sbr{X^\QQ} $ with coefficients in $ \CC $, whose entries are finite formal sums
$$ \sum_{i = 0}^N a_iX^{n_i}, \qquad a_i \in \CC, \qquad n_i \in \QQ_{\ge 0}. $$
Any such expression of $ R $ is a polynomial in $ X^{1 / n} $ for some $ n $. The element $ X $ of this ring is not a unit, and also not a finite product of irreducibles. In $ \CC\sbr{X^\QQ} $, $ X $ factors as $ \br{X^{1 / n}}^n $, so $ X $ has no factorisation into irreducibles in $ R $.
\item Even if $ 1 $ holds, $ 2 $ often fails. The classic example of this is $ R = \ZZ\sbr{\sqrt{-5}} $, in which $ 2, 3, 1 + \sqrt{-5}, 1 - \sqrt{-5} $ are all irreducibles, none are associates of each other, yet $ \br{2}\br{3} = \br{1 + \sqrt{-5}}\br{1 - \sqrt{-5}} $.
\end{itemize}
\end{example*}
We will show later that a very mild finiteness condition on a domain $ R $, the condition that $ R $ is Noetherian, actually guarantees that $ 1 $ holds. Another way to interpret condition $ 2 $ is as follows.
\begin{definition}
We say an element $ r $ of $ R $ is \textbf{prime} if the principal ideal $ \abr{r} $ of $ R $ is a prime ideal. In other words, for any $ s $ and $ s' $ in $ R $, if $ r $ divides $ ss' $, then $ r \mid s $ or $ r \mid s' $.
\end{definition}
\begin{lemma}
Prime elements are irreducible.
\end{lemma}
\begin{proof}
If $ r $ is prime and $ s $ divides $ r $, we can write $ r = ss' $. Then since $ r $ divides $ ss' $ we have that either $ r $ divides $ s $, in which case $ rs'' = s $, then $ ss's'' = s $ and $ s's'' = 1 $, so $ r $ is an associate of $ s $, or $ r $ divides $ s' $, in which case $ s' = rs'' $, then $ r = srs'' $ and $ ss'' = 1 $, so $ r $ is an associate of $ s' $ and $ s $ is a unit.
\end{proof}
\pagebreak
The converse is not necessarily true, but we have the following observation as criteria for $ R $ to be a UFD.
\begin{proposition}
Let $ R $ be a domain in which condition $ 1 $ holds. Then condition $ 2 $ above holds for $ R $ if and only if every irreducible element of $ R $ is prime.
\end{proposition}
\begin{proof}
First suppose condition $ 2 $ holds, and let $ r $ be an irreducible element of $ R $. If $ r $ divides $ ab $, we can write $ rs = ab $ for some $ s \in R $. Expanding out $ s, a, b $ as products of irreducibles we see that $ r $ is an associate of some irreducible dividing $ a $ or $ b $, so $ r $ is prime. Conversely, if every irreducible element of $ R $ is prime, and we have products of irreducibles
$$ p_1 \dots p_n = q_1 \dots q_m, $$
then, since $ p_1 $ is prime, it divides the product $ q_1 \dots q_m $ and is thus an associate of some $ q_i $. We can thus cancel $ p_1 $ from the left and $ q_i $ from the right, after introducing a unit on one side. This is possible because $ R $ is an integral domain. Repeating the process we find that, up to reordering the terms and multiplying by units, the two expressions coincide.
\end{proof}
\subsection{Principal ideal domains}
\begin{definition}
An integral domain $ R $ is a \textbf{principal ideal domain (PID)} if every ideal of $ R $ is a principal ideal.
\end{definition}
\begin{theorem}
\label{thm:3.3.2}
Every PID is a UFD.
\end{theorem}
We first show $ 1 $. It is true for units trivially.
\begin{lemma}
Let $ R $ be a PID. Then every nonzero nonunit $ r \in R $ has an irreducible divisor.
\end{lemma}
\begin{proof}
Fix $ r = r_0 \in R $. We first show $ r $ has an irreducible factor. If $ r_0 $ is irreducible we are done. Otherwise if $ r_0 $ is not irreducible, we can choose an $ r_1 $, not a unit nor an associate of $ r_0 $, such that $ r_1 $ divides $ r_0 $, so $ r_0 = r_1s_1 $ with $ r_1 $ and $ s_1 $ not units. If $ r_1 $ is not irreducible we choose $ r_2 $ similarly, and repeat. If this process ever terminates we have found an irreducible divisor of $ r $. It suffices to show this terminates. Suppose it does not terminate. We obtain an increasing tower of ideals
$$ \abr{r_0} \subsetneq \abr{r_1} \subsetneq \dots. $$
Let $ I $ be the union of all these ideals generated by $ r_0, r_1, \dots $. Then $ I $ is an ideal, so it is generated by some element $ s \in I $. Thus $ s $ divides $ r_i $ for all $ i $. On the other hand, $ s $ lives in some $ \abr{r_j} $, so $ r_j $ divides $ s $. Thus $ s $ is an associate of $ r_j $, so $ I = \abr{s} = \abr{r_j} $, that is $ I \subseteq \abr{r_j} $. This contradicts our construction, because $ \abr{r_{j + 1}} \subseteq I $ and $ \abr{r_{j + 1}} \ne \abr{r_j} $.
\end{proof}
Thus $ r $ has an irreducible divisor $ s_0 $.
\begin{lemma}
Let $ R $ be a PID. Every nonzero nonunit $ r \in R $ is a finite product of irreducibles.
\end{lemma}
\begin{proof}
Write $ r = r_0s_0 $, where $ s_0 $ is an irreducible divisor of $ r $. If $ r_0 $ is a unit we are done. If not, let $ s_1 $ be an irreducible divisor of $ r_0 $ and write $ r_0 = r_1s_1 $. If $ r_1 $ is a unit we are done. Otherwise repeat. We obtain a sequence of irreducibles $ s_0, s_1, \dots $ such that $ s_0 \dots s_i $ divides $ r $ for all $ i $, so
$$ r = r_0s_0 = r_1s_1s_0 = \dots, $$
with $ s_0, s_1, \dots $ irreducible. If this process ever terminates we are done. Suppose it does not. Then we have a strictly increasing tower of ideals
$$ \abr{r} \subsetneq \abr{r_0} \subsetneq \abr{r_1} \subsetneq \dots. $$
This cannot continue forever. Arguing as above we arrive at a contradiction.
\end{proof}
Now we show $ 2 $.
\begin{proof}[Proof of Theorem \ref{thm:3.3.2}]
It suffices to show that in a PID every irreducible is prime. Let $ r \in R $ be irreducible, and suppose that $ r $ divides $ st $. Want $ r \mid s $ or $ r \mid t $. Let $ q $ be a generator of the ideal $ \abr{r, s} $ of $ R $, so $ \abr{r, s} = \abr{q} $. Then $ q $ divides $ r $, so either $ q $ is a unit or $ q $ is an associate of $ r $. If $ q $ is an associate of $ r $, then since $ q $ divides $ s $, $ r $ divides $ s $. On the other hand, if $ q $ is a unit, then the ideal generated by $ r $ and $ s $ is the unit ideal and $ 1 \in \abr{r, s} $, so we can write $ 1 = xr + ys $ for $ x $ and $ y $ elements of $ R $. We then have $ t = xrt + yst $, and since $ r $ divides both $ yst $ and $ xrt $, $ r $ divides $ t $.
\end{proof}
\pagebreak
\subsection{Euclidean domains}
\lecture{6}{Wednesday}{16/10/18}
One technique for proving that rings are PIDs is Euclid's algorithm. We formalise this in an abstract setting as follows.
\begin{definition}
Let $ R $ be an integral domain.
\begin{itemize}
\item A \textbf{Euclidean norm} on $ R $ is a function $ \N : R \to \ZZ_{\ge 0} $ such that for all $ a, b \in R $, with $ b \ne 0 $, there exist $ q, r \in R $ such that $ a = qb + r $, and either $ r = 0 $ or $ \N\br{r} < \N\br{b} $.
\item An integral domain $ R $ is called a \textbf{Euclidean domain} if there is a Euclidean norm on $ R $.
\end{itemize}
\end{definition}
\begin{theorem}
Any Euclidean domain is a PID.
\end{theorem}
\begin{proof}
Let $ R $ be a Euclidean domain, $ \N $ be a Euclidean norm on $ R $, and $ I \subseteq R $ be a nonzero ideal of $ R $. Choose a nonzero element $ a \in I $ with $ \N\br{a} $ minimal, that is if $ b \in I $ and $ b \ne 0 $, then $ \N\br{b} \ge \N\br{a} $. We claim that $ I = \abr{a} $. For any $ b \in I $, we can write $ b = qa + r $ with $ \N\br{r} < \N\br{a} $ unless $ r = 0 $. But $ r = b - qa $ lies in $ I $, and since $ \N\br{a} $ is the smallest norm of a nonzero element of $ I $, we must have $ r = 0 $, so $ b = qa $. Thus $ I $ is generated by $ a $ and we are done.
\end{proof}
\subsection{Examples}
\begin{example*}
\hfill
\begin{itemize}
\item The classic example of a Euclidean domain is $ \ZZ $, with $ \N\br{x} = \abs{x} $ for $ x \in \ZZ $.
\item The ring $ \ZZ\sbr{i} $ is a Euclidean domain, with $ \N\br{z} = z\overline{z} = \abs{z}^2 $, so
$$ \N\br{x + yi} = \abs{x + yi}^2 = x^2 + y^2. $$
To see this, note that given $ a $ and $ b $ in $ \ZZ\sbr{i} $ for $ b \ne 0 $, set $ q' = a / b \in \QQ\sbr{i} $. Write $ q' = x' + iy' $ for $ x', y' \in \QQ $. Let $ x $ and $ y $ be the closest integers to $ x' $ and $ y' $, such that $ \abs{x - x'}, \abs{y - y'} \le \tfrac{1}{2} $, and set $ q = x + iy $ in $ \ZZ\sbr{i} $ and $ r = a - bq $. Then
$$ \N\br{r} = \abs{r}^2 = \abs{a - bq}^2 = \abs{a - b\br{\dfrac{a}{b} + \br{q - q'}}}^2 = \abs{b\br{q - q'}}^2 = \abs{b}^2\abs{q - q'}^2 \le \dfrac{\N\br{b}}{2}. $$
\item Similar arguments can be used to prove that $ \ZZ\sbr{\alpha} $ is a Euclidean domain for
$$ \alpha = \sqrt{-2}, \qquad \alpha = \tfrac{-1 + \sqrt{-3}}{2}, \qquad \alpha = \tfrac{-1 + \sqrt{-7}}{2}. $$
Beyond this one needs other tricks, and for most $ \alpha $ unique factorisation fails.
\item A critical example is the polynomial ring $ K\sbr{X} $ for $ K $ a field. Here we can take $ \N\br{P\br{X}} $ to be the degree of $ P\br{X} $. Then, given polynomials $ P\br{X}, T\br{X} \in K\sbr{X} $ and $ T\br{X} \ne 0 $, we can use polynomial long division to write $ P\br{X} = Q\br{X}T\br{X} + R\br{X} $ for some $ Q\br{X} $ with the degree of $ R\br{X} $ strictly less than that of $ T\br{X} $, unless $ T\br{X} $ is constant, in which case we can make $ R\br{X} = 0 $. To prove this, fix $ T\br{X} $. If $ \deg T\br{X} = 0 $, then $ T\br{X} $ is constant, so $ T\br{X} = c \ne 0 \in K $. Take $ Q\br{X} = P\br{X} / c $ and $ R\br{X} = 0 $. Otherwise induct on $ \deg P\br{X} $. If $ \deg P\br{X} < \deg T\br{X} $, set $ R\br{X} = P\br{X} $ and $ Q\br{X} = 0 $. Suppose the claim is true for polynomials of degree $ n $ and $ P\br{X} $ has degree $ n + 1 $, so
$$ P\br{X} = \sum_{i = 0}^{n + 1} a_iX^i, \qquad T\br{X} = \sum_{i = 0}^d b_iX^i, \qquad d < n + 1. $$
Then $ S\br{X} = P\br{X} - \br{a_{n + 1} / b_d}X^{n + 1 - d}T\br{X} $ has degree $ n $. By the inductive hypothesis there exist $ Q\br{X} $ and $ R\br{X} $ with $ \deg R\br{X} < \deg T\br{X} $ such that $ S\br{X} = Q\br{X}T\br{X} + R\br{X} $, so
$$ P\br{X} = \br{\dfrac{a_{n + 1}}{b_d}X^{n + 1 - d} + Q\br{X}}T\br{X} + R\br{X}. $$
\item Later, will show if $ R $ is a UFD, then $ R\sbr{X} $ is also a UFD.
\end{itemize}
\end{example*}
\pagebreak
\section{The Chinese remainder theorem}
In elementary number theory, let $ m_1, m_2 \in \ZZ $ be relatively prime and $ a_1, a_2 \in \ZZ $. Then there exists $ a \in \ZZ $ such that $ a \equiv a_1 \mod m_1 $ and $ a \equiv a_2 \mod m_2 $. Moreover, $ a $ is unique up to congruence modulo $ m_1m_2 $. A question is given ideals $ I_1, \dots, I_r $ and $ a_1, \dots, a_r \in R $, when can we find a $ a \in R $ with $ a \in a_1 + I_1, \dots, a \in a_r + I_r $?
\subsection{Products}
\begin{definition}
Let $ R_1, \dots, R_n $ be rings. The \textbf{direct product} $ R_1 \times \dots \times R_n $ is a ring whose elements are $ n $-tuples $ \br{r_1, \dots, r_n} $ with $ r_i \in R_i $ for all $ i $. The addition and multiplication are given componentwise by
$$ \br{r_1, \dots, r_n} + \br{r_1', \dots, r_n'} = \br{r_1 + r_1', \dots, r_n + r_n'}, \qquad \br{r_1, \dots, r_n}\br{r_1', \dots, r_n'} = \br{r_1r_1', \dots, r_nr_n'}. $$
\end{definition}
\begin{note*}
The product comes with natural homomorphisms $ \pi_i $ for all $ i $, the \textbf{projection} onto the $ i $-th factor, defined by
$$ \function[\pi_i]{R_1 \times \dots \times R_n}{R_i}{\br{r_1, \dots, r_n}}{r_i}. $$
\end{note*}
The product also comes with the following universal property.
\begin{theorem}[Universal property of the product]
Let $ S, R_1, \dots, R_n $ be any rings. For any homomorphisms $ f_1 : S \to R_1, \dots, f_n : S \to R_n $, there exists a unique homomorphism
$$ f : S \to R_1 \times \dots \times R_n, $$
such that $ \pi_i \circ f = f_i $ for all $ i $.
\end{theorem}
\begin{proof}
Given $ f_i $, the homomorphism $ f $ is defined by $ f\br{s} = \br{f_1\br{s}, \dots, f_n\br{s}} $. Then $ \br{\pi_i \circ f}\br{s} = f_i\br{s} $. For uniqueness, if $ \br{\pi_i \circ g}\br{s} = f_i\br{s} $ for all $ i $, then $ g\br{s} = \br{f_1\br{s}, \dots, f_n\br{s}} = f\br{s} $.
\end{proof}
More generally, if $ I $ is any index set, and for each $ i \in I $ we have a ring $ R_i $, we can define the product $ \prod_i R_i $. An element $ r $ of this product is a choice, for each $ i \in I $, of an element of $ R_i $. We write such an element as $ \br{r_i}_{i \in I} $. For each $ j \in I $ we have a map
$$ \function[\pi_j]{\prod_i R_i}{R_j}{\br{r_i}_{i \in I}}{r_j}. $$
Such a product satisfies a very similar universal property. For any collection $ f_i : S \to R_i $ for $ i \in I $ of maps, we get a unique map
$$ f : S \to \prod_i R_i, $$
such that $ \pi_j \circ f = f_j $.
\subsection{The Chinese remainder theorem}
Let $ R $ be a ring, and let $ I_1, \dots, I_r $ be a finite collection of ideals of $ R $. We have the natural maps $ R \to R / I_1, \dots R \to R / I_r $, which are surjective with kernel $ I_j $. Consider the product map $ R \to R / I_1 \times \dots \times R / I_r $. It is easy to see that the kernel of this map is the set of $ r \in R $ such that $ r $ maps to zero in $ R / I_j $ for all $ j $. That is, the kernel is the intersection $ I_1 \cap \dots \cap I_r $. Call this ideal $ J $. We thus have an injective embedding $ R / J \hookrightarrow R / I_1 \times \dots \times R / I_r $. A natural question to ask is, what can we say about the image? In other words, given congruence classes modulo $ I_1, \dots, I_r $, when is there a single element of $ R $ that lives in all those congruence classes simultaneously?
\begin{note*}
Because the above map is injective, if one such element exists, then there is a unique congruence class modulo $ J $ that satisfies all of the required congruences.
\end{note*}
Of course, without further hypotheses we cannot expect this map to be surjective. Think about what happens when $ I_1 = I_2 $, for instance. Nonetheless, we have the following.
\begin{definition}
We will say $ I_1, \dots, I_r $ are \textbf{pairwise relatively prime} if for each $ i \ne j $, the sum $ I_i + I_j $ is the unit ideal in $ R $.
\end{definition}
\pagebreak
\begin{theorem}
Let $ R $ be a ring and $ I_1, \dots, I_r $ be pairwise relatively prime ideals. Then the natural map
$$ R / J \hookrightarrow R / I_1 \times \dots \times R / I_r $$
is an isomorphism.
\end{theorem}
\begin{proof}
We have to prove it is surjective. Fix any tuple $ \br{c_1, \dots, c_r} $ of elements of $ R $. We need to find $ c \in R $ such that $ c \in c_i + I_i $ for all $ i $. It suffices to construct, for each $ i $, an element $ e_i $ of $ R $ that is congruent to $ 1 \mod I_i $ and $ 0 \mod I_j $ for $ j \ne i $. Suppose we have such an element. Then the element
$$ c = c_1e_1 + \dots + c_re_r $$
is congruent to $ c_j \mod I_j $ for all $ j $. Given $ i $ and $ j $ with $ i \ne j $, we know that $ I_i + I_j $ is the unit ideal. That is, we can write $ a_{ij} + b_{ij} = 1 $ for $ a_{ij} \in I_i $ and $ b_{ij} \in I_j $. Then $ a_{ij} $ is congruent to $ 1 \mod I_j $ and $ 0 \mod I_i $ as an element of $ R / I_1 \times \dots \times R / I_r $, so $ a_{ij} $ has zero in the $ i $-th place and one in the $ j $-th place. Then for any $ j $ we can take $ e_j = \prod_{i \ne j} a_{ij} $, and $ e_j $ will be congruent to $ 1 \mod I_j $ and $ 0 \mod I_i $ for all $ i \ne j $, so $ e_j $ has one only in the $ j $-th place. So $ R \to R / I_1 \times \dots \times R / I_r $ is surjective. The result follows.
\end{proof}
\subsection{Examples}
When $ R = \ZZ $, then every ideal is principal, so we can write $ I_j = \abr{n_j} $ for all $ j $. The condition that $ I_i + I_j $ is the unit ideal becomes the condition that $ n_i \in \ZZ $ are pairwise relatively prime. In this case the ideal $ J $ is generated by the product $ n $ of the $ n_i $. Specialising, we find the version of the Chinese remainder theorem from elementary number theory.
\begin{theorem}
If $ \cbr{n_j \in \ZZ} $ is a finite collection of pairwise relatively prime integers, and $ n $ is their product, then for any $ c_1, \dots, c_r \in \ZZ $, there exists $ c \in \ZZ $ unique up to congruence modulo $ n $ such that $ c $ is congruent to $ c_i \mod n_i $ for all $ i $.
\end{theorem}
Now let $ K $ be a field and take $ R = K\sbr{X} $. If $ c_1, \dots, c_r \in K $ are distinct elements of $ K $, the ideals $ I_i = \abr{X - c_i} \subseteq R $ are such that $ I_i + I_j = \abr{X - c_i} + \abr{X - c_j} \ni c_i - c_j \in K^\times $, so $ I_i + I_j $ contains a unit and hence contains $ 1 $. That is, $ I_i + I_j $ is the unit ideal in $ R $ and the ideals $ I_i $ are pairwise relatively prime. Moreover, for each $ i $, $ I_i $ is the kernel of the evaluation map
$$ \function[f_i]{R}{K}{P\br{X}}{P\br{c_i}}. $$
Let
$$ \function[f]{R}{K \times \dots \times K}{P\br{X}}{\br{P\br{c_1}, \dots, P\br{c_r}}}. $$
Then the following diagram commutes.
$$
\begin{tikzcd}[column sep=1in]
R \arrow{r}{f} \arrow[twoheadrightarrow]{d} & K \times \dots \times K \\
R / J \arrow[swap]{r}{\sim} & R / I_1 \times \dots \times R / I_r \arrow[swap]{u}{\sim}
\end{tikzcd}.
$$
The Chinese remainder theorem implies that $ f $ is surjective. We thus have an isomorphism
$$ \function{R / I_i}{K}{P\br{X}}{P\br{c_i}}, $$
for all polynomials $ P $. We thus obtain the following.
\begin{theorem}
For any distinct $ a_1, \dots, a_n \in K $ and any $ c_1, \dots, c_n \in K $, there is a polynomial $ P\br{X} $ in $ K\sbr{X} $, unique up to congruence modulo $ \br{X - a_1} \dots \br{X - a_n} $, such that $ P\br{a_i} = c_i $ for all $ i $.
\end{theorem}
\pagebreak
\section{Fields and field extensions}
\lecture{7}{Friday}{19/10/18}
Next we will use that $ K\sbr{X} $ is a PID for $ K $ a field to study fields systematically.
\subsection{Prime fields}
Let $ K $ be a field. We have a unique ring homomorphism
$$ \function[\iota]{\ZZ}{K}{n}{n_K = 1_K + \dots + 1_K}, \qquad n \ge 0. $$
Let $ I $ be the kernel. Then $ \ZZ / I \hookrightarrow K $ so $ \ZZ / I $ is an integral domain, so $ I $ is a prime ideal. Thus $ I $ is either the zero ideal $ \cbr{0} $, if $ K $ has characteristic zero, or the ideal $ \abr{p} $ for some prime $ p $ of $ \ZZ $. In the former case $ I = \cbr{0} $, the injection $ \ZZ \hookrightarrow K $ extends to an inclusion
$$ \function{\QQ}{K}{\dfrac{a}{b}}{\br{\iota a}\br{\iota b}^{-1} = \dfrac{a_K}{b_K}}. $$
In the latter case $ I = \abr{p} $, we get an injection $ \ZZ / p\ZZ \hookrightarrow K $, which we often denote $ \FF_p $ when we think of it as a field. The upshot is that every field $ K $ contains exactly one of $ \QQ $ or $ \FF_p $ for $ p $ prime, in exactly one way depending on its characteristic. This field is called the \textbf{prime field} of $ K $, and it is contained in $ K $ in a unique way.
\subsection{Field extensions}
The prime fields are in some sense the smallest possible fields. Once we know they exist, it makes sense to study fields by studying pairs $ K $ and $ L $ of fields such that $ K \subseteq L $, trying to relate $ L $ to $ K $.
\begin{definition}
A \textbf{field extension} is such a pair of fields $ K $ and $ L $ with $ K \subseteq L $, and is often denoted $ L / K $.
\end{definition}
\begin{note*}
Such an inclusion of fields $ L / K $ makes $ L $ into a $ K $-vector space, that is a vector space over $ K $.
\end{note*}
\begin{definition}
We say that a field extension $ L / K $ is \textbf{finite} if $ L $ is finite dimensional as a $ K $-vector space. If this is the case, the \textbf{degree} of such an extension is the dimension of $ L $ as a $ K $-vector space $ \dim_K L $, and is denoted $ \sbr{L : K} $.
\end{definition}
\begin{proposition}
Let $ K \subseteq L \subseteq M $ be fields. Then $ M / K $ is finite if and only if $ M / L $ and $ L / K $ are both finite. If this is the case then
$$ \sbr{M : K} = \sbr{M : L}\sbr{L : K}. $$
\end{proposition}
\begin{proof}
First suppose that $ M / K $ is finite. Then $ L $ is a $ K $-subspace of $ M $, so finite dimensional as a $ K $-vector space. Moreover, there exists a $ K $-basis $ m_1, \dots, m_r $ of $ M $, and this basis spans $ M $ over $ K $ and thus also over $ L $. Thus $ M $ is finite dimensional as an $ L $-vector space, so $ M / L $ is finite. Conversely, suppose $ L / K $ and $ M / L $ are finite. Let $ e_1, \dots, e_n $ be a $ K $-basis for $ L $, and let $ f_1, \dots, f_m $ be an $ L $-basis for $ M $. We claim that
$$ e_1f_1, \dots, e_1f_m, \dots, e_nf_1, \dots, e_nf_m $$
is a $ K $-basis for $ M $. Every element $ x $ of $ M $ can be expressed uniquely as $ c_1f_1 + \dots + c_mf_m $ for $ c_i \in L $. Each $ c_i $ in turn can be expressed as $ d_{1, i}e_1 + \dots + d_{n, i}e_n $ for $ d_{j, i} \in K $. Thus we can express $ x $ as
$$ d_{1, 1}e_1f_1 + \dots + d_{n, 1}e_nf_1 + \dots + d_{1, m}e_1f_m + \dots + d_{n, m}e_nf_m. $$
In particular the set $ \cbr{e_if_j \st 1 \le i \le n, \ 1 \le j \le m} $ spans $ M $ over $ K $. In this case the degree of $ L $ over $ K $ is $ n $ and the degree of $ M $ over $ L $ is $ m $, so it remains to show that $ \cbr{e_if_j} $ is linearly independent over $ K $. Suppose we have elements $ d_{i, j} $ of $ K $ such that $ \sum_{i, j} d_{i, j}e_if_j = 0 $. Then, regrouping, we find that $ \sum_j \br{\sum_i d_{i, j}e_i}f_j = 0 $ is an $ L $-linear combination of the $ f_j $ that is zero. Since the $ f_j $ are linearly independent over $ L $ we must have $ \sum_i d_{i, j}e_i = 0 $ for all $ j $. Since the $ e_i $ are linearly independent over $ K $ we must have $ d_{i, j} = 0 $ for all $ i $ and $ j $.
\end{proof}
\pagebreak
\subsection{Extensions generated by one element}
\lecture{8}{Monday}{22/10/18}
Let $ L / K $ be a field extension, and let $ \alpha $ be an element of $ L $.
\begin{definition}
We let $ K\br{\alpha} $ denote the subfield of $ L $ consisting of all elements of $ L $ that can be expressed in the form $ P\br{\alpha} / Q\br{\alpha} $, where $ P $ and $ Q $ are polynomials with coefficients in $ K $ and $ Q\br{\alpha} $ is not zero. This is the smallest subfield of $ L $ containing $ K $ and $ \alpha $.
\end{definition}
Recall that if $ R $ and $ S $ are rings, $ f : R \to S $ is a homomorphism, and $ \alpha \in S $, then have
$$ \function[\phi_{f, \alpha}]{R\sbr{X}}{S}{\sum_{i = 0}^n r_iX^i}{\sum_{i = 0}^n f\br{r_i}\alpha^i}. $$
We also have a natural map
$$ \function{K\sbr{X}}{K\br{\alpha} \subseteq L}{P\br{X}}{P\br{\alpha}}. $$
extending the inclusion on $ K $. It is a ring homomorphism. Let $ I $ be the kernel of this homomorphism. We then get an injection of $ K\sbr{X} / I $ into the field $ K\br{\alpha} $. Thus $ K\sbr{X} / I $ is an integral domain, so $ I $ is a prime ideal of $ K\sbr{X} $. Since $ K\sbr{X} $ is a PID, every nonzero prime ideal is maximal. There are thus two cases. In the first case $ I $ is the zero ideal, which is not maximal. That is, there is no nonzero polynomial $ Q $ in $ K\sbr{X} $ such that $ Q\br{\alpha} $ is zero in $ L $. We say that $ \alpha $ is \textbf{transcendental} over $ K $ in this case. In the second case $ I = \abr{Q} $ for $ Q \in K\sbr{X} $ a nonzero irreducible polynomial, and $ I $ is a maximal ideal of $ K\sbr{X} $. In this case we say $ \alpha $ is \textbf{algebraic} over $ K $.
\begin{definition}
$ K\br{X} $ is the \textbf{field of rational functions} on $ X $,
$$ K\br{X} = \cbr{\dfrac{P\br{X}}{Q\br{X}} \st P, Q \in K\sbr{X}, \ Q \ne 0} / \sim. $$
\end{definition}
Assume first that $ \alpha $ is transcendental over $ K $, that is $ I = \cbr{0} $. Recall $ I = \cbr{P\br{X} \in K\sbr{X} \st P\br{\alpha} = 0} $. So in this case there is no nonzero polynomial $ P \in K\sbr{X} $ with $ P\br{\alpha} = 0 $. In this case the map taking $ P\br{X} $ to $ P\br{\alpha} $ is an injection of $ K\sbr{X} $ into $ K\br{\alpha} \subseteq L $. In particular every nonzero element of $ K\sbr{X} $ gets sent to a nonzero, hence invertible, element of $ L $. Thus the map from $ K\sbr{X} $ to $ L $ extends to an injective map from the field of fractions of $ K\sbr{X} $,
$$ \function{K\br{X}}{L}{\dfrac{P\br{X}}{Q\br{X}}}{\dfrac{P\br{\alpha}}{Q\br{\alpha}}}. $$
By definition of $ K\br{\alpha} $, this map is surjective so the image of this map is $ K\br{\alpha} $. In particular $ K\br{X} $ and $ K\br{\alpha} $ are isomorphic.
\begin{note*}
In this case $ K\br{\alpha} $ is infinite dimensional as a $ K $-vector space. It contains a subspace isomorphic to $ K\sbr{X} $, for instance.
\end{note*}
If $ \alpha $ is algebraic over $ K $, then $ I $ is a nonzero maximal ideal of the PID $ K\sbr{X} $, so it is generated by a single irreducible polynomial $ Q\br{X} $ in $ K\sbr{X} $. As a consequence, since the units in $ K\sbr{X} $ are just the nonzero constant polynomials, the polynomial $ Q\br{X} $ is well-defined up to a constant factor. It is called the \textbf{minimal polynomial} of $ \alpha $. By definition, it divides every polynomial $ P\br{X} $ such that $ P\br{\alpha} = 0 $. Since $ \abr{Q\br{X}} $ is maximal, the ring $ K\sbr{X} / \abr{Q\br{X}} $ is a field. Recall that for any $ P \in K\sbr{X} $, we can write $ P\br{X} $ uniquely as $ A\br{X}Q\br{X} + R\br{X} $ for $ \deg R < \deg Q $. So $ 1, X, \dots, X^{\deg Q - 1} $ are a $ K $-basis of $ K\sbr{X} / \abr{Q\br{X}} $. So its dimension as a $ K $-vector space is equal to the degree of $ Q\br{X} $. The map $ K\sbr{X} \to K\br{\alpha} \subseteq L $ descends to an injection of $ K\sbr{X} / \abr{Q\br{X}} $ into $ L $. Since its image is a subfield of $ K\br{\alpha} $ containing $ K $ and $ \alpha $, this map is an isomorphism
$$ K\br{\alpha} \cong K\sbr{X} / \abr{Q\br{X}}. $$
Thus in this case the extension $ K\br{\alpha} / K $ is a finite extension, of degree equal to the degree of $ Q\br{X} $. To summarise, we extend $ K $ by a single element by
\begin{itemize}
\item building $ K\sbr{X} $, and
\item either passing to the field of fractions $ K\br{X} $ to form a transcendental extension, or choosing an irreducible polynomial $ Q $ to form an algebraic extension $ K\sbr{X} / \abr{Q\br{X}} $.
\end{itemize}
Slightly informally, instead of $ K\sbr{X} / \abr{Q\br{X}} $, we sometimes write $ K\br{\alpha} $, where $ \alpha $ is a root of $ Q\br{X} $.
\pagebreak
\subsection{Algebraic extensions}
\begin{definition}
An extension $ L / K $ is \textbf{algebraic} if every element of $ L $ is algebraic over $ K $.
\end{definition}
\begin{proposition}
If $ L / K $ is finite, then $ L / K $ is algebraic.
\end{proposition}
\begin{proof}
Let $ d $ be the dimension of $ L $ over $ K $. Then for any $ \alpha \in L $, the $ d + 1 $ elements $ 1, \alpha, \dots, \alpha^d $ must be linearly dependent over $ K $. This gives a nonzero polynomial $ P $ of degree at most $ d $ such that $ P\br{\alpha} = 0 $.
\end{proof}
\begin{corollary}
Let $ L / K $ be a field extension, and suppose $ \alpha $ and $ \beta $ are elements of $ L $ algebraic over $ K $. Then $ \alpha + \beta $ and $ \alpha\beta $ are algebraic over $ K $. Moreover, if $ \alpha $ is nonzero then $ \alpha^{-1} $ is algebraic over $ K $.
\end{corollary}
\begin{proof}
Consider the chain of extensions $ K \subseteq K\br{\alpha} \subseteq K\br{\alpha, \beta} $, where we write $ K\br{\alpha, \beta} $ for $ \br{K\br{\alpha}}\br{\beta} $. Since $ \alpha $ is algebraic over $ K $, $ K\br{\alpha} $ is finite over $ K $, of degree $ \deg \alpha $. Since $ \beta $ is algebraic over $ K $, it is also algebraic over $ K\br{\alpha} $, so $ K\br{\alpha, \beta} $ is finite over $ K\br{\alpha} $, of degree at most $ \deg \beta $. Thus, by the tower law, $ K\br{\alpha, \beta} $ is finite over $ K $, of degree at most $ \deg \alpha\deg \beta $, and hence algebraic over $ K $. On the other hand, we also have a chain of extensions $ K \subseteq K\br{\alpha + \beta} \subseteq K\br{\alpha, \beta} $, so $ K\br{\alpha + \beta} $ is finite over $ K $, of degree at most $ \deg \alpha\deg \beta $. Hence $ \alpha + \beta $ is algebraic over $ K $. The proofs for $ \alpha\beta $ and $ \alpha^{-1} $ are similar.
\end{proof}
\begin{corollary}
For any extension $ L / K $, let $ L^{\alg} $ be the subset of $ L $ consisting of all elements of $ L $ that are algebraic over $ K $. Then $ L^{\alg} $ is a field.
\end{corollary}
\begin{proof}
We have seen that $ L^{\alg} $ is closed under addition, multiplication, and taking inverses. For example, if $ a_0 + \dots + a_n\alpha^n = 0 $, then $ a_0\br{\alpha^{-1}}^n + \dots + a_n = 0 $.
\end{proof}
\begin{example*}
In particular, the subfield $ \overline{\QQ} \subseteq \CC $ of complex numbers that are algebraic over $ \QQ $ is a field, called the \textbf{field of algebraic numbers}.
\end{example*}
\subsection{Example}
\begin{example*}
Consider the polynomial $ X^2 + X + 1 $ in $ \FF_2\sbr{X} $. It has no roots in $ \FF_2 $, so it is irreducible: since it is a polynomial of degree two, any nontrivial factor would be linear, and a linear factor would give a root. The other polynomials of degree two are
$$ X^2, \qquad X^2 + X = X\br{X + 1}, \qquad X^2 + 1 = \br{X + 1}^2, $$
so $ X^2 + X + 1 $ is the unique irreducible polynomial of degree two. Thus the quotient $ \FF_2\sbr{X} / \abr{X^2 + X + 1} $ is a field extension of degree two of $ \FF_2 $, which is denoted $ \FF_4 $. Its four elements are $ 0, 1, X, X + 1 $, or more precisely, their classes modulo $ \abr{X^2 + X + 1} $, and
$$
\begin{array}{c|cccc}
\cdot & 0 & 1 & X & X + 1 \\
\hline
0 & 0 & 0 & 0 & 0 \\
1 & 0 & 1 & X & X + 1 \\
X & 0 & X & X + 1 & 1 \\
X + 1 & 0 & X + 1 & 1 & X
\end{array}.
$$
In particular the multiplicative group of $ \FF_4 $ is cyclic of order three. This is not particularly surprising, as all groups of order three are cyclic. We will see later that the multiplicative group of any finite field is cyclic.
\end{example*}
\begin{proposition}
Let $ K $ be a field with four elements. Then $ K \cong \FF_4 $.
\end{proposition}
\begin{proof}
Let $ \alpha \in K $ with $ \alpha \ne 0 $ and $ \alpha \ne 1 $. Consider $ 1, \alpha, \alpha^2 $. Since $ K $ has dimension two over $ \FF_2 $, these three elements are linearly dependent. So there exists a nonzero polynomial $ P $ in $ \FF_2\sbr{X} $ of degree at most two such that $ P\br{\alpha} = 0 $. In fact $ P $ must be irreducible of degree two: if it had a factor of degree one, then a polynomial of degree one would vanish on $ \alpha $, so $ \alpha = 0 $ or $ \alpha = 1 $, a contradiction. Since $ X^2 + X + 1 $ is the unique irreducible polynomial of degree two over $ \FF_2 $, we have $ \alpha^2 + \alpha + 1 = 0 $. The map
$$ \function{\FF_2\sbr{X}}{K}{X}{\alpha} $$
descends to $ \FF_2\sbr{X} / \abr{X^2 + X + 1} \to K $. So $ \FF_4 $ embeds in $ K $, and since both fields have four elements, $ K \cong \FF_4 $.
\end{proof}
\pagebreak
\section{Finite fields}
\subsection{Finite fields}
\lecture{9}{Wednesday}{24/10/18}
Let $ K $ be a finite field. That is, a field with only finitely many elements. Then $ K $ has characteristic $ p $ for some prime $ p $, and is in particular a finite dimensional $ \FF_p $-vector space. Thus its order is a power $ p^r $ of $ p $ for $ r \in \ZZ_{> 0} $. If we fix a particular prime power $ p^r $, then two questions naturally arise. Does there exist a field of order $ p^r $? If so, can we classify fields of order $ p^r $ up to isomorphism? We will see that in fact, up to isomorphism, there is a unique field $ \FF_{p^r} $ of order $ p^r $.
\subsection{The Frobenius automorphism}
Let $ p $ be a prime. For any ring $ R $, the map $ x \mapsto x^p $ on $ R $ certainly satisfies $ \br{xy}^p = x^py^p $ for $ x, y \in R $. On the other hand,
$$ \br{x + y}^p = x^p + \binom{p}{1}xy^{p - 1} + \dots + \binom{p}{p - 1}x^{p - 1}y + y^p. $$
Now the binomial coefficients satisfy $ p \mid \binom{p}{i} = p! / \br{i!\br{p - i}!} $ for $ 1 \le i \le p - 1 $, so if $ R $ has characteristic $ p $, we have $ \br{x + y}^p = x^p + y^p $. Thus, when $ R $ has characteristic $ p $, the map $ x \mapsto x^p $ is a ring homomorphism from $ R $ to $ R $, called the \textbf{Frobenius endomorphism} of $ R $. If $ R $ is a field of characteristic $ p $, then the Frobenius endomorphism is injective. If in addition $ R $ is finite, then any injective map from $ R $ to $ R $ is surjective. In particular the Frobenius endomorphism is bijective, and hence an isomorphism from $ R $ to $ R $, when $ R $ is a finite field of characteristic $ p $. In this case we call the map $ x \mapsto x^p $ the \textbf{Frobenius automorphism}. Composing the Frobenius endomorphism with itself, we find that for any $ r $, $ x \mapsto x^{p^r} $ is also an endomorphism of any ring $ R $ of characteristic $ p $.
\begin{example*}
Let $ R = \FF_4 $. Then $ y \mapsto y^2 $ gives
$$ 0 \mapsto 0, \qquad 1 \mapsto 1, \qquad X \mapsto X + 1, \qquad X + 1 \mapsto X. $$
\end{example*}
Let $ K $ be a field of $ p^r $ elements. Then $ \alpha^{p^r} = \alpha $ for all $ \alpha \in K $. If $ \alpha = 0 $, this is clear. Otherwise $ \alpha \in K^* $, which is an abelian group of order $ p^r - 1 $. Lagrange's theorem implies that $ \alpha^{p^r - 1} = 1 $, so $ \alpha^{p^r} = \alpha $. We have the following.
\begin{proposition}
Let $ K $ be a field of characteristic $ p $, such that $ \alpha^{p^r} = \alpha $ for all $ \alpha \in K $. Let $ P\br{X} \in K\sbr{X} $ be an irreducible factor of $ X^{p^r} - X $ over $ K\sbr{X} $. Then every element $ \beta $ of $ K\sbr{X} / \abr{P\br{X}} $ satisfies $ \beta^{p^r} = \beta $.
\end{proposition}
\begin{proof}
Let $ d = \deg P $. We can write $ \beta = c_0 + \dots + c_{d - 1}X^{d - 1} $ with $ c_i \in K $, so that $ c_i^{p^r} = c_i $ by hypothesis. Moreover, since $ P\br{X} = 0 $ in $ K\sbr{X} / \abr{P\br{X}} $ and $ P\br{X} $ divides $ X^{p^r} - X $, we have $ X^{p^r} = X $ in $ K\sbr{X} / \abr{P\br{X}} $. Thus
$$ \beta^{p^r} = c_0^{p^r} + \dots + c_{d - 1}^{p^r}\br{X^{p^r}}^{d - 1} = c_0 + \dots + c_{d - 1}\br{X^{p^r}}^{d - 1} = c_0 + \dots + c_{d - 1}X^{d - 1} = \beta. $$
\end{proof}
\begin{corollary}
There exists a field $ K $ of characteristic $ p $ such that
\begin{enumerate}
\item $ \alpha^{p^r} = \alpha $ for all $ \alpha \in K $, and
\item the polynomial $ X^{p^r} - X $ of $ K\sbr{X} $ factors into linear factors over $ K\sbr{X} $.
\end{enumerate}
\end{corollary}
\begin{proof}
Let $ K_0 = \FF_p $. Then $ K_0 $ satisfies condition 1. We construct a tower of fields
$$ K_0 = \FF_p \subsetneq K_1 \subsetneq K_2 \subsetneq \dots $$
all satisfying condition 1 as follows. Suppose we have constructed $ K_i $ satisfying condition 1. If $ X^{p^r} - X $ factors into linear factors over $ K_i\sbr{X} $, we are done. Otherwise, choose an irreducible factor $ P_i\br{X} $ of $ X^{p^r} - X $ in $ K_i\sbr{X} $ of degree at least two, and set $ K_{i + 1} = K_i\sbr{X} / \abr{P_i\br{X}} $. Then $ K_{i + 1} $ is strictly larger than $ K_i $ and, by the proposition above, still satisfies condition 1. On the other hand, in any field $ K_i $ satisfying condition 1, every element is a root of $ X^{p^r} - X $, and since this polynomial has at most $ p^r $ roots, $ \#K_i \le p^r $ for all $ i $. Thus this process must eventually terminate.
\end{proof}
Since $ X^{p^r} - X $ has degree $ p^r $, we expect the field $ K $ constructed above to have $ p^r $ elements. So it suffices to show that over any field $ K $ of characteristic $ p $, $ X^{p^r} - X $ has no repeated roots. To prove this we need an additional tool.
\pagebreak
\subsection{Derivatives}
\begin{definition}
Let $ R $ be a ring, and let $ P\br{X} = r_0 + \dots + r_dX^d $ be an element of $ R\sbr{X} $. The \textbf{derivative} $ P'\br{X} $ of $ P\br{X} $ is the polynomial
$$ r_1 + 2r_2X + \dots + dr_dX^{d - 1}. $$
\end{definition}
\begin{note*}
Just as for differentiation in calculus, we have a Leibniz rule. For $ P, Q \in R\sbr{X} $,
$$ \br{PQ}'\br{X} = P\br{X}Q'\br{X} + P'\br{X}Q\br{X}, $$
by reducing to $ P $ and $ Q $ monomials.
\end{note*}
From this we deduce the following.
\begin{lemma}
Let $ K $ be a field, and let $ P\br{X} $ be a polynomial in $ K\sbr{X} $ with a multiple root in $ K $. Then $ P\br{X} $ and $ P'\br{X} $ have a common factor of degree greater than zero.
\end{lemma}
\begin{proof}
Let $ \alpha \in K $ be the multiple root. Then we can write $ P\br{X} = \br{X - \alpha}^2Q\br{X} $. Applying the Leibniz rule we get $ P'\br{X} = 2\br{X - \alpha}Q\br{X} + \br{X - \alpha}^2Q'\br{X} $, and it is clear that $ X - \alpha $ divides both $ P\br{X} $ and $ P'\br{X} $.
\end{proof}
\begin{corollary}
Let $ K $ be a field of characteristic $ p $. Then $ X^{p^r} - X $ has no repeated roots in $ K $.
\end{corollary}
\begin{proof}
Let $ P\br{X} = X^{p^r} - X $. Then $ P'\br{X} = p^rX^{p^r - 1} - 1 = -1 $, since $ p^r = 0 $ in characteristic $ p $. So $ P\br{X} $ and $ P'\br{X} $ have no common factor of positive degree, and the claim follows from the lemma.
\end{proof}
\begin{corollary}
There exists a finite field of $ p^r $ elements.
\end{corollary}
\subsection{The multiplicative group}
\lecture{10}{Friday}{26/10/18}
Rather than show immediately that there is a unique finite field of $ p^r $ elements, we make a detour to study the multiplicative group of a finite field. This is not strictly necessary to prove uniqueness, but will simplify the proof, and is of interest in its own right. Let $ K $ denote a field of $ p^r $ elements. The goal of this section is to show that $ K^* $ is cyclic.
\begin{note*}
As a multiplicative group, $ K^* $ is an abelian group of order $ p^r - 1 $, so by Lagrange's theorem, we have $ \alpha^{p^r - 1} = 1 $ for all $ \alpha \in K^* $.
\end{note*}
Recall that if $ A $ is a finite abelian group, written additively, the order of an element $ a $ of $ A $ divides the order of $ A $, and if $ d'a = 0 $ for some $ d' \in \ZZ $ then the order of $ a $ divides $ d' $. The order of an element $ a $ of $ K^* $ is the smallest $ d \in \ZZ_{> 0} $ such that $ a^d = 1 $. Since $ a^{p^r - 1} = 1 $, the order of $ a $ is a divisor of $ p^r - 1 $. On the other hand, if $ d $ is a divisor of $ p^r - 1 $, then any element of order dividing $ d $ is a root of the polynomial $ X^d - 1 $. Since $ K $ is a field, this polynomial has at most $ d $ roots, so we find that there are at most $ d $ elements of $ K^* $ of order dividing $ d $. Moreover, the polynomial $ X^{p^r - 1} - 1 $ has $ p^r - 1 $ distinct roots in $ K $, namely all of $ K^* $. For $ d \mid p^r - 1 $, we have $ X^d - 1 \mid X^{p^r - 1} - 1 $, so $ X^d - 1 $ has exactly $ d $ roots in $ K $. That is, for all $ d \mid p^r - 1 $, $ K^* $ has exactly $ d $ elements of order dividing $ d $. In fact, we have the following.
\begin{proposition}
\label{prop:6.4.1}
Let $ A $ be a finite abelian group of order $ n $, and suppose that $ A $ has exactly $ d $ elements of order dividing $ d $, for all $ d $ dividing $ n $. Then $ A $ is cyclic.
\end{proposition}
The remainder of this section will be devoted to proving Proposition \ref{prop:6.4.1}. As a corollary, we deduce that the multiplicative group $ K^* $ of any finite field $ K $ is cyclic. Consider the cyclic group $ \ZZ / n\ZZ $. The order of any element in this group is a divisor of $ n $.
\begin{definition}
For $ n \in \ZZ_{> 0} $, we let $ \Phi\br{n} $ denote the number of elements in $ \br{\ZZ / n\ZZ, +} $ of exact order $ n $. This is equal to the number of integers $ t $ with $ 1 \le t \le n $ such that $ \br{t, n} = 1 $.
\end{definition}
\begin{note*}
Since $ \sbr{1} $ in $ \ZZ / n\ZZ $ has order $ n $, $ \Phi\br{n} $ is nonzero for all $ n $.
\end{note*}
\begin{lemma}
For any $ d $ dividing $ n $, the cyclic group $ \ZZ / n\ZZ $ contains a unique subgroup of order $ d $, and any element of $ \ZZ / n\ZZ $ of order dividing $ d $ is contained in this subgroup.
\end{lemma}
\begin{proof}
The cyclic subgroup $ C $ of $ \ZZ / n\ZZ $ generated by $ \sbr{n / d} $ is clearly a subgroup of order $ d $, with the $ d $ elements $ \sbr{0}, \sbr{\tfrac{n}{d}}, \dots, \br{d - 1}\sbr{\tfrac{n}{d}} $. Conversely, if $ x = \sbr{t} $ is an element of $ \ZZ / n\ZZ $ of order dividing $ d $, then $ dt $ is divisible by $ n $, and hence, by unique factorisation, $ t $ is divisible by $ n / d $. Thus $ x $ is in $ C $. In particular any subgroup of order $ d $ consists of elements of order dividing $ d $, so is contained in, and hence equal to, $ C $, and the claim follows.
\end{proof}
\pagebreak
As a consequence, we deduce the following.
\begin{corollary}
For any $ d $ dividing $ n $, $ \Phi\br{d} $ is the number of elements of $ \ZZ / n\ZZ $ of order $ d $.
\end{corollary}
\begin{corollary}