-
Notifications
You must be signed in to change notification settings - Fork 2
/
gSSURGO_CreateSoilMap.py
11368 lines (8658 loc) · 492 KB
/
gSSURGO_CreateSoilMap.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# gSSURGO_CreateSoilMap.py
#
# Creates a single Soil Data Viewer-type maps using gSSURGO and the sdv* attribute tables
# Uses mdstatrship* tables and sdvattribute table to populate menu
#
# 2021-12-18 Steve Peaslee
#
# THINGS TO DO:
#
# Test the input MUPOLYGON featurelayer to see how many polygons are selected when compared
# to the total in the source featureclass. If there is a significant difference, consider
# applying a query filter using AREASYMBOL to limit the size of the master query table.
#
# 0. Need to look at WTA for Depth to Any Restrictive Layer. Customer reported problem. The
# 201 values are being used in the weighting.
#
# 1. Aggregation method "Weighted Average" can now be used for non-class soil interpretations.
#
#
# 2. "Minimum or Maximum" and its use is now restricted to numeric attributes or attributes
# with a corresponding domain that is logically ordered.
#
# 3. Aggregation method "Absence/Presence" was replaced with a more generalized version
# thereof, which is referred to as "Percent Present". Up to now, aggregation method
# "Absence/Presence" was supported for one and only one attribute, component.hydricrating.
# Percent Present is a powerful new aggregation method that opens up a lot of new possibilities,
# e.g. "bedrock within two feet of the surface".
#
# 4. The merged aggregation engine now supports two different kinds of horizon aggregation,
# "weighted average" and "weighted sum". For the vast majority of horizon level attributes,
# "weighted average" is used. At the current time, the only case where "weighted sum" is used is
# for Available Water Capacity, where the water holding capacity needs to be summed rather than
# averaged.
# 5. The aggregation process now always returns two values, rather than one, the original
# aggregated result AND the percent of the map unit that shares that rating. For example, for
# the drainage class/dominant condition example below, the rating would be "Moderately well
# drained" and the corresponding map unit percent would be 60:
#
# 6. A horizon or layer where the attribute being aggregated is null will now never contribute
# to the final aggregated result. There # was a case for the second version of the aggregation
# engine where this was not true.
#
# 7. Column sdvattribute.fetchallcompsflag is no longer needed. The new aggregation engine was
# updated to know that it needs to # include all components whenever no component percent cutoff
# is specified and the aggregation method is "Least Limiting" or "Most # Limiting" or "Minimum or Maximum".
#
# 8. For aggregation methods "Least Limiting" and "Most Limiting", the rating will be set to "Unknown"
# if any component has a null # rating, and no component has a fully conclusive rating (0 or 1), depending
# on the type of rule (limitation or suitability) and the # corresponding aggregation method.
#
# 2015-12-17 Depth to Water Table: [Minimum or Maximum / Lower] is not swapping out NULL values for 201.
# The other aggregation methods appear to be working properly. So the minimum is returning mostly NULL
# values for the map layer when it should return 201's.
# 2015-12-17 For Most Limiting, I'm getting some questionable results. For example 'Somewhat limited'
# may get changed to 'Not rated'
# Looking at option to map fuzzy rating for all interps. This would require redirection to the
# Aggregate2_NCCPI and CreateNumericLayer functions. Have this working, but needs more testing.
#
# 2015-12-23 Need to look more closely at my Tiebreak implementation for Interps. 'Dwellings with
# Basements (DCD, Higher) appears to be switched. Look at Delaware 'PsA' mapunit with Pepperbox-Rosedale components
# at 45% each.
#
# 2016-03-23 Fixed bad bug, skipping last mapunit in NCCPI and one other function
#
# 2016-04-19 bZero parameter. Need to look at inclusion/exclusion of NULL rating values for Text or Choice.
# WSS seems to include NULL values for ratings such as Hydrologic Group and Flooding
#
# Interpretation columns
# interphr is the High fuzzy value, interphrc is the High rating class
# interplr is the Low fuzzy value, interplrc is the Low rating class
# Very Limited = 1.0; Somewhat limited = 0.22
#
# NCCPI maps fuzzy values by default. It appears that 1.0 would be high productivity and
# 0.01 very low productivity. Null would be Not rated.
#
# 2017-03-03 AggregateHZ_DCP_WTA - Bug fix. Was only returning surface rating for DCP. Need to let folks know about this.
#
# 2017-07-24 Depth to Water Table, DCP bug involving nullreplacementvalue and tiebreak code.
#
# 2017-08-11 Mapping interpretations using Cointerp very slow on CONUS gSSURGO
#
# 2017-08-14 Altered Unique values legend code to skip the map symbology section for very large layers
#
# 2018-06-30 Addressed issue with some Raster maps-classified had color ramp set backwards. Added new logic and layer files.
#
# 2019-09-23 Testing new ORG_TYPE per Kyle Stephens. Made some significant changes to aggregation and sql that needs to be tested,
# especially for the horizon aggregation.
# Background information needed to create raster symbology through .dbf or .clr file
#
# A .clr file can be used where the integer cell value is known for each attribute and each has its own rgb values.
# Certain raster formats support color maps: BIL, BMP, IMG, FGDBRaster, TIF
# There are two tools for colormaps: AddColorMap_management, DeleteColorMap_management
# AddColormap_management (in_raster, {in_template_raster}, {input_CLR_file})
# 1 255 255 0
# 2 64 0 128
# 3 255 32 32
# 4 0 255 0
# 5 0 0 255
# 2022-12-15 (by AKS) Issue with flooding frequency in SDVattribute table with
# 2023 SSURGO, for attributekey 12 (Flooding Frequency Class)
# the tiebreakerdomainname is incorrectly set to Null
# and needs to be set to flooding_frequency_class. Added floodBandaid function
# to correctly populate these records in the sdvattribute table.
# Also, parentheses in SQL queries in the sqlwhereclause field of sdvattribute table
# is causing errors downstream. floodBandaid changes three queries to work without
# parentheses.
# Also in the GetSDVatts function there was an error with concatenating the sql
# query from the sqlwhereclause. It assumed only one '=' would be present. Reconfigured
# to format more complex sql queries.
# Commented out stray reference to variable dCase in AggregateCo_Mo_DCD_Domain
# function
# 2023-01-05 There are 14 sdvattributes that come from the component level that
# are flagged as both Map unit and Component level attributes which routes them
# via the Aggregate1 function. This resulted in
# these attributes not being aggregated, but rather a many to one result. Within
# CreateSoilMap function I swapped an "or" for "and" such that attributes that
# are flagged as both component and map unit are aggregated by the appropriate
# component level aggregation function.
# For dominant condition property attributes changed condition
# sdvAtt.startswith("Surface") or sdvAtt.endswith("(Surface)") -> "SURFACE" in sdvAtt
# it was erroring out Soil Health - Surface Texture
## ===================================================================================
class MyError(Exception):
    """Custom exception carrying a user-facing error message for PrintMsg."""
## ===================================================================================
def errorMsg():
    # Report the current exception (first traceback frame, type and value)
    # through PrintMsg at error severity. Call from inside an except block.
    #
    # Fixes: sys.exc_type / sys.exc_value are deprecated Python 2 globals;
    # use the sys.exc_info() triple instead. The fallback message also named
    # the wrong function ("attFld") -- corrected to errorMsg.
    try:
        excType, excValue, tb = sys.exc_info()
        tbinfo = traceback.format_tb(tb)[0]
        theMsg = tbinfo + " \n" + str(excType) + ": " + str(excValue) + " \n"
        PrintMsg(theMsg, 2)
    except:
        # Last-resort guard: never let error reporting itself raise.
        PrintMsg("Unhandled error in errorMsg method", 2)
        pass
## ===================================================================================
def PrintMsg(msg, severity=0):
    # Forward a message to the geoprocessor so it shows up when run as a tool.
    # Each '\n'-separated line becomes its own GPMessage.
    # severity: 0 = message, 1 = warning, 2 = error (anything else is ignored).
    try:
        for line in msg.split('\n'):
            if severity == 2:
                arcpy.AddError(" \n" + line)
            elif severity == 1:
                arcpy.AddWarning(line)
            elif severity == 0:
                arcpy.AddMessage(line)
    except:
        # Message output is best-effort only; never raise from here.
        pass
## ===================================================================================
def floodBandaid(sdvattTable):
    '''Bandaid fixes for known data conflicts within the sdvattribute table.

    sdvattTable -- path to the sdvattribute table to amend in place.

    Applies three corrections (2023 SSURGO):
      1. attributekey 12 (Flooding Frequency Class) ships with a NULL
         tiebreakdomainname; set it to flooding_frequency_class.
      2-3. Parenthesized sqlwhereclause values cause errors downstream;
         rewrite the two known queries into equivalent paren-free form.

    Also fixes two user-facing message typos ("poppulated", "EcoStieNm").
    '''
    sdvView = 'sdvView'
    arcpy.management.MakeTableView(sdvattTable, sdvView)

    def _recalculate(whereClause, fieldName, newValue, message):
        # Select the records matching whereClause; if any exist, overwrite
        # fieldName with newValue and report what was amended.
        arcpy.management.SelectLayerByAttribute(sdvView, "NEW_SELECTION", whereClause)
        matchCount = arcpy.management.GetCount(sdvView)
        if int(matchCount[0]):
            arcpy.CalculateField_management(sdvView, fieldName, newValue, "PYTHON_9.3")
            arcpy.AddMessage(message)

    # 1. NULL tiebreakdomainname for Flooding Frequency Class
    _recalculate("attributekey = 12 AND tiebreakdomainname IS NULL",
                 "tiebreakdomainname",
                 "'flooding_frequency_class'",
                 'sdvattribute Tie Breaker Domain Name was not populated for flooding frequency, now amended.')
    # 2. Parenthesized ecological-site query (EcoSiteNm / EcoSiteID)
    _recalculate("sqlwhereclause = '(coecoclass.ecoclasstypename = ''NRCS Rangeland Site'' or coecoclass.ecoclasstypename = ''NRCS Forestland Site'')'",
                 "sqlwhereclause",
                 '''"coecoclass.ecoclasstypename = 'NRCS Rangeland Site' or coecoclass.ecoclasstypename = 'NRCS Forestland Site'"''',
                 'Amended SQL queries with parentheses in sdvattribute EcoSiteNm and EcoSiteID')
    # 3. Parenthesized IN (...) restriction query (Dep2BedRS)
    _recalculate("sqlwhereclause = 'corestrictions.reskind IN (''Densic bedrock'', ''Paralithic bedrock'', ''Lithic bedrock'')'",
                 "sqlwhereclause",
                 '''"corestrictions.reskind ='Densic bedrock' OR corestrictions.reskind ='Paralithic bedrock' OR corestrictions.reskind ='Lithic bedrock'"''',
                 'Amended SQL queries with parentheses in sdvattribute Dep2BedRS')
## ===================================================================================
def Number_Format(num, places=0, bCommas=True):
    """Format a number according to the system locale.

    num     -- the number to format
    places  -- number of decimal places
    bCommas -- when True, include the locale's thousands grouping

    Returns the formatted string, or "???" on any failure.
    """
    try:
        locale.setlocale(locale.LC_ALL, "")
        # locale.format() is deprecated (removed in Python 3.12);
        # format_string() is the supported equivalent and exists in
        # Python 2.7 as well. The grouping flag maps directly onto
        # bCommas, so the original duplicate if/else branch is gone.
        return locale.format_string("%.*f", (places, num), grouping=bCommas)
    except:
        errorMsg()
        return "???"
## ===================================================================================
def elapsedTime(start):
    """Return a human-readable string for the time elapsed since *start*.

    start -- a time.time() timestamp.

    Output looks like "2 minutes 5.0 seconds " with day/hour/minute parts
    included only once they (or a larger unit) are non-zero.

    Fixes: the singular-second test compared against "1.00", which a
    "%.1f" format can never produce (should be "1.0"); the hour and
    minute branches also labeled a value of 0 as singular ("0 hour").
    """
    try:
        # Total elapsed seconds
        eTotal = time.time() - start
        eMsg = ""
        # Whole days (86400 s); math.modf returns (fraction, whole)
        eDayR, eDay = math.modf(eTotal / 86400)
        eDay = int(eDay)
        if eDay > 1:
            eMsg = eMsg + str(eDay) + " days "
        elif eDay == 1:
            eMsg = eMsg + str(eDay) + " day "
        # Whole hours from the leftover fraction of a day
        eHourR, eHour = math.modf(eDayR * 24)
        eHour = int(eHour)
        if eDay > 0 or eHour > 0:
            # Singular only when exactly 1 (0 hours reads as plural)
            if eHour == 1:
                eMsg = eMsg + str(eHour) + " hour "
            else:
                eMsg = eMsg + str(eHour) + " hours "
        # Whole minutes from the leftover fraction of an hour
        eMinuteR, eMinute = math.modf(eHourR * 60)
        eMinute = int(eMinute)
        if eDay > 0 or eHour > 0 or eMinute > 0:
            if eMinute == 1:
                eMsg = eMsg + str(eMinute) + " minute "
            else:
                eMsg = eMsg + str(eMinute) + " minutes "
        # Remaining seconds, one decimal place
        eSeconds = "%.1f" % (eMinuteR * 60)
        if eSeconds == "1.0":
            eMsg = eMsg + eSeconds + " second "
        else:
            eMsg = eMsg + eSeconds + " seconds "
        return eMsg
    except:
        errorMsg()
        return ""
## ===================================================================================
def get_random_color(pastel_factor=0.5):
    # Helper for generate_new_color: build one random RGB triplet, blended
    # toward white by pastel_factor (0 = fully random; larger = paler).
    try:
        channels = []
        for _ in (1, 2, 3):
            raw = random.uniform(0, 1.0)
            channels.append(int(255 * (raw + pastel_factor) / (1.0 + pastel_factor)))
        return channels
    except:
        errorMsg()
        return [0, 0, 0]
## ===================================================================================
def color_distance(c1, c2):
    # Manhattan (L1) distance between two colors, channel by channel.
    # Helper for generate_new_color.
    total = 0
    for a, b in zip(c1, c2):
        total += abs(a - b)
    return total
## ===================================================================================
def generate_new_color(existing_colors, pastel_factor=0.5):
    # Helper for rand_rgb_colors: propose up to 100 random pastel colors and
    # return the first one not already in existing_colors (with an alpha of
    # 255 appended). If every proposal collides, return the candidate whose
    # nearest existing color is farthest away.
    try:
        best_color = None
        max_distance = None
        for _ in range(100):
            candidate = get_random_color(pastel_factor)
            if candidate not in existing_colors:
                candidate.append(255)  # add transparency level
                return candidate
            nearest = min([color_distance(candidate, c) for c in existing_colors])
            if not max_distance or nearest > max_distance:
                max_distance = nearest
                best_color = candidate
        best_color.append(255)
        return best_color
    except:
        errorMsg()
        return None
## ===================================================================================
def rand_rgb_colors(num):
    # Build a list of `num` random RGBA values, each chosen to differ from
    # the ones already picked. The 0.1 passed to generate_new_color is the
    # pastel factor (0 to 1; higher values give paler colors).
    try:
        palette = []
        for _ in range(0, num):
            palette.append(generate_new_color(palette, 0.1))
        return palette
    except:
        errorMsg()
        return []
## ===================================================================================
def polylinear_gradient(colors, n):
    ''' returns a list of colors forming linear gradients between
        all sequential pairs of colors. "n" specifies the total
        number of desired output colors '''
    # colors: sequence of colors accepted by linear_gradient (defined elsewhere)
    # The number of colors per individual linear gradient
    n_out = int(float(n) / (len(colors) - 1))
    # returns dictionary defined by color_dict()
    # NOTE(review): colors[1] is read before the len(colors) > 1 guard below,
    # so a single-color input raises IndexError here (and would also divide
    # by zero above) -- confirm callers always pass two or more colors.
    gradient_dict = linear_gradient(colors[0], colors[1], n_out)
    if len(colors) > 1:
        for col in range(1, len(colors) - 1):
            next = linear_gradient(colors[col], colors[col+1], n_out)
            for k in ("hex", "r", "g", "b"):
                # Exclude first point to avoid duplicates
                gradient_dict[k] += next[k][1:]
    return gradient_dict
## ===================================================================================
def fact(n):
    ''' Factorial of n, memoized in the module-level fact_cache dict. '''
    if n in fact_cache:
        return fact_cache[n]
    if n == 0 or n == 1:
        result = 1
    else:
        result = n * fact(n - 1)
    fact_cache[n] = result
    return result
## ===================================================================================
def bernstein(t, n, i):
    ''' Bernstein basis coefficient: C(n, i) * (1-t)^(n-i) * t^i '''
    binom = fact(n) / float(fact(i) * fact(n - i))
    weight = ((1 - t) ** (n - i)) * (t ** i)
    return binom * weight
## ===================================================================================
def bezier_gradient(colors, n_out=100):
    ''' Returns a "bezier gradient" dictionary
        using a given list of colors as control
        points. Dictionary also contains control
        colors/points. '''
    # colors: colors in the form accepted by hex_to_RGB (defined elsewhere)
    # n_out: number of gradient colors to sample along the Bezier curve
    # RGB vectors for each color, use as control points
    RGB_list = [hex_to_RGB(color) for color in colors]
    n = len(RGB_list) - 1
    def bezier_interp(t):
        ''' Define an interpolation function
            for this specific curve'''
        # List of all summands
        # NOTE(review): relies on Python 2 map() returning a list; the
        # vector[c] indexing below would fail on a Python 3 map object --
        # confirm this script still targets Python 2 / ArcMap.
        summands = [
            map(lambda x: int(bernstein(t,n,i)*x), c)
            for i, c in enumerate(RGB_list)
        ]
        # Output color
        out = [ 0, 0, 0]
        # Add components of each summand together
        for vector in summands:
            for c in range(3):
                out[c] += vector[c]
        return out
    # Sample the curve at n_out evenly spaced t values in [0, 1]
    gradient = [
        bezier_interp(float(t)/(n_out - 1))
        for t in range(n_out)
    ]
    # Return all points requested for gradient
    return {
        "gradient": color_dict(gradient),
        "control": color_dict(RGB_list)
    }
## ===================================================================================
def BadTable(tbl):
    # Report whether a table is unusable.
    # Returns True ("bad") when tbl does not exist or holds zero records,
    # False when it contains at least one record.
    try:
        if not arcpy.Exists(tbl):
            return True
        recCnt = int(arcpy.GetCount_management(tbl).getOutput(0))
        return recCnt == 0
    except:
        errorMsg()
        return True
## ===================================================================================
def SortData(muVals, a, b, sortA, sortB):
    # Perform a two-level sort of a list of lists and return the first item.
    #
    # muVals -- list of lists; each inner list must have at least max(a, b) + 1 items
    # a      -- index of the primary sort item (integer)
    # b      -- index of the secondary sort item (integer)
    # sortA  -- boolean, reverse flag for the primary sort
    # sortB  -- boolean, reverse flag for the secondary sort
    #
    # Sorts by item b first, then (stably) by item a, and returns the
    # single winning inner list, or (None, None) when muVals is empty.
    try:
        if len(muVals) > 0:
            muVal = sorted(sorted(muVals, key = lambda x : x[b], reverse=sortB), key = lambda x : x[a], reverse=sortA)[0]
        else:
            # Bug fix: the original indexed muVals[0] here, which always
            # raised IndexError on an empty list and only produced
            # (None, None) via the exception handler. Return it directly.
            muVal = (None, None)
        return muVal
    except:
        errorMsg()
        return (None, None)
## ===================================================================================
def SortData0(muVals):
    # Two-level sort of (comppct_r, rating-or-index) tuples; returns the
    # single winning tuple chosen by the module-level tieBreaker setting
    # measured against the dSDV high/low tie-break labels.
    try:
        if not len(muVals) > 0:
            # note: empty input yields a list, matching the original
            return [None, None]
        if tieBreaker == dSDV["tiebreakhighlabel"]:
            # return higher value
            return sorted(sorted(muVals, key = lambda x : x[1], reverse=True), key = lambda x : x[0], reverse=True)[0]
        if tieBreaker == dSDV["tiebreaklowlabel"]:
            # return lower value
            return sorted(sorted(muVals, key = lambda x : x[1], reverse=False), key = lambda x : x[0], reverse=True)[0]
        # unrecognized tieBreaker setting
        return (None, None)
    except:
        errorMsg()
        return (None, None)
## ===================================================================================
def ColorRamp(dLabels, lowerColor, upperColor):
    # For Progressive color ramps, there are no colors defined for each legend item.
    # Create a dictionary of colors based upon the upper and lower colors.
    # Key value is 'part' which is the number of colors used to define the color ramp.
    #
    # count is always equal to three and part is always zero-based
    #
    # upper and lower Color are dictionaries (keys: 0, 1, 2) with RGB tuples as values
    # Will only handle base RGB color
    # dColors = ColorRamp(dLegend["count"], len(dLabels), dLegend["LowerColor"], dLegend["UpperColor"])
    #
    # Returns {1: {"red": r, "green": g, "blue": b}, 2: ...} with one entry
    # per legend label, or {} on failure.
    try:
        import BezierColorRamp
        labelCnt = len(dLabels)
        dColorID = dict()
        dRGB = dict()
        # Use dColorID to identify the Lower and Upper Colors
        dColorID[(255, 0, 0)] = "Red"
        dColorID[(255, 255, 0)] = "Yellow"
        dColorID[(0, 255, 0)] ="Green" # not being used in slope color ramp
        dColorID[(0, 255, 255)] = "Cyan"
        dColorID[(0, 0, 255)] = "Blue"
        dColorID[(255, 0, 255)] = "Magenta" # not being used in slope color ramp
        dRGB["red"] = (255, 0, 0)
        dRGB["yellow"] = (255, 255, 0)
        dRGB["green"] = (0, 255, 0)
        dRGB["cyan"] = (0, 255, 255)
        dRGB["blue"] = (0, 0, 255)
        dRGB["magenta"] = (255, 0, 255)
        dBaseColors = dict() # basic RGB color ramp as defined by lower and upper colors
        colorList = list()
        dColors = dict()
        # Walk the lower/upper color parts in parallel, appending each color
        # name (in ramp order) exactly once -- consecutive duplicates between
        # the lower and upper sets are collapsed via lastclr.
        j = -1
        lastclr = (-1, -1, -1)
        for i in range(len(lowerColor)):
            clr = lowerColor[i]
            if clr != lastclr:
                j += 1
                dBaseColors[j] = clr
                colorList.append(dColorID[clr])
                lastclr = clr
            clr = upperColor[i]
            if clr != lastclr:
                j += 1
                dBaseColors[j] = clr
                colorList.append(dColorID[clr])
                lastclr = clr
        # Interpolate labelCnt colors through the collected control colors
        newColors = BezierColorRamp.Process(labelCnt, colorList)
        for i in range(len(newColors)):
            # keys are 1-based to match legend label order
            dColors[i + 1] = {"red" : newColors[i][0], "green": newColors[i][1], "blue" : newColors[i][2]}
        return dColors
    except:
        errorMsg()
        return {}
## ===================================================================================
def GetMapLegend(dAtts, bFuzzy):
    # Get map legend values and order from maplegendxml column in sdvattribute table
    # Return dLegend dictionary containing contents of XML.
    # Problem with Farmland Classification. It is defined as a choice, but
    #
    # dAtts  -- sdvattribute record dictionary; must contain "maplegendxml"
    #           and "attributename"
    # bFuzzy -- when True, float labels are NOT converted (fuzzy-rating mode)
    #
    # Populates the global dLegend with keys such as "maplegendkey", "type",
    # "name", "labels", "colors", and (for Progressive ramps) "count",
    # "LowerColor", "UpperColor". Returns dLegend, or {} on failure.
    # Also reads the globals bVerbose and dSDV ("attributelogicaldatatype").
    try:
        #bVerbose = True # This function seems to work well, but prints a lot of messages.
        global dLegend
        dLegend = dict()
        dLabels = dict()
        #if bFuzzy and not dAtts["attributename"].startswith("National Commodity Crop Productivity Index"):
        #    # Skip map legend because the fuzzy values will not match the XML legend.
        #    return dict()
        arcpy.SetProgressorLabel("Getting map legend information")
        if bVerbose:
            PrintMsg(" \nCurrent function : " + sys._getframe().f_code.co_name, 1)
        xmlString = dAtts["maplegendxml"]
        #if bVerbose:
        #    PrintMsg(" \nxmlString: " + xmlString + " \n ", 1)
        # Convert XML to tree format
        tree = ET.fromstring(xmlString)
        # Iterate through XML tree, finding required elements...
        i = 0
        dColors = dict()
        legendList = list()
        legendKey = ""
        legendType = ""
        legendName = ""
        # Notes: dictionary items will vary according to legend type
        # Looks like order should be dictionary key for at least the labels section
        #
        for rec in tree.iter():
            if rec.tag == "Map_Legend":
                dLegend["maplegendkey"] = rec.attrib["maplegendkey"]
            if rec.tag == "ColorRampType":
                dLegend["type"] = rec.attrib["type"]
                dLegend["name"] = rec.attrib["name"]
                if rec.attrib["name"] == "Progressive":
                    dLegend["count"] = int(rec.attrib["count"])
            # Progressive ramps carry explicit Lower/Upper color parts
            if "name" in dLegend and dLegend["name"] == "Progressive":
                if rec.tag == "LowerColor":
                    # 'part' is zero-based and related to count
                    part = int(rec.attrib["part"])
                    red = int(rec.attrib["red"])
                    green = int(rec.attrib["green"])
                    blue = int(rec.attrib["blue"])
                    #PrintMsg("Lower Color part #" + str(part) + ": " + str(red) + ", " + str(green) + ", " + str(blue), 1)
                    if rec.tag in dLegend:
                        dLegend[rec.tag][part] = (red, green, blue)
                    else:
                        dLegend[rec.tag] = dict()
                        dLegend[rec.tag][part] = (red, green, blue)
                if rec.tag == "UpperColor":
                    part = int(rec.attrib["part"])
                    red = int(rec.attrib["red"])
                    green = int(rec.attrib["green"])
                    blue = int(rec.attrib["blue"])
                    #PrintMsg("Upper Color part #" + str(part) + ": " + str(red) + ", " + str(green) + ", " + str(blue), 1)
                    if rec.tag in dLegend:
                        dLegend[rec.tag][part] = (red, green, blue)
                    else:
                        dLegend[rec.tag] = dict()
                        dLegend[rec.tag][part] = (red, green, blue)
            if rec.tag == "Labels":
                order = int(rec.attrib["order"])
                if dSDV["attributelogicaldatatype"].lower() == "integer":
                    # get dictionary values and convert values to integer
                    try:
                        # single-value label; the "label" read doubles as a
                        # presence check before committing the conversion
                        val = int(rec.attrib["value"])
                        label = rec.attrib["label"]
                        rec.attrib["value"] = val
                        dLabels[order] = rec.attrib
                    except:
                        # range label: upper_value / lower_value pair
                        upperVal = int(float(rec.attrib["upper_value"]))
                        lowerVal = int(float(rec.attrib["lower_value"]))
                        rec.attrib["upper_value"] = upperVal
                        rec.attrib["lower_value"] = lowerVal
                        dLabels[order] = rec.attrib
                elif dSDV["attributelogicaldatatype"].lower() == "float" and not bFuzzy:
                    # get dictionary values and convert values to float
                    try:
                        val = float(rec.attrib["value"])
                        label = rec.attrib["label"]
                        rec.attrib["value"] = val
                        dLabels[order] = rec.attrib
                    except:
                        upperVal = float(rec.attrib["upper_value"])
                        lowerVal = float(rec.attrib["lower_value"])
                        rec.attrib["upper_value"] = upperVal
                        rec.attrib["lower_value"] = lowerVal
                        dLabels[order] = rec.attrib
                else:
                    dLabels[order] = rec.attrib # for each label, save dictionary of values
            if rec.tag == "Color":
                # Save RGB Colors for each legend item
                # get dictionary values and convert values to integer
                # NOTE(review): 'order' comes from the most recent "Labels"
                # element; a "Color" appearing before any "Labels" would raise
                # NameError here -- presumably the XML always interleaves them.
                red = int(rec.attrib["red"])
                green = int(rec.attrib["green"])
                blue = int(rec.attrib["blue"])
                dColors[order] = rec.attrib
            if rec.tag == "Legend_Elements":
                try:
                    dLegend["classes"] = rec.attrib["classes"] # save number of classes (also is a dSDV value)
                except:
                    pass
        # Add the labels dictionary to the legend dictionary
        dLegend["labels"] = dLabels
        dLegend["colors"] = dColors
        # Test iteration methods on dLegend
        #PrintMsg(" \n" + dAtts["attributename"] + " Legend Key: " + dLegend["maplegendkey"] + ", Type: " + dLegend["type"] + ", Name: " + dLegend["name"] , 1)
        if bVerbose:
            PrintMsg(" \n" + dAtts["attributename"] + "; MapLegendKey: " + dLegend["maplegendkey"] + ",; Type: " + dLegend["type"] , 1)
            for order, vals in dLabels.items():
                PrintMsg("\tNew " + str(order) + ": ", 1)
                for key, val in vals.items():
                    PrintMsg("\t\t" + key + ": " + str(val), 1)
                try:
                    r = int(dColors[order]["red"])
                    g = int(dColors[order]["green"])
                    b = int(dColors[order]["blue"])
                    rgb = (r,g,b)
                    #PrintMsg("\t\tRGB: " + str(rgb), 1)
                except:
                    pass
        if bVerbose:
            PrintMsg(" \ndLegend: " + str(dLegend), 1)
        return dLegend
    except MyError, e:
        # Example: raise MyError, "This is an error message"
        PrintMsg(str(e), 2)
        return dict()
    except:
        errorMsg()
        return dict()
## ===================================================================================
def CreateStringLayer(sdvLyrFile, dLegend, outputValues):
    # OLD METHOD NOT BEING USED
    #
    # # UNIQUE_VALUES
    # Create dummy shapefile that can be used to set up
    # UNIQUE_VALUES symbology for the final map layer. Since
    # there is no join, I am hoping that the dummy layer symbology
    # can be setup correctly and then transferred to the final
    # output layer that has the table join.
    #
    # Need to expand this to able to use defined class breaks and remove unused
    # breaks, labels.
    # SDVATTRIBUTE Table notes:
    #
    # dSDV["maplegendkey"] tells us which symbology type to use
    # dSDV["maplegendclasses"] tells us if there are a fixed number of classes (5)
    # dSDV["maplegendxml"] gives us detailed information about the legend such as class values, legend text
    #
    # *maplegendkey 1: fixed numeric class ranges with zero floor. Used only for Hydric Percent Present.
    #
    # maplegendkey 2: defined list of ordered values and legend text. Used for Corrosion of Steel, Farmland Class, TFactor.
    #
    # *maplegendkey 3: classified numeric values. Physical and chemical properties.
    #
    # maplegendkey 4: unique string values. Unknown values such as mapunit name.
    #
    # maplegendkey 5: defined list of string values. Used for Interp ratings.
    #
    # *maplegendkey 6: defined class breaks for a fixed number of classes and a color ramp. Used for pH, Slope, Depth to.., etc
    #
    # *maplegendkey 7: fixed list of index values and legend text. Used for Irrigated Capability Class, WEI, KFactor.
    #
    # maplegendkey 8: random unique values with domain values and legend text. Used for HSG, Irrigated Capability Subclass, AASHTO.
    #
    # sdvLyrFile   -- path to the .lyr template used as the symbology source
    # dLegend      -- legend dictionary (not read directly in this routine)
    # outputValues -- list of rating values; one dummy polygon is inserted per
    #                 value (mutated: a None entry is appended when absent)
    # Also reads the globals bVerbose, outputTbl, dSDV, env and df.
    # Returns the symbolized temporary layer, or None on failure.
    try:
        #arcpy.SetProgressorLabel("Setting up map layer for string data")
        if bVerbose:
            PrintMsg(" \nCurrent function : " + sys._getframe().f_code.co_name, 1)
        # The output feature class to be created
        dummyFC = os.path.join(env.scratchGDB, "sdvsymbology")
        # Create the output feature class with rating field
        #
        if arcpy.Exists(dummyFC):
            arcpy.Delete_management(dummyFC)
        arcpy.CreateFeatureclass_management(os.path.dirname(dummyFC), os.path.basename(dummyFC), "POLYGON")
        # Create copy of output field and add to shapefile
        # AddField_management (in_table, field_name, field_type, {field_precision}, {field_scale}, {field_length}, {field_alias}, {field_is_nullable}, {field_is_required}, {field_domain})
        #fName = dSDV["resultcolumnname"]
        # Mirror the rating field's type and length from the output table
        outFields = arcpy.Describe(outputTbl).fields
        for fld in outFields:
            if fld.name.upper() == dSDV["resultcolumnname"].upper():
                fType = fld.type.upper()
                fLen = fld.length
                break
        arcpy.AddField_management(dummyFC, dSDV["resultcolumnname"].upper(), fType, "", "", fLen, "", "NULLABLE")
        # Open an insert cursor for the new feature class
        #
        # Insert one unit-square polygon per rating value, marching along the
        # x axis so each feature is distinct.
        x1 = 0
        y1 = 0
        x2 = 1
        y2 = 1
        if not None in outputValues:
            outputValues.append(None)
        with arcpy.da.InsertCursor(dummyFC, ["SHAPE@", dSDV["resultcolumnname"]]) as cur:
            for val in outputValues:
                array = arcpy.Array()
                coords = [[x1, y1], [x1, y2], [x2, y2], [x2, y1]]
                for coord in coords:
                    pnt = arcpy.Point(coord[0], coord[1])
                    array.add(pnt)
                # close the ring by repeating the first vertex
                array.add(array.getObject(0))
                polygon = arcpy.Polygon(array)
                rec = [polygon, val]
                cur.insertRow(rec)
                x1 += 1
                x2 += 1
        #
        # Setup symbology
        # Identify temporary layer filename and path
        layerFileCopy = os.path.join(env.scratchFolder, os.path.basename(sdvLyrFile))
        # Try creating a featurelayer from dummyFC
        dummyLayer = "DummyLayer"
        arcpy.MakeFeatureLayer_management(dummyFC, dummyLayer)
        dummyDesc = arcpy.Describe(dummyLayer)
        #arcpy.SaveToLayerFile_management("DummyLayer", layerFileCopy, "ABSOLUTE", "10.1")
        #arcpy.Delete_management("DummyLayer")
        #tmpSDVLayer = arcpy.mapping.Layer(layerFileCopy)
        tmpSDVLayer = arcpy.mapping.Layer(dummyLayer)
        tmpSDVLayer.visible = False
        if bVerbose:
            PrintMsg(" \nUpdating tmpSDVLayer symbology using " + sdvLyrFile, 1)
        # Copy the symbology from the template layer file onto the dummy layer
        arcpy.mapping.UpdateLayer(df, tmpSDVLayer, arcpy.mapping.Layer(sdvLyrFile), True)
        if tmpSDVLayer.symbologyType.lower() == "other":
            # Failed to properly update symbology on the dummy layer for a second time
            raise MyError, "Failed to properly update the datasource using " + dummyFC
        # At this point, the layer is based upon the dummy featureclass
        tmpSDVLayer.symbology.valueField = dSDV["resultcolumnname"]
        return tmpSDVLayer
    except MyError, e:
        PrintMsg(str(e), 2)
        return None
    except:
        errorMsg()
        return None
## ===================================================================================
def CreateNumericLayer(sdvLyrFile, dLegend, outputValues, classBV, classBL):
#
# POLYGON layer
#
# Create dummy polygon featureclass that can be used to set up
# GRADUATED_COLORS symbology for the final map layer.
#
# Need to expand this to be able to use defined class breaks and remove unused
# breaks and labels.
#
# I saw a confusing error message related to env.scratchGDB that was corrupted.
# The error message was ERROR 000354: The name contains invalid characters.
# SDVATTRIBUTE Table notes:
#
# dSDV["maplegendkey"] tells us which symbology type to use
# dSDV["maplegendclasses"] tells us if there are a fixed number of classes (5)
# dSDV["maplegendxml"] gives us detailed information about the legend such as class values, legend text
#
# *maplegendkey 1: fixed numeric class ranges with zero floor. Used only for Hydric Percent Present.
#
# maplegendkey 2: defined list of ordered values and legend text. Used for Corrosion of Steel, Farmland Class, TFactor.
#
# *maplegendkey 3: classified numeric values. Physical and chemical properties.
#
# maplegendkey 4: unique string values. Unknown values such as mapunit name.
#
# maplegendkey 5: defined list of string values. Used for Interp ratings.
#
# *maplegendkey 6: defined class breaks for a fixed number of classes and a color ramp. Used for pH, Slope, Depth to.., etc
#
# *maplegendkey 7: fixed list of index values and legend text. Used for Irrigated Capability Class, WEI, KFactor.
#
# maplegendkey 8: random unique values with domain values and legend text. Used for HSG, Irrigated Capability Subclass, AASHTO.
try:
#arcpy.SetProgressorLabel("Setting up layer for numeric data")
if bVerbose:
PrintMsg(" \nCurrent function : " + sys._getframe().f_code.co_name, 1)
PrintMsg(" \nTop of CreateNumericLayer \n classBV: " + str(classBV) + " \nclassBL: " + str(classBL), 1)
# For pH, the classBV and classBL are already set properly at this point. Need to skip any changes to
# these two variables.
# The temporary output feature class to be created for symbology use.
dummyName = "sdvsymbology"
dummyFC = os.path.join(scratchGDB, dummyName)
# Create the output feature class
#
if arcpy.Exists(dummyFC):
arcpy.Delete_management(dummyFC)
time.sleep(1)
arcpy.CreateFeatureclass_management(os.path.dirname(dummyFC), os.path.basename(dummyFC), "POLYGON")
# Create copy of output field and add to shapefile
outFields = arcpy.Describe(outputTbl).fields
for fld in outFields:
fName = fld.name
fType = fld.type.upper()
fLen = fld.length
arcpy.AddField_management(dummyFC, dSDV["resultcolumnname"].upper(), fType, "", "", fLen)
# Handle numeric ratings
if dSDV["maplegendkey"] in [1, 2, 3, 6]:
#
#try:
# Problem with bFuzzy for other interps besides NCCPI
# dLabels = dLegend["labels"] # now a global
#except:
# dLabels = dict()
if len(outputValues) == 2:
# Use this one if outputValues are integer with a unique values renderer
#
if outputValues[0] is None:
outputValues[0] = 0
if outputValues[1] is None:
outputValues[1] = 0
if dSDV["effectivelogicaldatatype"].lower() == "float":
minVal = max(float(outputValues[0]), 0)
maxVal = max(float(outputValues[1]), 0)
elif dSDV["effectivelogicaldatatype"].lower() == "integer":
minVal = max(outputValues[0], 0)
maxVal = max(int(outputValues[1]), 0)
else:
# Use this for an unknown range of numeric values including Nulls
minVal = max(min(outputValues), 0)
maxVal = max(max(outputValues), 0)
else:
# More than a single pair of values