#!/usr/bin/env python
# coding: utf-8
# Uncomment one commented line in Bullet 7 (the 'if Production.size == 0:' check) when needed
# Import packages
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
from tqdm import tqdm
import datetime
import warnings
warnings.filterwarnings("ignore")
####################
# 1. Initial run values
####################
# For tracking changes in data generated through script and GEE attribution dataset
Dataset_version = 1
GEE_data_version = 0
Python_script_version = 1
Year = '(2001-2022)'
end_year = 2022 # Year up to which the simulation runs
end_year_FAO_Landuse = 2021 # Last available year of the FAO land use dataset (will help with gap-filling)
#end_year_FAO_Production = 2021 # Last available year of the FAO Production dataset (will help with gap-filling) - to be adjusted in section 7
#end_year_IBGE_Production = 2022 # For IBGE - to be adjusted in section 7
lag = 3 # Lag assumed between detected forest loss and established agricultural statistics in FAO
amortized_period = 5 # Time over which the final deforestation and carbon footprint are averaged
# Details about classification codes in Supplementary Information
redundant_agriculture_classes = 5182 # Class name is 'Unknown Tree Crop'
redundant_plantation_class = 5181 # Class name is 'Unknown Forest Plantation'
# Classes that need to be attributed to other classes using statistical attribution
Hansen_Unattributed = [1] # Not attributed to any land use
MapBiomas_Farming = [3050] # Can be attributed to agriculture/cropland, pasture and forest plantation
MapBiomas_Mosiac = [3100] # Can be attributed to agriculture/cropland and pasture
Dominant_Agriculture = [3000] # Can be attributed to agriculture/cropland and pasture; treated differently than the class below
Dominant_forestry = [500] # For attributing values to only plantation
Broad_LU_Undefined = [9000] # All values left unattributed after the land balance go here
MapBiomas_FarmingMosiac_cropland = [3150]
MapBiomas_FarmingMosiac_pasture = [4000]
MapBiomas_FarmingMosiac_plantation = [5000]
Dominant_cropland = [3151]
Dominant_pasture = [4001]
Dominant_plantation = [6501]
Hansen_cropland = [3152]
Hansen_pasture = [4002]
Hansen_plantation = [6502]
GlobalPlantation_treecrop = [3801]
necessary_classes = [500,3150,3151,3152,3801,4000,4001,4002,5000,6501,6502,9000] # Classes to which all land use is attributed (forestry, pasture, plantations and discarded/undefined)
Broad_cropland = [3150, 3151, 3152, 3200, 3201, 3800, 3801]
Broad_cropland_limited = [3152, 3151, 3201, 3150, 3200, 3800, 3801]
Broad_pasture = [4000, 4001, 4002]
Broad_plantation = [5000, 6501, 6502]
# Potential ranges for cropland, pasture and forest plantations in our reclassification (Plantation species (mostly from Du et al. 2021) that overlap with FAO commodities will ultimately be counted in cropland)
Broad_LU_Agriculture_start, Broad_LU_Agriculture_end = 3150,3999
Broad_LU_Pasture_start, Broad_LU_Pasture_end = 4000,4999
Broad_LU_Plantation_start, Broad_LU_Plantation_end = 5000,6999
# Countries that will not follow national-scale attribution (i.e., attribution is done at a subnational grouping)
Country_with_subnational_attribution = ['Brazil']
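# Illustrative sketch (hypothetical helper, not used elsewhere in this script): maps a
# reclassified code to its broad land-use category using the ranges defined above.
def _broad_landuse_of(code):
    if Broad_LU_Agriculture_start <= code <= Broad_LU_Agriculture_end:
        return 'Cropland'
    if Broad_LU_Pasture_start <= code <= Broad_LU_Pasture_end:
        return 'Pasture'
    if Broad_LU_Plantation_start <= code <= Broad_LU_Plantation_end:
        return 'Plantation'
    if code in Broad_LU_Undefined:
        return 'Discarded/Undefined'
    return 'Other'  # e.g. mixed or forestry classes that are handled separately below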
# In[61]:
####################
# 2. Update and load all necessary datasets
####################
home_folder = '/home/chandra/backup/Chalmers/'
Simulation_version = 'DeDuCE v'+str(Dataset_version)+'.'+str(GEE_data_version)+'.'+str(Python_script_version)+' ' + Year
GEE_datasets_version = home_folder+Simulation_version+'/DeDuCE_dataset_GEE' # Directory of the latest GEE run csv files
FAO_Landuse_version = home_folder+'FAOSTATS/Inputs_LandUse_E_All_Data_(Normalized).csv' # Download the latest FAO Landuse dataset (https://www.fao.org/faostat/en/#home) - directly from bulk dataset
FAO_Production_version = home_folder+'FAOSTATS/Production_Crops_Livestock_E_All_Data_(Normalized).csv' # Download the latest FAO Production dataset (https://www.fao.org/faostat/en/#home)
Global_FRA_version = home_folder+'FAOSTATS/fra-forestCharacteristics.csv' # Download the latest Global Forest Resources Assessment (FRA) dataset (https://fra-data.fao.org/assessments/fra/2020)
Crop_and_grass_loss_Brazil = home_folder+'Other_datasets/Li_et_al_2017_Brazil_Gross_cropland_grassland_changes_2001-2022.xlsx'# Updated from Li et al 2018 (https://essd.copernicus.org/articles/10/219/2018/) - values for 2021 are same as 2020;
Crop_and_grass_loss = home_folder+'Other_datasets/Li_et_al_2017_Gross_cropland_grassland_changes_2001-2022.xlsx' # Updated from Li et al 2018 (https://essd.copernicus.org/articles/10/219/2018/) - values for 2021 are same as 2020;
# Check the latest data at https://maps.elie.ucl.ac.be/CCI/viewer/download.php
# Update from the following link (last updated till year 2021): https://sidra.ibge.gov.br/pesquisa/pam/tabelas
IBGE_dataset_version = home_folder+'IBGE dataset'
# Contains code for commodities, primarily from MapBiomas and Du et al. plantation
Commodity_lookup_codes = pd.read_excel(home_folder+Simulation_version+'/Supplementary_data-DeDuCE v1.xlsx', 'Lookup-Commodity Code')
# Contains FAO names for GADM countries
Country_lookup_codes = pd.read_excel(home_folder+Simulation_version+'/Supplementary_data-DeDuCE v1.xlsx', 'Lookup-Country (GADM vs FAO)')
# Contains Ecoregion group for GADM countries
Country_lookup_codes_ecoregion = pd.read_excel(home_folder+Simulation_version+'/Supplementary_data-DeDuCE v1.xlsx', 'Lookup-Country (GADM)')
# Contains groups for all FAO commodities
Commodity_group = pd.read_excel(home_folder+Simulation_version+'/Supplementary_data-DeDuCE v1.xlsx', 'Lookup-FAO commodity')
# Contains Soil Organic carbon loss data
SOC_loss = (pd.read_excel(home_folder+Simulation_version+'/Supplementary_data-DeDuCE v1.xlsx', 'Lookup-SOC Loss').iloc[:8, :5])
# Sequestration potential of replacing land use
Commodity_replacement_potential = pd.read_excel(home_folder+Simulation_version+'/Supplementary_data-DeDuCE v1.xlsx', 'Lookup-Carbon seq. potential')
# Peatland emission factor
Peatland_emission_factors = pd.read_excel(home_folder+Simulation_version+'/Supplementary_data-DeDuCE v1.xlsx', 'Lookup-Peatland emission factor')
# Contains matching FAO names for IBGE commodities
IBGE_crops = pd.read_excel(home_folder+Simulation_version+'/Supplementary_data-DeDuCE v1.xlsx', 'Lookup-IBGE crops')
# Contains subnational GADM codes for Brazil. The first row contains miscellaneous table info and is therefore skipped.
IBGE_municipality = pd.read_excel(home_folder+Simulation_version+'/Supplementary_data-DeDuCE v1.xlsx', 'Lookup-Brazil (GADM vs IBGE)', skiprows=[0])
# Contains Score and Overall accuracy for GADM countries
Quality_assessment_lookup_codes = pd.read_excel(home_folder+Simulation_version+'/Supplementary_data-DeDuCE v1.xlsx', 'Scoring')
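# Optional sanity check (a minimal sketch, defined but not called anywhere below): confirms that
# the FAO/FRA/Li et al. input files configured above exist before starting a long attribution run.
def _check_input_files_exist():
    required = [FAO_Landuse_version, FAO_Production_version, Global_FRA_version,
                Crop_and_grass_loss_Brazil, Crop_and_grass_loss]
    missing = [path for path in required if not os.path.exists(path)]
    if missing:
        print('Missing input files:', missing)
    return not missing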
####################
# 3.1 Extracting and pre-processing GEE and FAO files
####################
## Function to extract GEE files
def find_forest_loss_files(Countries, directory_path):
# Searches for forest loss files for the specified countries in the directory and merges the data across rows
classification_files = []
AGB_files = []
BGB_files = []
Topsoil_SOC_files = []
Subsoil_SOC_files = []
Peatland_files = []
for Country in Countries:
# Pattern of files that need to be extracted
# Searches for 'Forest_loss_to_CLASSIFICATION_[COUNTRY]_*.csv', 'Forest_loss_to_AGB_EMISSION_[COUNTRY]_*.csv', 'Forest_loss_to_BGB_EMISSION_[COUNTRY]_*.csv'
# (and the corresponding SOC and peatland files) in the specified directory and returns their file paths for the given [Country].
classification_file_pattern = 'Forest_loss_to_CLASSIFICATION_' + str(Country) + '_'
AGB_file_pattern = 'Forest_loss_to_AGB_EMISSION_' + str(Country) + '_'
BGB_file_pattern = 'Forest_loss_to_BGB_EMISSION_' + str(Country) + '_'
Topsoil_SOC_file_pattern = 'Forest_loss_to_SOC_0_30_' + str(Country) + '_'
Subsoil_SOC_file_pattern = 'Forest_loss_to_SOC_30_100_' + str(Country) + '_'
Peatland_file_pattern = 'Forest_loss_to_PEATLAND_' + str(Country) + '_'
# Get a list of all the matching file paths
FLC_file_paths = [os.path.join(directory_path, f) for f in os.listdir(directory_path) if classification_file_pattern in f]
AGB_file_paths = [os.path.join(directory_path, f) for f in os.listdir(directory_path) if AGB_file_pattern in f]
BGB_file_paths = [os.path.join(directory_path, f) for f in os.listdir(directory_path) if BGB_file_pattern in f]
Topsoil_SOC_file_paths = [os.path.join(directory_path, f) for f in os.listdir(directory_path) if Topsoil_SOC_file_pattern in f]
Subsoil_SOC_file_paths = [os.path.join(directory_path, f) for f in os.listdir(directory_path) if Subsoil_SOC_file_pattern in f]
Peatland_file_paths = [os.path.join(directory_path, f) for f in os.listdir(directory_path) if Peatland_file_pattern in f]
# Warn when multiple attribution files are found for the same country and category
if len(FLC_file_paths) > 1 or len(AGB_file_paths) > 1 or len(BGB_file_paths) > 1 or \
len(Topsoil_SOC_file_paths) > 1 or len(Subsoil_SOC_file_paths) > 1 or \
len(Peatland_file_paths) > 1:
print(Country, subregion,'\033[91m' + 'Multiple forest loss attribution files for the same country' + '\033[0m')
try:
classification_file = pd.read_csv(FLC_file_paths[0])
AGB_file = pd.read_csv(AGB_file_paths[0])
BGB_file = pd.read_csv(BGB_file_paths[0])
Topsoil_SOC_file = pd.read_csv(Topsoil_SOC_file_paths[0])
Subsoil_SOC_file = pd.read_csv(Subsoil_SOC_file_paths[0])
Peatland_file = pd.read_csv(Peatland_file_paths[0])
classification_files.append(classification_file)
AGB_files.append(AGB_file)
BGB_files.append(BGB_file)
Topsoil_SOC_files.append(Topsoil_SOC_file)
Subsoil_SOC_files.append(Subsoil_SOC_file)
Peatland_files.append(Peatland_file)
except pd.errors.EmptyDataError:
continue
if not classification_files:
return None, None, None, None, None, None
# Merge the data across rows
merged_classification = pd.concat(classification_files, ignore_index=True)
merged_AGB = pd.concat(AGB_files, ignore_index=True)
merged_BGB = pd.concat(BGB_files, ignore_index=True)
merged_Topsoil_SOC = pd.concat(Topsoil_SOC_files, ignore_index=True)
merged_Subsoil_SOC = pd.concat(Subsoil_SOC_files, ignore_index=True)
merged_Peatland = pd.concat(Peatland_files, ignore_index=True)
# Return the merged classification, AGB/BGB, SOC and peatland files
return merged_classification, merged_AGB, merged_BGB, merged_Topsoil_SOC, merged_Subsoil_SOC, merged_Peatland
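# Usage sketch (hypothetical country name, defined but not called): the function returns six
# row-merged DataFrames, or six None values when no classification file is found.
def _example_find_forest_loss():
    cls, agb, bgb, soc_top, soc_sub, peat = find_forest_loss_files(['Ecuador'], GEE_datasets_version)
    if cls is None:
        print('No GEE forest-loss attribution files found for this country')
    return cls, agb, bgb, soc_top, soc_sub, peat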
## Function to extract FAO Landuse files
def preprocessing_dataframe_landuse_FAO(Landuse, Country):
if Country == 'French Guiana':
Country = 'French Guyana'
Landuse = Landuse[['Area', 'Item', 'Year', 'Value', 'Flag']]
Landuse['Value'] = Landuse['Value']*1000 # FAO 'Area harvested' values are in 1000 ha
Landuse = Landuse.sort_values(by = 'Year')
Landuse = Landuse[(Landuse['Year'] >= 2001)] # Screen values before year 2000
if Country == 'Serbia and Montenegro':
Landuse = Landuse[Landuse['Area'].isin(['Serbia', 'Montenegro', 'Serbia and Montenegro'])]
Landuse['Area'] = Country
elif Country == 'Serbia':
Landuse = Landuse[Landuse['Area'].isin(['Serbia', 'Serbia and Montenegro'])]
Landuse['Area'] = Country
elif Country == 'Montenegro':
Landuse = Landuse[Landuse['Area'].isin(['Montenegro', 'Serbia and Montenegro'])]
Landuse['Area'] = Country
elif Country == 'Sudan (former)':
Landuse = Landuse[Landuse['Area'].isin(['Sudan', 'South Sudan', 'Sudan (former)'])]
Landuse['Area'] = Country
elif Country == 'Sudan':
Landuse = Landuse[Landuse['Area'].isin(['Sudan', 'Sudan (former)'])]
Landuse['Area'] = Country
elif Country == 'South Sudan':
Landuse = Landuse[Landuse['Area'].isin(['South Sudan', 'Sudan (former)'])]
Landuse['Area'] = Country
elif Country == 'China, mainland':
Landuse = Landuse[Landuse['Area'].isin(['China, mainland', 'China, Hong Kong SAR', 'China, Macao SAR'])]
Landuse['Area'] = Country
else:
Landuse = Landuse[Landuse['Area'] == Country]
Landuse = Landuse[Landuse['Item'].isin(['Cropland', 'Permanent meadows and pastures', 'Planted Forest'])] # Since we are only interested in cropland expansion, pasture expansion and plantation expansion
Landuse["Item"] = Landuse["Item"].replace("Planted Forest", "Plantation") # Replace column name
Landuse["Item"] = Landuse["Item"].replace("Permanent meadows and pastures", "Pasture")
Landuse = Landuse.groupby(['Area', 'Item', 'Year']).agg({'Flag': np.min, 'Value': 'sum'}).reset_index() # Clean the data when multiple countries area being read
Landuse = Landuse.pivot_table(index=['Area', 'Year'], columns=['Item'], values=['Value', 'Flag'], aggfunc='first')
# flatten the column names
Landuse.columns = ['_'.join(col).rstrip('_') for col in Landuse.columns.values]
# reset the index
Landuse = Landuse.reset_index()
Landuse[['Area', 'Year'] + [col for col in Landuse.columns if col.startswith('Value_')]] = Landuse[['Area', 'Year'] + [col for col in Landuse.columns if col.startswith('Value_')]].fillna(0)
Landuse[[col for col in Landuse.columns if col.startswith('Flag_')]] = Landuse[[col for col in Landuse.columns if col.startswith('Flag_')]].fillna('Z')
return Landuse
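# Usage sketch (hypothetical country name, defined but not called; mirrors the call made inside
# the attribution function below): read the bulk FAO land-use CSV and reshape it into one row per
# year with 'Value_'/'Flag_' columns for Cropland, Pasture and Plantation where available.
def _example_landuse_preprocessing():
    landuse_raw = pd.read_csv(FAO_Landuse_version, encoding="ISO-8859-1")
    return preprocessing_dataframe_landuse_FAO(landuse_raw, 'Ecuador')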
## Function to extract FAO production files
def preprocessing_dataframe_crop_livestock_FAO(Production, Country):
if Country == 'Netherlands (Kingdom of the)':
Country = 'Netherlands'
Production = Production[Production['Element'] == 'Area harvested']
Production = Production[['Area', 'Item', 'Year', 'Value', 'Flag', 'Item Code (CPC)']]
Production = Production.sort_values(by = 'Year')
Production = Production[(Production['Year'] >= 2001) & (Production['Year'] <= end_year)] # Change this to the last year. Forcefully restricting to year 2021
if Country == 'Serbia and Montenegro':
Production = Production[Production['Area'].isin(['Serbia', 'Montenegro', 'Serbia and Montenegro'])]
Production['Area'] = Country
elif Country == 'Serbia':
Production = Production[Production['Area'].isin(['Serbia','Serbia and Montenegro'])]
Production['Area'] = Country
elif Country == 'Montenegro':
Production = Production[Production['Area'].isin(['Montenegro', 'Serbia and Montenegro'])]
Production['Area'] = Country
elif Country == 'Sudan (former)':
Production = Production[Production['Area'].isin(['Sudan', 'South Sudan', 'Sudan (former)'])]
Production['Area'] = Country
elif Country == 'Sudan':
Production = Production[Production['Area'].isin(['Sudan', 'Sudan (former)'])]
Production['Area'] = Country
elif Country == 'South Sudan':
Production = Production[Production['Area'].isin(['South Sudan', 'Sudan (former)'])]
Production['Area'] = Country
elif Country == 'China, mainland':
Production = Production[Production['Area'].isin(['China, mainland', 'China, Hong Kong SAR', 'China, Macao SAR'])]
Production['Area'] = Country
else:
Production = Production[Production['Area'] == Country]
unique_code = Production['Item Code (CPC)'].unique()
mask = np.array([not s.startswith('\'F') for s in unique_code])
unique_code = unique_code[mask]
Production = Production.loc[Production['Item Code (CPC)'].isin(unique_code)]
unique = Production['Item'].unique() # Unique values are used for commodity attribution
Production = Production.groupby(['Area', 'Item', 'Year']).agg({'Flag': np.min, 'Value': 'sum'}).reset_index() # Clean the data when multiple countries area being read
Production = Production.pivot_table(index=['Area', 'Year'], columns=['Item'], values=['Value', 'Flag'], aggfunc='first')
Production.columns = ['_'.join(col).rstrip('_') for col in Production.columns.values]
# reset the index
Production = Production.reset_index()
Production[['Area', 'Year'] + [col for col in Production.columns if col.startswith('Value_')]] = Production[['Area', 'Year'] + [col for col in Production.columns if col.startswith('Value_')]].fillna(0)
Production[[col for col in Production.columns if col.startswith('Flag_')]] = Production[[col for col in Production.columns if col.startswith('Flag_')]].fillna('Z')
return Production, unique
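# Usage sketch (hypothetical country name, defined but not called; the encoding is assumed to
# match the land-use read above): returns the wide 'Area harvested' table for 2001-end_year plus
# the list of unique FAO commodity names used later for commodity-level attribution.
def _example_production_preprocessing():
    production_raw = pd.read_csv(FAO_Production_version, encoding="ISO-8859-1")
    return preprocessing_dataframe_crop_livestock_FAO(production_raw, 'Ecuador')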
# Function to extract forest plantation dataset from FRA (https://fra-data.fao.org/assessments/fra/2020)
def preprocessing_dataframe_FRA(Global_Forest_Resources_Assessment, Country):
if Country == 'China, mainland':
Country = 'China'
if Country == 'French Guiana':
Country = 'French Guyana'
if Country == 'Côte d\'Ivoire':
Country = 'Côte d\'Ivoire'
if Country == 'Türkiye':
Country = 'Türkiye'
Global_Forest_Resources_Assessment = Global_Forest_Resources_Assessment.rename(columns = {'Unnamed: 0': 'Area'})
Global_Forest_Resources_Assessment = Global_Forest_Resources_Assessment.fillna(0)
Global_Forest_Resources_Assessment['Area'] = Global_Forest_Resources_Assessment['Area'].str.replace(' \(Desk study\)', '', regex=True)
if Country == 'Sudan (former)':
Global_Forest_Resources_Assessment = Global_Forest_Resources_Assessment[Global_Forest_Resources_Assessment['Area'].isin(['Sudan', 'South Sudan'])]
Global_Forest_Resources_Assessment['Area'] = Country
Global_Forest_Resources_Assessment = Global_Forest_Resources_Assessment.groupby(['Area']).sum().reset_index()
if Country == 'Serbia and Montenegro':
Global_Forest_Resources_Assessment = Global_Forest_Resources_Assessment[Global_Forest_Resources_Assessment['Area'].isin(['Serbia', 'Montenegro'])]
Global_Forest_Resources_Assessment['Area'] = Country
Global_Forest_Resources_Assessment = Global_Forest_Resources_Assessment.groupby(['Area']).sum().reset_index()
else:
Global_Forest_Resources_Assessment = Global_Forest_Resources_Assessment[Global_Forest_Resources_Assessment['Area'] == Country]
# Values to add (since data is only available for years 2000 and 2010)
years_to_add = np.linspace(2000, 2010, 10 + 1)[1:-1]
values_to_add = np.linspace(Global_Forest_Resources_Assessment['2000'].values[0], Global_Forest_Resources_Assessment['2010'].values[0], 10 + 1)[1:-1]
# Add the values as new columns to the DataFrame
for year, value in zip(years_to_add, values_to_add):
Global_Forest_Resources_Assessment[str(int(year))] = value
# Values to add (since data is only available for years 2010 and 2015)
years_to_add = np.linspace(2010, 2015, 5 + 1)[1:-1]
values_to_add = np.linspace(Global_Forest_Resources_Assessment['2010'].values[0], Global_Forest_Resources_Assessment['2015'].values[0], 5 + 1)[1:-1]
# Add the values as new columns to the DataFrame
for year, value in zip(years_to_add, values_to_add):
Global_Forest_Resources_Assessment[str(int(year))] = value
if '2021' not in Global_Forest_Resources_Assessment.columns:
Global_Forest_Resources_Assessment['2021'] = Global_Forest_Resources_Assessment['2020'] + ((Global_Forest_Resources_Assessment['2018']-Global_Forest_Resources_Assessment['2017']) + \
(Global_Forest_Resources_Assessment['2019']-Global_Forest_Resources_Assessment['2018']) + \
(Global_Forest_Resources_Assessment['2020']-Global_Forest_Resources_Assessment['2019']))/3
Global_Forest_Resources_Assessment = Global_Forest_Resources_Assessment.sort_index(axis=1)
Global_Forest_Resources_Assessment = Global_Forest_Resources_Assessment.loc[:, '2001':'2021']
Global_Forest_Resources_Assessment = Global_Forest_Resources_Assessment*1000
return Global_Forest_Resources_Assessment.values[0]
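# Standalone illustration of the gap-filling above (toy numbers): FRA planted-forest area is
# reported for 2000 and 2010 only, so the intermediate years are filled by linear interpolation.
def _interpolate_fra_example(area_2000=1000.0, area_2010=1500.0):
    years = np.linspace(2000, 2010, 10 + 1)[1:-1]             # 2001..2009
    values = np.linspace(area_2000, area_2010, 10 + 1)[1:-1]  # evenly spaced between the endpoints
    return dict(zip(years.astype(int), values))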
####################
# 3.2 Specify country-specific sub-national commodity functions
####################
# Run this function outside the main function to reduce run time
# Defined below for Brazil
def preprocessing_dataframe_production_IBGE():
# Orientation of the table downloaded from IBGE: https://sidra.ibge.gov.br/pesquisa/pam/tabelas
# Tabela 5457 - Área plantada ou destinada à colheita, área colhida, quantidade produzida, rendimento médio e valor da produção das lavouras temporárias e permanentes
# in Portuguese: ['Município', 'Variável', 'Ano', 'Produto das lavouras temporárias e permanentes', 'Values of commodities ha']
# in English: ['Municipality', 'Variable', 'Year', 'Product of temporary and permanent crops', 'Values of commodities ha']
# File path where IBGE datasets are stored
dataset_path = IBGE_dataset_version
# Here we specifically read Tabela 5457, which contains 'Area planted or destined for harvest, harvested area, quantity produced, average yield and production value of temporary and permanent crops'
file_pattern = 'tabela5457-'
csv_files = [os.path.join(dataset_path, f) for f in os.listdir(dataset_path) if file_pattern in f]
data = []
# Loop through all csv files (downloaded in parts as the size of individual csv is quite large)
for file in csv_files:
try:
production = pd.read_csv(file, skiprows=[0])
# Since we are interested in planted area, we extract 'Área plantada ou destinada à colheita (Hectares)' -> (English translation) 'Area planted or intended for harvesting (Hectares)'
production = production.loc[production['Variável'] == 'Área plantada ou destinada à colheita (Hectares)'] \
[['Município', 'Ano', 'Produto das lavouras temporárias e permanentes', 'Unnamed: 4']]
production = production.rename(columns = {'Município': 'Area', 'Ano': 'Year', 'Produto das lavouras temporárias e permanentes': 'Item', 'Unnamed: 4': 'Value'})
production['Item'] = production['Item'].map(IBGE_crops.set_index('IBGE Crops')['FAO name'])
# Append only if the extracted table is not empty
if not production.empty:
data.append(production)
except pd.errors.EmptyDataError:
#print('CSV file is empty')
continue
if not data:
return None
production_df = pd.concat(data)
production_df = production_df.pivot_table(index=['Area', 'Year'], columns=['Item'], values=['Value'], aggfunc='first')
production_df.columns = ['_'.join(col).rstrip('_') for col in production_df.columns.values] # Creates columns with name 'Value_'+[Commodity]
production_df = production_df.reset_index()
production_df[['Area', 'Year'] + [col for col in production_df.columns if col.startswith('Value_')]] = production_df[['Area', 'Year'] + [col for col in production_df.columns if col.startswith('Value_')]].fillna(0)
production_df = production_df.replace('-', 0) # Replacing empy columns with zero
production_df = production_df.replace('...', 0) # Replacing empy columns with zero
production_df['Year'] = production_df['Year'].astype(int)
return production_df
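# Toy illustration (hypothetical records, defined but not called) of the pivot used above:
# long-format IBGE rows become one 'Value_<commodity>' column per crop, indexed by municipality and year.
def _example_ibge_pivot():
    toy = pd.DataFrame({'Area': ['Altamira (PA)', 'Altamira (PA)'], 'Year': [2019, 2019],
                        'Item': ['Soya beans', 'Maize (corn)'], 'Value': [100, 50]})
    wide = toy.pivot_table(index=['Area', 'Year'], columns=['Item'], values=['Value'], aggfunc='first')
    wide.columns = ['_'.join(col).rstrip('_') for col in wide.columns.values]
    return wide.reset_index()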
# For multi-cropped commodities, replace values with the first harvest only
def preprocessing_dataframe_production_IBGE_multicropping(Crop):
# Orientation of the table downloaded from IBGE: https://sidra.ibge.gov.br/pesquisa/pam/tabelas
# Tabela 5457 - Área plantada ou destinada à colheita, área colhida, quantidade produzida, rendimento médio e valor da produção das lavouras temporárias e permanentes
# in Portuguese: ['Município', 'Variável', 'Ano', 'Produto das lavouras temporárias e permanentes', 'Values of commodities ha']
# in English: ['Municipality', 'Variable', 'Year', 'Product of temporary and permanent crops', 'Values of commodities ha']
# File path where IBGE datasets are stored
dataset_path = IBGE_dataset_version
# Here we read the first-harvest tables for multi-cropped commodities (Tabela 839, 1000, 1001 and 1002), which contain the planted area of the first harvest
if Crop == 'Maize (corn)':
file_pattern = 'tabela839'
cropname = 'Milho (em grão) - 1ª safra'
elif Crop == 'Groundnuts, excluding shelled':
file_pattern = 'tabela1000'
cropname = 'Amendoim (em casca) - 1ª safra'
elif Crop == 'Potatoes':
file_pattern = 'tabela1001'
cropname = 'Batata-inglesa - 1ª safra'
elif Crop == 'Beans, dry':
file_pattern = 'tabela1002'
cropname = 'Feijão (em grão) - 1ª safra'
csv_files = [os.path.join(dataset_path, f) for f in os.listdir(dataset_path) if file_pattern in f]
data = []
# Loop through all csv files (downloaded in parts as the size of individual csv is quite large)
for file in csv_files:
try:
production = pd.read_csv(file, skiprows=[0])
# Since we are interested in planted area, we extract 'Área plantada (Hectares)' -> (English translation) 'Planted area (Hectares)'
production = production.loc[production['Variável'] == 'Área plantada (Hectares)'] \
[['Município', 'Ano', 'Produto das lavouras temporárias', 'Unnamed: 4']]
production = production.rename(columns = {'Município': 'Area', 'Ano': 'Year', 'Produto das lavouras temporárias': 'Item', 'Unnamed: 4': 'Value'})
production = production.loc[production['Item'] == cropname]
# Append only if the extracted table is not empty
if not production.empty:
data.append(production)
except pd.errors.EmptyDataError:
#print('CSV file is empty')
continue
if not data:
return None
production_df = pd.concat(data)
production_df = production_df.pivot_table(index=['Area', 'Year'], columns=['Item'], values=['Value'], aggfunc='first')
production_df.columns = ['_'.join(col).rstrip('_') for col in production_df.columns.values] # Creates columns with name 'Value_'+[Commodity]
production_df = production_df.reset_index()
production_df[['Area', 'Year'] + [col for col in production_df.columns if col.startswith('Value_')]] = production_df[['Area', 'Year'] + [col for col in production_df.columns if col.startswith('Value_')]].fillna(0)
production_df = production_df.replace('-', 0) # Replacing empy columns with zero
production_df = production_df.replace('...', 0) # Replacing empy columns with zero
production_df['Year'] = production_df['Year'].astype(int)
production_df = production_df.rename(columns = {'Value_'+cropname: 'Value_'+Crop})
# Set 'Area' and 'Year' as the index in both DataFrames
Production_IBGE.set_index(['Area', 'Year'], inplace=True)
production_df.set_index(['Area', 'Year'], inplace=True)
# Update values from 'Production_IBGE' with values from 'production_df'
Production_IBGE.update(production_df, overwrite=True)
# Reset the index to the default
Production_IBGE.reset_index(inplace=True)
return Production_IBGE
# Extract SOC loss values from the table based on depth and Ecoregion
def extract_SOC_numeric(SOC_loss, Depth, Country_Ecoregion, Landuse):
Symbol = ''
Value = SOC_loss.loc[(SOC_loss['Depth'] == Depth) & (SOC_loss['Ecoregion group'] == Country_Ecoregion), Landuse].values[0]
if isinstance(Value, (int, float)):
# Value is already numeric, no further processing needed
pass
elif isinstance(Value, str):
# Value is a string, attempt to replace symbols
symbols_to_replace = ['†', '*'] # Add more symbols if needed
for symbol in symbols_to_replace:
if symbol in Value:
try:
Value = int(float(Value.replace(symbol, '').replace(',', ''))) # Convert to float instead of int
Symbol = symbol # Store the symbol that was successfully replaced
break
except ValueError:
# Handle the case where the string can't be converted to a float
print(f"Error converting to float for symbol: {symbol}")
else:
print("Unknown Data Type")
return Value, Symbol
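# Usage sketch (hypothetical depth/ecoregion/land-use labels, defined but not called; the real
# labels come from the 'Lookup-SOC Loss' sheet loaded in section 2): returns the numeric SOC loss
# value plus any footnote symbol that was attached to it in the lookup table.
def _example_extract_soc():
    value, symbol = extract_SOC_numeric(SOC_loss, '0-30', 'Tropical moist', 'Cropland')
    return value, symbol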
####################
# 4. Pre-processing: Land balance - Extracting land use attributed to certain land uses/commodities
####################
## Function starts here
def run_att_in_loop(Country, subregion = None):
global new_df_TCL
#print(Country, subregion)
Country_ISO = Country_lookup_codes.loc[Country_lookup_codes['GADM Countries'] == Country, 'GID_0'].values[0] # Match country with ISO code
Country_FAO = Country_lookup_codes.loc[Country_lookup_codes['GADM Countries'] == Country, 'FAO countries'].values[0] # Match country with FAO country name
Country_GADM = Country_lookup_codes.loc[Country_lookup_codes['GADM Countries'] == Country, 'GADM Countries'].values[0] # Save GADM country name for incorporating in final export
Country_Ecoregion = Country_lookup_codes_ecoregion.loc[Country_lookup_codes_ecoregion['COUNTRY'] == Country, 'ECOREGION GROUP'].values[0]
Country_Group = Country_lookup_codes_ecoregion.loc[Country_lookup_codes_ecoregion['COUNTRY'] == Country, 'COUNTRY GROUP'].values[0]
# Addressing special characters in country names
if Country == 'Åland':
Country = 'Aland'
elif Country =='Côte d\'Ivoire':
Country = 'Cote dIvoire'
elif Country == 'Curaçao':
Country = 'Curacao'
elif Country == 'México':
Country = 'Mexico'
elif Country == 'Réunion':
Country = 'Reunion'
elif Country == 'Saint-Barthélemy':
Country = 'Saint-Barthelemy'
elif Country == 'São Tomé and Príncipe':
Country = 'Sao Tome and Principe'
# Calling the Forest_loss_attributed function
if Country == 'Serbia and Montenegro':
Forest_loss_attributed, AGB, BGB, SOC_top, SOC_sub, Peatland = find_forest_loss_files(['Serbia','Montenegro'], GEE_datasets_version)
Forest_loss_attributed['COUNTRY'] = Country
AGB['COUNTRY'] = Country
BGB['COUNTRY'] = Country
SOC_top['COUNTRY'] = Country
SOC_sub['COUNTRY'] = Country
Peatland['COUNTRY'] = Country
elif Country == 'Sudan and South Sudan':
Forest_loss_attributed, AGB, BGB, SOC_top, SOC_sub, Peatland = find_forest_loss_files(['Sudan','South Sudan'], GEE_datasets_version)
Forest_loss_attributed['COUNTRY'] = Country
AGB['COUNTRY'] = Country
BGB['COUNTRY'] = Country
SOC_top['COUNTRY'] = Country
SOC_sub['COUNTRY'] = Country
Peatland['COUNTRY'] = Country
else:
Forest_loss_attributed, AGB, BGB, SOC_top, SOC_sub, Peatland = find_forest_loss_files([Country], GEE_datasets_version)
# If Forest_loss_attributed doesn't have a value, we exit the function
if Forest_loss_attributed is None:
return None
# In[201]:
def preprocess_dataframe(df, subregion, necessary_classes, end_year):
if Peatland.empty:
# Create an empty row with zeros
new_row = pd.DataFrame(0, index=[0], columns=df.columns)
# Concatenate the empty row to the existing DataFrame
df = pd.concat([new_row, df]).reset_index(drop=True)
if Country in Country_with_subnational_attribution:
df = (df.loc[df['GID_2'] == subregion].groupby(['COUNTRY', 'GID_2', 'Class']).sum().reset_index()[['COUNTRY', 'Class'] \
+ [f'loss_{year}' for year in range(2001, end_year+1)]]).iloc[:,1:]
else:
df = (df.groupby(['COUNTRY', 'Class']).sum().reset_index()[['COUNTRY', 'Class'] \
+ [f'loss_{year}' for year in range(2001, end_year+1)]]).iloc[:,1:]
for column in necessary_classes:
if not (df['Class'] == column).any():
df.loc[len(df.index)] = [column] + [0] * (len(df.columns) - 1)
df.set_index('Class', inplace=True)
df.sort_values(by='Class', ascending=False, inplace=True)
return df
Forest_loss_attributed = preprocess_dataframe(Forest_loss_attributed, subregion, necessary_classes, end_year)
AGB = preprocess_dataframe(AGB, subregion, necessary_classes, end_year)
BGB = preprocess_dataframe(BGB, subregion, necessary_classes, end_year)
Carbon_Emissions_Biomass = (AGB+BGB)/10**6 # Converting from Mg or tonnes to Million tonnes CO2
Carbon_SOC_top = preprocess_dataframe(SOC_top, subregion, necessary_classes, end_year)/10**6
Carbon_SOC_sub = preprocess_dataframe(SOC_sub, subregion, necessary_classes, end_year)/10**6
Peatland = preprocess_dataframe(Peatland, subregion, necessary_classes, end_year)
# Define the reference DataFrame
reference_df = Forest_loss_attributed
# Align and filter other DataFrames
# Fill missing rows in Carbon_Emissions_Biomass with zeros
Carbon_Emissions_Biomass = Carbon_Emissions_Biomass.reindex(reference_df.index, fill_value=0)
# Repeat the same process for Carbon_SOC_top and Carbon_SOC_sub
Carbon_SOC_top = Carbon_SOC_top.reindex(reference_df.index, fill_value=0)
Carbon_SOC_sub = Carbon_SOC_sub.reindex(reference_df.index, fill_value=0)
def update_and_delete_values(df, index_to_update, index_to_delete):
# Ensure that both indices are in the DataFrame
if index_to_update in df.index and index_to_delete in df.index:
# Add values from one row to another
df.loc[index_to_update] += df.loc[index_to_delete].values
# Delete the row with index_to_delete
df.drop(index_to_delete, inplace=True)
# List of your DataFrames
dataframes = [Forest_loss_attributed, Carbon_Emissions_Biomass, Carbon_SOC_top, Carbon_SOC_sub, Peatland]
# Merge the redundant 'Unknown Tree Crop' class into the global plantation tree-crop class (its values will later be counted as cropland)
# Apply the function to each DataFrame
for df in dataframes:
update_and_delete_values(df, GlobalPlantation_treecrop[0], redundant_agriculture_classes)
# Values in redundant classes are added to the broad agriculture class
for attribution in [redundant_agriculture_classes]:
if Forest_loss_attributed.loc[Forest_loss_attributed.index.isin([attribution])].values.size != 0:
Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(GlobalPlantation_treecrop)] += Forest_loss_attributed.loc[Forest_loss_attributed.index.isin([attribution])].values
Forest_loss_attributed.loc[Forest_loss_attributed.index.isin([attribution])] -= Forest_loss_attributed.loc[Forest_loss_attributed.index.isin([attribution])].values
Forest_loss_attributed = Forest_loss_attributed.drop(attribution)
Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin(GlobalPlantation_treecrop)] += Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin([attribution])].values
Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin([attribution])] -= Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin([attribution])].values
Carbon_Emissions_Biomass = Carbon_Emissions_Biomass.drop(attribution)
if Carbon_SOC_top.loc[Carbon_SOC_top.index.isin([attribution])].values.size != 0:
Carbon_SOC_top.loc[Carbon_SOC_top.index.isin(GlobalPlantation_treecrop)] += Carbon_SOC_top.loc[Carbon_SOC_top.index.isin([attribution])].values
Carbon_SOC_top.loc[Carbon_SOC_top.index.isin([attribution])] -= Carbon_SOC_top.loc[Carbon_SOC_top.index.isin([attribution])].values
Carbon_SOC_top = Carbon_SOC_top.drop(attribution)
if Carbon_SOC_sub.loc[Carbon_SOC_sub.index.isin([attribution])].values.size != 0:
Carbon_SOC_sub.loc[Carbon_SOC_sub.index.isin(GlobalPlantation_treecrop)] += Carbon_SOC_sub.loc[Carbon_SOC_sub.index.isin([attribution])].values
Carbon_SOC_sub.loc[Carbon_SOC_sub.index.isin([attribution])] -= Carbon_SOC_sub.loc[Carbon_SOC_sub.index.isin([attribution])].values
Carbon_SOC_sub = Carbon_SOC_sub.drop(attribution)
# In[203]:
# Segregate area between cropland and plantation species (as some commodities overlap between MapBiomas and Du et al-2022 plantation)
Crops_class = Commodity_lookup_codes.loc[Commodity_lookup_codes['Type'].isin(['Crop', 'Tree crop'])]
Crop_Forest_loss_attributed = Forest_loss_attributed[Forest_loss_attributed.index.isin(Crops_class['Code'].values)].index.values
Crop_Forest_loss_attributed_expansion_code = ['E_' + value for value in Crops_class[Crops_class.Code.isin(Crop_Forest_loss_attributed)]['Classification'].values]
# Read landuse dataset from FAO
Landuse = pd.read_csv(FAO_Landuse_version, encoding = "ISO-8859-1")
Landuse = preprocessing_dataframe_landuse_FAO(Landuse, Country_FAO)
# Only attribute forest loss to post-2000 drivers (only positive class indices, i.e., deforestation detected after 2000)
Forest_loss = Forest_loss_attributed[Forest_loss_attributed.index >= 0].sum()[[f'loss_{year}' for year in range(2001, end_year)]].values
Landuse['Forest_loss'] = Forest_loss
Global_Forest_Resources_Assessment = pd.read_csv(Global_FRA_version, encoding = "ISO-8859-1", skiprows=[0])
Landuse['Value_Plantation'] = preprocessing_dataframe_FRA(Global_Forest_Resources_Assessment, Country_FAO)
Landuse
# In[205]:
# Do gap filling
for column in ['Flag_Cropland', 'Flag_Pasture', 'Flag_Plantation', 'Value_Cropland', 'Value_Pasture', 'Value_Plantation']:
if not (Landuse.columns == column).any():
if column.startswith('Flag_'):
Landuse[column] = 'Z' # Empty 'Flags' are filled with 'Z'
else:
Landuse[column] = 0 # Numerical values are filled with '0'
# Extracting grass and crop loss
if Country == 'Brazil':
Pasture_loss = pd.read_excel(Crop_and_grass_loss_Brazil, 'grass_loss')
Crop_loss = pd.read_excel(Crop_and_grass_loss_Brazil, 'crop_loss')
else:
Pasture_loss = pd.read_excel(Crop_and_grass_loss, 'grass_loss')
Crop_loss = pd.read_excel(Crop_and_grass_loss, 'crop_loss')
if Country == 'Brazil':
Pasture_loss = Pasture_loss[Pasture_loss['Country'] == subregion][[f'Grassloss_{year}' for year in range(2001, end_year+1)]].transpose()*(-10**8) # Converting from Mkm2 to ha. Since values are negative, we multiply with -1
Crop_loss = Crop_loss[Crop_loss['Country'] == subregion][[f'Croploss_{year}' for year in range(2001, end_year+1)]].transpose()*(-10**8) # Converting from Mkm2 to ha. Since values are negative, we multiply with -1
elif Country == 'Serbia and Montenegro':
Pasture_loss = (Pasture_loss[Pasture_loss['Country'].isin(['Serbia', 'Montenegro'])][[f'Grassloss_{year}' for year in range(2001, end_year+1)]].transpose()*(-10**8)).sum(axis = 1)
Pasture_loss = pd.DataFrame(Pasture_loss)
Crop_loss = (Crop_loss[Crop_loss['Country'].isin(['Serbia', 'Montenegro'])][[f'Croploss_{year}' for year in range(2001, end_year+1)]].transpose()*(-10**8)).sum(axis = 1)
Crop_loss = pd.DataFrame(Crop_loss)
elif Country == 'Sudan and South Sudan':
Pasture_loss = (Pasture_loss[Pasture_loss['Country'].isin(['Sudan','South Sudan'])][[f'Grassloss_{year}' for year in range(2001, end_year+1)]].transpose()*(-10**8)).sum(axis = 1)
Pasture_loss = pd.DataFrame(Pasture_loss)
Crop_loss = (Crop_loss[Crop_loss['Country'].isin(['Sudan','South Sudan'])][[f'Croploss_{year}' for year in range(2001, end_year+1)]].transpose()*(-10**8)).sum(axis = 1)
Crop_loss = pd.DataFrame(Crop_loss)
else:
Pasture_loss = Pasture_loss[Pasture_loss['Country'] == Country_GADM][[f'Grassloss_{year}' for year in range(2001, end_year+1)]].transpose()*(-10**8) # Converting from Mkm2 to ha. Since values are negative, we multiply with -1
Crop_loss = Crop_loss[Crop_loss['Country'] == Country_GADM][[f'Croploss_{year}' for year in range(2001, end_year+1)]].transpose()*(-10**8) # Converting from Mkm2 to ha. Since values are negative, we multiply with -1
# In[208]:
# Processing crop and grass loss by averaging them over lag years
if (Crop_loss.size == 0 & Pasture_loss.size == 0):
crop_loss = 0
grass_loss = 0
else:
crop_loss = []
grass_loss = []
for year in range(2001, end_year_FAO_Landuse+1):
dynamic_lag = min(lag, end_year_FAO_Landuse-year)
#print(year, dynamic_lag)
if dynamic_lag == 0:
grass_loss.append(Pasture_loss[Pasture_loss.index == 'Grassloss_'+str(year)].iloc[:,0].values[0])
crop_loss.append(Crop_loss[Crop_loss.index == 'Croploss_'+str(year)].iloc[:,0].values[0])
else:
grass_loss.append(np.sum([Pasture_loss[Pasture_loss.index == 'Grassloss_'+str(year_lag)].iloc[:,0].values[0] for year_lag in range(year,year+dynamic_lag)])/dynamic_lag)
crop_loss.append(np.sum([Crop_loss[Crop_loss.index == 'Croploss_'+str(year_lag)].iloc[:,0].values[0] for year_lag in range(year,year+dynamic_lag)])/dynamic_lag)
Landuse['Grass_loss'] = grass_loss
Landuse['Crop_loss'] = crop_loss
if Country == 'Taiwan':
# Set 'Flag_Plantation' to 'A' for specific years
Landuse.loc[Landuse['Year'].isin([2000, 2010, 2015, 2020]), 'Flag_Plantation'] = 'A'
# Set 'Flag_Plantation' to 'E' for all other rows
Landuse.loc[~Landuse['Year'].isin([2000, 2010, 2015, 2020]), 'Flag_Plantation'] = 'E'
# In[79]:
####################
# 5.1 Land balance - statistical (i.e., determining expansion/attribution with FAO data)
####################
Landuse["CLE"] = 0
Landuse["PPE"] = 0
Landuse["FPE"] = 0
cropland_expansion = []
permanent_pasture_expansion = []
forest_plantation_expansion = []
# To be used for gapfilling
CLE_gap = []; PPE_gap = []; FPE_gap = []; FL_gap = []
# Calculates CLE, PPE and FPE
for year in range(2001, end_year_FAO_Landuse+1):
dynamic_lag = min(lag, end_year_FAO_Landuse-year)
net_crop_change = 0; net_pasture_change = 0; net_plantation_change = 0
net_crop_change = np.nan_to_num((Landuse[Landuse['Year'] == year+dynamic_lag]['Value_Cropland'].values - Landuse[Landuse['Year'] == year]['Value_Cropland'].values)[0]/dynamic_lag, nan=0.0, posinf=0.0, neginf=0.0)
net_pasture_change = np.nan_to_num((Landuse[Landuse['Year'] == year+dynamic_lag]['Value_Pasture'].values - Landuse[Landuse['Year'] == year]['Value_Pasture'].values)[0]/dynamic_lag, nan=0.0, posinf=0.0, neginf=0.0)
net_plantation_change = np.nan_to_num((Landuse[Landuse['Year'] == year+dynamic_lag]['Value_Plantation'].values - Landuse[Landuse['Year'] == year]['Value_Plantation'].values)[0]/dynamic_lag, nan=0.0, posinf=0.0, neginf=0.0)
GPL = max(min(net_pasture_change,Landuse[Landuse['Year'] == year]['Grass_loss'].values[0]), 0) # Gross pasture loss is minimum of pasture change from FAO and Li et al. grass loss, but never less than zero
CLE = max(net_crop_change + (Landuse[Landuse['Year'] == year]['Crop_loss'].values[0]) - GPL, 0) # Cropland expasion is sum of FAO cropland expansion + Gross Crop loss from li et al - Gross pasture loss from above (since croplands are expanding first into forests)
PPE = max(net_pasture_change + Landuse[Landuse['Year'] == year]['Grass_loss'].values[0], 0) # Similarly pasture expansion is sum of FAO pasture expansion + gross pasture loss (i.e., pasture loss due croplands expansion)
FPE = max(net_plantation_change, 0)
# The code snippet below fills the gap using the last three years of available data (only applies to the last FAO land use year, where expansion can't be determined directly)
if year >= end_year_FAO_Landuse-3 and year <= end_year_FAO_Landuse-1:
CLE_gap.append(CLE)
PPE_gap.append(PPE)
FPE_gap.append(FPE)
FL_gap.append(Landuse.loc[Landuse['Year'] == year, 'Forest_loss'].values[0])
if year == end_year_FAO_Landuse:
FL_gap = Forest_loss_attributed.loc[(Forest_loss_attributed.index == 1) | (Forest_loss_attributed.index >= 3000), 'loss_'+str(year)].sum()
FL_gap_crop = Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(range(Broad_LU_Agriculture_start,Broad_LU_Agriculture_end+1,1)) | Forest_loss_attributed.index.isin(Crop_Forest_loss_attributed)][['loss_'+str(year-3), 'loss_'+str(year-2), 'loss_'+str(year-1)]].sum().sum()
FL_gap_pasture = Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(range(Broad_LU_Pasture_start,Broad_LU_Pasture_end+1,1))][['loss_'+str(year-3), 'loss_'+str(year-2), 'loss_'+str(year-1)]].sum().sum()
FL_gap_plantation = Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(range(Broad_LU_Plantation_start,Broad_LU_Plantation_end+1,1)) & ~Forest_loss_attributed.index.isin(Crop_Forest_loss_attributed)][['loss_'+str(year-3), 'loss_'+str(year-2), 'loss_'+str(year-1)]].sum().sum()
# Since the expansion in the last FAO land use year can't be determined, we gap-fill by averaging over the last three available years
CLE = min(np.nansum(CLE_gap)/3, np.nansum(FL_gap)*(np.nansum(CLE_gap)/3)/(np.nansum(FL_gap_crop)/3))
PPE = min(np.nansum(PPE_gap)/3, np.nansum(FL_gap)*(np.nansum(PPE_gap)/3)/(np.nansum(FL_gap_pasture)/3))
FPE = min(np.nansum(FPE_gap)/3, np.nansum(FL_gap)*(np.nansum(FPE_gap)/3)/(np.nansum(FL_gap_plantation)/3))
cropland_expansion.append(CLE)
permanent_pasture_expansion.append(PPE)
forest_plantation_expansion.append(FPE)
else:
cropland_expansion.append(CLE)
permanent_pasture_expansion.append(PPE)
forest_plantation_expansion.append(FPE)
#print(year, dynamic_lag, CLE, PPE, FPE)
Landuse["CLE"] = cropland_expansion
Landuse["PPE"] = permanent_pasture_expansion
Landuse["FPE"] = forest_plantation_expansion
for column_FAO in ['Flag_Cropland','Flag_Pasture','Flag_Plantation']:
Flag = []
for year in range(2001, end_year_FAO_Landuse+1):
dynamic_lag = min(lag, end_year_FAO_Landuse-year)
lag_year = Landuse[Landuse['Year'] == year+dynamic_lag][column_FAO].values[0]
base_year = Landuse[Landuse['Year'] == year][column_FAO].values[0]
Flag.append(chr(min(ord(lag_year), ord(base_year))))
Landuse[column_FAO] = Flag
# In[212]:
# Normally there is a lag between the last forest loss year and the last FAO land use year. This fills that gap.
# If the column for the latest year is not available, gap-filling is done by taking averages over the last three years and proportionally assigning expansion based on forest loss (only classes with index > 0)
if Landuse.loc[Landuse['Year'] == end_year].size == 0:
# create dictionary of values for the new row
FL_gap = Forest_loss_attributed.loc[(Forest_loss_attributed.index == 1) | (Forest_loss_attributed.index >= 3000), 'loss_'+str(end_year)].sum()
FL_gap_crop = Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(range(Broad_LU_Agriculture_start,Broad_LU_Agriculture_end+1,1)) | Forest_loss_attributed.index.isin(Crop_Forest_loss_attributed)][['loss_'+str(end_year-3), 'loss_'+str(end_year-2), 'loss_'+str(end_year-1)]].sum().sum()
FL_gap_pasture = Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(range(Broad_LU_Pasture_start,Broad_LU_Pasture_end+1,1))][['loss_'+str(end_year-3), 'loss_'+str(end_year-2), 'loss_'+str(end_year-1)]].sum().sum()
FL_gap_plantation = Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(range(Broad_LU_Plantation_start,Broad_LU_Plantation_end+1,1)) & ~Forest_loss_attributed.index.isin(Crop_Forest_loss_attributed)][['loss_'+str(end_year-3), 'loss_'+str(end_year-2), 'loss_'+str(end_year-1)]].sum().sum()
new_row = {'Area': Country_FAO, 'Year': end_year, 'Forest_loss': FL_gap,
'CLE': min(np.nansum(CLE_gap)/3, np.nansum(FL_gap)*(np.nansum(CLE_gap)/3)/(np.nansum(FL_gap_crop)/3)),
'PPE': min(np.nansum(PPE_gap)/3, np.nansum(FL_gap)*(np.nansum(PPE_gap)/3)/(np.nansum(FL_gap_pasture)/3)),
'FPE': min(np.nansum(FPE_gap)/3, np.nansum(FL_gap)*(np.nansum(FPE_gap)/3)/(np.nansum(FL_gap_plantation)/3)),}
# create a new dataframe with only the columns of interest
new_df = pd.DataFrame({key: [new_row[key]] for key in ['Area', 'Year', 'Forest_loss', 'CLE', 'PPE', 'FPE']})
# append the new row to the existing dataframe
Landuse = Landuse.append(new_df, ignore_index=True)
# Filling other rows
Landuse.fillna({col: 0 if not col.startswith('Flag_') else 'Z' for col in Landuse.columns}, inplace=True)
# In[213]:
# Capping attributed forest loss at the observed land use expansion
for column, column_new in zip(['CLE', 'PPE', 'FPE'], ['F_CL', 'F_PP', 'F_FP']):
Total_landuse_expansion = Landuse['CLE'].values +Landuse['PPE'].values+Landuse['FPE'].values
Landuse[column_new] = np.minimum(np.nan_to_num(Landuse[column], nan=0.0, posinf=0.0, neginf=0.0),
np.nan_to_num((Landuse['Forest_loss'])*(Landuse[column])/(Total_landuse_expansion), nan=0.0, posinf=0.0, neginf=0.0))
# In[214]:
# Separating relevant data into a different dataframe
Landuse_attribution = Landuse[['Area', 'Year', 'Forest_loss', 'F_CL', 'F_PP', 'F_FP', 'Flag_Cropland','Flag_Pasture','Flag_Plantation']]
Landuse_attribution['FL_sum'] = Landuse_attribution[['F_CL', 'F_PP', 'F_FP']].sum(axis = 1).values
# In[215]:
Landuse[['Year','CLE', 'PPE', 'FPE', 'F_CL', 'F_PP', 'F_FP']]
# In[216]:
####################
# 5.2 Land balance - Spatial (i.e., attribution with spatial data)
####################
# Attributing broadly-attributed forest loss to cropland, pasture and plantation statistically from the FAO-derived CLE, PPE and FPE
for attribution in [MapBiomas_Mosiac, MapBiomas_Farming]:
# Skip this class if it does not exist in the attribution table
if Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(attribution)].size == 0:
continue
for Year in range(2001, end_year+1):
CL_att = 0; PP_att = 0; FP_att = 0
Total_landuse = np.nansum(Landuse.loc[Landuse['Year'] == Year, 'CLE'].values[0]+Landuse.loc[Landuse['Year'] == Year, 'PPE'].values[0])
# This 'if' branch is executed when the broad class includes plantation as well
if attribution == MapBiomas_Farming:
Total_landuse += Landuse.loc[Landuse['Year'] == Year, 'FPE'].values[0]
# If total land use is zero (i.e., no CLE, PPE or FPE to attribute to), all the area in the class is attributed to the Broad_LU_Undefined class
if Total_landuse == 0:
Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(Broad_LU_Undefined), 'loss_'+str(Year)] += Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(attribution), 'loss_'+str(Year)].values[0]
Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(attribution), 'loss_'+str(Year)] -= Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(attribution), 'loss_'+str(Year)].values[0]
# Do the same for carbon emissions
Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin(Broad_LU_Undefined), 'loss_'+str(Year)] += Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin(attribution), 'loss_'+str(Year)].values[0]
Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin(attribution), 'loss_'+str(Year)] -= Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin(attribution), 'loss_'+str(Year)].values[0]
Carbon_SOC_top.loc[Carbon_SOC_top.index.isin(Broad_LU_Undefined), 'loss_'+str(Year)] += Carbon_SOC_top.loc[Carbon_SOC_top.index.isin(attribution), 'loss_'+str(Year)].values[0]
Carbon_SOC_top.loc[Carbon_SOC_top.index.isin(attribution), 'loss_'+str(Year)] -= Carbon_SOC_top.loc[Carbon_SOC_top.index.isin(attribution), 'loss_'+str(Year)].values[0]
Carbon_SOC_sub.loc[Carbon_SOC_sub.index.isin(Broad_LU_Undefined), 'loss_'+str(Year)] += Carbon_SOC_sub.loc[Carbon_SOC_sub.index.isin(attribution), 'loss_'+str(Year)].values[0]
Carbon_SOC_sub.loc[Carbon_SOC_sub.index.isin(attribution), 'loss_'+str(Year)] -= Carbon_SOC_sub.loc[Carbon_SOC_sub.index.isin(attribution), 'loss_'+str(Year)].values[0]
continue
proportion_CL_att = 0; proportion_PP_att = 0; proportion_FP_att = 0
proportion_CL_att = (Landuse.loc[Landuse['Year'] == Year, 'CLE'].values[0])/Total_landuse
proportion_PP_att = (Landuse.loc[Landuse['Year'] == Year, 'PPE'].values[0])/Total_landuse
if attribution == MapBiomas_Farming:
proportion_FP_att = (Landuse.loc[Landuse['Year'] == Year, 'FPE'].values[0])/Total_landuse
# The line below assigns unattributed land use in the broad categories to either cropland or pasture based on the ratio CLE/(CLE+PPE) or CLE/(CLE+PPE+FPE)
CL_att = (Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(attribution), 'loss_'+str(Year)].sum())*proportion_CL_att
PP_att = (Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(attribution), 'loss_'+str(Year)].sum())*proportion_PP_att
# Recycling the 'proportion' variables to determine the proportion of land use that moves from one class to another. This is used for carbon allocation
proportion_CL_att = CL_att/Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(attribution), 'loss_'+str(Year)].values[0]
proportion_PP_att = PP_att/Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(attribution), 'loss_'+str(Year)].values[0]
if attribution == MapBiomas_Farming:
FP_att = (Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(attribution), 'loss_'+str(Year)].sum())*proportion_FP_att
proportion_FP_att = FP_att/Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(attribution), 'loss_'+str(Year)].values[0]
# These lines subtract the now-attributed land use from the broad categories and add it to the cropland, pasture and plantation classes
Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(MapBiomas_FarmingMosiac_cropland), 'loss_'+str(Year)] += np.nansum(CL_att)
Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(MapBiomas_FarmingMosiac_pasture), 'loss_'+str(Year)] += np.nansum(PP_att)
Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(MapBiomas_FarmingMosiac_plantation), 'loss_'+str(Year)] += np.nansum(FP_att)
Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(attribution), 'loss_'+str(Year)] -= np.nansum(CL_att + PP_att + FP_att)
Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin(MapBiomas_FarmingMosiac_cropland), 'loss_'+str(Year)] += np.nansum(Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin(attribution), 'loss_'+str(Year)]*proportion_CL_att)
Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin(MapBiomas_FarmingMosiac_pasture), 'loss_'+str(Year)] += np.nansum(Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin(attribution), 'loss_'+str(Year)]*proportion_PP_att)
Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin(MapBiomas_FarmingMosiac_plantation), 'loss_'+str(Year)] += np.nansum(Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin(attribution), 'loss_'+str(Year)]*proportion_FP_att)
Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin(attribution), 'loss_'+str(Year)] -= np.nansum(np.nansum(Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin(attribution), 'loss_'+str(Year)]*proportion_CL_att) +
np.nansum(Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin(attribution), 'loss_'+str(Year)]*proportion_PP_att) +
np.nansum(Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin(attribution), 'loss_'+str(Year)]*proportion_FP_att))
Carbon_SOC_top.loc[Carbon_SOC_top.index.isin(MapBiomas_FarmingMosiac_cropland), 'loss_'+str(Year)] += np.nansum(Carbon_SOC_top.loc[Carbon_SOC_top.index.isin(attribution), 'loss_'+str(Year)]*proportion_CL_att)
Carbon_SOC_top.loc[Carbon_SOC_top.index.isin(MapBiomas_FarmingMosiac_pasture), 'loss_'+str(Year)] += np.nansum(Carbon_SOC_top.loc[Carbon_SOC_top.index.isin(attribution), 'loss_'+str(Year)]*proportion_PP_att)
Carbon_SOC_top.loc[Carbon_SOC_top.index.isin(MapBiomas_FarmingMosiac_plantation), 'loss_'+str(Year)] += np.nansum(Carbon_SOC_top.loc[Carbon_SOC_top.index.isin(attribution), 'loss_'+str(Year)]*proportion_FP_att)
Carbon_SOC_top.loc[Carbon_SOC_top.index.isin(attribution), 'loss_'+str(Year)] -= np.nansum(np.nansum(Carbon_SOC_top.loc[Carbon_SOC_top.index.isin(attribution), 'loss_'+str(Year)]*proportion_CL_att) +
np.nansum(Carbon_SOC_top.loc[Carbon_SOC_top.index.isin(attribution), 'loss_'+str(Year)]*proportion_PP_att) +
np.nansum(Carbon_SOC_top.loc[Carbon_SOC_top.index.isin(attribution), 'loss_'+str(Year)]*proportion_FP_att))
Carbon_SOC_sub.loc[Carbon_SOC_sub.index.isin(MapBiomas_FarmingMosiac_cropland), 'loss_'+str(Year)] += np.nansum(Carbon_SOC_sub.loc[Carbon_SOC_sub.index.isin(attribution), 'loss_'+str(Year)]*proportion_CL_att)
Carbon_SOC_sub.loc[Carbon_SOC_sub.index.isin(MapBiomas_FarmingMosiac_pasture), 'loss_'+str(Year)] += np.nansum(Carbon_SOC_sub.loc[Carbon_SOC_sub.index.isin(attribution), 'loss_'+str(Year)]*proportion_PP_att)
Carbon_SOC_sub.loc[Carbon_SOC_sub.index.isin(MapBiomas_FarmingMosiac_plantation), 'loss_'+str(Year)] += np.nansum(Carbon_SOC_sub.loc[Carbon_SOC_sub.index.isin(attribution), 'loss_'+str(Year)]*proportion_FP_att)
Carbon_SOC_sub.loc[Carbon_SOC_sub.index.isin(attribution), 'loss_'+str(Year)] -= np.nansum(np.nansum(Carbon_SOC_sub.loc[Carbon_SOC_sub.index.isin(attribution), 'loss_'+str(Year)]*proportion_CL_att) +
np.nansum(Carbon_SOC_sub.loc[Carbon_SOC_sub.index.isin(attribution), 'loss_'+str(Year)]*proportion_PP_att) +
np.nansum(Carbon_SOC_sub.loc[Carbon_SOC_sub.index.isin(attribution), 'loss_'+str(Year)]*proportion_FP_att))
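# Illustrative sketch only (not called anywhere in this script): a minimal, dict-based version of the
# reallocation step above (and of the analogous step in the next cell), assuming simple {class: value}
# bookkeeping instead of the pandas DataFrames used here. The function name and arguments are hypothetical.
def _reallocate_broad_class(area, carbon, broad, cropland, pasture, plantation, cl_att, pp_att, fp_att):
    """Move cl_att/pp_att/fp_att of 'broad' into the specific classes and shift carbon in the same proportions."""
    broad_area = area[broad]
    # Proportion of the broad class handed to each specific class (guard against a zero-area broad class)
    p_cl = cl_att/broad_area if broad_area else 0.0
    p_pp = pp_att/broad_area if broad_area else 0.0
    p_fp = fp_att/broad_area if broad_area else 0.0
    # Area bookkeeping: the broad class shrinks by exactly what the specific classes gain
    area[cropland] += cl_att
    area[pasture] += pp_att
    area[plantation] += fp_att
    area[broad] -= cl_att + pp_att + fp_att
    # Carbon follows the same proportions as the area
    broad_carbon = carbon[broad]
    carbon[cropland] += broad_carbon*p_cl
    carbon[pasture] += broad_carbon*p_pp
    carbon[plantation] += broad_carbon*p_fp
    carbon[broad] -= broad_carbon*(p_cl + p_pp + p_fp)
    return area, carbon
# Applying the same proportions to biomass and to both SOC pools, as done above, keeps the area and carbon books consistent.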
# In[217]:
# The code below follows a similar logic, with the key difference that the land use attributed here is capped at the remaining (not yet attributed) CLE, PPE, and FPE
for attribution in [Dominant_Agriculture, Hansen_Unattributed]:
if Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(attribution)].size == 0:
continue
if attribution == Dominant_Agriculture:
Cropland = Dominant_cropland
Pasture = Dominant_pasture
Plantation = Dominant_plantation
if attribution == Hansen_Unattributed:
Cropland = Hansen_cropland
Pasture = Hansen_pasture
Plantation = Hansen_plantation
for Year in range(2001, end_year+1):
CL_att = 0; PP_att = 0; FP_att = 0
Total_landuse = np.nansum(Landuse.loc[Landuse['Year'] == Year, 'CLE'].values[0]+Landuse.loc[Landuse['Year'] == Year, 'PPE'].values[0])
if attribution == Hansen_Unattributed:
Total_landuse += Landuse.loc[Landuse['Year'] == Year, 'FPE'].values[0]
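# If there was no cropland, pasture, or (for the Hansen unattributed class) plantation expansion in this Year, the loss cannot be split proportionally, so it is moved to the undefined broad class instead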
if Total_landuse == 0:
Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(Broad_LU_Undefined), 'loss_'+str(Year)] += Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(attribution), 'loss_'+str(Year)].values[0]
Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(attribution), 'loss_'+str(Year)] -= Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(attribution), 'loss_'+str(Year)].values[0]
Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin(Broad_LU_Undefined), 'loss_'+str(Year)] += Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin(attribution), 'loss_'+str(Year)].values[0]
Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin(attribution), 'loss_'+str(Year)] -= Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin(attribution), 'loss_'+str(Year)].values[0]
Carbon_SOC_top.loc[Carbon_SOC_top.index.isin(Broad_LU_Undefined), 'loss_'+str(Year)] += Carbon_SOC_top.loc[Carbon_SOC_top.index.isin(attribution), 'loss_'+str(Year)].values[0]
Carbon_SOC_top.loc[Carbon_SOC_top.index.isin(attribution), 'loss_'+str(Year)] -= Carbon_SOC_top.loc[Carbon_SOC_top.index.isin(attribution), 'loss_'+str(Year)].values[0]
Carbon_SOC_sub.loc[Carbon_SOC_sub.index.isin(Broad_LU_Undefined), 'loss_'+str(Year)] += Carbon_SOC_sub.loc[Carbon_SOC_sub.index.isin(attribution), 'loss_'+str(Year)].values[0]
Carbon_SOC_sub.loc[Carbon_SOC_sub.index.isin(attribution), 'loss_'+str(Year)] -= Carbon_SOC_sub.loc[Carbon_SOC_sub.index.isin(attribution), 'loss_'+str(Year)].values[0]
continue
proportion_CL_att = 0; proportion_PP_att = 0; proportion_FP_att = 0
proportion_CL_att = (Landuse.loc[Landuse['Year'] == Year, 'CLE'].values[0])/Total_landuse
proportion_PP_att = (Landuse.loc[Landuse['Year'] == Year, 'PPE'].values[0])/Total_landuse
if attribution == Hansen_Unattributed:
proportion_FP_att = (Landuse.loc[Landuse['Year'] == Year, 'FPE'].values[0])/Total_landuse
# The lines below assign unattributed land use in the broad categories to either cropland or pasture, based on the ratio CLE/(CLE+PPE) (or CLE/(CLE+PPE+FPE) for the Hansen unattributed class).
# Furthermore, the area assigned is always less than or equal to the remaining CLE, PPE, and FPE
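# Illustrative worked example (hypothetical numbers): if CLE = 100 ha, of which 80 ha are already attributed to cropland classes, only 20 ha remain assignable; if this class's proportional share of the loss would be 30 ha, then CL_att = min(max(100 - 80, 0), 30) = 20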
CL_att = min(max(Landuse.loc[Landuse['Year'] == Year, 'CLE'].values[0] - (Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(range(Broad_LU_Agriculture_start,Broad_LU_Agriculture_end+1,1)) | Forest_loss_attributed.index.isin(Crop_Forest_loss_attributed), 'loss_'+str(Year)]).sum(axis = 0), 0),
(Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(attribution), 'loss_'+str(Year)].sum())*proportion_CL_att)
PP_att = min(max(Landuse.loc[Landuse['Year'] == Year, 'PPE'].values[0] - (Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(range(Broad_LU_Pasture_start,Broad_LU_Pasture_end+1,1)), 'loss_'+str(Year)]).sum(axis = 0), 0),
(Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(attribution), 'loss_'+str(Year)].sum())*proportion_PP_att)
# Recycling the 'proportion variables' to determine the proportion of land use that moves from one class to another. This is used for carbon allocation
proportion_CL_att = CL_att/Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(attribution), 'loss_'+str(Year)].values[0]
proportion_PP_att = PP_att/Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(attribution), 'loss_'+str(Year)].values[0]
if attribution == Hansen_Unattributed:
FP_att = min(max(Landuse.loc[Landuse['Year'] == Year, 'FPE'].values[0] - (Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(range(Broad_LU_Plantation_start,Broad_LU_Plantation_end+1,1)) & ~Forest_loss_attributed.index.isin(Crop_Forest_loss_attributed), 'loss_'+str(Year)]).sum(axis = 0), 0),
(Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(attribution), 'loss_'+str(Year)].sum())*proportion_FP_att)
proportion_FP_att = FP_att/Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(attribution), 'loss_'+str(Year)].values[0]
# These lines subtract the now-attributed land use from the broad categories and add it to the cropland, pasture, and plantation classes
Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(Cropland), 'loss_'+str(Year)] += np.nansum(CL_att)
Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(Pasture), 'loss_'+str(Year)] += np.nansum(PP_att)
Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(Plantation), 'loss_'+str(Year)] += np.nansum(FP_att)
Forest_loss_attributed.loc[Forest_loss_attributed.index.isin(attribution), 'loss_'+str(Year)] -= np.nansum(CL_att + PP_att + FP_att)
Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin(Cropland), 'loss_'+str(Year)] += np.nansum(Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin(attribution), 'loss_'+str(Year)]*proportion_CL_att)
Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin(Pasture), 'loss_'+str(Year)] += np.nansum(Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin(attribution), 'loss_'+str(Year)]*proportion_PP_att)
Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin(Plantation), 'loss_'+str(Year)] += np.nansum(Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin(attribution), 'loss_'+str(Year)]*proportion_FP_att)
Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin(attribution), 'loss_'+str(Year)] -= np.nansum(np.nansum(Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin(attribution), 'loss_'+str(Year)]*proportion_CL_att) +
np.nansum(Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin(attribution), 'loss_'+str(Year)]*proportion_PP_att) +
np.nansum(Carbon_Emissions_Biomass.loc[Carbon_Emissions_Biomass.index.isin(attribution), 'loss_'+str(Year)]*proportion_FP_att))
Carbon_SOC_top.loc[Carbon_SOC_top.index.isin(Cropland), 'loss_'+str(Year)] += np.nansum(Carbon_SOC_top.loc[Carbon_SOC_top.index.isin(attribution), 'loss_'+str(Year)]*proportion_CL_att)
Carbon_SOC_top.loc[Carbon_SOC_top.index.isin(Pasture), 'loss_'+str(Year)] += np.nansum(Carbon_SOC_top.loc[Carbon_SOC_top.index.isin(attribution), 'loss_'+str(Year)]*proportion_PP_att)
Carbon_SOC_top.loc[Carbon_SOC_top.index.isin(Plantation), 'loss_'+str(Year)] += np.nansum(Carbon_SOC_top.loc[Carbon_SOC_top.index.isin(attribution), 'loss_'+str(Year)]*proportion_FP_att)
Carbon_SOC_top.loc[Carbon_SOC_top.index.isin(attribution), 'loss_'+str(Year)] -= np.nansum(np.nansum(Carbon_SOC_top.loc[Carbon_SOC_top.index.isin(attribution), 'loss_'+str(Year)]*proportion_CL_att) +
np.nansum(Carbon_SOC_top.loc[Carbon_SOC_top.index.isin(attribution), 'loss_'+str(Year)]*proportion_PP_att) +
np.nansum(Carbon_SOC_top.loc[Carbon_SOC_top.index.isin(attribution), 'loss_'+str(Year)]*proportion_FP_att))