diff --git a/model-files/RunModel.bat b/model-files/RunModel.bat
index 54d579d33..3b7a100bc 100644
--- a/model-files/RunModel.bat
+++ b/model-files/RunModel.bat
@@ -419,7 +419,18 @@ if %PROJECT%==NGF (
 :: ------------------------------------------------------------------------------------------------------
 ::
-:: Step 17: Directory clean up
+:: Step 17: Off-model Calculation (only for 2035 and 2050)
+::
+:: ------------------------------------------------------------------------------------------------------
+
+if "%runOffModel%"=="Yes" (
+  call RunOffmodel
+  if ERRORLEVEL 2 goto done
+)
+
+:: ------------------------------------------------------------------------------------------------------
+::
+:: Step 18: Directory clean up
 ::
 :: ------------------------------------------------------------------------------------------------------
diff --git a/model-files/RunModel_test_offModel.bat b/model-files/RunModel_test_offModel.bat
new file mode 100644
index 000000000..d2f7f61a9
--- /dev/null
+++ b/model-files/RunModel_test_offModel.bat
@@ -0,0 +1,258 @@
+::~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+:: RunModel.bat
+::
+:: MS-DOS batch file to execute the MTC travel model. Each of the model steps is sequentially
+:: called here.
+::
+:: For complete details, please see http://mtcgis.mtc.ca.gov/foswiki/Main/RunModelBatch.
+::
+:: dto (2012 02 15) gde (2009 04 22)
+::
+::~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+:: ------------------------------------------------------------------------------------------------------
+::
+:: Step 1: Set the necessary path variables
+::
+:: ------------------------------------------------------------------------------------------------------
+
+:: Set the path
+call CTRAMP\runtime\SetPath.bat
+
+:: Keep a record of the installed Python packages and their versions
+call CTRAMP\runtime\pip_list.bat > pip_list.log 2>&1
+
+:: Start the cube cluster
+Cluster "%COMMPATH%\CTRAMP" 1-48 Starthide Exit
+
+:: Set the IP address of the host machine which sends tasks to the client machines
+if %computername%==MODEL2-A set HOST_IP_ADDRESS=10.1.1.206
+if %computername%==MODEL2-B set HOST_IP_ADDRESS=192.168.1.207
+if %computername%==MODEL2-C set HOST_IP_ADDRESS=192.168.1.208
+if %computername%==MODEL2-D set HOST_IP_ADDRESS=192.168.1.209
+if %computername%==MODEL3-A set HOST_IP_ADDRESS=10.164.0.200
+if %computername%==MODEL3-B set HOST_IP_ADDRESS=10.164.0.201
+if %computername%==MODEL3-C set HOST_IP_ADDRESS=10.164.0.202
+if %computername%==MODEL3-D set HOST_IP_ADDRESS=10.164.0.203
+if %computername%==PORMDLPPW01 set HOST_IP_ADDRESS=172.24.0.101
+if %computername%==PORMDLPPW02 set HOST_IP_ADDRESS=172.24.0.102
+if %computername%==WIN-FK0E96C8BNI set HOST_IP_ADDRESS=10.0.0.154
+rem if %computername%==WIN-A4SJP19GCV5 set HOST_IP_ADDRESS=10.0.0.70
+rem for aws machines, HOST_IP_ADDRESS is set in SetUpModel.bat
+
+:: for AWS, this will be "WIN-"
+SET computer_prefix=%computername:~0,4%
+set INSTANCE=%COMPUTERNAME%
+if "%COMPUTER_PREFIX%" == "WIN-" (
+  rem figure out instance
+  for /f "delims=" %%I in ('"C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe -Command (wget http://169.254.169.254/latest/meta-data/instance-id).Content"') do set INSTANCE=%%I
+)
+
+:: Figure out the model year
+set MODEL_DIR=%CD%
+set PROJECT_DIR=%~p0
+set PROJECT_DIR2=%PROJECT_DIR:~0,-1%
+:: get the base dir only
+for %%f in (%PROJECT_DIR2%) do set myfolder=%%~nxf
+:: the first four characters are model year
+set MODEL_YEAR=%myfolder:~0,4%
+
+:: MODEL YEAR ------------------------- make sure it's numeric --------------------------------
+set /a MODEL_YEAR_NUM=%MODEL_YEAR% 2>nul
+if %MODEL_YEAR_NUM%==%MODEL_YEAR% (
+  echo Numeric model year [%MODEL_YEAR%]
+) else (
+  echo Couldn't determine numeric model year from project dir [%PROJECT_DIR%]
+  echo Guessed [%MODEL_YEAR%]
+  exit /b 2
+)
+:: MODEL YEAR ------------------------- make sure it's in [2000,3000] -------------------------
+if %MODEL_YEAR% LSS 2000 (
+  echo Model year [%MODEL_YEAR%] is less than 2000
+  exit /b 2
+)
+if %MODEL_YEAR% GTR 3000 (
+  echo Model year [%MODEL_YEAR%] is greater than 3000
+  exit /b 2
+)
+
+set PROJECT=%myfolder:~11,3%
+set FUTURE_ABBR=%myfolder:~15,2%
+set FUTURE=X
+
+:: FUTURE ------------------------- make sure FUTURE_ABBR is one of the five [RT,CG,BF] -------------------------
+:: The long names are: BaseYear ie 2015, Blueprint aka PBA50, CleanAndGreen, BackToTheFuture, or RisingTidesFallingFortunes
+
+if %PROJECT%==IPA (SET FUTURE=PBA50)
+if %PROJECT%==DBP (SET FUTURE=PBA50)
+if %PROJECT%==FBP (SET FUTURE=PBA50)
+if %PROJECT%==EIR (SET FUTURE=PBA50)
+if %PROJECT%==SEN (SET FUTURE=PBA50)
+if %PROJECT%==STP (SET FUTURE=PBA50)
+if %PROJECT%==NGF (SET FUTURE=PBA50)
+if %PROJECT%==TIP (SET FUTURE=PBA50)
+if %PROJECT%==TRR (SET FUTURE=PBA50)
+if %PROJECT%==PPA (
+  if %FUTURE_ABBR%==RT (set FUTURE=RisingTidesFallingFortunes)
+  if %FUTURE_ABBR%==CG (set FUTURE=CleanAndGreen)
+  if %FUTURE_ABBR%==BF (set FUTURE=BackToTheFuture)
+)
+
+echo on
+echo FUTURE = %FUTURE%
+
+echo off
+if %FUTURE%==X (
+  echo on
+  echo Couldn't determine FUTURE name.
+  echo Make sure the name of the project folder conforms to the naming convention.
+  exit /b 2
+)
+
+:: EN7 ------------------------- make sure EN7 is one of [ENABLED,DISABLED] -------------------------
+:: see https://github.com/BayAreaMetro/travel-model-one/tree/tm16_en7/utilities/telecommute
+IF "%EN7%"=="" (
+  echo EN7 is not configured; set EN7 environment variable to ENABLED or DISABLED
+  goto done
+)
+IF "%EN7%"=="ENABLED" (
+  echo EN7 is ENABLED
+) ELSE (
+  IF "%EN7%"=="DISABLED" (
+    echo EN7 is DISABLED
+  ) ELSE (
+    echo EN7 value is not allowed; set EN7 environment variable to ENABLED or DISABLED
+    goto done
+  )
+)
+
+echo on
+echo turn echo back on
+
+python "CTRAMP\scripts\notify_slack.py" "Starting *%MODEL_DIR%*"
+
+set MAXITERATIONS=3
+:: --------TrnAssignment Setup -- Standard Configuration
+:: CHAMP has dwell configured for buses (local and premium)
+:: CHAMP has access configured for everything
+:: set TRNCONFIG=STANDARD
+:: set COMPLEXMODES_DWELL=21 24 27 28 30 70 80 81 83 84 87 88
+:: set COMPLEXMODES_ACCESS=21 24 27 28 30 70 80 81 83 84 87 88 110 120 130
+
+:: --------TrnAssignment Setup -- Fast Configuration
+:: NOTE the blank ones should have a space
+set TRNCONFIG=FAST
+set COMPLEXMODES_DWELL= 
+set COMPLEXMODES_ACCESS= 
+
+:: ------------------------------------------------------------------------------------------------------
+::
+:: Step 2: Create the directory structure
+::
+:: ------------------------------------------------------------------------------------------------------
+
+:: Create the working directories
+mkdir hwy
+mkdir trn
+mkdir skims
+mkdir landuse
+mkdir popsyn
+mkdir nonres
+mkdir main
+mkdir logs
+mkdir database
+mkdir logsums
+mkdir core_summaries
+mkdir emfac_prep
+mkdir extractor
+mkdir metrics
+mkdir updated_output
+
+:: Stamp the feedback report with the date and time of the model start
+echo STARTED MODEL RUN %DATE% %TIME% >> logs\feedback.rpt
+
+:: Move the input files, which are not accessed by the model, to the working directories
+set ref_run="2035_TM160_DBP_Plan_08b"
+copy ..\%ref_run%\hwy\ hwy\
+copy ..\%ref_run%\trn\ trn\
+copy ..\%ref_run%\landuse\ landuse\
+copy ..\%ref_run%\popsyn\ popsyn\
+copy ..\%ref_run%\nonres\ nonres\
+copy ..\%ref_run%\main\ main\
+copy ..\%ref_run%\nonres\ nonres\
+copy ..\%ref_run%\logsums\ logsums\
+copy ..\%ref_run%\core_summaries\ core_summaries\
+copy ..\%ref_run%\database\ database\
+copy ..\%ref_run%\metrics\ metrics\
+copy ..\%ref_run%\skims\ skims\
+copy ..\%ref_run%\updated_output\ updated_output\
+
+
+
+:: ------------------------------------------------------------------------------------------------------
+::
+:: Step 17: Off-model Calculation (only for 2035 and 2050)
+::
+:: ------------------------------------------------------------------------------------------------------
+
+if "%runOffModel%"=="Yes" (
+  call RunOffmodel
+  if ERRORLEVEL 2 goto done
+)
+
+:: ------------------------------------------------------------------------------------------------------
+::
+:: Step 18: Directory clean up
+::
+:: ------------------------------------------------------------------------------------------------------
+
+
+:: Extract key files
+call extractkeyfiles
+c:\windows\system32\Robocopy.exe /E extractor "%M_DIR%\OUTPUT"
+
+
+: cleanup
+
+:: Move all the TP+ printouts to the \logs folder
+copy *.prn logs\*.prn
+copy *.log logs\*.log
+
+:: Close the cube cluster
+Cluster "%COMMPATH%\CTRAMP" 1-48 Close Exit
+
+:: Delete all the temporary TP+ printouts and cluster files
+del *.prn
+del *.script.*
+del *.script
+
+:: Success target and message
+:success
+ECHO FINISHED SUCCESSFULLY!
+
+python "CTRAMP\scripts\notify_slack.py" "Finished *%MODEL_DIR%*"
+
+if "%COMPUTER_PREFIX%" == "WIN-" (
+
+  rem go up a directory and sync model folder to s3
+  cd ..
+  "C:\Program Files\Amazon\AWSCLI\aws" s3 sync %myfolder% s3://travel-model-runs/%myfolder%
+  cd %myfolder%
+
+  rem shutdown
+  python "CTRAMP\scripts\notify_slack.py" "Finished *%MODEL_DIR%* - shutting down"
+  C:\Windows\System32\shutdown.exe /s
+)
+
+:: no errors
+goto donedone
+
+:: this is the done for errors
+:done
+ECHO FINISHED.
+
+:: if we got here and didn't shut down -- assume something went wrong
+python "CTRAMP\scripts\notify_slack.py" ":exclamation: Error in *%MODEL_DIR%*"
+
+:donedone
\ No newline at end of file
diff --git a/tm15-python310.yml b/tm15-python310.yml
index 62ff7cd33..e76e3538f 100644
--- a/tm15-python310.yml
+++ b/tm15-python310.yml
@@ -41,3 +41,4 @@ dependencies:
   - xlutils==2.0.0
   - xlwings==0.28.5
   - xlwt==1.3.0
+  - pyreadr==0.5.2
diff --git a/utilities/RTP/Emissions/Off Model Calculators/BikeInfrastructure.R b/utilities/RTP/Emissions/Off Model Calculators/BikeInfrastructure.R deleted file mode 100644 index 5f7d059fa..000000000 --- a/utilities/RTP/Emissions/Off Model Calculators/BikeInfrastructure.R +++ /dev/null @@ -1,166 +0,0 @@ -# -# This R script distills the model outputs into the versions used by ICF calculator: "Bike Infrastructure.xlsx" -# -# Pass two arguments to the script: -# 1) ModelRuns.xlsx with runs to process and -# 2) Output directory -# - -library(dplyr) -library(tidyr) -library(readxl) -options(width = 180) - -args = commandArgs(trailingOnly=TRUE) -print(args) -if (length(args) != 2) { - stop("Two arguments are required: ModelRuns.xlsx and output_dir") -} - -MODEL_DATA_BASE_DIRS<- c(RTP2021_IP ="M:/Application/Model One/RTP2021/IncrementalProgress", - RTP2021 ="M:/Application/Model One/RTP2021/Blueprint", - RTP2025_IP ="M:/Application/Model One/RTP2025/IncrementalProgress", - RTP2025 ="M:/Application/Model One/RTP2025/Blueprint", - TRR ="L:/Application/Model_One/TransitRecovery") -MODEL_RUNS_FILE <- args[1] -OUTPUT_DIR <- args[2] -OUTPUT_FILE <- file.path(OUTPUT_DIR, "Model Data - Bike Infrastructure.csv") - -# this is the currently running script -SCRIPT <- "X:/travel-model-one-master/utilities/RTP/Emissions/Off Model Calculators/BikeInfrastructure.R" -# the model runs are RTP/ModelRuns.csv -model_runs <- read_excel(MODEL_RUNS_FILE) - -# filter to the runs that need off-model calculation -model_runs <- model_runs[ which((model_runs$run_offmodel == "yes") | (model_runs$status == "DEIR")), ] - -print(paste("MODEL_DATA_BASE_DIRS = ",MODEL_DATA_BASE_DIRS)) -print(paste("OUTPUT_DIR = ",OUTPUT_DIR)) - -model_runs <- select(model_runs, year, directory, run_set, category, description) -print(model_runs) - -# Read tazdata -TAZDATA_FIELDS <- c("ZONE", "SD", "COUNTY","TOTPOP","TOTACRE") # only care about these fields -tazdata_df <- data.frame() -for (i in 1:nrow(model_runs)) { - if (model_runs[i,"directory"]=="2015_UrbanSim_FBP") { next } - # print(paste("run_set =",model_runs[[i,"run_set"]])) - MODEL_DATA_BASE_DIR <- MODEL_DATA_BASE_DIRS[model_runs[[i,"run_set"]]] - tazdata_file <- file.path(MODEL_DATA_BASE_DIR, model_runs[i,"directory"],"INPUT","landuse","tazData.csv") - tazdata_file_df <- read.table(tazdata_file, header=TRUE, sep=",") - tazdata_file_df <- tazdata_file_df[, TAZDATA_FIELDS] %>% - mutate(year = model_runs[[i,"year"]], - directory = model_runs[[i,"directory"]], - category = model_runs[[i,"category"]]) - tazdata_df <- rbind(tazdata_df, tazdata_file_df) -} -remove(i, tazdata_file, tazdata_file_df) - -# summarise population by superdistrict -tazdata_sd_df <- tazdata_df %>% - group_by(year, category, directory, SD, COUNTY) %>% - summarise(total_population = sum(TOTPOP), - total_square_miles = sum(TOTACRE)/640.0, - .groups = "drop_last") - -tazdata_all_df <- - group_by(tazdata_df, year, category, directory) %>% - summarise(total_population = sum(TOTPOP), - total_square_miles = sum(TOTACRE)/640.0, - .groups = "drop_last") %>% - mutate(SD=0, COUNTY=0) - -tazdata_sd_df <-
rbind(tazdata_sd_df, tazdata_all_df) - -# incorporate county population -tazdata_county_df <- - group_by(tazdata_df, year, category, directory, COUNTY) %>% - summarise(total_population_county = sum(TOTPOP), .groups="drop") %>% - ungroup() - -# summarise at superdistrict level -tazdata_sd_df <- left_join( - tazdata_sd_df, - tazdata_county_df, - by=c("year","category","directory","COUNTY")) %>% - mutate(population_county_share = total_population/total_population_county) -remove(tazdata_county_df) - -# Read trip-distance-by-mode-superdistrict.csv -tripdist_df <- data.frame() -for (i in 1:nrow(model_runs)) { - if (model_runs[i,"directory"]=="2015_UrbanSim_FBP") { next } - MODEL_DATA_BASE_DIR <- MODEL_DATA_BASE_DIRS[model_runs[[i,"run_set"]]] - tripdist_file <- file.path(MODEL_DATA_BASE_DIR, model_runs[i,"directory"],"OUTPUT","bespoke","trip-distance-by-mode-superdistrict.csv") - if (!file.exists(tripdist_file)) { - stop(paste0("File [",tripdist_file,"] does not exist")) - } - tripdist_file_df <- read.table(tripdist_file, header=TRUE, sep=",", stringsAsFactors=FALSE) %>% - mutate(year = model_runs[[i,"year"]], - directory = model_runs[[i,"directory"]], - category = model_runs[[i,"category"]]) - tripdist_df <- rbind(tripdist_df, tripdist_file_df) -} -remove(i, tripdist_file, tripdist_file_df) - -# For Bike Infrastructure, calculate bike and SOV trip mode share, and average bike trip distance -tripdist_df <- mutate(tripdist_df, - bike_trips = estimated_trips*(mode_name=="Bike"), - bike_dist = estimated_trips*(mode_name=="Bike")*mean_distance, - sov_trips = estimated_trips*(substr(mode_name,1,11)=="Drive alone")) - -# summarise at superdistrict level -tripdist_sd_summary_df <- - group_by(tripdist_df, year, category, directory, dest_sd) %>% - summarize(bike_trips = sum(bike_trips), - bike_dist = sum(bike_dist), - sov_trips = sum(sov_trips), - estimated_trips = sum(estimated_trips), - .groups = "drop") %>% - mutate(bike_trip_mode_share = bike_trips/estimated_trips, - bike_avg_trip_dist = bike_dist/bike_trips, - sov_trip_mode_share = sov_trips/estimated_trips) - -# and overall -tripdist_all_summary_df <- - group_by(tripdist_df, year, category, directory) %>% - summarize(bike_trips = sum(bike_trips), - bike_dist = sum(bike_dist), - sov_trips = sum(sov_trips), - estimated_trips = sum(estimated_trips), - .groups = "drop") %>% - mutate(bike_trip_mode_share = bike_trips/estimated_trips, - bike_avg_trip_dist = bike_dist/bike_trips, - sov_trip_mode_share = sov_trips/estimated_trips, - dest_sd = 0) - -tripdist_sd_summary_df <- rbind(tripdist_sd_summary_df, tripdist_all_summary_df) - -# select out needed fields of tazdata and tripdist table and put them together -summary_df <- left_join(tazdata_sd_df, - tripdist_sd_summary_df, - by=c("year","directory","category","SD"="dest_sd")) %>% - rename(superdistrict=SD) %>% - select(-bike_trips, -sov_trips, -COUNTY) # these were just intermediate - -# columns are: year, category, directory, superdistrict, variable, value -summary_melted_df <- pivot_longer(summary_df, cols=!c(year,category,directory,superdistrict), names_to="variable") - -# add index column for vlookup -summary_melted_df <- mutate(summary_melted_df, - index = paste0(year,"-",category,"-",superdistrict,"-",variable)) - -# sort by index and reorder columns -summary_melted_df <- arrange(summary_melted_df, index) %>% - relocate(index, year, category, directory, superdistrict, variable, value) - -# remove existing file -file.remove(OUTPUT_FILE) - -# prepend note -prepend_note <- paste0("Output by ",SCRIPT," 
on ",format(Sys.time(), "%a %b %d %H:%M:%S %Y")) -write(prepend_note, file=OUTPUT_FILE, append=FALSE) - -# output -write.table(summary_melted_df, OUTPUT_FILE, sep=",", row.names=FALSE, append=TRUE) diff --git a/utilities/RTP/Emissions/Off Model Calculators/Bikeshare.R b/utilities/RTP/Emissions/Off Model Calculators/Bikeshare.R deleted file mode 100644 index 0826785a1..000000000 --- a/utilities/RTP/Emissions/Off Model Calculators/Bikeshare.R +++ /dev/null @@ -1,79 +0,0 @@ -# -# This R script distills the model outputs into the versions used by ICF calculator: "Bikeshare.xlsx" -# -# Pass two arguments to the script: -# 1) ModelRuns.xlsx with runs to process and -# 2) Output directory -# - -library(dplyr) -library(reshape2) -library(readxl) -options(width = 180) - -args = commandArgs(trailingOnly=TRUE) -print(args) -if (length(args) != 2) { - stop("Two arguments are required: ModelRuns.xlsx and output_dir") -} - -MODEL_DATA_BASE_DIRS<- c(RTP2021_IP ="M:/Application/Model One/RTP2021/IncrementalProgress", - RTP2021 ="M:/Application/Model One/RTP2021/Blueprint", - RTP2025_IP ="M:/Application/Model One/RTP2025/IncrementalProgress", - RTP2025 ="M:/Application/Model One/RTP2025/Blueprint", - TRR ="L:/Application/Model_One/TransitRecovery") -MODEL_RUNS_FILE <- args[1] -OUTPUT_DIR <- args[2] -OUTPUT_FILE <- file.path(OUTPUT_DIR, "Model Data - Bikeshare.csv") - -# this is the currently running script -SCRIPT <- "X:/travel-model-one-master/utilities/RTP/Emissions/Off Model Calculators/BikeShare.R" -model_runs <- read_excel(MODEL_RUNS_FILE) - -# filter to the runs that need off-model calculation -model_runs <- model_runs[ which((model_runs$run_offmodel == "yes") | (model_runs$status == "DEIR")), ] - -print(paste("MODEL_DATA_BASE_DIRS = ",MODEL_DATA_BASE_DIRS)) -print(paste("OUTPUT_DIR = ",OUTPUT_DIR)) -print(model_runs) -# want: -# Total population -# Total employment - -# Read tazdata -TAZDATA_FIELDS <- c("ZONE", "SD", "COUNTY","TOTPOP","TOTEMP") # only care about these fields -tazdata_df <- data.frame() -for (i in 1:nrow(model_runs)) { - if (model_runs[[i,"directory"]]=="2015_UrbanSim_FBP") { next } - MODEL_DATA_BASE_DIR <- MODEL_DATA_BASE_DIRS[model_runs[[i,"run_set"]]] - tazdata_file <- file.path(MODEL_DATA_BASE_DIR, model_runs[[i,"directory"]],"INPUT","landuse","tazData.csv") - tazdata_file_df <- read.table(tazdata_file, header=TRUE, sep=",") - tazdata_file_df <- tazdata_file_df[, TAZDATA_FIELDS] %>% - mutate(year = model_runs[[i,"year"]], - directory = model_runs[[i,"directory"]], - category = model_runs[[i,"category"]]) - tazdata_df <- rbind(tazdata_df, tazdata_file_df) -} -remove(i, tazdata_file, tazdata_file_df) - - -summary_df <- summarise(group_by(tazdata_df, year, category, directory), - total_population = sum(TOTPOP), - total_employment = sum(TOTEMP)) - -# columns are: year, category, directory, variable, value -summary_melted_df <- melt(summary_df, id.vars=c("year","category","directory")) - -# add index column for vlookup -summary_melted_df <- mutate(summary_melted_df, - index = paste0(year,"-",category,"-",variable)) -summary_melted_df <- summary_melted_df[order(summary_melted_df$index), - c("index","year","category","directory","variable","value")] - -# prepend note -prepend_note <- paste0("Output by ",SCRIPT," on ",format(Sys.time(), "%a %b %d %H:%M:%S %Y")) -write(prepend_note, file=OUTPUT_FILE, append=FALSE) - -# output -write.table(summary_melted_df, OUTPUT_FILE, sep=",", row.names=FALSE, append=TRUE) - diff --git a/utilities/RTP/Emissions/Off Model Calculators/Carshare.R 
b/utilities/RTP/Emissions/Off Model Calculators/Carshare.R deleted file mode 100644 index e22829ca9..000000000 --- a/utilities/RTP/Emissions/Off Model Calculators/Carshare.R +++ /dev/null @@ -1,103 +0,0 @@ -# -# This R script distills the model outputs into the versions used by ICF calculator: "Carshare v4.xlsx" -# -# Pass two arguments to the script: -# 1) ModelRuns.xlsx with runs to process and -# 2) Output directory -# - -library(dplyr) -library(reshape2) -library(readxl) -options(width = 180) - -args = commandArgs(trailingOnly=TRUE) -print(args) -if (length(args) != 2) { - stop("Two arguments are required: ModelRuns.xlsx and output_dir") -} - -MODEL_DATA_BASE_DIRS<- c(RTP2021_IP ="M:/Application/Model One/RTP2021/IncrementalProgress", - RTP2021 ="M:/Application/Model One/RTP2021/Blueprint", - RTP2025_IP ="M:/Application/Model One/RTP2025/IncrementalProgress", - RTP2025 ="M:/Application/Model One/RTP2025/Blueprint", - TRR ="L:/Application/Model_One/TransitRecovery") -MODEL_RUNS_FILE <- args[1] -OUTPUT_DIR <- args[2] -OUTPUT_FILE <- file.path(OUTPUT_DIR, "Model Data - Carshare.csv") - -# this is the currently running script -SCRIPT <- "X:/travel-model-one-master/utilities/RTP/Emissions/Off Model Calculators/CarShare.R" -model_runs <- read_excel(MODEL_RUNS_FILE) - -# filter to the runs that need off-model calculation -model_runs <- model_runs[ which((model_runs$run_offmodel == "yes") | (model_runs$status == "DEIR")), ] - -print(paste("MODEL_DATA_BASE_DIRS = ",MODEL_DATA_BASE_DIRS)) -print(paste("OUTPUT_DIR = ",OUTPUT_DIR)) -print(model_runs) - -# Calculator constants -# Criteria for applying trip caps -K_MIN_POP_DENSITY <- 10 # Minimum density needed to be considered "urban" and support dense carshare (persons/residential acre) - -# want: -# Total population -# Total population in TAZs with density >10 -# Total population in TAZs with density <10 -# Adult pop (age 20-64) in TAZs with density >10 -# Adult pop (age 20-64) in TAZs with density <10 - -# Read tazdata -TAZDATA_FIELDS <- c("ZONE", "SD", "COUNTY","TOTPOP","RESACRE","AGE2044","AGE4564") # only care about these fields -tazdata_df <- data.frame() -for (i in 1:nrow(model_runs)) { - # We don't need past years for Car Share - if (model_runs[[i,"year"]]<=2015) next - MODEL_DATA_BASE_DIR <- MODEL_DATA_BASE_DIRS[model_runs[[i,"run_set"]]] - - tazdata_file <- file.path(MODEL_DATA_BASE_DIR, model_runs[[i,"directory"]],"INPUT","landuse","tazData.csv") - tazdata_file_df <- read.table(tazdata_file, header=TRUE, sep=",") - tazdata_file_df <- tazdata_file_df[, TAZDATA_FIELDS] %>% - mutate(year = model_runs[[i,"year"]], - directory = model_runs[[i,"directory"]], - category = model_runs[[i,"category"]]) - tazdata_df <- rbind(tazdata_df, tazdata_file_df) -} -remove(i, tazdata_file, tazdata_file_df) - -# population per residential acre -tazdata_df <- mutate(tazdata_df, - totpop_per_resacre = ifelse(RESACRE==0,0,TOTPOP/RESACRE), - carshare_dense = (totpop_per_resacre > K_MIN_POP_DENSITY), - totpop_dense = TOTPOP*carshare_dense, - totpop_sparse = TOTPOP*(!carshare_dense), - adultpop_dense = (AGE2044+AGE4564)*carshare_dense, - adultpop_sparse = (AGE2044+AGE4564)*(!carshare_dense)) - -summary_df <- summarise(group_by(tazdata_df, year, category, directory), - total_population = sum(TOTPOP), - totpop_dense = sum(totpop_dense), - totpop_sparse = sum(totpop_sparse), - adultpop_dense = sum(adultpop_dense), - adultpop_sparse = sum(adultpop_sparse)) -# columns are: year, category, directory, variable, value -summary_melted_df <- melt(summary_df, 
id.vars=c("year","category","directory")) - -# add index column for vlookup -summary_melted_df <- mutate(summary_melted_df, - index = paste0(year,"-",category,"-",variable)) -summary_melted_df <- summary_melted_df[order(summary_melted_df$index), - c("index","year","category","directory","variable","value")] -#print(summary_melted_df) - -# prepend note -prepend_note <- paste0("Output by ",SCRIPT," on ",format(Sys.time(), "%a %b %d %H:%M:%S %Y")) -write(prepend_note, file=OUTPUT_FILE, append=FALSE) - -prepend_note <- paste0("K_MIN_POP_DENSITY," ,K_MIN_POP_DENSITY) -write(prepend_note, file=OUTPUT_FILE, append=TRUE) - -# output -write.table(summary_melted_df, OUTPUT_FILE, sep=",", row.names=FALSE, append=TRUE) - diff --git a/utilities/RTP/Emissions/Off Model Calculators/EmployerShuttles.R b/utilities/RTP/Emissions/Off Model Calculators/EmployerShuttles.R deleted file mode 100644 index 229b1937b..000000000 --- a/utilities/RTP/Emissions/Off Model Calculators/EmployerShuttles.R +++ /dev/null @@ -1,162 +0,0 @@ -# -# This R script distills the model outputs into the versions used by ICF calculator: "Employer Shuttles v2.xlsx" -# -# Pass two arguments to the script: -# 1) ModelRuns.xlsx with runs to process and -# 2) Output directory -# - -library(dplyr) -library(reshape2) -library(readxl) -options(width = 180) - -args = commandArgs(trailingOnly=TRUE) -print(args) -if (length(args) != 2) { - stop("Two arguments are required: ModelRuns.xlsx and output_dir") -} -MODEL_DATA_BASE_DIRS<- c(RTP2021_IP ="M:/Application/Model One/RTP2021/IncrementalProgress", - RTP2021 ="M:/Application/Model One/RTP2021/Blueprint", - RTP2025_IP ="M:/Application/Model One/RTP2025/IncrementalProgress", - RTP2025 ="M:/Application/Model One/RTP2025/Blueprint", - TRR ="L:/Application/Model_One/TransitRecovery") -MODEL_RUNS_FILE <- args[1] -OUTPUT_DIR <- args[2] -OUTPUT_FILE <- file.path(OUTPUT_DIR, "Model Data - Employer Shuttles.csv") - -# this is the currently running script -SCRIPT <- "X:/travel-model-one-master/utilities/RTP/Emissions/Off Model Calculators/EmployerShuttles.R" -model_runs <- read_excel(MODEL_RUNS_FILE) - -# filter to the runs that need off-model calculation -model_runs <- model_runs[ which((model_runs$run_offmodel == "yes") | (model_runs$status == "DEIR")), ] - -print(paste("MODEL_DATA_BASE_DIRS = ",MODEL_DATA_BASE_DIRS)) -print(paste("OUTPUT_DIR = ",OUTPUT_DIR)) -print(model_runs) - -#### Mode look-up table -LOOKUP_MODE <- data.frame(trip_mode = c(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21), - mode_name = c("Drive alone - free", "Drive alone - pay", - "Shared ride two - free", "Shared ride two - pay", - "Shared ride three - free", "Shared ride three - pay", - "Walk", "Bike", - "Walk to local bus", "Walk to light rail or ferry", "Walk to express bus", - "Walk to heavy rail", "Walk to commuter rail", - "Drive to local bus", "Drive to light rail or ferry", "Drive to express bus", - "Drive to heavy rail", "Drive to commuter rail", - "Taxi", "TNC", "TNC shared")) -SAMPLING_RATE = 0.500 - -# read taz/SD lookup -taz_SD_df <- read.table(file = "X:/travel-model-one-master/utilities/geographies/taz-superdistrict-county.csv", header=TRUE, sep=",") - -# Read trip-distance-by-mode-superdistrict.csv -tripdist_df <- data.frame() -for (i in 1:nrow(model_runs)) { - # We don't need past years for Employer Shuttles - if (model_runs[[i,"year"]]<=2015) next - - MODEL_DATA_BASE_DIR <- MODEL_DATA_BASE_DIRS[model_runs[[i,"run_set"]]] - trips_file <- file.path(MODEL_DATA_BASE_DIR, model_runs[[i,"directory"]], "OUTPUT", 
"updated_output", "trips.rdata") - - print(paste("Reading trips from",trips_file)) - - # load trips - load(trips_file) - # drop unneeded columns - trips <- select(trips, hh_id, tour_purpose, distance, trip_mode, orig_taz, dest_taz) - - # add origin SD and destination SD - trips <- left_join(trips, select(taz_SD_df, ZONE, SD) %>% rename(orig_taz=ZONE, orig_sd=SD)) - trips <- left_join(trips, select(taz_SD_df, ZONE, SD) %>% rename(dest_taz=ZONE, dest_sd=SD)) - - # filter to distance > 30.0 - trips <- filter(trips, distance>30.0) - # filter to work trips - trips <- filter(trips, substr(tour_purpose,1,5)=="work_") - - # summarize - trips <- group_by(trips, trip_mode, tour_purpose, orig_sd, dest_sd) %>% - summarize(simulated_trips = n(), mean_distance = mean(distance)) - trips <- left_join(trips, LOOKUP_MODE, by = c("trip_mode")) - trips <- mutate(trips, estimated_trips = simulated_trips / SAMPLING_RATE) %>% - select(trip_mode, mode_name, tour_purpose, orig_sd, dest_sd, simulated_trips, estimated_trips, mean_distance) - - trips <- trips %>% - mutate(year = model_runs[[i,"year"]], - directory = model_runs[[i,"directory"]], - category = model_runs[[i,"category"]]) - - if (nrow(tripdist_df) == 0) { - tripdist_df <- trips - } else { - tripdist_df <- bind_rows(tripdist_df, trips) - } -} -remove(i, trips) -# print(head(tripdist_df)) -# trip_mode mode_name tour_purpose orig_sd dest_sd simulated_trips estimated_trips mean_distance year directory category -# 1 1 Drive alone - free atwork_business 1 1 643 1286 1.2962208 2035 2035_TM152_IPA_01 IP -# 2 1 Drive alone - free atwork_business 1 2 255 510 2.9464314 2035 2035_TM152_IPA_01 IP -# 3 1 Drive alone - free atwork_business 1 3 533 1066 3.6807505 2035 2035_TM152_IPA_01 IP -# 4 1 Drive alone - free atwork_business 1 4 41 82 7.4807317 2035 2035_TM152_IPA_01 IP -# 5 1 Drive alone - free atwork_business 1 5 141 282 12.4274468 2035 2035_TM152_IPA_01 IP -# 6 1 Drive alone - free atwork_business 1 6 12 24 22.0958333 2035 2035_TM152_IPA_01 IP - -simplified_mode <- data.frame( - mode_name=c("Drive alone - free", "Drive alone - pay", - "Shared ride two - free", "Shared ride two - pay", - "Shared ride three - free", "Shared ride three - pay", - "Walk", - "Bike", - "Walk to local bus", "Walk to light rail or ferry", "Walk to express bus", "Walk to heavy rail", "Walk to commuter rail", - "Drive to local bus","Drive to light rail or ferry","Drive to express bus","Drive to heavy rail","Drive to commuter rail", - "Taxi", "TNC", "TNC shared"), - simple_mode=c("SOV", "SOV", - "HOV", "HOV", - "HOV 3.5", "HOV 3.5", - "Walk", - "Bike", - "Walk to transit", "Walk to transit", "Walk to transit", "Walk to transit", "Walk to transit", - "Drive to transit","Drive to transit","Drive to transit","Drive to transit","Drive to transit", - "Taxi/TNC", "Taxi/TNC", "Taxi/TNC"), - stringsAsFactors = FALSE) - -# add simplified mode and a couple other simple variables -tripdist_df <- left_join(tripdist_df, simplified_mode) - -# add a couple other variables -tripdist_df <- mutate(tripdist_df, - total_distance = estimated_trips*mean_distance) - -# summarise to mode -summary_mode_df <- summarise(group_by(tripdist_df, year, category, directory, simple_mode), - estimated_trips = sum(estimated_trips)) -summary_all_df <- summarise(group_by(tripdist_df, year, category, directory), - all_mode_trips = sum(estimated_trips)) - -# join -summary_mode_df <- left_join(summary_mode_df, summary_all_df, - by=c("year","category","directory")) %>% - mutate(mode_share = estimated_trips/all_mode_trips) %>% - 
select(-estimated_trips, -all_mode_trips) # we only need the mode share - - -# columns are: year, category, directory, simple_mode, variable, value -summary_melted_df <- melt(summary_mode_df, id.vars=c("year","category","directory","simple_mode")) - -# add index column for vlookup -summary_melted_df <- mutate(summary_melted_df, - index = paste0(year,"-",category,"-",simple_mode,"-",variable)) -summary_melted_df <- summary_melted_df[order(summary_melted_df$index), - c("index","year","category","directory","simple_mode","variable","value")] -# print(summary_melted_df) - -# prepend note -prepend_note <- paste0("Output by ",SCRIPT," on ",format(Sys.time(), "%a %b %d %H:%M:%S %Y")) -write(prepend_note, file=OUTPUT_FILE, append=FALSE) - -# output -write.table(summary_melted_df, OUTPUT_FILE, sep=",", row.names=FALSE, append=TRUE) \ No newline at end of file diff --git a/utilities/RTP/Emissions/Off Model Calculators/Off Model Calculators.Rproj b/utilities/RTP/Emissions/Off Model Calculators/Off Model Calculators.Rproj deleted file mode 100644 index 8e3c2ebc9..000000000 --- a/utilities/RTP/Emissions/Off Model Calculators/Off Model Calculators.Rproj +++ /dev/null @@ -1,13 +0,0 @@ -Version: 1.0 - -RestoreWorkspace: Default -SaveWorkspace: Default -AlwaysSaveHistory: Default - -EnableCodeIndexing: Yes -UseSpacesForTab: Yes -NumSpacesForTab: 2 -Encoding: UTF-8 - -RnwWeave: Sweave -LaTeX: pdfLaTeX diff --git a/utilities/RTP/Emissions/Off Model Calculators/TargetedTransportationAlternatives.R b/utilities/RTP/Emissions/Off Model Calculators/TargetedTransportationAlternatives.R deleted file mode 100644 index 49abf3aee..000000000 --- a/utilities/RTP/Emissions/Off Model Calculators/TargetedTransportationAlternatives.R +++ /dev/null @@ -1,120 +0,0 @@ -# -# This R script distills the model outputs into the versions used by ICF calculator: "Targeted Transportation Alternatives v4.xlsx" -# -# Pass two arguments to the script: -# 1) ModelRuns.xlsx with runs to process and -# 2) Output directory -# -library(dplyr) -library(reshape2) -library(readxl) -options(width = 180) - -args = commandArgs(trailingOnly=TRUE) -print(args) -if (length(args) != 2) { - stop("Two arguments are required: ModelRuns.xlsx and output_dir") -} -MODEL_DATA_BASE_DIRS<- c(RTP2021_IP ="M:/Application/Model One/RTP2021/IncrementalProgress", - RTP2021 ="M:/Application/Model One/RTP2021/Blueprint", - RTP2025_IP ="M:/Application/Model One/RTP2025/IncrementalProgress", - RTP2025 ="M:/Application/Model One/RTP2025/Blueprint", - TRR ="L:/Application/Model_One/TransitRecovery") -MODEL_RUNS_FILE <- args[1] -OUTPUT_DIR <- args[2] -OUTPUT_FILE <-file.path(OUTPUT_DIR, "Model Data - Targeted Transportation Alternatives.csv") - -# this is the currently running script -SCRIPT <- "X:/travel-model-one-master/utilities/RTP/Emissions/Off Model Calculators/TargetedTransportationAlternatives.R" -model_runs <- read_excel(MODEL_RUNS_FILE) - -# filter to the runs that need off-model calculation -model_runs <- model_runs[ which((model_runs$run_offmodel == "yes") | (model_runs$status == "DEIR")), ] - -print(paste("MODEL_DATA_BASE_DIRS = ",MODEL_DATA_BASE_DIRS)) -print(paste("OUTPUT_DIR = ",OUTPUT_DIR)) -print(model_runs) - - -# Read tazdata -TAZDATA_FIELDS <- c("ZONE", "SD", "COUNTY", "TOTEMP", "TOTHH", "CIACRE", "AREATYPE") # only care about these fields -tazdata_df <- data.frame() -for (i in 1:nrow(model_runs)) { - if (model_runs[[i,"directory"]]=="2015_UrbanSim_FBP") { next } - MODEL_DATA_BASE_DIR <- MODEL_DATA_BASE_DIRS[model_runs[[i,"run_set"]]] - tazdata_file <- 
file.path(MODEL_DATA_BASE_DIR, model_runs[[i,"directory"]],"INPUT","landuse","tazData.csv") - tazdata_file_df <- read.table(tazdata_file, header=TRUE, sep=",") - tazdata_file_df <- tazdata_file_df[, TAZDATA_FIELDS] %>% - mutate(year = model_runs[[i,"year"]], - directory = model_runs[[i,"directory"]], - category = model_runs[[i,"category"]]) - tazdata_df <- rbind(tazdata_df, tazdata_file_df) -} -remove(tazdata_file, tazdata_file_df) - - -# TAZ data rollups -tazdata_summary_df <- summarise(group_by(tazdata_df, year, category, directory), - total_households = sum(TOTHH), - total_jobs = sum(TOTEMP)) - - -# Read trip-distance-by-mode-superdistrict.csv -tripdist_df <- data.frame() -for (i in 1:nrow(model_runs)) { - if (model_runs[[i,"directory"]]=="2015_UrbanSim_FBP") { next } - MODEL_DATA_BASE_DIR <- MODEL_DATA_BASE_DIRS[model_runs[[i,"run_set"]]] - tripdist_file <- file.path(MODEL_DATA_BASE_DIR, model_runs[[i,"directory"]],"OUTPUT","bespoke","trip-distance-by-mode-superdistrict.csv") - if (!file.exists(tripdist_file)) { - stop(paste0("File [",tripdist_file,"] does not exist")) - } - tripdist_file_df <- read.table(tripdist_file, header=TRUE, sep=",") %>% - mutate(year = model_runs[[i,"year"]], - directory = model_runs[[i,"directory"]], - category = model_runs[[i,"category"]]) - tripdist_df <- rbind(tripdist_df, tripdist_file_df) -} -remove(i, tripdist_file, tripdist_file_df) - -# trip-distance-by-mode-superdistrict rollups -# tour_purpose and trip_mode coding: http://analytics.mtc.ca.gov/foswiki/Main/IndividualTrip -# QUESTION: why is commute (for Trip Caps v5) = work_ and school_grade? why not school_high and university? -tripdist_df <- mutate(tripdist_df, - total_distance = mean_distance*estimated_trips, - work_trip = substr(tour_purpose,1,5)=="work_", - drive_alone = substr(mode_name,1,11)=="Drive alone") %>% - mutate(total_distance_work_da = total_distance*work_trip*drive_alone, - estimated_trips_work_da = estimated_trips*work_trip*drive_alone) - -tripdist_summary_df <- summarize(group_by(tripdist_df, year, category, directory), - total_distance = sum(total_distance), - estimated_trips = sum(estimated_trips), - total_distance_work_da = sum(total_distance_work_da), - estimated_trips_work_da = sum(estimated_trips_work_da)) %>% - mutate(avg_trip_length = total_distance/estimated_trips, - avg_trip_length_work_da = total_distance_work_da/estimated_trips_work_da) - -# Put them together -summary_df <- left_join(tazdata_summary_df, tripdist_summary_df) - -# keep only the columns we want -summary_df <- summary_df[,c("year","category","directory", - "total_households","total_jobs","avg_trip_length","avg_trip_length_work_da")] - -# columns are: year, category, directory, variable, value -summary_melted_df <- melt(summary_df, id.vars=c("year","category","directory")) - -# add index column for vlookup -summary_melted_df <- mutate(summary_melted_df, - index = paste0(year,"-",category,"-",variable)) -summary_melted_df <- summary_melted_df[order(summary_melted_df$index), - c("index","year","category","directory","variable","value")] -# print(summary_melted_df) - -# prepend note -prepend_note = paste0("Output by ",SCRIPT," on ",format(Sys.time(), "%a %b %d %H:%M:%S %Y")) -write(prepend_note, file=OUTPUT_FILE, append=FALSE) - -# output -write.table(summary_melted_df, OUTPUT_FILE, sep=",", row.names=FALSE, append=TRUE) - diff --git a/utilities/RTP/Emissions/Off Model Calculators/data/input/IPA_TM2/ModelData/Model Data - Bikeshare.csv b/utilities/RTP/Emissions/Off Model 
Calculators/data/input/IPA_TM2/ModelData/Model Data - Bikeshare.csv new file mode 100644 index 000000000..a16aa9875 --- /dev/null +++ b/utilities/RTP/Emissions/Off Model Calculators/data/input/IPA_TM2/ModelData/Model Data - Bikeshare.csv @@ -0,0 +1,38 @@ +Output by X:/travel-model-one-master/utilities/RTP/Emissions/Off Model Calculators/BikeShare.R on Thu Feb 01 10:43:17 2024 +"index","year","category","directory","variable","value" +"2005-SB375 Base year-total_employment",2005,"SB375 Base year","2005_TM160_IPA_02","total_employment",3575933 +"2005-SB375 Base year-total_population",2005,"SB375 Base year","2005_TM160_IPA_02","total_population",7096469 +"2015-Previous base year-total_employment",2015,"Previous base year","2015_TM160_IPA_03","total_employment",3861318 +"2015-Previous base year-total_population",2015,"Previous base year","2015_TM160_IPA_03","total_population",7581396 +"2023-Base year-total_employment",2023,"Base year","2023_TM160_IPA_42","total_employment",4064307 +"2023-Base year-total_population",2023,"Base year","2023_TM160_IPA_42","total_population",7847766 +"2025-Previous Plan-total_employment",2025,"Previous Plan","2025_TM152_FBP_Plus_22","total_employment",4147691 +"2025-Previous Plan-total_population",2025,"Previous Plan","2025_TM152_FBP_Plus_22","total_population",8231265 +"2035-IPA-total_employment",2035,"IPA","2035_TM160_IPA_09","total_employment",4854742 +"2035-IPA-total_employment",2035,"IPA","2035_TM160_IPA_09_minusModePref","total_employment",4854742 +"2035-IPA-total_employment",2035,"IPA","2035_TM160_IPA_09_PBA50aoc","total_employment",4854742 +"2035-IPA-total_employment",2035,"IPA","2035_TM160_IPA_09_PBA50ixex","total_employment",4854742 +"2035-IPA-total_employment",2035,"IPA","2035_TM160_IPA_09_PBA50landuse","total_employment",4834513 +"2035-IPA-total_employment",2035,"IPA","2035_TM160_IPA_10","total_employment",4854742 +"2035-IPA-total_employment",2035,"IPA","2035_TM160_IPA_10_plusEN7","total_employment",4854742 +"2035-IPA-total_employment",2035,"IPA","2035_TM160_IPA_11","total_employment",4854742 +"2035-IPA-total_employment",2035,"IPA","2035_TM160_IPA_11_network2023","total_employment",4854742 +"2035-IPA-total_employment",2035,"IPA","2035_TM160_IPA_12","total_employment",4854742 +"2035-IPA-total_employment",2035,"IPA","2035_TM160_IPA_14","total_employment",4854742 +"2035-IPA-total_employment",2035,"IPA","2035_TM160_IPA_15","total_employment",4854742 +"2035-IPA-total_employment",2035,"IPA","2035_TM160_IPA_16","total_employment",4854742 +"2035-IPA-total_population",2035,"IPA","2035_TM160_IPA_09","total_population",8476332 +"2035-IPA-total_population",2035,"IPA","2035_TM160_IPA_09_minusModePref","total_population",8476332 +"2035-IPA-total_population",2035,"IPA","2035_TM160_IPA_09_PBA50aoc","total_population",8476332 +"2035-IPA-total_population",2035,"IPA","2035_TM160_IPA_09_PBA50ixex","total_population",8476332 +"2035-IPA-total_population",2035,"IPA","2035_TM160_IPA_09_PBA50landuse","total_population",9002950 +"2035-IPA-total_population",2035,"IPA","2035_TM160_IPA_10","total_population",8476332 +"2035-IPA-total_population",2035,"IPA","2035_TM160_IPA_10_plusEN7","total_population",8476332 +"2035-IPA-total_population",2035,"IPA","2035_TM160_IPA_11","total_population",8476332 +"2035-IPA-total_population",2035,"IPA","2035_TM160_IPA_11_network2023","total_population",8476332 +"2035-IPA-total_population",2035,"IPA","2035_TM160_IPA_12","total_population",8476332 +"2035-IPA-total_population",2035,"IPA","2035_TM160_IPA_14","total_population",8476332 
+"2035-IPA-total_population",2035,"IPA","2035_TM160_IPA_15","total_population",8476332 +"2035-IPA-total_population",2035,"IPA","2035_TM160_IPA_16","total_population",8476332 +"2035-Previous Plan-total_employment",2035,"Previous Plan","2035_TM152_FBP_Plus_24","total_employment",4834513 +"2035-Previous Plan-total_population",2035,"Previous Plan","2035_TM152_FBP_Plus_24","total_population",9002950 diff --git a/utilities/RTP/Emissions/Off Model Calculators/data/input/IPA_TM2/PBA50+ Off-Model Calculators/PBA50+_OffModel_Bikeshare.xlsx b/utilities/RTP/Emissions/Off Model Calculators/data/input/IPA_TM2/PBA50+ Off-Model Calculators/PBA50+_OffModel_Bikeshare.xlsx new file mode 100644 index 000000000..65f5dd71c Binary files /dev/null and b/utilities/RTP/Emissions/Off Model Calculators/data/input/IPA_TM2/PBA50+ Off-Model Calculators/PBA50+_OffModel_Bikeshare.xlsx differ diff --git a/utilities/RTP/Emissions/Off Model Calculators/helper/__init__.py b/utilities/RTP/Emissions/Off Model Calculators/helper/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/utilities/RTP/Emissions/Off Model Calculators/helper/bshare.py b/utilities/RTP/Emissions/Off Model Calculators/helper/bshare.py new file mode 100644 index 000000000..0a08d726b --- /dev/null +++ b/utilities/RTP/Emissions/Off Model Calculators/helper/bshare.py @@ -0,0 +1,86 @@ +import openpyxl +import pandas as pd + +from helper.calcs import OffModelCalculator + +class Bikeshare(OffModelCalculator): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.masterWbName="PBA50+_OffModel_Bikeshare" + self.dataFileName="Model Data - Bikeshare" + self.strategy="bike share" + self.metaRow=1 + self.dataRow=1 + + def write_runid_to_mainsheet(self): + # get variables location in calculator + OffModelCalculator.get_variable_locations(self) + + # add run_id to 'Main sheet' + newWorkbook = openpyxl.load_workbook(self.new_workbook_file) + mainsheet = newWorkbook['Main sheet'] + + # Select Main sheet variables + vMS=self.v['Main sheet'] + + # Write run name and year + mainsheet[vMS['Run_directory_2035']] = OffModelCalculator.get_ipa(self, 0)[0] + mainsheet[vMS['Run_directory_2050']] = OffModelCalculator.get_ipa(self, 1)[0] + mainsheet[vMS['year_a']] = OffModelCalculator.get_ipa(self, 0)[1] + mainsheet[vMS['year_b']] = OffModelCalculator.get_ipa(self, 1)[1] + + # save file + newWorkbook.save(self.new_workbook_file) + newWorkbook.close() + + if self.verbose: + print(f"Main sheet updated with {self.runs} in location\n{self.new_workbook_file}") + + def get_calculator_names(self): + log=pd.read_excel(self.master_workbook_file + , sheet_name='Output' + , header=[1] + , skiprows=0 + ) + + return log.columns.tolist()[3:] + + def update_calculator(self): + + # Step 1: Create run and copy files + OffModelCalculator.copy_workbook(self) + + # Step 2: load and filter model data of selected runs + modelData, metaData=OffModelCalculator.get_model_data(self) + + # Step 3: add model data of selected runs to 'Model Data' sheet + OffModelCalculator.write_model_data_to_excel(self,modelData,metaData) + + # Step 4: + self.write_runid_to_mainsheet() + + # Step 5: open close new wb + OffModelCalculator.open_excel_app(self) + + # Step 6: update log + logVariables=self.get_calculator_names() + OffModelCalculator.log_run(self,logVariables) + + def update_summary_file(self, summaryPath, folderName): + df=pd.read_csv(summaryPath) + row={ + 'year': [2035, 2050], + 'daily_vehTrip_reduction': [self.rowDict['Out_bikeshare_trips_2035'][0], + 
self.rowDict['Out_bikeshare_trips_2050'][0]], + 'daily_vmt_reduction':[self.rowDict['Out_daily_VMT_reduced_2035'][0], + self.rowDict['Out_daily_VMT_reduced_2050'][0]], + 'daily_ghg_reduction':[self.rowDict['Out_daily_GHG_reduced_2035'][0], + self.rowDict['Out_daily_GHG_reduced_2050'][0]], + 'strategy':[self.strategy,self.strategy], + 'directory':[folderName,folderName], + } + + df_new=pd.DataFrame(row, index=None) + df=pd.concat([df,df_new], ignore_index=True) + df.to_csv(summaryPath, index=False) diff --git a/utilities/RTP/Emissions/Off Model Calculators/helper/calcs.py b/utilities/RTP/Emissions/Off Model Calculators/helper/calcs.py new file mode 100644 index 000000000..31aa7758d --- /dev/null +++ b/utilities/RTP/Emissions/Off Model Calculators/helper/calcs.py @@ -0,0 +1,270 @@ +import shutil +import pandas as pd +import re +import win32com.client +import os +import openpyxl +import time + + +from helper import common + + +class OffModelCalculator: + """ + Off-model calculator general methods to copy, update, and output results + given two specific model_run_id_year (input). + + Attributes: + runs: input model_run_id_year in model data input file. + pathType: where to look for directories. Mtc points to box absolute paths. External to repo relative paths. + modelDataPath: string, absolute path to model data directory. + masterFilePath: string, absolute path to offModelCalculators directory. + masterWbName: string, name of offModelCalculator of interest (e.g. bikeshare) + dataFileName: string, name of model data file (input). + verbose: print each method calculations. + varsDir: master file with all variable locations in all OffModelCalculators. + v: dictionary, all variable names and values for the OffModelCalculator chosen. + """ + + def __init__(self, model_run_id, directory, uid, verbose=False): + self.uid=uid + self.runs = [model_run_id[0], model_run_id[1]] + self.pathType=directory + self.modelDataPath, self.masterFilePath = common.get_directory_constants(self.pathType) + self.masterWbName="" + self.dataFileName="" + self.baselineDir=None + self.verbose=verbose + self.varsDir=common.get_vars_directory(self.pathType) + + def copy_workbook(self): + # Start run + self.newWbFilePath=common.createNewRun(self) + + # make a copy of the workbook + self.master_workbook_file = os.path.join(self.masterFilePath,f"{self.masterWbName}.xlsx") + self.new_workbook_file = os.path.join(self.newWbFilePath,f"{self.masterWbName}__{self.runs[0]}__{self.runs[1]}.xlsx") + + + shutil.copy2(self.master_workbook_file, self.new_workbook_file) + + if self.verbose: + print(self.master_workbook_file) + print(self.new_workbook_file) + + # return self.new_workbook_file + + def get_model_metadata(self): + + metaData=pd.read_csv( + os.path.join(self.modelDataPath,f"{self.dataFileName}.csv"), + nrows=self.metaRow, + header=None) + + if self.verbose: + print(f"Model Data (R Script) metadata:\n{metaData.columns[0]}") + + return metaData + + def get_model_data(self): + # Get Model Data as df + rawData=pd.read_csv( + os.path.join(self.modelDataPath,f"{self.dataFileName}.csv"), + skiprows=self.dataRow) + + filteredData=rawData.loc[rawData.directory.isin(self.runs+[self.baselineDir])] + # Get metadata from model data + metaData=OffModelCalculator.get_model_metadata(self) + + if self.verbose: + print("Unique directories:") + print(rawData['directory'].unique()) + + return filteredData, metaData + + def get_sb_data(self): + sbPath=common.get_paths(self.pathType) + return pd.read_csv(sbPath['SB375']) + + def get_ipa(self, arg): 
+ name=self.runs[arg] + pattern = r"(\d{4})_(TM\d{3})_(.*)" + matches = re.search(pattern, name) + + if matches: + ipa = matches.group(3) + year = matches.group(1) + + if self.verbose: + print(ipa) + + return [ipa, int(year)] + + def write_sbdata_to_excel(self): + # add sb375 data + data=OffModelCalculator.get_sb_data(self) + sbData=data.T.loc[['Year','Population', 'DailyCO2','RunID']] + with pd.ExcelWriter(self.new_workbook_file, engine='openpyxl', mode = 'a' + , if_sheet_exists = 'overlay' + ) as writer: + sbData.to_excel(writer, + sheet_name='SB 375 Calcs', + index=False, + startcol=1, + header=False) + + if self.verbose: + print("Copied SB375 data to excel.") + + def write_model_data_to_excel(self, data, meta): + + with pd.ExcelWriter(self.new_workbook_file, engine='openpyxl', mode = 'a', if_sheet_exists = 'replace') as writer: + # add metadata + meta=pd.DataFrame(meta) + meta.to_excel(writer, + sheet_name='Model Data', + index=False, + header=False, + startrow=0, startcol=0) + + with pd.ExcelWriter(self.new_workbook_file, engine='openpyxl', mode = 'a', if_sheet_exists = 'overlay') as writer: + # add model data + # this only works with pandas=1.4.3 or later; in earlier version, it will not overwrite sheet, but add new one with sheet name 'Model Data1' + data.to_excel(writer, + sheet_name='Model Data', + index=False, + startrow=self.metaRow, startcol=0) + + OffModelCalculator.write_sbdata_to_excel(self) + + if self.verbose: + print(f"Metadata: {meta}") + + def get_variable_locations(self): + + allVars=pd.read_excel(self.varsDir) + calcVars=allVars.loc[allVars.Workbook.isin([self.masterWbName])].drop(columns=['Workbook','Description']) + groups=set(calcVars.Sheet) + self.v={} + for group in groups: + self.v.setdefault(group,dict()) + self.v[group]=dict(zip(calcVars['Variable Name'],calcVars['Location'])) + + if self.verbose: + print("Calculator variables and locations in Excel:") + print(self.v) + + ## Step 5: open/close Excel, autosave + def open_excel_app(self): + + self.updated_workbook_file=os.path.join(self.newWbFilePath, + f"{self.uid.replace(':','--')}__{self.masterWbName}.xlsx") + + excel = win32com.client.Dispatch("Excel.Application") + wb = excel.Workbooks.Open(self.new_workbook_file) + excel.Visible=True + wb.RefreshAll() + wb.SaveAs(self.updated_workbook_file) + wb.Close() + + # Remove old file + print(f"Trying to remove {self.masterWbName}") + timesTried=0 + while True: + try: + os.remove(self.new_workbook_file) + print("Removed") + break + except: + timesTried+=1 + if timesTried<3: + time.sleep(8) + print("retries: ",timesTried) + else: + print("cannot remove old file.") + break + + ##Step 6: log runs in master + def extract_data_from_mainsheet(self, vNames): + # Select Main sheet variables + vMS=self.v['Main sheet'] + + # open main sheet + newWorkbook = openpyxl.load_workbook(self.updated_workbook_file,data_only=True) + mainsheet = newWorkbook['Main sheet'] + + # collect data of interest + data=[] + data+=[self.uid,self.runs[0],self.runs[1]] + for metric in vNames: + try: + data.append(mainsheet[vMS[metric]].value) + if self.verbose: + print(f"Metric: {metric}\nlocation:{vMS[metric]}\nValue: {mainsheet[vMS[metric]].value}") + + except: + print(f"{metric} Not found.") + pass + + vNames=['Timestamp','Baseline Run ID','Horizon Run ID']+vNames + self.rowDict=dict(map(lambda i,j : (i,[j]) , vNames,data)) + + # open output sheet + log=pd.DataFrame(self.rowDict) + + return log + + def check_last_log_index(self): + last_entry=pd.read_excel(self.master_workbook_file + , 
sheet_name='Output' + , header=[1] + , usecols=[0] + , skiprows=0 + ) + if self.verbose: + print(f"Length of log: {len(last_entry)}") + + return len(last_entry) + + def get_calculator_names(self): + log=pd.read_excel(self.master_workbook_file + , sheet_name='Output' + , header=[1] + , skiprows=0 + ) + + return log.columns.tolist()[3:] + + def log_run(self, vNames): + + dataTolog=self.extract_data_from_mainsheet(vNames) + logLength=self.check_last_log_index() + + with pd.ExcelWriter(self.master_workbook_file, engine='openpyxl', mode = 'a', if_sheet_exists = 'overlay') as writer: + # add log to main calc + dataTolog.to_excel(writer, + sheet_name='Output', + index=False, + header=False, + startrow=logLength+2) + + def initialize_summary_file(self, outputPath): + + # Create empty summary csv + header=['year','daily_vehTrip_reduction','daily_vmt_reduction', + 'daily_ghg_reduction','strategy','directory'] + df=pd.DataFrame(columns=header) + df.to_csv(outputPath, index=False) + + def create_output_summary_path(self,baseRun): + summaryPath=os.path.join( + self.paths['OFF_MODEL_CALCULATOR_DIR_OUTPUT'] + , self.uid.replace(':','--') + , f"off_model_summary_by_strategy_{baseRun}.csv") + + return summaryPath + + + + diff --git a/utilities/RTP/Emissions/Off Model Calculators/helper/common.py b/utilities/RTP/Emissions/Off Model Calculators/helper/common.py new file mode 100644 index 000000000..24a231a87 --- /dev/null +++ b/utilities/RTP/Emissions/Off Model Calculators/helper/common.py @@ -0,0 +1,123 @@ +import os + +def get_paths(dirType): + """ + dirType='mtc' + Import the absolute paths used within the MTC team. + E.g. links to box or other local directories. + + dirType='external' + Import relative paths from repo pointing to sample folders. + """ + if dirType=='mtc': + # Input data paths + box_dir = r'C:\Users\{}\Box\Plan Bay Area 2050+\Blueprint\Off-Model\PBA50+ Off-Model'\ + .format(os.environ.get('USERNAME')) + model_data_box_dir = os.path.join(box_dir, 'model_data_all') + + # Models + off_model_calculator_dir = os.path.join( + box_dir, 'DBP_v2', 'PBA50+ Off-Model Calculators') + + # Outputs + off_model_calculator_dir_output = off_model_calculator_dir + + # Variables locations + vars=os.path.join(off_model_calculator_dir, "Variable_locations.xlsx") + + sb_dir=os.path.join(off_model_calculator_dir, + "SB375_data.csv") + + + elif dirType=='external': + + abs_dirname=os.path.join(os.path.dirname(__file__),"..") + # Input data paths + box_dir = os.path.join(abs_dirname, + r"data\input\IPA_TM2") + + model_data_box_dir = os.path.join(box_dir,"ModelData") + + # Models + off_model_calculator_dir = os.path.join(abs_dirname, + "models") + + # Output + off_model_calculator_dir_output = os.path.join(abs_dirname, + r"data\output") + + # Variables locations + vars=os.path.join(abs_dirname, + r"models\Variable_locations.xlsx") + + sb_dir=os.path.join(abs_dirname, + r"models\SB375_data.csv") + + else: + raise ValueError("-d can be either mtc or external") + + return {'BOX_DIR': box_dir, + 'MODEL_DATA_BOX_DIR':model_data_box_dir, + 'OFF_MODEL_CALCULATOR_DIR':off_model_calculator_dir, + 'OFF_MODEL_CALCULATOR_DIR_OUTPUT':off_model_calculator_dir_output, + 'VARS':vars, + 'SB375':sb_dir, + } + + +def get_directory_constants(dirType): + ''' + This function extracts the corresponding relative or absolute paths + used in the external or mtc options. 
+ ''' + # directory file paths (input, models) + paths=get_paths(dirType) + + return paths['MODEL_DATA_BOX_DIR'], paths['OFF_MODEL_CALCULATOR_DIR'] + +def get_vars_directory(dirType): + # directory file paths (variable locations) + paths=get_paths(dirType) + + return paths['VARS'] + +def getNextFilePath(output_folder, run): + """ + This method checks for folders with the same name. + If the folder exists, provides the next number in the sequence. + """ + + lastRunId=0 + for f in os.listdir(output_folder): + fileNameList=f.split("__") + if f"{fileNameList[0]}__{fileNameList[1]}"== run: + if int(fileNameList[2])>lastRunId: + lastRunId=int(fileNameList[2]) + + else: + continue + + return lastRunId + 1 + +def createNewRun(c, verbose=False): + """ + Given the two model_run_id_year selected, an output folder is created. + In this folder, outputs will be saved. + If the output folder already exists, a sequence is created + to differentiate outputs. + """ + + path=get_paths(c.pathType) + + runName=c.uid.replace(':','--') + pathToRun=os.path.join(path['OFF_MODEL_CALCULATOR_DIR_OUTPUT'], + f"{runName}") + + if not os.path.exists(pathToRun): + os.makedirs(pathToRun) + + if verbose: + print(f"New run created: {runName}") + print(f"Location: {pathToRun}") + + return pathToRun diff --git a/utilities/RTP/Emissions/Off Model Calculators/helper/cshare.py b/utilities/RTP/Emissions/Off Model Calculators/helper/cshare.py new file mode 100644 index 000000000..9a5563a91 --- /dev/null +++ b/utilities/RTP/Emissions/Off Model Calculators/helper/cshare.py @@ -0,0 +1,85 @@ +import openpyxl +import pandas as pd + +from helper.calcs import OffModelCalculator +class Carshare(OffModelCalculator): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.masterWbName="PBA50+_OffModel_Carshare" + self.dataFileName="Model Data - Carshare" + self.strategy="car share" + self.metaRow=2 + self.dataRow=2 + + def write_runid_to_mainsheet(self): + # get variables location in calculator + OffModelCalculator.get_variable_locations(self) + + # add run_id to 'Main sheet' + newWorkbook = openpyxl.load_workbook(self.new_workbook_file) + mainsheet = newWorkbook['Main sheet'] + modeldatasheet=newWorkbook['Model Data'] + + # Select Main sheet variables + vMS=self.v['Main sheet'] + + # Write model data + mainsheet[vMS['Min_carshare_population_density']]=modeldatasheet[vMS['k_min_pop_density']].value + # Write run name and year + mainsheet[vMS['Run_directory_2035']] = OffModelCalculator.get_ipa(self, 0)[0] + mainsheet[vMS['Run_directory_2050']] = OffModelCalculator.get_ipa(self, 1)[0] + mainsheet[vMS['year_a']] = OffModelCalculator.get_ipa(self, 0)[1] + mainsheet[vMS['year_b']] = OffModelCalculator.get_ipa(self, 1)[1] + + # save file + newWorkbook.save(self.new_workbook_file) + newWorkbook.close() + + def get_calculator_names(self): + log=pd.read_excel(self.master_workbook_file + , sheet_name='Output' + , header=[1] + , skiprows=0 + ) + + return log.columns.tolist()[3:] + + def update_calculator(self): + + # Step 1: Create run and copy files + OffModelCalculator.copy_workbook(self) + + # Step 2: load and filter model data of selected runs + modelData, metaData=OffModelCalculator.get_model_data(self) + + # Step 3: add model data of selected runs to 'Model Data' sheet + OffModelCalculator.write_model_data_to_excel(self,modelData,metaData) + + # Step 4: + self.write_runid_to_mainsheet() + + # Step 5: open close new wb + OffModelCalculator.open_excel_app(self) + + # Step 6: update log + 
logVariables=self.get_calculator_names() + OffModelCalculator.log_run(self,logVariables) + + def update_summary_file(self, summaryPath, folderName): + df=pd.read_csv(summaryPath) + row={ + 'year': [2035, 2050], + 'daily_vehTrip_reduction': [None, + None], + 'daily_vmt_reduction':[self.rowDict['Out_daily_VMT_reduced_2035'][0], + self.rowDict['Out_daily_VMT_reduced_2050'][0]], + 'daily_ghg_reduction':[self.rowDict['Out_daily_GHG_reduced_2035'][0], + self.rowDict['Out_daily_GHG_reduced_2050'][0]], + 'strategy':[self.strategy,self.strategy], + 'directory':[folderName,folderName], + } + + df_new=pd.DataFrame(row, index=None) + df=pd.concat([df,df_new], ignore_index=True) + df.to_csv(summaryPath, index=False) \ No newline at end of file diff --git a/utilities/RTP/Emissions/Off Model Calculators/helper/ebk.py b/utilities/RTP/Emissions/Off Model Calculators/helper/ebk.py new file mode 100644 index 000000000..0c211c581 --- /dev/null +++ b/utilities/RTP/Emissions/Off Model Calculators/helper/ebk.py @@ -0,0 +1,48 @@ +from helper.calcs import OffModelCalculator +import pandas as pd +class EBike(OffModelCalculator): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.masterWbName="PBA50+_OffModel_EBIKE" + self.strategy="electric bike rebates" + self.dataFileName=None + + def get_calculator_names(self): + log=pd.read_excel(self.master_workbook_file + , sheet_name='Output' + , header=[1] + , skiprows=0 + ) + + return log.columns.tolist()[3:] + + def update_calculator(self): + # Step 1: Create run and copy files + OffModelCalculator.copy_workbook(self) + # Step 2: copy sb375 data + OffModelCalculator.write_sbdata_to_excel(self) + # Step 3: open close new wb + OffModelCalculator.open_excel_app(self) + # Step 4: update log + OffModelCalculator.get_variable_locations(self) + logVariables=self.get_calculator_names() + OffModelCalculator.log_run(self,logVariables) + + def update_summary_file(self, summaryPath, folderName): + df=pd.read_csv(summaryPath) + row={ + 'year': [2035, 2050], + 'daily_vehTrip_reduction': [None, + None], + 'daily_vmt_reduction': [self.rowDict['Out_daily_VMT_reduced_2035'][0], + self.rowDict['Out_daily_VMT_reduced_2050'][0]], + 'daily_ghg_reduction':[self.rowDict['Out_daily_GHG_reduced_2035'][0], + self.rowDict['Out_daily_GHG_reduced_2050'][0]], + 'strategy':[self.strategy,self.strategy], + 'directory':[folderName,folderName], + } + + df_new=pd.DataFrame(row, index=None) + df=pd.concat([df,df_new], ignore_index=True) + df.to_csv(summaryPath, index=False) \ No newline at end of file diff --git a/utilities/RTP/Emissions/Off Model Calculators/helper/regchar.py b/utilities/RTP/Emissions/Off Model Calculators/helper/regchar.py new file mode 100644 index 000000000..f6cb34764 --- /dev/null +++ b/utilities/RTP/Emissions/Off Model Calculators/helper/regchar.py @@ -0,0 +1,40 @@ +import pandas as pd + +from helper.calcs import OffModelCalculator +class RegionalCharger(OffModelCalculator): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.masterWbName="PBA50+_OffModel_RegionalCharger" + self.strategy="ev charger" + self.dataFileName=None + + def update_calculator(self): + # Step 1: Create run and copy files + OffModelCalculator.copy_workbook(self) + # Step 2: copy sb375 data + OffModelCalculator.write_sbdata_to_excel(self) + # Step 3: open close new wb + OffModelCalculator.open_excel_app(self) + # Step 4: update log + OffModelCalculator.get_variable_locations(self) + logVariables=OffModelCalculator.get_calculator_names(self) + 
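+        # get_calculator_names returns the 'Output' sheet column headers after the
+        # first three; log_run then records this run's matching values in that sheet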
OffModelCalculator.log_run(self,logVariables) + + def update_summary_file(self, summaryPath, folderName): + df=pd.read_csv(summaryPath) + row={ + 'year': [2035, 2050], + 'daily_vehTrip_reduction': [None, + None], + 'daily_vmt_reduction': [None, + None], + 'daily_ghg_reduction':[self.rowDict['Out_daily_GHG_reduced_2035'][0], + self.rowDict['Out_daily_GHG_reduced_2050'][0]], + 'strategy':[self.strategy,self.strategy], + 'directory':[folderName,folderName], + } + + df_new=pd.DataFrame(row, index=None) + df=pd.concat([df,df_new], ignore_index=True) + df.to_csv(summaryPath, index=False) \ No newline at end of file diff --git a/utilities/RTP/Emissions/Off Model Calculators/helper/targtransalt.py b/utilities/RTP/Emissions/Off Model Calculators/helper/targtransalt.py new file mode 100644 index 000000000..91bf0aba2 --- /dev/null +++ b/utilities/RTP/Emissions/Off Model Calculators/helper/targtransalt.py @@ -0,0 +1,91 @@ +import openpyxl +import pandas as pd + +from helper.calcs import OffModelCalculator +class TargetedTransAlt(OffModelCalculator): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.masterWbName="PBA50+_OffModel_TargetedTransAlt" + self.dataFileName="Model Data - Targeted Transportation Alternatives" + self.strategy="targeted transportation alternative" + self.metaRow=1 + self.dataRow=1 + self.baselineDir='2015_TM152_IPA_16' + + def write_runid_to_mainsheet(self): + # get variables location in calculator + OffModelCalculator.get_variable_locations(self) + + # add run_id to 'Main sheet' + newWorkbook = openpyxl.load_workbook(self.new_workbook_file) + mainsheet = newWorkbook['Main sheet'] + modeldatasheet=pd.DataFrame(newWorkbook['Model Data'].values)[1:] + modeldatasheet.columns=modeldatasheet.iloc[0] + modeldatasheet=modeldatasheet[1:] + + # Select Main sheet variables + vMS=self.v['Main sheet'] + + # Write other variables + mainsheet[vMS['Total_households_baseline']]=modeldatasheet.loc[(modeldatasheet.directory==self.baselineDir) \ + & (modeldatasheet.variable=='total_households')]['value'].values[0] + mainsheet[vMS['Total_jobs_baseline']]=modeldatasheet.loc[(modeldatasheet.directory==self.baselineDir) \ + & (modeldatasheet.variable=='total_jobs')]['value'].values[0] + # Write run name and year + mainsheet[vMS['Run_directory_2035']] = OffModelCalculator.get_ipa(self, 0)[0] + mainsheet[vMS['Run_directory_2050']] = OffModelCalculator.get_ipa(self, 1)[0] + mainsheet[vMS['year_a']] = OffModelCalculator.get_ipa(self, 0)[1] + mainsheet[vMS['year_b']] = OffModelCalculator.get_ipa(self, 1)[1] + + # save file + newWorkbook.save(self.new_workbook_file) + newWorkbook.close() + + def get_calculator_names(self): + log=pd.read_excel(self.master_workbook_file + , sheet_name='Output' + , header=[1] + , skiprows=0 + ) + + return log.columns.tolist()[3:] + + def update_calculator(self): + + # Step 1: Create run and copy files + OffModelCalculator.copy_workbook(self) + + # Step 2: load and filter model data of selected runs + modelData, metaData=OffModelCalculator.get_model_data(self) + + # Step 3: add model data of selected runs to 'Model Data' sheet + OffModelCalculator.write_model_data_to_excel(self,modelData,metaData) + + # Step 4: + self.write_runid_to_mainsheet() + + # Step 5: open close new wb + OffModelCalculator.open_excel_app(self) + + # Step 6: update log + logVariables=self.get_calculator_names() + OffModelCalculator.log_run(self,logVariables) + + def update_summary_file(self, summaryPath, folderName): + df=pd.read_csv(summaryPath) + row={ + 'year': [2035, 
2050], + 'daily_vehTrip_reduction': [self.rowDict['Total_daily_trip_reductions_2035'][0], + self.rowDict['Total_daily_trip_reductions_2050'][0]], + 'daily_vmt_reduction':[self.rowDict['Out_daily_VMT_reduced_2035'][0], + self.rowDict['Out_daily_VMT_reduced_2050'][0]], + 'daily_ghg_reduction':[self.rowDict['Out_daily_GHG_reduced_2035'][0], + self.rowDict['Out_daily_GHG_reduced_2050'][0]], + 'strategy':[self.strategy,self.strategy], + 'directory':[folderName,folderName], + } + + df_new=pd.DataFrame(row, index=None) + df=pd.concat([df,df_new], ignore_index=True) + df.to_csv(summaryPath, index=False) \ No newline at end of file diff --git a/utilities/RTP/Emissions/Off Model Calculators/helper/vbuyback.py b/utilities/RTP/Emissions/Off Model Calculators/helper/vbuyback.py new file mode 100644 index 000000000..5be6e5c6a --- /dev/null +++ b/utilities/RTP/Emissions/Off Model Calculators/helper/vbuyback.py @@ -0,0 +1,39 @@ +import pandas as pd +from helper.calcs import OffModelCalculator +class BuyBack(OffModelCalculator): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.masterWbName="PBA50+_OffModel_VehicleBuyback" + self.strategy="vehicle buy back" + self.dataFileName=None + + def update_calculator(self): + # Step 1: Create run and copy files + OffModelCalculator.copy_workbook(self) + # Step 2: copy sb375 data + OffModelCalculator.write_sbdata_to_excel(self) + # Step 3: open close new wb + OffModelCalculator.open_excel_app(self) + # Step 4: update log + OffModelCalculator.get_variable_locations(self) + logVariables=OffModelCalculator.get_calculator_names(self) + OffModelCalculator.log_run(self,logVariables) + + def update_summary_file(self, summaryPath, folderName): + df=pd.read_csv(summaryPath) + row={ + 'year': [2035, 2050], + 'daily_vehTrip_reduction': [None, + None], + 'daily_vmt_reduction': [None, + None], + 'daily_ghg_reduction':[self.rowDict['Out_daily_GHG_reduced_2035'][0], + self.rowDict['Out_daily_GHG_reduced_2050'][0]], + 'strategy':[self.strategy,self.strategy], + 'directory':[folderName,folderName], + } + + df_new=pd.DataFrame(row, index=None) + df=pd.concat([df,df_new], ignore_index=True) + df.to_csv(summaryPath, index=False) \ No newline at end of file diff --git a/utilities/RTP/Emissions/Off Model Calculators/helper/vpool.py b/utilities/RTP/Emissions/Off Model Calculators/helper/vpool.py new file mode 100644 index 000000000..d2f0605c0 --- /dev/null +++ b/utilities/RTP/Emissions/Off Model Calculators/helper/vpool.py @@ -0,0 +1,73 @@ +import openpyxl +import pandas as pd + +from helper.calcs import OffModelCalculator +class VanPools(OffModelCalculator): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.masterWbName="PBA50+_OffModel_Vanpools" + self.dataFileName="Model Data - Employer Shuttles" + self.strategy="vanpool" + self.metaRow=1 + self.dataRow=1 + + def write_runid_to_mainsheet(self): + # get variables location in calculator + OffModelCalculator.get_variable_locations(self) + + # add run_id to 'Main sheet' + newWorkbook = openpyxl.load_workbook(self.new_workbook_file) + mainsheet = newWorkbook['Main sheet'] + + # Select Main sheet variables + vMS=self.v['Main sheet'] + + # Write run name and year + mainsheet[vMS['Run_directory_2035']] = OffModelCalculator.get_ipa(self, 0)[0] + mainsheet[vMS['Run_directory_2050']] = OffModelCalculator.get_ipa(self, 1)[0] + mainsheet[vMS['year_a']] = OffModelCalculator.get_ipa(self, 0)[1] + mainsheet[vMS['year_b']] = OffModelCalculator.get_ipa(self, 1)[1] + + # save file + 
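+        # note: openpyxl does not recalculate formulas; the copied workbook is opened
+        # and closed in Excel afterwards (Step 5 of update_calculator) so dependent
+        # cells refresh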
newWorkbook.save(self.new_workbook_file) + newWorkbook.close() + + def update_calculator(self): + + # Step 1: Create run and copy files + OffModelCalculator.copy_workbook(self) + + # Step 2: load and filter model data of selected runs + modelData, metaData=OffModelCalculator.get_model_data(self) + + # Step 3: add model data of selected runs to 'Model Data' sheet + OffModelCalculator.write_model_data_to_excel(self,modelData,metaData) + + # Step 4: + self.write_runid_to_mainsheet() + + # Step 5: open close new wb + OffModelCalculator.open_excel_app(self) + + # Step 6: update log + logVariables=self.get_calculator_names() + OffModelCalculator.log_run(self,logVariables) + + def update_summary_file(self, summaryPath, folderName): + df=pd.read_csv(summaryPath) + row={ + 'year': [2035, 2050], + 'daily_vehTrip_reduction': [self.rowDict['Vanpool_one_way_vehicle_trip_reductions_2035'][0], + self.rowDict['Vanpool_one_way_vehicle_trip_reductions_2050'][0]], + 'daily_vmt_reduction':[self.rowDict['Out_daily_VMT_reduced_2035'][0], + self.rowDict['Out_daily_VMT_reduced_2050'][0]], + 'daily_ghg_reduction':[self.rowDict['Out_daily_GHG_reduced_2035'][0], + self.rowDict['Out_daily_GHG_reduced_2050'][0]], + 'strategy':[self.strategy,self.strategy], + 'directory':[folderName,folderName], + } + + df_new=pd.DataFrame(row, index=None) + df=pd.concat([df,df_new], ignore_index=True) + df.to_csv(summaryPath, index=False) \ No newline at end of file diff --git a/utilities/RTP/Emissions/Off Model Calculators/models/PBA50+_OffModel_Bikeshare.xlsx b/utilities/RTP/Emissions/Off Model Calculators/models/PBA50+_OffModel_Bikeshare.xlsx new file mode 100644 index 000000000..65f5dd71c Binary files /dev/null and b/utilities/RTP/Emissions/Off Model Calculators/models/PBA50+_OffModel_Bikeshare.xlsx differ diff --git a/utilities/RTP/Emissions/Off Model Calculators/models/PBA50+_OffModel_Carshare.xlsx b/utilities/RTP/Emissions/Off Model Calculators/models/PBA50+_OffModel_Carshare.xlsx new file mode 100644 index 000000000..51fb09448 Binary files /dev/null and b/utilities/RTP/Emissions/Off Model Calculators/models/PBA50+_OffModel_Carshare.xlsx differ diff --git a/utilities/RTP/Emissions/Off Model Calculators/models/PBA50+_OffModel_EBIKE.xlsx b/utilities/RTP/Emissions/Off Model Calculators/models/PBA50+_OffModel_EBIKE.xlsx new file mode 100644 index 000000000..fcda8966d Binary files /dev/null and b/utilities/RTP/Emissions/Off Model Calculators/models/PBA50+_OffModel_EBIKE.xlsx differ diff --git a/utilities/RTP/Emissions/Off Model Calculators/models/PBA50+_OffModel_RegionalCharger.xlsx b/utilities/RTP/Emissions/Off Model Calculators/models/PBA50+_OffModel_RegionalCharger.xlsx new file mode 100644 index 000000000..f88b21fcf Binary files /dev/null and b/utilities/RTP/Emissions/Off Model Calculators/models/PBA50+_OffModel_RegionalCharger.xlsx differ diff --git a/utilities/RTP/Emissions/Off Model Calculators/models/PBA50+_OffModel_TargetedTransAlt.xlsx b/utilities/RTP/Emissions/Off Model Calculators/models/PBA50+_OffModel_TargetedTransAlt.xlsx new file mode 100644 index 000000000..d222b0263 Binary files /dev/null and b/utilities/RTP/Emissions/Off Model Calculators/models/PBA50+_OffModel_TargetedTransAlt.xlsx differ diff --git a/utilities/RTP/Emissions/Off Model Calculators/models/PBA50+_OffModel_Vanpools.xlsx b/utilities/RTP/Emissions/Off Model Calculators/models/PBA50+_OffModel_Vanpools.xlsx new file mode 100644 index 000000000..d3d3f5023 Binary files /dev/null and b/utilities/RTP/Emissions/Off Model 
Calculators/models/PBA50+_OffModel_Vanpools.xlsx differ diff --git a/utilities/RTP/Emissions/Off Model Calculators/models/PBA50+_OffModel_VehicleBuyback.xlsx b/utilities/RTP/Emissions/Off Model Calculators/models/PBA50+_OffModel_VehicleBuyback.xlsx new file mode 100644 index 000000000..78d184cdc Binary files /dev/null and b/utilities/RTP/Emissions/Off Model Calculators/models/PBA50+_OffModel_VehicleBuyback.xlsx differ diff --git a/utilities/RTP/Emissions/Off Model Calculators/models/SB375_data.csv b/utilities/RTP/Emissions/Off Model Calculators/models/SB375_data.csv new file mode 100644 index 000000000..edb081f97 --- /dev/null +++ b/utilities/RTP/Emissions/Off Model Calculators/models/SB375_data.csv @@ -0,0 +1,5 @@ +RunID,Year,Population,DailyCO2 +IPA_02,2005,6978983,61020 +NP_16,2020,7937894, +IPA_11,2035,8524962, +FBP_21,2050,10367651, diff --git a/utilities/RTP/Emissions/Off Model Calculators/models/Variable_locations.xlsx b/utilities/RTP/Emissions/Off Model Calculators/models/Variable_locations.xlsx new file mode 100644 index 000000000..c472cd83f Binary files /dev/null and b/utilities/RTP/Emissions/Off Model Calculators/models/Variable_locations.xlsx differ diff --git a/utilities/RTP/Emissions/Off Model Calculators/offmodel_prep.py b/utilities/RTP/Emissions/Off Model Calculators/offmodel_prep.py new file mode 100644 index 000000000..690de0b4d --- /dev/null +++ b/utilities/RTP/Emissions/Off Model Calculators/offmodel_prep.py @@ -0,0 +1,324 @@ +import pandas as pd +import numpy as np +import pyreadr +import os + +USAGE = """ + + python offmodel_prep.py + + Simple script that reads TAZ land use input data and trips output data, outputs + the following .csv files to be used in the off-model calculation (PBA50+ version). + * bikeshare.csv + * carshare.csv + * employerShuttle.csv + * targetedTransportationAlternatives.csv + * bikeInfrastructure.csv + +""" + +######## Bikeshare + +def prep_data_for_bikeshare(taz_input_df): + """ + Input: + * TAZ land use input + + Output the following data: + * total households + * total employment (jobs) + + """ + print('create off-model calculation input data for bike share') + bikeshare_taz_cols = ['TOTPOP', 'TOTEMP'] # only care about these fields + bikeshare_taz = taz_input_df[bikeshare_taz_cols].sum() + bikeshare_taz_df = pd.DataFrame(bikeshare_taz).reset_index() + bikeshare_taz_df.columns = ['variable', 'value'] + + bikeshare_taz_df.to_csv(os.path.join(OUTPUT_DIR, 'bikeshare.csv'), index=False) + +######## Carshare + +def prep_data_for_carshare(taz_input_df): + """ + Input: + * TAZ land use input + + Output the following data: + * total population + * population in "urban" TAZs (density > 10 persons/residential acre) + * population in "non-urban" TAZs (density <= 10 persons/residential acre) + * adult population (age 20-64) in "urban" TAZs (density > 10 persons/residential acre) + * adult population (age 20-64) in "non-urban" TAZs (density <= 10 persons/residential acre) + + """ + print('create off-model calculation input data for car share') + # Calculator constant: criteria for applying trip caps + K_MIN_POP_DENSITY = 10 # Minimum density needed to be considered "urban" and support dense carshare (persons/residential acre) + + carshare_taz_cols = ['ZONE', 'COUNTY', 'SD', 'TOTPOP', 'RESACRE', 'AGE2044', 'AGE4564'] + carshare_taz_df = taz_input_df[carshare_taz_cols] + + carshare_taz_df['totpop_per_resacre'] = np.where(carshare_taz_df['RESACRE'] == 0, 0, carshare_taz_df['TOTPOP'] / carshare_taz_df['RESACRE']) + carshare_taz_df['carshare_dense'] = 
carshare_taz_df['totpop_per_resacre'] > K_MIN_POP_DENSITY + carshare_taz_df['totpop_dense'] = carshare_taz_df['TOTPOP'] * carshare_taz_df['carshare_dense'] + carshare_taz_df['totpop_sparse'] = carshare_taz_df['TOTPOP'] * (~carshare_taz_df['carshare_dense']) + carshare_taz_df['adultpop_dense'] = (carshare_taz_df['AGE2044'] + carshare_taz_df['AGE4564']) * carshare_taz_df['carshare_dense'] + carshare_taz_df['adultpop_sparse'] = (carshare_taz_df['AGE2044'] + carshare_taz_df['AGE4564']) * (~carshare_taz_df['carshare_dense']) + + carshare_taz = carshare_taz_df[['TOTPOP', 'totpop_dense', 'totpop_sparse', 'adultpop_dense', 'adultpop_sparse']].sum() + carshare_taz_df2 = pd.DataFrame(carshare_taz).reset_index() + carshare_taz_df2.columns = ['variable', 'value'] + carshare_taz_df2.loc[carshare_taz_df2['variable'] == 'TOTPOP', 'variable'] = 'total_population' + + carshare_taz_df2.to_csv(os.path.join(OUTPUT_DIR, 'carshare.csv'), index=False) + +######## Employer Shuttles + +def prep_data_for_employerShuttle(trips_output_df): + """ + Input: + * trips output + + Output the following data: + * trip mode share of work trips with distance > 30.0 + + """ + print('create off-model calculation input data for employer shuttles') + # filter to distance > 30.0 and work trips + trips_sub = trips_output_df.loc[(trips_output_df['distance'] > 30.0) & (trips_output_df['tour_purpose'].str[:5] == "work_")] + + # summarize + trips_summary = trips_sub.groupby(['trip_mode', 'mode_name', 'simple_mode'])[['hh_id']].count().reset_index() + trips_summary.rename({'hh_id': 'simulated_trips'}, axis=1, inplace=True) + trips_summary['estimated_trips'] = trips_summary['simulated_trips'] / SAMPLING_RATE + + # summarize to mode + simple_mode_share = trips_summary.groupby(['simple_mode'])[['estimated_trips']].sum().apply(lambda x: x/x.sum()).reset_index() + simple_mode_share.rename({'estimated_trips': 'value'}, axis=1, inplace=True) + simple_mode_share['variable'] = 'mode_share' + simple_mode_share.to_csv(os.path.join(OUTPUT_DIR, 'employerShuttle.csv'), index=False) + +######## Targeted Transportation Alternatives + +def prep_data_for_TargetedAlt(taz_input_df, tripdist_output_df): + """ + Input: + * TAZ land use input + * trip distance by mode and SD output + + Output the following data: + * total households + * total employment (jobs) + * average trip length of all trips + * average trip length of drive-alone and work trips + + """ + print('create off-model calculation input data for targeted transportation alternatives') + alt_taz_cols = ['ZONE', 'SD', 'COUNTY', 'TOTEMP', 'TOTHH', 'CIACRE', 'AREATYPE'] + alt_taz_df = taz_input_df[alt_taz_cols] + alt_taz_summary = alt_taz_df[['TOTEMP', 'TOTHH']].sum() + alt_taz_summary_df = pd.DataFrame(alt_taz_summary).reset_index() + alt_taz_summary_df.columns = ['variable', 'value'] + + # trip-distance-by-mode-superdistrict rollups + tripdist_TargetedAlt = tripdist_output_df.copy() + tripdist_TargetedAlt['total_distance'] = tripdist_TargetedAlt['mean_distance'] * tripdist_TargetedAlt['estimated_trips'] + tripdist_TargetedAlt['work_trip'] = 0 + tripdist_TargetedAlt['drive_alone'] = 0 + tripdist_TargetedAlt.loc[tripdist_TargetedAlt['tour_purpose'].str[:5] == "work_", 'work_trip'] = 1 + tripdist_TargetedAlt.loc[tripdist_TargetedAlt['simple_mode'] == 'SOV', 'drive_alone'] = 1 + + tripdist_TargetedAlt['total_distance_work_da'] = tripdist_TargetedAlt['total_distance'] * tripdist_TargetedAlt['work_trip'] * tripdist_TargetedAlt['drive_alone'] + tripdist_TargetedAlt['estimated_trips_work_da'] = 
tripdist_TargetedAlt['estimated_trips'] * tripdist_TargetedAlt['work_trip'] * tripdist_TargetedAlt['drive_alone'] + + tripdist_summary = tripdist_TargetedAlt[['total_distance', 'estimated_trips', 'total_distance_work_da', 'estimated_trips_work_da']].sum() + tripdist_summary_df = pd.DataFrame(tripdist_summary).transpose() + tripdist_summary_df['avg_trip_length'] = tripdist_summary_df['total_distance'] / tripdist_summary_df['estimated_trips'] + tripdist_summary_df['avg_trip_length_work_da'] = tripdist_summary_df['total_distance_work_da'] / tripdist_summary_df['estimated_trips_work_da'] + tripdist_summary_df = tripdist_summary_df[['avg_trip_length', 'avg_trip_length_work_da']].transpose().reset_index() + tripdist_summary_df.columns = ['variable', 'value'] + + alt_all_summary = pd.concat([alt_taz_summary_df, tripdist_summary_df]) + alt_all_summary.loc[alt_all_summary['variable'] == 'TOTHH', 'variable'] = 'total_households' + alt_all_summary.loc[alt_all_summary['variable'] == 'TOTEMP', 'variable'] = 'total_jobs' + + alt_all_summary.to_csv(os.path.join(OUTPUT_DIR, 'targetedTransportationAlternatives.csv'), index=False) + +######## Model Data - Bike Infrastructure.csv +# Data needed: TAZ input, trips output +def prep_data_for_bikeInfra(taz_input_df, tripdist_output_df): + """ + Input: + * TAZ land use input + * trip distance by mode and SD output + + Output the following data by SD (SD==0 is region-level): + * total_population + * total_population_county + * population_county_share + * total_square_miles + * bike_avg_trip_dist + * bike_dist + * bike_trip_mode_share + * estimated_trips + * sov_trip_mode_share + + """ + print('create off-model calculation input data for bike infrastructure') + + # population and land area by SD + tazdata_sd_df = taz_input_df.groupby(['SD', 'COUNTY'])[['TOTPOP', 'TOTACRE']].sum().reset_index() + tazdata_sd_df['total_square_miles'] = tazdata_sd_df['TOTACRE']/640.0 + # tazdata_sd_df.rename({'TOTPOP': 'SD_pop'}, axis=1, inplace=True) + + # total population and land area + tazdata_all = taz_input_df[['TOTPOP', 'TOTACRE']].sum() + tazdata_all_df = pd.DataFrame(tazdata_all).transpose() + tazdata_all_df['total_square_miles'] = tazdata_all_df['TOTACRE']/640.0 + tazdata_all_df['SD'] = 0 + tazdata_all_df['COUNTY'] = 0 + + # add all to SD data + tazdata_sd_df = pd.concat([tazdata_sd_df, tazdata_all_df], ignore_index=True) + + # population by county + tazdata_county_df = taz_input_df.groupby(['COUNTY'])[['TOTPOP']].sum().reset_index() + tazdata_county_df.rename({'TOTPOP': 'COUNTY_pop'}, axis=1, inplace=True) + + # calculate SD population share of county + tazdata_sd_df = tazdata_sd_df.merge(tazdata_county_df, on='COUNTY', how='left') + tazdata_sd_df['pop_county_share'] = tazdata_sd_df['TOTPOP'] / tazdata_sd_df['COUNTY_pop'] + print(len(tazdata_sd_df)) + + # For Bike Infrastructure, calculate bike and SOV trip mode share, and average bike trip distance + tripdist_bikeInfra = tripdist_output_df.copy() + tripdist_bikeInfra['bike_trips'] = \ + tripdist_bikeInfra['estimated_trips'] * (tripdist_bikeInfra['mode_name'] == 'Bike') + tripdist_bikeInfra['bike_dist'] = \ + tripdist_bikeInfra['estimated_trips'] * (tripdist_bikeInfra['mode_name'] == 'Bike') * tripdist_bikeInfra['mean_distance'] + tripdist_bikeInfra['sov_trips'] = \ + tripdist_bikeInfra['estimated_trips'] * (tripdist_bikeInfra['simple_mode'] == 'SOV') + + tripdist_sd_summary_df = tripdist_bikeInfra.groupby(['dest_sd'])[['bike_trips', 'bike_dist', 'sov_trips', 'estimated_trips']].sum().reset_index() + 
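+    # per-superdistrict rates: bike and SOV mode shares out of all estimated trips,
+    # plus average bike trip distance (total bike distance / bike trips)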
tripdist_sd_summary_df['bike_trip_mode_share'] = tripdist_sd_summary_df['bike_trips']/tripdist_sd_summary_df['estimated_trips'] + tripdist_sd_summary_df['bike_avg_trip_dist'] = tripdist_sd_summary_df['bike_dist']/tripdist_sd_summary_df['bike_trips'] + tripdist_sd_summary_df['sov_trip_mode_share'] = tripdist_sd_summary_df['sov_trips']/tripdist_sd_summary_df['estimated_trips'] + + tripdist_all_summary = tripdist_bikeInfra[['bike_trips', 'bike_dist', 'sov_trips', 'estimated_trips']].sum() + tripdist_all_summary_df = pd.DataFrame(tripdist_all_summary).transpose() + tripdist_all_summary_df['bike_trip_mode_share'] = tripdist_all_summary_df['bike_trips']/tripdist_all_summary_df['estimated_trips'] + tripdist_all_summary_df['bike_avg_trip_dist'] = tripdist_all_summary_df['bike_dist']/tripdist_all_summary_df['bike_trips'] + tripdist_all_summary_df['sov_trip_mode_share'] = tripdist_all_summary_df['sov_trips']/tripdist_all_summary_df['estimated_trips'] + tripdist_all_summary_df['dest_sd'] = 0 + + # add all to SD data + tripdist_sd_summary_df = pd.concat([tripdist_sd_summary_df, tripdist_all_summary_df], ignore_index=True) + print(len(tripdist_sd_summary_df)) + + sd_all_df = pd.merge( + tazdata_sd_df, + tripdist_sd_summary_df, + left_on='SD', + right_on='dest_sd', + how='left' + ) + print(len(sd_all_df)) + + # only keep needed columns + col_needed = [ + 'TOTPOP', 'COUNTY_pop', 'pop_county_share', 'total_square_miles', + 'bike_avg_trip_dist', 'bike_dist', 'bike_trip_mode_share', 'estimated_trips', 'sov_trip_mode_share' + ] + sd_all_df = sd_all_df[['SD'] + col_needed] + + # convert to long table + sd_all_df_long = pd.melt(sd_all_df, id_vars=['SD'], value_vars=col_needed) + + # upate variable name to be consistent with off-model template + var_name_dict = { + 'pop_county_share': 'population_county_share', + 'TOTPOP': 'total_population', + 'COUNTY_pop': 'total_population_county', + 'SD': 'superdistrict' + } + for i in var_name_dict: + sd_all_df_long.loc[sd_all_df_long['variable'] == i, 'variable'] = var_name_dict[i] + + sd_all_df_long.to_csv(os.path.join(OUTPUT_DIR, 'bikeInfrastructure.csv'), index=False) + + +if __name__ == '__main__': + + # output dir + OUTPUT_DIR = 'offmodel\\offmodel_prep' + + # TAZ land use input + print('load TAZ land use data') + tazdata_file = 'INPUT\\landuse\\tazData.csv' + tazdata_df = pd.read_csv(tazdata_file) + + # trip output and associated variable + # Mode look-up table + LOOKUP_MODE = pd.DataFrame({ + 'trip_mode': list(range(1, 22)), + 'mode_name': [ + 'Drive alone - free', 'Drive alone - pay', + 'Shared ride two - free', 'Shared ride two - pay', + 'Shared ride three - free', 'Shared ride three - pay', + 'Walk', 'Bike', + 'Walk to local bus', 'Walk to light rail or ferry', 'Walk to express bus', + 'Walk to heavy rail', 'Walk to commuter rail', + 'Drive to local bus', 'Drive to light rail or ferry', 'Drive to express bus', + 'Drive to heavy rail', 'Drive to commuter rail', + 'Taxi', 'TNC', 'TNC shared' + ], + 'simple_mode': [ + 'SOV', 'SOV', + 'HOV', 'HOV', + 'HOV 3.5', 'HOV 3.5', + 'Walk', 'Bike', + 'Walk to transit', 'Walk to transit', + 'Walk to transit', 'Walk to transit', + 'Walk to transit', 'Drive to transit', + 'Drive to transit', 'Drive to transit', + 'Drive to transit', 'Drive to transit', + 'Taxi/TNC', 'Taxi/TNC', 'Taxi/TNC' + ] + }) + + SAMPLING_RATE = 0.500 + + # load trip data + print('load trips output') + trips_R_file = 'updated_output\\trips.rdata' + trips_R = pyreadr.read_r(trips_R_file) + + # drop unneeded columns + trips = trips_R['trips'][['hh_id', 
'tour_purpose', 'distance', 'trip_mode', 'orig_taz', 'dest_taz']] + + # add origin SD and destination SD + orig_sd = tazdata_df[['ZONE', 'SD']].rename(columns={'ZONE': 'orig_taz', 'SD': 'orig_sd'}) + trips_df = trips.merge(orig_sd, on='orig_taz', how='left') + + dest_sd = tazdata_df[['ZONE', 'SD']].rename(columns={'ZONE': 'dest_taz', 'SD': 'dest_sd'}) + trips_df = trips_df.merge(dest_sd, on='dest_taz', how='left') + + # add mode name and category + trips_df = trips_df.merge(LOOKUP_MODE, on='trip_mode', how='left') + + # trip distance by mode and SD - will be used in multiple off-model prep calculations + tripdist_df = trips_df.groupby(['trip_mode', 'mode_name', 'simple_mode', 'tour_purpose', 'orig_sd', 'dest_sd']).agg( + {'hh_id': 'count', + 'distance': 'mean'}).reset_index() + tripdist_df.rename({'hh_id': 'simulated_trips', 'distance': 'mean_distance'}, axis=1, inplace=True) + + tripdist_df['estimated_trips'] = tripdist_df['simulated_trips'] / SAMPLING_RATE + tripdist_df = tripdist_df[['trip_mode', 'mode_name', 'simple_mode', 'tour_purpose', 'orig_sd', 'dest_sd', 'simulated_trips', 'estimated_trips', 'mean_distance']] + + # run the functions + prep_data_for_bikeshare(tazdata_df) + prep_data_for_carshare(tazdata_df) + prep_data_for_employerShuttle(trips_df) + prep_data_for_TargetedAlt(tazdata_df, tripdist_df) + prep_data_for_bikeInfra(tazdata_df, tripdist_df) diff --git a/utilities/RTP/Emissions/Off Model Calculators/tests/case1.py b/utilities/RTP/Emissions/Off Model Calculators/tests/case1.py new file mode 100644 index 000000000..56a78c84c --- /dev/null +++ b/utilities/RTP/Emissions/Off Model Calculators/tests/case1.py @@ -0,0 +1,59 @@ +import pandas as pd +import unittest +from unittest import TestCase + +# Case 1: Data and formulas update correctly +# This script checks that the Excel workbook +# opens and closes. Then, checks formulas are updated. +# Only checks for bike_share calculator. 
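+#
+# To run (a sketch; the PATH and OUTPUT constants below are machine-specific
+# absolute paths and would need to be edited for another workstation):
+#     python case1.py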
+ +class TestFormulaUpdate(TestCase): + def read_data_sb375(self,path): + wb=pd.ExcelFile(path) + data=pd.read_excel(wb,'SB 375 Calcs') + + # print(data) + return data + + def read_data_mainsheet(self,path): + wb=pd.ExcelFile(path) + data=pd.read_excel(wb,'Main sheet', + skiprows=22, + nrows=2, + header=None) + # print(data) + return data + + def read_bikeshare(self,path): + sb=self.read_data_sb375(path) + main=self.read_data_mainsheet(path) + + return sb,main + + def test_bike_share(self, verbose=False): + + PATH=r'C:\Users\63330\Documents\projects\MTC\travel-model-one\utilities\RTP\Emissions\Off Model Calculators\models\PBA50+_OffModel_Bikeshare.xlsx' + OUTPUT=r'C:\Users\63330\Documents\projects\MTC\travel-model-one\utilities\RTP\Emissions\Off Model Calculators\data\output\2035_TM160_IPA_16__2035_TM152_FBP_Plus_24__14\2024-08-08 10--14--23__PBA50+_OffModel_Bikeshare.xlsx' + sbOrigin,mainOrigin=self.read_bikeshare(PATH) + sbDest,mainDest=self.read_bikeshare(OUTPUT) + + passed=self.assertEqual( + mainDest.iloc[1][2], + -.00009525782551264607, + msg="Equal" + ) + + if passed==None: + print("Passed Case 1") + + if verbose: + print("SB 375 Calcs") + print("Cells updated: ",sbOrigin.iloc[1][2005]!=sbDest.iloc[1][2005]) + print("Correct value: ", sbDest.iloc[1][2005]==61020) + + print("Main sheet") + print("Cells updated: ",mainOrigin.iloc[1][2]!=mainDest.iloc[1][2]) + print("Correct value: ", mainOrigin.iloc[1][2], " - ", mainDest.iloc[1][2]) + +caseOne=TestFormulaUpdate() +caseOne.test_bike_share() \ No newline at end of file diff --git a/utilities/RTP/Emissions/Off Model Calculators/update_offmodel_calculator_workbooks_with_TM_output.py b/utilities/RTP/Emissions/Off Model Calculators/update_offmodel_calculator_workbooks_with_TM_output.py index 48e16f0d5..924db48fe 100644 --- a/utilities/RTP/Emissions/Off Model Calculators/update_offmodel_calculator_workbooks_with_TM_output.py +++ b/utilities/RTP/Emissions/Off Model Calculators/update_offmodel_calculator_workbooks_with_TM_output.py @@ -6,262 +6,111 @@ to create a set of "model data" for the off-model calculators. Example call: -`python update_offmodel_calculator_workbooks_with_TM_output.py bike_share 2035_TM160_DBP_Plan_04 2050_TM160_DBP_Plan_04` - -Inputs: off-model calculator, including - - - bike share - - car share - - targeted transportation alternatives - - vanpools - -Outputs: a copy of the calculator Excel workbook, with updated travel model data. - +`python update_offmodel_calculator_workbooks_with_TM_output.py` +Args inputs: + Flags: + -d: directory paths + for MTC team, select -d mtc (set as default) + for external team members -d external + +Models: +Includes all Excel sheet master model calculators. These models contain the logs of runs created after running the script. + +Data: + |input: includes a folder with the following strucure + |name: IPA_TM2 + -> |ModelData + -> All model data input files (xlsx) + |PBA50+ Off-Model Calculators + -> Calculators (not used) + |output: contains a copy of the calculator Excel workbook, with updated travel model data. + |run folder: named based on the uid (timestamp). + e.g. 
2024-08-09 15--50--53 (format:YYYY-MM-DD 24H--MM--SS) """ -import argparse, datetime, os, sys -import shutil, openpyxl +import argparse import pandas as pd - -# calculator names - +import os +from datetime import datetime + +from helper.bshare import Bikeshare +from helper.cshare import Carshare +from helper.targtransalt import TargetedTransAlt +from helper.vpool import VanPools +from helper.ebk import EBike +from helper.vbuyback import BuyBack +from helper.regchar import RegionalCharger +from helper.common import get_paths + +# calculator name choices BIKE_SHARE = 'bike_share' CAR_SHARE = 'car_share' TARGETED_TRANS_ALT = 'targeted_trans_alt' VAN_POOL = 'vanpools' +E_BIKE = 'e_bike' +BUY_BACK='buy_back' +REG_CHARGER='regional_charger' -##################################### -# inputs and outputs -BOX_DIR = r'C:\Users\{}\Box\Plan Bay Area 2050+\Blueprint\Off-Model\PBA50+ Off-Model'.format(os.environ.get('USERNAME')) -MODEL_DATA_BOX_DIR = os.path.join(BOX_DIR, 'model_data_all') - -OFF_MODEL_CALCULATOR_DIR = os.path.join(BOX_DIR, 'DBP_v2', 'PBA50+ Off-Model Calculators') - - -########## Bike Share -def update_bikeshare_calculator(model_runID_ls): - # make a copy of the workbook - bikeshare_master_workbook_file = os.path.join( - OFF_MODEL_CALCULATOR_DIR, - 'PBA50+_OffModel_Bikeshare.xlsx') - bikeshare_new_workbook_file = os.path.join( - OFF_MODEL_CALCULATOR_DIR, - 'PBA50+_OffModel_Bikeshare__{}__{}.xlsx'.format(model_runID_ls[0], model_runID_ls[1])) - - print(bikeshare_master_workbook_file) - print(bikeshare_new_workbook_file) - shutil.copy2(bikeshare_master_workbook_file, bikeshare_new_workbook_file) - - # load and filter model run data of selected runs - bikeshare_model_data = pd.read_csv( - os.path.join(MODEL_DATA_BOX_DIR, 'Model Data - Bikeshare.csv'), - skiprows=1) - # display(bikeshare_model_data.head(5)) - print(bikeshare_model_data['directory'].unique()) - bikeshare_model_data = bikeshare_model_data.loc[ - bikeshare_model_data['directory'].isin(model_runID_ls)] - - # add model data of selected runs to 'Model Data' sheet - print(bikeshare_new_workbook_file) - with pd.ExcelWriter(bikeshare_new_workbook_file, engine='openpyxl', mode = 'a', if_sheet_exists = 'replace') as writer: - # note this only works with pandas=1.4.3 or later; in earlier version, it will not overwrite sheet, but add new one with sheet name 'Model Data1' - bikeshare_model_data.to_excel(writer, sheet_name='Model Data', index=False, startrow=1, startcol=0) - - # get needed data into the worksheet - model_data_info = pd.read_csv( - os.path.join(MODEL_DATA_BOX_DIR, 'Model Data - Bikeshare.csv'), - nrows=0) - model_data_info = model_data_info.columns[0] - print(model_data_info) - - # also add model data log info - bikeshare_new_workbook = openpyxl.load_workbook(bikeshare_new_workbook_file) - model_data_ws = bikeshare_new_workbook['Model Data'] - model_data_ws['A1'] = model_data_info - - # also add run_id to 'Main sheet' - bikeshare_mainsheet = bikeshare_new_workbook['Main sheet'] - bikeshare_mainsheet['C14'] = model_runID_ls[0] - bikeshare_mainsheet['D14'] = model_runID_ls[1] - - # save file - bikeshare_new_workbook.save(bikeshare_new_workbook_file) - - -########## Car Share -def update_carshare_calculator(model_runID_ls): - # make a copy of the workbook - carshare_master_workbook_file = os.path.join( - OFF_MODEL_CALCULATOR_DIR, - 'PBA50+_OffModel_Carshare.xlsx') - carshare_new_workbook_file = os.path.join( - OFF_MODEL_CALCULATOR_DIR, - 'PBA50+_OffModel_Carshare__{}__{}.xlsx'.format(model_runID_ls[0], model_runID_ls[1])) - - 
print(carshare_master_workbook_file) - print(carshare_new_workbook_file) - - shutil.copy2(carshare_master_workbook_file, carshare_new_workbook_file) - - # load and filter model run data of selected runs - carshare_model_data = pd.read_csv( - os.path.join(MODEL_DATA_BOX_DIR, 'Model Data - carshare.csv'), - skiprows=2) - print(carshare_model_data.head(5)) - print(carshare_model_data['directory'].unique()) - carshare_model_data = carshare_model_data.loc[ - carshare_model_data['directory'].isin(model_runID_ls)] - - # add model data of selected runs to 'Model Data' sheet - print(carshare_new_workbook_file) - with pd.ExcelWriter(carshare_new_workbook_file, engine='openpyxl', mode = 'a', if_sheet_exists = 'replace') as writer: - carshare_model_data.to_excel(writer, sheet_name='Model Data', index=False, startrow=2, startcol=0) - - # get needed data into the worksheet - model_data_info_df = pd.read_csv( - os.path.join(MODEL_DATA_BOX_DIR, 'Model Data - Carshare.csv'), - nrows=1) - model_data_info_df.reset_index(inplace=True) - model_data_info = model_data_info_df.columns[0] - model_data_var = model_data_info_df.iloc[0, 0] - model_data_val = model_data_info_df.iloc[0, 1] - print(model_data_info) - print(model_data_var) - print(model_data_val) - - # also add model data log info - carshare_new_workbook = openpyxl.load_workbook(carshare_new_workbook_file) - model_data_ws = carshare_new_workbook['Model Data'] - model_data_ws['A1'] = model_data_var - model_data_ws['B1'] = model_data_val - model_data_ws['A2'] = model_data_info - - # also add run_id to 'Main sheet' - carshare_mainsheet = carshare_new_workbook['Main Sheet'] - carshare_mainsheet['C36'] = model_runID_ls[0] - carshare_mainsheet['D36'] = model_runID_ls[1] - - # save file - carshare_new_workbook.save(carshare_new_workbook_file) - - -########## targeted transportation alternatives -def update_targetedTransAlt_calculator(model_runID_ls): - # make a copy of the workbook - targetedTransAlt_master_workbook_file = os.path.join( - OFF_MODEL_CALCULATOR_DIR, - 'PBA50+_OffModel_TargetedTransAlt.xlsx') - targetedTransAlt_new_workbook_file = os.path.join( - OFF_MODEL_CALCULATOR_DIR, - 'PBA50+_OffModel_TargetedTransAlt__{}__{}.xlsx'.format(model_runID_ls[0], model_runID_ls[1])) - - print(targetedTransAlt_master_workbook_file) - print(targetedTransAlt_new_workbook_file) - shutil.copy2(targetedTransAlt_master_workbook_file, targetedTransAlt_new_workbook_file) - - # load and filter model run data of selected runs - targetedTransAlt_model_data = pd.read_csv( - os.path.join(MODEL_DATA_BOX_DIR, 'Model Data - Targeted Transportation Alternatives.csv'), - skiprows=1) - - # display(targetedTransAlt_model_data.head(5)) - print(targetedTransAlt_model_data['directory'].unique()) - targetedTransAlt_model_data = targetedTransAlt_model_data.loc[ - targetedTransAlt_model_data['directory'].isin(model_runID_ls)] - - # add model data of selected runs to 'Model Data' sheet - print(targetedTransAlt_new_workbook_file) - with pd.ExcelWriter(targetedTransAlt_new_workbook_file, engine='openpyxl', mode = 'a', if_sheet_exists = 'replace') as writer: - targetedTransAlt_model_data.to_excel(writer, sheet_name='Model Data', index=False, startrow=1, startcol=0) - - # get needed data into the worksheet - model_data_info = pd.read_csv( - os.path.join(MODEL_DATA_BOX_DIR, 'Model Data - Targeted Transportation Alternatives.csv'), - nrows=0) - model_data_info = model_data_info.columns[0] - print(model_data_info) - - # also add model data log info - targetedTransAlt_new_workbook = 
openpyxl.load_workbook(targetedTransAlt_new_workbook_file) - model_data_ws = targetedTransAlt_new_workbook['Model Data'] - model_data_ws['A1'] = model_data_info - - # also add run_id to 'Main sheet' - targetedTransAlt_mainsheet = targetedTransAlt_new_workbook['Main sheet'] - targetedTransAlt_mainsheet['C26'] = model_runID_ls[0] - targetedTransAlt_mainsheet['D26'] = model_runID_ls[1] - - # save file - targetedTransAlt_new_workbook.save(targetedTransAlt_new_workbook_file) - - -########## van pools -def update_valpools_calculator(model_runID_ls): - # make a copy of the workbook - vanpool_master_workbook_file = os.path.join( - OFF_MODEL_CALCULATOR_DIR, - 'PBA50+_OffModel_Vanpools.xlsx') - vanpool_new_workbook_file = os.path.join( - OFF_MODEL_CALCULATOR_DIR, - 'PBA50+_OffModel_Vanpools__{}__{}.xlsx'.format(model_runID_ls[0], model_runID_ls[1])) - - print(vanpool_master_workbook_file) - print(vanpool_new_workbook_file) - shutil.copy2(vanpool_master_workbook_file, vanpool_new_workbook_file) - - # load and filter model run data of selected runs - vanpool_model_data = pd.read_csv( - os.path.join(MODEL_DATA_BOX_DIR, 'Model Data - Employer Shuttles.csv'), - skiprows=1) - - # display(vanpool_model_data.head(5)) - print(vanpool_model_data['directory'].unique()) - vanpool_model_data = vanpool_model_data.loc[ - vanpool_model_data['directory'].isin(model_runID_ls)] - - # add model data of selected runs to 'Model Data' sheet - print(vanpool_new_workbook_file) - with pd.ExcelWriter(vanpool_new_workbook_file, engine='openpyxl', mode = 'a', if_sheet_exists = 'replace') as writer: - vanpool_model_data.to_excel(writer, sheet_name='Model Data', index=False, startrow=1, startcol=0) - - # get needed data into the worksheet - model_data_info = pd.read_csv( - os.path.join(MODEL_DATA_BOX_DIR, 'Model Data - Employer Shuttles.csv'), - nrows=0) - model_data_info = model_data_info.columns[0] - print(model_data_info) - - # also add model data log info - vanpool_new_workbook = openpyxl.load_workbook(vanpool_new_workbook_file) - model_data_ws = vanpool_new_workbook['Model Data'] - model_data_ws['A1'] = model_data_info - - # also add run_id to 'Main sheet' - vanpool_mainsheet = vanpool_new_workbook['Main Sheet'] - vanpool_mainsheet['C12'] = model_runID_ls[0] - vanpool_mainsheet['D12'] = model_runID_ls[1] - vanpool_mainsheet['E12'] = model_runID_ls[1] - - # save file - vanpool_new_workbook.save(vanpool_new_workbook_file) - -# TODO: add function for the new e-bike calculator +# template location +CWD=os.path.dirname(__file__) +TEMPLATE_DIR=os.path.join(CWD, r'update_omc_template.xlsx') if __name__ == '__main__': - parser = argparse.ArgumentParser(description=USAGE) - parser.add_argument('calculator', choices=[BIKE_SHARE,CAR_SHARE,TARGETED_TRANS_ALT,VAN_POOL], help='Calculator name') - parser.add_argument('model_run_id_2035', help='travel model run_id of a 2035 run') - parser.add_argument('model_run_id_2050', help='travel model run_id of a 2050 run') - args = parser.parse_args() - # TODO: add logging - - MODEL_RUNS = [args.model_run_id_2035, args.model_run_id_2050] - - if args.calculator == BIKE_SHARE: - update_bikeshare_calculator(MODEL_RUNS) - elif args.calculator == CAR_SHARE: - update_carshare_calculator(MODEL_RUNS) - elif args.calculator == TARGETED_TRANS_ALT: - update_targetedTransAlt_calculator(MODEL_RUNS) - elif args.calculator == VAN_POOL: - update_valpools_calculator(MODEL_RUNS) + parser = argparse.ArgumentParser(description=USAGE) + parser.add_argument('-d', choices=['mtc','external'], default='external', + 
help='choose directory mtc or external' + ) + ARGS = parser.parse_args() + DIRECTORY=ARGS.d + UID=datetime.now().strftime('%Y-%m-%d %H:%M:%S') + + + templateData=pd.read_excel(TEMPLATE_DIR + ,sheet_name='Template' + ,header=[0]).fillna("") + + for ix in range(len(templateData)): + CALCULATOR=templateData.iloc[ix]['Calculator'] + R1=templateData.iloc[ix]['model_run_id baseline'] + R2=templateData.iloc[ix]['model_run_id horizon'] + MODEL_RUN_IDS=[R1,R2] + FOLDER_NAME='2050_TM160_DBP_PLAN_08b' + + if CALCULATOR == BIKE_SHARE: + c=Bikeshare(MODEL_RUN_IDS,DIRECTORY, UID, False) + + elif CALCULATOR == CAR_SHARE: + c=Carshare(MODEL_RUN_IDS,DIRECTORY, UID, False) + + elif CALCULATOR == TARGETED_TRANS_ALT: + c=TargetedTransAlt(MODEL_RUN_IDS,DIRECTORY, UID, False) + + elif CALCULATOR == VAN_POOL: + c=VanPools(MODEL_RUN_IDS,DIRECTORY, UID, False) + + elif CALCULATOR == E_BIKE: + c=EBike(MODEL_RUN_IDS,DIRECTORY, UID, False) + + elif CALCULATOR == BUY_BACK: + c=BuyBack(MODEL_RUN_IDS,DIRECTORY, UID, False) + + elif CALCULATOR == REG_CHARGER: + c=RegionalCharger(MODEL_RUN_IDS,DIRECTORY, UID, False) + + ## TODO: Add Complete Streets calculator + + else: + raise ValueError( + "Choice not in options. Check the calculator name is correct.") + + c.update_calculator() + c.paths=get_paths(DIRECTORY) + outputSummary=c.create_output_summary_path(FOLDER_NAME) + if not os.path.exists(outputSummary): + c.initialize_summary_file(outputSummary) + else: + print("Summary file exists.") + + c.update_summary_file(outputSummary,FOLDER_NAME) + diff --git a/utilities/RTP/Emissions/Off Model Calculators/update_omc_template.xlsx b/utilities/RTP/Emissions/Off Model Calculators/update_omc_template.xlsx new file mode 100644 index 000000000..b65a52e6a Binary files /dev/null and b/utilities/RTP/Emissions/Off Model Calculators/update_omc_template.xlsx differ diff --git a/utilities/RTP/ExtractKeyFiles.bat b/utilities/RTP/ExtractKeyFiles.bat index 8ec343f15..4170e0804 100644 --- a/utilities/RTP/ExtractKeyFiles.bat +++ b/utilities/RTP/ExtractKeyFiles.bat @@ -122,6 +122,9 @@ if exist metrics\ITHIM ( copy metrics\ITHIM\*.* extractor\metrics\ITHIM ) +:: offmodel +c:\windows\system32\Robocopy.exe /E offmodel extractor\offmodel + :: make generating shapefile outputs easier mkdir extractor\shapefile copy X:\travel-model-one-master\utilities\cube-to-shapefile\run_CubeToShapefile.bat extractor\shapefile diff --git a/utilities/RTP/RunOffmodel.bat b/utilities/RTP/RunOffmodel.bat new file mode 100644 index 000000000..b7b1725cf --- /dev/null +++ b/utilities/RTP/RunOffmodel.bat @@ -0,0 +1,36 @@ +:: This first runs the off model prep script, which creates model data for +:: the off-model calculation; then runs the off-model calculator. +:: + +echo on +setlocal enabledelayedexpansion + + +:start + +:: Overhead +if not exist offmodel (mkdir offmodel) +set OFFMOEL_DIR=offmodel +set OFFMODEL_SCRIPT_DIR=.\CTRAMP\scripts\offmodel + +echo OFFMOEL_DIR=%OFFMOEL_DIR% +echo OFFMODEL_SCRIPT_DIR=%OFFMODEL_SCRIPT_DIR% +:: echo MODEL_YEAR=%MODEL_YEAR% +mkdir %OFFMOEL_DIR%\offmodel_prep +mkdir %OFFMOEL_DIR%\offmodel_output + +:: Run prep data creation script +echo %DATE% %TIME% Running offmodel prep script for bikeshare +python "%OFFMODEL_SCRIPT_DIR%\offmodel_prep.py" +echo %DATE% %TIME% ...Done + +:: Run off model calculation script +:: to-do + + +:success +echo FINISHED OFFMODEL RUN SUCESSFULLY! 
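+:: append the completion timestamp to logs\feedback.rpt (">>" accumulates entries
+:: across runs)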
+echo ENDED OFFMODEL RUN %DATE% %TIME% >> logs\feedback.rpt + +:error +echo ERRORLEVEL=%ERRORLEVEL% \ No newline at end of file diff --git a/utilities/RTP/config_RTP2025/SetUpModel_PBA50Plus.bat b/utilities/RTP/config_RTP2025/SetUpModel_PBA50Plus.bat index 0601700ef..41e03339d 100644 --- a/utilities/RTP/config_RTP2025/SetUpModel_PBA50Plus.bat +++ b/utilities/RTP/config_RTP2025/SetUpModel_PBA50Plus.bat @@ -310,6 +310,17 @@ if %MODEL_YEAR_NUM% GEQ 2035 ( ) ELSE ( set EN7=DISABLED ) + +:: ------ +:: Off-model calculation +:: ------ +set runOffModel=Yes +if "%runOffModel%"=="Yes" ( + mkdir CTRAMP\scripts\offmodel + c:\windows\system32\Robocopy.exe /NP /E "%GITHUB_DIR%\utilities\RTP\Emissions\Off Model Calculators" CTRAMP\scripts\offmodel + copy /Y "%GITHUB_DIR%\utilities\RTP\RunOffmodel.bat" %CURRENT_DIR% +) + :DoneAddingStrategies :: ------------------------------------------------------------------------------------------------------