8. Housing Household Recovery Sequential (HHRS)#

8.1 Social vulnerabilities#

# Import CensusUtil from pyincore_data to fetch census demographic data
from pyincore_data.censusutil import CensusUtil
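The cells that follow also rely on an authenticated IN-CORE client and several pyIncore classes (Dataset, SocialVulnerability, HousingRecoverySequential, HHRSOutputProcess) that are imported earlier in the full notebook. A minimal setup sketch, assuming the standard pyIncore module layout:

# Minimal setup sketch (assumes standard pyIncore module paths and a valid
# IN-CORE account); the full notebook performs this setup in an earlier section.
from pyincore import IncoreClient, Dataset
from pyincore.analyses.socialvulnerability import SocialVulnerability
from pyincore.analyses.housingrecoverysequential import HousingRecoverySequential
from pyincore.utils.hhrsoutputprocess import HHRSOutputProcess

client = IncoreClient()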
# Parameters
state = "texas"
county = "galveston"
year = 2019
# Get the FIPS code used to fetch census data
fips = CensusUtil.get_fips_by_state_county(state=state, county=county)
state_code = fips[:2]
county_code = fips[2:]
navs = CensusUtil.national_ave_values(year=year)
national_vulnerability_feature_averages = Dataset.from_csv_data(navs, name="national_vulnerability_feature_averages.csv",
                                                                 data_type="incore:socialVulnerabilityFeatureAverages")

geo_type = "block%20group:*"
# geo_type = "tract:*"
social_vulnerability_dem_factors_df = CensusUtil.demographic_factors(state_code, county_code, year=year,
                                                                      geo_type=geo_type)

# Temporary fix: drop rows with missing values
social_vulnerability_dem_factors_df = social_vulnerability_dem_factors_df.dropna()

social_vulnerability_dem_factors = Dataset.from_dataframe(social_vulnerability_dem_factors_df,
                                                           name="social_vunlnerability_dem_factors",
                                                           data_type="incore:socialVulnerabilityDemFactors")
csv saved as api_20231211-120109.csv
csv saved as api_20231211-120109.csv
csv saved as api_20231211-120110.csv
csv saved as api_20231211-120111.csv
csv saved as api_20231211-120112.csv
csv saved as api_20231211-120113.csv
csv saved as api_20231211-120114.csv
csv saved as api_20231211-120115.csv
csv saved as api_20231211-120117.csv
csv saved as api_20231211-120118.csv
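An optional sanity check on the fetched demographic factors (illustrative only) can confirm the table's size and that the rows with missing values were dropped:

# Optional check: dimensions and first rows of the cleaned demographic factors
print(social_vulnerability_dem_factors_df.shape)
social_vulnerability_dem_factors_df.head()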
# Create the social vulnerability analysis instance and name the result
social_vulnerability = SocialVulnerability(client)

social_vulnerability.set_parameter("result_name", "gal_social_vulnerability")
social_vulnerability.set_input_dataset("national_vulnerability_feature_averages", national_vulnerability_feature_averages)
social_vulnerability.set_input_dataset("social_vulnerability_demographic_factors", social_vulnerability_dem_factors)

social_vulnerability.run_analysis()
sv_result = social_vulnerability.get_output_dataset("sv_result")
df = sv_result.get_dataframe_from_csv()
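The resulting social vulnerability scores can be inspected directly from the dataframe. The exact columns depend on the pyIncore version, so treat the snippet below as a quick look rather than a prescribed step:

# Quick look at the social vulnerability result (column layout varies by version)
print(df.columns.tolist())
df.head()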

8.2 Run simulation#
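The loop below iterates over hhrs_dislocation_dataset_names, a dictionary assembled earlier in the notebook that maps each scenario label to its population dislocation result dataset. A purely hypothetical sketch of how such a dictionary could be built (the variable names here are placeholders, not the notebook's actual objects):

# Hypothetical example only: map scenario labels to the population dislocation
# output datasets produced by earlier PopulationDislocation runs.
hhrs_dislocation_dataset_names = {
    "multi_hazard": pop_dislocation_multi_hazard.get_output_dataset("result"),
    "infrastructure": pop_dislocation_infrastructure.get_output_dataset("result"),
}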

# Population dislocation results (aggregated to the block group level) are supplied
# per scenario through hhrs_dislocation_dataset_names in the loop below.
# Transition probability matrix per social vulnerability level.
transition_probability_matrix = "60f5e2ae544e944c3cec0794"
# Initial mass probability function for stage 0.
initial_probability_vector = "60f5e918544e944c3cec668b"

# Create housing recovery instance
housing_recovery = HousingRecoverySequential(client)

# Load input datasets
for key, dislocation_dataset in hhrs_dislocation_dataset_names.items():
    housing_recovery.set_input_dataset("population_dislocation_block", dislocation_dataset)
    housing_recovery.load_remote_input_dataset("tpm", transition_probability_matrix)
    housing_recovery.load_remote_input_dataset("initial_stage_probabilities", initial_probability_vector) 
    
    # Chain with SV output
    housing_recovery.set_input_dataset('sv_result', sv_result)

    # Seed for the random number generator to ensure reproducible results
    seed = 1111
    # Size of the analysis time step
    t_delta = 1.0
    # Total duration of the simulation
    t_final = 90.0

    # Specify the result name
    result_name = f"housing_recovery_result_{key}"

    # Set analysis parameters
    housing_recovery.set_parameter("result_name", result_name)
    housing_recovery.set_parameter("seed", seed)
    housing_recovery.set_parameter("t_delta", t_delta)
    housing_recovery.set_parameter("t_final", t_final)
    housing_recovery.set_parameter("num_cpu", 8)

    housing_recovery.run_analysis()
    
    result = housing_recovery.get_output_dataset("ds_result")
    hh_stagehistory_df = result.get_dataframe_from_csv()
    #timesteps = ["1", "7", "13", "25", "85"] # t0, t6, t12, t24, t84
    timesteps = ["1", "2", "3", "4", "5", "6", "7", "13", "25", "37", "49", "61", "73", "85"]
    HHRSOutputProcess.get_hhrs_stage_count(timesteps, hh_stagehistory_df,
                                           filename_json=f"hhrs_stage_count_{key}.json")
Dataset already exists locally. Reading from local cached zip.
Unzipped folder found in the local cache. Reading from it...
Dataset already exists locally. Reading from local cached zip.
Unzipped folder found in the local cache. Reading from it...
Dataset already exists locally. Reading from local cached zip.
Unzipped folder found in the local cache. Reading from it...
Dataset already exists locally. Reading from local cached zip.
Unzipped folder found in the local cache. Reading from it...
Dataset already exists locally. Reading from local cached zip.
Unzipped folder found in the local cache. Reading from it...
Dataset already exists locally. Reading from local cached zip.
Unzipped folder found in the local cache. Reading from it...
hh_stagehistory_df['guid'].describe()
count                                    33302
unique                                   22015
top       2669f722-ae95-4181-90a8-9c4755b7b29c
freq                                       191
Name: guid, dtype: object
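The stage-history output lends itself to a simple recovery-trajectory plot. The sketch below assumes the stage-history table has one column per requested time step (named "1", "2", ...) holding household stage codes, which is what the get_hhrs_stage_count call above implies but may differ across pyIncore versions:

import matplotlib.pyplot as plt

# Share of households in each recovery stage at every requested time step
# (assumes stage-history columns are named by time step, as used above).
stage_shares = {
    t: hh_stagehistory_df[t].value_counts(normalize=True).sort_index()
    for t in timesteps
}
stages = sorted({s for shares in stage_shares.values() for s in shares.index})

fig, ax = plt.subplots(figsize=(8, 4))
for stage in stages:
    ax.plot(
        [int(t) for t in timesteps],
        [stage_shares[t].get(stage, 0.0) for t in timesteps],
        marker="o",
        label=f"Stage {stage}",
    )
ax.set_xlabel("Time step")
ax.set_ylabel("Share of households")
ax.set_title("Household recovery stage shares over time")
ax.legend()
plt.show()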

8.3 Results: multi-hazard#

8.4 Results: infrastructure#