COVID-19 Growth Rate Prediction
Predictions of COVID-19 Growth Rates Using Bayesian Modeling
Data
```python
%matplotlib inline
import numpy as np
from IPython.display import display, Markdown
import matplotlib.pyplot as plt
import matplotlib
import pandas as pd
import seaborn as sns
import arviz as az
import pymc3 as pm
import requests
import io

sns.set_context('talk')
plt.style.use('seaborn-whitegrid')
```

```python
#hide
def load_timeseries(name,
                    base_url='https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series'):
    # Thanks to kasparthommen for the suggestion to directly download
    url = f'{base_url}/time_series_19-covid-{name}.csv'
    csv = requests.get(url).text
    df = pd.read_csv(io.StringIO(csv),
                     index_col=['Country/Region', 'Province/State', 'Lat', 'Long'])
    df['type'] = name.lower()
    df.columns.name = 'date'

    df = (df.set_index('type', append=True)
            .reset_index(['Lat', 'Long'], drop=True)
            .stack()
            .reset_index()
            .set_index('date')
         )
    df.index = pd.to_datetime(df.index)
    df.columns = ['country', 'state', 'type', 'cases']

    # Move HK to country level
    df.loc[df.state == 'Hong Kong', 'country'] = 'Hong Kong'
    df.loc[df.state == 'Hong Kong', 'state'] = np.nan

    # Aggregate large countries split by states
    df = pd.concat([df,
                    (df.loc[~df.state.isna()]
                       .groupby(['country', 'date', 'type'])
                       .sum()
                       .rename(index=lambda x: x + ' (total)', level=0)
                       .reset_index(level=['country', 'type']))
                   ])
    return df

df_confirmed = load_timeseries('Confirmed')

# Drop states for simplicity
df_confirmed = df_confirmed.loc[df_confirmed.state.isnull()]

# Estimated critical cases
p_crit = .05
df_confirmed = df_confirmed.assign(cases_crit=df_confirmed.cases * p_crit)

# Compute days relative to when 100 confirmed cases was crossed
df_confirmed.loc[:, 'days_since_100'] = np.nan
for country in df_confirmed.country.unique():
    df_confirmed.loc[(df_confirmed.country == country), 'days_since_100'] = \
        np.arange(-len(df_confirmed.loc[(df_confirmed.country == country) & (df_confirmed.cases < 100)]),
                  len(df_confirmed.loc[(df_confirmed.country == country) & (df_confirmed.cases >= 100)]))

# Select countries for which we have at least some information
countries = pd.Series(df_confirmed.loc[df_confirmed.days_since_100 >= 2].country.unique())

# We only have data for China after they already had a significant number of cases.
# They also are not well modeled by the exponential, so we drop them here for simplicity.
countries = countries.loc[~countries.isin(['China (total)', 'Cruise Ship (total)'])]

df_sign = df_confirmed.loc[lambda x: x.country.isin(countries) & (x.days_since_100 >= 0)]
n_countries = len(countries)
```

Data Info
These are the countries included in the model:
```python
#hide_input
for c in countries:
    print(c)
```

Date of last data update:
```python
pd.to_datetime(df_confirmed.index.values[-1]).strftime("%e %B, %Y")
```

Growth Rate Prediction Processing
```python
#hide
with pm.Model() as model:
    ############
    # Intercept
    # Group mean
    a_grp = pm.Normal('a_grp', 100, 50)
    # Group variance
    a_grp_sigma = pm.HalfNormal('a_grp_sigma', 50)
    # Individual intercepts
    a_ind = pm.Normal('a_ind',
                      mu=a_grp, sigma=a_grp_sigma,
                      shape=n_countries)

    ########
    # Slope
    # Group mean
    b_grp = pm.Normal('b_grp', 1.33, .5)
    # Group variance
    b_grp_sigma = pm.HalfNormal('b_grp_sigma', .5)
    # Individual slopes
    b_ind = pm.Normal('b_ind',
                      mu=b_grp, sigma=b_grp_sigma,
                      shape=n_countries)

    # Error
    sigma = pm.HalfNormal('sigma', 500., shape=n_countries)

    # Create likelihood for each country
    for i, country in enumerate(countries):
        df_country = df_sign.loc[lambda x: (x.country == country)]

        # By using pm.Data we can change these values after sampling.
        # This allows us to extend x into the future so we can get
        # forecasts by sampling from the posterior predictive.
        x = pm.Data(country + "x_data",
                    df_country.days_since_100.values)
        cases = pm.Data(country + "y_data",
                        df_country.cases.astype('float64').values)

        # Likelihood
        pm.NegativeBinomial(
            country,
            (a_ind[i] * b_ind[i] ** x),  # Exponential regression
            sigma[i],
            observed=cases)
```

```python
#hide
with model:
    # Sample posterior
    trace = pm.sample(tune=1500, chains=1, cores=1, target_accept=.9)

    # Update data so that we get predictions into the future
    for country in countries:
        df_country = df_sign.loc[lambda x: (x.country == country)]
        x_data = np.arange(0, 30)
        y_data = np.array([np.nan] * len(x_data))
        pm.set_data({country + "x_data": x_data})
        pm.set_data({country + "y_data": y_data})

    # Sample posterior predictive
    post_pred = pm.sample_posterior_predictive(trace, samples=100)
```

```python
#hide
european_countries = ['Italy', 'Germany', 'France (total)', 'Spain',
                      'United Kingdom (total)', 'Iran']
large_engl_countries = ['US (total)', 'Canada (total)', 'Australia (total)']
asian_countries = ['Singapore', 'Japan', 'Korea, South', 'Hong Kong']
south_american_countries = ['Argentina', 'Brazil', 'Colombia', 'Chile']
country_groups = [european_countries, large_engl_countries, asian_countries]
line_styles = ['-', ':', '--', '-.']
```

Growth Rate Prediction
```python
#hide_input
fig, axs = plt.subplots(nrows=len(country_groups), figsize=(8, 16), sharex=True)

for ax, country_group in zip(axs, country_groups):
    for i, country in enumerate(countries):
        if country in country_group:
            sns.distplot((trace['b_ind'][:, i] * 100) - 100, ax=ax, label=country, hist=False)
    # Draw the reference line once per panel so its legend entry is not duplicated
    ax.axvline(33, ls='--', color='k', label='33% daily growth')
    ax.legend()

ax.set_xlabel('Daily growth in %')
plt.suptitle('Posterior of daily growth')
```

Predicted Cases By Country
```python
#hide_input
fig, axs = plt.subplots(nrows=n_countries // 3, ncols=3, figsize=(15, 30), sharex=True)

for ax, country in zip(axs.flatten(), countries):
    df_country = df_sign.loc[lambda x: x.country == country]
    ax.plot(df_country.days_since_100, df_country.cases, color='r')
    ax.plot(np.arange(0, post_pred[country].shape[1]), post_pred[country].T, alpha=.05, color='.5')
    ax.plot(df_country.days_since_100, df_country.cases, color='r')
    #ax.set_yscale('log')
    #ax.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
    ax.set_ylim(0, df_country.cases.iloc[-1] * 15)
    ax.set_title(country)

axs[0, 0].legend(['data', 'model prediction'])
[ax.set(xlabel='Days since 100 cases') for ax in axs[-1, :]]
[ax.set(ylabel='Confirmed cases') for ax in axs[:, 0]]
fig.tight_layout()
```

Predicted Cases By Country - Log Scale
The Y axis is on a log scale, so exponential growth appears as a straight line.
```python
#hide_input
fig, axs = plt.subplots(nrows=n_countries // 3, ncols=3, figsize=(15, 30), sharex=True)

for ax, country in zip(axs.flatten(), countries):
    df_country = df_sign.loc[lambda x: x.country == country]
    ax.plot(df_country.days_since_100, df_country.cases, color='r')
    ax.plot(np.arange(0, post_pred[country].shape[1]), post_pred[country].T, alpha=.05, color='.5')
    ax.plot(df_country.days_since_100, df_country.cases, color='r')
    ax.set_yscale('log')
    ax.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
    #ax.set_ylim(0, df_country.cases.iloc[-1] * 2.5)
    ax.set_title(country)

axs[0, 0].legend(['data', 'model prediction'])
[ax.set(xlabel='Days since 100 cases') for ax in axs[-1, :]]
[ax.set(ylabel='Confirmed cases') for ax in axs[:, 0]]
fig.tight_layout()
```

Model Diagnostics - Trace Plots
These are diagnostics for the model. You can safely ignore them if you are not familiar with MCMC.
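As a complementary, purely numerical check, here is a minimal sketch using ArviZ summaries (assuming the `trace` sampled above; note that the model was sampled with a single chain, so the R-hat diagnostic is less informative than it would be with multiple chains):

```python
# Numerical convergence summaries for the group-level parameters.
# Effective sample size and R-hat complement the visual trace plots below;
# with chains=1 (as sampled above), R-hat should be read with caution.
az.summary(trace, var_names=['a_grp', 'a_grp_sigma', 'b_grp', 'b_grp_sigma'])
```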
```python
#hide_input
az.plot_trace(trace, compact=True);
```

About This Analysis
This analysis was done by Thomas Wiecki [1].
The model that we are building assumes exponential growth. This is certainly wrong in the long run, as growth cannot continue uninterrupted into the future indefinitely; in the early phase of an epidemic, however, it is a reasonable assumption.
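Concretely, matching the `a_ind[i] * b_ind[i] ** x` term in the model code above, the expected number of cases for country $i$ on day $t$ (counted from when it crossed 100 cases) is

$$\mu_i(t) = a_i \cdot b_i^{\,t},$$

where $a_i$ is the intercept (cases at $t = 0$) and $b_i$ is the daily growth factor; $(b_i - 1) \times 100$ is the daily growth in percent shown in the posterior plots above.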
We assume a negative binomial likelihood since we are dealing with count data. A Poisson likelihood could also be used, but the negative binomial lets us model the variance separately from the mean, which gives more flexibility.
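To illustrate the difference, here is a minimal standalone sketch (toy data, not part of the model above) using PyMC3's `mu`/`alpha` parameterization, in which the negative binomial has variance $\mu + \mu^2/\alpha$:

```python
import numpy as np
import pymc3 as pm

counts = np.array([3., 8., 5., 21., 13.])  # toy count data, purely illustrative

with pm.Model():
    mu = pm.HalfNormal('mu', sigma=20.)
    # Dispersion parameter: variance = mu + mu**2 / alpha, so a small alpha
    # allows heavy overdispersion; as alpha grows this approaches a Poisson.
    alpha = pm.HalfNormal('alpha', sigma=10.)
    pm.NegativeBinomial('obs', mu=mu, alpha=alpha, observed=counts)
    # The Poisson alternative, which ties the variance to the mean:
    # pm.Poisson('obs', mu=mu, observed=counts)
```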
The model is also hierarchical: rather than fitting each country independently, it partially pools information across countries through shared group-level priors.
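In symbols, the per-country intercepts and slopes are drawn from shared group-level distributions, matching the priors in the model code above:

$$a_i \sim \mathcal{N}(a_{\mathrm{grp}}, \sigma_a), \qquad b_i \sim \mathcal{N}(b_{\mathrm{grp}}, \sigma_b)$$

As a result, countries with little data are shrunk toward the group means, while countries with more data can deviate from them.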
[1]: This notebook gets up-to-date data from the "2019 Novel Coronavirus COVID-19 (2019-nCoV) Data Repository by Johns Hopkins CSSE" GitHub repository. This code is provided under the BSD-3 License. Link to original notebook.