#!/usr/bin/python3

# Leverage Analyze - Leveraged ETF analysis.
# Copyright (C) 2017, 2023 Gordon Irlam
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

'''
Description:

    Leverage Analyze analyzes and projects the performance of leveraged
    ETFs.

    It requires a whole bunch of data files, which can be downloaded from
    Yahoo Finance, FRED, and ProFunds:

        CPIAUCSL.csv - monthly consumer price index, FRED.
        FEDFUNDS.csv - monthly Fedfunds percentage rate, FRED.
        ^GSPC.csv - daily S&P 500 index, Yahoo Finance.
        ^RUI.csv - daily Russell 1000 index, Yahoo Finance.
        ^SP500TR.csv - daily S&P total return index, Yahoo Finance.
        SPUU.csv - daily SPUU price, Yahoo Finance.
        SPUU-div.csv - SPUU dividends, Yahoo Finance.
        SPUU-split.csv - SPUU splits, Yahoo Finance.
        SPXL.csv - daily SPXL price, Yahoo Finance.
        SPXL-div.csv - SPXL dividends, Yahoo Finance.
        SPXL-split.csv - SPXL splits, Yahoo Finance.
        SSO.csv - daily SSO price, Yahoo Finance.
        SSO-div.csv - SSO dividends, Yahoo Finance.
        SSO-split.csv - SSO splits, Yahoo Finance.
        UPRO.csv - daily UPRO price, Yahoo Finance.
        UPRO-div.csv - UPRO dividends, Yahoo Finance.
        UPRO-split.csv - UPRO splits, Yahoo Finance.
        UPRO-historical_nav.csv - daily UPRO NAV, ProFunds: https://accounts.profunds.com/etfdata/ByFund/UPRO-historical_nav.csv .

    The program prints some results and outputs a whole bunch of data
    files that can then be plotted using gnuplot. See the code for the
    definition of each data file.

Version:

    0.1.

Performance:

    It is recommended that this script be run using the PyPy just-in-time
    Python compiler/interpreter.  This will cut the run time down from
    close to a day to a few hours.
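
    For example, assuming the data files are in the current directory
    and this script has been saved as leverage_analyze.py (file name
    assumed):

        pypy leverage_analyze.py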
'''

# In case we want to run under PyPy (Python 2).
from __future__ import print_function
from __future__ import division

from csv import reader, writer
from datetime import datetime
from functools import reduce
from math import ceil, exp, floor, log, sqrt
from operator import mul
from random import lognormvariate, randrange, seed

current_am = 0.045 # Current projected arithmetic mean real annual return of index.
current_vol = 0.168 # Current projected volatility of real annual return of index.
current_real_risk_free = -0.014 # 2017-04 FRED Fedfunds rate.

scv_current_am = 0.065 # Current projected arithmetic mean real annual return of small cap value on an expected risk-adjusted basis.
scv_current_vol = 0.222 # Current projected volatility of real annual return of small cap value.

scv_anomaly_current_am = scv_current_am + 0.04 # Current projected arithmetic mean real annual return of small cap value if there is a small cap anomaly.

historical_am = 0.065 # Credit Suisse yearbook 1900-2016.
historical_vol = 0.174 # Credit Suisse yearbook 1900-2016.
historical_real_risk_free = 0.008 + 0.004 # Credit Suisse yearbook 1900-2016 geometric rate plus excess of Fedfunds over Fama and French 1-month T-bills 1955-2016.

opal_am = 0.045 # Mean real annual return of index used for Opal cross validation.
opal_vol = 0.168 # Volatility of real annual return of index used for Opal cross validation.
opal_real_risk_free = -0.017 # Risk free rate for Opal cross validation.

days_per_year = 252 # 252 trading days per year.
days_per_month = 21 # 21 trading days per month.

resample_every = days_per_month
    # Sample sequences for as long as possible to capture any possible effects of short run momentum and nonconstant volatility.
    # If we sample for too long, though, the resulting sequences aren't representative of all possible return sequences.
    # As it turns out, 28-year-long true geometric Brownian motion derived sequences (with stats_block_size = 1)
    # can be sampled anywhere from every day up to every month with only very small differences between the results and the expected results.
    # Such sequences have 336 non-overlapping month-long pieces.

worst_case_leverage_multiplier = 1e-12 # Guaranteed not to fall below this factor of original value during a single reset interval.
    # This needs to be non-zero because we encode return values using an index sequence,
    # and if the index ever hit zero there would be no way to reconstruct subsequent return values.
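    # E.g., on a hypothetical day when the index loses 40%, a 3x fund's multiplier would be
    # 1 + 3 * (0.6 - 1) = -0.2, which is clamped to this floor so the levered index stays
    # positive and subsequent returns remain recoverable.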

def mean(l):
    return sum(l) / len(l)

def var(l):
    # Sample variance (n - 1 denominator).
    m = mean(l)
    return sum((x - m) ** 2 for x in l) / (len(l) - 1)

def stdev(l):
    return sqrt(var(l))

def median(l):
    # Lower median for even-length lists.
    return sorted(l)[int((len(l) - 1) / 2)]

def geomean(l):
    return reduce(mul, l, 1) ** (1 / len(l))
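
# Quick self-checks of the statistics helpers on hypothetical values (not from the data files):
# median() returns the lower median for even-length lists, and var() uses the sample (n - 1) form.
assert mean([1.0, 2.0, 3.0]) == 2.0
assert var([1.0, 2.0, 3.0]) == 1.0
assert median([3.0, 1.0, 2.0, 4.0]) == 2.0
assert abs(geomean([1.0, 4.0]) - 2.0) < 1e-12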

def get_csv(s):

    with open(s + '.csv') as f:
        return list(reader(f))

def get_index(sym):

    index = get_csv(sym)
    head = index.pop(0)
    assert(head[0] == 'Date' and head[5] == 'Adj Close')
    index = [(date, float(s)) for (date, _, _, _, _, s, _) in index if s != 'null']

    return dict(index)

index_gspc = get_index('^GSPC')
index_rui = get_index('^RUI')
index_sp500tr = get_index('^SP500TR')
index_sp500tr['1987-12-31'] = index_gspc['1987-12-31']
    # Index values coincided as of the start of 1988: http://www.indexologyblog.com/2013/08/08/inside-the-sp-500-dividends-reinvested/
    # Results in 0.08 / 255.94 in dividends over the new year period, which seems too high, but which is immaterial.
    # Results in an extra year of data to play with.

risk_free_table = get_csv('FEDFUNDS')
assert(risk_free_table.pop(0) == ['DATE', 'FEDFUNDS'])
risk_free_table = [('-'.join(date.split('-')[0:2]), float(s) / 100) for (date, s) in risk_free_table]
risk_free_table = dict(risk_free_table)
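# E.g., risk_free_table['2017-04'] is the April 2017 Fedfunds rate as a fraction; keys are year-month strings.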

def leverage(index, factor, annual_expense, leverage_reset_interval = 1, nominal_risk_free = None):

    dates = sorted(index)
    levered_index = {}
    li = 1
    overhead_factor = 1
    for i in range(len(dates)):
        date = dates[i]
        if i == 0:
            base_index = index[date]
            base_li = li
        else:
            leverage_multiplier = 1 + factor * (index[date] / base_index - 1)
            if leverage_multiplier < worst_case_leverage_multiplier:
                leverage_multiplier = worst_case_leverage_multiplier
            li = leverage_multiplier * base_li
            if i % leverage_reset_interval == 0:
                base_index = index[date]
                base_li = li
            try:
                rf = risk_free_table['-'.join(date.split('-')[0:2])] if nominal_risk_free is None else nominal_risk_free
            except KeyError:
                pass # Month missing from FEDFUNDS.csv; reuse the previous month's rate.
            overhead_factor *= (1 - annual_expense) ** (1 / days_per_year)
            overhead_factor /= (1 + (factor - 1) * rf / 365) ** (365 / days_per_year) # Risk free is non-APR.
        levered_index[date] = li * overhead_factor

    return levered_index
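
# A minimal sanity check of leverage() on a hypothetical three day index (toy dates and
# values, not from the data files): with daily resets and no expenses, a 2x fund seeing
# +1% then -1% compounds to 1.02 * 0.98 = 0.9996, rather than twice the index's two day return.
_toy_index = {'2000-01-03': 100.0, '2000-01-04': 101.0, '2000-01-05': 99.99}
_toy_levered = leverage(_toy_index, 2, 0, nominal_risk_free = 0)
assert abs(_toy_levered['2000-01-05'] - 1.02 * 0.98) < 1e-9
del _toy_index, _toy_levered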

cpi = get_csv('CPIAUCSL')
assert(cpi.pop(0) == ['DATE', 'CPIAUCSL'])
cpi = [('-'.join(date.split('-')[0:2]), float(s) / 100) for (date, s) in cpi]
cpi = dict(cpi)

def get_cpi(date):
    return cpi['-'.join(date.split('-')[0:2])]

def analyze(symbol, nav, factor):

    def fix_spxl(l):
        '''
        Fix Yahoo weirdness in the SPXL data pre 2017-05-01.
        A 4 for 1 share split occurred, but all historical data is 16 to 1.
        '''

        return [[date, v if date >= '2017-05-01' else v / 4] for (date, v) in l]

    if nav:
        split = []
    else:
        split = get_csv(symbol + '-split')
        assert(split.pop(0) == ['Date', 'Stock Splits'])
    split_prod = 1
    for i, (date, s) in enumerate(split):
        a, b = s.split('/')
        split[i][1] = float(a) / float(b)
        split_prod *= split[i][1]
    split.sort()

    dividend = get_csv(symbol + '-div')
    assert(dividend.pop(0) == ['Date', 'Dividends'])
    for i, (date, s) in enumerate(dividend):
        dividend[i][1] = float(s)
    dividend.sort()
    if symbol == 'SPXL':
        dividend = fix_spxl(dividend)
    # Dividends are split adjusted, not original values.

    if nav:
        quote = get_csv(symbol + '-historical_nav')
        head = quote.pop(0)
        assert(head[0] == 'Date' and head[3] == 'NAV')
        def iso_date(date):
            m, d, y = date.split('/')
            return y + '-' + m + '-' + d
        quote = [(iso_date(date), float(s)) for (date, _, _, s, _, _, _, _, _) in quote]
    else:
        quote = get_csv(symbol)
        head = quote.pop(0)
        assert(head[0] == 'Date' and head[4] == 'Close')
        quote = [(date, float(s)) for (date, _, _, _, s, _, _) in quote if s != 'null']
    quote.sort()
    if symbol == 'SPXL':
        quote = fix_spxl(quote)
    quote = dict(quote)

    tr = {}
    d = 1
    s = 1
    p_prev = None
    p0 = quote[sorted(quote)[0]]
    for date in sorted(quote):
        p = quote[date]
        while dividend and dividend[0][0] <= date:
            # Dividends are on the split adjusted basis; p_prev * s / split_prod puts the prior
            # price on the same basis, and the dividend is reinvested at the ex-dividend price.
            d *= 1 + dividend[0][1] / (p_prev * s / split_prod - dividend[0][1])
            dividend.pop(0)
        while split and split[0][0] <= date:
            s *= split[0][1]
            split.pop(0)
        tr[date] = d * s * p / p0
        p_prev = p

    index = {}
    date_spxl_sp500 = '2012-06-29'
    for date in sorted(quote):
        index[date] = index_sp500tr[date]
        if symbol == 'SPXL' and date < date_spxl_sp500:
            index[date] *= (index_rui[date] / index_rui[date_spxl_sp500]) / (index_gspc[date] / index_gspc[date_spxl_sp500])

    levered_index = leverage(index, factor, 0)

    if nav:
        symbol += '_nav'

    # Nominal OK for reporting changes: since the time period is so small, using real values wouldn't make any difference.
    with open('leveraged_etfs-' + symbol + '-change.csv', 'w') as f:
        w = writer(f)
        w.writerow(['date', 'underlying index change', 'levered index change', 'total return fund change'])
        ind_prev = None
        tr_prev = 1
        for date in sorted(quote):
            try:
                ind = index[date]
                li = levered_index[date]
            except KeyError:
                pass # Date missing from one of the indexes; skip it.
            else:
                if ind_prev:
                    tri = tr[date]
                    w.writerow([date, ind / ind_prev - 1, li / li_prev - 1, tri / tr_prev - 1])
                    tr_prev = tri
                ind_prev = ind
                li_prev = li

    first_date = max(sorted(levered_index)[0], sorted(tr)[0])
    last_date = min(sorted(levered_index)[-1], sorted(tr)[-1])

    def year_tr(index, year, year_end = None):

        if not year_end:
            year_end = year
        start = 1
        for date in sorted(index):
            if not (first_date <= date <= last_date):
                continue
            y = int(date.split('-')[0])
            if y < year:
                start = index[date]
            elif y == year_end:
                end = index[date]
        return end / start

    print(symbol + ':')
    print('year', 'index', 'ETF', 'outperformance')

    first_year = int(first_date.split('-')[0])
    last_year = int(last_date.split('-')[0])
    for year in range(first_year, last_year + 1):
        annual_levered_index = year_tr(levered_index, year)
        annual_tr = year_tr(tr, year)
        print(year, annual_levered_index, annual_tr, annual_tr / annual_levered_index - 1)

    # 2008 SPXL returns cover only a small part of the year and don't track the index very well.
    # 2008 SSO returns are for a full, non-first year and don't track the index well.
    # Suspect the funds track the index poorly during periods of high volatility.
    start_year = first_year + 1
    end_year = last_year - 1
    range_levered_index = year_tr(levered_index, start_year, end_year)
    range_tr = year_tr(tr, start_year, end_year)
    print(str(start_year) + '-' + str(end_year), range_levered_index, range_tr, range_tr / range_levered_index - 1)
    annual_expense = 1 - (range_tr / range_levered_index) ** (1 / (end_year - start_year + 1))
    print('annualized expense', annual_expense)
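    # E.g., a hypothetical fund that cumulatively lags its modeled levered index by 10%
    # over 8 full years implies an annualized expense of 1 - 0.9 ** (1 / 8), roughly 1.3%.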

    all_levered_index = levered_index[sorted(levered_index)[-1]]
    all_tr = tr[sorted(tr)[-1]]
    print('all', all_levered_index, all_tr, all_tr / all_levered_index - 1)

    index = leverage(index_sp500tr, 1, 0) # Start from index value 1.
    li_synthetic = leverage(index_sp500tr, factor, 0)
    tr_synthetic = leverage(index_sp500tr, factor, annual_expense)

    with open('leveraged_etfs-' + symbol + '-model_vs_actual.csv', 'w') as f:
        w = writer(f)
        w.writerow(['date', 'model total return fund index', 'total return fund index'])
        dates = sorted(quote)
        date_base = dates[0]
        cpi_base = None
        for date in dates:
            try:
                cpi_level = get_cpi(date)
            except KeyError:
                pass # CPI not yet published for this month; reuse the previous level.
            if not cpi_base:
                cpi_base = cpi_level
            cpi_factor = cpi_level / cpi_base
            w.writerow([date, tr_synthetic[date] / tr_synthetic[date_base] / cpi_factor, tr[date] / cpi_factor])

    with open('leveraged_etfs-' + symbol + '-synthetic_level.csv', 'w') as f:
        w = writer(f)
        w.writerow(['date', 'underlying index', 'levered index', 'model total return fund index'])
        cpi_base = None
        for date in sorted(index):
            try:
                cpi_level = get_cpi(date)
            except KeyError:
                pass # CPI not yet published for this month; reuse the previous level.
            if not cpi_base:
                cpi_base = cpi_level
            cpi_factor = cpi_level / cpi_base
            w.writerow([date, index[date] / cpi_factor, li_synthetic[date] / cpi_factor, tr_synthetic[date] / cpi_factor])

    return symbol, annual_expense

def project_daily_returns(symbol, factor, annual_expense, interval, scenario, true_geometric_brownian_motion):

    assert(resample_every % interval == 0)

    # NB: Inflation rate used doesn't really matter as it cancels out when leverage is performed:
    #     leveraged_real_return = factor * (real_return + inflation) + (1 - factor) * (risk_free_real_return + inflation) - inflation
    #     leveraged_real_return = factor * real_return + (1 - factor) * risk_free_real_return
    #     since factor * inflation + (1 - factor) * inflation - inflation = 0.

    if scenario == 'current':
        target_am = current_am
        target_vol = current_vol
        real_risk_free = current_real_risk_free
        inflation = 0.023 # 2017Q2 SPF inflation rate 2017
    elif scenario == 'scv_current':
        target_am = scv_current_am
        target_vol = scv_current_vol
        real_risk_free = current_real_risk_free
        inflation = 0.023 # 2017Q2 SPF inflation rate 2017
    elif scenario == 'scv_anomaly_current':
        target_am = scv_anomaly_current_am
        target_vol = scv_current_vol
        real_risk_free = current_real_risk_free
        inflation = 0.023 # 2017Q2 SPF inflation rate 2017
    elif scenario == 'historical':
        target_am = historical_am
        target_vol = historical_vol
        real_risk_free = historical_real_risk_free
        inflation = 0.030 # 1900-2016 CPI
    elif scenario == 'opal':
        target_am = opal_am
        target_vol = opal_vol
        real_risk_free = opal_real_risk_free
        inflation = 0
    else:
        assert(False)

    symbol = symbol + '_' + scenario

    def uniform_inflation(index):

        uniform = {}
        dates = sorted(index)
        days = 0
        for date in dates:
            uniform[date] = index[date] / get_cpi(date) * get_cpi(dates[0]) * (get_cpi(dates[-1]) / get_cpi(dates[0])) ** (days / (len(index) - 1))
            days += 1

        return uniform

    def adjust_index(index, am_adjust, vol_adjust):

        res = {}
        prev_index = None
        for date in sorted(index):
            if prev_index:
                change = index[date] / prev_index
                res[date] = prev_res * (1 + am_adjust + (change - 1) * vol_adjust)
            else:
                res[date] = 1
            prev_index = index[date]
            prev_res = res[date]

        return res

    def full_years(index):

        dates = sorted(index)
        first_date = dates[0]
        last_date = dates[-1]

        first_year = first_date.split('-')[0]
        last_year = last_date.split('-')[0]
        index_full_years = {}
        last_first_date = None
        for date in dates:
            current_year = date.split('-')[0]
            if current_year == first_year:
                last_first_date = date
            elif current_year < last_year:
                index_full_years[date] = index[date]
        index_full_years[last_first_date] = index[last_first_date]

        for i in range(len(index_full_years) - 1, int(len(index_full_years) / interval) * interval, -1):
            del index_full_years[dates[i]]
        assert((len(index_full_years) - 1) % interval == 0)

        return index_full_years

    stats_block_size = 1 if true_geometric_brownian_motion else days_per_year

    m = 1 + target_am
    vol = target_vol
    mu = log(m / sqrt(1 + (vol / m) ** 2))
    sigma = sqrt(log(1 + (vol / m) ** 2))
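    # Sanity check of the lognormal moment matching above: for lognormal X with parameters mu and sigma,
    # E[X] = exp(mu + sigma ** 2 / 2) and Var[X] = E[X] ** 2 * (exp(sigma ** 2) - 1),
    # so mu and sigma as computed recover an arithmetic mean of m and a volatility of vol.
    assert abs(exp(mu + sigma ** 2 / 2) - m) < 1e-12
    assert abs(m * sqrt(exp(sigma ** 2) - 1) - vol) < 1e-12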

    def nominal_returns(index):

        changes = []
        dates = sorted(index)
        first_index = index[dates[0]]
        last_index = index[dates[-1]]
        for i in range(len(dates) - 1):
            prev_date = dates[i]
            current_date = dates[(i + stats_block_size) % len(dates)]
            prev_index = index[prev_date]
            current_index = index[current_date]
            if current_date < prev_date:
                # Wrapped past the end of the sample; splice on the full period growth so the change remains well defined.
                current_index *= last_index / first_index
            changes.append(current_index / prev_index)

        return changes

    full_years_index = full_years(index_sp500tr)

    if true_geometric_brownian_motion:

        symbol = symbol + '_gbm'

        seed(0)
        smoothed_index = {}
        dates = range(len(full_years_index) * interval) # Maintain precision despite down projection by interval.
        level = 1
        date_prev = None
        for date in dates:
            smoothed_index[date] = level
            if date_prev:
                v = lognormvariate((mu + log(1 + inflation)) / days_per_year, sigma / sqrt(days_per_year))
                    # The correspondence between this code and Opal isn't exact because Opal uses a discrete set of 120 sample returns,
                    # while we use an adjusted sample of (2016 - 1988 + 1) * 252 * 21 returns down projected by a factor of 21.
                level *= v
            date_prev = date

    else:

        smoothed_index = uniform_inflation(full_years_index)

    am_lo = am_lo_init = -0.001
    am_hi = am_hi_init = 0.001
    for _ in range(15):
        am_mid = (am_lo + am_hi) / 2
        vol_lo = vol_lo_init = 0
        vol_hi = vol_hi_init = 2
        for _ in range(15):
            vol_mid = (vol_lo + vol_hi) / 2
            target_index = adjust_index(smoothed_index, am_mid, vol_mid)
            rets = tuple(log(x) for x in nominal_returns(target_index))
            if stdev(rets) < sigma * sqrt(stats_block_size / days_per_year):
                vol_lo = vol_mid
            else:
                vol_hi = vol_mid
        if mean(rets) < (mu + log(1 + inflation)) * stats_block_size / days_per_year:
            am_lo = am_mid
        else:
            am_hi = am_mid
    assert(am_lo != am_lo_init and am_hi != am_hi_init)
    assert(vol_lo != vol_lo_init and vol_hi != vol_hi_init)

    nominal_risk_free = (1 + real_risk_free) * (1 + inflation) - 1 # Multiply geometric mean values.
    synthetic = leverage(target_index, factor, annual_expense, interval, nominal_risk_free)

    def real_changes(index):

        changes = []
        dates = sorted(index)
        for i in range(0, len(dates) - interval, interval):
            changes.append(index[dates[i + interval]] / index[dates[i]] / (1 + inflation) ** (interval / days_per_year))

        return changes

    change_interval = real_changes(synthetic)

    return symbol, change_interval

def project_returns(symbol, interval, change_interval, years):

    seed(0)
    simulations = int(10000000 / years) # Minimal number needed to produce smooth histograms.
    ps = []
    for _ in range(simulations):
        p = 1
        for day in range(0, days_per_year * years, interval):
            if day % resample_every == 0:
                i = randrange(len(change_interval)) # Start a new block at a random offset.
            p *= change_interval[i]
            i = (i + 1) % len(change_interval) # Advance sequentially within the block, wrapping around.
        ps.append(p)

    print(symbol, 'years' + str(years), 'final value factor', '-', 'median:', median(ps), 'mean:', mean(ps))

    f_max = 10
    f_steps = 20

    with open('leveraged_etfs-' + symbol + '-' + 'years' + str(years) + '-final_value_factor_pdf.csv', 'w') as f:
        w = writer(f)
        cd = 0.0
        for i in range(f_max * f_steps):
            fvf = (i + 0.5) / f_steps
            pd = sum(1 for p in ps if fvf - 0.5 / f_steps <= p < fvf + 0.5 / f_steps) / simulations
            cd += pd
            w.writerow([fvf, pd * f_steps, cd])

    r_lo = -0.20
    r_hi = 0.40
    r_steps = 100

    with open('leveraged_etfs-' + symbol + '-' + 'years' + str(years) + '-annualized_roi_pdf.csv', 'w') as f:
        w = writer(f)
        cd = 0.0
        for i in range(int(floor(r_lo * r_steps)), int(ceil(r_hi * r_steps))):
            r = i / r_steps
            pd = sum(1 for p in ps if r - 0.5 / r_steps <= p ** (1 / years) - 1 < r + 0.5 / r_steps) / simulations
            cd += pd
            w.writerow([r, pd * r_steps, cd])

def compute_utility(symbol, interval, change_interval, years):

    gamma = 2

    p_init = 200000
    c_uncorr = 100000

    def utility(c):
        return log(c) if gamma == 1 else c ** (1 - gamma) / (1 - gamma)

    def inverse_utility(u):
        return exp(u) if gamma == 1 else ((1 - gamma) * u) ** (1 / (1 - gamma))
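
    # Sanity check on a hypothetical consumption level: for gamma = 2, utility(c) = -1 / c
    # and inverse_utility() is its exact inverse.
    assert abs(inverse_utility(utility(50000.0)) / 50000.0 - 1) < 1e-9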

    seed(0)
    simulations = 1000000 # Minimal number needed to produce smooth histograms.
    utilities = []
    for _ in range(simulations):
        p = p_init
        u = 0
        for day in range(0, days_per_year * years, interval):
            if day % resample_every == 0:
                i = randrange(len(change_interval))
            p *= change_interval[i]
            i = (i + 1) % len(change_interval)
            years_remaining = years - day / days_per_year
            # The following is imperfect; really we need to use stochastic dynamic programming to compute the optimal consumption amount.
            c_fract = 1 / years_remaining # 1/N.
            c_corr = c_fract * p
            p -= c_corr * interval / days_per_year
            u += utility(c_uncorr + c_corr) * interval / days_per_year
        u = u / years
        utilities.append(u)
    ces = tuple(inverse_utility(u) - c_uncorr for u in utilities)
    print(symbol, 'utility', '-', 'median:', inverse_utility(median(utilities)) - c_uncorr, 'mean:', inverse_utility(mean(utilities)) - c_uncorr)

    ce_max = 100000
    ce_steps = 100

    step = ce_max / ce_steps
    with open('leveraged_etfs-' + symbol + '-' + 'years' + str(years) + '-ce_pdf.csv', 'w') as f:
        w = writer(f)
        cd = 0.0
        for i in range(ce_steps):
            ce_val = i * step
            pd = sum(1 for ce in ces if ce_val <= ce < ce_val + step) / simulations
            cd += pd
            w.writerow([ce_val + 0.5 * step, pd, cd])

def zero_expense_utility(symbol, interval, scenario, true_geometric_brownian_motion, years):

    for factor in range(1, 5):
        sym = symbol + '_' + str(factor) + 'x'
        print(sym + ':')
        sym, change_interval = project_daily_returns(sym, factor, 0, interval, scenario, true_geometric_brownian_motion = true_geometric_brownian_motion)
        compute_utility(sym, interval, change_interval, years)
        print()

interval = 1 # Reset leverage daily.

years = 20

def process_symbol(symbol, nav, factor, annual_expense = None):

    if annual_expense is None:
        symbol, annual_expense = analyze(symbol, nav, factor)
    else:
        print(symbol + ':')

    sym, change_interval = project_daily_returns(symbol, factor, annual_expense, interval, 'current', true_geometric_brownian_motion = False)
    for y in [1, 5, 20]:
        project_returns(sym, interval, change_interval, y)
    compute_utility(sym, interval, change_interval, years)

    # # project_daily_returns(.., 'current', true_geometric_brownian_motion = True) not utilized.

    # sym, change_interval = project_daily_returns(symbol, factor, annual_expense, interval, 'historical', true_geometric_brownian_motion = False)
    # # project_returns() not utilized.
    # compute_utility(sym, interval, change_interval, years)

    # sym, change_interval = project_daily_returns(symbol, factor, annual_expense, interval, 'historical', true_geometric_brownian_motion = True)
    # project_returns(sym, interval, change_interval, years)
    # # compute_utility() not utilized.

    print()

process_symbol('SP500TR_index_lag0.001', False, 1, 0.001)
process_symbol('SSO', False, 2) # ProShares
# process_symbol('SPUU', False, 2) # Direxion - low assets, low volume, tracks benchmark poorly
# process_symbol('UPRO', False, 3) # ProShares - only have all prices for 2016 on
process_symbol('UPRO', True, 3) # ProShares - NAV doesn't exactly reflect actual trading price
# process_symbol('SPXL', False, 3) # Direxion - effectively lags ProShares UPRO
# process_symbol('daily_4x_SP500TR_index_lag0.022', False, 4, 0.022)

# symbol = 'small_cap_value_lag0.001'
# print(symbol + ':')
# sym, change_interval = project_daily_returns(symbol, 1, 0.001, interval, 'scv_current', true_geometric_brownian_motion = False)
# compute_utility(sym, interval, change_interval, years)
# sym, change_interval = project_daily_returns(symbol, 1, 0.001, interval, 'scv_anomaly_current', true_geometric_brownian_motion = False)
# compute_utility(sym, interval, change_interval, years)
# print()

# zero_expense_utility('daily', interval, 'current', False, years)

# zero_expense_utility('monthly', days_per_month, 'opal', True, 50) # Opal cross-validate.
