Merge pull request #910 from ricequant/develop
rqalpha==5.5.0
Cuizi7 authored Jan 8, 2025
2 parents 29a2003 + 18b8e9c commit ebf1519
Showing 7 changed files with 103 additions and 60 deletions.
1 change: 1 addition & 0 deletions rqalpha/const.py
@@ -201,6 +201,7 @@ class EXCHANGE(CustomEnum):
     CZCE = "CZCE"
     CFFEX = "CFFEX"
     SGEX = "SGEX"
+    BJSE = "BJSE"


 # noinspection PyPep8Naming
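For reference, the new member can be used like any other exchange code once instrument data carries it. A minimal sketch, assuming CustomEnum behaves like a standard Python Enum (the .value access below is that assumption, not something shown in this diff):

from rqalpha.const import EXCHANGE

# BJSE is the Beijing Stock Exchange; it now sits alongside CZCE, CFFEX, SGEX, etc.
print(EXCHANGE.BJSE)         # -> EXCHANGE.BJSE
print(EXCHANGE.BJSE.value)   # -> "BJSE", assuming standard Enum semantics
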
2 changes: 0 additions & 2 deletions rqalpha/data/base_data_source/data_source.py
@@ -100,8 +100,6 @@ def _p(name):
             instruments.append(Instrument(
                 i,
                 lambda i: self._future_info_store.get_tick_size(i),
-                # lambda i, dt: env.data_proxy.get_futures_trading_parameters(i, dt).long_margin_ratio,
-                # lambda i, dt: env.data_proxy.get_futures_trading_parameters(i, dt).short_margin_ratio
             ))
         for ins_type in self.DEFAULT_INS_TYPES:
             self.register_instruments_store(InstrumentStore(instruments, ins_type))
16 changes: 8 additions & 8 deletions rqalpha/data/bundle.py
@@ -419,29 +419,26 @@ def __call__(self, path, fields, **kwargs):
         h5.close()


-def process_init(args: Optional[Synchronized] = None):
+def process_init(args: Optional[Synchronized] = None, kwargs = None):
+    kwargs = kwargs or {}
     import warnings
     with warnings.catch_warnings(record=True):
         # catch warning: rqdatac is already inited. Settings will be changed
-        rqdatac.init()
+        rqdatac.init(**kwargs)
     init_logger()
     # Initialize process shared variables
     if args:
         global sval
         sval = args


-def update_bundle(path, create, enable_compression=False, concurrency=1):
+def update_bundle(path, create, enable_compression=False, concurrency=1, **kwargs):
     if create:
         _DayBarTask = GenerateDayBarTask
     else:
         _DayBarTask = UpdateDayBarTask

     init_logger()
-    kwargs = {}
-    if enable_compression:
-        kwargs['compression'] = 9

     day_bar_args = (
         ("stocks.h5", rqdatac.all_instruments('CS').order_book_id.tolist(), STOCK_FIELDS),
         ("indexes.h5", rqdatac.all_instruments('INDX').order_book_id.tolist(), INDEX_FIELDS),
@@ -458,8 +455,11 @@ def update_bundle(path, create, enable_compression=False, concurrency=1):

     succeed = multiprocessing.Value(c_bool, True)
     with ProgressedProcessPoolExecutor(
-        max_workers=concurrency, initializer=process_init, initargs=(succeed, )
+        max_workers=concurrency, initializer=process_init, initargs=(succeed, kwargs)
     ) as executor:
+        kwargs = {}
+        if enable_compression:
+            kwargs['compression'] = 9
         # On Windows the child processes need to call rqdatac.init, while on other OSes rqdatac.reset is enough; rqdatac.init covers what rqdatac.reset does
         for func in gen_file_funcs:
             executor.submit(GenerateFileTask(func), path)
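For context, update_bundle now forwards any extra keyword arguments to rqdatac.init inside each worker process (via process_init and the executor's initargs). A hedged sketch of how a caller might use that; the bundle path and the uri keyword are illustrative assumptions, not values taken from this diff:

from rqalpha.data.bundle import update_bundle

# Extra keyword arguments are passed through to rqdatac.init(**kwargs) in every
# worker process; 'uri' is just an example of such an argument and depends on
# what your rqdatac installation accepts.
update_bundle(
    path="~/.rqalpha/bundle",              # hypothetical bundle directory
    create=True,
    enable_compression=True,               # still toggles HDF5 compression via the local kwargs above
    concurrency=4,
    uri="tcp://user:password@host:16010",  # assumed rqdatac.init keyword
)
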
83 changes: 38 additions & 45 deletions rqalpha/mod/rqalpha_mod_sys_analyser/mod.py
@@ -116,21 +116,9 @@ def start_up(self, env, mod_config):

         self._plot_store = PlotStore(env)
         export_as_api(self._plot_store.plot)

-    def get_benchmark_daily_returns(self):
-        if self._benchmark is None:
-            return np.nan
-        daily_return_list = []
-        weights = 0
-        for benchmark in self._benchmark:
-            bar = self._env.data_proxy.get_bar(benchmark[0], self._env.calendar_dt, '1d')
-            if bar.close != bar.close:
-                daily_return_list.append((0.0, benchmark[1]))
-            else:
-                daily_return_list.append((bar.close / bar.prev_close - 1.0, benchmark[1]))
-            weights += benchmark[1]
-        return sum([daily[0] * daily[1] / weights for daily in daily_return_list])
-
+    NULL_OID = {"null", "NULL"}
+
     def generate_benchmark_daily_returns_and_portfolio(self, event):
         _s = self._env.config.base.start_date
         _e = self._env.config.base.end_date
@@ -143,37 +131,41 @@ def generate_benchmark_daily_returns_and_portfolio(self, event):
         self._benchmark_daily_returns = np.zeros(len(trading_dates))
         weights = 0
         for order_book_id, weight in self._benchmark:
-            ins = self._env.data_proxy.instrument(order_book_id)
-            if ins is None:
-                raise RuntimeError(
-                    _("benchmark {} not exists, please entry correct order_book_id").format(order_book_id)
-                )
-            bars = self._env.data_proxy.history_bars(
-                order_book_id = order_book_id,
-                bar_count = len(trading_dates) + 1, # Get an extra day for calculation
-                frequency = "1d",
-                field = ["datetime", "close"],
-                dt = _e,
-                skip_suspended=False,
-            )
-            if len(bars) == len(trading_dates) + 1:
-                if convert_int_to_date(bars[1]['datetime']).date() != _s:
-                    raise RuntimeError(_(
-                        "benchmark {} missing data between backtest start date {} and end date {}").format(order_book_id, _s, _e)
-                    )
-                daily_returns = (bars['close'] / np.roll(bars['close'], 1) - 1.0)[1: ]
-                self._benchmark_daily_returns = self._benchmark_daily_returns + daily_returns * weight
-                weights += weight
+            if order_book_id in self.NULL_OID:
+                daily_returns = np.zeros(len(trading_dates))
             else:
-                if len(bars) == 0:
-                    (available_s, available_e) = (ins.listed_date, ins.de_listed_date)
-                else:
-                    (available_s, available_e) = (convert_int_to_date(bars[0]['datetime']).date(), convert_int_to_date(bars[-1]['datetime']).date())
-                raise RuntimeError(
-                    _("benchmark {} available data start date {} >= backtest start date {} or end date {} <= backtest end "
-                      "date {}").format(order_book_id, available_s, _s, available_e, _e)
+                ins = self._env.data_proxy.instrument(order_book_id)
+                if ins is None:
+                    raise RuntimeError(
+                        _("benchmark {} not exists, please entry correct order_book_id").format(order_book_id)
+                    )
+                bars = self._env.data_proxy.history_bars(
+                    order_book_id = order_book_id,
+                    bar_count = len(trading_dates) + 1, # Get an extra day for calculation
+                    frequency = "1d",
+                    field = ["datetime", "close"],
+                    dt = _e,
+                    skip_suspended=False,
                 )
-        self._benchmark_daily_returns = self._benchmark_daily_returns / weight
+                if len(bars) == len(trading_dates) + 1:
+                    if convert_int_to_date(bars[1]['datetime']).date() != _s:
+                        raise RuntimeError(_(
+                            "benchmark {} missing data between backtest start date {} and end date {}").format(order_book_id, _s, _e)
+                        )
+                    daily_returns = (bars['close'] / np.roll(bars['close'], 1) - 1.0)[1: ]
+                else:
+                    if len(bars) == 0:
+                        (available_s, available_e) = (ins.listed_date, ins.de_listed_date)
+                    else:
+                        (available_s, available_e) = (convert_int_to_date(bars[0]['datetime']).date(), convert_int_to_date(bars[-1]['datetime']).date())
+                    raise RuntimeError(
+                        _("benchmark {} available data start date {} >= backtest start date {} or end date {} <= backtest end "
+                          "date {}").format(order_book_id, available_s, _s, available_e, _e)
+                    )
+            self._benchmark_daily_returns = self._benchmark_daily_returns + daily_returns * weight
+            weights += weight
+
+        self._benchmark_daily_returns = self._benchmark_daily_returns / weights

         # generate benchmark portfolio
         unit_net_value = (self._benchmark_daily_returns + 1).cumprod()
@@ -229,7 +221,8 @@ def _parse_benchmark(benchmarks):
    benchmark_list = benchmarks.split(',')
    if len(benchmark_list) == 1:
        if len(benchmark_list[0].split(':')) > 1:
-            result.append((benchmark_list[0].split(':')[0], 1.0))
+            oid, weight = benchmark_list[0].split(':')
+            result.append((oid, float(weight)))
            return result
        result.append((benchmark_list[0], 1.0))
        return result
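The single-entry branch now keeps an explicit weight instead of forcing it to 1.0. A rough illustration of the intended parsing; the multi-entry behavior is not shown in this hunk and is inferred from the config strings used elsewhere in this commit:

# "000300.XSHG"           -> [("000300.XSHG", 1.0)]   (no weight given, defaults to 1.0)
# "000300.XSHG:0.5"       -> [("000300.XSHG", 0.5)]   (weight now preserved; previously forced to 1.0)
# "000300.XSHG:-1,null:2" -> [("000300.XSHG", -1.0), ("null", 2.0)]   (multi-entry path, inferred)
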
@@ -360,7 +353,7 @@ def tear_down(self, code, exception=None):
             summary["benchmark_symbol"] = self._env.data_proxy.instrument(benchmark_obid).symbol
         else:
             summary["benchmark"] = ",".join(f"{o}:{w}" for o, w in self._benchmark)
-            summary["benchmark_symbol"] = ",".join(f"{self._env.data_proxy.instrument(o).symbol}:{w}" for o, w in self._benchmark)
+            summary["benchmark_symbol"] = ",".join(f"{self._env.data_proxy.instrument(o).symbol if o not in self.NULL_OID else 'null'}:{w}" for o, w in self._benchmark)

         risk_free_rate = data_proxy.get_risk_free_rate(self._env.config.base.start_date, self._env.config.base.end_date)
         risk = Risk(
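Taken together, the benchmark is now a weighted average in which a "null"/"NULL" placeholder contributes zero returns but still counts toward the total weight. For the configuration exercised by the new test below, "000300.XSHG:-1,null:2", that nets out to a short position in the index. A minimal sketch of the arithmetic with made-up index returns:

import numpy as np

r_index = np.array([0.014, -0.025, 0.005])  # made-up 000300.XSHG daily returns
r_null = np.zeros_like(r_index)             # the "null" leg contributes nothing

combined = (r_index * -1 + r_null * 2) / (-1 + 2)
assert np.allclose(combined, -r_index)      # net effect: the inverse of the index
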
2 changes: 1 addition & 1 deletion setup.cfg
@@ -5,7 +5,7 @@

 [metadata]
 name = rqalpha
-version = 5.4.3
+version = 5.5.0

 [versioneer]
 VCS = git
55 changes: 55 additions & 0 deletions tests/api_tests/mod/sys_analyser/test_negative_benchmark.py
@@ -0,0 +1,55 @@
# -*- coding: utf-8 -*-
# Copyright 2019 Shenzhen Ricequant Technology Co., Ltd. ("Ricequant")
#
# You may not use this software except in compliance with the current license.
#
# * Non-commercial use (i.e. use of the software by individuals for non-commercial purposes, or by universities, research institutes and other non-profit institutions for education, research and similar purposes):
#   You may use the software under the Apache License 2.0 (the "Apache 2.0 License"); a copy of the Apache 2.0 License is available at http://www.apache.org/licenses/LICENSE-2.0.
#   Unless required by law or agreed to in writing, the software is distributed under the current license on an "AS IS" basis, without conditions of any kind.
#
# * Commercial use (i.e. use of the software by individuals for any commercial purpose, or by legal persons or other organizations for any purpose):
#   Without authorization from Ricequant, no individual may use the software for any commercial purpose (including but not limited to providing, selling, renting, lending or transferring to third parties the software, derivatives of the software, or products or services that reference or draw on the software's functionality or source code), and no legal person or other organization may use the software for any purpose; otherwise Ricequant reserves the right to pursue liability for the corresponding intellectual-property infringement.
#   Subject to the foregoing, use of the software must still comply with the Apache 2.0 License; where the Apache 2.0 License conflicts with this license, this license prevails.
#   For details of the authorization process, please contact [email protected].

import pandas as pd
from numpy import isclose, array

from rqalpha.apis import *


__config__ = {
    "base": {
        "start_date": "2024-11-04",
        "end_date": "2024-11-08",
        "frequency": "1d",
        "accounts": {
            "stock": 10000000,
        },
    },
    "extra": {
        "log_level": "error",
    },
    "mod": {
        "sys_analyser": {
            "benchmark": "000300.XSHG:-1,null:2",
        }
    }
}


def test_negative_benchmark():
    def handle_bar(context, _):
        from rqalpha.environment import Environment

        env = Environment.get_instance()
        df = pd.DataFrame(env.mod_dict["sys_analyser"]._total_benchmark_portfolios)
        df['date'] = pd.to_datetime(df['date'])
        benchmark_portfolio = df.set_index('date').sort_index()

        assert isclose(
            (benchmark_portfolio / benchmark_portfolio.shift(1, fill_value=1) - 1)["unit_net_value"].values,
            array([-0.01407232, -0.02530206, 0.00501645, -0.03016987, 0.01004613])
        ).all()

    return locals()
4 changes: 0 additions & 4 deletions versioneer.py
@@ -1665,13 +1665,9 @@ def render_pep440_ricequant(pieces):
     # # If we come from the dev or master branch or a hotfix branch, or this is a tag, use the pep440 version number; otherwise append the git commit id
     if tracking_branch in ["origin/develop", "origin/master"] or tracking_branch.startswith("origin/hotfix/") or pieces[
             "distance"] == 0:
-        if pieces["dirty"]:
-            rendered += ".dirty"
         return rendered

     rendered += "+%s" % format(pieces["short"])
-    if pieces["dirty"]:
-        rendered += ".dirty"
     return rendered


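The net effect of this change is that render_pep440_ricequant no longer appends a ".dirty" marker when the working tree has uncommitted changes. A rough, comment-only illustration (the base version string is assumed; only the suffix behavior comes from the hunk above):

# pieces = {"dirty": True, "distance": 0, "short": "ebf1519", ...}
# before: "5.5.0.dirty"  (or "5.5.0+ebf1519.dirty" when off the release branches)
# after:  "5.5.0"        (or "5.5.0+ebf1519")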
