code (stringlengths 66–870k) | docstring (stringlengths 19–26.7k) | func_name (stringlengths 1–138) | language (stringclasses 1 value) | repo (stringlengths 7–68) | path (stringlengths 5–324) | url (stringlengths 46–389) | license (stringclasses 7 values)
---|---|---|---|---|---|---|---|
def return_task(self, task, status=STATUS_WAITING):
"""
Return a task to the given status. Typically used in error handling.
Args:
task (dict): the task document fetched from the task pool.
status (str, optional): STATUS_WAITING, STATUS_RUNNING, STATUS_DONE, STATUS_PART_DONE. Defaults to STATUS_WAITING.
"""
if status is None:
status = TaskManager.STATUS_WAITING
update_dict = {"$set": {"status": status}}
self.task_pool.update_one({"_id": task["_id"]}, update_dict)
|
Return a task to the given status. Typically used in error handling.
Args:
task (dict): the task document fetched from the task pool.
status (str, optional): STATUS_WAITING, STATUS_RUNNING, STATUS_DONE, STATUS_PART_DONE. Defaults to STATUS_WAITING.
|
return_task
|
python
|
microsoft/qlib
|
qlib/workflow/task/manage.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/manage.py
|
MIT
|
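A minimal usage sketch for `return_task` in an error-handling loop. The pool name, the `fetch_task` usage, and `run_training` are assumptions for illustration, not verified against the repo:
from qlib.workflow.task.manage import TaskManager

tm = TaskManager(task_pool="my_task_pool")   # assumed pool name
task = tm.fetch_task()                       # assumed: fetch one waiting task and mark it running
if task is not None:
    try:
        run_training(task)                   # hypothetical user-defined training routine
    except Exception:
        # on failure, put the task back so another worker can pick it up again
        tm.return_task(task, status=TaskManager.STATUS_WAITING)
        raise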
def remove(self, query={}):
"""
Remove the task using query
Parameters
----------
query: dict
the dict of query
"""
query = query.copy()
query = self._decode_query(query)
self.task_pool.delete_many(query)
|
Remove the task using query
Parameters
----------
query: dict
the dict of query
|
remove
|
python
|
microsoft/qlib
|
qlib/workflow/task/manage.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/manage.py
|
MIT
|
def task_stat(self, query={}) -> dict:
"""
Count the tasks in every status.
Args:
query (dict, optional): the query dict. Defaults to {}.
Returns:
dict
"""
query = query.copy()
query = self._decode_query(query)
tasks = self.query(query=query, decode=False)
status_stat = {}
for t in tasks:
status_stat[t["status"]] = status_stat.get(t["status"], 0) + 1
return status_stat
|
Count the tasks in every status.
Args:
query (dict, optional): the query dict. Defaults to {}.
Returns:
dict
|
task_stat
|
python
|
microsoft/qlib
|
qlib/workflow/task/manage.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/manage.py
|
MIT
|
def reset_waiting(self, query={}):
"""
Reset all running tasks to the waiting status. Can be used when some running tasks exit unexpectedly.
Args:
query (dict, optional): the query dict. Defaults to {}.
"""
query = query.copy()
# default query
if "status" not in query:
query["status"] = self.STATUS_RUNNING
return self.reset_status(query=query, status=self.STATUS_WAITING)
|
Reset all running tasks to the waiting status. Can be used when some running tasks exit unexpectedly.
Args:
query (dict, optional): the query dict. Defaults to {}.
|
reset_waiting
|
python
|
microsoft/qlib
|
qlib/workflow/task/manage.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/manage.py
|
MIT
|
def prioritize(self, task, priority: int):
"""
Set priority for task
Parameters
----------
task : dict
The task query from the database
priority : int
the target priority
"""
update_dict = {"$set": {"priority": priority}}
self.task_pool.update_one({"_id": task["_id"]}, update_dict)
|
Set priority for task
Parameters
----------
task : dict
The task query from the database
priority : int
the target priority
|
prioritize
|
python
|
microsoft/qlib
|
qlib/workflow/task/manage.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/manage.py
|
MIT
|
def wait(self, query={}):
"""
When multiprocessing, the main process may fetch nothing from TaskManager because there are still some running tasks.
So the main process should wait until all tasks are trained well by other processes or machines.
Args:
query (dict, optional): the query dict. Defaults to {}.
"""
task_stat = self.task_stat(query)
total = self._get_total(task_stat)
last_undone_n = self._get_undone_n(task_stat)
if last_undone_n == 0:
return
self.logger.warning(f"Waiting for {last_undone_n} undone tasks. Please make sure they are running.")
with tqdm(total=total, initial=total - last_undone_n) as pbar:
while True:
time.sleep(10)
undone_n = self._get_undone_n(self.task_stat(query))
pbar.update(last_undone_n - undone_n)
last_undone_n = undone_n
if undone_n == 0:
break
|
When multiprocessing, the main process may fetch nothing from TaskManager because there are still some running tasks.
So the main process should wait until all tasks are trained well by other processes or machines.
Args:
query (dict, optional): the query dict. Defaults to {}.
|
wait
|
python
|
microsoft/qlib
|
qlib/workflow/task/manage.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/manage.py
|
MIT
|
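A sketch of the coordination pattern `wait` is meant for: the main process blocks until no task is left undone while other processes or machines keep training. The pool name is assumed; only `wait()` and `task_stat()` come from the snippets above:
from qlib.workflow.task.manage import TaskManager

tm = TaskManager(task_pool="my_task_pool")   # assumed pool name
# ... other processes / machines keep fetching and training tasks in parallel ...
tm.wait()                                    # block until no task is waiting or running
print(tm.task_stat())                        # e.g. {"done": 42}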
def get_mongodb() -> Database:
"""
Get the database from MongoDB; this requires the address and the name of the database to be declared first.
For example:
Using qlib.init():
.. code-block:: python
mongo_conf = {
"task_url": task_url, # your MongoDB url
"task_db_name": task_db_name, # database name
}
qlib.init(..., mongo=mongo_conf)
After qlib.init():
.. code-block:: python
C["mongo"] = {
"task_url" : "mongodb://localhost:27017/",
"task_db_name" : "rolling_db"
}
Returns:
Database: the Database instance
"""
try:
cfg = C["mongo"]
except KeyError:
get_module_logger("task").error("Please configure `C['mongo']` before using TaskManager")
raise
get_module_logger("task").info(f"mongo config:{cfg}")
client = MongoClient(cfg["task_url"])
return client.get_database(name=cfg["task_db_name"])
|
Get the database from MongoDB; this requires the address and the name of the database to be declared first.
For example:
Using qlib.init():
.. code-block:: python
mongo_conf = {
"task_url": task_url, # your MongoDB url
"task_db_name": task_db_name, # database name
}
qlib.init(..., mongo=mongo_conf)
After qlib.init():
.. code-block:: python
C["mongo"] = {
"task_url" : "mongodb://localhost:27017/",
"task_db_name" : "rolling_db"
}
Returns:
Database: the Database instance
|
get_mongodb
|
python
|
microsoft/qlib
|
qlib/workflow/task/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/utils.py
|
MIT
|
def list_recorders(experiment, rec_filter_func=None):
"""
List all recorders which can pass the filter in an experiment.
Args:
experiment (str or Experiment): the name of an Experiment or an instance
rec_filter_func (Callable, optional): return True to retain the given recorder. Defaults to None.
Returns:
dict: a dict {rid: recorder} after filtering.
"""
if isinstance(experiment, str):
experiment = R.get_exp(experiment_name=experiment)
recs = experiment.list_recorders()
recs_flt = {}
for rid, rec in recs.items():
if rec_filter_func is None or rec_filter_func(rec):
recs_flt[rid] = rec
return recs_flt
|
List all recorders which can pass the filter in an experiment.
Args:
experiment (str or Experiment): the name of an Experiment or an instance
rec_filter_func (Callable, optional): return True to retain the given recorder. Defaults to None.
Returns:
dict: a dict {rid: recorder} after filtering.
|
list_recorders
|
python
|
microsoft/qlib
|
qlib/workflow/task/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/utils.py
|
MIT
|
def get(self, idx: int):
"""
Get datetime by index.
Parameters
----------
idx : int
index of the calendar
"""
if idx is None or idx >= len(self.cals):
return None
return self.cals[idx]
|
Get datetime by index.
Parameters
----------
idx : int
index of the calendar
|
get
|
python
|
microsoft/qlib
|
qlib/workflow/task/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/utils.py
|
MIT
|
def align_idx(self, time_point, tp_type="start") -> int:
"""
Align the index of time_point in the calendar.
Parameters
----------
time_point
tp_type : str
Returns
-------
index : int
"""
if time_point is None:
# `None` indicates unbounded index/border
return None
time_point = pd.Timestamp(time_point)
if tp_type == "start":
idx = bisect.bisect_left(self.cals, time_point)
elif tp_type == "end":
idx = bisect.bisect_right(self.cals, time_point) - 1
else:
raise NotImplementedError(f"This type of input is not supported")
return idx
|
Align the index of time_point in the calendar.
Parameters
----------
time_point
tp_type : str
Returns
-------
index : int
|
align_idx
|
python
|
microsoft/qlib
|
qlib/workflow/task/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/utils.py
|
MIT
|
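A minimal self-contained sketch of the bisect-based lookup that `align_idx` performs, using a made-up three-day calendar:
import bisect
import pandas as pd

cals = [pd.Timestamp(d) for d in ["2020-01-02", "2020-01-03", "2020-01-06"]]  # toy calendar
tp = pd.Timestamp("2020-01-04")                 # a non-trading day
start_idx = bisect.bisect_left(cals, tp)        # 2 -> first trading day >= tp (2020-01-06)
end_idx = bisect.bisect_right(cals, tp) - 1     # 1 -> last trading day <= tp (2020-01-03)
print(cals[start_idx], cals[end_idx])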
def align_time(self, time_point, tp_type="start") -> pd.Timestamp:
"""
Align time_point to trade date of calendar
Args:
time_point
Time point
tp_type : str
time point type (`"start"`, `"end"`)
Returns:
pd.Timestamp
"""
if time_point is None:
return None
return self.cals[self.align_idx(time_point, tp_type=tp_type)]
|
Align time_point to trade date of calendar
Args:
time_point
Time point
tp_type : str
time point type (`"start"`, `"end"`)
Returns:
pd.Timestamp
|
align_time
|
python
|
microsoft/qlib
|
qlib/workflow/task/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/utils.py
|
MIT
|
def align_seg(self, segment: Union[dict, tuple]) -> Union[dict, tuple]:
"""
Align the given date to the trade date
for example:
.. code-block:: python
input: {'train': ('2008-01-01', '2014-12-31'), 'valid': ('2015-01-01', '2016-12-31'), 'test': ('2017-01-01', '2020-08-01')}
output: {'train': (Timestamp('2008-01-02 00:00:00'), Timestamp('2014-12-31 00:00:00')),
'valid': (Timestamp('2015-01-05 00:00:00'), Timestamp('2016-12-30 00:00:00')),
'test': (Timestamp('2017-01-03 00:00:00'), Timestamp('2020-07-31 00:00:00'))}
Parameters
----------
segment
Returns
-------
Union[dict, tuple]: the start and end trade date (pd.Timestamp) between the given start and end date.
"""
if isinstance(segment, dict):
return {k: self.align_seg(seg) for k, seg in segment.items()}
elif isinstance(segment, (tuple, list)):
return self.align_time(segment[0], tp_type="start"), self.align_time(segment[1], tp_type="end")
else:
raise NotImplementedError(f"This type of input is not supported")
|
Align the given date to the trade date
for example:
.. code-block:: python
input: {'train': ('2008-01-01', '2014-12-31'), 'valid': ('2015-01-01', '2016-12-31'), 'test': ('2017-01-01', '2020-08-01')}
output: {'train': (Timestamp('2008-01-02 00:00:00'), Timestamp('2014-12-31 00:00:00')),
'valid': (Timestamp('2015-01-05 00:00:00'), Timestamp('2016-12-30 00:00:00')),
'test': (Timestamp('2017-01-03 00:00:00'), Timestamp('2020-07-31 00:00:00'))}
Parameters
----------
segment
Returns
-------
Union[dict, tuple]: the start and end trade date (pd.Timestamp) between the given start and end date.
|
align_seg
|
python
|
microsoft/qlib
|
qlib/workflow/task/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/utils.py
|
MIT
|
def truncate(self, segment: tuple, test_start, days: int) -> tuple:
"""
Truncate the segment based on the test_start date
Parameters
----------
segment : tuple
time segment
test_start
days : int
The number of trading days to be truncated.
The data in this segment may need `days` extra days of data; `days` is counted back from `test_start`.
For example, if the label contains information from 2 days in the near future and the prediction horizon is 1 day
(e.g. the prediction target is `Ref($close, -2)/Ref($close, -1) - 1`),
then `days` should be 2 + 1 == 3 days.
Returns
---------
tuple: new segment
"""
test_idx = self.align_idx(test_start)
if isinstance(segment, tuple):
new_seg = []
for time_point in segment:
tp_idx = min(self.align_idx(time_point), test_idx - days)
assert tp_idx > 0
new_seg.append(self.get(tp_idx))
return tuple(new_seg)
else:
raise NotImplementedError(f"This type of input is not supported")
|
Truncate the segment based on the test_start date
Parameters
----------
segment : tuple
time segment
test_start
days : int
The number of trading days to be truncated.
The data in this segment may need `days` extra days of data; `days` is counted back from `test_start`.
For example, if the label contains information from 2 days in the near future and the prediction horizon is 1 day
(e.g. the prediction target is `Ref($close, -2)/Ref($close, -1) - 1`),
then `days` should be 2 + 1 == 3 days.
Returns
---------
tuple: new segment
|
truncate
|
python
|
microsoft/qlib
|
qlib/workflow/task/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/utils.py
|
MIT
|
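A small worked example of the truncation arithmetic described above, on a toy business-day calendar (all values are made up for illustration; the real class also handles the `None`/unbounded cases):
import bisect
import pandas as pd

cals = pd.date_range("2020-01-01", periods=10, freq="B").tolist()  # toy calendar
test_start = cals[6]
days = 3                                   # label looks 2 days ahead + 1-day prediction horizon
seg = (cals[1], cals[8])

test_idx = bisect.bisect_left(cals, test_start)
new_seg = tuple(cals[min(bisect.bisect_left(cals, tp), test_idx - days)] for tp in seg)
print(new_seg)  # the segment end is pulled back to `days` trading days before test_start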
def shift(self, seg: tuple, step: int, rtype=SHIFT_SD) -> tuple:
"""
Shift the datetime of segment
If there are None (which indicates unbounded index) in the segment, this method will return None.
Parameters
----------
seg :
datetime segment
step : int
rolling step
rtype : str
rolling type ("sliding" or "expanding")
Returns
--------
tuple: new segment
Raises
------
KeyError:
shift will raise an error if the index (both start and end) is out of self.cals
"""
if isinstance(seg, tuple):
start_idx, end_idx = self.align_idx(seg[0], tp_type="start"), self.align_idx(seg[1], tp_type="end")
if rtype == self.SHIFT_SD:
start_idx = self._add_step(start_idx, step)
end_idx = self._add_step(end_idx, step)
elif rtype == self.SHIFT_EX:
end_idx = self._add_step(end_idx, step)
else:
raise NotImplementedError(f"This type of input is not supported")
if start_idx is not None and start_idx > len(self.cals):
raise KeyError("The segment is out of valid calendar")
return self.get(start_idx), self.get(end_idx)
else:
raise NotImplementedError(f"This type of input is not supported")
|
Shift the datetime of segment
If there are None (which indicates unbounded index) in the segment, this method will return None.
Parameters
----------
seg :
datetime segment
step : int
rolling step
rtype : str
rolling type ("sliding" or "expanding")
Returns
--------
tuple: new segment
Raises
------
KeyError:
shift will raise an error if the index (both start and end) is out of self.cals
|
shift
|
python
|
microsoft/qlib
|
qlib/workflow/task/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/utils.py
|
MIT
|
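A toy illustration of the two rolling types handled by `shift`: with SHIFT_SD (sliding) both ends of the index range move by `step`, with SHIFT_EX (expanding) only the end moves (the numbers below are arbitrary):
start_idx, end_idx, step = 10, 20, 5

sliding = (start_idx + step, end_idx + step)    # SHIFT_SD: both ends move -> (15, 25)
expanding = (start_idx, end_idx + step)         # SHIFT_EX: only the end moves -> (10, 25)
print(sliding, expanding)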
def replace_task_handler_with_cache(task: dict, cache_dir: Union[str, Path] = ".") -> dict:
"""
Replace the handler in task with a cache handler.
It will automatically cache the file and save it in cache_dir.
>>> import qlib
>>> qlib.auto_init()
>>> import datetime
>>> # it is simplified task
>>> task = {"dataset": {"kwargs":{'handler': {'class': 'Alpha158', 'module_path': 'qlib.contrib.data.handler', 'kwargs': {'start_time': datetime.date(2008, 1, 1), 'end_time': datetime.date(2020, 8, 1), 'fit_start_time': datetime.date(2008, 1, 1), 'fit_end_time': datetime.date(2014, 12, 31), 'instruments': 'CSI300'}}}}}
>>> new_task = replace_task_handler_with_cache(task)
>>> print(new_task)
{'dataset': {'kwargs': {'handler': 'file...Alpha158.3584f5f8b4.pkl'}}}
"""
cache_dir = Path(cache_dir)
task = deepcopy(task)
handler = task["dataset"]["kwargs"]["handler"]
if isinstance(handler, dict):
hash = hash_args(handler)
h_path = cache_dir / f"{handler['class']}.{hash[:10]}.pkl"
if not h_path.exists():
h = init_instance_by_config(handler)
h.to_pickle(h_path, dump_all=True)
task["dataset"]["kwargs"]["handler"] = f"file://{h_path}"
return task
|
Replace the handler in task with a cache handler.
It will automatically cache the file and save it in cache_dir.
>>> import qlib
>>> qlib.auto_init()
>>> import datetime
>>> # it is simplified task
>>> task = {"dataset": {"kwargs":{'handler': {'class': 'Alpha158', 'module_path': 'qlib.contrib.data.handler', 'kwargs': {'start_time': datetime.date(2008, 1, 1), 'end_time': datetime.date(2020, 8, 1), 'fit_start_time': datetime.date(2008, 1, 1), 'fit_end_time': datetime.date(2014, 12, 31), 'instruments': 'CSI300'}}}}}
>>> new_task = replace_task_handler_with_cache(task)
>>> print(new_task)
{'dataset': {'kwargs': {'handler': 'file...Alpha158.3584f5f8b4.pkl'}}}
|
replace_task_handler_with_cache
|
python
|
microsoft/qlib
|
qlib/workflow/task/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/utils.py
|
MIT
|
def __init__(
self,
qlib_dir: str,
csv_path: str,
check_fields: str = None,
freq: str = "day",
symbol_field_name: str = "symbol",
date_field_name: str = "date",
file_suffix: str = ".csv",
max_workers: int = 16,
):
"""
Parameters
----------
qlib_dir : str
qlib dir
csv_path : str
origin csv path
check_fields : str, optional
check fields, by default None, check qlib_dir/features/<first_dir>/*.<freq>.bin
freq : str, optional
freq, value from ["day", "1m"]
symbol_field_name: str, optional
symbol field name, by default "symbol"
date_field_name: str, optional
date field name, by default "date"
file_suffix: str, optional
csv file suffix, by default ".csv"
max_workers: int, optional
max workers, by default 16
"""
self.qlib_dir = Path(qlib_dir).expanduser()
bin_path_list = list(self.qlib_dir.joinpath("features").iterdir())
self.qlib_symbols = sorted(map(lambda x: x.name.lower(), bin_path_list))
qlib.init(
provider_uri=str(self.qlib_dir.resolve()),
mount_path=str(self.qlib_dir.resolve()),
auto_mount=False,
redis_port=-1,
)
csv_path = Path(csv_path).expanduser()
self.csv_files = sorted(csv_path.glob(f"*{file_suffix}") if csv_path.is_dir() else [csv_path])
if check_fields is None:
check_fields = list(map(lambda x: x.name.split(".")[0], bin_path_list[0].glob(f"*.bin")))
else:
check_fields = check_fields.split(",") if isinstance(check_fields, str) else check_fields
self.check_fields = list(map(lambda x: x.strip(), check_fields))
self.qlib_fields = list(map(lambda x: f"${x}", self.check_fields))
self.max_workers = max_workers
self.symbol_field_name = symbol_field_name
self.date_field_name = date_field_name
self.freq = freq
self.file_suffix = file_suffix
|
Parameters
----------
qlib_dir : str
qlib dir
csv_path : str
origin csv path
check_fields : str, optional
check fields, by default None, check qlib_dir/features/<first_dir>/*.<freq>.bin
freq : str, optional
freq, value from ["day", "1m"]
symbol_field_name: str, optional
symbol field name, by default "symbol"
date_field_name: str, optional
date field name, by default "date"
file_suffix: str, optional
csv file suffix, by default ".csv"
max_workers: int, optional
max workers, by default 16
|
__init__
|
python
|
microsoft/qlib
|
scripts/check_dump_bin.py
|
https://github.com/microsoft/qlib/blob/master/scripts/check_dump_bin.py
|
MIT
|
def check(self):
"""Check whether the bin file after ``dump_bin.py`` is executed is consistent with the original csv file data"""
logger.info("start check......")
error_list = []
not_in_features = []
compare_false = []
with tqdm(total=len(self.csv_files)) as p_bar:
with ProcessPoolExecutor(max_workers=self.max_workers) as executor:
for file_path, _check_res in zip(self.csv_files, executor.map(self._compare, self.csv_files)):
symbol = file_path.name.strip(self.file_suffix)
if _check_res == self.NOT_IN_FEATURES:
not_in_features.append(symbol)
elif _check_res == self.COMPARE_ERROR:
error_list.append(symbol)
elif _check_res == self.COMPARE_FALSE:
compare_false.append(symbol)
p_bar.update()
logger.info("end of check......")
if error_list:
logger.warning(f"compare error: {error_list}")
if not_in_features:
logger.warning(f"not in features: {not_in_features}")
if compare_false:
logger.warning(f"compare False: {compare_false}")
logger.info(
f"total {len(self.csv_files)}, {len(error_list)} errors, {len(not_in_features)} not in features, {len(compare_false)} compare false"
)
|
Check whether the bin file after ``dump_bin.py`` is executed is consistent with the original csv file data
|
check
|
python
|
microsoft/qlib
|
scripts/check_dump_bin.py
|
https://github.com/microsoft/qlib/blob/master/scripts/check_dump_bin.py
|
MIT
|
def _dump_pit(
self,
file_path: str,
interval: str = "quarterly",
overwrite: bool = False,
):
"""
dump data as the following format:
`/path/to/<field>.data`
[date, period, value, _next]
[date, period, value, _next]
[...]
`/path/to/<field>.index`
[first_year, index, index, ...]
`<field>.data` contains the data in point-in-time (PIT) order: the `value` of `period`
is published at `date`, and its successively revised value can be found at `_next` (a linked list).
`<field>.index` contains the index of the value for each period (quarter or year). To save
disk space, we only store `first_year`, as the following periods can easily be inferred.
Parameters
----------
file_path: str
path to the source data file of one stock symbol
interval: str
data interval
overwrite: bool
whether overwrite existing data or update only
"""
symbol = self.get_symbol_from_file(file_path)
df = self.get_source_data(file_path)
if df.empty:
logger.warning(f"{symbol} file is empty")
return
for field in self.get_dump_fields(df):
df_sub = df.query(f'{self.field_column_name}=="{field}"').sort_values(self.date_column_name)
if df_sub.empty:
logger.warning(f"field {field} of {symbol} is empty")
continue
data_file, index_file = self.get_filenames(symbol, field, interval)
## calculate first & last period
start_year = df_sub[self.period_column_name].min()
end_year = df_sub[self.period_column_name].max()
if interval == self.INTERVAL_quarterly:
start_year //= 100
end_year //= 100
# adjust `first_year` if existing data found
if not overwrite and index_file.exists():
with open(index_file, "rb") as fi:
(first_year,) = struct.unpack(self.PERIOD_DTYPE, fi.read(self.PERIOD_DTYPE_SIZE))
n_years = len(fi.read()) // self.INDEX_DTYPE_SIZE
if interval == self.INTERVAL_quarterly:
n_years //= 4
start_year = first_year + n_years
else:
with open(index_file, "wb") as f:
f.write(struct.pack(self.PERIOD_DTYPE, start_year))
first_year = start_year
# if data already exists, continue to the next field
if start_year > end_year:
logger.warning(f"{symbol}-{field} data already exists, continue to the next field")
continue
# dump index filled with NA
with open(index_file, "ab") as fi:
for year in range(start_year, end_year + 1):
if interval == self.INTERVAL_quarterly:
fi.write(struct.pack(self.INDEX_DTYPE * 4, *[self.NA_INDEX] * 4))
else:
fi.write(struct.pack(self.INDEX_DTYPE, self.NA_INDEX))
# if data already exists, remove overlapped data
if not overwrite and data_file.exists():
with open(data_file, "rb") as fd:
fd.seek(-self.DATA_DTYPE_SIZE, 2)
last_date, _, _, _ = struct.unpack(self.DATA_DTYPE, fd.read())
df_sub = df_sub.query(f"{self.date_column_name}>{last_date}")
# otherwise,
# 1) truncate existing file or create a new file with `wb+` if overwrite,
# 2) or append existing file or create a new file with `ab+` if not overwrite
else:
with open(data_file, "wb+" if overwrite else "ab+"):
pass
with open(data_file, "rb+") as fd, open(index_file, "rb+") as fi:
# update index if needed
for i, row in df_sub.iterrows():
# get index
offset = get_period_offset(first_year, row.period, interval == self.INTERVAL_quarterly)
fi.seek(self.PERIOD_DTYPE_SIZE + self.INDEX_DTYPE_SIZE * offset)
(cur_index,) = struct.unpack(self.INDEX_DTYPE, fi.read(self.INDEX_DTYPE_SIZE))
# Case I: new data => update `_next` with current index
if cur_index == self.NA_INDEX:
fi.seek(self.PERIOD_DTYPE_SIZE + self.INDEX_DTYPE_SIZE * offset)
fi.write(struct.pack(self.INDEX_DTYPE, fd.tell()))
# Case II: previous data exists => find and update the last `_next`
else:
_cur_fd = fd.tell()
prev_index = self.NA_INDEX
while cur_index != self.NA_INDEX: # NOTE: first iter always != NA_INDEX
fd.seek(cur_index + self.DATA_DTYPE_SIZE - self.INDEX_DTYPE_SIZE)
prev_index = cur_index
(cur_index,) = struct.unpack(self.INDEX_DTYPE, fd.read(self.INDEX_DTYPE_SIZE))
fd.seek(prev_index + self.DATA_DTYPE_SIZE - self.INDEX_DTYPE_SIZE)
fd.write(struct.pack(self.INDEX_DTYPE, _cur_fd)) # NOTE: add _next pointer
fd.seek(_cur_fd)
# dump data
fd.write(struct.pack(self.DATA_DTYPE, row.date, row.period, row.value, self.NA_INDEX))
|
dump data as the following format:
`/path/to/<field>.data`
[date, period, value, _next]
[date, period, value, _next]
[...]
`/path/to/<field>.index`
[first_year, index, index, ...]
`<field>.data` contains the data in point-in-time (PIT) order: the `value` of `period`
is published at `date`, and its successively revised value can be found at `_next` (a linked list).
`<field>.index` contains the index of the value for each period (quarter or year). To save
disk space, we only store `first_year`, as the following periods can easily be inferred.
Parameters
----------
file_path: str
path to the source data file of one stock symbol
interval: str
data interval
overwrite: bool
whether overwrite existing data or update only
|
_dump_pit
|
python
|
microsoft/qlib
|
scripts/dump_pit.py
|
https://github.com/microsoft/qlib/blob/master/scripts/dump_pit.py
|
MIT
|
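A hypothetical reader for the PIT layout described in `_dump_pit`. The struct formats below are assumptions chosen only to make the sketch runnable; the authoritative `PERIOD_DTYPE`/`INDEX_DTYPE`/`DATA_DTYPE` values live in `scripts/dump_pit.py`:
import struct

# Assumed formats for illustration: "<I" year header, "<I" per index slot, "<IIfI" per data record.
PERIOD_FMT, INDEX_FMT, DATA_FMT = "<I", "<I", "<IIfI"
NA_INDEX = 0xFFFFFFFF

def read_pit_field(data_path, index_path):
    # read the first year and the head offsets of every period's revision chain
    with open(index_path, "rb") as fi:
        (first_year,) = struct.unpack(PERIOD_FMT, fi.read(struct.calcsize(PERIOD_FMT)))
        raw = fi.read()
    n = struct.calcsize(INDEX_FMT)
    heads = [struct.unpack(INDEX_FMT, raw[i : i + n])[0] for i in range(0, len(raw), n)]
    records = []
    with open(data_path, "rb") as fd:
        for head in heads:
            cur = head
            while cur != NA_INDEX:           # follow the `_next` linked list of revisions
                fd.seek(cur)
                date, period, value, cur = struct.unpack(DATA_FMT, fd.read(struct.calcsize(DATA_FMT)))
                records.append((date, period, value))
    return first_year, records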
def get_data(
self, symbol: str, interval: str, start_datetime: pd.Timestamp, end_datetime: pd.Timestamp
) -> pd.DataFrame:
"""get data with symbol
Parameters
----------
symbol: str
interval: str
value from [1min, 1d]
start_datetime: pd.Timestamp
end_datetime: pd.Timestamp
Returns
---------
pd.DataFrame, "symbol" and "date"in pd.columns
"""
raise NotImplementedError("rewrite get_timezone")
|
get data with symbol
Parameters
----------
symbol: str
interval: str
value from [1min, 1d]
start_datetime: pd.Timestamp
end_datetime: pd.Timestamp
Returns
---------
pd.DataFrame, "symbol" and "date"in pd.columns
|
get_data
|
python
|
microsoft/qlib
|
scripts/data_collector/base.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/base.py
|
MIT
|
def save_instrument(self, symbol, df: pd.DataFrame):
"""save instrument data to file
Parameters
----------
symbol: str
instrument code
df : pd.DataFrame
df.columns must contain "symbol" and "datetime"
"""
if df is None or df.empty:
logger.warning(f"{symbol} is empty")
return
symbol = self.normalize_symbol(symbol)
symbol = code_to_fname(symbol)
instrument_path = self.save_dir.joinpath(f"{symbol}.csv")
df["symbol"] = symbol
if instrument_path.exists():
_old_df = pd.read_csv(instrument_path)
df = pd.concat([_old_df, df], sort=False)
df.to_csv(instrument_path, index=False)
|
save instrument data to file
Parameters
----------
symbol: str
instrument code
df : pd.DataFrame
df.columns must contain "symbol" and "datetime"
|
save_instrument
|
python
|
microsoft/qlib
|
scripts/data_collector/base.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/base.py
|
MIT
|
def __init__(self, date_field_name: str = "date", symbol_field_name: str = "symbol", **kwargs):
"""
Parameters
----------
date_field_name: str
date field name, default is date
symbol_field_name: str
symbol field name, default is symbol
"""
self._date_field_name = date_field_name
self._symbol_field_name = symbol_field_name
self.kwargs = kwargs
self._calendar_list = self._get_calendar_list()
|
Parameters
----------
date_field_name: str
date field name, default is date
symbol_field_name: str
symbol field name, default is symbol
|
__init__
|
python
|
microsoft/qlib
|
scripts/data_collector/base.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/base.py
|
MIT
|
def __init__(
self,
source_dir: [str, Path],
target_dir: [str, Path],
normalize_class: Type[BaseNormalize],
max_workers: int = 16,
date_field_name: str = "date",
symbol_field_name: str = "symbol",
**kwargs,
):
"""
Parameters
----------
source_dir: str or Path
The directory where the raw data collected from the Internet is saved
target_dir: str or Path
Directory for normalize data
normalize_class: Type[YahooNormalize]
normalize class
max_workers: int
Concurrent number, default is 16
date_field_name: str
date field name, default is date
symbol_field_name: str
symbol field name, default is symbol
"""
if not (source_dir and target_dir):
raise ValueError("source_dir and target_dir cannot be None")
self._source_dir = Path(source_dir).expanduser()
self._target_dir = Path(target_dir).expanduser()
self._target_dir.mkdir(parents=True, exist_ok=True)
self._date_field_name = date_field_name
self._symbol_field_name = symbol_field_name
self._end_date = kwargs.get("end_date", None)
self._max_workers = max_workers
self._normalize_obj = normalize_class(
date_field_name=date_field_name, symbol_field_name=symbol_field_name, **kwargs
)
|
Parameters
----------
source_dir: str or Path
The directory where the raw data collected from the Internet is saved
target_dir: str or Path
Directory for normalize data
normalize_class: Type[YahooNormalize]
normalize class
max_workers: int
Concurrent number, default is 16
date_field_name: str
date field name, default is date
symbol_field_name: str
symbol field name, default is symbol
|
__init__
|
python
|
microsoft/qlib
|
scripts/data_collector/base.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/base.py
|
MIT
|
def __init__(self, source_dir=None, normalize_dir=None, max_workers=1, interval="1d"):
"""
Parameters
----------
source_dir: str
The directory where the raw data collected from the Internet is saved, default "Path(__file__).parent/source"
normalize_dir: str
Directory for normalize data, default "Path(__file__).parent/normalize"
max_workers: int
Concurrent number, default is 1; when collecting data, it is recommended that max_workers be set to 1
interval: str
freq, value from [1min, 1d], default 1d
"""
if source_dir is None:
source_dir = Path(self.default_base_dir).joinpath("source")
self.source_dir = Path(source_dir).expanduser().resolve()
self.source_dir.mkdir(parents=True, exist_ok=True)
if normalize_dir is None:
normalize_dir = Path(self.default_base_dir).joinpath("normalize")
self.normalize_dir = Path(normalize_dir).expanduser().resolve()
self.normalize_dir.mkdir(parents=True, exist_ok=True)
self._cur_module = importlib.import_module("collector")
self.max_workers = max_workers
self.interval = interval
|
Parameters
----------
source_dir: str
The directory where the raw data collected from the Internet is saved, default "Path(__file__).parent/source"
normalize_dir: str
Directory for normalize data, default "Path(__file__).parent/normalize"
max_workers: int
Concurrent number, default is 1; when collecting data, it is recommended that max_workers be set to 1
interval: str
freq, value from [1min, 1d], default 1d
|
__init__
|
python
|
microsoft/qlib
|
scripts/data_collector/base.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/base.py
|
MIT
|
def normalize_data(self, date_field_name: str = "date", symbol_field_name: str = "symbol", **kwargs):
"""normalize data
Parameters
----------
date_field_name: str
date field name, default date
symbol_field_name: str
symbol field name, default symbol
Examples
---------
$ python collector.py normalize_data --source_dir ~/.qlib/instrument_data/source --normalize_dir ~/.qlib/instrument_data/normalize --region CN --interval 1d
"""
_class = getattr(self._cur_module, self.normalize_class_name)
yc = Normalize(
source_dir=self.source_dir,
target_dir=self.normalize_dir,
normalize_class=_class,
max_workers=self.max_workers,
date_field_name=date_field_name,
symbol_field_name=symbol_field_name,
**kwargs,
)
yc.normalize()
|
normalize data
Parameters
----------
date_field_name: str
date field name, default date
symbol_field_name: str
symbol field name, default symbol
Examples
---------
$ python collector.py normalize_data --source_dir ~/.qlib/instrument_data/source --normalize_dir ~/.qlib/instrument_data/normalize --region CN --interval 1d
|
normalize_data
|
python
|
microsoft/qlib
|
scripts/data_collector/base.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/base.py
|
MIT
|
def __init__(self, qlib_dir: Union[str, Path], start_date: str = None, end_date: str = None):
"""
Parameters
----------
qlib_dir:
qlib data directory
start_date
start date
end_date
end date
"""
self.qlib_dir = Path(qlib_dir).expanduser().absolute()
self.calendar_path = self.qlib_dir.joinpath("calendars/day.txt")
self.future_path = self.qlib_dir.joinpath("calendars/day_future.txt")
self._calendar_list = self.calendar_list
_latest_date = self._calendar_list[-1]
self.start_date = _latest_date if start_date is None else pd.Timestamp(start_date)
self.end_date = _latest_date + pd.Timedelta(days=365 * 2) if end_date is None else pd.Timestamp(end_date)
|
Parameters
----------
qlib_dir:
qlib data directory
start_date
start date
end_date
end date
|
__init__
|
python
|
microsoft/qlib
|
scripts/data_collector/future_calendar_collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/future_calendar_collector.py
|
MIT
|
def run(qlib_dir: Union[str, Path], region: str = "cn", start_date: str = None, end_date: str = None):
"""Collect future calendar(day)
Parameters
----------
qlib_dir:
qlib data directory
region:
cn/CN or us/US
start_date
start date
end_date
end date
Examples
-------
# get cn future calendar
$ python future_calendar_collector.py --qlib_data_1d_dir <user data dir> --region cn
"""
logger.info(f"collector future calendar: region={region}")
_cur_module = importlib.import_module("future_calendar_collector")
_class = getattr(_cur_module, f"CollectorFutureCalendar{region.upper()}")
collector = _class(qlib_dir=qlib_dir, start_date=start_date, end_date=end_date)
collector.write_calendar(collector.collector())
|
Collect future calendar(day)
Parameters
----------
qlib_dir:
qlib data directory
region:
cn/CN or us/US
start_date
start date
end_date
end date
Examples
-------
# get cn future calendar
$ python future_calendar_collector.py --qlib_data_1d_dir <user data dir> --region cn
|
run
|
python
|
microsoft/qlib
|
scripts/data_collector/future_calendar_collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/future_calendar_collector.py
|
MIT
|
def __init__(
self,
index_name: str,
qlib_dir: [str, Path] = None,
freq: str = "day",
request_retry: int = 5,
retry_sleep: int = 3,
):
"""
Parameters
----------
index_name: str
index name
qlib_dir: str
qlib directory, by default Path(__file__).resolve().parent.joinpath("qlib_data")
freq: str
freq, value from ["day", "1min"]
request_retry: int
request retry, by default 5
retry_sleep: int
request sleep, by default 3
"""
self.index_name = index_name
if qlib_dir is None:
qlib_dir = Path(__file__).resolve().parent.joinpath("qlib_data")
self.instruments_dir = Path(qlib_dir).expanduser().resolve().joinpath("instruments")
self.instruments_dir.mkdir(exist_ok=True, parents=True)
self.cache_dir = Path(f"~/.cache/qlib/index/{self.index_name}").expanduser().resolve()
self.cache_dir.mkdir(exist_ok=True, parents=True)
self._request_retry = request_retry
self._retry_sleep = retry_sleep
self.freq = freq
|
Parameters
----------
index_name: str
index name
qlib_dir: str
qlib directory, by default Path(__file__).resolve().parent.joinpath("qlib_data")
freq: str
freq, value from ["day", "1min"]
request_retry: int
request retry, by default 5
retry_sleep: int
request sleep, by default 3
|
__init__
|
python
|
microsoft/qlib
|
scripts/data_collector/index.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/index.py
|
MIT
|
def save_new_companies(self):
"""save new companies
Examples
-------
$ python collector.py save_new_companies --index_name CSI300 --qlib_dir ~/.qlib/qlib_data/cn_data
"""
df = self.get_new_companies()
if df is None or df.empty:
raise ValueError(f"get new companies error: {self.index_name}")
df = df.drop_duplicates([self.SYMBOL_FIELD_NAME])
df.loc[:, self.INSTRUMENTS_COLUMNS].to_csv(
self.instruments_dir.joinpath(f"{self.index_name.lower()}_only_new.txt"), sep="\t", index=False, header=None
)
|
save new companies
Examples
-------
$ python collector.py save_new_companies --index_name CSI300 --qlib_dir ~/.qlib/qlib_data/cn_data
|
save_new_companies
|
python
|
microsoft/qlib
|
scripts/data_collector/index.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/index.py
|
MIT
|
def get_changes_with_history_companies(self, history_companies: pd.DataFrame) -> pd.DataFrame:
"""get changes with history companies
Parameters
----------
history_companies : pd.DataFrame
symbol date
SH600000 2020-11-11
dtypes:
symbol: str
date: pd.Timestamp
Return
--------
pd.DataFrame:
symbol date type
SH600000 2019-11-11 add
SH600000 2020-11-10 remove
dtypes:
symbol: str
date: pd.Timestamp
type: str, value from ["add", "remove"]
"""
logger.info("parse changes from history companies......")
last_code = []
result_df_list = []
_columns = [self.DATE_FIELD_NAME, self.SYMBOL_FIELD_NAME, self.CHANGE_TYPE_FIELD]
for _trading_date in tqdm(sorted(history_companies[self.DATE_FIELD_NAME].unique(), reverse=True)):
_currenet_code = history_companies[history_companies[self.DATE_FIELD_NAME] == _trading_date][
self.SYMBOL_FIELD_NAME
].tolist()
if last_code:
add_code = list(set(last_code) - set(_currenet_code))
remote_code = list(set(_currenet_code) - set(last_code))
for _code in add_code:
result_df_list.append(
pd.DataFrame(
[[get_trading_date_by_shift(self.calendar_list, _trading_date, 1), _code, self.ADD]],
columns=_columns,
)
)
for _code in remote_code:
result_df_list.append(
pd.DataFrame(
[[get_trading_date_by_shift(self.calendar_list, _trading_date, 0), _code, self.REMOVE]],
columns=_columns,
)
)
last_code = _currenet_code
df = pd.concat(result_df_list)
logger.info("end of parse changes from history companies.")
return df
|
get changes with history companies
Parameters
----------
history_companies : pd.DataFrame
symbol date
SH600000 2020-11-11
dtypes:
symbol: str
date: pd.Timestamp
Return
--------
pd.DataFrame:
symbol date type
SH600000 2019-11-11 add
SH600000 2020-11-10 remove
dtypes:
symbol: str
date: pd.Timestamp
type: str, value from ["add", "remove"]
|
get_changes_with_history_companies
|
python
|
microsoft/qlib
|
scripts/data_collector/index.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/index.py
|
MIT
|
def parse_instruments(self):
"""parse instruments, eg: csi300.txt
Examples
-------
$ python collector.py parse_instruments --index_name CSI300 --qlib_dir ~/.qlib/qlib_data/cn_data
"""
logger.info(f"start parse {self.index_name.lower()} companies.....")
instruments_columns = [self.SYMBOL_FIELD_NAME, self.START_DATE_FIELD, self.END_DATE_FIELD]
changers_df = self.get_changes()
new_df = self.get_new_companies()
if new_df is None or new_df.empty:
raise ValueError(f"get new companies error: {self.index_name}")
new_df = new_df.copy()
logger.info("parse history companies by changes......")
for _row in tqdm(changers_df.sort_values(self.DATE_FIELD_NAME, ascending=False).itertuples(index=False)):
if _row.type == self.ADD:
min_end_date = new_df.loc[new_df[self.SYMBOL_FIELD_NAME] == _row.symbol, self.END_DATE_FIELD].min()
new_df.loc[
(new_df[self.END_DATE_FIELD] == min_end_date) & (new_df[self.SYMBOL_FIELD_NAME] == _row.symbol),
self.START_DATE_FIELD,
] = _row.date
else:
_tmp_df = pd.DataFrame([[_row.symbol, self.bench_start_date, _row.date]], columns=instruments_columns)
new_df = pd.concat([new_df, _tmp_df], sort=False)
inst_df = new_df.loc[:, instruments_columns]
_inst_prefix = self.INST_PREFIX.strip()
if _inst_prefix:
inst_df["save_inst"] = inst_df[self.SYMBOL_FIELD_NAME].apply(lambda x: f"{_inst_prefix}{x}")
inst_df = self.format_datetime(inst_df)
inst_df.to_csv(
self.instruments_dir.joinpath(f"{self.index_name.lower()}.txt"), sep="\t", index=False, header=None
)
logger.info(f"parse {self.index_name.lower()} companies finished.")
|
parse instruments, e.g. csi300.txt
Examples
-------
$ python collector.py parse_instruments --index_name CSI300 --qlib_dir ~/.qlib/qlib_data/cn_data
|
parse_instruments
|
python
|
microsoft/qlib
|
scripts/data_collector/index.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/index.py
|
MIT
|
def get_calendar_list(bench_code="CSI300") -> List[pd.Timestamp]:
"""get SH/SZ history calendar list
Parameters
----------
bench_code: str
value from ["CSI300", "CSI500", "ALL", "US_ALL"]
Returns
-------
history calendar list
"""
logger.info(f"get calendar list: {bench_code}......")
def _get_calendar(url):
_value_list = requests.get(url, timeout=None).json()["data"]["klines"]
return sorted(map(lambda x: pd.Timestamp(x.split(",")[0]), _value_list))
calendar = _CALENDAR_MAP.get(bench_code, None)
if calendar is None:
if bench_code.startswith("US_") or bench_code.startswith("IN_") or bench_code.startswith("BR_"):
print(Ticker(CALENDAR_BENCH_URL_MAP[bench_code]))
print(Ticker(CALENDAR_BENCH_URL_MAP[bench_code]).history(interval="1d", period="max"))
df = Ticker(CALENDAR_BENCH_URL_MAP[bench_code]).history(interval="1d", period="max")
calendar = df.index.get_level_values(level="date").map(pd.Timestamp).unique().tolist()
else:
if bench_code.upper() == "ALL":
@deco_retry
def _get_calendar(month):
_cal = []
try:
resp = requests.get(
SZSE_CALENDAR_URL.format(month=month, random=random.random), timeout=None
).json()
for _r in resp["data"]:
if int(_r["jybz"]):
_cal.append(pd.Timestamp(_r["jyrq"]))
except Exception as e:
raise ValueError(f"{month}-->{e}") from e
return _cal
month_range = pd.date_range(start="2000-01", end=pd.Timestamp.now() + pd.Timedelta(days=31), freq="M")
calendar = []
for _m in month_range:
cal = _get_calendar(_m.strftime("%Y-%m"))
if cal:
calendar += cal
calendar = list(filter(lambda x: x <= pd.Timestamp.now(), calendar))
else:
calendar = _get_calendar(CALENDAR_BENCH_URL_MAP[bench_code])
_CALENDAR_MAP[bench_code] = calendar
logger.info(f"end of get calendar list: {bench_code}.")
return calendar
|
get SH/SZ history calendar list
Parameters
----------
bench_code: str
value from ["CSI300", "CSI500", "ALL", "US_ALL"]
Returns
-------
history calendar list
|
get_calendar_list
|
python
|
microsoft/qlib
|
scripts/data_collector/utils.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/utils.py
|
MIT
|
def get_calendar_list_by_ratio(
source_dir: [str, Path],
date_field_name: str = "date",
threshold: float = 0.5,
minimum_count: int = 10,
max_workers: int = 16,
) -> list:
"""get calendar list by selecting the date when few funds trade in this day
Parameters
----------
source_dir: str or Path
The directory where the raw data collected from the Internet is saved
date_field_name: str
date field name, default is date
threshold: float
threshold for excluding days on which few funds traded, default 0.5
minimum_count: int
minimum count of funds should trade in one day
max_workers: int
Concurrent number, default is 16
Returns
-------
history calendar list
"""
logger.info(f"get calendar list from {source_dir} by threshold = {threshold}......")
source_dir = Path(source_dir).expanduser()
file_list = list(source_dir.glob("*.csv"))
_number_all_funds = len(file_list)
logger.info(f"count how many funds trade in this day......")
_dict_count_trade = dict() # dict{date:count}
_fun = partial(return_date_list, date_field_name)
all_oldest_list = []
with tqdm(total=_number_all_funds) as p_bar:
with ProcessPoolExecutor(max_workers=max_workers) as executor:
for date_list in executor.map(_fun, file_list):
if date_list:
all_oldest_list.append(date_list[0])
for date in date_list:
if date not in _dict_count_trade:
_dict_count_trade[date] = 0
_dict_count_trade[date] += 1
p_bar.update()
logger.info(f"count how many funds have founded in this day......")
_dict_count_founding = {date: _number_all_funds for date in _dict_count_trade} # dict{date:count}
with tqdm(total=_number_all_funds) as p_bar:
for oldest_date in all_oldest_list:
for date in _dict_count_founding.keys():
if date < oldest_date:
_dict_count_founding[date] -= 1
calendar = [
date for date, count in _dict_count_trade.items() if count >= max(int(_dict_count_founding[date] * threshold), minimum_count)
]
return calendar
|
get calendar list by excluding the dates on which few funds traded
Parameters
----------
source_dir: str or Path
The directory where the raw data collected from the Internet is saved
date_field_name: str
date field name, default is date
threshold: float
threshold for excluding days on which few funds traded, default 0.5
minimum_count: int
minimum count of funds should trade in one day
max_workers: int
Concurrent number, default is 16
Returns
-------
history calendar list
|
get_calendar_list_by_ratio
|
python
|
microsoft/qlib
|
scripts/data_collector/utils.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/utils.py
|
MIT
|
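A toy illustration of the filtering rule in `get_calendar_list_by_ratio`, assuming the threshold is applied to the number of funds already founded on each date (as in the expression above); the counts are invented:
threshold, minimum_count = 0.5, 10
count_trade = {"2020-01-02": 30, "2020-01-03": 4, "2020-01-06": 12}      # funds that traded
count_founding = {"2020-01-02": 40, "2020-01-03": 40, "2020-01-06": 20}  # funds already founded

calendar = [
    d for d, c in count_trade.items()
    if c >= max(int(count_founding[d] * threshold), minimum_count)
]
print(calendar)  # ['2020-01-02', '2020-01-06']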
def get_hs_stock_symbols() -> list:
"""get SH/SZ stock symbols
Returns
-------
stock symbols
"""
global _HS_SYMBOLS # pylint: disable=W0603
def _get_symbol():
"""
Get the stock pool from a web page and process it into the format required by yahooquery.
Format of data retrieved from the web page: 600519, 000001
The data format required by yahooquery: 600519.ss, 000001.sz
Returns
-------
set: Returns the set of symbol codes.
Examples:
-------
{600000.ss, 600001.ss, 600002.ss, 600003.ss, ...}
"""
# url = "http://99.push2.eastmoney.com/api/qt/clist/get?pn=1&pz=10000&po=1&np=1&fs=m:0+t:6,m:0+t:80,m:1+t:2,m:1+t:23,m:0+t:81+s:2048&fields=f12"
base_url = "http://99.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": 1, # page number
"pz": 100, # page size, default to 100
"po": 1,
"np": 1,
"fs": "m:0+t:6,m:0+t:80,m:1+t:2,m:1+t:23,m:0+t:81+s:2048",
"fields": "f12",
}
_symbols = []
page = 1
while True:
params["pn"] = page
try:
resp = requests.get(base_url, params=params, timeout=None)
resp.raise_for_status()
data = resp.json()
# Check if response contains valid data
if not data or "data" not in data or not data["data"] or "diff" not in data["data"]:
logger.warning(f"Invalid response structure on page {page}")
break
# fetch the current page data
current_symbols = [_v["f12"] for _v in data["data"]["diff"]]
if not current_symbols: # It's the last page if there is no data in current page
logger.info(f"Last page reached: {page - 1}")
break
_symbols.extend(current_symbols)
# show progress
logger.info(
f"Page {page}: fetch {len(current_symbols)} stocks:[{current_symbols[0]} ... {current_symbols[-1]}]"
)
page += 1
# sleep time to avoid overloading the server
time.sleep(0.5)
except requests.exceptions.HTTPError as e:
raise requests.exceptions.HTTPError(
f"Request to {base_url} failed with status code {resp.status_code}"
) from e
except Exception as e:
logger.warning("An error occurred while extracting data from the response.")
raise
if len(_symbols) < 3900:
raise ValueError("The complete list of stocks is not available.")
# Add suffix after the stock code to conform to yahooquery standard, otherwise the data will not be fetched.
_symbols = [
_symbol + ".ss" if _symbol.startswith("6") else _symbol + ".sz" if _symbol.startswith(("0", "3")) else None
for _symbol in _symbols
]
_symbols = [_symbol for _symbol in _symbols if _symbol is not None]
return set(_symbols)
if _HS_SYMBOLS is None:
symbols = set()
_retry = 60
# It may take multiple attempts to get the complete symbol list
while len(symbols) < MINIMUM_SYMBOLS_NUM:
symbols |= _get_symbol()
time.sleep(3)
symbol_cache_path = Path("~/.cache/hs_symbols_cache.pkl").expanduser().resolve()
symbol_cache_path.parent.mkdir(parents=True, exist_ok=True)
if symbol_cache_path.exists():
with symbol_cache_path.open("rb") as fp:
cache_symbols = pickle.load(fp)
symbols |= cache_symbols
with symbol_cache_path.open("wb") as fp:
pickle.dump(symbols, fp)
_HS_SYMBOLS = sorted(list(symbols))
return _HS_SYMBOLS
|
get SH/SZ stock symbols
Returns
-------
stock symbols
|
get_hs_stock_symbols
|
python
|
microsoft/qlib
|
scripts/data_collector/utils.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/utils.py
|
MIT
|
def _get_symbol():
"""
Get the stock pool from a web page and process it into the format required by yahooquery.
Format of data retrieved from the web page: 600519, 000001
The data format required by yahooquery: 600519.ss, 000001.sz
Returns
-------
set: Returns the set of symbol codes.
Examples:
-------
{600000.ss, 600001.ss, 600002.ss, 600003.ss, ...}
"""
# url = "http://99.push2.eastmoney.com/api/qt/clist/get?pn=1&pz=10000&po=1&np=1&fs=m:0+t:6,m:0+t:80,m:1+t:2,m:1+t:23,m:0+t:81+s:2048&fields=f12"
base_url = "http://99.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": 1, # page number
"pz": 100, # page size, default to 100
"po": 1,
"np": 1,
"fs": "m:0+t:6,m:0+t:80,m:1+t:2,m:1+t:23,m:0+t:81+s:2048",
"fields": "f12",
}
_symbols = []
page = 1
while True:
params["pn"] = page
try:
resp = requests.get(base_url, params=params, timeout=None)
resp.raise_for_status()
data = resp.json()
# Check if response contains valid data
if not data or "data" not in data or not data["data"] or "diff" not in data["data"]:
logger.warning(f"Invalid response structure on page {page}")
break
# fetch the current page data
current_symbols = [_v["f12"] for _v in data["data"]["diff"]]
if not current_symbols: # It's the last page if there is no data in current page
logger.info(f"Last page reached: {page - 1}")
break
_symbols.extend(current_symbols)
# show progress
logger.info(
f"Page {page}: fetch {len(current_symbols)} stocks:[{current_symbols[0]} ... {current_symbols[-1]}]"
)
page += 1
# sleep time to avoid overloading the server
time.sleep(0.5)
except requests.exceptions.HTTPError as e:
raise requests.exceptions.HTTPError(
f"Request to {base_url} failed with status code {resp.status_code}"
) from e
except Exception as e:
logger.warning("An error occurred while extracting data from the response.")
raise
if len(_symbols) < 3900:
raise ValueError("The complete list of stocks is not available.")
# Add suffix after the stock code to conform to yahooquery standard, otherwise the data will not be fetched.
_symbols = [
_symbol + ".ss" if _symbol.startswith("6") else _symbol + ".sz" if _symbol.startswith(("0", "3")) else None
for _symbol in _symbols
]
_symbols = [_symbol for _symbol in _symbols if _symbol is not None]
return set(_symbols)
|
Get the stock pool from a web page and process it into the format required by yahooquery.
Format of data retrieved from the web page: 600519, 000001
The data format required by yahooquery: 600519.ss, 000001.sz
Returns
-------
set: Returns the set of symbol codes.
Examples:
-------
{600000.ss, 600001.ss, 600002.ss, 600003.ss, ...}
|
_get_symbol
|
python
|
microsoft/qlib
|
scripts/data_collector/utils.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/utils.py
|
MIT
|
def get_us_stock_symbols(qlib_data_path: [str, Path] = None) -> list:
"""get US stock symbols
Returns
-------
stock symbols
"""
global _US_SYMBOLS # pylint: disable=W0603
@deco_retry
def _get_eastmoney():
url = "http://4.push2.eastmoney.com/api/qt/clist/get?pn=1&pz=10000&fs=m:105,m:106,m:107&fields=f12"
resp = requests.get(url, timeout=None)
if resp.status_code != 200:
raise ValueError("request error")
try:
_symbols = [_v["f12"].replace("_", "-P") for _v in resp.json()["data"]["diff"].values()]
except Exception as e:
logger.warning(f"request error: {e}")
raise
if len(_symbols) < 8000:
raise ValueError("request error")
return _symbols
@deco_retry
def _get_nasdaq():
_res_symbols = []
for _name in ["otherlisted", "nasdaqtraded"]:
url = f"ftp://ftp.nasdaqtrader.com/SymbolDirectory/{_name}.txt"
df = pd.read_csv(url, sep="|")
df = df.rename(columns={"ACT Symbol": "Symbol"})
_symbols = df["Symbol"].dropna()
_symbols = _symbols.str.replace("$", "-P", regex=False)
_symbols = _symbols.str.replace(".W", "-WT", regex=False)
_symbols = _symbols.str.replace(".U", "-UN", regex=False)
_symbols = _symbols.str.replace(".R", "-RI", regex=False)
_symbols = _symbols.str.replace(".", "-", regex=False)
_res_symbols += _symbols.unique().tolist()
return _res_symbols
@deco_retry
def _get_nyse():
url = "https://www.nyse.com/api/quotes/filter"
_parms = {
"instrumentType": "EQUITY",
"pageNumber": 1,
"sortColumn": "NORMALIZED_TICKER",
"sortOrder": "ASC",
"maxResultsPerPage": 10000,
"filterToken": "",
}
resp = requests.post(url, json=_parms, timeout=None)
if resp.status_code != 200:
raise ValueError("request error")
try:
_symbols = [_v["symbolTicker"].replace("-", "-P") for _v in resp.json()]
except Exception as e:
logger.warning(f"request error: {e}")
_symbols = []
return _symbols
if _US_SYMBOLS is None:
_all_symbols = _get_eastmoney() + _get_nasdaq() + _get_nyse()
if qlib_data_path is not None:
for _index in ["nasdaq100", "sp500"]:
ins_df = pd.read_csv(
Path(qlib_data_path).joinpath(f"instruments/{_index}.txt"),
sep="\t",
names=["symbol", "start_date", "end_date"],
)
_all_symbols += ins_df["symbol"].unique().tolist()
def _format(s_):
s_ = s_.replace(".", "-")
s_ = s_.strip("$")
s_ = s_.strip("*")
return s_
_US_SYMBOLS = sorted(set(map(_format, filter(lambda x: len(x) < 8 and not x.endswith("WS"), _all_symbols))))
return _US_SYMBOLS
|
get US stock symbols
Returns
-------
stock symbols
|
get_us_stock_symbols
|
python
|
microsoft/qlib
|
scripts/data_collector/utils.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/utils.py
|
MIT
|
def get_in_stock_symbols(qlib_data_path: [str, Path] = None) -> list:
"""get IN stock symbols
Returns
-------
stock symbols
"""
global _IN_SYMBOLS # pylint: disable=W0603
@deco_retry
def _get_nifty():
url = f"https://www1.nseindia.com/content/equities/EQUITY_L.csv"
df = pd.read_csv(url)
df = df.rename(columns={"SYMBOL": "Symbol"})
df["Symbol"] = df["Symbol"] + ".NS"
_symbols = df["Symbol"].dropna()
_symbols = _symbols.unique().tolist()
return _symbols
if _IN_SYMBOLS is None:
_all_symbols = _get_nifty()
if qlib_data_path is not None:
for _index in ["nifty"]:
ins_df = pd.read_csv(
Path(qlib_data_path).joinpath(f"instruments/{_index}.txt"),
sep="\t",
names=["symbol", "start_date", "end_date"],
)
_all_symbols += ins_df["symbol"].unique().tolist()
def _format(s_):
s_ = s_.replace(".", "-")
s_ = s_.strip("$")
s_ = s_.strip("*")
return s_
_IN_SYMBOLS = sorted(set(_all_symbols))
return _IN_SYMBOLS
|
get IN stock symbols
Returns
-------
stock symbols
|
get_in_stock_symbols
|
python
|
microsoft/qlib
|
scripts/data_collector/utils.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/utils.py
|
MIT
|
def get_br_stock_symbols(qlib_data_path: [str, Path] = None) -> list:
"""get Brazil(B3) stock symbols
Returns
-------
B3 stock symbols
"""
global _BR_SYMBOLS # pylint: disable=W0603
@deco_retry
def _get_ibovespa():
_symbols = []
url = "https://www.fundamentus.com.br/detalhes.php?papel="
# Request
agent = {"User-Agent": "Mozilla/5.0"}
page = requests.get(url, headers=agent, timeout=None)
# BeautifulSoup
soup = BeautifulSoup(page.content, "html.parser")
tbody = soup.find("tbody")
children = tbody.findChildren("a", recursive=True)
for child in children:
_symbols.append(str(child).rsplit('"', maxsplit=1)[-1].split(">")[1].split("<")[0])
return _symbols
if _BR_SYMBOLS is None:
_all_symbols = _get_ibovespa()
if qlib_data_path is not None:
for _index in ["ibov"]:
ins_df = pd.read_csv(
Path(qlib_data_path).joinpath(f"instruments/{_index}.txt"),
sep="\t",
names=["symbol", "start_date", "end_date"],
)
_all_symbols += ins_df["symbol"].unique().tolist()
def _format(s_):
s_ = s_.strip()
s_ = s_.strip("$")
s_ = s_.strip("*")
s_ = s_ + ".SA"
return s_
_BR_SYMBOLS = sorted(set(map(_format, _all_symbols)))
return _BR_SYMBOLS
|
get Brazil(B3) stock symbols
Returns
-------
B3 stock symbols
|
get_br_stock_symbols
|
python
|
microsoft/qlib
|
scripts/data_collector/utils.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/utils.py
|
MIT
|
def symbol_suffix_to_prefix(symbol: str, capital: bool = True) -> str:
"""symbol suffix to prefix
Parameters
----------
symbol: str
symbol
capital : bool
by default True
Returns
-------
"""
code, exchange = symbol.split(".")
if exchange.lower() in ["sh", "ss"]:
res = f"sh{code}"
else:
res = f"{exchange}{code}"
return res.upper() if capital else res.lower()
|
symbol suffix to prefix
Parameters
----------
symbol: str
symbol
capital : bool
by default True
Returns
-------
|
symbol_suffix_to_prefix
|
python
|
microsoft/qlib
|
scripts/data_collector/utils.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/utils.py
|
MIT
|
def symbol_prefix_to_sufix(symbol: str, capital: bool = True) -> str:
"""symbol prefix to sufix
Parameters
----------
symbol: str
symbol
capital : bool
by default True
Returns
-------
"""
res = f"{symbol[:-2]}.{symbol[-2:]}"
return res.upper() if capital else res.lower()
|
symbol prefix to suffix
Parameters
----------
symbol: str
symbol
capital : bool
by default True
Returns
-------
|
symbol_prefix_to_sufix
|
python
|
microsoft/qlib
|
scripts/data_collector/utils.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/utils.py
|
MIT
|
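A short round-trip of the two converters above, with expected outputs derived from the implementations as written (note that `symbol_prefix_to_sufix`, as written, takes the exchange code from the last two characters of its input):
print(symbol_suffix_to_prefix("600519.SS"))                  # "SH600519"
print(symbol_suffix_to_prefix("000001.SZ", capital=False))   # "sz000001"
print(symbol_prefix_to_sufix("600519SH"))                    # "600519.SH"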
def get_trading_date_by_shift(trading_list: list, trading_date: pd.Timestamp, shift: int = 1):
"""get trading date by shift
Parameters
----------
trading_list: list
trading calendar list
shift : int
shift, default is 1
trading_date : pd.Timestamp
trading date
Returns
-------
"""
trading_date = pd.Timestamp(trading_date)
left_index = bisect.bisect_left(trading_list, trading_date)
try:
res = trading_list[left_index + shift]
except IndexError:
res = trading_date
return res
|
get trading date by shift
Parameters
----------
trading_list: list
trading calendar list
shift : int
shift, default is 1
trading_date : pd.Timestamp
trading date
Returns
-------
|
get_trading_date_by_shift
|
python
|
microsoft/qlib
|
scripts/data_collector/utils.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/utils.py
|
MIT
|
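A minimal sketch of the bisect lookup behind `get_trading_date_by_shift`, on a made-up calendar:
import bisect
import pandas as pd

trading_list = [pd.Timestamp(d) for d in ["2020-11-09", "2020-11-10", "2020-11-11", "2020-11-12"]]
d = pd.Timestamp("2020-11-10")
idx = bisect.bisect_left(trading_list, d)      # 1
print(trading_list[idx + 1])                   # next trading day: 2020-11-11
print(trading_list[idx - 1])                   # previous trading day: 2020-11-09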
def generate_minutes_calendar_from_daily(
calendars: Iterable,
freq: str = "1min",
am_range: Tuple[str, str] = ("09:30:00", "11:29:00"),
pm_range: Tuple[str, str] = ("13:00:00", "14:59:00"),
) -> pd.Index:
"""generate minutes calendar
Parameters
----------
calendars: Iterable
daily calendar
freq: str
by default 1min
am_range: Tuple[str, str]
AM Time Range, by default China-Stock: ("09:30:00", "11:29:00")
pm_range: Tuple[str, str]
PM Time Range, by default China-Stock: ("13:00:00", "14:59:00")
"""
daily_format: str = "%Y-%m-%d"
res = []
for _day in calendars:
for _range in [am_range, pm_range]:
res.append(
pd.date_range(
f"{pd.Timestamp(_day).strftime(daily_format)} {_range[0]}",
f"{pd.Timestamp(_day).strftime(daily_format)} {_range[1]}",
freq=freq,
)
)
return pd.Index(sorted(set(np.hstack(res))))
|
generate minutes calendar
Parameters
----------
calendars: Iterable
daily calendar
freq: str
by default 1min
am_range: Tuple[str, str]
AM Time Range, by default China-Stock: ("09:30:00", "11:29:00")
pm_range: Tuple[str, str]
PM Time Range, by default China-Stock: ("13:00:00", "14:59:00")
|
generate_minutes_calendar_from_daily
|
python
|
microsoft/qlib
|
scripts/data_collector/utils.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/utils.py
|
MIT
|
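A short usage sketch for generate_minutes_calendar_from_daily with two arbitrary daily dates and the default China-Stock session ranges (import path assumed as above):
import pandas as pd
from data_collector.utils import generate_minutes_calendar_from_daily

daily = pd.to_datetime(["2022-01-04", "2022-01-05"])
minute_index = generate_minutes_calendar_from_daily(daily, freq="1min")
# 120 AM bars + 120 PM bars per day -> 480 timestamps for two days
print(len(minute_index), minute_index[0], minute_index[-1])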
def get_instruments(
qlib_dir: str,
index_name: str,
method: str = "parse_instruments",
freq: str = "day",
request_retry: int = 5,
retry_sleep: int = 3,
market_index: str = "cn_index",
):
"""
Parameters
----------
qlib_dir: str
qlib data dir, default "Path(__file__).parent/qlib_data"
index_name: str
index name, value from ["csi100", "csi300"]
method: str
method, value from ["parse_instruments", "save_new_companies"]
freq: str
freq, value from ["day", "1min"]
request_retry: int
request retry, by default 5
retry_sleep: int
request sleep, by default 3
market_index: str
Where the files to obtain the index are located,
for example data_collector.cn_index.collector
Examples
-------
# parse instruments
$ python collector.py --index_name CSI300 --qlib_dir ~/.qlib/qlib_data/cn_data --method parse_instruments
# parse new companies
$ python collector.py --index_name CSI300 --qlib_dir ~/.qlib/qlib_data/cn_data --method save_new_companies
"""
_cur_module = importlib.import_module("data_collector.{}.collector".format(market_index))
obj = getattr(_cur_module, f"{index_name.upper()}Index")(
qlib_dir=qlib_dir, index_name=index_name, freq=freq, request_retry=request_retry, retry_sleep=retry_sleep
)
getattr(obj, method)()
|
Parameters
----------
qlib_dir: str
qlib data dir, default "Path(__file__).parent/qlib_data"
index_name: str
index name, value from ["csi100", "csi300"]
method: str
method, value from ["parse_instruments", "save_new_companies"]
freq: str
freq, value from ["day", "1min"]
request_retry: int
request retry, by default 5
retry_sleep: int
request sleep, by default 3
market_index: str
Where the files to obtain the index are located,
for example data_collector.cn_index.collector
Examples
-------
# parse instruments
$ python collector.py --index_name CSI300 --qlib_dir ~/.qlib/qlib_data/cn_data --method parse_instruments
# parse new companies
$ python collector.py --index_name CSI300 --qlib_dir ~/.qlib/qlib_data/cn_data --method save_new_companies
|
get_instruments
|
python
|
microsoft/qlib
|
scripts/data_collector/utils.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/utils.py
|
MIT
|
def get_1d_data(
_date_field_name: str,
_symbol_field_name: str,
symbol: str,
start: str,
end: str,
_1d_data_all: pd.DataFrame,
) -> pd.DataFrame:
"""get 1d data
Returns
------
data_1d: pd.DataFrame
data_1d.columns = [_date_field_name, _symbol_field_name, "paused", "volume", "factor", "close"]
"""
_all_1d_data = _get_all_1d_data(_date_field_name, _symbol_field_name, _1d_data_all)
return _all_1d_data[
(_all_1d_data[_symbol_field_name] == symbol.upper())
& (_all_1d_data[_date_field_name] >= pd.Timestamp(start))
& (_all_1d_data[_date_field_name] < pd.Timestamp(end))
]
|
get 1d data
Returns
------
data_1d: pd.DataFrame
data_1d.columns = [_date_field_name, _symbol_field_name, "paused", "volume", "factor", "close"]
|
get_1d_data
|
python
|
microsoft/qlib
|
scripts/data_collector/utils.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/utils.py
|
MIT
|
def calc_adjusted_price(
df: pd.DataFrame,
_1d_data_all: pd.DataFrame,
_date_field_name: str,
_symbol_field_name: str,
frequence: str,
consistent_1d: bool = True,
calc_paused: bool = True,
) -> pd.DataFrame:
"""calc adjusted price
This method does 4 things.
1. Adds the `paused` field.
- The added paused field comes from the paused field of the 1d data.
2. Aligns the time of the 1d data.
3. The data is reweighted.
- The reweighting method:
- volume / factor
- open * factor
- high * factor
- low * factor
- close * factor
4. Calls the `calc_paused_num` method to add the `paused_num` field.
- The `paused_num` is the number of consecutive days of trading suspension.
"""
# TODO: using daily data factor
if df.empty:
return df
df = df.copy()
df.drop_duplicates(subset=_date_field_name, inplace=True)
df.sort_values(_date_field_name, inplace=True)
symbol = df.iloc[0][_symbol_field_name]
df[_date_field_name] = pd.to_datetime(df[_date_field_name])
# get 1d data from qlib
_start = pd.Timestamp(df[_date_field_name].min()).strftime("%Y-%m-%d")
_end = (pd.Timestamp(df[_date_field_name].max()) + pd.Timedelta(days=1)).strftime("%Y-%m-%d")
data_1d: pd.DataFrame = get_1d_data(_date_field_name, _symbol_field_name, symbol, _start, _end, _1d_data_all)
data_1d = data_1d.copy()
if data_1d is None or data_1d.empty:
df["factor"] = 1 / df.loc[df["close"].first_valid_index()]["close"]
# TODO: np.nan or 1 or 0
df["paused"] = np.nan
else:
# NOTE: volume is np.nan or volume <= 0, paused = 1
# FIXME: find a more accurate data source
data_1d["paused"] = 0
data_1d.loc[(data_1d["volume"].isna()) | (data_1d["volume"] <= 0), "paused"] = 1
data_1d = data_1d.set_index(_date_field_name)
# add factor from 1d data
# NOTE: 1d data info:
# - Close price adjusted for splits. Adjusted close price adjusted for both dividends and splits.
# - data_1d.adjclose: Adjusted close price adjusted for both dividends and splits.
# - data_1d.close: `data_1d.adjclose / (close for the first trading day that is not np.nan)`
def _calc_factor(df_1d: pd.DataFrame):
try:
_date = pd.Timestamp(pd.Timestamp(df_1d[_date_field_name].iloc[0]).date())
df_1d["factor"] = data_1d.loc[_date]["close"] / df_1d.loc[df_1d["close"].last_valid_index()]["close"]
df_1d["paused"] = data_1d.loc[_date]["paused"]
except Exception:
df_1d["factor"] = np.nan
df_1d["paused"] = np.nan
return df_1d
df = df.groupby([df[_date_field_name].dt.date], group_keys=False).apply(_calc_factor)
if consistent_1d:
# the date sequence is consistent with 1d
df.set_index(_date_field_name, inplace=True)
df = df.reindex(
generate_minutes_calendar_from_daily(
calendars=pd.to_datetime(data_1d.reset_index()[_date_field_name].drop_duplicates()),
freq=frequence,
am_range=("09:30:00", "11:29:00"),
pm_range=("13:00:00", "14:59:00"),
)
)
df[_symbol_field_name] = df.loc[df[_symbol_field_name].first_valid_index()][_symbol_field_name]
df.index.names = [_date_field_name]
df.reset_index(inplace=True)
for _col in ["open", "close", "high", "low", "volume"]:
if _col not in df.columns:
continue
if _col == "volume":
df[_col] = df[_col] / df["factor"]
else:
df[_col] = df[_col] * df["factor"]
if calc_paused:
df = calc_paused_num(df, _date_field_name, _symbol_field_name)
return df
|
calc adjusted price
This method does 4 things.
1. Adds the `paused` field.
- The added paused field comes from the paused field of the 1d data.
2. Aligns the time of the 1d data.
3. The data is reweighted.
- The reweighting method:
- volume / factor
- open * factor
- high * factor
- low * factor
- close * factor
4. Calls the `calc_paused_num` method to add the `paused_num` field.
- The `paused_num` is the number of consecutive days of trading suspension.
|
calc_adjusted_price
|
python
|
microsoft/qlib
|
scripts/data_collector/utils.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/utils.py
|
MIT
|
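The reweighting rule in step 3 can be illustrated on a toy DataFrame; this is only a sketch of the factor arithmetic, not a substitute for calc_adjusted_price, which also needs the 1d qlib data (the numbers are made up):
import pandas as pd

df = pd.DataFrame({"open": [10.0, 10.2], "close": [10.1, 10.3], "volume": [1000.0, 1200.0], "factor": [0.5, 0.5]})
for _col in ["open", "close", "volume"]:
    if _col == "volume":
        df[_col] = df[_col] / df["factor"]   # volume is divided by the factor
    else:
        df[_col] = df[_col] * df["factor"]   # prices are multiplied by the factor
print(df)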
def calc_paused_num(df: pd.DataFrame, _date_field_name, _symbol_field_name):
"""calc paused num
This method adds the paused_num field
- The `paused_num` is the number of consecutive days of trading suspension.
"""
_symbol = df.iloc[0][_symbol_field_name]
df = df.copy()
df["_tmp_date"] = df[_date_field_name].apply(lambda x: pd.Timestamp(x).date())
# remove data that starts and ends with `np.nan` all day
all_data = []
# Record the number of consecutive trading days where the whole day is nan, to remove the last trading day where the whole day is nan
all_nan_nums = 0
# Record the number of consecutive occurrences of trading days that are not nan throughout the day
not_nan_nums = 0
for _date, _df in df.groupby("_tmp_date", group_keys=False):
_df["paused"] = 0
if not _df.loc[_df["volume"] < 0].empty:
logger.warning(f"volume < 0, will fill np.nan: {_date} {_symbol}")
_df.loc[_df["volume"] < 0, "volume"] = np.nan
check_fields = set(_df.columns) - {
"_tmp_date",
"paused",
"factor",
_date_field_name,
_symbol_field_name,
}
if _df.loc[:, list(check_fields)].isna().values.all() or (_df["volume"] == 0).all():
all_nan_nums += 1
not_nan_nums = 0
_df["paused"] = 1
if all_data:
_df["paused_num"] = not_nan_nums
all_data.append(_df)
else:
all_nan_nums = 0
not_nan_nums += 1
_df["paused_num"] = not_nan_nums
all_data.append(_df)
all_data = all_data[: len(all_data) - all_nan_nums]
if all_data:
df = pd.concat(all_data, sort=False)
else:
logger.warning(f"data is empty: {_symbol}")
df = pd.DataFrame()
return df
del df["_tmp_date"]
return df
|
calc paused num
This method adds the paused_num field
- The `paused_num` is the number of consecutive days of trading suspension.
|
calc_paused_num
|
python
|
microsoft/qlib
|
scripts/data_collector/utils.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/utils.py
|
MIT
|
def __init__(
self, qlib_data_1d_dir: [str, Path], date_field_name: str = "date", symbol_field_name: str = "symbol", **kwargs
):
"""
Parameters
----------
qlib_data_1d_dir: str, Path
the qlib data to be updated for yahoo, usually from: Normalised to 5min using local 1d data
date_field_name: str
date field name, default is date
symbol_field_name: str
symbol field name, default is symbol
"""
bs.login()
qlib.init(provider_uri=qlib_data_1d_dir)
self.all_1d_data = D.features(D.instruments("all"), ["$paused", "$volume", "$factor", "$close"], freq="day")
super(BaostockNormalizeHS3005min, self).__init__(date_field_name, symbol_field_name)
|
Parameters
----------
qlib_data_1d_dir: str, Path
the qlib data to be updated for yahoo, usually from: Normalised to 5min using local 1d data
date_field_name: str
date field name, default is date
symbol_field_name: str
symbol field name, default is symbol
|
__init__
|
python
|
microsoft/qlib
|
scripts/data_collector/baostock_5min/collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/baostock_5min/collector.py
|
MIT
|
def __init__(self, source_dir=None, normalize_dir=None, max_workers=1, interval="5min", region="HS300"):
"""
Changed the default value of: scripts.data_collector.base.BaseRun.
"""
super().__init__(source_dir, normalize_dir, max_workers, interval)
self.region = region
|
Changed the default value of: scripts.data_collector.base.BaseRun.
|
__init__
|
python
|
microsoft/qlib
|
scripts/data_collector/baostock_5min/collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/baostock_5min/collector.py
|
MIT
|
def download_data(
self,
max_collector_count=2,
delay=0.5,
start=None,
end=None,
check_data_length=None,
limit_nums=None,
):
"""download data from Baostock
Notes
-----
check_data_length, example:
hs300 5min, a week: 4 * 60 * 5
Examples
---------
# get hs300 5min data
$ python collector.py download_data --source_dir ~/.qlib/stock_data/source/hs300_5min_original --start 2022-01-01 --end 2022-01-30 --interval 5min --region HS300
"""
super(Run, self).download_data(max_collector_count, delay, start, end, check_data_length, limit_nums)
|
download data from Baostock
Notes
-----
check_data_length, example:
hs300 5min, a week: 4 * 60 * 5
Examples
---------
# get hs300 5min data
$ python collector.py download_data --source_dir ~/.qlib/stock_data/source/hs300_5min_original --start 2022-01-01 --end 2022-01-30 --interval 5min --region HS300
|
download_data
|
python
|
microsoft/qlib
|
scripts/data_collector/baostock_5min/collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/baostock_5min/collector.py
|
MIT
|
def normalize_data(
self,
date_field_name: str = "date",
symbol_field_name: str = "symbol",
end_date: str = None,
qlib_data_1d_dir: str = None,
):
"""normalize data
Attention
---------
qlib_data_1d_dir cannot be None, normalize 5min needs to use 1d data;
qlib_data_1d can be obtained like this:
$ python scripts/get_data.py qlib_data --target_dir ~/.qlib/qlib_data/cn_data --interval 1d --region cn --version v3
or:
download 1d data, reference: https://github.com/microsoft/qlib/tree/main/scripts/data_collector/yahoo#1d-from-yahoo
Examples
---------
$ python collector.py normalize_data --qlib_data_1d_dir ~/.qlib/qlib_data/cn_data --source_dir ~/.qlib/stock_data/source/hs300_5min_original --normalize_dir ~/.qlib/stock_data/source/hs300_5min_nor --region HS300 --interval 5min
"""
if qlib_data_1d_dir is None or not Path(qlib_data_1d_dir).expanduser().exists():
raise ValueError(
"If normalize 5min, the qlib_data_1d_dir parameter must be set: --qlib_data_1d_dir <user qlib 1d data >, Reference: https://github.com/microsoft/qlib/tree/main/scripts/data_collector/yahoo#automatic-update-of-daily-frequency-datafrom-yahoo-finance"
)
super(Run, self).normalize_data(
date_field_name, symbol_field_name, end_date=end_date, qlib_data_1d_dir=qlib_data_1d_dir
)
|
normalize data
Attention
---------
qlib_data_1d_dir cannot be None, normalize 5min needs to use 1d data;
qlib_data_1d can be obtained like this:
$ python scripts/get_data.py qlib_data --target_dir ~/.qlib/qlib_data/cn_data --interval 1d --region cn --version v3
or:
download 1d data, reference: https://github.com/microsoft/qlib/tree/main/scripts/data_collector/yahoo#1d-from-yahoo
Examples
---------
$ python collector.py normalize_data --qlib_data_1d_dir ~/.qlib/qlib_data/cn_data --source_dir ~/.qlib/stock_data/source/hs300_5min_original --normalize_dir ~/.qlib/stock_data/source/hs300_5min_nor --region HS300 --interval 5min
|
normalize_data
|
python
|
microsoft/qlib
|
scripts/data_collector/baostock_5min/collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/baostock_5min/collector.py
|
MIT
|
def get_current_4_month_period(self, current_month: int):
"""
This function is used to calculate the current
four month period for the current month. For example,
if the current month is August (month 8), its four month
period is 2Q.
OBS: In English, Q is used to represent a *quarter*,
which means a three month period. However, in
Portuguese we use Q to represent a four month period.
In other words,
Jan, Feb, Mar, Apr: 1Q
May, Jun, Jul, Aug: 2Q
Sep, Oct, Nov, Dec: 3Q
Parameters
----------
month : int
Current month (1 <= month <= 12)
Returns
-------
current_4m_period:str
Current Four Month Period (1Q or 2Q or 3Q)
"""
if current_month < 5:
return "1Q"
if current_month < 9:
return "2Q"
if current_month <= 12:
return "3Q"
else:
return -1
|
This function is used to calculate the current
four month period for the current month. For example,
if the current month is August (month 8), its four month
period is 2Q.
OBS: In English, Q is used to represent a *quarter*,
which means a three month period. However, in
Portuguese we use Q to represent a four month period.
In other words,
Jan, Feb, Mar, Apr: 1Q
May, Jun, Jul, Aug: 2Q
Sep, Oct, Nov, Dec: 3Q
Parameters
----------
month : int
Current month (1 <= month <= 12)
Returns
-------
current_4m_period:str
Current Four Month Period (1Q or 2Q or 3Q)
|
get_current_4_month_period
|
python
|
microsoft/qlib
|
scripts/data_collector/br_index/collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/br_index/collector.py
|
MIT
|
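The month-to-period mapping can be checked with a small standalone sketch that mirrors the branches above (a sketch only, not the collector class itself):
def four_month_period(month: int) -> str:
    # Jan-Apr -> 1Q, May-Aug -> 2Q, Sep-Dec -> 3Q
    if month < 5:
        return "1Q"
    if month < 9:
        return "2Q"
    return "3Q"

assert four_month_period(8) == "2Q"    # August falls in the second four-month period
assert four_month_period(12) == "3Q"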
def get_four_month_period(self):
"""
The Ibovespa index is updated every four months.
Therefore, we will represent each time period as 2003_1Q,
which means the first four month period of 2003 (Jan, Feb, Mar, Apr)
"""
four_months_period = ["1Q", "2Q", "3Q"]
init_year = 2003
now = datetime.datetime.now()
current_year = now.year
current_month = now.month
for year in [item for item in range(init_year, current_year)]: # pylint: disable=R1721
for el in four_months_period:
self.years_4_month_periods.append(str(year) + "_" + el)
# For current year the logic must be a little different
current_4_month_period = self.get_current_4_month_period(current_month)
for i in range(int(current_4_month_period[0])):
self.years_4_month_periods.append(str(current_year) + "_" + str(i + 1) + "Q")
return self.years_4_month_periods
|
The Ibovespa index is updated every four months.
Therefore, we will represent each time period as 2003_1Q,
which means the first four month period of 2003 (Jan, Feb, Mar, Apr)
|
get_four_month_period
|
python
|
microsoft/qlib
|
scripts/data_collector/br_index/collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/br_index/collector.py
|
MIT
|
def format_datetime(self, inst_df: pd.DataFrame) -> pd.DataFrame:
"""formatting the datetime in an instrument
Parameters
----------
inst_df: pd.DataFrame
inst_df.columns = [self.SYMBOL_FIELD_NAME, self.START_DATE_FIELD, self.END_DATE_FIELD]
Returns
-------
inst_df: pd.DataFrame
"""
logger.info("Formatting Datetime")
if self.freq != "day":
inst_df[self.END_DATE_FIELD] = inst_df[self.END_DATE_FIELD].apply(
lambda x: (pd.Timestamp(x) + pd.Timedelta(hours=23, minutes=59)).strftime("%Y-%m-%d %H:%M:%S")
)
else:
inst_df[self.START_DATE_FIELD] = inst_df[self.START_DATE_FIELD].apply(
lambda x: (pd.Timestamp(x)).strftime("%Y-%m-%d")
)
inst_df[self.END_DATE_FIELD] = inst_df[self.END_DATE_FIELD].apply(
lambda x: (pd.Timestamp(x)).strftime("%Y-%m-%d")
)
return inst_df
|
formatting the datetime in an instrument
Parameters
----------
inst_df: pd.DataFrame
inst_df.columns = [self.SYMBOL_FIELD_NAME, self.START_DATE_FIELD, self.END_DATE_FIELD]
Returns
-------
inst_df: pd.DataFrame
|
format_datetime
|
python
|
microsoft/qlib
|
scripts/data_collector/br_index/collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/br_index/collector.py
|
MIT
|
def format_quarter(self, cell: str):
"""
Parameters
----------
cell: str
It must be in the format 2003_1Q --> years_4_month_periods
Returns
----------
date: str
Returns date in format 2003-03-01
"""
cell_split = cell.split("_")
return cell_split[0] + "-" + quarter_dict[cell_split[1]]
|
Parameters
----------
cell: str
It must be in the format 2003_1Q --> years_4_month_periods
Returns
----------
date: str
Returns date in format 2003-03-01
|
format_quarter
|
python
|
microsoft/qlib
|
scripts/data_collector/br_index/collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/br_index/collector.py
|
MIT
|
def get_changes(self):
"""
Access the index historic composition and compare it quarter
by quarter and year by year in order to generate a file that
keeps track of which stocks have been removed and which have
been added.
The DataFrame used as reference will provide the index
composition for each year and quarter:
pd.DataFrame:
symbol
SH600000
SH600001
.
.
.
Parameters
----------
self: is used to represent the instance of the class.
Returns
----------
pd.DataFrame:
symbol date type
SH600000 2019-11-11 add
SH600001 2020-11-10 remove
dtypes:
symbol: str
date: pd.Timestamp
type: str, value from ["add", "remove"]
"""
logger.info("Getting companies changes in {} index ...".format(self.index_name))
try:
df_changes_list = []
for i in tqdm(range(len(self.years_4_month_periods) - 1)):
df = pd.read_csv(
self.ibov_index_composition.format(self.years_4_month_periods[i]), on_bad_lines="skip"
)["symbol"]
df_ = pd.read_csv(
self.ibov_index_composition.format(self.years_4_month_periods[i + 1]), on_bad_lines="skip"
)["symbol"]
## Remove Dataframe
remove_date = (
self.years_4_month_periods[i].split("_")[0]
+ "-"
+ quarter_dict[self.years_4_month_periods[i].split("_")[1]]
)
list_remove = list(df[~df.isin(df_)])
df_removed = pd.DataFrame(
{
"date": len(list_remove) * [remove_date],
"type": len(list_remove) * ["remove"],
"symbol": list_remove,
}
)
## Add Dataframe
add_date = (
self.years_4_month_periods[i + 1].split("_")[0]
+ "-"
+ quarter_dict[self.years_4_month_periods[i + 1].split("_")[1]]
)
list_add = list(df_[~df_.isin(df)])
df_added = pd.DataFrame(
{"date": len(list_add) * [add_date], "type": len(list_add) * ["add"], "symbol": list_add}
)
df_changes_list.append(pd.concat([df_added, df_removed], sort=False))
df = pd.concat(df_changes_list).reset_index(drop=True)
df["symbol"] = df["symbol"].astype(str) + ".SA"
return df
except Exception as E:
logger.error("An error occured while downloading 2008 index composition - {}".format(E))
|
Access the index historic composition and compare it quarter
by quarter and year by year in order to generate a file that
keeps track of which stocks have been removed and which have
been added.
The DataFrame used as reference will provide the index
composition for each year and quarter:
pd.DataFrame:
symbol
SH600000
SH600001
.
.
.
Parameters
----------
self: is used to represent the instance of the class.
Returns
----------
pd.DataFrame:
symbol date type
SH600000 2019-11-11 add
SH600001 2020-11-10 remove
dtypes:
symbol: str
date: pd.Timestamp
type: str, value from ["add", "remove"]
|
get_changes
|
python
|
microsoft/qlib
|
scripts/data_collector/br_index/collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/br_index/collector.py
|
MIT
|
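The add/remove bookkeeping in get_changes reduces to two set differences between consecutive compositions; a toy sketch of that core step (the symbols are made up):
import pandas as pd

prev = pd.Series(["PETR4", "VALE3", "ITUB4"])   # composition in period i
curr = pd.Series(["PETR4", "VALE3", "WEGE3"])   # composition in period i + 1
removed = list(prev[~prev.isin(curr)])          # in prev but not in curr -> "remove"
added = list(curr[~curr.isin(prev)])            # in curr but not in prev -> "add"
print(removed, added)                           # ['ITUB4'] ['WEGE3']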
def get_new_companies(self):
"""
Get latest index composition.
The repo indicated on README has implemented a script
to get the latest index composition from B3 website using
selenium. Therefore, this method will download the file
containing such composition
Parameters
----------
self: is used to represent the instance of the class.
Returns
----------
pd.DataFrame:
symbol start_date end_date
RRRP3 2020-11-13 2022-03-02
ALPA4 2008-01-02 2022-03-02
dtypes:
symbol: str
start_date: pd.Timestamp
end_date: pd.Timestamp
"""
logger.info("Getting new companies in {} index ...".format(self.index_name))
try:
## Get index composition
df_index = pd.read_csv(
self.ibov_index_composition.format(self.year + "_" + self.current_4_month_period), on_bad_lines="skip"
)
df_date_first_added = pd.read_csv(
self.ibov_index_composition.format("date_first_added_" + self.year + "_" + self.current_4_month_period),
on_bad_lines="skip",
)
df = df_index.merge(df_date_first_added, on="symbol")[["symbol", "Date First Added"]]
df[self.START_DATE_FIELD] = df["Date First Added"].map(self.format_quarter)
# end_date will be our current quarter + 1, since the IBOV index updates itself every quarter
df[self.END_DATE_FIELD] = self.year + "-" + quarter_dict[self.current_4_month_period]
df = df[["symbol", self.START_DATE_FIELD, self.END_DATE_FIELD]]
df["symbol"] = df["symbol"].astype(str) + ".SA"
return df
except Exception as E:
logger.error("An error occured while getting new companies - {}".format(E))
|
Get latest index composition.
The repo indicated on README has implemented a script
to get the latest index composition from B3 website using
selenium. Therefore, this method will download the file
containing such composition
Parameters
----------
self: is used to represent the instance of the class.
Returns
----------
pd.DataFrame:
symbol start_date end_date
RRRP3 2020-11-13 2022-03-02
ALPA4 2008-01-02 2022-03-02
dtypes:
symbol: str
start_date: pd.Timestamp
end_date: pd.Timestamp
|
get_new_companies
|
python
|
microsoft/qlib
|
scripts/data_collector/br_index/collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/br_index/collector.py
|
MIT
|
def calendar_list(self) -> List[pd.Timestamp]:
"""get history trading date
Returns
-------
calendar list
"""
_calendar = getattr(self, "_calendar_list", None)
if not _calendar:
_calendar = get_calendar_list(bench_code=self.index_name.upper())
setattr(self, "_calendar_list", _calendar)
return _calendar
|
get history trading date
Returns
-------
calendar list
|
calendar_list
|
python
|
microsoft/qlib
|
scripts/data_collector/cn_index/collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/cn_index/collector.py
|
MIT
|
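The getattr/setattr idiom in calendar_list is a lightweight per-instance cache; a generic sketch of the same pattern with a hypothetical loader:
class CalendarCacheDemo:
    def calendar_list(self):
        # compute once, then serve the cached attribute on later calls
        cached = getattr(self, "_calendar_list", None)
        if not cached:
            cached = self._load_calendar()
            setattr(self, "_calendar_list", cached)
        return cached

    def _load_calendar(self):
        print("loading calendar ...")
        return ["2022-01-04", "2022-01-05"]

demo = CalendarCacheDemo()
demo.calendar_list()   # triggers the expensive load
demo.calendar_list()   # served from the cached attribute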
def format_datetime(self, inst_df: pd.DataFrame) -> pd.DataFrame:
"""formatting the datetime in an instrument
Parameters
----------
inst_df: pd.DataFrame
inst_df.columns = [self.SYMBOL_FIELD_NAME, self.START_DATE_FIELD, self.END_DATE_FIELD]
Returns
-------
"""
if self.freq != "day":
inst_df[self.START_DATE_FIELD] = inst_df[self.START_DATE_FIELD].apply(
lambda x: (pd.Timestamp(x) + pd.Timedelta(hours=9, minutes=30)).strftime("%Y-%m-%d %H:%M:%S")
)
inst_df[self.END_DATE_FIELD] = inst_df[self.END_DATE_FIELD].apply(
lambda x: (pd.Timestamp(x) + pd.Timedelta(hours=15, minutes=0)).strftime("%Y-%m-%d %H:%M:%S")
)
return inst_df
|
formatting the datetime in an instrument
Parameters
----------
inst_df: pd.DataFrame
inst_df.columns = [self.SYMBOL_FIELD_NAME, self.START_DATE_FIELD, self.END_DATE_FIELD]
Returns
-------
|
format_datetime
|
python
|
microsoft/qlib
|
scripts/data_collector/cn_index/collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/cn_index/collector.py
|
MIT
|
def get_changes(self) -> pd.DataFrame:
"""get companies changes
Returns
-------
pd.DataFrame:
symbol date type
SH600000 2019-11-11 add
SH600000 2020-11-10 remove
dtypes:
symbol: str
date: pd.Timestamp
type: str, value from ["add", "remove"]
"""
logger.info("get companies changes......")
res = []
for _url in self._get_change_notices_url():
_df = self._read_change_from_url(_url)
if not _df.empty:
res.append(_df)
logger.info("get companies changes finish")
return pd.concat(res, sort=False)
|
get companies changes
Returns
-------
pd.DataFrame:
symbol date type
SH600000 2019-11-11 add
SH600000 2020-11-10 remove
dtypes:
symbol: str
date: pd.Timestamp
type: str, value from ["add", "remove"]
|
get_changes
|
python
|
microsoft/qlib
|
scripts/data_collector/cn_index/collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/cn_index/collector.py
|
MIT
|
def normalize_symbol(symbol: str) -> str:
"""
Parameters
----------
symbol: str
symbol
Returns
-------
symbol
"""
symbol = f"{int(symbol):06}"
return f"SH{symbol}" if symbol.startswith("60") or symbol.startswith("688") else f"SZ{symbol}"
|
Parameters
----------
symbol: str
symbol
Returns
-------
symbol
|
normalize_symbol
|
python
|
microsoft/qlib
|
scripts/data_collector/cn_index/collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/cn_index/collector.py
|
MIT
|
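A standalone sketch of the same formatting rule used by normalize_symbol (the logic is copied from above, the sample codes are illustrative):
def normalize_symbol_demo(symbol: str) -> str:
    # zero-pad to 6 digits, prefix SH for 60*/688* codes, SZ otherwise
    symbol = f"{int(symbol):06}"
    return f"SH{symbol}" if symbol.startswith("60") or symbol.startswith("688") else f"SZ{symbol}"

assert normalize_symbol_demo("600000") == "SH600000"
assert normalize_symbol_demo("1") == "SZ000001"        # zero padding
assert normalize_symbol_demo("688981") == "SH688981"   # STAR Market codes also map to SH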
def get_new_companies(self) -> pd.DataFrame:
"""
Returns
-------
pd.DataFrame:
symbol start_date end_date
SH600000 2000-01-01 2099-12-31
dtypes:
symbol: str
start_date: pd.Timestamp
end_date: pd.Timestamp
"""
logger.info("get new companies......")
context = retry_request(self.new_companies_url).content
with self.cache_dir.joinpath(
f"{self.index_name.lower()}_new_companies.{self.new_companies_url.split('.')[-1]}"
).open("wb") as fp:
fp.write(context)
_io = BytesIO(context)
df = pd.read_excel(_io)
df = df.iloc[:, [0, 4]]
df.columns = [self.END_DATE_FIELD, self.SYMBOL_FIELD_NAME]
df[self.SYMBOL_FIELD_NAME] = df[self.SYMBOL_FIELD_NAME].map(self.normalize_symbol)
df[self.END_DATE_FIELD] = pd.to_datetime(df[self.END_DATE_FIELD].astype(str))
df[self.START_DATE_FIELD] = self.bench_start_date
logger.info("end of get new companies.")
return df
|
Returns
-------
pd.DataFrame:
symbol start_date end_date
SH600000 2000-01-01 2099-12-31
dtypes:
symbol: str
start_date: pd.Timestamp
end_date: pd.Timestamp
|
get_new_companies
|
python
|
microsoft/qlib
|
scripts/data_collector/cn_index/collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/cn_index/collector.py
|
MIT
|
def get_history_companies(self) -> pd.DataFrame:
"""
Returns
-------
pd.DataFrame:
symbol date type
SH600000 2019-11-11 add
SH600000 2020-11-10 remove
dtypes:
symbol: str
date: pd.Timestamp
type: str, value from ["add", "remove"]
"""
bs.login()
today = pd.Timestamp.now()
date_range = pd.DataFrame(pd.date_range(start="2007-01-15", end=today, freq="7D"))[0].dt.date
ret_list = []
for date in tqdm(date_range, desc="Download CSI500"):
result = self.get_data_from_baostock(date)
ret_list.append(result[["date", "symbol"]])
bs.logout()
return pd.concat(ret_list, sort=False)
|
Returns
-------
pd.DataFrame:
symbol date type
SH600000 2019-11-11 add
SH600000 2020-11-10 remove
dtypes:
symbol: str
date: pd.Timestamp
type: str, value from ["add", "remove"]
|
get_history_companies
|
python
|
microsoft/qlib
|
scripts/data_collector/cn_index/collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/cn_index/collector.py
|
MIT
|
def get_new_companies(self) -> pd.DataFrame:
"""
Returns
-------
pd.DataFrame:
symbol start_date end_date
SH600000 2000-01-01 2099-12-31
dtypes:
symbol: str
start_date: pd.Timestamp
end_date: pd.Timestamp
"""
logger.info("get new companies......")
today = pd.Timestamp.now().normalize()
bs.login()
result = self.get_data_from_baostock(today.strftime("%Y-%m-%d"))
bs.logout()
df = result[["date", "symbol"]]
df.columns = [self.END_DATE_FIELD, self.SYMBOL_FIELD_NAME]
df[self.END_DATE_FIELD] = today
df[self.START_DATE_FIELD] = self.bench_start_date
logger.info("end of get new companies.")
return df
|
Returns
-------
pd.DataFrame:
symbol start_date end_date
SH600000 2000-01-01 2099-12-31
dtypes:
symbol: str
start_date: pd.Timestamp
end_date: pd.Timestamp
|
get_new_companies
|
python
|
microsoft/qlib
|
scripts/data_collector/cn_index/collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/cn_index/collector.py
|
MIT
|
def fill_1min_using_1d(
data_1min_dir: [str, Path],
qlib_data_1d_dir: [str, Path],
max_workers: int = 16,
date_field_name: str = "date",
symbol_field_name: str = "symbol",
):
"""Use 1d data to fill in the missing symbols relative to 1min
Parameters
----------
data_1min_dir: str
1min data dir
qlib_data_1d_dir: str
1d qlib data(bin data) dir, from: https://qlib.readthedocs.io/en/latest/component/data.html#converting-csv-format-into-qlib-format
max_workers: int
ThreadPoolExecutor(max_workers), by default 16
date_field_name: str
date field name, by default date
symbol_field_name: str
symbol field name, by default symbol
"""
data_1min_dir = Path(data_1min_dir).expanduser().resolve()
qlib_data_1d_dir = Path(qlib_data_1d_dir).expanduser().resolve()
min_date, max_date = get_date_range(data_1min_dir, max_workers, date_field_name)
symbols_1min = get_symbols(data_1min_dir)
qlib.init(provider_uri=str(qlib_data_1d_dir))
data_1d = D.features(D.instruments("all"), ["$close"], min_date, max_date, freq="day")
miss_symbols = set(data_1d.index.get_level_values(level="instrument").unique()) - set(symbols_1min)
if not miss_symbols:
logger.warning("More symbols in 1min than 1d, no padding required")
return
logger.info(f"miss_symbols {len(miss_symbols)}: {miss_symbols}")
tmp_df = pd.read_csv(list(data_1min_dir.glob("*.csv"))[0])
columns = tmp_df.columns
_si = tmp_df[symbol_field_name].first_valid_index()
is_lower = tmp_df.loc[_si][symbol_field_name].islower()
for symbol in tqdm(miss_symbols):
if is_lower:
symbol = symbol.lower()
index_1d = data_1d.loc(axis=0)[symbol.upper()].index
index_1min = generate_minutes_calendar_from_daily(index_1d)
index_1min.name = date_field_name
_df = pd.DataFrame(columns=columns, index=index_1min)
if date_field_name in _df.columns:
del _df[date_field_name]
_df.reset_index(inplace=True)
_df[symbol_field_name] = symbol
_df["paused_num"] = 0
_df.to_csv(data_1min_dir.joinpath(f"{symbol}.csv"), index=False)
|
Use 1d data to fill in the missing symbols relative to 1min
Parameters
----------
data_1min_dir: str
1min data dir
qlib_data_1d_dir: str
1d qlib data(bin data) dir, from: https://qlib.readthedocs.io/en/latest/component/data.html#converting-csv-format-into-qlib-format
max_workers: int
ThreadPoolExecutor(max_workers), by default 16
date_field_name: str
date field name, by default date
symbol_field_name: str
symbol field name, by default symbol
|
fill_1min_using_1d
|
python
|
microsoft/qlib
|
scripts/data_collector/contrib/fill_cn_1min_data/fill_cn_1min_data.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/contrib/fill_cn_1min_data/fill_cn_1min_data.py
|
MIT
|
def future_calendar_collector(qlib_dir: [str, Path], freq: str = "day"):
"""get future calendar
Parameters
----------
qlib_dir: str or Path
qlib data directory
freq: str
value from ["day", "1min"], by default day
"""
qlib_dir = Path(qlib_dir).expanduser().resolve()
if not qlib_dir.exists():
raise FileNotFoundError(str(qlib_dir))
lg = bs.login()
if lg.error_code != "0":
logger.error(f"login error: {lg.error_msg}")
return
# read daily calendar
daily_calendar = read_calendar_from_qlib(qlib_dir)
end_year = pd.Timestamp.now().year
if daily_calendar.empty:
start_year = pd.Timestamp.now().year
else:
start_year = pd.Timestamp(daily_calendar.iloc[-1, 0]).year
rs = bs.query_trade_dates(start_date=pd.Timestamp(f"{start_year}-01-01"), end_date=f"{end_year}-12-31")
data_list = []
while (rs.error_code == "0") & rs.next():
_row_data = rs.get_row_data()
if int(_row_data[1]) == 1:
data_list.append(_row_data[0])
data_list = sorted(data_list)
date_list = generate_qlib_calendar(data_list, freq=freq)
date_list = sorted(set(daily_calendar.loc[:, 0].values.tolist() + date_list))
write_calendar_to_qlib(qlib_dir, date_list, freq=freq)
bs.logout()
logger.info(f"get trading dates success: {start_year}-01-01 to {end_year}-12-31")
|
get future calendar
Parameters
----------
qlib_dir: str or Path
qlib data directory
freq: str
value from ["day", "1min"], by default day
|
future_calendar_collector
|
python
|
microsoft/qlib
|
scripts/data_collector/contrib/future_trading_date_collector/future_trading_date_collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/contrib/future_trading_date_collector/future_trading_date_collector.py
|
MIT
|
def get_cg_crypto_symbols(qlib_data_path: [str, Path] = None) -> list:
"""get crypto symbols in coingecko
Returns
-------
crypto symbols in given exchanges list of coingecko
"""
global _CG_CRYPTO_SYMBOLS # pylint: disable=W0603
@deco_retry
def _get_coingecko():
try:
cg = CoinGeckoAPI()
resp = pd.DataFrame(cg.get_coins_markets(vs_currency="usd"))
except Exception as e:
raise ValueError("request error") from e
try:
_symbols = resp["id"].to_list()
except Exception as e:
logger.warning(f"request error: {e}")
raise
return _symbols
if _CG_CRYPTO_SYMBOLS is None:
_all_symbols = _get_coingecko()
_CG_CRYPTO_SYMBOLS = sorted(set(_all_symbols))
return _CG_CRYPTO_SYMBOLS
|
get crypto symbols in coingecko
Returns
-------
crypto symbols in given exchanges list of coingecko
|
get_cg_crypto_symbols
|
python
|
microsoft/qlib
|
scripts/data_collector/crypto/collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/crypto/collector.py
|
MIT
|
def __init__(self, source_dir=None, normalize_dir=None, max_workers=4, interval="1d", region=REGION_CN):
"""
Parameters
----------
source_dir: str
The directory where the raw data collected from the Internet is saved, default "Path(__file__).parent/source"
normalize_dir: str
Directory for normalize data, default "Path(__file__).parent/normalize"
max_workers: int
Concurrent number, default is 4
interval: str
freq, value from [1min, 1d], default 1d
region: str
region, value from ["CN"], default "CN"
"""
super().__init__(source_dir, normalize_dir, max_workers, interval)
self.region = region
|
Parameters
----------
source_dir: str
The directory where the raw data collected from the Internet is saved, default "Path(__file__).parent/source"
normalize_dir: str
Directory for normalize data, default "Path(__file__).parent/normalize"
max_workers: int
Concurrent number, default is 4
interval: str
freq, value from [1min, 1d], default 1d
region: str
region, value from ["CN"], default "CN"
|
__init__
|
python
|
microsoft/qlib
|
scripts/data_collector/fund/collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/fund/collector.py
|
MIT
|
def format_datetime(self, inst_df: pd.DataFrame) -> pd.DataFrame:
"""formatting the datetime in an instrument
Parameters
----------
inst_df: pd.DataFrame
inst_df.columns = [self.SYMBOL_FIELD_NAME, self.START_DATE_FIELD, self.END_DATE_FIELD]
Returns
-------
"""
if self.freq != "day":
inst_df[self.END_DATE_FIELD] = inst_df[self.END_DATE_FIELD].apply(
lambda x: (pd.Timestamp(x) + pd.Timedelta(hours=23, minutes=59)).strftime("%Y-%m-%d %H:%M:%S")
)
return inst_df
|
formatting the datetime in an instrument
Parameters
----------
inst_df: pd.DataFrame
inst_df.columns = [self.SYMBOL_FIELD_NAME, self.START_DATE_FIELD, self.END_DATE_FIELD]
Returns
-------
|
format_datetime
|
python
|
microsoft/qlib
|
scripts/data_collector/us_index/collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/us_index/collector.py
|
MIT
|
def calendar_list(self) -> List[pd.Timestamp]:
"""get history trading date
Returns
-------
calendar list
"""
_calendar_list = getattr(self, "_calendar_list", None)
if _calendar_list is None:
_calendar_list = list(filter(lambda x: x >= self.bench_start_date, get_calendar_list("US_ALL")))
setattr(self, "_calendar_list", _calendar_list)
return _calendar_list
|
get history trading date
Returns
-------
calendar list
|
calendar_list
|
python
|
microsoft/qlib
|
scripts/data_collector/us_index/collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/us_index/collector.py
|
MIT
|
def _get_first_close(self, df: pd.DataFrame) -> float:
"""get first close value
Notes
-----
For incremental updates (append) to Yahoo 1D data, users need to use a close that is not 0 on the first trading day of the existing data
"""
df = df.loc[df["close"].first_valid_index() :]
_close = df["close"].iloc[0]
return _close
|
get first close value
Notes
-----
For incremental updates (append) to Yahoo 1D data, users need to use a close that is not 0 on the first trading day of the existing data
|
_get_first_close
|
python
|
microsoft/qlib
|
scripts/data_collector/yahoo/collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/yahoo/collector.py
|
MIT
|
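The first_valid_index trick can be seen on a toy series with leading NaNs (a sketch of the indexing only; _get_first_close operates on the full Yahoo DataFrame):
import numpy as np
import pandas as pd

df = pd.DataFrame({"close": [np.nan, np.nan, 12.5, 12.7]})
df = df.loc[df["close"].first_valid_index() :]   # drop the leading NaN rows
print(df["close"].iloc[0])                       # 12.5, the first valid close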
def _manual_adj_data(self, df: pd.DataFrame) -> pd.DataFrame:
"""manual adjust data: All fields (except change) are standardized according to the close of the first day"""
if df.empty:
return df
df = df.copy()
df.sort_values(self._date_field_name, inplace=True)
df = df.set_index(self._date_field_name)
_close = self._get_first_close(df)
for _col in df.columns:
# NOTE: retain original adjclose, required for incremental updates
if _col in [self._symbol_field_name, "adjclose", "change"]:
continue
if _col == "volume":
df[_col] = df[_col] * _close
else:
df[_col] = df[_col] / _close
return df.reset_index()
|
manual adjust data: All fields (except change) are standardized according to the close of the first day
|
_manual_adj_data
|
python
|
microsoft/qlib
|
scripts/data_collector/yahoo/collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/yahoo/collector.py
|
MIT
|
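A toy sketch of the same standardization rule: prices are divided by the first valid close and volume is multiplied by it (only the arithmetic, not the class method; the numbers are made up):
import pandas as pd

df = pd.DataFrame({"open": [12.4, 12.6], "close": [12.5, 12.7], "volume": [1000.0, 1100.0]})
first_close = df["close"].iloc[0]
for _col in ["open", "close", "volume"]:
    if _col == "volume":
        df[_col] = df[_col] * first_close
    else:
        df[_col] = df[_col] / first_close
print(df)   # close starts at 1.0 after standardization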
def __init__(
self, old_qlib_data_dir: [str, Path], date_field_name: str = "date", symbol_field_name: str = "symbol", **kwargs
):
"""
Parameters
----------
old_qlib_data_dir: str, Path
the qlib data to be updated for yahoo, usually from: https://github.com/microsoft/qlib/tree/main/scripts#download-cn-data
date_field_name: str
date field name, default is date
symbol_field_name: str
symbol field name, default is symbol
"""
super(YahooNormalize1dExtend, self).__init__(date_field_name, symbol_field_name)
self.column_list = ["open", "high", "low", "close", "volume", "factor", "change"]
self.old_qlib_data = self._get_old_data(old_qlib_data_dir)
|
Parameters
----------
old_qlib_data_dir: str, Path
the qlib data to be updated for yahoo, usually from: https://github.com/microsoft/qlib/tree/main/scripts#download-cn-data
date_field_name: str
date field name, default is date
symbol_field_name: str
symbol field name, default is symbol
|
__init__
|
python
|
microsoft/qlib
|
scripts/data_collector/yahoo/collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/yahoo/collector.py
|
MIT
|
def __init__(
self, qlib_data_1d_dir: [str, Path], date_field_name: str = "date", symbol_field_name: str = "symbol", **kwargs
):
"""
Parameters
----------
qlib_data_1d_dir: str, Path
the qlib data to be updated for yahoo, usually from: Normalised to 1min using local 1d data
date_field_name: str
date field name, default is date
symbol_field_name: str
symbol field name, default is symbol
"""
super(YahooNormalize1min, self).__init__(date_field_name, symbol_field_name)
qlib.init(provider_uri=qlib_data_1d_dir)
self.all_1d_data = D.features(D.instruments("all"), ["$paused", "$volume", "$factor", "$close"], freq="day")
|
Parameters
----------
qlib_data_1d_dir: str, Path
the qlib data to be updated for yahoo, usually from: Normalised to 1min using local 1d data
date_field_name: str
date field name, default is date
symbol_field_name: str
symbol field name, default is symbol
|
__init__
|
python
|
microsoft/qlib
|
scripts/data_collector/yahoo/collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/yahoo/collector.py
|
MIT
|
def __init__(self, source_dir=None, normalize_dir=None, max_workers=1, interval="1d", region=REGION_CN):
"""
Parameters
----------
source_dir: str
The directory where the raw data collected from the Internet is saved, default "Path(__file__).parent/source"
normalize_dir: str
Directory for normalize data, default "Path(__file__).parent/normalize"
max_workers: int
Concurrent number, default is 1; when collecting data, it is recommended that max_workers be set to 1
interval: str
freq, value from [1min, 1d], default 1d
region: str
region, value from ["CN", "US", "BR"], default "CN"
"""
super().__init__(source_dir, normalize_dir, max_workers, interval)
self.region = region
|
Parameters
----------
source_dir: str
The directory where the raw data collected from the Internet is saved, default "Path(__file__).parent/source"
normalize_dir: str
Directory for normalize data, default "Path(__file__).parent/normalize"
max_workers: int
Concurrent number, default is 1; when collecting data, it is recommended that max_workers be set to 1
interval: str
freq, value from [1min, 1d], default 1d
region: str
region, value from ["CN", "US", "BR"], default "CN"
|
__init__
|
python
|
microsoft/qlib
|
scripts/data_collector/yahoo/collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/yahoo/collector.py
|
MIT
|
def normalize_data(
self,
date_field_name: str = "date",
symbol_field_name: str = "symbol",
end_date: str = None,
qlib_data_1d_dir: str = None,
):
"""normalize data
Parameters
----------
date_field_name: str
date field name, default date
symbol_field_name: str
symbol field name, default symbol
end_date: str
if not None, normalize the last date saved (including end_date); if None, it will ignore this parameter; by default None
qlib_data_1d_dir: str
if interval==1min, qlib_data_1d_dir cannot be None, normalize 1min needs to use 1d data;
qlib_data_1d can be obtained like this:
$ python scripts/get_data.py qlib_data --target_dir <qlib_data_1d_dir> --interval 1d
$ python scripts/data_collector/yahoo/collector.py update_data_to_bin --qlib_data_1d_dir <qlib_data_1d_dir> --trading_date 2021-06-01
or:
download 1d data, reference: https://github.com/microsoft/qlib/tree/main/scripts/data_collector/yahoo#1d-from-yahoo
Examples
---------
$ python collector.py normalize_data --source_dir ~/.qlib/stock_data/source --normalize_dir ~/.qlib/stock_data/normalize --region cn --interval 1d
$ python collector.py normalize_data --qlib_data_1d_dir ~/.qlib/qlib_data/cn_data --source_dir ~/.qlib/stock_data/source_cn_1min --normalize_dir ~/.qlib/stock_data/normalize_cn_1min --region CN --interval 1min
"""
if self.interval.lower() == "1min":
if qlib_data_1d_dir is None or not Path(qlib_data_1d_dir).expanduser().exists():
raise ValueError(
"If normalize 1min, the qlib_data_1d_dir parameter must be set: --qlib_data_1d_dir <user qlib 1d data >, Reference: https://github.com/microsoft/qlib/tree/main/scripts/data_collector/yahoo#automatic-update-of-daily-frequency-datafrom-yahoo-finance"
)
super(Run, self).normalize_data(
date_field_name, symbol_field_name, end_date=end_date, qlib_data_1d_dir=qlib_data_1d_dir
)
|
normalize data
Parameters
----------
date_field_name: str
date field name, default date
symbol_field_name: str
symbol field name, default symbol
end_date: str
if not None, normalize the last date saved (including end_date); if None, it will ignore this parameter; by default None
qlib_data_1d_dir: str
if interval==1min, qlib_data_1d_dir cannot be None, normalize 1min needs to use 1d data;
qlib_data_1d can be obtained like this:
$ python scripts/get_data.py qlib_data --target_dir <qlib_data_1d_dir> --interval 1d
$ python scripts/data_collector/yahoo/collector.py update_data_to_bin --qlib_data_1d_dir <qlib_data_1d_dir> --trading_date 2021-06-01
or:
download 1d data, reference: https://github.com/microsoft/qlib/tree/main/scripts/data_collector/yahoo#1d-from-yahoo
Examples
---------
$ python collector.py normalize_data --source_dir ~/.qlib/stock_data/source --normalize_dir ~/.qlib/stock_data/normalize --region cn --interval 1d
$ python collector.py normalize_data --qlib_data_1d_dir ~/.qlib/qlib_data/cn_data --source_dir ~/.qlib/stock_data/source_cn_1min --normalize_dir ~/.qlib/stock_data/normalize_cn_1min --region CN --interval 1min
|
normalize_data
|
python
|
microsoft/qlib
|
scripts/data_collector/yahoo/collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/yahoo/collector.py
|
MIT
|
def normalize_data_1d_extend(
self, old_qlib_data_dir, date_field_name: str = "date", symbol_field_name: str = "symbol"
):
"""normalize data extend; extending yahoo qlib data(from: https://github.com/microsoft/qlib/tree/main/scripts#download-cn-data)
Notes
-----
Steps to extend yahoo qlib data:
1. download qlib data: https://github.com/microsoft/qlib/tree/main/scripts#download-cn-data; save to <dir1>
2. collector source data: https://github.com/microsoft/qlib/tree/main/scripts/data_collector/yahoo#collector-data; save to <dir2>
3. normalize new source data(from step 2): python scripts/data_collector/yahoo/collector.py normalize_data_1d_extend --old_qlib_dir <dir1> --source_dir <dir2> --normalize_dir <dir3> --region CN --interval 1d
4. dump data: python scripts/dump_bin.py dump_update --csv_path <dir3> --qlib_dir <dir1> --freq day --date_field_name date --symbol_field_name symbol --exclude_fields symbol,date
5. update instrument (e.g. csi300): python scripts/data_collector/cn_index/collector.py --index_name CSI300 --qlib_dir <dir1> --method parse_instruments
Parameters
----------
old_qlib_data_dir: str
the qlib data to be updated for yahoo, usually from: https://github.com/microsoft/qlib/tree/main/scripts#download-cn-data
date_field_name: str
date field name, default date
symbol_field_name: str
symbol field name, default symbol
Examples
---------
$ python collector.py normalize_data_1d_extend --old_qlib_dir ~/.qlib/qlib_data/cn_data --source_dir ~/.qlib/stock_data/source --normalize_dir ~/.qlib/stock_data/normalize --region CN --interval 1d
"""
_class = getattr(self._cur_module, f"{self.normalize_class_name}Extend")
yc = Normalize(
source_dir=self.source_dir,
target_dir=self.normalize_dir,
normalize_class=_class,
max_workers=self.max_workers,
date_field_name=date_field_name,
symbol_field_name=symbol_field_name,
old_qlib_data_dir=old_qlib_data_dir,
)
yc.normalize()
|
normalize data extend; extending yahoo qlib data(from: https://github.com/microsoft/qlib/tree/main/scripts#download-cn-data)
Notes
-----
Steps to extend yahoo qlib data:
1. download qlib data: https://github.com/microsoft/qlib/tree/main/scripts#download-cn-data; save to <dir1>
2. collector source data: https://github.com/microsoft/qlib/tree/main/scripts/data_collector/yahoo#collector-data; save to <dir2>
3. normalize new source data(from step 2): python scripts/data_collector/yahoo/collector.py normalize_data_1d_extend --old_qlib_dir <dir1> --source_dir <dir2> --normalize_dir <dir3> --region CN --interval 1d
4. dump data: python scripts/dump_bin.py dump_update --csv_path <dir3> --qlib_dir <dir1> --freq day --date_field_name date --symbol_field_name symbol --exclude_fields symbol,date
5. update instrument (e.g. csi300): python scripts/data_collector/cn_index/collector.py --index_name CSI300 --qlib_dir <dir1> --method parse_instruments
Parameters
----------
old_qlib_data_dir: str
the qlib data to be updated for yahoo, usually from: https://github.com/microsoft/qlib/tree/main/scripts#download-cn-data
date_field_name: str
date field name, default date
symbol_field_name: str
symbol field name, default symbol
Examples
---------
$ python collector.py normalize_data_1d_extend --old_qlib_dir ~/.qlib/qlib_data/cn_data --source_dir ~/.qlib/stock_data/source --normalize_dir ~/.qlib/stock_data/normalize --region CN --interval 1d
|
normalize_data_1d_extend
|
python
|
microsoft/qlib
|
scripts/data_collector/yahoo/collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/yahoo/collector.py
|
MIT
|
def update_data_to_bin(
self,
qlib_data_1d_dir: str,
end_date: str = None,
check_data_length: int = None,
delay: float = 1,
exists_skip: bool = False,
):
"""update yahoo data to bin
Parameters
----------
qlib_data_1d_dir: str
the qlib data to be updated for yahoo, usually from: https://github.com/microsoft/qlib/tree/main/scripts#download-cn-data
end_date: str
end datetime, default ``pd.Timestamp(trading_date + pd.Timedelta(days=1))``; open interval(excluding end)
check_data_length: int
check data length, if not None and greater than 0, each symbol will be considered complete if its data length is greater than or equal to this value, otherwise it will be fetched again, the maximum number of fetches being (max_collector_count). By default None.
delay: float
time.sleep(delay), default 1
exists_skip: bool
exists skip, by default False
Notes
-----
If the data in qlib_data_dir is incomplete, np.nan will be populated to trading_date for the previous trading day
Examples
-------
$ python collector.py update_data_to_bin --qlib_data_1d_dir <user data dir> --trading_date <start date> --end_date <end date>
"""
if self.interval.lower() != "1d":
logger.warning(f"currently supports 1d data updates: --interval 1d")
# download qlib 1d data
qlib_data_1d_dir = str(Path(qlib_data_1d_dir).expanduser().resolve())
if not exists_qlib_data(qlib_data_1d_dir):
GetData().qlib_data(
target_dir=qlib_data_1d_dir, interval=self.interval, region=self.region, exists_skip=exists_skip
)
# start/end date
calendar_df = pd.read_csv(Path(qlib_data_1d_dir).joinpath("calendars/day.txt"))
trading_date = (pd.Timestamp(calendar_df.iloc[-1, 0]) - pd.Timedelta(days=1)).strftime("%Y-%m-%d")
if end_date is None:
end_date = (pd.Timestamp(trading_date) + pd.Timedelta(days=1)).strftime("%Y-%m-%d")
# download data from yahoo
# NOTE: when downloading data from YahooFinance, max_workers is recommended to be 1
self.download_data(delay=delay, start=trading_date, end=end_date, check_data_length=check_data_length)
# NOTE: a larger max_workers setting here would be faster
self.max_workers = (
max(multiprocessing.cpu_count() - 2, 1)
if self.max_workers is None or self.max_workers <= 1
else self.max_workers
)
# normalize data
self.normalize_data_1d_extend(qlib_data_1d_dir)
# dump bin
_dump = DumpDataUpdate(
csv_path=self.normalize_dir,
qlib_dir=qlib_data_1d_dir,
exclude_fields="symbol,date",
max_workers=self.max_workers,
)
_dump.dump()
# parse index
_region = self.region.lower()
if _region not in ["cn", "us"]:
logger.warning(f"Unsupported region: region={_region}, component downloads will be ignored")
return
index_list = ["CSI100", "CSI300"] if _region == "cn" else ["SP500", "NASDAQ100", "DJIA", "SP400"]
get_instruments = getattr(
importlib.import_module(f"data_collector.{_region}_index.collector"), "get_instruments"
)
for _index in index_list:
get_instruments(str(qlib_data_1d_dir), _index, market_index=f"{_region}_index")
|
update yahoo data to bin
Parameters
----------
qlib_data_1d_dir: str
the qlib data to be updated for yahoo, usually from: https://github.com/microsoft/qlib/tree/main/scripts#download-cn-data
end_date: str
end datetime, default ``pd.Timestamp(trading_date + pd.Timedelta(days=1))``; open interval(excluding end)
check_data_length: int
check data length, if not None and greater than 0, each symbol will be considered complete if its data length is greater than or equal to this value, otherwise it will be fetched again, the maximum number of fetches being (max_collector_count). By default None.
delay: float
time.sleep(delay), default 1
exists_skip: bool
exists skip, by default False
Notes
-----
If the data in qlib_data_dir is incomplete, np.nan will be populated to trading_date for the previous trading day
Examples
-------
$ python collector.py update_data_to_bin --qlib_data_1d_dir <user data dir> --trading_date <start date> --end_date <end date>
|
update_data_to_bin
|
python
|
microsoft/qlib
|
scripts/data_collector/yahoo/collector.py
|
https://github.com/microsoft/qlib/blob/master/scripts/data_collector/yahoo/collector.py
|
MIT
|
def train(uri_path: str = None):
"""train model
Returns
-------
pred_score: pandas.DataFrame
predict scores
performance: dict
model performance
"""
# model initialization
model = init_instance_by_config(CSI300_GBDT_TASK["model"])
dataset = init_instance_by_config(CSI300_GBDT_TASK["dataset"])
# To test __repr__
print(dataset)
print(R)
# start exp
with R.start(experiment_name="workflow", uri=uri_path):
R.log_params(**flatten_dict(CSI300_GBDT_TASK))
model.fit(dataset)
R.save_objects(trained_model=model)
# prediction
recorder = R.get_recorder()
# To test __repr__
print(recorder)
# To test get_local_dir
print(recorder.get_local_dir())
rid = recorder.id
sr = SignalRecord(model, dataset, recorder)
sr.generate()
pred_score = sr.load("pred.pkl")
# calculate ic and ric
sar = SigAnaRecord(recorder)
sar.generate()
ic = sar.load("ic.pkl")
ric = sar.load("ric.pkl")
uri_path = R.get_uri()
return pred_score, {"ic": ic, "ric": ric}, rid, uri_path
|
train model
Returns
-------
pred_score: pandas.DataFrame
predict scores
performance: dict
model performance
|
train
|
python
|
microsoft/qlib
|
tests/test_all_pipeline.py
|
https://github.com/microsoft/qlib/blob/master/tests/test_all_pipeline.py
|
MIT
|
def fake_experiment():
"""A fake experiment workflow to test uri
Returns
-------
pass_or_not_for_default_uri: bool
pass_or_not_for_current_uri: bool
temporary_exp_dir: str
"""
# start exp
default_uri = R.get_uri()
current_uri = "file:./temp-test-exp-mag"
with R.start(experiment_name="fake_workflow_for_expm", uri=current_uri):
R.log_params(**flatten_dict(CSI300_GBDT_TASK))
current_uri_to_check = R.get_uri()
default_uri_to_check = R.get_uri()
return default_uri == default_uri_to_check, current_uri == current_uri_to_check, current_uri
|
A fake experiment workflow to test uri
Returns
-------
pass_or_not_for_default_uri: bool
pass_or_not_for_current_uri: bool
temporary_exp_dir: str
|
fake_experiment
|
python
|
microsoft/qlib
|
tests/test_all_pipeline.py
|
https://github.com/microsoft/qlib/blob/master/tests/test_all_pipeline.py
|
MIT
|
def backtest_analysis(pred, rid, uri_path: str = None):
"""backtest and analysis
Parameters
----------
rid : str
the id of the recorder to be used in this function
uri_path: str
mlflow uri path
Returns
-------
analysis : pandas.DataFrame
the analysis result
"""
with R.uri_context(uri=uri_path):
recorder = R.get_recorder(experiment_name="workflow", recorder_id=rid)
dataset = init_instance_by_config(CSI300_GBDT_TASK["dataset"])
model = recorder.load_object("trained_model")
port_analysis_config = {
"executor": {
"class": "SimulatorExecutor",
"module_path": "qlib.backtest.executor",
"kwargs": {
"time_per_step": "day",
"generate_portfolio_metrics": True,
},
},
"strategy": {
"class": "TopkDropoutStrategy",
"module_path": "qlib.contrib.strategy.signal_strategy",
"kwargs": {
"signal": (model, dataset),
"topk": 50,
"n_drop": 5,
},
},
"backtest": {
"start_time": "2017-01-01",
"end_time": "2020-08-01",
"account": 100000000,
"benchmark": CSI300_BENCH,
"exchange_kwargs": {
"freq": "day",
"limit_threshold": 0.095,
"deal_price": "close",
"open_cost": 0.0005,
"close_cost": 0.0015,
"min_cost": 5,
},
},
}
# backtest
par = PortAnaRecord(recorder, port_analysis_config, risk_analysis_freq="day")
par.generate()
analysis_df = par.load("port_analysis_1day.pkl")
print(analysis_df)
return analysis_df
|
backtest and analysis
Parameters
----------
rid : str
the id of the recorder to be used in this function
uri_path: str
mlflow uri path
Returns
-------
analysis : pandas.DataFrame
the analysis result
|
backtest_analysis
|
python
|
microsoft/qlib
|
tests/test_all_pipeline.py
|
https://github.com/microsoft/qlib/blob/master/tests/test_all_pipeline.py
|
MIT
|
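backtest_analysis() completes the loop begun by train(): the recorder id and experiment URI returned from training are passed back so the stored model and dataset can be reloaded and run through the daily TopkDropout backtest. A hedged sketch of the chained calls follows; the row label used at the end is taken from the printed analysis frame and may differ between qlib versions.

# Hedged sketch: chaining the two helpers defined in this test module.
pred_score, metrics, rid, uri_path = train()
analysis_df = backtest_analysis(pred_score, rid, uri_path)
# The analysis frame is MultiIndexed by metric group and statistic.
print(analysis_df.loc["excess_return_with_cost"])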
def test_TSDataSampler2(self):
"""
Extra test TSDataSampler to prevent incorrect filling of nan for the values at the front
"""
datetime_list = ["2000-01-31", "2000-02-29", "2000-03-31", "2000-04-30", "2000-05-31"]
instruments = ["000001", "000002", "000003", "000004", "000005"]
index = pd.MultiIndex.from_product(
[pd.to_datetime(datetime_list), instruments], names=["datetime", "instrument"]
)
data = np.random.randn(len(datetime_list) * len(instruments))
test_df = pd.DataFrame(data=data, index=index, columns=["factor"])
dataset = TSDataSampler(test_df, datetime_list[2], datetime_list[-1], step_len=3)
print()
print("--------------dataset[0]--------------")
print(dataset[0])
print("--------------dataset[1]--------------")
print(dataset[1])
for i in range(3):
self.assertFalse(np.isnan(dataset[0][i]))
self.assertFalse(np.isnan(dataset[1][i]))
self.assertEqual(dataset[0][1], dataset[1][0])
self.assertEqual(dataset[0][2], dataset[1][1])
|
Extra test TSDataSampler to prevent incorrect filling of nan for the values at the front
|
test_TSDataSampler2
|
python
|
microsoft/qlib
|
tests/data_mid_layer_tests/test_dataset.py
|
https://github.com/microsoft/qlib/blob/master/tests/data_mid_layer_tests/test_dataset.py
|
MIT
|
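The test above checks that TSDataSampler builds each step_len-long window by looking backwards from the sample date without padding NaN at the front, even when the sampling range starts only two rows after the data begins. A hedged sketch of the same construction on synthetic data, usable as a quick sanity check, is shown below.

# Hedged sketch: constructing a TSDataSampler over a small MultiIndex frame.
import numpy as np
import pandas as pd
from qlib.data.dataset import TSDataSampler

dates = pd.to_datetime(["2000-01-31", "2000-02-29", "2000-03-31", "2000-04-30", "2000-05-31"])
instruments = ["000001", "000002"]
index = pd.MultiIndex.from_product([dates, instruments], names=["datetime", "instrument"])
df = pd.DataFrame({"factor": np.arange(len(index), dtype=float)}, index=index)

sampler = TSDataSampler(df, "2000-03-31", "2000-05-31", step_len=3)
window = sampler[0]                      # one (step_len, n_features) window
assert window.shape[0] == 3 and not np.isnan(window).any()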
def test_creating_client(self):
"""
Please refer to qlib/workflow/expm.py:MLflowExpManager._client
we don't cache _client (this is helpful to reduce maintenance work when MLflowExpManager's uri is changed)
This implementation is based on the assumption creating a client is fast
"""
start = time.time()
for i in range(10):
_ = mlflow.tracking.MlflowClient(tracking_uri=str(self.TMP_PATH))
end = time.time()
elapsed = end - start
if platform.system() == "Linux":
self.assertLess(elapsed, 1e-2) # it can be done in less than 10ms
else:
self.assertLess(elapsed, 2e-2)
print(elapsed)
|
Please refer to qlib/workflow/expm.py:MLflowExpManager._client
we don't cache _client (this is helpful to reduce maintenance work when MLflowExpManager's uri is changed)
This implementation is based on the assumption creating a client is fast
|
test_creating_client
|
python
|
microsoft/qlib
|
tests/dependency_tests/test_mlflow.py
|
https://github.com/microsoft/qlib/blob/master/tests/dependency_tests/test_mlflow.py
|
MIT
|
def test_multi_proc(self):
"""
For testing if it will raise error
"""
iter_n = 2
pool = Pool(iter_n)
res = []
for _ in range(iter_n):
res.append(pool.apply_async(get_features, (self.FIELDS,), {}))
for r in res:
print(r.get())
pool.close()
pool.join()
|
For testing if it will raise error
|
test_multi_proc
|
python
|
microsoft/qlib
|
tests/misc/test_get_multi_proc.py
|
https://github.com/microsoft/qlib/blob/master/tests/misc/test_get_multi_proc.py
|
MIT
|
def cal_sam_minute(x: pd.Timestamp, sam_minutes: int, region: str):
"""
Sample raw calendar into calendar with sam_minutes freq, shift represents the shift minute the market time
- open time of stock market is [9:30 - shift*pd.Timedelta(minutes=1)]
- mid close time of stock market is [11:29 - shift*pd.Timedelta(minutes=1)]
- mid open time of stock market is [13:00 - shift*pd.Timedelta(minutes=1)]
- close time of stock market is [14:59 - shift*pd.Timedelta(minutes=1)]
"""
# TODO: actually, this version is much faster when no cache or optimization
day_time = pd.Timestamp(x.date())
shift = C.min_data_shift
region_time = REG_MAP[region]
open_time = (
day_time
+ pd.Timedelta(hours=region_time[0].hour, minutes=region_time[0].minute)
- shift * pd.Timedelta(minutes=1)
)
close_time = (
day_time
+ pd.Timedelta(hours=region_time[-1].hour, minutes=region_time[-1].minute)
- shift * pd.Timedelta(minutes=1)
)
if region_time == CN_TIME:
mid_close_time = (
day_time
+ pd.Timedelta(hours=region_time[1].hour, minutes=region_time[1].minute - 1)
- shift * pd.Timedelta(minutes=1)
)
mid_open_time = (
day_time
+ pd.Timedelta(hours=region_time[2].hour, minutes=region_time[2].minute)
- shift * pd.Timedelta(minutes=1)
)
else:
mid_close_time = close_time
mid_open_time = open_time
if open_time <= x <= mid_close_time:
minute_index = (x - open_time).seconds // 60
elif mid_open_time <= x <= close_time:
minute_index = (x - mid_open_time).seconds // 60 + 120
else:
raise ValueError("datetime of calendar is out of range")
minute_index = minute_index // sam_minutes * sam_minutes
if 0 <= minute_index < 120 or region_time != CN_TIME:
return open_time + minute_index * pd.Timedelta(minutes=1)
elif 120 <= minute_index < 240:
return mid_open_time + (minute_index - 120) * pd.Timedelta(minutes=1)
else:
raise ValueError("calendar minute_index error, check `min_data_shift` in qlib.config.C")
|
Sample raw calendar into calendar with sam_minutes freq, shift represents the shift minute the market time
- open time of stock market is [9:30 - shift*pd.Timedelta(minutes=1)]
- mid close time of stock market is [11:29 - shift*pd.Timedelta(minutes=1)]
- mid open time of stock market is [13:00 - shift*pd.Timedelta(minutes=1)]
- close time of stock market is [14:59 - shift*pd.Timedelta(minutes=1)]
|
cal_sam_minute
|
python
|
microsoft/qlib
|
tests/misc/test_utils.py
|
https://github.com/microsoft/qlib/blob/master/tests/misc/test_utils.py
|
MIT
|
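cal_sam_minute above snaps an intraday timestamp onto a sam_minutes grid by converting it to a minute index counted from the (possibly shifted) open, flooring that index to the sampling frequency, and mapping it back through the morning or afternoon session. The arithmetic is easy to check in isolation; the sketch below reproduces it for the CN session layout with shift = 0, independent of qlib's config object, and deliberately omits out-of-session handling.

# Hedged sketch: the minute-index arithmetic behind cal_sam_minute,
# specialised to the CN session (09:30-11:29, 13:00-14:59) with shift = 0.
# Out-of-session timestamps are not handled in this simplified version.
import pandas as pd

def sample_cn_minute(ts: pd.Timestamp, sam_minutes: int) -> pd.Timestamp:
    day = pd.Timestamp(ts.date())
    open_t = day + pd.Timedelta(hours=9, minutes=30)
    mid_close_t = day + pd.Timedelta(hours=11, minutes=29)
    mid_open_t = day + pd.Timedelta(hours=13)
    if open_t <= ts <= mid_close_t:
        idx = (ts - open_t).seconds // 60             # 0..119 (morning session)
    else:
        idx = (ts - mid_open_t).seconds // 60 + 120   # 120..239 (afternoon session)
    idx = idx // sam_minutes * sam_minutes            # floor to the sampling grid
    if idx < 120:
        return open_t + idx * pd.Timedelta(minutes=1)
    return mid_open_t + (idx - 120) * pd.Timedelta(minutes=1)

print(sample_cn_minute(pd.Timestamp("2021-06-01 10:07"), 5))   # 2021-06-01 10:05:00
print(sample_cn_minute(pd.Timestamp("2021-06-01 13:02"), 15))  # 2021-06-01 13:00:00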
def test_update_pred(self):
"""
This test is for testing if it will raise error if the `to_date` is out of the boundary.
"""
task = copy.deepcopy(CSI300_GBDT_TASK)
task["record"] = ["qlib.workflow.record_temp.SignalRecord"]
exp_name = "online_srv_test"
cal = D.calendar()
latest_date = cal[-1]
train_start = latest_date - pd.Timedelta(days=61)
train_end = latest_date - pd.Timedelta(days=41)
task["dataset"]["kwargs"]["segments"] = {
"train": (train_start, train_end),
"valid": (latest_date - pd.Timedelta(days=40), latest_date - pd.Timedelta(days=21)),
"test": (latest_date - pd.Timedelta(days=20), latest_date),
}
task["dataset"]["kwargs"]["handler"]["kwargs"] = {
"start_time": train_start,
"end_time": latest_date,
"fit_start_time": train_start,
"fit_end_time": train_end,
"instruments": "csi300",
}
rec = task_train(task, exp_name)
pred = rec.load_object("pred.pkl")
online_tool = OnlineToolR(exp_name)
online_tool.reset_online_tag(rec) # set to online model
online_tool.update_online_pred(to_date=latest_date + pd.Timedelta(days=10))
good_pred = rec.load_object("pred.pkl")
mod_range = slice(latest_date - pd.Timedelta(days=20), latest_date - pd.Timedelta(days=10))
mod_range2 = slice(latest_date - pd.Timedelta(days=9), latest_date - pd.Timedelta(days=2))
mod_pred = good_pred.copy()
mod_pred.loc[mod_range] = -1
mod_pred.loc[mod_range2] = -2
rec.save_objects(**{"pred.pkl": mod_pred})
online_tool.update_online_pred(
to_date=latest_date - pd.Timedelta(days=10), from_date=latest_date - pd.Timedelta(days=20)
)
updated_pred = rec.load_object("pred.pkl")
# this range falls inside the update window, so it is regenerated back to the original predictions
self.assertTrue((updated_pred.loc[mod_range] == good_pred.loc[mod_range]).all().item())
# this range lies outside the update window, so the manually modified values stay in place
self.assertTrue((updated_pred.loc[mod_range2] == -2).all().item())
|
This test is for testing if it will raise error if the `to_date` is out of the boundary.
|
test_update_pred
|
python
|
microsoft/qlib
|
tests/rolling_tests/test_update_pred.py
|
https://github.com/microsoft/qlib/blob/master/tests/rolling_tests/test_update_pred.py
|
MIT
|
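update_online_pred(), exercised above, regenerates the pred.pkl of the online recorder either up to a target date or within an explicit from_date/to_date window, leaving rows outside that window untouched. A hedged recap of the call on an already-existing experiment is below; the experiment name is a placeholder, and online_models() is assumed to return the recorders currently tagged as online.

# Hedged sketch: refreshing online predictions for an existing experiment.
import pandas as pd
from qlib.workflow.online.utils import OnlineToolR

online_tool = OnlineToolR("online_srv_test")        # placeholder experiment name
print(online_tool.online_models())                  # recorders currently tagged online (assumed accessor)
online_tool.update_online_pred(to_date=pd.Timestamp.now())  # extend pred.pkl up to today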
def test_instrument_storage(self):
"""
The meaning of instrument, such as CSI500:
CSI500 composition changes:
date add remove
2005-01-01 SH600000
2005-01-01 SH600001
2005-01-01 SH600002
2005-02-01 SH600003 SH600000
2005-02-15 SH600000 SH600002
Calendar:
pd.date_range(start="2020-01-01", stop="2020-03-01", freq="1D")
Instrument:
symbol start_time end_time
SH600000 2005-01-01 2005-01-31 (2005-02-01 Last trading day)
SH600000 2005-02-15 2005-03-01
SH600001 2005-01-01 2005-03-01
SH600002 2005-01-01 2005-02-14 (2005-02-15 Last trading day)
SH600003 2005-02-01 2005-03-01
InstrumentStorage:
{
"SH600000": [(2005-01-01, 2005-01-31), (2005-02-15, 2005-03-01)],
"SH600001": [(2005-01-01, 2005-03-01)],
"SH600002": [(2005-01-01, 2005-02-14)],
"SH600003": [(2005-02-01, 2005-03-01)],
}
"""
instrument = InstrumentStorage(market="csi300", provider_uri=self.provider_uri, freq="day")
for inst, spans in instrument.data.items():
assert isinstance(inst, str) and isinstance(
spans, Iterable
), f"{instrument.__class__.__name__} value is not Iterable"
for s_e in spans:
assert (
isinstance(s_e, tuple) and len(s_e) == 2
), f"{instrument.__class__.__name__}.__getitem__(k) TypeError"
print(f"instrument['SH600000']: {instrument['SH600000']}")
instrument = InstrumentStorage(market="csi300", provider_uri="not_found", freq="day")
with self.assertRaises(ValueError):
print(instrument.data)
with self.assertRaises(ValueError):
print(instrument["sSH600000"])
|
The meaning of instrument, such as CSI500:
CSI500 composition changes:
date add remove
2005-01-01 SH600000
2005-01-01 SH600001
2005-01-01 SH600002
2005-02-01 SH600003 SH600000
2005-02-15 SH600000 SH600002
Calendar:
pd.date_range(start="2020-01-01", stop="2020-03-01", freq="1D")
Instrument:
symbol start_time end_time
SH600000 2005-01-01 2005-01-31 (2005-02-01 Last trading day)
SH600000 2005-02-15 2005-03-01
SH600001 2005-01-01 2005-03-01
SH600002 2005-01-01 2005-02-14 (2005-02-15 Last trading day)
SH600003 2005-02-01 2005-03-01
InstrumentStorage:
{
"SH600000": [(2005-01-01, 2005-01-31), (2005-02-15, 2005-03-01)],
"SH600001": [(2005-01-01, 2005-03-01)],
"SH600002": [(2005-01-01, 2005-02-14)],
"SH600003": [(2005-02-01, 2005-03-01)],
}
|
test_instrument_storage
|
python
|
microsoft/qlib
|
tests/storage_tests/test_storage.py
|
https://github.com/microsoft/qlib/blob/master/tests/storage_tests/test_storage.py
|
MIT
|
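The docstring above describes InstrumentStorage as a mapping from symbol to a list of (start_time, end_time) membership spans. The sketch below, using only the standard library, shows how such a mapping can be flipped into a per-date membership check, which is what the index-composition table in the docstring encodes; the dates and symbols are taken from the example, not from real data.

# Hedged sketch: interpreting the {symbol: [(start, end), ...]} structure
# described in the docstring as per-date index membership.
import datetime as dt

spans = {
    "SH600000": [(dt.date(2005, 1, 1), dt.date(2005, 1, 31)),
                 (dt.date(2005, 2, 15), dt.date(2005, 3, 1))],
    "SH600001": [(dt.date(2005, 1, 1), dt.date(2005, 3, 1))],
    "SH600002": [(dt.date(2005, 1, 1), dt.date(2005, 2, 14))],
    "SH600003": [(dt.date(2005, 2, 1), dt.date(2005, 3, 1))],
}

def members_on(day):
    # a symbol is in the index on `day` if any of its spans covers that day
    return sorted(sym for sym, ranges in spans.items()
                  if any(start <= day <= end for start, end in ranges))

print(members_on(dt.date(2005, 2, 10)))  # ['SH600001', 'SH600002', 'SH600003']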
def test_feature_storage(self):
"""
Calendar:
pd.date_range(start="2005-01-01", stop="2005-03-01", freq="1D")
Instrument:
{
"SH600000": [(2005-01-01, 2005-01-31), (2005-02-15, 2005-03-01)],
"SH600001": [(2005-01-01, 2005-03-01)],
"SH600002": [(2005-01-01, 2005-02-14)],
"SH600003": [(2005-02-01, 2005-03-01)],
}
Feature:
Stock data(close):
2005-01-01 ... 2005-02-01 ... 2005-02-14 2005-02-15 ... 2005-03-01
SH600000 1 ... 3 ... 4 5 6
SH600001 1 ... 4 ... 5 6 7
SH600002 1 ... 5 ... 6 nan nan
SH600003 nan ... 1 ... 2 3 4
FeatureStorage(SH600000, close):
[
(calendar.index("2005-01-01"), 1),
...,
(calendar.index("2005-03-01"), 6)
]
====> [(0, 1), ..., (59, 6)]
FeatureStorage(SH600002, close):
[
(calendar.index("2005-01-01"), 1),
...,
(calendar.index("2005-02-14"), 6)
]
===> [(0, 1), ..., (44, 6)]
FeatureStorage(SH600003, close):
[
(calendar.index("2005-02-01"), 1),
...,
(calendar.index("2005-03-01"), 4)
]
===> [(31, 1), ..., (59, 4)]
"""
feature = FeatureStorage(instrument="SZ300677", field="close", freq="day", provider_uri=self.provider_uri)
with self.assertRaises(IndexError):
print(feature[0])
assert isinstance(
feature[3049][1], (float, np.float32)
), f"{feature.__class__.__name__}.__getitem__(i: int) error"
assert len(feature[3049:3052]) == 3, f"{feature.__class__.__name__}.__getitem__(s: slice) error"
print(f"feature[3049: 3052]: \n{feature[3049: 3052]}")
print(f"feature[:].tail(): \n{feature[:].tail()}")
feature = FeatureStorage(instrument="SH600004", field="close", freq="day", provider_uri="not_fount")
with self.assertRaises(ValueError):
print(feature[0])
with self.assertRaises(ValueError):
print(feature[:].empty)
with self.assertRaises(ValueError):
print(feature.data.empty)
|
Calendar:
pd.date_range(start="2005-01-01", stop="2005-03-01", freq="1D")
Instrument:
{
"SH600000": [(2005-01-01, 2005-01-31), (2005-02-15, 2005-03-01)],
"SH600001": [(2005-01-01, 2005-03-01)],
"SH600002": [(2005-01-01, 2005-02-14)],
"SH600003": [(2005-02-01, 2005-03-01)],
}
Feature:
Stock data(close):
2005-01-01 ... 2005-02-01 ... 2005-02-14 2005-02-15 ... 2005-03-01
SH600000 1 ... 3 ... 4 5 6
SH600001 1 ... 4 ... 5 6 7
SH600002 1 ... 5 ... 6 nan nan
SH600003 nan ... 1 ... 2 3 4
FeatureStorage(SH600000, close):
[
(calendar.index("2005-01-01"), 1),
...,
(calendar.index("2005-03-01"), 6)
]
====> [(0, 1), ..., (59, 6)]
FeatureStorage(SH600002, close):
[
(calendar.index("2005-01-01"), 1),
...,
(calendar.index("2005-02-14"), 6)
]
===> [(0, 1), ..., (44, 6)]
FeatureStorage(SH600003, close):
[
(calendar.index("2005-02-01"), 1),
...,
(calendar.index("2005-03-01"), 4)
]
===> [(31, 1), ..., (59, 4)]
|
test_feature_storage
|
python
|
microsoft/qlib
|
tests/storage_tests/test_storage.py
|
https://github.com/microsoft/qlib/blob/master/tests/storage_tests/test_storage.py
|
MIT
|
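FeatureStorage, as sketched in the docstring, stores one field of one instrument as values aligned to calendar indices, so slicing it returns a series whose integer index is the position in the calendar rather than a date. The hedged sketch below shows how such an index-aligned series maps back to dates, using the example values from the docstring rather than a real data bundle.

# Hedged sketch: re-attaching calendar dates to the (calendar_index, value)
# pairs that FeatureStorage conceptually stores, per the docstring example.
import pandas as pd

calendar = pd.date_range(start="2005-01-01", end="2005-03-01", freq="1D")
# FeatureStorage(SH600003, close) starts at calendar index 31 (2005-02-01).
close_sh600003 = pd.Series([1.0, 2.0, 3.0, 4.0], index=[31, 44, 45, 59])

dated = close_sh600003.rename(index=lambda i: calendar[i])
print(dated)
# 2005-02-01    1.0
# 2005-02-14    2.0
# 2005-02-15    3.0
# 2005-03-01    4.0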
def convert_realtime(
self,
voice_id: str,
*,
text: typing.Iterator[str],
model_id: typing.Optional[str] = OMIT,
output_format: typing.Optional[OutputFormat] = "mp3_44100_128",
voice_settings: typing.Optional[VoiceSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Iterator[bytes]:
"""
Converts text into speech using a voice of your choice and returns audio.
Parameters:
- voice_id: str. Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
- text: typing.Iterator[str]. The text that will get converted into speech.
- model_id: typing.Optional[str]. Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for text to speech, you can check this using the can_do_text_to_speech property.
- voice_settings: typing.Optional[VoiceSettings]. Voice settings overriding stored settings for the given voice. They are applied only on the given request.
- request_options: typing.Optional[RequestOptions]. Request-specific configuration.
---
from elevenlabs import PronunciationDictionaryVersionLocator, VoiceSettings
from elevenlabs.client import ElevenLabs
def get_text() -> typing.Iterator[str]:
yield "Hello, how are you?"
yield "I am fine, thank you."
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.text_to_speech.convert_realtime(
voice_id="string",
text=get_text(),
model_id="string",
voice_settings=VoiceSettings(
stability=1.1,
similarity_boost=1.1,
style=1.1,
use_speaker_boost=True,
),
)
"""
with connect(
urllib.parse.urljoin(
self._ws_base_url,
f"v1/text-to-speech/{jsonable_encoder(voice_id)}/stream-input?model_id={model_id}&output_format={output_format}"
),
additional_headers=jsonable_encoder(
remove_none_from_dict(
{
**self._client_wrapper.get_headers(),
**(request_options.get("additional_headers", {}) if request_options is not None else {}),
}
)
)
) as socket:
try:
socket.send(json.dumps(
dict(
text=" ",
try_trigger_generation=True,
voice_settings=voice_settings.dict() if voice_settings else None,
generation_config=dict(
chunk_length_schedule=[50],
),
)
))
except websockets.exceptions.ConnectionClosedError as ce:
raise ApiError(body=ce.reason, status_code=ce.code)
try:
for text_chunk in text_chunker(text):
data = dict(text=text_chunk, try_trigger_generation=True)
socket.send(json.dumps(data))
try:
data = json.loads(socket.recv(1e-2))
if "audio" in data and data["audio"]:
yield base64.b64decode(data["audio"]) # type: ignore
except TimeoutError:
pass
socket.send(json.dumps(dict(text="")))
while True:
data = json.loads(socket.recv())
if "audio" in data and data["audio"]:
yield base64.b64decode(data["audio"]) # type: ignore
except websockets.exceptions.ConnectionClosed as ce:
if "message" in data:
raise ApiError(body=data, status_code=ce.code)
elif ce.code != 1000:
raise ApiError(body=ce.reason, status_code=ce.code)
|
Converts text into speech using a voice of your choice and returns audio.
Parameters:
- voice_id: str. Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
- text: typing.Iterator[str]. The text that will get converted into speech.
- model_id: typing.Optional[str]. Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for text to speech, you can check this using the can_do_text_to_speech property.
- voice_settings: typing.Optional[VoiceSettings]. Voice settings overriding stored settings for the given voice. They are applied only on the given request.
- request_options: typing.Optional[RequestOptions]. Request-specific configuration.
---
from elevenlabs import PronunciationDictionaryVersionLocator, VoiceSettings
from elevenlabs.client import ElevenLabs
def get_text() -> typing.Iterator[str]:
yield "Hello, how are you?"
yield "I am fine, thank you."
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.text_to_speech.convert_realtime(
voice_id="string",
text=get_text(),
model_id="string",
voice_settings=VoiceSettings(
stability=1.1,
similarity_boost=1.1,
style=1.1,
use_speaker_boost=True,
),
)
|
convert_realtime
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/realtime_tts.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/realtime_tts.py
|
MIT
|
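convert_realtime above yields encoded audio chunks (MP3 by default) as they arrive over the websocket, so the caller is responsible for draining the iterator. A hedged usage sketch that writes the streamed audio to disk is shown below; the voice and model identifiers are placeholders, not real values.

# Hedged sketch: draining the audio iterator returned by convert_realtime.
# voice_id and model_id below are placeholders, not real identifiers.
import typing
from elevenlabs.client import ElevenLabs

def sentences() -> typing.Iterator[str]:
    yield "Hello, how are you? "
    yield "I am fine, thank you."

client = ElevenLabs(api_key="YOUR_API_KEY")
audio_stream = client.text_to_speech.convert_realtime(
    voice_id="VOICE_ID",
    text=sentences(),
    model_id="MODEL_ID",
)
with open("output.mp3", "wb") as f:
    for chunk in audio_stream:
        f.write(chunk)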
def construct_event(self, rawBody: str, sig_header: str, secret: str) -> Dict:
"""
Constructs a webhook event object from a payload and signature.
Verifies the webhook signature to ensure the event came from ElevenLabs.
Args:
rawBody: The webhook request body. Must be the raw body, not a JSON object
sig_header: The signature header from the request
secret: Your webhook secret
Returns:
The verified webhook event
Raises:
BadRequestError: If the signature is invalid or missing
"""
if not sig_header:
raise BadRequestError(body="Missing signature header")
if not secret:
raise BadRequestError(body="Webhook secret not configured")
headers = sig_header.split(',')
timestamp = None
signature = None
for header in headers:
if header.startswith('t='):
timestamp = header[2:]
elif header.startswith('v0='):
signature = header
if not timestamp or not signature:
raise BadRequestError(body="No signature hash found with expected scheme v0")
# Validate timestamp
req_timestamp = int(timestamp) * 1000
tolerance = int(time.time() * 1000) - 30 * 60 * 1000
if req_timestamp < tolerance:
raise BadRequestError(body="Timestamp outside the tolerance zone")
# Validate hash
message = f"{timestamp}.{rawBody}"
digest = "v0=" + hmac.new(
secret.encode('utf-8'),
message.encode('utf-8'),
hashlib.sha256
).hexdigest()
if signature != digest:
raise BadRequestError(
body="Signature hash does not match the expected signature hash for payload"
)
return json.loads(rawBody)
|
Constructs a webhook event object from a payload and signature.
Verifies the webhook signature to ensure the event came from ElevenLabs.
Args:
rawBody: The webhook request body. Must be the raw body, not a JSON object
sig_header: The signature header from the request
secret: Your webhook secret
Returns:
The verified webhook event
Raises:
BadRequestError: If the signature is invalid or missing
|
construct_event
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/webhooks.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/webhooks.py
|
MIT
|
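construct_event above verifies a signature header of the form t=<unix seconds>,v0=<hmac> by recomputing HMAC-SHA256 over "<timestamp>.<raw body>" with the webhook secret and rejecting stale timestamps. The sketch below generates a matching header for a given body and secret, which is handy for unit-testing the verifier; it uses only the standard library, and the payload is illustrative.

# Hedged sketch: producing a signature header that construct_event accepts.
import hashlib
import hmac
import json
import time

def sign_webhook(raw_body: str, secret: str) -> str:
    timestamp = str(int(time.time()))
    digest = hmac.new(secret.encode("utf-8"),
                      f"{timestamp}.{raw_body}".encode("utf-8"),
                      hashlib.sha256).hexdigest()
    return f"t={timestamp},v0={digest}"

body = json.dumps({"type": "example.event", "data": {}})   # illustrative payload
header = sign_webhook(body, secret="whsec_test")
# construct_event(body, header, "whsec_test") on the webhooks client would now
# pass both the timestamp and the signature check and return the parsed payload.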
def convert(
self,
*,
audio: core.File,
file_format: typing.Optional[AudioIsolationConvertRequestFileFormat] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Iterator[bytes]:
"""
Removes background noise from audio.
Parameters
----------
audio : core.File
See core.File for more documentation
file_format : typing.Optional[AudioIsolationConvertRequestFileFormat]
The format of input audio. Options are 'pcm_s16le_16' or 'other' For `pcm_s16le_16`, the input audio must be 16-bit PCM at a 16kHz sample rate, single channel (mono), and little-endian byte order. Latency will be lower than with passing an encoded waveform.
request_options : typing.Optional[RequestOptions]
Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Returns
-------
typing.Iterator[bytes]
Successful Response
"""
with self._raw_client.convert(audio=audio, file_format=file_format, request_options=request_options) as r:
yield from r.data
|
Removes background noise from audio.
Parameters
----------
audio : core.File
See core.File for more documentation
file_format : typing.Optional[AudioIsolationConvertRequestFileFormat]
The format of input audio. Options are 'pcm_s16le_16' or 'other' For `pcm_s16le_16`, the input audio must be 16-bit PCM at a 16kHz sample rate, single channel (mono), and little-endian byte order. Latency will be lower than with passing an encoded waveform.
request_options : typing.Optional[RequestOptions]
Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Returns
-------
typing.Iterator[bytes]
Successful Response
|
convert
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/audio_isolation/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/audio_isolation/client.py
|
MIT
|
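The convert method above streams the isolated audio back as an iterator of bytes; per its docstring, a custom chunk_size can be passed through request_options. A hedged usage sketch writing the result to disk follows; it assumes the audio-isolation client is exposed as client.audio_isolation, and the file names and chunk size are arbitrary.

# Hedged sketch: calling the audio-isolation endpoint and saving the result.
from elevenlabs.client import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")
with open("noisy_input.mp3", "rb") as src:
    isolated = client.audio_isolation.convert(
        audio=src,
        request_options={"chunk_size": 4096},  # optional, per the docstring
    )
    with open("isolated_output.mp3", "wb") as dst:
        for chunk in isolated:
            dst.write(chunk)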
def stream(
self,
*,
audio: core.File,
file_format: typing.Optional[AudioIsolationStreamRequestFileFormat] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Iterator[bytes]:
"""
Removes background noise from audio.
Parameters
----------
audio : core.File
See core.File for more documentation
file_format : typing.Optional[AudioIsolationStreamRequestFileFormat]
The format of input audio. Options are 'pcm_s16le_16' or 'other' For `pcm_s16le_16`, the input audio must be 16-bit PCM at a 16kHz sample rate, single channel (mono), and little-endian byte order. Latency will be lower than with passing an encoded waveform.
request_options : typing.Optional[RequestOptions]
Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Returns
-------
typing.Iterator[bytes]
Successful Response
"""
with self._raw_client.stream(audio=audio, file_format=file_format, request_options=request_options) as r:
yield from r.data
|
Removes background noise from audio.
Parameters
----------
audio : core.File
See core.File for more documentation
file_format : typing.Optional[AudioIsolationStreamRequestFileFormat]
The format of input audio. Options are 'pcm_s16le_16' or 'other' For `pcm_s16le_16`, the input audio must be 16-bit PCM at a 16kHz sample rate, single channel (mono), and little-endian byte order. Latency will be lower than with passing an encoded waveform.
request_options : typing.Optional[RequestOptions]
Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Returns
-------
typing.Iterator[bytes]
Successful Response
|
stream
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/audio_isolation/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/audio_isolation/client.py
|
MIT
|
async def convert(
self,
*,
audio: core.File,
file_format: typing.Optional[AudioIsolationConvertRequestFileFormat] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.AsyncIterator[bytes]:
"""
Removes background noise from audio.
Parameters
----------
audio : core.File
See core.File for more documentation
file_format : typing.Optional[AudioIsolationConvertRequestFileFormat]
The format of input audio. Options are 'pcm_s16le_16' or 'other' For `pcm_s16le_16`, the input audio must be 16-bit PCM at a 16kHz sample rate, single channel (mono), and little-endian byte order. Latency will be lower than with passing an encoded waveform.
request_options : typing.Optional[RequestOptions]
Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Returns
-------
typing.AsyncIterator[bytes]
Successful Response
"""
async with self._raw_client.convert(audio=audio, file_format=file_format, request_options=request_options) as r:
async for _chunk in r.data:
yield _chunk
|
Removes background noise from audio.
Parameters
----------
audio : core.File
See core.File for more documentation
file_format : typing.Optional[AudioIsolationConvertRequestFileFormat]
The format of input audio. Options are 'pcm_s16le_16' or 'other' For `pcm_s16le_16`, the input audio must be 16-bit PCM at a 16kHz sample rate, single channel (mono), and little-endian byte order. Latency will be lower than with passing an encoded waveform.
request_options : typing.Optional[RequestOptions]
Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Returns
-------
typing.AsyncIterator[bytes]
Successful Response
|
convert
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/audio_isolation/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/audio_isolation/client.py
|
MIT
|
async def stream(
self,
*,
audio: core.File,
file_format: typing.Optional[AudioIsolationStreamRequestFileFormat] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.AsyncIterator[bytes]:
"""
Removes background noise from audio.
Parameters
----------
audio : core.File
See core.File for more documentation
file_format : typing.Optional[AudioIsolationStreamRequestFileFormat]
The format of input audio. Options are 'pcm_s16le_16' or 'other' For `pcm_s16le_16`, the input audio must be 16-bit PCM at a 16kHz sample rate, single channel (mono), and little-endian byte order. Latency will be lower than with passing an encoded waveform.
request_options : typing.Optional[RequestOptions]
Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Returns
-------
typing.AsyncIterator[bytes]
Successful Response
"""
async with self._raw_client.stream(audio=audio, file_format=file_format, request_options=request_options) as r:
async for _chunk in r.data:
yield _chunk
|
Removes background noise from audio.
Parameters
----------
audio : core.File
See core.File for more documentation
file_format : typing.Optional[AudioIsolationStreamRequestFileFormat]
The format of input audio. Options are 'pcm_s16le_16' or 'other' For `pcm_s16le_16`, the input audio must be 16-bit PCM at a 16kHz sample rate, single channel (mono), and little-endian byte order. Latency will be lower than with passing an encoded waveform.
request_options : typing.Optional[RequestOptions]
Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Returns
-------
typing.AsyncIterator[bytes]
Successful Response
|
stream
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/audio_isolation/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/audio_isolation/client.py
|
MIT
|
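The async variants mirror the sync client but return an async iterator, so they must be drained inside a coroutine. A hedged sketch is below, assuming the async audio-isolation client is exposed as client.audio_isolation on AsyncElevenLabs; file names are placeholders.

# Hedged sketch: async audio isolation; accessor names are assumptions.
import asyncio
from elevenlabs import AsyncElevenLabs

async def main() -> None:
    client = AsyncElevenLabs(api_key="YOUR_API_KEY")
    chunks = []
    with open("noisy_input.mp3", "rb") as src:
        async for chunk in client.audio_isolation.convert(audio=src):
            chunks.append(chunk)
    with open("isolated_output.mp3", "wb") as dst:
        dst.write(b"".join(chunks))

asyncio.run(main())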
def convert(
self,
*,
audio: core.File,
file_format: typing.Optional[AudioIsolationConvertRequestFileFormat] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Iterator[HttpResponse[typing.Iterator[bytes]]]:
"""
Removes background noise from audio.
Parameters
----------
audio : core.File
See core.File for more documentation
file_format : typing.Optional[AudioIsolationConvertRequestFileFormat]
The format of input audio. Options are 'pcm_s16le_16' or 'other' For `pcm_s16le_16`, the input audio must be 16-bit PCM at a 16kHz sample rate, single channel (mono), and little-endian byte order. Latency will be lower than with passing an encoded waveform.
request_options : typing.Optional[RequestOptions]
Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Returns
-------
typing.Iterator[HttpResponse[typing.Iterator[bytes]]]
Successful Response
"""
with self._client_wrapper.httpx_client.stream(
"v1/audio-isolation",
base_url=self._client_wrapper.get_environment().base,
method="POST",
data={
"file_format": file_format,
},
files={
"audio": audio,
},
request_options=request_options,
omit=OMIT,
force_multipart=True,
) as _response:
def _stream() -> HttpResponse[typing.Iterator[bytes]]:
try:
if 200 <= _response.status_code < 300:
_chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
return HttpResponse(
response=_response, data=(_chunk for _chunk in _response.iter_bytes(chunk_size=_chunk_size))
)
_response.read()
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(
status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
yield _stream()
|
Removes background noise from audio.
Parameters
----------
audio : core.File
See core.File for more documentation
file_format : typing.Optional[AudioIsolationConvertRequestFileFormat]
The format of input audio. Options are 'pcm_s16le_16' or 'other' For `pcm_s16le_16`, the input audio must be 16-bit PCM at a 16kHz sample rate, single channel (mono), and little-endian byte order. Latency will be lower than with passing an encoded waveform.
request_options : typing.Optional[RequestOptions]
Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Returns
-------
typing.Iterator[HttpResponse[typing.Iterator[bytes]]]
Successful Response
|
convert
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/audio_isolation/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/audio_isolation/raw_client.py
|
MIT
|
def stream(
self,
*,
audio: core.File,
file_format: typing.Optional[AudioIsolationStreamRequestFileFormat] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Iterator[HttpResponse[typing.Iterator[bytes]]]:
"""
Removes background noise from audio.
Parameters
----------
audio : core.File
See core.File for more documentation
file_format : typing.Optional[AudioIsolationStreamRequestFileFormat]
The format of input audio. Options are 'pcm_s16le_16' or 'other' For `pcm_s16le_16`, the input audio must be 16-bit PCM at a 16kHz sample rate, single channel (mono), and little-endian byte order. Latency will be lower than with passing an encoded waveform.
request_options : typing.Optional[RequestOptions]
Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Returns
-------
typing.Iterator[HttpResponse[typing.Iterator[bytes]]]
Successful Response
"""
with self._client_wrapper.httpx_client.stream(
"v1/audio-isolation/stream",
base_url=self._client_wrapper.get_environment().base,
method="POST",
data={
"file_format": file_format,
},
files={
"audio": audio,
},
request_options=request_options,
omit=OMIT,
force_multipart=True,
) as _response:
def _stream() -> HttpResponse[typing.Iterator[bytes]]:
try:
if 200 <= _response.status_code < 300:
_chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
return HttpResponse(
response=_response, data=(_chunk for _chunk in _response.iter_bytes(chunk_size=_chunk_size))
)
_response.read()
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(
status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
yield _stream()
|
Removes background noise from audio.
Parameters
----------
audio : core.File
See core.File for more documentation
file_format : typing.Optional[AudioIsolationStreamRequestFileFormat]
The format of input audio. Options are 'pcm_s16le_16' or 'other' For `pcm_s16le_16`, the input audio must be 16-bit PCM at a 16kHz sample rate, single channel (mono), and little-endian byte order. Latency will be lower than with passing an encoded waveform.
request_options : typing.Optional[RequestOptions]
Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Returns
-------
typing.Iterator[HttpResponse[typing.Iterator[bytes]]]
Successful Response
|
stream
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/audio_isolation/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/audio_isolation/raw_client.py
|
MIT
|
async def convert(
self,
*,
audio: core.File,
file_format: typing.Optional[AudioIsolationConvertRequestFileFormat] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[bytes]]]:
"""
Removes background noise from audio.
Parameters
----------
audio : core.File
See core.File for more documentation
file_format : typing.Optional[AudioIsolationConvertRequestFileFormat]
The format of input audio. Options are 'pcm_s16le_16' or 'other' For `pcm_s16le_16`, the input audio must be 16-bit PCM at a 16kHz sample rate, single channel (mono), and little-endian byte order. Latency will be lower than with passing an encoded waveform.
request_options : typing.Optional[RequestOptions]
Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Returns
-------
typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[bytes]]]
Successful Response
"""
async with self._client_wrapper.httpx_client.stream(
"v1/audio-isolation",
base_url=self._client_wrapper.get_environment().base,
method="POST",
data={
"file_format": file_format,
},
files={
"audio": audio,
},
request_options=request_options,
omit=OMIT,
force_multipart=True,
) as _response:
async def _stream() -> AsyncHttpResponse[typing.AsyncIterator[bytes]]:
try:
if 200 <= _response.status_code < 300:
_chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
return AsyncHttpResponse(
response=_response,
data=(_chunk async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size)),
)
await _response.aread()
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(
status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
yield await _stream()
|
Removes background noise from audio.
Parameters
----------
audio : core.File
See core.File for more documentation
file_format : typing.Optional[AudioIsolationConvertRequestFileFormat]
The format of input audio. Options are 'pcm_s16le_16' or 'other' For `pcm_s16le_16`, the input audio must be 16-bit PCM at a 16kHz sample rate, single channel (mono), and little-endian byte order. Latency will be lower than with passing an encoded waveform.
request_options : typing.Optional[RequestOptions]
Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Returns
-------
typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[bytes]]]
Successful Response
|
convert
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/audio_isolation/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/audio_isolation/raw_client.py
|
MIT
|
async def stream(
self,
*,
audio: core.File,
file_format: typing.Optional[AudioIsolationStreamRequestFileFormat] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[bytes]]]:
"""
Removes background noise from audio.
Parameters
----------
audio : core.File
See core.File for more documentation
file_format : typing.Optional[AudioIsolationStreamRequestFileFormat]
The format of input audio. Options are 'pcm_s16le_16' or 'other' For `pcm_s16le_16`, the input audio must be 16-bit PCM at a 16kHz sample rate, single channel (mono), and little-endian byte order. Latency will be lower than with passing an encoded waveform.
request_options : typing.Optional[RequestOptions]
Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Returns
-------
typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[bytes]]]
Successful Response
"""
async with self._client_wrapper.httpx_client.stream(
"v1/audio-isolation/stream",
base_url=self._client_wrapper.get_environment().base,
method="POST",
data={
"file_format": file_format,
},
files={
"audio": audio,
},
request_options=request_options,
omit=OMIT,
force_multipart=True,
) as _response:
async def _stream() -> AsyncHttpResponse[typing.AsyncIterator[bytes]]:
try:
if 200 <= _response.status_code < 300:
_chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
return AsyncHttpResponse(
response=_response,
data=(_chunk async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size)),
)
await _response.aread()
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(
status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
yield await _stream()
|
Removes background noise from audio.
Parameters
----------
audio : core.File
See core.File for more documentation
file_format : typing.Optional[AudioIsolationStreamRequestFileFormat]
The format of input audio. Options are 'pcm_s16le_16' or 'other' For `pcm_s16le_16`, the input audio must be 16-bit PCM at a 16kHz sample rate, single channel (mono), and little-endian byte order. Latency will be lower than with passing an encoded waveform.
request_options : typing.Optional[RequestOptions]
Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
Returns
-------
typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[bytes]]]
Successful Response
|
stream
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/audio_isolation/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/audio_isolation/raw_client.py
|
MIT
|
def create(
self,
*,
name: str,
image: typing.Optional[str] = OMIT,
author: typing.Optional[str] = OMIT,
title: typing.Optional[str] = OMIT,
small: typing.Optional[bool] = OMIT,
text_color: typing.Optional[str] = OMIT,
background_color: typing.Optional[str] = OMIT,
sessionization: typing.Optional[int] = OMIT,
voice_id: typing.Optional[str] = OMIT,
model_id: typing.Optional[str] = OMIT,
file: typing.Optional[core.File] = OMIT,
auto_convert: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> AudioNativeCreateProjectResponseModel:
"""
Creates Audio Native enabled project, optionally starts conversion and returns project ID and embeddable HTML snippet.
Parameters
----------
name : str
Project name.
image : typing.Optional[str]
(Deprecated) Image URL used in the player. If not provided, default image set in the Player settings is used.
author : typing.Optional[str]
Author used in the player and inserted at the start of the uploaded article. If not provided, the default author set in the Player settings is used.
title : typing.Optional[str]
Title used in the player and inserted at the top of the uploaded article. If not provided, the default title set in the Player settings is used.
small : typing.Optional[bool]
(Deprecated) Whether to use small player or not. If not provided, default value set in the Player settings is used.
text_color : typing.Optional[str]
Text color used in the player. If not provided, default text color set in the Player settings is used.
background_color : typing.Optional[str]
Background color used in the player. If not provided, default background color set in the Player settings is used.
sessionization : typing.Optional[int]
(Deprecated) Specifies for how many minutes to persist the session across page reloads. If not provided, default sessionization set in the Player settings is used.
voice_id : typing.Optional[str]
Voice ID used to voice the content. If not provided, default voice ID set in the Player settings is used.
model_id : typing.Optional[str]
TTS Model ID used in the player. If not provided, default model ID set in the Player settings is used.
file : typing.Optional[core.File]
See core.File for more documentation
auto_convert : typing.Optional[bool]
Whether to auto convert the project to audio or not.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AudioNativeCreateProjectResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.audio_native.create(
name="name",
)
"""
_response = self._raw_client.create(
name=name,
image=image,
author=author,
title=title,
small=small,
text_color=text_color,
background_color=background_color,
sessionization=sessionization,
voice_id=voice_id,
model_id=model_id,
file=file,
auto_convert=auto_convert,
request_options=request_options,
)
return _response.data
|
Creates Audio Native enabled project, optionally starts conversion and returns project ID and embeddable HTML snippet.
Parameters
----------
name : str
Project name.
image : typing.Optional[str]
(Deprecated) Image URL used in the player. If not provided, default image set in the Player settings is used.
author : typing.Optional[str]
Author used in the player and inserted at the start of the uploaded article. If not provided, the default author set in the Player settings is used.
title : typing.Optional[str]
Title used in the player and inserted at the top of the uploaded article. If not provided, the default title set in the Player settings is used.
small : typing.Optional[bool]
(Deprecated) Whether to use small player or not. If not provided, default value set in the Player settings is used.
text_color : typing.Optional[str]
Text color used in the player. If not provided, default text color set in the Player settings is used.
background_color : typing.Optional[str]
Background color used in the player. If not provided, default background color set in the Player settings is used.
sessionization : typing.Optional[int]
(Deprecated) Specifies for how many minutes to persist the session across page reloads. If not provided, default sessionization set in the Player settings is used.
voice_id : typing.Optional[str]
Voice ID used to voice the content. If not provided, default voice ID set in the Player settings is used.
model_id : typing.Optional[str]
TTS Model ID used in the player. If not provided, default model ID set in the Player settings is used.
file : typing.Optional[core.File]
See core.File for more documentation
auto_convert : typing.Optional[bool]
Whether to auto convert the project to audio or not.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AudioNativeCreateProjectResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.audio_native.create(
name="name",
)
|
create
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/audio_native/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/audio_native/client.py
|
MIT
|
def get_settings(
self, project_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> GetAudioNativeProjectSettingsResponseModel:
"""
Get player settings for the specific project.
Parameters
----------
project_id : str
The ID of the Studio project.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetAudioNativeProjectSettingsResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.audio_native.get_settings(
project_id="21m00Tcm4TlvDq8ikWAM",
)
"""
_response = self._raw_client.get_settings(project_id, request_options=request_options)
return _response.data
|
Get player settings for the specific project.
Parameters
----------
project_id : str
The ID of the Studio project.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetAudioNativeProjectSettingsResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.audio_native.get_settings(
project_id="21m00Tcm4TlvDq8ikWAM",
)
|
get_settings
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/audio_native/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/audio_native/client.py
|
MIT
|
def update(
self,
project_id: str,
*,
file: typing.Optional[core.File] = OMIT,
auto_convert: typing.Optional[bool] = OMIT,
auto_publish: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> AudioNativeEditContentResponseModel:
"""
Updates content for the specific AudioNative Project.
Parameters
----------
project_id : str
The ID of the project to be used. You can use the [List projects](/docs/api-reference/studio/get-projects) endpoint to list all the available projects.
file : typing.Optional[core.File]
See core.File for more documentation
auto_convert : typing.Optional[bool]
Whether to auto convert the project to audio or not.
auto_publish : typing.Optional[bool]
Whether to auto publish the new project snapshot after it's converted.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AudioNativeEditContentResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.audio_native.update(
project_id="21m00Tcm4TlvDq8ikWAM",
)
"""
_response = self._raw_client.update(
project_id, file=file, auto_convert=auto_convert, auto_publish=auto_publish, request_options=request_options
)
return _response.data
|
Updates content for the specific AudioNative Project.
Parameters
----------
project_id : str
The ID of the project to be used. You can use the [List projects](/docs/api-reference/studio/get-projects) endpoint to list all the available projects.
file : typing.Optional[core.File]
See core.File for more documentation
auto_convert : typing.Optional[bool]
Whether to auto convert the project to audio or not.
auto_publish : typing.Optional[bool]
Whether to auto publish the new project snapshot after it's converted.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AudioNativeEditContentResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.audio_native.update(
project_id="21m00Tcm4TlvDq8ikWAM",
)
|
update
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/audio_native/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/audio_native/client.py
|
MIT
|
async def create(
self,
*,
name: str,
image: typing.Optional[str] = OMIT,
author: typing.Optional[str] = OMIT,
title: typing.Optional[str] = OMIT,
small: typing.Optional[bool] = OMIT,
text_color: typing.Optional[str] = OMIT,
background_color: typing.Optional[str] = OMIT,
sessionization: typing.Optional[int] = OMIT,
voice_id: typing.Optional[str] = OMIT,
model_id: typing.Optional[str] = OMIT,
file: typing.Optional[core.File] = OMIT,
auto_convert: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> AudioNativeCreateProjectResponseModel:
"""
Creates Audio Native enabled project, optionally starts conversion and returns project ID and embeddable HTML snippet.
Parameters
----------
name : str
Project name.
image : typing.Optional[str]
(Deprecated) Image URL used in the player. If not provided, default image set in the Player settings is used.
author : typing.Optional[str]
Author used in the player and inserted at the start of the uploaded article. If not provided, the default author set in the Player settings is used.
title : typing.Optional[str]
Title used in the player and inserted at the top of the uploaded article. If not provided, the default title set in the Player settings is used.
small : typing.Optional[bool]
(Deprecated) Whether to use small player or not. If not provided, default value set in the Player settings is used.
text_color : typing.Optional[str]
Text color used in the player. If not provided, default text color set in the Player settings is used.
background_color : typing.Optional[str]
Background color used in the player. If not provided, default background color set in the Player settings is used.
sessionization : typing.Optional[int]
(Deprecated) Specifies for how many minutes to persist the session across page reloads. If not provided, default sessionization set in the Player settings is used.
voice_id : typing.Optional[str]
Voice ID used to voice the content. If not provided, default voice ID set in the Player settings is used.
model_id : typing.Optional[str]
TTS Model ID used in the player. If not provided, default model ID set in the Player settings is used.
file : typing.Optional[core.File]
See core.File for more documentation
auto_convert : typing.Optional[bool]
Whether to auto convert the project to audio or not.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AudioNativeCreateProjectResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.audio_native.create(
name="name",
)
asyncio.run(main())
"""
_response = await self._raw_client.create(
name=name,
image=image,
author=author,
title=title,
small=small,
text_color=text_color,
background_color=background_color,
sessionization=sessionization,
voice_id=voice_id,
model_id=model_id,
file=file,
auto_convert=auto_convert,
request_options=request_options,
)
return _response.data
|
Creates Audio Native enabled project, optionally starts conversion and returns project ID and embeddable HTML snippet.
Parameters
----------
name : str
Project name.
image : typing.Optional[str]
(Deprecated) Image URL used in the player. If not provided, default image set in the Player settings is used.
author : typing.Optional[str]
Author used in the player and inserted at the start of the uploaded article. If not provided, the default author set in the Player settings is used.
title : typing.Optional[str]
Title used in the player and inserted at the top of the uploaded article. If not provided, the default title set in the Player settings is used.
small : typing.Optional[bool]
(Deprecated) Whether to use small player or not. If not provided, default value set in the Player settings is used.
text_color : typing.Optional[str]
Text color used in the player. If not provided, default text color set in the Player settings is used.
background_color : typing.Optional[str]
Background color used in the player. If not provided, default background color set in the Player settings is used.
sessionization : typing.Optional[int]
(Deprecated) Specifies for how many minutes to persist the session across page reloads. If not provided, default sessionization set in the Player settings is used.
voice_id : typing.Optional[str]
Voice ID used to voice the content. If not provided, default voice ID set in the Player settings is used.
model_id : typing.Optional[str]
TTS Model ID used in the player. If not provided, default model ID set in the Player settings is used.
file : typing.Optional[core.File]
See core.File for more documentation
auto_convert : typing.Optional[bool]
Whether to auto convert the project to audio or not.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AudioNativeCreateProjectResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.audio_native.create(
name="name",
)
asyncio.run(main())
|
create
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/audio_native/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/audio_native/client.py
|
MIT
|