id (int64, 0–190k) | prompt (string, 21–13.4M chars) | docstring (string, 1–12k chars, nullable ⌀)
---|---|---|
1,012 | import os
from typing import Dict, List
import csv
The provided code snippet includes necessary dependencies for implementing the `get_default_result_dict` function. Write a Python function `def get_default_result_dict(dir: str, data_name: str, index_name: str, fea_name: str) -> Dict` to solve the following problem:
Get the default result dict based on the experimental factors. Args: dir (str): the path of one single extracted feature directory. data_name (str): the name of the dataset. index_name (str): the name of query process. fea_name (str): the name of the features to be loaded. Returns: result_dict (Dict): a default configuration dict.
Here is the function:
def get_default_result_dict(dir: str, data_name: str, index_name: str, fea_name: str) -> Dict:
"""
Get the default result dict based on the experimental factors.
Args:
dir (str): the path of one single extracted feature directory.
data_name (str): the name of the dataset.
index_name (str): the name of query process.
fea_name (str): the name of the features to be loaded.
Returns:
result_dict (Dict): a default configuration dict.
"""
result_dict = {
"data_name": data_name.split("_")[0],
"pre_process_name": dir.split("_")[2],
"model_name": "_".join(dir.split("_")[-2:]),
"feature_map_name": fea_name.split("_")[0],
"post_process_name": index_name
}
if len(fea_name.split("_")) == 1:
result_dict["aggregator_name"] = "none"
else:
result_dict["aggregator_name"] = fea_name.split("_")[1]
return result_dict | Get the default result dict based on the experimental factors. Args: dir (str): the path of one single extracted feature directory. data_name (str): the name of the dataset. index_name (str): the name of query process. fea_name (str): the name of the features to be loaded. Returns: result_dict (Dict): a default configuration dict. |
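A minimal usage sketch of `get_default_result_dict`, with made-up directory and feature names, to illustrate how each factor is parsed from the underscore-separated identifiers:

```python
# All names below are hypothetical examples, chosen only to show the splitting rules.
result = get_default_result_dict(
    dir="market_gallery_identity_paper_resnet50",  # dir.split("_")[2] -> "identity", last two tokens -> "paper_resnet50"
    data_name="market_gallery",                    # first token -> "market"
    index_name="pca_knn",                          # used verbatim as post_process_name
    fea_name="pool5_GAP",                          # feature map "pool5", aggregator "GAP"
)
# result == {"data_name": "market", "pre_process_name": "identity",
#            "model_name": "paper_resnet50", "feature_map_name": "pool5",
#            "post_process_name": "pca_knn", "aggregator_name": "GAP"}
```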
1,013 | import os
from typing import Dict, List
import csv
The provided code snippet includes necessary dependencies for implementing the `save_to_csv` function. Write a Python function `def save_to_csv(results: List[Dict], csv_path: str) -> None` to solve the following problem:
Save the search results in a csv format file. Args: results (List): a list of retrieval results. csv_path (str): the path for saving the csv file.
Here is the function:
def save_to_csv(results: List[Dict], csv_path: str) -> None:
"""
Save the search results in a csv format file.
Args:
results (List): a list of retrieval results.
csv_path (str): the path for saving the csv file.
"""
start = ["data", "pre_process", "model", "feature_map", "aggregator", "post_process"]
for i in range(len(start)):
results = sorted(results, key=lambda result: result[start[len(start) - i - 1] + "_name"])
start.append('mAP')
start.append('Recall@1')
with open(csv_path, 'w') as f:
csv_write = csv.writer(f)
if len(start) > 0:
csv_write.writerow(start)
for i in range(len(results)):
data_row = [0 for x in range(len(start))]
data_row[0] = results[i]["data_name"]
data_row[1] = results[i]["pre_process_name"]
data_row[2] = results[i]["model_name"]
data_row[3] = results[i]["feature_map_name"]
data_row[4] = results[i]["aggregator_name"]
data_row[5] = results[i]["post_process_name"]
data_row[6] = results[i]["mAP"]
data_row[7] = results[i]["recall_at_k"]['1']
csv_write.writerow(data_row) | Save the search results in a csv format file. Args: results (List): a list of retrieval results. csv_path (str): the path for saving the csv file. |
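A hedged sketch of the per-entry dict layout that `save_to_csv` reads (the metric values are placeholders); note that `recall_at_k` is expected to be a dict keyed by the string `'1'`:

```python
results = [{
    "data_name": "market", "pre_process_name": "identity",
    "model_name": "paper_resnet50", "feature_map_name": "pool5",
    "aggregator_name": "GAP", "post_process_name": "pca_knn",
    "mAP": 0.71, "recall_at_k": {"1": 0.88},
}]
save_to_csv(results, "search_results.csv")  # writes a header row plus one data row
```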
1,014 | import os
from typing import Dict, List
import csv
The provided code snippet includes necessary dependencies for implementing the `filter_by_keywords` function. Write a Python function `def filter_by_keywords(results: List[Dict], keywords: Dict) -> List[Dict]` to solve the following problem:
Filter the search results according to the given keywords Args: results (List): a list of retrieval results. keywords (Dict): a dict containing keywords to be selected. Returns:
Here is the function:
def filter_by_keywords(results: List[Dict], keywords: Dict) -> List[Dict]:
"""
Filter the search results according to the given keywords
Args:
results (List): a list of retrieval results.
keywords (Dict): a dict containing keywords to be selected.
    Returns:
        results (List[Dict]): the filtered list of retrieval results.
    """
for key in keywords:
no_match = []
if len(keywords[key]) == 0:
continue
else:
for i in range(len(results)):
if not results[i][key] in keywords[key]:
no_match.append(i)
for num in no_match[::-1]:
results.pop(num)
return results | Filter the search results according to the given keywords Args: results (List): a list of retrieval results. keywords (Dict): a dict containing keywords to be selected. Returns: |
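A usage sketch with placeholder keywords; an empty value list means that factor is not filtered on, and note that the input list is also modified in place:

```python
keywords = {"model_name": ["paper_resnet50"], "aggregator_name": ["GAP"], "data_name": []}
filtered = filter_by_keywords(results, keywords)  # entries not matching every non-empty key are dropped
```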
1,015 | from yacs.config import CfgNode
from copy import deepcopy
def _convert_dict_to_cfg(d: dict) -> CfgNode:
ret = CfgNode()
for key in d:
if isinstance(d[key], dict):
ret[key] = _convert_dict_to_cfg(d[key])
else:
ret[key] = d[key]
return ret | null |
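A small sketch of `_convert_dict_to_cfg`, showing that nested dicts become nested `CfgNode` objects (the keys and values are arbitrary examples):

```python
cfg = _convert_dict_to_cfg({"metric": {"name": "knn"}, "batch_size": 4})
assert isinstance(cfg.metric, CfgNode)
assert cfg.metric.name == "knn" and cfg.batch_size == 4
```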
1,016 | from yacs.config import CfgNode
from .registry import EVALUATORS
from ..utils import get_config_from_registry
def get_evaluator_cfg() -> CfgNode:
cfg = get_config_from_registry(EVALUATORS)
cfg["name"] = "unknown"
return cfg
def get_evaluate_cfg() -> CfgNode:
cfg = CfgNode()
cfg["evaluator"] = get_evaluator_cfg()
return cfg | null |
1,017 | from yacs.config import CfgNode
from .registry import EVALUATORS
from .evaluator import EvaluatorBase
from .helper import EvaluateHelper
from ..utils import simple_build
def build_evaluator(cfg: CfgNode) -> EvaluatorBase:
"""
Instantiate an evaluator class.
Args:
cfg (CfgNode): the configuration tree.
Returns:
evaluator (EvaluatorBase): an evaluator class.
"""
name = cfg["name"]
evaluator = simple_build(name, cfg, EVALUATORS)
return evaluator
The provided code snippet includes necessary dependencies for implementing the `build_evaluate_helper` function. Write a Python function `def build_evaluate_helper(cfg: CfgNode) -> EvaluateHelper` to solve the following problem:
Instantiate a evaluate helper class. Args: cfg (CfgNode): the configuration tree. Returns: helper (EvaluateHelper): a evaluate helper class.
Here is the function:
def build_evaluate_helper(cfg: CfgNode) -> EvaluateHelper:
"""
Instantiate an evaluate helper class.
Args:
cfg (CfgNode): the configuration tree.
Returns:
helper (EvaluateHelper): an evaluate helper class.
"""
evaluator = build_evaluator(cfg.evaluator)
helper = EvaluateHelper(evaluator)
return helper | Instantiate a evaluate helper class. Args: cfg (CfgNode): the configuration tree. Returns: helper (EvaluateHelper): a evaluate helper class. |
1,018 | from yacs.config import CfgNode
from .registry import ENHANCERS, METRICS, DIMPROCESSORS, RERANKERS
from ..utils import get_config_from_registry
def get_enhancer_cfg() -> CfgNode:
cfg = get_config_from_registry(ENHANCERS)
cfg["name"] = "unknown"
return cfg
def get_metric_cfg() -> CfgNode:
cfg = get_config_from_registry(METRICS)
cfg["name"] = "unknown"
return cfg
def get_processors_cfg() -> CfgNode:
cfg = get_config_from_registry(DIMPROCESSORS)
cfg["names"] = ["unknown"]
return cfg
def get_ranker_cfg() -> CfgNode:
cfg = get_config_from_registry(RERANKERS)
cfg["name"] = "unknown"
return cfg
def get_index_cfg() -> CfgNode:
cfg = CfgNode()
cfg["query_fea_dir"] = "unknown"
cfg["gallery_fea_dir"] = "unknown"
cfg["feature_names"] = ["all"]
cfg["dim_processors"] = get_processors_cfg()
cfg["feature_enhancer"] = get_enhancer_cfg()
cfg["metric"] = get_metric_cfg()
cfg["re_ranker"] = get_ranker_cfg()
return cfg | null |
1,019 | from yacs.config import CfgNode
from .registry import ENHANCERS, METRICS, DIMPROCESSORS, RERANKERS
from .feature_enhancer import EnhanceBase
from .helper import IndexHelper
from .metric import MetricBase
from .dim_processor import DimProcessorBase
from .re_ranker import ReRankerBase
from ..utils import simple_build
from typing import List
def build_enhance(cfg: CfgNode) -> EnhanceBase:
"""
Instantiate a feature enhancer class.
Args:
cfg (CfgNode): the configuration tree.
Returns:
enhance (EnhanceBase): an instance of feature enhancer class.
"""
name = cfg["name"]
enhance = simple_build(name, cfg, ENHANCERS)
return enhance
def build_metric(cfg: CfgNode) -> MetricBase:
"""
Instantiate a metric class.
Args:
cfg (CfgNode): the configuration tree.
Returns:
metric (MetricBase): an instance of metric class.
"""
name = cfg["name"]
metric = simple_build(name, cfg, METRICS)
return metric
def build_processors(feature_names: List[str], cfg: CfgNode) -> List[DimProcessorBase]:
    """
    Instantiate a list of dimension processor classes.
    Args:
        feature_names (List[str]): the names of the features to be processed.
        cfg (CfgNode): the configuration tree.
    Returns:
        processors (list): a list of instances of dimension processor class.
    """
names = cfg["names"]
processors = list()
for name in names:
processors.append(simple_build(name, cfg, DIMPROCESSORS, feature_names=feature_names))
return processors
def build_ranker(cfg: CfgNode) -> ReRankerBase:
"""
Instantiate a re-ranker class.
Args:
cfg (CfgNode): the configuration tree.
Returns:
re_rank (ReRankerBase): an instance of the re-ranker class.
"""
name = cfg["name"]
re_rank = simple_build(name, cfg, RERANKERS)
return re_rank
The provided code snippet includes necessary dependencies for implementing the `build_index_helper` function. Write a Python function `def build_index_helper(cfg: CfgNode) -> IndexHelper` to solve the following problem:
Instantiate a index helper class. Args: cfg (CfgNode): the configuration tree. Returns: helper (IndexHelper): an instance of index helper class.
Here is the function:
def build_index_helper(cfg: CfgNode) -> IndexHelper:
"""
Instantiate an index helper class.
Args:
cfg (CfgNode): the configuration tree.
Returns:
helper (IndexHelper): an instance of index helper class.
"""
dim_processors = build_processors(cfg["feature_names"], cfg.dim_processors)
metric = build_metric(cfg.metric)
feature_enhancer = build_enhance(cfg.feature_enhancer)
re_ranker = build_ranker(cfg.re_ranker)
helper = IndexHelper(dim_processors, feature_enhancer, metric, re_ranker)
return helper | Instantiate a index helper class. Args: cfg (CfgNode): the configuration tree. Returns: helper (IndexHelper): an instance of index helper class. |
1,020 | from yacs.config import CfgNode
from .registry import EXTRACTORS, SPLITTERS, AGGREGATORS
from ..utils import get_config_from_registry
def get_aggregators_cfg() -> CfgNode:
    # body reconstructed following the get_*_cfg pattern used by the other config modules
    cfg = get_config_from_registry(AGGREGATORS)
    cfg["names"] = ["unknown"]
    return cfg
def get_splitter_cfg() -> CfgNode:
    cfg = get_config_from_registry(SPLITTERS)
    cfg["name"] = "unknown"
    return cfg
def get_extractor_cfg() -> CfgNode:
    cfg = get_config_from_registry(EXTRACTORS)
    cfg["name"] = "unknown"
    return cfg
def get_extract_cfg() -> CfgNode:
cfg = CfgNode()
cfg["assemble"] = 0
cfg["extractor"] = get_extractor_cfg()
cfg["splitter"] = get_splitter_cfg()
cfg["aggregators"] = get_aggregators_cfg()
return cfg | null |
1,021 | from yacs.config import CfgNode
from .registry import AGGREGATORS, SPLITTERS, EXTRACTORS
from .extractor import ExtractorBase
from .splitter import SplitterBase
from .aggregator import AggregatorBase
from .helper import ExtractHelper
from ..utils import simple_build
import torch.nn as nn
from typing import List
def build_aggregators(cfg: CfgNode) -> List[AggregatorBase]:
"""
Instantiate a list of aggregator classes.
Args:
cfg (CfgNode): the configuration tree.
Returns:
aggregators (list): a list of instances of aggregator class.
"""
names = cfg["names"]
aggregators = list()
for name in names:
aggregators.append(simple_build(name, cfg, AGGREGATORS))
return aggregators
def build_extractor(model: nn.Module, cfg: CfgNode) -> ExtractorBase:
"""
Instantiate an extractor class.
Args:
model (nn.Module): the model for extracting features.
cfg (CfgNode): the configuration tree.
Returns:
extractor (ExtractorBase): an instance of extractor class.
"""
name = cfg["name"]
extractor = simple_build(name, cfg, EXTRACTORS, model=model)
return extractor
def build_splitter(cfg: CfgNode) -> SplitterBase:
"""
Instantiate a splitter class.
Args:
cfg (CfgNode): the configuration tree.
Returns:
splitter (SplitterBase): an instance of splitter class.
"""
name = cfg["name"]
splitter = simple_build(name, cfg, SPLITTERS)
return splitter
The provided code snippet includes necessary dependencies for implementing the `build_extract_helper` function. Write a Python function `def build_extract_helper(model: nn.Module, cfg: CfgNode) -> ExtractHelper` to solve the following problem:
Instantiate a extract helper class. Args: model (nn.Module): the model for extracting features. cfg (CfgNode): the configuration tree. Returns: helper (ExtractHelper): an instance of extract helper class.
Here is the function:
def build_extract_helper(model: nn.Module, cfg: CfgNode) -> ExtractHelper:
"""
Instantiate an extract helper class.
Args:
model (nn.Module): the model for extracting features.
cfg (CfgNode): the configuration tree.
Returns:
helper (ExtractHelper): an instance of extract helper class.
"""
assemble = cfg.assemble
extractor = build_extractor(model, cfg.extractor)
splitter = build_splitter(cfg.splitter)
aggregators = build_aggregators(cfg.aggregators)
helper = ExtractHelper(assemble, extractor, splitter, aggregators)
return helper | Instantiate a extract helper class. Args: model (nn.Module): the model for extracting features. cfg (CfgNode): the configuration tree. Returns: helper (ExtractHelper): an instance of extract helper class. |
1,022 | import os
from shutil import copyfile
The provided code snippet includes necessary dependencies for implementing the `split_dataset` function. Write a Python function `def split_dataset(dataset_path: str, split_file: str) -> None` to solve the following problem:
Split the dataset according to the given splitting rules. Args: dataset_path (str): the path of the dataset. split_file (str): the path of the file containing the splitting rules.
Here is the function:
def split_dataset(dataset_path: str, split_file: str) -> None:
"""
Split the dataset according to the given splitting rules.
Args:
dataset_path (str): the path of the dataset.
split_file (str): the path of the file containing the splitting rules.
"""
with open(split_file, 'r') as f:
lines = f.readlines()
for line in lines:
path = line.strip('\n').split(' ')[0]
is_gallery = line.strip('\n').split(' ')[1]
if is_gallery == '0':
src = os.path.join(dataset_path, path)
dst = src.replace(path.split('/')[0], 'query')
dst_index = len(dst.split('/')[-1])
dst_dir = dst[:len(dst) - dst_index]
if not os.path.isdir(dst_dir):
os.makedirs(dst_dir)
if not os.path.exists(dst):
os.symlink(src, dst)
elif is_gallery == '1':
src = os.path.join(dataset_path, path)
dst = src.replace(path.split('/')[0], 'gallery')
dst_index = len(dst.split('/')[-1])
dst_dir = dst[:len(dst) - dst_index]
if not os.path.isdir(dst_dir):
os.makedirs(dst_dir)
if not os.path.exists(dst):
os.symlink(src, dst) | Split the dataset according to the given splitting rules. Args: dataset_path (str): the path of the dataset. split_file (str): the path of the file containing the splitting rules. |
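From the parsing above, each line of the split file is expected to contain a relative image path and a space-separated flag, 0 for query and 1 for gallery. A hypothetical split file and call:

```python
# split.txt (paths are made-up examples):
#   images/0001/img_001.jpg 0
#   images/0001/img_002.jpg 1
split_dataset("/data/my_dataset", "/data/my_dataset/split.txt")
# -> symlinks created under /data/my_dataset/query/0001/ and /data/my_dataset/gallery/0001/
```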
1,023 | import pickle
import os
def make_ds_for_general(dataset_path: str, save_path: str) -> None:
"""
Generate the data json file for a dataset that collects images with the same label in one directory, e.g. CUB-200-2011.
Args:
dataset_path (str): the path of the dataset.
save_path (str): the path for saving the data json file.
"""
info_dicts = list()
img_dirs = os.listdir(dataset_path)
label_list = list()
label_to_idx = dict()
for dir in img_dirs:
for root, _, files in os.walk(os.path.join(dataset_path, dir)):
for file in files:
info_dict = dict()
info_dict['path'] = os.path.join(root, file)
if dir not in label_list:
label_to_idx[dir] = len(label_list)
label_list.append(dir)
info_dict['label'] = dir
info_dict['label_idx'] = label_to_idx[dir]
info_dicts += [info_dict]
with open(save_path, 'wb') as f:
pickle.dump({'nr_class': len(img_dirs), 'path_type': 'absolute_path', 'info_dicts': info_dicts}, f)
def make_ds_for_oxford(dataset_path, save_path: str or None=None, gt_path: str or None=None) -> None:
"""
Generate data json file for oxford dataset.
Args:
dataset_path (str): the path of the dataset.
save_path (str): the path for saving the data json file.
gt_path (str, optional): the path of the ground truth, necessary for Oxford.
"""
label_list = list()
info_dicts = list()
query_info = dict()
if 'query' in dataset_path:
for root, _, files in os.walk(gt_path):
for file in files:
if 'query' in file:
with open(os.path.join(root, file), 'r') as f:
line = f.readlines()[0].strip('\n').split(' ')
query_name = file[:-10]
label = line[0][5:]
bbox = [float(line[1]), float(line[2]), float(line[3]), float(line[4])]
query_info[label] = {'query_name': query_name, 'bbox': bbox,}
for root, _, files in os.walk(dataset_path):
for file in files:
info_dict = dict()
info_dict['path'] = os.path.join(root, file)
label = file.split('.')[0]
if label not in label_list:
label_list.append(label)
info_dict['label'] = label
if 'query' in dataset_path:
info_dict['bbox'] = query_info[label]['bbox']
info_dict['query_name'] = query_info[label]['query_name']
info_dicts += [info_dict]
with open(save_path, 'wb') as f:
pickle.dump({'nr_class': len(label_list), 'path_type': 'absolute_path', 'info_dicts': info_dicts}, f)
def make_ds_for_reid(dataset_path: str, save_path: str) -> None:
"""
Generating data json file for Re-ID dataset.
Args:
dataset_path (str): the path of the dataset.
save_path (str): the path for saving the data json file.
"""
label_list = list()
info_dicts = list()
for root, _, files in os.walk(dataset_path):
for file in files:
info_dict = dict()
info_dict['path'] = os.path.join(root, file)
label = file.split('_')[0]
cam = file.split('_')[1][1]
if label not in label_list:
label_list.append(label)
info_dict['label'] = label
info_dict['cam'] = cam
info_dicts += [info_dict]
with open(save_path, 'wb') as f:
pickle.dump({'nr_class': len(label_list), 'path_type': 'absolute_path', 'info_dicts': info_dicts}, f)
The provided code snippet includes necessary dependencies for implementing the `make_data_json` function. Write a Python function `def make_data_json(dataset_path: str, save_path: str, type: str, gt_path: str or None=None) -> None` to solve the following problem:
Generate data json file for dataset. Args: dataset_path (str): the path of the dataset. save_ds_path (str): the path for saving the data json files. type (str): the structure type of the dataset. gt_path (str, optional): the path of the ground truth, necessary for Oxford.
Here is the function:
def make_data_json(dataset_path: str, save_path: str, type: str, gt_path: str or None=None) -> None:
"""
Generate data json file for dataset.
Args:
dataset_path (str): the path of the dataset.
save_path (str): the path for saving the data json file.
type (str): the structure type of the dataset.
gt_path (str, optional): the path of the ground truth, necessary for Oxford.
"""
assert type in ['general', 'oxford', 'reid']
if type == 'general':
make_ds_for_general(dataset_path, save_path)
elif type == 'oxford':
make_ds_for_oxford(dataset_path, save_path, gt_path)
elif type == 'reid':
make_ds_for_reid(dataset_path, save_path) | Generate data json file for dataset. Args: dataset_path (str): the path of the dataset. save_ds_path (str): the path for saving the data json files. type (str): the structure type of the dataset. gt_path (str, optional): the path of the ground truth, necessary for Oxford. |
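A hypothetical call for the Oxford layout (all paths are placeholders); `gt_path` is only needed for `type='oxford'`:

```python
make_data_json(
    dataset_path="/data/oxford/query",
    save_path="/data/oxford/query_data.json",
    type="oxford",
    gt_path="/data/oxford/gt",
)
```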
1,024 | from yacs.config import CfgNode
from ..datasets import get_datasets_cfg
from ..models import get_model_cfg
from ..extract import get_extract_cfg
from ..index import get_index_cfg
from ..evaluate import get_evaluate_cfg
The provided code snippet includes necessary dependencies for implementing the `get_defaults_cfg` function. Write a Python function `def get_defaults_cfg() -> CfgNode` to solve the following problem:
Construct the default configuration tree. Returns: cfg (CfgNode): the default configuration tree.
Here is the function:
def get_defaults_cfg() -> CfgNode:
"""
Construct the default configuration tree.
Returns:
cfg (CfgNode): the default configuration tree.
"""
cfg = CfgNode()
cfg["datasets"] = get_datasets_cfg()
cfg["model"] = get_model_cfg()
cfg["extract"] = get_extract_cfg()
cfg["index"] = get_index_cfg()
cfg["evaluate"] = get_evaluate_cfg()
return cfg | Construct the default configuration tree. Returns: cfg (CfgNode): the default configuration tree. |
1,025 | from yacs.config import CfgNode
from ..datasets import get_datasets_cfg
from ..models import get_model_cfg
from ..extract import get_extract_cfg
from ..index import get_index_cfg
from ..evaluate import get_evaluate_cfg
The provided code snippet includes necessary dependencies for implementing the `setup_cfg` function. Write a Python function `def setup_cfg(cfg: CfgNode, cfg_file: str, cfg_opts: list or None = None) -> CfgNode` to solve the following problem:
Load a yaml config file and merge it this CfgNode. Args: cfg (CfgNode): the configuration tree with default structure. cfg_file (str): the path for yaml config file which is matched with the CfgNode. cfg_opts (list, optional): config (keys, values) in a list (e.g., from command line) into this CfgNode. Returns: cfg (CfgNode): the configuration tree with settings in the config file.
Here is the function:
def setup_cfg(cfg: CfgNode, cfg_file: str, cfg_opts: list or None = None) -> CfgNode:
"""
    Load a yaml config file and merge it into this CfgNode.
    Args:
        cfg (CfgNode): the configuration tree with the default structure.
        cfg_file (str): the path of the yaml config file, which must match the structure of the CfgNode.
        cfg_opts (list, optional): config (key, value) pairs in a flat list (e.g., from the command line) to be merged into this CfgNode.
    Returns:
        cfg (CfgNode): the configuration tree with the settings from the config file.
    """
    cfg.merge_from_file(cfg_file)
    if cfg_opts is not None:
        cfg.merge_from_list(cfg_opts)
    cfg.freeze()
return cfg | Load a yaml config file and merge it this CfgNode. Args: cfg (CfgNode): the configuration tree with default structure. cfg_file (str): the path for yaml config file which is matched with the CfgNode. cfg_opts (list, optional): config (keys, values) in a list (e.g., from command line) into this CfgNode. Returns: cfg (CfgNode): the configuration tree with settings in the config file. |
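A typical bootstrap combines `get_defaults_cfg` with `setup_cfg`; the yaml file name and the command-line override below are placeholders:

```python
cfg = get_defaults_cfg()
cfg = setup_cfg(cfg, "configs/my_experiment.yaml", ["datasets.batch_size", "4"])
# cfg is now frozen; cfg.datasets.batch_size == 4
```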
1,026 | from yacs.config import CfgNode
from .registry import COLLATEFNS, FOLDERS, TRANSFORMERS
from ..utils import get_config_from_registry
def get_collate_cfg() -> CfgNode:
cfg = get_config_from_registry(COLLATEFNS)
cfg["name"] = "unknown"
return cfg
def get_folder_cfg() -> CfgNode:
cfg = get_config_from_registry(FOLDERS)
cfg["name"] = "unknown"
return cfg
def get_tranformers_cfg() -> CfgNode:
cfg = get_config_from_registry(TRANSFORMERS)
cfg["names"] = ["unknown"]
return cfg
def get_datasets_cfg() -> CfgNode:
cfg = CfgNode()
cfg["collate_fn"] = get_collate_cfg()
cfg["folder"] = get_folder_cfg()
cfg["transformers"] = get_tranformers_cfg()
cfg["batch_size"] = 1
return cfg | null |
1,027 | from yacs.config import CfgNode
from .registry import COLLATEFNS, FOLDERS, TRANSFORMERS
from .collate_fn import CollateFnBase
from .folder import FolderBase
from .transformer import TransformerBase
from ..utils import simple_build
from torch.utils.data import DataLoader
from torchvision.transforms import Compose
def build_transformers(cfg: CfgNode) -> Compose:
"""
Instantiate a compose class containing several transforms with the given configuration tree.
Args:
cfg (CfgNode): the configuration tree.
Returns:
transformers (Compose): a compose class.
"""
names = cfg["names"]
transformers = list()
for name in names:
transformers.append(simple_build(name, cfg, TRANSFORMERS))
transformers = Compose(transformers)
return transformers
FOLDERS = Registry()
The provided code snippet includes necessary dependencies for implementing the `build_folder` function. Write a Python function `def build_folder(data_json_path: str, cfg: CfgNode) -> FolderBase` to solve the following problem:
Instantiate a folder class with the given configuration tree. Args: data_json_path (str): the path of the data json file. cfg (CfgNode): the configuration tree. Returns: folder (FolderBase): a folder class.
Here is the function:
def build_folder(data_json_path: str, cfg: CfgNode) -> FolderBase:
"""
Instantiate a folder class with the given configuration tree.
Args:
data_json_path (str): the path of the data json file.
cfg (CfgNode): the configuration tree.
Returns:
folder (FolderBase): a folder class.
"""
trans = build_transformers(cfg.transformers)
folder = simple_build(cfg.folder["name"], cfg.folder, FOLDERS, data_json_path=data_json_path, transformer=trans)
return folder | Instantiate a folder class with the given configuration tree. Args: data_json_path (str): the path of the data json file. cfg (CfgNode): the configuration tree. Returns: folder (FolderBase): a folder class. |
1,028 | from yacs.config import CfgNode
from .registry import COLLATEFNS, FOLDERS, TRANSFORMERS
from .collate_fn import CollateFnBase
from .folder import FolderBase
from .transformer import TransformerBase
from ..utils import simple_build
from torch.utils.data import DataLoader
from torchvision.transforms import Compose
def build_collate(cfg: CfgNode) -> CollateFnBase:
"""
Instantiate a collate class with the given configuration tree.
Args:
cfg (CfgNode): the configuration tree.
Returns:
collate (CollateFnBase): a collate class.
"""
name = cfg["name"]
collate = simple_build(name, cfg, COLLATEFNS)
return collate
The provided code snippet includes necessary dependencies for implementing the `build_loader` function. Write a Python function `def build_loader(folder: FolderBase, cfg: CfgNode) -> DataLoader` to solve the following problem:
Instantiate a data loader class with the given configuration tree. Args: folder (FolderBase): the folder function. cfg (CfgNode): the configuration tree. Returns: data_loader (DataLoader): a data loader class.
Here is the function:
def build_loader(folder: FolderBase, cfg: CfgNode) -> DataLoader:
"""
Instantiate a data loader class with the given configuration tree.
Args:
folder (FolderBase): the folder instance that provides the data.
cfg (CfgNode): the configuration tree.
Returns:
data_loader (DataLoader): a data loader class.
"""
co_fn = build_collate(cfg.collate_fn)
data_loader = DataLoader(folder, cfg["batch_size"], collate_fn=co_fn, num_workers=8, pin_memory=True)
return data_loader | Instantiate a data loader class with the given configuration tree. Args: folder (FolderBase): the folder function. cfg (CfgNode): the configuration tree. Returns: data_loader (DataLoader): a data loader class. |
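A hedged wiring sketch: `cfg` is assumed to be the `datasets` sub-tree of the full configuration, with `folder.name`, `transformers.names`, and `collate_fn.name` pointing at names that are actually registered; the data json path is a placeholder:

```python
folder = build_folder("/data/oxford/gallery_data.json", cfg)
loader = build_loader(folder, cfg)
for batch in loader:
    pass  # hand batches to the feature extraction stage
```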
1,029 | import torch
import torch.nn as nn
from ..backbone_base import BackboneBase
from ...registry import BACKBONES
The provided code snippet includes necessary dependencies for implementing the `conv3x3` function. Write a Python function `def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1)` to solve the following problem:
3x3 convolution with padding
Here is the function:
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation) | 3x3 convolution with padding |
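A quick shape check, assuming standard PyTorch semantics: with stride 1 and padding equal to the dilation, `conv3x3` preserves the spatial size:

```python
import torch

layer = conv3x3(64, 128)
out = layer(torch.randn(1, 64, 56, 56))
assert out.shape == (1, 128, 56, 56)
```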
1,030 | import torch
import torch.nn as nn
from ..backbone_base import BackboneBase
from ...registry import BACKBONES
The provided code snippet includes necessary dependencies for implementing the `conv1x1` function. Write a Python function `def conv1x1(in_planes, out_planes, stride=1)` to solve the following problem:
1x1 convolution
Here is the function:
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) | 1x1 convolution |
1,031 | import torch
import torch.nn as nn
from ..backbone_base import BackboneBase
from ...registry import BACKBONES
class BasicBlock(nn.Module):
expansion = 1
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(BackboneBase):
def __init__(self, block=Bottleneck, layers=None, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None, hps=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
# Allow for accessing forward method in a inherited class
forward = _forward
The provided code snippet includes necessary dependencies for implementing the `resnet18` function. Write a Python function `def resnet18(progress=True, **kwargs)` to solve the following problem:
r"""ResNet-18 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def resnet18(progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
return model | r"""ResNet-18 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
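A hedged smoke test, assuming `BackboneBase` behaves like a plain `nn.Module` here: a dummy forward pass through `resnet18` yields one logit vector per image:

```python
import torch

model = resnet18(num_classes=1000)
logits = model(torch.randn(2, 3, 224, 224))
assert logits.shape == (2, 1000)
```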
1,032 | import torch
import torch.nn as nn
from ..backbone_base import BackboneBase
from ...registry import BACKBONES
class BasicBlock(nn.Module):
expansion = 1
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(BackboneBase):
def __init__(self, block=Bottleneck, layers=None, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None, hps=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
# Allow for accessing forward method in a inherited class
forward = _forward
The provided code snippet includes necessary dependencies for implementing the `resnet34` function. Write a Python function `def resnet34(progress=True, **kwargs)` to solve the following problem:
r"""ResNet-34 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def resnet34(progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
return model | r"""ResNet-34 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
1,033 | import torch
import torch.nn as nn
from ..backbone_base import BackboneBase
from ...registry import BACKBONES
class Bottleneck(nn.Module):
expansion = 4
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(BackboneBase):
def __init__(self, block=Bottleneck, layers=None, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None, hps=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
# Allow for accessing forward method in a inherited class
forward = _forward
The provided code snippet includes necessary dependencies for implementing the `resnet50` function. Write a Python function `def resnet50(progress=True, **kwargs)` to solve the following problem:
r"""ResNet-50 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def resnet50(progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
return model | r"""ResNet-50 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
1,034 | import torch
import torch.nn as nn
from ..backbone_base import BackboneBase
from ...registry import BACKBONES
class Bottleneck(nn.Module):
expansion = 4
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(BackboneBase):
def __init__(self, block=Bottleneck, layers=None, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None, hps=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
# Allow for accessing forward method in a inherited class
forward = _forward
The provided code snippet includes necessary dependencies for implementing the `resnet101` function. Write a Python function `def resnet101(progress=True, **kwargs)` to solve the following problem:
r"""ResNet-101 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def resnet101(progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
return model | r"""ResNet-101 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
1,035 | import torch
import torch.nn as nn
from ..backbone_base import BackboneBase
from ...registry import BACKBONES
class Bottleneck(nn.Module):
expansion = 4
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(BackboneBase):
def __init__(self, block=Bottleneck, layers=None, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None, hps=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
# Allow for accessing forward method in a inherited class
forward = _forward
The provided code snippet includes necessary dependencies for implementing the `resnet152` function. Write a Python function `def resnet152(progress=True, **kwargs)` to solve the following problem:
r"""ResNet-152 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def resnet152(progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
return model | r"""ResNet-152 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
1,036 | import torch
import torch.nn as nn
from ..backbone_base import BackboneBase
from ...registry import BACKBONES
class VGG(nn.Module):
def __init__(self, features, num_classes=1000, init_weights=True):
super(VGG, self).__init__()
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, num_classes),
)
if init_weights:
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfgs = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
The provided code snippet includes necessary dependencies for implementing the `vgg11` function. Write a Python function `def vgg11(progress=True, **kwargs)` to solve the following problem:
r"""VGG 11-layer model (configuration "A") from `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def vgg11(progress=True, **kwargs):
r"""VGG 11-layer model (configuration "A") from
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
model = VGG(make_layers(cfgs['A'], batch_norm=False), **kwargs)
return model | r"""VGG 11-layer model (configuration "A") from `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
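Possible usage sketch, assuming the VGG class and make_layers above are in scope: cfgs['A'] has 8 convolutional entries and 5 'M' pooling markers, so the feature extractor holds 8 Conv2d layers, which together with the 3 Linear layers in the classifier gives the 11 weight layers in the name.
import torch

model = vgg11()
num_convs = sum(isinstance(m, torch.nn.Conv2d) for m in model.features)
print(num_convs)  # 8
with torch.no_grad():
    out = model(torch.randn(1, 3, 224, 224))
print(out.shape)  # torch.Size([1, 1000])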
1,037 | import torch
import torch.nn as nn
from ..backbone_base import BackboneBase
from ...registry import BACKBONES
class VGG(nn.Module):
def __init__(self, features, num_classes=1000, init_weights=True):
super(VGG, self).__init__()
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, num_classes),
)
if init_weights:
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfgs = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
The provided code snippet includes necessary dependencies for implementing the `vgg11_bn` function. Write a Python function `def vgg11_bn(progress=True, **kwargs)` to solve the following problem:
r"""VGG 11-layer model (configuration "A") with batch normalization `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def vgg11_bn(progress=True, **kwargs):
r"""VGG 11-layer model (configuration "A") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
model = VGG(make_layers(cfgs['A'], batch_norm=True), **kwargs)
return model | r"""VGG 11-layer model (configuration "A") with batch normalization `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
1,038 | import torch
import torch.nn as nn
from ..backbone_base import BackboneBase
from ...registry import BACKBONES
class VGG(nn.Module):
def __init__(self, features, num_classes=1000, init_weights=True):
super(VGG, self).__init__()
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, num_classes),
)
if init_weights:
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfgs = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
The provided code snippet includes necessary dependencies for implementing the `vgg13` function. Write a Python function `def vgg13(progress=True, **kwargs)` to solve the following problem:
r"""VGG 13-layer model (configuration "B") `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def vgg13(progress=True, **kwargs):
r"""VGG 13-layer model (configuration "B")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
model = VGG(make_layers(cfgs['B'], batch_norm=False), **kwargs)
return model | r"""VGG 13-layer model (configuration "B") `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
1,039 | import torch
import torch.nn as nn
from ..backbone_base import BackboneBase
from ...registry import BACKBONES
class VGG(nn.Module):
def __init__(self, features, num_classes=1000, init_weights=True):
super(VGG, self).__init__()
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, num_classes),
)
if init_weights:
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfgs = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
The provided code snippet includes necessary dependencies for implementing the `vgg13_bn` function. Write a Python function `def vgg13_bn(progress=True, **kwargs)` to solve the following problem:
r"""VGG 13-layer model (configuration "B") with batch normalization `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def vgg13_bn(progress=True, **kwargs):
r"""VGG 13-layer model (configuration "B") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
model = VGG(make_layers(cfgs['B'], batch_norm=True), **kwargs)
return model | r"""VGG 13-layer model (configuration "B") with batch normalization `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
1,040 | import torch
import torch.nn as nn
from ..backbone_base import BackboneBase
from ...registry import BACKBONES
class VGG(nn.Module):
def __init__(self, features, num_classes=1000, init_weights=True):
super(VGG, self).__init__()
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, num_classes),
)
if init_weights:
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfgs = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
The provided code snippet includes necessary dependencies for implementing the `vgg16` function. Write a Python function `def vgg16(progress=True, **kwargs)` to solve the following problem:
r"""VGG 16-layer model (configuration "D") `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def vgg16(progress=True, **kwargs):
r"""VGG 16-layer model (configuration "D")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
model = VGG(make_layers(cfgs['D'], batch_norm=False), **kwargs)
return model | r"""VGG 16-layer model (configuration "D") `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
1,041 | import torch
import torch.nn as nn
from ..backbone_base import BackboneBase
from ...registry import BACKBONES
class VGG(nn.Module):
def __init__(self, features, num_classes=1000, init_weights=True):
super(VGG, self).__init__()
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, num_classes),
)
if init_weights:
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfgs = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
The provided code snippet includes necessary dependencies for implementing the `vgg16_bn` function. Write a Python function `def vgg16_bn(progress=True, **kwargs)` to solve the following problem:
r"""VGG 16-layer model (configuration "D") with batch normalization `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def vgg16_bn(progress=True, **kwargs):
r"""VGG 16-layer model (configuration "D") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
model = VGG(make_layers(cfgs['D'], batch_norm=True), **kwargs)
return model | r"""VGG 16-layer model (configuration "D") with batch normalization `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
1,042 | import torch
import torch.nn as nn
from ..backbone_base import BackboneBase
from ...registry import BACKBONES
class VGG(nn.Module):
def __init__(self, features, num_classes=1000, init_weights=True):
super(VGG, self).__init__()
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, num_classes),
)
if init_weights:
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfgs = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
The provided code snippet includes necessary dependencies for implementing the `vgg19` function. Write a Python function `def vgg19(progress=True, **kwargs)` to solve the following problem:
r"""VGG 19-layer model (configuration "E") `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def vgg19(progress=True, **kwargs):
r"""VGG 19-layer model (configuration "E")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
model = VGG(make_layers(cfgs['E'], batch_norm=False), **kwargs)
return model | r"""VGG 19-layer model (configuration "E") `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
1,043 | import torch
import torch.nn as nn
from ..backbone_base import BackboneBase
from ...registry import BACKBONES
class VGG(nn.Module):
def __init__(self, features, num_classes=1000, init_weights=True):
super(VGG, self).__init__()
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, num_classes),
)
if init_weights:
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfgs = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
The provided code snippet includes necessary dependencies for implementing the `vgg19_bn` function. Write a Python function `def vgg19_bn(progress=True, **kwargs)` to solve the following problem:
r"""VGG 19-layer model (configuration 'E') with batch normalization `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def vgg19_bn(progress=True, **kwargs):
r"""VGG 19-layer model (configuration 'E') with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
model = VGG(make_layers(cfgs['E'], batch_norm=True), **kwargs)
return model | r"""VGG 19-layer model (configuration 'E') with batch normalization `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
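As a compact check of how the config keys map to the familiar depth names, counting the convolutional entries in each cfgs value and adding the 3 fully connected layers of the classifier:
conv_counts = {key: sum(1 for v in cfg if v != 'M') for key, cfg in cfgs.items()}
print(conv_counts)  # {'A': 8, 'B': 10, 'D': 13, 'E': 16}, i.e. VGG-11/13/16/19 once the 3 FC layers are added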
1,044 | import torch
import torch.nn as nn
from torch.nn import init
from torchvision import models
from torch.autograd import Variable
from ..backbone_base import BackboneBase
from ...registry import BACKBONES
def weights_init_kaiming(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') # For old pytorch, you may use kaiming_normal.
elif classname.find('Linear') != -1:
init.kaiming_normal_(m.weight.data, a=0, mode='fan_out')
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm1d') != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0.0) | null |
1,045 | import torch
import torch.nn as nn
from torch.nn import init
from torchvision import models
from torch.autograd import Variable
from ..backbone_base import BackboneBase
from ...registry import BACKBONES
def weights_init_classifier(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
init.normal_(m.weight.data, std=0.001)
init.constant_(m.bias.data, 0.0) | null |
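The two initializers above are typically applied with Module.apply; a minimal sketch in which the layer sizes and the 751-class head are purely illustrative, assuming both helpers above are importable together:
import torch.nn as nn

feat_block = nn.Sequential(
    nn.Linear(2048, 512),   # handled by the 'Linear' branch of weights_init_kaiming
    nn.BatchNorm1d(512),    # handled by the 'BatchNorm1d' branch
    nn.LeakyReLU(0.1),
    nn.Dropout(p=0.5),
)
classifier = nn.Linear(512, 751)
feat_block.apply(weights_init_kaiming)
classifier.apply(weights_init_classifier)  # small-std normal init for the final FC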
1,046 | from yacs.config import CfgNode
from .backbone.backbone_base import BACKBONES
def get_model_cfg() -> CfgNode:
cfg = CfgNode()
for name in BACKBONES:
cfg[name] = CfgNode()
cfg[name]["load_checkpoint"] = ""
cfg["name"] = "unknown"
return cfg | null |
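Hypothetical usage, assuming a backbone named resnet50 is registered in BACKBONES: the returned tree carries one load_checkpoint slot per registered backbone plus a top-level name.
cfg = get_model_cfg()
cfg["name"] = "resnet50"                                      # which registered backbone to build
cfg["resnet50"]["load_checkpoint"] = "/path/to/resnet50.pth"  # empty string by default
print(cfg["name"], cfg["resnet50"]["load_checkpoint"])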
1,047 | from yacs.config import CfgNode
import torch
import torch.nn as nn
from .registry import BACKBONES
from ..utils import load_state_dict
from torchvision.models.utils import load_state_dict_from_url
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
BACKBONES = Registry()
The provided code snippet includes necessary dependencies for implementing the `build_model` function. Write a Python function `def build_model(cfg: CfgNode) -> nn.Module` to solve the following problem:
Instantiate a backbone class. Args: cfg (CfgNode): the configuration tree. Returns: model (nn.Module): the model for extracting features.
Here is the function:
def build_model(cfg: CfgNode) -> nn.Module:
"""
Instantiate a backbone class.
Args:
cfg (CfgNode): the configuration tree.
Returns:
model (nn.Module): the model for extracting features.
"""
name = cfg["name"]
model = BACKBONES.get(name)()
load_checkpoint = cfg[cfg.name]["load_checkpoint"]
if 'torchvision' in load_checkpoint:
arch = load_checkpoint.split('://')[-1]
state_dict = load_state_dict_from_url(model_urls[arch], progress=True)
else:
state_dict = torch.load(load_checkpoint)
try:
model.load_state_dict(state_dict, strict=False)
except:
load_state_dict(model, state_dict)
return model | Instantiate a backbone class. Args: cfg (CfgNode): the configuration tree. Returns: model (nn.Module): the model for extracting features. |
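A minimal end-to-end sketch, assuming get_model_cfg from the snippet above is available and a resnet50 backbone is registered; the "torchvision://<arch>" form routes through load_state_dict_from_url, while any other non-empty string is treated as a local checkpoint path for torch.load.
cfg = get_model_cfg()
cfg["name"] = "resnet50"
cfg["resnet50"]["load_checkpoint"] = "torchvision://resnet50"
model = build_model(cfg)
model.eval()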
1,048 |
def _register_generic(module_dict, module_name, module):
assert module_name not in module_dict
module_dict[module_name] = module | null |
1,049 | import os
import torch.nn as nn
from torch.nn import Parameter
from torchvision.models.utils import load_state_dict_from_url
from typing import Dict
The provided code snippet includes necessary dependencies for implementing the `ensure_dir` function. Write a Python function `def ensure_dir(path: str) -> None` to solve the following problem:
Check if a directory exists, if not, create a new one. Args: path (str): the path of the directory.
Here is the function:
def ensure_dir(path: str) -> None:
"""
Check if a directory exists, if not, create a new one.
Args:
path (str): the path of the directory.
"""
if not os.path.exists(path):
os.makedirs(path) | Check if a directory exists, if not, create a new one. Args: path (str): the path of the directory. |
1,050 | import os
import torch.nn as nn
from torch.nn import Parameter
from torchvision.models.utils import load_state_dict_from_url
from typing import Dict
The provided code snippet includes necessary dependencies for implementing the `load_state_dict` function. Write a Python function `def load_state_dict(model: nn.Module, state_dict: Dict) -> None` to solve the following problem:
Load parameters without requiring the shapes of parameters with the same name to match, which is a slight modification of PyTorch's load_state_dict. Args: model (nn.Module): the model for extracting features. state_dict (Dict): a dict of model parameters.
Here is the function:
def load_state_dict(model: nn.Module, state_dict: Dict) -> None:
"""
    Load parameters without requiring the shapes of parameters with the same name to match,
    which is a slight modification of PyTorch's load_state_dict.
Args:
model (nn.Module): the model for extracting features.
state_dict (Dict): a dict of model parameters.
"""
own_state = model.state_dict()
success_keys = list()
for name, param in state_dict.items():
if name in own_state:
if isinstance(param, Parameter):
# backwards compatibility for serialized parameters
param = param.data
try:
own_state[name].copy_(param)
success_keys.append(name)
except Exception:
print("[LoadStateDict]: shape mismatch in parameter {}, {} vs {}".format(
name, own_state[name].size(), param.size()
))
else:
print("[LoadStateDict]: " + 'unexpected key "{}" in state_dict'.format(name))
missing = set(own_state.keys()) - set(success_keys)
if len(missing) > 0:
print("[LoadStateDict]: " + "missing keys or mismatch param in state_dict: {}".format(missing)) | Load parameters regardless the shape of parameters with the same name need to match, which is a slight modification to load_state_dict of pytorch. Args: model (nn.Module): the model for extracting features. state_dict (Dict): a dict of model parameters. |
1,051 | from yacs.config import CfgNode
from .module_base import ModuleBase
from .registry import Registry
class Registry(dict):
"""
A helper class to register class.
"""
def __init__(self, *args, **kwargs):
super(Registry, self).__init__(*args, **kwargs)
def register(self, module):
_register_generic(self, module.__name__, module)
return module
The provided code snippet includes necessary dependencies for implementing the `get_config_from_registry` function. Write a Python function `def get_config_from_registry(registry: Registry) -> CfgNode` to solve the following problem:
Collect all hyper-parameters from modules in registry. Args: registry (Registry): module registry. Returns: cfg (CfgNode): configurations for this registry.
Here is the function:
def get_config_from_registry(registry: Registry) -> CfgNode:
"""
Collect all hyper-parameters from modules in registry.
Args:
registry (Registry): module registry.
Returns:
cfg (CfgNode): configurations for this registry.
"""
cfg = CfgNode()
for name in registry:
cfg[name] = CfgNode()
loss = registry[name]
hps = loss.default_hyper_params
for hp_name in hps:
cfg[name][hp_name] = hps[hp_name]
return cfg | Collect all hyper-parameters from modules in registry. Args: registry (Registry): module registry. Returns: cfg (CfgNode): configurations for this registry. |
1,052 | from yacs.config import CfgNode
from .module_base import ModuleBase
from .registry import Registry
class Registry(dict):
"""
A helper class to register class.
"""
def __init__(self, *args, **kwargs):
super(Registry, self).__init__(*args, **kwargs)
def register(self, module):
_register_generic(self, module.__name__, module)
return module
The provided code snippet includes necessary dependencies for implementing the `simple_build` function. Write a Python function `def simple_build(name: str, cfg: CfgNode, registry: Registry, **kwargs)` to solve the following problem:
Simply build a module according to name and hyper-parameters. Args: name (str): name for instance to be built. cfg (CfgNode): configurations for this sub-module. registry (Registry): registry for this sub-module. **kwargs: keyword arguments. Returns: module: an initialized instance
Here is the function:
def simple_build(name: str, cfg: CfgNode, registry: Registry, **kwargs):
"""
Simply build a module according to name and hyper-parameters.
Args:
name (str): name for instance to be built.
cfg (CfgNode): configurations for this sub-module.
registry (Registry): registry for this sub-module.
**kwargs: keyword arguments.
Returns:
        module: an initialized instance
"""
assert name in registry
module = registry[name]
hps = module.default_hyper_params
for hp_name in hps:
new_value = cfg[name][hp_name]
hps[hp_name] = new_value
    return module(hps=hps, **kwargs) | Simply build a module according to name and hyper-parameters. Args: name (str): name for instance to be built. cfg (CfgNode): configurations for this sub-module. registry (Registry): registry for this sub-module. **kwargs: keyword arguments. Returns: module: an initialized instance
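A self-contained sketch of the registry workflow, with a toy class standing in for a real module; the default_hyper_params / hps convention follows the surrounding code, and Registry and get_config_from_registry are assumed importable from the snippets above.
TOYS = Registry()

@TOYS.register
class GAP:
    default_hyper_params = {"keep_dim": False}

    def __init__(self, hps, **kwargs):
        self.keep_dim = hps["keep_dim"]

cfg = get_config_from_registry(TOYS)   # CfgNode holding {'GAP': {'keep_dim': False}}
cfg["GAP"]["keep_dim"] = True          # override one hyper-parameter
gap = simple_build("GAP", cfg, TOYS)
print(gap.keep_dim)                    # True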
1,053 | import argparse
import os
import torch
from pyretri.config import get_defaults_cfg, setup_cfg
from pyretri.datasets import build_folder, build_loader
from pyretri.models import build_model
from pyretri.extract import build_extract_helper
from torchvision import models
def parse_args():
parser = argparse.ArgumentParser(description='A tool box for deep learning-based image retrieval')
parser.add_argument('opts', default=None, nargs=argparse.REMAINDER)
parser.add_argument('--data_json', '-dj', default=None, type=str, help='json file for dataset to be extracted')
parser.add_argument('--save_path', '-sp', default=None, type=str, help='save path for features')
parser.add_argument('--config_file', '-cfg', default=None, metavar='FILE', type=str, help='path to config file')
parser.add_argument('--save_interval', '-si', default=5000, type=int, help='number of features saved in one part file')
args = parser.parse_args()
return args | null |
1,054 | import argparse
import os
from pyretri.extract.utils import split_dataset
def parse_args():
parser = argparse.ArgumentParser(description='A tool box for deep learning-based image retrieval')
parser.add_argument('opts', default=None, nargs=argparse.REMAINDER)
parser.add_argument('--dataset', '-d', default=None, type=str, help="path for the dataset.")
parser.add_argument('--split_file', '-sf', default=None, type=str, help="name for the dataset.")
args = parser.parse_args()
return args | null |
1,055 | import argparse
import os
from PIL import Image
import numpy as np
from pyretri.config import get_defaults_cfg, setup_cfg
from pyretri.datasets import build_transformers
from pyretri.models import build_model
from pyretri.extract import build_extract_helper
from pyretri.index import build_index_helper, feature_loader
def parse_args():
parser = argparse.ArgumentParser(description='A tool box for deep learning-based image retrieval')
parser.add_argument('opts', default=None, nargs=argparse.REMAINDER)
parser.add_argument('--config_file', '-cfg', default=None, metavar='FILE', type=str, help='path to config file')
args = parser.parse_args()
return args | null |
1,056 | import argparse
import os
import pickle
from pyretri.config import get_defaults_cfg, setup_cfg
from pyretri.index import build_index_helper, feature_loader
from pyretri.evaluate import build_evaluate_helper
def parse_args():
parser = argparse.ArgumentParser(description='A tool box for deep learning-based image retrieval')
parser.add_argument('opts', default=None, nargs=argparse.REMAINDER)
parser.add_argument('--config_file', '-cfg', default=None, metavar='FILE', type=str, help='path to config file')
args = parser.parse_args()
return args | null |
1,057 | import argparse
from pyretri.extract import make_data_json
def parse_args():
parser = argparse.ArgumentParser(description='A tool box for deep learning-based image retrieval')
parser.add_argument('opts', default=None, nargs=argparse.REMAINDER)
parser.add_argument('--dataset', '-d', default=None, type=str, help="path for the dataset that make the json file")
parser.add_argument('--save_path', '-sp', default=None, type=str, help="save path for the json file")
parser.add_argument('--type', '-t', default=None, type=str, help="mode of the dataset")
parser.add_argument('--ground_truth', '-gt', default=None, type=str, help="ground truth of the dataset")
args = parser.parse_args()
return args | null |
1,058 |
async def async_test(_douyin_url: str = None, _tiktok_url: str = None, _bilibili_url: str = None,
_ixigua_url: str = None, _kuaishou_url: str = None) -> None:
# 异步测试/Async test
start_time = time.time()
print("<异步测试/Async test>")
print('\n--------------------------------------------------')
print("正在测试异步获取快手视频ID方法...")
kuaishou_id = await api.get_kuaishou_video_id(_kuaishou_url)
print(f"快手视频ID: {kuaishou_id}")
print("正在测试异步获取快手视频数据方法...")
kuaishou_data = await api.get_kuaishou_video_data(kuaishou_id)
print(f"快手视频数据: {str(kuaishou_data)}")
print('\n--------------------------------------------------')
print("正在测试异步获取西瓜视频ID方法...")
ixigua_id = await api.get_ixigua_video_id(_ixigua_url)
print(f"西瓜视频ID: {ixigua_id}")
print("正在测试异步获取西瓜视频数据方法...")
ixigua_data = await api.get_ixigua_video_data(ixigua_id)
print(f"西瓜视频数据: {str(ixigua_data)[:100]}")
print('\n--------------------------------------------------')
print("正在测试异步获取哔哩哔哩视频ID方法...")
bilibili_id = await api.get_bilibili_video_id(_bilibili_url)
print(f"哔哩哔哩视频ID: {bilibili_id}")
print("正在测试异步获取哔哩哔哩视频数据方法...")
bilibili_data = await api.get_bilibili_video_data(bilibili_id)
print(f"哔哩哔哩视频数据: {str(bilibili_data)[:100]}")
print('\n--------------------------------------------------')
print("正在测试异步获取抖音视频ID方法...")
douyin_id = await api.get_douyin_video_id(_douyin_url)
print(f"抖音视频ID: {douyin_id}")
print("正在测试异步获取抖音视频数据方法...")
douyin_data = await api.get_douyin_video_data(douyin_id)
print(f"抖音视频数据: {str(douyin_data)[:100]}")
print('\n--------------------------------------------------')
print("正在测试异步获取TikTok视频ID方法...")
tiktok_id = await api.get_tiktok_video_id(_tiktok_url)
print(f"TikTok视频ID: {tiktok_id}")
print("正在测试异步获取TikTok视频数据方法...")
tiktok_data = await api.get_tiktok_video_data(tiktok_id)
print(f"TikTok视频数据: {str(tiktok_data)[:100]}")
#
print('\n--------------------------------------------------')
print("正在测试异步混合解析方法...")
douyin_hybrid_data = await api.hybrid_parsing(_douyin_url)
tiktok_hybrid_data = await api.hybrid_parsing(_tiktok_url)
bilibili_hybrid_data = await api.hybrid_parsing(_bilibili_url)
xigua_hybrid_data = await api.hybrid_parsing(_ixigua_url)
kuaishou_hybrid_data = await api.hybrid_parsing(_kuaishou_url)
print(f"抖音、TikTok、哔哩哔哩、西瓜、快手快手混合解析全部成功!")
print('\n--------------------------------------------------')
# 总耗时/Total time
total_time = round(time.time() - start_time, 2)
print("异步测试完成,总耗时: {}s".format(total_time)) | null |
1,059 | import configparser
config = configparser.ConfigParser()
config_path = 'config.ini'
config.read(config_path, encoding='utf-8')
def api_config():
api_default_port = config.get('Web_API', 'Port')
    api_new_port = input(f'Default API port: {api_default_port}\nIf you want to use a different port, input the new API port here: ')
if api_new_port.isdigit():
if int(api_new_port) == int(api_default_port):
            print(f'Use default port for web_api.py: {api_default_port}')
else:
print(f'Use new port for web_api.py: {api_new_port}')
config.set('Web_API', 'Port', api_new_port)
config.write(open(config_path, "w", encoding="utf-8"))
else:
        print(f'Use default port for web_api.py: {api_default_port}')
req_limit = config.get('Web_API', 'Rate_Limit')
    new_req_limit = input(f'Default API rate limit: {req_limit}\nIf you want to use a different rate limit, input the new rate limit here: ')
if new_req_limit.isdigit():
if int(new_req_limit) == int(req_limit.split('/')[0]):
print(f'Use default rate limit for web_api.py : {req_limit}')
else:
print(f'Use new rate limit: {new_req_limit}/minute')
config.set('Web_API', 'Rate_Limit', f'{new_req_limit}/minute')
config.write(open(config_path, "w", encoding="utf-8"))
else:
print(f'Use default rate limit for web_api.py: {req_limit}') | null |
1,060 | import configparser
config = configparser.ConfigParser()
config_path = 'config.ini'
config.read(config_path, encoding='utf-8')
def app_config():
app_default_port = config.get('Web_APP', 'Port')
    app_new_port = input(f'Default App port: {app_default_port}\nIf you want to use a different port, input the new App port here: ')
if app_new_port.isdigit():
if int(app_new_port) == int(app_default_port):
print(f'Use default port for web_app.py: {app_default_port}')
else:
print(f'Use new port: {app_new_port}')
config.set('Web_APP', 'Port', app_new_port)
config.write(open(config_path, "w", encoding="utf-8"))
else:
print(f'Use default port for web_app.py : {app_default_port}') | null |
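Both helpers above read a config.ini with Web_API and Web_APP sections; a sketch of creating that file programmatically, with placeholder port and rate-limit values:
import configparser

cfg = configparser.ConfigParser()
cfg["Web_API"] = {"Port": "80", "Rate_Limit": "10/minute"}   # keys mirror the config.get(...) calls above
cfg["Web_APP"] = {"Port": "80"}
with open("config.ini", "w", encoding="utf-8") as f:
    cfg.write(f)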
1,061 | import json
import aiohttp
import uvicorn
import zipfile
import threading
import configparser
from fastapi import FastAPI, Request
from fastapi.responses import ORJSONResponse, FileResponse
from slowapi import Limiter, _rate_limit_exceeded_handler
from slowapi.errors import RateLimitExceeded
from slowapi.util import get_remote_address
from pydantic import BaseModel
from starlette.responses import RedirectResponse
from scraper import Scraper
version = '3.1.8'
update_time = "2023/09/25"
async def get_tiktok_profile_liked_videos(tikhub_token: str, tiktok_video_url: str = None):
"""
## 用途/Usage
- 获取抖音用户主页点赞视频数据,参数是用户链接|ID
- Get the data of a Douyin user profile liked video, the parameter is the user link or ID.
## 参数/Parameter
tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post
"""
response = await api.get_tiktok_user_profile_liked_videos(tikhub_token=tikhub_token, tiktok_video_url=tiktok_video_url)
return response
The provided code snippet includes necessary dependencies for implementing the `root` function. Write a Python function `async def root()` to solve the following problem:
Root path info.
Here is the function:
async def root():
"""
Root path info.
"""
data = {
"API_status": "Running",
"Version": version,
"Update_time": update_time,
"Request_Rate_Limit": Rate_Limit,
"Web_APP": "https://www.douyin.wtf/",
"API_V1_Document": "https://api.douyin.wtf/docs",
"TikHub_API_Document": "https://api.tikhub.io/docs",
"GitHub": "https://github.com/Evil0ctal/Douyin_TikTok_Download_API",
}
return ORJSONResponse(data) | Root path info. |
1,062 | import json
import aiohttp
import uvicorn
import zipfile
import threading
import configparser
from fastapi import FastAPI, Request
from fastapi.responses import ORJSONResponse, FileResponse
from slowapi import Limiter, _rate_limit_exceeded_handler
from slowapi.errors import RateLimitExceeded
from slowapi.util import get_remote_address
from pydantic import BaseModel
from starlette.responses import RedirectResponse
from scraper import Scraper
# (garbled context omitted: response-model fields such as endpoint / total_time / video_data,
#  plus the signature of the api_logs(start_time, input_data, endpoint, error_data) logging helper used below)
async def get_tiktok_profile_liked_videos(tikhub_token: str, tiktok_video_url: str = None):
"""
## 用途/Usage
- 获取抖音用户主页点赞视频数据,参数是用户链接|ID
- Get the data of a Douyin user profile liked video, the parameter is the user link or ID.
## 参数/Parameter
tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post
"""
response = await api.get_tiktok_user_profile_liked_videos(tikhub_token=tikhub_token, tiktok_video_url=tiktok_video_url)
return response
The provided code snippet includes necessary dependencies for implementing the `get_douyin_video_data` function. Write a Python function `async def get_douyin_video_data(request: Request, douyin_video_url: str = None, video_id: str = None)` to solve the following problem:
## 用途/Usage - 获取抖音用户单个视频数据,参数是视频链接|分享口令 - Get the data of a single video of a Douyin user, the parameter is the video link. ## 参数/Parameter #### douyin_video_url(选填/Optional): - 视频链接。| 分享口令 - The video link.| Share code - 例子/Example: `https://www.douyin.com/video/7153585499477757192` `https://v.douyin.com/MkmSwy7/` #### video_id(选填/Optional): - 视频ID,可以从视频链接中获取。 - The video ID, can be obtained from the video link. - 例子/Example: `7153585499477757192` #### s_v_web_id(选填/Optional): - s_v_web_id,可以从浏览器访问抖音然后从cookie中获取。 - s_v_web_id, can be obtained from the browser to access Douyin and then from the cookie. - 例子/Example: `s_v_web_id=verify_leytkxgn_kvO5kOmO_SdMs_4t1o_B5ml_BUqtWM1mP6BF;` #### 备注/Note: - 参数`douyin_video_url`和`video_id`二选一即可,如果都填写,优先使用`video_id`以获得更快的响应速度。 - The parameters `douyin_video_url` and `video_id` can be selected, if both are filled in, the `video_id` is used first to get a faster response speed. ## 返回值/Return - 用户当个视频数据的列表,列表内包含JSON数据。 - List of user single video data, list contains JSON data.
Here is the function:
async def get_douyin_video_data(request: Request, douyin_video_url: str = None, video_id: str = None):
"""
## 用途/Usage
- 获取抖音用户单个视频数据,参数是视频链接|分享口令
- Get the data of a single video of a Douyin user, the parameter is the video link.
## 参数/Parameter
#### douyin_video_url(选填/Optional):
- 视频链接。| 分享口令
- The video link.| Share code
- 例子/Example:
`https://www.douyin.com/video/7153585499477757192`
`https://v.douyin.com/MkmSwy7/`
#### video_id(选填/Optional):
- 视频ID,可以从视频链接中获取。
- The video ID, can be obtained from the video link.
- 例子/Example:
`7153585499477757192`
#### s_v_web_id(选填/Optional):
- s_v_web_id,可以从浏览器访问抖音然后从cookie中获取。
- s_v_web_id, can be obtained from the browser to access Douyin and then from the cookie.
- 例子/Example:
`s_v_web_id=verify_leytkxgn_kvO5kOmO_SdMs_4t1o_B5ml_BUqtWM1mP6BF;`
#### 备注/Note:
- 参数`douyin_video_url`和`video_id`二选一即可,如果都填写,优先使用`video_id`以获得更快的响应速度。
- The parameters `douyin_video_url` and `video_id` can be selected, if both are filled in, the `video_id` is used first to get a faster response speed.
## 返回值/Return
- 用户当个视频数据的列表,列表内包含JSON数据。
- List of user single video data, list contains JSON data.
"""
if video_id is None or video_id == '':
# 获取视频ID
video_id = await api.get_douyin_video_id(douyin_video_url)
if video_id is None:
result = {
"status": "failed",
"platform": "douyin",
"message": "video_id获取失败/Failed to get video_id",
}
return ORJSONResponse(result)
if video_id is not None and video_id != '':
# 开始时间
start_time = time.time()
print('获取到的video_id数据:{}'.format(video_id))
if video_id is not None:
video_data = await api.get_douyin_video_data(video_id=video_id)
if video_data is None:
result = {
"status": "failed",
"platform": "douyin",
"endpoint": "/douyin_video_data/",
"message": "视频API数据获取失败/Failed to get video API data",
}
return ORJSONResponse(result)
# print('获取到的video_data:{}'.format(video_data))
# 记录API调用
await api_logs(start_time=start_time,
input_data={'douyin_video_url': douyin_video_url, 'video_id': video_id},
endpoint='douyin_video_data')
# 结束时间
total_time = float(format(time.time() - start_time, '.4f'))
# 返回数据
result = {
"status": "success",
"platform": "douyin",
"endpoint": "/douyin_video_data/",
"message": "获取视频数据成功/Got video data successfully",
"total_time": total_time,
"aweme_list": [video_data]
}
return ORJSONResponse(result)
else:
print('获取抖音video_id失败')
result = {
"status": "failed",
"platform": "douyin",
"endpoint": "/douyin_video_data/",
"message": "获取视频ID失败/Failed to get video ID",
"total_time": 0,
"aweme_list": []
}
return ORJSONResponse(result) | ## 用途/Usage - 获取抖音用户单个视频数据,参数是视频链接|分享口令 - Get the data of a single video of a Douyin user, the parameter is the video link. ## 参数/Parameter #### douyin_video_url(选填/Optional): - 视频链接。| 分享口令 - The video link.| Share code - 例子/Example: `https://www.douyin.com/video/7153585499477757192` `https://v.douyin.com/MkmSwy7/` #### video_id(选填/Optional): - 视频ID,可以从视频链接中获取。 - The video ID, can be obtained from the video link. - 例子/Example: `7153585499477757192` #### s_v_web_id(选填/Optional): - s_v_web_id,可以从浏览器访问抖音然后从cookie中获取。 - s_v_web_id, can be obtained from the browser to access Douyin and then from the cookie. - 例子/Example: `s_v_web_id=verify_leytkxgn_kvO5kOmO_SdMs_4t1o_B5ml_BUqtWM1mP6BF;` #### 备注/Note: - 参数`douyin_video_url`和`video_id`二选一即可,如果都填写,优先使用`video_id`以获得更快的响应速度。 - The parameters `douyin_video_url` and `video_id` can be selected, if both are filled in, the `video_id` is used first to get a faster response speed. ## 返回值/Return - 用户当个视频数据的列表,列表内包含JSON数据。 - List of user single video data, list contains JSON data. |
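A hedged client-side sketch: it assumes this FastAPI app is served locally and that the route is mounted at /douyin_video_data/ as the response payload above suggests; the share link is a placeholder.
import asyncio
import aiohttp

async def fetch_douyin_video(base_url: str, share_url: str) -> dict:
    # Query the endpoint defined above and return its JSON body.
    params = {"douyin_video_url": share_url}
    async with aiohttp.ClientSession() as session:
        async with session.get(f"{base_url}/douyin_video_data/", params=params) as resp:
            return await resp.json()

# Example: data = asyncio.run(fetch_douyin_video("http://localhost", "https://v.douyin.com/xxxxxxx/"))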
1,063 | import json
import aiohttp
import uvicorn
import zipfile
import threading
import configparser
from fastapi import FastAPI, Request
from fastapi.responses import ORJSONResponse, FileResponse
from slowapi import Limiter, _rate_limit_exceeded_handler
from slowapi.errors import RateLimitExceeded
from slowapi.util import get_remote_address
from pydantic import BaseModel
from starlette.responses import RedirectResponse
from scraper import Scraper
# (garbled context omitted: response-model fields such as endpoint / total_time / video_data,
#  plus the signature of the api_logs(start_time, input_data, endpoint, error_data) logging helper used below)
async def get_tiktok_profile_liked_videos(tikhub_token: str, tiktok_video_url: str = None):
"""
## 用途/Usage
- 获取抖音用户主页点赞视频数据,参数是用户链接|ID
- Get the data of a Douyin user profile liked video, the parameter is the user link or ID.
## 参数/Parameter
tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post
"""
response = await api.get_tiktok_user_profile_liked_videos(tikhub_token=tikhub_token, tiktok_video_url=tiktok_video_url)
return response
The provided code snippet includes necessary dependencies for implementing the `get_douyin_live_video_data` function. Write a Python function `async def get_douyin_live_video_data(request: Request, douyin_live_video_url: str = None, web_rid: str = None)` to solve the following problem:
## 用途/Usage - 获取抖音直播视频数据,参数是视频链接|分享口令 - Get the data of a Douyin live video, the parameter is the video link. ## 失效待修复/Waiting for repair
Here is the function:
async def get_douyin_live_video_data(request: Request, douyin_live_video_url: str = None, web_rid: str = None):
"""
## 用途/Usage
- 获取抖音直播视频数据,参数是视频链接|分享口令
- Get the data of a Douyin live video, the parameter is the video link.
## 失效待修复/Waiting for repair
"""
if web_rid is None or web_rid == '':
# 获取视频ID
web_rid = await api.get_douyin_video_id(douyin_live_video_url)
if web_rid is None:
result = {
"status": "failed",
"platform": "douyin",
"message": "web_rid获取失败/Failed to get web_rid",
}
return ORJSONResponse(result)
if web_rid is not None and web_rid != '':
# 开始时间
start_time = time.time()
print('获取到的web_rid:{}'.format(web_rid))
if web_rid is not None:
video_data = await api.get_douyin_live_video_data(web_rid=web_rid)
if video_data is None:
result = {
"status": "failed",
"platform": "douyin",
"endpoint": "/douyin_live_video_data/",
"message": "直播视频API数据获取失败/Failed to get live video API data",
}
return ORJSONResponse(result)
# print('获取到的video_data:{}'.format(video_data))
# 记录API调用
await api_logs(start_time=start_time,
input_data={'douyin_video_url': douyin_live_video_url, 'web_rid': web_rid},
endpoint='douyin_live_video_data')
# 结束时间
total_time = float(format(time.time() - start_time, '.4f'))
# 返回数据
result = {
"status": "success",
"platform": "douyin",
"endpoint": "/douyin_live_video_data/",
"message": "获取直播视频数据成功/Got live video data successfully",
"total_time": total_time,
"aweme_list": [video_data]
}
return ORJSONResponse(result)
else:
print('获取抖音video_id失败')
result = {
"status": "failed",
"platform": "douyin",
"endpoint": "/douyin_live_video_data/",
"message": "获取直播视频ID失败/Failed to get live video ID",
"total_time": 0,
"aweme_list": []
}
return ORJSONResponse(result) | ## 用途/Usage - 获取抖音直播视频数据,参数是视频链接|分享口令 - Get the data of a Douyin live video, the parameter is the video link. ## 失效待修复/Waiting for repair |
1,064 | import json
import aiohttp
import uvicorn
import zipfile
import threading
import configparser
from fastapi import FastAPI, Request
from fastapi.responses import ORJSONResponse, FileResponse
from slowapi import Limiter, _rate_limit_exceeded_handler
from slowapi.errors import RateLimitExceeded
from slowapi.util import get_remote_address
from pydantic import BaseModel
from starlette.responses import RedirectResponse
from scraper import Scraper
async def get_tiktok_profile_liked_videos(tikhub_token: str, tiktok_video_url: str = None):
"""
## 用途/Usage
- 获取抖音用户主页点赞视频数据,参数是用户链接|ID
- Get the data of a Douyin user profile liked video, the parameter is the user link or ID.
## 参数/Parameter
tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post
"""
response = await api.get_tiktok_user_profile_liked_videos(tikhub_token=tikhub_token, tiktok_video_url=tiktok_video_url)
return response
The provided code snippet includes necessary dependencies for implementing the `get_douyin_user_profile_videos` function. Write a Python function `async def get_douyin_user_profile_videos(tikhub_token: str, douyin_user_url: str = None)` to solve the following problem:
## 用途/Usage - 获取抖音用户主页数据,参数是用户链接|ID - Get the data of a Douyin user profile, the parameter is the user link or ID. ## 参数/Parameter tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post
Here is the function:
async def get_douyin_user_profile_videos(tikhub_token: str, douyin_user_url: str = None):
"""
## 用途/Usage
- 获取抖音用户主页数据,参数是用户链接|ID
- Get the data of a Douyin user profile, the parameter is the user link or ID.
## 参数/Parameter
tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post
"""
response = await api.get_douyin_user_profile_videos(tikhub_token=tikhub_token, profile_url=douyin_user_url)
return response | ## 用途/Usage - 获取抖音用户主页数据,参数是用户链接|ID - Get the data of a Douyin user profile, the parameter is the user link or ID. ## 参数/Parameter tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post |
1,065 | import json
import aiohttp
import uvicorn
import zipfile
import threading
import configparser
from fastapi import FastAPI, Request
from fastapi.responses import ORJSONResponse, FileResponse
from slowapi import Limiter, _rate_limit_exceeded_handler
from slowapi.errors import RateLimitExceeded
from slowapi.util import get_remote_address
from pydantic import BaseModel
from starlette.responses import RedirectResponse
from scraper import Scraper
async def get_tiktok_profile_liked_videos(tikhub_token: str, tiktok_video_url: str = None):
"""
## 用途/Usage
- 获取抖音用户主页点赞视频数据,参数是用户链接|ID
- Get the data of a Douyin user profile liked video, the parameter is the user link or ID.
## 参数/Parameter
tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post
"""
response = await api.get_tiktok_user_profile_liked_videos(tikhub_token=tikhub_token, tiktok_video_url=tiktok_video_url)
return response
The provided code snippet includes necessary dependencies for implementing the `get_douyin_user_profile_liked_videos` function. Write a Python function `async def get_douyin_user_profile_liked_videos(tikhub_token: str, douyin_user_url: str = None)` to solve the following problem:
## 用途/Usage - 获取抖音用户喜欢的视频数据,参数是用户链接|ID - Get the data of a Douyin user profile liked videos, the parameter is the user link or ID. ## 参数/Parameter tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post
Here is the function:
async def get_douyin_user_profile_liked_videos(tikhub_token: str, douyin_user_url: str = None):
"""
## 用途/Usage
- 获取抖音用户喜欢的视频数据,参数是用户链接|ID
- Get the data of a Douyin user profile liked videos, the parameter is the user link or ID.
## 参数/Parameter
tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post
"""
response = await api.get_douyin_profile_liked_data(tikhub_token=tikhub_token, profile_url=douyin_user_url)
return response | ## 用途/Usage - 获取抖音用户喜欢的视频数据,参数是用户链接|ID - Get the data of a Douyin user profile liked videos, the parameter is the user link or ID. ## 参数/Parameter tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post |
1,066 | import json
import aiohttp
import uvicorn
import zipfile
import threading
import configparser
from fastapi import FastAPI, Request
from fastapi.responses import ORJSONResponse, FileResponse
from slowapi import Limiter, _rate_limit_exceeded_handler
from slowapi.errors import RateLimitExceeded
from slowapi.util import get_remote_address
from pydantic import BaseModel
from starlette.responses import RedirectResponse
from scraper import Scraper
async def get_tiktok_profile_liked_videos(tikhub_token: str, tiktok_video_url: str = None):
"""
## 用途/Usage
- 获取抖音用户主页点赞视频数据,参数是用户链接|ID
- Get the data of a Douyin user profile liked video, the parameter is the user link or ID.
## 参数/Parameter
tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post
"""
response = await api.get_tiktok_user_profile_liked_videos(tikhub_token=tikhub_token, tiktok_video_url=tiktok_video_url)
return response
The provided code snippet includes necessary dependencies for implementing the `get_douyin_video_comments` function. Write a Python function `async def get_douyin_video_comments(tikhub_token: str, douyin_video_url: str = None)` to solve the following problem:
## 用途/Usage - 获取抖音视频评论数据,参数是视频链接|分享口令 - Get the data of a Douyin video comments, the parameter is the video link. ## 参数/Parameter tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post
Here is the function:
async def get_douyin_video_comments(tikhub_token: str, douyin_video_url: str = None):
"""
## 用途/Usage
- 获取抖音视频评论数据,参数是视频链接|分享口令
- Get the data of a Douyin video comments, the parameter is the video link.
## 参数/Parameter
tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post
"""
response = await api.get_douyin_video_comments(tikhub_token=tikhub_token, video_url=douyin_video_url)
return response | ## 用途/Usage - 获取抖音视频评论数据,参数是视频链接|分享口令 - Get the data of a Douyin video comments, the parameter is the video link. ## 参数/Parameter tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post |
1,067 | import json
import aiohttp
import uvicorn
import zipfile
import threading
import configparser
from fastapi import FastAPI, Request
from fastapi.responses import ORJSONResponse, FileResponse
from slowapi import Limiter, _rate_limit_exceeded_handler
from slowapi.errors import RateLimitExceeded
from slowapi.util import get_remote_address
from pydantic import BaseModel
from starlette.responses import RedirectResponse
from scraper import Scraper
import time
async def api_logs(start_time, input_data, endpoint, error_data: dict = None):
    # 记录API调用日志的辅助函数,完整实现见后续片段/Logging helper; the full body appears in later snippets.
    ...
async def get_tiktok_profile_liked_videos(tikhub_token: str, tiktok_video_url: str = None):
    """
    ## 用途/Usage
    - 获取TikTok用户主页点赞视频数据,参数是用户链接|ID
    - Get the data of a TikTok user profile liked videos, the parameter is the user link or ID.
## 参数/Parameter
tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post
"""
response = await api.get_tiktok_user_profile_liked_videos(tikhub_token=tikhub_token, tiktok_video_url=tiktok_video_url)
return response
The provided code snippet includes necessary dependencies for implementing the `get_tiktok_video_data` function. Write a Python function `async def get_tiktok_video_data(request: Request, tiktok_video_url: str = None, video_id: str = None)` to solve the following problem:
## 用途/Usage - 获取单个视频数据,参数是视频链接| 分享口令。 - Get single video data, the parameter is the video link. ## 参数/Parameter #### tiktok_video_url(选填/Optional): - 视频链接。| 分享口令 - The video link.| Share code - 例子/Example: `https://www.tiktok.com/@evil0ctal/video/7156033831819037994` `https://vm.tiktok.com/TTPdkQvKjP/` #### video_id(选填/Optional): - 视频ID,可以从视频链接中获取。 - The video ID, can be obtained from the video link. - 例子/Example: `7156033831819037994` #### 备注/Note: - 参数`tiktok_video_url`和`video_id`二选一即可,如果都填写,优先使用`video_id`以获得更快的响应速度。 - The parameters `tiktok_video_url` and `video_id` can be selected, if both are filled in, the `video_id` is used first to get a faster response speed. ## 返回值/Return - 用户当个视频数据的列表,列表内包含JSON数据。 - List of user single video data, list contains JSON data.
Here is the function:
async def get_tiktok_video_data(request: Request, tiktok_video_url: str = None, video_id: str = None):
"""
## 用途/Usage
- 获取单个视频数据,参数是视频链接| 分享口令。
- Get single video data, the parameter is the video link.
## 参数/Parameter
#### tiktok_video_url(选填/Optional):
- 视频链接。| 分享口令
- The video link.| Share code
- 例子/Example:
`https://www.tiktok.com/@evil0ctal/video/7156033831819037994`
`https://vm.tiktok.com/TTPdkQvKjP/`
#### video_id(选填/Optional):
- 视频ID,可以从视频链接中获取。
- The video ID, can be obtained from the video link.
- 例子/Example:
`7156033831819037994`
#### 备注/Note:
- 参数`tiktok_video_url`和`video_id`二选一即可,如果都填写,优先使用`video_id`以获得更快的响应速度。
- The parameters `tiktok_video_url` and `video_id` can be selected, if both are filled in, the `video_id` is used first to get a faster response speed.
## 返回值/Return
    - 用户单个视频数据的列表,列表内包含JSON数据。
- List of user single video data, list contains JSON data.
"""
# 开始时间
start_time = time.time()
if video_id is None or video_id == "":
video_id = await api.get_tiktok_video_id(tiktok_video_url)
if video_id is None:
return ORJSONResponse({"status": "fail", "platform": "tiktok", "endpoint": "/tiktok_video_data/",
"message": "获取视频ID失败/Get video ID failed"})
if video_id is not None and video_id != '':
print('开始解析单个TikTok视频数据')
video_data = await api.get_tiktok_video_data(video_id)
# TikTok的API数据如果为空或者返回的数据中没有视频数据,就返回错误信息
# If the TikTok API data is empty or there is no video data in the returned data, an error message is returned
if video_data is None or video_data.get('aweme_id') != video_id:
print('视频数据获取失败/Failed to get video data')
result = {
"status": "failed",
"platform": "tiktok",
"endpoint": "/tiktok_video_data/",
"message": "视频数据获取失败/Failed to get video data"
}
return ORJSONResponse(result)
# 记录API调用
await api_logs(start_time=start_time,
input_data={'tiktok_video_url': tiktok_video_url, 'video_id': video_id},
endpoint='tiktok_video_data')
# 结束时间
total_time = float(format(time.time() - start_time, '.4f'))
# 返回数据
result = {
"status": "success",
"platform": "tiktok",
"endpoint": "/tiktok_video_data/",
"message": "获取视频数据成功/Got video data successfully",
"total_time": total_time,
"aweme_list": [video_data]
}
return ORJSONResponse(result)
else:
print('视频链接错误/Video link error')
result = {
"status": "failed",
"platform": "tiktok",
"endpoint": "/tiktok_video_data/",
"message": "视频链接错误/Video link error"
}
return ORJSONResponse(result) | ## 用途/Usage - 获取单个视频数据,参数是视频链接| 分享口令。 - Get single video data, the parameter is the video link. ## 参数/Parameter #### tiktok_video_url(选填/Optional): - 视频链接。| 分享口令 - The video link.| Share code - 例子/Example: `https://www.tiktok.com/@evil0ctal/video/7156033831819037994` `https://vm.tiktok.com/TTPdkQvKjP/` #### video_id(选填/Optional): - 视频ID,可以从视频链接中获取。 - The video ID, can be obtained from the video link. - 例子/Example: `7156033831819037994` #### 备注/Note: - 参数`tiktok_video_url`和`video_id`二选一即可,如果都填写,优先使用`video_id`以获得更快的响应速度。 - The parameters `tiktok_video_url` and `video_id` can be selected, if both are filled in, the `video_id` is used first to get a faster response speed. ## 返回值/Return - 用户当个视频数据的列表,列表内包含JSON数据。 - List of user single video data, list contains JSON data. |
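A hedged client-side sketch for calling this handler over HTTP. The route path is inferred from the `endpoint` field in the responses above, and the host/port are placeholders (the real ones come from config.ini); passing `video_id` directly skips the extra ID-resolution step.
import asyncio
import aiohttp

async def fetch_tiktok_video(video_id: str) -> dict:
    params = {"video_id": video_id}
    async with aiohttp.ClientSession() as session:
        async with session.get("http://localhost:8000/tiktok_video_data/", params=params) as resp:
            return await resp.json()

print(asyncio.run(fetch_tiktok_video("7156033831819037994")))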
1,068 | import json
import aiohttp
import uvicorn
import zipfile
import threading
import configparser
from fastapi import FastAPI, Request
from fastapi.responses import ORJSONResponse, FileResponse
from slowapi import Limiter, _rate_limit_exceeded_handler
from slowapi.errors import RateLimitExceeded
from slowapi.util import get_remote_address
from pydantic import BaseModel
from starlette.responses import RedirectResponse
from scraper import Scraper
async def get_tiktok_profile_liked_videos(tikhub_token: str, tiktok_video_url: str = None):
    """
    ## 用途/Usage
    - 获取TikTok用户主页点赞视频数据,参数是用户链接|ID
    - Get the data of a TikTok user profile liked videos, the parameter is the user link or ID.
## 参数/Parameter
tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post
"""
response = await api.get_tiktok_user_profile_liked_videos(tikhub_token=tikhub_token, tiktok_video_url=tiktok_video_url)
return response
The provided code snippet includes necessary dependencies for implementing the `get_tiktok_profile_videos` function. Write a Python function `async def get_tiktok_profile_videos(tikhub_token: str, tiktok_video_url: str = None)` to solve the following problem:
## 用途/Usage - 获取TikTok用户主页数据,参数是用户链接|ID - Get the data of a TikTok user profile, the parameter is the user link or ID. ## 参数/Parameter tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post
Here is the function:
async def get_tiktok_profile_videos(tikhub_token: str, tiktok_video_url: str = None):
"""
## 用途/Usage
    - 获取TikTok用户主页数据,参数是用户链接|ID
    - Get the data of a TikTok user profile, the parameter is the user link or ID.
## 参数/Parameter
tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post
"""
response = await api.get_tiktok_user_profile_videos(tikhub_token=tikhub_token, tiktok_video_url=tiktok_video_url)
return response | ## 用途/Usage - 获取抖音用户主页数据,参数是用户链接|ID - Get the data of a Douyin user profile, the parameter is the user link or ID. ## 参数/Parameter tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post |
1,069 | import json
import aiohttp
import uvicorn
import zipfile
import threading
import configparser
from fastapi import FastAPI, Request
from fastapi.responses import ORJSONResponse, FileResponse
from slowapi import Limiter, _rate_limit_exceeded_handler
from slowapi.errors import RateLimitExceeded
from slowapi.util import get_remote_address
from pydantic import BaseModel
from starlette.responses import RedirectResponse
from scraper import Scraper
async def get_tiktok_profile_liked_videos(tikhub_token: str, tiktok_video_url: str = None):
    """
    ## 用途/Usage
    - 获取TikTok用户主页点赞视频数据,参数是用户链接|ID
    - Get the data of a TikTok user profile liked videos, the parameter is the user link or ID.
## 参数/Parameter
tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post
"""
response = await api.get_tiktok_user_profile_liked_videos(tikhub_token=tikhub_token, tiktok_video_url=tiktok_video_url)
return response
The provided code snippet includes necessary dependencies for implementing the `get_tiktok_profile_liked_videos` function. Write a Python function `async def get_tiktok_profile_liked_videos(tikhub_token: str, tiktok_video_url: str = None)` to solve the following problem:
## 用途/Usage - 获取TikTok用户主页点赞视频数据,参数是用户链接|ID - Get the data of a TikTok user profile liked videos, the parameter is the user link or ID. ## 参数/Parameter tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post
Here is the function:
async def get_tiktok_profile_liked_videos(tikhub_token: str, tiktok_video_url: str = None):
"""
## 用途/Usage
    - 获取TikTok用户主页点赞视频数据,参数是用户链接|ID
    - Get the data of a TikTok user profile liked videos, the parameter is the user link or ID.
## 参数/Parameter
tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post
"""
response = await api.get_tiktok_user_profile_liked_videos(tikhub_token=tikhub_token, tiktok_video_url=tiktok_video_url)
return response | ## 用途/Usage - 获取抖音用户主页点赞视频数据,参数是用户链接|ID - Get the data of a Douyin user profile liked video, the parameter is the user link or ID. ## 参数/Parameter tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post |
1,070 | import json
import aiohttp
import uvicorn
import zipfile
import threading
import configparser
from fastapi import FastAPI, Request
from fastapi.responses import ORJSONResponse, FileResponse
from slowapi import Limiter, _rate_limit_exceeded_handler
from slowapi.errors import RateLimitExceeded
from slowapi.util import get_remote_address
from pydantic import BaseModel
from starlette.responses import RedirectResponse
from scraper import Scraper
import time
config = configparser.ConfigParser()
config.read('config.ini', encoding='utf-8')
port = int(config["Web_API"]["Port"])
async def api_logs(start_time, input_data, endpoint, error_data: dict = None):
if config["Web_API"]["Allow_Logs"] == "True":
time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
total_time = float(format(time.time() - start_time, '.4f'))
file_name = "API_logs.json"
# 写入日志内容
with open(file_name, "a", encoding="utf-8") as f:
data = {
"time": time_now,
"endpoint": f'/{endpoint}/',
"total_time": total_time,
"input_data": input_data,
"error_data": error_data if error_data else "No error"
}
f.write(json.dumps(data, ensure_ascii=False) + ",\n")
print('日志记录成功!')
return 1
else:
print('日志记录已关闭!')
return
async def get_tiktok_profile_liked_videos(tikhub_token: str, tiktok_video_url: str = None):
    """
    ## 用途/Usage
    - 获取TikTok用户主页点赞视频数据,参数是用户链接|ID
    - Get the data of a TikTok user profile liked videos, the parameter is the user link or ID.
## 参数/Parameter
tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post
"""
response = await api.get_tiktok_user_profile_liked_videos(tikhub_token=tikhub_token, tiktok_video_url=tiktok_video_url)
return response
async def Get_Shortcut():
data = {
'version': config["Web_API"]["iOS_Shortcut_Version"],
'update': config["Web_API"]['iOS_Shortcut_Update_Time'],
'link': config["Web_API"]['iOS_Shortcut_Link'],
'link_en': config["Web_API"]['iOS_Shortcut_Link_EN'],
'note': config["Web_API"]['iOS_Shortcut_Update_Note'],
'note_en': config["Web_API"]['iOS_Shortcut_Update_Note_EN'],
}
return ORJSONResponse(data) | null |
1,071 | import json
import aiohttp
import uvicorn
import zipfile
import threading
import configparser
from fastapi import FastAPI, Request
from fastapi.responses import ORJSONResponse, FileResponse
from slowapi import Limiter, _rate_limit_exceeded_handler
from slowapi.errors import RateLimitExceeded
from slowapi.util import get_remote_address
from pydantic import BaseModel
from starlette.responses import RedirectResponse
from scraper import Scraper
import os
import time
config = configparser.ConfigParser()
config.read('config.ini', encoding='utf-8')
port = int(config["Web_API"]["Port"])
async def api_logs(start_time, input_data, endpoint, error_data: dict = None):
if config["Web_API"]["Allow_Logs"] == "True":
time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
total_time = float(format(time.time() - start_time, '.4f'))
file_name = "API_logs.json"
# 写入日志内容
with open(file_name, "a", encoding="utf-8") as f:
data = {
"time": time_now,
"endpoint": f'/{endpoint}/',
"total_time": total_time,
"input_data": input_data,
"error_data": error_data if error_data else "No error"
}
f.write(json.dumps(data, ensure_ascii=False) + ",\n")
print('日志记录成功!')
return 1
else:
print('日志记录已关闭!')
        return
async def hybrid_parsing(request: Request, url: str, minimal: bool = False):
"""
## 用途/Usage
- 获取[抖音|TikTok]单个视频数据,参数是视频链接或分享口令。
- Get [Douyin|TikTok] single video data, the parameter is the video link or share code.
## 参数/Parameter
#### url(必填/Required)):
- 视频链接。| 分享口令
- The video link.| Share code
- 例子/Example:
`https://www.douyin.com/video/7153585499477757192`
`https://v.douyin.com/MkmSwy7/`
`https://vm.tiktok.com/TTPdkQvKjP/`
`https://www.tiktok.com/@tvamii/video/7045537727743380782`
#### minimal(选填/Optional Default:False):
- 是否返回精简版数据。
- Whether to return simplified data.
- 例子/Example:
`True`
`False`
## 返回值/Return
    - 用户单个视频数据的列表,列表内包含JSON数据。
- List of user single video data, list contains JSON data.
"""
print("正在进行混合解析...")
# 开始时间
start_time = time.time()
# 获取数据
data = await api.hybrid_parsing(url)
# 是否精简
if minimal:
result = api.hybrid_parsing_minimal(data)
else:
# 更新数据
result = {
'url': url,
"endpoint": "/api/",
"total_time": float(format(time.time() - start_time, '.4f')),
}
# 合并数据
result.update(data)
# 记录API调用
await api_logs(start_time=start_time,
input_data={'url': url},
endpoint='api')
return ORJSONResponse(result)
async def get_tiktok_profile_liked_videos(tikhub_token: str, tiktok_video_url: str = None):
    """
    ## 用途/Usage
    - 获取TikTok用户主页点赞视频数据,参数是用户链接|ID
    - Get the data of a TikTok user profile liked videos, the parameter is the user link or ID.
## 参数/Parameter
tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post
"""
response = await api.get_tiktok_user_profile_liked_videos(tikhub_token=tikhub_token, tiktok_video_url=tiktok_video_url)
return response
The provided code snippet includes necessary dependencies for implementing the `download_file_hybrid` function. Write a Python function `async def download_file_hybrid(request: Request, url: str, prefix: bool = True, watermark: bool = False)` to solve the following problem:
## 用途/Usage ### [中文] - 将[抖音|TikTok]链接作为参数提交至此端点,返回[视频|图片]文件下载请求。 ### [English] - Submit the [Douyin|TikTok] link as a parameter to this endpoint and return the [video|picture] file download request. # 参数/Parameter - url:str -> [Douyin|TikTok] [视频|图片] 链接/ [Douyin|TikTok] [video|image] link - prefix: bool -> [True/False] 是否添加前缀/Whether to add a prefix - watermark: bool -> [True/False] 是否添加水印/Whether to add a watermark
Here is the function:
async def download_file_hybrid(request: Request, url: str, prefix: bool = True, watermark: bool = False):
"""
## 用途/Usage
### [中文]
- 将[抖音|TikTok]链接作为参数提交至此端点,返回[视频|图片]文件下载请求。
### [English]
- Submit the [Douyin|TikTok] link as a parameter to this endpoint and return the [video|picture] file download request.
# 参数/Parameter
- url:str -> [Douyin|TikTok] [视频|图片] 链接/ [Douyin|TikTok] [video|image] link
- prefix: bool -> [True/False] 是否添加前缀/Whether to add a prefix
- watermark: bool -> [True/False] 是否添加水印/Whether to add a watermark
"""
# 是否开启此端点/Whether to enable this endpoint
if config["Web_API"]["Download_Switch"] != "True":
return ORJSONResponse({"status": "endpoint closed",
"message": "此端点已关闭请在配置文件中开启/This endpoint is closed, please enable it in the configuration file"})
# 开始时间
start_time = time.time()
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
}
data = await api.hybrid_parsing(url)
if data is None:
return ORJSONResponse(data)
else:
# 记录API调用
await api_logs(start_time=start_time,
input_data={'url': url},
endpoint='download')
url_type = data.get('type')
platform = data.get('platform')
aweme_id = data.get('aweme_id')
file_name_prefix = config["Web_API"]["File_Name_Prefix"] if prefix else ''
root_path = config["Web_API"]["Download_Path"]
# 查看目录是否存在,不存在就创建
if not os.path.exists(root_path):
os.makedirs(root_path)
if url_type == 'video':
file_name = file_name_prefix + platform + '_' + aweme_id + '.mp4' if not watermark else file_name_prefix + platform + '_' + aweme_id + '_watermark' + '.mp4'
url = data.get('video_data').get('nwm_video_url_HQ') if not watermark else data.get('video_data').get(
'wm_video_url_HQ')
print('url: ', url)
file_path = root_path + "/" + file_name
print('file_path: ', file_path)
# 判断文件是否存在,存在就直接返回
if os.path.exists(file_path):
print('文件已存在,直接返回')
return FileResponse(path=file_path, media_type='video/mp4', filename=file_name)
else:
if platform == 'douyin':
async with aiohttp.ClientSession() as session:
async with session.get(url=url, headers=headers, allow_redirects=False) as response:
r = response.headers
cdn_url = r.get('location')
async with session.get(url=cdn_url) as res:
r = await res.content.read()
elif platform == 'tiktok':
async with aiohttp.ClientSession() as session:
async with session.get(url=url, headers=headers) as res:
r = await res.content.read()
with open(file_path, 'wb') as f:
f.write(r)
return FileResponse(path=file_path, media_type='video/mp4', filename=file_name)
elif url_type == 'image':
url = data.get('image_data').get('no_watermark_image_list') if not watermark else data.get(
'image_data').get('watermark_image_list')
print('url: ', url)
zip_file_name = file_name_prefix + platform + '_' + aweme_id + '_images.zip' if not watermark else file_name_prefix + platform + '_' + aweme_id + '_images_watermark.zip'
zip_file_path = root_path + "/" + zip_file_name
print('zip_file_name: ', zip_file_name)
print('zip_file_path: ', zip_file_path)
# 判断文件是否存在,存在就直接返回、
if os.path.exists(zip_file_path):
print('文件已存在,直接返回')
return FileResponse(path=zip_file_path, media_type='zip', filename=zip_file_name)
file_path_list = []
for i in url:
async with aiohttp.ClientSession() as session:
async with session.get(url=i, headers=headers) as res:
content_type = res.headers.get('content-type')
file_format = content_type.split('/')[1]
r = await res.content.read()
index = int(url.index(i))
file_name = file_name_prefix + platform + '_' + aweme_id + '_' + str(
index + 1) + '.' + file_format if not watermark else \
file_name_prefix + platform + '_' + aweme_id + '_' + str(
index + 1) + '_watermark' + '.' + file_format
file_path = root_path + "/" + file_name
file_path_list.append(file_path)
print('file_path: ', file_path)
with open(file_path, 'wb') as f:
f.write(r)
if len(url) == len(file_path_list):
zip_file = zipfile.ZipFile(zip_file_path, 'w')
for f in file_path_list:
zip_file.write(os.path.join(f), f, zipfile.ZIP_DEFLATED)
zip_file.close()
return FileResponse(path=zip_file_path, media_type='zip', filename=zip_file_name)
else:
return ORJSONResponse(data) | ## 用途/Usage ### [中文] - 将[抖音|TikTok]链接作为参数提交至此端点,返回[视频|图片]文件下载请求。 ### [English] - Submit the [Douyin|TikTok] link as a parameter to this endpoint and return the [video|picture] file download request. # 参数/Parameter - url:str -> [Douyin|TikTok] [视频|图片] 链接/ [Douyin|TikTok] [video|image] link - prefix: bool -> [True/False] 是否添加前缀/Whether to add a prefix - watermark: bool -> [True/False] 是否添加水印/Whether to add a watermark |
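A short client sketch for the download endpoint above, assuming `Download_Switch` is enabled in config.ini. The base URL is a placeholder and the query parameters mirror the handler's signature; `/download` is the path used by the redirect endpoints below.
import requests

base = "http://localhost:8000"  # placeholder host/port
video_url = "https://v.douyin.com/MkmSwy7/"
resp = requests.get(f"{base}/download", params={"url": video_url, "prefix": True, "watermark": False})
with open("douyin_video.mp4", "wb") as f:
    f.write(resp.content)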
1,072 | import json
import aiohttp
import uvicorn
import zipfile
import threading
import configparser
from fastapi import FastAPI, Request
from fastapi.responses import ORJSONResponse, FileResponse
from slowapi import Limiter, _rate_limit_exceeded_handler
from slowapi.errors import RateLimitExceeded
from slowapi.util import get_remote_address
from pydantic import BaseModel
from starlette.responses import RedirectResponse
from scraper import Scraper
async def get_tiktok_profile_liked_videos(tikhub_token: str, tiktok_video_url: str = None):
    """
    ## 用途/Usage
    - 获取TikTok用户主页点赞视频数据,参数是用户链接|ID
    - Get the data of a TikTok user profile liked videos, the parameter is the user link or ID.
## 参数/Parameter
tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post
"""
response = await api.get_tiktok_user_profile_liked_videos(tikhub_token=tikhub_token, tiktok_video_url=tiktok_video_url)
return response
The provided code snippet includes necessary dependencies for implementing the `batch_download_file` function. Write a Python function `async def batch_download_file(url_list: str, prefix: bool = True)` to solve the following problem:
批量下载文件端点/Batch download file endpoint 未完工/Unfinished
Here is the function:
async def batch_download_file(url_list: str, prefix: bool = True):
"""
批量下载文件端点/Batch download file endpoint
未完工/Unfinished
"""
print('url_list: ', url_list)
return ORJSONResponse({"status": "failed",
"message": "嘿嘿嘿,这个功能还没做呢,等我有空再做吧/Hehehe, this function hasn't been done yet, I'll do it when I have time"}) | 批量下载文件端点/Batch download file endpoint 未完工/Unfinished |
1,073 | import json
import aiohttp
import uvicorn
import zipfile
import threading
import configparser
from fastapi import FastAPI, Request
from fastapi.responses import ORJSONResponse, FileResponse
from slowapi import Limiter, _rate_limit_exceeded_handler
from slowapi.errors import RateLimitExceeded
from slowapi.util import get_remote_address
from pydantic import BaseModel
from starlette.responses import RedirectResponse
from scraper import Scraper
import time
config = configparser.ConfigParser()
config.read('config.ini', encoding='utf-8')
port = int(config["Web_API"]["Port"])
async def api_logs(start_time, input_data, endpoint, error_data: dict = None):
if config["Web_API"]["Allow_Logs"] == "True":
time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
total_time = float(format(time.time() - start_time, '.4f'))
file_name = "API_logs.json"
# 写入日志内容
with open(file_name, "a", encoding="utf-8") as f:
data = {
"time": time_now,
"endpoint": f'/{endpoint}/',
"total_time": total_time,
"input_data": input_data,
"error_data": error_data if error_data else "No error"
}
f.write(json.dumps(data, ensure_ascii=False) + ",\n")
print('日志记录成功!')
return 1
else:
print('日志记录已关闭!')
return
async def get_tiktok_profile_liked_videos(tikhub_token: str, tiktok_video_url: str = None):
    """
    ## 用途/Usage
    - 获取TikTok用户主页点赞视频数据,参数是用户链接|ID
    - Get the data of a TikTok user profile liked videos, the parameter is the user link or ID.
## 参数/Parameter
tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post
"""
response = await api.get_tiktok_user_profile_liked_videos(tikhub_token=tikhub_token, tiktok_video_url=tiktok_video_url)
return response
The provided code snippet includes necessary dependencies for implementing the `download_douyin_discover` function. Write a Python function `async def download_douyin_discover(modal_id: str, prefix: bool = True, watermark: bool = False)` to solve the following problem:
## 用途/Usage ### [中文] - 将抖音域名改为当前服务器域名即可调用此端点,返回[视频|图片]文件下载请求。 - 例如原链接:https://www.douyin.com/discover?modal_id=1234567890123456789 改成 https://api.douyin.wtf/discover?modal_id=1234567890123456789 即可调用此端点。 ### [English] - Change the Douyin domain name to the current server domain name to call this endpoint and return the video file download request. - For example, the original link: https://douyin.com/discover?modal_id=1234567890123456789 becomes https://api.douyin.wtf/discover?modal_id=1234567890123456789 to call this endpoint. # 参数/Parameter - modal_id: str -> 抖音视频ID/Douyin video ID - prefix: bool -> [True/False] 是否添加前缀/Whether to add a prefix - watermark: bool -> [True/False] 是否添加水印/Whether to add a watermark
Here is the function:
async def download_douyin_discover(modal_id: str, prefix: bool = True, watermark: bool = False):
"""
## 用途/Usage
### [中文]
- 将抖音域名改为当前服务器域名即可调用此端点,返回[视频|图片]文件下载请求。
- 例如原链接:https://www.douyin.com/discover?modal_id=1234567890123456789 改成 https://api.douyin.wtf/discover?modal_id=1234567890123456789 即可调用此端点。
### [English]
- Change the Douyin domain name to the current server domain name to call this endpoint and return the video file download request.
- For example, the original link: https://douyin.com/discover?modal_id=1234567890123456789 becomes https://api.douyin.wtf/discover?modal_id=1234567890123456789 to call this endpoint.
# 参数/Parameter
- modal_id: str -> 抖音视频ID/Douyin video ID
- prefix: bool -> [True/False] 是否添加前缀/Whether to add a prefix
- watermark: bool -> [True/False] 是否添加水印/Whether to add a watermark
"""
# 是否开启此端点/Whether to enable this endpoint
if config["Web_API"]["Download_Switch"] != "True":
return ORJSONResponse({"status": "endpoint closed",
"message": "此端点已关闭请在配置文件中开启/This endpoint is closed, please enable it in the configuration file"})
video_url = f"https://www.douyin.com/discover?modal_id={modal_id}"
download_url = f"{domain}/download?url={video_url}&prefix={prefix}&watermark={watermark}"
return RedirectResponse(download_url) | ## 用途/Usage ### [中文] - 将抖音域名改为当前服务器域名即可调用此端点,返回[视频|图片]文件下载请求。 - 例如原链接:https://www.douyin.com/discover?modal_id=1234567890123456789 改成 https://api.douyin.wtf/discover?modal_id=1234567890123456789 即可调用此端点。 ### [English] - Change the Douyin domain name to the current server domain name to call this endpoint and return the video file download request. - For example, the original link: https://douyin.com/discover?modal_id=1234567890123456789 becomes https://api.douyin.wtf/discover?modal_id=1234567890123456789 to call this endpoint. # 参数/Parameter - modal_id: str -> 抖音视频ID/Douyin video ID - prefix: bool -> [True/False] 是否添加前缀/Whether to add a prefix - watermark: bool -> [True/False] 是否添加水印/Whether to add a watermark |
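For clarity, this is the redirect target the handler builds for a given modal_id; the domain value below is only an example, the real one is read from the configuration.
modal_id = "1234567890123456789"
domain = "https://api.douyin.wtf"  # example value; the handler reads it from config
video_url = f"https://www.douyin.com/discover?modal_id={modal_id}"
download_url = f"{domain}/download?url={video_url}&prefix=True&watermark=False"
print(download_url)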
1,074 | import json
import aiohttp
import uvicorn
import zipfile
import threading
import configparser
from fastapi import FastAPI, Request
from fastapi.responses import ORJSONResponse, FileResponse
from slowapi import Limiter, _rate_limit_exceeded_handler
from slowapi.errors import RateLimitExceeded
from slowapi.util import get_remote_address
from pydantic import BaseModel
from starlette.responses import RedirectResponse
from scraper import Scraper
import time
config = configparser.ConfigParser()
config.read('config.ini', encoding='utf-8')
port = int(config["Web_API"]["Port"])
async def api_logs(start_time, input_data, endpoint, error_data: dict = None):
if config["Web_API"]["Allow_Logs"] == "True":
time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
total_time = float(format(time.time() - start_time, '.4f'))
file_name = "API_logs.json"
# 写入日志内容
with open(file_name, "a", encoding="utf-8") as f:
data = {
"time": time_now,
"endpoint": f'/{endpoint}/',
"total_time": total_time,
"input_data": input_data,
"error_data": error_data if error_data else "No error"
}
f.write(json.dumps(data, ensure_ascii=False) + ",\n")
print('日志记录成功!')
return 1
else:
print('日志记录已关闭!')
return
async def get_tiktok_profile_liked_videos(tikhub_token: str, tiktok_video_url: str = None):
    """
    ## 用途/Usage
    - 获取TikTok用户主页点赞视频数据,参数是用户链接|ID
    - Get the data of a TikTok user profile liked videos, the parameter is the user link or ID.
## 参数/Parameter
tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post
"""
response = await api.get_tiktok_user_profile_liked_videos(tikhub_token=tikhub_token, tiktok_video_url=tiktok_video_url)
return response
The provided code snippet includes necessary dependencies for implementing the `download_tiktok_video` function. Write a Python function `async def download_tiktok_video(user_id: str, aweme_id: str, prefix: bool = True, watermark: bool = False)` to solve the following problem:
## 用途/Usage ### [中文] - 将TikTok域名改为当前服务器域名即可调用此端点,返回[视频|图片]文件下载请求。 - 例如原链接:https://www.tiktok.com/@evil0ctal/video/7156033831819037994 改成 https://api.douyin.wtf/@evil0ctal/video/7156033831819037994 即可调用此端点。 ### [English] - Change the TikTok domain name to the current server domain name to call this endpoint and return the video file download request. - For example, the original link: https://www.tiktok.com/@evil0ctal/video/7156033831819037994 becomes https://api.douyin.wtf/@evil0ctal/video/7156033831819037994 to call this endpoint. # 参数/Parameter - user_id: str -> TikTok用户ID/TikTok user ID - aweme_id: str -> TikTok视频ID/TikTok video ID - prefix: bool -> [True/False] 是否添加前缀/Whether to add a prefix - watermark: bool -> [True/False] 是否添加水印/Whether to add a watermark
Here is the function:
async def download_tiktok_video(user_id: str, aweme_id: str, prefix: bool = True, watermark: bool = False):
"""
## 用途/Usage
### [中文]
- 将TikTok域名改为当前服务器域名即可调用此端点,返回[视频|图片]文件下载请求。
- 例如原链接:https://www.tiktok.com/@evil0ctal/video/7156033831819037994 改成 https://api.douyin.wtf/@evil0ctal/video/7156033831819037994 即可调用此端点。
### [English]
- Change the TikTok domain name to the current server domain name to call this endpoint and return the video file download request.
- For example, the original link: https://www.tiktok.com/@evil0ctal/video/7156033831819037994 becomes https://api.douyin.wtf/@evil0ctal/video/7156033831819037994 to call this endpoint.
# 参数/Parameter
- user_id: str -> TikTok用户ID/TikTok user ID
- aweme_id: str -> TikTok视频ID/TikTok video ID
- prefix: bool -> [True/False] 是否添加前缀/Whether to add a prefix
- watermark: bool -> [True/False] 是否添加水印/Whether to add a watermark
"""
# 是否开启此端点/Whether to enable this endpoint
if config["Web_API"]["Download_Switch"] != "True":
return ORJSONResponse({"status": "endpoint closed",
"message": "此端点已关闭请在配置文件中开启/This endpoint is closed, please enable it in the configuration file"})
video_url = f"https://www.tiktok.com/{user_id}/video/{aweme_id}"
download_url = f"{domain}/download?url={video_url}&prefix={prefix}&watermark={watermark}"
return RedirectResponse(download_url) | ## 用途/Usage ### [中文] - 将TikTok域名改为当前服务器域名即可调用此端点,返回[视频|图片]文件下载请求。 - 例如原链接:https://www.tiktok.com/@evil0ctal/video/7156033831819037994 改成 https://api.douyin.wtf/@evil0ctal/video/7156033831819037994 即可调用此端点。 ### [English] - Change the TikTok domain name to the current server domain name to call this endpoint and return the video file download request. - For example, the original link: https://www.tiktok.com/@evil0ctal/video/7156033831819037994 becomes https://api.douyin.wtf/@evil0ctal/video/7156033831819037994 to call this endpoint. # 参数/Parameter - user_id: str -> TikTok用户ID/TikTok user ID - aweme_id: str -> TikTok视频ID/TikTok video ID - prefix: bool -> [True/False] 是否添加前缀/Whether to add a prefix - watermark: bool -> [True/False] 是否添加水印/Whether to add a watermark |
1,075 | import json
import aiohttp
import uvicorn
import zipfile
import threading
import configparser
from fastapi import FastAPI, Request
from fastapi.responses import ORJSONResponse, FileResponse
from slowapi import Limiter, _rate_limit_exceeded_handler
from slowapi.errors import RateLimitExceeded
from slowapi.util import get_remote_address
from pydantic import BaseModel
from starlette.responses import RedirectResponse
from scraper import Scraper
import os
import time
config = configparser.ConfigParser()
config.read('config.ini', encoding='utf-8')
port = int(config["Web_API"]["Port"])
async def api_logs(start_time, input_data, endpoint, error_data: dict = None):
if config["Web_API"]["Allow_Logs"] == "True":
time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
total_time = float(format(time.time() - start_time, '.4f'))
file_name = "API_logs.json"
# 写入日志内容
with open(file_name, "a", encoding="utf-8") as f:
data = {
"time": time_now,
"endpoint": f'/{endpoint}/',
"total_time": total_time,
"input_data": input_data,
"error_data": error_data if error_data else "No error"
}
f.write(json.dumps(data, ensure_ascii=False) + ",\n")
print('日志记录成功!')
return 1
else:
print('日志记录已关闭!')
return
async def get_tiktok_profile_liked_videos(tikhub_token: str, tiktok_video_url: str = None):
    """
    ## 用途/Usage
    - 获取TikTok用户主页点赞视频数据,参数是用户链接|ID
    - Get the data of a TikTok user profile liked videos, the parameter is the user link or ID.
## 参数/Parameter
tikhub_token: https://api.tikhub.io/#/Authorization/login_for_access_token_user_login_post
"""
response = await api.get_tiktok_user_profile_liked_videos(tikhub_token=tikhub_token, tiktok_video_url=tiktok_video_url)
return response
def cleanup_path():
while True:
root_path = config["Web_API"]["Download_Path"]
timer = int(config["Web_API"]["Download_Path_Clean_Timer"])
# 查看目录是否存在,不存在就跳过
if os.path.exists(root_path):
time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
print(f"{time_now}: Cleaning up the download folder...")
for file in os.listdir("./download"):
file_path = os.path.join("./download", file)
try:
if os.path.isfile(file_path):
os.remove(file_path)
except Exception as e:
print(e)
else:
time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
print(f"{time_now}: The download folder does not exist, skipping...")
time.sleep(timer)
async def startup_event():
# 创建一个清理下载目录定时器线程并启动
# Create a timer thread to clean up the download directory and start it
download_path_clean_switches = True if config["Web_API"]["Download_Path_Clean_Switch"] == "True" else False
if download_path_clean_switches:
# 启动清理线程/Start cleaning thread
thread_1 = threading.Thread(target=cleanup_path)
thread_1.start() | null |
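A hedged sketch of how the pieces above are presumably wired together. The exact registration lives outside this snippet, so the decorator form and the host/port are assumptions, not taken from the source.
app = FastAPI()

@app.on_event("startup")
async def _on_startup():
    await startup_event()

if __name__ == "__main__":
    # Host and port are assumptions; the real values come from config.ini ("Web_API" section).
    uvicorn.run(app, host="0.0.0.0", port=80)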
1,076 |
def valid_check(input_data: str) -> str or None:
# 检索出所有链接并返回列表/Retrieve all links and return a list
url_list = find_url(input_data)
# 总共找到的链接数量/Total number of links found
total_urls = len(url_list)
if total_urls == 0:
return t('没有检测到有效的链接,请检查输入的内容是否正确。',
'No valid link detected, please check if the input content is correct.')
else:
# 最大接受提交URL的数量/Maximum number of URLs accepted
max_urls = config['Web_APP']['Max_Take_URLs']
if total_urls > int(max_urls):
warn_info = t('URL数量过多,只会处理前{}个URL。'.format(max_urls),
'Too many URLs, only the first {} URLs will be processed.'.format(max_urls))
return warn_info | null |
1,077 |
def error_do(reason: str, value: str) -> None:
# 输出一个毫无用处的信息
put_html("<hr>")
put_error(
t("发生了了意料之外的错误,输入值已被记录。", "An unexpected error occurred, the input value has been recorded."))
put_html('<h3>⚠{}</h3>'.format(t('详情', 'Details')))
put_table([
[t('原因', 'reason'), t('输入值', 'input value')],
[reason, value]])
put_markdown(t('可能的原因:', 'Possible reasons:'))
put_markdown(t('服务器可能被目标主机的防火墙限流(稍等片刻后再次尝试)',
'The server may be limited by the target host firewall (try again after a while)'))
put_markdown(t('输入了错误的链接(API-V1暂不支持主页链接解析)',
'Entered the wrong link (the home page link is not supported for parsing with API-V1)'))
put_markdown(
t('如果需要解析个人主页,请使用TikHub_API', 'If you need to parse the personal homepage, please use TikHub_API'))
put_markdown(t('TikHub_API 文档: [https://api.tikhub.io/docs](https://api.tikhub.io/docs)',
'TikHub_API Documentation: [https://api.tikhub.io/docs](https://api.tikhub.io/docs)'))
put_markdown(t('该视频已经被删除或屏蔽(你看的都是些啥(⊙_⊙)?)',
'The video has been deleted or blocked (what are you watching (⊙_⊙)?)'))
put_markdown(t('其他原因(请联系作者)', 'Other reasons (please contact the author)'))
put_markdown(t('你可以在右上角的关于菜单中查看本站错误日志。',
'You can view the error log of this site in the about menu in the upper right corner.'))
put_markdown('[{}](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/issues)'.format(
t('点击此处在GitHub上进行反馈', 'Click here to give feedback on GitHub')))
put_html("<hr>")
if config['Web_APP']['Allow_Logs'] == 'True':
# 如果douyin或tiktok在输入值中,则记录到日志文件/If douyin or tiktok is in the input value, record it to the log file
if 'douyin' in value or 'tiktok' in value:
# 将错误记录在logs.txt中
error_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
print(f"{error_date}: 正在记录错误信息...")
with open('logs.txt', 'a') as f:
f.write(error_date + ":\n" + str(reason) + '\n' + "Input value: " + value + '\n')
else:
print(t('输入值中没有douyin或tiktok,不记录到日志文件中',
'No douyin or tiktok in the input value, not recorded to the log file')) | null |
1,078 |
def ios_pop_window():
with popup(t("iOS快捷指令", "iOS Shortcut")):
version = config["Web_API"]["iOS_Shortcut_Version"]
update = config["Web_API"]['iOS_Shortcut_Update_Time']
link = config["Web_API"]['iOS_Shortcut_Link']
link_en = config["Web_API"]['iOS_Shortcut_Link_EN']
note = config["Web_API"]['iOS_Shortcut_Update_Note']
note_en = config["Web_API"]['iOS_Shortcut_Update_Note_EN']
put_markdown(t('#### 📢 快捷指令介绍:', '#### 📢 Shortcut Introduction:'))
put_markdown(
t('快捷指令运行在iOS平台,本快捷指令可以快速调用本项目的公共API将抖音或TikTok的视频或图集下载到你的手机相册中,暂时只支持单个链接进行下载。',
'The shortcut runs on the iOS platform, and this shortcut can quickly call the public API of this project to download the video or album of Douyin or TikTok to your phone album. It only supports single link download for now.'))
put_markdown(t('#### 📲 使用方法 ①:', '#### 📲 Operation method ①:'))
put_markdown(t('在抖音或TikTok的APP内,浏览你想要无水印保存的视频或图集。',
'The shortcut needs to be used in the Douyin or TikTok app, browse the video or album you want to save without watermark.'))
put_markdown(t('然后点击右下角分享按钮,选择更多,然后下拉找到 "抖音TikTok无水印下载" 这个选项。',
'Then click the share button in the lower right corner, select more, and then scroll down to find the "Douyin TikTok No Watermark Download" option.'))
put_markdown(t('如遇到通知询问是否允许快捷指令访问xxxx (域名或服务器),需要点击允许才可以正常使用。',
'If you are asked whether to allow the shortcut to access xxxx (domain name or server), you need to click Allow to use it normally.'))
put_markdown(t('该快捷指令会在你相册创建一个新的相薄方便你浏览保存的内容。',
'The shortcut will create a new album in your photo album to help you browse the saved content.'))
put_markdown(t('#### 📲 使用方法 ②:', '#### 📲 Operation method ②:'))
put_markdown(t('在抖音或TikTok的视频下方点击分享,然后点击复制链接,然后去快捷指令APP中运行该快捷指令。',
'Click share below the video of Douyin or TikTok, then click to copy the link, then go to the shortcut command APP to run the shortcut command.'))
put_markdown(t('如果弹窗询问是否允许读取剪切板请同意,随后快捷指令将链接内容保存至相册中。',
'if the pop-up window asks whether to allow reading the clipboard, please agree, and then the shortcut command will save the link content to the album middle.'))
put_html('<hr>')
put_text(t(f"最新快捷指令版本: {version}", f"Latest shortcut version: {version}"))
put_text(t(f"快捷指令更新时间: {update}", f"Shortcut update time: {update}"))
put_text(t(f"快捷指令更新内容: {note}", f"Shortcut update content: {note_en}"))
put_link("[点击获取快捷指令 - 中文]", link, new_window=True)
put_html("<br>")
put_link("[Click get Shortcut - English]", link_en, new_window=True) | null |
1,079 |
def api_document_pop_window():
with popup(t("API文档", "API Document")):
put_markdown(t("💾TikHub_API文档", "💾TikHub_API Document"))
put_markdown(t('TikHub_API 支持抖音和TikTok的更多接口, 如主页解析,视频解析,视频评论解析,个人点赞列表解析等...',
'TikHub_API supports more interfaces of Douyin and TikTok, such as home page parsing, video parsing, video comment parsing, personal like list parsing, etc...'))
put_link('[TikHub_API Docs]', 'https://api.tikhub.io/docs', new_window=True)
put_html('<hr>')
put_markdown(t("💽API-V1文档", "💽API-V1 Document"))
put_markdown(t("API-V1 支持抖音和TikTok的单一视频解析,具体请查看接口文档。",
"API-V1 supports single video parsing of Douyin and TikTok. For details, please refer to the API documentation."))
put_link('[API-V1 Docs]', 'https://api.douyin.wtf/docs', new_window=True) | null |
1,080 |
def log_popup_window():
with popup(t('错误日志', 'Error Log')):
        put_html('<h3>⚠️{}</h3>'.format(t('关于解析失败可能的原因', 'About the possible reasons for parsing failure')))
put_markdown(t('服务器可能被目标主机的防火墙限流(稍等片刻后再次尝试)',
'The server may be limited by the target host firewall (try again after a while)'))
put_markdown(t('输入了错误的链接(API-V1暂不支持主页链接解析)',
'Entered the wrong link (the home page link is not supported for parsing with API-V1)'))
put_markdown(
t('如果需要解析个人主页,请使用TikHub_API', 'If you need to parse the personal homepage, please use TikHub_API'))
put_markdown(t('TikHub_API 文档: [https://api.tikhub.io/docs](https://api.tikhub.io/docs)',
'TikHub_API Documentation: [https://api.tikhub.io/docs](https://api.tikhub.io/docs)'))
put_markdown(t('该视频已经被删除或屏蔽(你看的都是些啥(⊙_⊙)?)',
'The video has been deleted or blocked (what are you watching (⊙_⊙)?)'))
put_markdown(t('[点击此处在GitHub上进行反馈](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/issues)',
'[Click here to feedback on GitHub](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/issues)'))
put_html('<hr>')
# 判断日志文件是否存在
if os.path.exists('logs.txt'):
put_text(t('点击logs.txt可下载日志:', 'Click logs.txt to download the log:'))
content = open(r'./logs.txt', 'rb').read()
put_file('logs.txt', content=content)
with open('./logs.txt', 'r') as f:
content = f.read()
put_text(str(content))
else:
put_text(t('日志文件不存在,请等发生错误时再回来看看。',
'The log file does not exist, please come back and take a look when an error occurs.')) | null |
1,081 |
def about_popup_window():
with popup(t('更多信息', 'More Information')):
put_html('<h3>👀{}</h3>'.format(t('访问记录', 'Visit Record')))
put_image('https://views.whatilearened.today/views/github/evil0ctal/TikTokDownload_PyWebIO.svg',
title='访问记录')
put_html('<hr>')
put_html('<h3>⭐Github</h3>')
put_markdown('[Douyin_TikTok_Download_API](https://github.com/Evil0ctal/Douyin_TikTok_Download_API)')
put_html('<hr>')
put_html('<h3>🎯{}</h3>'.format(t('反馈', 'Feedback')))
put_markdown('{}:[issues](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/issues)'.format(
t('Bug反馈', 'Bug Feedback')))
put_html('<hr>')
put_html('<h3>💖WeChat</h3>')
put_markdown('WeChat:[Evil0ctal](https://mycyberpunk.com/)')
put_html('<hr>') | null |
1,082 | import matplotlib.pyplot as plt
import pandas as pd
import os
import json
from matplotlib.ticker import MaxNLocator
import matplotlib.font_manager as fm
from lab_gpt4_call import send_chat_request,send_chat_request_Azure,send_official_call
import re
from tool import *
import tiktoken
import concurrent.futures
import datetime
from PIL import Image
from io import BytesIO
import queue
import datetime
from threading import Thread
import openai
def check_RPM(run_time_list, new_time, max_RPM=1):
    # Check whether run_time_list already holds 3 timestamps (at most 3 accesses per minute).
    # Returns 0 if no rest is needed, otherwise the number of seconds to sleep before the next call.
if len(run_time_list) < 3:
run_time_list.append(new_time)
return 0
else:
if (new_time - run_time_list[0]).seconds < max_RPM:
# Calculate the required rest time.
sleep_time = 60 - (new_time - run_time_list[0]).seconds
print('sleep_time:', sleep_time)
run_time_list.pop(0)
run_time_list.append(new_time)
return sleep_time
else:
run_time_list.pop(0)
run_time_list.append(new_time)
return 0 | null |
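A small usage sketch of the throttling helper in a request loop; `call_llm` is a hypothetical placeholder for the real API call and the `max_RPM` value is only an illustration.
import time
import datetime

run_time_list = []
for prompt in ["query 1", "query 2", "query 3"]:
    sleep_time = check_RPM(run_time_list, datetime.datetime.now(), max_RPM=60)
    if sleep_time > 0:
        time.sleep(sleep_time)
    # call_llm(prompt)  # hypothetical downstream call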
1,083 | import matplotlib.pyplot as plt
import pandas as pd
import os
import json
from matplotlib.ticker import MaxNLocator
import matplotlib.font_manager as fm
from lab_gpt4_call import send_chat_request,send_chat_request_Azure,send_official_call
import re
from tool import *
import tiktoken
import concurrent.futures
import datetime
from PIL import Image
from io import BytesIO
import queue
import datetime
from threading import Thread
import openai
import time
import numpy as np
class MyThread(Thread):
def __init__(self, target, args):
super(MyThread, self).__init__()
self.func = target
self.args = args
def run(self):
self.result = self.func(*self.args)
def get_result(self):
return self.result
intermediate_results = queue.Queue()
def add_to_queue(intermediate_result):
intermediate_results.put(f"After planing, the intermediate result is {intermediate_result}")
def run(instruction, add_to_queue=None, send_chat_request_Azure = send_official_call, openai_key = '', api_base='', engine=''):
output_text = ''
################################# Step-1:Task select ###########################################
current_time = datetime.datetime.now()
formatted_time = current_time.strftime("%Y-%m-%d")
# If the time has not exceeded 3 PM, use yesterday's data.
if current_time.hour < 15:
formatted_time = (current_time - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
print('===============================Intent Detecting===========================================')
with open('./prompt_lib/prompt_intent_detection.json', 'r') as f:
prompt_task_dict = json.load(f)
prompt_intent_detection = ''
for key, value in prompt_task_dict.items():
prompt_intent_detection = prompt_intent_detection + key + ": " + value+ '\n\n'
prompt_intent_detection = prompt_intent_detection + '\n\n' + 'Instruction:' + '今天的日期是'+ formatted_time +', '+ instruction + ' ###New Instruction: '
# Record the running time.
# current_time = datetime.datetime.now()
# sleep_time = check_RPM(run_time, current_time)
# if sleep_time > 0:
# time.sleep(sleep_time)
response = send_chat_request_Azure(prompt_intent_detection, openai_key=openai_key, api_base=api_base, engine=engine)
new_instruction = response
print('new_instruction:', new_instruction)
output_text = output_text + '\n======Intent Detecting Stage=====\n\n'
output_text = output_text + new_instruction +'\n\n'
if add_to_queue is not None:
add_to_queue(output_text)
event_happen = True
print('===============================Task Planing===========================================')
output_text= output_text + '=====Task Planing Stage=====\n\n'
with open('./prompt_lib/prompt_task.json', 'r') as f:
prompt_task_dict = json.load(f)
prompt_task = ''
for key, value in prompt_task_dict.items():
prompt_task = prompt_task + key + ": " + value+ '\n\n'
prompt_task = prompt_task + '\n\n' + 'Instruction:' + new_instruction + ' ###Plan:'
# current_time = datetime.datetime.now()
# sleep_time = check_RPM(run_time, current_time)
# if sleep_time > 0:
# time.sleep(sleep_time)
response = send_chat_request_Azure(prompt_task, openai_key=openai_key,api_base=api_base,engine=engine)
task_select = response
pattern = r"(task\d+=)(\{[^}]*\})"
matches = re.findall(pattern, task_select)
task_plan = {}
for task in matches:
task_step, task_select = task
task_select = task_select.replace("'", "\"") # Replace single quotes with double quotes.
task_select = json.loads(task_select)
task_name = list(task_select.keys())[0]
task_instruction = list(task_select.values())[0]
task_plan[task_name] = task_instruction
# task_plan
for key, value in task_plan.items():
print(key, ':', value)
output_text = output_text + key + ': ' + str(value) + '\n'
output_text = output_text +'\n'
if add_to_queue is not None:
add_to_queue(output_text)
################################# Step-2:Tool select and use ###########################################
print('===============================Tool select and using Stage===========================================')
output_text = output_text + '======Tool select and using Stage======\n\n'
# Read the task_select JSON file name.
task_name = list(task_plan.keys())[0].split('_task')[0]
task_instruction = list(task_plan.values())[0]
tool_lib = './tool_lib/' + 'tool_' + task_name + '.json'
tool_prompt = './prompt_lib/' + 'prompt_' + task_name + '.json'
prompt_flat = load_tool_and_prompt(tool_lib, tool_prompt)
prompt_flat = prompt_flat + '\n\n' +'Instruction :'+ task_instruction+ ' ###Function Call'
#response = "step1={\n \"arg1\": [\"贵州茅台\"],\n \"function1\": \"get_stock_code\",\n \"output1\": \"result1\"\n},step2={\n \"arg1\": [\"result1\",\"20180123\",\"20190313\",\"daily\"],\n \"function1\": \"get_stock_prices_data\",\n \"output1\": \"result2\"\n},step3={\n \"arg1\": [\"result2\",\"close\"],\n \"function1\": \"calculate_stock_index\",\n \"output1\": \"result3\"\n}, ###Output:{\n \"贵州茅台在2018年1月23日到2019年3月13的每日收盘价格的时序表格\": \"result3\",\n}"
# current_time = datetime.datetime.now()
# sleep_time = check_RPM(run_time, current_time)
# if sleep_time > 0:
# time.sleep(sleep_time)
response = send_chat_request_Azure(prompt_flat, openai_key=openai_key,api_base=api_base, engine=engine)
#response = "Function Call:step1={\n \"arg1\": [\"五粮液\"],\n \"function1\": \"get_stock_code\",\n \"output1\": \"result1\",\n \"arg2\": [\"泸州老窖\"],\n \"function2\": \"get_stock_code\",\n \"output2\": \"result2\"\n},step2={\n \"arg1\": [\"result1\",\"20190101\",\"20220630\",\"daily\"],\n \"function1\": \"get_stock_prices_data\",\n \"output1\": \"result3\",\n \"arg2\": [\"result2\",\"20190101\",\"20220630\",\"daily\"],\n \"function2\": \"get_stock_prices_data\",\n \"output2\": \"result4\"\n},step3={\n \"arg1\": [\"result3\",\"Cumulative_Earnings_Rate\"],\n \"function1\": \"calculate_stock_index\",\n \"output1\": \"result5\",\n \"arg2\": [\"result4\",\"Cumulative_Earnings_Rate\"],\n \"function2\": \"calculate_stock_index\",\n \"output2\": \"result6\"\n}, ###Output:{\n \"五粮液在2019年1月1日到2022年06月30的每日收盘价格时序表格\": \"result5\",\n \"泸州老窖在2019年1月1日到2022年06月30的每日收盘价格时序表格\": \"result6\"\n}"
call_steps, _ = response.split('###')
pattern = r"(step\d+=)(\{[^}]*\})"
matches = re.findall(pattern, call_steps)
result_buffer = {} # The stored format is as follows: {'result1': (000001.SH, 'Stock code of China Ping An'), 'result2': (df2, 'Stock data of China Ping An from January to June 2021')}.
output_buffer = [] # Store the variable names [result5, result6] that will be passed as the final output to the next task.
# print(task_output)
#
for match in matches:
step, content = match
content = content.replace("'", "\"") # Replace single quotes with double quotes.
print('==================')
print("\n\nstep:", step)
print('content:',content)
call_dict = json.loads(content)
print('It has parallel steps:', len(call_dict) / 4)
output_text = output_text + step + ': ' + str(call_dict) + '\n\n'
# Execute the following code in parallel using multiple processes.
with concurrent.futures.ThreadPoolExecutor() as executor:
# Submit tasks to thread pool
futures = {executor.submit(parse_and_exe, call_dict, result_buffer, str(parallel_step))
for parallel_step in range(1, int(len(call_dict) / 4) + 1)}
# Collect results as they become available
for idx, future in enumerate(concurrent.futures.as_completed(futures)):
# Handle possible exceptions
try:
result = future.result()
# Print the current parallel step number.
print('parallel step:', idx+1)
# print(list(result[1].keys())[0])
# print(list(result[1].values())[0])
except Exception as exc:
print(f'Generated an exception: {exc}')
if step == matches[-1][0]:
# Current task's final step. Save the output of the final step.
for parallel_step in range(1, int(len(call_dict) / 4) + 1):
output_buffer.append(call_dict['output' + str(parallel_step)])
output_text = output_text + '\n'
if add_to_queue is not None:
add_to_queue(output_text)
################################# Step-3:visualization ###########################################
print('===============================Visualization Stage===========================================')
output_text = output_text + '======Visualization Stage====\n\n'
task_name = list(task_plan.keys())[1].split('_task')[0] #visualization_task
#task_name = 'visualization'
task_instruction = list(task_plan.values())[1] #''
tool_lib = './tool_lib/' + 'tool_' + task_name + '.json'
tool_prompt = './prompt_lib/' + 'prompt_' + task_name + '.json'
result_buffer_viz={}
Previous_result = {}
for output_name in output_buffer:
rename = 'input'+ str(output_buffer.index(output_name)+1)
Previous_result[rename] = result_buffer[output_name][1]
result_buffer_viz[rename] = result_buffer[output_name]
prompt_flat = load_tool_and_prompt(tool_lib, tool_prompt)
prompt_flat = prompt_flat + '\n\n' +'Instruction: '+ task_instruction + ', Previous_result: '+ str(Previous_result) + ' ###Function Call'
# current_time = datetime.datetime.now()
# sleep_time = check_RPM(run_time, current_time)
# if sleep_time > 0:
# time.sleep(sleep_time)
response = send_chat_request_Azure(prompt_flat, openai_key=openai_key, api_base=api_base, engine=engine)
call_steps, _ = response.split('###')
pattern = r"(step\d+=)(\{[^}]*\})"
matches = re.findall(pattern, call_steps)
for match in matches:
step, content = match
content = content.replace("'", "\"") # Replace single quotes with double quotes.
print('==================')
print("\n\nstep:", step)
print('content:',content)
call_dict = json.loads(content)
print('It has parallel steps:', len(call_dict) / 4)
result_buffer_viz = parse_and_exe(call_dict, result_buffer_viz, parallel_step = '' )
output_text = output_text + step + ': ' + str(call_dict) + '\n\n'
if add_to_queue is not None:
add_to_queue(output_text)
finally_output = list(result_buffer_viz.values()) # plt.Axes
#
df = pd.DataFrame()
str_out = output_text + 'Finally result: '
for ax in finally_output:
if isinstance(ax[0], plt.Axes): # If the output is plt.Axes, display it.
plt.grid()
#plt.show()
str_out = str_out + ax[1]+ ':' + 'plt.Axes' + '\n\n'
#
elif isinstance(ax[0], pd.DataFrame):
df = ax[0]
str_out = str_out + ax[1]+ ':' + 'pd.DataFrame' + '\n\n'
else:
str_out = str_out + str(ax[1])+ ':' + str(ax[0]) + '\n\n'
#
print('===============================Summary Stage===========================================')
output_prompt = "请用第一人称总结一下整个任务规划和解决过程,并且输出结果,用[Task]表示每个规划任务,用\{function\}表示每个任务里调用的函数." + \
"示例1:###我用将您的问题拆分成两个任务,首先第一个任务[stock_task],我依次获取五粮液和贵州茅台从2013年5月20日到2023年5月20日的净资产回报率roe的时序数据. \n然后第二个任务[visualization_task],我用折线图绘制五粮液和贵州茅台从2013年5月20日到2023年5月20日的净资产回报率,并计算它们的平均值和中位数. \n\n在第一个任务中我分别使用了2个工具函数\{get_stock_code\},\{get_Financial_data_from_time_range\}获取到两只股票的roe数据,在第二个任务里我们使用折线图\{plot_stock_data\}工具函数来绘制他们的roe十年走势,最后并计算了两只股票十年ROE的中位数\{output_median_col\}和均值\{output_mean_col\}.\n\n最后贵州茅台的ROE的均值和中位数是\{\},{},五粮液的ROE的均值和中位数是\{\},\{\}###" + \
"示例2:###我用将您的问题拆分成两个任务,首先第一个任务[stock_task],我依次获取20230101到20230520这段时间北向资金每日净流入和每日累计流入时序数据,第二个任务是[visualization_task],因此我在同一张图里同时绘制北向资金20230101到20230520的每日净流入柱状图和每日累计流入的折线图 \n\n为了完成第一个任务中我分别使用了2个工具函数\{get_north_south_money\},\{calculate_stock_index\}分别获取到北上资金的每日净流入量和每日的累计净流入量,第二个任务里我们使用折线图\{plot_stock_data\}绘制来两个指标的变化走势.\n\n最后我们给您提供了包含两个指标的折线图和数据表格." + \
"示例3:###我用将您的问题拆分成两个任务,首先第一个任务[economic_task],我爬取了上市公司贵州茅台和其主营业务介绍信息. \n然后第二个任务[visualization_task],我用表格打印贵州茅台及其相关信息. \n\n在第一个任务中我分别使用了1个工具函数\{get_company_info\} 获取到贵州茅台的公司信息,在第二个任务里我们使用折线图\{print_save_table\}工具函数来输出表格.\n"
output_result = send_chat_request_Azure(output_prompt + str_out + '###', openai_key=openai_key, api_base=api_base,engine=engine)
print(output_result)
buf = BytesIO()
plt.savefig(buf, format='png')
buf.seek(0)
#
#
image = Image.open(buf)
return output_text, image, output_result, df
def send_chat_request_Azure(query, openai_key, api_base, engine):
openai.api_type = "azure"
openai.api_version = "2023-03-15-preview"
openai.api_base = api_base
openai.api_key = openai_key
max_token_num = 8000 - num_tokens_from_string(query,'cl100k_base')
#
openai.api_request_timeout = 1 # 设置超时时间为10秒
response = openai.ChatCompletion.create(
engine = engine,
messages=[{"role": "system", "content": "You are an useful AI assistant that helps people solve the problem step by step."},
{"role": "user", "content": "" + query}],
temperature=0.0,
max_tokens=max_token_num,
top_p=0.95,
frequency_penalty=0,
presence_penalty=0,
stop=None)
data_res = response['choices'][0]['message']['content']
return data_res
def send_official_call(query, openai_key='', api_base='', engine=''):
start = time.time()
# 转换成可阅读的时间
start = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start))
print(start)
openai.api_key = openai_key
response = openai.ChatCompletion.create(
# engine="gpt35",
model="gpt-3.5-turbo",
messages = [{"role": "system", "content": "You are an useful AI assistant that helps people solve the problem step by step."},
{"role": "user", "content": "" + query}],
#max_tokens=max_token_num,
temperature=0.1,
top_p=0.1,
frequency_penalty=0,
presence_penalty=0,
stop=None)
data_res = response['choices'][0]['message']['content']
return data_res
def gradio_interface(query, openai_key, openai_key_azure, api_base,engine):
# Create a new thread to run the function.
if openai_key.startswith('sk') and openai_key_azure == '':
print('send_official_call')
thread = MyThread(target=run, args=(query, add_to_queue, send_official_call, openai_key))
elif openai_key =='' and len(openai_key_azure)>0:
print('send_chat_request_Azure')
thread = MyThread(target=run, args=(query, add_to_queue, send_chat_request_Azure, openai_key_azure, api_base, engine))
thread.start()
placeholder_image = np.zeros((100, 100, 3), dtype=np.uint8) # Create a placeholder image.
placeholder_dataframe = pd.DataFrame() #
# Wait for the result of the calculate function and display the intermediate results simultaneously.
while thread.is_alive():
while not intermediate_results.empty():
yield intermediate_results.get(), placeholder_image, 'Running' , placeholder_dataframe # Use the yield keyword to return intermediate results in real-time
time.sleep(0.1) # Avoid excessive resource consumption.
finally_text, img, output, df = thread.get_result()
yield finally_text, img, output, df
# Return the final result. | null |
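A minimal consumption sketch (illustrative only; the query string and the 'sk-...' key are placeholders, and it assumes MyThread, run, add_to_queue and the request helpers above are already defined in scope):
for text, image, status, table in gradio_interface('绘制贵州茅台近一年的股价走势', 'sk-...', '', '', ''):
    print(status)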
1,084 | import tushare as ts
import matplotlib.pyplot as plt
import pandas as pd
import os
import random
from matplotlib.ticker import MaxNLocator
import time
from datetime import datetime, timedelta
import numpy as np
import mplfinance as mpf
from typing import Optional
import matplotlib.font_manager as fm
from matplotlib.lines import Line2D
from typing import Union, Any
from sklearn.linear_model import LinearRegression
The provided code snippet includes necessary dependencies for implementing the `get_last_year_date` function. Write a Python function `def get_last_year_date(date_str: str = '') -> str` to solve the following problem:
This function takes a date string in the format YYYYMMDD and returns the date string one year prior to the input date. Args: - date_str: string, the input date in the format YYYYMMDD Returns: - string, the date one year prior to the input date in the format YYYYMMDD
Here is the function:
def get_last_year_date(date_str: str = '') -> str:
"""
This function takes a date string in the format YYYYMMDD and returns the date string one year prior to the input date.
Args:
- date_str: string, the input date in the format YYYYMMDD
Returns:
- string, the date one year prior to the input date in the format YYYYMMDD
"""
dt = datetime.strptime(date_str, '%Y%m%d')
# To calculate the date one year ago
one_year_ago = dt - timedelta(days=365)
# To format the date as a string
one_year_ago_str = one_year_ago.strftime('%Y%m%d')
return one_year_ago_str | This function takes a date string in the format YYYYMMDD and returns the date string one year prior to the input date. Args: - date_str: string, the input date in the format YYYYMMDD Returns: - string, the date one year prior to the input date in the format YYYYMMDD |
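A quick usage check of the helper above (the date is arbitrary):
print(get_last_year_date('20230520'))  # -> '20220520', i.e. 365 days earlier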
1,085 | import tushare as ts
import matplotlib.pyplot as plt
import pandas as pd
import os
import random
from matplotlib.ticker import MaxNLocator
import time
from datetime import datetime, timedelta
import numpy as np
import mplfinance as mpf
from typing import Optional
import matplotlib.font_manager as fm
from matplotlib.lines import Line2D
from typing import Union, Any
from sklearn.linear_model import LinearRegression
pro = ts.pro_api(tushare_token)
def get_stock_code(stock_name: str) -> str:
# Retrieve the stock code of a given stock name. If we call get_stock_code('贵州茅台'), it will return '600519.SH'.
df = pd.read_csv('tushare_stock_basic_20230421210721.csv')
try:
code = df.loc[df.name==stock_name].ts_code.iloc[0]
return code
except:
return None
The provided code snippet includes necessary dependencies for implementing the `get_stock_technical_data` function. Write a Python function `def get_stock_technical_data(stock_name: str, start_date: str, end_date: str) -> pd.DataFrame` to solve the following problem:
Retrieves the daily technical data of a stock including macd turnover rate, volume, PE ratio, etc. Those technical indicators are usually plotted as subplots in a k-line chart. Args: stock_name (str): start_date (str): Start date "YYYYMMDD" end_date (str): End date "YYYYMMDD" Returns: pd.DataFrame: A DataFrame containing the technical data of the stock, including various indicators such as ts_code, trade_date, close, macd_dif, macd_dea, macd, kdj_k, kdj_d, kdj_j, rsi_6, rsi_12, boll_upper, boll_mid, boll_lower, cci, turnover_rate, turnover_rate_f, volume_ratio, pe_ttm(市盈率), pb(市净率), ps_ttm, dv_ttm, total_share, float_share, free_share, total_mv, circ_mv
Here is the function:
def get_stock_technical_data(stock_name: str, start_date: str, end_date: str) -> pd.DataFrame:
"""
Retrieves the daily technical data of a stock including macd turnover rate, volume, PE ratio, etc. Those technical indicators are usually plotted as subplots in a k-line chart.
Args:
stock_name (str):
start_date (str): Start date "YYYYMMDD"
end_date (str): End date "YYYYMMDD"
Returns:
pd.DataFrame: A DataFrame containing the technical data of the stock,
including various indicators such as ts_code, trade_date, close, macd_dif, macd_dea, macd, kdj_k, kdj_d, kdj_j, rsi_6, rsi_12, boll_upper, boll_mid, boll_lower, cci, turnover_rate, turnover_rate_f, volume_ratio, pe_ttm(市盈率), pb(市净率), ps_ttm, dv_ttm, total_share, float_share, free_share, total_mv, circ_mv
"""
# Technical factors
stock_code = get_stock_code(stock_name)
stock_data1 = pro.stk_factor(**{
"ts_code": stock_code,
"start_date": start_date,
"end_date": end_date,
"trade_date": '',
"limit": "",
"offset": ""
}, fields=[
"ts_code",
"trade_date",
"close",
"macd_dif",
"macd_dea",
"macd",
"kdj_k",
"kdj_d",
"kdj_j",
"rsi_6",
"rsi_12",
"rsi_24",
"boll_upper",
"boll_mid",
"boll_lower",
"cci"
])
# Trading factors
stock_data2 = pro.daily_basic(**{
"ts_code": stock_code,
"trade_date": '',
"start_date": start_date,
"end_date": end_date,
"limit": "",
"offset": ""
}, fields=[
"ts_code", #
"trade_date",
"turnover_rate",
"turnover_rate_f",
"volume_ratio",
"pe_ttm",
"pb",
"ps_ttm",
"dv_ttm",
"total_share",
"float_share",
"free_share",
"total_mv",
"circ_mv"
])
# Merge the technical-factor and trading-factor tables on stock code and trade date.
stock_data = pd.merge(stock_data1, stock_data2, on=['ts_code', 'trade_date'])
df = pd.read_csv('tushare_stock_basic_20230421210721.csv')
stock_data_merged = pd.merge(stock_data, df, on='ts_code')
stock_data_merged = stock_data_merged.sort_values(by='trade_date', ascending=True)
stock_data_merged.drop(['symbol'], axis=1, inplace=True)
stock_data_merged.rename(columns={'ts_code': 'stock_code'}, inplace=True)
stock_data_merged.rename(columns={'name': 'stock_name'}, inplace=True)
return stock_data_merged | Retrieves the daily technical data of a stock including macd turnover rate, volume, PE ratio, etc. Those technical indicators are usually plotted as subplots in a k-line chart. Args: stock_name (str): start_date (str): Start date "YYYYMMDD" end_date (str): End date "YYYYMMDD" Returns: pd.DataFrame: A DataFrame containing the technical data of the stock, including various indicators such as ts_code, trade_date, close, macd_dif, macd_dea, macd, kdj_k, kdj_d, kdj_j, rsi_6, rsi_12, boll_upper, boll_mid, boll_lower, cci, turnover_rate, turnover_rate_f, volume_ratio, pe_ttm(市盈率), pb(市净率), ps_ttm, dv_ttm, total_share, float_share, free_share, total_mv, circ_mv |
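A usage sketch, assuming a valid tushare token is configured and the stock-basic CSV is on disk; the stock name and date range are illustrative:
tech = get_stock_technical_data('贵州茅台', '20230101', '20230331')
print(tech[['trade_date', 'close', 'macd', 'pe_ttm', 'turnover_rate']].tail())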
1,086 | import tushare as ts
import matplotlib.pyplot as plt
import pandas as pd
import os
import random
from matplotlib.ticker import MaxNLocator
import time
from datetime import datetime, timedelta
import numpy as np
import mplfinance as mpf
from typing import Optional
import matplotlib.font_manager as fm
from matplotlib.lines import Line2D
from typing import Union, Any
from sklearn.linear_model import LinearRegression
font_prop = fm.FontProperties(fname=font_path)
The provided code snippet includes necessary dependencies for implementing the `plot_stock_data` function. Write a Python function `def plot_stock_data(stock_data: pd.DataFrame, ax: Optional[plt.Axes] = None, figure_type: str = 'line', title_name: str ='') -> plt.Axes` to solve the following problem:
This function plots stock data. Args: - stock_data: pandas DataFrame, the stock data to plot. The DataFrame should contain three columns: - Column 1: trade date in 'YYYYMMDD' - Column 2: Stock name or code (string format) - Column 3: Index value (numeric format) The DataFrame can be time series data or cross-sectional data. If it is time-series data, the first column represents different trade time, the second column represents the same name. For cross-sectional data, the first column is the same, the second column contains different stocks. - ax: matplotlib Axes object, the axes to plot the data on - figure_type: the type of figure (either 'line' or 'bar') - title_name Returns: - matplotlib Axes object, the axes containing the plot
Here is the function:
def plot_stock_data(stock_data: pd.DataFrame, ax: Optional[plt.Axes] = None, figure_type: str = 'line', title_name: str ='') -> plt.Axes:
"""
This function plots stock data.
Args:
- stock_data: pandas DataFrame, the stock data to plot. The DataFrame should contain three columns:
- Column 1: trade date in 'YYYYMMDD'
- Column 2: Stock name or code (string format)
- Column 3: Index value (numeric format)
The DataFrame can be time series data or cross-sectional data. If it is time-series data, the first column represents different trade time, the second column represents the same name. For cross-sectional data, the first column is the same, the second column contains different stocks.
- ax: matplotlib Axes object, the axes to plot the data on
- figure_type: the type of figure (either 'line' or 'bar')
- title_name
Returns:
- matplotlib Axes object, the axes containing the plot
"""
index_name = stock_data.columns[2]
name_list = stock_data.iloc[:,1]
date_list = stock_data.iloc[:,0]
if name_list.nunique() == 1 and date_list.nunique() != 1:
# Time Series Data
unchanged_var = name_list.iloc[0] # stock name
x_dim = date_list # tradingdate
x_name = stock_data.columns[0]
elif name_list.nunique() != 1 and date_list.nunique() == 1:
# Cross-sectional Data
unchanged_var = date_list.iloc[0] # tradingdate
x_dim = name_list # stock name
x_name = stock_data.columns[1]
data_size = x_dim.shape[0]
start_x_dim, end_x_dim = x_dim.iloc[0], x_dim.iloc[-1]
start_y = stock_data.iloc[0, 2]
end_y = stock_data.iloc[-1, 2]
def generate_random_color():
r = random.randint(0, 255)/ 255.0
g = random.randint(0, 100)/ 255.0
b = random.randint(0, 255)/ 255.0
return (r, g, b)
color = generate_random_color()
if ax is None:
_, ax = plt.subplots()
if figure_type =='line':
# Draw the series as a line.
ax.plot(x_dim, stock_data.iloc[:, 2], label=unchanged_var + '_' + index_name, color=color, linewidth=3)
# Add markers to the data points.
plt.scatter(x_dim, stock_data.iloc[:, 2], color=color, s=3)
# Alternative: ax.scatter(x_dim, stock_data.iloc[:, 2], label=unchanged_var + '_' + index_name, color=color, s=3)
# Annotate the first and last data points with the name, value and date.
ax.annotate(unchanged_var + ':' + str(round(start_y, 2)) + ' @' + start_x_dim, xy=(start_x_dim, start_y),
xytext=(start_x_dim, start_y),
textcoords='data', fontsize=14,color=color, horizontalalignment='right',fontproperties=font_prop)
ax.annotate(unchanged_var + ':' + str(round(end_y, 2)) +' @' + end_x_dim, xy=(end_x_dim, end_y),
xytext=(end_x_dim, end_y),
textcoords='data', fontsize=14, color=color, horizontalalignment='left',fontproperties=font_prop)
elif figure_type == 'bar':
ax.bar(x_dim, stock_data.iloc[:, 2], label = unchanged_var + '_' + index_name, width=0.3, color=color)
ax.annotate(unchanged_var + ':' + str(round(start_y, 2)) + ' @' + start_x_dim, xy=(start_x_dim, start_y),
xytext=(start_x_dim, start_y),
textcoords='data', fontsize=14, color=color, horizontalalignment='right',fontproperties=font_prop)
ax.annotate(unchanged_var + ':' + str(round(end_y, 2)) + ' @' + end_x_dim, xy=(end_x_dim, end_y),
xytext=(end_x_dim, end_y),
textcoords='data', fontsize=14, color=color, horizontalalignment='left',fontproperties=font_prop)
plt.xticks(x_dim, rotation=45)  # Rotate the x-axis labels for readability.
ax.xaxis.set_major_locator(MaxNLocator(integer=True, prune=None, nbins=100))  # Cap the number of x-axis ticks.
plt.xlabel(x_name, fontproperties=font_prop,fontsize=18)
plt.ylabel(f'{index_name}', fontproperties=font_prop,fontsize=16)
ax.set_title(title_name , fontproperties=font_prop,fontsize=16)
plt.legend(prop=font_prop)  # Show the legend.
fig = plt.gcf()
fig.set_size_inches(18, 12)
return ax | This function plots stock data. Args: - stock_data: pandas DataFrame, the stock data to plot. The DataFrame should contain three columns: - Column 1: trade date in 'YYYYMMDD' - Column 2: Stock name or code (string format) - Column 3: Index value (numeric format) The DataFrame can be time series data or cross-sectional data. If it is time-series data, the first column represents different trade time, the second column represents the same name. For cross-sectional data, the first column is the same, the second column contains different stocks. - ax: matplotlib Axes object, the axes to plot the data on - figure_type: the type of figure (either 'line' or 'bar') - title_name Returns: - matplotlib Axes object, the axes containing the plot |
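A usage sketch with made-up prices, just to show the expected three-column input shape; font_path must point to a valid CJK font for the labels to render:
demo = pd.DataFrame({'trade_date': ['20230103', '20230104', '20230105'],
'stock_name': ['贵州茅台'] * 3,
'close': [1700.0, 1712.5, 1698.3]})
ax = plot_stock_data(demo, figure_type='line', title_name='贵州茅台收盘价走势')
plt.show()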
1,087 | import tushare as ts
import matplotlib.pyplot as plt
import pandas as pd
import os
import random
from matplotlib.ticker import MaxNLocator
import time
from datetime import datetime, timedelta
import numpy as np
import mplfinance as mpf
from typing import Optional
import matplotlib.font_manager as fm
from matplotlib.lines import Line2D
from typing import Union, Any
from sklearn.linear_model import LinearRegression
pro = ts.pro_api(tushare_token)
def query_fund_name_or_code(fund_name: str = '', fund_code: str = '') -> str:
#
"""
Retrieves the fund code based on the fund name or Retrieves the fund name based on the fund code.
Args:
fund_name (str, optional): Fund name. Defaults to ''.
fund_code (str, optional): Fund code. Defaults to ''.
Returns:
code or name: Fund code if fund_name is provided and fund_code is empty. Fund name if fund_code is provided and fund_name is empty.
"""
#df = pd.read_csv('./tushare_fund_basic_20230508193747.csv')
# Query the fund code based on the fund name.
if fund_name != '' and fund_code == '':
#
df = pd.read_csv('./tushare_fund_basic_all.csv')
#
# df = pro.fund_basic(**{
# "ts_code": "",
# "market": "",
# "update_flag": "",
# "offset": "",
# "limit": "",
# "status": "",
# "name": fund_name
# }, fields=[
# "ts_code",
# "name"
# ])
try:
#
code = df[df['name'] == fund_name]['ts_code'].values[0]
except:
#print(fund_name,'基金名称不存在')
return None
return code
# Query the fund name based on the fund code.
if fund_code != '' and fund_name == '':
df = pd.read_csv('./tushare_fund_basic_all.csv')
try:
name = df[df['ts_code'] == fund_code]['name'].values[0]
except:
#print(fund_code,'基金代码不存在')
return None
return name
The provided code snippet includes necessary dependencies for implementing the `query_fund_Manager` function. Write a Python function `def query_fund_Manager(Manager_name: str) -> pd.DataFrame` to solve the following problem:
Retrieves information about a fund manager. Args: Manager_name (str): The name of the fund manager. Returns: df (DataFrame): A DataFrame containing the fund manager's information, including the fund codes, announcement dates, manager's name, gender, birth year, education, nationality, start and end dates of managing funds, and the manager's resume.
Here is the function:
def query_fund_Manager(Manager_name: str) -> pd.DataFrame:
# Returned fields: fund_code (fund code), ann_date (announcement date), name (manager name), gender, birth_year, edu (education), nationality, begin_date (start of tenure), end_date (end of tenure), resume.
"""
Retrieves information about a fund manager.
Args:
Manager_name (str): The name of the fund manager.
Returns:
df (DataFrame): A DataFrame containing the fund manager's information, including the fund codes, announcement dates,
manager's name, gender, birth year, education, nationality, start and end dates of managing funds,
and the manager's resume.
"""
df = pro.fund_manager(**{
"ts_code": "",
"ann_date": "",
"name": Manager_name,
"offset": "",
"limit": ""
}, fields=[
"ts_code",
"ann_date",
"name",
"gender",
"birth_year",
"edu",
"nationality",
"begin_date",
"end_date",
"resume"
])
#
df.rename(columns={'ts_code': 'fund_code'}, inplace=True)
# To query the fund name based on the fund code and store it in a new column called fund_name, while removing the rows where the fund name is not found
df['fund_name'] = df['fund_code'].apply(lambda x: query_fund_name_or_code('', x))
df.dropna(subset=['fund_name'], inplace=True)
df.rename(columns={'name': 'manager_name'}, inplace=True)
#
df_out = df[['fund_name','fund_code','ann_date','manager_name','begin_date','end_date']]
return df_out | Retrieves information about a fund manager. Args: Manager_name (str): The name of the fund manager. Returns: df (DataFrame): A DataFrame containing the fund manager's information, including the fund codes, announcement dates, manager's name, gender, birth year, education, nationality, start and end dates of managing funds, and the manager's resume. |
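A usage sketch (requires a valid tushare token and the fund-basic CSV; the manager name is illustrative):
managers = query_fund_Manager('张坤')
print(managers[['fund_name', 'fund_code', 'begin_date', 'end_date']].head())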
1,088 | import tushare as ts
import matplotlib.pyplot as plt
import pandas as pd
import os
import random
from matplotlib.ticker import MaxNLocator
import time
from datetime import datetime, timedelta
import numpy as np
import mplfinance as mpf
from typing import Optional
import matplotlib.font_manager as fm
from matplotlib.lines import Line2D
from typing import Union, Any
from sklearn.linear_model import LinearRegression
The provided code snippet includes necessary dependencies for implementing the `calculate_stock_index` function. Write a Python function `def calculate_stock_index(stock_data: pd.DataFrame, index:str='close') -> pd.DataFrame` to solve the following problem:
Calculate a specific index of a stock based on its price information. Args: stock_data (pd.DataFrame): DataFrame containing the stock's price information. index (str, optional): The index to calculate. The available options depend on the column names in the input stock price data. Additionally, there are two special indices: 'candle_K' and 'Cumulative_Earnings_Rate'. Returns: DataFrame containing the corresponding index data of the stock. In general, it includes three columns: 'trade_date', 'name', and the corresponding index value. Besides, if index is 'candle_K', the function returns the DataFrame containing 'trade_date', 'Open', 'High', 'Low', 'Close', 'Volume','name' column. If index is a technical index such as 'macd' or a trading index likes 'pe_ttm', the function returns the DataFrame with corresponding columns.
Here is the function:
def calculate_stock_index(stock_data: pd.DataFrame, index:str='close') -> pd.DataFrame:
"""
Calculate a specific index of a stock based on its price information.
Args:
stock_data (pd.DataFrame): DataFrame containing the stock's price information.
index (str, optional): The index to calculate. The available options depend on the column names in the
input stock price data. Additionally, there are two special indices: 'candle_K' and 'Cumulative_Earnings_Rate'.
Returns:
DataFrame containing the corresponding index data of the stock. In general, it includes three columns: 'trade_date', 'name', and the corresponding index value.
Besides, if index is 'candle_K', the function returns the DataFrame containing 'trade_date', 'Open', 'High', 'Low', 'Close', 'Volume','name' column.
If index is a technical index such as 'macd' or a trading index likes 'pe_ttm', the function returns the DataFrame with corresponding columns.
"""
if 'stock_name' not in stock_data.columns and 'index_name' in stock_data.columns:
stock_data.rename(columns={'index_name': 'stock_name'}, inplace=True)
#
index = index.lower()
if index=='Cumulative_Earnings_Rate' or index =='Cumulative_Earnings_Rate'.lower() :
stock_data[index] = (1 + stock_data['pct_chg'] / 100.).cumprod() - 1.
stock_data[index] = stock_data[index] * 100.
if 'stock_name' in stock_data.columns :
selected_index = stock_data[['trade_date', 'stock_name', index]].copy()
#
if 'fund_name' in stock_data.columns:
selected_index = stock_data[['trade_date', 'fund_name', index]].copy()
return selected_index
elif index == 'candle_K' or index == 'candle_K'.lower():
#tech_df = tech_df.drop(['name', 'symbol', 'industry', 'area','market','list_date','ts_code','close'], axis=1)
# Merge two DataFrames based on the 'trade_date' column.
stock_data = stock_data.rename(
columns={'open': 'Open', 'high': 'High', 'low': 'Low', 'close': 'Close',
'vol': 'Volume'})
selected_index = stock_data[['trade_date', 'Open', 'High', 'Low', 'Close', 'Volume','stock_name']].copy()
return selected_index
elif index =='macd':
selected_index = stock_data[['trade_date','macd','macd_dea','macd_dif']].copy()
return selected_index
elif index =='rsi':
selected_index = stock_data[['trade_date','rsi_6','rsi_12']].copy()
return selected_index
elif index =='boll':
selected_index = stock_data[['trade_date', 'boll_upper', 'boll_lower','boll_mid']].copy()
return selected_index
elif index =='kdj':
selected_index = stock_data[['trade_date', 'kdj_k', 'kdj_d','kdj_j']].copy()
return selected_index
elif index =='cci':
selected_index = stock_data[['trade_date', 'cci']].copy()
return selected_index
elif index == '换手率':
selected_index = stock_data[['trade_date', 'turnover_rate','turnover_rate_f']].copy()
return selected_index
elif index == '市值':
selected_index = stock_data[['trade_date', 'total_mv','circ_mv']].copy()
return selected_index
elif index in stock_data.columns:
stock_data = stock_data
if 'stock_name' in stock_data.columns :
selected_index = stock_data[['trade_date', 'stock_name', index]].copy()
if 'fund_name' in stock_data.columns:
selected_index = stock_data[['trade_date', 'fund_name', index]].copy()
# Except for candlestick chart and technical indicators, the remaining outputs consist of three columns: date, name, and indicator.
return selected_index | Calculate a specific index of a stock based on its price information. Args: stock_data (pd.DataFrame): DataFrame containing the stock's price information. index (str, optional): The index to calculate. The available options depend on the column names in the input stock price data. Additionally, there are two special indices: 'candle_K' and 'Cumulative_Earnings_Rate'. Returns: DataFrame containing the corresponding index data of the stock. In general, it includes three columns: 'trade_date', 'name', and the corresponding index value. Besides, if index is 'candle_K', the function returns the DataFrame containing 'trade_date', 'Open', 'High', 'Low', 'Close', 'Volume','name' column. If index is a technical index such as 'macd' or a trading index likes 'pe_ttm', the function returns the DataFrame with corresponding columns. |
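A usage sketch chaining this helper with the price fetcher defined elsewhere in this file (token required; name and dates are illustrative):
prices = get_stock_prices_data('贵州茅台', '20230101', '20230331', 'daily')
cum_ret = calculate_stock_index(prices, index='Cumulative_Earnings_Rate')
print(cum_ret.tail())  # trade_date, stock_name, cumulative earnings rate (%)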
1,089 | import tushare as ts
import matplotlib.pyplot as plt
import pandas as pd
import os
import random
from matplotlib.ticker import MaxNLocator
import time
from datetime import datetime, timedelta
import numpy as np
import mplfinance as mpf
from typing import Optional
import matplotlib.font_manager as fm
from matplotlib.lines import Line2D
from typing import Union, Any
from sklearn.linear_model import LinearRegression
The provided code snippet includes necessary dependencies for implementing the `rank_index_cross_section` function. Write a Python function `def rank_index_cross_section(stock_data: pd.DataFrame, Top_k: int = -1, ascending: bool = False) -> pd.DataFrame` to solve the following problem:
Sort the cross-sectional data based on the given index. Args: stock_data : DataFrame containing the cross-sectional data. It should have three columns, and the last column represents the variable to be sorted. Top_k : The number of data points to retain after sorting. (Default: -1, which retains all data points) ascending: Whether to sort the data in ascending order or not. (Default: False) Returns: stock_data_selected : DataFrame containing the sorted data. It has the same structure as the input DataFrame.
Here is the function:
def rank_index_cross_section(stock_data: pd.DataFrame, Top_k: int = -1, ascending: bool = False) -> pd.DataFrame:
"""
Sort the cross-sectional data based on the given index.
Args:
stock_data : DataFrame containing the cross-sectional data. It should have three columns, and the last column represents the variable to be sorted.
Top_k : The number of data points to retain after sorting. (Default: -1, which retains all data points)
ascending: Whether to sort the data in ascending order or not. (Default: False)
Returns:
stock_data_selected : DataFrame containing the sorted data. It has the same structure as the input DataFrame.
"""
index = stock_data.columns[-1]
stock_data = stock_data.sort_values(by=index, ascending=ascending)
#stock_data_selected = stock_data[['trade_date','stock_name', index]].copy()
stock_data_selected = stock_data[:Top_k]
stock_data_selected = stock_data_selected.drop_duplicates(subset=['stock_name'], keep='first')
return stock_data_selected | Sort the cross-sectional data based on the given index. Args: stock_data : DataFrame containing the cross-sectional data. It should have three columns, and the last column represents the variable to be sorted. Top_k : The number of data points to retain after sorting. (Default: -1, which retains all data points) ascending: Whether to sort the data in ascending order or not. (Default: False) Returns: stock_data_selected : DataFrame containing the sorted data. It has the same structure as the input DataFrame. |
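A small self-contained check with dummy cross-sectional data (values are made up):
snap = pd.DataFrame({'trade_date': ['20230331'] * 3,
'stock_name': ['贵州茅台', '五粮液', '泸州老窖'],
'pe_ttm': [35.2, 28.7, 30.1]})
print(rank_index_cross_section(snap, Top_k=2))  # keeps the two rows with the largest pe_ttm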
1,090 | import tushare as ts
import matplotlib.pyplot as plt
import pandas as pd
import os
import random
from matplotlib.ticker import MaxNLocator
import time
from datetime import datetime, timedelta
import numpy as np
import mplfinance as mpf
from typing import Optional
import matplotlib.font_manager as fm
from matplotlib.lines import Line2D
from typing import Union, Any
from sklearn.linear_model import LinearRegression
pro = ts.pro_api(tushare_token)
def get_stock_code(stock_name: str) -> str:
# Retrieve the stock code of a given stock name. If we call get_stock_code('贵州茅台'), it will return '600519.SH'.
df = pd.read_csv('tushare_stock_basic_20230421210721.csv')
try:
code = df.loc[df.name==stock_name].ts_code.iloc[0]
return code
except:
return None
The provided code snippet includes necessary dependencies for implementing the `get_company_info` function. Write a Python function `def get_company_info(stock_name: str='') -> pd.DataFrame` to solve the following problem:
This function retrieves company information including stock code, exchange, chairman, manager, secretary, registered capital, setup date, province, city, website, email, employees, business scope, main business, introduction, office, and announcement date. Args: - stock_name (str): The name of the stock. Returns: - pd.DataFrame: A DataFrame that contains the company information.
Here is the function:
def get_company_info(stock_name: str='') -> pd.DataFrame:
# ts_code: stock code; exchange: exchange code (SSE = Shanghai, SZSE = Shenzhen); chairman: legal representative; manager: general manager; secretary: board secretary; reg_capital: registered capital; setup_date: registration date; province; city
# introduction: company profile; website: company homepage; email; office; ann_date: announcement date; business_scope: business scope; employees: number of employees; main_business: main business and products
"""
This function retrieves company information including stock code, exchange, chairman, manager, secretary,
registered capital, setup date, province, city, website, email, employees, business scope, main business,
introduction, office, and announcement date.
Args:
- stock_name (str): The name of the stock.
Returns:
- pd.DataFrame: A DataFrame that contains the company information.
"""
stock_code = get_stock_code(stock_name)
df = pro.stock_company(**{
"ts_code": stock_code,"exchange": "","status": "", "limit": "","offset": ""
}, fields=[
"ts_code","exchange","chairman", "manager","secretary", "reg_capital","setup_date", "province","city",
"website", "email","employees","business_scope","main_business","introduction","office", "ann_date"
])
en_to_cn = {
'ts_code': '股票代码',
'exchange': '交易所代码',
'chairman': '法人代表',
'manager': '总经理',
'secretary': '董秘',
'reg_capital': '注册资本',
'setup_date': '注册日期',
'province': '所在省份',
'city': '所在城市',
'introduction': '公司介绍',
'website': '公司主页',
'email': '电子邮件',
'office': '办公室',
'ann_date': '公告日期',
'business_scope': '经营范围',
'employees': '员工人数',
'main_business': '主要业务及产品'
}
df.rename(columns=en_to_cn, inplace=True)
df.insert(0, '股票名称', stock_name)
# for column in df.columns:
# print(f"[{column}]: {df[column].values[0]}")
return df | This function retrieves company information including stock code, exchange, chairman, manager, secretary, registered capital, setup date, province, city, website, email, employees, business scope, main business, introduction, office, and announcement date. Args: - stock_name (str): The name of the stock. Returns: - pd.DataFrame: A DataFrame that contains the company information. |
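A usage sketch (token and stock-basic CSV required; the stock name is illustrative):
info = get_company_info('贵州茅台')
print(info[['股票名称', '法人代表', '所在省份', '主要业务及产品']])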
1,091 | import tushare as ts
import matplotlib.pyplot as plt
import pandas as pd
import os
import random
from matplotlib.ticker import MaxNLocator
import time
from datetime import datetime, timedelta
import numpy as np
import mplfinance as mpf
from typing import Optional
import matplotlib.font_manager as fm
from matplotlib.lines import Line2D
from typing import Union, Any
from sklearn.linear_model import LinearRegression
pro = ts.pro_api(tushare_token)
def get_stock_code(stock_name: str) -> str:
# Retrieve the stock code of a given stock name. If we call get_stock_code('贵州茅台'), it will return '600519.SH'.
df = pd.read_csv('tushare_stock_basic_20230421210721.csv')
try:
code = df.loc[df.name==stock_name].ts_code.iloc[0]
return code
except:
return None
The provided code snippet includes necessary dependencies for implementing the `get_Financial_data_from_time_range` function. Write a Python function `def get_Financial_data_from_time_range(stock_name:str, start_date:str, end_date:str, financial_index:str='') -> pd.DataFrame` to solve the following problem:
Retrieves the financial data for a given stock within a specified date range. Args: stock_name (str): The stock name. start_date (str): The start date of the data range in the format "YYYYMMDD". end_date (str): The end date of the data range in the format "YYYYMMDD". financial_index (str, optional): The financial indicator to be queried. Returns: pd.DataFrame: A DataFrame containing financial data for the specified stock and date range.
Here is the function:
def get_Financial_data_from_time_range(stock_name:str, start_date:str, end_date:str, financial_index:str='') -> pd.DataFrame:
# Example: start_date='20190101', end_date='20221231', financial_index='roe' returns the ROE values for every reporting period from 2019 through 2022.
# To query a single quarterly or annual report, pass the period end date as both start and end: "yyyy0331" = Q1 report, "yyyy0630" = half-year report, "yyyy0930" = Q3 report, "yyyy1231" = annual report. For example, get_Financial_data_from_time_range("600519.SH", "20190331", "20190331", "roe") queries the ROE reported for Q1 2019.
# Available financial_index values: current_ratio (current ratio), quick_ratio (quick ratio), netprofit_margin (net profit margin), grossprofit_margin (gross profit margin), roe (return on equity), roe_dt (ROE excluding non-recurring items),
# roa (return on total assets), debt_to_assets (debt-to-assets ratio), roa_yearly (annualized return on assets), q_dtprofit (single-quarter net profit excl. non-recurring items), q_eps (single-quarter EPS),
# q_netprofit_margin (single-quarter net profit margin), q_gsprofit_margin (single-quarter gross profit margin), basic_eps_yoy (basic EPS YoY, %), netprofit_yoy (net profit attributable to parent YoY, %), q_netprofit_yoy (single-quarter net profit YoY, %), q_netprofit_qoq (single-quarter net profit QoQ, %), equity_yoy (net assets YoY, %)
"""
Retrieves the financial data for a given stock within a specified date range.
Args:
stock_name (str): The stock name.
start_date (str): The start date of the data range in the format "YYYYMMDD".
end_date (str): The end date of the data range in the format "YYYYMMDD".
financial_index (str, optional): The financial indicator to be queried.
Returns:
pd.DataFrame: A DataFrame containing financial data for the specified stock and date range.
"""
stock_code = get_stock_code(stock_name)
stock_data = pro.fina_indicator(**{
"ts_code": stock_code,
"ann_date": "",
"start_date": start_date,
"end_date": end_date,
"period": '',
"update_flag": "1",
"limit": "",
"offset": ""
}, fields=["ts_code", "end_date", financial_index])
#stock_name = get_stock_name_from_code(stock_code)
stock_data['stock_name'] = stock_name
stock_data = stock_data.sort_values(by='end_date', ascending=True)  # Sort by report date in ascending order.
# Rename the end_date column to trade_date.
stock_data.rename(columns={'end_date': 'trade_date'}, inplace=True)
stock_financial_data = stock_data[['stock_name', 'trade_date', financial_index]]
return stock_financial_data | Retrieves the financial data for a given stock within a specified date range. Args: stock_name (str): The stock name. start_date (str): The start date of the data range in the format "YYYYMMDD". end_date (str): The end date of the data range in the format "YYYYMMDD". financial_index (str, optional): The financial indicator to be queried. Returns: pd.DataFrame: A DataFrame containing financial data for the specified stock and date range.
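A usage sketch reproducing the ROE example from the comments above (token and CSV required):
roe = get_Financial_data_from_time_range('贵州茅台', '20190101', '20221231', 'roe')
print(roe)  # one row per reporting period: stock_name, trade_date, roe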
1,092 | import tushare as ts
import matplotlib.pyplot as plt
import pandas as pd
import os
import random
from matplotlib.ticker import MaxNLocator
import time
from datetime import datetime, timedelta
import numpy as np
import mplfinance as mpf
from typing import Optional
import matplotlib.font_manager as fm
from matplotlib.lines import Line2D
from typing import Union, Any
from sklearn.linear_model import LinearRegression
pro = ts.pro_api(tushare_token)
The provided code snippet includes necessary dependencies for implementing the `get_GDP_data` function. Write a Python function `def get_GDP_data(start_quarter:str='', end_quarter:str='', index:str='gdp_yoy') -> pd.DataFrame` to solve the following problem:
Retrieves GDP data for the chosen index and specified time period. Args: - start_quarter (str): The start quarter of the query, in YYYYMMDD format. - end_quarter (str): The end quarter, in YYYYMMDD format. - index (str): The specific GDP index to retrieve. Default is `gdp_yoy`. Returns: - pd.DataFrame: A pandas DataFrame with three columns: `quarter`, `country`, and the selected `index`.
Here is the function:
def get_GDP_data(start_quarter:str='', end_quarter:str='', index:str='gdp_yoy') -> pd.DataFrame:
# Available indicators: gdp (cumulative GDP, 100 million CNY), gdp_yoy (quarterly YoY growth, %), pi / si / ti (cumulative value of the primary / secondary / tertiary industry, 100 million CNY), pi_yoy / si_yoy / ti_yoy (YoY growth of the corresponding industry, %).
"""
Retrieves GDP data for the chosen index and specified time period.
Args:
- start_quarter (str): The start quarter of the query, in YYYYMMDD format.
- end_quarter (str): The end quarter, in YYYYMMDD format.
- index (str): The specific GDP index to retrieve. Default is `gdp_yoy`.
Returns:
- pd.DataFrame: A pandas DataFrame with three columns: `quarter`, `country`, and the selected `index`.
"""
# The output is a DataFrame with three columns:
# the first column represents the quarter (quarter), the second column represents the country (country), and the third column represents the index (index).
df = pro.cn_gdp(**{
"q":'',
"start_q": start_quarter,
"end_q": end_quarter,
"limit": "",
"offset": ""
}, fields=[
"quarter",
"gdp",
"gdp_yoy",
"pi",
"pi_yoy",
"si",
"si_yoy",
"ti",
"ti_yoy"
])
df = df.sort_values(by='quarter', ascending=True)  # Sort by quarter in ascending order.
df['country'] = 'China'
df = df[['quarter', 'country', index]].copy()
return df | Retrieves GDP data for the chosen index and specified time period. Args: - start_quarter (str): The start quarter of the query, in YYYYMMDD format. - end_quarter (str): The end quarter, in YYYYMMDD format. - index (str): The specific GDP index to retrieve. Default is `gdp_yoy`. Returns: - pd.DataFrame: A pandas DataFrame with three columns: `quarter`, `country`, and the selected `index`. |
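A usage sketch (token required; the quarter arguments follow the YYYYMMDD convention stated in the docstring):
gdp = get_GDP_data(start_quarter='20200101', end_quarter='20221231', index='gdp_yoy')
print(gdp.tail())  # quarter, country, gdp_yoy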
1,093 | import tushare as ts
import matplotlib.pyplot as plt
import pandas as pd
import os
import random
from matplotlib.ticker import MaxNLocator
import time
from datetime import datetime, timedelta
import numpy as np
import mplfinance as mpf
from typing import Optional
import matplotlib.font_manager as fm
from matplotlib.lines import Line2D
from typing import Union, Any
from sklearn.linear_model import LinearRegression
pro = ts.pro_api(tushare_token)
The provided code snippet includes necessary dependencies for implementing the `get_cpi_ppi_currency_supply_data` function. Write a Python function `def get_cpi_ppi_currency_supply_data(start_month: str = '', end_month: str = '', type: str = 'cpi', index: str = '') -> pd.DataFrame` to solve the following problem:
This function is used to retrieve China's monthly CPI (Consumer Price Index), PPI (Producer Price Index), and monetary supply data published by the National Bureau of Statistics, and return a DataFrame table containing month, country, and index values. The function parameters include start month, end month, query type, and query index. For query indexes that are not within the query range, the default index for the corresponding type is returned. Args: - start_month (str): start month of the query, in the format of YYYYMMDD. - end_month (str):end month in YYYYMMDD - type (str): required parameter, query type, including three types: cpi, ppi, and currency_supply. - index (str): optional parameter, query index, the specific index depends on the query type. If the query index is not within the range, the default index for the corresponding type is returned. Returns: - pd.DataFrame: DataFrame type, including three columns: month, country, and index value.
Here is the function:
def get_cpi_ppi_currency_supply_data(start_month: str = '', end_month: str = '', type: str = 'cpi', index: str = '') -> pd.DataFrame:
# The query types (type) include three categories: CPI, PPI, and currency supply. Each type corresponds to different indices.
# Specifically, CPI has 12 indices, PPI has 30 indices, and currency supply has 9 indices.
# The output is a DataFrame table with three columns: the first column represents the month (month), the second column represents the country (country), and the third column represents the index (index).
# type='cpi': monthly CPI data includes the following 12 fields:
# nt_* = nationwide, town_* = urban, cnt_* = rural; suffixes: _val = value for the month, _yoy = year-on-year (%), _mom = month-on-month (%), _accu = cumulative value
# type='ppi': monthly PPI data includes the following 30 fields, named ppi[_<category>]_<suffix>:
# category: (none) = all industrial products, mp = means of production (mp_qm = mining, mp_rm = raw materials, mp_p = processing), cg = consumer goods (cg_f = food, cg_c = clothing, cg_adu = general daily articles, cg_dcg = durable consumer goods)
# suffix: _yoy = year-on-year for the month (%), _mom = month-on-month (%), _accu = cumulative year-on-year (%)
# type='currency_supply': monthly data includes the following 9 fields:
# m0 / m1 / m2 = money supply (100 million CNY), m0_yoy / m1_yoy / m2_yoy = year-on-year (%), m0_mom / m1_mom / m2_mom = month-on-month (%)
"""
This function is used to retrieve China's monthly CPI (Consumer Price Index), PPI (Producer Price Index),
and monetary supply data published by the National Bureau of Statistics,
and return a DataFrame table containing month, country, and index values.
The function parameters include start month, end month, query type, and query index.
For query indexes that are not within the query range, the default index for the corresponding type is returned.
Args:
- start_month (str): start month of the query, in the format of YYYYMMDD.
- end_month (str):end month in YYYYMMDD
- type (str): required parameter, query type, including three types: cpi, ppi, and currency_supply.
- index (str): optional parameter, query index, the specific index depends on the query type.
If the query index is not within the range, the default index for the corresponding type is returned.
Returns:
- pd.DataFrame: DataFrame type, including three columns: month, country, and index value.
"""
if type == 'cpi':
df = pro.cn_cpi(**{
"m": '',
"start_m": start_month,
"end_m": end_month,
"limit": "",
"offset": ""
}, fields=[
"month", "nt_val","nt_yoy", "nt_mom","nt_accu", "town_val", "town_yoy", "town_mom",
"town_accu", "cnt_val", "cnt_yoy", "cnt_mom", "cnt_accu"])
# If the index is not within the aforementioned range, the index is set as "nt_yoy".
if index not in df.columns:
index = 'nt_yoy'
elif type == 'ppi':
df = pro.cn_ppi(**{
"m": '',
"start_m": start_month,
"end_m": end_month,
"limit": "",
"offset": ""
}, fields=[
"month", "ppi_yoy", "ppi_mp_yoy", "ppi_mp_qm_yoy", "ppi_mp_rm_yoy", "ppi_mp_p_yoy", "ppi_cg_yoy",
"ppi_cg_f_yoy", "ppi_cg_c_yoy", "ppi_cg_adu_yoy", "ppi_cg_dcg_yoy",
"ppi_mom", "ppi_mp_mom", "ppi_mp_qm_mom", "ppi_mp_rm_mom", "ppi_mp_p_mom", "ppi_cg_mom", "ppi_cg_f_mom",
"ppi_cg_c_mom", "ppi_cg_adu_mom", "ppi_cg_dcg_mom",
"ppi_accu", "ppi_mp_accu", "ppi_mp_qm_accu", "ppi_mp_rm_accu", "ppi_mp_p_accu", "ppi_cg_accu",
"ppi_cg_f_accu", "ppi_cg_c_accu", "ppi_cg_adu_accu", "ppi_cg_dcg_accu"
])
if index not in df.columns:
index = 'ppi_yoy'
elif type == 'currency_supply':
df = pro.cn_m(**{
"m": '',
"start_m": start_month,
"end_m": end_month,
"limit": "",
"offset": ""
}, fields=[
"month", "m0", "m0_yoy","m0_mom", "m1",
"m1_yoy", "m1_mom", "m2", "m2_yoy", "m2_mom"])
if index not in df.columns:
index = 'm2_yoy'
df = df.sort_values(by='month', ascending=True)  # Sort by month in ascending order.
df['country'] = 'China'
df = df[['month', 'country', index]].copy()
return df | This function is used to retrieve China's monthly CPI (Consumer Price Index), PPI (Producer Price Index), and monetary supply data published by the National Bureau of Statistics, and return a DataFrame table containing month, country, and index values. The function parameters include start month, end month, query type, and query index. For query indexes that are not within the query range, the default index for the corresponding type is returned. Args: - start_month (str): start month of the query, in the format of YYYYMMDD. - end_month (str):end month in YYYYMMDD - type (str): required parameter, query type, including three types: cpi, ppi, and currency_supply. - index (str): optional parameter, query index, the specific index depends on the query type. If the query index is not within the range, the default index for the corresponding type is returned. Returns: - pd.DataFrame: DataFrame type, including three columns: month, country, and index value. |
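A usage sketch (token required; the months are illustrative):
cpi = get_cpi_ppi_currency_supply_data(start_month='20220101', end_month='20221231', type='cpi', index='nt_yoy')
print(cpi.tail())  # month, country, nt_yoy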
1,094 | import tushare as ts
import matplotlib.pyplot as plt
import pandas as pd
import os
import random
from matplotlib.ticker import MaxNLocator
import time
from datetime import datetime, timedelta
import numpy as np
import mplfinance as mpf
from typing import Optional
import matplotlib.font_manager as fm
from matplotlib.lines import Line2D
from typing import Union, Any
from sklearn.linear_model import LinearRegression
The provided code snippet includes necessary dependencies for implementing the `predict_next_value` function. Write a Python function `def predict_next_value(df: pd.DataFrame, pred_index: str = 'nt_yoy', pred_num:int = 1. ) -> pd.DataFrame` to solve the following problem:
Predict the next n values of a specific column in the DataFrame using linear regression. Parameters: df (pandas.DataFrame): The input DataFrame. pred_index (str): The name of the column to predict. pred_num (int): The number of future values to predict. Returns: pandas.DataFrame: The DataFrame with the predicted values appended to the specified column and other columns filled as pred+index.
Here is the function:
def predict_next_value(df: pd.DataFrame, pred_index: str = 'nt_yoy', pred_num: int = 1) -> pd.DataFrame:
"""
Predict the next n values of a specific column in the DataFrame using linear regression.
Parameters:
df (pandas.DataFrame): The input DataFrame.
pred_index (str): The name of the column to predict.
pred_num (int): The number of future values to predict.
Returns:
pandas.DataFrame: The DataFrame with the predicted values appended to the specified column
and other columns filled as pred+index.
"""
input_array = df[pred_index].values
# Convert the input array into the desired format.
x = np.array(range(len(input_array))).reshape(-1, 1)
y = input_array.reshape(-1, 1)
# Train a linear regression model.
model = LinearRegression()
model.fit(x, y)
# Predict the future n values.
next_indices = np.array(range(len(input_array), len(input_array) + pred_num)).reshape(-1, 1)
predicted_values = model.predict(next_indices).flatten()
for i, value in enumerate(predicted_values, 1):
row_data = {pred_index: value}
for other_col in df.columns:
if other_col != pred_index:
row_data[other_col] = 'pred' + str(i)
df = pd.concat([df, pd.DataFrame([row_data])], ignore_index=True)  # DataFrame.append was removed in pandas 2.x; concat works on both old and new versions.
# Return the updated DataFrame
return df | Predict the next n values of a specific column in the DataFrame using linear regression. Parameters: df (pandas.DataFrame): The input DataFrame. pred_index (str): The name of the column to predict. pred_num (int): The number of future values to predict. Returns: pandas.DataFrame: The DataFrame with the predicted values appended to the specified column and other columns filled as pred+index. |
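A small self-contained check with made-up CPI values, extrapolating two steps ahead:
hist = pd.DataFrame({'month': ['202301', '202302', '202303'], 'nt_yoy': [2.1, 1.0, 0.7]})
print(predict_next_value(hist, pred_index='nt_yoy', pred_num=2))  # appends two linearly extrapolated rows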
1,095 | import tushare as ts
import matplotlib.pyplot as plt
import pandas as pd
import os
import random
from matplotlib.ticker import MaxNLocator
import time
from datetime import datetime, timedelta
import numpy as np
import mplfinance as mpf
from typing import Optional
import matplotlib.font_manager as fm
from matplotlib.lines import Line2D
from typing import Union, Any
from sklearn.linear_model import LinearRegression
pro = ts.pro_api(tushare_token)
The provided code snippet includes necessary dependencies for implementing the `get_latest_new_from_web` function. Write a Python function `def get_latest_new_from_web(src: str = 'sina') -> pd.DataFrame` to solve the following problem:
Retrieves the latest news data from major news websites, including Sina Finance, 10jqka, Eastmoney, and Yuncaijing. Args: src (str): The name of the news website. Default is 'sina'. Optional parameters include: 'sina' for Sina Finance, '10jqka' for 10jqka, 'eastmoney' for Eastmoney, and 'yuncaijing' for Yuncaijing. Returns: pd.DataFrame: A DataFrame containing the news data, including two columns for date/time and content.
Here is the function:
def get_latest_new_from_web(src: str = 'sina') -> pd.DataFrame:
# sina: Sina Finance real-time news
# 10jqka: Tonghuashun (10jqka) financial news
# eastmoney: Eastmoney financial news
# yuncaijing: Yuncaijing financial news
"""
Retrieves the latest news data from major news websites, including Sina Finance, 10jqka, Eastmoney, and Yuncaijing.
Args:
src (str): The name of the news website. Default is 'sina'. Optional parameters include: 'sina' for Sina Finance,
'10jqka' for 10jqka, 'eastmoney' for Eastmoney, and 'yuncaijing' for Yuncaijing.
Returns:
pd.DataFrame: A DataFrame containing the news data, including two columns for date/time and content.
"""
df = pro.news(**{
"start_date": '',
"end_date": '',
"src": src,
"limit": "",
"offset": ""
}, fields=[
"datetime",
"content",
])
df = df.apply(lambda x: '[' + x.name + ']' + ': ' + x.astype(str))
return df | Retrieves the latest news data from major news websites, including Sina Finance, 10jqka, Eastmoney, and Yuncaijing. Args: src (str): The name of the news website. Default is 'sina'. Optional parameters include: 'sina' for Sina Finance, '10jqka' for 10jqka, 'eastmoney' for Eastmoney, and 'yuncaijing' for Yuncaijing. Returns: pd.DataFrame: A DataFrame containing the news data, including two columns for date/time and content. |
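A usage sketch (token required):
news = get_latest_new_from_web('sina')
print(news.head())  # every cell is prefixed with its column name, e.g. '[content]: ...'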
1,096 | import tushare as ts
import matplotlib.pyplot as plt
import pandas as pd
import os
import random
from matplotlib.ticker import MaxNLocator
import time
from datetime import datetime, timedelta
import numpy as np
import mplfinance as mpf
from typing import Optional
import matplotlib.font_manager as fm
from matplotlib.lines import Line2D
from typing import Union, Any
from sklearn.linear_model import LinearRegression
pro = ts.pro_api(tushare_token)
The provided code snippet includes necessary dependencies for implementing the `get_index_constituent` function. Write a Python function `def get_index_constituent(index_name: str = '', start_date:str ='', end_date:str ='') -> pd.DataFrame` to solve the following problem:
Query the constituent stocks of basic index (中证500) or a specified SW (申万) industry index args: index_name: the name of the index. start_date: the start date in "YYYYMMDD". end_date: the end date in "YYYYMMDD". return: A pandas DataFrame containing the following columns: index_code index_name stock_code: the code of the constituent stock. stock_name: the name of the constituent stock. weight: the weight of the constituent stock.
Here is the function:
def get_index_constituent(index_name: str = '', start_date:str ='', end_date:str ='') -> pd.DataFrame:
"""
Query the constituent stocks of basic index (中证500) or a specified SW (申万) industry index
args:
index_name: the name of the index.
start_date: the start date in "YYYYMMDD".
end_date: the end date in "YYYYMMDD".
return:
A pandas DataFrame containing the following columns:
index_code
index_name
stock_code: the code of the constituent stock.
stock_name: the name of the constituent stock.
weight: the weight of the constituent stock.
"""
if '申万' in index_name:
if '申万一级行业' in index_name:
# Keep only the part after the '申万一级行业' prefix (the bare industry name).
index_name = index_name[6:]
df1 = pd.read_csv('SW2021_industry_L1.csv')
index_code = df1[df1['industry_name'] == index_name]['index_code'].iloc[0]
elif '申万二级行业' in index_name:
index_name = index_name[6:]
df1 = pd.read_csv('SW2021_industry_L2.csv')
index_code = df1[df1['industry_name'] == index_name]['index_code'].iloc[0]
elif '申万三级行业' in index_name:
index_name = index_name[6:]
df1 = pd.read_csv('SW2021_industry_L3.csv')
index_code = df1[df1['industry_name'] == index_name]['index_code'].iloc[0]
print('The industry code for ', index_name, ' is: ', index_code)
# Fetch the constituent list for the SW industry index.
df = pro.index_member(**{
"index_code": index_code , #'851251.SI'
"is_new": "",
"ts_code": "",
"limit": "",
"offset": ""
}, fields=[
"index_code",
"con_code",
"in_date",
"out_date",
"is_new",
"index_name",
"con_name"
])
#
# For each stock, filter the start_date and end_date that are between in_date and out_date.
df = df[(df['in_date'] <= start_date)]
df = df[(df['out_date'] >= end_date) | (df['out_date'].isnull())]
df.rename(columns={'con_code': 'stock_code'}, inplace=True)
df.rename(columns={'con_name': 'stock_name'}, inplace=True)
#
df['weight'] = np.nan
df = df[['index_code', "index_name", 'stock_code', 'stock_name','weight']]
else: # Broad-based market index (e.g. 沪深300, 中证500)
df1 = pro.index_basic(**{
"ts_code": "",
"market": "",
"publisher": "",
"category": "",
"name": index_name,
"limit": "",
"offset": ""
}, fields=[
"ts_code",
"name",
])
index_code = df1["ts_code"][0]
print(f'index_code for basic index {index_name} is {index_code}')
# Step 2: Retrieve the constituents of an index based on the index code and given date.
df = pro.index_weight(**{
"index_code": index_code,
"trade_date": '',
"start_date": start_date,
"end_date": end_date,
"limit": "",
"offset": ""
}, fields=[
"index_code",
"con_code",
"trade_date",
"weight"
])
# df = df.sort_values(by='trade_date', ascending=True) #
df['index_name'] = index_name
last_day = df['trade_date'][0]
# for the last trading day
df = df[df['trade_date'] == last_day]
df_stock = pd.read_csv('tushare_stock_basic_20230421210721.csv')
# Merge based on the stock code.
df = pd.merge(df, df_stock, how='left', left_on='con_code', right_on='ts_code')
# df.rename(columns={'name_y': 'name'}, inplace=True)
df = df.drop(columns=['symbol', 'area', 'con_code'])
df.sort_values(by='weight', ascending=False, inplace=True)
df.rename(columns={'name': 'stock_name'}, inplace=True)
df.rename(columns={'ts_code': 'stock_code'}, inplace=True)
df.dropna(axis=0, how='any', inplace=True)
#
df = df[['index_code', "index_name", 'stock_code', 'stock_name', 'weight']]
return df | Query the constituent stocks of basic index (中证500) or a specified SW (申万) industry index args: index_name: the name of the index. start_date: the start date in "YYYYMMDD". end_date: the end date in "YYYYMMDD". return: A pandas DataFrame containing the following columns: index_code index_name stock_code: the code of the constituent stock. stock_name: the name of the constituent stock. weight: the weight of the constituent stock. |
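A usage sketch for a broad-based index (token and stock-basic CSV required; the index name and dates are illustrative):
members = get_index_constituent('沪深300', '20230101', '20230331')
print(members.head())  # index_code, index_name, stock_code, stock_name, weight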
1,097 | import tushare as ts
import matplotlib.pyplot as plt
import pandas as pd
import os
import random
from matplotlib.ticker import MaxNLocator
import time
from datetime import datetime, timedelta
import numpy as np
import mplfinance as mpf
from typing import Optional
import matplotlib.font_manager as fm
from matplotlib.lines import Line2D
from typing import Union, Any
from sklearn.linear_model import LinearRegression
def get_stock_prices_data(stock_name: str='', start_date: str='', end_date: str='', freq:str='daily') -> pd.DataFrame:
"""
Retrieves the daily/weekly/monthly price data for a given stock code during a specific time period. get_stock_prices_data('贵州茅台','20200120','20220222','daily')
Args:
- stock_name (str)
- start_date (str): The start date in the format 'YYYYMMDD'.
- end_date (str): The end date in 'YYYYMMDD'.
- freq (str): The frequency of the price data, can be 'daily', 'weekly', or 'monthly'.
Returns:
- pd.DataFrame: A dataframe that contains the daily/weekly/monthly data. The output columns contain stock_code, trade_date, open, high, low, close, pre_close (previous close), change (price change), pct_chg (percentage change), vol (volume), amount (turnover)
"""
stock_code = get_stock_code(stock_name)
if freq == 'daily':
stock_data = pro.daily(**{
"ts_code": stock_code,
"trade_date": '',
"start_date": start_date,
"end_date": end_date,
"offset": "",
"limit": ""
}, fields=[
"ts_code",
"trade_date",
"open",
"high",
"low",
"close",
"pre_close",
"change",
"pct_chg",
"vol",
"amount"
])
elif freq == 'weekly':
stock_data = pro.weekly(**{
"ts_code": stock_code,
"trade_date": '',
"start_date": start_date,
"end_date": end_date,
"limit": "",
"offset": ""
}, fields=[
"ts_code",
"trade_date",
"close",
"open",
"high",
"low",
"pre_close",
"change",
"pct_chg",
"vol",
"amount"
])
elif freq == 'monthly':
stock_data = pro.monthly(**{
"ts_code": stock_code,
"trade_date": '',
"start_date": start_date,
"end_date": end_date,
"limit": "",
"offset": ""
}, fields=[
"ts_code",
"trade_date",
"close",
"open",
"high",
"low",
"pre_close",
"change",
"pct_chg",
"vol",
"amount"
])
adj_f = get_adj_factor(stock_code, start_date, end_date)
stock_data = pd.merge(stock_data, adj_f, on=['ts_code', 'trade_date'])
# Multiply the values of open, high, low, and close by their corresponding adjustment factors.
# To obtain the adjusted close price
stock_data[['open', 'high', 'low', 'close']] *= stock_data['adj_factor'].values.reshape(-1, 1)
#stock_data.rename(columns={'vol': 'volume'}, inplace=True)
df = pd.read_csv('tushare_stock_basic_20230421210721.csv')
stock_data_merged = pd.merge(stock_data, df, on='ts_code')
stock_data_merged.rename(columns={'ts_code': 'stock_code'}, inplace=True)
stock_data_merged.rename(columns={'name': 'stock_name'}, inplace=True)
stock_data_merged = stock_data_merged.sort_values(by='trade_date', ascending=True) # To sort the DataFrame by date in ascending order
return stock_data_merged
def is_fund(ts_name: str = '') -> bool:
# Calls get_stock_code() and query_fund_name_or_code() to decide whether the name refers to a stock or a fund.
if get_stock_code(ts_name) is not None and query_fund_name_or_code(ts_name) is None:
return False
elif get_stock_code(ts_name) is None and query_fund_name_or_code(ts_name) is not None:
return True
def cal_dt(num_at_time_2: float = 0.0, num_at_time_1: float = 0.0) -> float:
"""
This function calculates the percentage change of a metric from one time to another.
Args:
- num_at_time_2: the metric value at time 2 (end time)
- num_at_time_1: the metric value at time 1 (start time)
Returns:
- float: the percentage change of the metric from time 1 to time 2
"""
if num_at_time_1 == 0:
num_at_time_1 = 0.0000000001
return round((num_at_time_2 - num_at_time_1) / num_at_time_1, 4)
def query_fund_data(fund_code: str = '', start_date: str = '', end_date: str = '') -> pd.DataFrame:
#
# Returned fields: ts_code (TS code), ann_date (announcement date), nav_date (NAV date), unit_nav (unit NAV), accum_nav (accumulated NAV),
# accum_div (accumulated dividends), net_asset (net asset value), total_netasset (total net asset value), adj_nav (adjusted unit NAV), pct_chg (daily change, %)
"""
Retrieves fund data based on the fund code, start date, and end date.
Args:
fund_code (str, optional): Fund code. Defaults to ''.
start_date (str, optional): Start date in YYYYMMDD format. Defaults to ''.
end_date (str, optional): End date in YYYYMMDD format. Defaults to ''.
Returns:
df (DataFrame): A DataFrame containing fund data, including TS code, announcement date, net asset value date,
unit net asset value, accumulated net asset value, accumulated dividends, net asset value,
total net asset value, adjusted unit net asset value, and fund name. The 'ts_code' column is renamed
to 'fund_code', 'nav_date' is renamed to 'trade_date', and the DataFrame is sorted by the trade date
in ascending order. If the fund code does not exist, None is returned.
"""
df = pro.fund_nav(**{
"ts_code": fund_code,
"nav_date": "",
"offset": "",
"limit": "",
"market": "",
"start_date": start_date,
"end_date": end_date
}, fields=[
"ts_code",
"ann_date",
"nav_date",
"unit_nav",
"accum_nav",
"accum_div",
"net_asset",
"total_netasset",
"adj_nav",
"update_flag"
])
try:
fund_name= query_fund_name_or_code(fund_code=fund_code)
df['fund_name'] = fund_name
#
df.rename(columns={'ts_code': 'fund_code'}, inplace=True)
df.rename(columns={'nav_date': 'trade_date'}, inplace=True)
df.sort_values(by='trade_date', ascending=True, inplace=True)
except:
        print(fund_code, 'fund code does not exist')
return None
    # Daily percentage change computed from the adjusted NAV
df['pct_chg'] = df['adj_nav'].pct_change()
    # The first row has no previous NAV, so set its change to 0.0
df.loc[0, 'pct_chg'] = 0.0
return df
def query_fund_name_or_code(fund_name: str = '', fund_code: str = '') -> str:
#
"""
Retrieves the fund code based on the fund name or Retrieves the fund name based on the fund code.
Args:
fund_name (str, optional): Fund name. Defaults to ''.
fund_code (str, optional): Fund code. Defaults to ''.
Returns:
code or name: Fund code if fund_name is provided and fund_code is empty. Fund name if fund_code is provided and fund_name is empty.
"""
#df = pd.read_csv('./tushare_fund_basic_20230508193747.csv')
# Query the fund code based on the fund name.
if fund_name != '' and fund_code == '':
#
df = pd.read_csv('./tushare_fund_basic_all.csv')
#
# df = pro.fund_basic(**{
# "ts_code": "",
# "market": "",
# "update_flag": "",
# "offset": "",
# "limit": "",
# "status": "",
# "name": fund_name
# }, fields=[
# "ts_code",
# "name"
# ])
try:
#
code = df[df['name'] == fund_name]['ts_code'].values[0]
except:
            #print(fund_name, 'fund name does not exist')
return None
return code
# Query the fund name based on the fund code.
if fund_code != '' and fund_name == '':
df = pd.read_csv('./tushare_fund_basic_all.csv')
try:
name = df[df['ts_code'] == fund_code]['name'].values[0]
except:
            #print(fund_code, 'fund code does not exist')
return None
return name
The provided code snippet includes necessary dependencies for implementing the `calculate_earning_between_two_time` function. Write a Python function `def calculate_earning_between_two_time(stock_name: str = '', start_date: str = '', end_date: str = '', index: str = 'close') -> float` to solve the following problem:
Calculates the rate of return for a specified stock/fund between two dates. Args: stock_name: stock_name or fund_name start_date end_date index (str): The index used to calculate the stock return, including 'open' and 'close'. Returns: float: The rate of return for the specified stock between the two dates.
Here is the function:
def calculate_earning_between_two_time(stock_name: str = '', start_date: str = '', end_date: str = '', index: str = 'close') -> float:
"""
Calculates the rate of return for a specified stock/fund between two dates.
Args:
stock_name: stock_name or fund_name
start_date
end_date
index (str): The index used to calculate the stock return, including 'open' and 'close'.
Returns:
float: The rate of return for the specified stock between the two dates.
"""
if is_fund(stock_name):
fund_code = query_fund_name_or_code(stock_name)
stock_data = query_fund_data(fund_code, start_date, end_date)
        # Fund NAV data has no 'open'/'close' columns, so fall back to the adjusted NAV
        if index == '' or index not in stock_data.columns:
            index = 'adj_nav'
else:
stock_data = get_stock_prices_data(stock_name, start_date, end_date,'daily')
try:
end_price = stock_data.iloc[-1][index]
start_price = stock_data.iloc[0][index]
earning = cal_dt(end_price, start_price)
# earning = round((end_price - start_price) / start_price * 100, 2)
except:
        print(stock_name, start_date, end_date)
        print('##################### no data for this stock #####################')
return None
# percent = earning * 100
# percent_str = '{:.2f}%'.format(percent)
return earning | Calculates the rate of return for a specified stock/fund between two dates. Args: stock_name: stock_name or fund_name start_date end_date index (str): The index used to calculate the stock return, including 'open' and 'close'. Returns: float: The rate of return for the specified stock between the two dates. |
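A minimal usage sketch for the function above (the stock name and date range are illustrative, and a valid tushare token is assumed to be configured in `pro`):
# Rate of return of 贵州茅台 over 2023Q1, based on close prices; cal_dt returns a fraction rounded to 4 decimals
ret = calculate_earning_between_two_time(stock_name='贵州茅台', start_date='20230103', end_date='20230331', index='close')
print(ret)  # e.g. a value of 0.05 would mean +5%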
1,098 | import tushare as ts
import matplotlib.pyplot as plt
import pandas as pd
import os
import random
from matplotlib.ticker import MaxNLocator
import time
from datetime import datetime, timedelta
import numpy as np
import mplfinance as mpf
from typing import Optional
import matplotlib.font_manager as fm
from matplotlib.lines import Line2D
from typing import Union, Any
from sklearn.linear_model import LinearRegression
def get_stock_name_from_code(stock_code: str) -> str:
"""
Reads a local file to retrieve the stock name from a given stock code.
Args:
- stock_code (str): The code of the stock.
Returns:
- str: The stock name of the given stock code.
"""
# For example,if we call get_stock_name_from_code('600519.SH'), it will return '贵州茅台'.
df = pd.read_csv('tushare_stock_basic_20230421210721.csv')
name = df.loc[df.ts_code == stock_code].name.iloc[0]
return name
def query_fund_name_or_code(fund_name: str = '', fund_code: str = '') -> str:
#
"""
Retrieves the fund code based on the fund name or Retrieves the fund name based on the fund code.
Args:
fund_name (str, optional): Fund name. Defaults to ''.
fund_code (str, optional): Fund code. Defaults to ''.
Returns:
code or name: Fund code if fund_name is provided and fund_code is empty. Fund name if fund_code is provided and fund_name is empty.
"""
#df = pd.read_csv('./tushare_fund_basic_20230508193747.csv')
# Query the fund code based on the fund name.
if fund_name != '' and fund_code == '':
#
df = pd.read_csv('./tushare_fund_basic_all.csv')
#
# df = pro.fund_basic(**{
# "ts_code": "",
# "market": "",
# "update_flag": "",
# "offset": "",
# "limit": "",
# "status": "",
# "name": fund_name
# }, fields=[
# "ts_code",
# "name"
# ])
try:
#
code = df[df['name'] == fund_name]['ts_code'].values[0]
except:
            #print(fund_name, 'fund name does not exist')
return None
return code
# Query the fund name based on the fund code.
if fund_code != '' and fund_name == '':
df = pd.read_csv('./tushare_fund_basic_all.csv')
try:
name = df[df['ts_code'] == fund_code]['name'].values[0]
except:
            #print(fund_code, 'fund code does not exist')
return None
return name
The provided code snippet includes necessary dependencies for implementing the `loop_rank` function. Write a Python function `def loop_rank(df: pd.DataFrame, func: callable, *args, **kwargs) -> pd.DataFrame` to solve the following problem:
It iteratively applies the given function to each row and get a result using function. It then stores the calculated result in 'new_feature' column. Args: df: DataFrame with a single column func : The function to be applied to each row: func(row, *args, **kwargs) *args: Additional positional arguments for `func` function. **kwargs: Additional keyword arguments for `func` function. Returns: pd.DataFrame: A output DataFrame with three columns: the constant column, input column, and new_feature column. The DataFrame is sorted based on the new_feature column in descending order.
Here is the function:
def loop_rank(df: pd.DataFrame, func: callable, *args, **kwargs) -> pd.DataFrame:
"""
    It iteratively applies the given function to each row and stores the calculated result in the 'new_feature' column.
Args:
df: DataFrame with a single column
func : The function to be applied to each row: func(row, *args, **kwargs)
*args: Additional positional arguments for `func` function.
**kwargs: Additional keyword arguments for `func` function.
Returns:
pd.DataFrame: A output DataFrame with three columns: the constant column, input column, and new_feature column.
The DataFrame is sorted based on the new_feature column in descending order.
"""
df['new_feature'] = None
loop_var = df.columns[0]
for _, row in df.iterrows():
res = None
var = row[loop_var] #
if var is not None:
if loop_var == 'stock_name':
stock_name = var
elif loop_var == 'stock_code':
stock_name = get_stock_name_from_code(var)
elif loop_var == 'fund_name':
stock_name = var
elif loop_var == 'fund_code':
stock_name = query_fund_name_or_code('',var)
time.sleep(0.4)
try:
res = func(stock_name, *args, **kwargs) #
except:
raise ValueError('#####################Error for func#####################')
# res represents the result obtained for the variable. For example, if the variable is a stock name, res could be the return rate of that stock over a certain period or a specific feature value of that stock. Therefore, res should be a continuous value.
# If the format of res is a float, then it can be used directly. However, if res is in DataFrame format, you can retrieve the value corresponding to the index.
if isinstance(res, pd.DataFrame) and not res.empty:
#
try:
res = round(res.loc[:,args[-1]][0], 2)
df.loc[df[loop_var] == var, 'new_feature'] = res
except:
raise ValueError('##################### Error ######################')
elif isinstance(res, float): #
res = res
df.loc[df[loop_var] == var, 'new_feature'] = res
print(var, res)
# Remove the rows where the new_feature column is empty.
df = df.dropna(subset=['new_feature'])
stock_data = df.sort_values(by='new_feature', ascending=False)
#
stock_data.insert(0, 'unchanged', loop_var)
stock_data = stock_data.loc[:,[stock_data.columns[0], loop_var, 'new_feature']]
return stock_data | It iteratively applies the given function to each row and get a result using function. It then stores the calculated result in 'new_feature' column. Args: df: DataFrame with a single column func : The function to be applied to each row: func(row, *args, **kwargs) *args: Additional positional arguments for `func` function. **kwargs: Additional keyword arguments for `func` function. Returns: pd.DataFrame: A output DataFrame with three columns: the constant column, input column, and new_feature column. The DataFrame is sorted based on the new_feature column in descending order. |
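A hedged usage sketch for loop_rank (the candidate list is illustrative; calculate_earning_between_two_time is the function from the previous row and a tushare token is assumed):
candidates = pd.DataFrame({'stock_name': ['贵州茅台', '五粮液', '宁德时代']})
# Rank candidates by their 2023Q1 return; the result is sorted by 'new_feature' in descending order
ranked = loop_rank(candidates, calculate_earning_between_two_time, '20230101', '20230331', index='close')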
1,099 | import tushare as ts
import matplotlib.pyplot as plt
import pandas as pd
import os
import random
from matplotlib.ticker import MaxNLocator
import time
from datetime import datetime, timedelta
import numpy as np
import mplfinance as mpf
from typing import Optional
import matplotlib.font_manager as fm
from matplotlib.lines import Line2D
from typing import Union, Any
from sklearn.linear_model import LinearRegression
def output_mean_median_col(data: pd.DataFrame, col: str = 'new_feature') -> float:
# It calculates the mean and median value for the specified column.
mean = round(data[col].mean(), 2)
median = round(data[col].median(), 2)
#
#print(title, mean)
return (mean, median) | null |
1,100 | import tushare as ts
import matplotlib.pyplot as plt
import pandas as pd
import os
import random
from matplotlib.ticker import MaxNLocator
import time
from datetime import datetime, timedelta
import numpy as np
import mplfinance as mpf
from typing import Optional
import matplotlib.font_manager as fm
from matplotlib.lines import Line2D
from typing import Union, Any
from sklearn.linear_model import LinearRegression
The provided code snippet includes necessary dependencies for implementing the `output_weighted_mean_col` function. Write a Python function `def output_weighted_mean_col(data: pd.DataFrame, col: str, weight_col: pd.Series) -> float` to solve the following problem:
Calculates the weighted mean of a column and returns the result as a float. Args: data (pd.DataFrame): The input cross-sectional or time-series data containing the feature columns. col (str): The name of the feature column to calculate the weighted mean for. weight_col (pd.Series): The weights used for the calculation, as a pandas Series. Returns: float: The weighted mean of the specified feature column.
Here is the function:
def output_weighted_mean_col(data: pd.DataFrame, col: str, weight_col: pd.Series) -> float:
"""
Calculates the weighted mean of a column and returns the result as a float.
Args:
data (pd.DataFrame): The input cross-sectional or time-series data containing the feature columns.
col (str): The name of the feature column to calculate the weighted mean for.
weight_col (pd.Series): The weights used for the calculation, as a pandas Series.
Returns:
float: The weighted mean of the specified feature column.
"""
weighted_mean = round(np.average(data[col], weights = weight_col)/100., 2)
return weighted_mean | Calculates the weighted mean of a column and returns the result as a float. Args: data (pd.DataFrame): The input cross-sectional or time-series data containing the feature columns. col (str): The name of the feature column to calculate the weighted mean for. weight_col (pd.Series): The weights used for the calculation, as a pandas Series. Returns: float: The weighted mean of the specified feature column. |
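A small sketch, assuming `cross_df` is a cross-sectional DataFrame with a 'pct_chg' column (in percent) and a 'circ_mv' market-cap column used as weights (both column names are illustrative):
w_mean = output_weighted_mean_col(cross_df, col='pct_chg', weight_col=cross_df['circ_mv'])
# Note: the function divides the weighted average by 100, so a percent-valued column comes back as a fraction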
1,101 | import tushare as ts
import matplotlib.pyplot as plt
import pandas as pd
import os
import random
from matplotlib.ticker import MaxNLocator
import time
from datetime import datetime, timedelta
import numpy as np
import mplfinance as mpf
from typing import Optional
import matplotlib.font_manager as fm
from matplotlib.lines import Line2D
from typing import Union, Any
from sklearn.linear_model import LinearRegression
pro = ts.pro_api(tushare_token)
The provided code snippet includes necessary dependencies for implementing the `get_index_data` function. Write a Python function `def get_index_data(index_name: str = '', start_date: str = '', end_date: str = '', freq: str = 'daily') -> pd.DataFrame` to solve the following problem:
This function retrieves daily, weekly, or monthly data for a given stock index. Arguments: - index_name: Name of the index - start_date: Start date in 'YYYYMMDD' - end_date: End date in 'YYYYMMDD' - freq: Frequency 'daily', 'weekly', or 'monthly' Returns: A DataFrame containing the following columns: trade_date, ts_code, close, open, high, low, pre_close: Previous day's closing price, change (price change), pct_chg (percent change), vol (trading volume), amount (turnover), name: Index Name
Here is the function:
def get_index_data(index_name: str = '', start_date: str = '', end_date: str = '', freq: str = 'daily') -> pd.DataFrame:
"""
This function retrieves daily, weekly, or monthly data for a given stock index.
Arguments:
- index_name: Name of the index
- start_date: Start date in 'YYYYMMDD'
- end_date: End date in 'YYYYMMDD'
- freq: Frequency 'daily', 'weekly', or 'monthly'
Returns:
A DataFrame containing the following columns:
    trade_date, ts_code, close, open, high, low, pre_close: Previous day's closing price, change (price change), pct_chg (percent change), vol (trading volume), amount (turnover), name: Index Name
"""
df1 = pro.index_basic(**{
"ts_code": "",
"market": "",
"publisher": "",
"category": "",
"name": index_name,
"limit": "",
"offset": ""
}, fields=[
"ts_code",
"name",
])
index_code = df1["ts_code"][0]
print(f'index_code for index {index_name} is {index_code}')
#
if freq == 'daily':
df = pro.index_daily(**{
"ts_code": index_code,
"trade_date": '',
"start_date": start_date,
"end_date": end_date,
"limit": "",
"offset": ""
}, fields=[
"trade_date",
"ts_code",
"close",
"open",
"high",
"low",
"pre_close",
"change",
"pct_chg",
"vol",
"amount"
])
elif freq == 'weekly':
df = pro.index_weekly(**{
"ts_code": index_code,
"trade_date": '',
"start_date": start_date,
"end_date": end_date,
"limit": "",
"offset": ""
}, fields=[
"trade_date",
"ts_code",
"close",
"open",
"high",
"low",
"pre_close",
"change",
"pct_chg",
"vol",
"amount"
])
elif freq == 'monthly':
df = pro.index_monthly(**{
"ts_code": index_code,
"trade_date": '',
"start_date": start_date,
"end_date": end_date,
"limit": "",
"offset": ""
}, fields=[
"trade_date",
"ts_code",
"close",
"open",
"high",
"low",
"pre_close",
"change",
"pct_chg",
"vol",
"amount"
])
df = df.sort_values(by='trade_date', ascending=True) #
df['index_name'] = index_name
    return df | This function retrieves daily, weekly, or monthly data for a given stock index. Arguments: - index_name: Name of the index - start_date: Start date in 'YYYYMMDD' - end_date: End date in 'YYYYMMDD' - freq: Frequency 'daily', 'weekly', or 'monthly' Returns: A DataFrame containing the following columns: trade_date, ts_code, close, open, high, low, pre_close: Previous day's closing price, change (price change), pct_chg (percent change), vol (trading volume), amount (turnover), name: Index Name |
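A usage sketch (the index name is illustrative and must match a name known to pro.index_basic; a tushare token is assumed):
sse = get_index_data(index_name='上证指数', start_date='20230101', end_date='20230331', freq='daily')
print(sse[['trade_date', 'close', 'pct_chg']].tail())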
1,102 | import tushare as ts
import matplotlib.pyplot as plt
import pandas as pd
import os
import random
from matplotlib.ticker import MaxNLocator
import time
from datetime import datetime, timedelta
import numpy as np
import mplfinance as mpf
from typing import Optional
import matplotlib.font_manager as fm
from matplotlib.lines import Line2D
from typing import Union, Any
from sklearn.linear_model import LinearRegression
pro = ts.pro_api(tushare_token)
def get_north_south_money(start_date: str = '', end_date: str = '', trade_date: str = '') -> pd.DataFrame:
    # Money flow between mainland China and Hong Kong markets via Stock Connect.
    # trade_date: trading date
    # ggt_ss: Hong Kong Stock Connect (Shanghai)
    # ggt_sz: Hong Kong Stock Connect (Shenzhen)
    # hgt: Shanghai-Hong Kong Stock Connect northbound (100 million CNY)
    # sgt: Shenzhen-Hong Kong Stock Connect northbound (100 million CNY)
    # north_money: northbound capital (100 million CNY) = hgt + sgt
    # south_money: southbound capital (100 million CNY) = ggt_ss + ggt_sz
    # stock_name: fixed to 'A-H', representing A-shares and H-shares
    # accumulate_north_money: cumulative northbound capital inflow
    # accumulate_south_money: cumulative southbound capital inflow
month_df = pro.moneyflow_hsgt(**{
"trade_date": trade_date,
"start_date": start_date,
"end_date": end_date,
"limit": "",
"offset": ""
}, fields=[
"trade_date",
"ggt_ss",
"ggt_sz",
"hgt",
"sgt",
"north_money",
"south_money"
])
month_df[['ggt_ss','ggt_sz','hgt','sgt','north_money','south_money']] = month_df[['ggt_ss','ggt_sz','hgt','sgt','north_money','south_money']]/100.0
month_df = month_df.sort_values(by='trade_date', ascending=True) #
month_df['stock_name'] = 'A-H'
month_df['accumulate_north_money'] = month_df['north_money'].cumsum()
month_df['accumulate_south_money'] = month_df['south_money'].cumsum()
return month_df | null |
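A usage sketch for the Stock Connect money-flow helper above (the dates are illustrative; a tushare token is assumed):
flows = get_north_south_money(start_date='20230101', end_date='20230331')
print(flows[['trade_date', 'north_money', 'accumulate_north_money']].tail())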
1,103 | import tushare as ts
import matplotlib.pyplot as plt
import pandas as pd
import os
import random
from matplotlib.ticker import MaxNLocator
import time
from datetime import datetime, timedelta
import numpy as np
import mplfinance as mpf
from typing import Optional
import matplotlib.font_manager as fm
from matplotlib.lines import Line2D
from typing import Union, Any
from sklearn.linear_model import LinearRegression
The provided code snippet includes necessary dependencies for implementing the `plot_k_line` function. Write a Python function `def plot_k_line(stock_data: pd.DataFrame, title: str = '') -> None` to solve the following problem:
Plots a K-line chart of stock price and volume. Args: stock_data : A pandas DataFrame containing the stock price information, in which each row represents a daily record. The DataFrame must contain the 'trade_date','open', 'close', 'high', 'low','volume', 'name' columns, which is used for k-line and volume. If the DataFrame also contains columns such as 'macd', 'kdj', 'rsi', 'cci', 'boll', 'pe_ttm' or 'turnover_rate', subplots of these indicators are drawn below the K-line chart. title : The title of the K-line chart. Returns: None
Here is the function:
def plot_k_line(stock_data: pd.DataFrame, title: str = '') -> None:
"""
Plots a K-line chart of stock price and volume.
Args:
stock_data : A pandas DataFrame containing the stock price information, in which each row
represents a daily record. The DataFrame must contain the 'trade_date','open', 'close', 'high', 'low','volume', 'name' columns, which is used for k-line and volume.
        If the DataFrame also contains columns such as 'macd', 'kdj', 'rsi', 'cci', 'boll', 'pe_ttm' or 'turnover_rate', subplots of these indicators are drawn below the K-line chart.
title : The title of the K-line chart.
Returns:
None
"""
#
stock_data['trade_date'] = pd.to_datetime(stock_data['trade_date'], format='%Y%m%d')
stock_data.set_index('trade_date', inplace=True)
#
custom_style = mpf.make_marketcolors(up='r', down='k', inherit=True)
china_style = mpf.make_mpf_style(marketcolors=custom_style)
# MACD
# stock_data['macd1'] = stock_data['Close'].ewm(span=12).mean() - stock_data['Close'].ewm(span=26).mean()
# stock_data['macd_signal1'] = stock_data['macd'].ewm(span=9).mean()
#
#mpf.plot(stock_data, type='candle', volume=True, title=title, mav=(5, 10, 20), style = china_style, addplot = macd)
add_plot = []
    # Indicator columns (if any) are located after the 'stock_name' column.
    # Retrieve the column names that follow 'stock_name'.
index_list = stock_data.columns[stock_data.columns.get_loc('stock_name')+1:]
index_df = stock_data[index_list]
color_list = ['green','blue','red','yellow','black','purple','orange','pink','brown','gray']
custom_lines = []
for i in range(len(index_list)):
# If the column names contain 'boll', set panel to 0. Otherwise, set panel to 2.
if 'boll' in index_list[i]:
sub_plot = mpf.make_addplot(index_df[index_list[i]], panel=0, ylabel=index_list[i], color=color_list[i], type='line', secondary_y=True)
elif index_list[i] =='macd':
sub_plot = mpf.make_addplot(index_df[index_list[i]], panel=2, ylabel=index_list[i], color=color_list[i], type='bar', secondary_y=False)
else:
sub_plot = mpf.make_addplot(index_df[index_list[i]], panel=2, ylabel=index_list[i], color=color_list[i], type='line', secondary_y=False)
custom_line = Line2D([0], [0], color=color_list[i], lw=1, linestyle='dashed')
add_plot.append(sub_plot)
custom_lines.append(custom_line)
mav_colors = ['red', 'green', 'blue']
fig, axes = mpf.plot(stock_data, type='candle', volume=True, title=title, mav=(5, 10, 20), mavcolors=mav_colors, style=china_style, addplot=add_plot, returnfig=True)
mav_labels = ['5-day MA', '10-day MA', '20-day MA']
#
legend_lines = [plt.Line2D([0], [0], color=color, lw=2) for color in mav_colors]
#
axes[0].legend(legend_lines, mav_labels)
if len(index_list) ==1:
label = index_list[0]
elif len(index_list) > 1:
label_list = [i.split('_')[0] for i in index_list]
#
label = list(set(label_list))[0]
if len(index_list) >= 1:
if 'boll' in label:
axes[0].legend(custom_lines, index_list, loc='lower right')
elif len(index_list) > 1:
axes[-2].set_ylabel(label)
axes[-2].legend(custom_lines, index_list, loc='lower right')
#
fig.set_size_inches(20, 16)
#
for ax in axes:
ax.grid(True)
#fig.show()
    return axes | Plots a K-line chart of stock price and volume. Args: stock_data : A pandas DataFrame containing the stock price information, in which each row represents a daily record. The DataFrame must contain the 'trade_date','open', 'close', 'high', 'low','volume', 'name' columns, which is used for k-line and volume. If the DataFrame also contains columns such as 'macd', 'kdj', 'rsi', 'cci', 'boll', 'pe_ttm' or 'turnover_rate', subplots of these indicators are drawn below the K-line chart. title : The title of the K-line chart. Returns: None |
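A hedged usage sketch (get_stock_prices_data is the price helper shown at the top of this section; the stock name and dates are illustrative). plot_k_line expects 'volume' and 'stock_name' columns, so the raw columns are renamed and selected first:
prices = get_stock_prices_data('贵州茅台', '20230101', '20230331', 'daily')
prices = prices.rename(columns={'vol': 'volume'})
cols = ['trade_date', 'open', 'high', 'low', 'close', 'volume', 'stock_name']
axes = plot_k_line(prices[cols], title='贵州茅台 2023Q1')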
1,104 | import tushare as ts
import matplotlib.pyplot as plt
import pandas as pd
import os
import random
from matplotlib.ticker import MaxNLocator
import time
from datetime import datetime, timedelta
import numpy as np
import mplfinance as mpf
from typing import Optional
import matplotlib.font_manager as fm
from matplotlib.lines import Line2D
from typing import Union, Any
from sklearn.linear_model import LinearRegression
pro = ts.pro_api(tushare_token)
The provided code snippet includes necessary dependencies for implementing the `query_fund_info` function. Write a Python function `def query_fund_info(fund_code: str = '') -> pd.DataFrame` to solve the following problem:
Retrieves information about a fund based on the fund code. Args: fund_code (str, optional): Fund code. Defaults to ''. Returns: df (DataFrame): A DataFrame containing various information about the fund, including fund code, fund name, management company, custodian company, investment type, establishment date, maturity date, listing date, issuance date, delisting date, issue amount, management fee, custodian fee, fund duration, face value, minimum investment amount, benchmark, fund status, investment style, fund type, start date for daily purchases, start date for daily redemptions, and market type. The column 'ts_code' is renamed to 'fund_code', and 'name' is renamed to 'fund_name' in the DataFrame.
Here is the function:
def query_fund_info(fund_code: str = '') -> pd.DataFrame:
    # Fields returned by pro.fund_basic:
    # fund_code: fund code, fund_name: short name, management: fund manager, custodian: custodian, fund_type: investment type,
    # found_date: establishment date, due_date: maturity date, list_date: listing date, issue_date: issuance date, delist_date: delisting date,
    # issue_amount: issue amount (100 million), m_fee: management fee, c_fee: custodian fee,
    # duration_year: fund duration (years), p_value: face value, min_amount: minimum investment (10,000 CNY), benchmark: performance benchmark,
    # status: fund status (D delisted, I issuing, L listed), invest_type: investment style, type: fund type,
    # purc_startdate: start date for daily purchases, redm_startdate: start date for daily redemptions, market: E exchange-traded, O over-the-counter
"""
Retrieves information about a fund based on the fund code.
Args:
fund_code (str, optional): Fund code. Defaults to ''.
Returns:
df (DataFrame): A DataFrame containing various information about the fund, including fund code, fund name,
management company, custodian company, investment type, establishment date, maturity date,
listing date, issuance date, delisting date, issue amount, management fee, custodian fee,
fund duration, face value, minimum investment amount, benchmark, fund status, investment style,
fund type, start date for daily purchases, start date for daily redemptions, and market type.
The column 'ts_code' is renamed to 'fund_code', and 'name' is renamed to 'fund_name' in the DataFrame.
"""
df = pro.fund_basic(**{
"ts_code": fund_code,
"market": "",
"update_flag": "",
"offset": "",
"limit": "",
"status": "",
"name": ""
}, fields=[
"ts_code",
"name",
"management",
"custodian",
"fund_type",
"found_date",
"due_date",
"list_date",
"issue_date",
"delist_date",
"issue_amount",
"m_fee",
"c_fee",
"duration_year",
"p_value",
"min_amount",
"benchmark",
"status",
"invest_type",
"type",
"purc_startdate",
"redm_startdate",
"market"
])
#
df.rename(columns={'ts_code': 'fund_code'}, inplace=True)
df.rename(columns={'name': 'fund_name'}, inplace=True)
return df | Retrieves information about a fund based on the fund code. Args: fund_code (str, optional): Fund code. Defaults to ''. Returns: df (DataFrame): A DataFrame containing various information about the fund, including fund code, fund name, management company, custodian company, investment type, establishment date, maturity date, listing date, issuance date, delisting date, issue amount, management fee, custodian fee, fund duration, face value, minimum investment amount, benchmark, fund status, investment style, fund type, start date for daily purchases, start date for daily redemptions, and market type. The column 'ts_code' is renamed to 'fund_code', and 'name' is renamed to 'fund_name' in the DataFrame. |
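A usage sketch (the fund code is illustrative; a tushare token is assumed):
info = query_fund_info(fund_code='110011.OF')
print(info[['fund_code', 'fund_name', 'management', 'fund_type']])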
1,105 | import tushare as ts
import matplotlib.pyplot as plt
import pandas as pd
import os
import random
from matplotlib.ticker import MaxNLocator
import time
from datetime import datetime, timedelta
import numpy as np
import mplfinance as mpf
from typing import Optional
import matplotlib.font_manager as fm
from matplotlib.lines import Line2D
from typing import Union, Any
from sklearn.linear_model import LinearRegression
The provided code snippet includes necessary dependencies for implementing the `print_save_table` function. Write a Python function `def print_save_table(df: pd.DataFrame, title_name: str, save:bool = False ,file_path: str = './output/') -> None` to solve the following problem:
It prints the dataframe as a formatted table using the PrettyTable library and saves it to a CSV file at the specified file path. Args: - df: the dataframe to be printed and saved to a CSV file - title_name: the name of the table to be printed and saved - save: whether to save the table to a CSV file - file_path: the file path where the CSV file should be saved. Returns: None
Here is the function:
def print_save_table(df: pd.DataFrame, title_name: str, save:bool = False ,file_path: str = './output/') -> None:
"""
It prints the dataframe as a formatted table using the PrettyTable library and saves it to a CSV file at the specified file path.
Args:
- df: the dataframe to be printed and saved to a CSV file
- title_name: the name of the table to be printed and saved
- save: whether to save the table to a CSV file
- file_path: the file path where the CSV file should be saved.
Returns: None
"""
    # Optional: render the table with PrettyTable (kept for reference below); table.max_width = 20
# table = PrettyTable(df.columns.tolist())
# table.align = 'l'
# table.max_width = 40
#
# #
# for row in df.itertuples(index=False):
# table.add_row(row)
#print(table)
if not os.path.exists(file_path):
os.makedirs(file_path)
if file_path is not None and save == True:
file_path = file_path + title_name + '.csv'
df.to_csv(file_path, index=False)
return df | It prints the dataframe as a formatted table using the PrettyTable library and saves it to a CSV file at the specified file path. Args: - df: the dataframe to be printed and saved to a CSV file - title_name: the name of the table to be printed and saved - save: whether to save the table to a CSV file - file_path: the file path where the CSV file should be saved. Returns: None |
1,106 | import tushare as ts
import matplotlib.pyplot as plt
import pandas as pd
import os
import random
from matplotlib.ticker import MaxNLocator
import time
from datetime import datetime, timedelta
import numpy as np
import mplfinance as mpf
from typing import Optional
import matplotlib.font_manager as fm
from matplotlib.lines import Line2D
from typing import Union, Any
from sklearn.linear_model import LinearRegression
The provided code snippet includes necessary dependencies for implementing the `merge_indicator_for_same_stock` function. Write a Python function `def merge_indicator_for_same_stock(df1: pd.DataFrame, df2: pd.DataFrame) -> pd.DataFrame` to solve the following problem:
Merges two DataFrames (two indicators of the same stock) based on common names for same stock. Data from two different stocks cannot be merged Args: df1: DataFrame contains some indicators for stock A. df2: DataFrame contains other indicators for stock A. Returns: pd.DataFrame: The merged DataFrame contains two different indicators.
Here is the function:
def merge_indicator_for_same_stock(df1: pd.DataFrame, df2: pd.DataFrame) -> pd.DataFrame:
"""
Merges two DataFrames (two indicators of the same stock) based on common names for same stock. Data from two different stocks cannot be merged
Args:
df1: DataFrame contains some indicators for stock A.
df2: DataFrame contains other indicators for stock A.
Returns:
pd.DataFrame: The merged DataFrame contains two different indicators.
"""
if len(set(df1.columns).intersection(set(df2.columns))) > 0:
# If there are identical column names, merge the two DataFrames based on the matching column names.
#
common_cols = list(set(df1.columns).intersection(set(df2.columns)))
#
df = pd.merge(df1, df2, on=common_cols)
return df
else:
#
raise ValueError('The two dataframes have no columns in common.') | Merges two DataFrames (two indicators of the same stock) based on common names for same stock. Data from two different stocks cannot be merged Args: df1: DataFrame contains some indicators for stock A. df2: DataFrame contains other indicators for stock A. Returns: pd.DataFrame: The merged DataFrame contains two different indicators. |
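A brief sketch, assuming `price_df` and `turnover_df` are two indicator DataFrames for the same stock that share key columns such as 'trade_date' and 'stock_name' (both frame names are hypothetical):
combined = merge_indicator_for_same_stock(price_df, turnover_df)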
1,107 | import tushare as ts
import matplotlib.pyplot as plt
import pandas as pd
import os
import random
from matplotlib.ticker import MaxNLocator
import time
from datetime import datetime, timedelta
import numpy as np
import mplfinance as mpf
from typing import Optional
import matplotlib.font_manager as fm
from matplotlib.lines import Line2D
from typing import Union, Any
from sklearn.linear_model import LinearRegression
The provided code snippet includes necessary dependencies for implementing the `select_value_by_column` function. Write a Python function `def select_value_by_column(df1:pd.DataFrame, col_name: str = '', row_index: int = -1) -> Union[pd.DataFrame, Any]` to solve the following problem:
Selects a specific column or a specific value within a DataFrame. Args: df1: The input DataFrame. col_name: The name of the column to be selected. row_index: The index of the row to be selected. Returns: Union[pd.DataFrame, Any]. row_index=-1: df1[col_name].to_frame() or df1[col_name][row_index]
Here is the function:
def select_value_by_column(df1:pd.DataFrame, col_name: str = '', row_index: int = -1) -> Union[pd.DataFrame, Any]:
"""
Selects a specific column or a specific value within a DataFrame.
Args:
df1: The input DataFrame.
col_name: The name of the column to be selected.
row_index: The index of the row to be selected.
Returns:
Union[pd.DataFrame, Any]. row_index=-1: df1[col_name].to_frame() or df1[col_name][row_index]
"""
if row_index == -1:
#
return df1[col_name].to_frame()
else:
#
return df1[col_name][row_index] | Selects a specific column or a specific value within a DataFrame. Args: df1: The input DataFrame. col_name: The name of the column to be selected. row_index: The index of the row to be selected. Returns: Union[pd.DataFrame, Any]. row_index=-1: df1[col_name].to_frame() or df1[col_name][row_index] |
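A short sketch, assuming `prices` is a price DataFrame with a default integer index (the frame name is hypothetical):
close_col = select_value_by_column(prices, col_name='close', row_index=-1)   # whole column as a one-column DataFrame
first_close = select_value_by_column(prices, col_name='close', row_index=0)  # single value at row 0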
1,108 | import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from io import BytesIO
from main import run, add_to_queue,gradio_interface
import io
import sys
import time
import os
import pandas as pd
"""
# Hello Data-Copilot ! 😀
        A powerful AI system connecting humans and data.
        The current version only supports Chinese financial data; support for data from other countries will be added in the future.
""")
if not OPENAI_KEY:
with gr.Row().style():
with gr.Column(scale=0.9):
gr.Markdown(
"""
You can use gpt35 from openai or from openai-azure.
""")
openai_api_key = gr.Textbox(
show_label=False,
placeholder="Set your OpenAI API key here and press Submit (e.g. sk-xxx)",
lines=1,
type="password"
).style(container=False)
with gr.Row():
openai_api_key_azure = gr.Textbox(
show_label=False,
placeholder="Set your Azure-OpenAI key",
lines=1,
type="password"
).style(container=False)
openai_api_base_azure = gr.Textbox(
show_label=False,
placeholder="Azure-OpenAI api_base (e.g. https://zwq0525.openai.azure.com)",
lines=1,
type="password"
).style(container=False)
openai_api_engine_azure = gr.Textbox(
show_label=False,
placeholder="Azure-OpenAI engine here (e.g. gpt35)",
lines=1,
type="password"
).style(container=False)
gr.Markdown(
"""
            It is recommended to use the paid OpenAI API or the Azure-OpenAI service, because the free OpenAI API is rate-limited (3 requests per minute, very slow).
""")
with gr.Column(scale=0.1, min_width=0):
btn1 = gr.Button("OK").style(height= '100px')
with gr.Row():
with gr.Column(scale=0.9):
input_text = gr.inputs.Textbox(lines=1, placeholder='Please input your problem...', label='what do you want to find?')
with gr.Column(scale=0.1, min_width=0):
start_btn = gr.Button("Start").style(full_height=True)
# end_btn = gr.Button("Stop").style(full_height=True)
gr.Markdown(
"""
# Try these examples ➡️➡️
""")
with gr.Row():
example_selector1 = gr.Dropdown(choices=example_stock, interactive=True,
label="查股票 Query stock:", show_label=True)
example_selector2 = gr.Dropdown(choices=example_economic, interactive=True,
label="查经济 Query Economy:", show_label=True)
example_selector3 = gr.Dropdown(choices=example_company, interactive=True,
label="查公司 Query Company:", show_label=True)
example_selector4 = gr.Dropdown(choices=example_fund, interactive=True,
label="查基金 Query Fund:", show_label=True)
# def stop(state):
# print('Stop signal received!')
# state["client"].stop = True
with gr.Row():
with gr.Column(scale=0.3, min_width="500px", max_width="500px", min_height="500px", max_height="500px"):
Res = gr.Textbox(label="Summary and Result: ")
with gr.Column(scale=0.7, min_width="500px", max_width="500px", min_height="500px", max_height="500px"):
solving_step = gr.Textbox(label="Solving Step: ", lines=5)
img = gr.outputs.Image(type='numpy')
df = gr.outputs.Dataframe(type='pandas')
with gr.Row():
gr.Markdown(
"""
[Tushare](https://tushare.pro/) provides financial data support for our Data-Copilot.
[OpenAI](https://openai.com/) provides the powerful Chatgpt model for our Data-Copilot.
""")
outputs = [solving_step ,img, Res, df]
    # Wire up the change events: selecting an example fills the input textbox
example_selector1.change(fn = change_textbox, inputs = example_selector1, outputs = input_text)
example_selector2.change(fn = change_textbox, inputs = example_selector2, outputs = input_text)
example_selector3.change(fn = change_textbox, inputs = example_selector3, outputs = input_text)
example_selector4.change(fn = change_textbox, inputs = example_selector4, outputs = input_text)
if not OPENAI_KEY:
openai_api_key.submit(set_key, [state, openai_api_key, openai_api_key_azure,openai_api_base_azure, openai_api_engine_azure], [openai_api_key, openai_api_key_azure,openai_api_base_azure, openai_api_engine_azure])
btn1.click(set_key, [state, openai_api_key, openai_api_key_azure,openai_api_base_azure, openai_api_engine_azure], [openai_api_key,openai_api_key_azure, openai_api_base_azure, openai_api_engine_azure])
start_btn.click(fn = run, inputs = [state, input_text], outputs=outputs)
# end_btn.click(stop, state)
def change_textbox(query):
    # Update the output textbox according to the selected example
return gr.update(lines=2, visible=True, value=query) | null |
1,109 | import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from io import BytesIO
from main import run, add_to_queue,gradio_interface
import io
import sys
import time
import os
import pandas as pd
def set_key(state, openai_api_key,openai_api_key_azure, openai_api_base_azure, openai_api_engine_azure):
return state["client"].set_key(openai_api_key, openai_api_key_azure,openai_api_base_azure, openai_api_engine_azure) | null |
1,110 | import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from io import BytesIO
from main import run, add_to_queue,gradio_interface
import io
import sys
import time
import os
import pandas as pd
def run(state, chatbot):
generator = state["client"].run(chatbot)
for solving_step, img, res, df in generator:
# if state["client"].stop:
# print('Stopping generation')
# break
yield solving_step, img, res, df | null |
1,111 | import json
import requests
import openai
import tiktoken
import os
import time
from functools import wraps
import threading
The provided code snippet includes necessary dependencies for implementing the `retry` function. Write a Python function `def retry(exception_to_check, tries=3, delay=5, backoff=1)` to solve the following problem:
Decorator used to automatically retry a failed function. Parameters: exception_to_check: The type of exception to catch. tries: Maximum number of retry attempts. delay: Waiting time between each retry. backoff: Multiplicative factor to increase the waiting time after each retry.
Here is the function:
def retry(exception_to_check, tries=3, delay=5, backoff=1):
"""
Decorator used to automatically retry a failed function. Parameters:
exception_to_check: The type of exception to catch.
tries: Maximum number of retry attempts.
delay: Waiting time between each retry.
backoff: Multiplicative factor to increase the waiting time after each retry.
"""
def deco_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return f(*args, **kwargs)
except exception_to_check as e:
print(f"{str(e)}, Retrying in {mdelay} seconds...")
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
return f(*args, **kwargs)
return f_retry # true decorator
return deco_retry | Decorator used to automatically retry a failed function. Parameters: exception_to_check: The type of exception to catch. tries: Maximum number of retry attempts. delay: Waiting time between each retry. backoff: Multiplicative factor to increase the waiting time after each retry. |
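A usage sketch of the decorator (fetch_snapshot and the URL are hypothetical stand-ins for any call that may fail transiently; requests is already imported above):
@retry(Exception, tries=3, delay=2, backoff=2)
def fetch_snapshot():
    return requests.get('https://example.com/api').json()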