Code | Summary
---|---|
Please provide a description of the function:def _split_generators(self, dl_manager):
root_url = "http://images.cocodataset.org/"
urls = {
# Train/validation set
"train_images": "zips/train2014.zip",
"val_images": "zips/val2014.zip",
"trainval_annotations": "annotations/annotations_trainval2014.zip",
# Testing set (no annotations) (2014)
"test_images": "zips/test2014.zip",
"test_annotations": "annotations/image_info_test2014.zip",
# Testing set (no annotations) (2015)
"test2015_images": "zips/test2015.zip",
"test2015_annotations": "annotations/image_info_test2015.zip",
}
extracted_paths = dl_manager.download_and_extract({
key: root_url + url for key, url in urls.items()
})
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
num_shards=10,
gen_kwargs=dict(
image_dir=extracted_paths["train_images"],
annotation_dir=extracted_paths["trainval_annotations"],
split_type="train2014",
)),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
num_shards=10,
gen_kwargs=dict(
image_dir=extracted_paths["val_images"],
annotation_dir=extracted_paths["trainval_annotations"],
split_type="val2014",
)),
# Warning: Testing split only contains the images without any annotation
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
num_shards=10,
gen_kwargs=dict(
image_dir=extracted_paths["test_images"],
annotation_dir=extracted_paths["test_annotations"],
split_type="test2014",
has_annotation=False,
)),
tfds.core.SplitGenerator(
name="test2015",
num_shards=10,
gen_kwargs=dict(
image_dir=extracted_paths["test2015_images"],
annotation_dir=extracted_paths["test2015_annotations"],
split_type="test2015",
has_annotation=False,
)),
]
|
[
"Returns SplitGenerators."
] |
Please provide a description of the function:def _generate_examples(
self, image_dir, annotation_dir, split_type, has_annotation=True):
if has_annotation:
instance_filename = "instances_{}.json"
else:
instance_filename = "image_info_{}.json"
# Load the label names and images
instance_path = os.path.join(
annotation_dir,
"annotations",
instance_filename.format(split_type),
)
coco_annotation = CocoAnnotation(instance_path)
# Each category is a dict:
# {
# 'id': 51, # From 1-91, some entry missing
# 'name': 'bowl',
# 'supercategory': 'kitchen',
# }
categories = coco_annotation.categories
# Each image is a dict:
# {
# 'id': 262145,
# 'file_name': 'COCO_train2014_000000262145.jpg'
# 'flickr_url': 'http://farm8.staticflickr.com/7187/xyz.jpg',
# 'coco_url': 'http://images.cocodataset.org/train2014/xyz.jpg',
# 'license': 2,
# 'date_captured': '2013-11-20 02:07:55',
# 'height': 427,
# 'width': 640,
# }
images = coco_annotation.images
# TODO(b/121375022): ClassLabel names should also contain 'id' and
# 'supercategory' (in addition to 'name')
# Warning: As COCO only uses 80 out of the 91 labels, the c['id'] and
# dataset label ids won't match.
self.info.features["objects"]["label"].names = [
c["name"] for c in categories
]
# TODO(b/121375022): Conversion should be done by ClassLabel
categories_id2name = {c["id"]: c["name"] for c in categories}
# Iterate over all images
annotation_skipped = 0
for image_info in sorted(images, key=lambda x: x["id"]):
if has_annotation:
# Each instance annotation is a dict:
# {
# 'iscrowd': 0,
# 'bbox': [116.95, 305.86, 285.3, 266.03],
# 'image_id': 480023,
# 'segmentation': [[312.29, 562.89, 402.25, ...]],
# 'category_id': 58,
# 'area': 54652.9556,
# 'id': 86,
# }
instances = coco_annotation.get_annotations(img_id=image_info["id"])
else:
instances = [] # No annotations
if not instances:
annotation_skipped += 1
def build_bbox(x, y, width, height):
# pylint: disable=cell-var-from-loop
# build_bbox is only used within the loop so it is ok to use image_info
return tfds.features.BBox(
ymin=y / image_info["height"],
xmin=x / image_info["width"],
ymax=(y + height) / image_info["height"],
xmax=(x + width) / image_info["width"],
)
# pylint: enable=cell-var-from-loop
yield {
"image": os.path.join(image_dir, split_type, image_info["file_name"]),
"image/filename": image_info["file_name"],
"objects": [{
"bbox": build_bbox(*instance_info["bbox"]),
"label": categories_id2name[instance_info["category_id"]],
"is_crowd": bool(instance_info["iscrowd"]),
} for instance_info in instances],
}
logging.info(
"%d/%d images do not contains any annotations",
annotation_skipped,
len(images),
)
|
[
"Generate examples as dicts.\n\n Args:\n image_dir: `str`, directory containing the images\n annotation_dir: `str`, directory containing\n split_type: `str`, <split_name><year> (ex: train2014)\n has_annotation: `bool`, when False (for the testing set), the annotations\n are not recorded\n\n Yields:\n Generator yielding the next samples\n "
] |
Please provide a description of the function:def str2ints(self, str_value):
if not self._encoder:
raise ValueError(
"Text.str2ints is not available because encoder hasn't been defined.")
return self._encoder.encode(str_value)
|
[
"Conversion string => encoded list[int]."
] |
Please provide a description of the function:def ints2str(self, int_values):
if not self._encoder:
raise ValueError(
"Text.ints2str is not available because encoder hasn't been defined.")
return self._encoder.decode(int_values)
|
[
"Conversion list[int] => decoded string."
] |
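Both helpers simply delegate to the underlying encoder's encode/decode pair. A minimal round-trip sketch, assuming the older `tfds.features.text` API that this code targets (recent TFDS releases moved it under `tfds.deprecated.text`); the corpus here is made up:
import tensorflow_datasets as tfds
corpus = ["the quick brown fox", "jumps over the lazy dog"]
encoder = tfds.features.text.SubwordTextEncoder.build_from_corpus(
    corpus_generator=iter(corpus), target_vocab_size=2**8)
ids = encoder.encode("the lazy fox")   # what Text.str2ints returns
text = encoder.decode(ids)             # what Text.ints2str returns
assert text == "the lazy fox"          # subword encoding is invertible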
Please provide a description of the function:def maybe_build_from_corpus(self, corpus_generator, **kwargs):
if self._encoder_cls is not text_lib.SubwordTextEncoder:
return
if self.encoder:
return
vocab_size = self._encoder_config.vocab_size
self.encoder = text_lib.SubwordTextEncoder.build_from_corpus(
corpus_generator=corpus_generator,
target_vocab_size=vocab_size,
**kwargs)
|
[
"Call SubwordTextEncoder.build_from_corpus is encoder_cls is such."
] |
Please provide a description of the function:def sharded_filenames(filename_prefix, num_shards):
shard_suffix = "%05d-of-%05d"
return [
"%s-%s" % (filename_prefix, shard_suffix % (i, num_shards))
for i in range(num_shards)
]
|
[
"Sharded filenames given prefix and number of shards."
] |
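This is pure string formatting, so a quick usage sketch shows the expected output directly (the prefix is made up):
sharded_filenames("mnist-train.tfrecord", 3)
# ['mnist-train.tfrecord-00000-of-00003',
#  'mnist-train.tfrecord-00001-of-00003',
#  'mnist-train.tfrecord-00002-of-00003']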
Please provide a description of the function:def _walk_omniglot_dir(directory):
directory = os.path.join(directory, tf.io.gfile.listdir(directory)[0])
alphabets = sorted(tf.io.gfile.listdir(directory))
for alphabet in alphabets:
alphabet_dir = os.path.join(directory, alphabet)
characters = sorted(tf.io.gfile.listdir(alphabet_dir))
for character in characters:
character_id = int(character[len("character"):]) - 1
character_dir = os.path.join(alphabet_dir, character)
images = tf.io.gfile.listdir(character_dir)
for image in images:
label, _ = image.split("_")
label = int(label) - 1
image_path = os.path.join(character_dir, image)
yield alphabet, character_id, label, image_path
|
[
"Walk an Omniglot directory and yield examples."
] |
Please provide a description of the function:def _get_names(dirs):
alphabets = set()
label_names = {}
for d in dirs:
for example in _walk_omniglot_dir(d):
alphabet, alphabet_char_id, label, _ = example
alphabets.add(alphabet)
label_name = "%s_%d" % (alphabet, alphabet_char_id)
if label in label_names:
assert label_names[label] == label_name
else:
label_names[label] = label_name
label_names = [label_names[k] for k in sorted(label_names)]
return alphabets, label_names
|
[
"Get alphabet and label names, union across all dirs."
] |
Please provide a description of the function:def size_str(size_in_bytes):
if not size_in_bytes:
return "?? GiB"
size_in_bytes = float(size_in_bytes)
for (name, size_bytes) in _NAME_LIST:
value = size_in_bytes / size_bytes
if value >= 1.0:
return "{:.2f} {}".format(value, name)
return "{} {}".format(int(size_in_bytes), "bytes")
|
[
"Returns a human readable size string.\n\n If size_in_bytes is None, then returns \"?? GiB\".\n\n For example `size_str(1.5 * tfds.units.GiB) == \"1.50 GiB\"`.\n\n Args:\n size_in_bytes: `int` or `None`, the size, in bytes, that we want to\n format as a human-readable size string.\n "
] |
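_NAME_LIST is defined elsewhere in the module; assuming it is ordered from the largest unit down, e.g. [('GiB', 2**30), ('MiB', 2**20), ('KiB', 2**10)], the behaviour would be:
size_str(None)          # '?? GiB'
size_str(1.5 * 2**30)   # '1.50 GiB'
size_str(512)           # '512 bytes' (smaller than every unit, falls through)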
Please provide a description of the function:def tqdm(self):
async_tqdm = utils.async_tqdm
with async_tqdm(total=0, desc='Dl Completed...', unit=' url') as pbar_url:
with async_tqdm(total=0, desc='Dl Size...', unit=' MiB') as pbar_dl_size:
self._pbar_url = pbar_url
self._pbar_dl_size = pbar_dl_size
yield
|
[
"Add a progression bar for the current download."
] |
Please provide a description of the function:def download(self, url, destination_path):
self._pbar_url.update_total(1)
future = self._executor.submit(self._sync_download, url, destination_path)
return promise.Promise.resolve(future)
|
[
"Download url to given path.\n\n Returns Promise -> sha256 of downloaded file.\n\n Args:\n url: address of resource to download.\n destination_path: `str`, path to directory where to download the resource.\n\n Returns:\n Promise obj -> (`str`, int): (downloaded object checksum, size in bytes).\n "
] |
Please provide a description of the function:def _sync_kaggle_download(self, kaggle_url, destination_path):
kaggle_file = kaggle.KaggleFile.from_url(kaggle_url)
downloader = self.kaggle_downloader(kaggle_file.competition)
filepath = downloader.download_file(kaggle_file.filename, destination_path)
dl_size = tf.io.gfile.stat(filepath).length
checksum = self._checksumer()
with tf.io.gfile.GFile(filepath, 'rb') as f:
while True:
block = f.read(io.DEFAULT_BUFFER_SIZE)
if not block:
break
checksum.update(block)
return checksum.hexdigest(), dl_size
|
[
"Download with Kaggle API."
] |
Please provide a description of the function:def _get_drive_url(self, url, session):
response = session.get(url, stream=True)
if response.status_code != 200:
raise DownloadError(
'Failed to get url %s. HTTP code: %d.' % (url, response.status_code))
for k, v in response.cookies.items():
if k.startswith('download_warning'):
return url + '&confirm=' + v # v is the confirm token
# No token found, let's try with original URL:
return url
|
[
"Returns url, possibly with confirmation token."
] |
Please provide a description of the function:def _sync_download(self, url, destination_path):
proxies = {
'http': os.environ.get('TFDS_HTTP_PROXY', None),
'https': os.environ.get('TFDS_HTTPS_PROXY', None),
'ftp': os.environ.get('TFDS_FTP_PROXY', None)
}
if kaggle.KaggleFile.is_kaggle_url(url):
if proxies['http']:
os.environ['KAGGLE_PROXY'] = proxies['http']
return self._sync_kaggle_download(url, destination_path)
try:
# If url is on a filesystem that gfile understands, use copy. Otherwise,
# use requests.
if not url.startswith('http'):
return self._sync_file_copy(url, destination_path)
except tf.errors.UnimplementedError:
pass
session = requests.Session()
session.proxies = proxies
if _DRIVE_URL.match(url):
url = self._get_drive_url(url, session)
use_urllib = url.startswith('ftp')
if use_urllib:
if proxies['ftp']:
proxy = urllib.request.ProxyHandler({'ftp': proxies['ftp']})
opener = urllib.request.build_opener(proxy)
urllib.request.install_opener(opener) # pylint: disable=too-many-function-args
request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
else:
response = session.get(url, stream=True)
if response.status_code != 200:
raise DownloadError('Failed to get url %s. HTTP code: %d.' %
(url, response.status_code))
fname = _get_filename(response)
path = os.path.join(destination_path, fname)
size = 0
size_mb = 0
unit_mb = units.MiB
self._pbar_dl_size.update_total(
int(response.headers.get('Content-length', 0)) // unit_mb)
with tf.io.gfile.GFile(path, 'wb') as file_:
checksum = self._checksumer()
if use_urllib:
iterator = iter(lambda: response.read(io.DEFAULT_BUFFER_SIZE), b'')
else:
iterator = response.iter_content(chunk_size=io.DEFAULT_BUFFER_SIZE)
for block in iterator:
size += len(block)
# Update the progress bar
size_mb += len(block)
if size_mb > unit_mb:
self._pbar_dl_size.update(size_mb // unit_mb)
size_mb %= unit_mb
checksum.update(block)
file_.write(block)
self._pbar_url.update(1)
return checksum.hexdigest(), size
|
[
"Synchronous version of `download` method."
] |
Please provide a description of the function:def _resize_image_if_necessary(image_fobj, target_pixels=None):
if target_pixels is None:
return image_fobj
cv2 = tfds.core.lazy_imports.cv2
# Decode image using OpenCV2.
image = cv2.imdecode(
np.fromstring(image_fobj.read(), dtype=np.uint8), flags=3)
# Get image height and width.
height, width, _ = image.shape
actual_pixels = height * width
if actual_pixels > target_pixels:
factor = np.sqrt(target_pixels / actual_pixels)
image = cv2.resize(image, dsize=None, fx=factor, fy=factor)
# Encode the image with quality=72 and store it in a BytesIO object.
_, buff = cv2.imencode(".jpg", image, [int(cv2.IMWRITE_JPEG_QUALITY), 72])
return io.BytesIO(buff.tostring())
|
[
"Resize an image to have (roughly) the given number of target pixels.\n\n Args:\n image_fobj: File object containing the original image.\n target_pixels: If given, number of pixels that the image must have.\n\n Returns:\n A file object.\n "
] |
Please provide a description of the function:def _generate_examples(self, images_dir_path, csv_path=None, csv_usage=None):
if csv_path:
with tf.io.gfile.GFile(csv_path) as csv_f:
reader = csv.DictReader(csv_f)
data = [(row["image"], int(row["level"]))
for row in reader
if csv_usage is None or row["Usage"] == csv_usage]
else:
data = [(fname[:-5], -1)
for fname in tf.io.gfile.listdir(images_dir_path)
if fname.endswith(".jpeg")]
for name, label in data:
yield {
"name": name,
"image": _resize_image_if_necessary(
tf.io.gfile.GFile("%s/%s.jpeg" % (images_dir_path, name),
mode="rb"),
target_pixels=self.builder_config.target_pixels),
"label": label,
}
|
[
"Yields Example instances from given CSV.\n\n Args:\n images_dir_path: path to dir in which images are stored.\n csv_path: optional, path to csv file with two columns: name of image and\n label. If not provided, just scan image directory, don't set labels.\n csv_usage: optional, subset of examples from the csv file to use based on\n the \"Usage\" column from the csv.\n "
] |
Please provide a description of the function:def _slice_split_info_to_instruction_dicts(self, list_sliced_split_info):
instruction_dicts = []
for sliced_split_info in list_sliced_split_info:
mask = splits_lib.slice_to_percent_mask(sliced_split_info.slice_value)
# Compute filenames from the given split
filepaths = list(sorted(self._build_split_filenames(
split_info_list=[sliced_split_info.split_info],
)))
# Compute the offsets
if sliced_split_info.split_info.num_examples:
shard_id2num_examples = splits_lib.get_shard_id2num_examples(
sliced_split_info.split_info.num_shards,
sliced_split_info.split_info.num_examples,
)
mask_offsets = splits_lib.compute_mask_offsets(shard_id2num_examples)
else:
logging.warning(
"Statistics not present in the dataset. TFDS is not able to load "
"the total number of examples, so using the subsplit API may not "
"provide precise subsplits."
)
mask_offsets = [0] * len(filepaths)
for filepath, mask_offset in zip(filepaths, mask_offsets):
instruction_dicts.append({
"filepath": filepath,
"mask": mask,
"mask_offset": mask_offset,
})
return instruction_dicts
|
[
"Return the list of files and reading mask of the files to read."
] |
Please provide a description of the function:def _build_split_filenames(self, split_info_list):
filenames = []
for split_info in split_info_list:
filenames.extend(naming.filepaths_for_dataset_split(
dataset_name=self.name,
split=split_info.name,
num_shards=split_info.num_shards,
data_dir=self._data_dir,
filetype_suffix=self._file_format_adapter.filetype_suffix,
))
return filenames
|
[
"Construct the split filenames associated with the split info.\n\n The filenames correspond to the pre-processed datasets files present in\n the root directory of the dataset.\n\n Args:\n split_info_list: (list[SplitInfo]) List of split from which generate the\n filenames\n\n Returns:\n filenames: (list[str]) The list of filenames path corresponding to the\n split info object\n "
] |
Please provide a description of the function:def _generate_examples(self, data_path):
with tf.io.gfile.GFile(data_path, "rb") as fp:
images = np.load(fp)
images = np.transpose(images, (1, 0, 2, 3))
images = np.expand_dims(images, axis=-1)
for sequence in images:
yield dict(image_sequence=sequence)
|
[
"Generate MovingMnist sequences.\n\n Args:\n data_path (str): Path to the data file\n\n Yields:\n 20 x 64 x 64 x 1 uint8 numpy arrays\n "
] |
Please provide a description of the function:def _parse_single_video(self, example_proto):
context_features = {
"game_duration_loops": tf.io.FixedLenFeature([1], tf.int64),
"game_duration_seconds": tf.io.FixedLenFeature([1], tf.float32),
"n_steps": tf.io.FixedLenFeature([1], tf.int64),
"screen_size": tf.io.FixedLenFeature([2], tf.int64),
}
sequence_features = {
"rgb_screen": tf.io.FixedLenSequenceFeature([], tf.string),
}
_, seq_feat = tf.io.parse_single_sequence_example(
example_proto,
context_features=context_features,
sequence_features=sequence_features)
video_frames = tf.map_fn(
tf.image.decode_png, seq_feat["rgb_screen"], dtype=tf.uint8)
return video_frames
|
[
"Parses single video from the input tfrecords.\n\n Args:\n example_proto: tfExample proto with a single video.\n\n Returns:\n dict with all frames, positions and actions.\n "
] |
Please provide a description of the function:def _generate_examples(self, filepath):
# Simultaneously iterating through the different data sets in the hdf5
# file is >100x slower and the data set is small (26.7MB). Hence, we first
# load everything into memory before yielding the samples.
image_array, class_array, values_array = _load_data(filepath)
for image, classes, values in moves.zip(image_array, class_array,
values_array):
yield dict(
image=np.expand_dims(image, -1),
label_shape=classes[1],
label_scale=classes[2],
label_orientation=classes[3],
label_x_position=classes[4],
label_y_position=classes[5],
value_shape=values[1],
value_scale=values[2],
value_orientation=values[3],
value_x_position=values[4],
value_y_position=values[5])
|
[
"Generates examples for the dSprites data set.\n\n Args:\n filepath: path to the dSprites hdf5 file.\n\n Yields:\n Dictionaries with images, latent classes, and latent values.\n "
] |
Please provide a description of the function:def _split_generators(self, dl_manager):
# Download images and annotations that come in separate archives.
# Note, that the extension of archives is .tar.gz even though the actual
# archives format is uncompressed tar.
dl_paths = dl_manager.download_and_extract({
"images": tfds.download.Resource(
url=os.path.join(_BASE_URL, "images.tar.gz"),
extract_method=tfds.download.ExtractMethod.TAR),
"annotations": tfds.download.Resource(
url=os.path.join(_BASE_URL, "annotations.tar.gz"),
extract_method=tfds.download.ExtractMethod.TAR)
})
images_path_dir = os.path.join(dl_paths["images"], "images")
annotations_path_dir = os.path.join(dl_paths["annotations"], "annotations")
# Setup train and test splits
train_split = tfds.core.SplitGenerator(
name="train",
num_shards=_NUM_SHARDS,
gen_kwargs={
"images_dir_path": images_path_dir,
"images_list_file": os.path.join(annotations_path_dir,
"trainval.txt"),
},
)
test_split = tfds.core.SplitGenerator(
name="test",
num_shards=_NUM_SHARDS,
gen_kwargs={
"images_dir_path": images_path_dir,
"images_list_file": os.path.join(annotations_path_dir,
"test.txt")
},
)
return [train_split, test_split]
|
[
"Returns splits."
] |
Please provide a description of the function:def _load_objects(csv_paths, csv_positions, prefix):
logging.info('Loading CSVs %s from positions %s with prefix %s',
csv_paths, csv_positions, prefix)
objects = collections.defaultdict(list)
for i, labels_path in enumerate(csv_paths):
with tf.io.gfile.GFile(labels_path) as csv_f:
if csv_positions[i] > 0:
csv_f.seek(csv_positions[i])
else:
csv_f.readline() # Drop headers
reader = csv.reader(csv_f)
for image_id, source, label, confidence in reader:
if prefix and image_id[0] != prefix:
break
csv_positions[i] = csv_f.tell()
image_id = int(image_id, 16)
current_obj = _Object(label, int(float(confidence) * 10), source)
objects[image_id].append(current_obj)
return dict(objects)
|
[
"Returns objects listed within given CSV files."
] |
Please provide a description of the function:def _load_bboxes(csv_path, csv_positions, prefix):
logging.info('Loading CSVs %s from positions %s with prefix %s',
csv_path, csv_positions, prefix)
boxes = collections.defaultdict(list)
with tf.io.gfile.GFile(csv_path) as csv_f:
if csv_positions[0] > 0:
csv_f.seek(csv_positions[0])
else:
csv_f.readline() # Drop headers
reader = csv.reader(csv_f)
for (image_id, source, label, confidence, xmin, xmax, ymin, ymax,
is_occluded, is_truncated, is_group_of, is_depiction, is_inside,
) in reader:
if prefix and image_id[0] != prefix:
break
csv_positions[0] = csv_f.tell()
image_id = int(image_id, 16)
del confidence # always 1 in bounding boxes.
current_row = _Bbox(
label, source, tfds.features.BBox(
float(ymin), float(xmin), float(ymax), float(xmax)),
int(is_occluded), int(is_truncated),
int(is_group_of), int(is_depiction), int(is_inside))
boxes[image_id].append(current_row)
return dict(boxes)
|
[
"Returns bounded boxes listed within given CSV file."
] |
Please provide a description of the function:def _split_generators(self, dl_manager):
paths = dl_manager.download_and_extract(_URLS)
# Load labels from CSVs:
def load(names):
csv_positions = [0] * len(names)
return functools.partial(_load_objects, [paths[name] for name in names],
csv_positions)
train_objects = load(['train_human_labels', 'train_machine_labels'])
test_objects = load(['test_human_labels', 'test_machine_labels'])
validation_objects = load(['validation_human_labels',
'validation_machine_labels'])
def load_boxes(name):
csv_positions = [0]
return functools.partial(_load_bboxes, paths[name], csv_positions)
train_bbox = load_boxes('train-annotations-bbox')
test_bbox = load_boxes('test-annotations-bbox')
validation_bbox = load_boxes('validation-annotations-bbox')
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
num_shards=512,
gen_kwargs=dict(archive_paths=paths['train_images'],
objects_getter=train_objects,
bboxes_getter=train_bbox,
prefixes='0123456789abcdef'),
),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
num_shards=36,
gen_kwargs=dict(archive_paths=[paths['test_images']],
objects_getter=test_objects,
bboxes_getter=test_bbox),
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
num_shards=12,
gen_kwargs=dict(archive_paths=[paths['validation_images']],
objects_getter=validation_objects,
bboxes_getter=validation_bbox),
),
]
|
[
"Returns SplitGenerators."
] |
Please provide a description of the function:def _generate_examples(self, archive_paths, objects_getter, bboxes_getter,
prefixes=None):
trainable_classes = set(
self.info.features['objects_trainable']['label'].names)
for i, archive_path in enumerate(archive_paths):
prefix = prefixes[i] if prefixes else None
objects = objects_getter(prefix)
bboxes = bboxes_getter(prefix)
logging.info('Opening archive %s ...', archive_path)
archive = tfds.download.iter_archive(
archive_path, tfds.download.ExtractMethod.TAR_STREAM)
for fpath, fobj in archive:
fname = os.path.basename(fpath)
image_id = int(os.path.splitext(fname)[0], 16)
image_objects = [obj._asdict() for obj in objects.get(image_id, [])]
image_bboxes = [bbox._asdict() for bbox in bboxes.get(image_id, [])]
image_objects_trainable = [
obj for obj in image_objects if obj['label'] in trainable_classes
]
yield {
'image': _resize_image_if_necessary(
fobj, target_pixels=self.builder_config.target_pixels),
'image/filename': fname,
'objects': image_objects,
'objects_trainable': image_objects_trainable,
'bobjects': image_bboxes,
}
|
[
"Yields examples."
] |
Please provide a description of the function:def _generate_examples(self, archive, directory):
reg = re.compile(os.path.join("^%s" % directory, "(?P<label>neg|pos)", ""))
for path, imdb_f in archive:
res = reg.match(path)
if not res:
continue
text = imdb_f.read().strip()
yield {
"text": text,
"label": res.groupdict()["label"],
}
|
[
"Generate IMDB examples."
] |
Please provide a description of the function:def _get_url_hashes(path):
urls = _read_text_file(path)
def url_hash(u):
h = hashlib.sha1()
try:
u = u.encode('utf-8')
except UnicodeDecodeError:
logging.error('Cannot hash url: %s', u)
h.update(u)
return h.hexdigest()
return {url_hash(u): True for u in urls}
|
[
"Get hashes of urls in file."
] |
Please provide a description of the function:def _find_files(dl_paths, publisher, url_dict):
if publisher == 'cnn':
top_dir = os.path.join(dl_paths['cnn_stories'], 'cnn', 'stories')
elif publisher == 'dm':
top_dir = os.path.join(dl_paths['dm_stories'], 'dailymail', 'stories')
else:
logging.fatal('Unsupported publisher: %s', publisher)
files = tf.io.gfile.listdir(top_dir)
ret_files = []
for p in files:
basename = os.path.basename(p)
if basename[0:basename.find('.story')] in url_dict:
ret_files.append(os.path.join(top_dir, p))
return ret_files
|
[
"Find files corresponding to urls."
] |
Please provide a description of the function:def _subset_filenames(dl_paths, split):
assert isinstance(dl_paths, dict), dl_paths
# Get filenames for a split.
if split == tfds.Split.TRAIN:
urls = _get_url_hashes(dl_paths['train_urls'])
elif split == tfds.Split.VALIDATION:
urls = _get_url_hashes(dl_paths['val_urls'])
elif split == tfds.Split.TEST:
urls = _get_url_hashes(dl_paths['test_urls'])
else:
logging.fatal('Unsupported split: %s', split)
cnn = _find_files(dl_paths, 'cnn', urls)
dm = _find_files(dl_paths, 'dm', urls)
return cnn + dm
|
[
"Get filenames for a particular split."
] |
Please provide a description of the function:def _get_art_abs(story_file):
# Based on https://github.com/abisee/cnn-dailymail/blob/master/
# make_datafiles.py
lines = _read_text_file(story_file)
# Lowercase everything
lines = [line.lower() for line in lines]
# Put periods on the ends of lines that are missing them
# (this is a problem in the dataset because many image captions don't end in
# periods; consequently they end up in the body of the article as run-on
# sentences)
def fix_missing_period(line):
if '@highlight' in line: return line
if not line: return line
if line[-1] in END_TOKENS: return line
return line + ' .'
lines = [fix_missing_period(line) for line in lines]
# Separate out article and abstract sentences
article_lines = []
highlights = []
next_is_highlight = False
for line in lines:
if not line:
continue # empty line
elif line.startswith('@highlight'):
next_is_highlight = True
elif next_is_highlight:
highlights.append(line)
else:
article_lines.append(line)
# Make article into a single string
article = ' '.join(article_lines)
# Make abstract into a single string, putting <s> and </s> tags around
# the sentences.
abstract = ' '.join(['%s %s %s' % (SENTENCE_START, sent,
SENTENCE_END) for sent in highlights])
return article, abstract
|
[
"Get abstract (highlights) and article from a story file path.",
"Adds a period to a line that is missing a period."
] |
Please provide a description of the function:def exporter(directory, method, datasets):
if method.lower() == 'json':
# Convert json_dict to a JSON styled string
json_string = json.dumps(datasets, indent=4)
savefile = open('{}/exported.json'.format(directory), 'w+')
savefile.write(json_string)
savefile.close()
if method.lower() == 'csv':
with open('{}/exported.csv'.format(directory), 'w+') as csvfile:
csv_writer = csv.writer(
csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
for key, values in datasets.items():
if values is None:
csv_writer.writerow([key])
else:
csv_writer.writerow([key] + values)
csvfile.close()
|
[
"Export the results."
] |
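A small usage sketch (the directory and dataset names are made up, and the module-level json/csv imports from the original file are assumed); each key becomes one JSON field or one CSV row:
import tempfile
results = {"internal": ["http://example.com/", "http://example.com/about"],
           "secrets": None}
out_dir = tempfile.mkdtemp()
exporter(out_dir, "json", results)   # writes <out_dir>/exported.json
exporter(out_dir, "csv", results)    # writes <out_dir>/exported.csv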
Please provide a description of the function:def time_machine(host, mode):
now = datetime.datetime.now()
to = str(now.year) + str(now.day) + str(now.month)
if now.month > 6:
fro = str(now.year) + str(now.day) + str(now.month - 6)
else:
fro = str(now.year - 1) + str(now.day) + str(now.month + 6)
url = "http://web.archive.org/cdx/search?url=%s&matchType=%s&collapse=urlkey&fl=original&filter=mimetype:text/html&filter=statuscode:200&output=json&from=%s&to=%s" % (host, mode, fro, to)
response = get(url).text
parsed = json.loads(response)[1:]
urls = []
for item in parsed:
urls.append(item[0])
return urls
|
[
"Query archive.org."
] |
Please provide a description of the function:def zap(input_url, archive, domain, host, internal, robots, proxies):
if archive:
print('%s Fetching URLs from archive.org' % run)
if False:
archived_urls = time_machine(domain, 'domain')
else:
archived_urls = time_machine(host, 'host')
print('%s Retrieved %i URLs from archive.org' % (
good, len(archived_urls) - 1))
for url in archived_urls:
verb('Internal page', url)
internal.add(url)
# Makes request to robots.txt
response = requests.get(input_url + '/robots.txt',
proxies=random.choice(proxies)).text
# Making sure robots.txt isn't some fancy 404 page
if '<body' not in response:
# If you know it, you know it
matches = re.findall(r'Allow: (.*)|Disallow: (.*)', response)
if matches:
# Iterating over the matches, match is a tuple here
for match in matches:
# One item in match will always be empty so will combine both
# items
match = ''.join(match)
# If the URL doesn't use a wildcard
if '*' not in match:
url = input_url + match
# Add the URL to internal list for crawling
internal.add(url)
# Add the URL to robots list
robots.add(url)
print('%s URLs retrieved from robots.txt: %s' % (good, len(robots)))
# Makes request to sitemap.xml
response = requests.get(input_url + '/sitemap.xml',
proxies=random.choice(proxies)).text
# Making sure sitemap.xml isn't some fancy 404 page
if '<body' not in response:
matches = xml_parser(response)
if matches: # if there are any matches
print('%s URLs retrieved from sitemap.xml: %s' % (
good, len(matches)))
for match in matches:
verb('Internal page', match)
# Cleaning up the URL and adding it to the internal list for
# crawling
internal.add(match)
|
[
"Extract links from robots.txt and sitemap.xml."
] |
Please provide a description of the function:def requester(
url,
main_url=None,
delay=0,
cook=None,
headers=None,
timeout=10,
host=None,
proxies=[None],
user_agents=[None],
failed=None,
processed=None
):
cook = cook or set()
headers = headers or set()
user_agents = user_agents or ['Photon']
failed = failed or set()
processed = processed or set()
# Mark the URL as crawled
processed.add(url)
# Pause/sleep the program for specified time
time.sleep(delay)
def make_request(url):
final_headers = headers or {
'Host': host,
# Selecting a random user-agent
'User-Agent': random.choice(user_agents),
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip',
'DNT': '1',
'Connection': 'close',
}
try:
response = SESSION.get(
url,
cookies=cook,
headers=final_headers,
verify=False,
timeout=timeout,
stream=True,
proxies=random.choice(proxies)
)
except TooManyRedirects:
return 'dummy'
if 'text/html' in response.headers['content-type'] or \
'text/plain' in response.headers['content-type']:
if response.status_code != '404':
return response.text
else:
response.close()
failed.add(url)
return 'dummy'
else:
response.close()
return 'dummy'
return make_request(url)
|
[
"Handle the requests and return the response body.",
"Default request"
] |
Please provide a description of the function:def intel_extractor(url, response):
for rintel in rintels:
res = re.sub(r'<(script).*?</\1>(?s)', '', response)
res = re.sub(r'<[^<]+?>', '', res)
matches = rintel[0].findall(res)
if matches:
for match in matches:
verb('Intel', match)
bad_intel.add((match, rintel[1], url))
|
[
"Extract intel from the response body."
] |
Please provide a description of the function:def js_extractor(response):
# Extract .js files
matches = rscript.findall(response)
for match in matches:
match = match[2].replace('\'', '').replace('"', '')
verb('JS file', match)
bad_scripts.add(match)
|
[
"Extract js files from the response body"
] |
Please provide a description of the function:def extractor(url):
response = requester(url, main_url, delay, cook, headers, timeout, host, proxies, user_agents, failed, processed)
if clone:
mirror(url, response)
matches = rhref.findall(response)
for link in matches:
# Remove everything after a "#" to deal with in-page anchors
link = link[1].replace('\'', '').replace('"', '').split('#')[0]
# Checks if the URLs should be crawled
if is_link(link, processed, files):
if link[:4] == 'http':
if link.startswith(main_url):
verb('Internal page', link)
internal.add(link)
else:
verb('External page', link)
external.add(link)
elif link[:2] == '//':
if link.split('/')[2].startswith(host):
verb('Internal page', link)
internal.add(schema + '://' + link)
else:
verb('External page', link)
external.add(link)
elif link[:1] == '/':
verb('Internal page', link)
internal.add(remove_file(url) + link)
else:
verb('Internal page', link)
usable_url = remove_file(url)
if usable_url.endswith('/'):
internal.add(usable_url + link)
elif link.startswith('/'):
internal.add(usable_url + link)
else:
internal.add(usable_url + '/' + link)
if not only_urls:
intel_extractor(url, response)
js_extractor(response)
if args.regex and not supress_regex:
regxy(args.regex, response, supress_regex, custom)
if api:
matches = rentropy.findall(response)
for match in matches:
if entropy(match) >= 4:
verb('Key', match)
keys.add(url + ': ' + match)
|
[
"Extract details from the response body."
] |
Please provide a description of the function:def jscanner(url):
response = requester(url, main_url, delay, cook, headers, timeout, host, proxies, user_agents, failed, processed)
# Extract URLs/endpoints
matches = rendpoint.findall(response)
# Iterate over the matches, match is a tuple
for match in matches:
# Combining the items because one of them is always empty
match = match[0] + match[1]
# Making sure it's not some JavaScript code
if not re.search(r'[}{><"\']', match) and not match == '/':
verb('JS endpoint', match)
endpoints.add(match)
|
[
"Extract endpoints from JavaScript code."
] |
Please provide a description of the function:def updater():
print('%s Checking for updates' % run)
# Changes must be separated by ;
changes = '''major bug fixes;removed ninja mode;dropped python < 3.2 support;fixed unicode output;proxy support;more intels'''
latest_commit = requester('https://raw.githubusercontent.com/s0md3v/Photon/master/core/updater.py', host='raw.githubusercontent.com')
# Just a hack to see if a new version is available
if changes not in latest_commit:
changelog = re.search(r"changes = '''(.*?)'''", latest_commit)
# Splitting the changes to form a list
changelog = changelog.group(1).split(';')
print('%s A new version of Photon is available.' % good)
print('%s Changes:' % info)
for change in changelog: # print changes
print('%s>%s %s' % (green, end, change))
current_path = os.getcwd().split('/') # if you know it, you know it
folder = current_path[-1] # current directory name
path = '/'.join(current_path) # current directory path
choice = input('%s Would you like to update? [Y/n] ' % que).lower()
if choice != 'n':
print('%s Updating Photon' % run)
os.system('git clone --quiet https://github.com/s0md3v/Photon %s'
% (folder))
os.system('cp -r %s/%s/* %s && rm -r %s/%s/ 2>/dev/null'
% (path, folder, path, path, folder))
print('%s Update successful!' % good)
else:
print('%s Photon is up to date!' % good)
|
[
"Update the current installation.\n\n git clones the latest version and merges it with the current directory.\n "
] |
Please provide a description of the function:def find_subdomains(domain):
result = set()
response = get('https://findsubdomains.com/subdomains-of/' + domain).text
matches = findall(r'(?s)<div class="domains js-domain-name">(.*?)</div>', response)
for match in matches:
result.add(match.replace(' ', '').replace('\n', ''))
return list(result)
|
[
"Find subdomains according to the TLD."
] |
Please provide a description of the function:def flash(function, links, thread_count):
# Convert links (set) to list
links = list(links)
threadpool = concurrent.futures.ThreadPoolExecutor(
max_workers=thread_count)
futures = (threadpool.submit(function, link) for link in links)
for i, _ in enumerate(concurrent.futures.as_completed(futures)):
if i + 1 == len(links) or (i + 1) % thread_count == 0:
print('%s Progress: %i/%i' % (info, i + 1, len(links)),
end='\r')
print('')
|
[
"Process the URLs and uses a threadpool to execute a function."
] |
Please provide a description of the function:def regxy(pattern, response, supress_regex, custom):
try:
matches = re.findall(r'%s' % pattern, response)
for match in matches:
verb('Custom regex', match)
custom.add(match)
except:
supress_regex = True
|
[
"Extract a string based on regex pattern supplied by user."
] |
Please provide a description of the function:def is_link(url, processed, files):
if url not in processed:
is_file = url.endswith(BAD_TYPES)
if is_file:
files.add(url)
return False
return True
return False
|
[
"\n Determine whether or not a link should be crawled\n A url should not be crawled if it\n - Is a file\n - Has already been crawled\n\n Args:\n url: str Url to be processed\n processed: list[str] List of urls that have already been crawled\n\n Returns:\n bool If `url` should be crawled\n "
] |
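A usage sketch; it assumes the module-level BAD_TYPES tuple of file extensions (e.g. ('.png', '.jpg', '.pdf')) and that both sets are shared crawl state updated by the caller:
processed, files = set(), set()
is_link('http://example.com/docs/', processed, files)      # True, worth crawling
is_link('http://example.com/logo.png', processed, files)   # False, collected into `files` instead
processed.add('http://example.com/docs/')
is_link('http://example.com/docs/', processed, files)      # False, already crawled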
Please provide a description of the function:def remove_regex(urls, regex):
if not regex:
return urls
# To avoid iterating over the characters of a string
if not isinstance(urls, (list, set, tuple)):
urls = [urls]
try:
non_matching_urls = [url for url in urls if not re.search(regex, url)]
except TypeError:
return []
return non_matching_urls
|
[
"\n Parse a list for non-matches to a regex.\n\n Args:\n urls: iterable of urls\n regex: string regex to be parsed for\n\n Returns:\n list of strings not matching regex\n "
] |
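For example, filtering out URLs that match a pattern (pure standard-library behaviour):
urls = ['http://example.com/a.php', 'http://example.com/b.html']
remove_regex(urls, r'\.php$')   # ['http://example.com/b.html']
remove_regex(urls, None)        # returns the input unchanged
remove_regex('http://example.com/a.php', r'\.php$')   # [] -- a lone string is wrapped into a list first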
Please provide a description of the function:def writer(datasets, dataset_names, output_dir):
for dataset, dataset_name in zip(datasets, dataset_names):
if dataset:
filepath = output_dir + '/' + dataset_name + '.txt'
with open(filepath, 'w+') as out_file:
joined = '\n'.join(dataset)
out_file.write(str(joined.encode('utf-8').decode('utf-8')))
out_file.write('\n')
|
[
"Write the results."
] |
Please provide a description of the function:def timer(diff, processed):
# Changes seconds into minutes and seconds
minutes, seconds = divmod(diff, 60)
try:
# Finds average time taken by requests
time_per_request = diff / float(len(processed))
except ZeroDivisionError:
time_per_request = 0
return minutes, seconds, time_per_request
|
[
"Return the passed time."
] |
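A worked example: 125 seconds across 5 processed URLs gives 2 minutes, 5 seconds and 25 seconds per request on average:
minutes, seconds, per_request = timer(125, ['u1', 'u2', 'u3', 'u4', 'u5'])
# minutes == 2, seconds == 5, per_request == 25.0
minutes, seconds, per_request = timer(10, [])
# the ZeroDivisionError is caught, so per_request == 0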
Please provide a description of the function:def entropy(string):
entropy = 0
for number in range(256):
result = float(string.encode('utf-8').count(
chr(number))) / len(string.encode('utf-8'))
if result != 0:
entropy = entropy - result * math.log(result, 2)
return entropy
|
[
"Calculate the entropy of a string."
] |
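A standalone sketch of the same byte-level Shannon entropy this helper computes, written against collections.Counter rather than the 256-value loop above (the extractor above treats values of 4 bits/byte or more as likely secrets):
import math
from collections import Counter

def shannon_entropy(text):
    # Bits of entropy per byte of the UTF-8 encoding.
    data = text.encode('utf-8')
    counts = Counter(data)
    return -sum((n / len(data)) * math.log2(n / len(data)) for n in counts.values())

shannon_entropy("aaaa")                   # 0.0 -> a single repeated byte carries no information
shannon_entropy("AKIAIOSFODNN7EXAMPLE")   # ~3.7 -> much closer to a random-looking token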
Please provide a description of the function:def extract_headers(headers):
sorted_headers = {}
matches = re.findall(r'(.*):\s(.*)', headers)
for match in matches:
header = match[0]
value = match[1]
try:
if value[-1] == ',':
value = value[:-1]
sorted_headers[header] = value
except IndexError:
pass
return sorted_headers
|
[
"This function extracts valid headers from interactive input."
] |
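For example, parsing a raw header blob pasted by the user (trailing commas are stripped from each value):
raw = """Host: example.com,
User-Agent: Photon,
Accept-Language: en-US,en;q=0.5"""
extract_headers(raw)
# {'Host': 'example.com', 'User-Agent': 'Photon',
#  'Accept-Language': 'en-US,en;q=0.5'}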
Please provide a description of the function:def top_level(url, fix_protocol=True):
ext = tld.get_tld(url, fix_protocol=fix_protocol)
toplevel = '.'.join(urlparse(url).netloc.split('.')[-2:]).split(
ext)[0] + ext
return toplevel
|
[
"Extract the top level domain from an URL."
] |
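A usage sketch; it assumes the third-party tld package (and urlparse) that the module imports:
top_level('https://blog.example.com/post/1')   # 'example.com'
top_level('http://example.org')                # 'example.org'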
Please provide a description of the function:def proxy_type(v):
proxies = []
if re.match(r"((http|socks5):\/\/.)?(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):(\d{1,5})", v):
proxies.append({"http": v,
"https": v})
return proxies
elif re.match(r"((http|socks5):\/\/.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}:(\d{1,5})", v):
proxies.append({"http": v,
"https": v})
return proxies
elif is_proxy_list(v, proxies):
return proxies
else:
raise argparse.ArgumentTypeError(
"Proxy should follow IP:PORT or DOMAIN:PORT format")
|
[
" Match IP:PORT or DOMAIN:PORT in a losse manner "
] |
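This function is written to be used as an argparse type= callback; a sketch of how it plugs in (is_proxy_list, used for file-based proxy lists, lives elsewhere in the module):
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--proxy', type=proxy_type, default=[None])
args = parser.parse_args(['--proxy', 'http://127.0.0.1:8080'])
# args.proxy == [{'http': 'http://127.0.0.1:8080', 'https': 'http://127.0.0.1:8080'}]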
Please provide a description of the function:def dnsdumpster(domain, output_dir):
response = requests.Session().get('https://dnsdumpster.com/').text
csrf_token = re.search(
r"name='csrfmiddlewaretoken' value='(.*?)'", response).group(1)
cookies = {'csrftoken': csrf_token}
headers = {'Referer': 'https://dnsdumpster.com/'}
data = {'csrfmiddlewaretoken': csrf_token, 'targetip': domain}
response = requests.Session().post(
'https://dnsdumpster.com/', cookies=cookies, data=data, headers=headers)
image = requests.get('https://dnsdumpster.com/static/map/%s.png' % domain)
if image.status_code == 200:
with open('%s/%s.png' % (output_dir, domain), 'wb') as f:
f.write(image.content)
|
[
"Query dnsdumpster.com."
] |
Please provide a description of the function:def prompt(default=None):
editor = 'nano'
with tempfile.NamedTemporaryFile(mode='r+') as tmpfile:
if default:
tmpfile.write(default)
tmpfile.flush()
child_pid = os.fork()
is_child = child_pid == 0
if is_child:
os.execvp(editor, [editor, tmpfile.name])
else:
os.waitpid(child_pid, 0)
tmpfile.seek(0)
return tmpfile.read().strip()
|
[
"Present the user a prompt."
] |
Please provide a description of the function:def start_market(self):
# Start the trade_engine thread
self.market.start()
# Register the backtest broker and start its associated QAThread, stored in the kernels dict as { 'broker_name': QAThread }
#self.market.register(self.broker_name, self.broker)
self.market.connect(self.broker_name)
|
[
"\n start the market thread and register backtest broker thread\n QAMarket 继承QATrader, QATrader 中有 trade_engine属性 , trade_engine类型是QA_Engine从 QA_Thread继承\n "
] |
Please provide a description of the function:def run(self):
# Settlement events are only triggered when the date changes
_date = None
while QA_util_if_tradetime(self.now):
for data in self.ingest_data: # for each piece of data in ingest_data
# <class 'QUANTAXIS.QAData.QADataStruct.QA_DataStruct_Stock_day'>
date = data.date[0]
if self.market_type is MARKET_TYPE.STOCK_CN: # if this is the stock market
if _date != date: # if this is a new date
# the previous trading day is over
# send settle events to the broker and the account
try:
self.market.trade_engine.join()
# time.sleep(2)
self.market._settle(self.broker_name)
except Exception as e:
raise e
# funds, indices, futures
elif self.market_type in [MARKET_TYPE.FUND_CN, MARKET_TYPE.INDEX_CN, MARKET_TYPE.FUTURE_CN]:
self.market._settle(self.broker_name)
# print(data)
self.broker.run(
QA_Event(event_type=ENGINE_EVENT.UPCOMING_DATA, market_data=data))
# generate an UPCOMING_DATA event and put it on the queue for execution
self.market.upcoming_data(self.broker_name, data)
self.market.trade_engine.join()
_date = date
|
[
"generator driven data flow\n "
] |
Please provide a description of the function:def message(self):
'the standard message which can be transferred'
return {
'source':
'account',
'frequence':
self.frequence,
'account_cookie':
self.account_cookie,
'portfolio_cookie':
self.portfolio_cookie,
'user_cookie':
self.user_cookie,
'broker':
self.broker,
'market_type':
self.market_type,
'strategy_name':
self.strategy_name,
'current_time':
str(self._currenttime),
'allow_sellopen':
self.allow_sellopen,
'allow_margin':
self.allow_margin,
'allow_t0':
self.allow_t0,
'margin_level':
self.margin_level,
'init_assets':
self.init_assets,
'init_cash':
self.init_cash,
'init_hold':
self.init_hold.to_dict(),
'commission_coeff':
self.commission_coeff,
'tax_coeff':
self.tax_coeff,
'cash':
self.cash,
'history':
self.history,
'trade_index':
self.time_index_max,
'running_time':
str(datetime.datetime.now())
if self.running_time is None else str(self.running_time),
'quantaxis_version':
self.quantaxis_version,
'running_environment':
self.running_environment,
'start_date':
self.start_date,
'end_date':
self.end_date,
'frozen':
self.frozen,
'finished_id':
self.finishedOrderid
}
|
[] |
Please provide a description of the function:def init_hold_with_account(self):
return self.init_hold.reset_index().assign(
account_cookie=self.account_cookie
).set_index(['code',
'account_cookie'])
|
[
"带account_cookie的初始化持仓\n\n Returns:\n [type] -- [description]\n "
] |
Please provide a description of the function:def start_date(self):
if self.start_==None:
if len(self.time_index_max) > 0:
return str(min(self.time_index_max))[0:10]
else:
print(
RuntimeWarning(
'QAACCOUNT: THIS ACCOUNT DOESNOT HAVE ANY TRADE'
)
)
else:
return self.start_
|
[
"账户的起始交易日期(只在回测中使用)\n\n Raises:\n RuntimeWarning -- [description]\n\n Returns:\n [type] -- [description]\n "
] |
Please provide a description of the function:def end_date(self):
if self.start_==None:
if len(self.time_index_max) > 0:
return str(max(self.time_index_max))[0:10]
else:
print(
RuntimeWarning(
'QAACCOUNT: THIS ACCOUNT DOESNOT HAVE ANY TRADE'
)
)
else:
return self.end_
|
[
"账户的交易结束日期(只在回测中使用)\n\n Raises:\n RuntimeWarning -- [description]\n\n Returns:\n [type] -- [description]\n "
] |
Please provide a description of the function:def history_table_min(self):
'table of the trade history within the interval'
if len(self.history_min) > 0:
lens = len(self.history_min[0])
else:
lens = len(self._history_headers)
return pd.DataFrame(
data=self.history_min,
columns=self._history_headers[:lens]
).sort_index()
|
[] |
Please provide a description of the function:def history_table(self):
'table of the trade history'
if len(self.history) > 0:
lens = len(self.history[0])
else:
lens = len(self._history_headers)
return pd.DataFrame(
data=self.history,
columns=self._history_headers[:lens]
).sort_index()
|
[] |
Please provide a description of the function:def cash_table(self):
'table of cash'
_cash = pd.DataFrame(
data=[self.cash[1::],
self.time_index_max],
index=['cash',
'datetime']
).T
_cash = _cash.assign(
date=_cash.datetime.apply(lambda x: pd.to_datetime(str(x)[0:10]))
).assign(account_cookie=self.account_cookie) # .sort_values('datetime')
return _cash.set_index(['datetime', 'account_cookie'], drop=False)
|
[
"\n 实验性质\n @2018-06-09\n\n # 对于账户持仓的分解\n\n 1. 真实持仓hold:\n\n 正常模式/TZero模式:\n hold = 历史持仓(init_hold)+ 初始化账户后发生的所有交易导致的持仓(hold_available)\n\n 动态持仓(初始化账户后的持仓)hold_available:\n self.history 计算而得\n\n 2. 账户的可卖额度(sell_available)\n\n 正常模式:\n sell_available\n 结算前: init_hold+ 买卖交易(卖-)\n 结算后: init_hold+ 买卖交易(买+ 卖-)\n TZero模式:\n sell_available\n 结算前: init_hold - 买卖交易占用的额度(abs(买+ 卖-))\n 结算过程 是为了补平(等于让hold={})\n 结算后: init_hold\n "
] |
Please provide a description of the function:def hold(self):
return pd.concat(
[self.init_hold,
self.hold_available]
).groupby('code').sum().replace(0,
np.nan).dropna().sort_index()
|
[
"真实持仓\n "
] |
Please provide a description of the function:def hold_available(self):
return self.history_table.groupby('code').amount.sum().replace(
0,
np.nan
).dropna().sort_index()
|
[
"可用持仓\n "
] |
Please provide a description of the function:def trade(self):
return self.history_table.pivot_table(
index=['datetime',
'account_cookie'],
columns='code',
values='amount',
aggfunc=np.sum
).fillna(0).sort_index()
|
[
"每次交易的pivot表\n\n Returns:\n pd.DataFrame\n\n 此处的pivot_table一定要用np.sum\n "
] |
Please provide a description of the function:def daily_cash(self):
'daily cash table at settlement time'
res = self.cash_table.drop_duplicates(subset='date', keep='last')
le=pd.DataFrame(pd.Series(data=None, index=pd.to_datetime(self.trade_range_max).set_names('date'), name='predrop'))
ri=res.set_index('date')
res_=pd.merge(le,ri,how='left',left_index=True,right_index=True)
res_=res_.ffill().fillna(self.init_cash).drop(['predrop','datetime','account_cookie'], axis=1).reset_index().set_index(['date'],drop=False).sort_index()
res_=res_[res_.index.isin(self.trade_range)]
return res_
|
[] |
Please provide a description of the function:def daily_hold(self):
'daily holdings table at settlement time'
data = self.trade.cumsum()
if len(data) < 1:
return None
else:
# print(data.index.levels[0])
data = data.assign(account_cookie=self.account_cookie).assign(
date=pd.to_datetime(data.index.levels[0]).date
)
data.date = pd.to_datetime(data.date)
data = data.set_index(['date', 'account_cookie'])
res = data[~data.index.duplicated(keep='last')].sort_index()
# This also counts holdings of suspended stocks, but they drop out when market_value is computed
le=pd.DataFrame(pd.Series(data=None, index=pd.to_datetime(self.trade_range_max).set_names('date'), name='predrop'))
ri=res.reset_index().set_index('date')
res_=pd.merge(le,ri,how='left',left_index=True,right_index=True)
res_=res_.ffill().fillna(0).drop(['predrop','account_cookie'], axis=1).reset_index().set_index(['date']).sort_index()
res_=res_[res_.index.isin(self.trade_range)]
return res_
|
[] |
Please provide a description of the function:def daily_frozen(self):
'daily holdings table at settlement time'
res_=self.history_table.assign(date=pd.to_datetime(self.history_table.datetime)).set_index('date').resample('D').frozen.last().fillna(method='pad')
res_=res_[res_.index.isin(self.trade_range)]
return res_
|
[] |
Please provide a description of the function:def hold_table(self, datetime=None):
"到某一个时刻的持仓 如果给的是日期,则返回当日开盘前的持仓"
if datetime is None:
hold_available = self.history_table.set_index(
'datetime'
).sort_index().groupby('code').amount.sum().sort_index()
else:
hold_available = self.history_table.set_index(
'datetime'
).sort_index().loc[:datetime].groupby('code'
).amount.sum().sort_index()
return pd.concat([self.init_hold,
hold_available]).groupby('code').sum().sort_index(
).apply(lambda x: x if x > 0 else None).dropna()
|
[] |
Please provide a description of the function:def current_hold_price(self):
def weights(x):
n=len(x)
res=1
while res>0 or res<0:
res=sum(x[:n]['amount'])
n=n-1
x=x[n+1:]
if sum(x['amount']) != 0:
return np.average(
x['price'],
weights=x['amount'],
returned=True
)
else:
return np.nan
return self.history_table.set_index(
'datetime',
drop=False
).sort_index().groupby('code').apply(weights).dropna()
|
[
"计算目前持仓的成本 用于模拟盘和实盘查询\n\n Returns:\n [type] -- [description]\n "
] |
Please provide a description of the function:def hold_price(self, datetime=None):
def weights(x):
if sum(x['amount']) != 0:
return np.average(
x['price'],
weights=x['amount'],
returned=True
)
else:
return np.nan
if datetime is None:
return self.history_table.set_index(
'datetime',
drop=False
).sort_index().groupby('code').apply(weights).dropna()
else:
return self.history_table.set_index(
'datetime',
drop=False
).sort_index().loc[:datetime].groupby('code').apply(weights
).dropna()
|
[
"计算持仓成本 如果给的是日期,则返回当日开盘前的持仓\n\n Keyword Arguments:\n datetime {[type]} -- [description] (default: {None})\n\n Returns:\n [type] -- [description]\n "
] |
Please provide a description of the function:def hold_time(self, datetime=None):
def weights(x):
if sum(x['amount']) != 0:
return pd.Timestamp(self.datetime
) - pd.to_datetime(x.datetime.max())
else:
return np.nan
if datetime is None:
return self.history_table.set_index(
'datetime',
drop=False
).sort_index().groupby('code').apply(weights).dropna()
else:
return self.history_table.set_index(
'datetime',
drop=False
).sort_index().loc[:datetime].groupby('code').apply(weights
).dropna()
|
[
"持仓时间\n\n Keyword Arguments:\n datetime {[type]} -- [description] (default: {None})\n "
] |
Please provide a description of the function:def reset_assets(self, init_cash=None):
'reset_history/cash/'
self.sell_available = copy.deepcopy(self.init_hold)
self.history = []
self.init_cash = init_cash
self.cash = [self.init_cash]
self.cash_available = self.cash[-1]
|
[] |
Please provide a description of the function:def receive_simpledeal(
self,
code,
trade_price,
trade_amount,
trade_towards,
trade_time,
message=None,
order_id=None,
trade_id=None,
realorder_id=None
):
self.datetime = trade_time
if realorder_id in self.finishedOrderid:
pass
else:
self.finishedOrderid.append(realorder_id)
market_towards = 1 if trade_towards > 0 else -1
# value: contract value; unit: contract multiplier
if self.allow_margin:
frozen = self.market_preset.get_frozen(code) # margin ratio
unit = self.market_preset.get_unit(code) # contract multiplier
raw_trade_money = trade_price * trade_amount * market_towards # total market value
value = raw_trade_money * unit # total contract value
trade_money = value * frozen # trade margin
else:
trade_money = trade_price * trade_amount * market_towards
raw_trade_money = trade_money
value = trade_money
unit = 1
frozen = 1
# compute fees
# trade_price
if self.market_type == MARKET_TYPE.FUTURE_CN:
# futures are not subject to stamp tax
# commission is charged on both sides, and there is no minimum commission
commission_fee_preset = self.market_preset.get_code(code)
if trade_towards in [ORDER_DIRECTION.BUY_OPEN,
ORDER_DIRECTION.BUY_CLOSE,
ORDER_DIRECTION.SELL_CLOSE,
ORDER_DIRECTION.SELL_OPEN]:
commission_fee = commission_fee_preset['commission_coeff_pervol'] * trade_amount + \
commission_fee_preset['commission_coeff_peramount'] * \
abs(value)
elif trade_towards in [ORDER_DIRECTION.BUY_CLOSETODAY,
ORDER_DIRECTION.SELL_CLOSETODAY]:
commission_fee = commission_fee_preset['commission_coeff_today_pervol'] * trade_amount + \
commission_fee_preset['commission_coeff_today_peramount'] * \
abs(value)
tax_fee = 0 # no stamp tax on buys
elif self.market_type == MARKET_TYPE.STOCK_CN:
commission_fee = self.commission_coeff * \
abs(trade_money)
commission_fee = 5 if commission_fee < 5 else commission_fee
if int(trade_towards) > 0:
tax_fee = 0 # no stamp tax on buys
else:
tax_fee = self.tax_coeff * abs(trade_money)
# settle the trade
if self.cash[-1] > trade_money + commission_fee + tax_fee:
self.time_index_max.append(trade_time)
# TODO: locked (hedged) positions for futures are not supported yet
if self.allow_sellopen:
if trade_towards in [ORDER_DIRECTION.BUY_OPEN,
ORDER_DIRECTION.SELL_OPEN]:
# opening orders occupy cash; compute the average
# initialization
if code in self.frozen.keys():
if trade_towards in self.frozen[code].keys():
pass
else:
self.frozen[code][str(trade_towards)] = {
'money': 0,
'amount': 0,
'avg_price': 0
}
else:
self.frozen[code] = {
str(ORDER_DIRECTION.BUY_OPEN): {
'money': 0,
'amount': 0,
'avg_price': 0
},
str(ORDER_DIRECTION.SELL_OPEN): {
'money': 0,
'amount': 0,
'avg_price': 0
}
}
self.frozen[code][str(trade_towards)]['money'] = (
(
self.frozen[code][str(trade_towards)]['money'] *
self.frozen[code][str(trade_towards)]['amount']
) + abs(trade_money)
) / (
self.frozen[code][str(trade_towards)]['amount'] +
trade_amount
)
self.frozen[code][str(trade_towards)]['avg_price'] = (
(
self.frozen[code][str(trade_towards)]['avg_price'] *
self.frozen[code][str(trade_towards)]['amount']
) + abs(raw_trade_money)
) / (
self.frozen[code][str(trade_towards)]['amount'] +
trade_amount
)
self.frozen[code][str(trade_towards)]['amount'] += trade_amount
self.cash.append(
self.cash[-1] - abs(trade_money) - commission_fee -
tax_fee
)
elif trade_towards in [ORDER_DIRECTION.BUY_CLOSE, ORDER_DIRECTION.BUY_CLOSETODAY,
ORDER_DIRECTION.SELL_CLOSE, ORDER_DIRECTION.SELL_CLOSETODAY]:
# closing orders release cash
# if trade_towards == ORDER_DIRECTION.BUY_CLOSE:
# short was opened, buy to close
# self.cash
if trade_towards in [ORDER_DIRECTION.BUY_CLOSE, ORDER_DIRECTION.BUY_CLOSETODAY]: # buy to close; the position was opened short
# self.frozen[code][ORDER_DIRECTION.SELL_OPEN]['money'] -= trade_money
self.frozen[code][str(ORDER_DIRECTION.SELL_OPEN)
]['amount'] -= trade_amount
frozen_part = self.frozen[code][
str(ORDER_DIRECTION.SELL_OPEN)]['money'] * trade_amount
# account cash + released frozen funds + price difference * leverage
self.cash.append(
self.cash[-1] + frozen_part +
(frozen_part - trade_money) / frozen -
commission_fee - tax_fee
)
if self.frozen[code][str(ORDER_DIRECTION.SELL_OPEN)
]['amount'] == 0:
self.frozen[code][str(ORDER_DIRECTION.SELL_OPEN)
]['money'] = 0
self.frozen[code][str(ORDER_DIRECTION.SELL_OPEN)
]['avg_price'] = 0
elif trade_towards in [ORDER_DIRECTION.SELL_CLOSE, ORDER_DIRECTION.SELL_CLOSETODAY]: # sell to close; the position was opened long
# self.frozen[code][ORDER_DIRECTION.BUY_OPEN]['money'] -= trade_money
self.frozen[code][str(ORDER_DIRECTION.BUY_OPEN)
]['amount'] -= trade_amount
frozen_part = self.frozen[code][str(ORDER_DIRECTION.BUY_OPEN)
]['money'] * trade_amount
self.cash.append(
self.cash[-1] + frozen_part +
(abs(trade_money) - frozen_part) / frozen -
commission_fee - tax_fee
)
if self.frozen[code][str(ORDER_DIRECTION.BUY_OPEN)
]['amount'] == 0:
self.frozen[code][str(ORDER_DIRECTION.BUY_OPEN)
]['money'] = 0
self.frozen[code][str(ORDER_DIRECTION.BUY_OPEN)
]['avg_price'] = 0
else: # short opens not allowed ==> stocks
self.cash.append(
self.cash[-1] - trade_money - tax_fee - commission_fee
)
if self.allow_t0 or trade_towards == ORDER_DIRECTION.SELL:
self.sell_available[code] = self.sell_available.get(
code,
0
) + trade_amount * market_towards
self.buy_available = self.sell_available
self.cash_available = self.cash[-1]
frozen_money = abs(trade_money) if trade_towards in [
ORDER_DIRECTION.BUY_OPEN,
ORDER_DIRECTION.SELL_OPEN
] else 0
self.history.append(
[
str(trade_time),
code,
trade_price,
market_towards * trade_amount,
self.cash[-1],
order_id,
realorder_id,
trade_id,
self.account_cookie,
commission_fee,
tax_fee,
message,
frozen_money,
trade_towards
]
)
else:
print('ALERT MONEY NOT ENOUGH!!!')
print(self.cash[-1])
self.cash_available = self.cash[-1]
|
[
"快速撮合成交接口\n\n\n 此接口是一个直接可以成交的接口, 所以务必确保给出的信息是可以成交的\n\n 此接口涉及的是\n 1. 股票/期货的成交\n 2. 历史记录的增加\n 3. 现金/持仓/冻结资金的处理\n\n Arguments:\n code {[type]} -- [description]\n trade_price {[type]} -- [description]\n trade_amount {[type]} -- [description]\n trade_towards {[type]} -- [description]\n trade_time {[type]} -- [description]\n\n Keyword Arguments:\n message {[type]} -- [description] (default: {None})\n\n\n 2018/11/7 @yutiansut\n 修复一个bug: 在直接使用该快速撮合接口的时候, 期货卖出会扣减保证金, 买回来的时候应该反算利润\n\n 如 3800卖空 3700买回平仓 应为100利润\n @2018-12-31 保证金账户ok\n\n\n @2019/1/3 一些重要的意思\n frozen = self.market_preset.get_frozen(code) # 保证金率\n unit = self.market_preset.get_unit(code) # 合约乘数\n raw_trade_money = trade_price*trade_amount*market_towards # 总市值\n value = raw_trade_money * unit # 合约总价值\n trade_money = value * frozen # 交易保证金\n ",
"[summary]\n # frozen的计算\n # money 冻结的资金\n # amount 冻结的数量\n\n 2018-12-31 \n\n "
] |
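A worked sketch of the margin arithmetic spelled out in the 2019/1/3 note above; the margin rate and contract multiplier are illustrative assumptions, not values read from market_preset:

# hypothetical contract parameters (assumed, not taken from market_preset)
frozen = 0.10 # margin rate
unit = 10 # contract multiplier
trade_price, trade_amount, market_towards = 3800, 1, -1 # sell-open one lot
raw_trade_money = trade_price * trade_amount * market_towards # signed market value: -3800
value = raw_trade_money * unit # total contract value: -38000
trade_money = value * frozen # margin to freeze: -3800.0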
Please provide a description of the function:def receive_deal(
self,
code: str,
trade_id: str,
order_id: str,
realorder_id: str,
trade_price: float,
trade_amount: int,
trade_towards: int,
trade_time: str,
message=None
):
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!receive deal')
trade_time = str(trade_time)
code = str(code)
trade_price = float(trade_price)
trade_towards = int(trade_towards)
realorder_id = str(realorder_id)
trade_id = str(trade_id)
trade_amount = int(trade_amount)
order_id = str(order_id)
market_towards = 1 if trade_towards > 0 else -1
self.receive_simpledeal(
code,
trade_price,
trade_amount,
trade_towards,
trade_time,
message=message,
order_id=order_id,
trade_id=trade_id,
realorder_id=realorder_id
)
|
[
"更新deal\n\n Arguments:\n code {str} -- [description]\n trade_id {str} -- [description]\n order_id {str} -- [description]\n realorder_id {str} -- [description]\n trade_price {float} -- [description]\n trade_amount {int} -- [description]\n trade_towards {int} -- [description]\n trade_time {str} -- [description]\n\n Returns:\n [type] -- [description]\n ",
"2019/01/03 直接使用快速撮合接口了\n 2333 这两个接口现在也没啥区别了....\n 太绝望了\n "
] |
Please provide a description of the function:def send_order(
self,
code=None,
amount=None,
time=None,
towards=None,
price=None,
money=None,
order_model=None,
amount_model=None,
*args,
**kwargs
):
wrong_reason = None
assert code is not None and time is not None and towards is not None and order_model is not None and amount_model is not None
# 🛠todo move to a Utils class: time conversion
# date string, e.g. 2011-10-11, length 10
date = str(time)[0:10] if len(str(time)) == 19 else str(time)
# time string, e.g. 2011-10-11 09:02:00, length 19
time = str(time) if len(str(time)) == 19 else '{} 09:31:00'.format(
str(time)[0:10]
)
# 🛠todo move to a Utils class: amount_to_money (volume -> money)
# BY_MONEY :: amount is money, e.g. 10000 yuan, so a price is needed to derive the actual share count
# BY_AMOUNT :: amount is a share count, e.g. 10000 shares
if self.allow_margin:
amount = amount if amount_model is AMOUNT_MODEL.BY_AMOUNT else int(
money / (
self.market_preset.get_unit(code) *
self.market_preset.get_frozen(code) * price *
(1 + self.commission_coeff)
) / 100
) * 100
else:
amount = amount if amount_model is AMOUNT_MODEL.BY_AMOUNT else int(
money / (price * (1 + self.commission_coeff)) / 100
) * 100
# 🛠todo move to a Utils class: money_to_amount (money -> volume)
if self.allow_margin:
money = amount * price * self.market_preset.get_unit(code)*self.market_preset.get_frozen(code) * \
(1+self.commission_coeff) if amount_model is AMOUNT_MODEL.BY_AMOUNT else money
else:
money = amount * price * \
(1+self.commission_coeff) if amount_model is AMOUNT_MODEL.BY_AMOUNT else money
# flag: validate amount, price and direction before the order is accepted
flag = False
assert (int(towards) != 0)
if int(towards) in [1, 2, 3]:
# buy side (covers BUY, BUY_OPEN and BUY_CLOSE)
if self.cash_available >= money:
if self.market_type == MARKET_TYPE.STOCK_CN: # stocks: buys must be in whole 100-share lots
amount = int(amount / 100) * 100
self.cash_available -= money
flag = True
if self.running_environment == RUNNING_ENVIRONMENT.TZERO:
if abs(self.buy_available.get(code, 0)) >= amount:
flag = True
self.cash_available -= money
self.buy_available[code] -= amount
else:
flag = False
wrong_reason = 'T0交易买入超出限额'
if self.market_type == MARKET_TYPE.FUTURE_CN:
# when negative holdings (short positions) are allowed
if towards == 3: # buy to close
_hold = self.sell_available.get(code, 0)
# suppose the holding is negative:
# amount is the order size, e.g. the account held -3 lots and now closes 1 lot
#left_amount = amount+_hold if _hold < 0 else amount
_money = abs(
float(amount * price * (1 + self.commission_coeff))
)
print(_hold)
if self.cash_available >= _money:
if _hold < 0:
self.cash_available -= _money
flag = True
else:
wrong_reason = '空单仓位不足'
else:
wrong_reason = '平多剩余资金不够'
if towards == 2:
self.cash_available -= money
flag = True
else:
wrong_reason = 'QAACCOUNT: 可用资金不足 cash_available {} code {} time {} amount {} towards {}'.format(
self.cash_available,
code,
time,
amount,
towards
)
elif int(towards) in [-1, -2, -3]:
# sell side (covers SELL, SELL_OPEN when allow_sellopen is set, and SELL_CLOSE)
# print(self.sell_available[code])
_hold = self.sell_available.get(code, 0) # _hold is the current holding
# if holding >= amount > 0
# holding quantity >= quantity to sell
if _hold >= amount:
self.sell_available[code] -= amount
# towards = ORDER_DIRECTION.SELL
flag = True
# holding quantity < quantity to sell
else:
# if short opening is allowed, the positive holding is consumed first; the negative (short) part is charged at the original occupied amount
if self.allow_sellopen and towards == -2:
if self.cash_available >= money: # the short's market value must be covered by cash (collateralized shorting only, no naked shorts)
# self.cash_available -= money
flag = True
else:
print('sellavailable', _hold)
print('amount', amount)
print('acquireMoney', money)
print('cash', self.cash_available)
wrong_reason = "卖空资金不足/不允许裸卖空"
else:
wrong_reason = "卖出仓位不足"
if flag and (amount > 0):
_order = QA_Order(
user_cookie=self.user_cookie,
strategy=self.strategy_name,
frequence=self.frequence,
account_cookie=self.account_cookie,
code=code,
market_type=self.market_type,
date=date,
datetime=time,
sending_time=time,
callback=self.receive_deal,
amount=amount,
price=price,
order_model=order_model,
towards=towards,
money=money,
broker=self.broker,
amount_model=amount_model,
commission_coeff=self.commission_coeff,
tax_coeff=self.tax_coeff,
*args,
**kwargs
) # init
# store the order's status history in the queue held by the QA_Order object
self.datetime = time
self.orders.insert_order(_order)
return _order
else:
print(
'ERROR : CODE {} TIME {} AMOUNT {} TOWARDS {}'.format(
code,
time,
amount,
towards
)
)
print(wrong_reason)
return False
|
[
"\n ATTENTION CHANGELOG 1.0.28\n 修改了Account的send_order方法, 区分按数量下单和按金额下单两种方式\n\n - AMOUNT_MODEL.BY_PRICE ==> AMOUNT_MODEL.BY_MONEY # 按金额下单\n - AMOUNT_MODEL.BY_AMOUNT # 按数量下单\n\n 在按金额下单的时候,应给予 money参数\n 在按数量下单的时候,应给予 amount参数\n\n python code:\n Account=QA.QA_Account()\n\n Order_bymoney=Account.send_order(code='000001',\n price=11,\n money=0.3*Account.cash_available,\n time='2018-05-09',\n towards=QA.ORDER_DIRECTION.BUY,\n order_model=QA.ORDER_MODEL.MARKET,\n amount_model=QA.AMOUNT_MODEL.BY_MONEY\n )\n\n Order_byamount=Account.send_order(code='000001',\n price=11,\n amount=100,\n time='2018-05-09',\n towards=QA.ORDER_DIRECTION.BUY,\n order_model=QA.ORDER_MODEL.MARKET,\n amount_model=QA.AMOUNT_MODEL.BY_AMOUNT\n )\n\n :param code: 证券代码\n :param amount: 买卖 数量多数股\n :param time: Timestamp 对象 下单时间\n :param towards: int , towards>0 买入 towards<0 卖出\n :param price: 买入,卖出 标的证券的价格\n :param money: 买卖 价格\n :param order_model: 类型 QA.ORDER_MODE\n :param amount_model:类型 QA.AMOUNT_MODEL\n :return: QA_Order | False\n\n @2018/12/23\n send_order 是QA的标准返回, 如需对接其他接口, 只需要对于QA_Order做适配即可\n\n\n @2018/12/27\n 在判断账户为期货账户(及 允许双向交易)\n\n @2018/12/30 保证金账户的修改\n 1. 保证金账户冻结的金额\n 2. 保证金账户的结算\n 3. 保证金账户的判断\n\n "
] |
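A small sketch of the BY_MONEY-to-amount conversion used above for a plain (non-margin) stock account; the price, budget and commission figures are illustrative:

commission_coeff = 0.00015
price, money = 11.0, 300000.0
# round down to a whole 100-share lot, as send_order does when amount_model is BY_MONEY
amount = int(money / (price * (1 + commission_coeff)) / 100) * 100
print(amount) # 27200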
Please provide a description of the function:def close_positions_order(self):
order_list = []
time = '{} 15:00:00'.format(self.date)
if self.running_environment == RUNNING_ENVIRONMENT.TZERO:
for code, amount in self.hold_available.iteritems():
order = False
if amount < 0:
# the position was opened by selling first, so close it with a buy
order = self.send_order(
code=code,
price=0,
amount=abs(amount),
time=time,
towards=ORDER_DIRECTION.BUY,
order_model=ORDER_MODEL.CLOSE,
amount_model=AMOUNT_MODEL.BY_AMOUNT,
)
elif amount > 0:
# the position was opened by buying first, so close it with a sell
order = self.send_order(
code=code,
price=0,
amount=abs(amount),
time=time,
towards=ORDER_DIRECTION.SELL,
order_model=ORDER_MODEL.CLOSE,
amount_model=AMOUNT_MODEL.BY_AMOUNT
)
if order:
order_list.append(order)
return order_list
else:
raise RuntimeError(
'QAACCOUNT with {} environment cannot use this method'.format(
self.running_environment
)
)
|
[
"平仓单\n\n Raises:\n RuntimeError -- if ACCOUNT.RUNNING_ENVIRONMENT is NOT TZERO\n\n Returns:\n list -- list with order\n "
] |
Please provide a description of the function:def settle(self, settle_data = None):
#print('FROM QUANTAXIS QA_ACCOUNT: account settle')
if self.running_environment == RUNNING_ENVIRONMENT.TZERO and self.hold_available.sum(
) != 0:
raise RuntimeError(
'QAACCOUNT: 该T0账户未当日仓位,请平仓 {}'.format(
self.hold_available.to_dict()
)
)
if self.market_type == MARKET_TYPE.FUTURE_CN:
# daily mark-to-market settlement
self.static_balance['frozen'].append(
sum(
[
rx['money'] * rx['amount']
for var in self.frozen.values()
for rx in var.values()
]
)
)
self.static_balance['cash'].append(self.cash[-1])
self.static_balance['hold'].append(self.hold.to_dict())
self.static_balance['date'].append(self.date)
self.static_balance['static_assets'].append(
self.static_balance['cash'][-1] +
self.static_balance['frozen'][-1]
)
self.sell_available = self.hold
self.buy_available = self.hold
self.cash_available = self.cash[-1]
self.datetime = '{} 09:30:00'.format(
QA_util_get_next_day(self.date)
) if self.date is not None else None
|
[
"\n 股票/期货的日结算\n\n 股票的结算: 结转股票可卖额度\n T0的结算: 结转T0的额度\n\n 期货的结算: 结转静态资金\n\n\n @2019-02-25 yutiansut\n hold 在下面要进行大变化:\n\n 从 只计算数量 ==> 数量+成本+买入价 (携带更多信息)\n\n 基于history去计算hold ==> last_settle+ today_pos_change\n\n ",
"静态权益的结算\n\n 只关心开仓价/ 不做盯市制度\n\n 动态权益的结算需要关心\n\n "
] |
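A minimal sketch of the frozen-margin aggregation that settle() appends to static_balance['frozen']; the nested dict mirrors the layout of self.frozen and the figures are made up:

frozen = {
    'RBL8': {
        '2': {'money': 380.0, 'amount': 2, 'avg_price': 3800.0},  # BUY_OPEN
        '-2': {'money': 0.0, 'amount': 0, 'avg_price': 0},        # SELL_OPEN
    }
}
static_frozen = sum(
    rx['money'] * rx['amount'] for var in frozen.values() for rx in var.values()
)
print(static_frozen) # 760.0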
Please provide a description of the function:def on_bar(self, event):
'''
Strategy event callback, fired while the market data is being updated.
:param event:
:return:
'''
print(
"on_bar account {} ".format(self.account_cookie),
event.market_data.data
)
print(event.send_order)
try:
for code in event.market_data.code:
if self.sell_available.get(code, 0) > 0:
print('able to sell at {}'.format(self._currenttime))
event.send_order(
account_cookie=self.account_cookie,
amount=self.sell_available[code],
amount_model=AMOUNT_MODEL.BY_AMOUNT,
time=self.current_time,
code=code,
price=0,
order_model=ORDER_MODEL.MARKET,
towards=ORDER_DIRECTION.SELL,
market_type=self.market_type,
frequence=self.frequence,
broker_name=self.broker
)
else:
print('{} no position, buying {}'.format(self._currenttime, code))
event.send_order(
account_cookie=self.account_cookie,
amount=100,
amount_model=AMOUNT_MODEL.BY_AMOUNT,
time=self.current_time,
code=code,
price=0,
order_model=ORDER_MODEL.MARKET,
towards=ORDER_DIRECTION.BUY,
market_type=self.market_type,
frequence=self.frequence,
broker_name=self.broker
)
except Exception as e:
print(e)
|
[] |
Please provide a description of the function:def from_message(self, message):
self.account_cookie = message.get('account_cookie', None)
self.portfolio_cookie = message.get('portfolio_cookie', None)
self.user_cookie = message.get('user_cookie', None)
self.broker = message.get('broker', None)
self.market_type = message.get('market_type', None)
self.strategy_name = message.get('strategy_name', None)
self._currenttime = message.get('current_time', None)
self.allow_sellopen = message.get('allow_sellopen', False)
self.allow_margin = message.get('allow_margin', False)
self.allow_t0 = message.get('allow_t0', False)
self.margin_level = message.get('margin_level', False)
self.frequence = message.get('frequence', FREQUENCE.FIFTEEN_MIN) # default: 15 min
self.init_cash = message.get(
'init_cash',
message.get('init_assets',
1000000)
) # backward-compatible with the older 'init_assets' key
self.init_hold = pd.Series(message.get('init_hold', {}), name='amount')
self.init_hold.index.name = 'code'
self.commission_coeff = message.get('commission_coeff', 0.00015)
self.tax_coeff = message.get('tax_coeff', 0.0015)
self.history = message['history']
self.cash = message['cash']
self.time_index_max = message['trade_index']
self.running_time = message.get('running_time', None)
self.quantaxis_version = message.get('quantaxis_version', None)
self.running_environment = message.get(
'running_environment',
RUNNING_ENVIRONMENT.BACKETEST
)
self.frozen = message.get('frozen', {})
self.finishedOrderid = message.get('finished_id', [])
self.settle()
return self
|
[
"resume the account from standard message\n 这个是从数据库恢复账户时需要的"
] |
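A minimal message dict that from_message() accepts when restoring an account; any key not shown falls back to the defaults above, and the values here are purely illustrative:

message = {
    'account_cookie': 'Acc_demo',
    'user_cookie': 'USER_demo',
    'portfolio_cookie': 'PF_demo',
    'init_cash': 1000000,
    'init_hold': {},
    'history': [],
    'cash': [1000000],
    'trade_index': [],
}
# restored = QA_Account().from_message(message)   # resume from a stored record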
Please provide a description of the function:def from_otgdict(self, message):
self.allow_margin = True
self.allow_sellopen = True
self.allow_t0 = True
self.account_cookie = message['accounts']['user_id']
# available cash
self.cash_available = message['accounts']['available']
self.balance = message['accounts']['balance']
# both of these are computed at settlement time
# previous-day balance / static balance ==> the two are the same
self.static_balance = message['accounts']['static_balance']
self.pre_balance = message['accounts']['pre_balance']
# realized (close) profit
self.close_profit = message['accounts']['close_profit']
# position (holding) profit
self.position_profit = message['accounts']['position_profit']
# floating (dynamic) profit
self.float_profit = message['accounts']['float_profit']
# margin occupied
self.margin = message['accounts']['margin']
self.commission = message['accounts']['commission']
|
[
"[summary]\n balance = static_balance + float_profit\n\n\n \"currency\": \"\", # \"CNY\" (币种)\n \"pre_balance\": float(\"nan\"), # 9912934.78 (昨日账户权益)\n \"static_balance\": float(\"nan\"), # (静态权益)\n \"balance\": float(\"nan\"), # 9963216.55 (账户权益)\n \"available\": float(\"nan\"), # 9480176.15 (可用资金)\n \"float_profit\": float(\"nan\"), # 8910.0 (浮动盈亏)\n \"position_profit\": float(\"nan\"), # 1120.0(持仓盈亏)\n \"close_profit\": float(\"nan\"), # -11120.0 (本交易日内平仓盈亏)\n \"frozen_margin\": float(\"nan\"), # 0.0(冻结保证金)\n \"margin\": float(\"nan\"), # 11232.23 (保证金占用)\n \"frozen_commission\": float(\"nan\"), # 0.0 (冻结手续费)\n \"commission\": float(\"nan\"), # 123.0 (本交易日内交纳的手续费)\n \"frozen_premium\": float(\"nan\"), # 0.0 (冻结权利金)\n \"premium\": float(\"nan\"), # 0.0 (本交易日内交纳的权利金)\n \"deposit\": float(\"nan\"), # 1234.0 (本交易日内的入金金额)\n \"withdraw\": float(\"nan\"), # 890.0 (本交易日内的出金金额)\n \"risk_ratio\": float(\"nan\"), # 0.048482375 (风险度)\n "
] |
Please provide a description of the function:def table(self):
return pd.DataFrame([
self.message,
]).set_index(
'account_cookie',
drop=False
).T
|
[
"\n 打印出account的内容\n "
] |
Please provide a description of the function:def run(self, event):
'''
QA_WORKER method (overridden). Called by QA_ThreadEngine when it processes
the task queue; QA_Task.do invokes run in another thread.
:param event: a QA_Event instance
:return:
'''
if event.event_type is ACCOUNT_EVENT.SETTLE:
print('account_settle')
self.settle()
# elif event.event_type is ACCOUNT_EVENT.UPDATE:
# self.receive_deal(event.message)
elif event.event_type is ACCOUNT_EVENT.MAKE_ORDER:
data = self.send_order(
code=event.code,
amount=event.amount,
time=event.time,
amount_model=event.amount_model,
towards=event.towards,
price=event.price,
order_model=event.order_model
)
if event.callback:
event.callback(data)
else:
return data
elif event.event_type is ENGINE_EVENT.UPCOMING_DATA:
self._currenttime = event.market_data.datetime[0]
if self._market_data is None:
self._market_data = event.market_data
else:
self._market_data = self._market_data + event.market_data
self.on_bar(event)
if event.callback:
event.callback(event)
|
[
"generate order\n if callback callback the order\n if not return back the order\n ",
"update the market_data\n 1. update the inside market_data struct\n 2. tell the on_bar methods\n\n # 这样有点慢\n\n\n "
] |
Please provide a description of the function:def sync_account(self, sync_message):
self.init_hold = sync_message['hold_available']
self.init_cash = sync_message['cash_available']
self.sell_available = copy.deepcopy(self.init_hold)
self.history = []
self.cash = [self.init_cash]
self.cash_available = self.cash[-1]
|
[
"同步账户\n\n Arguments:\n sync_message {[type]} -- [description]\n "
] |
Please provide a description of the function:def change_cash(self, money):
res = self.cash[-1] + money
if res >= 0:
# high-risk operation
self.cash[-1] = res
|
[
"\n 外部操作|高危|\n "
] |
Please provide a description of the function:def get_history(self, start, end):
return self.history_table.set_index(
'datetime',
drop=False
).loc[slice(pd.Timestamp(start),
pd.Timestamp(end))]
|
[
"返回历史成交\n\n Arguments:\n start {str} -- [description]\n end {str]} -- [description]\n "
] |
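The same datetime slice on a toy frame shows what get_history does internally; the rows are made up and stand in for history_table:

import pandas as pd
demo = pd.DataFrame(
    {'datetime': pd.to_datetime(['2018-05-02 10:00', '2018-05-15 14:30', '2018-06-01 09:45']),
     'code': ['000001', '000002', '000001']}
)
picked = demo.set_index('datetime', drop=False).loc[
    slice(pd.Timestamp('2018-05-01'), pd.Timestamp('2018-05-31'))]
print(picked['code'].tolist()) # ['000001', '000002']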
Please provide a description of the function:def QA_SU_save_order(orderlist, client=DATABASE):
if isinstance(orderlist, pd.DataFrame):
collection = client.order
collection.create_index(
[('account_cookie',
ASCENDING),
('realorder_id',
ASCENDING)],
unique=True
)
try:
orderlist = QA_util_to_json_from_pandas(orderlist.reset_index())
for item in orderlist:
if item:
#item['date']= QA_util_get_order_day()
collection.update_one(
{
'account_cookie': item.get('account_cookie'),
'realorder_id': item.get('realorder_id')
},
{'$set': item},
upsert=True
)
except Exception as e:
print(e)
pass
|
[
"存储order_handler的order_status\n\n Arguments:\n orderlist {[dataframe]} -- [description]\n\n Keyword Arguments:\n client {[type]} -- [description] (default: {DATABASE})\n "
] |
Please provide a description of the function:def QA_SU_save_deal(dealist, client=DATABASE):
if isinstance(dealist, pd.DataFrame):
collection = client.deal
collection.create_index(
[('account_cookie',
ASCENDING),
('trade_id',
ASCENDING)],
unique=True
)
try:
dealist = QA_util_to_json_from_pandas(dealist.reset_index())
collection.insert_many(dealist, ordered=False)
except Exception as e:
pass
|
[
"存储order_handler的deal_status\n\n Arguments:\n dealist {[dataframe]} -- [description]\n\n Keyword Arguments:\n client {[type]} -- [description] (default: {DATABASE})\n "
] |
Please provide a description of the function:def QA_SU_save_order_queue(order_queue, client=DATABASE):
collection = client.order_queue
collection.create_index(
[('account_cookie',
ASCENDING),
('order_id',
ASCENDING)],
unique=True
)
for order in order_queue.values():
order_json = order.to_dict()
try:
collection.update_one(
{
'account_cookie': order_json.get('account_cookie'),
'order_id': order_json.get('order_id')
},
{'$set': order_json},
upsert=True
)
except Exception as e:
print(e)
|
[
"增量存储order_queue\n\n Arguments:\n order_queue {[type]} -- [description]\n\n Keyword Arguments:\n client {[type]} -- [description] (default: {DATABASE})\n "
] |
Please provide a description of the function:def SMA(Series, N, M=1):
ret = []
i = 1
length = len(Series)
# skip any leading NaN values in the series
while i < length:
if np.isnan(Series.iloc[i]):
i += 1
else:
break
preY = Series.iloc[i] # Y'
ret.append(preY)
while i < length:
Y = (M * Series.iloc[i] + (N - M) * preY) / float(N)
ret.append(Y)
preY = Y
i += 1
return pd.Series(ret, index=Series.tail(len(ret)).index)
|
[
"\n 威廉SMA算法\n\n 本次修正主要是对于返回值的优化,现在的返回值会带上原先输入的索引index\n 2018/5/3\n @yutiansut\n "
] |
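A quick usage sketch of the recursion Y = (M*X + (N-M)*Y') / N with made-up prices, assuming the SMA defined above:

import pandas as pd
close = pd.Series([10.0, 10.5, 10.2, 10.8])
print(SMA(close, 3, 1))
# each step applies Y = (1 * X + 2 * Y_prev) / 3, e.g. the third value is (10.2 + 2 * 10.5) / 3 = 10.4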
Please provide a description of the function:def CROSS(A, B):
var = np.where(A < B, 1, 0)
return (pd.Series(var, index=A.index).diff() < 0).apply(int)
|
[
"A<B then A>B A上穿B B下穿A\n\n Arguments:\n A {[type]} -- [description]\n B {[type]} -- [description]\n\n Returns:\n [type] -- [description]\n "
] |
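Usage sketch: detect the bar where a fast line crosses above a slow one (the series values are made up):

import pandas as pd
fast = pd.Series([9.8, 9.9, 10.1, 10.3])
slow = pd.Series([10.0, 10.0, 10.0, 10.0])
print(CROSS(fast, slow).tolist()) # [0, 0, 1, 0] -> the upward cross happens on the third bar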
Please provide a description of the function:def COUNT(COND, N):
return pd.Series(np.where(COND, 1, 0), index=COND.index).rolling(N).sum()
|
[
"\n 2018/05/23 修改\n\n 参考https://github.com/QUANTAXIS/QUANTAXIS/issues/429\n\n 现在返回的是series\n "
] |
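Usage sketch: how many of the last 3 bars satisfied a condition (the values are made up):

import pandas as pd
cond = pd.Series([True, False, True, True])
print(COUNT(cond, 3).tolist()) # [nan, nan, 2.0, 2.0]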
Please provide a description of the function:def LAST(COND, N1, N2):
N2 = 1 if N2 == 0 else N2
assert N2 > 0
assert N1 > N2
return COND.iloc[-N1:-N2].all()
|
[
"表达持续性\n 从前N1日到前N2日一直满足COND条件\n\n Arguments:\n COND {[type]} -- [description]\n N1 {[type]} -- [description]\n N2 {[type]} -- [description]\n "
] |
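Usage sketch: check that the condition held from 4 bars ago through 2 bars ago (the series is made up):

import pandas as pd
cond = pd.Series([False, True, True, True, False])
print(LAST(cond, 4, 2)) # True, because cond.iloc[-4:-2] is all True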
Please provide a description of the function:def AVEDEV(Series, N):
return Series.rolling(N).apply(lambda x: (np.abs(x - x.mean())).mean(), raw=True)
|
[
"\n 平均绝对偏差 mean absolute deviation\n 修正: 2018-05-25 \n\n 之前用mad的计算模式依然返回的是单值\n "
] |
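Usage sketch of the 3-period mean absolute deviation on made-up values:

import pandas as pd
s = pd.Series([1.0, 2.0, 3.0, 6.0])
print(AVEDEV(s, 3).tolist()) # [nan, nan, 0.67, 1.56] (rounded)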
Please provide a description of the function:def MACD(Series, FAST, SLOW, MID):
EMAFAST = EMA(Series, FAST)
EMASLOW = EMA(Series, SLOW)
DIFF = EMAFAST - EMASLOW
DEA = EMA(DIFF, MID)
MACD = (DIFF - DEA) * 2
DICT = {'DIFF': DIFF, 'DEA': DEA, 'MACD': MACD}
VAR = pd.DataFrame(DICT)
return VAR
|
[
"macd指标 仅适用于Series\n 对于DATAFRAME的应用请使用QA_indicator_macd\n "
] |
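Usage sketch with the conventional 12/26/9 parameters, assuming the EMA helper from the same module; the prices are synthetic:

import pandas as pd
close = pd.Series(range(1, 61), dtype='float64')
result = MACD(close, 12, 26, 9)
print(result[['DIFF', 'DEA', 'MACD']].tail(1)) # DIFF is positive for a steadily rising series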
Please provide a description of the function:def BBI(Series, N1, N2, N3, N4):
'Bull and Bear Index (average of four moving averages)'
bbi = (MA(Series, N1) + MA(Series, N2) +
MA(Series, N3) + MA(Series, N4)) / 4
DICT = {'BBI': bbi}
VAR = pd.DataFrame(DICT)
return VAR
|
[] |
Please provide a description of the function:def BARLAST(cond, yes=True):
if isinstance(cond.index, pd.MultiIndex):
return len(cond)-cond.index.levels[0].tolist().index(cond[cond != yes].index[-1][0])-1
elif isinstance(cond.index, pd.DatetimeIndex):
return len(cond)-cond.index.tolist().index(cond[cond != yes].index[-1])-1
|
[
"支持MultiIndex的cond和DateTimeIndex的cond\n 条件成立 yes= True 或者 yes=1 根据不同的指标自己定\n\n Arguments:\n cond {[type]} -- [description]\n "
] |
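Usage sketch with a DatetimeIndex; per the implementation above, the result counts the bars since the condition was last False, i.e. the length of the current True streak:

import pandas as pd
cond = pd.Series([True, False, True, True, True],
                 index=pd.date_range('2018-05-01', periods=5))
print(BARLAST(cond)) # 3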
Please provide a description of the function:def get_today_all(output='pd'):
data = []
today = str(datetime.date.today())
codes = QA_fetch_get_stock_list('stock').code.tolist()
bestip = select_best_ip()['stock']
for code in codes:
try:
l = QA_fetch_get_stock_day(
code, today, today, '00', ip=bestip)
except:
bestip = select_best_ip()['stock']
l = QA_fetch_get_stock_day(
code, today, today, '00', ip=bestip)
if l is not None:
data.append(l)
res = pd.concat(data)
if output in ['pd']:
return res
elif output in ['QAD']:
return QA_DataStruct_Stock_day(res.set_index(['date', 'code'], drop=False))
|
[
"today all\n\n Returns:\n [type] -- [description]\n "
] |
Please provide a description of the function:def QA_SU_save_stock_day(client=DATABASE, ui_log=None, ui_progress=None):
'''
save stock_day
Save daily stock bars to MongoDB.
:param client:
:param ui_log: used by the Qt GUI
:param ui_progress: used by the Qt GUI
'''
stock_list = QA_fetch_get_stock_list().code.unique().tolist()
coll_stock_day = client.stock_day
coll_stock_day.create_index(
[("code",
pymongo.ASCENDING),
("date_stamp",
pymongo.ASCENDING)]
)
err = []
# saving results
def __gen_param(stock_list, coll_stock_day, ip_list=[]):
results = []
count = len(ip_list)
total = len(stock_list)
for item in range(len(stock_list)):
try:
code = stock_list[item]
QA_util_log_info(
'##JOB01 Now Saving STOCK_DAY==== {}'.format(str(code)),
ui_log
)
# first, check whether the database already holds data for this code
search_cond = {'code': str(code)[0:6]}
ref = coll_stock_day.find(search_cond)
end_date = str(now_time())[0:10]
ref_count = coll_stock_day.count_documents(search_cond)
# the database already has data for this code, so do an incremental update
# this guard exists because newly listed stocks have no rows yet, which would otherwise produce a negative index
if ref_count > 0:
# resume updating from the last stored date
start_date = ref[ref_count - 1]['date']
# print("ref[ref.count() - 1]['date'] {} {}".format(ref.count(), coll_stock_day.count_documents({'code': str(code)[0:6]})))
else:
# no data for this code yet: download the full history starting from 1990-01-01
start_date = '1990-01-01'
QA_util_log_info(
'UPDATE_STOCK_DAY \n Trying updating {} from {} to {}'
.format(code,
start_date,
end_date),
ui_log
)
if start_date != end_date:
# codes that are already up to date (start_date == end_date) are skipped
results.extend([(code, start_date, end_date, '00', 'day', ip_list[item % count]['ip'],
ip_list[item % count]['port'], item, total, ui_log, ui_progress)])
except Exception as error0:
print('Exception:{}'.format(error0))
err.append(code)
return results
ips = get_ip_list_by_multi_process_ping(stock_ip_list, _type='stock')[:cpu_count() * 2 + 1]
param = __gen_param(stock_list, coll_stock_day, ips)
ps = QA_SU_save_stock_day_parallelism(processes=cpu_count() if len(ips) >= cpu_count() else len(ips),
client=client, ui_log=ui_log)
ps.add(do_saving_work, param)
ps.run()
if len(err) < 1:
QA_util_log_info('SUCCESS save stock day ^_^', ui_log)
else:
QA_util_log_info('ERROR CODE \n ', ui_log)
QA_util_log_info(err, ui_log)
|
[] |
Please provide a description of the function:def QA_user_sign_in(username, password):
#user = QA_User(name= name, password=password)
cursor = DATABASE.user.find_one(
{'username': username, 'password': password})
if cursor is None:
QA_util_log_info('SOMETHING WRONG')
return False
else:
return True
|
[
"用户登陆\n 不使用 QAUSER库\n 只返回 TRUE/FALSE\n "
] |