id (string, lengths 1–7) | text (string, lengths 6–1.03M) | dataset_id (string, 1 class)
---|---|---
3266262
|
<reponame>niyiwei/python-study
def countWords(fileName):
try:
with open(fileName,"r") as fObj:
contents = fObj.read()
except FileNotFoundError:
# the pass statement can be used in a code block to make Python do nothing
pass
# msg = "Sorry, the file "+ fileName + " does not exist.";
# print(msg);
else:
# count roughly how many words the file contains
words = contents.split()
numWords = len(words)
print("The file "+ fileName + " has about "+ str(numWords)+" words.")
# fileName = "alice.txt"
# countWords(fileName)
fileNames = ['alice.txt', 'siddhartha.txt', 'moby_dick.txt', 'little_woman.txt']
for file in fileNames:
countWords(file)
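# Note: the four .txt files listed above are assumed to exist in the working
# directory; a missing file is silently skipped because of the bare `pass`
# in the except block of countWords().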
|
StarcoderdataPython
|
11821
|
<reponame>Lisa-pa/SAMAE
"""Standard test images.
"""
import os
from skimage.io import imread
data_dir = os.path.abspath(os.path.dirname(__file__))
__all__ = ['data_dir', 'circle', 'skmuscimg']
def _load(f, as_gray=False):
"""Load an image file located in the data directory.
Parameters
----------
f : string
File name.
as_gray : bool, optional
Whether to convert the image to grayscale.
Returns
-------
img : ndarray
Image loaded from ``data_dir``.
"""
# the original scikit-image code lazy-imports io here because scanning all
# the backends is slow; this module imports `imread` at the top instead
return imread(f, as_gray=as_gray)
def circle():
"""Synthetic image of a circle
Returns
-------
circle : (xdim, ydim) bool ndarray
Circle image.
"""
return _load(os.path.join(data_dir, "circle.bmp"))
def skmuscimg():
"""Cropped US image of a musculoskeletal muscle
"""
return _load(os.path.join(data_dir, "skmuscle.jpg"))
def panoimg():
"""Panoramic US image of a musculoskeletal muscle
"""
return _load(os.path.join(data_dir, "panoramic_echo.jpg"))
def simpleimg():
"""Simple US image of a musculoskeletal muscle
"""
return _load(os.path.join(data_dir, "simple_echo.jpg"))
def downloadFromDropbox(tok, path2file):
"""Download an image from a Dropbox account.
Args:
tok (string): access token that connects to the wanted
app in Dropbox account
path2file (string): Path of the file to download, in the
app corresponding to the above token.
Output:
image (numpy.ndarray): 3-channel color image, with
coefficients' type == uint8
Example:
1) Register a new app in the App Console of your Dropbox
account. Set up parameters as you want.
2) In Dropbox>Applications>MyApp, import your data.
3) In the settings page of MyApp, generate a token and copy it.
It should look like a random string of letters and figures,
as below. (!!!This access token can be used to access your
account via the API. Don’t share your access token with anyone!!!)
> token = '<KEY>' //token not available anymore
> path = '/cropped_20181002_153426_image.jpg'
> dt = downloadFromDropbox(token, path);
"""
import dropbox
import numpy as np
import cv2
dbx = dropbox.Dropbox(tok)
try:
metadata, file = dbx.files_download(path2file)
except dropbox.exceptions.HttpError as err:
print('*** HTTP error', err)
return None
data = np.frombuffer(file.content, np.uint8)
image = cv2.imdecode(data, 1)
return image
|
StarcoderdataPython
|
146019
|
# Write your frequency_dictionary function here:
def frequency_dictionary(words):
freqs = {}
for word in words:
if word not in freqs:
freqs[word] = 0
freqs[word] += 1
return freqs
# Uncomment these function calls to test your function:
print(frequency_dictionary(["apple", "apple", "cat", 1]))
# should print {"apple":2, "cat":1, 1:1}
print(frequency_dictionary([0,0,0,0,0]))
# should print {0:5}
|
StarcoderdataPython
|
1785538
|
<reponame>alitariqbsc06/mmclassification
# model settings
model = dict(
type='ImageClassifier',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(3, ),
style='pytorch'),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='MultiLabelLinearClsHead',
num_classes=3,
in_channels=2048,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0, use_sigmoid=True),
# commented-out alternative: a list combining two losses, plus a topk setting
# loss=[
#     dict(type='CrossEntropyLoss', loss_weight=1.0, use_sigmoid=True),
#     dict(type='LabelSmoothLoss',
#          loss_weight=1.0,
#          label_smooth_val=0.1,
#          num_classes=3, mode='classy_vision'),
# ],
# topk=(1, 5),
),
train_cfg=dict(
augments=[
dict(type='BatchMixup', alpha=0.2, num_classes=3,prob=.5),
dict(type='BatchCutMix', alpha=1.0, num_classes=3, prob=.5),
]),
)
|
StarcoderdataPython
|
3314628
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ---
# Flask app factory from:
# https://github.com/mattupstate/overholt/blob/master/overholt/factory.py
# ---
#
# MIT License
#
# Copyright (C) 2013 by <NAME>
# Copyright (C) 2014 by <NAME>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from flask import Flask
from os import environ
from .helpers import project_name, register_blueprints, slugify
def environment():
default_env = 'dev'
env_variable = '_'.join([slugify(project_name, '_').upper(), 'ENV'])
return environ.get(env_variable, default_env).lower()
def create_app(package_name, package_path, settings_override=None):
"""Returns a :class:`Flask` application instance configured with common
functionality for the application platform.
:param package_name: application package name
:param package_path: application package path
:param settings_override: a dictionary of settings to override
"""
app = Flask(package_name, instance_relative_config=True)
def env_settings(module):
return '.'.join([module, environment().capitalize()])
app.config.from_object(env_settings('app.settings'))
try: app.config.from_object(env_settings('instance.settings'))
except ImportError: pass
app.config.from_object(settings_override)
register_blueprints(app, package_name, package_path)
return app
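# --- Illustrative usage sketch (the `myapp.api` package is a placeholder) ---
# from myapp import api
# app = create_app(api.__name__, api.__path__)
# app.run()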
|
StarcoderdataPython
|
114925
|
<gh_stars>0
import pandas as pd
import os
from scipy.sparse import csr_matrix
from scipy.sparse import save_npz
from sklearn.neighbors import NearestNeighbors
import numpy as np
from sklearn.externals import joblib
class initalizer:
def __init__(self, items_csv_path="../data/new_movies.csv", ratings_csv_path="../data/ratings.csv"):
self.path_movies = items_csv_path
self.path_ratings = ratings_csv_path
self.item_user_matrix_sparse, self.hashmap = self._prep_data()
self.model = NearestNeighbors(n_neighbors=20, algorithm='brute', metric='cosine')
self.model.fit(self.item_user_matrix_sparse)
def _prep_data(self):
#reading the data
path = os.path.join(self.path_movies)
usecols = ['movieId', 'tmdbId', 'title']
dtype = {"movieId": 'int32', 'tmdbId': 'int32', "title": "str"}
df_items = pd.read_csv(path, usecols=usecols, dtype=dtype)
path = os.path.join(self.path_ratings)
usecols = ['userId', 'movieId', 'rating']
dtype = {"userId": 'int32', 'movieId': 'int32', "rating": "float32"}
df_ratings = pd.read_csv(path, usecols=usecols, dtype=dtype)
#pivot and create item-user matrix
item_user_matrix = df_ratings.pivot(index='movieId', columns='userId', values='rating')
#filling na's with 0's
item_user_matrix = item_user_matrix.fillna(0)
# transform the matrix into a scipy sparse matrix to minimize the negative impact
# on calculation performance
item_user_matrix_sparse = csr_matrix(item_user_matrix.values)
# create a mapper from the movie's tmdbId to its row index in the item_user matrix
hashmap = {
item: i for i, item in
enumerate(list(df_items.set_index('movieId').loc[item_user_matrix.index].tmdbId))
}
print(list(df_items.set_index('movieId').loc[item_user_matrix.index].tmdbId))
print(df_items.set_index('movieId').loc[item_user_matrix.index].tmdbId)
print(hashmap)
print(hashmap.keys())
print(hashmap[862.0])
return item_user_matrix_sparse, hashmap
def _save_data(self):
"""
Save data to the disk
1. hashmap
2. scipy item-user sparse matrix
3. trained model
:return: none
"""
np.save('./hashmap.npy', self.hashmap)
save_npz("./matrix.npz", self.item_user_matrix_sparse)
joblib.dump(self.model, './model.joblib')
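# --- Illustrative usage sketch (CSV paths are the defaults shown above) ---
# engine = initalizer(items_csv_path="../data/new_movies.csv",
#                     ratings_csv_path="../data/ratings.csv")
# engine._save_data()  # writes hashmap.npy, matrix.npz and model.joblib to disk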
|
StarcoderdataPython
|
3335537
|
<gh_stars>0
# Bot written to play the game using what seems to be the best strategy according to the authors.
import MLModifiedSpaceShooter as game
import numpy as np
def playGame():
game_state = game.GameState()
todo = 0
counter = 0
while True:
if counter > 12:
counter = 0
if todo == 0:
todo = 1
else:
todo = 0
counter = counter + 1
# shoot
action = np.zeros(4)
action[2] = 1
game_state.frame_step(action)
# shoot
action = np.zeros(4)
action[2] = 1
game_state.frame_step(action)
# move
action = np.zeros(4)
action[todo] = 1
game_state.frame_step(action)
# shoot
action = np.zeros(4)
action[2] = 1
game_state.frame_step(action)
# shoot
action = np.zeros(4)
action[2] = 1
game_state.frame_step(action)
def main():
playGame()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1796046
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc.
# http://code.google.com/p/protobuf/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This implementation is modified from google's original Protobuf implementation.
The original author is: <EMAIL> (<NAME>).
Modified by <EMAIL>(onesuper).
"""
import sys
import array
import struct
from . import errors
from . import wire_format
class OutputStream(object):
"""Contains all logic for writing bits, and ToString() to get the result."""
def __init__(self):
self._buffer = array.array('B')
if sys.version_info < (3, 3):
def append_raw_bytes(self, raw_bytes):
"""Appends raw_bytes to our internal buffer."""
self._buffer.fromstring(raw_bytes)
else:
def append_raw_bytes(self, raw_bytes):
"""Appends raw_bytes to our internal buffer."""
self._buffer.frombytes(raw_bytes)
def append_little_endian32(self, unsigned_value):
"""Appends an unsigned 32-bit integer to the internal buffer,
in little-endian byte order.
"""
if not 0 <= unsigned_value <= wire_format.UINT32_MAX:
raise errors.EncodeError(
'Unsigned 32-bit out of range: %d' % unsigned_value)
self.append_raw_bytes(struct.pack(
wire_format.FORMAT_UINT32_LITTLE_ENDIAN, unsigned_value))
def append_little_endian64(self, unsigned_value):
"""Appends an unsigned 64-bit integer to the internal buffer,
in little-endian byte order.
"""
if not 0 <= unsigned_value <= wire_format.UINT64_MAX:
raise errors.EncodeError(
'Unsigned 64-bit out of range: %d' % unsigned_value)
self.append_raw_bytes(struct.pack(
wire_format.FORMAT_UINT64_LITTLE_ENDIAN, unsigned_value))
def append_varint32(self, value):
"""Appends a signed 32-bit integer to the internal buffer,
encoded as a varint. (Note that a negative varint32 will
always require 10 bytes of space.)
"""
if not wire_format.INT32_MIN <= value <= wire_format.INT32_MAX:
raise errors.EncodeError('Value out of range: %d' % value)
self.append_varint64(value)
def append_var_uint32(self, value):
"""Appends an unsigned 32-bit integer to the internal buffer,
encoded as a varint.
"""
if not 0 <= value <= wire_format.UINT32_MAX:
raise errors.EncodeError('Value out of range: %d' % value)
self.append_var_uint64(value)
def append_varint64(self, value):
"""Appends a signed 64-bit integer to the internal buffer,
encoded as a varint.
"""
if not wire_format.INT64_MIN <= value <= wire_format.INT64_MAX:
raise errors.EncodeError('Value out of range: %d' % value)
if value < 0:
value += (1 << 64)
self.append_var_uint64(value)
def append_var_uint64(self, unsigned_value):
"""Appends an unsigned 64-bit integer to the internal buffer,
encoded as a varint.
"""
if not 0 <= unsigned_value <= wire_format.UINT64_MAX:
raise errors.EncodeError('Value out of range: %d' % unsigned_value)
while True:
bits = unsigned_value & 0x7f
unsigned_value >>= 7
if unsigned_value:
bits |= 0x80
self._buffer.append(bits)
if not unsigned_value:
break
if sys.version_info < (3, 3):
def tostring(self):
"""Returns a string containing the bytes in our internal buffer."""
return self._buffer.tostring()
else:
def tostring(self):
"""Returns a string containing the bytes in our internal buffer."""
return self._buffer.tobytes()
def __len__(self):
return len(self._buffer)
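# --- Illustrative usage sketch (values are arbitrary) ---
# stream = OutputStream()
# stream.append_var_uint32(300)   # varint encoding: two bytes, 0xAC 0x02
# stream.append_varint64(-1)      # a negative varint always takes 10 bytes
# assert len(stream) == 12
# encoded = stream.tostring()     # bytes of everything appended so far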
|
StarcoderdataPython
|
3350939
|
import torch.nn.functional as F
from collections import OrderedDict
import numpy as np
class InputPadder:
"""Pads images such that dimensions are divisible by 8 , from RAFT."""
def __init__(self, dims, mode='sintel'):
self.ht, self.wd = dims[-2:]
pad_ht = (((self.ht // 8) + 1) * 8 - self.ht) % 8
pad_wd = (((self.wd // 8) + 1) * 8 - self.wd) % 8
if mode == 'sintel':
self._pad = [
pad_wd // 2, pad_wd - pad_wd // 2, pad_ht // 2,
pad_ht - pad_ht // 2
]
else:
self._pad = [pad_wd // 2, pad_wd - pad_wd // 2, 0, pad_ht]
def pad(self, *inputs):
return [F.pad(x, self._pad, mode='replicate') for x in inputs]
def unpad(self, x):
ht, wd = x.shape[-2:]
c = [self._pad[2], ht - self._pad[3], self._pad[0], wd - self._pad[1]]
return x[..., c[0]:c[1], c[2]:c[3]]
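# --- Illustrative usage sketch (assumes torch is installed; sizes are arbitrary) ---
# import torch
# image = torch.zeros(1, 3, 436, 1024)
# padder = InputPadder(image.shape, mode='sintel')
# padded, = padder.pad(image)       # height padded from 436 to 440 (divisible by 8)
# restored = padder.unpad(padded)   # back to the original 436 x 1024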
def fill_order_keys(key, fill_value='_model.', fill_position=7):
"""fill order_dict keys in checkpoint, by Hao."""
return key[0:fill_position] + fill_value + key[fill_position:]
def fix_order_keys(key, delete_value=6):
"""fix order_dict keys in checkpoint, by Hao."""
return key[0:delete_value] + key[13:]
def fix_read_order_keys(key, start_value=7):
"""fix reading restored ckpt order_dict keys, by Hao."""
return key[start_value:]
# CARLA semantic labels
camvid_colors = OrderedDict([
("Unlabeled", np.array([0, 0, 0], dtype=np.uint8)),
("Building", np.array([70, 70, 70], dtype=np.uint8)),
("Fence", np.array([100, 40, 40], dtype=np.uint8)),
("Other", np.array([55, 90, 80], dtype=np.uint8)),
("Pedestrian", np.array([220, 20, 60], dtype=np.uint8)),
("Pole", np.array([153, 153, 153], dtype=np.uint8)),
("RoadLine", np.array([157, 234, 50], dtype=np.uint8)),
("Road", np.array([128, 64, 128], dtype=np.uint8)),
("SideWalk", np.array([244, 35, 232], dtype=np.uint8)),
("Vegetation", np.array([107, 142, 35], dtype=np.uint8)),
("Vehicles", np.array([0, 0, 142], dtype=np.uint8)),
("Wall", np.array([102, 102, 156], dtype=np.uint8)),
("TrafficSign", np.array([220, 220, 0], dtype=np.uint8)),
("Sky", np.array([70, 130, 180], dtype=np.uint8)),
("Ground", np.array([81, 0, 81], dtype=np.uint8)),
("Bridge", np.array([150, 100, 100], dtype=np.uint8)),
("RailTrack", np.array([230, 150, 140], dtype=np.uint8)),
("GroundRail", np.array([180, 165, 180], dtype=np.uint8)),
("TrafficLight", np.array([250, 170, 30], dtype=np.uint8)),
("Static", np.array([110, 190, 160], dtype=np.uint8)),
("Dynamic", np.array([170, 120, 50], dtype=np.uint8)),
("Water", np.array([45, 60, 150], dtype=np.uint8)),
("Terrain", np.array([145, 170, 100], dtype=np.uint8)),
])
def convert_label_to_grayscale(im):
out = (np.ones(im.shape[:2]) * 255).astype(np.uint8)
for gray_val, (label, rgb) in enumerate(camvid_colors.items()):
match_pxls = np.where((im == np.asarray(rgb)).sum(-1) == 3)
out[match_pxls] = gray_val
assert (out !=
255).all(), "rounding errors or missing classes in camvid_colors"
return out.astype(np.uint8)
def convert_label_to_rgb(im):
out = np.zeros((im.shape[0], im.shape[1], 3)).astype(np.uint8)
for gray_val, (label, rgb) in enumerate(camvid_colors.items()):
match_x, match_y = np.where(im == gray_val)
out[match_x, match_y] = rgb
return out.astype(np.uint8)
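# --- Illustrative round-trip sketch (array contents are arbitrary) ---
# rgb_label = np.tile(camvid_colors["Road"], (4, 4, 1))   # 4x4 all-"Road" label image
# gray = convert_label_to_grayscale(rgb_label)            # class indices (here all 7)
# rgb_back = convert_label_to_rgb(gray)                   # recovers the original colors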
|
StarcoderdataPython
|
1642264
|
<reponame>philippjfr/idom-bokeh
from .panel import IDOM
__author__ = "idom-team"
__version__ = "0.0.1" # DO NOT MODIFY
__all__ = ["IDOM"]
|
StarcoderdataPython
|
1689153
|
import requests
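# chat_id and token are assumed to be defined elsewhere before this snippet runs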
data = {
"chat_id": chat_id,
"chat_keypad_type": "Removed",
}
url = f'https://messengerg2b1.iranlms.ir/v3/{token}/editChatKeypad'
response = requests.post(url, data=data)
print(response.text)
|
StarcoderdataPython
|
115375
|
<filename>gotime/maps.py
import requests
import time
class BaseMapsApi(object):
def __init__(self):
pass
def format_data(self, key, origin, destination, model=None, now_secs=None):
data = {'key': key,
'origin': origin.replace(' ', '+'),
'destination': destination.replace(' ', '+')}
if model is not None:
data['model'] = model
if now_secs is not None:
data['now_secs'] = now_secs
return data
class GoogleMapsApi(BaseMapsApi):
config_name = 'google'
# See here for API info:
# https://developers.google.com/maps/documentation/directions/intro#traffic-model
url = 'https://maps.googleapis.com/maps/api/directions/json?'
url += 'origin={origin}&destination={destination}'
url += '&key={key}&mode=driving&traffic_model={model}'
url += '&departure_time={now_secs}'
def __init__(self, key=None):
super().__init__()
if key is None:
self.key = 'UNKNOWN'
else:
self.key = key
def query(self, origin, destination):
now_secs = str(int(time.time()))
data = self.format_data(self.key, origin, destination,
TrafficModel.BEST, now_secs)
r = requests.get(self.url.format(**data))
if r.status_code != 200:
return None, None
legs = r.json()['routes'][0]['legs'][0]
duration_in_traffic = legs['duration_in_traffic']['value']
seconds = int(duration_in_traffic)
steps = len(r.json()['routes'][0]['legs'][0]['steps'])
return seconds, steps
def get_type(self):
return self.config_name
class TrafficModel:
"""
Enum-like class showing the Google Traffic model
"""
BEST = "best_guess"
|
StarcoderdataPython
|
1655217
|
<gh_stars>1-10
import gitbigfile
try:
from setuptools import setup
kw = {
'install_requires': 'docopt == 0.5.0',
}
except ImportError:
from distutils.core import setup
kw = {}
setup(
name='git-bigfile',
version=gitbigfile.__version__,
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
description='git-bigfile allows you to use Git with large files without storing the file in Git itself',
long_description=open('README.rst').read(),
url='https://github.com/beenje/git-bigfile',
packages=['gitbigfile'],
scripts=['bin/git-bigfile'],
classifiers=['Development Status :: 4 - Beta',
'Topic :: Software Development',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Programming Language :: Python'],
**kw
)
|
StarcoderdataPython
|
3368152
|
<reponame>techthiyanes/textacy
import pytest
import textacy
import textacy.text_stats
@pytest.fixture(scope="module")
def doc(lang_en):
text = "I write code. They wrote books."
return textacy.make_spacy_doc(text, lang=lang_en)
def test_morph(doc):
exp = {
"Case": {"Nom": 2},
"Number": {"Sing": 2, "Plur": 2},
"Person": {"1": 1, "3": 1},
"PronType": {"Prs": 2},
"Tense": {"Pres": 1, "Past": 1},
"VerbForm": {"Fin": 2},
"PunctType": {"Peri": 2},
}
assert textacy.text_stats.counts.morph(doc) == exp
def test_tag(doc):
exp = {"PRP": 2, "VBP": 1, "NN": 1, ".": 2, "VBD": 1, "NNS": 1}
assert textacy.text_stats.counts.tag(doc) == exp
def test_pos(doc):
exp = {"PRON": 2, "VERB": 2, "NOUN": 2, "PUNCT": 2}
assert textacy.text_stats.counts.pos(doc) == exp
def test_dep(doc):
exp = {"nsubj": 2, "ROOT": 2, "dobj": 2, "punct": 2}
assert textacy.text_stats.counts.dep(doc) == exp
|
StarcoderdataPython
|
11952
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Portable library for registering and publishing executions."""
import copy
import os
from typing import List, Mapping, MutableMapping, Optional, Sequence, cast
from absl import logging
from tfx import types
from tfx.orchestration import metadata
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import execution_result_pb2
from ml_metadata.proto import metadata_store_pb2
def _check_validity(new_artifact: metadata_store_pb2.Artifact,
original_artifact: types.Artifact,
has_multiple_artifacts: bool) -> None:
"""Check the validity of new artifact against the original artifact."""
if new_artifact.type_id != original_artifact.type_id:
raise RuntimeError('Executor output should not change artifact type.')
if has_multiple_artifacts:
# If there are multiple artifacts in the executor output, their URIs should
# be a direct sub-dir of the system generated URI.
if os.path.dirname(new_artifact.uri) != original_artifact.uri:
raise RuntimeError(
'When there are multiple artifacts to publish, their URIs '
'should be direct sub-directories of the URI of the system generated '
'artifact.')
else:
# If there is only one output artifact, its URI should not be changed
if new_artifact.uri != original_artifact.uri:
# TODO(b/175426744): Data Binder will modify the uri.
logging.warning(
'When there is one artifact to publish, the URI of it should be '
'identical to the URI of system generated artifact.')
def publish_cached_execution(
metadata_handler: metadata.Metadata,
contexts: Sequence[metadata_store_pb2.Context],
execution_id: int,
output_artifacts: Optional[MutableMapping[str,
Sequence[types.Artifact]]] = None,
) -> None:
"""Marks an existing execution as using cached outputs from a previous execution.
Args:
metadata_handler: A handler to access MLMD.
contexts: MLMD contexts to associate with the execution.
execution_id: The id of the execution.
output_artifacts: Output artifacts of the execution. Each artifact will be
linked with the execution through an event with type OUTPUT.
"""
[execution] = metadata_handler.store.get_executions_by_id([execution_id])
execution.last_known_state = metadata_store_pb2.Execution.CACHED
execution_lib.put_execution(
metadata_handler,
execution,
contexts,
input_artifacts=None,
output_artifacts=output_artifacts)
def _set_execution_result_if_not_empty(
executor_output: Optional[execution_result_pb2.ExecutorOutput],
execution: metadata_store_pb2.Execution) -> bool:
"""Sets execution result as a custom property of the execution."""
if executor_output and (executor_output.execution_result.result_message or
executor_output.execution_result.metadata_details or
executor_output.execution_result.code):
# TODO(b/190001754): Consider either switching to base64 encoding or using
# a proto descriptor pool to circumvent TypeError which may be raised when
# converting embedded `Any` protos.
try:
execution_lib.set_execution_result(executor_output.execution_result,
execution)
except TypeError:
logging.exception(
'Skipped setting execution_result as custom property of the '
'execution due to error')
def publish_succeeded_execution(
metadata_handler: metadata.Metadata,
execution_id: int,
contexts: Sequence[metadata_store_pb2.Context],
output_artifacts: Optional[MutableMapping[str,
Sequence[types.Artifact]]] = None,
executor_output: Optional[execution_result_pb2.ExecutorOutput] = None
) -> Optional[MutableMapping[str, List[types.Artifact]]]:
"""Marks an existing execution as success.
Also publishes the output artifacts produced by the execution. This method
will also merge the executor-produced info into system-generated output
artifacts. The `last_known_state` of the execution will be changed to
`COMPLETE` and the output artifacts will be marked as `LIVE`.
Args:
metadata_handler: A handler to access MLMD.
execution_id: The id of the execution to mark successful.
contexts: MLMD contexts to associate with the execution.
output_artifacts: Output artifacts skeleton of the execution, generated by
the system. Each artifact will be linked with the execution through an
event with type OUTPUT.
executor_output: Executor outputs. `executor_output.output_artifacts` will
be used to update system-generated output artifacts passed in through
`output_artifacts` arg. There are three constraints to the update: 1. The
keys in `executor_output.output_artifacts` are expected to be a subset
of the system-generated output artifacts dict. 2. An update to a certain
key should contain all the artifacts under that key. 3. An update to an
artifact should not change the type of the artifact.
Returns:
The possibly updated output_artifacts; note that only outputs whose keys are in
executor_output will be updated and others will be left untouched. That said,
it can be partially updated.
Raises:
RuntimeError: if the executor output to an output channel is partial.
"""
output_artifacts = copy.deepcopy(output_artifacts) or {}
output_artifacts = cast(MutableMapping[str, List[types.Artifact]],
output_artifacts)
if executor_output:
if not set(executor_output.output_artifacts.keys()).issubset(
output_artifacts.keys()):
raise RuntimeError(
'Executor output %s contains more keys than output skeleton %s.' %
(executor_output, output_artifacts))
for key, artifact_list in output_artifacts.items():
if key not in executor_output.output_artifacts:
continue
updated_artifact_list = executor_output.output_artifacts[key].artifacts
# We assume the original output dict must include at least one output
# artifact and all artifacts in the list share the same type.
original_artifact = artifact_list[0]
# Update the artifact list with what's in the executor output
artifact_list.clear()
# TODO(b/175426744): revisit this:
# 1) Whether multiple outputs are needed or not after TFX components
# are upgraded.
# 2) If multiple outputs are needed and this is a common practice, should we
# use the driver to create the list of output artifacts instead
# of letting the executor create them.
for proto_artifact in updated_artifact_list:
_check_validity(proto_artifact, original_artifact,
len(updated_artifact_list) > 1)
python_artifact = types.Artifact(original_artifact.artifact_type)
python_artifact.set_mlmd_artifact(proto_artifact)
artifact_list.append(python_artifact)
# Marks output artifacts as LIVE.
for artifact_list in output_artifacts.values():
for artifact in artifact_list:
artifact.mlmd_artifact.state = metadata_store_pb2.Artifact.LIVE
[execution] = metadata_handler.store.get_executions_by_id([execution_id])
execution.last_known_state = metadata_store_pb2.Execution.COMPLETE
_set_execution_result_if_not_empty(executor_output, execution)
execution_lib.put_execution(
metadata_handler, execution, contexts, output_artifacts=output_artifacts)
return output_artifacts
def publish_failed_execution(
metadata_handler: metadata.Metadata,
contexts: Sequence[metadata_store_pb2.Context],
execution_id: int,
executor_output: Optional[execution_result_pb2.ExecutorOutput] = None
) -> None:
"""Marks an existing execution as failed.
Args:
metadata_handler: A handler to access MLMD.
contexts: MLMD contexts to associate with the execution.
execution_id: The id of the execution.
executor_output: The output of executor.
"""
[execution] = metadata_handler.store.get_executions_by_id([execution_id])
execution.last_known_state = metadata_store_pb2.Execution.FAILED
_set_execution_result_if_not_empty(executor_output, execution)
execution_lib.put_execution(metadata_handler, execution, contexts)
def publish_internal_execution(
metadata_handler: metadata.Metadata,
contexts: Sequence[metadata_store_pb2.Context],
execution_id: int,
output_artifacts: Optional[MutableMapping[str,
Sequence[types.Artifact]]] = None
) -> None:
"""Marks an exeisting execution as as success and links its output to an INTERNAL_OUTPUT event.
Args:
metadata_handler: A handler to access MLMD.
contexts: MLMD contexts to associate with the execution.
execution_id: The id of the execution.
output_artifacts: Output artifacts of the execution. Each artifact will be
linked with the execution through an event with type INTERNAL_OUTPUT.
"""
[execution] = metadata_handler.store.get_executions_by_id([execution_id])
execution.last_known_state = metadata_store_pb2.Execution.COMPLETE
execution_lib.put_execution(
metadata_handler,
execution,
contexts,
output_artifacts=output_artifacts,
output_event_type=metadata_store_pb2.Event.INTERNAL_OUTPUT)
def register_execution(
metadata_handler: metadata.Metadata,
execution_type: metadata_store_pb2.ExecutionType,
contexts: Sequence[metadata_store_pb2.Context],
input_artifacts: Optional[MutableMapping[str,
Sequence[types.Artifact]]] = None,
exec_properties: Optional[Mapping[str, types.Property]] = None,
) -> metadata_store_pb2.Execution:
"""Registers a new execution in MLMD.
Along with the execution:
- the input artifacts will be linked to the execution.
- the contexts will be linked to both the execution and its input artifacts.
Args:
metadata_handler: A handler to access MLMD.
execution_type: The type of the execution.
contexts: MLMD contexts to associate with the execution.
input_artifacts: Input artifacts of the execution. Each artifact will be
linked with the execution through an event.
exec_properties: Execution properties. Will be attached to the execution.
Returns:
An MLMD execution that is registered in MLMD, with id populated.
"""
execution = execution_lib.prepare_execution(
metadata_handler, execution_type, metadata_store_pb2.Execution.RUNNING,
exec_properties)
return execution_lib.put_execution(
metadata_handler, execution, contexts, input_artifacts=input_artifacts)
|
StarcoderdataPython
|
3363769
|
<reponame>MauroCL75/Gerald<gh_stars>0
"""
Introduction
============
Capture, document and manage database schemas.
This is the Schema module, it contains one useful class, Schema. This is a
super class which is then sub-classed for specific databases (eg.
OracleSchema, MySQLSchema, etc).
A schema is comprised of collections of tables, views, stored code objects,
triggers, sequences and other assorted 'objects'
This module is licensed under the BSD License (see LICENSE.txt)
This module requires Python 2.3 (and above) and a valid DB-API module
To do
=====
- change get_ddl method(s) to put different objects in different files like
Oracle Designer (one each for tables, constraints, views, code objects)
- Table and View dump methods rely on calc_precision function, we really
should do away with this
- One possible solution is to make Column a class and just implement a dump
method for that
Possible Future development
===========================
- Support the Jakarta Torque schema DTD
(http://db.apache.org/torque/schema-reference.html)
- Or possibly the Dewdrop DTD
(http://dewdrop.sourceforge.net/xmlns/database.xsd)
- Change the compare method for the Schema class. Perhaps using the difflib
library module?
- Change the to_xml methods to use ElementTree elements rather than strings
"""
__author__ = "<NAME> <<EMAIL>>"
__date__ = (2010, 4, 8)
__version__ = (0, 4, 1)
from decimal import Decimal
import os
import sys
from gerald.utilities.dburi import get_connection
from gerald.utilities.Log import get_log
if 'TMP' in os.environ:
LOG_DIRECTORY = os.environ['TMP']
elif 'HOME' in os.environ:
LOG_DIRECTORY = os.environ['HOME']
LOG_FILENAME = os.path.join(LOG_DIRECTORY, 'gerald.log')
LOG = get_log('gerald', LOG_FILENAME, 'INFO')
# LOG = get_log('gerald', LOG_FILENAME, 'INFO')
# LOG = get_log('gerald', LOG_FILENAME, 'DEBUG')
class Schema(object):
"""
A representation of a database schema.
A schema is a collection of objects within a database. It is a logical
grouping, physical implementation is independent of the schema.
This is an abstract class which shouldn't be used directly. It is designed
to be sub-classed in database specific modules.
These sub-classes will need to implement the _get_schema and __init__ methods
A schema will have the following attributes
- name. This will be the same as the connectionString
- api_version. To indicate when we change the API
- schema. A collection of objects which form this schema.
Private attributes
- _db. A database connection. Optional, need not be provided by sub-classes.
- _cursor. A cursor generated from _db
"""
def __init__(self, schema_name, connection_string=None, omit_error_objects=False):
"""
Initialise the schema.
@param schema_name: A name for this schema
@type schema_name: String
@param connection_string: If this is provided then we populate the
schema's attributes from the database it connects us to.
@type connection_string: String
@return: Success or failure
@rtype: Boolean
"""
self.name = schema_name
self.api_version = Decimal('1.1')
self.omit_error_objects = omit_error_objects
self.schema = {}
if connection_string:
# Connect to the db and suck out the data dictionary information
self.connect(connection_string)
self.schema = self._get_schema(self._cursor)
def connect(self, connection_string):
"Connect to a database and set _db and _cursor attributes"
LOG.debug('Connecting to %s' % self.name)
self._db = get_connection(connection_string)
self._cursor = self._db.cursor()
LOG.debug('Established connection to %s' % self.name)
def _get_schema(self, cursor):
"Place holder method to be implemented by child classes"
raise NotImplementedError
def dump(self, file_name=None, sort=None):
"""
Output this schema in a nice easy to read format to <file_name>. If a
<file_name> isn't provided then we return the stream.
We rely on each object to output its own details.
@param file_name: The name of a file to dump the output to
@type file_name: String
@param sort: If this is set the schema objects will be sorted by name
@type sort: Boolean
@return: Schema contents or, if file_name is specified, nothing
@rtype: String
"""
if file_name:
dump_file = open(file_name, 'w')
results = ["Schema: %s\n" % self.name]
objects = list(self.schema.keys())
if sort:
objects.sort()
for schema_object in objects:
results.append(self.schema[schema_object].dump())
if file_name:
dump_file.write('\n'.join(results))
dump_file.close()
else:
return '\n'.join(results)
def to_xml(self, file_name=None):
"""
Output this schema in XML format to <file_name>. If a <file_name> isn't
provided then we return the stream.
We rely on each object to produce its own XML fragment which are then
combined here.
@param file_name: The name of a file to dump the XML to
@type file_name: String
@return: Schema XML or, if file_name is specified, nothing
@rtype: String
"""
if file_name:
xml_file = open(file_name, 'w')
results = ['<schema name="%s">' % self.name]
for schema_object in list(self.schema.keys()):
results.append(self.schema[schema_object].to_xml())
results.append('</schema>')
if file_name:
xml_file.write('\n'.join(results))
xml_file.close()
else:
return '\n'.join(results)
def get_ddl(self, file_name=None):
"""
Output the DDL to create this schema to <file_name>. If a <file_name>
isn't provided then we return the stream.
We rely on each schema object to produce its own DDL statements which are
then combined here.
@param file_name: The name of a file to dump the DDL to
@type file_name: String
@return: Schema DDL or, if file_name is specified, nothing
@rtype: String
"""
results = []
for schema_object in list(self.schema.keys()):
results.append(self.schema[schema_object].get_ddl())
if file_name:
ddl_file = open(file_name, 'w')
ddl_file.write('\n'.join(results))
ddl_file.close()
else:
return '\n'.join(results)
def __cmp__(self, other_schema):
"""
Compare this schema with <other_schema>
@param other_schema: A schema to be compared to self
@type other_schema: An object of a class inherited from schema.Schema
@return: 0 if the two schemas are the same, otherwise we return 1
@rtype: Boolean, well Integer really
"""
# __cmp__ functions return -1 if we are less than schema
# 0 if we are the same as schema
# 1 if we are greater than schema
# If our 'compare' method returns anything there are differences
if self.compare(other_schema):
return 1
else:
return 0
def compare(self, other_schema):
"""
Calculate the differences between the current schema and <other_schema>.
@param other_schema: A schema to be compared to self
@type other_schema: An object of a class inherited from schema.Schema
@return: The differences between the two schemas
@rtype: String
"""
# I left a note here about difflib, but can't find it. Oh dear.
results = []
if not isinstance(other_schema, Schema):
results.append('We are not comparing two schemas')
else:
if list(self.schema.keys()) != list(other_schema.schema.keys()):
results.append('The schemas have different objects')
for schema_object in list(self.schema.keys()):
if schema_object in list(other_schema.schema.keys()):
if self.schema[schema_object].__class__ != other_schema.schema[schema_object].__class__:
results.append('%s is of different types' % schema_object)
results.append('in the two schemas')
if self.schema[schema_object] != other_schema.schema[schema_object]:
results.append('%s is different ' % schema_object)
results.append('in the two schemas')
else:
results.append('%s is missing ' % schema_object)
results.append('in the one schema')
return ' '.join(results)
def _set_unless_fail(self, schema, key, object_name, object_type, *args):
"""Try and create object_name in schema taking appropriate action on failure
This method is particularly useful if someone (say, Oracle) has broken
their own rules and created system objects that trip up Gerald. By
setting omit_error_objects to True when creating our schema we can avoid
blowing up when finding something that doesn't quite fulfil our
expectations.
Because we are maintaining compatibility with Python 2.5 we can't access
the exception instance so the log message won't necessarily be that
useful.
"""
if (not object_name.isupper()) and ('"' not in object_name):
# then this must be a chaotic evil case-sensitive object name
object_name = '"%s"' % object_name
try:
schema[key] = object_type(object_name, *args)
except AttributeError:
if self.omit_error_objects:
LOG.warning("Couldn't get details for %s" % (key, ))
else:
raise
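# --- Illustrative sketch of a minimal concrete sub-class (names are hypothetical) ---
# class MyDbSchema(Schema):
#     def _get_schema(self, cursor):
#         # query the data dictionary here and return a dict of schema objects
#         return {}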
class Table(object):
"""
A representation of a database table.
A table is made up of columns and will have indexes, triggers, constraints,
primary and foreign keys.
It may also have comments - although this is currently only available in Oracle
This is an abstract class which shouldn't be used. It is designed to be
sub-classed in database specific modules.
The sub-classes will need to implement the L{_get_table} and L{get_ddl}
methods
They will also need a class method called calc_precision, whose signature
will depend on the module
A table will have the following attributes
- name
- columns. A dictionary (keyed on column name) of column dictionaries.
These column dictionaries must have the following keys:
- sequence. The order of this column in the table. Integer
- name. Column name. Text
- type. Native data type, will vary by database. Text
- length. Maximum length of column. Integer
- nullable. Can this column contain NULL values? Boolean
These column dictionaries may have the following keys:
- precision. Maximum number of digits before the decimal point, only
valid for numeric columns. Integer
- scale. Maximum number of digits after the decimal point. Integer
- default. Default value to be inserted if this column is NULL on insert
- special. Only used by MySQL to indicate if a column has auto_increment
set. Boolean
- comment. Column comment. Text
- indexes. A dictionary (keyed on index name) of index dictionaries
These index dictionaries must have the following keys:
- type. Index type, usually one of 'Unique' or 'Non-Unique'. Text
- unique. Flag to indicate if index elements must be unique. Boolean
- columns. A sequence of column names in the index. Sequence
- constraints. A dictionary (keyed on constraint name) of constraint
dictionaries. These dictionaries must have the following keys:
- type. One of 'Primary', 'Foreign', 'Check' or 'Unique'. Text
- enabled. Is the constraint enabled? Boolean
- columns. A sequence of the column names in this constraint. Sequence
These constraint dictionaries may have the following keys:
- reftable. Reference table (only used for Foreign keys). Text
- refcolumns. List of reference columns (only used for Foreign keys).
Sequence
- triggers. A dictionary of trigger objects keyed on name
Optional attributes
- tablespace_name, or table_type (for MySQL)
- comments. Optional. A text string describing the table
- schema. The schema this object lives in
"""
def __init__(self, table_name, cursor=None, schema=None):
"""
Initialise a table object. If a value is passed into the cursor parameter
then the last thing we do is call L{_get_table}.
@param table_name: The name of this table
@type table_name: String
@param cursor: If this is provided then we use it to call L{_get_table}
@type cursor: Database cursor object
@param schema: The schema this object is stored in
@type schema: String
@return: Nothing
"""
self.name = table_name
self.tablespace_name = None
self.table_type = None
self.columns = {}
self.indexes = {}
self.constraints = {}
self.triggers = {}
if schema:
self.schema = schema
else:
self.schema = None
if cursor:
self._get_table(cursor)
def _get_table(self, cursor):
"""
Query the data dictionary for this table and populate the object
attributes
Not implemented in this class as it's database specific, but present
for completeness.
@param cursor: A database cursor
@type cursor: Database cursor object
@return: Nothing
"""
raise NotImplementedError
def dump(self):
"""
Return the structure of the table in a nice, easy to read, format
@return: A description of this table
@rtype: String
"""
# This is pretty, but we could just return the ddl_string
outputs = ["Table : %s\n" % self.name]
# We show the columns in sequence order, using DSU
# DSU = Decorate, Sort, Undecorate - a.k.a Schwartzian transform
deco_cols = [ (x['sequence'], x) for x in list(self.columns.values()) ]
deco_cols.sort()
cols = [ col for seq, col in deco_cols ]
for column in cols:
outputs.append(" %-30s" % column['name'])
if 'length' in column and column['length'] != None:
if 'precision' in column and column['precision'] != None:
# This column is a numeric data type
column_defn = column['type']+self.__class__.calc_precision(column['type'], column['length'], column['precision'], column['scale'])
else:
# This column is a text data type
column_defn = '%s(%d)' % (column['type'], column['length'])
else:
# This column is a simple data type such as date or boolean
column_defn = column['type']
outputs.append(" %-15s " % column_defn)
if not column['nullable']:
outputs.append(" NOT NULL")
if 'special' in column:
# Special case for e.g. 'enum' in MySQL
outputs.append(' %s' % column['special'])
outputs.append("\n")
# Constraints please
if len(self.constraints) != 0:
outputs.append(" Constraints;\n")
for constraint_name, constraint in list(self.constraints.items()):
outputs.append(" %s, " % constraint_name)
outputs.append("%s " % (constraint['type']))
if 'columns' in constraint:
outputs.append(": ")
outputs.append(', '.join(constraint['columns']))
outputs.append("\n")
# Indexes
if len(self.indexes) > 0:
outputs.append(" Indexes:\n")
for index_name, index in list(self.indexes.items()):
outputs.append(" %s, " % index_name)
outputs.append("%s\n" % index['type'])
# Don't check number of columns because there must be at least 1
outputs.append(" Columns: ")
outputs.append(", ".join(index['columns']))
outputs.append("\n")
# LOG.debug("Table Dump output: " + "".join(outputs))
return "".join(outputs)
def get_ddl(self):
"""
Generate the DDL necessary to create this table
Not implemented in this class as it's database specific.
@return: DDL to create this table
@rtype: String
"""
raise NotImplementedError
def to_xml(self):
"""
Return the structure of this table as an XML document fragment
This will be of the form::
<table name="table name">
<tablespace name="tablespace name" />
<column name="name" data-type="data type" sequence="sequence">
<length>x</length>
<precision>x</precision>
<scale>x</scale>
</column>
<column ...>
<constraint name="constraint name" type="Primary|Foreign|Check">
... constraint details ...
</constraint>
<index name="index name" type="index type">
<column name="column name" />
... other columns ...
</index>
... trigger details ...
</table>
@return: An XML fragment describing this table
@rtype: String
"""
xml_strings = ['<table name="%s">\n' % self.name]
if self.tablespace_name:
xml_strings.append(' <tablespace name=')
xml_strings.append('"%s" />\n' % self.tablespace_name)
# Columns
for column in self.columns:
col_details = self.columns[column]
xml_strings.append(' <column name="%s"' % column)
xml_strings.append(' data-type="%s"' % col_details['type'])
xml_strings.append(' sequence="%d">\n' % col_details['sequence'])
# The following statement means we need Python 2.5 and above
# pre 2.5 it would be col_details.has_key('length')
if 'length' in col_details:
xml_strings.append(' <length>')
xml_strings.append(str(col_details['length']))
xml_strings.append('</length>\n')
if 'precision' in col_details and col_details['precision'] != 0:
xml_strings.append(' <precision>')
xml_strings.append(str(col_details['precision'])+'</precision>\n')
if 'scale' in col_details:
xml_strings.append(' <scale>')
xml_strings.append(str(col_details['scale']))
xml_strings.append('</scale>\n')
xml_strings.append(' </column>\n')
# Constraints
for constraint, cons_details in list(self.constraints.items()):
# Exclude check constraints that start with 'SYS_C' (an Oracle hack)
if cons_details['type'] != 'Check' or not constraint.startswith("SYS_C"):
xml_strings.append(' <constraint name="%s"' % constraint)
xml_strings.append(' type="%s">\n' % cons_details['type'])
if cons_details['type'] == 'Check':
xml_strings.append(' <details>')
xml_strings.append(cons_details['condition'])
xml_strings.append('</details>\n')
if cons_details['type'] == 'Primary':
for column in cons_details['columns']:
xml_strings.append(' <column name="%s" />\n' % column)
if cons_details['type'] == 'Foreign':
xml_strings.append(' <jointable ')
xml_strings.append(' name="%s"' % cons_details['reftable'])
xml_strings.append(' pk="%s">\n' % cons_details['refpk'])
for col_index in range(len(cons_details['columns'])):
xml_strings.append(' <constraintcolumn')
name = cons_details['columns'][col_index]
xml_strings.append(' name="%s"' % name)
xml_strings.append(' joincolumn')
join_column = cons_details['refcolumns'][col_index]
xml_strings.append('="%s" />\n' % join_column)
xml_strings.append(' </jointable>\n')
xml_strings.append(' </constraint>\n')
# Indexes
for index in self.indexes:
index_details = self.indexes[index]
xml_strings.append(' <index name="%s"' % index)
xml_strings.append(' type="%s">\n' % index_details['type'])
for column in index_details['columns']:
xml_strings.append(' <column name="%s" />\n' % column)
xml_strings.append(' </index>\n')
# Triggers
for trigger in self.triggers:
xml_strings.append(self.triggers[trigger].to_xml())
xml_strings.append('</table>')
return "".join(xml_strings)
def __cmp__(self, other_table):
"""
Compare this table with <other_table>
@param other_table: A table to compare this one to
@type other_table: An object of a class derived from Schema.Table
@return: 0 if the two tables are the same, otherwise we return 1
@rtype: Boolean, well integer really
"""
# __cmp__ functions return -1 if we are less than schema
# 0 if we are the same as schema
# 1 if we are greater than schema
# If our 'compare' method returns anything there are differences
if self.compare(other_table):
return 1
else:
return 0
def compare(self, other_table):
"""
Calculate the differences between the current table and <other_table>.
@param other_table: Another table to compare to this one
@type other_table: An object of a class derived from Schema.Table
@return: The differences between the two tables
@rtype: String
"""
response = []
if self.name != other_table.name:
response.append('DIFF: Table names: %s and %s' % (self.name, other_table.name))
if self.tablespace_name != other_table.tablespace_name:
response.append('DIFF: Tablespace names: %s and %s' % (self.tablespace_name, other_table.tablespace_name))
# Compare columns
for column_name in list(self.columns.keys()):
if column_name in list(other_table.columns.keys()):
if self.columns[column_name] != other_table.columns[column_name]:
response.append('DIFF: Definition of %s is different' % column_name)
else:
response.append('DIFF: Column %s not in %s' % (column_name, other_table.name))
for column_name in list(other_table.columns.keys()):
if column_name not in list(self.columns.keys()):
response.append('DIFF: Column %s not in %s' % (column_name, self.name))
return "\n".join(response)
class View(object):
"""
A representation of a database view.
A View is made up of columns and also has an associated SQL statement.
It may also have comments - although this is currently only available in Oracle
This is an abstract class which shouldn't be used. It is designed to be
sub-classed in database specific modules.
The sub-classes will need to implement the L{_get_view} and L{get_ddl} methods
A view will have the following mandatory attributes
- name
- columns. A dictionary (keyed on column name) of Column dictionaries
(data_type, data_length, data_precision, data_scale, nullable)
- sql. The SQL that forms the view
- triggers. A dictionary of trigger objects keyed on name
A view may have the following optional attributes:
- schema. The schema this object lives in
"""
def __init__(self, view_name, cursor=None, schema=None):
"""
Initialise a view object.
@param view_name: The name of the view
@type view_name: String
@param cursor: An optional database cursor which, if provided, will be
used to populate this object's attributes by calling L{_get_view}
@type cursor: Database cursor object
@param schema: The schema this view lives in
@type schema: String
@return: Nothing
"""
self.name = view_name
self.type = 'view' # Saves using type() or isinstance
self.columns = {}
self.sql = ''
self.triggers = {}
if schema:
self.schema = schema
else:
self.schema = None
if cursor:
self._get_view(cursor)
def _get_view(self, cursor):
"""
Query the data dictionary for this view and populate the object attributes
Not implemented in this class as it's database specific, but present
for completeness.
@param cursor: A database cursor
@type cursor: Database cursor object
@return: Nothing
"""
raise NotImplementedError
def dump(self):
"""
Output the structure of the view in a nice, easy to read, format
@return: A description of this view
@rtype: String
"""
outputs = ["View : %s\n" % self.name]
cols = list(self.columns.values())
cols.sort(key=lambda col: col['sequence'])  # column dicts aren't directly comparable; sort by declared order
for column in cols:
outputs.append(" %-30s %-12s" % (column['name'], column['type']))
outputs.append("%7s" % self.__class__.calc_precision(column['type'],
column['length'], column['precision'], column['scale']))
if not column['nullable']:
outputs.append(" NOT NULL")
outputs.append("\n")
outputs.append("\n")
outputs.append(self.sql+"\n")
outputs.append("\n")
return "".join(outputs)
def to_xml(self):
"""
Return the structure of this view as an XML document fragment
This will be of the form::
<view name="view name">
<column name="column name" sequence="numeric indicator of order" />
<column ...>
<...>
<sql>SQL text to create the view</sql>
</view>
@return: An XML fragment describing this view
@rtype: String
"""
xml_strings = ['<view name="%s">\n' % self.name]
for column in list(self.columns.values()):
xml_strings.append(' <column name="%s"' % column['name'])
xml_strings.append(' sequence="%d" />\n' % column['sequence'])
xml_strings.append(' <sql>%s</sql>\n' % self.sql)
xml_strings.append('</view>\n')
return "".join(xml_strings)
def get_ddl(self):
"""
Generate the DDL necessary to create this view
Not implemented in this class as it's database specific.
@return: DDL to create this table
@rtype: String
"""
raise NotImplementedError
def __cmp__(self, other_view):
"""
Compare this view with <other_view>
@param other_view: A view to compare this one to
@type other_view: An object of a class derived from Schema.View
@return: 0 if the two views are the same, otherwise we return 1
@rtype: Boolean, well integer really
"""
# __cmp__ functions return -1 if we are less than schema
# 0 if we are the same as schema
# 1 if we are greater than schema
# If our 'compare' method returns anything there are differences
if self.compare(other_view):
return 1
else:
return 0
def compare(self, other_view):
"""
Calculate the differences between this view and <other_view>.
@param other_view: Another view to compare to this one
@type other_view: An object of a class derived from Schema.View
@return: A string describing the differences between the two views
"""
response = []
if self.name != other_view.name:
response.append('DIFF: View names:')
response.append('%s and %s' % (self.name, other_view.name))
# Compare columns
tv_column_names = list(self.columns.keys())
ov_column_names = list(other_view.columns.keys())
for column in tv_column_names:
if column not in ov_column_names:
response.append('DIFF: Column %s' % column)
response.append('not in %s' % other_view.name)
for column in ov_column_names:
if column not in tv_column_names:
response.append('DIFF: Column %s' % column)
response.append('not in %s' % self.name)
return response
class Trigger(object):
"""
A representation of a database trigger.
A trigger has triggering events and a SQL statement. A trigger can only
exist within the context of a table and thus doesn't need any table references
as you can get those from its parent. Apart from the table name, of course,
which we need for the get_ddl method.
This is an abstract class which shouldn't be used. It is designed to be
sub-classed in database specific modules.
The sub-classes will need to implement the L{_get_trigger} and L{get_ddl}
methods
A trigger will have the following attributes
- name
- scope. 'before', 'after' or 'instead of'
- events. A list of the events that cause this trigger to fire
- level. Is this a 'row' or 'statement' level trigger?
- sql. The SQL that is executed when this trigger is fired
"""
def __init__(self, trigger_name, cursor=None, schema=None):
"""
Initialise a trigger object. If a value is passed into the cursor
parameter then the last thing we do is call L{_get_trigger}.
@param trigger_name: The name of this trigger
@type trigger_name: String
@param cursor: If this is provided then we use it to call L{_get_trigger}
@type cursor: Database cursor object
@return: Nothing
"""
self.name = trigger_name
self.table_name = None
self.scope = None
self.events = []
self.level = None
self.sql = None
if schema:
self.schema = schema
else:
self.schema = None
if cursor:
self._get_trigger(cursor)
def _get_trigger(self, cursor):
"""
Query the data dictionary for this trigger and populate the object
attributes
Not implemented in this class as it's database specific, but present
for completeness.
@param cursor: A database cursor
@type cursor: Database cursor object
@return: Nothing
"""
raise NotImplementedError
def dump(self):
"""
Return the structure of the trigger in a nice, easy to read, format
@return: A description of this trigger
@rtype: String
"""
outputs = ["Trigger : %s\n" % self.name]
outputs.append(" %s %s " % (self.scope, ','.join(self.events)))
if self.level:
outputs.append("ON %s\n" % self.level)
outputs.append("\n")
outputs.append(self.sql+"\n")
outputs.append("\n")
return "".join(outputs)
def to_xml(self):
"""
Return the structure of this trigger as an XML document fragment
This will be of the form::
<trigger name="trigger name">
<scope>trigger scope</scope>
<level>trigger level</level>
<events>triggering events</events>
<sql>SQL statement that is fired</sql>
</trigger>
@return: An XML fragment describing this trigger
@rtype: String
"""
xml_strings = [' <trigger name="%s">\n' % self.name]
xml_strings.append(' <scope>%s</scope>\n' % self.scope)
if self.level:
xml_strings.append(' <level>%s</level>\n' % self.level)
xml_strings.append(' <events>%s</events>\n' % self.events)
xml_strings.append(' <sql>%s</sql>\n' % self.sql)
xml_strings.append(' </trigger>\n')
return "".join(xml_strings)
def get_ddl(self):
"""
Generate the DDL necessary to create this trigger
Not implemented in this class as it's database specific
@return: DDL to create this trigger
@rtype: String
"""
raise NotImplementedError
def __cmp__(self, other_trigger):
"""
Compare this trigger with <other_trigger>
@param other_trigger: A trigger to compare this one to
@type other_trigger: An object of a class derived from Schema.Trigger
@return: 1 if triggers are different, 0 if they are the same
@rtype: Boolean, well integer really
"""
# __cmp__ functions return -1 if we are less than schema
# 0 if we are the same as schema
# 1 if we are greater than schema
# If our 'compare' method returns anything there are differences
if self.compare(other_trigger):
return 1
else:
return 0
def compare(self, other_trigger):
"""
Calculate the differences between the current trigger and
<other_trigger>
@param other_trigger: Another trigger to compare to this one
@type other_trigger: An object of a class derived from Schema.Trigger
@return: The differences between the two triggers
@rtype: String
"""
response = []
if self.name != other_trigger.name:
response.append('DIFF: Trigger names: %s' % self.name)
response.append('and %s' % other_trigger.name)
# Compare types
if self.scope != other_trigger.scope:
response.append('DIFF: Trigger %s scope' % self.name)
response.append('%s is different to ' % self.scope)
response.append('trigger %s ' % other_trigger.name)
response.append('scope %s' % other_trigger.scope)
# Compare triggering events
if self.events != other_trigger.events:
response.append('DIFF: Trigger %s' % self.name)
response.append(' events %s is ' % self.events)
response.append('different to trigger %s' % other_trigger.name)
response.append('events %s' % other_trigger.events)
# Compare SQL statements
if self.sql != other_trigger.sql:
response.append('DIFF: Trigger %s ' % self.name)
response.append('SQL %s ' % self.sql)
response.append('is different to trigger %s ' % other_trigger.name)
response.append('SQL %s' % other_trigger.sql)
return response
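
# Illustrative sketch of a hypothetical database-specific subclass, showing how
# _get_trigger and get_ddl are intended to be implemented. The catalogue table,
# column names and DDL syntax below are assumptions and will differ for each
# real database.
class _ExampleTrigger(Trigger):

    def _get_trigger(self, cursor):
        # Hypothetical data dictionary query.
        cursor.execute("SELECT scope, events, level, body "
                       "FROM example_triggers WHERE name = %s", (self.name,))
        self.scope, events, self.level, self.sql = cursor.fetchone()
        self.events = events.split(',')

    def get_ddl(self):
        # Hypothetical, generic-looking DDL statement.
        return "CREATE TRIGGER %s %s %s ON %s %s" % (
            self.name, self.scope, ','.join(self.events),
            self.table_name, self.sql)
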
class Sequence(object):
"""
A representation of a database sequence.
A sequence is an in memory construct that provides numbers in a sequence.
They are generally used to generate primary key values.
This is an abstract class which shouldn't be used. It is designed to be
sub-classed in database specific modules.
The sub-classes will need to implement the L{_get_sequence} and L{get_ddl}
methods
A sequence will have the following attributes:
- name
- min_value
- max_value
- increment_by
- curr_value
A sequence may have one or more of the following attributes
- schema
"""
def __init__(self, sequence_name, cursor=None, schema=None):
"""
Initialise a sequence object. If a value is passed into the cursor
parameter then the last thing we do is call L{_get_sequence}.
@param sequence_name: The name of this sequence
@type sequence_name: String
@param cursor: If this is provided then we use it to call L{_get_sequence}
@type cursor: Database cursor object
@param schema: The schema this sequence lives in
@type schema: String
@return: Nothing
"""
self.name = sequence_name
self.type = 'sequence' # Saves using type() or isinstance
self.min_value = None
self.max_value = None
self.increment_by = None
self.curr_value = None
if schema:
self.schema = schema
else:
            self.schema = None
if cursor:
self._get_sequence(cursor)
def _get_sequence(self, cursor):
"""
Query the data dictionary for this sequence and populate the object
attributes
Not implemented in this class as it's database specific, but present
for completeness.
@param cursor: A database cursor
@type cursor: Database cursor object
@return: Nothing
"""
raise NotImplementedError
def dump(self):
"""
Return the structure of the sequence in a nice, easy to read, format
@return: A description of this sequence
@rtype: String
"""
outputs = ["Sequence : %s" % self.name]
if self.curr_value:
outputs.append(" start : %d" % self.curr_value)
outputs.append(" minimum : %d" % self.min_value)
outputs.append(" maximum : %d" % self.max_value)
if self.increment_by > 1:
outputs.append(" increment : %d" % self.increment_by)
return "\n".join(outputs)
def to_xml(self):
"""
Return the structure of this sequence as an XML document fragment
This will be of the form::
<sequence name="%s">
<start value="%d" />
<minimum value="%d" />
<maximum value="%d" />
<increment value="%d" />
</sequence>
@return: An XML fragment describing this sequence
@rtype: String
"""
xml_strings = ['<sequence name="%s">' % self.name]
        if self.curr_value:
xml_strings.append(' <start value="%d" />' % self.curr_value)
if self.min_value > 1:
xml_strings.append(' <minimum value="%d" />' % self.min_value)
try:
xml_strings.append(' <maximum value="%d" />' % self.max_value)
except OverflowError:
            pass  # max_value is 1.000E+27 which we can't convert to an int
if self.increment_by > 1:
xml_strings.append(' <increment value="%d" />'% self.increment_by)
xml_strings.append('</sequence>')
return "".join(xml_strings)
def get_ddl(self):
"""
Generate the DDL necessary to create this sequence
Not implemented in this class as it's database specific
@return: DDL to create this sequence
@rtype: String
"""
raise NotImplementedError
def __cmp__(self, other_sequence):
"""
Compare this sequence with <other_sequence>
@param other_sequence: A sequence to compare this one to
@type other_sequence: An object of a class derived from Schema.Sequence
@return: 0 if the two sequences are the same, otherwise we return 1
@rtype: Boolean, well integer really
"""
# __cmp__ functions return -1 if we are less than schema
# 0 if we are the same as schema
# 1 if we are greater than schema
# If our 'compare' method returns anything there are differences
if self.compare(other_sequence):
return 1
else:
return 0
def compare(self, other_sequence):
"""
Calculate the differences between this sequence and <other_sequence>
@param other_sequence: Another sequence to compare to this one
@type other_sequence: An object of a class derived from Schema.Sequence
@return: The differences between the two sequences
@rtype: String
"""
response = []
if self.name != other_sequence.name:
response.append('DIFF: Sequence names: %s' % self.name)
response.append('and %s' % other_sequence.name)
if self.increment_by != other_sequence.increment_by:
response.append('DIFF: Increment interval')
response.append('is %d,' % self.increment_by)
response.append('for %s' % other_sequence.name)
response.append('it is %d' % other_sequence.increment_by)
if self.min_value != other_sequence.min_value:
response.append('DIFF: Min value is %d' % self.min_value)
response.append(' for %s' % other_sequence.name)
response.append('it is %d' % other_sequence.min_value)
if self.max_value != other_sequence.max_value:
response.append('DIFF: Max value is %d' % self.max_value)
response.append(', for %s ' % other_sequence.name)
response.append('it is %d' % other_sequence.max_value)
        # The only attribute we don't check is curr_value, because it will be
# different in 999 cases out of a 1000
return response
class CodeObject(object):
"""
A representation of a database code object.
This is an abstract class which shouldn't be used. It is designed to be
sub-classed in database specific modules.
The sub-classes will need to implement the L{_get_code_object} and
L{get_ddl} methods
A code object will have the following attributes
- name
- object_type (usually one of function, procedure or package)
- source code (a sequence of (line number, code) sequences
A code object may have one or more of the following attributes:
- schema
"""
def __init__(self, code_object_name, code_object_type, cursor=None,
schema=None):
"""
Initialise a code object. If a value is passed into the cursor
parameter then the last thing we do is call L{_get_code_object}.
@param code_object_name: The name of this code object
@type code_object_name: String
@param code_object_type: The type of this code object (function,
procedure, etc)
@type code_object_type: String
@param cursor: If this is provided then we use it to call
L{_get_code_object}
@type cursor: Database cursor object
@param schema: The schema that this object lives in
@type schema: String
@return: Nothing
"""
self.name = code_object_name
self.object_type = code_object_type
self.source = []
if schema:
self.schema = schema
else:
self.schema = None
if cursor:
self._get_code_object(cursor)
def _get_code_object(self, cursor):
"""
Query the data dictionary for this code object and populate the object
attributes
Not implemented in this class as it's database specific, but present
for completeness.
@param cursor: A database cursor
@type cursor: Database cursor object
@return: Nothing
"""
raise NotImplementedError
def dump(self):
"""
Return the structure of the code object in a nice, easy to read, format
@return: A description of this code object
@rtype: String
"""
outputs = ["Code object : %s" % self.name]
outputs.append(" Type : %s" % self.object_type)
for source_line in self.source:
# Each line is a (line_number, code) pair
outputs.append('%d: %s' % source_line)
return "".join(outputs)
def to_xml(self):
"""
Return the structure of this code object as an XML document fragment
This will be of the form::
<code_object name="%s">
<type value="%s" />
<source>
.. The source code to recreate this object ..
</source>
</code_object>
@return: An XML fragment describing this code object
@rtype: String
"""
xml_strings = ['<code_object name="%s">' % self.name]
xml_strings.append(' <type value="%s" />' % self.object_type)
xml_strings.append(' <source>')
for source_line in self.source:
xml_strings.append(source_line[1])
xml_strings.append(' </source>')
xml_strings.append('</code_object>')
return "".join(xml_strings)
def get_ddl(self):
"""
Generate the DDL necessary to create this code object
Not implemented in this class as it's database specific
@return: DDL to create this code object
@rtype: String
"""
raise NotImplementedError
def __cmp__(self, other_code_object):
"""
Compare this code object with <other_code_object>
@param other_code_object : A code object to compare this one to
@type other_code_object : An object of a class derived from Schema.CodeObject
@return: True if the two sequences are the same, otherwise False
@rtype: Boolean
"""
# If our 'compare' method returns anything there are differences
if self.compare(other_code_object):
return True
else:
return False
def compare(self, other_code_object):
"""
Calculate the differences between this code object and <other_code_sequence>
@param other_code_object: Another code object to compare to this one
@type other_code_object: An object of a class derived from Schema.CodeObject
@return: The differences between the two code objects
@rtype: String
"""
response = []
if self.name != other_code_object.name:
response.append('DIFF: Code object names: %s' % self.name)
response.append('and %s' % other_code_object.name)
if self.object_type != other_code_object.object_type:
response.append('DIFF: Code object types: %s' % self.object_type)
response.append('and %s' % other_code_object.object_type)
return response
if __name__ == "__main__":
print("This module should not be invoked from the command line")
sys.exit(1)
|
StarcoderdataPython
|
1763971
|
<reponame>huachao2017/goodsdl
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2019-05-06 13:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('goods', '0065_shelfimage_image_name'),
]
operations = [
migrations.AddField(
model_name='shelfgoods',
name='shelfid',
field=models.IntegerField(db_index=True, default=0),
),
migrations.AddField(
model_name='shelfgoods',
name='shopid',
field=models.IntegerField(db_index=True, default=0),
),
]
|
StarcoderdataPython
|
74180
|
"""
Data access functions
---------------------
"""
from __future__ import absolute_import
from os.path import join as pjoin, basename, dirname
import subprocess
import tempfile
import logging
import numpy as np
import h5py
import rasterio
from rasterio.crs import CRS
from rasterio.warp import reproject
from rasterio.enums import Resampling
from wagl.geobox import GriddedGeoBox
from wagl.tiling import generate_tiles
def get_pixel(filename, lonlat, band=1):
"""Return a pixel from `filename` at the longitude and latitude given
by the tuple `lonlat`. Optionally, the `band` can be specified."""
with rasterio.open(filename) as src:
x, y = [int(v) for v in ~src.transform * lonlat]
if isinstance(band, list):
data = src.read(band, window=((y, y + 1), (x, x + 1))).ravel()
else:
data = src.read(band, window=((y, y + 1), (x, x + 1))).flat[0]
return data
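
# Illustrative usage sketch; the file name and coordinates are hypothetical and
# the lonlat pair must share the raster's coordinate reference system.
def _get_pixel_example():
    # A single band returns a scalar value.
    red = get_pixel('scene.tif', (148.95, -35.35), band=1)
    # A list of bands returns a flat array of values, one per band.
    rgb = get_pixel('scene.tif', (148.95, -35.35), band=[1, 2, 3])
    return red, rgb
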
def select_acquisitions(acqs_list, fn=(lambda acq: True)):
"""
Given a list of acquisitions, apply the supplied fn to select the
desired acquisitions.
"""
acqs = [acq for acq in acqs_list if fn(acq)]
return acqs
def stack_data(acqs_list, fn=(lambda acq: True), window=None, masked=False):
"""
Given a list of acquisitions, return the data from each acquisition
collected in a 3D numpy array (first index is the acquisition number).
If window is defined, then the subset contained within the window is
returned along with a GriddedGeoBox instance detailing the
spatial information associated with that subset.
:param acqs_list:
The list of acquisitions from which to generate a stack of data.
:param window:
Defines a subset ((ystart, yend), (xstart, xend)) in array
co-ordinates. Default is None.
:param masked:
Indicates whether or not to return a masked array. Default is False.
:return:
A 2-tuple containing:
* 1. A 3D numpy array (or None) containing the corresponding
acquisition data. (None if no data).
* 2. A GriddedGeoBox instance specifying the spatial context
of the 3D numpy array. Note: All Acquisitions share the
same GriddedGeoBox.
"""
# determine data type and dimensions by reading the first band
acqs = acqs_list
a, geo_box = acqs[0].data_and_box(window=window, masked=masked)
# create the result array, setting datatype based on source type
stack_shape = (len(acqs), a.shape[0], a.shape[1])
stack = np.empty(stack_shape, a.dtype)
stack[0] = a
del a
    # read remaining acquisitions into it
for i in range(1, stack_shape[0]):
# can't use this statement because it will cause data to be
# resampled. But we want an exception thrown if the user
        # tries to stack irregular acquisitions
stack[i] = acqs[i].data(window=window, masked=masked)
return stack, geo_box
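
# Illustrative usage sketch; 'acqs' is a hypothetical list of acquisitions.
# Reads the top-left 512 x 512 window of every acquisition into one 3D array.
# stack, geobox = stack_data(acqs, window=((0, 512), (0, 512)))
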
def write_img(array, filename, driver='GTiff', geobox=None, nodata=None,
tags=None, options=None, cogtif=False, levels=None,
resampling=Resampling.nearest):
"""
Writes a 2D/3D image to disk using rasterio.
:param array:
A 2D/3D NumPy array.
:param filename:
A string containing the output file name.
:param driver:
A string containing a GDAL compliant image driver. Default is
'GTiff'.
:param geobox:
An instance of a GriddedGeoBox object.
:param nodata:
A value representing the no data value for the array.
:param tags:
A dictionary of dataset-level metadata.
:param options:
A dictionary containing other dataset creation options.
See creation options for the respective GDAL formats.
:param cogtif:
If set to True, override the `driver` keyword with `GTiff`
and create a Cloud Optimised GeoTiff. Default is False.
See:
https://trac.osgeo.org/gdal/wiki/CloudOptimizedGeoTIFF
:param levels:
If cogtif is set to True, build overviews/pyramids
according to levels. Default levels are [2, 4, 8, 16, 32].
:param resampling:
If cogtif is set to True, build overviews/pyramids using
a resampling method from `rasterio.enums.Resampling`.
Default is `Resampling.nearest`.
:notes:
If array is an instance of a `h5py.Dataset`, then the output
file will include blocksizes based on the `h5py.Dataset's`
chunks. To override the blocksizes, specify them using the
`options` keyword. Eg {'blockxsize': 512, 'blockysize': 512}.
If `cogtif` is set to True, the default blocksizes will be
256x256. To override this behaviour, specify them using the
`options` keyword. Eg {'blockxsize': 512, 'blockysize': 512}.
"""
# Get the datatype of the array
dtype = array.dtype.name
# Check for excluded datatypes
excluded_dtypes = ['int64', 'int8', 'uint64']
if dtype in excluded_dtypes:
msg = "Datatype not supported: {dt}".format(dt=dtype)
raise TypeError(msg)
    # convert any bools to uint8
if dtype == 'bool':
array = np.uint8(array)
dtype = 'uint8'
ndims = array.ndim
dims = array.shape
# Get the (z, y, x) dimensions (assuming BSQ interleave)
if ndims == 2:
samples = dims[1]
lines = dims[0]
bands = 1
elif ndims == 3:
samples = dims[2]
lines = dims[1]
bands = dims[0]
else:
logging.error('Input array is not of 2 or 3 dimensions!!!')
err = 'Array dimensions: {dims}'.format(dims=ndims)
raise IndexError(err)
# If we have a geobox, then retrieve the geotransform and projection
if geobox is not None:
transform = geobox.transform
projection = geobox.crs.ExportToWkt()
else:
transform = None
projection = None
# override the driver if we are creating a cogtif
if cogtif:
driver = 'GTiff'
# compression predictor choices
predictor = {'int8': 2,
'uint8': 2,
'int16': 2,
'uint16': 2,
'int32': 2,
'uint32': 2,
'int64': 2,
'uint64': 2,
'float32': 3,
'float64': 3}
kwargs = {'count': bands,
'width': samples,
'height': lines,
'crs': projection,
'transform': transform,
'dtype': dtype,
'driver': driver,
'nodata': nodata,
'predictor': predictor[dtype]}
if isinstance(array, h5py.Dataset):
# TODO: if array is 3D get x & y chunks
if array.chunks[1] == array.shape[1]:
# GDAL doesn't like tiled or blocksize options to be set
# the same length as the columns (probably true for rows as well)
array = array[:]
else:
y_tile, x_tile = array.chunks
tiles = generate_tiles(samples, lines, x_tile, y_tile)
# add blocksizes to the creation keywords
kwargs['tiled'] = 'yes'
kwargs['blockxsize'] = x_tile
kwargs['blockysize'] = y_tile
# the user can override any derived blocksizes by supplying `options`
if options is not None:
for key in options:
kwargs[key] = options[key]
with tempfile.TemporaryDirectory() as tmpdir:
out_fname = pjoin(tmpdir, basename(filename)) if cogtif else filename
with rasterio.open(out_fname, 'w', **kwargs) as outds:
if bands == 1:
if isinstance(array, h5py.Dataset):
for tile in tiles:
idx = (slice(tile[0][0], tile[0][1]),
slice(tile[1][0], tile[1][1]))
outds.write(array[idx], 1, window=tile)
else:
outds.write(array, 1)
else:
if isinstance(array, h5py.Dataset):
for tile in tiles:
idx = (slice(tile[0][0], tile[0][1]),
slice(tile[1][0], tile[1][1]))
subs = array[:, idx[0], idx[1]]
for i in range(bands):
outds.write(subs[i], i + 1, window=tile)
else:
for i in range(bands):
outds.write(array[i], i + 1)
if tags is not None:
outds.update_tags(**tags)
# overviews/pyramids
if cogtif:
if levels is None:
levels = [2, 4, 8, 16, 32]
outds.build_overviews(levels, resampling)
if cogtif:
cmd = ['gdal_translate',
'-co',
'TILED=YES',
'-co',
'COPY_SRC_OVERVIEWS=YES',
'-co',
'{}={}'.format('PREDICTOR', predictor[dtype])]
        for key, value in (options or {}).items():
cmd.extend(['-co', '{}={}'.format(key, value)])
cmd.extend([out_fname, filename])
subprocess.check_call(cmd, cwd=dirname(filename))
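
# Illustrative usage sketch; the output path and creation options are assumed
# values. No geobox is supplied, so the file carries no georeferencing.
def _write_img_example():
    data = np.random.randint(0, 255, (256, 256)).astype('uint8')
    write_img(data, '/tmp/example.tif', driver='GTiff', nodata=0,
              tags={'purpose': 'illustration'},
              options={'compress': 'deflate'})
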
def read_subset(fname, ul_xy, ur_xy, lr_xy, ll_xy, bands=1):
"""
Return a 2D or 3D NumPy array subsetted to the given bounding
extents.
:param fname:
A string containing the full file pathname to an image on
disk.
:param ul_xy:
A tuple containing the Upper Left (x,y) co-ordinate pair
in real world (map) co-ordinates. Co-ordinate pairs can be
(longitude, latitude) or (eastings, northings), but they must
be of the same reference as the image of interest.
:param ur_xy:
A tuple containing the Upper Right (x,y) co-ordinate pair
in real world (map) co-ordinates. Co-ordinate pairs can be
(longitude, latitude) or (eastings, northings), but they must
be of the same reference as the image of interest.
:param lr_xy:
A tuple containing the Lower Right (x,y) co-ordinate pair
in real world (map) co-ordinates. Co-ordinate pairs can be
(longitude, latitude) or (eastings, northings), but they must
be of the same reference as the image of interest.
:param ll_xy:
A tuple containing the Lower Left (x,y) co-ordinate pair
in real world (map) co-ordinates. Co-ordinate pairs can be
(longitude, latitude) or (eastings, northings), but they must
be of the same reference as the image of interest.
:param bands:
Can be an integer of list of integers representing the band(s)
to be read from disk. If bands is a list, then the returned
subset will be 3D, otherwise the subset will be strictly 2D.
:return:
A tuple of 3 elements:
* 1. 2D or 3D NumPy array containing the image subset.
* 2. A list of length 6 containing the GDAL geotransform.
* 3. A WKT formatted string representing the co-ordinate
reference system (projection).
:additional notes:
The ending array co-ordinates are increased by +1,
i.e. xend = 270 + 1
to account for Python's [inclusive, exclusive) index notation.
"""
if isinstance(fname, h5py.Dataset):
geobox = GriddedGeoBox.from_dataset(fname)
prj = fname.attrs['crs_wkt']
else:
# Open the file
with rasterio.open(fname) as src:
# Get the inverse transform of the affine co-ordinate reference
geobox = GriddedGeoBox.from_dataset(src)
prj = src.crs.wkt # rasterio returns a unicode
inv = ~geobox.transform
rows, cols = geobox.shape
# Convert each map co-ordinate to image/array co-ordinates
img_ul_x, img_ul_y = [int(v) for v in inv * ul_xy]
img_ur_x, img_ur_y = [int(v) for v in inv * ur_xy]
img_lr_x, img_lr_y = [int(v) for v in inv * lr_xy]
img_ll_x, img_ll_y = [int(v) for v in inv * ll_xy]
# Calculate the min and max array extents
# The ending array extents have +1 to account for Python's
# [inclusive, exclusive) index notation.
xstart = min(img_ul_x, img_ll_x)
ystart = min(img_ul_y, img_ur_y)
xend = max(img_ur_x, img_lr_x) + 1
yend = max(img_ll_y, img_lr_y) + 1
# Check for out of bounds
if (((xstart < 0) or (ystart < 0)) or
((xend -1 > cols) or (yend -1 > rows))):
msg = ("Error! Attempt to read a subset that is outside of the"
"image domain. Index: ({ys}, {ye}), ({xs}, {xe}))")
msg = msg.format(ys=ystart, ye=yend, xs=xstart, xe=xend)
raise IndexError(msg)
if isinstance(fname, h5py.Dataset):
subs = fname[ystart:yend, xstart:xend]
else:
with rasterio.open(fname) as src:
subs = src.read(bands, window=((ystart, yend), (xstart, xend)))
# Get the new UL co-ordinates of the array
ul_x, ul_y = geobox.transform * (xstart, ystart)
geobox_subs = GriddedGeoBox(shape=subs.shape, origin=(ul_x, ul_y),
pixelsize=geobox.pixelsize, crs=prj)
return (subs, geobox_subs)
def reproject_file_to_array(src_filename, src_band=1, dst_geobox=None,
resampling=Resampling.nearest):
"""
Given an image on file, reproject to the desired coordinate
reference system.
:param src_filename:
A string containing the full file path name to the source
image on disk.
:param src_band:
An integer representing the band number to be reprojected.
Default is 1, the 1st band.
:param dst_geobox:
An instance of a GriddedGeoBox object containing the
destination parameters such as origin, affine, projection,
and array dimensions.
:param resampling:
        A member of rasterio.enums.Resampling specifying the resampling
        method to be used.
        Default is Resampling.nearest (nearest neighbour resampling).
:return:
A NumPy array containing the reprojected result.
"""
if not isinstance(dst_geobox, GriddedGeoBox):
msg = 'dst_geobox must be an instance of a GriddedGeoBox! Type: {}'
msg = msg.format(type(dst_geobox))
raise TypeError(msg)
with rasterio.open(src_filename) as src:
# Define a rasterio band
rio_band = rasterio.band(src, src_band)
# Define the output NumPy array
dst_arr = np.zeros(dst_geobox.shape, dtype=src.dtypes[0])
# Get the rasterio proj4 styled dict
prj = CRS.from_string(dst_geobox.crs.ExportToProj4())
reproject(rio_band, dst_arr, dst_transform=dst_geobox.transform,
dst_crs=prj, resampling=resampling)
return dst_arr
def reproject_img_to_img(src_img, src_geobox, dst_geobox,
resampling=Resampling.nearest):
"""
Reprojects an image/array to the desired co-ordinate reference system.
:param src_img:
A NumPy array containing the source image.
:param src_geobox:
An instance of a GriddedGeoBox object containing the
source parameters such as origin, affine, projection.
:param dst_geobox:
An instance of a GriddedGeoBox object containing the
destination parameters such as origin, affine, projection,
and array dimensions.
:param resampling:
        A member of rasterio.enums.Resampling specifying the resampling
        method to be used.
        Default is Resampling.nearest (nearest neighbour resampling).
:return:
A NumPy array containing the reprojected result.
"""
if not isinstance(dst_geobox, GriddedGeoBox):
msg = 'dst_geobox must be an instance of a GriddedGeoBox! Type: {}'
msg = msg.format(type(dst_geobox))
raise TypeError(msg)
if not isinstance(src_geobox, GriddedGeoBox):
msg = 'src_geobox must be an instance of a GriddedGeoBox! Type: {}'
msg = msg.format(type(src_geobox))
raise TypeError(msg)
# Get the source and destination projections in Proj4 styled dicts
src_prj = CRS.from_string(src_geobox.crs.ExportToProj4())
dst_prj = CRS.from_string(dst_geobox.crs.ExportToProj4())
# Get the source and destination transforms
src_trans = src_geobox.transform
dst_trans = dst_geobox.transform
# Define the output NumPy array
dst_arr = np.zeros(dst_geobox.shape, dtype=src_img.dtype)
reproject(src_img, dst_arr, src_transform=src_trans,
src_crs=src_prj, dst_transform=dst_trans, dst_crs=dst_prj,
resampling=resampling)
return dst_arr
def as_array(array, dtype, transpose=False):
"""
Given an array and dtype, array will be converted to dtype if
and only if array.dtype != dtype. If transpose is set to True
then array will be transposed before returning.
:param array:
A NumPy array.
:param dtype:
The type to return the array as.
:type dtype:
A NumPy data type (e.g. ``numpy.float32``).
:param transpose:
If set then array will be transposed before returning.
        Useful for passing arrays into Fortran routines. Default is
False.
:type transpose:
Bool.
:return:
A :py:class:`numpy.ndarry` of type ``dtype`` with the same
dimensions as array.
"""
if array.dtype != dtype:
if transpose:
return array.astype(dtype).transpose()
return array.astype(dtype)
if transpose:
return array.transpose()
return array
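
# Illustrative usage sketch: cast to float32 and transpose before handing the
# array to a (hypothetical) Fortran routine.
# fortran_ready = as_array(some_array, dtype=np.float32, transpose=True)
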
|
StarcoderdataPython
|
1615004
|
<filename>dnd.py
#!/usr/bin/env python3
import argparse
import json
from helpers.spells import *
from helpers.classes import *
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--dndclass', help="Get all spells for a certain class")
parser.add_argument('-l', '--level', help="Spell level", type=int)
parser.add_argument('-s', '--spell', help="Get a specific spell")
parser.add_argument('-o', '--output', help="Output file")
return parser.parse_args()
def main():
arguments = get_args()
if arguments.spell is not None:
# Get description of a specific spell
spell_info = get_spell_info_by_name(arguments.spell.lower())
print(json.dumps(spell_info, indent=2, sort_keys=False))
if arguments.output is not None:
with open(arguments.output, "w") as fp:
json.dump(spell_info, fp, indent=2, sort_keys=False)
exit(0)
if arguments.dndclass is not None:
spell_list = get_class_spells(arguments.dndclass, arguments.level)
print(json.dumps(spell_list, indent=2, sort_keys=True))
if arguments.output is not None:
with open(arguments.output, "w") as fp:
json.dump({'spells': spell_list}, fp, indent=2, sort_keys=False)
exit(0)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1707374
|
import logging
import os
import requests
import json
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext
from texts import get_manual_do_membro
TOKEN = os.environ['TELEGRAM_TOKEN']
PORT = int(os.environ.get("PORT", "8443"))
HEROKU_APP_NAME = os.environ.get("bot-telegram-turing")
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
# /help
def send_help(update, context):
update.message.reply_text("Comandos: /quant; /nlp; /cv; /rl; /ds; /gpt2; /qa")
# Descrição das Areas de foco
# /quant
def send_quant_describe(update, context):
update.message.reply_text('Essa área de foco tem como principal objetivo estudar as aplicações de programação no mercado financeiro, através de determinadas plataformas. Buscamos, juntos, aprender tanto sobre mercado financeiro quanto sobre aplicações e métodos quantitativos utilizados nesse mercado.')
# /nlp
def send_nlp_describe(update, context):
update.message.reply_text('Processamento de Linguagem Natural é uma área da inteligência artificial cujo objetivo é a interpretação e manipulação de linguagens humanas. NLP tem muitas tarefas, algumas se relacionam ao processamento mais imediato dos componentes linguísticos, como a análise sintática, morfossintática (POS Tagging), lematização etc.')
# /cv
def send_cv_describe(update, context):
update.message.reply_text('Em Computer Vision (ou Visão Computacional) trabalhamos principalmente com o processamento de imagens.')
# /rl
def send_rl_describe(update, context):
update.message.reply_text('O Aprendizado por Reforço é uma das áreas mais únicas do Aprendizado de Máquina, fundamentada em ensinar a um agente como agir em um ambiente a partir de suas experiências.')
# /ds
def send_ds_describe(update, context):
update.message.reply_text('Data Science (ou Ciência de Dados) é sobre obter insights ou conhecimento através do estudo e análise de dados')
# /start
def send_welcome(update, context):
update.message.reply_text("Salve, Salve Grupo Turing!")
def error(update, context):
"""Log Errors caused by Updates."""
logger.warning('Update "%s" caused error "%s"', update, context.error)
def gpt2_reply(update, context):
GPT2_API_URL = "https://api-inference.huggingface.co/models/pierreguillou/gpt2-small-portuguese"
payload_input_text = json.dumps(update.message.text)
response = requests.post(GPT2_API_URL, payload_input_text)
text = str(response.json()[0])[20:-2]
update.message.reply_text(text)
def turing_qa(update, context):
API_URL = "https://api-inference.huggingface.co/models/mrm8488/bert-base-portuguese-cased-finetuned-squad-v1-pt"
payload = json.dumps({
"context": get_manual_do_membro(),
"question": update.message.text
})
response = requests.post(API_URL, payload)
update.message.reply_text(response.json()['answer'])
def main():
logger.info("Bot started")
updater = Updater(
TOKEN, use_context=True)
dp = updater.dispatcher
logger.info("working till here")
dp.add_handler(CommandHandler("start", send_welcome))
dp.add_handler(CommandHandler("help", send_help))
dp.add_handler(CommandHandler("quant", send_quant_describe))
dp.add_handler(CommandHandler("nlp", send_nlp_describe))
dp.add_handler(CommandHandler("ds", send_ds_describe))
dp.add_handler(CommandHandler("cv", send_cv_describe))
dp.add_handler(CommandHandler("rl", send_rl_describe))
dp.add_handler(CommandHandler("gpt2", gpt2_reply))
dp.add_handler(CommandHandler("qa", turing_qa))
dp.add_error_handler(error)
updater.start_webhook(listen="0.0.0.0",
port=PORT,
url_path=TOKEN)
updater.bot.set_webhook("https://bot-telegram-turing.herokuapp.com/" + TOKEN)
logger.info("Listening for messages...")
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3343104
|
from docsie_universal_importer.providers.base import StorageTreeView, ImporterView
from .import_provider import BitbucketProvider
storage_view = StorageTreeView.provider_view(BitbucketProvider)
importer_view = ImporterView.provider_view(BitbucketProvider)
|
StarcoderdataPython
|
1688471
|
from gna.ui import basecmd
from gna.env import env
import ROOT
import numpy as np
import scipy.misc
from scipy.stats import poisson
import gna.constructors as C
class cmd(basecmd):
@classmethod
def initparser(cls, parser, env):
parser.add_argument('--name', required=True)
parser.add_argument('--Emin', default=0, type=float)
parser.add_argument('--Emax', default=5, type=float)
parser.add_argument('--nbins', default=70, type=int)
parser.add_argument('--order', default=8)
parser.add_argument('--PoissonMean', default=0.2, type=float)
parser.add_argument('--PoissonOrder', default=4, type=int)
def init(self):
ns = env.ns(self.opts.name)
ns.reqparameter('BackgroundRate', central=0, sigma=0.1)
ns.reqparameter('Mu', central=1, sigma=1)
ns.reqparameter('E0', central=2, sigma=0.05)
ns.reqparameter('Width', central=0.2, sigma=0.005)
edges = np.linspace(self.opts.Emin, self.opts.Emax, self.opts.nbins+1)
orders = np.array([self.opts.order]*(len(edges)-1), dtype=int)
integrator = ROOT.GaussLegendre(edges, orders, len(orders))
hist = ROOT.GaussLegendreHist(integrator)
signal = ROOT.Sum()
n = self.opts.PoissonOrder
model = {}
#ff = np.arange(1,n+1)
        #ff = 1/scipy.misc.factorial(ff)*np.exp(-self.opts.PoissonMean)
#ff_points = C.Points(ff)
#print(ff, ff_points)
with ns:
for i in range(1, n+1):
print(i, n)
model[i] = ROOT.GaussianPeakWithBackground(i)
model[i].rate.E(integrator.points.x)
# print(model[i].rate,model[i].rate.rate,ff_points[i])
prod = ROOT.Product()
prod.multiply(model[i].rate.rate)
poisson_factor = poisson.pmf(i, self.opts.PoissonMean)
poisson_factor_prod = C.Points([poisson_factor])
print(type(model[i].rate), poisson_factor, poisson_factor_prod)
prod.multiply(poisson_factor_prod)
signal.add(prod)
hist.hist.f(signal)
ns.addobservable('spectrum', hist.hist)
|
StarcoderdataPython
|
3256085
|
<reponame>ZRHonor/pytorch_resnet_cifar10<gh_stars>0
from torch.nn import CrossEntropyLoss
from torch import nn
import numpy as np
import torch
class CBCELoss(nn.Module):
def __init__(self, lt_factor, num_classes) -> None:
        super(CBCELoss, self).__init__()
weight = np.linspace(1/lt_factor, 1, num=num_classes)
        self.ce_loss = CrossEntropyLoss(weight=torch.tensor(weight, dtype=torch.float32))
def forward(self, input, target):
return self.ce_loss(input, target)
class GHMLoss(nn.Module):
# TODO complete GHMloss
def __init__(self, num_classes) -> None:
        super(GHMLoss, self).__init__()
pass
def forward(self, input, target):
return 0
class GroupGHMLoss(nn.Module):
# TODO complete GGHMloss
def __init__(self) -> None:
        super(GroupGHMLoss, self).__init__()
pass
def forward(self, input, target):
return 0
def get_loss_fn(loss_fn, lt_factor, num_classes):
if loss_fn == 'CrossEntropyLoss':
return eval(loss_fn)().cuda()
return 0
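
# Illustrative usage sketch for the class-balanced loss; the imbalance factor,
# class count and batch below are assumed values.
def _cbce_example():
    criterion = CBCELoss(lt_factor=100, num_classes=10)
    logits = torch.randn(4, 10)
    targets = torch.randint(0, 10, (4,))
    return criterion(logits, targets)
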
|
StarcoderdataPython
|
1684176
|
<filename>lib/galaxy/model/migrate/versions/0080_quota_tables.py
"""
Migration script to create tables for disk quotas.
"""
from __future__ import print_function
import datetime
import logging
from sqlalchemy import BigInteger, Boolean, Column, DateTime, ForeignKey, Integer, MetaData, String, Table, TEXT
now = datetime.datetime.utcnow
log = logging.getLogger(__name__)
metadata = MetaData()
# Tables to add
Quota_table = Table("quota", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("name", String(255), index=True, unique=True),
Column("description", TEXT),
Column("bytes", BigInteger),
Column("operation", String(8)),
Column("deleted", Boolean, index=True, default=False))
UserQuotaAssociation_table = Table("user_quota_association", metadata,
Column("id", Integer, primary_key=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("quota_id", Integer, ForeignKey("quota.id"), index=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now))
GroupQuotaAssociation_table = Table("group_quota_association", metadata,
Column("id", Integer, primary_key=True),
Column("group_id", Integer, ForeignKey("galaxy_group.id"), index=True),
Column("quota_id", Integer, ForeignKey("quota.id"), index=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now))
DefaultQuotaAssociation_table = Table("default_quota_association", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("type", String(32), index=True, unique=True),
Column("quota_id", Integer, ForeignKey("quota.id"), index=True))
def upgrade(migrate_engine):
metadata.bind = migrate_engine
print(__doc__)
metadata.reflect()
# Create quota table
try:
Quota_table.create()
except Exception:
log.exception("Creating quota table failed.")
# Create user_quota_association table
try:
UserQuotaAssociation_table.create()
except Exception:
log.exception("Creating user_quota_association table failed.")
# Create group_quota_association table
try:
GroupQuotaAssociation_table.create()
except Exception:
log.exception("Creating group_quota_association table failed.")
# Create default_quota_association table
try:
DefaultQuotaAssociation_table.create()
except Exception:
log.exception("Creating default_quota_association table failed.")
def downgrade(migrate_engine):
metadata.bind = migrate_engine
metadata.reflect()
# Drop default_quota_association table
try:
DefaultQuotaAssociation_table.drop()
except Exception:
log.exception("Dropping default_quota_association table failed.")
# Drop group_quota_association table
try:
GroupQuotaAssociation_table.drop()
except Exception:
log.exception("Dropping group_quota_association table failed.")
# Drop user_quota_association table
try:
UserQuotaAssociation_table.drop()
except Exception:
log.exception("Dropping user_quota_association table failed.")
# Drop quota table
try:
Quota_table.drop()
except Exception:
log.exception("Dropping quota table failed.")
|
StarcoderdataPython
|
3394199
|
from .evaluator import MultiScaleEvaluator
|
StarcoderdataPython
|
72016
|
"""Arithmetic Coding
Functions for doing compression using arithmetic coding.
http://en.wikipedia.org/wiki/Arithmetic_coding
The functions and classes all need predictive models; see model.py
"""
import math
import itertools
from seq_predict import CTW
def grouper(n, iterable, fillvalue=None):
args = [iter(iterable)] * n
return itertools.zip_longest(*args, fillvalue=fillvalue)
def compress_bits(model, bits):
"""Compresses a stream of bits into another stream of bits.
Requires a prediction model.
"""
encoder = BinaryArithmeticEncoder(model)
for c in itertools.chain.from_iterable((encoder.encode(b) for b in bits)):
yield c
for c in encoder.flush():
yield c
def compress_bytes(model, bytes):
"""Compresses a stream of bytes into another steam of bytes.
Requires a prediction model.
"""
bits = ((m >> i) & 1 for m in bytes for i in range(8))
cbits = compress_bits(model, bits)
for c in (int(''.join(reversed(byte)), 2) for byte in grouper(8, (str(b) for b in cbits), '0')):
yield c
def decompress_bits(model, bits, msglen):
"""Decompresses a stream of bits into another stream of bits.
Requires the same prediction model (from its original state) that was
used for decompression and the number of bits in the message.
"""
decoder = BinaryArithmeticDecoder(model)
nbits = 0
for r in itertools.chain.from_iterable((decoder.decode(b) for b in bits)):
yield r
nbits += 1
for r in decoder.flush(msglen - nbits):
yield r
def decompress_bytes(model, bytes, msglen):
"""Decompresses a stream of bytes into another stream of bytes.
Requires the same prediction model (from its original state) that was
used for decompression and the number of bytes in the message.
"""
cbits = ((m >> i) & 1 for m in bytes for i in range(8))
bits = decompress_bits(model, cbits, msglen * 8)
for r in (int(''.join(reversed(byte)), 2) for byte in grouper(8, (str(b) for b in bits), '0')):
yield r
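
# Illustrative round-trip sketch. It assumes the CTW model imported above takes
# a context depth, and that decompression is given a model recreated in the
# same initial state as the one used for compression.
def _roundtrip_example():
    message = b"an example message"
    compressed = bytes(compress_bytes(CTW(8), message))
    restored = bytes(decompress_bytes(CTW(8), compressed, len(message)))
    assert restored == message
    return compressed
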
class BinaryArithmeticEncoder:
"""BinaryArithmeticEncoder
An arithmetic encoder for binary data sources. For the theory behind the encoder
see http://en.wikipedia.org/wiki/Arithmetic_coding.
>>> encoder = BinaryArithmeticEncoder(CTW(8))
See also: BinaryArithmeticDecoder, compress, and compress_bytes
"""
def __init__(self, model, num_bits = 32):
self.model = model
self.num_bits = num_bits
self._top = 2 ** self.num_bits
        self._half = self._top // 2  # [0, self._half) outputs the zero bit
self._1_4 = self._half // 2
self._3_4 = self._top - self._1_4
self.low = 0 # Interval is [self.low, self.high)
self.high = self._top
        self.follow_bits = 0  # Opposing bits to follow the next output bit
self.history = []
def encode(self, symbol):
"""Encodes a symbol returning a sequence of coded bits.
The encoder is stateful and (since it is hopefully compressing the input) it will not
return output bits for each input symbol.
You will need to flush the encoder to get remaining coded bits after encoding the
complete sequence.
"""
output = []
# Find the split point
p_zero = self.model.predict(0, self.history)
split = self.low + max(1, int((self.high - self.low) * p_zero)) # 0-interval is [self.low, split)
# Update the model
self.model.update(symbol, self.history)
self.history.append(symbol)
# Update the range based on the observed symbol
if symbol:
self.low = split
else:
self.high = split
# If the range no longer overlaps the midpoint, the next bit is known
# also rescale the interval to get back precision
#
# If the range overlaps the midpoint but not the 1/4 or 3/4 points then
# we rescale the interval, but track this with follow bits. If the next
# bit to output is a 1, then we already know it's at the low end of the upper
# half, so we follow with a 0. Similarly if the next bit is a 0, then
# we already know it's at the high end of the lower half, so we follow
# with a 1.
# If this happens a second time before outputting any bit, then there will
# need to be 2 of these follow bits. So we track this by just incrementing
# a follow bit counter.
#
# This is in a loop because the new range may not overlap the new midpoint,
# allowing multiple bits to be determined
output = []
while True:
if self.high <= self._half:
output.append(0)
output.extend([1] * self.follow_bits) # Add the follow bits
self.follow_bits = 0
elif self.low >= self._half:
output.append(1)
output.extend([0] * self.follow_bits) # Add the follow bits
self.follow_bits = 0
self.low -= self._half
self.high -= self._half
elif self.low >= self._1_4 and self.high <= self._3_4:
self.follow_bits += 1
self.low -= self._1_4
self.high -= self._1_4
else:
break
self.low *= 2
self.high *= 2
return output
def flush(self):
"""Flushes any coded bits in the encoder. Typically called after the entire
sequence has been encoded.
"""
if self.low < self._1_4:
output = [0] + [1] * (self.follow_bits + 1)
else:
output = [1] + [0] * (self.follow_bits + 1)
return output
class BinaryArithmeticDecoder:
def __init__(self, model, num_bits = 32):
self.model = model
self.num_bits = num_bits
self._top = 2 ** self.num_bits
self._half = self._top // 2 # [0, self._half) outputs the zero bit
self._1_4 = self._half // 2
self._3_4 = self._top - self._1_4
self.low = 0
self.high = 1 # This ensures num_bits are read before decoding
self.value = 0
self.history = []
def decode(self, bit):
if self.high <= self._half:
pass
elif self.low >= self._half:
self.value -= self._half
self.low -= self._half
self.high -= self._half
elif self.low >= self._1_4 and self.high <= self._3_4:
self.value -= self._1_4
self.low -= self._1_4
self.high -= self._1_4
self.low *= 2
self.high *= 2
self.value *= 2
self.value += bit
output = []
while (self.low < self._half < self.high and
((self.low < self._1_4 or self.high > self._3_4))):
p_zero = self.model.predict(0, self.history)
split = self.low + max(1, int((self.high - self.low) * p_zero)) # 0-interval is [self.low, split)
symbol = 0 if self.value < split else 1
output.append(symbol)
self.model.update(symbol, self.history)
self.history.append(symbol)
if symbol:
self.low = split
else:
self.high = split
return output
def flush(self, nbits):
output = []
while len(output) < nbits:
output += self.decode(0)
return output[:nbits]
|
StarcoderdataPython
|
3334109
|
import re
from textwrap import dedent, indent
import click as cl
def run_file(text):
"""Evaluate a Bayes file."""
global file_vars, env
file_vars = {}
env = {"__builtins__": {}}
# extract code chunks from text, combine, and sort by position
chunks = extract_chunks(text)
hypotheses = {}
# as we insert values, the string will get longer, so we must keep track
# of how much we've shifted it
offset = 0
# Evaluate each chunk
for match in chunks:
chunk = dedent(match.group(1))
if chunk.startswith("@"): # block
new = run_block(chunk, hypotheses)
new = indent(new, " "*4)
else: # inline
new = run_inline(chunk)
# insert evaluated chunk back into text
start, end = match.span(1)
text = text[:start+offset] + new + text[end+offset:]
offset += len(dedent(new)) - len(chunk)
return text, hypotheses
def run_inline(chunk):
"""Evaluate a chunk of inline code."""
sides = chunk.split("=")
if len(sides) > 1:
expr = sides[1]
else:
expr = sides[0]
value = eval(expr, env, file_vars)
# save value
if len(sides) > 1:
varname = sides[0].strip()
file_vars[varname] = value
if is_num(expr):
return chunk
else:
return f"{chunk} [{value}]"
def run_block(chunk, hypotheses):
"""Evaluate a Bayes block."""
first, *lines, last = chunk.split("\n")
kind = first.split(":", 1)[0]
if kind == "@priors":
new_lines = run_priors(lines, hypotheses)
elif kind == "@evidence":
new_lines = run_evidence(first, lines, hypotheses)
else:
raise cl.UsageError(f"Unknown block type '{kind[1:]}.'")
new_lines = "\n".join(new_lines)
return f"{first}\n{new_lines}\n{last}"
def run_priors(lines, hypotheses):
"""Evaluate a priors block."""
data = {}
for line in lines:
name, expr = line.split(":")
prior = eval(expr, env, file_vars)
data[line] = prior
hypotheses[name] = [prior]
norm = normalize(hypotheses)
    for i, line in enumerate(lines):
        _, expr = line.split(":")
        if is_num(expr) and norm == 1:
            continue
        lines[i] = f"{line} [{data[line] / norm:.6f}]"
return lines
def run_evidence(first, lines, hypotheses):
"""Evaluate an evidence block."""
data = {}
for line in lines:
name, expr = line.split(":")
if not name in hypotheses:
raise cl.UsageError(f"Unknown hypothesis: '{name}.'")
likelihood = eval(expr, env, file_vars)
# apply Bayes' rule
prior = hypotheses[name][-1]
posterior = prior * likelihood
data[line] = [prior, likelihood, posterior]
hypotheses[name].append(posterior)
norm_post = normalize(hypotheses)
norm_like = sum([l for _, l, _ in data.values()])
for i, line in enumerate(lines):
lines[i] = (f"{line} [{data[line][0]:.6f} "
f"=={data[line][1] / norm_like:.6f}==> "
f"{data[line][2] / norm_post:.6f}]")
return lines
def normalize(hypotheses, index=-1):
"""Normalize probabilities."""
total = 0
for probs in hypotheses.values():
total += probs[index]
for probs in hypotheses.values():
probs[index] /= total
return total
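
# Illustrative note: given hypotheses = {'H1': [0.2], 'H2': [0.6]}, calling
# normalize(hypotheses) rescales the last entries in place to 0.25 and 0.75
# and returns the original total, 0.8.
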
def clean_text(text):
"""Clear out old evaluations and results from file."""
text = re.sub(r" ?\[[-+]?\d*\.?\d*\]", "", text)
text = re.sub("\[([+-]?\d+.?\d*) ==([+-]?\d+.?\d*)==> ([+-]?\d+.?\d*)\]", "", text)
return text
def extract_chunks(text):
"""Extract chunks of code from Bayes file."""
inline = re.finditer(r"`([^`]+)`", text)
blocks = re.finditer(r"\n((?:^(?: | *\t)+.*\n)+)", text, re.M)
chunks = [*inline] + [*blocks]
chunks.sort(key=lambda m: m.start())
return chunks
def is_num(text):
"""Check if text can be parsed as a number."""
try:
float(text)
except:
return False
else:
return True
|
StarcoderdataPython
|
10950
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import DEPS
CONFIG_CTX = DEPS['gclient'].CONFIG_CTX
ChromiumGitURL = DEPS['gclient'].config.ChromiumGitURL
@CONFIG_CTX()
def v8(c):
soln = c.solutions.add()
soln.name = 'v8'
soln.url = ChromiumGitURL(c, 'v8', 'v8')
c.got_revision_reverse_mapping['got_revision'] = 'v8'
# Needed to get the testers to properly sync the right revision.
# TODO(infra): Upload full buildspecs for every build to isolate and then use
# them instead of this gclient garbage.
c.parent_got_revision_mapping['parent_got_revision'] = 'got_revision'
p = c.patch_projects
p['icu'] = ('v8/third_party/icu', 'HEAD')
@CONFIG_CTX(includes=['v8'])
def dynamorio(c):
soln = c.solutions.add()
soln.name = 'dynamorio'
soln.url = ChromiumGitURL(c, 'external', 'dynamorio')
@CONFIG_CTX(includes=['v8'])
def llvm_compiler_rt(c):
c.solutions[0].custom_deps['v8/third_party/llvm/projects/compiler-rt'] = (
ChromiumGitURL(c, 'external', 'llvm.org', 'compiler-rt'))
@CONFIG_CTX()
def node_js(c):
soln = c.solutions.add()
soln.name = 'node.js'
soln.url = ChromiumGitURL(c, 'external', 'github.com', 'v8', 'node')
soln.revision = 'vee-eight-lkgr:HEAD'
c.got_revision_reverse_mapping['got_node_js_revision'] = soln.name
@CONFIG_CTX(includes=['v8'])
def v8_valgrind(c):
c.solutions[0].custom_deps['v8/third_party/valgrind'] = (
ChromiumGitURL(c, 'chromium', 'deps', 'valgrind', 'binaries'))
|
StarcoderdataPython
|
128401
|
<gh_stars>1-10
import os
import subprocess
from nmigen.build import *
from nmigen.vendor.lattice_ecp5 import *
from nmigen_boards.resources import *
from ..gateware.ft600 import FT600Resource
__all__ = ["KilsythPlatform"]
class KilsythPlatform(LatticeECP5Platform):
device = "LFE5U-45F"
package = "BG381"
speed = 6
default_clk = "clk16"
resources = [
Resource("clk16", 0, Pins("G3", dir="i"),
Clock(16e6), Attrs(GLOBAL=True, IO_TYPE="LVCMOS33")),
*LEDResources(pins="A9 B9 B10 A10 A11 C10 B11 C11", invert=False, attrs=Attrs(IO_TYPE="LVCMOS33")),
FT600Resource(0,
clk="H2",
data="P4 P3 P2 P1 N4 N3 N2 N1 M3 M1 L3 L2 L1 K4 K3 K2",
be="K1 J5",
rd_n="M4",
wr_n="J1",
gpio1="G5",
txe_n="J4",
rxf_n="J3",
oe_n="H1",
)
]
connectors = []
def toolchain_program(self, products, name):
openocd = os.environ.get("OPENOCD", "openocd")
interface = os.environ.get("INTERFACE", "SiPEED")
if interface == "SiPEED" or interface == "busblaster":
if interface == "SiPEED":
args = ["-c", """
interface ftdi
ftdi_vid_pid 0x0403 0x6010
ftdi_layout_init 0x0018 0x05fb
ftdi_layout_signal nSRST -data 0x0010
"""]
elif interface == "busblaster":
args = ["-f", "interface/ftdi/dp_busblaster.cfg"]
with products.extract("{}.svf".format(name)) as vector_filename:
subprocess.check_call([openocd,
*args,
"-c", "transport select jtag; adapter_khz 10000; init; svf -quiet {}; exit".format(vector_filename)
])
else:
raise Exception("Unsupported interface")
|
StarcoderdataPython
|
4803505
|
<filename>src/api/filters/FilterScopes.py
class FilterScopes:
filter_scopes = {
'user:read': ['name', 'email'],
'manager': ['name', 'email', 'password']
}
def filter(self, response):
if 'error' in response:
return response
token = self.get_token()
scopes = token['scopes'].split(',')
filtered_response = {}
for scope in scopes:
if scope in self.filter_scopes:
if isinstance(response, dict):
return self.filter_dict(response, scope)
elif isinstance(response, list):
return self.filter_list(response, scope)
return filtered_response
def filter_dict(self, response, scope):
filtered_response = {}
for key, value in response.items():
if key in self.filter_scopes[scope]:
filtered_response.update({key: value})
return filtered_response
def filter_list(self, response, scope):
filtered_response = {}
for elem in response:
for key, value in elem.items():
if key in self.filter_scopes[scope]:
filtered_response.update({key: value})
return filtered_response
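
# Illustrative usage sketch. FilterScopes appears to expect a get_token()
# method from the class it is mixed into; the stub below is hypothetical.
class _ExampleScopedFilter(FilterScopes):
    def get_token(self):
        return {'scopes': 'user:read'}

# _ExampleScopedFilter().filter({'name': 'Ada', 'email': 'ada@example.com',
#                                'password': 'secret'})
# keeps only the 'name' and 'email' keys under the 'user:read' scope.
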
|
StarcoderdataPython
|
3276066
|
<reponame>RoastVeg/cports
pkgname = "xbitmaps"
pkgver = "1.1.2"
pkgrel = 0
build_style = "gnu_configure"
hostmakedepends = ["pkgconf"]
pkgdesc = "Common X11 bitmaps"
maintainer = "q66 <<EMAIL>>"
license = "MIT"
url = "https://xorg.freedesktop.org"
source = f"$(XORG_SITE)/data/{pkgname}-{pkgver}.tar.bz2"
sha256 = "b9f0c71563125937776c8f1f25174ae9685314cbd130fb4c2efce811981e07ee"
def post_install(self):
self.install_license("COPYING")
|
StarcoderdataPython
|
3314917
|
<filename>multiband_melgan/utils.py
def repeat(iterable):
while True:
for x in iterable:
yield x
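
# Illustrative note: repeat([1, 2]) yields 1, 2, 1, 2, ... indefinitely, which
# is handy for cycling over a finite iterable such as a data loader.
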
|
StarcoderdataPython
|
114019
|
<reponame>damnit/pymite
# -*- coding: utf-8 -*-
# File: test_daily_adapter.py
""" daily adapter test module. """
import urllib.request
from .conftest import mock_urlopen, _get_url
from pymite.adapters import Daily
def test_daily_setup(libfactory):
""" Test tracker setup. """
factory = libfactory.daily_adapter
assert factory is not None
daily = Daily(factory.realm, factory.apikey)
assert daily.adapter == 'daily'
def test_daily_at(monkeypatch, libfactory):
daily = libfactory.daily_adapter
at_data = [
{'time_entry': {}},
{'time_entry': {}}
]
urlopen_at = mock_urlopen(at_data)
monkeypatch.setattr(urllib.request, 'urlopen', urlopen_at)
at = daily.at('2015', '2', '2')
assert at == list(map(lambda x: x['time_entry'], at_data))
assert len(at) == len(at_data)
def test_daily_at_url(monkeypatch, libfactory):
d = libfactory.daily_adapter
monkeypatch.setattr(Daily, '_get', _get_url('time_entry'))
url = d.at(2015, 2, 2)['api']
assert url == 'https://foo.mite.yo.lk/daily/2015/2/2.json'
url = d.at(2015, '2', '02')['api']
assert url == 'https://foo.mite.yo.lk/daily/2015/2/2.json'
def test_daily_today(monkeypatch, libfactory):
daily = libfactory.daily_adapter
today_data = [
{'time_entry': {}},
{'time_entry': {}}
]
urlopen_today = mock_urlopen(today_data)
monkeypatch.setattr(urllib.request, 'urlopen', urlopen_today)
at = daily.today()
assert at == list(map(lambda x: x['time_entry'], today_data))
assert len(at) == len(today_data)
def test_daily_today_url(monkeypatch, libfactory):
d = libfactory.daily_adapter
monkeypatch.setattr(Daily, '_get', _get_url('time_entry'))
url = d.today()['api']
assert url == 'https://foo.mite.yo.lk/daily.json'
# vim: set ft=python ts=4 sw=4 expandtab :
|
StarcoderdataPython
|
1681566
|
<filename>nahamconctf/2021/ngrocket/client.py
#!/usr/bin/env python3
import time
from pwn import *
target = b'HOST:PORT'
while True:
conn = remote('challenge.nahamcon.com', 0)
conn.recv()
conn.recv()
conn.send(target)
data = conn.recvuntil(b'ngrocket')
print(data)
conn.close()
time.sleep(0.1)
local = remote('localhost', 4444)
local.send(data)
local.close()
|
StarcoderdataPython
|
3253558
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2017-08-30 13:35
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('frontend', '0024_measure_tags'),
('frontend', '0028_prescription_net_cost'),
]
operations = [
]
|
StarcoderdataPython
|
3245653
|
<gh_stars>1-10
from enum import Enum
from json import loads
from typing import List, Dict
import numpy as np
from tdw.tdw_utils import TDWUtils
from tdw.output_data import OutputData, Bounds, Raycast, SegmentationColors, Magnebot
from magnebot.arm import Arm
from magnebot.util import get_data
from magnebot.ik.orientation_mode import OrientationMode
from magnebot.ik.target_orientation import TargetOrientation
from magnebot.action_status import ActionStatus
from magnebot.magnebot_static import MagnebotStatic
from magnebot.magnebot_dynamic import MagnebotDynamic
from magnebot.image_frequency import ImageFrequency
from magnebot.actions.ik_motion import IKMotion
from magnebot.paths import CONVEX_SIDES_PATH
class _GraspStatus(Enum):
getting_bounds = 1,
spherecasting = 2
raycasting = 4
grasping = 8
class Grasp(IKMotion):
"""
Try to grasp a target object.
The action ends when either the Magnebot grasps the object, can't grasp it, or fails arm articulation.
"""
# A list of indices of convex sides per object. See: `_BOUNDS_SIDES`.
_CONVEX_SIDES: Dict[str, List[int]] = loads(CONVEX_SIDES_PATH.read_text(encoding="utf-8"))
# The order of bounds sides. The values in `_CONVEX_SIDES` correspond to indices in this list.
_BOUNDS_SIDES: List[str] = ["left", "right", "front", "back", "top", "bottom"]
def __init__(self, target: int, arm: Arm, orientation_mode: OrientationMode, target_orientation: TargetOrientation,
dynamic: MagnebotDynamic):
"""
:param target: The ID of the target object.
:param arm: [The arm used for this action.](../arm.md)
:param orientation_mode: [The orientation mode.](../ik/orientation_mode.md)
:param target_orientation: [The target orientation.](../ik/target_orientation.md)
:param dynamic: [The dynamic Magnebot data.](../magnebot_dynamic.md)
"""
super().__init__(arm=arm, orientation_mode=orientation_mode, target_orientation=target_orientation,
dynamic=dynamic)
self._target: int = target
self._grasp_status: _GraspStatus = _GraspStatus.getting_bounds
self._target_bounds: Dict[str, np.array] = dict()
self._target_name: str = ""
self._target_position: np.array = np.array([0, 0, 0])
def get_initialization_commands(self, resp: List[bytes], static: MagnebotStatic, dynamic: MagnebotDynamic,
image_frequency: ImageFrequency) -> List[dict]:
# This Magnebot is already holding the object.
if self._target in dynamic.held[Arm.left] or self._target in dynamic.held[Arm.right]:
self.status = ActionStatus.success
return []
# Check if another Magnebot is holding the object.
for i in range(len(resp) - 1):
r_id = OutputData.get_data_type_id(resp[i])
if r_id == "magn":
magnebot = Magnebot(resp[i])
if magnebot.get_id() != static.robot_id:
if self._target in magnebot.get_held_left() or self._target in magnebot.get_held_right():
self.status = ActionStatus.held_by_other
return []
commands = super().get_initialization_commands(resp=resp, static=static, dynamic=dynamic,
image_frequency=image_frequency)
commands.extend([{"$type": "set_magnet_targets",
"arm": self._arm.name,
"targets": [self._target],
"id": static.robot_id},
{"$type": "send_bounds",
"frequency": "once"},
{"$type": "send_segmentation_colors",
"frequency": "once"}])
return commands
def get_end_commands(self, resp: List[bytes], static: MagnebotStatic, dynamic: MagnebotDynamic,
image_frequency: ImageFrequency) -> List[dict]:
commands = super().get_end_commands(resp=resp, static=static, dynamic=dynamic, image_frequency=image_frequency)
commands.append({"$type": "set_magnet_targets",
"arm": self._arm.name,
"targets": [],
"id": static.robot_id})
return commands
def get_ongoing_commands(self, resp: List[bytes], static: MagnebotStatic, dynamic: MagnebotDynamic) -> List[dict]:
if self._is_success(resp=resp, static=static, dynamic=dynamic):
self.status = ActionStatus.success
return []
elif self._grasp_status == _GraspStatus.grasping:
return self._evaluate_arm_articulation(resp=resp, static=static, dynamic=dynamic)
elif self._grasp_status == _GraspStatus.getting_bounds:
# Get the segmentation color data and get the object name.
segmentation_colors = get_data(resp=resp, d_type=SegmentationColors)
for i in range(segmentation_colors.get_num()):
if segmentation_colors.get_object_id(i) == self._target:
self._target_name = segmentation_colors.get_object_name(i).lower()
break
# Get the bounds data and spherecast to the center.
bounds = get_data(resp=resp, d_type=Bounds)
for i in range(bounds.get_num()):
if bounds.get_id(i) == self._target:
self._target_bounds = {"left": np.array(bounds.get_left(i)),
"right": np.array(bounds.get_right(i)),
"front": np.array(bounds.get_front(i)),
"back": np.array(bounds.get_back(i)),
"top": np.array(bounds.get_top(i)),
"bottom": np.array(bounds.get_bottom(i)),
"center": np.array(bounds.get_center(i))}
self._grasp_status = _GraspStatus.spherecasting
return [{"$type": "send_spherecast",
"radius": 0.2,
"origin": TDWUtils.array_to_vector3(dynamic.joints[static.magnets[self._arm]].position),
"destination": TDWUtils.array_to_vector3(bounds.get_center(0)),
"id": static.robot_id}]
raise Exception(f"No bounds data: {resp}")
elif self._grasp_status == _GraspStatus.spherecasting:
magnet_position = dynamic.joints[static.magnets[self._arm]].position
# Get the nearest spherecasted point.
nearest_distance = np.inf
nearest_position = np.array([0, 0, 0])
got_raycast_point = False
for i in range(len(resp) - 1):
r_id = OutputData.get_data_type_id(resp[i])
if r_id == "rayc":
raycast = Raycast(resp[i])
if raycast.get_raycast_id() == static.robot_id:
# Ignore raycasts that didn't hit the target.
if not raycast.get_hit() or not raycast.get_hit_object() or raycast.get_object_id() != self._target:
continue
got_raycast_point = True
point = np.array(raycast.get_point())
raycast_distance = np.linalg.norm(point - magnet_position)
if raycast_distance < nearest_distance:
nearest_distance = raycast_distance
nearest_position = point
# We found a good target!
if got_raycast_point:
self._target_position = self._absolute_to_relative(position=nearest_position, dynamic=dynamic)
self._set_start_arm_articulation_commands(static=static, dynamic=dynamic)
self._grasp_status = _GraspStatus.grasping
return self._evaluate_arm_articulation(resp=resp, static=static, dynamic=dynamic)
# Try to get a target from cached bounds data.
else:
# If we haven't cached the bounds for this object, just return all of the sides.
if self._target_name not in Grasp._CONVEX_SIDES:
sides = list(self._target_bounds.values())[:-1]
else:
# Get only the convex sides of the object using cached data.
sides: List[np.array] = list()
for i, side in enumerate(Grasp._BOUNDS_SIDES):
if i in Grasp._CONVEX_SIDES[self._target_name]:
sides.append(self._target_bounds[side])
# If there are no valid bounds sides, aim for the center and hope for the best.
if len(sides) == 0:
self._target_position = self._absolute_to_relative(position=self._target_bounds["center"],
dynamic=dynamic)
self._set_start_arm_articulation_commands(static=static, dynamic=dynamic)
self._grasp_status = _GraspStatus.grasping
return self._evaluate_arm_articulation(resp=resp, static=static, dynamic=dynamic)
else:
# If the object is higher up than the magnet, remove the lowest side.
if self._target_bounds["center"][1] > magnet_position[1] and len(sides) > 1:
lowest: int = -1
y = np.inf
for i in range(len(sides)):
if sides[i][1] < y:
lowest = i
y = sides[i][1]
del sides[lowest]
# Get the closest side to the magnet.
nearest_side: np.array = sides[0]
d = np.inf
for side in sides:
dd = np.linalg.norm(side - magnet_position)
if dd < d:
nearest_side = side
d = dd
self._grasp_status = _GraspStatus.raycasting
return [{"$type": "send_raycast",
"origin": TDWUtils.array_to_vector3(nearest_side),
"destination": TDWUtils.array_to_vector3(self._target_bounds["center"]),
"id": static.robot_id}]
elif self._grasp_status == _GraspStatus.raycasting:
for i in range(len(resp) - 1):
r_id = OutputData.get_data_type_id(resp[i])
if r_id == "rayc":
raycast = Raycast(resp[i])
if raycast.get_raycast_id() == static.robot_id:
# If the raycast hit the object, aim for that point.
if raycast.get_hit() and raycast.get_hit_object() and raycast.get_object_id() == self._target:
self._target_position = self._absolute_to_relative(dynamic=dynamic,
position=np.array(raycast.get_point()))
self._set_start_arm_articulation_commands(static=static, dynamic=dynamic)
self._grasp_status = _GraspStatus.grasping
return self._evaluate_arm_articulation(resp=resp, static=static, dynamic=dynamic)
else:
self._target_position = self._absolute_to_relative(position=self._target_bounds["center"],
dynamic=dynamic)
self._set_start_arm_articulation_commands(static=static, dynamic=dynamic)
self._grasp_status = _GraspStatus.grasping
return self._evaluate_arm_articulation(resp=resp, static=static, dynamic=dynamic)
self._target_position = self._absolute_to_relative(position=self._target_bounds["center"],
dynamic=dynamic)
self._set_start_arm_articulation_commands(static=static, dynamic=dynamic)
self._grasp_status = _GraspStatus.grasping
return self._evaluate_arm_articulation(resp=resp, static=static, dynamic=dynamic)
else:
raise Exception(self._grasp_status)
def _get_fail_status(self) -> ActionStatus:
return ActionStatus.failed_to_grasp
def _is_success(self, resp: List[bytes], static: MagnebotStatic, dynamic: MagnebotDynamic) -> bool:
return self._target in dynamic.held[self._arm]
def _get_ik_target_position(self) -> np.array:
return self._target_position
|
StarcoderdataPython
|
38183
|
<gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import binned_statistic
def var_correct(qso, rest_ranges=None, zq_range=[1.6, 4.], fit_model=True,
savefile=False):
""" Corrections to the vriance assigned to the pixels by the pipeline
Notes:
1. Scaling spectra doesn't affect this estimate
2. If the spectrum changes a lot (isn't flat), it will lead to
underestimates of eta
"""
if rest_ranges is None:
rest_ranges = [[1350, 1360], [1470, 1480]]
zq_ind = np.where((qso.zq > zq_range[0]) & (qso.zq <= zq_range[1]) & (qso.sn > 5))[0]
# restframe ranges over which to analyze
# currently this allows for only a single bin
if np.asarray(rest_ranges).ndim == 1:
raise TypeError("Please provide the ranges as two dimensional array")
lambda_mean, eta = [], []
for ranges in rest_ranges:
ind_set = (qso.wl > ranges[0]) & (qso.wl <= ranges[1])
# create local flux and ivar matrices
loc_flux = qso.flux[zq_ind[:, None], ind_set]
loc_ivar = qso.ivar[zq_ind[:, None], ind_set]
# mask to select pixels that provide information
ivar_mask = loc_ivar > 0
# sum of good pixels along each spectra where num is greater than 10
num = np.sum(ivar_mask, 1)
num_ind = num > 10
# chi-square along each spectra
# eta = N / sum((f_i - mu)^2 / sigma_i^2)
mu = np.average(loc_flux[num_ind], weights=loc_ivar[num_ind], axis=1)
chisq = np.sum((
loc_flux[num_ind] - mu[:, None]) ** 2 * loc_ivar[num_ind], axis=1)
lambda_obs = np.array((np.mat(qso.wl[ind_set]).T *
np.mat(1 + qso.zq[zq_ind][num_ind]))).T
# mean of observed wavelength spanned along each spectra
lambda_mean += list(np.average(lambda_obs, weights=ivar_mask[num_ind], axis=1))
# eta values along each spectra
eta += list(num[num_ind] / chisq)
# binned statistic with scipy
y, bin_edges, __ = binned_statistic(lambda_mean, eta,
statistic='mean', bins=100)
bin_width = (bin_edges[1] - bin_edges[0])
X = bin_edges[1:] - bin_width/2
# plot the results if specified
fig, ax = plt.subplots(1)
ax.plot(X, y, '+', color='k', markersize=8)
# fit a simple piecewise function to the data
if fit_model:
popt1 = np.polyfit(X[X < 5850], y[X < 5850], deg=1)
popt2 = np.polyfit(X[X > 5850], y[X > 5850], deg=2)
xline1 = np.linspace(3500, 5850, 100)
ax.plot(xline1, np.polyval(popt1, xline1), '-r')
xline2 = np.linspace(5850, 7500, 100)
ax.plot(xline2, np.polyval(popt2, xline2), '--r')
ax.set_xlabel(r'$\lambda_\mathrm{obs} [\mathrm{\AA}]$')
plt.show()
if savefile and fit_model:
    # the fitted coefficients popt1/popt2 only exist when fit_model is True
    np.savetxt("var_correct.txt", list(popt1) + list(popt2))
def calibrate(wl, spec, ivar, zq, rest_range, norm_min, norm_max, savetag,
plotit=False):
""" Obtain flux calibration vector by doing optical depth analysis redwards
of Lyman-Alpha
Only the shape is estimated, the overall normalization is unconstrained
"""
# Collect relevant indices over restframe
r_ind = []
for j in range(len(rest_range)):
foo = np.where((wl > rest_range[j][0]) & (wl < rest_range[j][1]))[0]
r_ind = np.concatenate((r_ind, foo))
rInd = r_ind.astype(int)
# Obtain the corresponding data matrices
lam_obs = np.array(np.mat(wl[rInd]).T * np.mat(1 + zq)).T
cflux, civar = spec[:, rInd], ivar[:, rInd]
# Scale to the same baseline
# this will introduce additional errors that we are neglecting
nValue = np.zeros(len(rInd))
for i in range(len(rInd)):
blah = np.where((lam_obs[:, i] > norm_min) & (lam_obs[:, i] < norm_max)
& (civar[:, i] > 0))[0]
nValue[i] = np.average(cflux[:, i][blah], weights=civar[:, i][blah])
# Scale fluxes and ivars accordingly
NormFlux = cflux / nValue
NormIvar = civar * nValue ** 2
pixObs = np.ravel(lam_obs)
pixFlux, pixIvar = np.ravel(NormFlux), np.ravel(NormIvar)
# Controls the smoothing of the results
ObsBin = np.arange(3500, 7000, 3)
# Correction vector
Cvec = np.zeros(len(ObsBin) - 1)
for k in range(len(Cvec)):
bInd = np.where((pixObs > ObsBin[k]) & (pixObs <= ObsBin[k + 1])
& (pixIvar > 0) & np.isfinite(pixFlux))[0]
if len(bInd) > 5:
Cvec[k] = np.average(pixFlux[bInd], weights=pixIvar[bInd])
Lvec = (ObsBin[1:] + ObsBin[:-1])/2.
if plotit:
plt.figure()
good = Cvec != 0
plt.plot(Lvec[good], Cvec[good], '-k', lw=0.6)
plt.xlabel(r'$\lambda_{obs}$')
plt.ylabel(r'$Correction$')
# plt.xlim(1.8 , 3)
plt.ylim(0.9, 1.1)
plt.axhline(1, c='r')
plt.show()
if savetag is not None:
np.savetxt('../Data/calibration' + savetag + '.dat',
[Lvec[good], Cvec[good]])
# EOF
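# --- Hypothetical usage sketch (not part of the original module) ---
# Illustrates the interface var_correct() expects: a container exposing the
# attributes wl, flux, ivar, zq and sn. All names and values below are made
# up for demonstration only.
if __name__ == "__main__":
    from types import SimpleNamespace
    rng = np.random.default_rng(0)
    demo_qso = SimpleNamespace(
        wl=np.linspace(1340, 1490, 200),            # restframe wavelengths
        flux=rng.normal(1.0, 0.1, size=(50, 200)),  # 50 mock, roughly flat spectra
        ivar=np.full((50, 200), 100.0),             # inverse variances
        zq=rng.uniform(2.0, 3.5, size=50),          # quasar redshifts
        sn=np.full(50, 10.0),                       # signal-to-noise ratios
    )
    var_correct(demo_qso, fit_model=False)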
|
StarcoderdataPython
|
1714904
|
<filename>scripts/dataex_list_user_preferences.py
#!/usr/bin/env python3
import json
import click
import requests
from yaspin import yaspin
from dataexclient.auth import auth
from dataexclient import auth_helper
from dataexclient.utils import check_error, check_output_format
from dataexclient.config import GET_REGION_PREFERENCE_URL, GET_POINT_PREFERENCE_URL
@click.command()
@click.option('--preference_type', '-pt' ,required=True, help='choose either region or point',type=click.Choice(['region', 'point'], case_sensitive=False))
def main(preference_type):
payload = dict()
auth_obj = auth()
cred = auth_obj.get_auth()
payload['username'] = cred['username']
if preference_type == 'region':
url = GET_REGION_PREFERENCE_URL
type = 'regions'
else:
url = GET_POINT_PREFERENCE_URL
type = 'points'
headers = {
'Content-Type': 'application/json',
'Authorization': auth_helper.get_token(),
}
with yaspin(text="Downloading...", color="yellow") as spinner:
response = requests.post(url, headers=headers, data=json.dumps(payload))
if response.status_code == 200:
data = response.json()
if check_error(data):
spinner.fail("💥 ")
else:
spinner.text = "Done"
spinner.ok("✅")
check_output_format(data[type], output_format='table')
else:
print(response.status_code)
spinner.fail("💥 ")
if __name__=='__main__':
main()
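# Hypothetical command-line usage (option names are taken from the click
# declarations above; the script name comes from the file header):
#   python dataex_list_user_preferences.py --preference_type region
#   python dataex_list_user_preferences.py -pt point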
|
StarcoderdataPython
|
1656898
|
# Generated by Django 2.2.6 on 2021-04-13 11:04
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('control', '0039_auto_20201203_0057'),
]
operations = [
migrations.AlterUniqueTogether(
name='agentadmin',
unique_together={('ip', 'name')},
),
]
|
StarcoderdataPython
|
1687703
|
#!/usr/bin/python3
import unittest
from pypcep.pcep_parser import parse_pcep, PCEPMessageType
PCEP_OPEN_MSG = [
'0x20', '0x01', '0x00', '0x50', '0x01', '0x10', '0x00', '0x4c',
'0x20', '0x1e', '0x78', '0x01', '0x00', '0x10', '0x00', '0x04',
'0x00', '0x00', '0x01', '0xc5', '0x00', '0x18', '0x00', '0x10',
'0xfc', '0x01', '0xff', '0x00', '0x00', '0x00', '0x00', '0x00',
'0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00',
'0x00', '0x1a', '0x00', '0x04', '0x00', '0x00', '0x00', '0x0b',
'0x00', '0x65', '0x00', '0x04', '0x00', '0x00', '0x00', '0x00',
'0x00', '0x06', '0x00', '0x02', '0x00', '0x00', '0x00', '0x00',
'0x00', '0x72', '0x00', '0x04', '0x00', '0x00', '0x00', '0x02',
'0x00', '0x67', '0x00', '0x02', '0x00', '0x00', '0x00', '0x00']
PCEP_CLOSE_MSG = [
'0x20', '0x07', '0x00', '0x0c', '0x0f', '0x10', '0x00', '0x08',
'0x00', '0x00', '0x00', '0x02']
PCEP_KEEPALIVE_MSG = [
'0x20', '0x02', '0x00', '0x04']
PCEP_NOTIFICATION_MSG = [
'0x20', '0x05', '0x00', '0x0c', '0x0c', '0x10', '0x00', '0x08',
'0x00', '0x00', '0x02', '0x01']
PCEP_LSP_STATE_REPORT_MSG = [
'0x20', '0x0a', '0x00', '0x10', '0x20', '0x12', '0x00', '0x08',
'0x00', '0x00', '0x00', '0x00', '0x07', '0x10', '0x00', '0x04']
class ParsePCEPTestCase(unittest.TestCase):
def _test_bytes(self, msg):
return bytes([int(i, 16) for i in msg])
def test_parse_keepalive(self):
keepalive_msg_bytes = self._test_bytes(PCEP_KEEPALIVE_MSG)
keepalive_msg = parse_pcep(keepalive_msg_bytes)
self.assertEqual(1, keepalive_msg.header.pcep_version)
self.assertEqual(PCEPMessageType.KEEPALIVE, PCEPMessageType(keepalive_msg.header.pcep_type))
def test_parse_open(self):
open_msg_bytes = self._test_bytes(PCEP_OPEN_MSG)
open_msg = parse_pcep(open_msg_bytes)
self.assertEqual(1, open_msg.header.pcep_version)
self.assertEqual(PCEPMessageType.OPEN, PCEPMessageType(open_msg.header.pcep_type))
objs = open_msg.pcep_objs
self.assertEqual(1, len(objs))
obj = objs[0]
self.assertEqual(1, obj.obj_fields['version'])
self.assertEqual(1, obj.obj_fields['sid'])
self.assertEqual(30, obj.obj_fields['keepalive'])
self.assertEqual(120, obj.obj_fields['deadtime'])
tlvs = obj.obj_fields['tlvs']
self.assertEqual(7, len(tlvs))
def test_parse_close(self):
close_msg_bytes = self._test_bytes(PCEP_CLOSE_MSG)
close_msg = parse_pcep(close_msg_bytes)
self.assertEqual(1, close_msg.header.pcep_version)
self.assertEqual(PCEPMessageType.CLOSE, PCEPMessageType(close_msg.header.pcep_type))
self.assertEqual(2, close_msg.pcep_objs[0].obj_fields['reason'])
def test_parse_notification(self):
notification_msg_bytes = self._test_bytes(PCEP_NOTIFICATION_MSG)
notification_msg = parse_pcep(notification_msg_bytes)
self.assertEqual(1, notification_msg.header.pcep_version)
self.assertEqual(PCEPMessageType.NOTIFICATION, PCEPMessageType(notification_msg.header.pcep_type))
self.assertEqual(0, notification_msg.pcep_objs[0].obj_fields['reserved'])
def test_parse_lsp_state_report(self):
lsp_state_report_msg_bytes = self._test_bytes(PCEP_LSP_STATE_REPORT_MSG)
lsp_state_report_msg = parse_pcep(lsp_state_report_msg_bytes)
self.assertEqual(1, lsp_state_report_msg.header.pcep_version)
self.assertEqual(PCEPMessageType.LSP_STATE_REPORT, PCEPMessageType(lsp_state_report_msg.header.pcep_type))
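# Added convenience entry point (assumption: the original module relies on an
# external runner such as pytest; this guard only allows running the file
# directly with the standard unittest runner).
if __name__ == '__main__':
    unittest.main()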
|
StarcoderdataPython
|
1762081
|
<gh_stars>1-10
from django.contrib.auth.models import User
from django.db import models
from django_extensions.db.fields import AutoSlugField
from apps.government.models import Government
from apps.core.models import BaseData
from apps.contacts.models import Contact
from datetime import datetime
from django.utils import timezone
class AgencyManager(models.Manager):
def all_them(self):
return super(AgencyManager, self).get_query_set().filter(deprecated__isnull=True).prefetch_related("government", "creator")
def get_query_set(self):
return super(AgencyManager, self).get_query_set().filter(deprecated__isnull=True, hidden=False).prefetch_related("government", "creator")
class Agency(BaseData):
name = models.CharField(max_length=255)
slug = AutoSlugField(populate_from=('name', ), overwrite=False)
government = models.ForeignKey(Government)
contacts = models.ManyToManyField(Contact, blank=True, null=True, related_name='agency_related_contacts')
objects = AgencyManager()
creator = models.ForeignKey(User, null = True)
hidden = models.BooleanField(default = False)
pub_contact_cnt = models.IntegerField(default=0)
editor_contact_cnt = models.IntegerField(default=0)
class Meta:
verbose_name_plural = 'Agencies'
def __unicode__(self):
return self.name
def has_editable_contact(self, usr):
for contact in self.contacts.all():
if contact.creator == usr:
return True
return False
@property
def late_requests(self):
"""
How many requests have FAILED to meet their deadlines?
"""
num_late_requests = 0
for r in self.related_agencies.all():
if r.is_late_naive: num_late_requests += 1
return num_late_requests
@property
def average_time_outstanding(self):
days_late = 0
for r in self.related_agencies.all():
days_late += r.time_outstanding
return days_late
def save(self, *args, **kw):
if self.pk is not None:
self.pub_contact_cnt = self.contacts.filter(hidden=False).count()
self.editor_contact_cnt = self.contacts.all().count()
else:
self.pub_contact_cnt = 0
self.editor_contact_cnt = 0
super(Agency, self).save(*args, **kw)
|
StarcoderdataPython
|
3234805
|
<filename>ldndc2nc/extra.py
# -*- coding: utf-8 -*-
"""ldndc2nc.extra: extra module within the ldndc2nc package."""
import logging
import os
from pkg_resources import Requirement, resource_filename
import shutil
import string
import numpy as np
import param
import yaml
log = logging.getLogger(__name__)
def enum(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
return type('Enum', (), enums)
def _copy_default_config():
""" copy default conf file to user dir """
# TODO somewhat redundant, merge with set_config code
fname = resource_filename(
Requirement.parse("ldndc2nc"), "ldndc2nc/data/ldndc2nc.conf")
shutil.copyfile(fname, os.path.join(
os.path.expanduser("~"), "ldndc2nc.conf"))
def _find_config():
""" look for cfgFile in the default locations """
cfgFile = None
locations = [os.curdir, os.path.expanduser("~"), "/etc/ldndc2nc",
os.environ.get("LDNDC2NC_CONF")]
locations = [x for x in locations if x is not None]
for loc in locations:
f = os.path.join(loc, "ldndc2nc.conf")
if os.path.isfile(f):
cfgFile = f
break
return cfgFile
def _parse_config(cfgFile):
""" read yaml config file and modify special properties"""
with open(cfgFile, 'r') as ymlfile:
cfg = yaml.load(ymlfile)
for k, vs in cfg['variables'].items():
vs_new = []
for v in vs:
def is_multipart_item(x):
return ';' in x
if is_multipart_item(v):
x = v.split(';')
vs_new.append((x[0], x[1:]))
else:
vs_new.append(v)
cfg['variables'][k] = vs_new
return cfg
def parse_config(cfg, section=None):
""" parse config data structure, return data of required section """
def is_valid_section(s):
valid_sections = ['info', 'project', 'variables', 'refdata']
return s in valid_sections
cfg_data = None
if is_valid_section(section):
try:
cfg_data = cfg[section]
except KeyError:
log.critical(cfg.keys())
log.critical("Section <%s> not found in config" % section)
exit(1)
else:
log.critical("Section <%s> not a valid name" % section)
exit(1)
return cfg_data
def get_config(cfgFile=None):
""" locate and read config file """
cfg = None
locations = []
def cfgfile_exists(cfgFile):
return cfgFile is not None
if cfgfile_exists(cfgFile):
if not os.path.isfile(cfgFile):
log.critical("Specified configuration file not found.")
exit(1)
else:
cfgFile = _find_config()
if not cfgfile_exists(cfgFile):
log.info("Copying config file")
_copy_default_config()
cfgFile = _find_config()
cfg = _parse_config(cfgFile)
return cfg
def set_config(cfg):
""" write cfg file to user dir """
fname = os.path.join(os.path.expanduser("~"), 'ldndc2nc.conf')
with open(fname, 'w') as f:
f.write(yaml.dump(cfg, default_flow_style=False))
class RefDataBuilder(param.Parameterized):
lonmin = param.Number(None, bounds=(-180, 180), doc="min longitude")
latmin = param.Number(None, bounds=(-90, 90), doc="min latitude")
lonmax = param.Number(None, bounds=(-180, 180), doc="max longitude")
latmax = param.Number(None, bounds=(-90, 90), doc="max latitude")
local = param.Boolean(False, doc="number ids for regional subset")
formula = param.String(default='continuous')
res = param.Number(None, bounds=(0, 5), doc="cell resolution in degrees")
i_shift = 0
j_shift = 0
def __init__(self, cfg):
_cfg = parse_config(cfg, section='refdata')
try:
self.lonmin, self.latmin, self.lonmax, self.latmax = _cfg['bbox']
except Exception as e:
log.critical(str(e))
try:
self.res = _cfg['res']
except Exception as e:
log.critical("No <res> statement in refdata")
exit(1)
try:
self.local = _cfg['local']
except Exception as e:
log.debug("No <local> statement in refdata: using 'global'")
try:
formula = _cfg['formula']
formula = self._check_formula(formula)
self.formula = formula
except:
log.debug("No <formula> statement in refdata: using 'continuous'")
# we work with cell centers
cell_half = self.res * 0.5
self.lons = np.arange(self.lonmin + cell_half, self.lonmax, self.res)
self.lats = np.arange(self.latmin + cell_half, self.latmax, self.res)
self.globlons = np.arange(-180 + cell_half, 180, self.res)
self.globlats = np.arange(-90 + cell_half, 90, self.res)
if not self.local:
# compute shift of local bbox in respect to global domain
m_lon = self._find_nearest(self.globlons, self.lons[0])
m_lat = self._find_nearest(self.globlats, self.lats[::-1][0])
self.i_shift = np.where(self.globlons == m_lon)[0]
self.j_shift = np.where(self.globlats[::-1] == m_lat)[0]
def _check_formula(self, formula_str):
    """ check norefdata formula given in conf file """
    valid_chars = "xyij0123456789+*^"
    formula_str_validated = []
    # use str methods instead of the Python 2-only string-module helpers
    formula_str = formula_str.replace('^', '**').lower()
    for c in formula_str:
        if c in valid_chars:
            formula_str_validated.append(c)
    return ''.join(formula_str_validated)
def _compute_formula_cid(self, j, i):
""" calculate cellid based on formula """
i += self.i_shift
j += self.j_shift
# expose the (shifted) grid indices to the user-supplied formula; builtins stay disabled
return eval(self.formula, {'__builtins__': None}, {'i': i, 'j': j})
def _compute_continuous_cid(self, j, i):
""" calculate continuous cellid """
if self.local:
i_len = len(self.lons)
else:
i_len = len(self.globlons)
return (j + self.j_shift) * i_len + (i + self.i_shift)
def _find_nearest(self, array, value):
""" locate closest value match """
idx = (np.abs(array - value)).argmin()
return array.flat[idx]
def build(self):
""" actually populate cellid array """
cell_ids = np.empty((len(self.lats), len(self.lons)), np.int64)
for j, lat in enumerate(self.lats):
for i, lon in enumerate(self.lons):
if self.formula != 'continuous':
cid = self._compute_formula_cid(j, i)
else:
cid = self._compute_continuous_cid(j, i)
cell_ids[j, i] = cid
return (cell_ids, self.lats, self.lons)
def create_refdata(cfg):
""" produce refdata using info from cfg file
:param: yaml cfg
:return: cell_ids, lats, lons
:rtype: tuple
"""
rdb = RefDataBuilder(cfg)
return rdb.build()
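# --- Hypothetical usage sketch (not part of the original module) ---
# Demonstrates parse_config() on an in-memory dict shaped like the expected
# ldndc2nc.conf 'refdata' section; every value below is made up.
if __name__ == "__main__":
    demo_cfg = {'refdata': {'bbox': [10.0, 45.0, 12.0, 47.0], 'res': 0.5, 'local': True}}
    print(parse_config(demo_cfg, section='refdata'))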
|
StarcoderdataPython
|
3224778
|
#!/usr/bin/env python
# encoding: utf-8
"""
@version: v1.0
@author: xag
@license: Apache Licence
@contact: <EMAIL>
@site: http://www.xingag.top
@software: PyCharm
@file: views.py
@time: 2018/7/23 21:20
@description:TODO
"""
import qiniu
from flask import (
Blueprint,
jsonify
)
bp = Blueprint('common', __name__, url_prefix='/c')
# Get the Qiniu Cloud uptoken (via the AccessKey and SecretKey) and return it as JSON
@bp.route('/uptoken/')
def uptoken():
AccessKey = "<KEY>"
SecretKey = "<KEY>"
q = qiniu.Auth(AccessKey, SecretKey)
# Parameter: bucket name
bucket = "flask"
token = q.upload_token(bucket)
print(token)
# Return JSON data to the frontend (note: the key must be 'uptoken', otherwise the Qiniu JS SDK will fail to fetch it)
return jsonify({'uptoken': token})
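# --- Hypothetical wiring sketch (not part of the original views module) ---
# Shows how this blueprint could be mounted on an application for a quick
# manual test; the Flask app created here is an assumption.
if __name__ == '__main__':
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(bp)
    # GET http://localhost:5000/c/uptoken/ should return {"uptoken": "..."}
    app.run(debug=True)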
|
StarcoderdataPython
|
3380831
|
import pytest
from metrics_layer.core.model.project import AccessDeniedOrDoesNotExistException
@pytest.mark.query
def test_query_no_join_with_limit(connection):
query = connection.get_sql_query(metrics=["total_item_revenue"], dimensions=["channel"], limit=499)
correct = (
"SELECT order_lines.sales_channel as order_lines_channel,"
"SUM(order_lines.revenue) as order_lines_total_item_revenue "
"FROM analytics.order_line_items order_lines GROUP BY order_lines.sales_channel "
"ORDER BY order_lines_total_item_revenue DESC LIMIT 499;"
)
assert query == correct
@pytest.mark.query
def test_alias_only_query(connection):
metric = connection.get_metric(metric_name="total_item_revenue")
query = metric.sql_query(query_type="SNOWFLAKE", alias_only=True)
assert query == "SUM(order_lines_total_item_revenue)"
@pytest.mark.query
def test_alias_only_query_number(connection):
metric = connection.get_metric(metric_name="line_item_aov")
query = metric.sql_query(query_type="SNOWFLAKE", alias_only=True)
assert query == "(SUM(order_lines_total_item_revenue)) / (COUNT(orders_number_of_orders))"
@pytest.mark.query
def test_alias_only_query_symmetric_average_distinct(connection):
metric = connection.get_metric(metric_name="average_order_revenue")
query = metric.sql_query(query_type="SNOWFLAKE", alias_only=True)
correct = (
"(COALESCE(CAST((SUM(DISTINCT (CAST(FLOOR(COALESCE(order_lines_average_order_revenue, 0) "
"* (1000000 * 1.0)) AS DECIMAL(38,0))) + (TO_NUMBER(MD5(order_lines_order_id), "
"'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') % 1.0e27)::NUMERIC(38, 0)) "
"- SUM(DISTINCT (TO_NUMBER(MD5(order_lines_order_id), 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') "
"% 1.0e27)::NUMERIC(38, 0))) AS DOUBLE PRECISION) / CAST((1000000*1.0) AS "
"DOUBLE PRECISION), 0) / NULLIF(COUNT(DISTINCT CASE WHEN (order_lines_average_order_revenue) "
"IS NOT NULL THEN order_lines_order_id ELSE NULL END), 0))"
)
assert query == correct
@pytest.mark.query
def test_query_no_join_average_distinct(connection):
query = connection.get_sql_query(metrics=["average_order_revenue"], dimensions=["channel"])
correct = (
"SELECT order_lines.sales_channel as order_lines_channel,(COALESCE(CAST((SUM(DISTINCT "
"(CAST(FLOOR(COALESCE(order_lines.order_total, 0) * (1000000 * 1.0)) AS DECIMAL(38,0))) "
"+ (TO_NUMBER(MD5(order_lines.order_unique_id), 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') "
"% 1.0e27)::NUMERIC(38, 0)) - SUM(DISTINCT (TO_NUMBER(MD5(order_lines.order_unique_id), "
"'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') % 1.0e27)::NUMERIC(38, 0))) AS DOUBLE PRECISION) "
"/ CAST((1000000*1.0) AS DOUBLE PRECISION), 0) / NULLIF(COUNT(DISTINCT CASE WHEN "
"(order_lines.order_total) IS NOT NULL THEN order_lines.order_unique_id ELSE NULL END), 0)) "
"as order_lines_average_order_revenue FROM analytics.order_line_items order_lines "
"GROUP BY order_lines.sales_channel ORDER BY order_lines_average_order_revenue DESC;"
)
assert query == correct
@pytest.mark.query
def test_query_single_join(connection):
query = connection.get_sql_query(metrics=["total_item_revenue"], dimensions=["channel", "new_vs_repeat"])
correct = (
"SELECT order_lines.sales_channel as order_lines_channel,"
"orders.new_vs_repeat as orders_new_vs_repeat,"
"SUM(order_lines.revenue) as order_lines_total_item_revenue FROM "
"analytics.order_line_items order_lines LEFT JOIN analytics.orders orders ON "
"order_lines.order_unique_id=orders.id GROUP BY order_lines.sales_channel,orders.new_vs_repeat "
"ORDER BY order_lines_total_item_revenue DESC;"
)
assert query == correct
@pytest.mark.query
def test_query_single_dimension(connection):
query = connection.get_sql_query(metrics=[], dimensions=["new_vs_repeat"])
correct = (
"SELECT orders.new_vs_repeat as orders_new_vs_repeat FROM "
"analytics.order_line_items order_lines LEFT JOIN analytics.orders orders ON "
"order_lines.order_unique_id=orders.id GROUP BY orders.new_vs_repeat "
"ORDER BY orders_new_vs_repeat ASC;"
)
assert query == correct
@pytest.mark.query
def test_query_single_dimension_with_comment(connection):
query = connection.get_sql_query(metrics=["total_item_revenue"], dimensions=["parent_channel"])
correct = (
"SELECT CASE\n--- parent channel\nWHEN order_lines.sales_channel ilike '%social%' then "
"'Social'\nELSE 'Not Social'\nEND as order_lines_parent_channel,"
"SUM(order_lines.revenue) as order_lines_total_item_revenue "
"FROM analytics.order_line_items order_lines GROUP BY CASE\n--- parent channel\nWHEN "
"order_lines.sales_channel ilike '%social%' then 'Social'\nELSE 'Not Social'\nEND "
"ORDER BY order_lines_total_item_revenue DESC;"
)
assert query == correct
@pytest.mark.query
def test_query_single_dimension_with_multi_filter(connection):
query = connection.get_sql_query(metrics=["total_item_costs"], dimensions=["channel"])
correct = (
"SELECT order_lines.sales_channel as order_lines_channel,SUM(case when order_lines.product_name"
"='Portable Charger' and order_lines.product_name IN ('Portable Charger','Dual Charger') "
"and orders.revenue * 100>100 then order_lines.item_costs end) "
"as order_lines_total_item_costs FROM analytics.order_line_items order_lines LEFT JOIN "
"analytics.orders orders ON order_lines.order_unique_id=orders.id "
"GROUP BY order_lines.sales_channel ORDER BY order_lines_total_item_costs DESC;"
)
assert query == correct
@pytest.mark.query
def test_query_single_dimension_sa_duration(connection):
query = connection.get_sql_query(metrics=["average_days_between_orders"], dimensions=["product_name"])
correct = (
"SELECT order_lines.product_name as order_lines_product_name,(COALESCE(CAST((SUM(DISTINCT "
"(CAST(FLOOR(COALESCE(DATEDIFF('DAY', orders.previous_order_date, orders.order_date), 0) "
"* (1000000 * 1.0)) AS DECIMAL(38,0))) + (TO_NUMBER(MD5(orders.id), "
"'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') % 1.0e27)::NUMERIC(38, 0)) "
"- SUM(DISTINCT (TO_NUMBER(MD5(orders.id), 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') "
"% 1.0e27)::NUMERIC(38, 0))) AS DOUBLE PRECISION) / CAST((1000000*1.0) AS DOUBLE PRECISION), 0) "
"/ NULLIF(COUNT(DISTINCT CASE WHEN (DATEDIFF('DAY', orders.previous_order_date, "
"orders.order_date)) IS NOT NULL THEN orders.id "
"ELSE NULL END), 0)) as orders_average_days_between_orders "
"FROM analytics.order_line_items order_lines LEFT JOIN analytics.orders orders "
"ON order_lines.order_unique_id=orders.id GROUP BY order_lines.product_name "
"ORDER BY orders_average_days_between_orders DESC;"
)
assert query == correct
@pytest.mark.query
def test_functional_pk_resolve_one_to_many(connection):
query = connection.get_sql_query(
metrics=["discount_usd"],
dimensions=["country"],
explore_name="discounts_only",
)
correct = (
"SELECT discounts.country as discounts_country,"
"SUM(discount_detail.total_usd) as discount_detail_discount_usd "
"FROM analytics_live.discounts discounts "
"LEFT JOIN analytics.discount_detail discount_detail "
"ON discounts.discount_id=discount_detail.discount_id "
"GROUP BY discounts.country ORDER BY discount_detail_discount_usd DESC;"
)
assert query == correct
@pytest.mark.query
def test_ensure_join_fields_are_respected(connection):
with pytest.raises(AccessDeniedOrDoesNotExistException) as exc_info:
connection.get_explore("order_lines_all")
connection.get_sql_query(
metrics=["discount_usd"],
dimensions=["discount_promo_name"],
explore_name="discounts_only",
)
assert exc_info.value
@pytest.mark.query
def test_query_single_join_count(connection):
query = connection.get_sql_query(
metrics=["order_lines.count"],
dimensions=["channel", "new_vs_repeat"],
explore_name="order_lines_all",
)
correct = (
"SELECT order_lines.sales_channel as order_lines_channel,"
"orders.new_vs_repeat as orders_new_vs_repeat,"
"COUNT(order_lines.order_line_id) as order_lines_count FROM "
"analytics.order_line_items order_lines LEFT JOIN analytics.orders orders ON "
"order_lines.order_unique_id=orders.id GROUP BY order_lines.sales_channel,orders.new_vs_repeat "
"ORDER BY order_lines_count DESC;"
)
assert query == correct
@pytest.mark.query
def test_query_single_join_metric_with_sub_field(connection):
query = connection.get_sql_query(
metrics=["line_item_aov"],
dimensions=["channel"],
)
correct = (
"SELECT order_lines.sales_channel as order_lines_channel,(SUM(order_lines.revenue)) "
"/ (NULLIF(COUNT(DISTINCT CASE WHEN (orders.id) IS NOT NULL "
"THEN orders.id ELSE NULL END), 0)) as order_lines_line_item_aov "
"FROM analytics.order_line_items order_lines LEFT JOIN analytics.orders orders "
"ON order_lines.order_unique_id=orders.id GROUP BY order_lines.sales_channel "
"ORDER BY order_lines_line_item_aov DESC;"
)
assert query == correct
@pytest.mark.query
def test_query_single_join_with_forced_additional_join(connection):
query = connection.get_sql_query(
metrics=["avg_rainfall"],
dimensions=["discount_promo_name"],
query_type="BIGQUERY",
)
correct = (
"SELECT discount_detail.promo_name as discount_detail_discount_promo_name,(COALESCE(CAST(("
"SUM(DISTINCT (CAST(FLOOR(COALESCE(country_detail.rain, 0) * (1000000 * 1.0)) AS FLOAT64))"
" + CAST(FARM_FINGERPRINT(CAST(country_detail.country AS STRING)) AS BIGNUMERIC)) - SUM(DISTINCT "
"CAST(FARM_FINGERPRINT(CAST(country_detail.country AS STRING)) AS BIGNUMERIC))) AS FLOAT64) "
"/ CAST((1000000*1.0) AS FLOAT64), 0) / NULLIF(COUNT(DISTINCT CASE WHEN "
"(country_detail.rain) IS NOT NULL THEN country_detail.country ELSE NULL END), "
"0)) as country_detail_avg_rainfall FROM analytics.order_line_items order_lines "
"LEFT JOIN analytics.orders orders ON order_lines.order_unique_id=orders.id "
"LEFT JOIN analytics_live.discounts discounts ON orders.id=discounts.order_id "
"LEFT JOIN analytics.discount_detail discount_detail "
"ON discounts.discount_id=discount_detail.discount_id "
"AND DATE_TRUNC(CAST(discounts.order_date as DATE), WEEK) is not null "
"LEFT JOIN (SELECT * FROM ANALYTICS.COUNTRY_DETAIL) as country_detail "
"ON discounts.country=country_detail.country and CAST(DATE_TRUNC(CAST(order_lines.order_date "
"as DATE), DAY) AS TIMESTAMP) is not null GROUP BY discount_detail.promo_name;"
)
assert query == correct
@pytest.mark.query
def test_query_single_join_select_args(connection):
query = connection.get_sql_query(
metrics=["total_item_revenue"],
dimensions=["channel", "new_vs_repeat"],
select_raw_sql=[
"CAST(new_vs_repeat = 'Repeat' AS INT) as group_1",
"CAST(date_created > '2021-04-02' AS INT) as period",
],
)
correct = (
"SELECT order_lines.sales_channel as order_lines_channel,"
"orders.new_vs_repeat as orders_new_vs_repeat,"
"SUM(order_lines.revenue) as order_lines_total_item_revenue,"
"CAST(new_vs_repeat = 'Repeat' AS INT) as group_1,"
"CAST(date_created > '2021-04-02' AS INT) as period FROM "
"analytics.order_line_items order_lines LEFT JOIN analytics.orders orders ON "
"order_lines.order_unique_id=orders.id GROUP BY order_lines.sales_channel,orders.new_vs_repeat,"
"CAST(new_vs_repeat = 'Repeat' AS INT),CAST(date_created > '2021-04-02' AS INT) "
"ORDER BY order_lines_total_item_revenue DESC;"
)
assert query == correct
@pytest.mark.query
def test_query_single_join_with_case_raw_sql(connection):
query = connection.get_sql_query(
metrics=["total_item_revenue"],
dimensions=["is_on_sale_sql", "new_vs_repeat"],
)
correct = (
"SELECT CASE WHEN order_lines.product_name ilike '%sale%' then TRUE else FALSE end "
"as order_lines_is_on_sale_sql,orders.new_vs_repeat as orders_new_vs_repeat,"
"SUM(order_lines.revenue) as order_lines_total_item_revenue FROM "
"analytics.order_line_items order_lines LEFT JOIN analytics.orders orders "
"ON order_lines.order_unique_id=orders.id GROUP BY CASE WHEN order_lines.product_name "
"ilike '%sale%' then TRUE else FALSE end,orders.new_vs_repeat "
"ORDER BY order_lines_total_item_revenue DESC;"
)
assert query == correct
@pytest.mark.query
def test_query_single_join_with_case(connection):
query = connection.get_sql_query(
metrics=["total_item_revenue"],
dimensions=["is_on_sale_case", "new_vs_repeat"],
)
correct = (
"SELECT case when order_lines.product_name ilike '%sale%' then 'On sale' else 'Not on sale' end " # noqa
"as order_lines_is_on_sale_case,orders.new_vs_repeat as orders_new_vs_repeat,"
"SUM(order_lines.revenue) as order_lines_total_item_revenue FROM "
"analytics.order_line_items order_lines LEFT JOIN analytics.orders orders "
"ON order_lines.order_unique_id=orders.id GROUP BY case when order_lines.product_name "
"ilike '%sale%' then 'On sale' else 'Not on sale' end,orders.new_vs_repeat "
"ORDER BY order_lines_total_item_revenue DESC;"
)
assert query == correct
@pytest.mark.query
def test_query_single_join_with_tier(connection):
query = connection.get_sql_query(
metrics=["total_item_revenue"],
dimensions=["order_tier", "new_vs_repeat"],
)
tier_case_query = "case when order_lines.revenue < 0 then 'Below 0' when order_lines.revenue >= 0 "
tier_case_query += "and order_lines.revenue < 20 then '[0,20)' when order_lines.revenue >= 20 and "
tier_case_query += "order_lines.revenue < 50 then '[20,50)' when order_lines.revenue >= 50 and "
tier_case_query += "order_lines.revenue < 100 then '[50,100)' when order_lines.revenue >= 100 and "
tier_case_query += "order_lines.revenue < 300 then '[100,300)' when order_lines.revenue >= 300 "
tier_case_query += "then '[300,inf)' else 'Unknown' end"
correct = (
f"SELECT {tier_case_query} as order_lines_order_tier,orders.new_vs_repeat as orders_new_vs_repeat,"
"SUM(order_lines.revenue) as order_lines_total_item_revenue FROM "
"analytics.order_line_items order_lines LEFT JOIN analytics.orders orders "
f"ON order_lines.order_unique_id=orders.id GROUP BY {tier_case_query},orders.new_vs_repeat "
"ORDER BY order_lines_total_item_revenue DESC;"
)
assert query == correct
@pytest.mark.query
def test_query_single_join_with_filter(connection):
query = connection.get_sql_query(
metrics=["number_of_email_purchased_items"],
dimensions=["channel", "new_vs_repeat"],
)
correct = (
"SELECT order_lines.sales_channel as order_lines_channel,"
"orders.new_vs_repeat as orders_new_vs_repeat,"
"COUNT(case when order_lines.sales_channel='Email' then order_lines.order_id end) "
"as order_lines_number_of_email_purchased_items FROM analytics.order_line_items "
"order_lines LEFT JOIN analytics.orders orders ON order_lines.order_unique_id=orders.id"
" GROUP BY order_lines.sales_channel,orders.new_vs_repeat "
"ORDER BY order_lines_number_of_email_purchased_items DESC;"
)
assert query == correct
@pytest.mark.query
def test_query_multiple_join(connection):
query = connection.get_sql_query(
metrics=["total_item_revenue"],
dimensions=["region", "new_vs_repeat"],
)
correct = (
"SELECT customers.region as customers_region,orders.new_vs_repeat as orders_new_vs_repeat,"
"SUM(order_lines.revenue) as order_lines_total_item_revenue FROM "
"analytics.order_line_items order_lines "
"LEFT JOIN analytics.orders orders ON order_lines.order_unique_id=orders.id "
"LEFT JOIN analytics.customers customers ON order_lines.customer_id=customers.customer_id "
"GROUP BY customers.region,orders.new_vs_repeat ORDER BY order_lines_total_item_revenue DESC;"
)
assert query == correct
@pytest.mark.query
def test_query_multiple_join_with_duration(connection):
query = connection.get_sql_query(
metrics=["total_sessions"],
dimensions=["months_between_orders"],
)
correct = (
"SELECT DATEDIFF('MONTH', orders.previous_order_date, orders.order_date) as orders_months_between_orders," # noqa
"COALESCE(CAST((SUM(DISTINCT (CAST(FLOOR(COALESCE(case when customers.is_churned=false then "
"customers.total_sessions end, 0) * (1000000 * 1.0)) AS DECIMAL(38,0))) "
"+ (TO_NUMBER(MD5(case when customers.is_churned=false then customers.customer_id end), 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') % 1.0e27)::NUMERIC(38, 0)) " # noqa
"- SUM(DISTINCT (TO_NUMBER(MD5(case when customers.is_churned=false then customers.customer_id end), 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') " # noqa
"% 1.0e27)::NUMERIC(38, 0))) AS DOUBLE PRECISION) / CAST((1000000*1.0) AS DOUBLE PRECISION), 0) "
"as customers_total_sessions "
"FROM analytics.order_line_items order_lines "
"LEFT JOIN analytics.orders orders ON order_lines.order_unique_id=orders.id "
"LEFT JOIN analytics.customers customers ON order_lines.customer_id=customers.customer_id "
"GROUP BY DATEDIFF('MONTH', orders.previous_order_date, orders.order_date) "
"ORDER BY customers_total_sessions DESC;"
)
assert query == correct
@pytest.mark.query
def test_query_multiple_join_where_dict(connection):
query = connection.get_sql_query(
metrics=["total_item_revenue"],
dimensions=["region", "new_vs_repeat"],
where=[{"field": "region", "expression": "not_equal_to", "value": "West"}],
)
correct = (
"SELECT customers.region as customers_region,orders.new_vs_repeat as orders_new_vs_repeat,"
"SUM(order_lines.revenue) as order_lines_total_item_revenue FROM "
"analytics.order_line_items order_lines "
"LEFT JOIN analytics.orders orders ON order_lines.order_unique_id=orders.id "
"LEFT JOIN analytics.customers customers ON order_lines.customer_id=customers.customer_id "
"WHERE customers.region<>'West' "
"GROUP BY customers.region,orders.new_vs_repeat ORDER BY order_lines_total_item_revenue DESC;"
)
assert query == correct
@pytest.mark.query
def test_query_multiple_join_where_literal(connection):
query = connection.get_sql_query(
metrics=["total_item_revenue"],
dimensions=["region", "new_vs_repeat"],
where="first_order_week > '2021-07-12'",
)
correct = (
"SELECT customers.region as customers_region,orders.new_vs_repeat as orders_new_vs_repeat,"
"SUM(order_lines.revenue) as order_lines_total_item_revenue FROM "
"analytics.order_line_items order_lines "
"LEFT JOIN analytics.orders orders ON order_lines.order_unique_id=orders.id "
"LEFT JOIN analytics.customers customers ON order_lines.customer_id=customers.customer_id "
"WHERE DATE_TRUNC('WEEK', CAST(customers.first_order_date as DATE)) > '2021-07-12' "
"GROUP BY customers.region,orders.new_vs_repeat ORDER BY order_lines_total_item_revenue DESC;"
)
assert query == correct
@pytest.mark.query
def test_query_multiple_join_having_dict(connection):
query = connection.get_sql_query(
metrics=["total_item_revenue"],
dimensions=["region", "new_vs_repeat"],
having=[{"field": "total_item_revenue", "expression": "greater_than", "value": -12}],
)
correct = (
"SELECT customers.region as customers_region,orders.new_vs_repeat as orders_new_vs_repeat,"
"SUM(order_lines.revenue) as order_lines_total_item_revenue FROM "
"analytics.order_line_items order_lines "
"LEFT JOIN analytics.orders orders ON order_lines.order_unique_id=orders.id "
"LEFT JOIN analytics.customers customers ON order_lines.customer_id=customers.customer_id "
"GROUP BY customers.region,orders.new_vs_repeat HAVING SUM(order_lines.revenue)>-12 "
"ORDER BY order_lines_total_item_revenue DESC;"
)
assert query == correct
@pytest.mark.query
def test_query_multiple_join_having_literal(connection):
query = connection.get_sql_query(
metrics=["total_item_revenue"],
dimensions=["region", "new_vs_repeat"],
having="total_item_revenue > -12",
)
correct = (
"SELECT customers.region as customers_region,orders.new_vs_repeat as orders_new_vs_repeat,"
"SUM(order_lines.revenue) as order_lines_total_item_revenue FROM "
"analytics.order_line_items order_lines "
"LEFT JOIN analytics.orders orders ON order_lines.order_unique_id=orders.id "
"LEFT JOIN analytics.customers customers ON order_lines.customer_id=customers.customer_id "
"GROUP BY customers.region,orders.new_vs_repeat HAVING (SUM(order_lines.revenue)) > -12 "
"ORDER BY order_lines_total_item_revenue DESC;"
)
assert query == correct
@pytest.mark.query
def test_query_multiple_join_order_by_literal(connection):
query = connection.get_sql_query(
metrics=["total_item_revenue"],
dimensions=["region", "new_vs_repeat"],
order_by="total_item_revenue",
)
correct = (
"SELECT customers.region as customers_region,orders.new_vs_repeat as orders_new_vs_repeat,"
"SUM(order_lines.revenue) as order_lines_total_item_revenue FROM "
"analytics.order_line_items order_lines "
"LEFT JOIN analytics.orders orders ON order_lines.order_unique_id=orders.id "
"LEFT JOIN analytics.customers customers ON order_lines.customer_id=customers.customer_id "
"GROUP BY customers.region,orders.new_vs_repeat ORDER BY total_item_revenue ASC;"
)
assert query == correct
@pytest.mark.query
def test_query_multiple_join_all(connection):
query = connection.get_sql_query(
metrics=["total_item_revenue"],
dimensions=["region", "new_vs_repeat"],
where=[{"field": "region", "expression": "not_equal_to", "value": "West"}],
having=[{"field": "total_item_revenue", "expression": "greater_than", "value": -12}],
order_by=[{"field": "total_item_revenue", "sort": "asc"}],
)
correct = (
"SELECT customers.region as customers_region,orders.new_vs_repeat as orders_new_vs_repeat,"
"SUM(order_lines.revenue) as order_lines_total_item_revenue FROM "
"analytics.order_line_items order_lines "
"LEFT JOIN analytics.orders orders ON order_lines.order_unique_id=orders.id "
"LEFT JOIN analytics.customers customers ON order_lines.customer_id=customers.customer_id "
"WHERE customers.region<>'West' "
"GROUP BY customers.region,orders.new_vs_repeat HAVING SUM(order_lines.revenue)>-12 "
"ORDER BY total_item_revenue ASC;"
)
assert query == correct
@pytest.mark.query
def test_query_single_join_count_and_filter(connection):
query = connection.get_sql_query(
metrics=["new_order_count"],
dimensions=["channel"],
)
correct = (
"SELECT order_lines.sales_channel as order_lines_channel,COUNT(DISTINCT("
"case when orders.new_vs_repeat='New' then orders.id end)) "
"as orders_new_order_count FROM analytics.order_line_items order_lines "
"LEFT JOIN analytics.orders orders ON order_lines.order_unique_id=orders.id "
"GROUP BY order_lines.sales_channel ORDER BY orders_new_order_count DESC;"
)
assert query == correct
@pytest.mark.query
def test_query_number_measure_w_dimension_reference(connection):
query = connection.get_sql_query(
metrics=["ending_on_hand_qty"],
dimensions=["product_name"],
)
correct = (
"SELECT order_lines.product_name as order_lines_product_name,"
"split_part(listagg((order_lines.inventory_qty), ',') within group "
"(order by (DATE_TRUNC('DAY', order_lines.order_date)) desc), ',', 0)::int "
"as order_lines_ending_on_hand_qty "
"FROM analytics.order_line_items order_lines GROUP BY order_lines.product_name "
"ORDER BY order_lines_ending_on_hand_qty DESC;"
)
assert query == correct
@pytest.mark.query
@pytest.mark.parametrize("bool_value", ["True", "False"])
def test_query_bool_and_date_filter(connection, bool_value):
query = connection.get_sql_query(
metrics=["total_item_revenue"],
dimensions=["channel"],
where=[
{"field": "is_churned", "expression": "equal_to", "value": bool_value},
{"field": "order_lines.order_date", "expression": "greater_than", "value": "2022-04-03"},
],
)
if bool_value == "True":
negation = ""
else:
negation = "NOT "
correct = (
"SELECT order_lines.sales_channel as order_lines_channel,SUM(order_lines.revenue) "
"as order_lines_total_item_revenue FROM analytics.order_line_items order_lines "
"LEFT JOIN analytics.customers customers ON order_lines.customer_id=customers.customer_id "
f"WHERE {negation}customers.is_churned AND DATE_TRUNC('DAY', order_lines.order_date)>'2022-04-03' "
"GROUP BY order_lines.sales_channel ORDER BY order_lines_total_item_revenue DESC;"
)
assert query == correct
@pytest.mark.query
def test_cross_join(connection):
query = connection.get_sql_query(
metrics=["sessions.number_of_sessions"],
explore_name="discounts_only",
)
correct = (
"SELECT NULLIF(COUNT(DISTINCT CASE WHEN (sessions.id) IS NOT NULL THEN "
" sessions.id ELSE NULL END), 0) as sessions_number_of_sessions "
"FROM analytics_live.discounts discounts CROSS JOIN analytics.sessions sessions "
"ORDER BY sessions_number_of_sessions DESC;"
)
assert query == correct
|
StarcoderdataPython
|
4820307
|
<reponame>sanger-tol/weskit-api
# Copyright (c) 2021. Berlin Institute of Health (BIH) and Deutsches Krebsforschungszentrum (DKFZ).
#
# Distributed under the MIT License. Full text at
#
# https://gitlab.com/one-touch-pipeline/weskit/api/-/blob/master/LICENSE
#
# Authors: The WESkit Team
import uuid
import pytest
from pymongo import MongoClient
from weskit.classes.Run import Run
from weskit.classes.RunStatus import RunStatus
mock_run_data = {
"run_id": str(uuid.uuid4()),
"run_status": "INITIALIZING",
"request_time": None,
"user_id": "test_id",
"request": {
"workflow_url": "",
"workflow_params": '{"text":"hello_world"}'
},
}
@pytest.mark.integration
def test_create_and_load_run(database_container):
new_run = Run(mock_run_data)
new_run.status = RunStatus.RUNNING
client = MongoClient(database_container.get_connection_url())
db = client["WES"]
collection = db["test_runs"]
collection.insert_one(dict(new_run))
data = collection.find()
for x in data:
load_run = Run(x)
if load_run.id == new_run.id:
assert dict(load_run) == dict(new_run)
def test_create_run_fails():
with pytest.raises(Exception):
Run({})
|
StarcoderdataPython
|
1757992
|
<reponame>Julio-Yanes/NiMARE
import os
import pathlib
import click
from nilearn.image import resample_to_img
from nilearn.masking import apply_mask
from ..base import MetaResult
from ..meta.ibma import rfx_glm
from ..meta.cbma import Peaks2MapsKernel
from ..io import convert_sleuth_to_dataset
n_iters_default = 10000
@click.command(name='peaks2maps',
short_help='Permutation-based meta-analysis of coordinates '
'that uses deep learning to reconstruct the original '
'maps.',
help='Method for performing coordinate-based meta-analysis that '
'uses a pretrained deep neural network to reconstruct '
'unthresholded maps from peak coordinates. The reconstructed '
'maps are evaluated for statistical significance using a '
'permutation-based approach with Family Wise Error multiple '
'comparison correction.')
@click.argument('sleuth_file', type=click.Path(exists=True))
@click.option('--output_dir', help="Where to put the output maps.")
@click.option('--output_prefix', help="Common prefix for output maps.")
@click.option('--n_iters', default=n_iters_default, show_default=True,
help="Number of iterations for permutation testing.")
def peaks2maps_workflow(sleuth_file, output_dir=None, output_prefix=None,
n_iters=n_iters_default):
click.echo("Loading coordinates...")
dset = convert_sleuth_to_dataset(sleuth_file)
click.echo("Reconstructing unthresholded maps...")
k = Peaks2MapsKernel(resample_to_mask=False)
imgs = k.transform(dset, masked=False)
mask_img = resample_to_img(dset.mask, imgs[0], interpolation='nearest')
z_data = apply_mask(imgs, mask_img)
click.echo("Estimating the null distribution...")
res = rfx_glm(z_data, null='empirical', n_iters=n_iters)
res = MetaResult('rfx_glm', maps=res, mask=mask_img)
if output_dir is None:
output_dir = os.path.dirname(os.path.abspath(sleuth_file))
else:
pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)
if output_prefix is None:
base = os.path.basename(sleuth_file)
output_prefix, _ = os.path.splitext(base)
output_prefix += '_'
click.echo("Saving output maps...")
res.save_maps(output_dir=output_dir, prefix=output_prefix)
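# Hypothetical invocation sketch (assumes this click command is exposed by the
# package's CLI under the name declared in @click.command above; the input
# file name is made up):
#   nimare peaks2maps my_sleuth_coordinates.txt --n_iters 1000 --output_dir out/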
|
StarcoderdataPython
|
1707155
|
from .modeconvo import ModeConvo
class ModeTalkWin3(ModeConvo):
pass
|
StarcoderdataPython
|
38054
|
<reponame>kevin-ci/advent-of-code-2020<gh_stars>0
import re
input = """iyr:2010 ecl:gry hgt:181cm
pid:591597745 byr:1920 hcl:#6b5442 eyr:2029 cid:123
cid:223 byr:1927
hgt:177cm hcl:#602927 iyr:2016 pid:404183620
ecl:amb
eyr:2020
byr:1998
ecl:hzl
cid:178 hcl:#a97842 iyr:2014 hgt:166cm pid:594143498 eyr:2030
ecl:hzl
pid:795349208 iyr:2018
eyr:2024 hcl:#de745c hgt:157cm
hgt:159cm pid:364060467 eyr:2025 byr:1978 iyr:2018 cid:117
ecl:hzl
hcl:#18171d
hcl:#cfa07d
ecl:amb iyr:2012
hgt:182cm cid:338
eyr:2020
pid:374679609 byr:1925
eyr:2021 byr:1981
hcl:#623a2f cid:195 iyr:2010
pid:579769934 ecl:grn hgt:192cm
byr:1970
ecl:oth
eyr:2025
pid:409994798 iyr:2018 hgt:189cm
hgt:153cm pid:817651329 iyr:2019
eyr:2029
hcl:#623a2f byr:1920
ecl:gry
iyr:2011 ecl:amb hcl:#a97842 byr:1965 pid:648375525 eyr:2028 hgt:177cm cid:287
iyr:2012 pid:369979235 hcl:#c0946f
ecl:amb hgt:178cm
byr:1927 ecl:brn hgt:178cm eyr:2026 hcl:#efcc98
iyr:2011 pid:770851101
eyr:2028
ecl:oth cid:298
byr:1943
hgt:168cm iyr:2018 hcl:#ceb3a1 pid:116783406
eyr:2027 hgt:175cm hcl:#733820
ecl:gry cid:349 iyr:2017 byr:1960
pid:257797292
cid:66 ecl:amb
eyr:2030
iyr:2026 byr:2024
hcl:a22966 hgt:179cm pid:155cm
eyr:2023 hcl:#c0946f pid:081232570 ecl:hzl
iyr:2010 hgt:158cm byr:1969
byr:1958
ecl:grn hcl:#ceb3a1
hgt:173cm
pid:600039004
cid:107 iyr:2012 eyr:2027
ecl:amb pid:021066381
hcl:#ceb3a1 byr:1982 iyr:2017
hgt:167cm eyr:2025 cid:61
hcl:#341e13
cid:268
pid:358390884 hgt:188cm byr:1961 iyr:2014 eyr:2027 ecl:blu
ecl:brn eyr:2020
pid:607203641
hcl:#fffffd iyr:2011
byr:1962
hgt:156cm
iyr:2018
hcl:#b6652a
byr:1942 ecl:blu eyr:2029 hgt:154cm pid:649263319
ecl:oth hgt:73in iyr:2012 hcl:#888785 eyr:2020
pid:147939289
byr:1961
ecl:oth iyr:2015
hgt:189cm hcl:#341e13 pid:686943691 eyr:2023 byr:1987
pid:568844323
eyr:2023 byr:1921 hgt:167cm cid:154 hcl:#b6652a
ecl:gry iyr:2020
eyr:2023 byr:1994
iyr:1937 hgt:177cm hcl:#c0946f pid:686240814 cid:231 ecl:#a8ba32
hcl:#b6652a
byr:1946 pid:543383899 iyr:2013 hgt:153cm ecl:hzl cid:238 eyr:2023
eyr:2028 ecl:blu
hgt:154cm cid:252
pid:196374590
byr:1987 iyr:2011
hcl:#7d3b0c
iyr:2013
ecl:amb cid:187
hgt:187cm pid:593027548 byr:1963
eyr:2024 hcl:#fffffd
pid:588211492 hgt:156cm
iyr:2021 eyr:2021 ecl:gry hcl:z byr:1928
ecl:amb hcl:#888785 hgt:180cm eyr:2022 byr:1923 pid:490291639 cid:173 iyr:2015
iyr:2014 cid:211 pid:404157420 hcl:#602927
ecl:oth byr:1946 eyr:2030 hgt:175cm
hcl:z byr:2026
pid:61805448
hgt:125 iyr:2025
eyr:2028
hgt:156cm
hcl:#341e13 cid:103 ecl:amb iyr:2017 byr:1937 pid:320691739
hgt:185cm
pid:440489464 byr:1929 ecl:amb iyr:2011 eyr:2021 cid:327 hcl:#341e13
byr:1988 ecl:grn
pid:062728732 iyr:2013
hgt:181cm
hcl:#18171d
eyr:2026
pid:000647617
eyr:2029 byr:1937
ecl:gry hcl:#e8eff3 hgt:164cm cid:151
iyr:2016
hgt:179cm
byr:1949
eyr:2029 pid:459190453
ecl:grn iyr:2020 hcl:#c0946f
hgt:160cm pid:476613532 cid:190 iyr:2016 hcl:#4657e5
byr:1929
eyr:2028
ecl:grn
eyr:2027 byr:1982
hcl:#18171d
pid:630408328
cid:65 iyr:2020
hgt:161cm
pid:752776254
hcl:#888785
hgt:189cm
eyr:2027 iyr:2020 ecl:hzl
cid:194 byr:1934
iyr:2015 hgt:167cm byr:1977
eyr:2021 hcl:#14564f pid:504471386 ecl:oth
hgt:84 pid:168cm
hcl:8532fb eyr:2023
iyr:2012 ecl:xry byr:2008 cid:288
cid:323 eyr:2024
iyr:2019
pid:495737304 byr:1966 hcl:#7d3b0c ecl:hzl
hgt:73in
iyr:2020 byr:1953 ecl:hzl hcl:#efcc98 hgt:174cm eyr:2026 pid:546906638
pid:839249028
hcl:z byr:2024
hgt:145 eyr:2034 iyr:2021 ecl:#891c47
eyr:2036
ecl:#89d2ae
cid:183 byr:2014
hcl:b3af0f
pid:12086913 iyr:1981
hgt:61cm
ecl:brn eyr:2030 pid:083487445 byr:1929 hcl:z iyr:2021
hgt:182 cid:318
eyr:2020
pid:188609216 hcl:#341e13
iyr:2012 hgt:179cm
eyr:2029
hcl:#888785 pid:704026565 hgt:173cm iyr:2020 ecl:blu byr:1950 cid:237
ecl:grn
eyr:2030
byr:1961 pid:695808266
iyr:2012 cid:56
hgt:155cm
iyr:2011 ecl:amb
byr:1986 pid:243061330 hgt:163cm eyr:2021
eyr:2030 hcl:#623a2f hgt:170cm ecl:hzl
pid:694575319 iyr:2011
byr:1939
iyr:2014 pid:184152121
hcl:#c0946f hgt:163cm
eyr:2028 byr:1992 cid:114
ecl:hzl
hgt:75in cid:233
hcl:#866857 pid:269157261 iyr:2020
byr:1973 eyr:2029
hgt:174cm
hcl:#f86751 iyr:2016
pid:904779190
ecl:brn eyr:2024 byr:1950
cid:123 iyr:2019
eyr:2030 pid:402585706
ecl:brn byr:1995 hcl:#4ff7fa
hgt:65in
ecl:grn eyr:2029
pid:083364259 iyr:2013 cid:50 byr:1938 hgt:187cm
hcl:#a97842
hcl:#6b5442 cid:101 iyr:2011
ecl:amb eyr:2029 byr:1963 pid:664573740
eyr:2025 hcl:#602927
hgt:188cm
iyr:2019
pid:521514539 byr:1940 ecl:gry
hcl:dc0449 eyr:1981 pid:188cm
cid:151 iyr:1979 hgt:61cm ecl:dne
byr:2028
iyr:2017 byr:1924
hgt:163cm eyr:2024 hcl:#ceb3a1 pid:424127124
ecl:amb
eyr:2039 pid:7837217107 hcl:z byr:2005
iyr:1989 ecl:#d95f4d hgt:190in
ecl:#329eb1 cid:178 hgt:192
eyr:2020 iyr:2012
hcl:#602927
byr:2028 pid:7175349420
ecl:gry byr:1931
hgt:162cm iyr:2014
eyr:2030 cid:50
hcl:#cfa07d pid:653585396
eyr:2025 hgt:177cm
ecl:gry hcl:#efcc98
iyr:2015
byr:1942
pid:388475446
hcl:#efcc98 ecl:grn
hgt:185cm
byr:1921 pid:253592171
eyr:2031 cid:220 iyr:2024
byr:1950
hgt:158cm ecl:gry iyr:2015 hcl:#18171d
eyr:2023
pid:151cm
byr:1957
hcl:z
eyr:2026
ecl:grn
iyr:1971 hgt:192in pid:5479810865
hgt:161cm pid:473851111 iyr:2018
ecl:brn byr:1982
eyr:2029
pid:136216608 byr:1958
cid:226 eyr:2023 hcl:#866857 iyr:2017 ecl:hzl hgt:159cm
byr:1993 hcl:#866857 hgt:169cm pid:488392920
cid:109 iyr:2017 ecl:oth eyr:2029
cid:248 ecl:amb eyr:2025 iyr:2017 byr:1951 hcl:#ceb3a1 pid:731763175 hgt:162cm
hcl:#835e79
eyr:2021
ecl:oth pid:617055193 byr:1997 iyr:2010
hgt:173cm
eyr:2024 pid:257895944
hcl:#ceb3a1
hgt:165cm
ecl:oth iyr:2020
byr:1958
pid:438580092
ecl:grt byr:2025
hcl:z iyr:2000 eyr:1952
cid:271 hgt:170in
iyr:2010 hcl:#6b5442 hgt:156cm
eyr:2026 ecl:grn pid:409793041 byr:1941
pid:076486440
hgt:177cm hcl:#888785 ecl:blu iyr:2017 eyr:2029
eyr:2028 ecl:amb hgt:186cm hcl:#1d5836 pid:563307670 iyr:2019 byr:1950
byr:1939 ecl:hzl hgt:193cm pid:329759796
hcl:#cfa07d eyr:2025 iyr:2011 cid:73
byr:1995
hgt:188cm eyr:2028
ecl:blu
iyr:2016 hcl:#888785 pid:459613739 cid:115
hcl:#623a2f
eyr:2021 cid:197 hgt:187cm ecl:oth
byr:1969
iyr:2010 pid:385660251
hgt:192cm cid:143 byr:1995 hcl:#fffffd
iyr:2017 ecl:oth
eyr:2020 pid:087661720
ecl:oth
byr:1994 hgt:183cm
eyr:2020 iyr:2020 pid:448389966 cid:92 hcl:#866857
pid:088166852 hgt:155cm cid:307 byr:1940
hcl:#7d3b0c
ecl:#af542f eyr:2023 iyr:2014
byr:2026 eyr:2039 hcl:5449b3
ecl:hzl hgt:176in
iyr:1962 pid:177cm
iyr:2020 ecl:amb hgt:164cm hcl:#c0946f
pid:931543453 eyr:2024 byr:2001
iyr:2010 eyr:2023 hgt:188cm
hcl:#866857 ecl:hzl pid:866631112 byr:1997
byr:1958 hgt:184cm
cid:117 hcl:#7d3b0c iyr:2019 pid:615734013 eyr:2028 ecl:gry
hgt:86 iyr:1935 ecl:grt pid:#af8e67 eyr:2031
byr:2018 hcl:6a2940
hgt:73in eyr:2022 pid:580461358 byr:1962
cid:129 iyr:2015 hcl:#7d3b0c
iyr:2019 hcl:#b6652a hgt:172cm ecl:blu pid:077121198 eyr:2021
byr:1995
hcl:#ceb3a1 cid:253
iyr:2015 hgt:177cm byr:1973
ecl:hzl pid:311289324 eyr:2025
iyr:2017 hcl:#efcc98
cid:57 byr:1940 ecl:blu
eyr:2025 hgt:157cm pid:827480048
eyr:2028 hgt:189cm
iyr:2016 byr:1978 ecl:hzl pid:127497651 cid:87
hcl:#623a2f
hcl:#341e13 byr:2015
ecl:brn hgt:187in
pid:82075551
eyr:1936
cid:200
iyr:1939
ecl:grn byr:1962
iyr:2011 hgt:169cm
pid:661559147
hcl:#623a2f eyr:2023
ecl:gry
hcl:#efcc98 eyr:2009 byr:2028
hgt:170in
cid:129 pid:161cm iyr:2018
pid:098090405 hcl:#623a2f byr:1943 ecl:hzl
hgt:152cm iyr:2013 eyr:2029
pid:495271053 iyr:2011 ecl:gry hcl:#623a2f cid:285
byr:1925 eyr:2024 hgt:187cm
cid:306
hgt:73in
iyr:2010 hcl:#448fd7
byr:1946
ecl:grn pid:137146932 eyr:2021
eyr:2020 hgt:159cm cid:90 iyr:2010 ecl:brn hcl:#341e13 byr:1955
hcl:#18171d iyr:2017 ecl:amb
pid:168517472
eyr:2021 hgt:181cm byr:1942
cid:325 eyr:2022 pid:947158470 byr:1994 iyr:2019 ecl:grn hgt:172cm hcl:#ec63ce
iyr:2011
pid:243339529
ecl:amb
hgt:169cm
byr:1967
eyr:2025 hcl:#b6652a
pid:664966826 eyr:2036 iyr:2015 byr:1972 hgt:68in
hcl:z
ecl:#038105
eyr:2021 pid:236054221
hgt:179cm
hcl:#b6652a iyr:2020 ecl:blu
ecl:grn
iyr:2010
pid:870519416 byr:1945 hcl:#a97842
hgt:176cm eyr:2030
hcl:#3318db eyr:2022
byr:1966
ecl:grn iyr:2013
cid:349
hgt:168cm pid:827688488
pid:124116963
hcl:#866857 eyr:2026
iyr:2013 ecl:grn byr:1983 hgt:183cm
iyr:2017 byr:1993
hcl:#18171d ecl:utc hgt:68in cid:168 eyr:2030 pid:#2fd9f2
ecl:blu cid:134 eyr:2025 pid:588957573
iyr:2017
hgt:151cm byr:1942 hcl:#4280c1
hcl:#51b593
iyr:2013
ecl:amb pid:668244584
cid:282
byr:1936
eyr:1985 hgt:161cm
pid:494051052
hgt:185cm byr:1996 eyr:2028 iyr:2018
ecl:amb
hcl:#efcc98
ecl:brn
eyr:2025
iyr:2011
hgt:163cm hcl:#a97842
byr:1989 pid:557549000
pid:828235468 cid:55
iyr:2010 byr:1926 eyr:2029 hgt:153cm hcl:#cfa07d
ecl:blu
hgt:158cm iyr:2015 pid:957913612 ecl:grn eyr:2020 byr:1984 cid:76 hcl:#6b5442
ecl:amb eyr:2020 pid:596116320
byr:1936
hcl:#cfa07d
hgt:165cm cid:86 iyr:2014
iyr:2012
cid:278 hcl:#602927
eyr:2020 ecl:hzl
hgt:176cm byr:1987 pid:594817909
iyr:2011 byr:1929 pid:073211525 eyr:2022
hgt:188cm
ecl:blu
hcl:#733820
hcl:#602927 hgt:187cm
pid:706155322 cid:203
ecl:brn byr:1952 iyr:2017 eyr:2020
hcl:bcb5f7
byr:2002 eyr:2029 pid:850069752 iyr:2019 ecl:hzl
hgt:167cm
hcl:#b6652a hgt:72in iyr:2013
ecl:grn eyr:2024 byr:1920 cid:114
pid:983486664
byr:1931 iyr:2020 pid:182737852 hgt:162cm
ecl:grn hcl:#888785 eyr:2028
eyr:2035
byr:1962 iyr:2012 cid:120
ecl:xry
hgt:61cm hcl:ce89a8 pid:335540582
pid:#05153d iyr:1990
eyr:1927 hgt:71cm
byr:2019 cid:346 ecl:#e38688
hcl:c6abd9
ecl:#cd58d8 pid:166cm iyr:2012
hcl:0d1b02 hgt:68
eyr:1958
pid:976419172 byr:1922 cid:345 hcl:#6b5442 iyr:2010 eyr:2026
ecl:grn hgt:155cm
ecl:gry hcl:#1bbadc hgt:168cm
eyr:2028
byr:1984 cid:179 iyr:2013 pid:706186218
ecl:blu hgt:188cm
pid:764775319 byr:1936 hcl:#7d3b0c iyr:2020
hcl:#623a2f
iyr:2012
pid:382832140 ecl:gry
eyr:2026
cid:350
hgt:165cm byr:1968
hcl:0b87a1 byr:2020 pid:4365879329
cid:110 ecl:grn
eyr:2032 hgt:155cm
iyr:2018
hgt:193cm eyr:2029 hcl:#733820 pid:081071142 byr:1929 ecl:oth
ecl:brn
eyr:2023 pid:876924536 cid:165
hcl:#efcc98 hgt:151cm byr:1972
iyr:2020
hgt:186cm eyr:2022
ecl:grn
byr:1972 pid:997639611 hcl:#ceb3a1 iyr:2013
byr:1926
pid:808460262
iyr:2012 eyr:2031 hcl:#a97842 ecl:amb
hgt:190cm
hgt:163cm
hcl:#ceb3a1 eyr:2028
ecl:grn
byr:1944 pid:381144425 iyr:2012
hcl:#95a232 pid:015229624 byr:1947 iyr:2013 hgt:66cm ecl:gry eyr:2027
hcl:z byr:1965 iyr:2013 hgt:157cm ecl:#8b12fb cid:246 pid:283039791 eyr:2023
ecl:gry byr:1950
hcl:#623a2f cid:276 iyr:2013 eyr:2030 pid:798610943 hgt:189in
eyr:2030 cid:52 hcl:#fffffd pid:041625574 ecl:amb iyr:2016 byr:1944
hgt:191cm
byr:1995
iyr:2015 cid:221 pid:279080024
eyr:2022
hgt:181cm ecl:brn hcl:#888785
hcl:z
ecl:blu
iyr:1970
eyr:2022
hgt:193cm pid:#540e31 cid:95 byr:1952
hcl:z eyr:2024 ecl:hzl
byr:2028
cid:323 pid:1949331457
hgt:69
eyr:2030 hcl:#866857
cid:173 iyr:2017
hgt:190cm byr:1941
ecl:blu
pid:269015932
hcl:#b6652a
iyr:2018
eyr:2022 ecl:brn hgt:185cm pid:456195468
hcl:#6b5442 hgt:188cm
iyr:2019 byr:1966 cid:298
pid:050653473
ecl:gry eyr:2028
cid:208
ecl:amb eyr:2023 hgt:176cm byr:1971 hcl:#7d3b0c pid:650190272 iyr:2018
hgt:68in pid:615309584
iyr:2011 byr:1950
hcl:#efcc98 ecl:oth
eyr:2024
eyr:2022 iyr:2011 hcl:#623a2f ecl:amb byr:1955
hgt:190cm
pid:244918527
iyr:2013 hcl:#ceb3a1 eyr:2029 hgt:164cm
ecl:oth
byr:1928 pid:337615663
hcl:#ceb3a1 pid:#ae7eea byr:2027
cid:254
hgt:125
iyr:1940
ecl:zzz
pid:033663619 iyr:2012 byr:1989 eyr:2030 ecl:hzl
hcl:#b6652a hgt:154cm
hgt:175cm byr:1929 pid:100788192
ecl:#92b14c
iyr:1940 hcl:#ceb3a1 eyr:2033
eyr:2029
pid:357835141 ecl:oth iyr:2019 hcl:#866857 hgt:154cm byr:1954
pid:895992818 byr:1965 iyr:2017 hcl:#efcc98 ecl:amb hgt:153cm eyr:2025
byr:1928 ecl:amb hgt:168cm pid:346938111 eyr:2025 iyr:2014
hcl:#cfa07d
hcl:#b6652a pid:825661608 eyr:2020 iyr:2019 byr:1974
hgt:180cm ecl:amb
byr:1970 hgt:159cm hcl:#733820 pid:101838832 iyr:2015 eyr:2027 ecl:blu
byr:1941 ecl:amb
eyr:2024 pid:015890498
hgt:175cm
iyr:2018 hcl:#cfa07d
hgt:67in
pid:404983369 eyr:2023 iyr:2018 byr:1974 hcl:#602927
ecl:blu
byr:1957
hcl:#fcc940 pid:615831236
iyr:2018 eyr:2020 ecl:brn hgt:181cm cid:218
hcl:#fffffd ecl:grn pid:271614109
eyr:2028 hgt:184cm byr:1974 iyr:2015
ecl:#e45ee0 pid:151cm cid:127 iyr:2014 byr:2022 hcl:973bc1 eyr:2033 hgt:181in
hcl:#6b5442 pid:502739402 eyr:2020 byr:1926 ecl:brn
iyr:2010
ecl:xry hgt:169cm byr:2023
iyr:1973 pid:4137668
eyr:2037 hcl:z
ecl:#3a8c46 hcl:43730a pid:57210146 eyr:2031 cid:117 iyr:2013 byr:2010
hcl:#341e13 cid:237 hgt:150cm iyr:2016 byr:1967 ecl:blu
pid:674080319 eyr:2024
iyr:2011 hcl:#866857 pid:111247018
byr:1920 hgt:192in ecl:#8bf268 eyr:2021
iyr:2022 hcl:z ecl:gry
hgt:159cm
pid:#88e8df
byr:2026 eyr:2032 cid:221
hgt:156cm eyr:2026
ecl:blu
hcl:#192dea cid:280 pid:788808021 byr:1980
iyr:2013
hgt:156in
byr:2024 hcl:4e4dd6
eyr:2030
iyr:2028 pid:35683378
ecl:#3a9fba
pid:081236370 cid:150 hcl:d15b43 byr:2029 hgt:118 iyr:2026 eyr:2038
ecl:grt
eyr:2034 pid:186cm
ecl:utc cid:300 iyr:2009 byr:2018 hcl:163913 hgt:74cm
ecl:hzl
pid:249858519 byr:1936 hgt:182cm
cid:343 iyr:2013 eyr:2030 hcl:#7d3b0c
cid:168
ecl:hzl
hgt:174cm iyr:2020
pid:446135799 hcl:#888785
eyr:2024 byr:1998
pid:545342162
hcl:#5cd3bd cid:126
eyr:2024
iyr:2012 ecl:grn
pid:104835585
byr:1989 hcl:#733820 ecl:oth eyr:2024 iyr:2017
hgt:180cm
hgt:184cm byr:2001 pid:199216567 ecl:gry
eyr:2022
cid:185 hcl:#7d3b0c
iyr:2019
byr:1996 eyr:2022 pid:503963080 ecl:grn iyr:2010 hcl:#fffffd
eyr:2030 iyr:2017
pid:472300557 hcl:#a97842
ecl:grn hgt:190cm
byr:1994
ecl:#2a8a59
eyr:2027
iyr:2015 byr:2021 hgt:158cm pid:365979521 hcl:z cid:242
ecl:gry
iyr:2020 hcl:#866857
pid:363851353 cid:319 hgt:154cm eyr:2027
byr:1953
ecl:grn hgt:165cm eyr:2026
pid:443722683 hcl:#341e13
iyr:2018 byr:1923
byr:1920 ecl:blu
cid:193 hgt:153cm hcl:#341e13 iyr:2010 pid:934896568
eyr:2021
eyr:2025
pid:524699651 cid:92
hcl:#602927 byr:1999
iyr:2011 ecl:brn hgt:164cm
eyr:2030 pid:739947771 iyr:2018
byr:1990
hgt:185cm hcl:#602927 ecl:gry
byr:1967 ecl:amb iyr:2020 hcl:#341e13
hgt:165cm
pid:681478012 eyr:2028
pid:807715479 ecl:blu byr:1955 eyr:1972 iyr:2018 hcl:#a97842 hgt:151
pid:635008585 cid:97
hgt:186cm hcl:#b6652a iyr:2015 eyr:2020 ecl:gry byr:1959
iyr:2017
cid:155 byr:1999 pid:550276277
hcl:#18171d
eyr:2020 hgt:164cm ecl:amb
byr:1977 hcl:#6b5442 ecl:grn iyr:2012 hgt:156cm
eyr:2028 pid:125635376
hgt:65in pid:042700658 byr:1962 iyr:2020
hcl:#888785 eyr:2021 ecl:gry
ecl:blu iyr:2017 hcl:#efcc98 pid:447451869 hgt:176cm
byr:1958
eyr:2024
ecl:amb hgt:155cm eyr:2022 hcl:#efcc98
pid:614496034 byr:1957
iyr:2016
cid:99
eyr:2020
ecl:amb iyr:2017
hgt:163cm pid:128207503 byr:1977
hcl:#866857
ecl:amb cid:342 eyr:2026 hgt:172cm pid:317675262
byr:1942 hcl:#a97842 iyr:2010
ecl:grn pid:077163993
hgt:187cm hcl:#341e13 iyr:2012 byr:1934 eyr:2024
pid:423538706 hgt:156cm
ecl:oth hcl:#341e13 iyr:2016 eyr:2028
iyr:2030 ecl:#faff64
byr:2012
pid:734434105 hgt:164in hcl:z eyr:2023
hgt:150in iyr:2016 pid:173cm hcl:db675a cid:219 eyr:2032 byr:1958
ecl:xry
pid:087437383
eyr:2025 hgt:178cm ecl:gry byr:1954
cid:227 hcl:#fffffd
iyr:2018
pid:152cm
iyr:2030 eyr:2030
byr:2010 hcl:z
hgt:155cm
ecl:amb
byr:1934
hcl:#341e13 hgt:167cm
pid:#7356dd ecl:amb
iyr:2011
eyr:2030
cid:123
eyr:2027
byr:2005
hgt:173cm cid:174 hcl:#ceb3a1 iyr:2018 ecl:amb pid:179cm
iyr:2019 ecl:grn eyr:2023
hgt:162cm
pid:649681621 hcl:#4ee6d2 byr:1955
hgt:165cm byr:1929 ecl:blu pid:839016251 iyr:2017 hcl:#c0946f
eyr:2020
eyr:2020
iyr:2017 hcl:#c7ed42 ecl:blu byr:1928
hgt:74in pid:112604496
eyr:2026 hgt:184 cid:113
byr:1933
pid:952646285
iyr:2019 hcl:#fffffd ecl:gry
pid:455008820 byr:1982 eyr:2030 ecl:gry iyr:2020 cid:103 hcl:#733820 hgt:184cm
hcl:#733820 iyr:2020 hgt:182cm ecl:grn
cid:226 pid:081011361 eyr:2022 byr:1995
iyr:1999
hcl:#18171d pid:9252198900
ecl:amb byr:1999 hgt:175cm eyr:2021
iyr:2020 hgt:165cm
ecl:blu
eyr:2023 pid:760213482
byr:1968
hcl:#c0946f
pid:242381670 ecl:amb
hgt:172cm byr:1980 eyr:2020 iyr:2014 hcl:#866857
byr:2021 pid:#a94a22 hcl:#cfa07d iyr:1969 eyr:2030 ecl:zzz
hgt:76cm
ecl:oth cid:168
byr:1954 pid:079481919 eyr:2025 hcl:#c0946f hgt:172cm
hgt:171cm
eyr:2030
byr:1969 cid:170
pid:164128658 ecl:amb
hcl:#c2265e iyr:2019
byr:1983
cid:163
eyr:2020 pid:232659795 iyr:2013 hcl:#888785 hgt:162cm
ecl:blu
ecl:gry hcl:#7d3b0c
pid:001171231 eyr:2020
byr:1935 hgt:160cm
iyr:2011
iyr:2012 hcl:#a97842
eyr:2029 pid:809880438 hgt:164cm cid:83 byr:1961 ecl:hzl
cid:288 eyr:2027
hgt:181cm byr:1955
iyr:2020
ecl:oth pid:754135833 hcl:#c0946f
iyr:2012 pid:053980893
cid:54 byr:1961 ecl:gry hcl:#602927 eyr:2020 hgt:167cm
iyr:2013
eyr:2025
hgt:176cm pid:169006156 cid:270 ecl:oth byr:2001
cid:244 pid:914067457
iyr:2017 byr:1926 hcl:#733820 ecl:brn hgt:187cm
eyr:2030
ecl:oth byr:1942
hgt:176cm iyr:2020 eyr:2027
hcl:#efcc98
pid:688816242
hgt:177cm hcl:#efcc98 eyr:2030 pid:888703414
iyr:2010 byr:1973 ecl:gry
cid:257 eyr:2030
ecl:brn
pid:359774824
byr:1988 hcl:#6b5442 iyr:2013 hgt:187cm
iyr:2011 hgt:173cm cid:290 byr:2000 ecl:gry
hcl:#7d3b0c
pid:743371399 eyr:2029
cid:162
eyr:1920 byr:2010 pid:#69d6ba hgt:74 hcl:z ecl:#d256f3 iyr:1933
pid:435518624 byr:1938 eyr:2027 iyr:2016 hcl:#18171d
hgt:161cm
ecl:gry
ecl:gry eyr:2027 hcl:#7d3b0c hgt:170cm
pid:928345976 iyr:2020
hcl:#5f4023 ecl:blu
pid:024527693
eyr:1932 iyr:2023 hgt:154cm byr:1948
cid:284 iyr:2011 byr:1920 eyr:2024 ecl:blu hgt:153cm
hcl:#602927 pid:005741906
iyr:2029 hgt:108 byr:2029 hcl:c8b25d
pid:522512400 eyr:2038 ecl:zzz cid:163
pid:371295649
eyr:2022 ecl:hzl
iyr:2019 hgt:153cm byr:1961
hcl:z
eyr:2027 iyr:2020 pid:619653661 byr:1968 hcl:#b6652a cid:62 ecl:hzl
hgt:186cm
iyr:1931
pid:565552342 ecl:#af97bb hcl:c92cd6 eyr:1931 byr:2025 hgt:184in
hgt:187cm
ecl:grn
byr:1954 cid:145
iyr:2016
hcl:#efcc98 eyr:2030 pid:202254357
cid:177
iyr:2013 byr:1926 hcl:#efcc98
pid:298693475 hgt:181cm eyr:2023 ecl:dne
byr:2014
cid:255
iyr:1951 hgt:72in
hcl:#efcc98 eyr:2039 pid:135688013
ecl:grn
byr:2019 eyr:1971 pid:#a95cb4
hcl:#ceb3a1 ecl:#6f919c
hgt:193cm iyr:2012
pid:497726268
ecl:grn
eyr:2025 hcl:#efcc98 iyr:2019 hgt:170cm byr:1970
byr:1939 hcl:#18171d cid:250
iyr:2011 ecl:blu pid:216607711
hgt:158cm eyr:2029
byr:1937
eyr:1931
hcl:#5ee898
pid:#876b1a hgt:190cm
cid:277 ecl:#5f0f80 iyr:2013
ecl:oth hgt:191cm eyr:2025 byr:1978 pid:271136754 hcl:#888785
iyr:2012
hcl:#6b5442
iyr:2015 byr:1958 pid:510020331 hgt:158cm eyr:2024 ecl:blu
byr:1998 cid:142 eyr:2026 iyr:2015 hcl:#733820
pid:671943334 hgt:186cm ecl:oth
eyr:2025 ecl:brn hcl:#7d3b0c pid:000803215
byr:1947
iyr:2017 hgt:168cm cid:230
pid:612432109 hgt:186cm byr:1963 ecl:hzl iyr:2019 eyr:2027
hcl:#efcc98
cid:148
hcl:#c0946f pid:846986027 eyr:2025 byr:1941
cid:154 hgt:158cm iyr:2012
ecl:brn
ecl:gry hgt:186cm
iyr:2015 hcl:#602927 byr:1923 eyr:2023
pid:48544569
pid:857428120 hgt:158cm hcl:#e4a267 iyr:2014 eyr:2020 byr:1975 ecl:blu
ecl:blu pid:559783197 byr:1935 cid:119 iyr:2017 hgt:157cm hcl:#6b5442 eyr:2020
ecl:oth pid:724332293 hcl:#602927
cid:77 iyr:2019
byr:2001 hgt:192cm eyr:2024
ecl:hzl eyr:2031
hcl:#efcc98 byr:2011 cid:280 iyr:2017
pid:377875085
hgt:172cm
byr:1947 hgt:174cm ecl:amb iyr:2018 cid:94 hcl:#a97842 eyr:2026 pid:286225332
hgt:85 ecl:xry eyr:2033 iyr:1952 pid:92902290
hcl:a6f86d
byr:2013
byr:1935 hcl:#c0946f pid:368741489 ecl:blu
eyr:2020 hgt:164cm
iyr:2018
cid:196
pid:718568707
ecl:oth byr:2003 hcl:#a97842 iyr:2010 hgt:168cm eyr:2025 cid:261
hcl:#6b5442
pid:675429853
hgt:62in ecl:grn iyr:2016
eyr:2027 byr:1932
byr:1978
pid:080846464 hcl:#ceb3a1 ecl:gry iyr:2015 hgt:190cm eyr:2029
pid:1756319674
iyr:2010 byr:1998 hcl:#866857 cid:259
eyr:2025 hgt:73in ecl:hzl
eyr:2035
hcl:z hgt:61cm
pid:3267812127
cid:230
byr:2029 iyr:2028 ecl:lzr
hgt:161cm ecl:hzl byr:1934 iyr:2011 eyr:2025 hcl:#cfa07d pid:354474868
pid:727482965
hcl:#623a2f iyr:2010 hgt:156cm eyr:2020 cid:68 ecl:grn byr:1950
pid:040800697 hgt:186cm
hcl:#341e13 iyr:2030 ecl:hzl
byr:1937 eyr:2020
iyr:2013 byr:1928 pid:752644096 eyr:2030 hgt:191cm ecl:hzl
cid:93 hcl:#a97842
pid:022267155 hcl:#cfa07d eyr:2026
ecl:hzl
hgt:187cm iyr:2014 cid:347
hgt:73in
eyr:2021 pid:054367702 ecl:amb hcl:#18171d byr:1965
iyr:2020 cid:267
eyr:2022
cid:140 pid:189859171 byr:1984 iyr:2020 ecl:brn hgt:166cm hcl:#623a2f
byr:1971 iyr:2015
hgt:168cm
eyr:2020 pid:650970816 hcl:#341e13
ecl:grn
cid:168
hcl:#c0946f byr:1948 hgt:189cm
pid:868785851
cid:194 ecl:amb eyr:2024 iyr:2011
eyr:2040
byr:2030 hcl:afde59
hgt:172cm pid:72468598 iyr:1990 cid:165 ecl:#896a8e
iyr:2009 hcl:#6b5442
eyr:2028
cid:53 ecl:hzl
hgt:165cm byr:1999 pid:844037301
cid:281 eyr:2022
iyr:2020 byr:1976 hgt:176cm hcl:#6b5442 ecl:amb pid:755280305
hgt:154cm iyr:2013
pid:059284139 byr:1992
cid:215 ecl:blu eyr:2025 hcl:#b6652a
ecl:grn
cid:308
hgt:187cm pid:009080324 eyr:2027
iyr:2012 byr:1955
pid:083241291 hcl:#7c1810 eyr:2030 iyr:2019 byr:1950 ecl:brn hgt:72in
cid:148 byr:1953 hcl:#623a2f
pid:076848285 hgt:175cm iyr:2017
eyr:2022
ecl:oth
iyr:2020
hgt:160cm
eyr:2028 cid:312 ecl:brn hcl:#888785 pid:681067688 byr:1986
iyr:1972 cid:170 eyr:2023
pid:21811501 ecl:#17c6e8
hgt:158in byr:2015 hcl:5b7956
pid:720571739 cid:304 byr:1951 hgt:191cm
eyr:2025 hcl:#341e13
iyr:2011
eyr:2020 ecl:blu hcl:#cfa07d pid:097863725
hgt:150cm
byr:1951
cid:143 iyr:2013
eyr:2027 iyr:2019 ecl:#a0eeca hcl:#c0946f pid:724783488 byr:1943 cid:282 hgt:124
byr:2012
iyr:2013 eyr:2036 hcl:z hgt:97
pid:#677847 ecl:dne
pid:341708492 hgt:190cm
byr:1988 hcl:#888785
ecl:hzl
iyr:2015 eyr:2029
iyr:2020 byr:1968
ecl:gry
eyr:2030 hcl:#1976b0
cid:127 pid:701862616
hgt:161cm"""
inputs = input.split("\n\n")  # one passport record per blank-line-separated block
"""one"""
requirements = ['ecl', 'pid', 'eyr', 'hcl', 'byr', 'iyr', 'hgt']
total = len(inputs)
valid = [] #tacked on for part 2
for i in inputs:
for r in requirements:
if r not in i:
total -= 1
break
elif r == requirements[-1]: #tacked on for part 2
valid.append(i)
print(total)
"""two"""
import re  # needed for the hcl/pid format checks below (re may also already be imported at the top of the file)
eye_colours = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']
valid_count = 0
def is_valid(entry):
e_dict = {}
for e in entry:
e = e.split(':')
e_dict[e[0]] = e[1]
if all(keys in e_dict for keys in requirements):
if int(e_dict['byr']) < 1920 or int(e_dict['byr']) > 2002:
return False
if int(e_dict['iyr']) < 2010 or int(e_dict['iyr']) > 2020:
return False
if int(e_dict['eyr']) < 2020 or int(e_dict['eyr']) > 2030:
return False
if 'cm' in e_dict["hgt"]:
hgt = int(e_dict["hgt"].replace('cm', ''))
if hgt < 150 or hgt > 193:
return False
elif 'in' in e_dict["hgt"]:
hgt = int(e_dict["hgt"].replace('in', ''))
if hgt < 59 or hgt > 76:
return False
else:
return False
if not re.match('#[0-9a-f]{6}$', e_dict["hcl"]):
return False
if e_dict["ecl"] not in eye_colours:
return False
if not re.match('[0-9]{9}$', e_dict["pid"]):
return False
return True
return False
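# Illustrative check (sketch; the record below is an assumption, not taken from the puzzle input):
#   is_valid(["byr:1980", "iyr:2015", "eyr:2025", "hgt:180cm",
#             "hcl:#623a2f", "ecl:grn", "pid:087499704"])  # -> True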
for v in valid:
v = v.replace(' ', '\n')
v = v.split('\n')
if is_valid(v):
valid_count += 1
print(valid_count)
|
StarcoderdataPython
|
173479
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_cointegration_detection [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_cointegration_detection&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=s_cointegration_detection).
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from arpym.estimation import cointegration_fp, fit_var1
from arpym.tools import add_logo
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_cointegration_detection-parameters)
t_in = 1260 # length of the in-sample time series (days)
t_ = 2268 # length of the complete series (in and out-of-sample) (days)
u = 0.35 # coefficient of linear combination
l_select = 3 # selected eigenvector
# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_cointegration_detection-implementation-step00): Load data
# +
tau = np.array([1, 2, 3, 5, 7, 10])
path = '../../../databases/global-databases/fixed-income/db_yields'
x = pd.read_csv(path + '/data.csv', header=0, index_col=0)
x = x[tau.astype(float).astype(str)].tail(t_).values
# -
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_cointegration_detection-implementation-step01): Select the in-sample and out-of-sample series
# +
x_in = x[:t_in, :] # in-sample series
x_out = x[t_in:, :] # out-of-sample series
# -
# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_cointegration_detection-implementation-step02): Cointegrated eigenvectors
# +
c_hat, _ = cointegration_fp(x_in)
# -
# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_cointegration_detection-implementation-step03): In-sample and out-of-sample cointegrated series
# +
# store cointegrated vectors
c_hat_sel = np.zeros((c_hat.shape[0], 3))
c_hat_sel[:, 0] = c_hat[:, l_select+1]
c_hat_sel[:, 1] = c_hat[:, l_select]
c_hat_sel[:, 2] = (1 - u) * c_hat[:, l_select + 1] + u * \
c_hat[:, l_select]
# in-sample cointegrated series (basis points)
y_in = x_in @ c_hat_sel * 10000
# out-of-sample cointegrated series (basis points)
y_out = x_out @ c_hat_sel * 10000
# -
# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_cointegration_detection-implementation-step04): AR(1) long term parameters
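# Note: under the stationary AR(1) model $y_t = \mu_\epsilon + b\,y_{t-1} + \epsilon_t$ assumed here,
# the long-run mean is $\mu_\epsilon/(1-b)$, the long-run standard deviation is
# $\sqrt{\sigma^2_\epsilon/(1-b^2)}$, and the half-life is $-\ln 2/\ln|b|$;
# these are the three quantities computed in the loop below.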
# +
exp_infty = np.zeros(3)
sd_infty = np.zeros(3)
tau_halflife = np.zeros(3)
for k in range(3):
# AR1 fit
b_hat, mu_hat_epsi, sig2_hat_epsi = fit_var1(y_in[:, [k]])
# long-run expectation
exp_infty[k] = mu_hat_epsi / (1 - b_hat)
# long-run standard deviation
sd_infty[k] = np.sqrt(sig2_hat_epsi / (1 - b_hat ** 2))
# half life
tau_halflife[k] = -np.log(2) / np.log(abs(b_hat))
# -
# ## Plots
# +
plt.style.use('arpm')
for k in range(3):
fig = plt.figure()
min_y = min(min(y_in[:, k]), min(y_out[:, k]))
max_y = max(max(y_in[:, k]), max(y_out[:, k]))
t = np.arange(t_)/252
plt.axis([0, t[-1], min_y, max_y])
plt.xlabel('time (years)')
plt.ylabel('basis points')
plt.xticks()
plt.yticks()
insample = plt.plot(t[:t_in], y_in[:, k], color='k', linewidth=1)
outofsample = plt.plot(t[t_in:], y_out[:, k], color='b', linewidth=1)
expect = plt.plot(t, np.tile(exp_infty[k], t_), color='g')
up_sd = plt.plot(t, np.tile(exp_infty[k] + 2 * sd_infty[k], t_),
color='r')
plt.plot(t, np.tile(exp_infty[k] - 2 * sd_infty[k], t_),
color='r')
plt.legend(handles=[insample[0], expect[0], up_sd[0], outofsample[0]],
labels=['In-Sample', 'In-Sample Mean',
'+/- 2 In-Sample St. Dev', 'Out-of-Sample'], loc=2)
if k == 0:
plt.title(('Series = {index}-th Eigvect. In-Sample Mean-Reversion ' +
'Half-Life = ' +
' {halflife:.0f} days.').format(index=l_select,
halflife=tau_halflife[k]))
elif k == 1:
plt.title(('Series = {index}-th Eigvect. In-Sample Mean-Reversion ' +
'Half-Life = ' +
' {halflife:.0f} days.').format(index=l_select+1,
halflife=tau_halflife[k]))
else:
plt.title(('Series = {a:1.2f} x {index}-th Eigvect. + ' +
'{a2:1.2f} x {index2}-th Eigvect.' +
'\nIn-Sample Mean-Reversion Half-Life ' +
'= {halflife:.0f} days.').format(a=np.sqrt(1-u**2),
index=l_select,
a2=u**2,
index2=l_select+1,
halflife=tau_halflife[k]))
add_logo(fig)
plt.tight_layout()
|
StarcoderdataPython
|
3269976
|
""" do book related things with other users """
from django.apps import apps
from django.db import models, IntegrityError, transaction
from django.db.models import Q
from bookwyrm.settings import DOMAIN
from .base_model import BookWyrmModel
from . import fields
from .relationship import UserBlocks
class Group(BookWyrmModel):
"""A group of users"""
name = fields.CharField(max_length=100)
user = fields.ForeignKey("User", on_delete=models.CASCADE)
description = fields.TextField(blank=True, null=True)
privacy = fields.PrivacyField()
def get_remote_id(self):
"""don't want the user to be in there in this case"""
return f"https://{DOMAIN}/group/{self.id}"
@classmethod
def followers_filter(cls, queryset, viewer):
"""Override filter for "followers" privacy level to allow non-following
group members to see the existence of group-curated lists"""
return queryset.exclude(
~Q( # user is not a group member
Q(user__followers=viewer) | Q(user=viewer) | Q(memberships__user=viewer)
),
privacy="followers", # and the status of the group is followers only
)
@classmethod
def direct_filter(cls, queryset, viewer):
"""Override filter for "direct" privacy level to allow group members
to see the existence of groups and group lists"""
return queryset.exclude(~Q(memberships__user=viewer), privacy="direct")
class GroupMember(models.Model):
"""Users who are members of a group"""
created_date = models.DateTimeField(auto_now_add=True)
updated_date = models.DateTimeField(auto_now=True)
group = models.ForeignKey(
"Group", on_delete=models.CASCADE, related_name="memberships"
)
user = models.ForeignKey(
"User", on_delete=models.CASCADE, related_name="memberships"
)
class Meta:
"""Users can only have one membership per group"""
constraints = [
models.UniqueConstraint(fields=["group", "user"], name="unique_membership")
]
def save(self, *args, **kwargs):
"""don't let a user invite someone who blocked them"""
# blocking in either direction is a no-go
if UserBlocks.objects.filter(
Q(
user_subject=self.group.user,
user_object=self.user,
)
| Q(
user_subject=self.user,
user_object=self.group.user,
)
).exists():
raise IntegrityError()
# accepts and requests are handled by the GroupMemberInvitation model
super().save(*args, **kwargs)
@classmethod
def from_request(cls, join_request):
"""converts a join request into a member relationship"""
# remove the invite
join_request.delete()
# make a group member
return cls.objects.create(
user=join_request.user,
group=join_request.group,
)
@classmethod
def remove(cls, owner, user):
"""remove a user from a group"""
memberships = cls.objects.filter(group__user=owner, user=user).all()
for member in memberships:
member.delete()
class GroupMemberInvitation(models.Model):
"""adding a user to a group requires manual confirmation"""
created_date = models.DateTimeField(auto_now_add=True)
group = models.ForeignKey(
"Group", on_delete=models.CASCADE, related_name="user_invitations"
)
user = models.ForeignKey(
"User", on_delete=models.CASCADE, related_name="group_invitations"
)
class Meta:
"""Users can only have one outstanding invitation per group"""
constraints = [
models.UniqueConstraint(fields=["group", "user"], name="unique_invitation")
]
def save(self, *args, **kwargs):
"""make sure the membership doesn't already exist"""
# if there's an invitation for a membership that already exists, accept it
# without changing the local database state
if GroupMember.objects.filter(user=self.user, group=self.group).exists():
self.accept()
return
# blocking in either direction is a no-go
if UserBlocks.objects.filter(
Q(
user_subject=self.group.user,
user_object=self.user,
)
| Q(
user_subject=self.user,
user_object=self.group.user,
)
).exists():
raise IntegrityError()
# make an invitation
super().save(*args, **kwargs)
# now send the invite
model = apps.get_model("bookwyrm.Notification", require_ready=True)
notification_type = "INVITE"
model.objects.create(
user=self.user,
related_user=self.group.user,
related_group=self.group,
notification_type=notification_type,
)
@transaction.atomic
def accept(self):
"""turn this request into the real deal"""
GroupMember.from_request(self)
model = apps.get_model("bookwyrm.Notification", require_ready=True)
# tell the group owner
model.objects.create(
user=self.group.user,
related_user=self.user,
related_group=self.group,
notification_type="ACCEPT",
)
# let the other members know about it
for membership in self.group.memberships.all():
member = membership.user
if member not in (self.user, self.group.user):
model.objects.create(
user=member,
related_user=self.user,
related_group=self.group,
notification_type="JOIN",
)
def reject(self):
"""generate a Reject for this membership request"""
self.delete()
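
# Illustrative flow (sketch; the variable names are assumptions, behaviour as defined above):
#   invite = GroupMemberInvitation.objects.create(group=group, user=user)  # saves and notifies the invitee
#   invite.accept()   # removes the invite, creates the GroupMember, notifies the owner and other members
#   invite.reject()   # alternatively, silently discards the request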
|
StarcoderdataPython
|
197024
|
import numpy as np
from numpy import linalg
from gym import utils
import os
from gym.envs.mujoco import mujoco_env
import math
#from gym_reinmav.envs.mujoco import MujocoQuadEnv
# For testing whether a number is close to zero
_FLOAT_EPS = np.finfo(np.float64).eps
_EPS4 = _FLOAT_EPS * 4.0
class BallBouncingQuadEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
#xml_path = os.path.join(os.path.dirname(__file__), "./assets", 'half_cheetah.xml')
self.avg_rwd=-3.0 #obtained from eprewmean
self.gamma=0.99 #ppo2 default setting value
self.log_cnt=0
mujoco_env.MujocoEnv.__init__(self, 'ball_bouncing_quad.xml', 5)
utils.EzPickle.__init__(self)
def step(self, action):
mass=self.get_mass()
#print("mass=",mass[1])
#temp_thrust=
#action[0] += mass[1]*9.81 #gravity compensation, 0.4*9.81=3.92
#print("gamma=",self.gamma)
act_min=[3.5,-0.5,-0.7,-0.03]
act_max=[30,0.5,0.7,0.03]
# #action = np.clip(action, a_min=-np.inf, a_max=np.inf)
action = np.clip(action, a_min=act_min, a_max=act_max)
self.do_simulation(action, self.frame_skip)
ob = self._get_obs()
pos = ob[0:3]
#R = ob[3:12]
#lin_vel = ob[12:15]
#ang_vel= ob[15:18]
quat = ob[3:7]
lin_vel = ob[7:10]
ang_vel = ob[10:13]
#R=self.quat2mat(quat.transpose())
#rpy = self.RotToRPY(R)
#print("rpy(degrees) =",np.rad2deg(rpy))
reward_ctrl = - 0.1e-3 * np.sum(np.square(action))
reward_position = -linalg.norm(pos) * 1e-2
reward_linear_velocity = -linalg.norm(lin_vel) * 0.1e-3
reward_angular_velocity = -linalg.norm(ang_vel) * 0.1e-3
reward_alive = 1e-2
reward = reward_ctrl+reward_position+reward_linear_velocity+reward_angular_velocity+reward_alive
done= abs(pos[2]) >50 \
or abs(pos[0]) > 50.0 \
or abs(pos[1]) > 50.0
# print("status=",status)
print("pos=",pos)
info = {
'rwp': reward_position,
'rwlv': reward_linear_velocity,
'rwav': reward_angular_velocity,
'rwctrl': reward_ctrl,
'obx': pos[0],
'oby': pos[1],
'obz': pos[2],
'obvx': lin_vel[0],
'obvy': lin_vel[1],
'obvz': lin_vel[2],
}
# retOb= np.concatenate([
# pos,R.flat,lin_vel,ang_vel])
if done:
reward = self.avg_rwd / (1-self.gamma)*2#-13599.99
#print("terminated reward=",reward)
#return retOb, reward, done, info
if (self.log_cnt==1e4):
print("x={},y={},z={}\n".format(pos[0],pos[1],pos[2]))
print("thrust={}, dx={}, dy={}, dz={}".format(action[0],action[1],action[2],action[3]))
self.log_cnt=0
else: self.log_cnt=self.log_cnt+1
return ob, reward, done, info
def _get_obs(self):
# pos = self.sim.data.qpos*1e-1
# vel = self.sim.data.qvel*1e-2
pos = self.sim.data.qpos*1e-0
vel = self.sim.data.qvel*1e-0
return np.concatenate([pos.flat,vel.flat])
def reset_model(self):
# pos = self.np_random.uniform(size=3, low=-20, high=20)
# quat = self.np_random.uniform(size=4, low=-1, high=1)
# linVel = self.np_random.uniform(size=3, low=-2, high=2)
# angVel = self.np_random.uniform(size=3, low=-0.5, high=0.5)
# qpos = np.concatenate([pos,quat])
# qvel = np.concatenate([linVel,angVel])
qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-0.1, high=0.1)
qvel = self.init_qvel + self.np_random.uniform(size=self.model.nv, low=-0.05, high=0.05)
#qpos[0:3] += self.np_random.uniform(low=-5, high=5, size=3)
#qpos = self.init_qpos
#qpos[0:3] = qpos[0:3]+self.np_random.uniform(size=3, low=-10, high=10)
#ob[3:12] = self.np_random.uniform(size=9, low=-1, high=1)
#qvel += self.np_random.uniform(size=6, low=-0.5, high=0.5)
#qvel[0:3] = self.np_random.uniform(size=3, low=-2, high=2)
#qvel[3:6] = self.np_random.uniform(size=3, low=-0.5, high=0.5)
self.set_state(qpos, qvel)
observation = self._get_obs();
return observation
def viewer_setup(self):
v = self.viewer
v.cam.trackbodyid = 0
v.cam.distance = self.model.stat.extent * 4
def get_mass(self):
mass = np.expand_dims(self.model.body_mass, axis=1)
return mass
#stealed from rotations.py
def quat2mat(self,quat):
""" Convert Quaternion to Rotation matrix. See rotation.py for notes """
quat = np.asarray(quat, dtype=np.float64)
assert quat.shape[-1] == 4, "Invalid shape quat {}".format(quat)
w, x, y, z = quat[..., 0], quat[..., 1], quat[..., 2], quat[..., 3]
Nq = np.sum(quat * quat, axis=-1)
s = 2.0 / Nq
X, Y, Z = x * s, y * s, z * s
wX, wY, wZ = w * X, w * Y, w * Z
xX, xY, xZ = x * X, x * Y, x * Z
yY, yZ, zZ = y * Y, y * Z, z * Z
mat = np.empty(quat.shape[:-1] + (3, 3), dtype=np.float64)
mat[..., 0, 0] = 1.0 - (yY + zZ)
mat[..., 0, 1] = xY - wZ
mat[..., 0, 2] = xZ + wY
mat[..., 1, 0] = xY + wZ
mat[..., 1, 1] = 1.0 - (xX + zZ)
mat[..., 1, 2] = yZ - wX
mat[..., 2, 0] = xZ - wY
mat[..., 2, 1] = yZ + wX
mat[..., 2, 2] = 1.0 - (xX + yY)
return np.where((Nq > _FLOAT_EPS)[..., np.newaxis, np.newaxis], mat, np.eye(3))
def RotToRPY(self,R):
R=R.reshape(3,3) #to remove the last dimension i.e., 3,3,1
phi = math.asin(R[1,2])
psi = math.atan2(-R[1,0]/math.cos(phi),R[1,1]/math.cos(phi))
theta = math.atan2(-R[0,2]/math.cos(phi),R[2,2]/math.cos(phi))
return phi,theta,psi
# def __init__(self, xml_name="quadrotor_quat.xml"):
# super(MujocoQuadQuaternionEnv, self).__init__(xml_name=xml_name)
# def step(self, action):
# goal_pos = np.array([0.0, 0.0, 1.0])
# alive_bonus = 1e1
# xposbefore = self.sim.data.qpos[0]
# self.do_simulation(action, self.frame_skip)
# xposafter = self.sim.data.qpos[0]
# ob = self._get_obs()
# pos = ob[0:3]
# quat = ob[3:7]
# lin_vel = ob[7:10]
# ang_vel= ob[10:13]
# lin_acc = ob[13:16]
# ang_acc = ob[16:19]
# #print("step a=",a)
# #reward_position = -linalg.norm(pos-goal_pos) * 0.2e-1
# reward_position = -linalg.norm(pos) * 0.2e-1
# reward_linear_velocity = -linalg.norm(lin_vel) * 1e-3
# reward_angular_velocity = -linalg.norm(ang_vel) * 1e-1
# reward_action = -linalg.norm(action)+np.sum(action)*1e-1
# reward_alive = alive_bonus
# # reward = reward_position \
# # + reward_linear_velocity \
# # + reward_angular_velocity \
# # + reward_action \
# # + reward_alive
# reward_ctrl = - 0.1 * np.square(action).sum()
# reward_run = (xposafter - xposbefore)/self.dt
# #print("r_ctrl=",reward_ctrl)
# #print("r_run=",reward_run)
# reward = reward_ctrl + reward_run
# # notdone = np.isfinite(ob).all() \
# # and pos[2] > 0.3 \
# # and abs(pos[0]) < 2.0 \
# # and abs(pos[1]) < 2.0
# notdone = np.isfinite(ob).all() \
# and abs(pos[0]) < 2.0 \
# and abs(pos[1]) < 2.0
# # info = {
# # 'rp': reward_position,
# # 'rlv': reward_linear_velocity,
# # 'rav': reward_angular_velocity,
# # 'ra': reward_action,
# # 'rlive': reward_alive,
# # }
# # info = {
# # 'rp': reward_position,
# # 'rlv': reward_linear_velocity,
# # 'rav': reward_ctrl,
# # 'ra': reward_action,
# # 'rlive': reward_run,
# # }
# info=dict(reward_run=reward_run, reward_ctrl=reward_ctrl)
# #if done=True indicates the episode has terminated and it's time to reset the environment. (For example, perhaps the pole tipped too far, or you lost your last life.) https://gym.openai.com/docs/
# #done = not notdone
# done = False
# return ob, reward, done, info
# def reset_model(self):
# #If reset, then we add some variations to the initial state that will be exploited for the next ep. The low and high bounds empirically set.
# qpos=self.init_qpos
# qvel=self.init_qvel
# qpos[0:3] +=self.np_random.uniform(size=3, low=-0.1, high=0.1)
# qvel[0:3] +=self.np_random.uniform(size=3, low=-0.01, high=0.01)
# self.set_state(qpos, qvel)
# return self._get_obs()
# def clip_action(self, action):
# """
# clip action to [0, inf]
# :param action:
# :return: clipped action
# """
# act_min=[0,-0.5,-0.5,-0.5]
# act_max=[7,0.5,0.5,0.5]
# #action = np.clip(action, a_min=-np.inf, a_max=np.inf)
# action = np.clip(action, a_min=act_min, a_max=act_max)
# return action
|
StarcoderdataPython
|
1689866
|
<reponame>PENGUINLIONG/tinyspv
from re import match
HEADERS = []
op_names = []
with open("./include/tinyspv/spirv/unified1/spirv.hpp") as f:
lines = [x.strip() for x in f.readlines()]
for line in lines:
if not line.startswith("//"):
break
HEADERS += [line]
for line in lines:
if "Alias" in line:
continue
m = match(r"Op([A-Za-z0-9]+) = [0-9]+,", line)
if m:
op_names += [m[1]]
with open("./src/tinyspv/spirv/unified1/opcode2str.cpp", "w") as f:
src = HEADERS + [
"#include \"tinyspv/spirv/unified1/opcode2str.hpp\"",
"namespace tinyspv {",
"const char* opcode2str(int opcode) {",
" switch (opcode) {",
]
src += [f" case Op::Op{x}: return \"{x}\";" for x in op_names]
src += [
" default: return nullptr;",
"}",
"}",
"} // namespace tinyspv",
"",
]
f.write('\n'.join(src))
with open("./include/tinyspv/spirv/unified1/opcode2str.hpp", "w") as f:
src = HEADERS + [
"#pragma once",
"#include \"tinyspv/spirv/unified1/spirv.hpp\"",
"namespace tinyspv {",
"extern const char* opcode2str(int opcode);",
"} // namespace tinyspv",
"",
]
f.write('\n'.join(src))
|
StarcoderdataPython
|
62290
|
# -*- coding: utf-8 -*-
import re
from mattermost_bot.bot import respond_to
@respond_to('(.*) added to the channel by (.*)', re.IGNORECASE)
def added_to_channel(message, myname, channel_admin):
message.reply('Hi, %s. I am %s. Glad to join this channel :) ' % (channel_admin, myname))
added_to_channel.__doc__ = "Response when added to a channel"
|
StarcoderdataPython
|
3318739
|
from securitytxt.parsers.textparsers.comment_line_parser import CommentLineParser
from securitytxt.parsers.textparsers.field_line_parser import FieldLineParser
from securitytxt.parsers.textparsers.signed_text_parser import SignedTextParser
from securitytxt.securitytxt import SecurityTXT
class FileParser:
"""Takes a security.txt file and parses it. Creating an object of the parser immediately parses the given file.
Attributes:
securitytxt: the resulting securitytxt after parsing.
Public methods:
None
Raises:
:raises AttributeError: if the format of the file is invalid.
"""
def __init__(self, text: str):
"""
Initialize SecurityTXT and run the parser to fill this object
:param text: The text to parse
        :raises AttributeError: if the file has an incorrect format.
"""
self.securitytxt: SecurityTXT = SecurityTXT(raw=text)
self._parse(text)
def _parse(self, text) -> SecurityTXT:
"""
The function to parse the security.txt file. It follows the ABNF grammar specified in section 5 of the draft RFC
:param text: The text to parse
:return: The SecurityTXT object representing the file
        :raises AttributeError: if the file has an incorrect format.
"""
# Grammar: body = signed / unsigned
text = self._normalize(text)
self._parse_signed(text) if SignedTextParser.is_signed_text(text) else self._parse_unsigned(text)
return self.securitytxt
def _normalize(self, text: str) -> str:
"""
Normalize the text for further processing. For example, replacing CRLF by LF
:param text: The text to normalize
:return: The normalized text
"""
return text.replace('\r\n', '\n')
def _parse_signed(self, signed_text: str) -> None:
"""
Parse a signed text and set the values in the securitytxt attribute
:param signed_text: the signed text to parse
:raises AttributeError: if the signed_text is not a valid signed file.
"""
signed_text_parser = SignedTextParser(signed_text)
self.securitytxt.signature = signed_text_parser.signature
self._parse_unsigned(signed_text_parser.unsigned_text)
def _parse_unsigned(self, unsigned_text: str) -> None:
"""
Parse an unsigned security.txt file
:param unsigned_text: the unsigned text to parse
"""
for line in unsigned_text.splitlines():
self._parse_line(line)
def _parse_line(self, line: str) -> None:
"""
Parse a line in a security.txt file
:param line: a line from the file
"""
if FieldLineParser.is_field(line):
self._parse_field(line)
elif CommentLineParser.is_comment(line):
self._parse_comment(line)
def _parse_field(self, line: str) -> None:
"""
Parse a field line
:param line: The line to parse
"""
try:
field = FieldLineParser(line)
self.securitytxt.add_field(field.key, field.value)
except AttributeError:
pass
def _parse_comment(self, line: str) -> None:
"""
Parse a comment
:param line: The comment
"""
comment = CommentLineParser(line)
self.securitytxt.comments.append(comment.comment)
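
# Illustrative usage (sketch; the example security.txt content is an assumption, not part of this module):
#   raw = "Contact: mailto:security@example.com\nExpires: 2025-12-31T23:00:00.000Z\n"
#   securitytxt = FileParser(raw).securitytxt  # raises AttributeError on malformed input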
|
StarcoderdataPython
|
3212393
|
from .BidirectionalLSTM import BidirectionalLSTM
|
StarcoderdataPython
|
3222810
|
"""Functions for evaluating the cifar10 CNN model."""
import tensorflow as tf
from cifar10_input import Cifar10Data
def evaluation(logits, labels):
"""
Evaluates image inference model.
:param logits: A tensor of shape [BATCH_SIZE, NUM_CLASS], float32, each row represents an image inference
:param labels: A tensor of shape [BATCH_SIZE], int32 or int64, the true id value of the image
:return: Accuracy of image inference model.
"""
correct = tf.nn.in_top_k(logits, labels, 1)
return tf.reduce_mean(tf.cast(correct, tf.float32))
def mass_evaluation(cifar10_data_obj, sess, eval_op, loss_op, images_pl, labels_pl, keep_prob_pl, is_test_pl):
"""
    Evaluate the full test set in smaller batches (100 images per step in the loop below)
    to stay within GPU memory limits.
    :param cifar10_data_obj: Object of Cifar10Data providing the testing batches.
    :param sess: TensorFlow session used to run the ops.
    :param eval_op: Accuracy op built by evaluation().
    :param loss_op: Loss op of the model.
    :param images_pl: Placeholder for the input images.
    :param labels_pl: Placeholder for the true labels.
    :param keep_prob_pl: Placeholder for the dropout keep probability (fed 1.0 during evaluation).
    :param is_test_pl: Placeholder for the test-mode flag (fed True during evaluation).
    :return: Scalars of accuracy and loss averaged over all batches.
"""
a = 0.0
c = 0.0
step = 0
has_next = True
cifar10_data_obj.init_testing_batch()
while has_next:
step = step + 1
has_next, testing_image, testing_label = cifar10_data_obj.next_testing_batch(100)
feed_dict = {images_pl: testing_image,
labels_pl: testing_label,
keep_prob_pl: 1.0,
is_test_pl: True}
a_temp, c_temp = sess.run([eval_op, loss_op], feed_dict=feed_dict)
a += a_temp
c += c_temp
a /= step
c /= step
return a, c
|
StarcoderdataPython
|
4835369
|
<reponame>sethmlarson/selectors2<filename>tests/support.py
import os
import signal
import socket
import threading
import time
__all__ = [
"get_time",
"resource",
"socketpair",
"AlarmMixin",
"TimerMixin"
]
# Tolerance values for timer/speed fluctuations.
TOLERANCE = 0.5
# Detect whether we're running on Travis or AppVeyor. This
# is used to skip some verification points inside of tests to
# not randomly fail our CI due to wild timer/speed differences.
TRAVIS_CI = "TRAVIS" in os.environ
APPVEYOR = "APPVEYOR" in os.environ
try: # Python 2.x doesn't define time.perf_counter.
from time import perf_counter as get_time
except ImportError:
from time import time as get_time
try: # Python 2.6 doesn't have the resource module.
import resource
except ImportError:
resource = None
if hasattr(socket, 'socketpair'):
# Since Python 3.5, socket.socketpair() is now also available on Windows
socketpair = socket.socketpair
else:
# Replacement for socket.socketpair()
def socketpair(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0):
"""A socket pair usable as a self-pipe, for Windows.
Origin: https://gist.github.com/4325783, by <NAME>.
Public domain.
"""
if family == socket.AF_INET:
host = '127.0.0.1'
elif family == socket.AF_INET6:
host = '::1'
else:
raise ValueError("Only AF_INET and AF_INET6 socket address "
"families are supported")
if type != socket.SOCK_STREAM:
raise ValueError("Only SOCK_STREAM socket type is supported")
if proto != 0:
raise ValueError("Only protocol zero is supported")
# We create a connected TCP socket. Note the trick with setblocking(0)
# that prevents us from having to create a thread.
lsock = socket.socket(family, type, proto)
try:
lsock.bind((host, 0))
lsock.listen(1)
# On IPv6, ignore flow_info and scope_id
addr, port = lsock.getsockname()[:2]
csock = socket.socket(family, type, proto)
try:
csock.setblocking(False)
try:
csock.connect((addr, port))
except (OSError, socket.error):
pass
csock.setblocking(True)
ssock, _ = lsock.accept()
except:
csock.close()
raise
finally:
lsock.close()
return ssock, csock
class AlarmThread(threading.Thread):
def __init__(self, timeout):
super(AlarmThread, self).__init__(group=None)
self.setDaemon(True)
self.timeout = timeout
self.canceled = False
def cancel(self):
self.canceled = True
def run(self):
time.sleep(self.timeout)
if not self.canceled:
os.kill(os.getpid(), signal.SIGALRM)
class AlarmMixin(object):
alarm_thread = None
def _begin_alarm_thread(self, timeout):
if not hasattr(signal, "SIGALRM"):
self.skipTest("Platform doesn't have signal.SIGALRM")
self.addCleanup(self._cancel_alarm_thread)
self.alarm_thread = AlarmThread(timeout)
self.alarm_thread.start()
def _cancel_alarm_thread(self):
if self.alarm_thread is not None:
self.alarm_thread.cancel()
self.alarm_thread.join(0.0)
self.alarm_thread = None
def set_alarm(self, duration, handler):
sigalrm_handler = signal.signal(signal.SIGALRM, handler)
self.addCleanup(signal.signal, signal.SIGALRM, sigalrm_handler)
self._begin_alarm_thread(duration)
class TimerContext(object):
def __init__(self, testcase, lower=None, upper=None):
self.testcase = testcase
self.lower = lower
self.upper = upper
self.start_time = None
self.end_time = None
def __enter__(self):
self.start_time = get_time()
def __exit__(self, *args, **kwargs):
self.end_time = get_time()
total_time = self.end_time - self.start_time
# Skip timing on CI due to flakiness.
if TRAVIS_CI or APPVEYOR:
return
if self.lower is not None:
self.testcase.assertGreaterEqual(total_time, self.lower * (1.0 - TOLERANCE))
if self.upper is not None:
self.testcase.assertLessEqual(total_time, self.upper * (1.0 + TOLERANCE))
class TimerMixin(object):
def assertTakesTime(self, lower=None, upper=None):
return TimerContext(self, lower=lower, upper=upper)
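
# Illustrative use inside a test case (sketch; the test class and timings are assumptions):
#   class MyTimingTest(TimerMixin, unittest.TestCase):
#       def test_sleep(self):
#           with self.assertTakesTime(lower=0.1, upper=1.0):
#               time.sleep(0.5)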
|
StarcoderdataPython
|
3290430
|
from c2d_clang2c_ast import *
from typing import List, Dict, Set, Tuple  # Tuple is used in split_struct_type's return annotation
import copy
class UnaryExtractorNodeLister(NodeVisitor):
def __init__(self):
self.nodes: List[UnOp] = []
def visit_UnOp(self, node: UnOp):
if node.op in ["++", "--"]:
self.nodes.append(node)
return self.generic_visit(node)
def visit_ForStmt(self, node: ForStmt):
return self.generic_visit(node.body[0])
def visit_BasicBlock(self, node: BasicBlock):
return
class UnaryExtractor(NodeTransformer):
def __init__(self, count=0):
self.count = count
def visit_ForStmt(self, node: ForStmt):
return ForStmt(init=node.init,
body=self.generic_visit(node.body[0]),
cond=node.cond,
iter=node.iter)
def visit_UnOp(self, node: UnOp):
if node.op in ["++", "--"]:
self.count = self.count + 1
return DeclRefExpr(name="tmp_unop_result" + str(self.count - 1))
else:
return node
def visit_BasicBlock(self, node: BasicBlock):
newbody = []
for child in node.body:
lister = UnaryExtractorNodeLister()
lister.visit(child)
res = lister.nodes
post = []
tmp_count = self.count
if res is not None:
for i in range(0, len(res)):
if res[i] in node.body:
#print("SKIPPING!")
continue
tmp_name = "tmp_unop_result" + str(tmp_count)
newbody.append(
DeclStmt(vardecl=[
VarDecl(
name=tmp_name, type=Int(), init=res[i].lvalue)
]))
if res[i].postfix:
post.append(res[i])
else:
newbody.append(
BinOp(op="=",
lvalue=DeclRefExpr(name=tmp_name),
rvalue=res[i]))
tmp_count = tmp_count + 1
if isinstance(child, UnOp):
newbody.append(
UnOp(op=child.op, lvalue=self.visit(child.lvalue)))
else:
newbody.append(self.visit(child))
for i in post:
newbody.append(i)
return BasicBlock(body=newbody)
class IndicesExtractorNodeLister(NodeVisitor):
def __init__(self):
self.nodes: List[ArraySubscriptExpr] = []
def visit_ArraySubscriptExpr(self, node: ArraySubscriptExpr):
#if not isinstance(node.index,IntLiteral):
self.nodes.append(node)
return self.generic_visit(node)
def visit_BasicBlock(self, node: BasicBlock):
return
class IndicesExtractor(NodeTransformer):
def __init__(self, count=0):
self.count = count
def visit_ArraySubscriptExpr(self, node: ArraySubscriptExpr):
#if isinstance(node.index,IntLiteral):
# return node
if not hasattr(self, "count"):
self.count = 0
else:
self.count = self.count + 1
tmp = self.count
return ArraySubscriptExpr(
name=node.name,
indices=node.indices,
type=node.type,
unprocessed_name=self.visit(node.unprocessed_name),
index=DeclRefExpr(name="tmp_index_" + str(tmp - 1), type=Int()))
def visit_BasicBlock(self, node: BasicBlock):
newbody = []
for child in node.body:
# res = [node for node in Node.walk(child) if isinstance(node, ArraySubscriptExpr)]
lister = IndicesExtractorNodeLister()
lister.visit(child)
res = lister.nodes
temp = self.count
if res is not None:
for i in range(0, len(res)):
tmp_name = "tmp_index_" + str(temp)
temp = temp + 1
newbody.append(
DeclStmt(vardecl=[VarDecl(name=tmp_name, type=Int())]))
newbody.append(
BinOp(op="=",
lvalue=DeclRefExpr(name=tmp_name),
rvalue=res[i].index))
newbody.append(self.visit(child))
return BasicBlock(body=newbody)
class InitExtractorNodeLister(NodeVisitor):
def __init__(self):
self.nodes: List[VarDecl] = []
def visit_ForStmt(self, node: ForStmt):
return
def visit_VarDecl(self, node: VarDecl):
if hasattr(node, "init"):
self.nodes.append(node)
return self.generic_visit(node)
def visit_BasicBlock(self, node: BasicBlock):
return
class InsertMissingBasicBlocks(NodeTransformer):
def insert_missing_block(self, body):
assert isinstance(body, list)
if isinstance(body[0], BasicBlock):
return body
return [BasicBlock(body=body)]
def visit_ForStmt(self, node: ForStmt):
node.body = self.insert_missing_block(node.body)
return self.generic_visit(node)
def visit_DoStmt(self, node: DoStmt):
node.body = self.insert_missing_block(node.body)
return self.generic_visit(node)
def visit_WhileStmt(self, node: WhileStmt):
node.body = self.insert_missing_block(node.body)
return self.generic_visit(node)
def visit_IfStmt(self, node: IfStmt):
node.body_if = self.insert_missing_block(node.body_if)
if hasattr(node, "body_else"):
node.body_else = self.insert_missing_block(node.body_else)
return self.generic_visit(node)
class InitExtractor(NodeTransformer):
def __init__(self, count=0):
self.count = count
def visit_BasicBlock(self, node: BasicBlock):
newbody = []
for child in node.body:
# res = [node for node in Node.walk(child) if isinstance(node, ArraySubscriptExpr)]
lister = InitExtractorNodeLister()
lister.visit(child)
res = lister.nodes
temp = self.count
newbody.append(self.visit(child))
if res is not None:
for i in range(0, len(res)):
#print(res[i].name)
newbody.append(
BinOp(op="=",
lvalue=DeclRefExpr(name=res[i].name),
rvalue=res[i].init))
return BasicBlock(body=newbody)
class CallExtractorNodeLister(NodeVisitor):
def __init__(self):
self.nodes: List[CallExpr] = []
def visit_ForStmt(self, node: ForStmt):
return
def visit_CallExpr(self, node: CallExpr):
if node.name.name not in ["malloc", "expf", "powf", "sqrt", "cbrt"]:
self.nodes.append(node)
return self.generic_visit(node)
def visit_BasicBlock(self, node: BasicBlock):
return
class CallExtractor(NodeTransformer):
def __init__(self, count=0):
self.count = count
def visit_CallExpr(self, node: CallExpr):
#if isinstance(node.index,IntLiteral):
# return node
if not hasattr(self, "count"):
self.count = 0
else:
self.count = self.count + 1
tmp = self.count
if node.name.name in ["malloc", "expf", "powf", "sqrt", "cbrt"]:
return node
return DeclRefExpr(name="tmp_call_" + str(tmp - 1))
def visit_BasicBlock(self, node: BasicBlock):
newbody = []
for child in node.body:
# res = [node for node in Node.walk(child) if isinstance(node, ArraySubscriptExpr)]
lister = CallExtractorNodeLister()
lister.visit(child)
res = lister.nodes
for i in res:
if i == child:
res.pop(res.index(i))
temp = self.count
if res is not None:
for i in range(0, len(res)):
print("CALL:", res[i].name)
newbody.append(
DeclStmt(vardecl=[
VarDecl(name="tmp_call_" + str(temp),
type=res[i].type)
]))
newbody.append(
BinOp(op="=",
lvalue=DeclRefExpr(name="tmp_call_" + str(temp),
type=res[i].type),
rvalue=res[i]))
if isinstance(child, CallExpr):
new_args = []
for i in child.args:
new_args.append(self.visit(i))
new_child = CallExpr(type=child.type,
name=child.name,
args=new_args)
newbody.append(new_child)
else:
newbody.append(self.visit(child))
return BasicBlock(body=newbody)
class CondExtractorNodeLister(NodeVisitor):
def __init__(self):
self.nodes: List[Node] = []
def visit_ForStmt(self, node: ForStmt):
return
def visit_IfStmt(self, node: IfStmt):
self.nodes.append(node.cond[0])
def visit_BasicBlock(self, node: BasicBlock):
return
class CondExtractor(NodeTransformer):
def __init__(self, count=0):
self.count = count
def visit_IfStmt(self, node: IfStmt):
if not hasattr(self, "count"):
self.count = 0
else:
self.count = self.count + 1
tmp = self.count
cond = [
BinOp(op="!=",
lvalue=DeclRefExpr(name="tmp_if_" + str(tmp - 1)),
rvalue=IntLiteral(value="0"))
]
body_if = [self.visit(node.body_if[0])]
if hasattr(node, "body_else"):
body_else = [self.visit(node.body_else[0])]
return IfStmt(cond=cond, body_if=body_if, body_else=body_else)
else:
return IfStmt(cond=cond, body_if=body_if)
def visit_BasicBlock(self, node: BasicBlock):
newbody = []
for child in node.body:
lister = CondExtractorNodeLister()
lister.visit(child)
res = lister.nodes
temp = self.count
if res is not None:
for i in range(0, len(res)):
newbody.append(
DeclStmt(vardecl=[
VarDecl(name="tmp_if_" + str(temp), type=Int())
]))
newbody.append(
BinOp(op="=",
lvalue=DeclRefExpr(name="tmp_if_" + str(temp)),
rvalue=res[i]))
newbody.append(self.visit(child))
return BasicBlock(body=newbody)
class ForDeclarerNodeLister(NodeVisitor):
def __init__(self):
self.nodes: List[Node] = []
def visit_ForStmt(self, node: ForStmt):
if isinstance(node.init[0], BinOp):# for(int i=0;) for (i=0;)
self.nodes.append(node.init[0])
def visit_BasicBlock(self, node: BasicBlock):
return
class ForDeclarer(NodeTransformer):
def __init__(self):
self.count = 0
self.name_mapping = {}
def visit_BasicBlock(self, node: BasicBlock):
# make sure name mapping gets reverted properly when exiting contexts
prev = self.name_mapping.copy()
newbody = []
for child in node.body:
lister = ForDeclarerNodeLister()
lister.visit(child)
res = lister.nodes
if res is not None:
for i in range(0, len(res)):
#print("FOREXTRABINOP")
newbody.append(res[i])
newbody.append(self.visit(child))
self.name_mapping = prev
return BasicBlock(body=newbody)
def visit_DeclRefExpr(self, node: DeclRefExpr):
if self.name_mapping.get(node.name) is None:
return node
else:
return DeclRefExpr(name=self.name_mapping[node.name])
def visit_ForStmt(self, node: ForStmt):
if isinstance(node.body, list):
node.body = node.body[0]
if isinstance(node.init[0], BinOp):
self.count = self.count + 1
assert isinstance(
node.init[0].lvalue,
DeclRefExpr), "expecting lvalue of binop to be a declRefExpr"
self.name_mapping[node.init[0].lvalue.name] = "tmp_for_" + str(
self.count)
return ForStmt(init=[
DeclStmt(vardecl=[
VarDecl(name="tmp_for_" + str(self.count),
type=Int(),
init=node.init[0].rvalue)
])
],
cond=[self.generic_visit(node.cond[0])],
body=[self.generic_visit(node.body)],
iter=[self.generic_visit(node.iter[0])])
elif isinstance(node.init[0], DeclStmt):
return self.generic_visit(node)
class UnaryToBinary(NodeTransformer):
def visit_UnOp(self, node: UnOp):
if node.op == "++":
return BinOp(op="=",
lvalue=node.lvalue,
rvalue=BinOp(op="+",
lvalue=node.lvalue,
rvalue=IntLiteral(value="1")))
elif node.op == "--":
return BinOp(op="=",
lvalue=node.lvalue,
rvalue=BinOp(op="-",
lvalue=node.lvalue,
rvalue=IntLiteral(value="1")))
else:
return self.generic_visit(node)
class CompoundToBinary(NodeTransformer):
def visit_CompoundAssignOp(self, node: CompoundAssignOp):
newop = (node.op).replace("=", "")
return BinOp(op="=",
lvalue=node.lvalue,
rvalue=BinOp(op=newop,
lvalue=node.lvalue,
rvalue=node.rvalue))
class UnaryReferenceAndPointerRemover(NodeTransformer):
def visit_UnOp(self, node: UnOp):
if node.op == "*" or node.op == "&":
return self.generic_visit(node.lvalue)
else:
return self.generic_visit(node)
class FindOutputNodesVisitor(NodeVisitor):
def __init__(self):
self.nodes: List[DeclRefExpr] = []
def visit_BinOp(self, node: BinOp):
if node.op == "=":
if isinstance(node.lvalue, DeclRefExpr):
self.nodes.append(node.lvalue)
if isinstance(node.lvalue, UnOp):
if node.lvalue.op == "*":
if isinstance(node.lvalue.lvalue, DeclRefExpr):
self.nodes.append(node.lvalue.lvalue)
if isinstance(node.lvalue.lvalue, ArraySubscriptExpr):
tmp = node.lvalue.lvalue
while isinstance(tmp, ArraySubscriptExpr):
tmp = tmp.unprocessed_name
if isinstance(tmp, DeclRefExpr):
self.nodes.append(tmp)
if isinstance(node.lvalue, ArraySubscriptExpr):
tmp = node.lvalue
while isinstance(tmp, ArraySubscriptExpr):
tmp = tmp.unprocessed_name
if isinstance(tmp, DeclRefExpr):
self.nodes.append(tmp)
self.visit(node.rvalue)
#def visit_TernaryExpr(self, node: TernaryExpr):
# used_vars_condition = [node for node in walk(node.cond) if isinstance(node, DeclRefExpr)]
# used_vars_left = [node for node in walk(node.left) if isinstance(node, DeclRefExpr)]
# used_vars_right = [node for node in walk(node.right) if isinstance(node, DeclRefExpr)]
# self.nodes = self.nodes + used_vars_condition
class FindInputNodesVisitor(NodeVisitor):
def __init__(self):
self.nodes: List[DeclRefExpr] = []
def visit_DeclRefExpr(self, node: DeclRefExpr):
self.nodes.append(node)
def visit_BinOp(self, node: BinOp):
if node.op == "=":
if isinstance(node.lvalue, DeclRefExpr):
pass
if isinstance(node.lvalue, ArraySubscriptExpr):
tmp = node.lvalue
while isinstance(tmp, ArraySubscriptExpr):
self.visit(tmp.index)
tmp = tmp.unprocessed_name
if isinstance(tmp, DeclRefExpr):
pass
else:
self.visit(node.lvalue)
self.visit(node.rvalue)
class FunctionLister(NodeVisitor):
def __init__(self) -> None:
self.function_names: Set[str] = set()
self.defined_function_names: Set[str] = set()
self.undefined_function_names: Set[str] = set()
def visit_AST(self, node: AST):
self.generic_visit(node)
self.undefined_function_names = self.function_names.difference(
self.defined_function_names)
def visit_FuncDecl(self, node: FuncDecl):
self.function_names.add(node.name)
if node.body is not None and node.body != []:
self.defined_function_names.add(node.name)
def is_defined(self, function_name: str) -> bool:
return function_name in self.defined_function_names
def is_declared(self, function_name: str) -> bool:
return function_name in self.function_names
class MoveReturnValueToArguments(NodeTransformer):
"""
expects: no class method calls
"""
def visit_AST(self, node: AST) -> None:
self.function_lister = FunctionLister()
self.function_lister.visit(node)
return self.generic_visit(node)
def visit_FuncDecl(self, node: FuncDecl):
if self.function_lister.is_defined(node.name):
if not isinstance(node.result_type, Void):
#node.args.append(ParmDecl(name = "c2d_retval", type = Pointer(pointee_type = node.result_type), lineno = node.lineno))
node.args.append(
ParmDecl(name="c2d_retval",
type=node.result_type,
lineno=node.lineno))
node.result_type = Void()
return self.generic_visit(node)
def visit_CallExpr(self, node: CallExpr):
if self.function_lister.is_defined(node.name.name):
if not isinstance(node.type, Void):
node.args.append(
DeclRefExpr(name="NULL",
type=Pointer(pointee_type=Void())))
node.type = Void()
return self.generic_visit(node)
def visit_BinOp(self, node: BinOp):
if isinstance(node.rvalue, CallExpr):
if self.function_lister.is_defined(node.rvalue.name.name):
#reference = UnOp(lvalue = node.lvalue, op = "&", postfix = False, type = Pointer(pointee_type = node.lvalue.type))
reference = node.lvalue
node.rvalue.args.append(reference)
return self.generic_visit(node.rvalue)
return self.generic_visit(node)
def visit_RetStmt(self, node: RetStmt):
if hasattr(node, "ret_expr"):
return_type = node.ret_expr.type
#left = UnOp(op = "*", postfix = False, type = Pointer(pointee_type = return_type),
# lvalue = DeclRefExpr(name = "c2d_retval", type = return_type))
# TODO: implement using pointers
left = DeclRefExpr(name="c2d_retval", type=return_type)
assignment = BinOp(op="=",
lvalue=left,
rvalue=node.ret_expr,
type=return_type)
return [assignment, RetStmt()]
return self.generic_visit(node)
class FlattenStructs(NodeTransformer):
def __init__(self) -> None:
self.structdefs: Dict[str, StructDecl] = {}
def struct_is_defined(self, struct_name):
return struct_name in self.structdefs.keys()
def visit_AST(self, node: AST):
self.structdefs = {sd.name: sd for sd in node.structdefs}
return self.generic_visit(node)
def visit_StructDecl(self, node: StructDecl):
replacement_fields = []
for field in node.fields:
if field.type.is_struct_like():
nested_struct_name = field.type.get_chain_end().name
if self.struct_is_defined(nested_struct_name):
nested_struct_fields = self.structdefs[
nested_struct_name].fields
for nested_field in nested_struct_fields:
replacement_fields.append(
FieldDecl(name=field.name + "_" +
nested_field.name,
type=field.type.inject_type(
nested_field.type)))
else:
replacement_fields.append(field)
else:
replacement_fields.append(field)
return StructDecl(name=node.name, fields=replacement_fields)
class ReplaceStructDeclStatements(NodeTransformer):
def __init__(self):
self.structdefs: Dict[str, StructDecl] = {}
def struct_is_defined(self, struct_name):
return struct_name in self.structdefs.keys()
def get_struct(self, struct_name: str):
return self.structdefs.get(struct_name)
def get_field_replacement_name(self, struct_type_name: str,
struct_variable_name: str, field_name: str):
return "c2d_struct_" + struct_type_name + "_" + struct_variable_name + "_" + field_name
def split_struct_type(self, struct_like_type,
var_name) -> Dict[str, Tuple[str, Type]]:
if isinstance(struct_like_type, Struct):
if not self.struct_is_defined(struct_like_type.name):
return None
defined_struct = self.get_struct(struct_like_type.name)
return {
field.name:
(self.get_field_replacement_name(defined_struct.name, var_name,
field.name), field.type)
for field in defined_struct.fields
}
if isinstance(struct_like_type, ConstantArray):
splits = self.split_struct_type(struct_like_type.element_type,
var_name)
if splits is not None:
return {
field_name: (split_name + "_arr",
ConstantArray(size=struct_like_type.size,
element_type=split_type))
for (field_name, (split_name,
split_type)) in splits.items()
}
else:
return None
if isinstance(struct_like_type, Pointer):
splits = self.split_struct_type(struct_like_type.pointee_type,
var_name)
if splits is not None:
return {
field_name:
(split_name + "_ptr", Pointer(pointee_type=split_type))
for (field_name, (split_name,
split_type)) in splits.items()
}
else:
return None
raise Exception("split_struct_type expects struct like type")
def replace_container_expr(self, container_expr: Expression,
desired_field: str):
if isinstance(container_expr, DeclRefExpr):
replacement_name, replacement_type = self.split_struct_type(
container_expr.type, container_expr.name)[desired_field]
return DeclRefExpr(name=replacement_name, type=replacement_type)
if isinstance(container_expr, ArraySubscriptExpr):
replacement = copy.deepcopy(container_expr)
replacement.unprocessed_name = self.replace_container_expr(
container_expr.unprocessed_name, desired_field)
replacement.type = replacement.unprocessed_name.type.element_type
return replacement
if isinstance(container_expr, MemberRefExpr):
replacement = copy.deepcopy(container_expr)
replacement.name += "_" + desired_field
return self.visit(replacement)
if isinstance(container_expr, UnOp):
replacement = copy.deepcopy(container_expr)
replacement.lvalue = self.replace_container_expr(
container_expr.lvalue, desired_field)
replacement.type = container_expr.type.inject_type(
replacement.lvalue.type)
return replacement
raise Exception("cannot replace container expression: ",
container_expr)
def visit_AST(self, node: AST):
self.structdefs = {sd.name: sd for sd in node.structdefs}
return self.generic_visit(node)
def visit_DeclRefExpr(self, node: DeclRefExpr):
if hasattr(node, "type") and node.type.is_struct_like():
splits = self.split_struct_type(node.type, node.name)
if splits is not None:
return [
DeclRefExpr(name=n, type=t) for (n, t) in splits.values()
]
return self.generic_visit(node)
def visit_BinOp(self, node: BinOp):
if hasattr(node.lvalue, "type") and hasattr(node.rvalue, "type"):
if node.lvalue.type.is_struct_like():
if node.lvalue.type == node.rvalue.type:
struct = node.lvalue.type.get_chain_end()
if node.op == "=":
replacement_statements = []
fields = self.get_struct(struct.name).fields
if fields is not None:
for f in fields:
l_member_ref = MemberRefExpr(
name=f.name,
type=node.lvalue.type.inject_type(f.type),
containerexpr=node.lvalue)
r_member_ref = MemberRefExpr(
name=f.name,
type=node.rvalue.type.inject_type(f.type),
containerexpr=node.rvalue)
binop = BinOp(op="=",
type=f.type,
lvalue=l_member_ref,
rvalue=r_member_ref)
replacement_statements.append(binop)
return [
self.visit(s) for s in replacement_statements
]
return self.generic_visit(node)
def visit_VarDecl(self, node: VarDecl):
if node.type.is_struct_like():
splits = self.split_struct_type(node.type, node.name)
if splits is not None:
return [VarDecl(name=n, type=t) for (n, t) in splits.values()]
return self.generic_visit(node)
def visit_DeclStmt(self, node: DeclStmt):
replacement_stmts = []
for var_decl in node.vardecl:
replacement_var_decls = self.as_list(self.visit(var_decl))
replacement_stmts += [
DeclStmt(vardecl=[vd]) for vd in replacement_var_decls
]
return replacement_stmts
def visit_MemberRefExpr(self, node: MemberRefExpr):
if hasattr(node.containerexpr,
"type") and node.containerexpr.type.is_struct_like():
struct_like_type = node.containerexpr.type.get_chain_end()
if self.struct_is_defined(struct_like_type.name):
return self.replace_container_expr(node.containerexpr,
node.name)
return self.generic_visit(node)
def visit_ParmDecl(self, node: ParmDecl):
if node.type.is_struct_like():
splits = self.split_struct_type(node.type, node.name)
return [ParmDecl(name=n, type=t) for (n, t) in splits.values()]
return self.generic_visit(node)
class CXXClassToStruct(NodeTransformer):
def __init__(self):
self.replacement_structs: List[StructDecl] = []
self.exported_functions: List[FuncDecl] = []
def get_class_type_replacement_name(self, class_name):
return "c2d_class_as_struct_" + class_name
def get_class_type_replacement(self, class_like_type) -> Type:
if isinstance(class_like_type, Class):
return Struct(name=self.get_class_type_replacement_name(
class_like_type.name))
if isinstance(class_like_type, Pointer):
t = self.get_class_type_replacement(class_like_type.pointee_type)
if t is not None:
return Pointer(pointee_type=t)
if isinstance(class_like_type, ConstantArray):
t = self.get_class_type_replacement(class_like_type.element_type)
if t is not None:
return ConstantArray(size=class_like_type.size, element_type=t)
return None
def is_class_like(self, type) -> bool:
if isinstance(type, ConstantArray):
return self.is_class_like(type.element_type)
if isinstance(type, Pointer):
return self.is_class_like(type.pointee_type)
return isinstance(type, Class)
def get_class_variable_replacement_name(self, var_name):
return var_name
def get_method_replacement_name(self, method_name, class_name):
return "c2d_" + class_name + "_method_" + method_name
def visit_AST(self, node: AST):
transformed_ast = self.generic_visit(node)
transformed_ast.structdefs += self.replacement_structs
transformed_ast.funcdefs += self.exported_functions
return transformed_ast
def visit_ClassDecl(self, node: ClassDecl):
replacement_struct = StructDecl(
name=self.get_class_type_replacement_name(node.name),
fields=node.fields)
self.replacement_structs.append(replacement_struct)
self.generic_visit(node)
return None
def visit_CXXMethod(self, node: CXXMethod):
if node.body is None:
return None
node = self.generic_visit(node)
replacement_struct_name = self.get_class_type_replacement_name(
node.parent_class_type.name)
this_arg = ParmDecl(
name="c2d_this",
type=Pointer(pointee_type=Struct(name=replacement_struct_name)))
replacement_function_name = self.get_method_replacement_name(
node.name, node.parent_class_type.name)
replacement_function = FuncDecl(name=replacement_function_name,
args=[this_arg] + node.args,
body=node.body)
self.exported_functions.append(replacement_function)
return None
def visit_MemberRefExpr(self, node: MemberRefExpr):
"""
Replace any class field accesses with accesses to their replacement struct
"""
node = self.generic_visit(node)
return node
def visit_DeclRefExpr(self, node: DeclRefExpr):
"""
Replace any references to a class declaration with references to their replacement struct
"""
if hasattr(node, "type") and self.is_class_like(node.type):
replacement_struct_type = self.get_class_type_replacement(
node.type)
return DeclRefExpr(name=self.get_class_variable_replacement_name(
node.name),
type=replacement_struct_type)
return self.generic_visit(node)
def visit_VarDecl(self, node: VarDecl):
if self.is_class_like(node.type):
replacement_struct_type = self.get_class_type_replacement(
node.type)
return VarDecl(name=self.get_class_variable_replacement_name(
node.name),
type=replacement_struct_type)
return self.generic_visit(node)
def visit_CXXThisExpr(self, node: CXXThisExpr):
replacement_struct_name = self.get_class_type_replacement_name(
node.type.pointee_type.name)
return DeclRefExpr(
name="c2d_this",
type=Pointer(pointee_type=Struct(name=replacement_struct_name)))
def visit_CallExpr(self, node: CallExpr):
"""
Replace any method calls with calls to their exported counterparts.
"""
if isinstance(node.name, MemberRefExpr):
mrefexpr = node.name
containerexpr = mrefexpr.containerexpr
#direct member ref expression: class_var.method()
if hasattr(containerexpr, "type") and isinstance(
containerexpr.type, Class):
replacement_function_name = self.get_method_replacement_name(
mrefexpr.name, containerexpr.type.name)
first_argument = UnOp(lvalue=self.visit(containerexpr),
op="&",
postfix=False)
return CallExpr(name=replacement_function_name,
args=[first_argument] +
[self.visit(a) for a in node.args])
#indirect member ref expression: class_var->method()
if hasattr(containerexpr, "type") and isinstance(
containerexpr.type, Pointer):
if isinstance(containerexpr.type.pointee_type, Class):
replacement_function_name = self.get_method_replacement_name(
mrefexpr.name, containerexpr.type.pointee_type.name)
first_argument = self.visit(containerexpr)
return CallExpr(name=replacement_function_name,
args=[first_argument] +
[self.visit(a) for a in node.args])
return self.generic_visit(node)
|
StarcoderdataPython
|
164605
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, getdate
from frappe import _
from erpnext.stock.utils import get_valid_serial_nos
from erpnext.utilities.transaction_base import TransactionBase
class InstallationNote(TransactionBase):
def __init__(self, *args, **kwargs):
super(InstallationNote, self).__init__(*args, **kwargs)
self.status_updater = [{
'source_dt': 'Installation Note Item',
'target_dt': 'Delivery Note Item',
'target_field': 'installed_qty',
'target_ref_field': 'qty',
'join_field': 'prevdoc_detail_docname',
'target_parent_dt': 'Delivery Note',
'target_parent_field': 'per_installed',
'source_field': 'qty',
'percent_join_field': 'prevdoc_docname',
'status_field': 'installation_status',
'keyword': 'Installed',
'overflow_type': 'installation'
}]
def validate(self):
self.validate_installation_date()
self.check_item_table()
from erpnext.controllers.selling_controller import check_active_sales_items
check_active_sales_items(self)
def is_serial_no_added(self, item_code, serial_no):
has_serial_no = frappe.db.get_value("Item", item_code, "has_serial_no")
if has_serial_no == 1 and not serial_no:
frappe.throw(_("Serial No is mandatory for Item {0}").format(item_code))
elif has_serial_no != 1 and cstr(serial_no).strip():
frappe.throw(_("Item {0} is not a serialized Item").format(item_code))
def is_serial_no_exist(self, item_code, serial_no):
for x in serial_no:
if not frappe.db.exists("Serial No", x):
frappe.throw(_("Serial No {0} does not exist").format(x))
def get_prevdoc_serial_no(self, prevdoc_detail_docname):
serial_nos = frappe.db.get_value("Delivery Note Item",
prevdoc_detail_docname, "serial_no")
return get_valid_serial_nos(serial_nos)
def is_serial_no_match(self, cur_s_no, prevdoc_s_no, prevdoc_docname):
for sr in cur_s_no:
if sr not in prevdoc_s_no:
frappe.throw(_("Serial No {0} does not belong to Delivery Note {1}").format(sr, prevdoc_docname))
def validate_serial_no(self):
prevdoc_s_no, sr_list = [], []
for d in self.get('items'):
self.is_serial_no_added(d.item_code, d.serial_no)
if d.serial_no:
sr_list = get_valid_serial_nos(d.serial_no, d.qty, d.item_code)
self.is_serial_no_exist(d.item_code, sr_list)
prevdoc_s_no = self.get_prevdoc_serial_no(d.prevdoc_detail_docname)
if prevdoc_s_no:
self.is_serial_no_match(sr_list, prevdoc_s_no, d.prevdoc_docname)
def validate_installation_date(self):
for d in self.get('items'):
if d.prevdoc_docname:
d_date = frappe.db.get_value("Delivery Note", d.prevdoc_docname, "posting_date")
if d_date > getdate(self.inst_date):
frappe.throw(_("Installation date cannot be before delivery date for Item {0}").format(d.item_code))
def check_item_table(self):
if not(self.get('items')):
frappe.throw(_("Please pull items from Delivery Note"))
def on_update(self):
frappe.db.set(self, 'status', 'Draft')
def on_submit(self):
self.validate_serial_no()
self.update_prevdoc_status()
frappe.db.set(self, 'status', 'Submitted')
def on_cancel(self):
self.update_prevdoc_status()
frappe.db.set(self, 'status', 'Cancelled')
|
StarcoderdataPython
|
86370
|
<filename>setup.py
from setuptools import setup, find_packages
REQUIRED_PYTHON = (3, 6, 4)
EXCLUDE_FROM_PACKAGES = []
try:
with open('LICENSE.txt', 'r') as f:
_license = f.read()
except:
_license = ''
try:
with open('README.md', 'r') as f:
_readme = f.read()
except:
_readme = ''
setup(
name='financePy',
python_requires='>={}.{}'.format(*REQUIRED_PYTHON),
packages=find_packages(exclude=EXCLUDE_FROM_PACKAGES),
version='0.1',
description='',
author='<NAME>',
author_email='<EMAIL>',
license=_license,
long_description=_readme,
url='no',
download_url='https://github.com/MichelangeloConserva/prova/releases',
install_requires=['lxml','setuptools', 'pandas', 'requests','quandl','bs4','xlrd','scipy','selenium','matplotlib','html5lib==0.9999999'],
)
|
StarcoderdataPython
|
1610820
|
class ConsumeException(Exception):
def __init__(self, message):
self.__message = message
def __str__(self):
return repr(self.__message)
def get_message(self):
return self.__message
|
StarcoderdataPython
|
1721731
|
<filename>projects/democv/calibrate.py
import numpy as np
import cv2
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((8*6,3), np.float32)
objp[:,:2] = np.mgrid[0:6,0:8].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
cap = cv2.VideoCapture(1)
img_ = None
while True:
ret, img = cap.read()
if img is None:
print('Failed to get image')
continue
img = cv2.resize(img, None, fx=0.5, fy=0.5)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
img_ = gray
# Find the chess board corners
ret, corners = cv2.findChessboardCorners(gray, (6, 8))
if ret == True:
print('Found chessboard.')
objpoints.append(objp)
corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
imgpoints.append(corners2)
# Draw and display the corners
img = cv2.drawChessboardCorners(img, (6, 8), corners2,ret)
cv2.imshow('img',img)
ch = cv2.waitKey(1)
if ch == 27:
break
cv2.destroyAllWindows()
# Find the calibration parameters.
rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_.shape[::-1],None,None)
print(camera_matrix)
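# A possible follow-up step (sketch, assuming the calibration above succeeded):
# undistort the last captured frame with the estimated parameters.
# undistorted = cv2.undistort(img_, camera_matrix, dist_coefs)
# cv2.imwrite('undistorted.png', undistorted)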
|
StarcoderdataPython
|
1791262
|
<reponame>lunduniversity/schoolprog-satellite
temps = []
with open("data_april_2017_lund.txt", "r") as f:
# print(f.read().split("\n")[:-1])
temps = list(map(lambda x: float(x.split()[3]), f.read().split("\n")[:-1]))
print(temps)
with open("temps_april_2017_lund.txt", "w") as f:
for i in range(len(temps)):
if(i < len(temps)-1):
f.write(str(temps[i]) + "\n")
else:
f.write(str(temps[i]))
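# Note on the assumed input format (not stated in the original file): each line
# of data_april_2017_lund.txt is expected to hold whitespace-separated fields
# with the temperature as the fourth field, e.g.
#   2017 04 01 4.2
# which is what the float(x.split()[3]) extraction above relies on.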
|
StarcoderdataPython
|
1739010
|
<reponame>hujingguang/Archery
# -*- coding: UTF-8 -*-
"""
@author: hhyo、yyukai
@license: Apache Licence
@file: redis.py
@time: 2019/03/26
"""
import re
import redis
import logging
import traceback
from common.utils.timer import FuncTimer
from . import EngineBase
from .models import ResultSet, ReviewSet, ReviewResult
__author__ = 'hhyo'
logger = logging.getLogger('default')
class RedisEngine(EngineBase):
def get_connection(self, db_name=None):
db_name = db_name or 0
return redis.Redis(host=self.host, port=self.port, db=db_name, password=self.password,
encoding_errors='ignore', decode_responses=True)
@property
def name(self):
return 'Redis'
@property
def info(self):
return 'Redis engine'
def get_all_databases(self):
"""
Get the list of databases.
:return:
"""
result = ResultSet(full_sql='CONFIG GET databases')
conn = self.get_connection()
rows = conn.config_get('databases')['databases']
db_list = [str(x) for x in range(int(rows))]
result.rows = db_list
return result
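# Illustrative result (assuming Redis's default configuration of 16 databases):
# result.rows would be ['0', '1', ..., '15'].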
def query_check(self, db_name=None, sql='', limit_num=0):
"""提交查询前的检查"""
result = {'msg': '', 'bad_query': True, 'filtered_sql': sql, 'has_star': False}
safe_cmd = ["scan", "exists", "ttl", "pttl", "type", "get", "mget", "strlen",
"hgetall", "hexists", "hget", "hmget", "hkeys", "hvals",
"smembers", "scard", "sdiff", "sunion", "sismember", "llen", "lrange", "lindex"]
# Command validation: only commands listed in safe_cmd may be executed
for cmd in safe_cmd:
if re.match(fr'^{cmd}', sql.strip(), re.I):
result['bad_query'] = False
break
if result['bad_query']:
result['msg'] = "禁止执行该命令!"
return result
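# Minimal usage sketch (assumed; "engine" stands for a RedisEngine instance):
# read-only commands pass the whitelist, anything else is rejected.
#   >>> engine.query_check(sql='GET user:1')['bad_query']
#   False
#   >>> engine.query_check(sql='FLUSHALL')['bad_query']
#   True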
def query(self, db_name=None, sql='', limit_num=0, close_conn=True):
"""返回 ResultSet """
result_set = ResultSet(full_sql=sql)
try:
conn = self.get_connection(db_name=db_name)
rows = conn.execute_command(sql)
result_set.column_list = ['Result']
if isinstance(rows, list):
if re.match(fr'^scan', sql.strip(), re.I):
keys = [[row] for row in rows[1]]
keys.insert(0, rows[0])
result_set.rows = tuple(keys)
result_set.affected_rows = len(rows[1])
else:
result_set.rows = tuple([row] for row in rows)
result_set.affected_rows = len(rows)
else:
result_set.rows = tuple([[rows]])
result_set.affected_rows = 1 if rows else 0
if limit_num > 0:
result_set.rows = result_set.rows[0:limit_num]
except Exception as e:
logger.warning(f"Redis命令执行报错,语句:{sql}, 错误信息:{traceback.format_exc()}")
result_set.error = str(e)
return result_set
def filter_sql(self, sql='', limit_num=0):
return sql.strip()
def query_masking(self, db_name=None, sql='', resultset=None):
"""不做脱敏"""
return resultset
def execute_check(self, db_name=None, sql=''):
"""上线单执行前的检查, 返回Review set"""
check_result = ReviewSet(full_sql=sql)
split_sql = [cmd.strip() for cmd in sql.split('\n') if cmd.strip()]
line = 1
for cmd in split_sql:
result = ReviewResult(id=line,
errlevel=0,
stagestatus='Audit completed',
errormessage='None',
sql=cmd,
affected_rows=0,
execute_time=0, )
check_result.rows += [result]
line += 1
return check_result
def execute_workflow(self, workflow):
"""执行上线单,返回Review set"""
sql = workflow.sqlworkflowcontent.sql_content
split_sql = [cmd.strip() for cmd in sql.split('\n') if cmd.strip()]
execute_result = ReviewSet(full_sql=sql)
line = 1
cmd = None
try:
conn = self.get_connection(db_name=workflow.db_name)
for cmd in split_sql:
with FuncTimer() as t:
conn.execute_command(cmd)
execute_result.rows.append(ReviewResult(
id=line,
errlevel=0,
stagestatus='Execute Successfully',
errormessage='None',
sql=cmd,
affected_rows=0,
execute_time=t.cost,
))
line += 1
except Exception as e:
logger.warning(f"Redis命令执行报错,语句:{cmd or sql}, 错误信息:{traceback.format_exc()}")
# Append the failing statement's details to the execution result
execute_result.error = str(e)
execute_result.rows.append(ReviewResult(
id=line,
errlevel=2,
stagestatus='Execute Failed',
errormessage=f'异常信息:{e}',
sql=cmd,
affected_rows=0,
execute_time=0,
))
line += 1
# Mark the statements after the failing one as audit-passed but not executed, and append them to the result
for statement in split_sql[line - 1:]:
execute_result.rows.append(ReviewResult(
id=line,
errlevel=0,
stagestatus='Audit completed',
errormessage=f'前序语句失败, 未执行',
sql=statement,
affected_rows=0,
execute_time=0,
))
line += 1
return execute_result
|
StarcoderdataPython
|
1685110
|
<filename>yuntu/models.py
from django.db import models
class Index(models.Model):
master_title = models.CharField(max_length=50)
slave_title = models.CharField(max_length=200)
about_us_string = models.TextField()
contact_us_address = models.CharField(max_length=500)
contact_us_email = models.CharField(max_length=200)
def __unicode__(self):
return self.master_title
class FeatureMatrix(models.Model):
feature_name = models.CharField(max_length=200)
yuntu_app = models.CharField(max_length=100)
replay_app = models.CharField(max_length=100)
meipai_app = models.CharField(max_length=100)
weishi_app = models.CharField(max_length=100)
xiaoying_app = models.CharField(max_length=100)
imovie_app = models.CharField(max_length=100)
hyperlapse_app = models.CharField(max_length=100)
weipai_app = models.CharField(max_length=100)
wopai_app = models.CharField(max_length=100)
yowo_app = models.CharField(max_length=100)
musemage_app = models.CharField(max_length=100)
def __unicode__(self):
return self.feature_name
class SubscribedEmail(models.Model):
subscribed_email_address = models.EmailField(null=False, blank=False)
subscribed_date_time = models.DateTimeField(auto_now_add=True)
notified = models.BooleanField(default=False)
def __unicode__(self):
return self.subscribed_email_address
|
StarcoderdataPython
|
4838401
|
import factory
from factory import Faker
from factory.django import DjangoModelFactory
from ..models import Agency, Correspondence, RecordRequest, RecordRequestFile
class AgencyFactory(DjangoModelFactory):
class Meta:
model = Agency
name = Faker("company")
class RecordRequestFactory(DjangoModelFactory):
class Meta:
model = RecordRequest
agency = factory.SubFactory(AgencyFactory)
class CorrespondenceFactory(DjangoModelFactory):
class Meta:
model = Correspondence
contact_address = factory.Faker("email")
request = factory.SubFactory(RecordRequestFactory)
class RecordRequestFileFactory(DjangoModelFactory):
class Meta:
model = RecordRequestFile
request = factory.SubFactory(RecordRequestFactory)
title = Faker("file_name")
file = factory.django.FileField()
|
StarcoderdataPython
|
3276967
|
<reponame>jim/documenters-aggregator<filename>tests/test_chi_school_community_action_council.py<gh_stars>0
from datetime import datetime
import pytest
from freezegun import freeze_time
from tests.utils import file_response
from city_scrapers.spiders.chi_school_community_action_council import Chi_school_community_action_councilSpider
freezer = freeze_time('2018-06-01 12:00:01')
freezer.start()
test_response = file_response('files/chi_school_community_action_council_CAC.html', url='http://cps.edu/FACE/Pages/CAC.aspx')
spider = Chi_school_community_action_councilSpider()
parsed_items = [item for item in spider.parse(test_response) if isinstance(item, dict)]
current_month_number = datetime.today().month
freezer.stop()
def test_num_items():
assert len(parsed_items) == (13 - current_month_number)*8
def test_name():
assert parsed_items[0]['name'] == 'Austin Community Action Council'
def test_start_time():
assert parsed_items[0]['start_time'].isoformat() == '2018-06-12T17:30:00'
def test_end_time():
assert parsed_items[0]['end_time'].isoformat() == '2018-06-12T20:30:00'
# def test_id():
# assert parsed_items[0]['id'] == \
# 'chi_school_community_action_council/201805081730/x/austin_community_action_council'
def test_location():
assert parsed_items[0]['location'] == {
'url': None,
'name': ' <NAME> ',
'address': '5101 W Harrison St.',
'coordinates': {
'latitude': None,
'longitude': None,
},
}
@pytest.mark.parametrize('item', parsed_items)
def test_description(item):
assert item['description'] == "Community Action Councils, or CACs, consist of 25-30 voting members who are " \
"directly involved in developing a strategic plan for educational success within " \
"their communities. CAC members include parents; elected officials; faith-based " \
"institutions, health care and community-based organizations; Local School" \
" Council (LSC) members; business leaders; educators and school administrators; " \
"staff members from Chicago's Sister Agencies; community residents; " \
"and students. There are nine CACs across Chicago. Each works to empower the " \
"community they serve to lead the improvement of local quality education."
@pytest.mark.parametrize('item', parsed_items)
def test_sources(item):
assert item['sources'] == [{'url': 'http://cps.edu/FACE/Pages/CAC.aspx',
'note': ''}]
@pytest.mark.parametrize('item', parsed_items)
def test_timezone(item):
assert item['timezone'] == 'America/Chicago'
@pytest.mark.parametrize('item', parsed_items)
def test_all_day(item):
assert item['all_day'] is False
@pytest.mark.parametrize('item', parsed_items)
def test_classification(item):
assert item['classification'] == 'Education'
@pytest.mark.parametrize('item', parsed_items)
def test__type(item):
assert item['_type'] == 'event'
|
StarcoderdataPython
|
3205906
|
import copy
import torch.nn as nn
from models.glt_models import LinearClassifier
from models.resnet_blocks import BasicBlock, Bottleneck, DownsampleConv2d
from models.svdo_layers import LinearSVDO, Conv2dSVDO
class SequentialSparsifier(nn.Module):
def __init__(self, pretrained_model):
super(SequentialSparsifier, self).__init__()
self.model = nn.ModuleList()
for module in pretrained_model:
self.model.append(self.__get_sparse_layer(module))
self.train_mask = [False for _ in range(len(pretrained_model))]
@classmethod
def __get_sparse_layer(cls, dense_layer):
if isinstance(dense_layer, nn.Linear):
sparse_layer = LinearSVDO(dense_layer.in_features, dense_layer.out_features,
dense_layer.bias is not None)
sparse_layer.weight.data = dense_layer.weight.data.clone()
if dense_layer.bias is not None:
sparse_layer.bias.data = dense_layer.bias.data.clone()
return sparse_layer
elif isinstance(dense_layer, nn.Conv2d):
sparse_layer = Conv2dSVDO(dense_layer.in_channels, dense_layer.out_channels,
dense_layer.kernel_size, stride=dense_layer.stride,
padding=dense_layer.padding, dilation=dense_layer.dilation,
groups=dense_layer.groups, bias=dense_layer.bias is not None)
sparse_layer.weight.data = dense_layer.weight.data.clone()
if dense_layer.bias is not None:
sparse_layer.bias.data = dense_layer.bias.data.clone()
return sparse_layer
elif isinstance(dense_layer, DownsampleConv2d):
sparse_layer = DownsampleConv2d(dense_layer.in_channels, dense_layer.out_channels,
stride=dense_layer.stride, sparse=True)
sparse_layer.conv = cls.__get_sparse_layer(dense_layer.conv)
return sparse_layer
elif isinstance(dense_layer, BasicBlock):
sparse_layer = BasicBlock(dense_layer.in_channels, dense_layer.out_channels,
stride=dense_layer.stride, sparse=True)
sparse_layer.conv_1 = cls.__get_sparse_layer(dense_layer.conv_1)
sparse_layer.conv_2 = cls.__get_sparse_layer(dense_layer.conv_2)
if dense_layer.shortcut is not None:
sparse_layer.shortcut = cls.__get_sparse_layer(dense_layer.shortcut)
sparse_layer.bn_1 = copy.copy(dense_layer.bn_1)
sparse_layer.bn_2 = copy.copy(dense_layer.bn_2)
return sparse_layer
elif isinstance(dense_layer, Bottleneck):
sparse_layer = Bottleneck(dense_layer.in_channels, dense_layer.out_channels,
stride=dense_layer.stride, sparse=True)
sparse_layer.conv_1 = cls.__get_sparse_layer(dense_layer.conv_1)
sparse_layer.conv_2 = cls.__get_sparse_layer(dense_layer.conv_2)
sparse_layer.conv_3 = cls.__get_sparse_layer(dense_layer.conv_3)
if dense_layer.shortcut is not None:
sparse_layer.shortcut = cls.__get_sparse_layer(dense_layer.shortcut)
sparse_layer.bn_1 = copy.copy(dense_layer.bn_1)
sparse_layer.bn_2 = copy.copy(dense_layer.bn_2)
sparse_layer.bn_3 = copy.copy(dense_layer.bn_3)
return sparse_layer
elif isinstance(dense_layer, LinearClassifier):
sparse_layer = LinearClassifier(dense_layer.in_channels, num_classes=dense_layer.num_classes,
sparse=True)
sparse_layer.linear = cls.__get_sparse_layer(dense_layer.linear)
sparse_layer.bn = copy.copy(dense_layer.bn)
return sparse_layer
else:
return copy.copy(dense_layer)
@classmethod
def __get_dense_layer(cls, sparse_layer):
if isinstance(sparse_layer, LinearSVDO):
dense_layer = nn.Linear(sparse_layer.in_features, sparse_layer.out_features,
sparse_layer.bias is not None)
dense_layer.weight.data = sparse_layer.weight.data.clone()
dense_layer.weight.data *= (sparse_layer.log_alpha.data < sparse_layer.threshold).float()
if sparse_layer.bias is not None:
dense_layer.bias.data = sparse_layer.bias.data.clone()
return dense_layer
elif isinstance(sparse_layer, Conv2dSVDO):
dense_layer = nn.Conv2d(sparse_layer.in_channels, sparse_layer.out_channels,
sparse_layer.kernel_size, stride=sparse_layer.stride,
padding=sparse_layer.padding, dilation=sparse_layer.dilation,
groups=sparse_layer.groups, bias=sparse_layer.bias is not None)
dense_layer.weight.data = sparse_layer.weight.data.clone()
dense_layer.weight.data *= (sparse_layer.log_alpha.data < sparse_layer.threshold).float()
if sparse_layer.bias is not None:
dense_layer.bias.data = sparse_layer.bias.data.clone()
return dense_layer
elif isinstance(sparse_layer, DownsampleConv2d):
if not sparse_layer.sparse:
return copy.copy(sparse_layer)
dense_layer = DownsampleConv2d(sparse_layer.in_channels, sparse_layer.out_channels,
stride=sparse_layer.stride, sparse=False)
dense_layer.conv = cls.__get_dense_layer(sparse_layer.conv)
return dense_layer
elif isinstance(sparse_layer, BasicBlock):
if not sparse_layer.sparse:
return copy.copy(sparse_layer)
dense_layer = BasicBlock(sparse_layer.in_channels, sparse_layer.out_channels,
stride=sparse_layer.stride, sparse=False)
dense_layer.conv_1 = cls.__get_dense_layer(sparse_layer.conv_1)
dense_layer.conv_2 = cls.__get_dense_layer(sparse_layer.conv_2)
if sparse_layer.shortcut is not None:
dense_layer.shortcut = cls.__get_dense_layer(sparse_layer.shortcut)
dense_layer.bn_1 = copy.copy(sparse_layer.bn_1)
dense_layer.bn_2 = copy.copy(sparse_layer.bn_2)
return dense_layer
elif isinstance(sparse_layer, Bottleneck):
if not sparse_layer.sparse:
return copy.copy(sparse_layer)
dense_layer = Bottleneck(sparse_layer.in_channels, sparse_layer.out_channels,
stride=sparse_layer.stride, sparse=False)
dense_layer.conv_1 = cls.__get_dense_layer(sparse_layer.conv_1)
dense_layer.conv_2 = cls.__get_dense_layer(sparse_layer.conv_2)
dense_layer.conv_3 = cls.__get_dense_layer(sparse_layer.conv_3)
if sparse_layer.shortcut is not None:
dense_layer.shortcut = cls.__get_dense_layer(sparse_layer.shortcut)
dense_layer.bn_1 = copy.copy(sparse_layer.bn_1)
dense_layer.bn_2 = copy.copy(sparse_layer.bn_2)
dense_layer.bn_3 = copy.copy(sparse_layer.bn_3)
return dense_layer
elif isinstance(sparse_layer, LinearClassifier):
if not sparse_layer.sparse:
return copy.copy(sparse_layer)
dense_layer = LinearClassifier(sparse_layer.in_channels, num_classes=sparse_layer.num_classes,
sparse=False)
dense_layer.linear = cls.__get_dense_layer(sparse_layer.linear)
dense_layer.bn = copy.copy(sparse_layer.bn)
return dense_layer
else:
return copy.copy(sparse_layer)
def update_mask(self, new_mask):
self.train_mask = new_mask
def set_gradient_flow(self):
for module, train_flag in zip(self.model, self.train_mask):
module.train(mode=train_flag)
for parameter in module.parameters():
parameter.requires_grad = train_flag
def finalize_blocks(self, finalize_mask):
for i in range(len(self.train_mask)):
if self.train_mask[i]:
self.model[i] = self.__get_dense_layer(self.model[i])
def forward(self, x):
out = x
for module in self.model:
out = module(out)
return out
def kl_divergence(self):
total_kl = 0.0
for module, train_flag in zip(self.model, self.train_mask):
if train_flag:
total_kl = total_kl + module.kl_divergence()
return total_kl
|
StarcoderdataPython
|
1742266
|
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import enum
import errno
import json
import os
import pprint
import time
from abc import ABCMeta, abstractmethod, abstractproperty
from collections import namedtuple
import numpy as np
import click
class GoldenOutputNotAvailableError(Exception):
pass
class NoCorrectnessDesired(Exception):
pass
class ExtrasNeeded(Exception):
def __init__(self, extras):
super(ExtrasNeeded, self).__init__(
'Missing needed packages for benchmark; to fix, pip install {}'.format(
' '.join(extras)))
self.extras = extras
class FrontendCommand(click.Command):
def __init__(self, networks, *args, **kwargs):
super(FrontendCommand, self).__init__(*args, **kwargs)
self.__networks = networks
def format_epilog(self, ctx, formatter):
with formatter.section('Supported Networks'):
formatter.write_text(', '.join(self.__networks))
class Precision(enum.Enum):
TRAINING = 0.2
INFERENCE = 5e-04
class StopWatch(object):
def __init__(self, use_callgrind):
self._start = None
self._stop = None
self._use_callgrind = use_callgrind
self._callgrind_active = False
self._total = 0.0
def start_outer(self):
# Like start(), but does not turn on callgrind.
self._start = time.time()
def start(self):
self._start = time.time()
if self._use_callgrind:
os.system('callgrind_control --instr=on {}'.format(os.getpid()))
self._callgrind_active = True
def stop(self):
if self._start is not None:
stop = time.time()
self._total += stop - self._start
self._start = None
if self._callgrind_active:
self._callgrind_active = False
os.system('callgrind_control --instr=off {}'.format(os.getpid()))
def elapsed(self):
return self._total
class Output(object):
def __init__(self):
self.contents = None
self.precision = 'untested'
class Params(
namedtuple('Params', [
'batch_size', 'epochs', 'examples', 'warmups', 'network_name', 'backend_name',
'backend_opts', 'learn_phase'
])):
"""Parameters applied to a network during benchmarking."""
__slots__ = ()
@property
def epoch_size(self):
return self.examples // self.epochs
class ExplicitParamBuilder(object):
"""Builds Params for an explicit benchmark run."""
def __init__(self, batch_size, epochs, examples, warmups=32, learn_phase=None):
if not examples:
examples = 1024
self.params = Params(batch_size, epochs, examples, warmups, None, None, None, learn_phase)
def __call__(self, frontend, backend_name, network_names):
if not network_names:
raise click.UsageError('No networks specified; did you mean to add --blanket-run?')
for network_name in network_names:
params = self.params._replace(network_name=network_name, backend_name=backend_name)
yield params
class BlanketParamBuilder(object):
"""Builds Params for a blanket benchmark run."""
def __init__(self, epochs, learn_phase=None):
self.params = Params(None, epochs, 256, 32, None, None, None, learn_phase=learn_phase)
def __call__(self, frontend, backend_name, network_names):
if network_names:
raise click.UsageError(
'Networks specified with --blanket-run; choose one or the other')
for network_name in frontend.network_names:
for batch_size in frontend.blanket_batch_sizes:
params = self.params._replace(network_name=network_name,
batch_size=batch_size,
backend_name=backend_name)
yield params
class ConsoleReporter(object):
def __init__(self):
self.configuration = {}
def report(self, params, results, output):
print(results)
def complete(self):
pass
class ExplicitReporter(object):
"""Handles reports for an explicit benchmark run."""
def __init__(self, result_dir):
self.result_dir = result_dir
self.configuration = {}
def report(self, params, results, output):
try:
os.makedirs(self.result_dir)
except OSError as ex:
if ex.errno != errno.EEXIST:
click.echo(ex)
return
pprint.pprint(results)
with open(os.path.join(self.result_dir, 'result.json'), 'w') as out:
json.dump(results, out)
if isinstance(output, np.ndarray):
np.save(os.path.join(self.result_dir, 'result.npy'), output)
def complete(self):
pass
class BlanketReporter(object):
"""Handles reports for a blanket benchmark run."""
def __init__(self, result_dir):
self.result_dir = result_dir
self.outputs = {}
self.configuration = {}
self.configuration['frontend'] = None
self.configuration['backend'] = None
self.configuration['train'] = False
self.configuration['blanket_run'] = True
def report(self, params, results, output):
composite_str = ":".join(
[params.backend_name, params.network_name,
str(params.batch_size)])
self.outputs[composite_str] = {'results': dict(results)}
def complete(self):
self.outputs['run_configuration'] = self.configuration
try:
os.makedirs(self.result_dir)
except OSError as ex:
if ex.errno != errno.EEXIST:
click.echo(ex)
return
with open(
os.path.join(
self.result_dir, '{}-{}-report.json'.format(self.configuration['backend'],
self.configuration['frontend'])),
'w') as out:
json.dump(self.outputs, out, sort_keys=True, indent=2)
def _inner_run(reports,
frontend,
network_names,
params,
warmup,
callgrind,
print_stacktraces,
tile=None):
import plaidbench.cli as pb
model = frontend.model(params)
click.secho('Running {0} examples with {1}, batch size {2}, on backend {3}'.format(
params.examples, params.network_name, params.batch_size, params.backend_name),
fg='magenta')
benchmark_results = {}
model_output = None
if params.examples % params.batch_size != 0:
raise ValueError('The number of examples must be divisible by the batch size.')
try:
model.validate()
model.setup()
exec_stop_watch = StopWatch(callgrind)
compile_stop_watch = StopWatch(callgrind)
click.echo('Compiling network...', nl=False)
compile_stop_watch.start_outer()
model.compile()
compile_stop_watch.stop()
model_output, overrides = model.run(once=True)
if tile:
click.echo(' Saving Tile to {}...'.format(tile), nl=False)
model.model.predict_function._invoker.save(tile)
# Run a few more warmups -- this seems to improve the variability of the
# benchmark results.
if warmup:
click.echo(' Warming up...', nl=False)
model.run(warmup=True)
click.echo(' Running...')
exec_stop_watch.start_outer()
_, overrides = model.run()
exec_stop_watch.stop()
# Record stopwatch times
execution_duration = overrides.get('time', exec_stop_watch.elapsed())
exec_per_example = overrides.get('lastExecTimeInNS', execution_duration / params.examples)
compile_duration = compile_stop_watch.elapsed()
flops = overrides.get('flops', None)
gflops = None
if flops:
gflops = (flops / 10.0**9 / exec_per_example)
benchmark_results['GFLOP/s'] = gflops
benchmark_results['flops'] = flops
benchmark_results['compile_duration'] = compile_duration
benchmark_results['duration_per_example'] = exec_per_example
benchmark_results['tile_duration_per_example'] = exec_per_example
benchmark_results['examples'] = params.examples
benchmark_results['batch_size'] = params.batch_size
benchmark_results['model'] = params.network_name
benchmark_results['backend'] = params.backend_name
resstr = 'Example finished, elapsed: {:.3f}s (compile), {:.3f}s (execution)\n'.format(
compile_duration, execution_duration)
if gflops:
resstr += ', {:.2f} (GFLOP/s)'.format(gflops)
click.secho(resstr, fg='cyan', bold=True)
print(
"-----------------------------------------------------------------------------------------"
)
print("%-20s %-25s %-20s" % ("Network Name", "Inference Latency", "Time / FPS"))
print(
"-----------------------------------------------------------------------------------------"
)
print("%-20s %-25s %-20s" %
(params.network_name, "%.2f ms" % (exec_per_example * 1000), "%.2f ms / %.2f fps" %
(exec_per_example * 1000, 1.0 / exec_per_example)))
(golden_output, precision) = model.golden_output()
(correct, max_error, max_abs_error,
fail_ratio) = Runner._check_correctness(golden_output, model_output, precision.value)
benchmark_results['correct'] = correct
benchmark_results['max_error'] = float(max_error)
benchmark_results['max_abs_error'] = float(max_abs_error)
benchmark_results['fail_ratio'] = fail_ratio
if correct:
status = 'PASS'
else:
status = 'FAIL'
click.secho('Correctness: {}, max_error: {}, max_abs_error: {}, fail_ratio: {}'.format(
status, max_error, max_abs_error, fail_ratio),
fg='green' if status == 'PASS' else 'red')
except GoldenOutputNotAvailableError:
click.echo('Correctness: untested. Could not find golden data to compare against.')
except NoCorrectnessDesired:
pass
# Error handling
except Exception as ex:
# click.echo statements
click.echo(ex)
click.echo('Set --print-stacktraces to see the entire traceback')
# Record error
benchmark_results['exception'] = str(ex)
if print_stacktraces:
raise
finally:
reports.append((params, benchmark_results, model_output))
class Runner(object):
"""Runs an ML benchmark."""
def __init__(self, param_builder=ExplicitParamBuilder(1, 2, 1024), reporter=ConsoleReporter()):
"""Initializes the benchmark runner.
Args:
param_builder ((frontend, [str])->((Model, Params)...)): A callable that takes a
frontend and a list of network names, and returns a sequence of (Model, Params)
tuples describing the benchmarks to be run.
reporter (Reporter): Handles benchmark reports.
"""
self.verbose = False
self.result_dir = None
self.callgrind = False
self.param_builder = param_builder
self.print_stacktraces = False
self.reporter = reporter
self.warmup = True
self.timeout_secs = None
self.tile = None
def run(self, frontend, backend_name, network_names):
"""Runs a set of benchmarks.
Args:
frontend (Frontend): The interface to the ML frontend.
network_names ([str]): The names of the networks to benchmark.
"""
self.reporter.configuration['frontend'] = frontend.name
self.reporter.configuration['backend'] = backend_name
self.reporter.configuration['example_size'] = self.param_builder.params.examples
reports = []
try:
for params in self.param_builder(frontend, backend_name, network_names):
_inner_run(
reports,
frontend,
network_names,
params,
self.warmup,
self.callgrind,
self.print_stacktraces,
self.tile,
)
except KeyboardInterrupt:
click.secho("Aborting all runs...", fg="red")
finally:
# Reporter's gonna report
for report in reports:
self.reporter.report(*report)
self.reporter.complete()
return 0
@staticmethod
def _check_correctness(base_output, cur_output, precision):
# TODO: Parameterize relative and absolute error tolerance
correct = np.allclose(base_output, cur_output, rtol=precision, atol=1e-06)
# This duplicates allclose calculation for more detailed report
relative_error = ((precision * np.absolute(base_output - cur_output)) /
(1e-06 + precision * np.absolute(cur_output)))
max_error = np.amax(relative_error)
max_abs_error = np.amax(np.absolute(base_output - cur_output))
correct_entries = 0
incorrect_entries = 0
for x in np.nditer(relative_error):
if x > precision:
incorrect_entries += 1
else:
correct_entries += 1
try:
fail_ratio = incorrect_entries / float(correct_entries + incorrect_entries)
except ZeroDivisionError:
fail_ratio = 'Undefined'
return (correct, max_error, max_abs_error, fail_ratio)
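# Illustrative values (assumed): with precision=5e-04 (inference tolerance),
# base_output=[1.0, 2.0] and cur_output=[1.0, 2.1] would be reported as
# incorrect, with max_abs_error 0.1 and fail_ratio 0.5 (one of two entries out
# of tolerance).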
class Frontend(object):
"""An abstract interface to an ML frontend."""
__metaclass__ = ABCMeta
def __init__(self, network_names):
# Need to POPT this for pickling for windows
self.network_names = list(network_names)
self.configuration = {}
@property
def name(self):
raise NotImplementedError()
@property
def init_args(self):
return (self.network_names,)
@property
def blanket_batch_sizes(self):
return [1]
@abstractmethod
def model(self, params):
"""Returns a model built from the specified parameters.
Args:
params (Params): The parameters to use for the model.
"""
pass
class Model(object):
"""An abstract interface to an ML model."""
__metaclass__ = ABCMeta
@abstractmethod
def setup(self):
"""Prepares a model to run benchmarks.
This call gives an implementation to perform any setup/initialization actions that should
not be included in the benchmark, e.g. downloading data files and other filesystem
operations.
"""
pass
@abstractmethod
def compile(self):
"""Compiles a model for repeated use.
This call should be used by the implementation to construct an in-memory optimized form
of the model, suitable for repeated use. The implementation should not actually run
the model; when compile() returns, the benchmarking infrastructure will issue an explicit
call to run() to warm all relevant caches as part of the compilation measurement.
"""
pass
@abstractmethod
def run(self, once, warmup):
"""Runs the model, e.g. performing an inference or training batch.
Args:
once (Boolean): If True, runs with a number of examples equal to the batch size. This
is used in the very first run of a network for network compilation timing.
warmup (Boolean): If True, uses the warmup parameter to determine the number of
examples. This is used to prepare the graphics card and other variable-performance
elements for the main timing run, ensuring that they dedicate the necessary resources
to accurately time a heavy workload, without taking the time needed to run a full set
of examples.
Returns:
The model outputs - if inference, the inference output; if training, the training loss.
"""
pass
def validate(self):
"""An optional hook for the model to use to validate its parameters."""
pass
@abstractmethod
def golden_output(self):
"""The golden model output.
Returns:
(ndarray, Precision) - The golden model output.
Throws:
GoldenOutputNotAvailableError - If the golden output is unavailable for this model.
"""
|
StarcoderdataPython
|
3253645
|
from django.apps import AppConfig
from cmsplugin_text_ng.type_registry import register_type
class CmsPluginTextNgConfig(AppConfig):
name = 'cmsplugin_text_ng'
verbose_name = "Django Cms Plugin Text-NG"
def ready(self):
from cmsplugin_text_ng.models import TextNGVariableText
register_type('text', TextNGVariableText)
|
StarcoderdataPython
|
1688740
|
from mrjob.job import MRJob
class MRCcounter(MRJob):
def mapper(self, key, value):
for phrase in value.split('.')[:-1]:
yield 'phrase', 1
for word in phrase.split(' '):
yield 'word', 1
yield 'characters', len(word)
def reducer(self, key, values):
yield key, sum(values)
if __name__=='__main__':
MRCcounter.run()
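# Usage sketch (assumed invocation; "this_script.py" is a placeholder name):
# an mrjob script like this is typically run locally as
#   python this_script.py input.txt
# and emits aggregated counts for 'phrase', 'word' and 'characters'.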
|
StarcoderdataPython
|
1667408
|
# !/usr/bin/env python
# -*-coding:utf8 -*-
"""
Front controller
"""
class Dispatcher(object):
frontcontroller = None
module = "default"
controller = "default"
action = "index"
isDispatched = False
def __init__(self, frontcontroller):
"""
"""
self.frontcontroller = frontcontroller
def dispatch(self):
if self.isDispatched == False:
controllerFile = "modules.%s.controllers.%s" % (self.module,
self.controller)
controller = __import__(controllerFile)
controller = controller.getInstance()
eval("controller.%s.Action()" % self.action)
def getInstance(frontcontroller):
return Dispatcher(frontcontroller)
|
StarcoderdataPython
|
3332295
|
from __future__ import print_function
import itertools
import os
import sys
from functools import wraps
from ruskit import cli
NO_RETRY = -1
COLOR_MAP = {
"red": 31,
"green": 32,
"yellow": 33,
"blue": 34,
"purple": 35
}
def echo(*values, **kwargs):
end = kwargs.get("end", '\n')
color = kwargs.get("color", None)
bold = 0 if kwargs.get("bold", False) is False else 1
disable = kwargs.get("diable", False)
if disable:
return
msg = ' '.join(str(v) for v in values) + end
if not color or os.getenv("ANSI_COLORS_DISABLED") is not None:
sys.stdout.write(msg)
else:
color_prefix = "\033[{};{}m".format(bold, COLOR_MAP[color])
color_suffix = "\033[0m"
sys.stdout.write(color_prefix + msg + color_suffix)
sys.stdout.flush()
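# Usage sketch (assumed): echo("done", color="green", bold=True) writes
# "done\n" wrapped in the ANSI sequence "\033[1;32m...\033[0m", unless the
# ANSI_COLORS_DISABLED environment variable is set.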
def divide(n, m):
"""Divide integer n to m chunks
"""
avg = int(n / m)
remain = n - m * avg
data = list(itertools.repeat(avg, m))
for i in range(len(data)):
if not remain:
break
data[i] += 1
remain -= 1
return data
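# Illustrative example (assumed): divide(10, 3) returns [4, 3, 3]; the
# remainder is spread over the leading chunks one unit at a time.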
def spread(nodes, n):
"""Distrubute master instances in different nodes
{
"192.168.0.1": [node1, node2],
"192.168.0.2": [node3, node4],
"192.168.0.3": [node5, node6]
} => [node1, node3, node5]
"""
target = []
while len(target) < n and nodes:
for ip, node_group in list(nodes.items()):
if not node_group:
nodes.pop(ip)
continue
target.append(node_group.pop(0))
if len(target) >= n:
break
return target
class RuskitException(Exception):
pass
class InvalidNewNode(RuskitException):
pass
def check_new_nodes(new_nodes, old_nodes=None):
if old_nodes is None:
old_nodes = []
versions = set(n.info()['redis_version'] for n in old_nodes)
for instance in new_nodes:
info = instance.info()
if not info.get("cluster_enabled"):
raise InvalidNewNode("cluster not enabled")
if instance.cluster_info()["cluster_known_nodes"] != 1:
raise InvalidNewNode(
"node {}:{} belong to other cluster".format(
instance.host, instance.port))
if info.get("db0"):
raise InvalidNewNode("data exists in db0 of {}".format(instance))
versions.add(info['redis_version'])
if len(versions) != 1:
raise InvalidNewNode(
"multiple versions found: {}".format(list(versions)))
def timeout_argument(func):
from .cluster import ClusterNode
@cli.argument('--timeout', type=int)
@wraps(func)
def _wrapper(*arguments):
if len(arguments) == 1:
args = arguments[0]
elif len(arguments) == 2:
ctx, args = arguments
else:
raise Exception('invalid arguments')
ClusterNode.socket_timeout = args.timeout
return func(*arguments)
return _wrapper
|
StarcoderdataPython
|
3225098
|
<filename>python_tests/test_input.py
from __future__ import print_function
import dynet as dy
import numpy as np
input_vals = np.arange(81)
squared_norm = (input_vals**2).sum()
shapes = [(81,), (3, 27), (3, 3, 9), (3, 3, 3, 3)]
for i in range(4):
# Not batched
dy.renew_cg()
input_tensor = input_vals.reshape(shapes[i])
x = dy.inputTensor(input_tensor)
assert (x.dim()[0] == shapes[i] and x.dim()[1] == 1),"Dimension mismatch : {} : ({}, {})".format(x.dim(), shapes[i],1)
assert (x.npvalue() == input_tensor).all(), "Expression value different from initial value"
assert dy.squared_norm(x).scalar_value() == squared_norm, "Value mismatch"
# Batched
dy.renew_cg()
xb = dy.inputTensor(input_tensor, batched=True)
assert (xb.dim()[0] == (shapes[i][:-1] if i>0 else (1,)) and xb.dim()[1] == shapes[i][-1]), "Dimension mismatch with batch size : {} : ({}, {})".format(xb.dim(), (shapes[i][:-1] if i>0 else 1),shapes[i][-1])
assert (xb.npvalue() == input_tensor).all(), "Batched expression value different from initial value"
assert dy.sum_batches(dy.squared_norm(xb)).scalar_value() == squared_norm, "Value mismatch"
# Batched with list
dy.renew_cg()
xb = dy.inputTensor([np.asarray(x).transpose() for x in input_tensor.transpose()])
assert (xb.dim()[0] == (shapes[i][:-1] if i>0 else (1,)) and xb.dim()[1] == shapes[i][-1]) , "Dimension mismatch with batch size : {} : ({}, {})".format(xb.dim(), (shapes[i][:-1] if i>0 else 1),shapes[i][-1])
assert (xb.npvalue() == input_tensor).all(), "Batched expression value different from initial value"
assert dy.sum_batches(dy.squared_norm(xb)).scalar_value() == squared_norm, "Value mismatch"
caught = False
try:
dy.renew_cg()
x = dy.inputTensor("This is not a tensor", batched=True)
except TypeError:
caught = True
assert caught, "Exception wasn't caught"
|
StarcoderdataPython
|
3297995
|
<reponame>davidbarkhuizen/pytxt2html<gh_stars>1-10
source_file_path = '/home/mage/Downloads/c0.txt'
template = '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>{{title}}</title>
</head>
<body class='bodyStyle'>{{body}}</body>
<style>{{style}}</style>
</html>'''
br = '<br>'
title = 'c0'
style = '''
.bodyStyle {
background: black;
color: white;
margin-left: 20%;
margin-right: 20%;
font-size: large;
}
'''
src_file_string = None
with open(source_file_path) as source_file:
src_file_string = source_file.read()
src_file_string = src_file_string.replace('\r', '')
body = src_file_string.replace('\n\n', br)
body = body.replace('\t', br + ' '*3)
html = template.replace('{{body}}', body)
html = html.replace('{{title}}', title)
html = html.replace('{{style}}', style)
dest_file_path = source_file_path + '.html'
with open(dest_file_path, 'tw') as dest_file:
dest_file.write(html)
|
StarcoderdataPython
|
34468
|
<reponame>verycourt/Elections
#!/usr/bin/python
# encoding=utf8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import requests
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import warnings
import dateparser
import datetime
import time
import json
from json import encoder
encoder.FLOAT_REPR = lambda o: format(o, '.1f')
warnings.filterwarnings('ignore')
URL = "https://fr.wikipedia.org/wiki/Liste_de_sondages_sur_l'%C3%A9lection_pr%C3%A9sidentielle_fran%C3%A7aise_de_2017#2016"
path1 = "/var/www/html/1ertour/"
path2 = "/var/www/html/2ndtour/"
'''dicoTableMois = {4:"Janvier 2016", 5:"Février 2016", 6:"Mars 2016", 7:"Avril 2016", 8:"Mai 2016", 9:"Juin 2016",\
10:"Juillet 2016", 11:"Septembre 2016", 12:"Octobre 2016", 13:"Novembre 2016", 14:"Décembre 2016", \
15:"Janvier 2017", 16:"Février 2017"}
'''
dicoTableMois = {0:"Mars 2017", 1:"Février 2017", 2:"Janvier 2017"}
dico_couleurs_candidats = {u"<NAME>":"#CC0066", u"<NAME>":"#CC3399",u"<NAME>":"#008000", u"<NAME>":"#A9A9A9",
u"<NAME>":"#FF6600", u"<NAME>":"#3399FF", u"<NAME>":"#FF9999", u"<NAME>":"#CC0000",
u"<NAME>":"#FF0000", u"<NAME>":"#FF6699", u"<NAME>":"#000080", u"<NAME>":"#CC0033",
u"<NAME>":"#0000CC", u"<NAME>":"#66CC00", u"<NAME>":"#990033",
u"<NAME>":"#FF0066", u"<NAME>":"#339900"}
dico_candidat_parti = {u"<NAME>":"PS",u"<NAME>":"PS",u"<NAME>":"eelv",
u"<NAME>" : "En Marche",
u"<NAME>" : "MoDem", u"<NAME>":"Les Républicains",
u"<NAME>" : "PS", u"<NAME>" : "sp",
u"<NAME>" : "Parti de Gauche", u"<NAME>":"PS",u"<NAME>":"FN",
u"<NAME>":"lutte ouvriere",
u"<NAME>":"Debout La France", u"<NAME>":"empty", u"<NAME>":"NPA",
u"<NAME>":"ps", u"<NAME>":"eelv"}
def loadHTML(URL):
resultats = requests.get(URL)
return BeautifulSoup(resultats.text, 'html.parser')
def loadPandas(URL):
tables = loadHTML(URL).findAll("table")
dfF = pd.DataFrame()
dfFs = pd.DataFrame()
#Pour chaque table de wikipedia :
for idx, table in enumerate(tables) :
lignes = table.findAll("tr")
#On récupère le nom de chaque colonne :
colonnes = []
for elem in lignes[0].findAll("th"):
if elem.find("a") is None :
if elem.text != u'Autres candidats':
colonnes.append(elem.text)
else :
if(elem.find("a").text != ""):
colonnes.append(elem.find("a").text)
for elem in lignes[1].findAll("th"):
if elem.find("a") is not None :
colonnes.append(elem.find("a").text)
if len(colonnes) < 7:
for elem in lignes[2].findAll("th"):
a=3
colonnes.append(elem.text)
# Create a pandas DataFrame to store our tables:
df = pd.DataFrame(columns = colonnes)
#print(len(colonnes))
nbRowspan = 0
rowspan = []
rowspanMil = []
# For each row of our table:
for j,ligne in enumerate(lignes[2:]):
line = list(np.zeros(len(colonnes)))
line = ["/" for item in line]
# When some cells of the table span several rows
for i,item in enumerate(rowspanMil):
if item[0] > 1 :
line[item[1]] = item[2]
item[0] -= 1
for i,elem in enumerate(ligne.findAll("td")):
try:
while line[i] != "/":
i+=1
except:
continue
if elem.has_attr("rowspan"):
nbRowspan = int(elem["rowspan"])
if nbRowspan >1:
try :
rowspanMil.append([nbRowspan, i, float(elem.text.replace("%", "").replace(",",".").replace("<",""))])
except Exception as e :
rowspanMil.append([nbRowspan, i, (elem.text.replace("%", "").replace(",",".").replace("<",""))])
try:
line[i] = (float(elem.text.replace("%", "").replace(",",".").replace("<","")))
except Exception as e :
line[i] = (elem.text.replace("%", "").replace(",",".").replace("<",""))
if len(line) > len(colonnes) - 3 :
df.loc[j] = line
#print(df)
try :
df = df[df["Date"] != "/"]
except:
continue
if idx >= 0 and idx <= 2:
df["Date"] = df["Date"].map(lambda x : x.lower().replace(dicoTableMois[idx].lower()[:-5],""))
df["Date"] = df["Date"].map(lambda x : x+" "+dicoTableMois[idx])
#2ème tour :
if len(colonnes) < 7 :
dfFs = dfFs.append(df)
#1er tour :
elif idx >= 0 and idx <= 2:
dfF = dfF.append(df.ix[1:])
return (dfF, dfFs)
dfF, dfFs = loadPandas(URL)
#######################################################################
########################## First round ################################
#######################################################################
dfF = dfF.replace(to_replace=["-", "–"], value=" ")
dfF = dfF[dfF["Pourrait changer d'avis"]!="/"]
dfF["Pourrait changer d'avis"] = dfF["Pourrait changer d'avis"].map(lambda x : (str(x).split("[")[0].strip()))
dfF["Pourrait changer d'avis"] = dfF["Pourrait changer d'avis"].map(lambda x : 0 if x == "nan" or x == "" else float(x[:2]))
notCandidats = [u"Date", u"Sondeur", u"Échantillon"]
anciensCandidats = [u"<NAME>", u"<NAME>", u"<NAME>", u"<NAME>", u"Eva Joly", u"<NAME>", u"<NAME>", u"<NAME>"]
for col in dfF.columns:
if col not in notCandidats:
dfF[col] = dfF[col].map(lambda x: x if isinstance(x, float) else np.nan)
dfF2 = dfF
for col in anciensCandidats:
if col in dfF2.columns :
dfF2 = dfF2[dfF2[col].isnull()]
dfF2 = dfF2.drop(col, axis=1)
dfF2["Pourrait changer d'avis"] = dfF2["Pourrait changer d'avis"].map(lambda x : np.nan if x==0 else x)
#print(dfF)
dfF3 = dfF2
dfF3["Date"] = dfF3["Date"].map(lambda x : x.replace("1er", "1").replace("fév.", ""))
dfF3["Date"] = dfF3["Date"].map(lambda x : ' '.join(x.split()))
dfF3["Date"] = dfF3["Date"].map(lambda x : x if len(x.split(" ")) < 4 else " ".join(x.split(" ")[-3:]))
dfF3["Date"] = dfF3["Date"].map(lambda x : dateparser.parse(x).date())
dfF3 = dfF3.groupby(["Date"]).mean().reset_index()
dfF3 = dfF3.sort_values('Date', ascending=1)
def dateToString(date):
if len(str(date.month))==1:
month = "0"+str(date.month)
else :
month = str(date.month)
if len(str(date.day))==1:
day = "0"+str(date.day)
else :
day = str(date.day)
return str(date.year)+month+day
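# Example: dateToString(datetime.date(2017, 3, 5)) returns "20170305".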
dfF3 = dfF3.round(2)
dfF3 = dfF3[dfF3["Date"] > datetime.date(year=2017,month=1,day=1)]
dfF4 = dfF3
#dfF4 = dfF4.drop([u"<NAME>", u"<NAME>", u"<NAME>", u"<NAME>"], axis=1)
for col in dfF4.columns:
if col not in [u"<NAME>", u"<NAME>", u"Date", u"<NAME>",\
u"<NAME>", u"<NAME>", u"<NAME>"]:
dfF4 = dfF4.drop(col, axis=1)
dfF5 = dfF4
dfF4["date"] = dfF4["Date"].map(lambda x: dateToString(x))
dfF4 = dfF4.drop("Date", axis=1)
dfF4 = dfF4.set_index("date")
dfF4 = dfF4.dropna(axis=1, how='all')
dfF4 = dfF4.dropna(axis=0, how='all')
# --- To json --- #
dfF5 = dfF5.dropna(axis=1, how='all')
dfF5 = dfF5.dropna(axis=0, how='all')
dfF5 = dfF5.set_index("Date")
#dfF5.to_csv("table_agrege.csv")
dfF5 = pd.read_csv("table_agrege.csv", encoding="utf-8")
dfF5["Date"] = pd.to_datetime(dfF5["Date"])
dfF5 = dfF5.groupby(["Date", "date"]).mean().reset_index()
dfF5.set_index("Date", inplace=True)
print(dfF5)
idx = pd.date_range(min(dfF5.index), max(dfF5.index))
dfF5 = dfF5.reindex(idx, fill_value="null")
########################
# 6-day aggregates     #
########################
dfF5 = dfF5.drop("date", axis=1)
dfF5 = dfF5.replace(to_replace=["null"], value=np.nan)
diffDaysLast = (datetime.datetime.now()-max(dfF5.index).to_datetime()).days
#dfF5.index = dfF5.index.map(lambda x : x.to_datetime() + datetime.timedelta(days=diffDaysLast))
#dfF5 = dfF5.map(lambda x : )
lastsondages = max(dfF5.index)
to_add = (max(dfF5.index) - (max(dfF5.groupby(pd.TimeGrouper('6D')).mean().index))).days
dfF5.index = dfF5.index.map(lambda x : (x + datetime.timedelta(days=to_add)) )
dfF5 = dfF5.groupby(pd.TimeGrouper('6D')).mean()
#dfF5 = dfF5.index.map(lambda x : x.to_datetime() + datetime.timedelta(days=6))
for col in dfF5.columns :
dfF5[col] = np.round(dfF5[col], 1)
print(dfF5)
to_json = []
dico_sondage = {}
dico_sondage["id"] = 1
dico_sondage["refresh"] = {}
dfF5 = dfF5.fillna("null")
dico_sondage["refresh"]["last"] = time.mktime((lastsondages.to_datetime()).timetuple())
dico_sondage["refresh"]["dayInterval"] = 6
dico_sondage["title"] = "Agrégation des sondages pour le 1er tour de 11 instituts*"
dico_sondage["legende"] = "* Les données de ce graphique sont les moyennes des sondages d'intentions de vote de 11 instituts sur six jours. \
Plus précisément, pour chaque jour affiché, il fait la moyenne sur les six derniers jours. \
Les instituts sont : Ifop-Fiducial, OpinionWay, CSA, Future Thinking - SSI, BVA, Odoxa, Harris Interactive, TNS Sofres, Cevipof Ipsos-Sopra Steria, Elabe, Dedicated Research."
dico_sondage["unit"] = "%"
dico_sondage["dataset"] = []
for col in dfF5.columns:
# Keep the requested candidates:
dico_temp = {}
dico_temp["title"] = col
if col in dico_candidat_parti.keys():
dico_temp["subtitle"] = dico_candidat_parti[col]
else :
dico_temp["subtitle"] = ""
if col in dico_couleurs_candidats.keys():
dico_temp["color"] = dico_couleurs_candidats[col]
else :
dico_temp["color"] = "#ffffff"
dico_temp["data"] = list(dfF5[col])
dico_sondage["dataset"].append(dico_temp)
to_json.append(dico_sondage)
#dfF4.to_csv(path+"sondages1er.csv", sep="\t", encoding='utf-8')
#dfF4.to_json(path1+"pollster1.json", force_ascii=False)
dfF4.to_csv(path1+"sondages1er.csv", sep="\t", encoding='utf-8')
dfF4.to_csv(path1+"data.tsv", sep="\t", encoding='utf-8')
dfF4.to_csv("data.tsv", sep="\t", encoding='utf-8')
#print(dfF3[["<NAME>", "Date"]])
#######################################################################
########################### 2nd round #################################
#######################################################################
dfFs2 = dfFs
dfFs2["Date"] = dfFs2["Date"].map(lambda x : x if len(x)>5 else np.nan)
dfFs2 = dfFs2[dfFs2["Date"].notnull()]
dfFs2["Date"] = dfFs2["Date"].map(lambda x : x.replace(u"-", " ").replace(u"–", " "))
dfFs2["Date"] = dfFs2["Date"].map(lambda x : x if len(x.split(" ")) < 4 else " ".join(x.split(" ")[-3:]))
dfFs2["Date"] = dfFs2["Date"].map(lambda x : dateparser.parse(x).date())
#dfFs2 = dfFs2.set_index(["Date"])
#dfFs2.index = pd.to_datetime(dfFs2.index)
notCandidats = [u"Date", u"Sondeur", u"Échantillon"]
def dateToString2(date):
if len(str(date.month))==1:
month = "0"+str(date.month)
else :
month = str(date.month)
if len(str(date.day))==1:
day = "0"+str(date.day)
else :
day = str(date.day)
return day+"/"+month+"/"+str(date.year)
def getDuel(df, nom1, nom2):
return df[[nom1, nom2, "date"]].set_index("date").dropna(axis=0, how='any')
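# getDuel keeps only the two candidates' columns, indexes them by date and
# drops the rows where either score is missing (i.e. polls without that match-up).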
for col in dfFs2.columns:
if col not in notCandidats:
if col != "Abstention, blanc ou nul":
dfFs2[col] = dfFs2[col].map(lambda x: x if isinstance(x, float) else np.nan)
else :
dfFs2[col] = dfFs2[col].map(lambda x: x if isinstance(x, float) else 0)
#dfFs2["Date"] = pd.to_datetime(dfFs2["Date"])
#dfFs2 = dfFs2.groupby(dfFs2["Date"].dt.month).mean()
#dfFs2 = dfFs2.reset_index()
dfFs2["date"] = dfFs2["Date"].map(lambda x: dateToString2(x))
dfFs2 = dfFs2.drop("Date", axis=1)
getDuel(dfFs2, u"<NAME>", u"<NAME>").to_csv(path2+"mlpVSff.tsv", sep="\t", encoding="utf-8")
getDuel(dfFs2, u"<NAME>", u"<NAME>").to_csv(path2+"mlpVSmv.tsv", sep="\t", encoding='utf-8')
getDuel(dfFs2, u"<NAME>", u"<NAME>").to_csv(path2+"mlpVSem.tsv", sep="\t", encoding='utf-8')
getDuel(dfFs2, u"<NAME>", u"<NAME>").to_csv(path2+"emvsff.tsv", sep="\t", encoding="utf-8")
'''
getDuel(dfFs2, u"<NAME>", u"<NAME>").to_json(path2+"mlpVSmv.json", force_ascii=False)
getDuel(dfFs2, u"<NAME>", u"<NAME>").to_json(path2+"mlpVSff.json", force_ascii=False)
getDuel(dfFs2, u"<NAME>", u"<NAME>").to_json(path2+"mlpVSem.json", force_ascii=False)
getDuel(dfFs2, u"<NAME>", u"<NAME>").to_json(path2+"emvsff.json", force_ascii=False)
'''
dfFs2.to_csv(path2+"sondages2e.csv", encoding='utf-8')
#dfFs2.to_json(path2+"sondages2e.json")
print("Done")
|
StarcoderdataPython
|
50039
|
<reponame>Chicone/SSM-VPR<filename>ssmbase.py
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ssm.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1262, 783)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setUnifiedTitleAndToolBarOnMac(True)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setMinimumSize(QtCore.QSize(1200, 730))
self.centralwidget.setObjectName("centralwidget")
self.groupBox_7 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_7.setGeometry(QtCore.QRect(590, 299, 671, 411))
self.groupBox_7.setObjectName("groupBox_7")
self.queryGroupBox = QtWidgets.QGroupBox(self.centralwidget)
self.queryGroupBox.setGeometry(QtCore.QRect(590, 30, 224, 231))
self.queryGroupBox.setStyleSheet("")
self.queryGroupBox.setObjectName("queryGroupBox")
self.scrollArea = QtWidgets.QScrollArea(self.queryGroupBox)
self.scrollArea.setGeometry(QtCore.QRect(0, 20, 221, 211))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.scrollArea.sizePolicy().hasHeightForWidth())
self.scrollArea.setSizePolicy(sizePolicy)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 219, 209))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.queryImageLabel = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.queryImageLabel.setGeometry(QtCore.QRect(0, 0, 224, 224))
self.queryImageLabel.setText("")
self.queryImageLabel.setObjectName("queryImageLabel")
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.stage1_groupBox = QtWidgets.QGroupBox(self.centralwidget)
self.stage1_groupBox.setEnabled(True)
self.stage1_groupBox.setGeometry(QtCore.QRect(30, 30, 161, 411))
self.stage1_groupBox.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
self.stage1_groupBox.setStyleSheet("QGroupBox {\n"
" border: 1px solid gray;\n"
" border-radius: 9px;\n"
" margin-top: 0.5em;\n"
"}\n"
"\n"
"QGroupBox::title {\n"
" subcontrol-origin: margin;\n"
" left: 3px;\n"
" padding: 3 0 3 0;\n"
"}\n"
"\n"
"")
self.stage1_groupBox.setObjectName("stage1_groupBox")
self.imageSizeGroupBox_s1 = QtWidgets.QGroupBox(self.stage1_groupBox)
self.imageSizeGroupBox_s1.setGeometry(QtCore.QRect(10, 30, 141, 101))
self.imageSizeGroupBox_s1.setMaximumSize(QtCore.QSize(160, 16777215))
self.imageSizeGroupBox_s1.setStyleSheet("QGroupBox {\n"
" border: 1px solid gray;\n"
" border-radius: 9px;\n"
" margin-top: 0.5em;\n"
"}\n"
"\n"
"QGroupBox::title {\n"
" subcontrol-origin: margin;\n"
" left: 3px;\n"
" padding: 3 0 3 0;\n"
"}\n"
"")
self.imageSizeGroupBox_s1.setObjectName("imageSizeGroupBox_s1")
self.imageWidthLineEdit_s1 = QtWidgets.QLineEdit(self.imageSizeGroupBox_s1)
self.imageWidthLineEdit_s1.setGeometry(QtCore.QRect(60, 30, 41, 27))
self.imageWidthLineEdit_s1.setAutoFillBackground(False)
self.imageWidthLineEdit_s1.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.imageWidthLineEdit_s1.setReadOnly(False)
self.imageWidthLineEdit_s1.setPlaceholderText("")
self.imageWidthLineEdit_s1.setObjectName("imageWidthLineEdit_s1")
self.imageHeightLineEdit_s1 = QtWidgets.QLineEdit(self.imageSizeGroupBox_s1)
self.imageHeightLineEdit_s1.setGeometry(QtCore.QRect(60, 65, 41, 27))
self.imageHeightLineEdit_s1.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.imageHeightLineEdit_s1.setReadOnly(False)
self.imageHeightLineEdit_s1.setObjectName("imageHeightLineEdit_s1")
self.label = QtWidgets.QLabel(self.imageSizeGroupBox_s1)
self.label.setGeometry(QtCore.QRect(10, 30, 80, 30))
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(self.imageSizeGroupBox_s1)
self.label_2.setGeometry(QtCore.QRect(7, 70, 66, 16))
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(self.imageSizeGroupBox_s1)
self.label_3.setGeometry(QtCore.QRect(110, 30, 21, 30))
self.label_3.setObjectName("label_3")
self.label_4 = QtWidgets.QLabel(self.imageSizeGroupBox_s1)
self.label_4.setGeometry(QtCore.QRect(110, 70, 30, 20))
self.label_4.setObjectName("label_4")
self.groupBox_2 = QtWidgets.QGroupBox(self.stage1_groupBox)
self.groupBox_2.setGeometry(QtCore.QRect(10, 140, 141, 151))
self.groupBox_2.setMaximumSize(QtCore.QSize(160, 16777215))
self.groupBox_2.setStyleSheet("QGroupBox {\n"
" border: 1px solid gray;\n"
" border-radius: 9px;\n"
" margin-top: 0.5em;\n"
"}\n"
"\n"
"QGroupBox::title {\n"
" subcontrol-origin: margin;\n"
" left: 3px;\n"
" padding: 3 0 3 0;\n"
"}\n"
"")
self.groupBox_2.setObjectName("groupBox_2")
self.vggRadioButton = QtWidgets.QRadioButton(self.groupBox_2)
self.vggRadioButton.setGeometry(QtCore.QRect(20, 30, 115, 22))
self.vggRadioButton.setObjectName("vggRadioButton")
self.netvladRadioButton = QtWidgets.QRadioButton(self.groupBox_2)
self.netvladRadioButton.setGeometry(QtCore.QRect(20, 122, 115, 20))
self.netvladRadioButton.setObjectName("netvladRadioButton")
self.resnetRadioButton = QtWidgets.QRadioButton(self.groupBox_2)
self.resnetRadioButton.setGeometry(QtCore.QRect(20, 60, 115, 22))
self.resnetRadioButton.setObjectName("resnetRadioButton")
self.googlenetRadioButton = QtWidgets.QRadioButton(self.groupBox_2)
self.googlenetRadioButton.setGeometry(QtCore.QRect(20, 90, 115, 22))
self.googlenetRadioButton.setObjectName("googlenetRadioButton")
self.groupBox_8 = QtWidgets.QGroupBox(self.stage1_groupBox)
self.groupBox_8.setGeometry(QtCore.QRect(10, 300, 141, 100))
self.groupBox_8.setMaximumSize(QtCore.QSize(160, 16777215))
self.groupBox_8.setStyleSheet("QGroupBox {\n"
" border: 1px solid gray;\n"
" border-radius: 9px;\n"
" margin-top: 0.5em;\n"
"}\n"
"\n"
"QGroupBox::title {\n"
" subcontrol-origin: margin;\n"
" left: 3px;\n"
" padding: 3 0 3 0;\n"
"}\n"
"")
self.groupBox_8.setObjectName("groupBox_8")
self.pcaDimLineEdit_s1 = QtWidgets.QLineEdit(self.groupBox_8)
self.pcaDimLineEdit_s1.setGeometry(QtCore.QRect(80, 20, 51, 27))
self.pcaDimLineEdit_s1.setAutoFillBackground(False)
self.pcaDimLineEdit_s1.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.pcaDimLineEdit_s1.setReadOnly(False)
self.pcaDimLineEdit_s1.setPlaceholderText("")
self.pcaDimLineEdit_s1.setObjectName("pcaDimLineEdit_s1")
self.label_21 = QtWidgets.QLabel(self.groupBox_8)
self.label_21.setGeometry(QtCore.QRect(10, 20, 80, 30))
self.label_21.setObjectName("label_21")
self.label_22 = QtWidgets.QLabel(self.groupBox_8)
self.label_22.setGeometry(QtCore.QRect(10, 50, 80, 30))
self.label_22.setObjectName("label_22")
self.pcaSamplesLineEdit_s1 = QtWidgets.QLineEdit(self.groupBox_8)
self.pcaSamplesLineEdit_s1.setGeometry(QtCore.QRect(80, 50, 51, 27))
self.pcaSamplesLineEdit_s1.setAutoFillBackground(False)
self.pcaSamplesLineEdit_s1.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.pcaSamplesLineEdit_s1.setReadOnly(False)
self.pcaSamplesLineEdit_s1.setPlaceholderText("")
self.pcaSamplesLineEdit_s1.setObjectName("pcaSamplesLineEdit_s1")
self.groupBox_3 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_3.setGeometry(QtCore.QRect(30, 460, 531, 131))
self.groupBox_3.setStyleSheet("QGroupBox {\n"
" border: 1px solid gray;\n"
" border-radius: 9px;\n"
" margin-top: 0.5em;\n"
"}\n"
"\n"
"QGroupBox::title {\n"
" subcontrol-origin: margin;\n"
" left: 3px;\n"
" padding: 3 0 3 0;\n"
"}\n"
"\n"
"")
self.groupBox_3.setObjectName("groupBox_3")
self.btnLoadReference = QtWidgets.QPushButton(self.groupBox_3)
self.btnLoadReference.setGeometry(QtCore.QRect(20, 24, 101, 27))
self.btnLoadReference.setObjectName("btnLoadReference")
self.btnLoadTest = QtWidgets.QPushButton(self.groupBox_3)
self.btnLoadTest.setGeometry(QtCore.QRect(20, 60, 101, 27))
self.btnLoadTest.setObjectName("btnLoadTest")
self.btnLoadGroungTruth = QtWidgets.QPushButton(self.groupBox_3)
self.btnLoadGroungTruth.setGeometry(QtCore.QRect(20, 96, 101, 27))
self.btnLoadGroungTruth.setObjectName("btnLoadGroungTruth")
self.refOkLabel = QtWidgets.QLabel(self.groupBox_3)
self.refOkLabel.setGeometry(QtCore.QRect(130, 30, 391, 17))
font = QtGui.QFont()
font.setPointSize(9)
font.setItalic(True)
self.refOkLabel.setFont(font)
self.refOkLabel.setText("")
self.refOkLabel.setObjectName("refOkLabel")
self.testOkLabel = QtWidgets.QLabel(self.groupBox_3)
self.testOkLabel.setGeometry(QtCore.QRect(130, 66, 391, 17))
font = QtGui.QFont()
font.setPointSize(9)
font.setItalic(True)
self.testOkLabel.setFont(font)
self.testOkLabel.setText("")
self.testOkLabel.setObjectName("testOkLabel")
self.groundTruthOkLabel = QtWidgets.QLabel(self.groupBox_3)
self.groundTruthOkLabel.setGeometry(QtCore.QRect(130, 103, 391, 17))
font = QtGui.QFont()
font.setPointSize(9)
font.setItalic(True)
self.groundTruthOkLabel.setFont(font)
self.groundTruthOkLabel.setText("")
self.groundTruthOkLabel.setObjectName("groundTruthOkLabel")
self.groupBox_4 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_4.setGeometry(QtCore.QRect(30, 610, 161, 111))
self.groupBox_4.setStyleSheet("QGroupBox {\n"
" border: 1px solid gray;\n"
" border-radius: 9px;\n"
" margin-top: 0.5em;\n"
"}\n"
"\n"
"QGroupBox::title {\n"
" subcontrol-origin: margin;\n"
" left: 3px;\n"
" padding: 3 0 3 0;\n"
"}\n"
"\n"
"")
self.groupBox_4.setObjectName("groupBox_4")
self.btnCreateDB = QtWidgets.QPushButton(self.groupBox_4)
self.btnCreateDB.setGeometry(QtCore.QRect(30, 30, 91, 27))
self.btnCreateDB.setObjectName("btnCreateDB")
self.btnRecognition = QtWidgets.QPushButton(self.groupBox_4)
self.btnRecognition.setGeometry(QtCore.QRect(30, 70, 91, 27))
self.btnRecognition.setObjectName("btnRecognition")
self.textBrowser = QtWidgets.QTextBrowser(self.centralwidget)
self.textBrowser.setGeometry(QtCore.QRect(590, 320, 671, 401))
font = QtGui.QFont()
font.setFamily("Monospace")
font.setPointSize(8)
self.textBrowser.setFont(font)
self.textBrowser.setStyleSheet("QGroupBox {\n"
" border: 1px solid gray;\n"
" border-radius: 9px;\n"
" margin-top: 0.5em;\n"
"}\n"
"\n"
"QGroupBox::title {\n"
" subcontrol-origin: margin;\n"
" left: 3px;\n"
" padding: 3 0 3 0;\n"
"}\n"
"\n"
"")
self.textBrowser.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.textBrowser.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByKeyboard|QtCore.Qt.LinksAccessibleByMouse|QtCore.Qt.TextBrowserInteraction|QtCore.Qt.TextSelectableByKeyboard|QtCore.Qt.TextSelectableByMouse)
self.textBrowser.setObjectName("textBrowser")
self.outputGroupBox = QtWidgets.QGroupBox(self.centralwidget)
self.outputGroupBox.setGeometry(QtCore.QRect(810, 30, 224, 231))
self.outputGroupBox.setStyleSheet("")
self.outputGroupBox.setObjectName("outputGroupBox")
self.scrollArea_4 = QtWidgets.QScrollArea(self.outputGroupBox)
self.scrollArea_4.setGeometry(QtCore.QRect(0, 20, 221, 211))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.scrollArea_4.sizePolicy().hasHeightForWidth())
self.scrollArea_4.setSizePolicy(sizePolicy)
self.scrollArea_4.setWidgetResizable(True)
self.scrollArea_4.setObjectName("scrollArea_4")
self.scrollAreaWidgetContents_4 = QtWidgets.QWidget()
self.scrollAreaWidgetContents_4.setGeometry(QtCore.QRect(0, 0, 219, 209))
self.scrollAreaWidgetContents_4.setObjectName("scrollAreaWidgetContents_4")
self.outputImageLabel = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.outputImageLabel.setGeometry(QtCore.QRect(0, 0, 224, 224))
self.outputImageLabel.setText("")
self.outputImageLabel.setObjectName("outputImageLabel")
self.scrollArea_4.setWidget(self.scrollAreaWidgetContents_4)
self.referenceGroupBox = QtWidgets.QGroupBox(self.centralwidget)
self.referenceGroupBox.setGeometry(QtCore.QRect(1030, 30, 224, 231))
self.referenceGroupBox.setStyleSheet("")
self.referenceGroupBox.setObjectName("referenceGroupBox")
self.scrollArea_6 = QtWidgets.QScrollArea(self.referenceGroupBox)
self.scrollArea_6.setGeometry(QtCore.QRect(0, 20, 221, 211))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.scrollArea_6.sizePolicy().hasHeightForWidth())
self.scrollArea_6.setSizePolicy(sizePolicy)
self.scrollArea_6.setWidgetResizable(True)
self.scrollArea_6.setObjectName("scrollArea_6")
self.scrollAreaWidgetContents_6 = QtWidgets.QWidget()
self.scrollAreaWidgetContents_6.setGeometry(QtCore.QRect(0, 0, 219, 209))
self.scrollAreaWidgetContents_6.setObjectName("scrollAreaWidgetContents_6")
self.referenceImageLabel = QtWidgets.QLabel(self.scrollAreaWidgetContents_6)
self.referenceImageLabel.setGeometry(QtCore.QRect(1, 0, 224, 224))
self.referenceImageLabel.setText("")
self.referenceImageLabel.setObjectName("referenceImageLabel")
self.scrollArea_6.setWidget(self.scrollAreaWidgetContents_6)
self.groupBox_5 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_5.setGeometry(QtCore.QRect(400, 610, 161, 111))
self.groupBox_5.setStyleSheet("QGroupBox {\n"
" border: 1px solid gray;\n"
" border-radius: 9px;\n"
" margin-top: 0.5em;\n"
"}\n"
"\n"
"QGroupBox::title {\n"
" subcontrol-origin: margin;\n"
" left: 3px;\n"
" padding: 3 0 3 0;\n"
"}\n"
"\n"
"")
self.groupBox_5.setObjectName("groupBox_5")
self.btnPause = QtWidgets.QPushButton(self.groupBox_5)
self.btnPause.setGeometry(QtCore.QRect(30, 30, 91, 27))
self.btnPause.setObjectName("btnPause")
self.btnStop = QtWidgets.QPushButton(self.groupBox_5)
self.btnStop.setGeometry(QtCore.QRect(30, 70, 91, 27))
self.btnStop.setObjectName("btnStop")
self.stage2_groupBox = QtWidgets.QGroupBox(self.centralwidget)
self.stage2_groupBox.setGeometry(QtCore.QRect(210, 30, 161, 411))
self.stage2_groupBox.setStyleSheet("QGroupBox {\n"
" border: 1px solid gray;\n"
" border-radius: 9px;\n"
" margin-top: 0.5em;\n"
"}\n"
"\n"
"QGroupBox::title {\n"
" subcontrol-origin: margin;\n"
" left: 3px;\n"
" padding: 3 0 3 0;\n"
"}\n"
"\n"
"")
self.stage2_groupBox.setObjectName("stage2_groupBox")
self.groupBox_10 = QtWidgets.QGroupBox(self.stage2_groupBox)
self.groupBox_10.setGeometry(QtCore.QRect(10, 30, 141, 101))
self.groupBox_10.setMaximumSize(QtCore.QSize(160, 16777215))
self.groupBox_10.setStyleSheet("QGroupBox {\n"
" border: 1px solid gray;\n"
" border-radius: 9px;\n"
" margin-top: 0.5em;\n"
"}\n"
"\n"
"QGroupBox::title {\n"
" subcontrol-origin: margin;\n"
" left: 3px;\n"
" padding: 3 0 3 0;\n"
"}\n"
"\n"
"")
self.groupBox_10.setObjectName("groupBox_10")
self.imageWidthLineEdit_s2 = QtWidgets.QLineEdit(self.groupBox_10)
self.imageWidthLineEdit_s2.setGeometry(QtCore.QRect(60, 29, 41, 27))
self.imageWidthLineEdit_s2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.imageWidthLineEdit_s2.setReadOnly(False)
self.imageWidthLineEdit_s2.setPlaceholderText("")
self.imageWidthLineEdit_s2.setObjectName("imageWidthLineEdit_s2")
self.imageHeightLineEdit_s2 = QtWidgets.QLineEdit(self.groupBox_10)
self.imageHeightLineEdit_s2.setGeometry(QtCore.QRect(60, 63, 41, 30))
self.imageHeightLineEdit_s2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.imageHeightLineEdit_s2.setReadOnly(False)
self.imageHeightLineEdit_s2.setObjectName("imageHeightLineEdit_s2")
self.label_8 = QtWidgets.QLabel(self.groupBox_10)
self.label_8.setGeometry(QtCore.QRect(10, 30, 80, 30))
self.label_8.setObjectName("label_8")
self.label_9 = QtWidgets.QLabel(self.groupBox_10)
self.label_9.setGeometry(QtCore.QRect(7, 70, 66, 16))
self.label_9.setObjectName("label_9")
self.label_10 = QtWidgets.QLabel(self.groupBox_10)
self.label_10.setGeometry(QtCore.QRect(110, 30, 21, 30))
self.label_10.setObjectName("label_10")
self.label_13 = QtWidgets.QLabel(self.groupBox_10)
self.label_13.setGeometry(QtCore.QRect(110, 66, 30, 31))
self.label_13.setObjectName("label_13")
self.groupBox_9 = QtWidgets.QGroupBox(self.stage2_groupBox)
self.groupBox_9.setGeometry(QtCore.QRect(10, 140, 141, 151))
self.groupBox_9.setMaximumSize(QtCore.QSize(160, 16777215))
self.groupBox_9.setStyleSheet("QGroupBox {\n"
" border: 1px solid gray;\n"
" border-radius: 9px;\n"
" margin-top: 0.5em;\n"
"}\n"
"\n"
"QGroupBox::title {\n"
" subcontrol-origin: margin;\n"
" left: 3px;\n"
" padding: 3 0 3 0;\n"
"}\n"
"")
self.groupBox_9.setObjectName("groupBox_9")
self.vggRadioButton_s2 = QtWidgets.QRadioButton(self.groupBox_9)
self.vggRadioButton_s2.setGeometry(QtCore.QRect(20, 30, 115, 22))
self.vggRadioButton_s2.setObjectName("vggRadioButton_s2")
self.resnetRadioButton_s2 = QtWidgets.QRadioButton(self.groupBox_9)
self.resnetRadioButton_s2.setGeometry(QtCore.QRect(20, 60, 115, 22))
self.resnetRadioButton_s2.setObjectName("resnetRadioButton_s2")
self.googlenetRadioButton_s2 = QtWidgets.QRadioButton(self.groupBox_9)
self.googlenetRadioButton_s2.setGeometry(QtCore.QRect(20, 90, 115, 22))
self.googlenetRadioButton_s2.setObjectName("googlenetRadioButton_s2")
self.groupBox_11 = QtWidgets.QGroupBox(self.stage2_groupBox)
self.groupBox_11.setGeometry(QtCore.QRect(10, 300, 141, 100))
self.groupBox_11.setMaximumSize(QtCore.QSize(160, 16777215))
self.groupBox_11.setStyleSheet("QGroupBox {\n"
" border: 1px solid gray;\n"
" border-radius: 9px;\n"
" margin-top: 0.5em;\n"
"}\n"
"\n"
"QGroupBox::title {\n"
" subcontrol-origin: margin;\n"
" left: 3px;\n"
" padding: 3 0 3 0;\n"
"}\n"
"")
self.groupBox_11.setObjectName("groupBox_11")
self.pcaDimLineEdit_s2 = QtWidgets.QLineEdit(self.groupBox_11)
self.pcaDimLineEdit_s2.setGeometry(QtCore.QRect(80, 20, 51, 27))
self.pcaDimLineEdit_s2.setAutoFillBackground(False)
self.pcaDimLineEdit_s2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.pcaDimLineEdit_s2.setReadOnly(False)
self.pcaDimLineEdit_s2.setPlaceholderText("")
self.pcaDimLineEdit_s2.setObjectName("pcaDimLineEdit_s2")
self.label_23 = QtWidgets.QLabel(self.groupBox_11)
self.label_23.setGeometry(QtCore.QRect(10, 20, 80, 30))
self.label_23.setObjectName("label_23")
self.label_24 = QtWidgets.QLabel(self.groupBox_11)
self.label_24.setGeometry(QtCore.QRect(10, 50, 80, 30))
self.label_24.setObjectName("label_24")
self.pcaSamplesLineEdit_s2 = QtWidgets.QLineEdit(self.groupBox_11)
self.pcaSamplesLineEdit_s2.setGeometry(QtCore.QRect(80, 50, 51, 27))
self.pcaSamplesLineEdit_s2.setAutoFillBackground(False)
self.pcaSamplesLineEdit_s2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.pcaSamplesLineEdit_s2.setReadOnly(False)
self.pcaSamplesLineEdit_s2.setPlaceholderText("")
self.pcaSamplesLineEdit_s2.setObjectName("pcaSamplesLineEdit_s2")
self.groupBox_6 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_6.setGeometry(QtCore.QRect(220, 610, 161, 111))
self.groupBox_6.setStyleSheet("QGroupBox {\n"
" border: 1px solid gray;\n"
" border-radius: 9px;\n"
" margin-top: 0.5em;\n"
"}\n"
"\n"
"QGroupBox::title {\n"
" subcontrol-origin: margin;\n"
" left: 3px;\n"
" padding: 3 0 3 0;\n"
"}\n"
"\n"
"")
self.groupBox_6.setObjectName("groupBox_6")
self.btnSaveOutput = QtWidgets.QPushButton(self.groupBox_6)
self.btnSaveOutput.setGeometry(QtCore.QRect(30, 30, 91, 27))
self.btnSaveOutput.setObjectName("btnSaveOutput")
self.btnPRcurves = QtWidgets.QPushButton(self.groupBox_6)
self.btnPRcurves.setGeometry(QtCore.QRect(30, 70, 91, 27))
self.btnPRcurves.setObjectName("btnPRcurves")
self.groupBox_12 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_12.setGeometry(QtCore.QRect(390, 30, 171, 221))
self.groupBox_12.setStyleSheet("QGroupBox {\n"
" border: 1px solid gray;\n"
" border-radius: 9px;\n"
" margin-top: 0.5em;\n"
"}\n"
"\n"
"QGroupBox::title {\n"
" subcontrol-origin: margin;\n"
" left: 3px;\n"
" padding: 3 0 3 0;\n"
"}\n"
"")
self.groupBox_12.setObjectName("groupBox_12")
self.label_17 = QtWidgets.QLabel(self.groupBox_12)
self.label_17.setGeometry(QtCore.QRect(10, 65, 111, 30))
self.label_17.setObjectName("label_17")
self.frameTolLineEdit = QtWidgets.QLineEdit(self.groupBox_12)
self.frameTolLineEdit.setGeometry(QtCore.QRect(125, 65, 38, 27))
self.frameTolLineEdit.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.frameTolLineEdit.setObjectName("frameTolLineEdit")
self.candidatesLineEdit = QtWidgets.QLineEdit(self.groupBox_12)
self.candidatesLineEdit.setGeometry(QtCore.QRect(125, 31, 38, 27))
self.candidatesLineEdit.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.candidatesLineEdit.setObjectName("candidatesLineEdit")
self.label_18 = QtWidgets.QLabel(self.groupBox_12)
self.label_18.setGeometry(QtCore.QRect(10, 32, 81, 30))
self.label_18.setObjectName("label_18")
self.prevFramesLineEdit = QtWidgets.QLineEdit(self.groupBox_12)
self.prevFramesLineEdit.setGeometry(QtCore.QRect(125, 100, 38, 27))
self.prevFramesLineEdit.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.prevFramesLineEdit.setObjectName("prevFramesLineEdit")
self.label_5 = QtWidgets.QLabel(self.groupBox_12)
self.label_5.setGeometry(QtCore.QRect(10, 100, 111, 31))
self.label_5.setText("Frame corr. (FC)")
self.label_5.setObjectName("label_5")
self.gpuGroupBox = QtWidgets.QGroupBox(self.centralwidget)
self.gpuGroupBox.setGeometry(QtCore.QRect(390, 310, 171, 131))
self.gpuGroupBox.setStyleSheet("QGroupBox {\n"
" border: 1px solid gray;\n"
" border-radius: 9px;\n"
" margin-top: 0.5em;\n"
"}\n"
"\n"
"QGroupBox::title {\n"
" subcontrol-origin: margin;\n"
" left: 3px;\n"
" padding: 3 0 3 0;\n"
"}\n"
"")
self.gpuGroupBox.setObjectName("gpuGroupBox")
self.label_12 = QtWidgets.QLabel(self.gpuGroupBox)
self.label_12.setGeometry(QtCore.QRect(10, 80, 111, 31))
self.label_12.setText("Max. candidates ")
self.label_12.setObjectName("label_12")
self.gpuCandLineEdit = QtWidgets.QLineEdit(self.gpuGroupBox)
self.gpuCandLineEdit.setGeometry(QtCore.QRect(125, 80, 38, 27))
self.gpuCandLineEdit.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.gpuCandLineEdit.setObjectName("gpuCandLineEdit")
self.loadDbOnGpuCheckBox = QtWidgets.QCheckBox(self.gpuGroupBox)
self.loadDbOnGpuCheckBox.setGeometry(QtCore.QRect(10, 40, 141, 22))
self.loadDbOnGpuCheckBox.setObjectName("loadDbOnGpuCheckBox")
self.useGpuCheckBox = QtWidgets.QCheckBox(self.centralwidget)
self.useGpuCheckBox.setGeometry(QtCore.QRect(390, 280, 96, 22))
self.useGpuCheckBox.setObjectName("useGpuCheckBox")
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.menuBar = QtWidgets.QMenuBar(MainWindow)
self.menuBar.setGeometry(QtCore.QRect(0, 0, 1262, 25))
self.menuBar.setObjectName("menuBar")
self.menuAbout = QtWidgets.QMenu(self.menuBar)
self.menuAbout.setObjectName("menuAbout")
MainWindow.setMenuBar(self.menuBar)
self.actionOpen = QtWidgets.QAction(MainWindow)
self.actionOpen.setObjectName("actionOpen")
self.actionSpectrogram = QtWidgets.QAction(MainWindow)
self.actionSpectrogram.setObjectName("actionSpectrogram")
self.actionFrequency_Map = QtWidgets.QAction(MainWindow)
self.actionFrequency_Map.setObjectName("actionFrequency_Map")
self.actionSave_path = QtWidgets.QAction(MainWindow)
self.actionSave_path.setObjectName("actionSave_path")
self.actionAbout = QtWidgets.QAction(MainWindow)
self.actionAbout.setObjectName("actionAbout")
self.menuAbout.addAction(self.actionAbout)
self.menuBar.addAction(self.menuAbout.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Visual Place Recognition interface"))
self.groupBox_7.setTitle(_translate("MainWindow", "Console"))
self.queryGroupBox.setTitle(_translate("MainWindow", "Query"))
self.stage1_groupBox.setTitle(_translate("MainWindow", "STAGE I "))
self.imageSizeGroupBox_s1.setTitle(_translate("MainWindow", "Image size"))
self.imageWidthLineEdit_s1.setText(_translate("MainWindow", "224"))
self.imageHeightLineEdit_s1.setText(_translate("MainWindow", "224"))
self.label.setText(_translate("MainWindow", "Width"))
self.label_2.setText(_translate("MainWindow", "Height"))
self.label_3.setText(_translate("MainWindow", "px"))
self.label_4.setText(_translate("MainWindow", "px"))
self.groupBox_2.setTitle(_translate("MainWindow", "Method"))
self.vggRadioButton.setText(_translate("MainWindow", "VGG16"))
self.netvladRadioButton.setText(_translate("MainWindow", "NetVLAD"))
self.resnetRadioButton.setText(_translate("MainWindow", "ResNet"))
self.googlenetRadioButton.setText(_translate("MainWindow", "GoogLeNet"))
self.groupBox_8.setTitle(_translate("MainWindow", "PCA"))
self.pcaDimLineEdit_s1.setText(_translate("MainWindow", "125"))
self.label_21.setText(_translate("MainWindow", "Dim."))
self.label_22.setText(_translate("MainWindow", "Samples"))
self.pcaSamplesLineEdit_s1.setText(_translate("MainWindow", "10000"))
self.groupBox_3.setTitle(_translate("MainWindow", "Select files"))
self.btnLoadReference.setText(_translate("MainWindow", "Reference dir"))
self.btnLoadTest.setText(_translate("MainWindow", "Test dir"))
self.btnLoadGroungTruth.setText(_translate("MainWindow", "Ground truth"))
self.groupBox_4.setTitle(_translate("MainWindow", "Run"))
self.btnCreateDB.setText(_translate("MainWindow", "Create DB"))
self.btnRecognition.setText(_translate("MainWindow", "Recognition"))
self.textBrowser.setHtml(_translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Monospace\'; font-size:8pt; font-weight:400; font-style:normal;\">\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Ubuntu\'; font-size:11pt;\"><br /></p></body></html>"))
self.outputGroupBox.setTitle(_translate("MainWindow", "Recognized"))
self.referenceGroupBox.setTitle(_translate("MainWindow", "Ground truth"))
self.groupBox_5.setTitle(_translate("MainWindow", "Controls"))
self.btnPause.setText(_translate("MainWindow", "Pause"))
self.btnStop.setText(_translate("MainWindow", "Stop"))
self.stage2_groupBox.setTitle(_translate("MainWindow", "STAGE II "))
self.groupBox_10.setTitle(_translate("MainWindow", "Image size"))
self.imageWidthLineEdit_s2.setText(_translate("MainWindow", "224"))
self.imageHeightLineEdit_s2.setText(_translate("MainWindow", "224"))
self.label_8.setText(_translate("MainWindow", "Width"))
self.label_9.setText(_translate("MainWindow", "Height"))
self.label_10.setText(_translate("MainWindow", "px"))
self.label_13.setText(_translate("MainWindow", "px"))
self.groupBox_9.setTitle(_translate("MainWindow", "Method"))
self.vggRadioButton_s2.setText(_translate("MainWindow", "VGG16"))
self.resnetRadioButton_s2.setText(_translate("MainWindow", "ResNet"))
self.googlenetRadioButton_s2.setText(_translate("MainWindow", "GoogLeNet"))
self.groupBox_11.setTitle(_translate("MainWindow", "PCA"))
self.pcaDimLineEdit_s2.setText(_translate("MainWindow", "100"))
self.label_23.setText(_translate("MainWindow", "Dim."))
self.label_24.setText(_translate("MainWindow", "Samples"))
self.pcaSamplesLineEdit_s2.setText(_translate("MainWindow", "10000"))
self.groupBox_6.setTitle(_translate("MainWindow", "Output"))
self.btnSaveOutput.setText(_translate("MainWindow", "Save "))
self.btnPRcurves.setText(_translate("MainWindow", "PR curves"))
self.groupBox_12.setTitle(_translate("MainWindow", "Hyperparameters"))
self.label_17.setText(_translate("MainWindow", "Frame tol."))
self.frameTolLineEdit.setText(_translate("MainWindow", "2"))
self.candidatesLineEdit.setText(_translate("MainWindow", "50"))
self.label_18.setText(_translate("MainWindow", "Candidates"))
self.prevFramesLineEdit.setText(_translate("MainWindow", "2"))
self.gpuGroupBox.setTitle(_translate("MainWindow", "GPU Options"))
self.gpuCandLineEdit.setText(_translate("MainWindow", "2"))
self.loadDbOnGpuCheckBox.setText(_translate("MainWindow", "Load DB on GPU"))
self.useGpuCheckBox.setText(_translate("MainWindow", "Use GPU"))
self.menuAbout.setTitle(_translate("MainWindow", "Help"))
self.actionOpen.setText(_translate("MainWindow", "Open Video"))
self.actionSpectrogram.setText(_translate("MainWindow", "Spectrogram"))
self.actionFrequency_Map.setText(_translate("MainWindow", "Frequency Map"))
self.actionSave_path.setText(_translate("MainWindow", "Save to directory"))
self.actionAbout.setText(_translate("MainWindow", "About"))
|
StarcoderdataPython
|
1696677
|
from plenum.test.node_request.helper import sdk_ensure_pool_functional
from plenum.test.helper import sdk_send_random_and_check, sdk_send_random_requests, \
sdk_eval_timeout, sdk_get_and_check_replies
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
from plenum.test.pool_transactions.helper import sdk_add_new_nym, \
prepare_new_node_data, prepare_node_request, sdk_sign_and_send_prepared_request
from plenum.test.test_node import checkProtocolInstanceSetup
from plenum.test.view_change.helper import ensure_view_change
from plenum.test.conftest import tdirWithPoolTxns
from plenum.test.pool_transactions.conftest import sdk_node_theta_added
from plenum.test.primary_selection.conftest import sdk_one_node_added
from plenum.test.batching_3pc.conftest import tconf
def test_different_ledger_request_interleave(tconf, looper, txnPoolNodeSet,
sdk_one_node_added,
tdir,
tdirWithPoolTxns,
allPluginsPath,
sdk_pool_handle, sdk_wallet_client,
sdk_wallet_steward):
"""
Send pool and domain ledger requests such that they interleave, and do
view change in between and verify the pool is functional
"""
new_node = sdk_one_node_added
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client, 2)
ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
# Send domain ledger requests but don't wait for replies
requests = sdk_send_random_requests(looper, sdk_pool_handle,
sdk_wallet_client, 2)
# Add another node by sending pool ledger request
_, new_theta = sdk_node_theta_added(looper,
txnPoolNodeSet,
tdir,
tconf,
sdk_pool_handle,
sdk_wallet_steward,
allPluginsPath,
name='new_theta')
# Send more domain ledger requests but don't wait for replies
requests.extend(sdk_send_random_requests(looper, sdk_pool_handle,
sdk_wallet_client, 3))
# Do view change without waiting for replies
ensure_view_change(looper, nodes=txnPoolNodeSet)
checkProtocolInstanceSetup(looper, txnPoolNodeSet, retryWait=1)
# Make sure all requests are completed
total_timeout = sdk_eval_timeout(len(requests), len(txnPoolNodeSet))
sdk_get_and_check_replies(looper, requests, timeout=total_timeout)
sdk_ensure_pool_functional(looper, txnPoolNodeSet,
sdk_wallet_client, sdk_pool_handle)
new_steward_wallet, steward_did = sdk_add_new_nym(looper,
sdk_pool_handle,
sdk_wallet_steward,
'another_ste',
role='STEWARD')
# Send another pool ledger request (NODE) but don't wait for completion of
# request
next_node_name = 'next_node'
sigseed, verkey, bls_key, nodeIp, nodePort, clientIp, clientPort = \
prepare_new_node_data(tconf, tdir, next_node_name)
node_req = looper.loop.run_until_complete(
prepare_node_request(next_node_name, steward_did, clientIp,
clientPort, nodeIp, nodePort, bls_key,
sigseed))
sdk_wallet = (new_steward_wallet, steward_did)
request_couple = sdk_sign_and_send_prepared_request(looper, sdk_wallet,
sdk_pool_handle,
node_req)
# Send more domain ledger requests but don't wait for replies
request_couples = [request_couple, *
sdk_send_random_requests(looper, sdk_pool_handle,
sdk_wallet_client, 5)]
# Make sure all requests are completed
total_timeout = sdk_eval_timeout(len(request_couples), len(txnPoolNodeSet))
sdk_get_and_check_replies(looper, request_couples, timeout=total_timeout)
# Make sure pool is functional
sdk_ensure_pool_functional(looper, txnPoolNodeSet,
sdk_wallet_client, sdk_pool_handle)
|
StarcoderdataPython
|
3207127
|
<reponame>ngi-nix/liberaforms
"""
This file is part of LiberaForms.
# SPDX-FileCopyrightText: 2021 LiberaForms.org
# SPDX-License-Identifier: AGPL-3.0-or-later
"""
import os
import pytest
from liberaforms.models.user import User
from liberaforms.models.formuser import FormUser
from .utils import login, logout
class TestFormAdminSettings():
def test_disable_form(self, client, anon_client, users, forms):
""" Tests disable form
Test permissions
"""
form_id = forms['test_form'].id
url = f"/admin/forms/toggle-public/{form_id}"
response = anon_client.get(
url,
follow_redirects=True
)
assert response.status_code == 200
html = response.data.decode()
assert '<!-- site_index_page -->' in html
login(client, users['editor'])
response = client.get(
url,
follow_redirects=True
)
assert response.status_code == 200
html = response.data.decode()
assert '<!-- site_index_page -->' in html
logout(client)
login(client, users['admin'])
response = client.get(
url,
follow_redirects=True
)
assert response.status_code == 200
html = response.data.decode()
assert '<!-- inspect_form_page -->' in html
assert forms['test_form'].adminPreferences['public'] == False
assert forms['test_form'].is_public() == False
response = anon_client.get(
f"/{forms['test_form'].slug}",
follow_redirects=True
)
assert response.status_code == 404
html = response.data.decode()
assert '<!-- page_not_found_404 -->' in html
forms['test_form'].adminPreferences['public'] = True
forms['test_form'].save()
def test_change_author(self, client, anon_client, forms, users):
""" Tests nonexistent username and valid username
Tests permission
"""
form_id = forms['test_form'].id
url = f"/admin/forms/change-author/{form_id}"
response = anon_client.get(
url,
follow_redirects=True
)
assert response.status_code == 200
assert '<!-- site_index_page -->' in response.data.decode()
login(client, users['editor'])
response = client.get(
url,
follow_redirects=True
)
assert response.status_code == 200
assert '<!-- site_index_page -->' in response.data.decode()
logout(client)
login(client, users['admin'])
response = client.get(
url,
follow_redirects=True
)
assert response.status_code == 200
assert '<!-- change_author_page -->' in response.data.decode()
initial_author = forms['test_form'].author
assert FormUser.find(form_id=forms['test_form'].id,
user_id=forms['test_form'].author_id) != None
nonexistent_username = "nonexistent"
response = client.post(
url,
data = {
"old_author_username": initial_author.username,
"new_author_username": nonexistent_username,
},
follow_redirects=False
)
assert response.status_code == 200
html = response.data.decode()
assert '<!-- change_author_page -->' in html
assert '<div class="warning flash_message">' in html
# TODO: Test changing the author to the already existing author
#assert g.current.username != initial_author.username
# we use the dummy_1 user to be the new author
dummy_1 = User.find(username=users['dummy_1']['username'])
response = client.post(
url,
data = {
"old_author_username": initial_author.username,
"new_author_username": dummy_1.username,
},
follow_redirects=True
)
assert response.status_code == 200
html = response.data.decode()
assert '<div class="success flash_message">' in html
assert '<!-- inspect_form_page -->' in html
assert forms['test_form'].author.id == dummy_1.id
assert FormUser.find(form_id=forms['test_form'].id,
user_id=dummy_1.id) != None
# reset author to initial value to continue testing
response = client.post(
url,
data = {
"old_author_username": dummy_1.username,
"new_author_username": initial_author.username,
}
)
# Note: dummy_1 is now an editor
|
StarcoderdataPython
|
1733603
|
<filename>styn/tests/build_scripts/annotation_misuse_2.py
from styn import chore
@chore()
def clean():
pass
# Should be marked as chore.
def html():
pass
# References a non chore.
@chore(clean, html)
def android():
pass
|
StarcoderdataPython
|
141425
|
import dash
from dash import html
from dash import dcc
from dash import dash_table
from dash.dependencies import Input, Output
import plotly.graph_objs as go
import pandas as pd
import datetime
import mysql.connector
import os,io
#==================================================================================================
# Fill out your info about TwitterAPI and AWS(if you want to run it on EC2)
config = {***}
mysql_info = {'DB_HOST':config['DB_HOST'] , 'DB_NAME': config['DB_NAME'],
'DB_USER': config['DB_USER'], 'DB_PASSWORD': config['DB_PASSWORD'] , 'CHARSET': config['CHARSET']}
mydb = mysql.connector.connect(host=mysql_info['DB_HOST'],port=3306,
db=mysql_info['DB_NAME'],user=mysql_info['DB_USER'],
passwd=<PASSWORD>['<PASSWORD>'],charset=mysql_info['CHARSET'])
sql = "SELECT * FROM all_data"
cursor = mydb.cursor()
cursor.execute(sql)
rows = cursor.fetchall()
mydb.close()
cursor.close()
df = pd.DataFrame(list(rows))
columns = {0: 'tweet_id', 1: 'day_id', 2:'text', 3:'created_at', 4:'user_friends',
5:'user_followers', 6:'retweet_count', 7:'favorite_count', 8:'AWS', 9:'happy',
10:'sad', 11:'disgust', 12:'angry', 13:'surprise', 14:'day', 15:'risk'}
df = df.rename(columns = columns)
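# Note: the integer keys above map the positional columns returned by "SELECT * FROM all_data"
# to names, so this assumes the table's columns appear in exactly that order.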
#==================================================================================================
#=================================================================================================================
# Full dataset
#df = pd.read_csv('tweet_data.csv')
today = datetime.date.today()
yesterday = today - datetime.timedelta(1)
day_yesterday = df[df['day_id'].dt.date == yesterday]
day_today = df[df['day_id'].dt.date == today]
day_yesterday = day_yesterday.copy()
day_today = day_today.copy()
# Convert to datetime
day_yesterday['day_id'] = pd.to_datetime(day_yesterday['day_id'])
day_today['day_id'] = pd.to_datetime(day_today['day_id'])
# Total number of tweets today
day_yesterday_TweetCou = len(day_yesterday)
day_today_TweetCou = len(day_today)
# Number of distinct users tweeting today
day_yesterday_TweetNum = len(day_yesterday.groupby(['tweet_id']).count())
day_today_TweetNum = len(day_today.groupby(['tweet_id']).count())
# Highest retweet count today
day_yesterday_ReTweetMax = day_yesterday['retweet_count'].max()
day_today_ReTweetMax = day_today['retweet_count'].max()
# Highest favorite count today
day_yesterday_FavMax = day_yesterday['favorite_count'].max()
day_today_FavMax = day_today['favorite_count'].max()
# Tweet positive/negative pie chart
# Sentiment counts (per row of the AWS classification column)
day_today_po = day_today['AWS'][day_today['AWS'] == 'POSITIVE'].count()
day_today_ne = day_today['AWS'][day_today['AWS'] == 'NEGATIVE'].count()
colors = ['orange', '#dd1e35']
fig1 = go.Figure()
fig1.add_trace(go.Pie(labels=['POSITIVE', 'NEGATIVE'],
values=[day_today_po,day_today_ne],
marker=dict(colors=colors),
hoverinfo='label+value+percent',
textinfo='label+value',
#hole=7,
#rotation=45
))
#=================================================================================================================
layout = go.Layout(
title={'text': 'Total Cases: ',
'y': 0.93,
'x': 0.5,
'xanchor': 'center',
'yanchor': 'top'},
titlefont={'color': 'white',
'size': 20},
font=dict(family='sans-serif',
color='white',
size=12),
hovermode='closest',
paper_bgcolor='#1f2c56',
plot_bgcolor='#1f2c56',
legend={'orientation': 'h',
'bgcolor': '#1f2c56',
'xanchor': 'center', 'x': 0.5, 'y': -0.7}
)
data1 = go.Pie(labels=['POSITIVE', 'NEGATIVE'],
values=[day_today_po,day_today_ne],
marker=dict(colors=colors),
hoverinfo='label+value+percent',
textinfo='label+value',
#hole=7,
#rotation=45
)
fig1 = go.Figure(data=data1, layout=layout)
df['day_id'] = pd.to_datetime(df['day_id'])
df_co = df.groupby('day_id', as_index=False).count()
data2 = go.Bar(x=df_co['day_id'],
y=df_co['text'],
name='Tweet推移',
marker=dict(color='orange'),
hoverinfo='text')
fig2 = go.Figure(data=data2, layout=layout)
#=================================================================================================================
def generate_table(dataframe, max_rows=10):
return html.Table(
# Header
[html.Tr([html.Th(col) for col in dataframe.columns])] +
# Body
[html.Tr([
html.Td(dataframe.iloc[i][col]) for col in dataframe.columns
]) for i in range(min(len(dataframe), max_rows))]
)
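# Note: generate_table renders the first max_rows rows of a DataFrame as an html.Table;
# the dashboard below uses dash_table.DataTable instead, so this helper is only
# referenced in the commented-out block further down.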
rank = day_today.sort_values('risk', ascending=False)[:5]
rank = rank[['text', 'risk']]
#=================================================================================================================
app = dash.Dash(__name__, title='Falminf App',
assets_folder='static',
assets_url_path='static'
,meta_tags=[{"name": "viewport", "content": "width=device-width"}])
app.layout = html.Div([
html.Div([
html.Div([
html.Img(src=app.get_asset_url('log3.jpg'),
id = 'corona-image',
style={'height': '60px',
'width': 'auto',
'margin-bottom': '25px'})
], className='one-third column'),
html.Div([
html.Div([
html.H3('学研SNS動向', style={'margin-bottom': '0px', 'color': 'white'}),
html.H5('炎上リスク監視', style={'margin-bottom': '0px', 'color': 'white'})
])
], className='one-half column', id = 'title'),
html.Div([
html.H6('Last Updated: ' + str(day_today['day_id'].iloc[1].strftime('%B, %d, %Y')) ,
style={'color': 'orange'})
], className='one-third column', id = 'title1')
], id = 'header', className= 'row flex-display', style={'margin-bottom': '25px'}),
html.Div([
html.Div([
html.H6(children='本日のツイート数',
style={'textAlign': 'center',
'color': 'white'}),
html.P(f"{day_today_TweetCou:,.0f}",
style={'textAlign': 'center',
'color': 'orange',
'fontSize': 40}),
html.P('new: ' + f"{day_today_TweetCou - day_yesterday_TweetCou:,.0f}"
+ ' (' + str(round(((day_today_TweetCou - day_yesterday_TweetCou) /
(day_today_TweetCou)) *100, 2)) + '%)',
style={'textAlign': 'center',
'color': 'orange',
'fontsize': 15,
'margin-top': '-18px'})
], className='card_container three columns'),
html.Div([
html.H6(children='ツイート人数',
style={'textAlign': 'center',
'color': 'white'}),
html.P(f"{day_today_TweetNum:,.0f}",
style={'textAlign': 'center',
'color': '#dd1e35',
'fontSize': 40}),
html.P('new: ' + f"{day_today_TweetNum - day_yesterday_TweetNum:,.0f}"
+ ' (' + str(round(((day_today_TweetNum - day_yesterday_TweetNum) /
day_today_TweetNum) * 100, 2)) + '%)',
style={'textAlign': 'center',
'color': '#dd1e35',
'fontSize': 15,
'margin-top': '-18px'})
], className='card_container three columns'),
html.Div([
html.H6(children='最高リツート数',
style={'textAlign': 'center',
'color': 'white'}),
html.P(f"{day_today_ReTweetMax:,.0f}",
style={'textAlign': 'center',
'color': 'green',
'fontSize': 40}),
html.P('new: ' + f"{day_today_ReTweetMax - day_yesterday_ReTweetMax:,.0f}"
+ ' (' + str(round(((day_today_ReTweetMax - day_yesterday_ReTweetMax) /
day_today_ReTweetMax) * 100, 2)) + '%)',
style={'textAlign': 'center',
'color': 'green',
'fontSize': 15,
'margin-top': '-18px'})
], className='card_container three columns'),
html.Div([
html.H6(children='最高ファボ数',
style={'textAlign': 'center',
'color': 'white'}),
html.P(f"{day_today_FavMax:,.0f}",
style={'textAlign': 'center',
'color': '#e55467',
'fontSize': 40}),
html.P('new: ' + f"{day_today_FavMax - day_yesterday_FavMax:,.0f}"
+ ' (' + str(round(((day_today_FavMax - day_yesterday_FavMax) /
day_today_FavMax) * 100, 2)) + '%)',
style={'textAlign': 'center',
'color': '#e55467',
'fontSize': 15,
'margin-top': '-18px'})
], className='card_container three columns'),
], className='row flex display'),
## Describe the select country control in detail
## Describe the select country design (style.css) in detail.
html.Div([
html.Div([
dcc.Graph(id = 'pie_chart',
figure=fig1,
config={'displayModeBar': 'hover'},
style={'background-color': 'rgba(255,0,0,0.5)'})
], className='create_container four columns'),
html.Div([
dcc.Graph(id = 'line_chart',
figure=fig2,
config={'displayModeBar': 'hover'})
], className='create_container five columns'),
],className='row flex-display'),
#html.Div(children=[
# html.H6(children='リスク評価ランキング',
# style={'textAlign': 'center','color': 'white'}),
# generate_table(day4)
#])
dash_table.DataTable(
id='table',
columns=[ {"name": i, "id": i} for i in rank.columns],
data=rank.to_dict("rows")
)
], id = 'mainContainer',style={'display': 'flex', 'flex-direction': 'column'})
server = app.server
if __name__ == '__main__':
app.run_server(debug=True)
|
StarcoderdataPython
|
119322
|
<reponame>amirmallaei/recipe-app-api
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
CREATE_USER_URL = reverse('user:create')
TOKEN_URL = reverse('user:token')
ME_URL = reverse('user:me')
def create_user(**params):
return get_user_model().objects.create_user(**params)
class PublicUserApiTests(TestCase):
"""test the user API public"""
def setUp(self):
self.client = APIClient()
def test_create_valid_user_success(self):
"""test creating user with valud payload successful"""
payload = {
'email': '<EMAIL>',
'password': '<PASSWORD>',
'name': 'Amir test'
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(**res.data)
self.assertTrue(user.check_password(payload['password']))
self.assertNotIn('password', res.data)
def test_user_exists(self):
"""check if user exist"""
payload = {'email': '<EMAIL>', 'password': '<PASSWORD>', }
create_user(**payload)
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_password_too_short(self):
"""password must be at least 5 char"""
payload = {'email': '<EMAIL>', 'password': 'ts', }
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
user_exists = get_user_model().objects.filter(
email=payload['email']).exists()
self.assertFalse(user_exists)
def test_create_token_for_user(self):
"""test a token is created for th user"""
payload = {'email': '<EMAIL>', 'password': '<PASSWORD>'}
create_user(**payload)
res = self.client.post(TOKEN_URL, payload)
self.assertIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_create_token_invalid_credintial(self):
"""test that token is not created if invalid credintioals are given"""
create_user(email='<EMAIL>', password="<PASSWORD>")
payload = {'email': '<EMAIL>', 'password': '<PASSWORD>'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_no_user(self):
"""test that token is not created if user does not exist"""
payload = {'email': '<EMAIL>', 'password': '<PASSWORD>'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_missing_fields(self):
"""password are required"""
res = self.client.post(TOKEN_URL, {'email': 'amir', 'password': ''})
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_user_unathorized(self):
"""test that authentication is required for user"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUserApiTests(TestCase):
"""test api requests that requre authentication"""
def setUp(self):
self.user = create_user(
email='<EMAIL>',
password='<PASSWORD>',
name='amir'
)
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def test_retrieve_profile_success(self):
"""test to retrieve profile for logged in users"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, {
'name': self.user.name,
'email': self.user.email,
})
def test_post_not_allowed(self):
"""test that post is not allowed on the me url"""
res = self.client.post(ME_URL, {})
self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_update_user_profile(self):
"""test updating profile updating for logged in user"""
payload = {'name': 'new name', 'password': '<PASSWORD>'}
res = self.client.patch(ME_URL, payload)
self.user.refresh_from_db()
self.assertEqual(self.user.name, payload['name'])
self.assertTrue(self.user.check_password(payload['password']))
self.assertEqual(res.status_code, status.HTTP_200_OK)
|
StarcoderdataPython
|
1743316
|
<reponame>pyccel/psydac
# coding: utf-8
import sys
import os
import importlib
import inspect
import numpy as np
from functools import lru_cache
from mpi4py import MPI
from sympy import Mul, Tuple
from sympy import Mod as sy_Mod, Abs, Range, Symbol, Max
from sympy import Function, Integer
from psydac.pyccel.ast.core import Variable, IndexedVariable
from psydac.pyccel.ast.core import For
from psydac.pyccel.ast.core import Slice, String
from psydac.pyccel.ast.datatypes import NativeInteger
from psydac.pyccel.ast.core import ValuedArgument
from psydac.pyccel.ast.core import Assign
from psydac.pyccel.ast.core import AugAssign
from psydac.pyccel.ast.core import Product
from psydac.pyccel.ast.core import FunctionDef
from psydac.pyccel.ast.core import FunctionCall
from psydac.pyccel.ast.core import Import
from psydac.api.ast.nodes import FloorDiv
from psydac.api.ast.utilities import variables, math_atoms_as_str
from psydac.api.ast.utilities import build_pyccel_types_decorator
from psydac.fem.splines import SplineSpace
from psydac.fem.tensor import TensorFemSpace
from psydac.fem.vector import ProductFemSpace
from psydac.api.ast.basic import SplBasic
from psydac.api.printing import pycode
from psydac.api.settings import PSYDAC_BACKENDS, PSYDAC_DEFAULT_FOLDER
from psydac.api.utilities import mkdir_p, touch_init_file, random_string, write_code
#==============================================================================
def variable_to_sympy(x):
if isinstance(x, Variable) and isinstance(x.dtype, NativeInteger):
x = Symbol(x.name, integer=True)
return x
#==============================================================================
def compute_diag_len(p, md, mc, return_padding=False):
n = ((np.ceil((p+1)/mc)-1)*md).astype('int')
ep = np.minimum(0, n-p)
n = n-ep + p+1
if return_padding:
return n.astype('int'), (-ep).astype('int')
else:
return n.astype('int')
def Mod(a,b):
if b == 1:
return Integer(0)
else:
return sy_Mod(a,b)
@lru_cache(maxsize=32)
class LinearOperatorDot(SplBasic):
def __new__(cls, ndim, **kwargs):
return SplBasic.__new__(cls, 'dot', name='lo_dot', prefix='lo_dot')
def __init__(self, ndim, **kwargs):
backend = dict(kwargs.pop('backend'))
code = self._initialize(ndim, backend=backend, **kwargs)
self._arguments = dict((str(a.name),a) for a in code.arguments)
self._code = code
self._folder = self._initialize_folder()
self._generate_code(backend=backend)
self._compile(backend=backend)
@property
def func(self):
return self._func
@property
def arguments(self):
return self._arguments
@property
def code(self):
return self._code
@property
def folder(self):
return self._folder
def _initialize(self, ndim, **kwargs):
nrows = kwargs.pop('nrows', variables('n1:%s'%(ndim+1), 'int'))
nrows_extra = kwargs.pop('nrows_extra', variables('ne1:%s'%(ndim+1), 'int'))
starts = kwargs.pop('starts', variables('s1:%s'%(ndim+1), 'int'))
indices1 = variables('i1:%s'%(ndim+1), 'int')
bb = variables('b1:%s'%(ndim+1), 'int')
indices2 = variables('k1:%s'%(ndim+1), 'int')
v = variables('v','real')
x, out = variables('x, out','real',cls=IndexedVariable, rank=ndim)
mat = variables('mat','real',cls=IndexedVariable, rank=2*ndim)
backend = kwargs.pop('backend', None)
pads = kwargs.pop('pads')
gpads = kwargs.pop('gpads')
cm = kwargs.pop('cm')
dm = kwargs.pop('dm')
ndiags, _ = list(zip(*[compute_diag_len(p,mj,mi, return_padding=True) for p,mi,mj in zip(pads,cm,dm)]))
inits = [Assign(b,p*m+p+1-n-Mod(s,m)) for b,p,m,n,s in zip(bb, gpads, dm, ndiags, starts) if not isinstance(p*m+p+1-n-Mod(s,m),(int,np.int64, Integer))]
bb = [b if not isinstance(p*m+p+1-n-Mod(s,m),(int,np.int64, Integer)) else p*m+p+1-n-Mod(s,m) for b,p,m,n,s in zip(bb, gpads, dm, ndiags, starts)]
body = []
ranges = [Range(variable_to_sympy(n)) for n in ndiags]
target = Product(*ranges)
diff = [variable_to_sympy(gp-p) for gp,p in zip(gpads, pads)]
v1 = x[tuple(b-d+FloorDiv((i1+Mod(s,mj)),mi)*mj + i2 for i1,mi,mj,b,s,d,i2 in zip(indices1,cm,dm,bb,starts,diff,indices2))]
v2 = mat[tuple(i+m*j for i,j,m in zip(indices1,gpads,cm))+ tuple(indices2)]
v3 = out[tuple(i+m*j for i,j,m in zip(indices1,gpads,cm))]
body = [AugAssign(v,'+' ,Mul(v2, v1))]
# Decompose fused loop over Cartesian product of multiple ranges
# into nested loops, each over a single range
if ndim > 1:
for i,j in zip(indices2[::-1], target.args[::-1]):
body = [For(i,j, body)]
else:
body = [For(indices2, target, body)]
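        # Illustrative sketch of the decomposition above (hypothetical ndim == 2 case):
        # with indices2 = (k1, k2) and ranges (Range(n1), Range(n2)), the reversed zip
        # wraps the body as
        #     For(k1, Range(n1), [For(k2, Range(n2), body)])
        # so the first index ends up as the outermost loop and the last as the innermost.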
body.insert(0,Assign(v, 0.0))
body.append(Assign(v3,v))
ranges = [Range(variable_to_sympy(i)) for i in nrows]
target = Product(*ranges)
# Decompose fused loop over Cartesian product of multiple ranges
# into nested loops, each over a single range
if ndim > 1:
for i,j in zip(indices1[::-1], target.args[::-1]):
body = [For(i,j, body)]
else:
body = [For(indices1, target, body)]
nrowscopy = list(nrows).copy()
nrows = list(nrows)
for dim in range(ndim):
if nrows_extra[dim] == 0:continue
v1 = [b-d+FloorDiv((i1+(nrows[dim] if dim==x else 0)+Mod(s,mj)),mi)*mj + i2 for x,i1,mi,mj,b,s,d,i2 in zip(range(ndim), indices1,cm,dm,bb,starts,diff,indices2)]
v2 = [i+m*j for i,j,m in zip(indices1,gpads,cm)]
v2[dim] += nrows[dim]
v3 = v2
v1 = x[tuple(v1)]
v2 = mat[tuple(v2)+ indices2]
v3 = out[tuple(v3)]
rows = list(nrows)
rows[dim] = nrows_extra[dim]
ranges = [variable_to_sympy(n) for n in ndiags]
ranges[dim] -= variable_to_sympy(indices1[dim]) + 1
ranges = [ind if i>=dim else ind - Max(0, variable_to_sympy(d1)+1-variable_to_sympy(r)) for i,(ind,d1,r) in enumerate(zip(ranges, indices1, nrowscopy)) ]
ranges = [Range(i) for i in ranges]
target = Product(*ranges)
for_body = [AugAssign(v, '+',Mul(v1,v2))]
# Decompose fused loop over Cartesian product of multiple ranges
# into nested loops, each over a single range
if ndim > 1:
for i,j in zip(indices2[::-1], target.args[::-1]):
for_body = [For(i,j, for_body)]
else:
for_body = [For(indices2, target, for_body)]
for_body.insert(0,Assign(v, 0.0))
for_body.append(Assign(v3,v))
ranges = [Range(variable_to_sympy(i)) for i in rows]
target = Product(*ranges)
# Decompose fused loop over Cartesian product of multiple ranges
# into nested loops, each over a single range
if ndim > 1:
for i,j in zip(indices1[::-1], target.args[::-1]):
for_body = [For(i,j, for_body)]
body += for_body
else:
body += [For(indices1, target, for_body)]
nrows[dim] += nrows_extra[dim]
body = inits + body
func_args = (mat, x, out)
if isinstance(starts[0], Variable):
func_args = func_args + tuple(starts)
if isinstance(nrowscopy[0], Variable):
func_args = func_args + tuple(nrowscopy)
if isinstance(nrows_extra[0], Variable):
func_args = func_args + tuple(nrows_extra)
decorators = {}
header = None
imports = []
if backend:
if backend['name'] == 'pyccel':
a = [String(str(i)) for i in build_pyccel_types_decorator(func_args)]
decorators = {'types': Function('types')(*a)}
elif backend['name'] == 'numba':
decorators = {'njit': Function('njit')(ValuedArgument(Symbol('fastmath'), backend['fastmath']))}
elif backend['name'] == 'pythran':
                header = build_pythran_types_header(self.name, func_args)
func = FunctionDef(self.name, list(func_args), [], body, imports=imports, decorators=decorators)
return func
def _initialize_folder(self, folder=None):
# ...
if folder is None:
basedir = os.getcwd()
folder = PSYDAC_DEFAULT_FOLDER
folder = os.path.join( basedir, folder )
            # ... add __init__ to all directories to be able to import the generated modules
touch_init_file('__pycache__')
for root, dirs, files in os.walk(folder):
touch_init_file(root)
# ...
else:
raise NotImplementedError('user output folder not yet available')
folder = os.path.abspath( folder )
mkdir_p(folder)
# ...
return folder
def _generate_code(self, backend=None):
code = ''
tag = random_string( 8 )
if backend and backend['name'] == 'pyccel':
imports = 'from pyccel.decorators import types'
elif backend and backend['name'] == 'numba':
imports = 'from numba import njit'
else:
imports = ''
if MPI.COMM_WORLD.rank == 0:
modname = 'dependencies_{}'.format(tag)
code = '{imports}\n{code}'.format(imports=imports, code=pycode.pycode(self.code))
write_code(modname+ '.py', code, folder = self.folder)
else:
modname = None
self._modname = MPI.COMM_WORLD.bcast( modname, root=0 )
def _compile(self, backend=None):
module_name = self._modname
sys.path.append(self.folder)
package = importlib.import_module( module_name )
sys.path.remove(self.folder)
if backend and backend['name'] == 'pyccel':
package = self._compile_pyccel(package, backend)
self._func = getattr(package, 'lo_dot')
def _compile_pyccel(self, mod, backend, verbose=False):
# ... convert python to fortran using pyccel
compiler = backend['compiler']
fflags = backend['flags']
_PYCCEL_FOLDER = backend['folder']
from pyccel.epyccel import epyccel
fmod = epyccel(mod,
compiler = compiler,
fflags = fflags,
comm = MPI.COMM_WORLD,
bcast = True,
folder = _PYCCEL_FOLDER,
verbose = verbose)
return fmod
@lru_cache(maxsize=32)
class TransposeOperator(SplBasic):
def __new__(cls, ndim, **kwargs):
return SplBasic.__new__(cls, 'transpose', name='lo_transpose', prefix='lo_transpose')
def __init__(self, ndim, **kwargs):
self.ndim = ndim
backend = dict(kwargs.pop('backend'))
self._code = inspect.getsource(_transpose[ndim])
self._args_dtype = _args_dtype[ndim]
self._folder = self._initialize_folder()
self._generate_code(backend=backend)
self._compile(backend=backend)
@property
def func(self):
return self._func
@property
def arguments(self):
return self._arguments
@property
def code(self):
return self._code
@property
def folder(self):
return self._folder
def _initialize_folder(self, folder=None):
# ...
if folder is None:
basedir = os.getcwd()
folder = PSYDAC_DEFAULT_FOLDER
folder = os.path.join( basedir, folder )
            # ... add __init__ to all directories to be able to import the generated modules
touch_init_file('__pycache__')
for root, dirs, files in os.walk(folder):
touch_init_file(root)
# ...
else:
raise NotImplementedError('user output folder not yet available')
folder = os.path.abspath( folder )
mkdir_p(folder)
# ...
return folder
def _generate_code(self, backend=None):
dec = ''
code = self._code
tag = random_string( 8 )
if backend and backend['name'] == 'pyccel':
import pyccel
from packaging import version
if version.parse(pyccel.__version__) < version.parse('1.1.0'):
# Add @types decorator due the minimum required Pyccel version 0.10.1
imports = 'from pyccel.decorators import types'
dec = '@types({})'.format(','.join(self._args_dtype))
else:
imports = ''
dec = ''
elif backend and backend['name'] == 'numba':
imports = 'from numba import njit'
dec = '@njit(fastmath={})'.format(backend['fastmath'])
else:
imports = ''
if MPI.COMM_WORLD.rank == 0:
modname = 'dependencies_{}'.format(tag)
code = '{imports}\n{dec}\n{code}'.format(imports=imports, dec=dec, code=code)
write_code(modname+ '.py', code, folder = self.folder)
else:
modname = None
self._modname = MPI.COMM_WORLD.bcast( modname, root=0 )
def _compile(self, backend=None):
module_name = self._modname
sys.path.append(self.folder)
package = importlib.import_module( module_name )
sys.path.remove(self.folder)
if backend and backend['name'] == 'pyccel':
package = self._compile_pyccel(package, backend)
self._func = getattr(package, 'transpose_{}d'.format(self.ndim))
def _compile_pyccel(self, mod, backend, verbose=False):
# ... convert python to fortran using pyccel
compiler = backend['compiler']
fflags = backend['flags']
_PYCCEL_FOLDER = backend['folder']
from pyccel.epyccel import epyccel
fmod = epyccel(mod,
compiler = compiler,
fflags = fflags,
comm = MPI.COMM_WORLD,
bcast = True,
folder = _PYCCEL_FOLDER,
verbose = verbose)
return fmod
class VectorDot(SplBasic):
def __new__(cls, ndim, backend=None):
obj = SplBasic.__new__(cls, 'dot', name='v_dot', prefix='v_dot')
obj._ndim = ndim
obj._backend = backend
        obj._func = obj._initialize()
return obj
@property
def ndim(self):
return self._ndim
@property
def func(self):
return self._func
@property
def backend(self):
return self._backend
def _initialize(self):
ndim = self.ndim
indices = variables('i1:%s'%(ndim+1),'int')
dims = variables('n1:%s'%(ndim+1),'int')
pads = variables('p1:%s'%(ndim+1),'int')
out = variables('out','real')
x1,x2 = variables('x1, x2','real',rank=ndim,cls=IndexedVariable)
body = []
ranges = [Range(p,n-p) for n,p in zip(dims,pads)]
target = Product(*ranges)
v1 = x1[indices]
v2 = x2[indices]
body = [AugAssign(out,'+' ,Mul(v1,v2))]
body = [For(indices, target, body)]
body.insert(0,Assign(out, 0.0))
body.append(Return(out))
func_args = (x1, x2) + pads + dims
self._imports = [Import('itertools', 'product')]
decorators = {}
header = None
if self.backend['name'] == 'pyccel':
decorators = {'types': build_pyccel_types_decorator(func_args), 'external':[]}
elif self.backend['name'] == 'numba':
decorators = {'jit':[]}
elif self.backend['name'] == 'pythran':
header = build_pythran_types_header(self.name, func_args)
return FunctionDef(self.name, list(func_args), [], body,
decorators=decorators,header=header)
#========================================================================================================
def transpose_1d( M:'float[:,:]', Mt:'float[:,:]', n1:int, nc1:int, gp1:int, p1:int,
dm1:int, cm1:int, nd1:int, ndT1:int, si1:int, sk1:int, sl1:int):
d1 = gp1-p1
for x1 in range(n1):
j1 = dm1*gp1 + x1
for l1 in range(nd1):
i1 = si1 + cm1*(x1//dm1) + l1 + d1
k1 = sk1 + x1%dm1-dm1*(l1//cm1)
if k1<ndT1 and k1>-1 and l1<nd1 and i1<nc1:
Mt[j1, l1+sl1] = M[i1, k1]
# ...
def transpose_2d( M:'float[:,:,:,:]', Mt:'float[:,:,:,:]', n1:int, n2:int, nc1:int, nc2:int,
gp1:int, gp2:int, p1:int, p2:int, dm1:int, dm2:int,
cm1:int, cm2:int, nd1:int, nd2:int, ndT1:int, ndT2:int,
si1:int, si2:int, sk1:int, sk2:int, sl1:int, sl2:int):
d1 = gp1-p1
d2 = gp2-p2
for x1 in range(n1):
for x2 in range(n2):
j1 = dm1*gp1 + x1
j2 = dm2*gp2 + x2
for l1 in range(nd1):
for l2 in range(nd2):
i1 = si1 + cm1*(x1//dm1) + l1 + d1
i2 = si2 + cm2*(x2//dm2) + l2 + d2
k1 = sk1 + x1%dm1-dm1*(l1//cm1)
k2 = sk2 + x2%dm2-dm2*(l2//cm2)
if k1<ndT1 and k1>-1 and k2<ndT2 and k2>-1\
and l1<nd1 and l2<nd2 and i1<nc1 and i2<nc2:
Mt[j1,j2, l1+sl1,l2+sl2] = M[i1,i2, k1,k2]
# ...
def transpose_3d( M:'float[:,:,:,:,:,:]', Mt:'float[:,:,:,:,:,:]', n1:int, n2:int, n3:int,
nc1:int, nc2:int, nc3:int, gp1:int, gp2:int, gp3:int, p1:int, p2:int, p3:int,
dm1:int, dm2:int, dm3:int, cm1:int, cm2:int, cm3:int, nd1:int, nd2:int, nd3:int,
ndT1:int, ndT2:int, ndT3:int, si1:int, si2:int, si3:int, sk1:int, sk2:int, sk3:int,
sl1:int, sl2:int, sl3:int):
d1 = gp1-p1
d2 = gp2-p2
d3 = gp3-p3
for x1 in range(n1):
for x2 in range(n2):
for x3 in range(n3):
j1 = dm1*gp1 + x1
j2 = dm2*gp2 + x2
j3 = dm3*gp3 + x3
for l1 in range(nd1):
for l2 in range(nd2):
for l3 in range(nd3):
i1 = si1 + cm1*(x1//dm1) + l1 + d1
i2 = si2 + cm2*(x2//dm2) + l2 + d2
i3 = si3 + cm3*(x3//dm3) + l3 + d3
k1 = sk1 + x1%dm1-dm1*(l1//cm1)
k2 = sk2 + x2%dm2-dm2*(l2//cm2)
k3 = sk3 + x3%dm3-dm3*(l3//cm3)
if k1<ndT1 and k1>-1 and k2<ndT2 and k2>-1 and k3<ndT3 and k3>-1\
and l1<nd1 and l2<nd2 and l3<nd3 and i1<nc1 and i2<nc2 and i3<nc3:
Mt[j1,j2,j3, l1 + sl1,l2 + sl2,l3 + sl3] = M[i1,i2,i3, k1,k2,k3]
_transpose = {1:transpose_1d,2:transpose_2d, 3:transpose_3d}
_args_dtype = {1:[repr('float[:,:]')]*2 + ['int']*11,2:[repr('float[:,:,:,:]')]*2 + ['int']*22, 3:[repr('float[:,:,:,:,:,:]')]*2 + ['int']*33}
|
StarcoderdataPython
|
3357257
|
"""
Genetic Programming for Quant
Author: UePG
Reference:
[1] https://gplearn.readthedocs.io/en/stable/
[2] <NAME>, "Genetic Programming", 1992.
"""
__version__ = "0.1.4"
__all__ = ["Function", "Fitness", "Backtester", "SymbolicRegressor"]
|
StarcoderdataPython
|
1719276
|
<reponame>mingkaic/tenncor
import re
import logging
from tools.gen.plugin_base import PluginBase
from tools.gen.file_rep import FileRep
from plugins.template import build_template
from plugins.apis import api_header
from plugins.common import order_classes, reference_classes
from plugins.cformat.funcs import render_defn as cfrender
from plugins.pformat.clas import render as crender
from plugins.pformat.funcs import render as frender, process_modname, pybindt
_header_template = '''
// type to replace template arguments in pybind
using {pybind} = {pybind_type};
//>>> ^ pybind, pybind_type
'''
_source_template = '''
#define STRINGIFY(X) #X
namespace py = pybind11;
//>>> global_decs
{global_decs}
//>>> modname
PYBIND11_MODULE({modname}, m_{modname})
{{
m_{modname}.doc() = "pybind for {modname} api";
#ifdef VERSION_INFO
m_{modname}.attr("__version__") = STRINGIFY(VERSION_INFO);
#else
m_{modname}.attr("__version__") = "dev";
#endif
//>>> input_defs
{input_defs}
#ifdef CUSTOM_PYBIND_EXT
//>>> modname
CUSTOM_PYBIND_EXT(m_{modname})
#endif
//>>> content
{content}
}}
'''
_content_template = '''
//>>> class_defs
{class_defs}
//>>> global_defs
{global_defs}
//>>> func_defs
{func_defs}
'''
def render_pyglobal_decl(mems):
out = []
for mem in mems:
assert('name' in mem and 'type' in mem)
affix = ''
if 'decl' in mem:
out.append(cfrender({
'name': 'py_' + mem['name'] + '_global',
'out': {
'type': mem['type'] + '&',
'val': mem['decl']
}
}))
else:
if 'val' in mem:
affix = '= ' + mem['val']
out.append(' '.join([mem['type'], mem['name'], affix + ';']))
return out
def render_pyclasses(classes, ext_path, mod, namespace):
classes = reference_classes(classes, ext_path)
class_defs = dict()
class_inputs = dict()
for clas in classes:
class_def, cinputs = crender(clas, mod, namespace)
cname = clas['name']
class_defs[cname] = class_def
class_inputs.update(cinputs)
order = order_classes(classes)
return [class_defs[clas] for clas in order], class_inputs
def render_pyglobal(mem, mod):
global_tmpl = '{mod}.attr("{name}") = &{val};'
name = mem['name']
if 'decl' in mem:
val = 'py_' + name + '_global()'
else:
val = name
return global_tmpl.format(mod=mod, name=name, val=val)
def render_pyapi(api, ext_path, mod, ns=''):
_submodule_def = 'py::module m_{submod} = {mod}.def_submodule("{submod}", "A submodule of \'{mod}\'");'
global_decls = []
content_lines = []
input_types = dict()
if 'namespaces' in api:
        # use a distinct loop variable so the namespace-prefix parameter 'ns' is not shadowed
        for sub_ns in api['namespaces']:
            namespace = sub_ns['name']
            sub_decls, sub_content, sub_inputs = render_pyapi(
                sub_ns['content'], ext_path, 'm_' + namespace, ns + '::' + namespace)
global_decls += sub_decls
content_lines += [_submodule_def.format(submod=namespace, mod=mod)] + sub_content
input_types.update(sub_inputs)
global_mems = api.get('pyglobal', [])
funcs = api.get('funcs', [])
classes = api.get('classes', [])
funcs = list(filter(lambda f: not f.get('pyignores', False), funcs))
global_decls += render_pyglobal_decl(global_mems)
class_content, class_inputs = render_pyclasses(classes, ext_path, mod, ns)
content_lines += class_content
input_types.update(class_inputs)
content_lines += [render_pyglobal(mem, mod) for mem in global_mems]
for f in funcs:
fcontent, finputs = frender(f, mod, ns)
content_lines.append(fcontent)
input_types.update(finputs)
return global_decls, content_lines, input_types
_plugin_id = 'PYBINDER'
_pyapi_header = 'pyapi.hpp'
@PluginBase.register
class PyAPIsPlugin:
def plugin_id(self):
return _plugin_id
def process(self, generated_files, arguments, **kwargs):
plugin_key = 'api'
if plugin_key not in arguments:
logging.warning(
'no relevant arguments found for plugin %s', _plugin_id)
return
api = arguments[plugin_key]
bindtype = api.get('pybind_type', 'double')
generated_files[_pyapi_header] = FileRep(
_header_template.format(pybind=pybindt, pybind_type=bindtype),
user_includes=['"tenncor/eteq/etens.hpp"'], internal_refs=[])
# split modules by top-level namespaces
modname = api['pybind_module']
ignore_types = [process_modname(dtype)
for dtype in api.get('pyignore_type', [])] + [process_modname(pybindt)]
assert('ext_path' in kwargs)
ext_path = kwargs['ext_path']
decls, content_lines, input_types = render_pyapi(api, ext_path, 'm_' + modname)
src_file = 'pyapi_{}.cpp'.format(modname)
generated_files[src_file] = FileRep(
_source_template.format(
modname=modname,
input_defs='\n'.join([input_types[input_mod]
for input_mod in input_types if input_mod not in ignore_types]),
global_decs='\n'.join(decls),
content='\n\n'.join(content_lines)),
user_includes=[
'"pybind11/pybind11.h"',
'"pybind11/stl.h"',
'"pybind11/operators.h"',
] + api.get('pybind_includes', []),
internal_refs=[_pyapi_header, api_header])
return generated_files
|
StarcoderdataPython
|
1790251
|
import numpy as np
import pandas as pd
def compute_sprns_array(cells_file, spk_f_names, f_out_r, t_start, t_stop, bin_size):
cells_db = pd.read_csv(cells_file, sep=' ')
t_bins = np.arange(t_start, t_stop, bin_size)
r_data = np.zeros( (len(cells_db.index), t_bins[:-1].size) )
t = np.array([])
gids = np.array([])
for f_name in spk_f_names:
        print('Processing file %s.' % f_name)
data = np.genfromtxt(f_name, delimiter=' ')
if (data.size == 0):
t_tmp = np.array([])
gids_tmp = np.array([])
elif (data.size == 2):
t_tmp = np.array([data[0]])
gids_tmp = np.array([data[1]])
else:
t_tmp = data[:, 0]
gids_tmp = data[:, 1]
t = np.concatenate( (t, t_tmp) )
gids = np.concatenate( (gids, gids_tmp) )
for k_t, t_bin in enumerate(t_bins[:-1]):
        print('Computing rates in bins; working on bin %d of %d.' % (k_t, t_bins[:-1].size))
ind = np.intersect1d( np.where(t >= t_bin), np.where(t < (t_bin + bin_size)) )
t_tmp = t[ind]
gids_tmp = gids[ind]
df = pd.DataFrame( {'gid': gids_tmp, 't': t_tmp} )
df_tmp = df.groupby('gid').count() * 1000.0 / bin_size # Time is in ms and rate is in Hz.
df_tmp.columns = ['rates']
for gid in df_tmp.index:
r_data[gid, k_t] = df_tmp['rates'].loc[gid]
np.save(f_out_r, r_data)
|
StarcoderdataPython
|
157670
|
<filename>queues/queues_two_stacks.py
class Stack:
def __init__(self):
self.data = []
def pop(self):
if self.is_empty():
return None
val = self.data[len(self.data)-1]
self.data = self.data[:len(self.data)-1]
return val
def peek(self):
if self.is_empty():
return None
return self.data[len(self.data)-1]
def push(self, val):
self.data.append(val)
def is_empty(self):
return len(self.data) == 0
class Queue:
def __init__(self):
self.in_stack = Stack()
self.out_stack = Stack()
def enqueue(self, val):
self.in_stack.push(val)
def dequeue(self):
if self.out_stack.is_empty():
if self.in_stack.is_empty():
return None
while self.in_stack.is_empty() == False:
self.out_stack.push(self.in_stack.pop())
return self.out_stack.pop()
def peek(self):
if self.out_stack.is_empty():
if self.in_stack.is_empty():
return None
while self.in_stack.is_empty() == False:
self.out_stack.push(self.in_stack.pop())
return self.out_stack.peek()
def is_empty(self):
        return self.in_stack.is_empty() and self.out_stack.is_empty()
def print_queue(q):
s = "["
for i in range(0, len(q.out_stack.data)):
s += str(q.out_stack.data[i])
if i < len(q.out_stack.data)-1:
s += ", "
for i in range(0, len(q.in_stack.data)):
s += str(q.in_stack.data[i])
if i < len(q.in_stack.data)-1:
s += ", "
s += "]"
print(s)
queue = Queue()
queue.enqueue(1)
queue.enqueue(2)
queue.enqueue(3)
# should print [1, 2, 3]
print_queue(queue)
v = queue.dequeue()
# should print [2, 3]
print_queue(queue)
v = queue.dequeue()
# should print [3]
print_queue(queue)
queue.enqueue(4)
queue.enqueue(5)
queue.enqueue(6)
v = queue.dequeue()
# should print [4, 5, 6]
print_queue(queue)
v = queue.dequeue()
v = queue.dequeue()
v = queue.dequeue()
print_queue(queue)
v = queue.dequeue()
print_queue(queue)
|
StarcoderdataPython
|
1697370
|
<reponame>jgurtowski/pbcore_python
from nose.tools import assert_equal, assert_true, assert_false
from pbcore import data
from pbcore.io import FastaReader, FastaWriter, FastaRecord
from StringIO import StringIO
class TestFastaRecord:
def setup(self):
self.name = "chr1|blah|blah"
self.sequence = "GATTACA" * 20
self.expected__str__ = \
">chr1|blah|blah\n" \
"GATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATT\n" \
"ACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAG\n" \
"ATTACAGATTACAGATTACA"
self.record = FastaRecord(self.name, self.sequence)
def test__init__(self):
assert_equal(self.name, self.record.name)
assert_equal(self.sequence, self.record.sequence)
def test__str__(self):
assert_equal(self.expected__str__, str(self.record))
def test_fromString(self):
recordFromString = FastaRecord.fromString(self.expected__str__)
assert_equal(self.name, recordFromString.name)
assert_equal(self.sequence, recordFromString.sequence)
def test_md5(self):
assert_equal("67fc75ce599ed0ca1fc8ed2dcbccc95d",
self.record.md5)
def test_eq(self):
name = 'r1'
seq = 'ACGT'
r1 = FastaRecord(name, seq)
r2 = FastaRecord(name, seq)
assert_true(r1 == r2)
def test_not_equal(self):
r1 = FastaRecord('r1', 'ACGT')
r2 = FastaRecord('r2', 'ACGT')
r3 = FastaRecord('r1', 'ACGT')
assert_true(r1 != r2)
assert_false(r1 != r3)
class TestFastaReader:
def test_readFasta(self):
f = FastaReader(data.getFasta())
entries = list(f)
assert_equal(48, len(entries))
assert_equal("ref000001|EGFR_Exon_2", entries[0].name)
assert_equal("TTTCTTCCAGTTTGCCAAGGCACGAGTAACAAGCTCACGCAGTTGGGCACTTT"
"TGAAGATCATTTTCTCAGCCTCCAGAGGATGTTCAATAACTGTGAGGTGGTCC"
"TTGGGAATTTGGAAATTACCTATGTGCAGAGGAATTATGATCTTTCCTTCTTA"
"AAGGTTGGTGACTTTGATTTTCCT",
entries[0].sequence)
assert_equal("e3912e9ceacd6538ede8c1b2adda7423",
entries[0].md5)
def test_dosLineEndingsFasta(self):
f = FastaReader(data.getDosFormattedFasta())
entries = list(f)
for e in entries:
assert_true("\r" not in e.name)
assert_equal(16, len(e.sequence))
class TestFastaWriter:
def setup(self):
self.fasta1 = StringIO(
">chr1|blah|blah\n" \
"GATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATT\n" \
"ACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAG\n" \
"ATTACAGATTACAGATTACA\n")
self.fasta2 = StringIO(self.fasta1.getvalue() + "\n" + \
">chr2|blah|blah\n" \
"GATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATT\n" \
"ACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAG\n" \
"ATTACAGATTACAGATTACA\n")
def test_writeFasta1(self):
f = StringIO()
w = FastaWriter(f)
for record in FastaReader(self.fasta1):
w.writeRecord(record)
assert_equal(self.fasta1.getvalue(), f.getvalue())
def test_writeFasta2(self):
f = StringIO()
w = FastaWriter(f)
for record in FastaReader(self.fasta1):
w.writeRecord(record.name, record.sequence)
assert_equal(self.fasta1.getvalue(), f.getvalue())
|
StarcoderdataPython
|
1607674
|
#IMPORTING REQUIRED MODULES
import mysql.connector
import datetime
import pyautogui
#CONNECTING TO DB AND CREATING CURSOR OBJECT
mycon = mysql.connector.connect(user='root',passwd='',host='localhost',database='hotel_management')
cursor = mycon.cursor()
#DEFINING GLOBAL VARIABLES NEEDED
global action
global count
count = 0
#DEFINING NEEDED FUNCTIONS
#Checkin
def add_customer():
try:
fName = str(input("ENTER CUSTOMER NAME : "))
Phone_Number = str(input("ENTER PHONE NUMBER : "))
customer_id = str(input("CUSTOMER ID TYPE : "))
id_number = str(input("ID NUMBER : "))
room_id = str(input("SELECT ONE ROOM OUT OF " + rooms_available() + " : "))
cursor.execute("INSERT INTO customers(fName,Phone_Number,customer_id,id_number,room_id) VALUE('{}',{},'{}','{}',{});".format(fName,Phone_Number,customer_id,id_number,room_id))
mycon.commit()
cursor.execute("SELECT Sr_No FROM customers WHERE fName = '{}' AND Phone_Number = {};".format(fName,Phone_Number))
data = cursor.fetchone()
Sr_No = data[0]
cursor.execute("UPDATE rooms SET Occupied_by_customer = {} WHERE room_id = {};".format(Sr_No,room_id))
cursor.execute("UPDATE rooms SET availabilty = 'Booked' WHERE room_id = {};".format(room_id))
mycon.commit()
print("Customer has been succesfully added to database.")
except:
print("There was an error in making new entry. Please try again")
add_customer()
#returns a string made of available rooms
def rooms_available():
cursor.execute("SELECT room_id FROM rooms WHERE availabilty = 'available';")
data = cursor.fetchall()
rooms = ''
for room in data:
for r in room:
rooms = rooms + str(r) + ' '
return rooms
# searches the database for the name, phone number and ID of the customer currently staying in a given room
def customer_info_based_on_room():
try:
room_id = str(input("Enter room number : "))
cursor.execute("SELECT fName,Phone_Number,id_number FROM customers WHERE room_id = {} AND doc IS NULL".format(room_id))
record = cursor.fetchone()
if record is not None:
name = record[0]
phone = record[1]
id_num = record[2]
print("{} is staying in room {}. \nHis phone number is {}. \nHis id number is {}.".format(name,room_id,phone,id_num))
elif room_id in ['1001','1002','1003','1004','1005','1006','1007','1008','1009','1010']:
print("No one is staying in room {}".format(room_id))
else:
print("No such room exists")
customer_info_based_on_room()
except:
print("There was an error in finding information. Please try again")
customer_info_based_on_room()
#checkout
def checkout():
try:
name = str(input("ENTER NAME OF CUSTOMER ABOUT TO CHECKOUT : "))
cursor.execute("SELECT Sr_No,fName,doa,doc,room_id FROM customers WHERE fName = '{}' AND doc IS NULL;".format(name))
record = cursor.fetchone()
if record is not None:
Sr_No = record[0]
name = record[1]
doa = record[2]
room_id = record[4]
make_bill(Sr_No,name,doa,room_id)
cursor.execute("UPDATE customers SET doc = CURRENT_TIMESTAMP() WHERE Sr_No = {}".format(Sr_No))
cursor.execute("UPDATE rooms SET Occupied_by_customer = NULL, availabilty = 'available' WHERE room_id = {};".format(room_id))
mycon.commit()
else:
print("Customer {} is not staying here right now".format(name))
checkout()
except:
print("There was an error in checkout. Please try again")
checkout()
#makes bill and adds to table bills
def make_bill(Sr_No,name,doa,room_id):
cursor.execute("SELECT cost_pernight FROM rooms WHERE room_id = {}".format(room_id))
record = cursor.fetchone()
cost = record[0]
doc = datetime.datetime.today()
days = (doc-doa).days
amount = days * cost
cursor.execute("SELECT bill_id FROM bills")
last_bill_id = cursor.fetchall()[-1][0]
number = int(last_bill_id[2:5]) + 1
no_of_zeros = 3-len(str(number))
new_bill_id = 'AA' + '0'*no_of_zeros + str(number)
payment_method = str(input("ENTER PAYMENT METHOD : "))
discount = amount / 10
cursor.execute("INSERT INTO bills(Bill_id,customer_name,Sr_No,amount,discount,payment_method) VALUE('{}','{}',{},{},{},'{}');".format(new_bill_id,name,Sr_No,amount,discount,payment_method))
print("Bill has been created successfully.\nBill id is {} \nAmount to be paid is {}".format(new_bill_id,amount-discount))
# looks up a customer by name (and optionally phone number) and reports the current room or past stays, plus ID info
def info_based_on_customer_name():
try:
name = str(input("ENTER CUSTOMER NAME : "))
phone = input("ENTER PHONE NUMBER(not necessary) : ")
if phone == '':
cursor.execute("SELECT Sr_No,customer_id,id_number,doa,doc,room_id FROM customers WHERE fName = '{}'".format(name))
data = cursor.fetchall()
if data != []:
customer_id = data[0][1]
id_number = data[0][2]
room_id = data[0][5]
if data[-1][4] is None:
print("Customer {} is currently staying in room {}. \nHis {} number is {}.".format(name,room_id,customer_id,id_number) )
else:
times = len(data)
print("Customer {} has stayed {} times. \nHis {} number is {}.".format(name,times,customer_id,id_number))
else:
print("No such customer exists")
#info_based_on_customer_name()
else:
cursor.execute("SELECT Sr_No,customer_id,id_number,doa,doc,room_id FROM customers WHERE fName = '{}' AND Phone_number = {}".format(name,phone))
data = cursor.fetchall()
if data != []:
customer_id = data[0][1]
id_number = data[0][2]
room_id = data[0][5]
if data[-1][4] is None:
print("Customer {} is currently staying in room {}. \nHis {} number is {}.".format(name,room_id,customer_id,id_number) )
else:
times = len(data)
print("Customer {} has stayed {} times. \nHis {} number is {}.".format(name,times,customer_id,id_number))
else:
print("No such customer exists")
info_based_on_customer_name()
except:
print("There was an error in searching database. Please try again")
info_based_on_customer_name()
#calculates sum of total money intake and discount given
def money_made():
cursor.execute("SELECT SUM(ALL net_to_be_paid),SUM(ALL discount) FROM bills;")
data = cursor.fetchall()
money = data[0][0]
discount = data[0][1]
print("The cash inflow is \u20B9{} since the hotel's opening, after giving \u20B9{} as discount.".format(money,discount))
#takes input for choice of action
def input_action():
global action
try:
action = int(input("Please select one action from list above(enter the number associated with action)"))
if action not in [1,2,3,4,5,6,7]:
print("Not a valid option, please select again")
input_action()
except:
print("Not a valid option, please select again.")
input_action()
# prints many newline characters to push previous output up, leaving a clear screen to print to
def clear_screen():
print("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n")
# scrolls up so that the main menu heading always appears as the first line on screen
def scroll():
global count
if count != 1:
pyautogui.keyDown('ctrl')
pyautogui.press('down', presses = 27)
pyautogui.keyUp('ctrl')
#The Main Menu of the program that controls everything
def main_menu():
global action
global count
print("#################~~-*-*-*-*-WELCOME TO HOTEL PLAZA-*-*-*-*-~~#################",end = '\n\n\n\n')
print("Following are what this program can do -----")
print("1. Add new customer(CheckIn)")
print("2. CheckOut")
print("3. Search for available rooms")
print("4. Search for a customer's information staying in some room")
print("5. Search for room and id information based on customer name and/or phone number")
print("6. Calculate lifetime money intake for the hotel")
print("7. Exit Program")
print("================================================================================================================")
count += 1
scroll()
input_action()
if action == 1:
add_customer()
input()
clear_screen()
main_menu()
elif action == 2:
checkout()
input()
clear_screen()
main_menu()
elif action == 3:
print("Following rooms are available --")
print(rooms_available())
input()
clear_screen()
main_menu()
elif action == 4:
customer_info_based_on_room()
input()
clear_screen()
main_menu()
elif action == 5:
info_based_on_customer_name()
input()
clear_screen()
main_menu()
elif action == 6:
money_made()
input()
clear_screen()
main_menu()
elif action == 7:
mycon.close()
#CALLING THE MAIN MENU
main_menu()
|
StarcoderdataPython
|
1690555
|
<reponame>dpleshkov/programming-class
class Furniture:
def __init__(self, name, width, height, material, quantity):
self.name = name
self.width = width
self.height = height
self.material = material
self.quantity = quantity
|
StarcoderdataPython
|
1670610
|
# IMPORTS
from flask import Flask, jsonify, request
import logging.handlers
import datetime
from mongoSetup import Patient
from pymodm import errors
from pymodm import connect
# FLASK SERVER SETUP
app = Flask(__name__)
formatter = logging.Formatter(
"[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s")
handler = logging.handlers.RotatingFileHandler(
'app.log',
maxBytes=1024 * 1024)
handler.setFormatter(formatter)
handler.setLevel("DEBUG")
app.logger.addHandler(handler)
# SERVER ENDPOINTS
@app.route("/api/new_patient", methods=["POST"])
def post_new_patient():
""" Registers a new patient with the server, and allows for future heart
rate measurements from the patient. The JSON request received should be a
dictionary with the following key-value pairs:
"patient_id" : int representing patient's ID number
"attending_email" : String representing attending physician's email
"user_age" : float representing patient's age in years
:return: A tuple of length 2. The second entry is always the HTTP status
code. If no errors raised, the first entry is a patient data dictionary
serialized as JSON, containing patient ID, attending physician's email,
and patient's age. Otherwise, it is an error message serialized as JSON.
"""
patient_data = request.get_json()
try:
cleared_patient_data = check_patient_data(patient_data)
except KeyError:
return jsonify("Error 400: Patient data dictionary missing keys."), 400
except ValueError:
return jsonify("Error 400: Invalid entry in patient data dict."), 400
except TypeError:
return jsonify("Error 400: Attending email not of type String."), 400
patient = Patient(cleared_patient_data["patient_id"],
attending_email=cleared_patient_data["attending_email"],
age=cleared_patient_data["user_age"],
heart_rates=["begin"],
timestamps=["begin"])
patient.save()
return jsonify(cleared_patient_data), 200
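# Example client call for the endpoint above (illustrative sketch; host, port and the
# field values are assumptions, not part of the original code):
#
#   import requests
#   r = requests.post("http://localhost:5000/api/new_patient",
#                     json={"patient_id": 1,
#                           "attending_email": "attending@example.com",
#                           "user_age": 50.0})
#   print(r.status_code, r.json())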
@app.route("/api/heart_rate", methods=["POST"])
def post_heart_rate():
""" Posts a patient's heart rate to the server along with his/her ID. The
JSON request received should be a dictionary with the following key-value
pairs:
"patient_id" : int representing patient's ID number
"heart_rate" : float representing current heart rate measurement
:return: A tuple of length 2. The second entry is always the HTTP status
code. If no errors raised, the first entry is a patient data dictionary
serialized as JSON, containing patient ID, heart rate, and timestamp.
Otherwise, it is an error message serialized as JSON.
"""
heart_rate = request.get_json()
try:
cleared_heart_rate = check_heart_rate(heart_rate)
cleared_heart_rate["timestamp"] = str(datetime.datetime.now())
except KeyError:
return jsonify("Error 400: Patient data dictionary missing keys."), 400
except ValueError:
return jsonify("Error 400: Invalid entry in patient data dict."), 400
try:
patient = Patient.objects.raw({"_id": cleared_heart_rate["patient_id"]
}).first()
except errors.DoesNotExist:
return jsonify("Error 404: Patient with the requested ID does not "
"exist."), 404
hrs = patient.heart_rates
if "begin" in hrs:
patient.heart_rates = [cleared_heart_rate["heart_rate"]]
else:
hrs.append(cleared_heart_rate["heart_rate"])
patient.heart_rates = hrs
tss = patient.timestamps
if "begin" in tss:
patient.timestamps = [cleared_heart_rate["timestamp"]]
else:
tss.append(cleared_heart_rate["timestamp"])
patient.timestamps = tss
patient.save()
return jsonify(cleared_heart_rate), 200
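# Example client call for the endpoint above (illustrative sketch; host, port and the
# heart rate value are assumptions; the patient must already be registered):
#
#   import requests
#   r = requests.post("http://localhost:5000/api/heart_rate",
#                     json={"patient_id": 1, "heart_rate": 72.0})
#   print(r.status_code, r.json())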
@app.route("/api/status/<patient_id>", methods=["GET"])
def get_status(patient_id):
""" Returns whether the patient is currently tachycardic based on the
previously available heart rate, as well as a timestamp of the most recent
heart rate.
:param patient_id: int representing the patient's ID number
:return: A tuple of length 2. The second entry is always the HTTP status
code. If no errors raised, the first entry is a patient data dictionary
    serialized as JSON, containing a Boolean indicating whether or not the
    patient's heart rate is tachycardic and a String representing a timestamp
of the most recent measurement. Otherwise, it is an error message
serialized as JSON.
"""
patient_id = int(patient_id)
try:
patient = Patient.objects.raw({"_id": patient_id}).first()
except errors.DoesNotExist:
return jsonify("Error 404: Patient with the requested ID does not "
"exist."), 404
try:
hrs = patient.heart_rates
newest_hr = hrs[len(hrs)-1]
tss = patient.timestamps
newest_timestamp = tss[len(tss)-1]
if newest_hr == "begin" or newest_timestamp == "begin":
raise ValueError
except IndexError:
return jsonify("Error 400: No heart rates have been entered yet."), 400
except ValueError:
return jsonify("Error 400: No heart rates have been entered yet."), 400
age = patient.age
status = is_tachycardic(newest_hr, age)
return jsonify({"Tachycardia status": status,
"Timestamp": newest_timestamp}), 200
@app.route("/api/heart_rate/<patient_id>", methods=["GET"])
def get_heart_rates(patient_id):
""" Returns all previous heart rate measurements for the patient.
:param patient_id: int representing the patient's ID number
:return: A tuple of length 2. The second entry is always the HTTP status
code. If no errors raised, the first entry is a list of heart rates
serialized as JSON. Otherwise, it is an error message serialized as JSON.
"""
patient_id = int(patient_id)
try:
patient = Patient.objects.raw({"_id": patient_id}).first()
except errors.DoesNotExist:
return jsonify("Error 404: Patient with the requested ID does not "
"exist."), 404
if type(patient.heart_rates[0]) == str:
return jsonify("Error 400: No heart rates have been entered yet."), 400
return jsonify(patient.heart_rates), 200
@app.route("/api/heart_rate/average/<patient_id>", methods=["GET"])
def get_heart_rate_avg(patient_id):
""" Returns the average of all previous heart rate measurements for the
patient.
:param patient_id: int representing the patient's ID number
:return: A tuple of length 2. The second entry is always the HTTP status
code. If no errors raised, the first entry is a float representing the
average of the patient's past HR measurements, serialized as JSON.
Otherwise, it is an error message serialized as JSON.
"""
patient_id = int(patient_id)
try:
patient = Patient.objects.raw({"_id": patient_id}).first()
except errors.DoesNotExist:
return jsonify("Error 404: Patient with the requested ID does not "
"exist."), 404
try:
hrs = patient.heart_rates
avg_hr = sum(hrs)/len(hrs)
except ZeroDivisionError:
return jsonify("Error 400: No heart rates have been entered yet."), 400
except ValueError:
return jsonify("Error 400: No heart rates have been entered yet."), 400
except TypeError:
return jsonify("Error 400: No heart rates have been entered yet."), 400
return jsonify(avg_hr), 200
@app.route("/api/heart_rate/interval_average", methods=["POST"])
def get_heart_rate_interval_avg():
""" Returns the average of all previous heart rate measurements for the
patient from the inputted timestamp onwards. The JSON request received
should be a dictionary with the following key-value pairs:
"patient_id" : int representing patient's ID number
"heart_rate_average_since" : String representing timestamp
:return: A tuple of length 2. The second entry is always the HTTP status
code. If no errors raised, the first entry is a float representing the
average of the patient's past HR measurements in the specified interval,
serialized as JSON. Otherwise, it is an error message serialized as JSON.
"""
avg_request_dict = request.get_json()
try:
cleared = check_avg_request_dict(avg_request_dict)
except KeyError:
return jsonify("Error 400: Patient data dictionary missing keys."), 400
except ValueError:
return jsonify("Error 400: Invalid entry in patient data dict."), 400
except TypeError:
return jsonify("Error 400: Timestamp not of type String."), 400
try:
patient = Patient.objects.raw({"_id": cleared["patient_id"]}).first()
except errors.DoesNotExist:
return jsonify("Error 404: Patient with the requested ID does not "
"exist."), 404
start_index = -999
try:
for index, ts in enumerate(patient.timestamps):
if ts >= cleared["heart_rate_average_since"]:
start_index = index
break
if start_index == -999:
return jsonify("Error 400: No heart rates have been entered "
"past that date and time."), 400
hrs = patient.heart_rates[start_index:]
avg_hr = sum(hrs)/len(hrs)
except ZeroDivisionError:
return jsonify("Error 400: No heart rates have been entered past that "
"date and time."), 400
return jsonify(avg_hr), 200
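# Example client call for the endpoint above (illustrative sketch; host, port and the
# timestamp are assumptions -- the timestamp must match the str(datetime.datetime.now())
# format used when heart rates are stored, e.g. "2018-03-09 11:00:36.372339"):
#
#   import requests
#   r = requests.post("http://localhost:5000/api/heart_rate/interval_average",
#                     json={"patient_id": 1,
#                           "heart_rate_average_since": "2018-03-09 11:00:36.372339"})
#   print(r.json())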
# HELPER FUNCTIONS
def check_patient_data(patient_data):
""" Checks to see that the input for the /api/new_patient endpoint is
valid, i.e. a dictionary with the correct key-value pairs and types.
:param patient_data: The inputted patient data
:return: The input, if no exceptions are thrown
"""
if "patient_id" not in patient_data or "attending_email" not in \
patient_data or "user_age" not in patient_data:
app.logger.error("A key is missing from the patient data dictionary.")
raise KeyError
if None in patient_data.values():
app.logger.error("One of the fields is empty.")
raise ValueError
for value in patient_data.values():
if type(value) == bool:
app.logger.error("Boolean detected where it shouldn't be.")
raise ValueError
patient_id = int(patient_data["patient_id"])
if type(patient_id) != int:
app.logger.error("Patient ID not a number")
raise ValueError
if type(patient_data["attending_email"]) != str:
app.logger.error("Attending email not a String")
raise TypeError
if "@" not in patient_data["attending_email"] or "." not in \
patient_data["attending_email"]:
app.logger.error("Email address missing a special character "
"('@' or '.')")
raise ValueError
patient_age = float(patient_data["user_age"])
if type(patient_age) != float:
app.logger.error("Patient age not a number")
raise ValueError
if patient_age < 0:
app.logger.error("Patient age invalid")
raise ValueError
try:
if len(patient_data) > 3:
raise ValueError
except ValueError:
app.logger.warning("Too many keys in dictionary. Check user input to "
"save space.")
patient_data["patient_id"] = patient_id
patient_data["user_age"] = patient_age
return patient_data
def check_heart_rate(patient_data):
""" Checks to see that the input for the /api/heart_rate endpoint is
valid, i.e. a dictionary with the correct key-value pairs and types.
:param patient_data: The inputted patient data
:return: The input, if no exceptions are thrown
"""
if "patient_id" not in patient_data or "heart_rate" not in \
patient_data:
app.logger.error("A key is missing from the patient data dictionary.")
raise KeyError
if None in patient_data.values():
app.logger.error("One of the fields is empty.")
raise ValueError
for value in patient_data.values():
if type(value) == bool:
app.logger.error("Boolean detected where it shouldn't be.")
raise ValueError
patient_id = int(patient_data["patient_id"])
if type(patient_id) != int:
app.logger.error("Patient ID not a number")
raise ValueError
heart_rate = float(patient_data["heart_rate"])
if type(heart_rate) != float:
app.logger.error("Heart rate not a number")
raise ValueError
if heart_rate < 0:
app.logger.error("Invalid heart rate")
raise ValueError
try:
if len(patient_data) > 2:
raise ValueError
except ValueError:
app.logger.warning("Too many keys in dictionary. Check user input to "
"save space.")
patient_data["patient_id"] = patient_id
patient_data["heart_rate"] = heart_rate
return patient_data
def check_avg_request_dict(patient_data):
""" Checks to see that the input for the /api/heart_rate/interval_average
endpoint is valid, i.e. a dictionary with the correct key-value pairs and
types.
:param patient_data: The inputted patient data
:return: The input, if no exceptions are thrown
"""
if "patient_id" not in patient_data or "heart_rate_average_since" not in \
patient_data:
app.logger.error("A key is missing from the patient data dictionary.")
raise KeyError
if None in patient_data.values():
app.logger.error("One of the fields is empty.")
raise ValueError
for value in patient_data.values():
if type(value) == bool:
app.logger.error("Boolean detected where it shouldn't be.")
raise ValueError
patient_id = int(patient_data["patient_id"])
if type(patient_id) != int:
app.logger.error("Patient ID not a number")
raise ValueError
if type(patient_data["heart_rate_average_since"]) != str:
app.logger.error("Timestamp not a String")
raise TypeError
if "-" not in patient_data["heart_rate_average_since"] or \
":" not in patient_data["heart_rate_average_since"] or \
"." not in patient_data["heart_rate_average_since"]:
app.logger.error("Timestamp not correctly formatted")
raise ValueError
try:
if len(patient_data) > 2:
raise ValueError
except ValueError:
app.logger.warning("Too many keys in dictionary. Check user input to "
"save space.")
patient_data["patient_id"] = patient_id
return patient_data
def is_tachycardic(heart_rate, age):
""" Checks to see if a patient's heart rate is tachycardic based on
his/her age.
:param heart_rate: Float representing the patient's heart rate
:param age: Float representing the patient's age in years
:return: Boolean representing tachycardia status of patient
"""
if 1/365 <= age < 3/365:
if heart_rate > 159:
return True
else:
return False
elif 3/365 <= age < 7/365:
if heart_rate > 166:
return True
else:
return False
elif 7/365 <= age < 28/365:
if heart_rate > 182:
return True
else:
return False
elif 28/365 <= age < 90/365:
if heart_rate > 179:
return True
else:
return False
elif 90/365 <= age < 150/365:
if heart_rate > 186:
return True
else:
return False
elif 150/365 <= age < 1:
if heart_rate > 169:
return True
else:
return False
elif 1 <= age < 3:
if heart_rate > 151:
return True
else:
return False
elif 3 <= age < 5:
if heart_rate > 137:
return True
else:
return False
elif 5 <= age < 8:
if heart_rate > 133:
return True
else:
return False
elif 8 <= age < 12:
if heart_rate > 130:
return True
else:
return False
elif 12 <= age < 16:
if heart_rate > 119:
return True
else:
return False
else:
if heart_rate > 100:
return True
else:
return False
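# Illustrative calls (hypothetical values, derived from the thresholds above):
#   is_tachycardic(150, 10)   # True  -- threshold for ages 8-12 is > 130 bpm
#   is_tachycardic(90, 30)    # False -- adult threshold is > 100 bpm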
# INSTRUCTIONS FOR CALLING DRIVER
if __name__ == "__main__":
connect("mongodb://abg28:<EMAIL>:25253/bme590")
app.run(host="0.0.0.0")
|
StarcoderdataPython
|
1795837
|
<gh_stars>1-10
import hashlib, json
from collections import OrderedDict
class MerkleTree:
def __init__(self, listoftransaction=None):
self.listoftransaction = listoftransaction
self.ordered_transaction = OrderedDict()
def create(self):
listoftransaction = self.listoftransaction
past_transaction = self.ordered_transaction
temp_transaction = []
for index in range(0, len(listoftransaction), 2):
current = listoftransaction[index]
if index + 1 != len(listoftransaction):
current_right = listoftransaction[index + 1]
else:
current_right = ''
current_hash = current
if current_right != '':
current_right_hash = current_right
past_transaction[listoftransaction[index]] = current_hash
if current_right != '':
past_transaction[listoftransaction[index + 1]] = current_right_hash
if current_right != '':
temp_transaction.append(current_hash + current_right_hash)
else:
temp_transaction.append(current_hash)
if len(listoftransaction) != 1:
self.listoftransaction = temp_transaction
self.ordered_transaction = past_transaction
self.create()
def Get_ordered_transacion(self):
return self.ordered_transaction
def Get_Root_node(self):
last_key = list(self.ordered_transaction.keys())[-1]
return self.ordered_transaction[last_key]
def calculateMerkleRoot(self, txs):
tx_hashes = []
for tx in txs:
tx_hashes.append(tx.hash)
# transaction = ['t1', 't2', 't3', 't4']
self.listoftransaction = tx_hashes
self.create()
past_transaction = self.Get_ordered_transacion()
return self.Get_Root_node()
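# Minimal usage sketch (assumes the leaf values are already hash strings, as in
# calculateMerkleRoot above, which passes precomputed tx.hash values):
#
#   tree = MerkleTree(['h1', 'h2', 'h3', 'h4'])
#   tree.create()
#   root = tree.Get_Root_node()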
|
StarcoderdataPython
|
4840926
|
<filename>test/test_admin_host.py<gh_stars>1-10
#!/usr/bin/env python
#
# ___INFO__MARK_BEGIN__
#######################################################################################
# Copyright 2016-2021 Univa Corporation (acquired and owned by Altair Engineering Inc.)
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#######################################################################################
# ___INFO__MARK_END__
#
from nose import SkipTest
from .utils import needs_uge
from .utils import create_config_file
from uge.api.qconf_api import QconfApi
from uge.config.config_manager import ConfigManager
from uge.log.log_manager import LogManager
from uge.exceptions.object_not_found import ObjectNotFound
from uge.exceptions.object_already_exists import ObjectAlreadyExists
from uge.exceptions.authorization_error import AuthorizationError
create_config_file()
API = QconfApi()
CONFIG_MANAGER = ConfigManager.get_instance()
LOG_MANAGER = LogManager.get_instance()
@needs_uge
def test_list_ahosts():
try:
hl = API.list_ahosts()
assert(hl is not None)
except ObjectNotFound as ex:
raise SkipTest('There are no configured UGE admin hosts.')
def test_object_already_exists():
try:
try:
hl = API.list_ahosts()
API.add_ahosts([hl[0]])
assert(False)
except ObjectNotFound as ex:
raise SkipTest('There are no configured UGE admin hosts.')
except ObjectAlreadyExists as ex:
# ok
pass
def test_delete_and_add_ahosts():
try:
hl = API.list_ahosts()
host_name = None
for h in hl:
if h != CONFIG_MANAGER['host']:
host_name = h
break
if not host_name:
raise SkipTest('Could not find UGE admin host that could be removed.')
try:
hl2 = API.delete_ahosts([host_name])
assert(hl2.data.count(host_name) == 0)
except AuthorizationError as ex:
raise SkipTest('Skip master host warning %s.' % str(ex))
hl3 = API.add_ahosts(host_name)
assert(hl3.data.count(host_name) == 1)
except ObjectNotFound as ex:
raise SkipTest('There are no configured UGE admin hosts.')
|
StarcoderdataPython
|
1709680
|
<filename>services/evaluation/ocr_evaluation_service.py<gh_stars>1-10
import os
from typing import List, Dict
import csv
import torch
from scipy import spatial
import numpy as np
from entities.batch_representation import BatchRepresentation
from enums.evaluation_type import EvaluationType
from enums.metric_type import MetricType
from enums.run_type import RunType
from overrides import overrides
from services.evaluation.base_evaluation_service import BaseEvaluationService
from services.arguments.postocr_characters_arguments_service import PostOCRCharactersArgumentsService
from services.vocabulary_service import VocabularyService
from services.metrics_service import MetricsService
from services.file_service import FileService
from services.process.ocr_character_process_service import OCRCharacterProcessService
from services.data_service import DataService
class OCREvaluationService(BaseEvaluationService):
def __init__(
self,
arguments_service: PostOCRCharactersArgumentsService,
vocabulary_service: VocabularyService,
process_service: OCRCharacterProcessService,
metrics_service: MetricsService,
file_service: FileService,
data_service: DataService):
super().__init__()
self._vocabulary_service = vocabulary_service
self._process_service = process_service
self._metrics_service = metrics_service
self._file_service = file_service
self._arguments_service = arguments_service
self._data_service = data_service
(self._input_strings,
self._input_target_strings,
self._original_edit_distances,
self._original_levenshtein_distance_sum,
self._original_histogram) = (None, None, None, None, None)
self.final_results = []
@overrides
def evaluate_batch(
self,
output: torch.Tensor,
batch_input: BatchRepresentation,
evaluation_types: List[EvaluationType],
batch_index: int) -> Dict[EvaluationType, List]:
"""Evaluates the generated output based on the chosen evaluation types
:param output: the generated output from the model
:type output: torch.Tensor
:param evaluation_types: list of different types of evaluations that should be performed
:type evaluation_types: List[EvaluationType]
:return: a dictionary with evaluation scores for every type
:rtype: Dict[EvaluationType, List]
"""
if self._input_strings is None:
(self._input_strings,
self._input_target_strings,
self._original_edit_distances,
self._original_levenshtein_distance_sum,
self._original_histogram) = self._process_service.calculate_data_statistics(run_type=RunType.Test, log_summaries=False)
_, _, predictions = output
predictions = predictions.cpu().detach().numpy()
targets = batch_input.targets[:, 1:].cpu().detach().numpy()
predicted_characters = []
target_characters = []
for i in range(targets.shape[0]):
indices = np.array(
(targets[i] != self._vocabulary_service.pad_token), dtype=bool)
predicted_characters.append(predictions[i])
target_characters.append(targets[i][indices])
evaluation = {}
predicted_strings = [self._vocabulary_service.ids_to_string(
x, exclude_special_tokens=True, cut_after_end_token=True) for x in predicted_characters]
target_strings = [self._vocabulary_service.ids_to_string(
x, exclude_special_tokens=True) for x in target_characters]
if EvaluationType.JaccardSimilarity in evaluation_types:
jaccard_score = np.mean([self._metrics_service.calculate_jaccard_similarity(
target_strings[i], predicted_strings[i]) for i in range(len(predicted_strings))])
evaluation[EvaluationType.JaccardSimilarity] = [jaccard_score]
if EvaluationType.LevenshteinEditDistanceImprovement in evaluation_types:
predicted_levenshtein_distances = [
self._metrics_service.calculate_levenshtein_distance(
predicted_string, target_string) for predicted_string, target_string in zip(predicted_strings, target_strings)
]
evaluation[EvaluationType.LevenshteinEditDistanceImprovement] = predicted_levenshtein_distances
indices_order = [
self._input_target_strings.index(target_string) for target_string in target_strings
]
input_strings = [self._input_strings[i]
for i in indices_order]
input_distances = [
self._original_edit_distances[i] for i in indices_order]
for x in zip(input_strings, predicted_strings, target_strings, input_distances, predicted_levenshtein_distances):
self.final_results.append(x)
return evaluation
@overrides
def save_results(self, evaluation: Dict[EvaluationType, List]):
edit_distances = evaluation[EvaluationType.LevenshteinEditDistanceImprovement]
jaccard_similarities = evaluation[EvaluationType.JaccardSimilarity]
predicted_edit_sum = sum(edit_distances)
original_edit_sum = self._original_levenshtein_distance_sum
improvement_percentage = (
1 - (float(predicted_edit_sum) / original_edit_sum)) * 100
checkpoints_path = self._file_service.combine_path(
self._file_service.get_checkpoints_path(),
'output',
create_if_missing=True)
pickle_filename = f'output-{self._arguments_service.checkpoint_name}'
self._data_service.save_python_obj(
{
'original-edit-distances': self._original_edit_distances,
'edit-distances': edit_distances,
'jaccard-similarities': jaccard_similarities
},
path=checkpoints_path,
name=pickle_filename,
print_success=False)
csv_file_path = os.path.join(
checkpoints_path, f'output-{self._arguments_service.checkpoint_name}.csv')
with open(csv_file_path, 'w', encoding='utf-8', newline='') as output_file:
csv_writer = csv.DictWriter(output_file, fieldnames=[
'Input',
'Prediction',
'Target',
'Input edit distance',
'Predicted edit distance',
'Difference'])
csv_writer.writeheader()
for result in self.final_results:
csv_writer.writerow({
'Input': result[0],
'Prediction': result[1],
'Target': result[2],
'Input edit distance': result[3],
'Predicted edit distance': result[4],
'Difference': (result[3] - result[4])
})
csv_writer.writerow(
{'Input': f'Improvement percentage: {improvement_percentage}'})
print(f'Improvement percentage: {improvement_percentage}')
return True
|
StarcoderdataPython
|
1783206
|
/home/runner/.cache/pip/pool/5b/9f/71/72b1957fed5e77d1644200b1816d1df5ada510d6b11d4a58540676c34d
|
StarcoderdataPython
|
186501
|
import unittest
from osm_export_tool.sources import Overpass
from osm_export_tool.mapping import Mapping
class TestMappingToOverpass(unittest.TestCase):
def test_mapping(self):
y = '''
buildings:
types:
- points
select:
- column1
where: column2 IS NOT NULL
other1:
types:
- points
- polygons
select:
- column1
- irrelevant
where: column2 IS NOT NULL AND column3 IN ('foo','bar')
other2:
types:
- lines
select:
- column5:key
'''
mapping = Mapping(y)
nodes, ways, relations = Overpass.filters(mapping)
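# The mapping's where clauses (and bare selected keys when no where clause is given) become Overpass tag filters per geometry type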
self.assertCountEqual(nodes,["['column3'~'foo|bar']","['column2']"])
# force quoting of strings to handle keys with colons
self.assertCountEqual(ways,["['column5:key']","['column3'~'foo|bar']","['column2']"])
self.assertCountEqual(relations,["['column3'~'foo|bar']","['column2']"])
class TestSQLToOverpass(unittest.TestCase):
def test_basic(self):
s = Overpass.sql("name = 'somename'")
self.assertEqual(s,["['name'='somename']"])
s = Overpass.sql("level > 4")
self.assertEqual(s,["['level']"])
def test_basic_list(self):
s = Overpass.sql("name IN ('val1','val2')")
self.assertEqual(s,["['name'~'val1|val2']"])
def test_whitespace(self):
s = Overpass.sql("name = 'some value'")
self.assertEqual(s,["['name'='some value']"])
def test_notnull(self):
s = Overpass.sql("name is not null")
self.assertEqual(s,["['name']"])
def test_and_or(self):
s = Overpass.sql("name1 = 'foo' or name2 = 'bar'")
self.assertEqual(s,["['name1'='foo']","['name2'='bar']"])
s = Overpass.sql("(name1 = 'foo' and name2 = 'bar') or name3 = 'baz'")
self.assertEqual(s,["['name1'='foo']","['name2'='bar']","['name3'='baz']"])
|
StarcoderdataPython
|
1692044
|
<filename>Algorithm1B.py
'''
Copyright (c) 2020 <NAME> (<NAME>)
@file Algorithm1B.py
@date 2020/03/08
@brief Lane detection application
@license This project is released under the BSD-3-Clause license.
'''
import numpy as np
import cv2
'''
@brief Application which detects and overlays road lanes in a video stream
'''
class Algorithm1B:
#K = np.array([]) # Camera intrinsic matrix
#distortionCoeffs = np.array([]) # Camera distortion coefficients
cropFactor = 0 # 0-1 value that represents percentage to crop from the top of a frame to isolate lanes
cropLineY = 0 # cropFactor value in terms of pixels from the top of the frame
frame = np.array([])
laneMask = np.array([])
#yellowHSVLowBound = np.array([10, 70, 200])
#yellowHSVUpperBound = np.array([65, 180, 255])
yellowHSVLowBound = np.array([10, 30, 140])
yellowHSVUpperBound = np.array([30, 255, 255])
#yellowHSVLowBound = np.array([10, 0, 200])
#yellowHSVUpperBound = np.array([30, 75, 255])
#whiteHSVLowBound = np.array([0, 0, 190])
#whiteHSVUpperBound = np.array([170, 25, 255])
whiteHSVLowBound = np.array([0, 0, 208])
whiteHSVUpperBound = np.array([255, 75, 255])
def __init__(self, cropFactor=0.6):
#self.K = K
#self.distortionCoeffs = distortionCoeffs
self.cropFactor = cropFactor
'''
@brief Undistort an image, remove noise and crop to ROI
@param frame Input BGR frame from the video stream
@return Blurred copy of the frame cropped to the road region of interest
'''
def prepareImage(self, frame):
h = frame.shape[0]
w = frame.shape[1]
#newK, _ = cv2.getOptimalNewCameraMatrix(self.K, self.distortionCoeffs, (w, h), 0, (w, h))
#undistortedFrame = cv2.undistort(frame, self.K, self.distortionCoeffs, None, newK)
#self.frame = undistortedFrame
self.frame = frame
blurredFrame = cv2.GaussianBlur(self.frame, (5,5), 0)
self.cropLineY = int(self.cropFactor*h)
croppedFrame = blurredFrame[self.cropLineY:h, :, :]
return croppedFrame
'''
@brief Determine the lane line equations from a frame
@param frame Cropped BGR frame containing the road region
@return 2x2 array of [slope, intercept] rows for the two lane lines, or the raw per-segment parameters when fewer than two segments are found
'''
def getLaneLines(self, frame):
hsvFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
yellowMask = cv2.inRange(hsvFrame, self.yellowHSVLowBound, self.yellowHSVUpperBound)
whiteMask = cv2.inRange(hsvFrame, self.whiteHSVLowBound, self.whiteHSVUpperBound)
self.laneMask = yellowMask | whiteMask
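# Extract candidate line segments from the combined lane mask with the probabilistic Hough transform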
lines = cv2.HoughLinesP(self.laneMask, 1, np.pi / 180, 50)
lineParams = []
if (lines is not None and len(lines) > 1):
for i in range(0, len(lines)):
for x1,y1,x2,y2 in lines[i]:
#cv2.line(undistortedFrame,(x1,y1+cropLineY),(x2,y2+cropLineY),(0,0,255),2)
if (y2 != y1 and x2 != x1):
m = (y2-y1) / (x2-x1)
b = y1 - m*x1
if (np.isnan(m) == False and np.isnan(b) == False):
lineParams.append([m,b])
else:
continue
lineParams = np.array(lineParams)
if (len(lineParams) <= 1):
return lineParams
# Define criteria = ( type, max_iter = 10 , epsilon = 1.0 )
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
# Set flags (Just to avoid line break in the code)
flags = cv2.KMEANS_RANDOM_CENTERS
# Apply KMeans
lineParams = np.float32(lineParams)
_, _, centers = cv2.kmeans(lineParams, 2, None, criteria, 10, flags)
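# Each cluster centre is an average (slope, intercept) pair for one lane line; intercepts are shifted back into full-frame coordinates by adding the crop offset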
m1 = centers[0,0]
m2 = centers[1,0]
b1 = centers[0,1] + self.cropLineY
b2 = centers[1,1] + self.cropLineY
laneParams = np.array([[m1, b1],
[m2, b2]])
return laneParams
'''
@brief Display the lane lines, lane mesh and drive trajectory angle on the frame
@param laneParams Array of [slope, intercept] rows describing the lane lines
@return Annotated frame and a binary image of the lane region
'''
def visualization(self, laneParams):
if (len(laneParams) <= 1):
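# Return an empty lane mask alongside the frame so detectLane() can always unpack two values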
return self.frame, np.zeros((self.frame.shape[0], self.frame.shape[1]), dtype='uint8')
h = self.frame.shape[0]
w = self.frame.shape[1]
m1 = laneParams[0,0]
m2 = laneParams[1,0]
b1 = laneParams[0,1]
b2 = laneParams[1,1]
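# Intersect each lane line with the bottom edge of the frame and the crop line to obtain drawable endpoints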
y1 = h
y2 = self.cropLineY
y3 = h
y4 = self.cropLineY
x1 = int((y1 - b1) / m1)
x2 = int((y2 - b1) / m1)
x3 = int((y3 - b2) / m2)
x4 = int((y4 - b2) / m2)
outputFrame = self.frame.copy()
# Draw lane lines
cv2.line(outputFrame, (x1,y1), (x2,y2), color=(0,255,0), thickness=3)
cv2.line(outputFrame, (x3,y3), (x4,y4), color=(0,255,0), thickness=3)
# Create a translucent mesh over the lane
overlay = outputFrame.copy()
laneMesh = np.array([[x1,y1], [x2,y2], [x4,y4], [x3,y3]])
cv2.fillPoly(overlay, [laneMesh], color=(0, 0, 255))
cv2.addWeighted(src1=overlay, alpha=0.5, \
src2=outputFrame, beta=0.5, \
dst=outputFrame, gamma=0)
# Calculate the centerline angle and display the driving trajectory
laneAngle1 = np.degrees(np.arctan(m1))
laneAngle2 = np.degrees(np.arctan(m2))
if (laneAngle1 < 0):
laneAngle1 = laneAngle1 + 180
if (laneAngle2 < 0):
laneAngle2 = laneAngle2 + 180
centerlineAngle = (laneAngle1 + laneAngle2) / 2
textOverlay = 'Drive Trajectory: ' \
+ str(round(centerlineAngle - 90)) \
+ ' degrees from center'
cv2.putText(outputFrame, textOverlay, (10,30), \
cv2.FONT_HERSHEY_SIMPLEX, 1, \
color=(0,0,255), thickness=3)
scale = 1000 / w # percent of original size
dim = (int(w * scale), int(h * scale))
dimCropped = (int(self.laneMask.shape[1] * scale), int(self.laneMask.shape[0] * scale))
'''
cv2.imshow("Frame", cv2.resize(outputFrame, dim))
cv2.imshow("Mask", cv2.resize(self.laneMask, dimCropped))
cv2.waitKey(0)
'''
# Create binary image of lane
binaryImage = np.zeros((self.frame.shape[0], self.frame.shape[1]))
cv2.fillPoly(binaryImage, [laneMesh], color=(255, 255, 255))
return outputFrame, binaryImage.astype('uint8')
def detectLane(self, frame):
preparedFrame = self.prepareImage(frame)
laneParams = self.getLaneLines(preparedFrame)
outputFrame, binaryImage = self.visualization(laneParams)
return binaryImage
if __name__ == '__main__':
'''
K = np.array([[9.037596e+02, 0.000000e+00, 6.957519e+02],
[0.000000e+00, 9.019653e+02, 2.242509e+02],
[0.000000e+00, 0.000000e+00, 1.000000e+00]])
distortionCoeffs = np.array([-3.639558e-01, 1.788651e-01, 6.029694e-04, -3.922424e-04, -5.382460e-02])
K = np.array([[1.15422732e+03, 0.00000000e+00, 6.71627794e+02],
[0.00000000e+00, 1.14818221e+03, 3.86046312e+02],
[0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
distortionCoeffs = np.array([-2.42565104e-01, -4.77893070e-02, -1.31388084e-03, -8.79107779e-05, 2.20573263e-02])
'''
# How much to crop a frame from the top to isolate the road
cropFactor = 0.5
# Run application
laneDetector = Algorithm1B(cropFactor)
frame = cv2.imread('sampleLane2.png')
laneDetector.detectLane(frame)
# preparedFrame = laneDetector.prepareImage(frame)
# laneParams = laneDetector.getLaneLines(preparedFrame)
# outputFrame = laneDetector.visualization(laneParams)
|
StarcoderdataPython
|
3213806
|
<gh_stars>1-10
import unittest
from translate.factory import get_translator
youdao = get_translator('youdao')
class YoudaoTest(unittest.TestCase):
def test_query(self):
youdao.query('hello')
self.assertTrue('发音' in youdao.format())
self.assertTrue('helˈō' in youdao.format())
self.assertTrue('həˈləʊ' in youdao.format())
self.assertTrue('英汉翻译' in youdao.format())
self.assertTrue('表示问候, 惊奇或唤起注意时的用语' in youdao.format())
self.assertTrue('网络释义' in youdao.format(1))
self.assertTrue('Hello Kitty' in youdao.format(1))
|
StarcoderdataPython
|
192448
|
#!/usr/bin/env python
# Copyright (c) 2014-2016 <NAME>
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os, sys, codecs, re
from HTMLParser import HTMLParser
from htmlentitydefs import name2codepoint
import fontforge
cps = set();
# create a subclass and override the handler methods
class MyHTMLParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.outdata = u""
def handle_data(self, data):
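# Collapse runs of whitespace in text nodes before accumulating them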
data = re.sub(r"[\t\r\n ]+", " ", data);
self.outdata += data
def handle_entityref(self, name):
try:
c = unichr(name2codepoint[name])
except KeyError:
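# HTML4's entity table has no 'apos'; map it to a typographic apostrophe (U+2019) and skip any other unknown entity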
if (name == 'apos'):
c = unichr(8217)
#c = unichr(39)
else:
c = ""
self.outdata += c
def get_results(self):
return self.outdata
for dtuple in os.walk(sys.argv[1]):
d, ds, fs = dtuple
for f in fs:
if (f.lower().endswith('.xhtml') or
f.lower().endswith('.html') or
f.lower().endswith('.xml')):
path = os.path.join(d, f)
parser = MyHTMLParser()
inp = codecs.open(path, encoding='utf-8')
for line in inp:
parser.feed(line)
parser.close()
print parser.get_results();
for c in parser.get_results():
cps.add(c)
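# Subset each font: keep only glyphs for code points seen in the documents, then overwrite the font file (a .bak copy is kept)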
for fontfile in sys.argv[2:]:
font = fontforge.open(fontfile)
font.selection.none()
for c in list(cps):
font.selection.select(("unicode", "more"), ord(c))
font.selection.invert()
font.clear()
os.rename(fontfile, fontfile + ".bak")
font.generate(fontfile)
|
StarcoderdataPython
|
3269562
|
<reponame>chechir/doors
import datetime
import re
import numpy as np
import pandas as pd
import pytz
def get_day_of_year(dates):
return _get_timestamp_attribute(dates, "dayofyear")
def get_day_of_week(dates):
return pd.Series(dates).apply(lambda x: x.weekday()).values
def get_hour(dates):
return _get_timestamp_attribute(dates, "hour")
def get_month(dates):
return _get_timestamp_attribute(dates, "month")
def get_week(dates):
return _get_timestamp_attribute(dates, "week")
def _get_timestamp_attribute(dates, attr):
timestamps = pd.Series(dates)
result = timestamps.apply(lambda x: getattr(x, attr)).values
return result
def utc_to_bst(dt):
dt = pytz.timezone("utc").localize(dt)
bst_dt = dt.astimezone(pytz.timezone("Europe/London"))
return bst_dt
def extract_timestamp(string):
regex_expression = "[1-2][0-9]{1,7}_[0-9]{1,6}"
result = re.search(regex_expression, string)
if result:
timestamp = result.group(0)
else:
raise ValueError("no timestamp found.")
return timestamp
def replace_timestamp(string):
timestamp = extract_timestamp(string)
new_string = string.replace(timestamp, get_timestamp())
return new_string
def get_diff_in_days(dates, add_init=False):
""" Gives the difference in days for datetime64 objects """
diff_date = np.diff(dates)
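# Convert the timedelta64 differences to whole minutes, then to fractional days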
delta_minutes = diff_date.astype("timedelta64[m]") / np.timedelta64(1, "m")
delta_days = 1.0 * delta_minutes / (60 * 24)
if add_init:
delta_days = np.concatenate([[np.nan], delta_days])
return delta_days
def cum_diff_years(dates, init=0):
""" Calculates cumulative difference in years betweeon dates[i] and date[] """
experience_days = np.empty(len(dates))
experience_days[1:] = np.cumsum(get_diff_in_days(dates))
experience_days[0] = init
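# experience_days[i] is the number of days elapsed from dates[0] to dates[i], starting from init at index 0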
experience_years = experience_days / 365
return experience_years
def get_months(dates):
return dates.astype("datetime64[M]").astype(int) % 12 + 1
def get_datetime_now():
return datetime.datetime.utcnow()
def get_datetime_end_of_day():
now = get_datetime_now()
return datetime.datetime(now.year, now.month, now.day, 23, 59, 59)
def get_datetime_start_of_month():
this_morning = get_datetime_this_morning()
start_of_month = this_morning.replace(day=1)
return start_of_month
def get_datetime_this_morning():
now = get_datetime_now()
this_morning = datetime.datetime(year=now.year, month=now.month, day=now.day)
return this_morning
def get_datetime_tomorrow_morning():
this_morning = get_datetime_this_morning()
tomorrow = this_morning + datetime.timedelta(days=1)
return tomorrow
def get_datetime_yesterday_morning():
this_morning = get_datetime_this_morning()
yesterday = this_morning - datetime.timedelta(days=1)
return yesterday
def get_datetime_last_week():
this_morning = get_datetime_this_morning()
last_week = this_morning - datetime.timedelta(days=7)
return last_week
def round_datetime64(v, to="s"):
# can replace this with v.astype('datetime64[{}]'.format(to))
if to == "s":
decimals = -9
elif to == "m":
decimals = -10
else:
assert isinstance(to, int), "Unexpected to."
decimals = to
return np.round(v.astype(np.int64), decimals).astype("datetime64[ns]")
def get_timestamp(time_format="%Y%m%d_%H%M%S"):
""" Returns a timestamp by checking the date and time at the moment. """
return str(datetime.datetime.utcnow().strftime(time_format))
def is_datetime(v):
return "datetime" in str(v.dtype)
def get_all_dates_in_range(start_date, end_date=None):
if end_date is None:
end_date = datetime.datetime.now().date()
assert isinstance(start_date, datetime.date)
assert isinstance(end_date, datetime.date)
n_days = (end_date - start_date).days + 1 # include the max date
all_days = [start_date + datetime.timedelta(n) for n in range(n_days)]
return all_days
def get_season(day_number):
day_spring_start = 80
day_summer_start = 172
day_autumn_start = 264
day_winter_start = 356
if (day_number < day_spring_start) | (day_number >= day_winter_start):
return "winter"
if day_number < day_summer_start:
return "spring"
if day_number < day_autumn_start:
return "summer"
if day_number < day_winter_start:
return "autumn"
|
StarcoderdataPython
|