id (string, lengths 1-7) | text (string, lengths 6-1.03M) | dataset_id (string, 1 class)
---|---|---|
3330379
|
<filename>Python/025.py
# coding=utf-8
'''
25) [CHALLENGE] Write a program that reads the lengths of three line segments.
Check their lengths and report whether it is possible to form a triangle with
them. Mathematically, for three segments to form a triangle, the length of each
side must be less than the sum of the other two.
'''
r1 = int(input('Enter the length of the first segment: '))
r2 = int(input('Enter the length of the second segment: '))
r3 = int(input('Enter the length of the third segment: '))
# the inequality must be strict (<, not <=), per the statement above
if (r1 < (r2 + r3)) and (r2 < (r1 + r3)) and (r3 < (r1 + r2)):
print('It is possible to form a triangle.')
else:
print('It is not possible to form a triangle with these segments!')
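# A minimal sketch (not part of the original exercise) of the same strict
# triangle-inequality test as a reusable function, with a couple of checks:
def forms_triangle(a, b, c):
    """Return True if segments a, b and c can form a (non-degenerate) triangle."""
    return a < b + c and b < a + c and c < a + b

assert forms_triangle(3, 4, 5)      # classic 3-4-5 right triangle
assert not forms_triangle(1, 2, 3)  # degenerate case: 1 + 2 == 3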
|
StarcoderdataPython
|
3350819
|
import bpy
from ..goldsrc_shader_base import GoldSrcShaderBase
from ...shader_base import Nodes
from .....library.goldsrc.mdl_v10.structs.texture import MdlTextureFlag
class GoldSrcShaderMode5(GoldSrcShaderBase):
SHADER: str = 'goldsrc_shader_mode5'
def create_nodes(self, material_name: str, rad_info=None):
if super().create_nodes(material_name) in ['UNKNOWN', 'LOADED']:
return
basetexture = self.load_texture(material_name)
basetexture_node = self.create_node(Nodes.ShaderNodeTexImage, '$basetexture')
basetexture_node.image = basetexture
if rad_info is not None:
self._emit_surface(basetexture_node, rad_info)
return
vertex_color_alpha = self.create_node(Nodes.ShaderNodeVertexColor)
vertex_color_alpha.layer_name = "RENDER_AMOUNT"
material_output = self.create_node(Nodes.ShaderNodeOutputMaterial)
shader = self.create_node(Nodes.ShaderNodeEmission, self.SHADER)
self.connect_nodes(basetexture_node.outputs['Color'], shader.inputs['Color'])
math_mul = self.create_node(Nodes.ShaderNodeMath)
math_mul.operation = 'MULTIPLY'
self.connect_nodes(basetexture_node.outputs['Alpha'], math_mul.inputs[0])
self.connect_nodes(vertex_color_alpha.outputs['Color'], math_mul.inputs[1])
if self._vavle_material.flags & MdlTextureFlag.CHROME:
uvs_node = self.create_node(Nodes.ShaderNodeTexCoord)
self.connect_nodes(uvs_node.outputs['Reflection'], basetexture_node.inputs['Vector'])
shader_mix_node = self.create_node(Nodes.ShaderNodeMixShader)
self.connect_nodes(math_mul.outputs[0], shader_mix_node.inputs['Fac'])
self.connect_nodes(shader.outputs['Emission'], shader_mix_node.inputs[2])
transparent_node = self.create_node(Nodes.ShaderNodeBsdfTransparent)
self.connect_nodes(transparent_node.outputs['BSDF'], shader_mix_node.inputs[1])
self.connect_nodes(shader_mix_node.outputs[0], material_output.inputs['Surface'])
|
StarcoderdataPython
|
4825515
|
<reponame>aaronbiller/comparator<gh_stars>1-10
import re
from io import open
from setuptools import setup, find_packages
README = 'README.rst'
CHANGES = 'CHANGES.rst'
VERSION_FILE = 'comparator/__init__.py'
def read(path):
with open(path, encoding='utf-8') as f:
return f.read()
def find_version():
version_file = read(VERSION_FILE)
version_match = re.search(
r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file,
re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
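# For reference, a hypothetical line in comparator/__init__.py that the regex
# above would match, making find_version() return '0.1.0':
#     __version__ = '0.1.0'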
setup(
name='comparator',
version=find_version(),
author='<NAME>',
author_email='<EMAIL>',
description='Utility for comparing results between data sources',
long_description=read(README) + '\n' + read(CHANGES),
license='Apache 2.0',
keywords='utility compare database',
url='https://github.com/aaronbiller/comparator',
packages=find_packages(),
tests_require=[
'pytest',
'pytest-cov',
'mock',
'spackl'
],
install_requires=[
'future==0.16.0',
'google-cloud-bigquery==1.5.0',
'psycopg2-binary==2.7.5',
'PyYAML',
'SQLAlchemy==1.2.11',
'sqlalchemy-redshift==0.7.1',
'pandas>=0.22.0',
'pytest-runner==4.2',
],
extras_require={
':python_version == "2.7"': [
'pathlib2==2.3.2',
],
},
include_package_data=True,
scripts=[],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Database',
'Topic :: Utilities',
],
)
|
StarcoderdataPython
|
1679727
|
"""Methods related to sampling and smoothing elevations."""
import time
import numpy as np
from sfrmaker.routing import get_nextupsegs, get_upsegs, make_graph
def smooth_elevations(fromids, toids, elevations, start_elevations=None): # elevup, elevdn):
"""
Parameters
----------
fromids : sequence of hashables
toids : sequence of hashables
Downstream connections of fromids
elevations : sequence of floats
Elevation for each edge (line) in a stream network, or if start_elevations
are specified, the end elevation for each edge.
start_elevations : sequence of floats, optional
Start elevation for edge (line) in a stream network.
By default, None.
Returns
-------
Elevations : dict or tuple
Dictionary of smoothed edge elevations,
or smoothed end elevations, start elevations
"""
# make forward and reverse dictionaries with routing info
graph = dict(zip(fromids, toids))
assert 0 in set(graph.values()), 'No outlets in routing network!'
graph_r = make_graph(toids, fromids)
# make dictionaries of segment end elevations
elevations = dict(zip(fromids, elevations))
if start_elevations is not None:
elevmax = dict(zip(fromids, start_elevations))
def get_upseg_levels(seg):
"""Traverse routing network, returning a list of segments
at each level upstream from the outlets. (level 0 route to seg;
segments in level 1 route to a segment in level 0, etc.)
Parameters:
-----------
seg : int
Starting segment number
Returns
-------
all_upsegs : list
List with list of segments at each level
"""
upsegs = graph_r[seg].copy()
all_upsegs = [upsegs]
for i in range(len(fromids)):
upsegs = get_nextupsegs(graph_r, upsegs)
if len(upsegs) > 0:
all_upsegs.append(upsegs)
else:
break
return all_upsegs
def reset_elevations(seg):
"""Reset segment elevations above (upsegs) and below (outseg) a node.
"""
oseg = graph[seg]
all_upsegs = np.array(list(get_upsegs(graph_r, seg)) + [seg]) # all segments upstream of node
elevmin_s = np.min([elevations[s] for s in all_upsegs]) # minimum current elevation upstream of node
oldmin_s = elevations[seg]
elevs = [elevmin_s, oldmin_s]
if oseg > 0: # if segment is not an outlet,
if start_elevations is not None:
elevs.append(elevmax[oseg]) # outseg start elevation (already updated)
# set segment end elevation as min of
# upstream elevations, current elevation, outseg start elevation
elevations[seg] = np.min(elevs)
# if the node is not an outlet, reset the outseg max if the current min is lower
if oseg > 0:
if start_elevations is not None:
next_reach_elev = elevmax[oseg]
elevmax[graph[seg]] = np.min([elevmin_s, next_reach_elev])
else:
next_reach_elev = elevations[oseg]
elevations[graph[seg]] = np.min([elevmin_s, next_reach_elev])
print('\nSmoothing elevations...')
ta = time.time()
# get list of segments at each level, starting with 0 (outlet)
segment_levels = get_upseg_levels(0)
# at each level, reset all of the segment elevations as necessary
for level in segment_levels:
for s in level:
if 0 in level:
j=2
reset_elevations(s)
print("finished in {:.2f}s".format(time.time() - ta))
if start_elevations is not None:
return elevations, elevmax
return elevations
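# A minimal usage sketch (hypothetical segment ids and elevations; assumes
# sfrmaker is installed and behaves as used above). Segments 2 and 3 route to
# segment 1, which routes to the outlet (toid 0):
#     fromids = [1, 2, 3]
#     toids = [0, 1, 1]
#     elevations = [10.0, 9.5, 11.8]
#     smoothed = smooth_elevations(fromids, toids, elevations)
# After smoothing, smoothed[1] is no higher than the lowest elevation upstream
# of segment 1.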
|
StarcoderdataPython
|
3313048
|
#!/usr/bin/env python
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, The Spark Structured Playground Project"
__credits__ = []
__license__ = "Apache License"
__version__ = "2.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Education Purpose"
import pandas as pd
from pyspark.sql.types import IntegerType
from sklearn.base import BaseEstimator, TransformerMixin
from pyspark.sql.functions import udf
from ssp.utils.ai_key_words import AIKeyWords
def labelme(text, keywords=AIKeyWords.POSITIVE.split("|")):
text = text.replace("#", "").replace("@", "")
res = 0
for keyword in keywords:
if f' {keyword.lower()} ' in f' {text.lower()} ':
res = 1
return res
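# Quick sanity check of labelme with an explicit keyword list (hypothetical
# text; AIKeyWords.POSITIVE is the real default source of keywords):
#     labelme("just read a great #deeplearning paper", ["deeplearning"])  -> 1
#     labelme("nothing to see here", ["deeplearning"])                    -> 0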
labelme_udf = udf(labelme, IntegerType())
class SSPTextLabeler(BaseEstimator, TransformerMixin):
def __init__(self, input_col=None, output_col="label"):
self._input_col = input_col
self._output_col = output_col
# Return self nothing else to do here
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
if isinstance(X, pd.DataFrame):
if self._input_col:
X[self._output_col] = X[self._input_col].swifter.apply(lambda x: labelme(x, AIKeyWords.POSITIVE.split("|")))
print(X[self._output_col].value_counts())
return X
elif isinstance(X, list):
# labelme is a module-level function (not a method) and expects a list of keywords
X = [labelme(x, AIKeyWords.POSITIVE.split("|")) for x in X]
return X
elif isinstance(X, str):
return labelme(X, AIKeyWords.POSITIVE.split("|"))
|
StarcoderdataPython
|
150460
|
<filename>ch01-05/05_05-toppings.py
requested_toppings = ['mushrooms', 'extra cheese']
if 'mushrooms' in requested_toppings:
print("Adding mushrooms")
if 'pepperoni' in requested_toppings:
print("Adding pepperoni.")
if 'extra cheese' in requested_toppings:
print("Adding extra cheese.")
print("\nFinished making your pizza!")
"""TRY IT YOURSELFS"""
alien_color = 'green'
if alien_color is 'red':
print("The alien is shot down! You get 5 points!")
elif alien_color is 'yellow':
print("He's weak! Keep shooting!")
elif alien_color is 'green':
print("The alien is flying high!")
age = 3
if age <= 2:
print("You are a baby")
elif age <= 4:
print("You are a toddler")
elif age <= 13:
print("You are a kid")
elif age <= 20:
print("You are a teenager.")
elif age <= 65:
print("You are an adult")
else:
print("You are an elder")
faves = ['kiwi', 'apple', 'orange']
if 'apple' in faves:
print("mmm apple" )
if 'kiwi' in faves:
print("mmm kiwi")
if 'banana' in faves:
print("ok i get it")
|
StarcoderdataPython
|
131505
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library to manage bots using GCE instance templates and groups."""
from builtins import object
from builtins import str
import google_auth_httplib2
import googleapiclient
import httplib2
import time
from googleapiclient.discovery import build
from base import retry
from google_cloud_utils import credentials
RETRY_COUNT = 8
RETRY_DELAY = 4
REQUEST_TIMEOUT = 180
_SCOPES = [
'https://www.googleapis.com/auth/cloud-platform',
]
class BotManagerException(Exception):
"""Base exception class."""
class OperationError(BotManagerException):
"""Errors during an operation."""
class RequestError(BotManagerException):
"""Errors during a request."""
class NotFoundError(RequestError):
"""Not found."""
class AlreadyExistsError(RequestError):
"""Already exists."""
class RetryableError(RequestError):
"""Retryable request error."""
class BotManager(object):
"""Manager for bots."""
def __init__(self, project_id, zone):
self.project_id = project_id
self.zone = zone
creds = credentials.get_default(scopes=_SCOPES)[0]
http = google_auth_httplib2.AuthorizedHttp(
creds, http=httplib2.Http(timeout=REQUEST_TIMEOUT))
self.compute = build('compute', 'v1', http=http, cache_discovery=False)
def instance_group(self, name):
"""Get an InstanceGroup resource with the given name."""
return InstanceGroup(name, self)
def instance_template(self, name):
"""Get an InstanceTemplate resource with the given name."""
return InstanceTemplate(name, self)
class Resource(object):
"""Represents a resource."""
def __init__(self, name, manager):
self.name = name
self.manager = manager
@property
def compute(self):
return self.manager.compute
@property
def project_id(self):
return self.manager.project_id
@property
def zone(self):
return self.manager.zone
def get(self):
raise NotImplementedError
def exists(self):
"""Return whether or not the resource exists."""
try:
self.get()
return True
except NotFoundError:
return False
def _wait_for_operation(self, operation):
"""Wait for an operation to complete."""
while True:
if operation['status'] == 'DONE':
if 'error' in operation:
raise OperationError(operation['error'])
return operation
time.sleep(1)
if 'zone' in operation:
operation = self.compute.zoneOperations().get(
project=self.project_id,
zone=self.zone,
operation=operation['name']).execute()
else:
operation = self.compute.globalOperations().get(
project=self.project_id, operation=operation['name']).execute()
def _identity(self, response):
"""Identify function for convenience."""
return response
@retry.wrap(
RETRY_COUNT,
RETRY_DELAY,
'handlers.cron.helpers.bot_manager.Resource.execute',
exception_type=RetryableError)
def execute(self, request, result_proc=None):
"""Execute a request."""
if result_proc is None:
result_proc = self._wait_for_operation
try:
response = request.execute()
except googleapiclient.errors.HttpError as e:
if e.resp.status in [400, 500, 503]:
raise RetryableError(str(e))
if e.resp.status == 404:
raise NotFoundError(str(e))
elif e.resp.status == 409:
raise AlreadyExistsError(str(e))
else:
raise RequestError(str(e))
return result_proc(response)
class InstanceGroup(Resource):
"""Instance group."""
# At least 80% of the instances should've been created. Some errors may be
# expected because of limited resources in the zone.
MIN_INSTANCES_RATIO = 0.8
MAX_ERROR_RATIO = 1.0 - MIN_INSTANCES_RATIO
def _wait_for_instances(self):
"""Wait for instance actions to complete."""
while True:
num_instances = 0
instances_ready = 0
errors = []
for instance in self.list_managed_instances():
num_instances += 1
if instance['currentAction'] == 'NONE':
instances_ready += 1
elif 'lastAttempt' in instance and 'errors' in instance['lastAttempt']:
errors.append(instance['lastAttempt']['errors'])
if instances_ready >= max(1, num_instances * self.MIN_INSTANCES_RATIO):
return
if len(errors) > num_instances * self.MAX_ERROR_RATIO:
raise OperationError(errors)
time.sleep(1)
def _handle_size_change(self, response):
"""Response handler for operations that change instances."""
self._wait_for_operation(response)
self._wait_for_instances()
def get(self):
"""Get an instance group for a cluster."""
return self.execute(
self.compute.instanceGroupManagers().get(
project=self.project_id,
zone=self.zone,
instanceGroupManager=self.name),
result_proc=self._identity)
def list_managed_instances(self, instance_filter=None):
"""List managed instances in the group."""
next_page_token = None
while True:
response = self.execute(
self.compute.instanceGroupManagers().listManagedInstances(
project=self.project_id,
zone=self.zone,
instanceGroupManager=self.name,
pageToken=next_page_token,
filter=instance_filter),
result_proc=self._identity)
for instance in response['managedInstances']:
if instance['currentAction'] != 'DELETING':
# Instances can be stuck in DELETING, don't include them.
yield instance
if 'nextPageToken' in response:
next_page_token = response['nextPageToken']
else:
break
def create(self,
base_instance_name,
instance_template,
size=0,
wait_for_instances=True):
"""Create this instance group."""
manager_body = {
'baseInstanceName': base_instance_name,
'instanceTemplate': 'global/instanceTemplates/' + instance_template,
'name': self.name,
'targetSize': size,
}
result_proc = None
if wait_for_instances:
result_proc = self._handle_size_change
self.execute(
self.compute.instanceGroupManagers().insert(
project=self.project_id, zone=self.zone, body=manager_body),
result_proc=result_proc)
def resize(self, new_size, wait_for_instances=True):
"""Resize this instance group."""
result_proc = None
if wait_for_instances:
result_proc = self._handle_size_change
self.execute(
self.compute.instanceGroupManagers().resize(
project=self.project_id,
zone=self.zone,
instanceGroupManager=self.name,
size=new_size),
result_proc=result_proc)
def delete(self):
"""Delete this instance group."""
self.execute(self.compute.instanceGroupManagers().delete(
project=self.project_id, zone=self.zone,
instanceGroupManager=self.name))
class InstanceTemplate(Resource):
"""Instance template."""
def get(self):
"""Get the instance template."""
return self.execute(
self.compute.instanceTemplates().get(
instanceTemplate=self.name, project=self.project_id),
result_proc=self._identity)
def delete(self):
"""Delete the instance template."""
self.execute(self.compute.instanceTemplates().delete(
instanceTemplate=self.name, project=self.project_id))
def create(self, template_body):
"""Create the instance template."""
template_body['name'] = self.name
self.execute(self.compute.instanceTemplates().insert(
project=self.project_id, body=template_body))
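# A minimal usage sketch (hypothetical project, zone and resource names; requires
# valid GCE credentials and an existing instance template):
#     manager = BotManager('my-project', 'us-central1-a')
#     group = manager.instance_group('bot-group')
#     if not group.exists():
#         group.create('bot', 'bot-template', size=3)
#     else:
#         group.resize(5)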
|
StarcoderdataPython
|
170512
|
<filename>crawler_data_binance.py
from binance.client import Client
import numpy as np
from decimal import *
import time
from config_db import config_db
try:
import mysql.connector as mysql
except ImportError:
import MySQLdb as mysql
class crawlerDataBinance(object):
COIN_INFO_IDCOIN = 0
COIN_INFO_SYMBOL = 1
COIN_INFO_MINQTY = 2
COIN_INFO_TICKSIZE = 3
COIN_INFO_STATUS = 4
COIN_INFO_BASEASSET = 5
COIN_INFO_QUOTEASSET = 6
client = Client("api_key", "api_secret")
def get_coin_info_from_binance(self):
market = []
symbols = self.client.get_exchange_info()['symbols']
for symbol in symbols:
temp = [symbol["symbol"], symbol["filters"][2]["minQty"], symbol["filters"][0]["tickSize"], symbol["status"], symbol["baseAsset"], symbol["quoteAsset"]]
market.append(temp)
return market
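# Each entry returned above is shaped roughly like (hypothetical values):
#     ['BTCUSDT', '0.00001000', '0.01000000', 'TRADING', 'BTC', 'USDT']
# i.e. [symbol, minQty, tickSize, status, baseAsset, quoteAsset], matching the
# column order of the coin_info INSERT below.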
def insert_coin_info_to_db(self):
cnx = config_db()
cursor = cnx.cursor()
coin_info = self.get_coin_info_from_binance()
for coin in coin_info:
try:
query_string = "INSERT INTO coin_info(symbol, minQty, tickSize, status, baseAsset, quoteAsset) VALUES (%s,%s,%s,%s,%s,%s)"
cursor.execute(query_string, coin)
cnx.commit()
except mysql.Error as err:
cnx.rollback()
print("Something went wrong: {}".format(err))
cursor.close()
cnx.close()
del coin_info
def get_coinInfo_from_db(self):
cnx = config_db()
cursor = cnx.cursor()
query_string = "SELECT id, symbol, minQty, tickSize FROM coin_info"
cursor.execute(query_string)
coins_info = cursor.fetchall()
cursor.close()
cnx.close()
return coins_info
def insert_candlestick_data_db(self, klines, idCoin):
cnx = config_db()
cursor = cnx.cursor()
klines = np.insert(klines, [0], [idCoin], axis=1).tolist()
try:
query_string = "INSERT INTO candlestick_data(idCoin, openTime, `open`, high, low, `close`, volume, closeTime, quoteAssetVolume, numberOfTrader, takerBuyBaseAssetVolume, takerBuyQuoteAssetVolume, `ignore`) \
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
cursor.executemany(query_string, klines)
cnx.commit()
except mysql.Error as err:
cnx.rollback()
print("Something went wrong: {}".format(err))
cursor.close()
cnx.close()
del klines
def get_max_closeTime_from_db(self, idCoin):
cnx = config_db()
cursor = cnx.cursor()
query_string = "SELECT MAX(closeTime) FROM candlestick_data WHERE idCoin = %s" % idCoin
cursor.execute(query_string)
coins_info = cursor.fetchall()
cursor.close()
cnx.close()
if coins_info[0][0] == None:
return 0
return coins_info[0][0]
def get_klines_startTime(self, symbol, startTime = 0):
return self.client.get_klines(symbol = symbol,
interval = self.client.KLINE_INTERVAL_1HOUR,
startTime = startTime,
limit = 1000)
def insert_symbols_candlestick_data(self):
symbols = self.get_coinInfo_from_db()
for symbol in symbols:
closeTime = self.get_max_closeTime_from_db(symbol[self.COIN_INFO_IDCOIN])
klines = self.get_klines_startTime(symbol[self.COIN_INFO_SYMBOL], closeTime + 1)
while len(klines) > 0:
self.insert_candlestick_data_db(klines, symbol[self.COIN_INFO_IDCOIN])
closeTime = self.get_max_closeTime_from_db(symbol[self.COIN_INFO_IDCOIN])
klines = self.get_klines_startTime(symbol[self.COIN_INFO_SYMBOL], closeTime + 1)
del klines
if __name__ == '__main__':
time.strftime('%X %x')
start_time = time.time()
crawler = crawlerDataBinance()
crawler.insert_coin_info_to_db() # run first time
crawler.insert_symbols_candlestick_data()
print("Total time get data: %f"%(time.time() - start_time))
|
StarcoderdataPython
|
29965
|
import os, sys
exp_id=[
"exp1.0",
]
env_source=[
"file",
]
exp_mode = [
"continuous",
#"newb",
#"base",
]
num_theories_init=[
4,
]
pred_nets_neurons=[
8,
]
pred_nets_activation=[
"linear",
# "leakyRelu",
]
domain_net_neurons=[
8,
]
domain_pred_mode=[
"onehot",
]
mse_amp=[
1e-7,
]
simplify_criteria=[
'\("DLs",0,3,"relative"\)',
]
scheduler_settings=[
'\("ReduceLROnPlateau",40,0.1\)',
]
optim_type=[
'\("adam",5e-3\)',
]
optim_domain_type=[
'\("adam",1e-3\)',
]
reg_amp=[
1e-8,
]
reg_domain_amp = [
1e-5,
]
batch_size = [
2000,
]
loss_core = [
"DLs",
]
loss_order = [
-1,
]
loss_decay_scale = [
"None",
]
is_mse_decay = [
False,
]
loss_balance_model_influence = [
False,
]
num_examples = [
20000,
]
iter_to_saturation = [
5000,
]
MDL_mode = [
"both",
]
num_output_dims = [
2,
]
num_layers = [
3,
]
is_pendulum = [
False,
]
date_time = [
"10-9",
]
seed = [
0,
30,
60,
90,
120,
150,
180,
210,
240,
270,
]
def assign_array_id(array_id, param_list):
if len(param_list) == 0:
print("redundancy: {0}".format(array_id))
return []
else:
param_bottom = param_list[-1]
length = len(param_bottom)
current_param = param_bottom[array_id % length]
return assign_array_id(int(array_id / length), param_list[:-1]) + [current_param]
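# Worked example of the mixed-radix decoding above (hypothetical parameter
# lists): assign_array_id(5, [["a", "b"], [1, 2, 3]]) picks
# param_list[-1][5 % 3] == 3, recurses with 5 // 3 == 1 to pick "b", and
# returns ["b", 3] (printing the leftover quotient as a "redundancy" check once
# param_list is exhausted).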
array_id = int(sys.argv[1])
param_list = [exp_id,
env_source,
exp_mode,
num_theories_init,
pred_nets_neurons,
pred_nets_activation,
domain_net_neurons,
domain_pred_mode,
mse_amp,
simplify_criteria,
scheduler_settings,
optim_type,
optim_domain_type,
reg_amp,
reg_domain_amp,
batch_size,
loss_core,
loss_order,
loss_decay_scale,
is_mse_decay,
loss_balance_model_influence,
num_examples,
iter_to_saturation,
MDL_mode,
num_output_dims,
num_layers,
is_pendulum,
date_time,
seed,
]
param_chosen = assign_array_id(array_id, param_list)
exec_str = "python ../theory_learning/theory_exp.py"
for param in param_chosen:
exec_str += " {0}".format(param)
exec_str += " {0}".format(array_id)
print(param_chosen)
print(exec_str)
from shutil import copyfile
current_PATH = os.path.dirname(os.path.realpath(__file__))
def make_dir(filename):
import os
import errno
if not os.path.exists(os.path.dirname(filename)):
print("directory {0} does not exist, created.".format(os.path.dirname(filename)))
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
print(exc)
raise
filename = "../data/" + "{0}_{1}/".format(param_chosen[0], param_chosen[-2])
make_dir(filename)
fc = "run_theory.py"
if not os.path.isfile(filename + fc):
copyfile(current_PATH + "/" + fc, filename + fc)
os.system(exec_str)
|
StarcoderdataPython
|
87081
|
"""Test project's batch types list command."""
# pylint: disable=wrong-import-order, import-error
import io
import operator
import sys
from uuid import uuid4
from click import echo
from click.testing import CliRunner
from gencove.client import APIClient, APIClientTimeout # noqa: I100
from gencove.command.projects.cli import list_project_batch_types
from gencove.models import ProjectBatchTypes
from gencove.tests.decorators import assert_authorization
from gencove.tests.filters import filter_jwt, replace_gencove_url_vcr
from gencove.tests.projects.vcr.filters import (
filter_get_project_batch_types_request,
)
from gencove.tests.upload.vcr.filters import filter_volatile_dates
from gencove.tests.utils import get_vcr_response
import pytest
from vcr import VCR
@pytest.fixture(scope="module")
def vcr_config():
"""VCR configuration."""
return {
"cassette_library_dir": "gencove/tests/projects/vcr",
"filter_headers": [
"Authorization",
"Content-Length",
"User-Agent",
],
"filter_post_data_parameters": [
("email", "<EMAIL>"),
("password", "<PASSWORD>"),
],
"match_on": ["method", "scheme", "port", "path", "query"],
"path_transformer": VCR.ensure_suffix(".yaml"),
"before_record_request": [
replace_gencove_url_vcr,
filter_get_project_batch_types_request,
],
"before_record_response": [
filter_jwt,
filter_volatile_dates,
],
}
@pytest.mark.vcr
@assert_authorization
def test_list_project_batch_types__empty(
credentials, mocker, project_id, recording, vcr
): # pylint: disable=unused-argument
"""Test project has not batch types."""
runner = CliRunner()
if not recording:
# Mock get_project_batch_types only if using the cassettes, since we
# mock the return value.
get_project_batch_types_response = get_vcr_response(
"/api/v2/project-batch-types/", vcr, operator.contains
)
mocked_get_project_batch_types = mocker.patch.object(
APIClient,
"get_project_batch_types",
return_value=ProjectBatchTypes(
**get_project_batch_types_response
),
)
res = runner.invoke(
list_project_batch_types,
[project_id, *credentials],
)
assert res.exit_code == 0
if not recording:
mocked_get_project_batch_types.assert_called_once()
assert res.output == ""
@pytest.mark.default_cassette("jwt-create.yaml")
@pytest.mark.vcr
@assert_authorization
def test_list_project_batch_types__not_empty_slow_response_retry(
mocker, credentials
):
"""Test project batch types being outputed to the shell."""
runner = CliRunner()
mocked_get_project_batch_types = mocker.patch.object(
APIClient,
"get_project_batch_types",
side_effect=APIClientTimeout("Could not connect to the api server"),
)
res = runner.invoke(
list_project_batch_types,
[str(uuid4()), *credentials],
)
assert res.exit_code == 1
assert mocked_get_project_batch_types.call_count == 2
@pytest.mark.vcr
@assert_authorization
def test_list_project_batch_types__not_empty(
mocker, credentials, project_id_batches, recording, vcr
):
"""Test project batch types being outputed to the shell."""
runner = CliRunner()
if not recording:
# Mock get_project_batch_types only if using the cassettes, since we
# mock the return value.
get_project_batch_types_response = get_vcr_response(
"/api/v2/project-batch-types/", vcr, operator.contains
)
mocked_get_project_batch_types = mocker.patch.object(
APIClient,
"get_project_batch_types",
return_value=ProjectBatchTypes(
**get_project_batch_types_response
),
)
res = runner.invoke(
list_project_batch_types,
[project_id_batches, *credentials],
)
assert res.exit_code == 0
assert res.output != ""
if not recording:
mocked_get_project_batch_types.assert_called_once()
output_line = io.BytesIO()
sys.stdout = output_line
batches_output = [
f"{batch_type['key']}\t{batch_type['description']}"
for batch_type in get_project_batch_types_response["results"]
]
echo("\n".join(batches_output))
assert output_line.getvalue() == res.output.encode()
|
StarcoderdataPython
|
182946
|
<reponame>lexnederbragt/denovo-assembly-tutorial<gh_stars>1-10
# by <NAME>
from Bio import SeqIO
import sys
class Manifest:
def __init__(self, cols):
self.id = cols[0]
self.path = cols[1]
self.extra = cols[2:]
def read_manifest(fn):
samples = []
for ln in open(fn):
if ln.startswith('#'):
continue
cols = ln.rstrip().split("\t")
samples.append(Manifest(cols))
return samples
def stats(seq_name, fh):
contig_lengths = [len(rec) for rec in SeqIO.parse(fh, "fasta") if len(rec) >= MIN_LENGTH]
contig_lengths.sort(reverse=True)
print "\n".join([seq_name + "\t" + str(l) for l in contig_lengths])
MIN_LENGTH = 0
#if len(sys.argv) == 3:
# MIN_LENGTH = int(sys.argv[2])
#samples = read_manifest(sys.argv[1])
for s in sys.argv[1:]:
stats(s, open(s))
|
StarcoderdataPython
|
3319051
|
import os
from typing import Optional
from _pytest.config import Config
from typepy import Bool, Integer, StrictLevel
from typepy.error import TypeConversionError
from ._const import Default, Option
class DiscordOptRetriever:
def __init__(self, config: Config):
self.__config = config
def retrieve_webhook_url(self) -> Optional[str]:
return self.__retrieve_discord_opt(Option.DISCORD_WEBHOOK)
def retrieve_verbosity_level(self) -> int:
config = self.__config
discord_opt = Option.DISCORD_VERBOSE
verbosity_level = None
if hasattr(config.option, discord_opt.inioption_str):
verbosity_level = getattr(config.option, discord_opt.inioption_str)
if verbosity_level is not None and verbosity_level < 0:
verbosity_level = None
if verbosity_level is None:
verbosity_level = self._to_int(os.environ.get(discord_opt.envvar_str))
if verbosity_level is None:
verbosity_level = self._to_int(config.getini(discord_opt.inioption_str))
if verbosity_level is None:
verbosity_level = config.option.verbose
return verbosity_level
def retrieve_username(self) -> str:
username = self.__retrieve_discord_opt(Option.DISCORD_USERNAME) # type: ignore
if not username:
return Default.USERNAME
return username
def retrieve_success_icon(self) -> Optional[str]:
return self.__retrieve_discord_opt(Option.DISCORD_SUCCESS_ICON)
def retrieve_skip_icon(self) -> Optional[str]:
return self.__retrieve_discord_opt(Option.DISCORD_SKIP_ICON)
def retrieve_fail_icon(self) -> Optional[str]:
return self.__retrieve_discord_opt(Option.DISCORD_FAIL_ICON)
def retrieve_attach_file(self) -> bool:
config = self.__config
discord_opt = Option.DISCORD_ATTACH_FILE
value = None
if hasattr(config.option, discord_opt.inioption_str):
value = getattr(config.option, discord_opt.inioption_str)
if value is None:
try:
value = Bool(
os.environ.get(discord_opt.envvar_str), strict_level=StrictLevel.MIN
).convert()
except TypeConversionError:
value = None
if value is None:
value = config.getini(discord_opt.inioption_str)
if value is None:
return False
return value
def __retrieve_discord_opt(self, discord_opt: Option) -> Optional[str]:
config = self.__config
value = None
if hasattr(config.option, discord_opt.inioption_str):
value = getattr(config.option, discord_opt.inioption_str)
if not value:
value = os.environ.get(discord_opt.envvar_str)
if not value:
value = config.getini(discord_opt.inioption_str)
return value
@staticmethod
def _to_int(value) -> Optional[int]:
try:
return Integer(value, strict_level=StrictLevel.MIN).convert()
except TypeConversionError:
return None
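# Resolution order used by the retrievers above, e.g. for the webhook URL
# (the concrete option/variable names live in Option):
#     1. pytest command-line option  (attribute on config.option)
#     2. environment variable        (discord_opt.envvar_str)
#     3. ini file setting            (config.getini(...))
# retrieve_verbosity_level() additionally falls back to pytest's own -v count.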
|
StarcoderdataPython
|
105058
|
<filename>spotify_stats/__init__.py
"""
libraries
"""
from .track_details import TrackDetails
from .library import playlist_url_to_id, playlist_id_to_track_list, track_list_to_details
|
StarcoderdataPython
|
69736
|
# *******************************************************************************
# Copyright 2017 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
#
# @microservice: py-core-data-client library
# @author: <NAME>, Dell
# @version: 1.0.0
# *******************************************************************************
# This test was left half-translated from its Java (JUnit) origin; the sketch
# below restates it as a Python unittest, keeping the original endpoint and
# assertion. Setting the client's "url" attribute directly mirrors the
# reflection-based field injection of the Java version (an assumption about the
# Python client, not a documented API).
import unittest
from controller.impl import PingCoreDataClientImpl
from test.category import RequiresCoreDataRunning  # test category marker (requires core-data running)
ENDPT = "http://localhost:48080/api/v1/ping"
class PingCoreDataClientTest(unittest.TestCase):
# set up the client and point it at the local core-data ping endpoint
def setUp(self):
self.client = PingCoreDataClientImpl()
self.client.url = ENDPT
def test_ping(self):
self.assertEqual("pong", self.client.ping(), "Ping Core Data Micro Service failed")
|
StarcoderdataPython
|
1633052
|
<gh_stars>0
# from flask import Flask, render_template, url_for, flash, redirect
# from flask_sqlalchemy import SQLAlchemy
# # from forms import RegistrationForm, LoginForm
# app = Flask(__name__)
# app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.site'
# db = SQLAlchemy(app)
# class User(db.Model):
# id = db.Column(db.Integer, primary_key = True)
# username = db.Column(db.String(20), unique=True, nullable = False)
# password = db.Column(db.String(60), nullable = False)
# image_file = db.Column(db.String(20), nullable=False, default = 'default.jpg')
# def repr(__self__):
# return f"User('{self.username}', '{self.password}', '{self.image_file}')"
#separating routes into its own modue l: routes.py
from safebay import app
if __name__ == '__main__':
app.run(debug = True)
|
StarcoderdataPython
|
26423
|
# Generated by Django 2.0 on 2018-02-24 11:21
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('sky', '0007_auto_20180224_1120'),
]
operations = [
migrations.RemoveField(
model_name='news',
name='label',
),
]
|
StarcoderdataPython
|
3307427
|
<reponame>AlgoveraAI/creations
import gradio as gr
from ocean_lib.config import Config
from ocean_lib.models.btoken import BToken #BToken is ERC20
from ocean_lib.ocean.ocean import Ocean
from ocean_lib.web3_internal.wallet import Wallet
from ocean_lib.web3_internal.currency import from_wei # wei is the smallest denomination of ether e.g. like cents
# from ocean_lib.web3_internal.currency import pretty_ether_and_wei
from wallet import get_wallet
config = Config('config.ini')
ocean = Ocean(config)
def wallet(private_key):
if private_key:
mnemonic = None
else:
account, mnemonic = get_wallet()
private_key = account.key.hex()
wallet = Wallet(ocean.web3, private_key, transaction_timeout=20, block_confirmations=config.block_confirmations)
address = wallet.address
OCEAN_token = BToken(ocean.web3, ocean.OCEAN_address)
eth_balance = from_wei(ocean.web3.eth.get_balance(address))
ocean_balance = from_wei(OCEAN_token.balanceOf(address))
return address, private_key, mnemonic, eth_balance, ocean_balance
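# A minimal usage sketch (hypothetical key handling; never share a real private
# key):
#     address, key, mnemonic, eth, ocean_bal = wallet("")   # create a new wallet
#     address, key, _, eth, ocean_bal = wallet(key)         # reuse it later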
# def wallet(private_key, did):
# wallet = Wallet(ocean.web3, private_key, transaction_timeout=20, block_confirmations=config.block_confirmations)
# address = wallet.address
# OCEAN_token = BToken(ocean.web3, ocean.OCEAN_address)
# eth_balance = from_wei(ocean.web3.eth.get_balance(wallet.address))
# ocean_balance = from_wei(OCEAN_token.balanceOf(wallet.address))
# asset = ocean.assets.resolve(did)
# ALG_ddo = ocean.assets.resolve(did)
# alg_token = ocean.get_data_token(ALG_ddo.data_token_address)
# alg_token_balance = pretty_ether_and_wei(alg_token.balanceOf(wallet.address))
# return address, eth_balance, ocean_balance, alg_token_balance
description = (
"This demo shows the balance of tokens in your Web3 wallet. If you do not have a Web3 wallet, leave the input field empty when running and the app will create a wallet for you. "
"A wallet consists of a public and private key. You can think of the public key like your email address and the private key like your password. "
"The public key can be easily determined from the private key, but not vice versa. "
"The private key is output in the form of both a hexadecimal number and the corresponding mnemonic phrase, which is easier to remember. "
"If you want to continue to use the same wallet in future, you should store the private key (and/or the mnemonic phrase, which can be used to recover the private key). "
"Then enter the private key to the input field when running the app. "
"Do not give your private key to anyone ever. In fact, it is bad practice to store your private key on your PC for wallets that contain tokens with real value. "
"However, we are using test tokens on the Ethereum test network (Rinkeby) where the tokens have no real value. "
"Initially, your wallet should have no ETH and OCEAN tokens in it. You can then request ETH and OCEAN test tokens by entering your public address into faucets (follow the links at the bottom of the page). "
"Then wait about 15 seconds and re-run the app for the same private key. "
"This demo uses the Ocean Protocol Python library in the backend. For more information on the advantages of combinining Ocean and HuggingFace, check out the blog post link below. "
""
)
# description = (
# "This demo shows the balance of algorithm tokens, as well as ETH and OCEAN, in your Web3 wallet (for a given private key). The algorithm tokens will be used to run Algovera apps on HF spaces in future. "
# "Currently, you need to export your private key from a MetaMask wallet (we plan to randomly generate a private key in the app and bypass MetaMask in future). "
# "For a guide on how to install MetaMask (an extension in your browser), check the link at the bottom of the page. "
# "We highly recommend doing this with a wallet that has no real tokens in it. We use a test network (Rinkeby) where the tokens have no real value. "
# "After an initial setup, your wallet should have no tokens. You can request ETH and OCEAN test tokens from faucets at the links at the bottom of the page. "
# "To buy an algorithm token (using the OCEAN and ETH), you can search for algorithms on the Ocean marketplace (see link at bottom). Make sure to use algorithms that are on the Rinkeby test network (you need to select Rinkeby from the dropdown menu). "
# "We have provided a link to our DCGAN model on the test network at the bottom. If you can't see it you are not on the test network. "
# "After you buy an algorithm token, you need to locate the DID in the metadata on the marketplace. Then enter it into the input textbox. "
# "Later we will add HF Spaces apps to search algorithms and buy algorithm tokens, which you can use to run demos of the algorithms. "
# "This demo uses the Ocean Python library in the backend (see link below)."
# )
article = (
"<p style='text-align: center'>"
"<a href='https://faucet.rinkeby.io/' target='_blank'>1. ETH faucet</a> | "
"<a href='https://faucet.rinkeby.oceanprotocol.com/' target='_blank'>2. OCEAN faucet | </a>"
"<a href='https://docs.algovera.ai/blog/2022/01/04/Using%20the%20Ocean%20Marketplace%20with%20HuggingFace%20Apps,%20Algorithms%20and%20Datasets' target='_blank'>3. Blog about Ocean Protocol on HuggingFace</a> "
"</p>"
)
interface = gr.Interface(
wallet,
[
gr.inputs.Textbox(label="Private Key"),
],
[
#gr.outputs.Textbox(label="Public Key"),
#gr.outputs.Textbox(label="Algorithm token balance"),
gr.outputs.Textbox(label="Public Address"),
gr.outputs.Textbox(label="Private Key"),
gr.outputs.Textbox(label="Recovery Passphrase"),
gr.outputs.Textbox(label="ETH balance"),
gr.outputs.Textbox(label="OCEAN balance"),
],
title="Web3 Wallet",
description=description,
article=article,
theme="huggingface",
)
interface.launch()
|
StarcoderdataPython
|
111598
|
from kik.messages.message import Message
class FriendPickerMessage(Message):
"""
A friend picker message, as documented at `<https://dev.kik.com/#/docs/messaging#friend-picker-response-object>`_.
"""
def __init__(self, picked=None, chat_type=None, **kwargs):
super(FriendPickerMessage, self).__init__(type='friend-picker', **kwargs)
self.picked = picked
self.chat_type = chat_type
@classmethod
def property_mapping(cls):
mapping = super(FriendPickerMessage, cls).property_mapping()
mapping.update({
'picked': 'picked',
'chat_type': 'chatType'
})
return mapping
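# Hypothetical example of the mapping above: an incoming friend-picker payload
# with "picked": ["some_user"] and "chatType": "private" is parsed into a
# FriendPickerMessage whose .picked == ["some_user"] and .chat_type == "private".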
|
StarcoderdataPython
|
1795252
|
<filename>prereise/gather/demanddata/bldg_electrification/puma_data_agg.py
# This script develops puma-level data directly from census and aggregated from census tract data
import os
import geopandas as gpd
import numpy as np
import pandas as pd
from prereise.gather.demanddata.bldg_electrification import const
def aggregate_puma_df(
puma_states, tract_puma_mapping, tract_gbs_area, tract_degday_normals, tract_pop
):
"""Scale census tract data up to puma areas.
:param pandas.DataFrame puma_states: mapping of puma to state.
:param pandas.DataFrame tract_puma_mapping: tract to puma mapping.
:param pandas.DataFrame tract_gbs_area: General Building Stock area for residential, commercial, industrial areas by tract
:param pandas.DataFrame tract_degday_normals: heating and cooling degree day normals by tract
:param pandas.DataFrame tract_pop: population by tract
:return: (*pandas.DataFrame*) -- population; residential, commercial, industrial areas;
heating degree days; cooling degree days; residential space heating household fuel
fractions.
"""
# Set up puma_df data frame
puma_df = puma_states.to_frame()
# Combine tract-level data into single data frame with only census tracts with building area data
tract_data = pd.concat(
[tract_gbs_area, tract_degday_normals, tract_pop], axis=1, join="inner"
)
tract_data = tract_data.loc[:, ~tract_data.columns.duplicated()]
# Group tracts by PUMA for aggregation
grouped_tracts = tract_data.groupby(tract_puma_mapping["puma"])
# Sum population and GBS areas; store in data frame
puma_df.loc[grouped_tracts.groups.keys(), "pop"] = grouped_tracts["pop"].sum()
puma_df.loc[grouped_tracts.groups.keys(), "res_area_gbs_m2"] = grouped_tracts[
"res_area_gbs_m2"
].sum()
puma_df.loc[grouped_tracts.groups.keys(), "com_area_gbs_m2"] = grouped_tracts[
"com_area_gbs_m2"
].sum()
puma_df.loc[grouped_tracts.groups.keys(), "ind_area_gbs_m2"] = grouped_tracts[
"ind_area_gbs_m2"
].sum()
# Population-weighted average hdd, cdd, and acpen
tract_data["pop_hdd65_normals"] = tract_data["pop"] * tract_data["hdd65_normals"]
tract_data["pop_cdd65_normals"] = tract_data["pop"] * tract_data["cdd65_normals"]
puma_df.loc[grouped_tracts.groups.keys(), "hdd65_normals"] = (
grouped_tracts["pop_hdd65_normals"].sum() / grouped_tracts["pop"].sum()
)
puma_df.loc[grouped_tracts.groups.keys(), "cdd65_normals"] = (
grouped_tracts["pop_cdd65_normals"].sum() / grouped_tracts["pop"].sum()
)
# Load RECS and CBECS area scales for res and com
resscales = pd.read_csv(os.path.join(data_dir, "area_scale_res.csv"))
comscales = pd.read_csv(os.path.join(data_dir, "area_scale_com.csv"))
# Compute GBS areas for state groups in RECS and CBECS
resscales["GBS"] = [
puma_df.query("state in @s")["res_area_gbs_m2"].sum()
* const.conv_m2_to_ft2
* const.conv_ft2_to_bsf
for s in resscales.fillna(0).values.tolist()
]
comscales["GBS"] = [
puma_df.query("state in @s")["com_area_gbs_m2"].sum()
* const.conv_m2_to_ft2
* const.conv_ft2_to_bsf
for s in comscales.fillna(0).values.tolist()
]
# Compute scalar for GBS area to base year area corresponding to RECS/CBECS
# and assuming a constant annual growth rate
resscales["area_scalar"] = (
resscales[f"RECS{const.recs_date_1}"]
* (
(
resscales[f"RECS{const.recs_date_2}"]
/ resscales[f"RECS{const.recs_date_1}"]
)
** (
(const.base_year - const.recs_date_1)
/ (const.recs_date_2 - const.recs_date_1)
)
)
/ resscales["GBS"]
)
comscales["area_scalar"] = (
comscales[f"CBECS{const.cbecs_date_1}"]
* (
(
comscales[f"CBECS{const.cbecs_date_2}"]
/ comscales[f"CBECS{const.cbecs_date_1}"]
)
** (
(const.base_year - const.cbecs_date_1)
/ (const.cbecs_date_2 - const.cbecs_date_1)
)
)
/ comscales["GBS"]
)
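# The area_scalar values above assume constant annual growth between survey
# years. For example (hypothetical numbers): with RECS2009 = 100, RECS2015 = 112
# and base_year = 2018, the implied base-year floor area is
#     100 * (112 / 100) ** ((2018 - 2009) / (2015 - 2009)) ≈ 118.5
# which is then divided by the GBS total for the same group of states.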
# Scale puma area from gbs to base year
for state in const.state_list:
state_row_scale_res = resscales[resscales.eq(state).any(1)].reset_index()
state_row_scale_com = comscales[comscales.eq(state).any(1)].reset_index()
res_area_scalar = state_row_scale_res["area_scalar"][0]
com_area_scalar = state_row_scale_com["area_scalar"][0]
puma_df.loc[puma_df["state"] == state, f"res_area_{const.base_year}_m2"] = (
puma_df[puma_df["state"] == state]["res_area_gbs_m2"] * res_area_scalar
)
puma_df.loc[puma_df["state"] == state, f"com_area_{const.base_year}_m2"] = (
puma_df[puma_df["state"] == state]["com_area_gbs_m2"] * com_area_scalar
)
return puma_df
def scale_fuel_fractions(hh_fuels, puma_df, year=const.base_year):
"""Scale census tract data up to puma areas.
:param pandas.DataFrame hh_fuels: household fuel type by puma.
:param pandas.DataFrame puma_df: output of :func:`aggregate_puma_df`.
:param int/str year: year to use within label when creating columns.
:return: (*pandas.DataFrame*) -- fractions of natural gas, fuel oil and kerosone,
propane, and electricity used for space heating, hot water, cooking, and other
in residential and commercial buildings.
"""
# Calculate res fractions of fuel usage based off ACS puma_fuel household data
puma_df["frac_sh_res_natgas_acs"] = hh_fuels["hh_utilgas"] / hh_fuels["hh_total"]
for f in ["fok", "othergas", "coal", "wood", "solar", "elec", "other", "none"]:
puma_df[f"frac_sh_res_{f}_acs"] = hh_fuels[f"hh_{f}"] / hh_fuels["hh_total"]
region_map = {state: r for r, states in const.regions.items() for state in states}
puma_region_groups = puma_df.groupby(puma_df["state"].map(region_map))
for c in const.classes:
# Compute area fraction for each fuel type (column) in each region (index)
area_fractions = puma_region_groups.apply(
lambda x: pd.Series(
{
f: (
(
x[f"frac_sh_res_{f}_acs"]
* x[f"{c}_area_{const.base_year}_m2"]
).sum()
/ x[f"{c}_area_{const.base_year}_m2"].sum()
)
for f in const.fuel
}
)
)
# Scale per-PUMA values to match target regional values (calculated externally)
uselist = ["sh", "dhw", "other"] if c == "res" else ["sh", "dhw", "cook"]
for u in uselist:
area_fraction_targets = pd.read_csv(
os.path.join(data_dir, f"frac_target_{u}_{c}.csv"),
index_col=0,
)
down_scale = area_fraction_targets / area_fractions
up_scale = (area_fraction_targets - area_fractions) / (1 - area_fractions)
for r in const.regions:
for f in const.fuel:
pre_scaling = puma_region_groups.get_group(r)[
f"frac_sh_res_{f}_acs"
]
if down_scale.loc[r, f] <= 1:
scaled = pre_scaling * down_scale.loc[r, f]
else:
scaled = pre_scaling + up_scale.loc[r, f] * (1 - pre_scaling)
puma_df.loc[pre_scaling.index, f"frac_{f}_{u}_{c}_{year}"] = scaled
# Sum coal, wood, solar and other fractions for frac_com_other
named_sh_com_fuels = {"elec", "fok", "natgas", "othergas"}
named_sh_com_cols = [f"frac_{f}_sh_com_{year}" for f in named_sh_com_fuels]
puma_df[f"frac_other_sh_com_{year}"] = 1 - puma_df[named_sh_com_cols].sum(axis=1)
# Copy residential space heating columns to match new column naming convention
fossil_fuels = {"natgas", "othergas", "fok"}
for c in const.classes:
uselist = ["sh", "dhw", "other"] if c == "res" else ["sh", "dhw", "cook"]
for u in uselist:
fossil_cols = [f"frac_{f}_{u}_{c}_{year}" for f in fossil_fuels]
puma_df[f"frac_ff_{u}_{c}_{year}"] = puma_df[fossil_cols].sum(axis=1)
return puma_df
def puma_timezone_latlong(timezones, pumas):
"""Assign timezone and lat/long to each puma.
:param geopandas.DataFrame timezones: US timezones.
:param geopandas.DataFrame pumas: US pumas.
:return: (*pandas.Series*) -- timezone for every puma.
:return: (*pandas.DataFrame*) -- latitude and longitude for every puma.
"""
puma_timezone = gpd.overlay(pumas, timezones.to_crs("EPSG:4269"))
puma_timezone["area"] = puma_timezone.area
puma_timezone.sort_values("area", ascending=False, inplace=True)
puma_timezone = puma_timezone.drop_duplicates(subset="GEOID10", keep="first")
puma_timezone.sort_values("GEOID10", ascending=True, inplace=True)
puma_lat_long = pd.DataFrame(
{
"puma": "puma_" + pumas["GEOID10"],
"latitude": [float(pumas["INTPTLAT10"][i]) for i in range(len(pumas))],
"longitude": [float(pumas["INTPTLON10"][i]) for i in range(len(pumas))],
}
)
puma_lat_long = puma_lat_long.set_index("puma")
return puma_timezone["tzid"], puma_lat_long
if __name__ == "__main__":
data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
# Load ACS fuel data
puma_fuel = pd.read_csv(os.path.join(data_dir, "puma_fuel.csv"), index_col="puma")
# Load tract_puma_mapping
tract_puma_mapping = pd.read_csv(
os.path.join(data_dir, "tract_puma_mapping.csv"), index_col="tract"
)
# Load tract-level data for General Building Stock area for residential, commercial and industral classes
tract_gbs_area = pd.read_csv(
os.path.join(data_dir, "tract_gbs_area.csv"), index_col="tract"
)
# Load tract-level data for heating and cooling degree day normals
tract_degday_normals = pd.read_csv(
os.path.join(data_dir, "tract_degday_normals.csv"), index_col="tract"
)
# Load tract-level data for population
tract_pop = pd.read_csv(os.path.join(data_dir, "tract_pop.csv"), index_col="tract")
puma_data_unscaled = aggregate_puma_df(
puma_fuel["state"],
tract_puma_mapping,
tract_gbs_area,
tract_degday_normals,
tract_pop,
)
puma_data = scale_fuel_fractions(puma_fuel, puma_data_unscaled)
# Add time zone information
puma_timezones = pd.read_csv(
os.path.join(data_dir, "puma_timezone.csv"), index_col="puma"
)
puma_data["timezone"] = puma_timezones["timezone"]
# Add latitude and longitude information
puma_lat_long = pd.read_csv(
os.path.join(data_dir, "puma_lat_long.csv"), index_col="puma"
)
puma_data["latitude"], puma_data["longitude"] = (
puma_lat_long["latitude"],
puma_lat_long["longitude"],
)
# Add residential AC penetration
acpen_b = 0.00117796
acpen_n = 1.1243
puma_data["AC_penetration"] = 1 - np.exp(
-acpen_b * puma_data["cdd65_normals"] ** acpen_n
)
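# Example of the saturation curve above: a puma with cdd65_normals = 1500 gets
# AC_penetration = 1 - exp(-0.00117796 * 1500 ** 1.1243) ≈ 0.99, while milder
# climates get substantially lower penetration.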
puma_data.to_csv(os.path.join(data_dir, "puma_data.csv"))
|
StarcoderdataPython
|
3235065
|
<reponame>JCarlos831/python_getting_started_-pluralsight-<filename>module_3_types_statements_and_other_goodies/while_loops.py
x = 0
while x < 10:
print("Count is {0}".format(x))
x += 1
# Infinite Loop
# num = 10
# while True:
# if num == 42:
# break
# print("Hello World")
|
StarcoderdataPython
|
3270548
|
#!/opt/anaconda3/bin/python
# What interpreter
'''
#-------------------------------------------------------------------------------
'''
print(" ")
print("-------------------------------------------------------------------")
print(" Lets have a go ")
print("-------------------------------------------------------------------")
print(" ")
# I love a good print statement
'''
#-------------------------------------------------------------------------------
'''
#rmc6f_input = open(raw_input("Specify a file: ") + ".rmc6f", "r")
rmc6f_input = open("1_Test.rmc6f", "r")
# Load the file!
'''
#-------------------------------------------------------------------------------
'''
line_density = []
line_supercell = []
line_cell =[]
line_atom_list = []
for i in rmc6f_input:
# header
if i.find("Number density") >= 0:
line_density = i
if i.find("Supercell") >= 0:
line_supercell = i
if i.find("Cell") >= 0:
line_cell = i
# atom lines
if i.find("[1]") >= 0:
line_atom_list.append(i)
rmc6f_input.close()
# extract the useful header info and make a list of the atom lines
'''
#-------------------------------------------------------------------------------
'''
import re
temp = re.findall('\d+\.\d+', line_density)
density = [float(i) for i in temp]
temp = re.findall('[-+]?\d*\.\d+|\d+', line_supercell)
supercell = [int(i) for i in temp]
temp = re.findall('[-+]?\d*\.\d+|\d+', line_cell)
cell = [float(i) for i in temp]
#print(line_density)
print(" Density:")
print(density)
print("\n Supercell:")
#print(line_supercell)
print(supercell)
print("\n Cell Parameters:")
#print(line_cell)
print(cell)
# Deal with the header
'''
#-------------------------------------------------------------------------------
'''
atom_list = []
for line in line_atom_list:
temp = []
temp_1 = re.findall(r'\b\d+\b', line) # ints
temp_2 = re.findall('[a-zA-Z]+', line) # letters
temp_3 = re.findall('\d+\.\d+', line) # floats
temp.append(temp_2[0])
temp.append(int(temp_1[0]))
temp.append(float(temp_3[0]))
temp.append(float(temp_3[1]))
temp.append(float(temp_3[2]))
atom_list.append(temp)
print("\n The Format of the List-of-Lists: ")
print(atom_list[0])
#print(line_atom_list[0])
# sort the atom lines and create a list of lists where each list
# is the element, atom number, and the xyz coordinates
'''
#-------------------------------------------------------------------------------
'''
print(" ")
print("-------------------------------------------------------------------")
print(" Well go on then ")
print("-------------------------------------------------------------------")
print(" ")
|
StarcoderdataPython
|
1702324
|
<gh_stars>10-100
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-01-07 10:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('busshaming', '0018_realtimeprogress'),
]
operations = [
migrations.AlterField(
model_name='realtimeprogress',
name='in_progress',
field=models.DateTimeField(null=True),
),
]
|
StarcoderdataPython
|
3353347
|
<reponame>iross/stromatolites_demo<filename>udf/ext_strat_target_distant.py
#==============================================================================
#DEFINE RELATIONSHIP BETWEEN TARGET ENTITIES AND DISTANT STRATIGRAPHIC PHRASES
#==============================================================================
# ACQUIRE RELEVANT MODULES and DATA
#==============================================================================
import time, random, psycopg2, yaml
from psycopg2.extensions import AsIs
#tic
start_time = time.time()
# Connect to Postgres
with open('./credentials', 'r') as credential_yaml:
credentials = yaml.load(credential_yaml)
with open('./config', 'r') as config_yaml:
config = yaml.load(config_yaml)
# Connect to Postgres
connection = psycopg2.connect(
dbname=credentials['postgres']['database'],
user=credentials['postgres']['user'],
host=credentials['postgres']['host'],
port=credentials['postgres']['port'])
cursor = connection.cursor()
doc_cursor=connection.cursor()
target_cursor=connection.cursor()
strat_cursor = connection.cursor()
sent_cursor = connection.cursor()
#some sort of magic
connection.set_isolation_level(0)
cursor.execute(""" VACUUM ANALYZE target_instances;
""")
connection.commit()
#some sort of magic
connection.set_isolation_level(0)
cursor.execute(""" VACUUM ANALYZE strat_phrases;
""")
connection.commit()
#some sort of magic
connection.set_isolation_level(0)
cursor.execute(""" VACUUM ANALYZE %(my_app)s_sentences_%(my_product)s;
""", {
"my_app": AsIs(config['app_name']),
"my_product": AsIs(config['product'].lower())
})
connection.commit()
#==============================================================================
# FIND STRATIGRAPHIC PHRASES NEAREST TO ORPHAN TARGET INSTANCES
#==============================================================================
#how many sentences back from orphan to look for stratigraphic phrases
strat_distance=3
#initialize the dump variable
strat_target_distant=[]
#list of docids with orphaned targets
doc_cursor.execute("""
SELECT DISTINCT ON (target_instances.docid)
target_instances.docid
FROM target_instances, %(my_app)s_sentences_%(my_product)s
WHERE target_instances.target_id
NOT IN (select strat_target.target_id from strat_target)
AND num_strat_doc<>0
AND target_instances.docid=%(my_app)s_sentences_%(my_product)s.docid
AND target_instances.sentid=%(my_app)s_sentences_%(my_product)s.sentid
ORDER BY target_instances.docid ASC, target_instances.sentid ASC
""", {
"my_app": AsIs(config['app_name']),
"my_product": AsIs(config['product'].lower())
})
#initialize the strat_target_distant relationship table
cursor.execute("""
DELETE FROM strat_target_distant;
""")
connection.commit()
#loop through document list
for idx,doc in enumerate(doc_cursor):
#orphaned targets from a given document
target_cursor.execute("""
SELECT DISTINCT ON (target_instances.docid,
target_instances.sentid,
target_instances.target_word_idx)
target_instances.docid,
target_instances.sentid,
target_word,
target_word_idx,
target_parent,
target_children,
%(my_app)s_sentences_%(my_product)s.words,
target_id
FROM target_instances, %(my_app)s_sentences_%(my_product)s
WHERE target_instances.target_id
NOT IN (select strat_target.target_id from strat_target)
AND target_instances.docid=%(my_docid)s
AND target_instances.docid=%(my_app)s_sentences_%(my_product)s.docid
AND target_instances.sentid=%(my_app)s_sentences_%(my_product)s.sentid
ORDER BY target_instances.docid ASC, target_instances.sentid ASC
""", {
"my_app": AsIs(config['app_name']),
"my_product": AsIs(config['product'].lower()),
"my_docid": doc[0]
})
#convert list of tuples to list of lists
tmp_target=target_cursor.fetchall()
tmp_target = [list(elem) for elem in tmp_target]
#define the sentences where those instances come from
sentids = [item[1] for item in tmp_target]
#gather all stratigraphic phrases from docid that occur before the deepest orphan
sent_query = max(sentids)
#strat_phrases from document that precede the orphan deepest into the document
strat_cursor.execute("""
SELECT DISTINCT ON (docid, sentid, strat_phrase_root,strat_name_id)
docid, sentid, strat_phrase_root, strat_flag, num_phrase, strat_name_id,int_name,age_agree from strat_phrases
WHERE docid=%s
AND sentid<%s
ORDER BY sentid ASC;""",
(doc[0], sent_query)
)
#convert list of tuples to list of lists
tmp_strat=strat_cursor.fetchall()
tmp_strat = [list(elem) for elem in tmp_strat]
#loop through the list of orphans
for idx2,target in enumerate(tmp_target):
#define set of variables from this particular orphan
target_sent=target[1]
target_word=target[2]
parent = target[4]
children = list(sum(eval(target[5]), []))
words = target[6]
target_id=target[7]
#find all stratigraphic phrases that occur before this orphan and within the defined buffer
strat_find = [item[1] for item in tmp_strat if target_sent-item[1]<=strat_distance and target_sent-item[1]>0]
#if candidate strat_phrase(s) are found
if strat_find:
            #select the closest sentence with phrase(s)
strat_find=max(strat_find)
#collect all the strat_phrase(s) in that sentence
strat_info = [item for item in tmp_strat if item[1]==strat_find]
#define the sentids for sentences that bridge the strat_phrase(s) to the orphan
sent_inbetween=range(strat_find,target[1]+1)
#collect the words between strat_phrases and orphaned target
sent_cursor.execute("""
SELECT docid, sentid, words from %(my_app)s_sentences_%(my_product)s
WHERE docid=%(my_docid)s
AND sentid=ANY(%(my_sentid)s)
ORDER BY sentid ASC;""",
{
"my_app": AsIs(config['app_name']),
"my_product": AsIs(config['product'].lower()),
"my_docid": doc[0],
"my_sentid": sent_inbetween
}
)
#convert list of tuples to list of lists
words_between=sent_cursor.fetchall()
words_between = [list(elem) for elem in words_between]
words_between = [' '.join(item[2]) for item in words_between]
words_between = ''.join(words_between)
#define the distance between orphan and strat_phrase(s) sentence
target_distance = target[1]-strat_find
#define grammatical parent and children (as words) of the orphan
parent = [words[i] for i in parent]
children = [words[i] for i in children]
#loop through all the strat_phrases found in the nearest host sentence
for match in strat_info:
#info about the strat_phrase
[docid, sentid, strat_phrase_root,
strat_flag, num_phrase, strat_name_id,
int_name, age_agree] = match
toadd=[docid, sentid, strat_phrase_root,
strat_flag, num_phrase, strat_name_id,
int_name, age_agree, target_distance,
target_id,target_word,parent,children,
words_between]
#dump to local variable
strat_target_distant.append(toadd)
#write to psql table
cursor.execute("""
INSERT INTO strat_target_distant( docid,
sentid,
strat_phrase_root,
strat_flag,
num_phrase,
strat_name_id,
int_name,
age_agree,
target_sent_dist,
target_id,
target_word,
target_parent,
target_children,
words_between)
VALUES (%s, %s, %s, %s, %s,
%s, %s, %s, %s, %s,
%s, %s, %s, %s);""",
(docid, sentid, strat_phrase_root,
strat_flag, num_phrase, strat_name_id,
int_name, age_agree, target_distance,
target_id,target_word,parent,children,
words_between)
)
#push the insertions
connection.commit()
#==============================================================================
# PROVIDE SUMMARIES FOR AGE-AGREEMENT BETWEEN STRAT_PHRASE AND MACROSTRAT STRAT_NAME
#==============================================================================
#initialize the age_sum column in strat_target_distant
cursor.execute("""
UPDATE strat_target_distant
SET age_sum = '-';
""")
connection.commit()
#gather distinct Macrostrat links
cursor.execute("""
SELECT DISTINCT (strat_name_id) FROM strat_target_distant;
""")
#convert list of tuples to list of lists
tocheck=cursor.fetchall()
tocheck = [list(elem) for elem in tocheck]
#find all instances of strat_name_id occurring in strat_phrases with a computed age_agree value
cursor.execute("""
WITH query AS(SELECT DISTINCT (strat_name_id) FROM strat_target_distant)
SELECT strat_phrases.strat_name_id, strat_phrases.age_agree FROM strat_phrases,query
WHERE strat_phrases.strat_name_id=query.strat_name_id
AND strat_phrases.age_agree<>'-';
""",
)
#convert list of tuples to list of lists
results=cursor.fetchall()
results = [list(elem) for elem in results]
#loop through all strat_name_ids and summarize age agreement discoveries
for idx,name in enumerate(tocheck):
tmp = [i for i in results if i[0]==name[0]]
ids = name[0].split('~')
#initialize the age agreement list
counts = [[0] * 2 for i in range(len(ids))]
#loop through all comparisons between a strat_name_id string and interval information
for idx2,item in enumerate(tmp):
#consider each strat_name in the strat_name_string
ans = item[1].split('~')
        #record whether it's an allowable or disallowable match
for idx3,data in enumerate(ans):
if data=='yes':
counts[idx3][0]+=1
elif data=='no':
counts[idx3][1]+=1
#record the age agreement summary
tocheck[idx].extend([counts])
#variables to push to PSQL database
strat_name_id=name[0]
str_counts=str(counts)
#write to PSQL table
cursor.execute("""
UPDATE strat_target_distant
SET age_sum = %s
WHERE strat_name_id = %s;""",
(str_counts, strat_name_id)
)
connection.commit()
#summary statistic
success = 'number of strat-distant target tuples : %s' %len(strat_target_distant)
#toc
elapsed_time = time.time() - start_time
print '\n ###########\n\n %s \n elapsed time: %d seconds\n\n ###########\n\n' %(success,elapsed_time)
#show a random result
r = random.randint(0, len(strat_target_distant)-1)
show = "\n".join(str(x) for x in strat_target_distant[r])
print "=========================\n" + show + "\n========================="
#close the postgres connection
connection.close()
|
StarcoderdataPython
|
67362
|
# Slate Macro Keypad
#
# UCF Senior Design Project - Group 8
# Summer - Fall '21
#
"""
This version runs on Feather nRF52840 Express with a 3.5" FeatherWing
"""
import time
import displayio
import terminalio
from adafruit_display_text import bitmap_label
from adafruit_displayio_layout.layouts.grid_layout import GridLayout
from adafruit_displayio_layout.widgets.icon_widget import IconWidget
from adafruit_featherwing import tft_featherwing_35
import usb_hid
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keyboard_layout_us import KeyboardLayoutUS
from adafruit_hid.consumer_control import ConsumerControl
from layers import (
slate_config,
KEY,
STRING,
MEDIA,
KEY_PRESS,
KEY_RELEASE,
CHANGE_LAYER,
)
# seems to help the touchscreen not get stuck with chip not found
time.sleep(2)
# display and touchscreen initialization
displayio.release_displays()
tft_featherwing = tft_featherwing_35.TFTFeatherWing35()
display = tft_featherwing.display
touchscreen = tft_featherwing.touchscreen
# HID setup
kbd = Keyboard(usb_hid.devices)
cc = ConsumerControl(usb_hid.devices)
kbd_layout = KeyboardLayoutUS(kbd)
# variables to enforce timeout between icon presses
COOLDOWN_TIME = 0.5
LAST_PRESS_TIME = -1
# 'mock' icon indexes for the layer buttons
# used for debouncing
PREV_LAYER_INDEX = -1
NEXT_LAYER_INDEX = -2
HOME_LAYER_INDEX = -3
# start on first layer
current_layer = 0
# Make the main_group to hold everything
main_group = displayio.Group()
display.show(main_group)
# loading screen
loading_group = displayio.Group()
# black background, screen size minus side buttons
loading_background = displayio.Bitmap(
(display.width - 40) // 20, display.height // 20, 1
)
loading_palette = displayio.Palette(1)
loading_palette[0] = 0x0
# scaled group to match screen size minus side buttons
loading_background_scale_group = displayio.Group(scale=20)
loading_background_tilegrid = displayio.TileGrid(
loading_background, pixel_shader=loading_palette
)
loading_background_scale_group.append(loading_background_tilegrid)
# loading screen label
loading_label = bitmap_label.Label(terminalio.FONT, text="Loading...", scale=3)
loading_label.anchor_point = (0.5, 0.5)
loading_label.anchored_position = (display.width // 2, display.height // 2)
# append background and label to the group
loading_group.append(loading_background_scale_group)
loading_group.append(loading_label)
# GridLayout to hold the icons
# size and location can be adjusted to fit
# different sized screens.
layout = GridLayout(
x=20,
y=20,
width=420,
height=290,
grid_size=(4, 3),
cell_padding=6,
)
# list that holds the IconWidget objects for each icon.
_icons = []
# list that holds indexes of currently pressed icons and layer buttons
# used for debouncing
_pressed_icons = []
# layer label at the top of the screen
layer_label = bitmap_label.Label(terminalio.FONT)
layer_label.anchor_point = (0.5, 0.0)
layer_label.anchored_position = (display.width // 2, 4)
main_group.append(layer_label)
# right side layer buttons
next_layer_btn = IconWidget("", "icons/layer_next.bmp", on_disk=True)
next_layer_btn.x = display.width - 40
next_layer_btn.y = display.height - 100
next_layer_btn.resize = (40, 100)
main_group.append(next_layer_btn)
prev_layer_btn = IconWidget("", "icons/layer_prev.bmp", on_disk=True)
prev_layer_btn.x = display.width - 40
prev_layer_btn.y = 110
prev_layer_btn.resize = (40, 100)
main_group.append(prev_layer_btn)
home_layer_btn = IconWidget("", "icons/layer_home.bmp", on_disk=True)
home_layer_btn.x = display.width - 40
home_layer_btn.y = 0
home_layer_btn.resize = (40, 100)
main_group.append(home_layer_btn)
# helper method to load the icons for a layer,
# selected by its index in the list of layers
def load_layer(layer_index):
# show the loading screen
main_group.append(loading_group)
time.sleep(0.05)
# resets icon lists to empty
global _icons
_icons = []
layout._cell_content_list = []
# remove previous layer icons from the layout
while len(layout) > 0:
layout.pop()
    # set the layer label at the top of the screen
layer_label.text = slate_config["layers"][layer_index]["name"]
    # loop over each shortcut and its index
for i, shortcut in enumerate(slate_config["layers"][layer_index]["shortcuts"]):
# create an icon for the current shortcut
_new_icon = IconWidget(shortcut["label"], shortcut["icon"], on_disk=True)
# add it to the list of icons
_icons.append(_new_icon)
# add it to the grid layout
        # calculate its position from the index
layout.add_content(_new_icon, grid_position=(i % 4, i // 4), cell_size=(1, 1))
# hide the loading screen
time.sleep(0.05)
main_group.pop()
# append the grid layout to the main_group
# so it gets shown on the display
main_group.append(layout)
# load the first layer to start
load_layer(current_layer)
# main loop
while True:
if touchscreen.touched:
# loop over all data in touchscreen buffer
while not touchscreen.buffer_empty:
touches = touchscreen.touches
# loop over all points touched
for point in touches:
if point:
# current time, used for timeout between icon presses
_now = time.monotonic()
# if the timeout has passed
if _now - LAST_PRESS_TIME > COOLDOWN_TIME:
# print(point)
# map the observed minimum and maximum touch values
# to the screen size
y = point["y"] - 250
x = 4096 - point["x"] - 250
y = y * display.width // (3820 - 250)
x = x * display.height // (3820 - 250)
# touch data is 90 degrees rotated
# flip x, and y here to account for that
p = (y, x)
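                        # worked example (display size and raw values are assumptions
                        # for illustration): with a 480x320 screen and a raw touch of
                        # x=2000, y=2000:
                        #   y = (2000 - 250) * 480 // 3570 = 235
                        #   x = (4096 - 2000 - 250) * 320 // 3570 = 165
                        #   p = (235, 165) after the 90-degree swap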
# print(p)
# Next layer button pressed
if (
next_layer_btn.contains(p)
and NEXT_LAYER_INDEX not in _pressed_icons
):
# increment layer
current_layer += 1
# wrap back to beginning from end
if current_layer >= len(slate_config["layers"]):
current_layer = 0
# load the new layer
load_layer(current_layer)
# save current time to check for timeout
LAST_PRESS_TIME = _now
# append this index to pressed icons for debouncing
_pressed_icons.append(NEXT_LAYER_INDEX)
# home layer button pressed
if (
home_layer_btn.contains(p)
and HOME_LAYER_INDEX not in _pressed_icons
):
# 0 index is home layer
current_layer = 0
# load the home layer
load_layer(current_layer)
# save current time to check for timeout
LAST_PRESS_TIME = _now
# append this index to pressed icons for debouncing
_pressed_icons.append(HOME_LAYER_INDEX)
# Previous layer button pressed
if (
prev_layer_btn.contains(p)
and PREV_LAYER_INDEX not in _pressed_icons
):
# decrement layer
current_layer -= 1
# wrap back to end from beginning
if current_layer < 0:
current_layer = len(slate_config["layers"]) - 1
# load the new layer
load_layer(current_layer)
# save current time to check for timeout
LAST_PRESS_TIME = _now
# append this index to pressed icons for debouncing
_pressed_icons.append(PREV_LAYER_INDEX)
# loop over current layer icons and their indexes
for index, icon_shortcut in enumerate(_icons):
# if this icon was pressed
if icon_shortcut.contains(p):
# debounce logic, check that it wasn't already pressed
if index not in _pressed_icons:
# print("pressed {}".format(index))
# get actions for this icon from config object
_cur_actions = slate_config["layers"][
current_layer
]["shortcuts"][index]["actions"]
# tuple means it's a single action
if isinstance(_cur_actions, tuple):
# put it in a list by itself
_cur_actions = [_cur_actions]
# loop over the actions
for _action in _cur_actions:
# HID keyboard keys
if _action[0] == KEY:
kbd.press(*_action[1])
kbd.release(*_action[1])
# String to write from layout
elif _action[0] == STRING:
kbd_layout.write(_action[1])
# Consumer control code
elif _action[0] == MEDIA:
cc.send(_action[1])
# Key press
elif _action[0] == KEY_PRESS:
kbd.press(*_action[1])
# Key release
elif _action[0] == KEY_RELEASE:
kbd.release(*_action[1])
# Change Layer
elif _action[0] == CHANGE_LAYER:
if isinstance(
_action[1], int
) and 0 <= _action[1] < len(
slate_config["layers"]
):
current_layer = _action[1]
load_layer(_action[1])
# if there are multiple actions
if len(_cur_actions) > 1:
# small sleep to make sure
# OS can respond to previous action
time.sleep(0.2)
# save current time to check for timeout
LAST_PRESS_TIME = _now
# append this index to pressed icons for debouncing
_pressed_icons.append(index)
else: # screen not touched
# empty the pressed icons list
_pressed_icons.clear()
|
StarcoderdataPython
|
1675742
|
#coding:utf-8
import caffe
from caffe import layers as L, params as P
def lenet(lmdb, batch_size):
# our version of LeNet: a series of linear and simple nonlinear transformations
    n = caffe.NetSpec()  # see item 1 of the detailed notes
n.data, n.label = L.Data(batch_size=batch_size, backend=P.Data.LMDB, source=lmdb,
transform_param=dict(scale=1./255), ntop=2)
n.conv1 = L.Convolution(n.data, kernel_size=5, num_output=20, weight_filler=dict(type='xavier'))
n.pool1 = L.Pooling(n.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
n.conv2 = L.Convolution(n.pool1, kernel_size=5, num_output=50, weight_filler=dict(type='xavier'))
n.pool2 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
n.fc1 = L.InnerProduct(n.pool2, num_output=500, weight_filler=dict(type='xavier'))
n.relu1 = L.ReLU(n.fc1, in_place=True)
n.score = L.InnerProduct(n.relu1, num_output=10, weight_filler=dict(type='xavier'))
n.loss = L.SoftmaxWithLoss(n.score, n.label)
    return n.to_proto()  # write the net definition out to a prototxt file
# with open('D:/Develop/DL/sources/caffe/examples/mnist/lenet_auto_train.prototxt', 'w') as f:
# f.write(str(lenet('temp/mnist/mnist_data/mnist_train_lmdb', 64)))
# with open('D:/Develop/DL/sources/caffe/examples/mnist/lenet_auto_test.prototxt', 'w') as f:
# f.write(str(lenet('temp/mnist/mnist_data/mnist_test_lmdb', 100)))
caffe.set_device(0)  # select the default GPU
caffe.set_mode_gpu()  # use GPU mode
### load the solver and create train and test nets
solver = None # ignore this workaround for lmdb data (can't instantiate two solvers on the same data)
solver = caffe.SGDSolver('temp/mnist/lenet_auto_solver.prototxt')
result = [(k, v.data.shape) for k, v in solver.net.blobs.items()]
print(result)
print([(k, v[0].data.shape) for k, v in solver.net.params.items()])
|
StarcoderdataPython
|
1775636
|
<filename>mouseclick_opencv_channelbgr.py<gh_stars>0
import numpy as np
import cv2
def click_event(event,x,y,flags,param):
if event == cv2.EVENT_LBUTTONDOWN:
        # numpy images are indexed [row, col], so the click coordinates are used as [y, x]
        blue = img[y, x, 0]
        green = img[y, x, 1]
        red = img[y, x, 2]
cv2.circle(img,(x,y),3,(0,255,255),-1)
mycolorimage = np.zeros([512,512,3],np.uint8)
mycolorimage[:] = [blue,green,red]
cv2.imshow('color',mycolorimage)
img = cv2.imread("lena.jpg")
cv2.imshow("image",img)
cv2.setMouseCallback("image",click_event)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
StarcoderdataPython
|
3335212
|
<filename>tests/exceptions/test_validation_error.py
from flake8_aaa.checker import Checker
from flake8_aaa.exceptions import ValidationError
def test():
result = ValidationError(
line_number=99,
offset=777,
text='__MESSAGE__',
)
assert result.to_flake8(Checker) == (99, 777, '__MESSAGE__', Checker)
|
StarcoderdataPython
|
3300193
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import hashlib
import binascii
import struct
import re
import requests
from requests.auth import HTTPDigestAuth
import logging
from datetime import datetime
__version__ = '0.2.1'
AUDIOTEKA_API_URL = "https://proxy3.audioteka.com/pl/MobileService.svc/"
AUDIOTEKA_API_VERSION = "2.3.15"
DEFAULT_HEADERS = {"User-agent": "Android/" + AUDIOTEKA_API_VERSION}
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def get_categories(
category_name, page=1, per_page_count=100, samples=False, session=None, headers=None
):
"""
gets Categories
:param category_name:
:param page:
:param per_page_count:
:param samples:
:param session:
:param headers:
:return:
"""
return _post(
"categories",
{},
session,
{
"categoryName": category_name,
"page": page,
"samples": samples,
"count": per_page_count,
},
headers,
).json()
def login(user_login, user_password, session=None, headers=None):
"""
signing in into Audioteka.
:param user_login:
:param user_password:
:param session:
:param headers:
:return: credentials Dict with login data,token and hashed password
{
"userLogin": "yyyyyyyyyyyyyyyyy",
"userPassword": "<PASSWORD>",
"HashedPassword": "<PASSWORD>",
"AuthenticationToken": "<PASSWORD>",
"Salt": "3666666666",
"Status": "LoginStatusOk"
}
"""
headers = headers if headers else DEFAULT_HEADERS
headers["XMobileAudiotekaVersion"] = AUDIOTEKA_API_VERSION
credentials = {"userLogin": user_login, "userPassword": <PASSWORD>}
r = _post("login", credentials, session, {}, headers)
logged_in_data = r.json()
if logged_in_data["Status"] == "LoginStatusErr":
_set_response_login_failed(r)
r.raise_for_status()
logged_in_data["HashedPassword"] = _get_hashed_password(
credentials["userPassword"], logged_in_data["Salt"]
)
logged_in_data["userLogin"] = credentials["userLogin"]
return logged_in_data
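# hedged usage sketch (credentials and return-value keys are placeholders, not verified
# against the live API):
#   creds = login("user@example.com", "secret")
#   shelf = get_shelf(creds)
#   item = get_shelf_item(some_product_id, creds)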
def get_shelf(credentials, session=None, headers=None):
"""
gets personal shelf content
:param credentials:
:param session:
:param headers:
:return:
"""
return _post(
"get_shelf", credentials, session, {"onlyPaid": "false"}, headers
).json()
def get_shelf_item(product_id, credentials, session=None, headers=None):
"""
gets one book details
:param product_id:
:param credentials:
:param session:
:param headers:
:return:
"""
return _post(
"shelf_item", credentials, session, {"productId": product_id}, headers
).json()
def get_chapters(
tracking_number, line_item_id, credentials, session=None, headers=None
):
"""
get list of chapters from book
:param tracking_number:
:param line_item_id:
:param credentials:
:param session:
:param headers:
:return:
"""
return _post(
"get_chapters",
credentials,
session,
{"lineItemId": line_item_id, "trackingNumber": tracking_number},
headers,
).json()
def get_chapter_file(
tracking_number,
line_item_id,
download_server_url,
download_server_footer,
file_name,
credentials,
stream=False,
session=None,
headers=None,
):
"""
gets chapter file.
:param tracking_number:
:param line_item_id:
:param download_server_url:
:param download_server_footer:
:param file_name:
:param credentials:
:param stream: Default: False. If True, returns stream (chunks)
:param session:
:param headers:
:return: Requests response
"""
s = session if session else requests.session()
if not headers:
headers = DEFAULT_HEADERS
headers["XMobileAudiotekaVersion"] = AUDIOTEKA_API_VERSION
headers["XMobileAppVersion"] = DEFAULT_HEADERS["User-agent"]
headers["Range"] = "bytes=0-"
url = (
download_server_url
+ "?TrackingNumber={0}&LineItemId={1}&FileName={2}&".format(
tracking_number, line_item_id, file_name
)
+ download_server_footer
)
r = s.get(
url,
auth=HTTPDigestAuth(credentials["userLogin"], credentials["HashedPassword"]),
headers=headers,
stream=stream
)
return r
def epoch_to_datetime(aud_dt):
"""
converts datetime in format: /Date(1545693401480+0100)/ into Datetime
:param aud_dt:
:return:
"""
result = re.search(r"Date\((.*)\+(.*)\)", aud_dt)
epoch_utc = result.group(1)
local_tz_offset = result.group(2)
try:
return datetime.utcfromtimestamp(
float(epoch_utc) if len(epoch_utc) < 11 else float(epoch_utc) / 1000
)
except (TypeError, ValueError) as e:
logger.error(str(e) + " Input epoch_utc: " + str(epoch_utc))
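# example: epoch_to_datetime("/Date(1545693401480+0100)/") extracts epoch_utc=1545693401480,
# divides by 1000 (11+ digits are treated as milliseconds) and returns the UTC datetime
# 2018-12-24 23:16:41.480000; the +0100 offset is captured but not applied.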
def _get_hashed_password(user_password, salt):
"""
calculates hashed password
Salt can be get calling `login`
:param user_password:
:param salt:
:return:
"""
salt_bytes = struct.pack(">I", int(salt))
password_encoded = user_password.encode("utf-16le")
hash_bytes = hashlib.sha256(salt_bytes + password_encoded).digest()
hashed_password = binascii.hexlify(salt_bytes + hash_bytes).upper()
return bytes(hashed_password).decode()
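# shape check (illustrative, not a real credential): the result is an uppercase hex string of
# 4 salt bytes + 32 SHA-256 bytes = 72 hex characters.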
def _post(endpoint, credentials, session=None, data=None, headers=None):
d, h = _merge_into_data_and_headers(
credentials, data, headers if headers else DEFAULT_HEADERS
)
s = session if session else requests.session()
#
r = s.post(AUDIOTEKA_API_URL + endpoint, data=d, headers=h)
j = r.json()
if j == "login_failed":
_set_response_login_failed(r)
elif j == "item_not_found":
_set_response_item_not_found(r)
r.raise_for_status()
return r
def _merge_into_data_and_headers(credentials, data, headers):
if not credentials:
return data, headers
ret_data = dict()
ret_headers = dict()
ret_data["userLogin"] = credentials["userLogin"]
if "userPassword" in credentials:
ret_data["userPassword"] = credentials["userPassword"]
else:
ret_headers["XMobileAudiotekaVersion"] = AUDIOTEKA_API_VERSION
ret_headers["XMobileTokenAuthentication"] = credentials["AuthenticationToken"]
ret_headers["XMobileUserLogin"] = credentials["userLogin"]
return _merge_dicts(data, ret_data), _merge_dicts(ret_headers, headers)
def _merge_dicts(*dict_args):
result = {}
for dictionary in dict_args:
result.update(dictionary)
return result
def _set_response_login_failed(r):
r.status_code = 401
r.reason = "Login failed"
def _set_response_item_not_found(r):
r.status_code = 404
r.reason = "Item not found"
|
StarcoderdataPython
|
107677
|
<filename>apps/slack.py
from talon.voice import Context, Key
from ..utils import text, insert, parse_words, join_words
ctx = Context("slack", bundle="com.tinyspeck.slackmacgap")
emoji_map = {
"thumbs up": ":+1:",
"okay": ":ok_hand:",
"check": ":heavy_check_mark:",
"crossed fingers": ":crossed_fingers:",
"fingers": ":crossed_fingers:",
"pray": ":pray:",
"shrug": r"¯\_(ツ)_/¯",
"tada": ":tada:",
"~": ":tada:",
"banhammer": ":banhammer:",
"fancy banhammer": ":banhammer_fancy:"
}
def emoji_picker(m):
try:
return insert(emoji_map[join_words(parse_words(m))])
except KeyError:
return insert("You asked for '{}', but it's not in the map.")
keymap = {
# Channel
"channel": Key("cmd-k"),
"channel <dgndictation>": [Key("cmd-k"), text],
"([channel] unread last | gopreev)": Key("alt-shift-up"),
"([channel] unread next | goneck)": Key("alt-shift-down"),
"(slack | lack) [channel] info": Key("cmd-shift-i"),
"channel up": Key("alt-up"),
"channel down": Key("alt-down"),
# Navigation
"(move | next) focus": Key("ctrl-`"),
"[next] (section | zone)": Key("f6"),
"(previous | last) (section | zone)": Key("shift-f6"),
"(slack | lack) [direct] messages": Key("cmd-shift-k"),
"(slack | lack) threads": Key("cmd-shift-t"),
"(slack | lack) (history [next] | back | backward)": Key("cmd-["),
"(slack | lack) forward": Key("cmd-]"),
"[next] (element | bit)": Key("tab"),
"(previous | last) (element | bit)": Key("shift-tab"),
"(slack | lack) (my stuff | activity)": Key("cmd-shift-m"),
"(slack | lack) directory": Key("cmd-shift-e"),
"(slack | lack) (starred [items] | stars)": Key("cmd-shift-s"),
"(slack | lack) unread [messages]": Key("cmd-j"),
"(go | undo | toggle) full": Key("ctrl-cmd-f"),
"(slack | lack) (find | search)": Key("cmd-f"),
# Messaging
"grab left": Key("shift-up"),
"grab right": Key("shift-down"),
"add line": Key("shift-enter"),
"(slack | lack) (slap | slaw | slapper)": [Key("cmd-right"), Key("shift-enter")],
"(slack | lack) (react | reaction)": Key("cmd-shift-\\"),
"(insert command | commandify)": Key("cmd-shift-c"),
"insert code": [
"``````",
Key("left left left"),
Key("shift-enter"),
Key("shift-enter"),
Key("up"),
],
"(slack | lack) (bull | bullet | bulleted) [list]": Key("cmd-shift-8"),
"(slack | lack) (number | numbered) [list]": Key("cmd-shift-7"),
"(slack | lack) (quotes | quotation)": Key("cmd-shift->"),
"bold": Key("cmd-b"),
"(italic | italicize)": Key("cmd-i"),
"(strike | strikethrough)": Key("cmd-shift-x"),
"mark all read": Key("shift-esc"),
"mark channel read": Key("esc"),
"(clear | scrap | scratch)": Key("cmd-a backspace"),
# Files and Snippets
"(slack | lack) upload": Key("cmd-u"),
"(slack | lack) snippet": Key("cmd-shift-enter"),
# Calls
"([toggle] mute | unmute)": Key("m"),
"(slack | lack) ([toggle] video)": Key("v"),
"(slack | lack) invite": Key("a"),
# Miscellaneous
"(slack | lack) shortcuts": Key("cmd-/"),
"send": Key("enter"),
"emo <dgndictation>": emoji_picker
}
ctx.keymap(keymap)
|
StarcoderdataPython
|
72320
|
<gh_stars>0
#!/usr/bin/env python
"""
Unit test/basic Daemon-Python implementation
"""
import sys
import time
from daemon import Daemon
class TestDaemon(Daemon):
def run(self): #Define what tasks/processes to daemonize
while True:
time.sleep(1)
if __name__ == "__main__":
daemon = TestDaemon('/tmp/daemon-py-test.pid') #Define a pidfile location (typically located in /tmp or /var/run)
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
elif 'status' == sys.argv[1]:
daemon.status()
else:
sys.stdout.write("Unknown command\n")
sys.exit(2)
sys.exit(0)
else:
sys.stdout.write("Usage: %s start|stop|restart|status\n" % sys.argv[0])
sys.exit(2)
|
StarcoderdataPython
|
1748454
|
<filename>ci/setup.py
"""Instructions and steps to get integration tests up and running
This script will set up the tokens and parameters for easy transfer of the integration data.
Requirements:
The IBL Globus login credentials
A Globus endpoint set up for downloading the integration data
ibllib and iblscripts repositories
"""
from pathlib import Path
from ibllib.io import params
import oneibl.params
DEFAULT_PAR = {'local_endpoint': None, 'remote_endpoint': None, 'GLOBUS_CLIENT_ID': None}
print(
"""Setting up Globus
1. Login to the Globus Website (ask devs for the login credentials)
2. Go to Endpoints and create a new endpoint for the local device (the one that will run this
script).
3. In the new endpoint's overview page, copy the 'Endpoint UUID' field. This is the LOCAL_REPO_ID.
4. Go to the 'IBL Top Level' endpoint overview page and copy the 'Endpoint UUID' field. This is
the REMOTE_REPO_ID.
5. Copy your GLOBUS_CLIENT_ID (ask the software devs for this).
"""
)
params_id = 'globus/admin'
pars = params.read(params_id, DEFAULT_PAR)
default = pars.local_endpoint
local_endpoint = input(
f'Enter your LOCAL_REPO_ID ({default}):'
)
pars = pars.set('local_endpoint', local_endpoint.strip() or default)
params.write(params_id, pars)
default = pars.remote_endpoint
remote_endpoint = input(
f'Enter your REMOTE_REPO_ID ({default}):'
)
pars = pars.set('remote_endpoint', remote_endpoint.strip() or default)
params.write(params_id, pars)
default = pars.GLOBUS_CLIENT_ID
globus_client_id = input(
f'Enter your GLOBUS_CLIENT_ID ({default}):'
).strip()
pars = pars.set('GLOBUS_CLIENT_ID', globus_client_id or default)
params.write(params_id, pars)
print(
"""Setting up fixtures
You will now need to define a directory to which you will download the integration test data.
""")
params_id = 'ibl_ci'
pars = params.read(params_id, {'data_root': './'})
default = pars.data_root
data_root = input(
f'Enter the desired location of the test data ({default}):'
)
data_root = Path(data_root.strip() or default).absolute()
params.write(params_id, pars.set('data_root', str(data_root)))
print('You may now download the data by running `./download_data.py`')
|
StarcoderdataPython
|
1679555
|
<reponame>Vibrant-Planet/aorist
from . import download_data_from_remote_gcs_location
from . import download_data_from_remote_web_location
from . import download_data_from_remote_pushshift_api_location_to_newline_delimited_json
from . import extract_named_entities_using_spacy
from . import convert_json_to_csv
from . import text_corpus_data_from_hive
from . import text_corpus_data_from_sqlite
from . import train_fasttext_model
from . import upload_data_to_minio
from . import upload_fasttext_to_minio
from . import upload_data_to_sqlite
import ast
import imp
import pathlib
import astor
from aorist import register_recipes
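# bash_module() below parses a .sh recipe whose layout is assumed (inferred from the parsing
# logic, not documented here): a commented-out decorator call between two '###+' markers,
# followed by the bash entrypoint, e.g.
#   ###+
#   # @aorist_bash(programs, SomeConstraintName)
#   ###+
#   wget -O data.csv "$SOURCE_URL"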
def bash_module(filename):
text = open(filename).read()
assert(text.index('###+') == 0)
b = text[1:].index('###+')
decorator = text[5:b].replace("# ", "")
program = decorator[1:]
entrypoint = text[b + 5:].strip()
tree = ast.parse(program)
assert(len(tree.body[0].value.args) == 2)
constraint = tree.body[0].value.args[1]
tree.body[0].value.args += [ast.Constant(entrypoint)]
tree.body.insert(0, ast.Import(
[ast.alias(name="builtins", asname=None)],
))
tree.body.insert(0, ast.ImportFrom("aorist", [
ast.alias(name="aorist_bash", asname=None),
ast.alias(name=constraint.id, asname=None),
], 0))
tree.body.insert(2, ast.Assign(
[ast.Attribute(
ast.Name(id="builtins", ctx=ast.Load()),
constraint.id,
ast.Store(),
)],
ast.Name(id=constraint.id, ctx=ast.Load()),
))
tree.body.insert(3, ast.Assign(
[ast.Name(id="programs", ctx=ast.Store())],
ast.Dict([],[]),
))
code = astor.to_source(tree)
module_name = filename.replace('.sh', '').split('/')[-1]
module = imp.new_module(module_name)
exec(code, module.__dict__)
return module
path = pathlib.Path(__file__).parent.resolve()
programs = register_recipes(
py_modules=[
download_data_from_remote_pushshift_api_location_to_newline_delimited_json,
download_data_from_remote_gcs_location,
download_data_from_remote_web_location,
upload_data_to_minio,
convert_json_to_csv,
text_corpus_data_from_hive,
text_corpus_data_from_sqlite,
train_fasttext_model,
upload_fasttext_to_minio,
upload_data_to_sqlite,
extract_named_entities_using_spacy,
],
sql_modules=[
"%s/hive_directories_created.presto.sql" % path,
"%s/json_table_schemas_created.presto.sql" % path,
"%s/convert_json_table_to_orc_table.presto.sql" % path,
"%s/orc_table_schemas_created.presto.sql" % path,
],
bash_modules=[
"%s/download_data_from_remote_web_location.sh" % path,
],
r_modules=[
"%s/download_data_from_remote_web_location.R" % path,
],
)
|
StarcoderdataPython
|
43829
|
<gh_stars>0
from multiprocessing.pool import ThreadPool, Pool
from typing import Any, List, Callable, Sequence, TypeVar, Optional, Iterable
from functools import partial
from tqdm import tqdm
T = TypeVar('T')
def apply_map(func: Callable[[T], Any], sequence: Sequence[T],
parallelism: Optional[str], show_progress: bool, tqdm_description: str = None) -> List:
chunksize = max(int(len(sequence) / 1000), 1)
map_func = _choose_map_func(parallelism, chunksize)
map_generator = map_func(func, sequence)
if show_progress:
map_generator = tqdm(map_generator, desc=tqdm_description, total=len(sequence))
objects = list(map_generator)
return objects
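# hedged usage sketch (the mapped function and data are illustrative only):
#   squares = apply_map(lambda x: x * x, list(range(100000)),
#                       parallelism="multi_thread", show_progress=True,
#                       tqdm_description="squaring")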
def _choose_map_func(parallelism: Optional[str], chunksize: int) -> Callable[[Callable, Iterable], List]:
if parallelism is None:
return map
elif parallelism == "multi_thread":
return partial(ThreadPool().imap, chunksize=chunksize)
elif parallelism == "multi_process":
return partial(Pool().imap, chunksize=chunksize)
else:
raise ValueError("supported 'parallelism' types are [None, 'multi_thread', 'multi_process']")
|
StarcoderdataPython
|
3364351
|
<reponame>lyneca/rainbow-table
from datetime import datetime
def date(bad_date):
if not bad_date: return bad_date
good_date = datetime.strptime(bad_date, "%Y-%m-%d")
return good_date.timestamp()
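# example: date("2019-06-01") returns the POSIX timestamp of local midnight on 1 June 2019;
# a falsy input (None or "") is passed straight through.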
|
StarcoderdataPython
|
3357712
|
<filename>client.py
#!/usr/bin/python3
import socket
import sys
import select
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ip_address = '127.0.0.1'
port = 1234
print('Waiting for connection')
server.connect((ip_address, port))
while True:
sockets_list = [sys.stdin, server]
read_sockets, _ , _ = select.select(sockets_list, [], [])
for socks in read_sockets:
if socks == server:
message = socks.recv(2048)
print(message.decode('ascii'))
else:
message = sys.stdin.readline()
server.send(message.encode('ascii'))
# else:
# print(socks.recv(2048))
server.close()
|
StarcoderdataPython
|
127935
|
<reponame>karimbahgat/PyA
import pipy
packpath = "pyagg"
pipy.define_upload(packpath,
name="PyAgg",
description="Simple user-oriented graphics drawing and image manipulation.",
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
url="http://github.com/karimbahgat/PyAgg",
requires=["PIL", "aggdraw"],
keywords="graphics rendering drawing visualization imaging AGG aggdraw",
classifiers=["License :: OSI Approved",
"Programming Language :: Python",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
'Intended Audience :: End Users/Desktop',
"Topic :: Scientific/Engineering :: Visualization",
"Topic :: Multimedia :: Graphics",
"Topic :: Scientific/Engineering :: GIS"],
changes=["Plenty of new feature additions",
"Drop precompiled aggdraw versions in favor of new maintained version of aggdraw",
"Plenty of bug fixes"],
)
pipy.generate_docs(packpath)
#pipy.upload_test(packpath)
pipy.upload(packpath)
|
StarcoderdataPython
|
170996
|
<reponame>revalo/hush.mit.edu
if __name__ == '__main__':
from confess.models import db
from confess.models.post import Post
from confess.models.vote import Vote
if raw_input('r u sure? ') == 'y':
Vote.query.delete()
Post.query.delete()
db.session.commit()
|
StarcoderdataPython
|
1615351
|
<reponame>oleglite/survival
# -*- coding: utf-8 -*-
SERVER_TICK = 0.05
WORLD_SIZE = (100, 100) # width, height
HUNGER_SPEED = 0.005
ILLNESS_SPEED = 0.05
HEALING_SPEED = 0.02
HUNGER_RESTORED_BY_EATING = 0.1
MAX_FOOD_ON_CELL = 10
MAX_GROW_FOOD_SPEED = 0.1
SEND_USER_PERSPECTIVE_RATE = 1
DATABASE = {
'user': 'postgres',
'database': 'postgres',
'host': '127.0.0.1',
'password': '<PASSWORD>'
}
DEBUG = False
if DEBUG:
SERVER_TICK = 1
HUNGER_SPEED = 0.1
ILLNESS_SPEED = 0.1
HEALING_SPEED = 0.05
MAX_GROW_FOOD_SPEED = 1
# Adjust tick-related settings
HUNGER_SPEED *= SERVER_TICK
ILLNESS_SPEED *= SERVER_TICK
HEALING_SPEED *= SERVER_TICK
MAX_GROW_FOOD_SPEED *= SERVER_TICK
|
StarcoderdataPython
|
178192
|
<filename>python-exercises-for-beginners/042.py
# Redo triangle challenge 35, adding a feature that shows which type of triangle is formed:
# Equilateral
# Scalene
# Isosceles
l1 = float(input('Lado 1: '))
l2 = float(input('Lado 2: '))
l3 = float(input('Lado 3: '))
if l1 < l2 + l3 and l2 < l3 + l1 and l3 < l1 + l2:
print('Essas medidas podem formar um triangulo', end=' ')
if l1 == l2 == l3:
print('EQUILÁTERO')
    elif l1 != l2 != l3 != l1:
print('ESCALENO')
else:
print('ISÓSCELES')
|
StarcoderdataPython
|
3373058
|
<reponame>Wesselban/CarbonArm<filename>src/importdata_test.py
import unittest
import importdata
class importdatatests(unittest.TestCase):
def test_getLine(self):
self.assertEqual(importdata.splitstringLine("83000000000000006400080700000000000000005500"), "83000000 00000000 6400 08070000 0000 00000000 5500")
def test_getValue(self):
self.assertEqual(importdata.splitstringValue(b"\x03\x00\x00\x00"), 3)
def test_convertCMPtoTXTNoFile(self):
self.assertEqual(importdata.ConvertCMPtoTXT(""), False)
def test_convertCMPtoTXTNoCMPFile(self):
self.assertEqual(importdata.ConvertCMPtoTXT("test.txt"), False)
def test_convertCMPtoTXTNonExistingFile(self):
self.assertEqual(importdata.ConvertCMPtoTXT("testnonexisting.cmp"), False)
def test_convertCMPtoTXT(self):
self.assertEqual(importdata.ConvertCMPtoTXT("test.cmp"), "test.txt")
def test_convertTXTtoCMPNoFile(self):
self.assertEqual(importdata.ConvertTXTtoCMP(""), False)
def test_convertTXTtoCMPNoTXTFile(self):
self.assertEqual(importdata.ConvertTXTtoCMP("test.cmp"), False)
def test_ConvertTXTtoCMPNonExistingFile(self):
self.assertEqual(importdata.ConvertTXTtoCMP("testnonexisting.txt"), False)
def test_ConvertTXTtoCMP(self):
self.assertEqual(importdata.ConvertTXTtoCMP("test2.txt"), "test2.cmp")
def test_converString(self):
self.assertEqual(importdata.ConvertStringToHexString("00000000 00000000 64000000 c8000000 00000000 00000000 55000000"), "0 0 100 200 0 0 85")
|
StarcoderdataPython
|
3388972
|
<gh_stars>1-10
import tensorflow as tf
a = tf.keras.Input(dtype='float32', name='a', batch_size=1, shape=(2, 3, 4))
b = tf.keras.Input(dtype='float32', name='b', batch_size=1, shape=(2, 3, 5))
c = tf.keras.Input(dtype='float32', name='c', batch_size=1, shape=(2, 3, 6))
# b1 = tf.keras.layers.AveragePooling2D(pool_size=(2, 2), strides=None, padding='same', data_format='channels_last', name='max_pool')(b)
concat = tf.keras.layers.Concatenate(axis=-1, name='output')([a, b, c])
model = tf.keras.Model(inputs=[a, b, c], outputs=[concat])
# print(model.summary())
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
with open('model.tflite', 'wb') as f:
f.write(tflite_model)
|
StarcoderdataPython
|
1670170
|
def helper(wsgiServerClass, global_conf, host, port, **local_conf):
    # an .ini file can't express a tuple, so bindAddress is assembled here from host and port
host = host or global_conf.get('host', 'localhost')
port = port or global_conf.get('port', 4000)
local_conf['bindAddress'] = (host, int(port))
def server(application):
server = wsgiServerClass(application, **local_conf)
server.run()
return server
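# hedged sketch of the kind of PasteDeploy-style .ini section this factory is meant to serve
# (section and egg names are assumptions, not taken from the original project):
#   [server:main]
#   use = egg:SomePackage#fcgi
#   host = 127.0.0.1
#   port = 4000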
|
StarcoderdataPython
|
1627149
|
import requests
class countries:
def __init__(self, base_url, key, secret):
self.base_url = base_url
self.key = key
self.secret = secret
self.headers = {
'accept': 'application/json',
'Content-Type': 'application/json',
'Authorization': 'sso-key {}:{}'.format(key, secret)
}
def get_countries(self, market_id, region_type_id, region_name, fate, order):
        req_url = '{}/v1/countries?marketId={}&regionTypeId={}&regionName={}&sort={}&order={}'.format(
self.base_url, market_id, region_type_id, region_name, fate, order)
r = requests.get(req_url, headers=self.headers)
return r.json()
def get_country(self, country_key, market_id, fate, order):
req_url = '{}/v1/countries/{}?marketId={}&sort={}&order={}'.format(
self.base_url, country_key, market_id, fate, order)
r = requests.get(req_url, headers=self.headers)
return r.json()
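# hedged usage sketch (base URL, credentials and argument values are placeholders):
#   api = countries("https://api.example.com", "my-key", "my-secret")
#   result = api.get_countries("en-US", 2, "", "name", "ASC")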
|
StarcoderdataPython
|
1622427
|
<reponame>vcarehuman/tf-pose-estimation-master
# -*- coding: utf-8 -*-
"""
Created on Tue May 8 16:44:03 2018
@author: <NAME>
"""
class Example:
name = "Example"
@staticmethod
def static():
print ("%s static() called" % Example.name)
class Offspring1(Example):
name = "Offspring1"
class Offspring2(Example):
name = "Offspring2"
@staticmethod
def static():
print ("%s static() called" % Offspring2.name)
Example.static() # prints Example
Offspring1.static() # prints Example
Offspring2.static() # prints Offspring2
|
StarcoderdataPython
|
3287618
|
# -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749.grant_types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import unicode_literals, absolute_import
from oauthlib.common import log
from oauthlib.oauth2.rfc6749 import errors, utils
class GrantTypeBase(object):
error_uri = None
request_validator = None
def create_authorization_response(self, request, token_handler):
raise NotImplementedError('Subclasses must implement this method.')
def create_token_response(self, request, token_handler):
raise NotImplementedError('Subclasses must implement this method.')
def validate_grant_type(self, request):
if not self.request_validator.validate_grant_type(request.client_id,
request.grant_type, request.client, request):
log.debug('Unauthorized from %r (%r) access to grant type %s.',
request.client_id, request.client, request.grant_type)
raise errors.UnauthorizedClientError(request=request)
def validate_scopes(self, request):
if not request.scopes:
request.scopes = utils.scope_to_list(request.scope) or utils.scope_to_list(
self.request_validator.get_default_scopes(request.client_id, request))
log.debug('Validating access to scopes %r for client %r (%r).',
request.scopes, request.client_id, request.client)
if not self.request_validator.validate_scopes(request.client_id,
request.scopes, request.client, request):
raise errors.InvalidScopeError(state=request.state, request=request)
|
StarcoderdataPython
|
4824263
|
#!/bin/env python
#===============================================================================
# NAME: ComponentHVisitor.py
#
# DESCRIPTION: A visitor responsible for the generation of component header
# file.
#
# AUTHOR: reder
# EMAIL: <EMAIL>
# DATE CREATED : Feb 5, 2007
#
# Copyright 2013, California Institute of Technology.
# ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged.
#===============================================================================
#
# Python standard modules
#
import logging
import os
import sys
import time
import datetime
from optparse import OptionParser
#
# Python extension modules and custom interfaces
#
#from Cheetah import Template
#from fprime_ac.utils import version
from fprime_ac.utils import ConfigManager
from fprime_ac.models import ModelParser
#from fprime_ac.utils import DiffAndRename
from fprime_ac.utils import DictTypeConverter
from fprime_ac.generators.visitors import AbstractVisitor
from fprime_ac.generators import formatters
#
# Import precompiled templates here
#
from fprime_ac.generators.templates.channels import ChannelHeader
from fprime_ac.generators.templates.channels import ChannelBody
#
# Universal globals used within module go here.
# (DO NOT USE MANY!)
#
# Global logger init. below.
PRINT = logging.getLogger('output')
DEBUG = logging.getLogger('debug')
#
# Module class or classes go here.
class InstanceChannelVisitor(AbstractVisitor.AbstractVisitor):
"""
A visitor class responsible for generation of channel Python classes.
"""
__instance = None
__config = None
__fp = None
__form = None
__form_comment = None
__model_parser = None
def __init__(self):
"""
Constructor.
"""
self.__config = ConfigManager.ConfigManager.getInstance()
self.__form = formatters.Formatters.getInstance()
self.__form_comment = formatters.CommentFormatters()
DEBUG.info("InstanceChannelVisitor: Instanced.")
self.bodytext = ""
self.prototypetext = ""
def _writeTmpl(self, c, fp, visit_str):
"""
Wrapper to write tmpl to files desc.
"""
DEBUG.debug('ChannelVisitor:%s' % visit_str)
DEBUG.debug('===================================')
DEBUG.debug(c)
fp.writelines(c.__str__())
DEBUG.debug('===================================')
def DictStartVisit(self, obj , topology_model):
"""
Defined to generate files for generated code products.
        @param obj: the instance of the channel model to visit.
"""
# Build filename here...
# Make dictionary directly if it doesn't exist
output_dir = os.environ["DICT_DIR"] + "/channels"
if not (os.path.isdir(output_dir)):
os.makedirs(output_dir)
init_file = output_dir + os.sep + "__init__.py"
open(init_file, "w+")
self.__fp = {}
try:
instance_obj_list = topology_model.get_base_id_dict()[obj.get_component_base_name()]
except Exception:
PRINT.info("ERROR: Could not find instance object for component " + obj.get_component_base_name() + ". Check topology model to see if the component was instanced.")
raise
for instance_obj in instance_obj_list:
if instance_obj[3].get_dict_short_name() != None:
fname = "{}_{}".format(instance_obj[3].get_dict_short_name() , obj.get_name())
elif not topology_model.get_prepend_instance_name() and len(instance_obj_list) == 1:
fname = obj.get_name()
else:
fname = "{}_{}".format(instance_obj[0] , obj.get_name())
pyfile = "{}/{}.py".format(output_dir , fname)
DEBUG.info('Open file: {}'.format(pyfile))
fd = open(pyfile,'w')
if fd == None:
raise Exception("Could not open {} file.".format(pyfile))
DEBUG.info('Completed {} open'.format(pyfile))
self.__fp[fname] = fd
def DictHeaderVisit(self, obj , topology_model):
"""
Defined to generate header for channel python class.
"""
for fname in list(self.__fp.keys()):
c = ChannelHeader.ChannelHeader()
d = datetime.datetime.now()
c.date = d.strftime("%A, %d %B %Y")
c.user = os.environ['USER']
c.source = obj.get_xml_filename()
self._writeTmpl(c, self.__fp[fname], "channelHeaderVisit")
def DictBodyVisit(self, obj , topology_model):
"""
Defined to generate the body of the Python channel class
        @param obj: the instance of the channel model to operate on.
"""
try:
instance_obj_list = topology_model.get_base_id_dict()[obj.get_component_base_name()]
except Exception:
PRINT.info("ERROR: Could not find instance object for component " + obj.get_component_base_name() + ". Check topology model to see if the component was instanced.")
raise
for instance_obj in instance_obj_list:
c = ChannelBody.ChannelBody()
if instance_obj[3].get_dict_short_name() != None:
fname = "{}_{}".format(instance_obj[3].get_dict_short_name() , obj.get_name())
elif not topology_model.get_prepend_instance_name() and len(instance_obj_list) == 1:
fname = obj.get_name()
else:
fname = "{}_{}".format(instance_obj[0] , obj.get_name())
c.name = fname
if len(obj.get_ids()) > 1:
raise Exception("There is more than one event id when creating dictionaries. Check xml of {} or see if multiple explicit IDs exist in the AcConstants.ini file".format(fname))
try:
c.id = hex(instance_obj[1] + int(float(obj.get_ids()[0])))
except:
c.id = hex(instance_obj[1] + int(obj.get_ids()[0] , 16))
c.description = obj.get_comment()
c.format_string = obj.get_format_string()
c.component = obj.get_component_name()
(c.low_red,c.low_orange,c.low_yellow,c.high_yellow,c.high_orange,c.high_red) = obj.get_limits()
c.ser_import = None
(c.type,c.ser_import,type_name,dontcare) = DictTypeConverter.DictTypeConverter().convert(obj.get_type(),obj.get_size())
# special case for enums and Gse GUI. Needs to convert %d to %s
if type_name == "enum":
c.format_string = "%s"
self._writeTmpl(c, self.__fp[fname], "channelBodyVisit")
self.__fp[fname].close()
if __name__ == '__main__':
pass
|
StarcoderdataPython
|
125349
|
from visigoth.common.button.button import Button
|
StarcoderdataPython
|
3204036
|
<reponame>timmartin/skulpt
x = 'OK'
print x[0]
|
StarcoderdataPython
|
1678858
|
<filename>autoopt/optim/auto_adagrad.py<gh_stars>0
"""
Copyright 2019 eBay Inc.
Developers/Architects: <NAME>, <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
You may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Copyright 2019 eBay Inc.
Developers/Architects: <NAME>, <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
You may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import torch
from autoopt.optim import AutoOptimizer
class AutoAdagrad(AutoOptimizer):
"""Implements AutoAdagrad algorithm.
Arguments:
model (torch.nn.Module): Model containing the parameters to optimize
lr (float, optional): learning rate (default: 1e-2)
lr_decay (float, optional): learning rate decay (default: 0)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
"""
def __init__(self, model, weight_decay=0, initial_accumulator_value=0, ewma=0.9, gamma0=0.999):
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
if not 0.0 <= initial_accumulator_value:
raise ValueError("Invalid initial_accumulator_value value: {}".format(initial_accumulator_value))
defaults = dict(weight_decay=weight_decay, initial_accumulator_value=initial_accumulator_value,
ewma=ewma, gamma0=gamma0)
super(AutoAdagrad, self).__init__(model, defaults)
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'] = 0
state['sum'] = torch.full_like(p.data, initial_accumulator_value)
def share_memory(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['sum'].share_memory_()
def step(self, closure=None, verbose=False):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
verbose: Print debug messages if set to True.
"""
super(AutoAdagrad, self).step(closure=closure)
loss = None
if closure is not None:
loss = closure()
self.model.auto_params = {'lr': [], 'momentum': []}
for group in self.param_groups:
for param in group['params']:
if param.grad is None:
continue
grad = param.grad.data
state = self.state[param]
state['step'] += 1
if group['weight_decay'] != 0:
if param.grad.data.is_sparse:
raise RuntimeError("weight_decay option is not compatible with sparse gradients")
grad = grad.add(group['weight_decay'], param.data)
if grad.is_sparse:
raise NotImplementedError
else:
state['sum'].addcmul_(1, grad, grad)
std = state['sum'].sqrt().add_(1e-10)
self.auto_tune(parameter=param, hessian=std, with_momentum=True, verbose=verbose)
group['lr'] = 1 - param.gamma[0]
momentum = param.gamma[1] / (1 - param.gamma[0])
group['momentum'] = momentum
dampening = momentum
self.model.auto_params['lr'].append(group['lr'].item())
self.model.auto_params['momentum'].append(momentum.item())
if momentum != 0:
param_state = self.state[param]
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = torch.zeros_like(param.data)
buf.mul_(momentum).add_(1 - momentum, grad) # should be modified
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(1 - dampening, grad)
grad = buf
param.data.addcdiv_(-group['lr'], grad, std)
return loss
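# hedged usage sketch (model, data and loss are placeholders; learning rate and momentum
# come from the auto-tuning step rather than from constructor arguments):
#   model = MyNet()
#   optimizer = AutoAdagrad(model, weight_decay=1e-4)
#   loss = criterion(model(inputs), targets)
#   optimizer.zero_grad(); loss.backward(); optimizer.step()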
|
StarcoderdataPython
|
1782756
|
<gh_stars>1-10
import unittest
# import summarize
from pymongo import MongoClient
import datetime
import random
class SummarizeTest(unittest.TestCase):
def test_work_ng(self):
pass
def test_setup_work(self):
db_name = "SewingMachine"
collection_name = "WorkObjectDetection"
client = MongoClient()
db = client[db_name]
db[collection_name].drop()
collection = db[collection_name]
session_id = "20200109163044181"
dt = datetime.timedelta(milliseconds=100)
timestamp = datetime.datetime.now()
stamps = [ timestamp + (dt*i) for i in range(500)]
data = list(map(lambda t: {"session_id": session_id,
"timestamp": t.strftime("%Y%m%d%H%M%S%f")[:-3],
"is_work": random.randint(1,1)}, stamps))
result = collection.insert_many(data)
find = collection.find()
for doc in find:
print(doc)
|
StarcoderdataPython
|
4804830
|
# coding=utf-8
"""
System tool library
"""
def clear_mem():
"""
    Clean system memory
:return: None
"""
from .. import dir_char, system
if dir_char == '\\':
print("Not support")
else:
import os
if system.startswith("darwin"):
os.system("sudo purge")
else:
os.system('sync')
os.system("echo 3 > /proc/sys/vm/drop_caches")
def get_core_num():
"""
    Get the number of CPU cores
:return: core num
"""
from .. import requirePackage
return requirePackage('psutil').cpu_count()
|
StarcoderdataPython
|
3364458
|
import re
from model.contact import Contact
def all_phones_on_home_page(app):
if app.contact.count() == 0:
app.contact.create(Contact(first_name="Dorota", last_name="Test"))
name_from_home_page = app.contact.get_contact_list()[0]
name_from_edit_page = app.contact.get_contact_info_from_edit_page(0)
lastname_from_home_page = app.contact.get_contact_list()[0]
lastname_from_edit_page = app.contact.get_contact_info_from_edit_page(0)
address_from_home_page = app.contact.get_contact_list()[0]
address_from_edit_page = app.contact.get_contact_info_from_edit_page(0)
contact_from_home_page = app.contact.get_contact_list()[0]
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(0)
assert contact_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(contact_from_edit_page)
assert lastname_from_home_page.last_name == lastname_from_edit_page.last_name
    assert name_from_home_page.first_name == name_from_edit_page.first_name
assert address_from_home_page.address == address_from_edit_page.address
def clear(s):
return re.sub("[() -]", "", s)
def merge_phones_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None,
[contact.home_number, contact.work_number, contact.mobile_number, contact.second_private_number]))))
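# example (illustrative values): home "(22) 333-44-55", work None, mobile "600 700-800"
# merge to "223334455\n600700800", matching the home-page rendering.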
|
StarcoderdataPython
|
1767586
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import glob
import h5py
import pickle
import ruamel.yaml as yaml
import numpy as np
import random # np.random.choice doesn't like a list of tuples.
from scipy.spatial.transform import Rotation
from torch.utils.data import Dataset
from sklearn.neighbors import NearestNeighbors
from scipy.spatial.distance import minkowski
import open3d as o3d
def download():
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
if not os.path.exists(DATA_DIR):
os.mkdir(DATA_DIR)
if not os.path.exists(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048')):
www = 'https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip'
zipfile = os.path.basename(www)
os.system('wget %s; unzip %s' % (www, zipfile))
os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
os.system('rm %s' % (zipfile))
def load_data(partition):
download()
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
all_data = []
all_label = []
for h5_name in glob.glob(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048', 'ply_data_%s*.h5' % partition)):
f = h5py.File(h5_name, 'r')
data = f['data'][:].astype('float32')
label = f['label'][:].astype('int64')
f.close()
all_data.append(data)
all_label.append(label)
all_data = np.concatenate(all_data, axis=0)
all_label = np.concatenate(all_label, axis=0)
return all_data, all_label
def translate_pointcloud(pointcloud):
xyz1 = np.random.uniform(low=2. / 3., high=3. / 2., size=[3])
xyz2 = np.random.uniform(low=-0.2, high=0.2, size=[3])
translated_pointcloud = np.add(np.multiply(pointcloud, xyz1), xyz2).astype('float32')
return translated_pointcloud
def jitter_pointcloud(pointcloud, sigma=0.01, clip=0.05):
N, C = pointcloud.shape
pointcloud += np.clip(sigma * np.random.randn(N, C), -1 * clip, clip)
return pointcloud
def farthest_subsample_points(pointcloud1, pointcloud2, num_subsampled_points=768):
pointcloud1 = pointcloud1.T
pointcloud2 = pointcloud2.T
num_points = pointcloud1.shape[0]
nbrs1 = NearestNeighbors(n_neighbors=num_subsampled_points, algorithm='auto',
metric=lambda x, y: minkowski(x, y)).fit(pointcloud1)
random_p1 = np.random.random(size=(1, 3)) + np.array([[500, 500, 500]]) * np.random.choice([1, -1, 1, -1])
idx1 = nbrs1.kneighbors(random_p1, return_distance=False).reshape((num_subsampled_points,))
nbrs2 = NearestNeighbors(n_neighbors=num_subsampled_points, algorithm='auto',
metric=lambda x, y: minkowski(x, y)).fit(pointcloud2)
random_p2 = random_p1
random_p2 = np.random.random(size=(1, 3)) + np.array([[500, 500, 500]]) * np.random.choice([1, -1, 2, -2])
idx2 = nbrs2.kneighbors(random_p2, return_distance=False).reshape((num_subsampled_points,))
return pointcloud1[idx1, :].T, pointcloud2[idx2, :].T
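# note: each cloud is cropped to the num_subsampled_points nearest neighbours of a distant
# random anchor, so the two returned clouds cover different, partially overlapping regions.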
class ModelNet40(Dataset):
def __init__(self, num_points, num_subsampled_points=768, partition='train',
gaussian_noise=False, unseen=False, rot_factor=4, category=None):
super(ModelNet40, self).__init__()
self.data, self.label = load_data(partition)
if category is not None:
self.data = self.data[self.label==category]
self.label = self.label[self.label==category]
self.num_points = num_points
self.num_subsampled_points = num_subsampled_points
self.partition = partition
self.gaussian_noise = gaussian_noise
self.unseen = unseen
self.label = self.label.squeeze()
self.rot_factor = rot_factor
if num_points != num_subsampled_points:
self.subsampled = True
else:
self.subsampled = False
if self.unseen:
            ######## simulate unseen categories: train on the first 20 categories, test on the last 20
if self.partition == 'test':
self.data = self.data[self.label>=20]
self.label = self.label[self.label>=20]
elif self.partition == 'train':
self.data = self.data[self.label<20]
self.label = self.label[self.label<20]
def __getitem__(self, item, vis=False):
        # the ModelNet point cloud is subsampled by simply taking the first num_points points of each sample
pointcloud = self.data[item][:self.num_points]
if self.partition != 'train':
np.random.seed(item)
anglex = np.random.uniform() * np.pi / self.rot_factor
angley = np.random.uniform() * np.pi / self.rot_factor
anglez = np.random.uniform() * np.pi / self.rot_factor
cosx = np.cos(anglex)
cosy = np.cos(angley)
cosz = np.cos(anglez)
sinx = np.sin(anglex)
siny = np.sin(angley)
sinz = np.sin(anglez)
Rx = np.array([[1, 0, 0],
[0, cosx, -sinx],
[0, sinx, cosx]])
Ry = np.array([[cosy, 0, siny],
[0, 1, 0],
[-siny, 0, cosy]])
Rz = np.array([[cosz, -sinz, 0],
[sinz, cosz, 0],
[0, 0, 1]])
R_ab = Rx.dot(Ry).dot(Rz)
R_ba = R_ab.T
translation_ab = np.array([np.random.uniform(-0.5, 0.5), np.random.uniform(-0.5, 0.5),
np.random.uniform(-0.5, 0.5)])
translation_ba = -R_ba.dot(translation_ab)
pointcloud1 = pointcloud.T
rotation_ab = Rotation.from_euler('zyx', [anglez, angley, anglex])
pointcloud2 = rotation_ab.apply(pointcloud1.T).T + np.expand_dims(translation_ab, axis=1)
euler_ab = np.asarray([anglez, angley, anglex])
euler_ba = -euler_ab[::-1]
pointcloud1 = np.random.permutation(pointcloud1.T).T
pointcloud2 = np.random.permutation(pointcloud2.T).T
if self.gaussian_noise:
pointcloud1 = jitter_pointcloud(pointcloud1)
pointcloud2 = jitter_pointcloud(pointcloud2)
if self.subsampled:
pointcloud1, pointcloud2 = farthest_subsample_points(pointcloud1, pointcloud2, num_subsampled_points=self.num_subsampled_points)
if vis:
pcd1_v = o3d.utility.Vector3dVector(pointcloud1.T)
pcd2_v = o3d.utility.Vector3dVector(pointcloud2.T)
pcd1 = o3d.geometry.PointCloud(pcd1_v)
pcd2 = o3d.geometry.PointCloud(pcd2_v)
pcd1 = pcd1.paint_uniform_color([1, 0, 0])
pcd2 = pcd2.paint_uniform_color([0, 1, 0])
o3d.visualization.draw_geometries([pcd1, pcd2])
#nanidcs = np.random.randint(pointcloud1.shape[1], size=70)
#pointcloud1.T[nanidcs] = [np.nan, np.nan, np.nan]
return pointcloud1.astype('float32'), pointcloud2.astype('float32'), R_ab.astype('float32'), \
translation_ab.astype('float32'), R_ba.astype('float32'), translation_ba.astype('float32'), \
euler_ab.astype('float32'), euler_ba.astype('float32')
def __len__(self):
return self.data.shape[0]
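def _example_modelnet40_loader():
    # Hedged usage sketch (hypothetical helper, not part of the original file):
    # shows how the ModelNet40 dataset above is typically wrapped in a PyTorch
    # DataLoader. It assumes torch is installed and that download() can fetch the
    # HDF5 files; the batch size and point counts below are arbitrary choices.
    from torch.utils.data import DataLoader
    loader = DataLoader(ModelNet40(num_points=1024, num_subsampled_points=768,
                                   partition='train', gaussian_noise=True),
                        batch_size=8, shuffle=True)
    for src, tgt, R_ab, t_ab, R_ba, t_ba, euler_ab, euler_ba in loader:
        # src, tgt: (8, 3, 768); R_ab/R_ba: (8, 3, 3); t_ab/t_ba and eulers: (8, 3)
        break
    return src.shape, tgt.shape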
class Bunny(Dataset):
def __init__(self, num_subsampled_points=768, rot_factor=4, t_range=(-0.5, 0.5)):
# self.data, self.label = load_data(partition)
self.num_subsampled_points = num_subsampled_points
self.rot_factor = rot_factor
self.translation_range = t_range
self.pcd = o3d.io.read_point_cloud("/home/grans/Documents/prnet2/bunny.ply")
self.points = np.asarray(self.pcd.points)
idcs = np.random.choice(np.arange(len(self.points)), size=1536, replace=False)
self.points = self.points[idcs]
self.pcd = o3d.geometry.PointCloud()
self.pcd.points = o3d.utility.Vector3dVector(self.points)
def pick_points(self):
print("")
print("1) Pick two points [shift + left click]")
print(" Press [shift + right click] to undo point picking")
print("2) After picking points, press q for close the window")
vis = o3d.visualization.VisualizerWithEditing()
vis.create_window()
vis.add_geometry(self.pcd)
vis.run() # user picks points
vis.destroy_window()
print("")
return vis.get_picked_points()
def dist(self, idcs):
print(np.linalg.norm(self.points[idcs[0]] - self.points[idcs[1]]))
def __getitem__(self, item=0, vis=False):
print("Getting the item.")
# picked_points = []
# while len(picked_points) != 2:
# picked_points = self.pick_points()
picked_points = np.random.randint(len(self.points), size=(2, ))
self.dist(picked_points)
knn = NearestNeighbors(n_neighbors=self.num_subsampled_points)
knn.fit(self.points)
pcd1_idcs = knn.kneighbors([self.points[picked_points[0]]], return_distance=False)
pcd2_idcs = knn.kneighbors([self.points[picked_points[1]]], return_distance=False)
overlap_idcs = np.intersect1d(pcd1_idcs, pcd2_idcs)
pcd1 = self.points[pcd1_idcs][0] # (self.num_subsampled_points x 3)
pcd2 = self.points[pcd2_idcs][0]
## NOTE: the random subsample below overrides the KNN-based partial-overlap patches computed above, so pcd1 and pcd2 end up identical before the rigid transform is applied.
idcs = np.random.choice(np.arange(len(self.points)), size=self.num_subsampled_points, replace=False)
pcd1 = pcd2 = self.points[idcs]
if vis:
pcd_c = np.zeros_like(self.points)
pcd_c[:] = [1.0, 0, 0]
pcd_c[overlap_idcs] = [0, 0, 1.0]
pcd_c[picked_points] = [0, 0, 0]
pcdo3d1 = o3d.geometry.PointCloud()
pcdo3d1.points = o3d.utility.Vector3dVector(pcd1)
pcdo3d1.colors = o3d.utility.Vector3dVector(pcd_c[pcd1_idcs][0])
pcdo3d2 = o3d.geometry.PointCloud()
pcdo3d2.points = o3d.utility.Vector3dVector(pcd2)
pcd_c[:] = [0, 1.0, 0]
pcd_c[overlap_idcs] = [0, 0, 1.0]
pcd_c[picked_points] = [0, 0, 0]
pcdo3d2.colors = o3d.utility.Vector3dVector(pcd_c[pcd2_idcs][0])
o3d.visualization.draw_geometries([pcdo3d1, pcdo3d2])
## Apply rotation and translation here
# pointcloud = self.data[item][:self.num_points]
# if self.partition != 'train':
# np.random.seed(item)
anglex = np.random.uniform() * np.pi / self.rot_factor
angley = np.random.uniform() * np.pi / self.rot_factor
anglez = np.random.uniform() * np.pi / self.rot_factor
cosx = np.cos(anglex)
cosy = np.cos(angley)
cosz = np.cos(anglez)
sinx = np.sin(anglex)
siny = np.sin(angley)
sinz = np.sin(anglez)
Rx = np.array([[1, 0, 0],
[0, cosx, -sinx],
[0, sinx, cosx]])
Ry = np.array([[cosy, 0, siny],
[0, 1, 0],
[-siny, 0, cosy]])
Rz = np.array([[cosz, -sinz, 0],
[sinz, cosz, 0],
[0, 0, 1]])
R_ab = Rx.dot(Ry).dot(Rz)
R_ba = R_ab.T
translation_ab = np.array([np.random.uniform(-0.5, 0.5), np.random.uniform(-0.5, 0.5),
np.random.uniform(-0.5, 0.5)])
translation_ba = -R_ba.dot(translation_ab)
pcd1 = pcd1.T
rotation_ab = Rotation.from_euler('zyx', [anglez, angley, anglex])
pcd2 = rotation_ab.apply(pcd2).T + np.expand_dims(translation_ab, axis=1)
euler_ab = np.asarray([anglez, angley, anglex])
euler_ba = -euler_ab[::-1]
#pcd1 = np.random.permutation(pcd1.T).T
#pcd2 = np.random.permutation(pcd2.T).T
#if self.subsampled:
# pcd1, pcd2 = farthest_subsample_points(pcd1, pcd2, num_subsampled_points=self.num_subsampled_points)
if vis:
pcd_c = np.zeros_like(self.points)
pcd_c[:] = [1.0, 0, 0]
pcd_c[overlap_idcs] = [0, 0, 1.0]
pcdo3d1 = o3d.geometry.PointCloud()
pcdo3d1.points = o3d.utility.Vector3dVector(pcd1.T)
pcdo3d1.colors = o3d.utility.Vector3dVector(pcd_c[pcd1_idcs][0])
pcdo3d2 = o3d.geometry.PointCloud()
pcdo3d2.points = o3d.utility.Vector3dVector(pcd2.T)
pcd_c[:] = [0, 1.0, 0]
pcd_c[overlap_idcs] = [0, 0, 1.0]
pcdo3d2.colors = o3d.utility.Vector3dVector(pcd_c[pcd2_idcs][0])
o3d.visualization.draw_geometries([pcdo3d1, pcdo3d2])
print("[%d, %d] Overlap: %f" % (picked_points[0], picked_points[1], (len(overlap_idcs)/self.num_subsampled_points)))
return pcd1.astype('float32'), pcd2.astype('float32'), R_ab.astype('float32'), \
translation_ab.astype('float32'), R_ba.astype('float32'), translation_ba.astype('float32'), \
euler_ab.astype('float32'), euler_ba.astype('float32')
def __len__(self):
return 1
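# Descriptive note (added): the Bunny dataset above always yields a single pair of
# identical, randomly subsampled clouds (see the override inside __getitem__) with a
# random rigid transform applied to the second copy; the bunny.ply path is hard-coded
# and would need to be adjusted on other machines.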
class TLessModel(Dataset):
def __init__(self, num_points=768, tlesspath='/home/grans/Documents/t-less_v2/', model='cadsub'):
self.tlesspath = tlesspath
if model == 'cad':
self.model = 'models_cad'
elif model == 'cadsub':
self.model = 'models_cad_subdivided'
elif model == 'recon':
self.model = 'models_reconst'
else:
raise NotImplementedError
self.num_points = num_points
def __getitem__(self, item=None, vis=True):
if item is None:
item = np.random.randint(30) + 1
file_mask = os.path.join(self.tlesspath, self.model, 'obj_{:02d}.ply')
print("Opening " + file_mask.format(item))
mesh = o3d.io.read_triangle_mesh(file_mask.format(item))
pcd = mesh.sample_points_poisson_disk(self.num_points)
if vis:
pcd = pcd.paint_uniform_color([1, 0, 0])
o3d.visualization.draw_geometries([mesh, pcd])
def __len__(self):
return 1
class TLess(Dataset):
def __init__(self, num_points=768,
tlesspath='/home/grans/Documents/t-less_v2/',
obj2scene='/home/grans/Documents/prnet2/obj2scenelist.pkl',
scene2obj='/home/grans/Documents/prnet2/scene2objlist.pkl',
mesh_type='models_cad', # models_cad, models_cad_subdivided, models_reconst
scenes=list(range(1, 21)),
objects=list(range(1, 31)),
window_size=None):
self.num_points = num_points
self.tlesspath = tlesspath
self.obj2scene = pickle.load(open(obj2scene, 'rb'))
self.scene2obj = pickle.load(open(scene2obj, 'rb'))
self.scenes = scenes
self.objects = objects
self.gt_mask = os.path.join(self.tlesspath, 'test_primesense',
'{:02d}', 'gt.yml')
self.mesh_mask = os.path.join(self.tlesspath, mesh_type,
'obj_{:02d}.ply')
self.info_mask = os.path.join(self.tlesspath, 'test_primesense',
'{:02d}', 'info.yml')
self.rgb_image_mask = os.path.join(self.tlesspath, 'test_primesense',
'{:02d}', 'rgb', '{:04d}.png')
self.depth_image_mask = os.path.join(self.tlesspath, 'test_primesense',
'{:02d}', 'depth', '{:04d}.png')
def random_by_scene_id(self, scene_id):
return np.random.choice(self.scene2obj[scene_id])
def random_by_obj_id(self, obj_id):
return np.random.choice(self.obj2scene[obj_id])
def random(self):
scene_id = np.random.choice(self.scenes)
obj_id = np.random.choice(self.scene2obj[scene_id])
return obj_id, scene_id
def __getitem__(self, view_id=None,
obj_id=None,
scene_id=None,
instance_idx=None, vis=True):
if view_id is None:
# Each scene has 504 images 0000.png to 503.png
view_id = np.random.randint(504)
if obj_id is None:
if scene_id is None:
obj_id, scene_id = self.random()
else:
obj_id = self.random_by_scene_id(scene_id)
else:
scene_id = self.random_by_obj_id(obj_id)
f = open(self.gt_mask.format(scene_id), 'r')
gt = yaml.load(f, Loader=yaml.CLoader)
f.close()
f = open(self.info_mask.format(scene_id), 'r')
info = yaml.load(f, Loader=yaml.CLoader)
fx, _, cx, _, fy, cy, _, _, _ = np.array(info[view_id]['cam_K'])
scale = np.array(info[view_id]['depth_scale'])
depth_raw = o3d.io.read_image(self.depth_image_mask.format(scene_id, view_id))
width, height = depth_raw.get_max_bound().astype('int')
cameraIntrinsics = o3d.camera.PinholeCameraIntrinsic(width, height, fx, fy, cx, cy)
# We might have multiple instances of the same object in the scene,
# so we must get them all, and then select one of them.
# TODO: If this process turns out to be slow, it might be useful to
# reconstruct the dictionary somehow
obj_id_gts = []
for obj_gt in gt[view_id]:
if obj_gt['obj_id'] == obj_id:
obj_id_gts.append(obj_gt)
obj_id_gt = None
if instance_idx is not None:
obj_id_gt = obj_id_gts[instance_idx]
else:
# If there are multiple instances of the obj in the scene, we
# randomly select one of them.
instance_idx, obj_id_gt = random.choice(list(enumerate(obj_id_gts)))
Rm2c = np.array(obj_id_gt['cam_R_m2c']).reshape(3,3)
tm2c = np.array(obj_id_gt['cam_t_m2c'])[np.newaxis].T
Rab = Rm2c
tab = tm2c
euler_ab = Rotation.from_matrix(Rm2c).as_euler('zyx')
Rba = Rab.T
tba = -Rba.dot(tab)
euler_ba = -euler_ab[::-1]
mesh = o3d.io.read_triangle_mesh(self.mesh_mask.format(obj_id))
obj_pcd = mesh.sample_points_poisson_disk(self.num_points)
if vis:
# During visualization it can be nice to have the point cloud
# colored so that it's easier to see what it represents.
color_raw = o3d.io.read_image(self.rgb_image_mask.format(scene_id, view_id))
rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(
color_raw,
depth_raw,
depth_scale=(1/scale),
depth_trunc=np.inf,
convert_rgb_to_intensity=False
)
scan_pcd = o3d.geometry.PointCloud.create_from_rgbd_image(
rgbd_image,
o3d.camera.PinholeCameraIntrinsic(cameraIntrinsics)
)
else:
scan_pcd = o3d.geometry.PointCloud().create_from_depth_image(depth_raw,
o3d.camera.PinholeCameraIntrinsic(cameraIntrinsics), depth_scale=(1/scale))
if vis:
Tm2c = np.hstack((Rm2c, tm2c))
Tm2c = np.vstack((Tm2c, [0, 0, 0, 1]))
mesh.compute_vertex_normals() # Just to make it look good.
mesh.paint_uniform_color([1, 0.706, 0])
mesh.transform(Tm2c)
#mesh.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
#pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
# o3d.visualization.draw_geometries([mesh])
o3d.visualization.draw_geometries([mesh, scan_pcd])
pointcloud1 = np.asarray(obj_pcd.points)
pointcloud2 = np.asarray(scan_pcd.points)
info = (scene_id, obj_id, view_id, instance_idx)
# pcds: (3, n) float32
# R_ab: (3, 3) float32
# t_ab: (3,) float32
# euler_ab: (3,) float32
# info: tuple (scene_id, obj_id, view_id, instance_idx)
return pointcloud1.astype('float32'), pointcloud2.astype('float32'), \
Rab.astype('float32'), tab.astype('float32'), \
Rba.astype('float32'), tba.astype('float32'), \
euler_ab.astype('float32'), euler_ba.astype('float32'), \
info
def __len__(self):
return 1
if __name__ == '__main__':
# tlessmodel = TLessModel()
# t = tlessmodel.__getitem__()
tlscan = TLess()
p1, p2, rab, tab, rba, tba, eulab, eulba, info = tlscan.__getitem__(vis=False)
print("Scene: %d Obj: %d View: %d, Instance: %d" % info)
pcd1 = o3d.geometry.PointCloud(
o3d.utility.Vector3dVector(p1)
)
pcd2 = o3d.geometry.PointCloud(
o3d.utility.Vector3dVector(p2)
)
pcd1.paint_uniform_color([1, 0.706, 0])
o3d.visualization.draw_geometries([pcd1, pcd2])
tab = np.vstack((np.hstack((rab, tab)), [0, 0, 0, 1]))
pcd1.transform(tab)
o3d.visualization.draw_geometries([pcd1, pcd2])
# pts = 128
# d = ModelNet40(num_points=pts,
# num_subsampled_points=pts,
# partition='test',
# rot_factor=4)
# d.__getitem__(0)
print('hello world')
|
StarcoderdataPython
|
193931
|
"""
MySQL output connector. Writes audit logs to MySQL database
"""
from __future__ import absolute_import, division
import MySQLdb
import json
import subprocess
import os
import sys
import urllib
import urllib2
import urlparse
import hashlib
import time
import threading
import sqlite3
import uuid
import datetime
from time import strftime
from twisted.enterprise import adbapi
from poster.encode import multipart_encode
from poster.streaminghttp import register_openers
from twisted.internet import defer
from twisted.python import log
from twisted.internet.task import LoopingCall
from twisted.internet import reactor
import cowrie.core.output
from cowrie.core.config import CONFIG
class ReconnectingConnectionPool(adbapi.ConnectionPool):
"""
Reconnecting adbapi connection pool for MySQL.
This class improves on the solution posted at
http://www.gelens.org/2008/09/12/reinitializing-twisted-connectionpool/
by checking exceptions by error code and only disconnecting the current
connection instead of all of them.
Also see:
http://twistedmatrix.com/pipermail/twisted-python/2009-July/020007.html
"""
def _runInteraction(self, interaction, *args, **kw):
try:
return adbapi.ConnectionPool._runInteraction(
self, interaction, *args, **kw)
except MySQLdb.OperationalError as e:
if e[0] not in (2003, 2006, 2013):
log.msg("RCP: got error {0}, retrying operation".format(e))
raise e
conn = self.connections.get(self.threadID())
self.disconnect(conn)
# Try the interaction again
return adbapi.ConnectionPool._runInteraction(
self, interaction, *args, **kw)
class Output(cowrie.core.output.Output):
db = None
def __init__(self):
try:
self.debug = CONFIG.getboolean('output_mysql', 'debug')
self.apiKey = CONFIG.get('output_mysql', 'api_key')
except Exception:
self.debug = False
cowrie.core.output.Output.__init__(self)
def start(self):
try:
port = CONFIG.getint('output_mysql', 'port')
except Exception:
port = 3306
try:
self.db = ReconnectingConnectionPool(
'MySQLdb',
host=CONFIG.get('output_mysql', 'host'),
db=CONFIG.get('output_mysql', 'database'),
user=CONFIG.get('output_mysql', 'username'),
passwd=CONFIG.get('output_mysql', 'password', raw=True),
port=port,
cp_min=1,
cp_max=1
)
except MySQLdb.Error as e:
log.msg("output_mysql: Error %d: %s" % (e.args[0], e.args[1]))
self.lc = LoopingCall(self.check_wait)
self.lc.start(30)
self.versions = {}
def stop(self):
self.lc.stop()
self.db.close()
self.versions = {}
def nowUnix(self):
"""return the current UTC time as an UNIX timestamp"""
return int(time.time())
def sqlerror(self, error):
log.err('output_mysql: MySQL Error: {}'.format(error.value))
def simpleQuery(self, sql, args):
"""
Just run a deferred sql query, only care about errors
"""
if self.debug:
log.msg("output_mysql: MySQL query: {} {}".format(sql, repr(args)))
d = self.db.runQuery(sql, args)
d.addErrback(self.sqlerror)
def simpleQueryWithCallback(self, callback, sql, args):
if self.debug:
log.msg("output_mysql: MySQL query: {} {}".format(sql, repr(args)))
d = self.db.runQuery(sql, args)
d.addCallbacks(callback, self.sqlerror)
############################
def createSession(self, peerIP, peerPort, hostIP, hostPort, timestamp, sessionId=None):
if sessionId is None:
sid = uuid.uuid4().hex
else:
sid = sessionId
self.createSessionWhenever(sid, peerIP, hostIP, timestamp)
return sid
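# Descriptive note (added): createASNForIP below resolves the AS number for the peer IP
# via Team Cymru's DNS service (two `dig -t TXT` lookups), inserts the result into the
# `asinfo` table if it is not cached yet, and finally updates the session row. Everything
# runs through chained adbapi callbacks, so nothing here blocks the Twisted reactor apart
# from the dig subprocesses themselves.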
def createASNForIP(self, sid, peerIP, sensorId, timestamp):
def addslashes(s):
l = ["\\", '"', "'", "\0", ]
for i in l:
if i in s:
s = s.replace(i, '\\'+i)
return s
def reverseIP(address):
temp = address.split(".")
convertedAddress = str(temp[3]) +'.' + str(temp[2]) + '.' + str(temp[1]) +'.' + str(temp[0])
return convertedAddress
def onASNRecordTest(r):
if r:
createTheSession(sid, peerIP, sensorId, int(r[0][0]), timestamp)
else:
timeModified = datetime.datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')
self.simpleQueryWithCallback(onASNRecordInsert, 'INSERT INTO `asinfo` (`asn`, `rir`, `country`, `asname`, `updatedTime`) VALUES (%s, %s, %s, %s, STR_TO_DATE(%s, %s)) ', (ASN, registry, country, isp, timeModified, '%Y-%m-%d %H:%i:%s'))
def onASNRecordInsert(r):
self.simpleQueryWithCallback(onASNRecordReady, 'SELECT `asnid` FROM `asinfo` WHERE `asn` = %s AND `rir` = %s AND `country` = %s AND `asname` = %s ', (ASN, registry, country, isp))
def onASNRecordReady(r):
createTheSession(sid, peerIP, sensorId, int(r[0][0]), timestamp)
def onSessionCreated(r):
if self.versions.has_key(sid):
self.simpleQuery(
'UPDATE `sessions` SET `client` = %s WHERE `id` = %s',
(self.versions[sid], sid))
del self.versions[sid]
else:
self.versions[sid] = 1
def createTheSession(sid, peerIP, sensorId, asnid, timestamp):
# The upstream author changed the timestamp format; it is adjusted here so it matches the expected one.
timestamp_modified = datetime.datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')
self.simpleQueryWithCallback(onSessionCreated,
'UPDATE `sessions` SET `starttime` = STR_TO_DATE(%s, %s), `sensor` = %s, `ip` = %s, `asnid` = %s' + \
' WHERE `id` = %s',
(timestamp_modified, '%Y-%m-%d %H:%i:%s', sensorId, peerIP, asnid, sid))  # old parsing format: %Y-%m-%dT%H:%i:%s.%fZ
try:
querycmd1 = reverseIP(peerIP) + '.origin.asn.cymru.com'
response1 = subprocess.Popen(['dig', '-t', 'TXT', querycmd1, '+short'], stdout=subprocess.PIPE).communicate()[0]
response1List = response1.split('|')
ASN = response1List[0].strip('" ')
querycmd2 = 'AS' + ASN + '.asn.cymru.com'
response2 = subprocess.Popen(['dig', '-t', 'TXT', querycmd2, '+short'], stdout=subprocess.PIPE).communicate()[0]
except:
ASN = ""
response2 = ""
log.msg("dig process error: " + str(sys.exc_info()))
response2List = response2.split('|')
if len(response2List) < 4:
createTheSession(sid, peerIP, sensorId, '1', timestamp)
else:
isp = addslashes(response2List[4].replace('"', ''))
network = addslashes(response1List[1].strip())
country = addslashes(response1List[2].strip())
registry = addslashes(response1List[3].strip())
isp = network + "-" + isp
self.simpleQueryWithCallback(onASNRecordTest, 'SELECT `asnid` FROM `asinfo` WHERE `updated` = FALSE AND `asn` = %s AND `rir` = %s AND `country` = %s AND `asname` = %s ', (ASN, registry, country, isp))
def createSessionWhenever(self, sid, peerIP, hostIP, timestamp=None):
def onSensorReady(r):
id = int(r[0][0])
self.createASNForIP(sid, peerIP, id, timestamp)
def onSensorInsert(r):
self.simpleQueryWithCallback(onSensorReady, 'SELECT LAST_INSERT_ID()','')
def onSensorSelect(r):
if r:
onSensorReady(r)
else:
self.simpleQueryWithCallback(onSensorInsert,
'INSERT INTO `sensors` (`ip`) VALUES (%s)', (hostIP,))
self.simpleQueryWithCallback(onSensorSelect,
'SELECT `id` FROM `sensors` WHERE `ip` = %s', (hostIP,))
def insert_wait(self, resource, url, scan_id, sha256):
p = CONFIG.get('honeypot', 'log_path') + '/backlogs.sqlite'
try:
dbh = sqlite3.connect(p)
cursor = dbh.cursor()
dt = datetime.datetime.now()
timestamp = dt.strftime("%Y-%m-%d %H:%M:%S")
cursor.execute("""
INSERT INTO vtwait (scanid, hash, url, time, sha256)
VALUES (?,?,?,?,?) """, (scan_id, resource, url, timestamp, sha256))
dbh.commit()
cursor.close()
except:
log.msg("Unexpected error: " + str(sys.exc_info()))
return True
def check_wait(self):
p = CONFIG.get('honeypot', 'log_path') + '/backlogs.sqlite'
try:
dbh = sqlite3.connect(p)
cursor = dbh.cursor()
r = cursor.execute("""
SELECT scanid, hash, url, time, sha256 FROM vtwait""")
for record in r:
scanid = format(record[0])
hash = format(record[1])
url = format(record[2])
sha256 = format(record[4])
j, jsonString = self.get_vt_report(scanid)
if (not j is None) and (j["response_code"] == 1):
if "scans" in j.keys():
args = {'shasum': hash, 'url': url, 'permalink': j["permalink"], 'positives': j['positives'], 'total': j['total'], 'sha256' : sha256}
args_scan = {'shasum': hash, 'sha256' : sha256, 'permalink': j['permalink'], 'json': jsonString}
self.handleVirustotal(args, args_scan)
cursor.execute("""
DELETE FROM vtwait WHERE scanid = ?""", (scanid,) )
dbh.commit()
cursor.close()
except:
log.msg("Unexpected error: " + str(sys.exc_info()))
return True
def get_vt_report(self, resource):
url = "https://www.virustotal.com/vtapi/v2/file/report"
parameters = {"resource": resource, "apikey": self.apiKey}
data = urllib.urlencode(parameters)
req = urllib2.Request(url, data)
response = urllib2.urlopen(req)
jsonString = response.read()
try:
j = json.loads(jsonString )
except:
j = None
return j, jsonString
def post_file(self, aFileName, aUrl=None):
file_to_send = open(aFileName, "rb").read()
h = hashlib.sha1()
h.update(file_to_send)
h256 = hashlib.sha256()
h256.update(file_to_send)
j, jsonString = self.get_vt_report(h.hexdigest())
if j is None:
response = -2
else:
response = j["response_code"]
if response == 1: # file known
log.msg("post_file(): file known")
if "scans" in j.keys():
args = {'shasum': h.hexdigest(), 'sha256' : h256.hexdigest(), 'url': aUrl, 'permalink': j['permalink'], 'positives' : j['positives'], 'total' : j['total']}
args_scan = {'shasum': h.hexdigest(), 'sha256' : h256, 'permalink': j['permalink'], 'json': jsonString}
self.handleVirustotal(args, args_scan)
else:
response = 2
elif response == 0: # file not known
log.msg("post_file(): sending the file to VT...")
register_openers()
datagen, headers = multipart_encode({"file": open(aFileName, "rb")})
request = urllib2.Request("https://www.virustotal.com/vtapi/v2/file/scan?apikey=" + self.apiKey, datagen, headers)
jsonString = urllib2.urlopen(request).read()
log.msg("post_file(): response is " + jsonString)
j = json.loads(jsonString)
self.insert_wait(h.hexdigest(), aUrl, j["scan_id"], h256.hexdigest())
return response
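# Descriptive note (added): post_file() first queries VirusTotal by the file's SHA-1.
# Known samples are recorded immediately via handleVirustotal(); unknown samples are
# uploaded for scanning and queued in the local backlogs.sqlite database, which the
# check_wait() loop (started from start() with a 30-second interval) polls until a
# report with response_code == 1 becomes available.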
def make_comment(self, resource):
apikey = CONFIG.get('virustotal', 'apikey')
url = "https://www.virustotal.com/vtapi/v2/comments/put"
parameters = {"resource": resource,
"comment": "captured by ssh honeypot",
"apikey": apikey}
data = urllib.urlencode(parameters)
req = urllib2.Request(url, data)
response = urllib2.urlopen(req)
json = response.read()
def handleVirustotal(self, args, args2):
def insert_done(r):
self.handleVirustotalScan(args2)
def select_done(r):
if r:
id = r[0][0]
else:
d = self.db.runQuery('INSERT INTO `virustotals`' + \
' (`shasum`, `sha256`, `url`, `timestamp`, `permalink`, `positives`, `count`)' + \
' VALUES (%s, %s, %s, FROM_UNIXTIME(%s), %s, %s, %s)',
(args['shasum'], args['sha256'], args['url'], self.nowUnix(), args['permalink'], args['positives'], args['total'],))
d.addCallbacks(insert_done, self.sqlerror)
d = self.db.runQuery('SELECT `id` FROM `virustotals` WHERE `permalink` = %s', (args['permalink'],))
d.addCallbacks(select_done, self.sqlerror)
def handleVirustotalScan(self, args):
def insert_results(r):
scan_id = r[0][0]
jsonData = json.loads(args['json'])
scans = jsonData['scans']
for av, val in scans.items():
res = val['result']
# not detected = '' -> NULL
if res == '':
res = None
self.simpleQuery('INSERT INTO `virustotalscans`' + \
' (`scan_id`, `scanner`, `result`)' + \
' VALUES (%s, %s, %s)',
(scan_id, av, res, ))
d = self.db.runQuery('SELECT `id` FROM `virustotals` WHERE `permalink` = %s', (args['permalink'],))
d.addCallbacks(insert_results, self.sqlerror)
############################
def message_to_new(message):
return_string = "{\"eventid\": \"cowrie.client.version\", "
session_position = message.find("\"session\"")
timestamp_position = message.find("\"timestamp\"")
session = message[session_position + 11:timestamp_position - 2]
version_position = message.find("\"version\"")
time_position = message.find("\"time\"")
version = message[version_position + 11:time_position - 2]
version = "\"" + version[2:version.rfind('_')] + '",' + version[version.rfind('_') + 2:-3]
return_string = return_string + "\"session\": " + session + ", \"version\": " + version + "}"
return_string = return_string.replace("\\", "")
print("JSON")
print(return_string)
return return_string
@defer.inlineCallbacks
def write(self, entry):
if entry["eventid"] == 'cowrie.session.connect':
self.simpleQuery('INSERT INTO `sessions` (`id`, `starttime`, `sensor`, `ip`)' + \
' VALUES (%s, STR_TO_DATE(%s, %s), %s, %s)',
(entry['session'], '1991-1-1 1:1:1', '%Y-%m-%d %H:%i:%s', '1', entry['src_ip']))  # old parsing format: %Y-%m-%dT%H:%i:%s.%fZ
elif entry["eventid"] == 'cowrie.login.success':
self.simpleQuery('INSERT INTO `auth` (`session`, `success`, `username`, `password`, `timestamp`) '
'VALUES (%s, %s, %s, %s, FROM_UNIXTIME(%s))',
(entry["session"], 1, entry['username'], entry['password'], entry["time"]))
elif entry["eventid"] == 'cowrie.login.failed':
self.simpleQuery('INSERT INTO `auth` (`session`, `success`, `username`, `password`, `timestamp`) '
'VALUES (%s, %s, %s, %s, FROM_UNIXTIME(%s))',
(entry["session"], 0, entry['username'], entry['password'], entry["time"]))
elif entry["eventid"] == 'cowrie.session.params':
self.simpleQuery('INSERT INTO `params` (`session`, `arch`) '
'VALUES (%s, %s)',
(entry["session"], entry["arch"]))
elif entry["eventid"] == 'cowrie.command.input':
self.simpleQuery('INSERT INTO `input` (`session`, `timestamp`, `success`, `input`) '
'VALUES (%s, FROM_UNIXTIME(%s), %s , %s)',
(entry["session"], entry["time"], 1, entry["input"]))
elif entry["eventid"] == 'cowrie.command.failed':
self.simpleQuery('INSERT INTO `input` (`session`, `timestamp`, `success`, `input`) '
'VALUES (%s, FROM_UNIXTIME(%s), %s , %s)',
(entry["session"], entry["time"], 0, entry["input"]))
elif entry["eventid"] == 'cowrie.session.file_download':
self.simpleQuery('INSERT INTO `downloads` (`session`, `timestamp`, `url`, `outfile`, `shasum`) '
'VALUES (%s, FROM_UNIXTIME(%s), %s, %s, %s)',
(entry["session"], entry["time"], entry['url'], entry['outfile'], entry['shasum']))
self.post_file(entry["outfile"], entry["url"])
elif entry["eventid"] == 'cowrie.session.file_download.failed':
self.simpleQuery('INSERT INTO `downloads` (`session`, `timestamp`, `url`, `outfile`, `shasum`) '
'VALUES (%s, FROM_UNIXTIME(%s), %s, %s, %s)',
(entry["session"], entry["time"], entry['url'], 'NULL', 'NULL'))
elif entry["eventid"] == 'cowrie.session.file_upload':
self.simpleQuery('INSERT INTO `downloads` (`session`, `timestamp`, `url`, `outfile`, `shasum`) '
'VALUES (%s, FROM_UNIXTIME(%s), %s, %s, %s)',
(entry["session"], entry["time"], '', entry['outfile'], entry['shasum']))
self.post_file(entry["outfile"])
elif entry["eventid"] == 'cowrie.session.input':
self.simpleQuery('INSERT INTO `input` (`session`, `timestamp`, `realm`, `input`) '
'VALUES (%s, FROM_UNIXTIME(%s), %s , %s)',
(entry["session"], entry["time"], entry["realm"], entry["input"]))
elif entry["eventid"] == 'cowrie.client.version':
try:
version_string = entry["version"]
hostport = json.loads(version_string[version_string.rfind('_') + 1:-1])["hostport"]
entry['src_ip'] = hostport[:hostport.rfind(':')];
entry['version'] = version_string[1:version_string.rfind('_')]
extraPresent = True
except:
extraPresent = False
self.createSessionWhenever(entry['session'], entry['src_ip'], self.sensor, entry['time'])
#yield self.db.runQuery(
# 'UPDATE `sessions` SET `ip` = %s WHERE `id` = %s',
# (hostport[:hostport.rfind(':')], entry['session'],))
r = yield self.db.runQuery(
'SELECT `id` FROM `clients` '
'WHERE `version` = %s',
(entry['version'],))
if r:
id = int(r[0][0])
else:
yield self.db.runQuery(
'INSERT INTO `clients` (`version`) '
'VALUES (%s)',
(entry['version'],))
r = yield self.db.runQuery('SELECT LAST_INSERT_ID()')
id = int(r[0][0])
if not self.versions.has_key(entry['session']):
self.versions[entry['session']] = id
else:
del self.versions[entry['session']]
self.simpleQuery(
'UPDATE `sessions` SET `client` = %s WHERE `id` = %s',
(id, entry["session"]))
elif entry["eventid"] == 'cowrie.client.size':
self.simpleQuery(
'UPDATE `sessions` '
'SET `termsize` = %s '
'WHERE `id` = %s',
('%sx%s' % (entry['width'], entry['height']), entry["session"]))
elif entry["eventid"] == 'cowrie.session.closed':
self.simpleQuery(
'UPDATE `sessions` '
'SET `endtime` = FROM_UNIXTIME(%s) '
'WHERE `id` = %s',
(entry["time"], entry["session"]))
elif entry["eventid"] == 'cowrie.log.closed':
self.simpleQuery(
'INSERT INTO `ttylog` (`session`, `ttylog`, `size`) '
'VALUES (%s, %s, %s)',
(entry["session"], entry["ttylog"], entry["size"]))
elif entry["eventid"] == 'cowrie.client.fingerprint':
self.simpleQuery(
'INSERT INTO `keyfingerprints` (`session`, `username`, `fingerprint`) '
'VALUES (%s, %s, %s)',
(entry["session"], entry["username"], entry["fingerprint"]))
|
StarcoderdataPython
|
25620
|
<gh_stars>1-10
from pyramid.config import Configurator
from pyramid.view import view_config
@view_config(route_name='index', renderer='templates/index.html.jinja2')
def index(request):
return {}
def create_app():
config = Configurator()
config.include('pyramid_jinja2')
config.add_route('index', '/')
config.add_static_view(name='static', path='static')
config.scan()
return config.make_wsgi_app()
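# Hedged usage sketch (added, not part of the original module): serve the app locally
# with the standard-library WSGI server. The host and port below are arbitrary choices.
if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    server = make_server('0.0.0.0', 6543, create_app())
    server.serve_forever()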
|
StarcoderdataPython
|
3227888
|
# Created by Ethan
from chemlib import Compound
def saltSolubilities(compound):
try:
cmpd = compound
validate = Compound(compound)
temp = validate.occurences
temp = list(temp.keys())
for val in temp:
cmpd = cmpd.replace(val, "")
if any(ch.isalpha() for ch in cmpd):  # leftover digits/parentheses from subscripts are fine; leftover letters mean an unrecognised element
return "Not a Valid Compound. Please check your input."
except:
return "Not a Valid Compound. Please check your input."
# Check for SPAN Salts
if "Na" in compound or "K" in compound or "NH4" in compound or "NO3" in compound:
return True
# Check for Chloride Salts
elif "Cl" in compound:
if compound == "FeCl2" or compound == "AgCl":
return False
else:
return True
# Check for Sulfate Salts
elif "SO4" in compound:
if compound == "PbSO4" or compound == "CaSO4" or compound == "BaSO4":
return False
else:
return True
# Check for Carbonate Salts
elif "CO3" in compound:
if compound == "Na2CO3" or compound == "K2CO3" or compound == "(NH4)2CO3":
return True
else:
return False
# Check for Hydroxide and Oxide Salts
elif "OH" in compound:
if compound == "NaOH" or compound == "KOH" or compound == "NH4OH" or compound == "Ca2OH" or compound == "Na2O" or compound == "K2O" or compound == "(NH4)2O" or compound == "CaO":
return True
else:
return False
else:
return False
|
StarcoderdataPython
|
1671103
|
<filename>model/network/MT3D.py
import torch
import torch.nn as nn
import numpy as np
from .basic_blocks import SetBlock, BasicConv2d, M3DPooling, FramePooling, FramePooling1, LocalTransform, BasicConv3DB, GMAP, SeparateFC
class MTNet(nn.Module):
def __init__(self, hidden_dim):
super(MTNet, self).__init__()
self.hidden_dim = hidden_dim
self.batch_frame = None
_set_in_channels = 1
_set_channels = [32, 64, 128, 128]
self.layer1 = nn.Conv3d(_set_in_channels, _set_channels[0], kernel_size=(3,3,3), stride=(2,1,1), padding=1,bias=False)
# Transform clip 每个clip分开卷
self.local_transform1 = LocalTransform(_set_channels[0], _set_channels[0],s=3)
self.B3D_layer2_S = BasicConv3DB(_set_channels[0], _set_channels[1], padding=1)
self.M3D_layer2_S = M3DPooling()
self.B3D_layer2_L = BasicConv3DB(_set_channels[0], _set_channels[1], padding=1)
self.M3D_layer2_L = M3DPooling()
self.local_transform2 = LocalTransform(_set_channels[1], _set_channels[1],s=3)
self.B3D_layer3_S1 = BasicConv3DB(_set_channels[1], _set_channels[2], padding=1)
self.B3D_layer3_S2 = BasicConv3DB(_set_channels[2], _set_channels[3], padding=1)
self.B3D_layer3_L1 = BasicConv3DB(_set_channels[1], _set_channels[2], padding=1)
self.B3D_layer3_L2 = BasicConv3DB(_set_channels[2], _set_channels[3], padding=1)
self.local_transform3 = LocalTransform(_set_channels[3], _set_channels[3],s=3)
self.framepooling_S = FramePooling1()
self.framepooling_L = FramePooling1()
self.gmap_S = GMAP(w=22)
self.gmap_L = GMAP(w=22)
self.fc_bin = nn.Parameter(
nn.init.xavier_uniform_(
torch.zeros(64, _set_channels[3], self.hidden_dim)))
for m in self.modules():
if isinstance(m, (nn.Conv2d, nn.Conv1d)):
nn.init.xavier_uniform_(m.weight.data)
elif isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight.data)
nn.init.constant_(m.bias.data, 0.0)
elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
nn.init.normal_(m.weight.data, 1.0, 0.02)
nn.init.constant_(m.bias.data, 0.0)
def forward(self, silho, batch_frame=None):
# n: batch_size, s: frame_num, k: keypoints_num, c: channel
if batch_frame is not None:
batch_frame = batch_frame[0].data.cpu().numpy().tolist()
_ = len(batch_frame)
for i in range(len(batch_frame)):
if batch_frame[-(i + 1)] != 0:
break
else:
_ -= 1
batch_frame = batch_frame[:_]
frame_sum = np.sum(batch_frame)
if frame_sum < silho.size(1):
silho = silho[:, :frame_sum, :, :]
self.batch_frame = [0] + np.cumsum(batch_frame).tolist()
n = silho.size(0)
x = silho.unsqueeze(2) #[12, 30, 1, 64, 44]
del silho
import pdb
# pdb.set_trace()
#layer1
x = self.layer1(x.permute(0,2,1,3,4).contiguous()) #output [12, 32, 15, 64, 44]
x1 = self.local_transform1(x) # [12, 32, 5, 64, 44]
#layer2
x = self.B3D_layer2_S(x) # [12, 64, 15, 64, 44]
x = self.M3D_layer2_S(x) # [12, 64, 15, 32, 22]
x1 = self.B3D_layer2_L(x1) # [12, 64, 5, 64, 44]
x1 = self.M3D_layer2_L(x1) # [12, 64, 5, 32, 22]
x1 = x1 + self.local_transform2(x) # [12, 64, 5, 32, 22]
#layer3
x = self.B3D_layer3_S1(x)
x = self.B3D_layer3_S2(x) # [12, 128, 15, 32, 22]
x1 = self.B3D_layer3_L1(x1)
x1 = self.B3D_layer3_L2(x1) # [12, 128, 5, 32, 22]
x1 = x1 + self.local_transform3(x) # [12, 128, 5, 32, 22]
#Framepooling & GAP GMP
x = self.framepooling_S(x) # [12, 128, 1, 32, 22]
x = self.gmap_S(x) # [12, 128, 1, 32, 1]
x1 = self.framepooling_L(x1)
x1 = self.gmap_L(x1)
#Separate FC
feature = torch.cat((x,x1),dim=3) # [12, 128, 1, 64, 1]
del x1
del x
# x = self.fc(x)
feature = feature.squeeze(-1) # [12, 128, 1, 64]
feature = feature.permute(0, 3, 2, 1).contiguous() # [12, 64, 1, 128]
feature = feature.matmul(self.fc_bin)
return feature.squeeze(2), None # [12, 64, 128]
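# Descriptive note (added): judging by the shape comments above, MTNet.forward expects
# silhouette batches of shape (batch, frames, 64, 44) (e.g. 30 frames per sample) and
# returns an embedding of shape (batch, 64, hidden_dim) plus a None placeholder; the
# frame count has to be compatible with the temporal pooling inside LocalTransform (s=3).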
|
StarcoderdataPython
|
3213383
|
#!/bin/python3
import sys
def factorial(x):
if x < 1:
return 1
else:
x = x * factorial(x-1)
return x
n = int(input().strip())
print(factorial(n))
|
StarcoderdataPython
|
3235665
|
import torch
import torch.nn.functional as F
import torch.nn as nn
from utils.rewards import get_scores, get_self_cider_scores
class RewardCriterion(nn.Module):
def __init__(self):
super(RewardCriterion, self).__init__()
def forward(self, input, seq, reward):
input = input.gather(2, seq.unsqueeze(2)).squeeze(2)
input = input.reshape(-1)
reward = reward.reshape(-1)
mask = (seq > 0).float()
mask = torch.cat([mask.new(mask.size(0), 1).fill_(1), mask[:, :-1]], 1).reshape(-1)
output = - input * reward * mask
output = torch.sum(output) / torch.sum(mask)
return output
class StructureLosses(nn.Module):
"""
This loss is inspired by Classical Structured Prediction Losses for Sequence to Sequence Learning (Edunov et al., 2018).
"""
def __init__(self, opt):
super(StructureLosses, self).__init__()
self.opt = opt
self.loss_type = opt.structure_loss_type
def forward(self, input, seq, data_gts):
"""
Input is either logits or log softmax
"""
out = {}
batch_size = input.size(0)
seq_per_img = batch_size // len(data_gts)
assert seq_per_img == self.opt.train_sample_n, seq_per_img
mask = (seq>0).float()
mask = torch.cat([mask.new_full((mask.size(0), 1), 1), mask[:, :-1]], 1)
scores = get_scores(data_gts, seq, self.opt)
scores = torch.from_numpy(scores).type_as(input).view(-1, seq_per_img)
out['reward'] = scores #.mean()
if self.opt.entropy_reward_weight > 0:
entropy = - (F.softmax(input, dim=2) * F.log_softmax(input, dim=2)).sum(2).data
entropy = (entropy * mask).sum(1) / mask.sum(1)
print('entropy', entropy.mean().item())
scores = scores + self.opt.entropy_reward_weight * entropy.view(-1, seq_per_img)
costs = - scores
if self.loss_type == 'risk' or self.loss_type == 'softmax_margin':
costs = costs - costs.min(1, keepdim=True)[0]
costs = costs / costs.max(1, keepdim=True)[0]
input = input.gather(2, seq.unsqueeze(2)).squeeze(2)
if self.loss_type == 'seqnll':
# input is logsoftmax
input = input * mask
input = input.sum(1) / mask.sum(1)
input = input.view(-1, seq_per_img)
target = costs.min(1)[1]
output = F.cross_entropy(input, target)
elif self.loss_type == 'risk':
# input is logsoftmax
input = input * mask
input = input.sum(1)
input = input.view(-1, seq_per_img)
output = (F.softmax(input.exp()) * costs).sum(1).mean()
elif self.loss_type == 'max_margin':
# input is logits
input = input * mask
input = input.sum(1) / mask.sum(1)
input = input.view(-1, seq_per_img)
_, __ = costs.min(1, keepdim=True)
costs_star = _
input_star = input.gather(1, __)
output = F.relu(costs - costs_star - input_star + input).max(1)[0] / 2
output = output.mean()
elif self.loss_type == 'multi_margin':
# input is logits
input = input * mask
input = input.sum(1) / mask.sum(1)
input = input.view(-1, seq_per_img)
_, __ = costs.min(1, keepdim=True)
costs_star = _
input_star = input.gather(1, __)
output = F.relu(costs - costs_star - input_star + input)
output = output.mean()
elif self.loss_type == 'softmax_margin':
# input is logsoftmax
input = input * mask
input = input.sum(1) / mask.sum(1)
input = input.view(-1, seq_per_img)
input = input + costs
target = costs.min(1)[1]
output = F.cross_entropy(input, target)
elif self.loss_type == 'real_softmax_margin':
input = input * mask
input = input.sum(1) / mask.sum(1)
input = input.view(-1, seq_per_img)
input = input + costs
target = costs.min(1)[1]
output = F.cross_entropy(input, target)
elif self.loss_type == 'new_self_critical':
"""
A different self critical
Self critical uses greedy decoding score as baseline;
This setting uses the average score of the rest samples as baseline
(suppose c1...cn n samples, reward1 = score1 - 1/(n-1)(score2+..+scoren) )
"""
baseline = (scores.sum(1, keepdim=True) - scores) / (scores.shape[1] - 1)
scores = scores - baseline
if getattr(self.opt, 'self_cider_reward_weight', 0) > 0:
_scores = get_self_cider_scores(data_gts, seq, self.opt)
_scores = torch.from_numpy(_scores).type_as(scores).view(-1, 1)
_scores = _scores.expand_as(scores - 1)
scores += self.opt.self_cider_reward_weight * _scores
output = - input * mask * scores.view(-1, 1)
output = torch.sum(output) / torch.sum(mask)
out['loss'] = output
return out
class LanguageModelCriterion(nn.Module):
def __init__(self):
super(LanguageModelCriterion, self).__init__()
def forward(self, input, target, mask):
if target.ndim == 3:
target = target.reshape(-1, target.shape[2])
mask = mask.reshape(-1, mask.shape[2])
# truncate to the same size
target = target[:, :input.size(1)]
mask = mask[:, :input.size(1)].float()
output = -input.gather(2, target.unsqueeze(2)).squeeze(2) * mask
output = torch.sum(output) / torch.sum(mask)
return output
class LabelSmoothing(nn.Module):
"Implement label smoothing."
def __init__(self, size=0, padding_idx=0, smoothing=0.0):
super(LabelSmoothing, self).__init__()
self.criterion = nn.KLDivLoss(reduction='none')
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
self.true_dist = None
def forward(self, input, target, mask):
if target.ndim == 3:
target = target.reshape(-1, target.shape[2])
mask = mask.reshape(-1, mask.shape[2])
target = target[:, :input.size(1)]
mask = mask[:, :input.size(1)]
input = input.reshape(-1, input.size(-1))
target = target.reshape(-1)
mask = mask.reshape(-1).float()
self.size = input.size(1)
true_dist = input.data.clone()
true_dist.fill_(self.smoothing / (self.size - 1))
true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)
return (self.criterion(input, true_dist).sum(1) * mask).sum() / mask.sum()
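def _example_label_smoothing():
    # Hedged usage sketch (hypothetical helper, not part of the original file):
    # LabelSmoothing above expects per-token log-probabilities of shape
    # (batch, seq_len, vocab), integer targets of shape (batch, seq_len) and a 0/1
    # mask of the same shape. All tensor sizes below are arbitrary.
    criterion = LabelSmoothing(smoothing=0.1)
    log_probs = F.log_softmax(torch.randn(2, 5, 11), dim=-1)
    target = torch.randint(0, 11, (2, 5))
    mask = torch.ones(2, 5)
    return criterion(log_probs, target, mask)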
|
StarcoderdataPython
|
1662802
|
<filename>Main/SharedTools/FinClasses.py
class stock():
def __init__(self, name, ticker, weight):
self.name = name
self.ticker = ticker
self.weight = weight
def getTicker(self):
return self.ticker
def getWeight(self):
return self.weight
def setWeight(self, weight):
self.weight = weight
def getName(self):
return self.name
class ETF():
def __init__(self, ticker, weight):
self.ticker = ticker
self.weight = weight
self.holdings = []
self.jsonList = []
def setHoldings(self, holdings):
self.holdings = holdings
def getTicker(self):
return self.ticker
def getWeight(self):
return self.weight
def getHoldings(self):
return self.holdings
def setJsonList(self, jsonList):
self.jsonList = jsonList
def getJsonList(self):
return self.jsonList
def getRegionJSON(self):
return self.jsonList[0]
def getCountryJSON(self):
return self.jsonList[1]
def getSectorJSON(self):
return self.jsonList[2]
def getMarketCapJSON(self):
return self.jsonList[3]
#override print function
def __str__(self):
return self.ticker
|
StarcoderdataPython
|
1723064
|
<reponame>BedrockDev/CAU2019<filename>Pre-term/Computational Thinking and Problem Solving/Assignment 2/problem3.py
# problem 3: lottery number generator
import random
freq = [0]*45
recommendation = []
def generate():
return random.randint(1, 45)
def lotto_generator():
numbers = [generate()]
for i in range(1, 6):
number = generate()
# remove duplicates
while number in numbers:
number = generate()
numbers.append(number)
return numbers
for i in range(0, 1000):
lotto = lotto_generator()
for j in lotto:
freq[j - 1] += 1
# print(lotto)
for i in range(0, 6):
top = max(freq)
recommendation.append(str(freq.index(top) + 1) + " (" + str(top) + " times)")
# resets the top value
freq[freq.index(top)] = -1
for number in recommendation:
print(number)
|
StarcoderdataPython
|
3389158
|
from django import forms
from property.models import PropertyEnquiry
class PropertyForm (forms.ModelForm) :
class Meta :
exclude = ('date_added',)
class PropertyTypeForm(forms.ModelForm) :
class Meta :
exclude = ()
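# Descriptive note (added): both ModelForm subclasses above still need a `model`
# attribute in their Meta classes before they can be instantiated; Django raises
# "ModelForm has no model class specified" otherwise. The model classes are not
# imported here, so they are left unspecified rather than guessed at.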
class EnquiryForm(forms.Form) :
subject = forms.CharField(required=True, )
message = forms.CharField(required=True, widget=forms.Textarea)
def save_inquiry(self, user, property):
inquiry = PropertyEnquiry.objects.create(
user=user,
property=property,
message=self.data['message'],
subject=self.data['subject'],
)
inquiry.save()
|
StarcoderdataPython
|
3301710
|
from collections import deque
import sys
"""
--- Day 15: Beverage Bandits ---
Having perfected their hot chocolate, the Elves have a new problem: the Goblins that live in these caves will do
anything to steal it. Looks like they're here for a fight.
You scan the area, generating a map of the walls (#), open cavern (.), and starting position of every Goblin (G) and Elf
(E) (your puzzle input).
Combat proceeds in rounds; in each round, each unit that is still alive takes a turn, resolving all of its actions
before the next unit's turn begins. On each unit's turn, it tries to move into range of an enemy (if it isn't already)
and then attack (if it is in range).
All units are very disciplined and always follow very strict combat rules. Units never move or attack diagonally, as
doing so would be dishonorable. When multiple choices are equally valid, ties are broken in reading order:
top-to-bottom, then left-to-right. For instance, the order in which units take their turns within a round is the reading
order of their starting positions in that round, regardless of the type of unit or whether other units have moved after
the round started. For example:
would take their
These units: turns in this order:
####### #######
#.G.E.# #.1.2.#
#E.G.E# #3.4.5#
#.G.E.# #.6.7.#
####### #######
Each unit begins its turn by identifying all possible targets (enemy units). If no targets remain, combat ends.
Then, the unit identifies all of the open squares (.) that are in range of each target; these are the squares which are
adjacent (immediately up, down, left, or right) to any target and which aren't already occupied by a wall or another
unit. Alternatively, the unit might already be in range of a target. If the unit is not already in range of a target,
and there are no open squares which are in range of a target, the unit ends its turn.
If the unit is already in range of a target, it does not move, but continues its turn with an attack. Otherwise, since
it is not in range of a target, it moves.
To move, the unit first considers the squares that are in range and determines which of those squares it could reach in
the fewest steps. A step is a single movement to any adjacent (immediately up, down, left, or right) open (.) square.
Units cannot move into walls or other units. The unit does this while considering the current positions of units and
does not do any prediction about where units will be later. If the unit cannot reach (find an open path to) any of the
squares that are in range, it ends its turn. If multiple squares are in range and tied for being reachable in the fewest
steps, the square which is first in reading order is chosen. For example:
Targets: In range: Reachable: Nearest: Chosen:
####### ####### ####### ####### #######
#E..G.# #E.?G?# #E.@G.# #E.!G.# #E.+G.#
#...#.# --> #.?.#?# --> #.@.#.# --> #.!.#.# --> #...#.#
#.G.#G# #?G?#G# #@G@#G# #!G.#G# #.G.#G#
####### ####### ####### ####### #######
In the above scenario, the Elf has three targets (the three Goblins):
* Each of the Goblins has open, adjacent squares which are in range (marked with a ? on the map).
* Of those squares, four are reachable (marked @); the other two (on the right) would require moving through a wall or
unit to reach.
* Three of these reachable squares are nearest, requiring the fewest steps (only 2) to reach (marked !).
* Of those, the square which is first in reading order is chosen (+).
The unit then takes a single step toward the chosen square along the shortest path to that square. If multiple steps
would put the unit equally closer to its destination, the unit chooses the step which is first in reading order. (This
requires knowing when there is more than one shortest path so that you can consider the first step of each such path.)
For example:
In range: Nearest: Chosen: Distance: Step:
####### ####### ####### ####### #######
#.E...# #.E...# #.E...# #4E212# #..E..#
#...?.# --> #...!.# --> #...+.# --> #32101# --> #.....#
#..?G?# #..!G.# #...G.# #432G2# #...G.#
####### ####### ####### ####### #######
The Elf sees three squares in range of a target (?), two of which are nearest (!), and so the first in reading order is
chosen (+). Under "Distance", each open square is marked with its distance from the destination square; the two squares
to which the Elf could move on this turn (down and to the right) are both equally good moves and would leave the Elf 2
steps from being in range of the Goblin. Because the step which is first in reading order is chosen, the Elf moves right
one square.
Here's a larger example of movement:
Initially:
#########
#G..G..G#
#.......#
#.......#
#G..E..G#
#.......#
#.......#
#G..G..G#
#########
After 1 round:
#########
#.G...G.#
#...G...#
#...E..G#
#.G.....#
#.......#
#G..G..G#
#.......#
#########
After 2 rounds:
#########
#..G.G..#
#...G...#
#.G.E.G.#
#.......#
#G..G..G#
#.......#
#.......#
#########
After 3 rounds:
#########
#.......#
#..GGG..#
#..GEG..#
#G..G...#
#......G#
#.......#
#.......#
#########
Once the Goblins and Elf reach the positions above, they all are either in range of a target or cannot find any square
in range of a target, and so none of the units can move until a unit dies.
After moving (or if the unit began its turn in range of a target), the unit attacks.
To attack, the unit first determines all of the targets that are in range of it by being immediately adjacent to it. If
there are no such targets, the unit ends its turn. Otherwise, the adjacent target with the fewest hit points is
selected; in a tie, the adjacent target with the fewest hit points which is first in reading order is selected.
The unit deals damage equal to its attack power to the selected target, reducing its hit points by that amount. If this
reduces its hit points to 0 or fewer, the selected target dies: its square becomes . and it takes no further turns.
Each unit, either Goblin or Elf, has 3 attack power and starts with 200 hit points.
For example, suppose the only Elf is about to attack:
HP: HP:
G.... 9 G.... 9
..G.. 4 ..G.. 4
..EG. 2 --> ..E..
..G.. 2 ..G.. 2
...G. 1 ...G. 1
The "HP" column shows the hit points of the Goblin to the left in the corresponding row. The Elf is in range of three
targets: the Goblin above it (with 4 hit points), the Goblin to its right (with 2 hit points), and the Goblin below it
(also with 2 hit points). Because three targets are in range, the ones with the lowest hit points are selected: the two
Goblins with 2 hit points each (one to the right of the Elf and one below the Elf). Of those, the Goblin first in
reading order (the one to the right of the Elf) is selected. The selected Goblin's hit points (2) are reduced by the
Elf's attack power (3), reducing its hit points to -1, killing it.
After attacking, the unit's turn ends. Regardless of how the unit's turn ends, the next unit in the round takes its
turn. If all units have taken turns in this round, the round ends, and a new round begins.
The Elves look quite outnumbered. You need to determine the outcome of the battle: the number of full rounds that were
completed (not counting the round in which combat ends) multiplied by the sum of the hit points of all remaining units
at the moment combat ends. (Combat only ends when a unit finds no targets during its turn.)
Below is an entire sample combat. Next to each map, each row's units' hit points are listed from left to right.
Initially:
#######
#.G...# G(200)
#...EG# E(200), G(200)
#.#.#G# G(200)
#..G#E# G(200), E(200)
#.....#
#######
After 1 round:
#######
#..G..# G(200)
#...EG# E(197), G(197)
#.#G#G# G(200), G(197)
#...#E# E(197)
#.....#
#######
After 2 rounds:
#######
#...G.# G(200)
#..GEG# G(200), E(188), G(194)
#.#.#G# G(194)
#...#E# E(194)
#.....#
#######
Combat ensues; eventually, the top Elf dies:
After 23 rounds:
#######
#...G.# G(200)
#..G.G# G(200), G(131)
#.#.#G# G(131)
#...#E# E(131)
#.....#
#######
After 24 rounds:
#######
#..G..# G(200)
#...G.# G(131)
#.#G#G# G(200), G(128)
#...#E# E(128)
#.....#
#######
After 25 rounds:
#######
#.G...# G(200)
#..G..# G(131)
#.#.#G# G(125)
#..G#E# G(200), E(125)
#.....#
#######
After 26 rounds:
#######
#G....# G(200)
#.G...# G(131)
#.#.#G# G(122)
#...#E# E(122)
#..G..# G(200)
#######
After 27 rounds:
#######
#G....# G(200)
#.G...# G(131)
#.#.#G# G(119)
#...#E# E(119)
#...G.# G(200)
#######
After 28 rounds:
#######
#G....# G(200)
#.G...# G(131)
#.#.#G# G(116)
#...#E# E(113)
#....G# G(200)
#######
More combat ensues; eventually, the bottom Elf dies:
After 47 rounds:
#######
#G....# G(200)
#.G...# G(131)
#.#.#G# G(59)
#...#.#
#....G# G(200)
#######
Before the 48th round can finish, the top-left Goblin finds that there are no targets remaining, and so combat ends. So,
the number of full rounds that were completed is 47, and the sum of the hit points of all remaining units is
200+131+59+200 = 590. From these, the outcome of the battle is 47 * 590 = 27730.
Here are a few example summarized combats:
####### #######
#G..#E# #...#E# E(200)
#E#E.E# #E#...# E(197)
#G.##.# --> #.E##.# E(185)
#...#E# #E..#E# E(200), E(200)
#...E.# #.....#
####### #######
Combat ends after 37 full rounds
Elves win with 982 total hit points left
Outcome: 37 * 982 = 36334
####### #######
#E..EG# #.E.E.# E(164), E(197)
#.#G.E# #.#E..# E(200)
#E.##E# --> #E.##.# E(98)
#G..#.# #.E.#.# E(200)
#..E#.# #...#.#
####### #######
Combat ends after 46 full rounds
Elves win with 859 total hit points left
Outcome: 46 * 859 = 39514
####### #######
#E.G#.# #G.G#.# G(200), G(98)
#.#G..# #.#G..# G(200)
#G.#.G# --> #..#..#
#G..#.# #...#G# G(95)
#...E.# #...G.# G(200)
####### #######
Combat ends after 35 full rounds
Goblins win with 793 total hit points left
Outcome: 35 * 793 = 27755
####### #######
#.E...# #.....#
#.#..G# #.#G..# G(200)
#.###.# --> #.###.#
#E#G#G# #.#.#.#
#...#G# #G.G#G# G(98), G(38), G(200)
####### #######
Combat ends after 54 full rounds
Goblins win with 536 total hit points left
Outcome: 54 * 536 = 28944
######### #########
#G......# #.G.....# G(137)
#.E.#...# #G.G#...# G(200), G(200)
#..##..G# #.G##...# G(200)
#...##..# --> #...##..#
#...#...# #.G.#...# G(200)
#.G...G.# #.......#
#.....G.# #.......#
######### #########
Combat ends after 20 full rounds
Goblins win with 937 total hit points left
Outcome: 20 * 937 = 18740
What is the outcome of the combat described in your puzzle input?
Your puzzle answer was 250594.
--- Part Two ---
According to your calculations, the Elves are going to lose badly. Surely, you won't mess up the timeline too much if
you give them just a little advanced technology, right?
You need to make sure the Elves not only win, but also suffer no losses: even the death of a single Elf is unacceptable.
However, you can't go too far: larger changes will be more likely to permanently alter spacetime.
So, you need to find the outcome of the battle in which the Elves have the lowest integer attack power (at least 4) that
allows them to win without a single death. The Goblins always have an attack power of 3.
In the first summarized example above, the lowest attack power the Elves need to win without losses is 15:
####### #######
#.G...# #..E..# E(158)
#...EG# #...E.# E(14)
#.#.#G# --> #.#.#.#
#..G#E# #...#.#
#.....# #.....#
####### #######
Combat ends after 29 full rounds
Elves win with 172 total hit points left
Outcome: 29 * 172 = 4988
In the second example above, the Elves need only 4 attack power:
####### #######
#E..EG# #.E.E.# E(200), E(23)
#.#G.E# #.#E..# E(200)
#E.##E# --> #E.##E# E(125), E(200)
#G..#.# #.E.#.# E(200)
#..E#.# #...#.#
####### #######
Combat ends after 33 full rounds
Elves win with 948 total hit points left
Outcome: 33 * 948 = 31284
In the third example above, the Elves need 15 attack power:
####### #######
#E.G#.# #.E.#.# E(8)
#.#G..# #.#E..# E(86)
#G.#.G# --> #..#..#
#G..#.# #...#.#
#...E.# #.....#
####### #######
Combat ends after 37 full rounds
Elves win with 94 total hit points left
Outcome: 37 * 94 = 3478
In the fourth example above, the Elves need 12 attack power:
####### #######
#.E...# #...E.# E(14)
#.#..G# #.#..E# E(152)
#.###.# --> #.###.#
#E#G#G# #.#.#.#
#...#G# #...#.#
####### #######
Combat ends after 39 full rounds
Elves win with 166 total hit points left
Outcome: 39 * 166 = 6474
In the last example above, the lone Elf needs 34 attack power:
######### #########
#G......# #.......#
#.E.#...# #.E.#...# E(38)
#..##..G# #..##...#
#...##..# --> #...##..#
#...#...# #...#...#
#.G...G.# #.......#
#.....G.# #.......#
######### #########
Combat ends after 30 full rounds
Elves win with 38 total hit points left
Outcome: 30 * 38 = 1140
After increasing the Elves' attack power until it is just barely enough for them to win without any Elves dying, what is
the outcome of the combat described in your puzzle input?
Your puzzle answer was 52133.
Both parts of this puzzle are complete! They provide two gold stars: **
"""
class ElfDied(Exception):
pass
class Board:
def __init__(self, units, walls):
self._units = set(units)
self.walls = set(walls)
self.rounds = 0
def __str__(self):
units = {unit.position: unit for unit in self.units}
all_positions = units.keys() | self.walls
maxx = max(all_positions, key=lambda p: p[0])[0] + 1
maxy = max(all_positions, key=lambda p: p[1])[1] + 1
result = ""
for y in range(maxy):
units_in_line = []
for x in range(maxx):
if (x, y) in self.walls:
result += "#"
elif (x, y) in units:
unit = units[(x, y)]
result += unit.type
units_in_line.append(f"{unit.type}({unit.hp})")
else:
result += "."
result += "\t" + ", ".join(units_in_line) + "\n"
return result.strip()
@property
def units(self):
return [unit for unit in self._units if unit.is_alive]
def tick(self):
units_in_order = sorted(self.units, key=sort_by_reading_order)
for unit in units_in_order:
if unit.is_alive:
unit.do_turn(self)
self.rounds += 1
@property
def remaining_hit_points(self):
return sum(unit.hp for unit in self.units)
@property
def _blocked_spaces(self):
return self.walls | {unit.position for unit in self.units}
@property
def elves(self):
return sorted(
(unit for unit in self.units if unit.type == "E"),
key=sort_by_reading_order)
@property
def goblins(self):
return sorted(
(unit for unit in self.units if unit.type == "G"),
key=sort_by_reading_order)
def find_closest_enemy(self, position, enemy_type):
enemies = [unit for unit in self.units if unit.type == enemy_type]
self._check_game_over(enemies)
attack_positions = {
(position[0], position[1] + 1),
(position[0], position[1] - 1),
(position[0] + 1, position[1]),
(position[0] - 1, position[1])
}
enemies_in_range = [enemy for enemy in enemies if enemy.position in attack_positions]
if len(enemies_in_range) > 0:
lowest_hp = min(enemies_in_range, key=lambda e: e.hp).hp
return sorted(
[enemy for enemy in enemies_in_range if enemy.hp == lowest_hp],
key=sort_by_reading_order)[0]
return None
def find_path_to_enemy(self, position, enemy_type):
stack = deque()
visited = set()
stack.append((position, []))
found_paths = []
while len(stack) > 0:
position, path = stack.popleft()
x, y = position
next_positions = sorted(
[(x, y - 1), (x, y + 1), (x + 1, y), (x - 1, y)],
key=sort_by_reading_order)
for next_position in next_positions:
if self.has_unit(next_position, enemy_type):
if len(found_paths) > 0:
shortest_path_so_far = len(min(found_paths, key=lambda p: len(p)))
else:
shortest_path_so_far = sys.maxsize
if len(path) <= shortest_path_so_far:
found_paths.append(path)
else:
break
for p in next_positions:
if p not in visited and self._is_open_space(p):
visited.add(p)
new_path = list(path)
new_path.append(p)
stack.append((p, new_path))
if len(found_paths) > 0:
paths_by_target = {p[-1]: p for p in found_paths}
last_step = sorted(paths_by_target.keys(), key=sort_by_reading_order)[0]
return paths_by_target[last_step]
def has_unit(self, position, unit_type):
for unit in self.units:
if unit.type == unit_type and unit.position == position:
return True
@staticmethod
def _check_game_over(enemies):
if len(enemies) == 0:
raise StopIteration
def _find_path(self, start, end):
queue = deque()
visited = set()
visited.add(start)
queue.append((*start, 0, []))
while queue:
x, y, distance, path = queue.popleft()
if (x, y) == end:
return path
possible_moves = sorted([
(x + 1, y),
(x - 1, y),
(x, y + 1),
(x, y - 1),
], key=sort_by_reading_order)
possible_moves = [
loc
for loc in possible_moves
if self._is_open_space(loc) and loc not in visited
]
for move in possible_moves:
visited.add(move)
new_path = list(path)
new_path.append(move)
queue.append((*move, distance + 1, new_path))
def _is_open_space(self, location):
return location not in self._blocked_spaces
def sort_by_reading_order(unit):
if hasattr(unit, "position"):
x, y = unit.position
else:
x, y = unit
return x + y * 1000
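# Note (added comment, not part of the original solution): the key above encodes
# "reading order" (top-to-bottom, then left-to-right) by weighting y heavily; it
# assumes x < 1000, which holds for these small grids. For example, (x=3, y=1)
# maps to 1003 and sorts before (x=1, y=2), which maps to 2001.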
class Unit:
def __init__(self, type, position, power):
self.type = type
self.position = position
self.damage = power
self.hp = 200
self.enemy = "E" if type == "G" else "G"
@property
def is_alive(self):
return self.hp > 0
def do_turn(self, board: Board):
attacked = self._try_attack(board)
if not attacked:
path = board.find_path_to_enemy(self.position, self.enemy)
if path:
self.position = path[0]
self._try_attack(board)
def _try_attack(self, board: Board):
enemy = board.find_closest_enemy(self.position, self.enemy)
if enemy:
enemy.hp -= self.damage
return True
def __repr__(self):
return f"{self.type} {self.position} [{self.hp}]"
def parse(puzzle, elf_power=3):
walls = []
units = []
for y in range(len(puzzle)):
for x in range(len(puzzle[y])):
item = puzzle[y][x]
if item == "#":
walls.append((x, y))
elif item == "E":
units.append(Unit(item, (x, y), elf_power))
elif item == "G":
units.append(Unit(item, (x, y), 3))
return Board(units, walls)
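# Illustrative sketch (not part of the original solution): `parse` expects the raw
# puzzle lines as strings, e.g.
#   puzzle = ["#####", "#E.G#", "#####"]
#   board = parse(puzzle, elf_power=3)
# which yields a Board with one Elf at (1, 1) and one Goblin at (3, 1).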
def play_game2(puzzle):
elf_power = 4
while True:
try:
board = parse(puzzle, elf_power=elf_power)
return play_game1(board, allow_elf_death=False)
except ElfDied:
# print(f"Elf died after {board.rounds} with power {elf_power}")
elf_power += 1
def play_game1(board: Board, allow_elf_death=True):
starting_elves = len(board.elves)
while True:
try:
# print("AFTER " + str(board.rounds))
# print(str(board))
board.tick()
except StopIteration:
return board.rounds * board.remaining_hit_points
finally:
if not allow_elf_death and len(board.elves) < starting_elves:
raise ElfDied
if __name__ == "__main__":
with open("15_beverage_bandits.txt") as file:
puzzle = file.readlines()
board = parse(puzzle)
score = play_game1(board)
print(f"part 1: {score}")
print(f"part 2: {play_game2(puzzle)}")
|
StarcoderdataPython
|
1622519
|
<gh_stars>1-10
"""
The SQLAlchemy model definition.
revision history:
40 - Move image data to separate table for speed
39 - Remove SQL insert functions, add dataset row to frequencyband table. Add image data.
38 - add varmetric table
37 - add forcedfits_count column to runningcatalog
36 - switch to SQLAlchemy schema initialisation
"""
from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Index,\
Integer, SmallInteger, String, text, Sequence, LargeBinary
from sqlalchemy.orm import relationship, backref
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.postgresql import DOUBLE_PRECISION as Double
SCHEMA_VERSION = 40
Base = declarative_base()
metadata = Base.metadata
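# Illustrative usage sketch (not part of the original module): with the declarative
# metadata above, the full schema can be created against any SQLAlchemy engine, e.g.
#
#     from sqlalchemy import create_engine
#     engine = create_engine("postgresql://user:pass@localhost/trap")  # hypothetical URL
#     Base.metadata.create_all(engine)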
class Assocskyrgn(Base):
__tablename__ = 'assocskyrgn'
id = Column(Integer, primary_key=True)
runcat_id = Column('runcat', ForeignKey('runningcatalog.id'), nullable=False, index=True)
runcat = relationship('Runningcatalog',
backref=backref('assocskyrgns',
cascade="all,delete"))
skyrgn_id = Column('skyrgn', ForeignKey('skyregion.id'), nullable=False, index=True)
skyrgn = relationship('Skyregion')
distance_deg = Column(Double)
class Assocxtrsource(Base):
__tablename__ = 'assocxtrsource'
__table_args__ = (
Index('assocxtrsource_runcat_xtrsrc_key', 'runcat', 'xtrsrc',
unique=True),
)
id = Column(Integer, primary_key=True)
runcat_id = Column('runcat', ForeignKey('runningcatalog.id'), nullable=False)
runcat = relationship('Runningcatalog')
xtrsrc_id = Column('xtrsrc', ForeignKey('extractedsource.id'), index=True)
xtrsrc = relationship('Extractedsource')
type = Column(SmallInteger, nullable=False)
distance_arcsec = Column(Double)
r = Column(Double)
loglr = Column(Double)
v_int = Column(Double, nullable=False)
eta_int = Column(Double, nullable=False)
f_datapoints = Column(Integer, nullable=False)
class Config(Base):
__tablename__ = 'config'
__table_args__ = (
Index('config_dataset_section_key_key', 'dataset', 'section', 'key',
unique=True),
)
id = Column(Integer, primary_key=True)
dataset_id = Column('dataset', ForeignKey('dataset.id'), nullable=False)
dataset = relationship('Dataset',
backref=backref('configs', cascade="all,delete"))
section = Column(String(100))
key = Column(String(100))
value = Column(String(500))
type = Column(String(5))
seq_dataset = Sequence('seq_dataset')
class Dataset(Base):
__tablename__ = 'dataset'
id = Column(Integer, seq_dataset, server_default=seq_dataset.next_value(), primary_key=True)
rerun = Column(Integer, nullable=False, server_default=text("0"))
type = Column(SmallInteger, nullable=False, server_default=text("1"))
process_start_ts = Column(DateTime, nullable=False)
process_end_ts = Column(DateTime)
detection_threshold = Column(Double)
analysis_threshold = Column(Double)
assoc_radius = Column(Double)
backsize_x = Column(SmallInteger)
backsize_y = Column(SmallInteger)
margin_width = Column(Double)
description = Column(String(100), nullable=False)
node = Column(SmallInteger, nullable=False, server_default=text("1"))
nodes = Column(SmallInteger, nullable=False, server_default=text("1"))
# extractedsource types
BLIND_FIT = 0
FORCED_FIT = 1
MONITORED_FIT = 2
class Extractedsource(Base):
__tablename__ = 'extractedsource'
id = Column(Integer, primary_key=True)
image_id = Column('image', ForeignKey('image.id'), nullable=False, index=True)
image = relationship('Image', backref=backref('extractedsources', cascade="all,delete"))
ff_runcat_id = Column('ff_runcat', ForeignKey('runningcatalog.id'))
ff_runcat = relationship('Runningcatalog', primaryjoin='Extractedsource.ff_runcat_id == Runningcatalog.id')
ff_monitor_id = Column('ff_monitor', ForeignKey('monitor.id'))
ff_monitor = relationship('Monitor')
zone = Column(Integer, nullable=False)
ra = Column(Double, nullable=False, index=True)
decl = Column(Double, nullable=False, index=True)
uncertainty_ew = Column(Double, nullable=False)
uncertainty_ns = Column(Double, nullable=False)
ra_err = Column(Double, nullable=False, index=True)
decl_err = Column(Double, nullable=False, index=True)
ra_fit_err = Column(Double, nullable=False)
decl_fit_err = Column(Double, nullable=False)
ew_sys_err = Column(Double, nullable=False)
ns_sys_err = Column(Double, nullable=False)
error_radius = Column(Double, nullable=False)
x = Column(Double, nullable=False, index=True)
y = Column(Double, nullable=False, index=True)
z = Column(Double, nullable=False, index=True)
racosdecl = Column(Double, nullable=False)
margin = Column(Boolean, nullable=False, server_default=text("false"))
det_sigma = Column(Double, nullable=False)
semimajor = Column(Double)
semiminor = Column(Double)
pa = Column(Double)
f_peak = Column(Double)
f_peak_err = Column(Double)
f_int = Column(Double)
f_int_err = Column(Double)
chisq = Column(Double)
reduced_chisq = Column(Double)
extract_type = Column(SmallInteger)
fit_type = Column(SmallInteger)
node = Column(SmallInteger, nullable=False, server_default=text("1"))
nodes = Column(SmallInteger, nullable=False, server_default=text("1"))
seq_frequencyband = Sequence('seq_frequencyband')
class Frequencyband(Base):
__tablename__ = 'frequencyband'
id = Column(Integer, seq_frequencyband, primary_key=True,
server_default=seq_frequencyband.next_value())
dataset_id = Column('dataset', Integer, ForeignKey('dataset.id'),
nullable=False, index=True)
dataset = relationship('Dataset', backref=backref('frequencybands', cascade="all,delete"))
freq_central = Column(Double)
freq_low = Column(Double)
freq_high = Column(Double)
seq_image = Sequence('seq_image')
class Image(Base):
__tablename__ = 'image'
id = Column(Integer, seq_image, primary_key=True,
server_default=seq_image.next_value())
dataset_id = Column('dataset', Integer, ForeignKey('dataset.id'), nullable=False, index=True)
dataset = relationship('Dataset', backref=backref('images', cascade="delete"))
band_id = Column('band', ForeignKey('frequencyband.id'), nullable=False, index=True)
band = relationship('Frequencyband', cascade="delete")
skyrgn_id = Column('skyrgn', Integer, ForeignKey('skyregion.id'), nullable=False, index=True)
skyrgn = relationship('Skyregion', backref=backref('images', cascade="delete"))
tau = Column(Integer)
stokes = Column(SmallInteger, nullable=False, server_default=text("1"))
tau_time = Column(Double)
freq_eff = Column(Double, nullable=False)
freq_bw = Column(Double)
taustart_ts = Column(DateTime, nullable=False, index=True)
rb_smaj = Column(Double, nullable=False)
rb_smin = Column(Double, nullable=False)
rb_pa = Column(Double, nullable=False)
deltax = Column(Double, nullable=False)
deltay = Column(Double, nullable=False)
fwhm_arcsec = Column(Double)
fov_degrees = Column(Double)
rms_qc = Column(Double, nullable=False)
rms_min = Column(Double)
rms_max = Column(Double)
detection_thresh = Column(Double)
analysis_thresh = Column(Double)
url = Column(String(1024))
node = Column(SmallInteger, nullable=False, server_default=text("1"))
nodes = Column(SmallInteger, nullable=False, server_default=text("1"))
data = relationship("ImageData", uselist=False, back_populates="image")
class ImageData(Base):
__tablename__ = 'imagedata'
id = Column(Integer, primary_key=True)
image_id = Column('image', Integer, ForeignKey('image.id'), nullable=False, index=True)
image = relationship("Image", back_populates="data")
fits_header = Column(String)
fits_data = Column(LargeBinary)
class Monitor(Base):
__tablename__ = 'monitor'
id = Column(Integer, primary_key=True)
dataset_id = Column('dataset', ForeignKey('dataset.id'), nullable=False, index=True)
dataset = relationship('Dataset')
runcat_id = Column('runcat', ForeignKey('runningcatalog.id'))
runcat = relationship('Runningcatalog')
ra = Column(Double, nullable=False)
decl = Column(Double, nullable=False)
name = Column(String(100))
class Newsource(Base):
__tablename__ = 'newsource'
id = Column(Integer, primary_key=True)
runcat_id = Column('runcat', ForeignKey('runningcatalog.id'), nullable=False, index=True)
runcat = relationship('Runningcatalog', backref=backref("newsources",
cascade="all,delete"))
trigger_xtrsrc_id = Column('trigger_xtrsrc', ForeignKey('extractedsource.id'), nullable=False, index=True)
trigger_xtrsrc = relationship('Extractedsource')
previous_limits_image_id = Column('previous_limits_image', ForeignKey('image.id'), nullable=False)
previous_limits_image = relationship('Image')
newsource_type = Column(SmallInteger, nullable=False)
class Node(Base):
__tablename__ = 'node'
__table_args__ = (
Index('node_node_zone_key', 'node', 'zone', unique=True),
)
id = Column(Integer, primary_key=True)
node = Column(SmallInteger, nullable=False, server_default=text("1"))
zone = Column(SmallInteger, nullable=False)
zone_min = Column(SmallInteger)
zone_max = Column(SmallInteger)
zone_min_incl = Column(Boolean, server_default=text("true"))
zone_max_incl = Column(Boolean, server_default=text("false"))
zoneheight = Column(Double, server_default=text("1.0"))
nodes = Column(SmallInteger, nullable=False, server_default=text("1"))
class Rejection(Base):
__tablename__ = 'rejection'
id = Column(Integer, primary_key=True)
image_id = Column('image', ForeignKey('image.id'), index=True)
image = relationship('Image')
# TO DO: Rename this column to 'rejectreason_id',
# (rather than just 'rejectreason') so the model attribute matches
# the SQL column name, avoiding the current confusing name-shadowing
# between the SQL columns and the model attributes. (Issue #508)
rejectreason_id = Column('rejectreason', ForeignKey('rejectreason.id'), index=True)
rejectreason = relationship('Rejectreason')
comment = Column(String(512))
class Rejectreason(Base):
__tablename__ = 'rejectreason'
id = Column(Integer, primary_key=True)
description = Column(String(512))
class Runningcatalog(Base):
__tablename__ = 'runningcatalog'
id = Column(Integer, primary_key=True)
xtrsrc_id = Column('xtrsrc', ForeignKey('extractedsource.id'), nullable=False, unique=True)
xtrsrc = relationship('Extractedsource',
primaryjoin='Runningcatalog.xtrsrc_id == Extractedsource.id',
backref=backref('extractedsources', cascade="all,delete"))
dataset_id = Column('dataset', ForeignKey('dataset.id'), nullable=False, index=True)
dataset = relationship('Dataset')
datapoints = Column(Integer, nullable=False)
zone = Column(Integer, nullable=False, index=True)
wm_ra = Column(Double, nullable=False, index=True)
wm_decl = Column(Double, nullable=False, index=True)
wm_uncertainty_ew = Column(Double, nullable=False, index=True)
wm_uncertainty_ns = Column(Double, nullable=False, index=True)
avg_ra_err = Column(Double, nullable=False)
avg_decl_err = Column(Double, nullable=False)
avg_wra = Column(Double, nullable=False)
avg_wdecl = Column(Double, nullable=False)
avg_weight_ra = Column(Double, nullable=False)
avg_weight_decl = Column(Double, nullable=False)
x = Column(Double, nullable=False, index=True)
y = Column(Double, nullable=False, index=True)
z = Column(Double, nullable=False, index=True)
inactive = Column(Boolean, nullable=False, server_default=text("false"))
mon_src = Column(Boolean, nullable=False, server_default=text("false"))
forcedfits_count = Column(Integer, server_default=text("0"))
extractedsources = relationship('Extractedsource',
secondary='assocxtrsource',
backref='runningcatalogs')
varmetric = relationship("Varmetric", uselist=False, backref="runcat",
cascade="all,delete")
class Varmetric(Base):
__tablename__ = 'varmetric'
id = Column(Integer, primary_key=True)
runcat_id = Column('runcat', ForeignKey('runningcatalog.id'),
nullable=False, index=True, unique=True)
v_int = Column(Double, index=True)
eta_int = Column(Double)
band_id = Column('band', ForeignKey('frequencyband.id'), nullable=False,
index=True)
band = relationship('Frequencyband', cascade="delete")
newsource = Column(Integer)
sigma_rms_max = Column(Double, index=True)
sigma_rms_min = Column(Double, index=True)
lightcurve_max = Column(Double, index=True)
lightcurve_avg = Column(Double, index=True)
lightcurve_median = Column(Double, index=True)
class RunningcatalogFlux(Base):
__tablename__ = 'runningcatalog_flux'
__table_args__ = (
Index('runningcatalog_flux_runcat_band_stokes_key', 'runcat', 'band',
'stokes', unique=True),
)
id = Column(Integer, primary_key=True)
runcat_id = Column('runcat', ForeignKey('runningcatalog.id'), nullable=False)
runcat = relationship('Runningcatalog',
backref=backref('runningcatalogfluxs',
cascade="all,delete"))
band_id = Column('band', ForeignKey('frequencyband.id'), nullable=False, index=True)
band = relationship('Frequencyband', cascade="delete")
stokes = Column(SmallInteger, nullable=False, server_default=text("1"))
f_datapoints = Column(Integer, nullable=False)
avg_f_peak = Column(Double)
avg_f_peak_sq = Column(Double)
avg_f_peak_weight = Column(Double)
avg_weighted_f_peak = Column(Double)
avg_weighted_f_peak_sq = Column(Double)
avg_f_int = Column(Double)
avg_f_int_sq = Column(Double)
avg_f_int_weight = Column(Double)
avg_weighted_f_int = Column(Double)
avg_weighted_f_int_sq = Column(Double)
seq_skyregion = Sequence('seq_skyregion')
class Skyregion(Base):
__tablename__ = 'skyregion'
id = Column(Integer, seq_skyregion, primary_key=True,
server_default=seq_skyregion.next_value())
dataset_id = Column('dataset', ForeignKey('dataset.id'), nullable=False, index=True)
dataset = relationship('Dataset',
backref=backref('skyregions',cascade="all,delete"))
centre_ra = Column(Double, nullable=False)
centre_decl = Column(Double, nullable=False)
xtr_radius = Column(Double, nullable=False)
x = Column(Double, nullable=False)
y = Column(Double, nullable=False)
z = Column(Double, nullable=False)
class Temprunningcatalog(Base):
__tablename__ = 'temprunningcatalog'
id = Column(Integer, primary_key=True)
runcat_id = Column('runcat', ForeignKey('runningcatalog.id'), nullable=False, index=True)
runcat = relationship('Runningcatalog')
xtrsrc_id = Column('xtrsrc', ForeignKey('extractedsource.id'), nullable=False, index=True)
xtrsrc = relationship('Extractedsource')
dataset_id = Column('dataset', ForeignKey('dataset.id'), nullable=False, index=True)
dataset = relationship('Dataset')
band_id = Column('band', ForeignKey('frequencyband.id'), nullable=False, index=True)
band = relationship('Frequencyband', cascade="delete")
distance_arcsec = Column(Double, nullable=False)
r = Column(Double, nullable=False)
stokes = Column(SmallInteger, nullable=False, server_default=text("1"))
datapoints = Column(Integer, nullable=False)
zone = Column(Integer, nullable=False)
wm_ra = Column(Double, nullable=False)
wm_decl = Column(Double, nullable=False)
wm_uncertainty_ew = Column(Double, nullable=False)
wm_uncertainty_ns = Column(Double, nullable=False)
avg_ra_err = Column(Double, nullable=False)
avg_decl_err = Column(Double, nullable=False)
avg_wra = Column(Double, nullable=False)
avg_wdecl = Column(Double, nullable=False)
avg_weight_ra = Column(Double, nullable=False)
avg_weight_decl = Column(Double, nullable=False)
x = Column(Double, nullable=False)
y = Column(Double, nullable=False)
z = Column(Double, nullable=False)
margin = Column(Boolean, nullable=False, server_default=text("false"))
inactive = Column(Boolean, nullable=False, server_default=text("false"))
beam_semimaj = Column(Double)
beam_semimin = Column(Double)
beam_pa = Column(Double)
f_datapoints = Column(Integer)
avg_f_peak = Column(Double)
avg_f_peak_sq = Column(Double)
avg_f_peak_weight = Column(Double)
avg_weighted_f_peak = Column(Double)
avg_weighted_f_peak_sq = Column(Double)
avg_f_int = Column(Double)
avg_f_int_sq = Column(Double)
avg_f_int_weight = Column(Double)
avg_weighted_f_int = Column(Double)
avg_weighted_f_int_sq = Column(Double)
class Version(Base):
__tablename__ = 'version'
name = Column(String(12), primary_key=True)
value = Column(Integer, nullable=False)
|
StarcoderdataPython
|
3386146
|
parse_debug = False
record = False
analyzing = False
tst_non_object = True
tst_minimal = True
tst_space = True
tst_some_args = True
# # FIXME: comment or remove before commit
# from util import tst_disable_all
# tst_disable_all()
# record = True
#------------------------------------------------------------------------------
import sys
import os
import re
from contextlib import redirect_stdout
import unittest
from arpeggio import ParserPython, NonTerminal, Terminal, flatten
from arpeggio import Sequence, OrderedChoice, ZeroOrMore, OneOrMore, EOF
from arpeggio import RegExMatch as _
#------------------------------------------------------------------------------
from prettyprinter import cpprint as pp, pprint as pp_plain
from docopt_parser.parsetreenodes import nodes_equal
from p import pp_str
#------------------------------------------------------------------------------
from grammar.python.common import ws, newline, COMMA, BAR
from grammar.python.operand import operand, operand_all_caps, operand_angled
from grammar.python.option import *
from grammar.python.optdesc.list import *
from grammar.python.optdesc.line import *
from grammar.python.optdesc.section import *
from docopt_parser import DocOptListViewVisitor
from .optsect import tprint, document, body, element
from .optsect import create_terms
from .optsect import expect_document
from .optsect import section_optdesc
#------------------------------------------------------------------------------
# Refactoring to use *Def object specifications, isolated in *_obj functions :
from .optsect import section_optdesc_obj
# Shortcuts for creating specification objects
from .optlist import OptionDef as opt, OptionListDef as olst
from .optline import OptionLineDef as ol
from .optsect import OptionDescDef as od
from base import Test_Base
from util import tprint, write_scratch
#------------------------------------------------------------------------------
class Test_Usage_Section ( Test_Base ) :
def setUp(self):
# first get defaults, should all be False for boolean flags
super().setUp()
global parse_debug, record, analyzing
self.parse_debug = parse_debug
self.record = record
self.analyzing = analyzing
        # quiet, no parse trees displayed
# self.debug = False
# show parse tree for pass >= self.debug
# self.debug = 2
# Show text being parsed
# self.show = True
# and again, to apply behavior per altered settings
super().setUp()
self.grammar = document
self.parser = ParserPython ( language_def = self.grammar,
reduce_tree = False,
debug = self.parse_debug, )
if self.record :
write_scratch ( _clean = True )
#--------------------------------------------------------------------------
@unittest.skipUnless(tst_non_object, "Non-object tests not enabled")
def test_minimal(self):
ol_line_specs = [
( ( ( '-h', ), ( '--help', ) ), "Show this usage information." ),
( ( ( '-v', ), ( '--version', ) ), "Print the version and exit." ),
]
( text, opt_desc ) = section_optdesc ( ol_line_specs )
expect = expect_document ( [ opt_desc ] )
# tprint("[expect]") ; pp(expect)
# with open ("expect.txt", 'w') as f :
# pp_plain(expect, stream=f)
self.parse_and_verify ( text, expect )
#--------------------------------------------------------------------------
@unittest.skipUnless(tst_minimal, "Minimal tests not enabled")
def test_minimal_obj__step_by_step (self):
olst_1 = olst ( opt( '-h', ), opt( '--help', ) )
oline_1 = ol ( olst_1, "Show this usage information." )
olst_2 = olst ( opt( '-v', ), opt( '--version', ) )
oline_2 = ol ( olst_2, "Print the version and exit." )
optspecs = od ( oline_1, oline_2, intro="Options :" )
# print("[optspecs]") ; pp(optspecs)
( text, opt_desc ) = section_optdesc_obj ( optspecs )
# print(f"[test] text :\n{text}\n")
# print(f"[test] opt_desc :\n{pp_str(opt_desc)}\n")
expect = expect_document ( [ opt_desc ] )
# tprint("[expect]") ; pp(expect)
# with open ("expect.txt", 'w') as f :
# pp_plain(expect, stream=f)
self.parse_and_verify ( text, expect )
#--------------------------------------------------------------------------
@unittest.skipUnless(tst_minimal, "Minimal tests not enabled")
def test_minimal_obj__single_spec (self):
optspecs = od (
ol ( olst ( opt( '-h', ), opt( '--help', ) ) ,
"Show this usage information." ) ,
ol ( olst ( opt( '-v', ), opt( '--version', ) ) ,
"Print the version and exit." ) ,
intro="Options :" )
# print("[optspecs]") ; pp(optspecs)
( text, opt_desc ) = section_optdesc_obj ( optspecs )
expect = expect_document ( [ opt_desc ] )
self.parse_and_verify ( text, expect )
#--------------------------------------------------------------------------
# Three bugs for the price of one !
#
# In the option-list, operands may be either part of an option or a term.
#
# opt( '--file', '=', '<file>') :
# <file> is part of the term long_eq_arg of '--file'.
#
# opt( '--query', ' ', '<query>') :
# <query> is a term itself, separate from long_no_arg '--query'.
#
# 1. optline was inserting a comma rather than a space gap before
# operands.
# 2. All optline/optlist term except handlers assumed all terms were
# options and wrapped them in an option() Nonterminal. This wrapping
    #    needed to be pushed down into the term__{long,short}_* functions.
#
# 3. Unexpected space gap between option-list and newline when no help
# provided. Due to offset, without help, it must be 0.
#
# Inner node focused analysis is left as an example. Smaller parse trees
# are much easier to digest quickly.
#
@unittest.skipUnless(tst_space, "Space tests not enabled")
def test_space_arg (self):
optspecs = od (
ol ( olst ( opt( '--query', ' ', '<query>' ) ) ) ,
# ol ( olst ( opt( '-f', ) ) ) ,
# ol ( olst ( opt( '-f', ) , opt( '--file', '=', '<file>' ) ) ,
# "File to load." ) ,
# ol ( olst ( opt( '-x', '', 'FILE' ), opt( '--extract', ) ) ,
# "Extract file." ) ,
ol ( olst ( opt( '-x', '', 'FILE' ) ) ) ,
intro="Options :", offset=18 )
# print("[optspecs]") ; pp(optspecs)
( text, opt_desc ) = section_optdesc_obj ( optspecs )
write_scratch ( optdesc = opt_desc )
expect = expect_document ( [ opt_desc ] )
parsed = self.parse ( text, expect )
# improper comma separator for operand bug
if False :
expect = expect[0][0][0][0][1][1][0]
parsed = parsed[0][0][0][0][1][1][0]
# improper 'option' wrapper for 'operand'
if False :
expect = expect[0][0][0][0][1][1][1]
parsed = parsed[0][0][0][0][1][1][1]
# ...
if True :
expect = expect[0][0][0][1]
parsed = parsed[0][0][0][1]
write_scratch ( expect=expect, parsed=parsed )
self.verify ( text, expect, parsed )
#--------------------------------------------------------------------------
@unittest.skipUnless(tst_some_args, "Some args tests not enabled")
def test_some_args (self): # '--query <query>', expect includes comma
optspecs = od (
ol ( olst ( opt( '-f', ), opt( '--file', '=', '<file>' ) ) ,
"File to load." ) ,
ol ( olst ( opt( '-x', '', 'EXTRACT' ), opt( '--extract', ) ) ,
"Extract file." ) ,
ol ( olst ( opt( '-m', ' ', 'MEMBER' ), opt( '--member', ) ) ,
"member ..." ) ,
ol ( olst ( opt( '-q', ), opt( '--query', ' ', '<query>' ) ) ,
"Query ..." ) ,
ol ( olst ( opt( '-h', ), opt( '--help', ) ) ,
"Show this usage information." ) ,
ol ( olst ( opt( '-v', ), opt( '--version', ) ) ,
"Print the version and exit." ) ,
intro="Options :", offset=18 )
# print("[optspecs]") ; pp(optspecs)
( text, opt_desc ) = section_optdesc_obj ( optspecs )
expect = expect_document ( [ opt_desc ] )
self.parse_and_verify(text, expect)
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
#------------------------------------------------------------------------------
|
StarcoderdataPython
|
3241084
|
"""Match functions included as standard in the core."""
from ..ext.match_fun import MatchFun
from ..ext.register import register
@register()
class ReCoreMatchFun(MatchFun):
"""Integer match."""
name = 're'
def __init__(self, regex):
"""Initialise re (regex) match function."""
self.regex = regex
@register()
class FixedWidthColumnCoreMatchFun(MatchFun):
"""Integer match."""
name = 'fixedwidth'
def __init__(self, width, strip=True):
"""Initialise re (regex) match function."""
self.regex = '.{' + str(width) + '}'
self._strip = strip
def post_proc(self, string):
"""
Strip white space from both ends of the result (by default).
        This makes sense for a fixed-width column field, where the value is normally padded with white space.
"""
return super().post_proc(string.lstrip().rstrip() if self._strip else string)
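# Illustrative sketch (assumed usage, not part of the original module): a width of 8
# builds the regex '.{8}', so a matched field such as "ALICE   " would be reduced to
# "ALICE" by post_proc with the default strip=True. How MatchFun instances are invoked
# is defined by the ext.match_fun framework, which is not shown here.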
|
StarcoderdataPython
|
1768104
|
### Functions used during the research project that culminated in SBMClone ###
# Author: <NAME>
from sklearn.mixture import GaussianMixture
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score, confusion_matrix
import graph_tool.inference.minimize as gt_min
import scipy
import numpy as np
from datetime import datetime
from util import *
from simulation import generate_toy_design, merge_perfect
########## Functions to test SBMClone ##########
def test_graphtool_recursive(M, min_blocks = 4, max_blocks = 5, nested = True):
"""
Given biadjacency matrix M, infers a stochastic block model fitting M with at most 5 blocks (first split),
then divides the cells into sets accordingly and infers an SBM with at most 5 blocks from each induced submatrix.
ARI is computed with reference to the 4085-cell tree-structured block matrix.
"""
results = []
row_values = [129, 1823, 72, 1686, 141, 39, 88, 107]
true_labels = [[i] * row_values[i] for i in range(len(row_values))]
true_labels = np.concatenate(true_labels)
first_labels = [0] * 1952 + [1] * 2133
first_ari, (cellpart, snvpart) = test_graphtool(M, first_labels, min_blocks, max_blocks, nested)
print("First split ARI: {}".format(first_ari))
inf_labels = np.array(cellpart)
cell_sets = {i:np.argwhere(inf_labels == i).flatten() for i in range(max_blocks + 1)}
print([len(a) for a in cell_sets.values()])
results.append((true_labels, first_ari, cellpart, snvpart))
# break M into submatrices by rows using cell clusters
# For each submatrix, if the submatrix meets some condition,
for cset in cell_sets.values():
if len(cset) > 1:
myM = M[cset]
my_labels = [true_labels[a] for a in cset]
next_ari, (cellpart, snvpart) = test_graphtool(myM, my_labels, min_blocks, max_blocks, nested)
print("Recursive split ARI: {} (on {} cells)".format(next_ari, len(cset)))
results.append((my_labels, next_ari, cellpart, snvpart))
return results
def test_graphtool(M, true_labels, min_blocks = 4, max_blocks = None, nested = True):
"""
Apply the SBM inference functions from graph_tool to the given biadjacency matrix M.
    Returns the ARI between the inferred partition and true_labels, along with the inferred (cell, mutation) partition.
"""
assert min_blocks is None or isinstance(min_blocks, int)
assert max_blocks is None or isinstance(max_blocks, int)
m, n = M.shape
G, label2id, vtype = construct_graph_graphtool(M)
if nested:
r = gt_min.minimize_nested_blockmodel_dl(G, B_min = min_blocks, B_max = max_blocks, state_args = {'clabel': vtype})
b = r.get_bs()[0]
else:
r = gt_min.minimize_blockmodel_dl(G, B_min = min_blocks, B_max = max_blocks, state_args = {'clabel': vtype})
b = r.get_blocks()
id2label = reverse_dict(label2id)
inf_labels = [b[label2id['cell{}'.format(i)]] if 'cell{}'.format(i) in label2id else 100 for i in range(m)]
    return adjusted_rand_score(inf_labels, true_labels), blockmodel_to_labels(b, label2id, n_cells = m, n_muts = n)
def test_graphtool_oneshot(M, true_labels = None, min_blocks = 4, max_blocks = None, nested = True):
"""
Apply the SBM inference functions from graph_tool to the given biadjacency matrix M.
If true_labels is not given, assumes M is the empirical tree structure and uses the ground truth labeling of the 4085 cells.
    Returns the ARI between the inferred partition and the partition given by true_labels, as well as the inferred partition.
"""
m, n = M.shape
G, label2id, vtype = construct_graph_graphtool(M)
if nested:
r = gt_min.minimize_nested_blockmodel_dl(G, B_min = min_blocks, B_max = max_blocks, state_args = {'clabel': vtype})
b = r.get_bs()[0]
else:
r = gt_min.minimize_blockmodel_dl(G, B_min = min_blocks, B_max = max_blocks, state_args = {'clabel': vtype})
b = r.get_blocks()
if true_labels is None:
row_values = [129, 1823, 72, 1686, 141, 39, 88, 107]
true_labels = [[i] * row_values[i] for i in range(len(row_values))]
true_labels = np.concatenate(true_labels)
cell_part, mut_part = blockmodel_to_labels(b, label2id, n_cells = m, n_muts = n)
inf_labels = cell_part
return adjusted_rand_score(true_labels, inf_labels), cell_part, mut_part
########## Functions to test naive approaches #########
def test_naive_rows_kmeans(M, true_part, k = 2):
"""
Naive k-means approach used in the paper.
"""
# Compute the sum for each row to use as a 1-dimensional representation of the row
rowsums = np.sum(M, axis=1)
row_idx_pairs = sorted(zip(rowsums, range(len(rowsums))), key=lambda x: x[0], reverse = True)
row_idx = [x[1] for x in row_idx_pairs]
# Cluster rows using kmeans
pred_labels = KMeans(n_clusters = k, random_state = 0).fit_predict(rowsums.reshape(-1, 1))
if len(true_part) == 2:
true_labels = [(0 if i in true_part[0] else 1) for i in range(len(true_part[0]) + len(true_part[1]))]
else:
true_labels = true_part
score = adjusted_rand_score(true_labels, pred_labels)
return score
def test_naive_cols_kmeans(M, true_part):
"""
Column/mutation-based k-means approach. Clusters columns/mutations into 3 sets using k-means on their sums, calls the set with largest sum clonal, and matches each row to the set of columns that it has the most 1-entries in.
"""
# Compute column sums
colsums = np.sum(M, axis=0)
# Cluster columns using kmeans
col_labels = KMeans(n_clusters = 3, random_state = 0).fit_predict(colsums.reshape(-1, 1))
# Call the section with the largest average column sum clonal
idx0 = []
idx1 = []
idx2 = []
for idx in range(len(col_labels)):
label = col_labels[idx]
if label == 0:
idx0.append(idx)
elif label == 1:
idx1.append(idx)
elif label == 2:
idx2.append(idx)
else:
print("uhoh")
colsums = colsums.reshape(-1, 1)
mean0 = np.mean(colsums[idx0])
mean1 = np.mean(colsums[idx1])
mean2 = np.mean(colsums[idx2])
if mean0 > mean1:
if mean0 > mean2:
clonal_idx = idx0
subclone1_idx = idx1
subclone2_idx = idx2
else: # sum2 >= mean0 > mean1
clonal_idx = idx2
subclone1_idx = idx0
subclone2_idx = idx1
else:
if mean2 > mean1: # mean2 > mean1 >= mean0
clonal_idx = idx2
subclone1_idx = idx0
subclone2_idx = idx1
else: # mean1 >= mean2, mean1 >= mean0
clonal_idx = idx1
subclone1_idx = idx0
subclone2_idx = idx2
# Score each cell by the number of mutations
partition_vector = np.zeros(M.shape[1])
partition_vector[subclone1_idx] = 1
partition_vector[subclone2_idx] = -1
partition_vector = partition_vector.reshape(-1, 1)
M = M.toarray()
partition_vals = np.matmul(M, partition_vector)
pred_labels = [(partition_vals[i] >= 0)[0] for i in range(M.shape[0])]
true_labels = [(0 if i in true_part[0] else 1) for i in range(len(true_part[0]) + len(true_part[1]))]
score = adjusted_rand_score(true_labels, pred_labels)
return score
def test_naive_rows_cutpoint(M, true_part, oracle=False):
"""
Finds a bipartition of the rows using their sums and a cutpoint (all rows with sum > cutpoint are in one set, all rows with sum <= cutpoint are in the other).
Sort the rows of the matrix by their sum and either:
Oracle=False: choose the mean of the sums as the cutpoint
Oracle=True: choose the cutpoint that maximizes ARI with the true_partition
"""
m, n = M.shape
# We sort the row sums, and use this order to compute the sets
rowsums = np.sum(M, axis=1)
row_idx_pairs = sorted(zip(rowsums, range(len(rowsums))), key=lambda x: x[0], reverse = True)
row_idx = [x[1] for x in row_idx_pairs]
cell_to_idx = {'cell%d' % c:c for c in range(M.shape[0])}
true_labels = [(0 if i in true_part[0] else 1) for i in range(len(true_part[0]) + len(true_part[1]))]
if oracle:
# March down the rows trying to split the rows into 2 sets
pred_labels = np.zeros(len(row_idx))
best = None
best_score = -1
for i in range(m):
# move cell index i from A to Abar
pred_labels[row_idx[i]] = 1
ari = adjusted_rand_score(true_labels, pred_labels)
if ari > best_score:
best_score = ari
best = pred_labels
score = best_score
else:
# Use the mean of the row sums as a threshold
cutpoint = np.mean(rowsums)
A = set()
i = 0
ordered_rowsums = [int(a[0]) for a in row_idx_pairs]
while ordered_rowsums[i] > cutpoint:
A.add('cell%d' % row_idx_pairs[i][1])
i += 1
Abar = set()
for j in range(i, M.shape[0]):
Abar.add('cell%d' % row_idx_pairs[j][1])
pred_labels = sets_to_clusters((A, Abar, set()), reverse_dict(cell_to_idx))
score = adjusted_rand_score(true_labels, pred_labels)
return score
def kmeans_remove_clonal(M):
"""
Use k-means to remove mutations that appear to be "clonal".
Clusters columns by their sum (each column is a point in 1D), and removes all columns in the cluster with the largest mean.
"""
# Cluster column sums using kmeans
colsums = np.sum(M, axis=0)
col_labels = KMeans(n_clusters = 3, random_state = 0).fit_predict(colsums.reshape(-1, 1))
# Call the section with the largest average column sum clonal
idx0 = []
idx1 = []
idx2 = []
for idx in range(len(col_labels)):
label = col_labels[idx]
if label == 0:
idx0.append(idx)
elif label == 1:
idx1.append(idx)
elif label == 2:
idx2.append(idx)
else:
print("uhoh")
colsums = colsums.reshape(-1, 1)
mean0 = np.mean(colsums[idx0])
mean1 = np.mean(colsums[idx1])
mean2 = np.mean(colsums[idx2])
if mean0 > mean1:
if mean0 > mean2:
clonal_idx = idx0
subclone1_idx = idx1
subclone2_idx = idx2
else: # sum2 >= mean0 > mean1
clonal_idx = idx2
subclone1_idx = idx0
subclone2_idx = idx1
else:
if mean2 > mean1: # mean2 > mean1 >= mean0
clonal_idx = idx2
subclone1_idx = idx0
subclone2_idx = idx1
else: # mean1 >= mean2, mean1 >= mean0
clonal_idx = idx1
subclone1_idx = idx0
subclone2_idx = idx2
return M[:, sorted(list(subclone1_idx) + list(subclone2_idx))]
########## Functions to test regularized spectral clustering proposed by Zhou & Amini (2019) ##########
def adjacency_regularization(A, tau = 3, norm = 'L1'):
# This function implements Algorithm 1 from Zhou and Amini Feb. 2019 paper in JMLR
### Notation correspondence to Algorithm 1 pseudocode ###
# rdegrees, cdegrees - D_1...D_n
# rdegrees_sorted, cdegrees_sorted - D_(1) ... D_(n_1)
# Dbar1/Dbar2 - \overbar{D}
# alpha1/alpha2 - \alpha
# threshold1/threshold2 - \hat{d}_1, \hat{d}_2
# idx1/idx2 - \hat{\mathcal{I}}_1, \hat{\mathcal{I}}_2
assert tau > 0
assert norm == 'L1' or norm == 'L2'
m, n = A.shape
rdegrees = np.array(np.sum(A, axis=1)).flatten()
rdegrees_sorted = np.sort(rdegrees)[::-1]
Dbar1 = np.mean(rdegrees)
alpha1 = min(int(np.floor(float(m) / Dbar1)), len(rdegrees) - 1)
threshold1 = tau * rdegrees_sorted[alpha1]
idx1 = [i for i in range(m) if rdegrees[i] >= threshold1]
cdegrees = np.array(np.sum(A, axis=0)).flatten()
cdegrees_sorted = np.sort(cdegrees)[::-1]
Dbar2 = np.mean(cdegrees)
alpha2 = min(int(np.floor(float(n) / Dbar2)), len(cdegrees) - 1)
threshold2 = tau * cdegrees_sorted[alpha2]
    idx2 = [i for i in range(n) if cdegrees[i] >= threshold2]
Are = A.copy().todense()
# Scale only those elements in rows/columns with degree above thresholds
if len(idx1) == 0:
print("No row-wise regularization")
else:
if norm == 'L1':
Are[idx1] = np.diag(float(threshold1) / rdegrees[idx1]) * A[idx1]
else:
Are[idx1] = np.diag(np.sqrt(float(threshold1) / rdegrees[idx1])) * A[idx1]
if len(idx2) == 0:
print("No column-wise regularization")
else:
if norm == 'L1':
Are[:, idx2] = A[:, idx2] * np.diag(float(threshold2) / cdegrees[idx2])
else:
Are[:, idx2] = A[:, idx2] * np.diag(np.sqrt(float(threshold2) / cdegrees[idx2]))
return Are
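# Illustrative sketch (added comment, not part of the original code): SC_RRE below
# calls this as
#     Are = adjacency_regularization(A, tau=1.5, norm='L2')
# i.e. rows and columns whose degree exceeds tau times the alpha-th largest degree
# are scaled down before the SVD step.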
def SC_RRE(A, tau = 1.5, norm = 'L2', n_vectors = None, rows_k = 2, cols_k = 3, classify_cols = False):
# This function implements Algorithm 4 from Zhou and Amini Feb. 2019 paper in JMLR
# Variables correspond with notation in the paper where possible
m, n = A.shape
if n_vectors is None:
n_vectors = min(rows_k, cols_k)
Are = adjacency_regularization(A, tau = tau, norm = norm)
Are = scipy.sparse.lil_matrix(Are)
Z1, s, Z2T = scipy.sparse.linalg.svds(Are, k=n_vectors)
s = np.diag(s)
Z2 = Z2T.transpose()
row_vals = np.matmul(Z1, s)
col_vals = np.matmul(Z2, s)
dim2 = n_vectors
# Cluster and partition rows
Z1 = np.matmul(Z1, s).reshape(-1, dim2)
print(Z1.shape, s.shape, A.shape)
x_model = KMeans(n_clusters = rows_k, random_state = 0).fit(Z1)
x_clusters = x_model.predict(Z1)
x_sets = {i:set() for i in range(rows_k)}
[x_sets[x_clusters[i]].add(i) for i in range(m)]
if not classify_cols:
return x_sets
else:
# Cluster and partition columns
Z2 = np.matmul(Z2, s).reshape(-1, dim2)
y_model = KMeans(n_clusters = cols_k, random_state = 0).fit(Z2)
y_clusters = y_model.predict(Z2)
y_sets = {i:set() for i in range(cols_k)}
[y_sets[y_clusters[i]].add(i) for i in range(n)]
return x_sets, y_sets
def test_SCRRE(M, true_labels, n_vectors = 1, tau = 1.5, norm = 'L1'):
m, _ = M.shape
x_sets = SC_RRE(M, n_vectors = n_vectors, tau = tau, norm = norm)
assert type(x_sets) == dict
pred_labels = [0 if i in x_sets[0] else 1 for i in range(m)]
return adjusted_rand_score(true_labels, pred_labels)
#adjusted_mutual_info_score(true_labels, pred_labels, average_method='arithmetic'), normalized_mutual_info_score(true_labels, pred_labels, average_method='arithmetic')
########## Functions to test spectral clustering ##########
def generate_and_test_firstsplit(design, row_thresholds, col_thresholds, true_first_split = [], cov = 0.01, seed = 0, max_cluster_size = 2, balanced = False):
mblocks = len(row_thresholds)
nblocks = len(col_thresholds)
if len(true_first_split) > 0:
assert len(true_first_split) == 2
assert sum([len(a) for a in true_first_split]) == len(row_thresholds)
assert all(a < mblocks for b in true_first_split for a in b)
assert all(a >= 0 for b in true_first_split for a in b)
else:
true_first_split = [0,1], list(range(2, 8))
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "Constructing matrix")
M0, true_csets, true_ssets = generate_toy_design(design, row_thresholds, col_thresholds, coverage = cov, seed = seed)
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "Merging cells within clones")
M1 = merge_perfect(M0, true_csets, max_cluster_size = max_cluster_size, seed = seed, balanced = balanced)
my_truth = set().union(*[true_csets[i] for i in true_first_split[0]]), set().union(*[true_csets[i] for i in true_first_split[1]])
print("First split: ")
first_ari, cm1, _, _ = partition_score(M1, my_truth, cmatrix = True)
return first_ari, cm1.tolist()
def generate_and_test(design, row_thresholds, col_thresholds, true_first_split = [], cov = 0.01, seed = 0, max_cluster_size = 2, balanced = False):
mblocks = len(row_thresholds)
nblocks = len(col_thresholds)
if len(true_first_split) > 0:
assert len(true_first_split) == 2
assert sum([len(a) for a in true_first_split]) == len(row_thresholds)
assert all(a < mblocks for b in true_first_split for a in b)
assert all(a >= 0 for b in true_first_split for a in b)
else:
true_first_split = [0,1], list(range(2, 8))
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "Constructing matrix")
M0, true_csets, true_ssets = generate_toy_design(design, row_thresholds, col_thresholds, coverage = cov, seed = seed)
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "Merging cells within clones")
M1 = merge_perfect(M0, true_csets, max_cluster_size = max_cluster_size, seed = seed, balanced = balanced)
print([len(a) for a in true_csets])
my_truth = set().union(*[true_csets[i] for i in true_first_split[0]]), set().union(*[true_csets[i] for i in true_first_split[1]])
print("First split: ")
first_ari, cm1, A, Abar = partition_score(M1, my_truth, cmatrix = True)
if len(A) < len(Abar):
Mleft = M0[sorted([int(a[4:]) for a in A])]
leftset = set([int(a[4:]) for a in A])
Mright = M0[sorted([int(a[4:]) for a in Abar])]
rightset = set([int(a[4:]) for a in Abar])
else:
Mright = M0[sorted([int(a[4:]) for a in A])]
rightset = set([int(a[4:]) for a in A])
Mleft = M0[sorted([int(a[4:]) for a in Abar])]
leftset = set([int(a[4:]) for a in Abar])
# Define the correct partition on the left side according to the cells that were placed there
left_truth = {i:set() for i in range(len(true_csets))}
idx1 = 0
right_truth = {i:set() for i in range(len(true_csets))}
idx2 = 0
missing_cells = 0
for i in range(M1.shape[0]):
my_sets = [j for j in range(len(true_csets)) if i in true_csets[j]]
if len(my_sets) == 1:
my_set = my_sets[0]
else:
my_set = len(right_truth) + 1
if i in leftset:
left_truth[my_set].add(idx1)
idx1 += 1
elif i in rightset:
right_truth[my_set].add(idx2)
idx2 += 1
else:
missing_cells += 1
print("Found {} missing cells when splitting into left and right".format(missing_cells))
if Mleft.shape[0] > 2:
print("Merging and partitioning {} cells in left branch: ".format(len(leftset)))
try:
M1left = merge_perfect(Mleft, [a for a in list(left_truth.values()) if len(a) > 0],
max_cluster_size = max_cluster_size, seed = seed, balanced = balanced)
left_ari, cmL, _, _= partition_score(M1left, left_truth, cmatrix = True)
except ValueError:
print("SCIPY VALUE ERROR: too few non-empty cells in this partition")
left_ari = 0
cmL = []
else:
print("Too few cells in left branch to partition ({})".format(Mleft.shape[0]))
left_ari = 0
        cmL = []
#print Mright.shape, np.sum(Mright)
if Mright.shape[0] > 2:
print("Merging and partitioning {} cells in right branch: ".format(len(rightset)))
try:
M1right = merge_perfect(Mright, [a for a in list(right_truth.values()) if len(a) > 0],
max_cluster_size = max_cluster_size, seed = seed, balanced = balanced)
right_ari, cmR, _, _= partition_score(M1right, right_truth, cmatrix = True)
except ValueError:
print("SCIPY VALUE ERROR: too few non-empty cells in this partition")
right_ari = 0
cmR = []
else:
print("Too few cells in right branch to partition ({})".format(Mright.shape[0]))
right_ari = 0
cmR = []
    to_list = lambda c: c.tolist() if hasattr(c, 'tolist') else c
    return first_ari, left_ari, right_ari, (to_list(cm1), to_list(cmL), to_list(cmR))
def declonal_cluster(M, true_part, true_clonal = False, cluster_spectral = False):
"""
Remove clonal SNVs (using k-means or the ground truth) before clustering (using spectral clustering or kmeans)
"""
ncells, nsnvs = M.shape
if true_clonal:
# use the true assignment of clonal SNVs
clonal_idx = [i for i in true_part[2]]
subclone1_idx = [i for i in true_part[3]]
subclone2_idx = [i for i in true_part[4]]
else:
# Cluster column sums using kmeans
colsums = np.sum(M, axis=0)
col_labels = KMeans(n_clusters = 3, random_state = 0).fit_predict(colsums.reshape(-1, 1))
# Call the section with the largest average column sum clonal
idx0 = []
idx1 = []
idx2 = []
for idx in range(len(col_labels)):
label = col_labels[idx]
if label == 0:
idx0.append(idx)
elif label == 1:
idx1.append(idx)
elif label == 2:
idx2.append(idx)
else:
print("uhoh")
colsums = colsums.reshape(-1, 1)
mean0 = np.mean(colsums[idx0])
mean1 = np.mean(colsums[idx1])
mean2 = np.mean(colsums[idx2])
if mean0 > mean1:
if mean0 > mean2:
clonal_idx = idx0
subclone1_idx = idx1
subclone2_idx = idx2
else: # sum2 >= mean0 > mean1
clonal_idx = idx2
subclone1_idx = idx0
subclone2_idx = idx1
else:
if mean2 > mean1: # mean2 > mean1 >= mean0
clonal_idx = idx2
subclone1_idx = idx0
subclone2_idx = idx1
else: # mean1 >= mean2, mean1 >= mean0
clonal_idx = idx1
subclone1_idx = idx0
subclone2_idx = idx2
# Count the number of subclonal mutations in each cell
rowsum_vector = np.zeros(nsnvs)
rowsum_vector[subclone1_idx] = 1
rowsum_vector[subclone2_idx] = 1
rowsum_vector = rowsum_vector.reshape(-1, 1)
if cluster_spectral:
M = M[:, sorted(list(subclone1_idx) + list(subclone2_idx))]
my_cellpart = [['cell{}'.format(a) for a in S] for S in true_part[:2]]
my_snvpart = [[], [], []]
idx = 0
for i in range(nsnvs):
if i in clonal_idx:
continue
elif i in subclone1_idx:
my_snvpart[1].append(idx)
else:
assert i in subclone2_idx
my_snvpart[2].append(idx)
idx += 1
return partition_score(M, my_cellpart + my_snvpart, verbose = False)[0]
else:
M = M.toarray()
# Cluster row sums using kmeans
rowsums = np.matmul(M, rowsum_vector)
pred_labels = KMeans(n_clusters = 2, random_state = 0).fit_predict(rowsums.reshape(-1, 1))
true_labels = [(0 if i in true_part[0] else 1) for i in range(len(true_part[0]) + len(true_part[1]))]
score = adjusted_rand_score(true_labels, pred_labels)
return score
########## Spectral clustering ##########
def compute_cell_partition(x, xclust=2, cellnames = None, GMM = False):
"""
Given low-dimensional representation x of rows/cells, cluster into <xclust> clusters using either KMeans (GMM = False) or GMM (GMM = True).
"""
assert xclust == 2 or xclust == 3
if cellnames == None:
cellnames = {i:i for i in range(len(x))}
else:
assert len(cellnames) == len(x)
dim2 = 1 if len(x.shape) == 1 else x.shape[1]
# cluster cells
x = x.reshape(-1, dim2)
if GMM:
x_model = GaussianMixture(n_components = xclust, random_state = 0).fit(x)
else:
x_model = KMeans(n_clusters = xclust, random_state = 0).fit(x)
x_clusters = x_model.predict(x)
A = set()
Abar = set()
Ox = set()
for i in range(len(x_clusters)):
cl = x_clusters[i]
tkn = cellnames[i]
if cl == 0:
A.add(tkn)
elif cl == 1:
Abar.add(tkn)
else:
assert xclust == 3
Ox.add(tkn)
if xclust == 3:
return A, Abar, Ox
else:
assert len(Ox) == 0
return A, Abar
def partition_score(Matrix, true_sets, cmatrix = False, verbose = True, partition_instead_of_sets = False):
"""
Partition the rows of the matrix using spectral clustering and compare the inferred partition to the true partition using ARI.
"""
# compute singular values
x, y, zero_rows, zero_cols = spectral_partition(Matrix)
cell_to_idx = {}
m, n = Matrix.shape
idx0 = 0
idx = 0
for i in range(m):
if idx0 < len(zero_rows) and zero_rows[idx0] == i: # this cell has degree 0 in the data
idx0 += 1 # increment the index in 0 rows list
else: # cell has some degree in the data
cell_to_idx['cell%d' % i] = idx # map the current space in the new list to this cell
idx += 1 # point to the next space in the new list
mut_to_idx = {}
idx0 = 0
idx = 0
for i in range(n):
if idx0 < len(zero_cols) and zero_cols[idx0] == i: # this mut has degree 0 in the data
idx0 += 1 # increment the index in 0 cols list
else: # cell has some degree in the data
mut_to_idx['mut%d' % i] = idx # map the current space in the new list to this mut
idx += 1 # point to the next space in the new list
# run kmeans on the singular values to get set assignments
A, Abar, B, Bbar, O = compute_partition(x, y, cellnames = reverse_dict(cell_to_idx), mutnames= reverse_dict(mut_to_idx))
# compute the true and inferred labels
if partition_instead_of_sets:
true_labels = true_sets
else:
true_labels = []
pred_labels = []
n_missing_pred = 0
n_missing_gt = 0
for i in range(m):
cell_key = 'cell{}'.format(i)
# compute true label if it was not supplied
if not partition_instead_of_sets:
my_true_sets = [j for j in range(len(true_sets)) if cell_key in true_sets[j]]
if len(my_true_sets) == 0:
#print "Cell missing from ground truth: {}".format(i)
true_labels.append(len(true_sets))
n_missing_gt += 1
else:
assert len(my_true_sets) == 1
true_labels.append(my_true_sets[0])
# compute predicted label
if cell_key in A:
pred_labels.append(0)
elif cell_key in Abar:
pred_labels.append(1)
else:
#print "oops: couldn't find cell <{}>".format(cell_key)
pred_labels.append(2)
n_missing_pred += 1
assert n_missing_pred == len(zero_rows)
ari = adjusted_rand_score(true_labels, pred_labels)
cmat = confusion_matrix(true_labels, pred_labels)
if verbose:
if n_missing_pred > 0:
print("Missing a total of {} cells from A/Abar (empty rows in the matrix)".format(n_missing_pred))
if n_missing_gt > 0:
print("Missing a total of {} cells from the ground truth (probably errors from first split)".format(n_missing_gt))
print(ari)
print(cmat)
if cmatrix:
return ari, cmat, A, Abar
else:
return ari, A, Abar
def spectral_partition(M, n_vectors = 1, verbose = False):
"""
Compute spectral representations of the rows and columns of M as in
Dhillon 2001.
Args:
M (scipy.sparse.coo_matrix): matrix to partition
n_vectors (int): number of singular vectors to use
verbose (bool): print the matrix dimensions and singular vectors
Returns:
        x: spectral representation of the non-empty rows
        y: spectral representation of the non-empty columns
        zero_rows: indices of all-zero rows that were removed
        zero_cols: indices of all-zero columns that were removed
"""
if not isinstance(M, scipy.sparse.csr_matrix):
raise TypeError("Matrix must be in CSR format.")
m0, n0 = M.shape
rowsums = np.sum(M, axis=1)
zero_rows = [i for i in range(m0) if rowsums[i] == 0]
M = csr_delete(M, zero_rows, axis=0)
rowsums = np.delete(rowsums, zero_rows, axis=0)
if verbose:
print(M.shape)
colsums = np.sum(M, axis=0).reshape(-1, 1)
zero_cols = [i for i in range(n0) if colsums[i] == 0]
M = csr_delete(M, zero_cols, axis=1)
colsums = np.delete(colsums, zero_cols, axis=0)
m, n = M.shape
if verbose:
print("Found %d nonzero rows and %d nonzero columns" % (m, n))
# Compute inverse sqrt diagonal matrices for original and transposed weight matrix
Dx_entries = []
for i in range(m):
Dx_entries.append(1 / np.sqrt(float(rowsums[i])))
a = list(range(m))
Dx = scipy.sparse.csr_matrix((Dx_entries, (a, a)), shape=(m, m))
Dy_entries = []
for i in range(n):
Dy_entries.append(1 / np.sqrt(float(colsums[i])))
b = list(range(n))
Dy = scipy.sparse.csr_matrix((Dy_entries, (b, b)), shape=(n, n))
# Scale the adjacency matrix
Mhat = Dx * M
Mhat = Mhat * Dy
# Compute singular values and get the second-largest left and right singular vectors
U, s, Vt = scipy.sparse.linalg.svds(Mhat, k=n_vectors + 1)
if verbose:
print("Singular values: ", s)
xhat = U[:,0:n_vectors]
yhat = Vt[0:n_vectors,:]
yhat = yhat.transpose()
# Scale the singular vectors using the diagonal matrices
x = Dx * xhat
y = Dy * yhat
return x, y, zero_rows, zero_cols
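# Illustrative sketch (added comment, not part of the original code): the typical
# pipeline used by partition_score above is
#     x, y, zero_rows, zero_cols = spectral_partition(M)
#     A, Abar, B, Bbar, Oy = compute_partition(x, y)
# where A/Abar partition the non-empty rows (cells) and B/Bbar/Oy the columns (SNVs).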
def compute_partition(x, y, xclust=2, yclust=3, cellnames = None, mutnames = None, together = False, GMM = False):
"""
Helper function for spectral partitioning as in Dhillon 2001.
Args:
x: low-dimensional representation of part 1 vertices
y: low-dimensional representaiton of part 2 vertices
xclust: number of clusters for part 1 vertices (default 2)
yclust: number of clusters for part 2 vertices (default 3)
cellnames: labels for part 1 vertices (default None)
mutnames: labels for part 2 vertices (default None)
together: if True, cluster all vertices simultaneously (default False)
GMM: if True, use Gaussian mixture model instead of k-means (default False)
Returns:
tuple with sets of partitioned vertices:
A/Abar(/Ox) partition the part 1 (cell) vertices
B/Bbar(/Oy) partition the part 2 (mutation) vertices
    Note: the implementation of this function is not very general and will require adjustment if many different values for xclust and yclust are desired.
"""
assert xclust == 2 or xclust == 3
assert yclust == 2 or yclust == 3
if cellnames == None:
cellnames = {i:i for i in range(len(x))}
else:
assert len(cellnames) == len(x)
if mutnames == None:
mutnames = {i:"mut%d" % i for i in range(len(y))}
else:
assert len(mutnames) == len(y)
dim2 = 1 if len(x.shape) == 1 else x.shape[1]
if together:
assert xclust == yclust
# cluster cells and SNVs jointly
all_data = np.concatenate((x, y)).reshape(-1, dim2)
if GMM:
model = GaussianMixture(n_components = xclust, random_state = 0).fit(all_data)
else:
model = KMeans(n_clusters = xclust, random_state = 0).fit(all_data)
all_clusters = model.predict(all_data)
x_clusters = all_clusters[:len(x)]
y_clusters = all_clusters[len(x):]
else:
# cluster cells and SNVs separately
x = x.reshape(-1, dim2)
y = y.reshape(-1, dim2)
if GMM:
x_model = GaussianMixture(n_components = xclust, random_state = 0).fit(x)
y_model = GaussianMixture(n_components = yclust, random_state = 0).fit(y)
else:
x_model = KMeans(n_clusters = xclust, random_state = 0).fit(x)
y_model = KMeans(n_clusters = yclust, random_state = 0).fit(y)
x_clusters = x_model.predict(x)
y_clusters = y_model.predict(y)
A = set()
Abar = set()
Ox = set()
for i in range(len(x_clusters)):
cl = x_clusters[i]
tkn = cellnames[i]
if cl == 0:
A.add(tkn)
elif cl == 1:
Abar.add(tkn)
else:
assert xclust == 3
Ox.add(tkn)
B = set()
Bbar = set()
Oy = set()
for i in range(len(y_clusters)):
cl = y_clusters[i]
tkn = mutnames[i]
if cl == 0:
B.add(tkn)
elif cl == 1:
Bbar.add(tkn)
else:
Oy.add(tkn)
if xclust == 3:
return A, Abar, Ox, B, Bbar, Oy
else:
assert len(Ox) == 0
return A, Abar, B, Bbar, Oy
########## Exhaustive heuristics related to spectral clustering ##########
def find_best_ARI(x, true_cl):
"""
    Given 1-dimensional representation x of objects (here, rows/cells) and a true labeling <true_cl> of these objects, exhaustively finds the cut-point that maximizes the ARI.
"""
sx = sorted(x)
cut = 0
best_ari = -1
best_cut = 0
while cut < len(sx):
my_cl = [(0 if x[i] <= sx[cut] else 1) for i in range(len(x))]
my_ari = adjusted_rand_score(my_cl, true_cl)
if my_ari > best_ari:
best_cut = cut
best_ari = my_ari
cut += 1
return best_cut, best_ari
def find_best_ncut(G, x, y, cell_to_idx=None, mut_to_idx=None):
"""
    Given bipartite graph G, 1-dimensional representation x of "left" vertices, and 1-dimensional representation y of "right" vertices, exhaustively finds the cut-points in these 1D representations that minimize the normalized cut.
"""
i2c = reverse_dict(cell_to_idx) if cell_to_idx != None else {i:'cell%d' % i for i in range(len(x))}
i2m = reverse_dict(mut_to_idx) if mut_to_idx != None else {i:'mut%d' % i for i in range(len(y))}
first = lambda t : t[0]
xs = sorted([(x[i], i2c[i]) for i in range(len(x))], key=first)
ys = sorted([(y[i], i2m[i]) for i in range(len(y))], key=first)
    best_ncut = float('inf')
best_detailed = None
best_cutpoints = (0, 0, 0)
A = set()
Abar = set(i2c.values())
B = set()
O = set()
Bbar = set(i2m.values())
i = 0
j = 0
while i < len(xs) - 1:
# move the cut-point for x over by 1 (loop through all cells with the same x-value)
while i < len(xs) - 1 and xs[i][0] == xs[i + 1][0]:
A.add(xs[i][1])
Abar.remove(xs[i][1])
i += 1
while j < len(ys) - 1:
assert len(O) == 0
# move the first cut-point for y (adding directly from Bbar to B)
while j < len(ys) - 1 and ys[j][0] == ys[j + 1][0]:
B.add(ys[j][1])
Bbar.remove(ys[j][1])
j += 1
# initialize O to be as large as possible
k = len(ys) - 1
            for l in range(j + 1, len(ys)):
                O.add(ys[l][1])
while k > j:
# move the second cut-point from the right side, increasing Bbar and decreasing O
while k > j + 1 and ys[k][0] == ys[k - 1][0]:
Bbar.add(ys[k][1])
O.remove(ys[k][1])
k -= 1
val = ncut(G, A, Abar, B, Bbar, O, cell_idx=cell_to_idx, mut_idx=mut_to_idx, detailed=True, use_idx=False)
if sum(val) < best_ncut:
best_ncut = sum(val)
best_detailed = val
best_cutpoints = (i, j, k)
Bbar.add(ys[k][1])
O.remove(ys[k][1])
k -= 1
B.add(ys[j][1])
Bbar.remove(ys[j][1])
j += 1
A.add(xs[i][1])
Abar.remove(xs[i][1])
i += 1
return best_ncut, best_detailed, best_cutpoints
def ncut(G, A, Abar, B, Bbar, O, use_idx=True, cell_idx=None, mut_idx=None, detailed=False):
"""
Given bipartite graph G with partition {A, Abar} of cell vertices and partition {B, Bbar, O} of mutations vertices, computes the normalized cut objective function from Section 5 of Zha et al., 2001 (http://ranger.uta.edu/~chqding/papers/BipartiteClustering.pdf).
"""
num1 = 0
denom1 = 0
num2 = 0
denom2 = 0
alpha = 1.0 / (len(B) + len(Bbar) + len(O))
for edge in G.edges:
source = edge[0]
dest = edge[1]
if use_idx:
            if cell_idx is not None:
if type(source) == str:
i = 'cell%d' % cell_idx[source]
j = 'mut%d' % mut_idx[dest]
else:
i = 'cell%d' % cell_idx[dest]
j = 'mut%d' % mut_idx[source]
else:
if source.startswith('cell'):
i = source
j = dest
elif source.startswith('mut'):
i = dest
j = source
else:
print("Source does not start with \"cell\" or \"mut\": ", source)
return -1
else:
if type(source) == str:
i = source
j = dest
elif type(source) == tuple:
i = dest
j = source
else:
print("Source is not a string or tuple: ", source)
return -1
if i in A:
if j in Bbar:
num1 += 1
num2 += 1
denom2 += 1
denom1 += 1
elif i in Abar:
if j in B:
num1 += 1
num2 += 1
denom1 += 1
denom2 += 1
else:
print("Source is not in A or Abar: ", i)
return -1
if denom1 == 0:
term1 = 0
else:
term1 = float(num1)/float(denom1)
if denom2 == 0:
term2 = 0
else:
term2 = float(num2)/float(denom2)
if detailed:
return term1, term2, alpha * len(O)
else:
return term1 + term2 + alpha * len(O)
def construct_graph(M):
"""
Given a matrix M, constructs the bipartite graph with biadjacency matrix M.
"""
G = nx.Graph()
n_cells, n_muts = M.shape
if sparse.issparse(M):
M = M.tocoo()
for i,j, v in zip(M.row, M.col, M.data):
assert v == 1
G.add_edge("cell{}".format(i), "mut{}".format(j))
else:
        for i in range(n_cells):
            for j in range(n_muts):
                if M[i][j] == 1:
                    G.add_edge("cell%d" % i, "mut%d" % j)
return G
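# End-to-end sketch on a toy matrix (names are illustrative only):
#   import numpy as np
#   M = np.array([[1, 1, 0], [0, 1, 1]])      # 2 cells x 3 mutations
#   G = construct_graph(M)
#   score = ncut(G, A={'cell0'}, Abar={'cell1'},
#                B={'mut0'}, Bbar={'mut2'}, O={'mut1'}, use_idx=True)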
|
StarcoderdataPython
|
155621
|
<gh_stars>0
import contextlib
import io
import logging
from typing import ( # noqa: F401
Iterator,
Set
)
from evm import opcode_values
from evm.validation import (
validate_is_bytes,
)
class CodeStream(object):
stream = None
depth_processed = None
logger = logging.getLogger('evm.vm.CodeStream')
def __init__(self, code_bytes: bytes) -> None:
validate_is_bytes(code_bytes, title="CodeStream bytes")
self.stream = io.BytesIO(code_bytes)
self.invalid_positions = set() # type: Set[int]
self.depth_processed = 0
def read(self, size: int) -> bytes:
return self.stream.read(size)
def __len__(self) -> int:
return len(self.stream.getvalue())
def __iter__(self) -> 'CodeStream':
return self
def __next__(self) -> int:
return self.next()
def __getitem__(self, i: int) -> int:
return self.stream.getvalue()[i]
def next(self) -> int:
next_opcode_as_byte = self.read(1)
if next_opcode_as_byte:
return ord(next_opcode_as_byte)
else:
return opcode_values.STOP
def peek(self) -> int:
current_pc = self.pc
next_opcode = next(self)
self.pc = current_pc
return next_opcode
@property
def pc(self):
return self.stream.tell()
@pc.setter
def pc(self, value):
self.stream.seek(min(value, len(self)))
@contextlib.contextmanager
def seek(self, pc: int) -> Iterator['CodeStream']:
anchor_pc = self.pc
self.pc = pc
try:
yield self
finally:
self.pc = anchor_pc
invalid_positions = None
def is_valid_opcode(self, position: int) -> bool:
if position >= len(self):
return False
if position in self.invalid_positions:
return False
if position <= self.depth_processed:
return True
else:
i = self.depth_processed
while i <= position:
opcode = self.__getitem__(i)
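                # PUSH1 (0x60 == 96) through PUSH32 carry 1..32 bytes of
                # immediate data; those byte positions hold data rather than
                # opcodes, so they are recorded in self.invalid_positions.
                # The push size is therefore (opcode - 95).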
if opcode >= opcode_values.PUSH1 and opcode <= opcode_values.PUSH32:
left_bound = (i + 1)
right_bound = left_bound + (opcode - 95)
invalid_range = range(left_bound, right_bound)
self.invalid_positions.update(invalid_range)
i = right_bound
else:
self.depth_processed = i
i += 1
if position in self.invalid_positions:
return False
else:
return True
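# Minimal usage sketch (bytecode is illustrative: PUSH1 0x02, PUSH1 0x03, ADD):
#   stream = CodeStream(b'\x60\x02\x60\x03\x01')
#   next(stream)                    # 96 (PUSH1)
#   stream.is_valid_opcode(1)       # False: position 1 is PUSH1's data byte
#   stream.is_valid_opcode(2)       # True: position 2 holds the second PUSH1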
|
StarcoderdataPython
|
144217
|
<filename>awx/api/views/webhooks.py<gh_stars>1-10
from hashlib import sha1
import hmac
import json
import logging
import urllib.parse
from django.utils.encoding import force_bytes
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.csrf import csrf_exempt
from rest_framework import status
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from awx.api import serializers
from awx.api.generics import APIView, GenericAPIView
from awx.api.permissions import WebhookKeyPermission
from awx.main.models import Job, JobTemplate, WorkflowJob, WorkflowJobTemplate
logger = logging.getLogger('awx.api.views.webhooks')
class WebhookKeyView(GenericAPIView):
serializer_class = serializers.EmptySerializer
permission_classes = (WebhookKeyPermission,)
def get_queryset(self):
qs_models = {
'job_templates': JobTemplate,
'workflow_job_templates': WorkflowJobTemplate,
}
self.model = qs_models.get(self.kwargs['model_kwarg'])
return super().get_queryset()
def get(self, request, *args, **kwargs):
obj = self.get_object()
return Response({'webhook_key': obj.webhook_key})
def post(self, request, *args, **kwargs):
obj = self.get_object()
obj.rotate_webhook_key()
obj.save(update_fields=['webhook_key'])
return Response({'webhook_key': obj.webhook_key}, status=status.HTTP_201_CREATED)
class WebhookReceiverBase(APIView):
lookup_url_kwarg = None
lookup_field = 'pk'
permission_classes = (AllowAny,)
authentication_classes = ()
ref_keys = {}
def get_queryset(self):
qs_models = {
'job_templates': JobTemplate,
'workflow_job_templates': WorkflowJobTemplate,
}
model = qs_models.get(self.kwargs['model_kwarg'])
if model is None:
raise PermissionDenied
return model.objects.filter(webhook_service=self.service).exclude(webhook_key='')
def get_object(self):
queryset = self.get_queryset()
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
filter_kwargs = {self.lookup_field: self.kwargs[lookup_url_kwarg]}
obj = queryset.filter(**filter_kwargs).first()
if obj is None:
raise PermissionDenied
return obj
def get_event_type(self):
raise NotImplementedError
def get_event_guid(self):
raise NotImplementedError
def get_event_status_api(self):
raise NotImplementedError
def get_event_ref(self):
key = self.ref_keys.get(self.get_event_type(), '')
value = self.request.data
for element in key.split('.'):
try:
if element.isdigit():
value = value[int(element)]
else:
value = (value or {}).get(element)
except Exception:
value = None
if value == '0000000000000000000000000000000000000000': # a deleted ref
value = None
return value
def get_signature(self):
raise NotImplementedError
def check_signature(self, obj):
if not obj.webhook_key:
raise PermissionDenied
mac = hmac.new(force_bytes(obj.webhook_key), msg=force_bytes(self.request.body), digestmod=sha1)
logger.debug("header signature: %s", self.get_signature())
logger.debug("calculated signature: %s", force_bytes(mac.hexdigest()))
if not hmac.compare_digest(force_bytes(mac.hexdigest()), self.get_signature()):
raise PermissionDenied
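    # For reference, a sender produces a matching signature along these lines
    # (sketch; the exact header format is service-specific and is parsed by
    # each receiver's get_signature()):
    #   mac = hmac.new(force_bytes(webhook_key), msg=payload, digestmod=sha1)
    #   header_value = 'sha1=' + mac.hexdigest()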
@csrf_exempt
def post(self, request, *args, **kwargs):
# Ensure that the full contents of the request are captured for multiple uses.
request.body
logger.debug(
"headers: {}\n"
"data: {}\n".format(request.headers, request.data)
)
obj = self.get_object()
self.check_signature(obj)
event_type = self.get_event_type()
event_guid = self.get_event_guid()
event_ref = self.get_event_ref()
status_api = self.get_event_status_api()
kwargs = {
'unified_job_template_id': obj.id,
'webhook_service': obj.webhook_service,
'webhook_guid': event_guid,
}
if WorkflowJob.objects.filter(**kwargs).exists() or Job.objects.filter(**kwargs).exists():
# Short circuit if this webhook has already been received and acted upon.
logger.debug("Webhook previously received, returning without action.")
return Response({'message': _("Webhook previously received, aborting.")},
status=status.HTTP_202_ACCEPTED)
kwargs = {
'_eager_fields': {
'launch_type': 'webhook',
'webhook_service': obj.webhook_service,
'webhook_credential': obj.webhook_credential,
'webhook_guid': event_guid,
},
'extra_vars': json.dumps({
'tower_webhook_event_type': event_type,
'tower_webhook_event_guid': event_guid,
'tower_webhook_event_ref': event_ref,
'tower_webhook_status_api': status_api,
'tower_webhook_payload': request.data,
})
}
new_job = obj.create_unified_job(**kwargs)
new_job.signal_start()
return Response({'message': "Job queued."}, status=status.HTTP_202_ACCEPTED)
class GithubWebhookReceiver(WebhookReceiverBase):
service = 'github'
ref_keys = {
'pull_request': 'pull_request.head.sha',
'pull_request_review': 'pull_request.head.sha',
'pull_request_review_comment': 'pull_request.head.sha',
'push': 'after',
'release': 'release.tag_name',
'commit_comment': 'comment.commit_id',
'create': 'ref',
'page_build': 'build.commit',
}
def get_event_type(self):
return self.request.META.get('HTTP_X_GITHUB_EVENT')
def get_event_guid(self):
return self.request.META.get('HTTP_X_GITHUB_DELIVERY')
def get_event_status_api(self):
if self.get_event_type() != 'pull_request':
return
return self.request.data.get('pull_request', {}).get('statuses_url')
def get_signature(self):
header_sig = self.request.META.get('HTTP_X_HUB_SIGNATURE')
if not header_sig:
logger.debug("Expected signature missing from header key HTTP_X_HUB_SIGNATURE")
raise PermissionDenied
hash_alg, signature = header_sig.split('=')
if hash_alg != 'sha1':
logger.debug("Unsupported signature type, expected: sha1, received: {}".format(hash_alg))
raise PermissionDenied
return force_bytes(signature)
class GitlabWebhookReceiver(WebhookReceiverBase):
service = 'gitlab'
ref_keys = {
'Push Hook': 'checkout_sha',
'Tag Push Hook': 'checkout_sha',
'Merge Request Hook': 'object_attributes.last_commit.id',
}
def get_event_type(self):
return self.request.META.get('HTTP_X_GITLAB_EVENT')
def get_event_guid(self):
# GitLab does not provide a unique identifier on events, so construct one.
h = sha1()
h.update(force_bytes(self.request.body))
return h.hexdigest()
def get_event_status_api(self):
if self.get_event_type() != 'Merge Request Hook':
return
project = self.request.data.get('project', {})
repo_url = project.get('web_url')
if not repo_url:
return
parsed = urllib.parse.urlparse(repo_url)
return "{}://{}/api/v4/projects/{}/statuses/{}".format(
parsed.scheme, parsed.netloc, project['id'], self.get_event_ref())
def get_signature(self):
return force_bytes(self.request.META.get('HTTP_X_GITLAB_TOKEN') or '')
def check_signature(self, obj):
if not obj.webhook_key:
raise PermissionDenied
# GitLab only returns the secret token, not an hmac hash. Use
# the hmac `compare_digest` helper function to prevent timing
# analysis by attackers.
if not hmac.compare_digest(force_bytes(obj.webhook_key), self.get_signature()):
raise PermissionDenied
|
StarcoderdataPython
|
3364794
|
<filename>ProjectApplication/project_core/migrations/0127_allocated_budget_project_status_default_change.py
# Generated by Django 3.0.7 on 2020-06-23 10:20
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('project_core', '0126_add_close_project_on_by_fields'),
]
operations = [
migrations.AlterField(
model_name='historicalproject',
name='allocated_budget',
field=models.DecimalField(decimal_places=2, default=0, help_text='Budget allocated to project', max_digits=10, validators=[django.core.validators.MinValueValidator(0)]),
preserve_default=False,
),
migrations.AlterField(
model_name='historicalproject',
name='status',
field=models.CharField(choices=[('Ongoing', 'Ongoing'), ('Completed', 'Completed'), ('Aborted', 'Aborted')], default='Ongoing', help_text='Status of a project', max_length=30),
),
migrations.AlterField(
model_name='project',
name='allocated_budget',
field=models.DecimalField(decimal_places=2, default=0, help_text='Budget allocated to project', max_digits=10, validators=[django.core.validators.MinValueValidator(0)]),
preserve_default=False,
),
migrations.AlterField(
model_name='project',
name='status',
field=models.CharField(choices=[('Ongoing', 'Ongoing'), ('Completed', 'Completed'), ('Aborted', 'Aborted')], default='Ongoing', help_text='Status of a project', max_length=30),
),
]
|
StarcoderdataPython
|
3365512
|
<gh_stars>0
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
from setuptools import setup, Command
import os
import re
class RunTestsCommand(Command):
description = "Test command to run testr in virtualenv"
user_options = [
('coverage', 'c',
"Generate code coverage report"),
]
boolean_options = ['coverage']
def initialize_options(self):
self.coverage = False
def finalize_options(self):
pass
def run(self):
logfname = 'test.log'
args = '-V'
if self.coverage:
logfname = 'coveragetest.log'
args += ' -c'
rc_sig = os.system('./run_tests.sh %s' % args)
if rc_sig >> 8:
os._exit(rc_sig >> 8)
with open(logfname) as f:
if not re.search('\nOK', ''.join(f.readlines())):
os._exit(1)
setup(
name='vnc_api',
# version='0.1dev',
version=open('vnc_api/version.info', 'r+').read().strip('\n').strip('\t'),
packages=['vnc_api',
'vnc_api.gen',
'vnc_api.gen.heat',
'vnc_api.gen.heat.resources',
'vnc_api.gen.heat.template',
'vnc_api.gen.heat.env',
],
long_description="VNC API Library Package",
package_data={'': ['*.yaml', '*.env', '*.info']},
install_requires=[
'requests>=1.1.0'
],
cmdclass={
'run_tests': RunTestsCommand,
},
)
|
StarcoderdataPython
|
1748754
|
<reponame>CylonicRaider/Instant
#!/usr/bin/env python3
# -*- coding: ascii -*-
"""
A log-keeping bot for Instant.
"""
import sys, os, re, time
import threading
import bisect
import contextlib
import signal
import json
import sqlite3
import websocket_server
import instabot
NICKNAME = 'Scribe'
VERSION = instabot.VERSION
MAXLEN = None
PING_DELAY = 3600 # 1 h
MAX_PINGS = 3
def parse_version(s):
if isinstance(s, float): s = str(s)
if s.startswith('v'): s = s[1:]
try:
return tuple(map(int, s.split('.')))
except (TypeError, ValueError):
return ()
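# e.g. parse_version('v1.5.1') == (1, 5, 1); malformed input yields ().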
class LogEntry(dict):
@staticmethod
def derive_timestamp(msgid):
# NOTE: Returns milliseconds since Epoch.
if isinstance(msgid, int):
return msgid >> 10
else:
return int(msgid, 16) >> 10
def __cmp__(self, other):
if isinstance(other, LogEntry):
oid = other['id']
elif isinstance(other, str):
oid = other
else:
raise NotImplementedError()
sid = self['id']
return 0 if sid == oid else 1 if sid > oid else -1
def __gt__(self, other):
return self.__cmp__(other) > 0
def __ge__(self, other):
return self.__cmp__(other) >= 0
def __eq__(self, other):
return self.__cmp__(other) == 0
def __ne__(self, other):
return self.__cmp__(other) != 0
def __le__(self, other):
return self.__cmp__(other) <= 0
def __lt__(self, other):
return self.__cmp__(other) < 0
class LogDB:
def __init__(self, maxlen=None):
if maxlen is None: maxlen = MAXLEN
self.maxlen = maxlen
def __enter__(self):
self.init()
return self
def __exit__(self, *exc_info):
self.close()
def init(self):
pass
def capacity(self):
return self.maxlen
def bounds(self):
raise NotImplementedError
def get(self, index):
raise NotImplementedError
def query(self, lfrom=None, lto=None, amount=None):
raise NotImplementedError
def append(self, entry):
return bool(self.extend((entry,)))
def extend(self, entries):
raise NotImplementedError
def delete(self, ids):
raise NotImplementedError
def append_uuid(self, uid, uuid):
raise NotImplementedError
def extend_uuid(self, mapping):
ret = []
for k, v in mapping.items():
if self.append_uuid(k, v): ret.append(k)
return ret
def get_uuid(self, uid):
raise NotImplementedError
def query_uuid(self, ids=None):
raise NotImplementedError
def close(self):
pass
class LogDBNull(LogDB):
def bounds(self):
return (None, None, None)
def get(self, index):
return None
def query(self, lfrom=None, lto=None, amount=None):
return []
def extend(self, entries):
return []
def delete(self, ids):
return []
def append_uuid(self, uid, uuid):
return False
def get_uuid(self, uid):
return False
def query_uuid(self, ids=None):
return []
class LogDBList(LogDB):
@staticmethod
def merge_logs(base, add, maxlen=None):
seen, added = set(i['id'] for i in base), set()
for e in add:
eid = e['id']
if eid in seen: continue
seen.add(eid)
added.add(eid)
base.append(e)
base.sort()
if maxlen:
base[:] = base[-maxlen:]
return added
def __init__(self, maxlen=None):
LogDB.__init__(self, maxlen)
self.data = []
self.uuids = {}
self._uuid_list = []
def bounds(self):
if not self.data:
return (None, None, None)
else:
return (self.data[0]['id'], self.data[-1]['id'], len(self.data))
def get(self, index):
try:
return self.data[index]
except IndexError:
return None
def query(self, lfrom=None, lto=None, amount=None):
if lfrom is None:
fromidx = None
else:
fromidx = bisect.bisect_left(self.data, lfrom)
if lto is None:
toidx = None
else:
toidx = bisect.bisect_right(self.data, lto)
if fromidx is not None and toidx is not None:
ret = self.data[fromidx:toidx]
elif fromidx is not None:
if amount is None:
ret = self.data[fromidx:]
else:
ret = self.data[fromidx:min(len(self.data),
fromidx + amount)]
elif toidx is not None:
if amount is None:
ret = self.data[:toidx]
else:
ret = self.data[max(0, toidx - amount):toidx]
elif amount is not None:
ret = self.data[len(self.data) - amount:]
else:
ret = self.data
return ret
def extend(self, entries):
return self.merge_logs(self.data, entries, self.maxlen)
def delete(self, ids):
idset, ndata, ret = set(ids), [], []
for i in self.data:
(ret if i['id'] in idset else ndata).append(i)
self.data[:] = ndata
return ret
def append_uuid(self, uid, uuid):
ret = (uid not in self.uuids)
self.uuids[uid] = uuid
self._uuid_list.append(uid)
if self.maxlen is not None and len(self._uuid_list) > 4 * self.maxlen:
# First, keep the most recent maxlen UUIDs.
keep = set()
for uid in reversed(self._uuid_list):
if len(keep) >= self.maxlen: break
keep.add(uid)
# Also, retain UUIDs referenced by logs.
keep.update(entry['from'] for entry in self.data)
# Extract the last appearances of the entries to be retained in
# their proper order.
new_uuid_list, seen = [], set()
for uid in reversed(self._uuid_list):
if uid in seen or uid not in keep: continue
seen.add(uid)
new_uuid_list.append(uid)
# Save the transformed usage list and trim old UUID mappings.
self._uuid_list[:] = reversed(new_uuid_list)
for uid in tuple(self.uuids):
if uid in seen: continue
del self.uuids[uid]
return ret
def get_uuid(self, uid):
return self.uuids.get(uid)
def query_uuid(self, ids=None):
if not ids: return self.uuids
ret = {}
for u in ids:
try:
ret[u] = self.uuids[u]
except KeyError:
pass
return ret
class LogDBSQLite(LogDB):
@staticmethod
def make_msgid(key):
return (None if key is None else '%016X' % key)
@staticmethod
def make_key(msgid):
return (None if msgid is None else int(msgid, 16))
@staticmethod
def make_strkey(msgid):
return (None if msgid is None else str(int(msgid, 16)))
def __init__(self, filename, maxlen=None):
LogDB.__init__(self, maxlen)
self.filename = filename
def init(self):
self.conn = sqlite3.connect(self.filename)
# Allow tuning DB performance.
sync = os.environ.get('SCRIBE_DB_SYNC', '')
if re.match(r'^[A-Za-z0-9]+$', sync):
self.conn.execute('PRAGMA synchronous = ' + sync)
# Create cursor.
self.cursor = self.conn.cursor()
# The REFERENCES is not enforced to allow "stray" messages to
# be preserved.
self.cursor.execute('CREATE TABLE IF NOT EXISTS logs ('
'msgid INTEGER PRIMARY KEY, '
'parent INTEGER, ' # REFERENCES msgid
'sender INTEGER, '
'nick TEXT, '
'text TEXT'
')')
self.cursor.execute('CREATE TABLE IF NOT EXISTS uuid ('
'user INTEGER PRIMARY KEY, '
'uuid TEXT'
')')
self.conn.commit()
def capacity(self):
return None
def _wrap(self, row):
msgid, parent, sender, nick, text = row
return LogEntry(id=self.make_msgid(msgid),
parent=self.make_msgid(parent),
nick=nick,
text=text,
timestamp=LogEntry.derive_timestamp(msgid),
**{'from': self.make_msgid(sender)})
def _unwrap(self, entry):
return (self.make_key(entry['id']),
self.make_key(entry['parent']),
self.make_key(entry['from']),
entry['nick'],
entry['text'])
def _try_unwrap(self, entry):
# People are known to have actually injected bad message ID-s
try:
return self._unwrap(entry)
except (KeyError, ValueError):
return None
def bounds(self):
self.cursor.execute('SELECT MIN(msgid), MAX(msgid), '
'COUNT(msgid) FROM logs')
first, last, count = self.cursor.fetchone()
if not count: count = None
return (self.make_msgid(first), self.make_msgid(last), count)
def get(self, index):
if index >= 0:
self.cursor.execute('SELECT * FROM logs '
'ORDER BY msgid ASC '
'LIMIT 1 OFFSET ?', (str(index),))
else:
self.cursor.execute('SELECT * FROM logs '
'ORDER BY msgid DESC '
'LIMIT 1 OFFSET ?', (str(-index - 1),))
res = self.cursor.fetchone()
if res is None: return None
return self._unwrap(res)
def query(self, lfrom=None, lto=None, amount=None):
fromkey = self.make_strkey(lfrom)
tokey = self.make_strkey(lto)
if amount is None:
amount = None if self.maxlen is None else str(self.maxlen)
else:
amount = str(amount)
flip = False
if fromkey is not None and tokey is not None:
stmt = ('SELECT * FROM logs WHERE msgid BETWEEN ? AND ? '
'ORDER BY msgid ASC', (fromkey, tokey))
elif fromkey is not None:
if amount is not None:
stmt = ('SELECT * FROM logs WHERE msgid >= ? '
'ORDER BY msgid ASC LIMIT ?', (fromkey, amount))
else:
stmt = ('SELECT * FROM logs WHERE msgid >= ? '
'ORDER BY msgid ASC', (fromkey,))
elif tokey is not None:
if amount is not None:
stmt = ('SELECT * FROM logs WHERE msgid <= ? '
'ORDER BY msgid DESC LIMIT ?', (tokey, amount))
else:
stmt = ('SELECT * FROM logs WHERE msgid <= ? '
'ORDER BY msgid DESC', (tokey,))
flip = True
elif amount is not None:
stmt = ('SELECT * FROM logs ORDER BY msgid DESC '
'LIMIT ?', (amount,))
flip = True
else:
stmt = ('SELECT * FROM logs ORDER BY msgid',)
self.cursor.execute(*stmt)
data = self.cursor.fetchall()
if flip: data.reverse()
return list(map(self._wrap, data))
def append(self, entry):
row = self._try_unwrap(entry)
if not row: return False
self.cursor.execute('INSERT OR REPLACE INTO logs ('
'msgid, parent, sender, nick, text) VALUES (?, ?, ?, ?, ?)',
row)
self.conn.commit()
return True
def extend(self, entries):
added = []
for e in entries:
sk = self.make_strkey(e['id'])
self.cursor.execute('SELECT 1 FROM logs WHERE msgid = ?',
(sk,))
if not self.cursor.fetchone(): added.append(e['id'])
added.sort()
rows = filter(None, map(self._try_unwrap, entries))
self.cursor.executemany('INSERT OR REPLACE INTO logs ('
'msgid, parent, sender, nick, text) VALUES (?, ?, ?, ?, ?)',
rows)
self.conn.commit()
return added
def delete(self, ids):
ret, msgids = [], [self.make_key(i) for i in ids]
for i in msgids:
self.cursor.execute('SELECT * FROM logs WHERE msgid = ?', (i,))
ret.extend(self.cursor.fetchall())
self.cursor.executemany('DELETE FROM logs WHERE msgid = ?',
((i,) for i in msgids))
self.conn.commit()
return list(map(self._wrap, ret))
def append_uuid(self, uid, uuid):
key = self.make_strkey(uid)
try:
self.cursor.execute('INSERT INTO uuid (user, uuid) '
'VALUES (?, ?)', (key, uuid))
self.conn.commit()
return True
except sqlite3.IntegrityError:
self.cursor.execute('UPDATE uuid SET uuid = ? WHERE user = ?',
(uuid, key))
self.conn.commit()
return False
def extend_uuid(self, mapping):
added = []
for k in mapping.keys():
self.cursor.execute('SELECT 1 FROM uuid WHERE user = ?',
(self.make_strkey(k),))
if not self.cursor.fetchone(): added.append(k)
self.cursor.executemany('INSERT OR REPLACE INTO uuid (user, uuid) '
'VALUES (?, ?)',
((self.make_strkey(k), v) for k, v in mapping.items()))
self.conn.commit()
return added
def get_uuid(self, uid):
self.cursor.execute('SELECT uuid FROM uuid WHERE user = ?',
(self.make_strkey(uid),))
res = self.cursor.fetchone()
return (None if res is None else res[0])
def query_uuid(self, ids=None):
ret = {}
if not ids:
if self.maxlen is None:
self.cursor.execute('SELECT user, uuid FROM uuid '
'ORDER BY user DESC')
else:
self.cursor.execute('SELECT user, uuid FROM uuid '
'ORDER BY user DESC '
'LIMIT ?', (str(self.maxlen),))
for k, v in self.cursor:
ret[k] = v
return ret
for u in ids:
uuid = self.get_uuid(u)
if uuid is not None: ret[u] = uuid
return ret
def close(self):
self.conn.close()
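# Sketch: the LogDB classes support use as context managers, e.g.
#   with LogDBSQLite('scribe.sqlite') as db:
#       db.append(LogEntry(id='0123456789ABCDEF', parent=None, nick='bob',
#                          text='hi', **{'from': None}))
#       print(db.bounds())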
def read_posts_ex(logger, maxlen=None, filt=None):
def truncate(ret, uuids):
delset, kset = set(dels), set(sorted(uuids)[-maxlen:])
cur_ids = set(i['id'] for i in ret)
dels[:] = [d for d in dels if d not in cur_ids]
ret = [i for i in ret if i['id'] not in delset]
ret.sort()
ret = ret[-maxlen:]
kset.update(i['from'] for i in ret)
uuids = dict((k, v) for k, v in uuids.items() if k in kset)
seen.intersection_update(i['id'] for i in ret)
return (ret, uuids)
def prune(ret, uuids):
delset = set(dels)
ret = [i for i in ret if i['id'] not in delset]
return (ret, uuids)
allow_tags = set(('SCRIBE', 'POST', 'LOGPOST', 'MESSAGE', 'DELETE',
'UUID'))
cver, froms, dels, ret, uuids, seen = (), {}, [], [], {}, set()
n = 0
for ts, tag, values in logger.read_back(allow_tags.__contains__):
if tag == 'SCRIBE':
cver = parse_version(values.get('version'))
if cver >= (1, 2): allow_tags.discard('MESSAGE')
continue
if tag in ('POST', 'LOGPOST'):
pass
elif tag == 'MESSAGE':
try:
msg = json.loads(values.get('content'))
except (TypeError, ValueError):
continue
if msg.get('type') not in ('broadcast', 'unicast'):
continue
try:
msgid = msg['id']
except KeyError:
continue
msgd = msg.get('data', {})
if msgd.get('type') == 'post':
froms[msgid] = msg.get('from')
elif msgd.get('type') == 'log':
for v in msgd.get('data', ()):
try:
froms[v['id']] = v['from']
except KeyError:
pass
continue
elif tag == 'DELETE':
try:
dels.append(values['id'])
except KeyError:
pass
continue
elif tag == 'UUID':
try:
uuids[values['id']] = values['uuid']
except KeyError:
pass
continue
else:
continue
if 'id' not in values or values['id'] in seen:
continue
seen.add(values['id'])
values = LogEntry(values)
if 'timestamp' not in values:
values['timestamp'] = LogEntry.derive_timestamp(values['id'])
if 'text' not in values and 'content' in values:
values['text'] = values['content']
del values['content']
if 'from' not in values and values['id'] in froms:
values['from'] = froms.pop(values['id'])
if filt and not filt(values, {'uuid': uuids.get(values['id'])}):
continue
ret.append(values)
if maxlen is not None and len(ret) >= 2 * maxlen:
ret, uuids = truncate(ret, uuids)
if maxlen is not None:
ret, uuids = truncate(ret, uuids)
else:
ret, uuids = prune(ret, uuids)
ret.sort(key=lambda x: x['id'])
return (ret, uuids)
def read_posts(logger, maxlen=None, filt=None):
return read_posts_ex(logger, maxlen, filt)[0]
class Scribe(instabot.Bot):
NICKNAME = NICKNAME
def __init__(self, url, nickname=Ellipsis, **kwds):
instabot.Bot.__init__(self, url, nickname, **kwds)
self.scheduler = kwds['scheduler']
self.db = kwds['db']
self.dont_stay = kwds.get('dont_stay', False)
self.dont_pull = kwds.get('dont_pull', False)
self.ping_delay = kwds.get('ping_delay', PING_DELAY)
self.max_pings = kwds.get('max_pings', MAX_PINGS)
self.push_logs = kwds.get('push_logs', [])
self.reconnect = True
self._selecting_candidate = False
self._cur_candidate = None
self._already_loaded = {}
self._logs_done = False
self._ping_job = None
self._last_pong = None
self._ping_lock = threading.RLock()
def connect(self):
self.scheduler.set_forever(True)
return instabot.Bot.connect(self)
def on_open(self):
instabot.Bot.on_open(self)
self._last_pong = None
def on_message(self, rawmsg):
self.logger.log('MESSAGE content=%r' % (rawmsg,))
instabot.Bot.on_message(self, rawmsg)
def on_connection_error(self, exc):
self.logger.log_exception('ERROR', exc)
def on_close(self, final):
instabot.Bot.on_close(self, final)
with self._ping_lock:
if self._ping_job is not None:
self.scheduler.cancel(self._ping_job)
self._ping_job = None
self.scheduler.set_forever(False)
def handle_pong(self, content, rawmsg):
instabot.Bot.handle_pong(self, content, rawmsg)
self._last_pong = time.time()
def handle_identity(self, content, rawmsg):
self._send_ping()
instabot.Bot.handle_identity(self, content, rawmsg)
self.send_seq({'type': 'who'})
self.send_broadcast({'type': 'who'})
self._execute(self._push_logs)
if not self.dont_pull:
self._logs_begin()
self.scheduler.set_forever(False)
def handle_who(self, content, rawmsg):
instabot.Bot.handle_who(self, content, rawmsg)
data = content['data']
self._execute(self._process_who, data=data)
def handle_joined(self, content, rawmsg):
instabot.Bot.handle_joined(self, content, rawmsg)
data = content['data']
self._execute(self._process_joined, uid=data['id'], uuid=data['uuid'])
def on_client_message(self, data, content, rawmsg):
instabot.Bot.on_client_message(self, data, content, rawmsg)
tp = data.get('type')
if tp == 'nick':
# Someone sharing their nick.
self._execute(self._process_nick, uid=content['from'],
nick=data.get('nick'), uuid=data.get('uuid'))
elif tp == 'post':
# An individual message.
data['id'] = content['id']
data['from'] = content['from']
data['timestamp'] = content['timestamp']
self._execute(self._process_post, data=data)
elif tp == 'log-query':
# Someone interested in our logs.
self._execute(self._process_log_query, uid=content['from'])
elif tp == 'log-info':
# Someone telling about their logs.
if self.dont_pull: return
self._execute(self._process_log_info, data=data,
uid=content['from'])
elif tp == 'log-request':
# Someone requesting some logs.
self._execute(self._process_log_request, data=data,
uid=content['from'])
elif tp == 'log':
# Someone delivering logs.
self._execute(self._process_log, data=data, uid=content['from'])
elif tp == 'delete':
# Message deletion request.
self._execute(self._delete, ids=data.get('ids', ()),
cause=content['from'])
elif tp == 'log-inquiry':
# Inquiry about whether we are done loading logs.
if self._logs_done:
self.send_unicast(content['from'], {'type': 'log-done'})
elif tp == 'log-done':
# Someone is done loading logs.
if self.dont_stay and self.dont_pull:
self.reconnect = False
self.close()
elif tp == 'privmsg':
# Someone is PM-ing us.
# Just log it.
data['id'] = content['id']
data['from'] = content['from']
data['timestamp'] = content['timestamp']
self._process_pm(data)
def send_raw(self, rawmsg, verbose=True):
if verbose:
self.logger.log('SEND content=%r' % (rawmsg,))
return instabot.Bot.send_raw(self, rawmsg)
def run(self, *args, **kwds):
try:
instabot.Bot.run(self, *args, **kwds)
except Exception as exc:
self.logger.log_exception('CRASHED', exc)
sys.stderr.write('\n***EXCEPTION*** at %s\n' %
time.strftime('%Y-%m-%d %H:%M:%S Z', time.gmtime()))
sys.stderr.flush()
raise
def process_logs(self, rawlogs, uuids):
logs = []
for e in rawlogs:
if not isinstance(e, dict): continue
logs.append(LogEntry(id=e.get('id'), parent=e.get('parent'),
nick=e.get('nick'), timestamp=e.get('timestamp'),
text=e.get('text'), **{'from': e.get('from')}))
logs.sort()
added = set(self.db.extend(logs))
for e in logs:
eid = e['id']
if eid not in added: continue
self.logger.log(
'LOGPOST id=%r parent=%r from=%r nick=%r text=%r' %
(eid, e['parent'], e['from'], e['nick'], e['text']))
uuid_added = self.db.extend_uuid(uuids)
uuid_added.sort()
for k in uuid_added:
self.logger.log('LOGUUID id=%r uuid=%r' % (k, uuids[k]))
return (added, uuid_added)
def send_logs(self, peer, data):
data.setdefault('type', 'log')
ls = 'LOGSEND to=%r' % (peer,)
if data['data']:
ret = data['data']
ls += ' log-from=%r log-to=%r log-count=%r' % (ret[0]['id'],
ret[-1]['id'], len(ret))
else:
ls += ' log-count=0'
if data.get('key'):
ls += ' key=%r' % (data.get('key'),)
self.logger.log(ls)
return self.send_unicast(peer, data, verbose=False)
def _execute(self, func, *args, **kwds):
self.scheduler.add_now(lambda: func(*args, **kwds))
def _process_who(self, data):
for uid, info in data.items():
self._process_nick(uid, uuid=info['uuid'])
def _process_joined(self, uid, uuid=None):
self._process_nick(uid, uuid=uuid)
if self._selecting_candidate:
self.send_unicast(uid, {'type': 'log-query'})
def _process_nick(self, uid, nick=None, uuid=None):
if nick:
if uuid:
self.logger.log('NICK id=%r uuid=%r nick=%r' % (uid, uuid,
nick))
else:
self.logger.log('NICK id=%r nick=%r' % (uid, nick))
if uuid:
if self.db.append_uuid(uid, uuid):
self.logger.log('UUID id=%r uuid=%r' % (uid, uuid))
def _process_post(self, data):
post = LogEntry(id=data.get('id'), parent=data.get('parent'),
nick=data.get('nick'), text=data.get('text'),
timestamp=data.get('timestamp'),
**{'from': data.get('from')})
self.logger.log('POST id=%r parent=%r from=%r nick=%r text=%r' %
(post['id'], post['parent'], post['from'],
post['nick'], post['text']))
self.db.append(post)
def _process_log_query(self, uid):
bounds = self.db.bounds()
if bounds[2] and uid != self.identity['id']:
self.send_unicast(uid, {'type': 'log-info', 'from': bounds[0],
'to': bounds[1], 'length': bounds[2]})
def _process_log_info(self, data, uid):
if not data.get('from') or uid == self.identity['id']:
return
if (self._cur_candidate is None or
data['from'] < self._cur_candidate['from']):
dbfrom = self.db.bounds()[0]
if dbfrom is not None and data['from'] < dbfrom:
data['reqto'] = dbfrom
elif uid in self._already_loaded:
data['reqto'] = self._already_loaded[uid]
else:
data['reqto'] = data['to']
self._cur_candidate = data
self.scheduler.add(1, lambda: self._send_request(data, uid))
def _send_request(self, data, uid=None):
if self._cur_candidate is not data:
return
self._selecting_candidate = False
if data is None or uid is None:
self._logs_finish()
return
self.send_unicast(uid, {'type': 'log-request', 'to': data['reqto']})
def _process_log_request(self, data, uid):
logs = self.db.query(data.get('from'), data.get('to'),
data.get('length'))
response = {'data': logs,
'uuids': self.db.query_uuid(ent['from'] for ent in logs)}
if data.get('key') is not None: response['key'] = data['key']
self.send_logs(uid, response)
def _process_log(self, data, uid):
rawlogs, uuids = data.get('data', []), data.get('uuids', {})
for k, v in data.get('users', {}).items():
u = v.get('uuid')
if u: uuids[k] = u
res = self.process_logs(rawlogs, uuids)
if not self.dont_pull:
if rawlogs:
self._already_loaded[uid] = min(i['id'] for i in rawlogs)
if res[0] or res[1]:
self._logs_begin()
else:
self._logs_finish()
def _delete(self, ids, cause=None):
handled = set()
for msg in self.db.delete(ids):
handled.add(msg['id'])
self.logger.log(
'DELETE by=%r id=%r parent=%r from=%r nick=%r text=%r' %
(cause, msg['id'], msg['parent'], msg['from'],
msg['nick'], msg['text']))
for i in ids:
if i in handled: continue
self.logger.log('DELETE by=%r id=%r' % (cause, i))
def _process_pm(self, data):
self.logger.log(
'PRIVMSG id=%r parent=%r from=%r nick=%r subject=%r text=%r' %
(data['id'], data.get('parent'), data['from'],
data.get('nick'), data.get('subject'), data.get('text')))
def _push_logs(self, peer=None):
if peer is None:
if not self.push_logs: return
peer = self.push_logs.pop(0)
do_again = bool(self.push_logs)
inquire = (not do_again)
else:
do_again, inquire = False, False
bounds = self.db.bounds()
data = self.db.query(bounds[0], bounds[1])
uuids = self.db.query_uuid(ent['from'] for ent in data)
self.send_logs(peer, {'data': data, 'uuids': uuids})
if do_again:
self._execute(self._push_logs)
elif inquire:
self.send_broadcast({'type': 'log-inquiry'})
def _logs_begin(self):
self._selecting_candidate = True
self._cur_candidate = None
self.send_broadcast({'type': 'log-query'})
self.scheduler.add(1, lambda: self._send_request(None))
def _logs_finish(self):
if not self._logs_done:
self.send_broadcast({'type': 'log-done'})
self._logs_done = True
if self.dont_stay:
self.reconnect = False
self.close()
def _send_ping(self):
now = time.time()
if (self._last_pong is not None and now >= self._last_pong +
self.max_pings * self.ping_delay):
self.close()
return
self.send_seq({'type': 'ping',
'next': (now + self.ping_delay) * 1000})
with self._ping_lock:
self._ping_job = self.scheduler.add(self.ping_delay,
self._send_ping)
def main():
@contextlib.contextmanager
def openarg(fname):
if fname == '-':
yield sys.stdin
else:
with open(fname) as f:
yield f
def install_sighandler(signum, callback):
try:
            signal.signal(signum, callback)
except Exception as e:
logger.log_exception('WARNING', e)
def interrupt(signum, frame):
raise SystemExit
def handle_crash(exc):
        logger.log_exception('CRASHED', exc)
sys.stderr.write('\n***CRASH*** at %s\n' %
time.strftime('%Y-%m-%d %H:%M:%S Z', time.gmtime()))
sys.stderr.flush()
raise
b = instabot.CmdlineBotBuilder(Scribe, NICKNAME, None)
p = b.make_parser(sys.argv[0],
desc='An Instant bot storing room logs.')
p.option('maxlen', MAXLEN, type=int,
help='Maximum amount of logs to deliver')
p.option('msgdb', placeholder='<file>', default=Ellipsis,
help='SQLite database file for messages')
p.flag_ex('no-msgdb', None, 'msgdb',
help='Do not store messages at all')
p.option('read-file', placeholder='<file>',
help='Parse log file for messages')
p.option('read-rotate', placeholder='<time>[:<compress>]',
help='Assume the file has been rotated as given')
p.option('push-logs', [], accum=True, varname='push_logs',
placeholder='<id>', help='Send logs to given ID without asking')
p.flag('dont-stay', varname='dont_stay',
help='Exit after collecting logs')
p.flag('dont-pull', varname='dont_pull', help='Do not collect logs')
b.parse(sys.argv[1:])
b.add_args('push_logs', 'dont_stay', 'dont_pull')
maxlen, msgdb_file = b.get_args('maxlen', 'msgdb')
read_file, read_rotate = b.get_args('read-file', 'read-rotate')
logger = b.kwds.get('logger', instabot.DEFAULT_LOGGER)
logger.log('SCRIBE version=%s' % VERSION)
install_sighandler(signal.SIGINT, interrupt)
install_sighandler(signal.SIGTERM, interrupt)
logger.log('OPENING file=%r maxlen=%r' % (msgdb_file, maxlen))
try:
if msgdb_file is None:
msgdb = LogDBNull(maxlen)
elif msgdb_file is Ellipsis:
msgdb = LogDBList(maxlen)
else:
msgdb = LogDBSQLite(msgdb_file, maxlen)
msgdb.init()
except Exception as e:
handle_crash(e)
if read_file is not None:
logger.log('READING file=%r rotation=%r maxlen=%r' % (read_file,
read_rotate, msgdb.capacity()))
try:
with b.build_logger(read_file, read_rotate) as l:
logs, uuids = read_posts_ex(l, msgdb.capacity())
msgdb.extend(logs)
msgdb.extend_uuid(uuids)
logs, uuids = None, None
except IOError as e:
logger.log('ERROR reason=%r' % repr(e))
logger.log('LOGBOUNDS from=%r to=%r amount=%r' % msgdb.bounds())
if b.get_args('url') is None:
logger.log('EXITING')
return
sched = instabot.EventScheduler()
bot = b(scheduler=sched, db=msgdb, keepalive=False, logger=logger)
thr = None
try:
while 1:
sched.set_forever(True)
canceller = instabot.Canceller()
thr = bot.start(canceller)
try:
sched.run()
except websocket_server.ConnectionClosedError:
pass
sched.clear()
canceller.cancel()
ws = bot.ws
if ws: ws.close_now()
bot.close(False)
thr.join(1)
if not bot.reconnect: break
time.sleep(1)
except (KeyboardInterrupt, SystemExit) as e:
bot.close()
if isinstance(e, SystemExit):
logger.log('EXITING')
else:
logger.log('INTERRUPTED')
except Exception as e:
handle_crash(e)
finally:
if thr: thr.join(1)
msgdb.close()
if __name__ == '__main__': main()
|
StarcoderdataPython
|
105080
|
from OpenAttack import substitute
import sys, os
sys.path.insert(0, os.path.join(
os.path.dirname(os.path.abspath(__file__)),
".."
))
import OpenAttack
def get_attackers_on_chinese(dataset, clsf):
triggers = OpenAttack.attackers.UATAttacker.get_triggers(clsf, dataset, clsf.tokenizer)
attackers = [
OpenAttack.attackers.FDAttacker(token_unk=clsf.token_unk, lang="chinese"),
OpenAttack.attackers.UATAttacker(triggers=triggers, lang="chinese"),
OpenAttack.attackers.TextBuggerAttacker(lang="chinese"),
OpenAttack.attackers.GeneticAttacker(lang="chinese", filter_words=["的", "了", "着"]),
OpenAttack.attackers.PWWSAttacker(lang="chinese"),
OpenAttack.attackers.PSOAttacker(lang="chinese")
]
return attackers
|
StarcoderdataPython
|
3237355
|
import copy
import datetime
import decimal
import json
import uuid
import pytest
from boto3.dynamodb.types import TypeSerializer
from botocore import stub
from fixtures import context, lambda_module # pylint: disable=import-error
from helpers import compare_dict # pylint: disable=import-error,no-name-in-module
lambda_module = pytest.fixture(scope="module", params=[{
"function_dir": "get_order",
"module_name": "main",
"environ": {
"ENVIRONMENT": "test",
"TABLE_NAME": "TABLE_NAME",
"USER_INDEX_NAME": "USER_INDEX_NAME",
"ORDERS_LIMIT": "20",
"POWERTOOLS_TRACE_DISABLED": "true"
}
}])(lambda_module)
context = pytest.fixture(context)
@pytest.fixture
def apigateway_event(order):
"""
API Gateway Lambda Proxy event
See https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-input-format
"""
return {
"resource": "/backend/{orderId}",
"path": "/backend/"+order["orderId"],
"httpMethod": "GET",
"headers": {},
"multiValueHeaders": {},
"queryStringParameters": None,
"multiValueQueryStringParameters": {},
"pathParameters": {
"orderId": order["orderId"]
},
"stageVariables": {},
"requestContext": {
"identity": {
"accountId": "123456789012",
"caller": "CALLER",
"sourceIp": "127.0.0.1",
"accessKey": "ACCESS_KEY",
"userArn": "arn:aws:iam::123456789012:user/alice",
"userAgent": "PostmanRuntime/7.1.1",
"user": "CALLER"
}
},
"body": {},
"isBase64Encoded": False
}
@pytest.fixture
def order():
"""
Single order
"""
now = datetime.datetime.now()
return {
"orderId": str(uuid.uuid4()),
"userId": str(uuid.uuid4()),
"createdDate": now.isoformat(),
"modifiedDate": now.isoformat(),
"status": "NEW",
"products": [{
"productId": str(uuid.uuid4()),
"name": "<NAME>",
"package": {
"width": 1000,
"length": 900,
"height": 800,
"weight": 700
},
"price": 300,
"quantity": 4
}],
"address": {
"name": "<NAME>",
"companyName": "Company Inc.",
"streetAddress": "123 Street St",
"postCode": "12345",
"city": "Town",
"state": "State",
"country": "SE",
"phoneNumber": "+123456789"
},
"deliveryPrice": 200,
"total": 1400
}
def test_get_order(lambda_module, order):
"""
Test get_order()
"""
# Stub boto3
table = stub.Stubber(lambda_module.table.meta.client)
response = {
"Item": {k: TypeSerializer().serialize(v) for k, v in order.items()},
# We do not use ConsumedCapacity
"ConsumedCapacity": {}
}
expected_params = {
"TableName": lambda_module.TABLE_NAME,
"Key": {"orderId": order["orderId"]}
}
table.add_response("get_item", response, expected_params)
table.activate()
# Gather orders
ddb_order = lambda_module.get_order(order["orderId"])
# Remove stub
table.assert_no_pending_responses()
table.deactivate()
# Check response
compare_dict(order, ddb_order)
def test_handler(lambda_module, apigateway_event, order, context):
"""
Test handler()
"""
# Stub boto3
table = stub.Stubber(lambda_module.table.meta.client)
response = {
"Item": {k: TypeSerializer().serialize(v) for k, v in order.items()},
# We do not use ConsumedCapacity
"ConsumedCapacity": {}
}
expected_params = {
"TableName": lambda_module.TABLE_NAME,
"Key": {"orderId": order["orderId"]}
}
table.add_response("get_item", response, expected_params)
table.activate()
# Send request
response = lambda_module.handler(apigateway_event, context)
# Remove stub
table.assert_no_pending_responses()
table.deactivate()
assert response["statusCode"] == 200
assert "body" in response
body = json.loads(response["body"])
compare_dict(order, body)
def test_handler_not_found(lambda_module, apigateway_event, order, context):
"""
Test handler() with an unknown order ID
"""
# Stub boto3
table = stub.Stubber(lambda_module.table.meta.client)
response = {
# We do not use ConsumedCapacity
"ConsumedCapacity": {}
}
expected_params = {
"TableName": lambda_module.TABLE_NAME,
"Key": {"orderId": order["orderId"]}
}
table.add_response("get_item", response, expected_params)
table.activate()
# Send request
response = lambda_module.handler(apigateway_event, context)
# Remove stub
table.assert_no_pending_responses()
table.deactivate()
assert response["statusCode"] == 404
assert "body" in response
body = json.loads(response["body"])
assert "message" in body
assert isinstance(body["message"], str)
def test_handler_forbidden(lambda_module, apigateway_event, context):
"""
Test handler() without claims
"""
apigateway_event = copy.deepcopy(apigateway_event)
del apigateway_event["requestContext"]["identity"]
# Send request
response = lambda_module.handler(apigateway_event, context)
assert response["statusCode"] == 401
assert "body" in response
body = json.loads(response["body"])
assert "message" in body
assert isinstance(body["message"], str)
def test_handler_missing_order(lambda_module, apigateway_event, context):
"""
Test handler() without orderId
"""
apigateway_event = copy.deepcopy(apigateway_event)
apigateway_event["pathParameters"] = None
# Send request
response = lambda_module.handler(apigateway_event, context)
assert response["statusCode"] == 400
assert "body" in response
body = json.loads(response["body"])
assert "message" in body
assert isinstance(body["message"], str)
|
StarcoderdataPython
|
1625095
|
from setuptools import setup
install_requires = [
r.strip() for r in open('requirements.txt')
if r.strip() and not r.strip().startswith('#')
]
setup(
name="aiokafka_rpc",
version="1.3.0",
author='<NAME>',
author_email='<EMAIL>',
description=("RPC over Apache Kafka for Python using asyncio"),
license="Apache Software License",
keywords="aiokafka_rpc",
url="https://github.com/fabregas/aiokafka_rpc",
packages=["aiokafka_rpc"],
install_requires=install_requires,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Topic :: Software Development :: Libraries",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3"
],
)
|
StarcoderdataPython
|
110704
|
"""
Created on 08 Okt. 2021
@author: <NAME>
This example shows how you can use MiP-EGO in order to perform hyper-parameter optimization for machine learning tasks.
"""
#import packages
from sklearn.datasets import load_iris
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score, KFold
import numpy as np
#import our package, the surrogate model and the search space classes
from mipego import ParallelBO
from mipego.Surrogate import RandomForest
from mipego.SearchSpace import ContinuousSpace, NominalSpace, OrdinalSpace
# Load the dataset
iris = load_iris()
X_iris = iris.data
y_iris = iris.target
# First we need to define the Search Space
# The search space consists of one continuous variable,
# one ordinal (integer) variable,
# and two categorical (nominal) variables.
Cvar = ContinuousSpace([1.0, 20.0], 'C')  # one continuous variable with label C
degree = OrdinalSpace([2,6], 'degree')
gamma = NominalSpace(['scale', 'auto'], 'gamma')
kernel = NominalSpace(['linear', 'poly', 'rbf', 'sigmoid'], 'kernel')
#the complete search space is just the sum of the parameter spaces
search_space = Cvar + gamma + degree + kernel
#now we define the objective function (the model optimization)
def train_model(c):
#define the model
# We will use a Support Vector Classifier
svm = SVC(kernel=c['kernel'], gamma=c['gamma'], C=c['C'], degree=c['degree'])
cv = KFold(n_splits=4, shuffle=True, random_state=42)
# Nested CV with parameter optimization
cv_score = cross_val_score(svm, X=X_iris, y=y_iris, cv=cv)
#by default mip-ego minimises, so we reverse the accuracy
return -1 * np.mean(cv_score)
model = RandomForest(levels=search_space.levels)
opt = ParallelBO(
search_space=search_space,
obj_fun=train_model,
model=model,
max_FEs=6,
DoE_size=5, # the initial DoE size
eval_type='dict',
acquisition_fun='MGFI',
acquisition_par={'t' : 2},
n_job=3, # number of processes
n_point=3, # number of the candidate solution proposed in each iteration
verbose=True # turn this off, if you prefer no output
)
xopt, fopt, stop_dict = opt.run()
print('xopt: {}'.format(xopt))
print('fopt: {}'.format(fopt))
print('stop criteria: {}'.format(stop_dict))
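# Since train_model() returns the negated mean CV accuracy, the best
# cross-validated accuracy found is simply -fopt.
print('best CV accuracy: {}'.format(-fopt))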
|
StarcoderdataPython
|
4811984
|
from pickle import loads, dumps
import pytest
from instruct.types import FrozenMapping, FROZEN_MAPPING_SINGLETONS
from instruct.utils import flatten, flatten_fields
def test_frozen_mapping():
# Test identity operations:
assert FrozenMapping() is FrozenMapping(None) is FrozenMapping({})
# Test simple mapping:
f1 = FrozenMapping({"a": 1, "b": 2})
assert f1 is FrozenMapping({"a": 1, "b": 2})
# Test pickling:
assert loads(dumps(f1)) is f1
# Test exploding on unhashable types:
with pytest.raises(TypeError):
f2 = FrozenMapping({"a": {"unhashable"}})
# Test on other hashables:
f2 = FrozenMapping({"a": frozenset({"hashable"})})
assert f2 is FrozenMapping({"a": frozenset({"hashable"})})
# Test merging:
f3 = f1 | f2
assert f3["a"] == frozenset({"hashable"})
f3 |= {"b": 3}
assert f3["b"] == 3
f4 = {"foo": "bar"} | f3
f4_i = f3 | {"foo": "bar"}
assert isinstance(f4, FrozenMapping)
assert isinstance(f4_i, FrozenMapping)
assert hash(f4) == hash(f4_i)
assert f4 is f4_i
assert (f4 - {"foo"}) is f3
assert (f4 - {"foo": "baz"}) is f4_i
x = FrozenMapping({"a": {"b": {"c": None}, "d": 1}}) - FrozenMapping({"a": "b"})
assert x is FrozenMapping({"a": {"d": 1}})
# Test weak refs:
hash_code_1 = hash(f1)
hash_code_2 = hash(f2)
assert hash_code_1 in FROZEN_MAPPING_SINGLETONS
assert hash_code_2 in FROZEN_MAPPING_SINGLETONS
# Kill it:
del f1, f2
assert hash_code_1 not in FROZEN_MAPPING_SINGLETONS
assert hash_code_2 not in FROZEN_MAPPING_SINGLETONS
def test_flatten():
assert flatten([["a"]], eager=True) == ("a",)
def test_flatten_fields():
# The format is always:
# if a string, it means "remove this from the DAO universally"
# if a mapping, means "operate on this field and operate on these elements"
# where operation if string means "remove" and if not string means recurse
expected = FrozenMapping(
{
"a": FrozenMapping({"a": None}),
"Q": FrozenMapping({"x": None, "y": None}),
"b": FrozenMapping({"c": None}),
"d": FrozenMapping(
{"e": FrozenMapping({"f": None}), "h": FrozenMapping({"foo": None, "bar": None})}
),
"stripme": None,
}
)
assert flatten_fields.collect(expected) == expected
n1 = flatten_fields.collect(
{
"a": ("a",),
"Q": ("x", "y"),
"b": "c",
"d": {"e": {"f"}, "h": ("foo", "bar")},
"stripme": None,
}
)
assert n1 == expected
n2 = flatten_fields.collect(
{
"a": {"a": None},
"Q": {"x": None, "y": None},
"b": {"c": None},
"d": {"e": {"f": None}, "h": {"foo": None, "bar": None}},
"stripme": None,
}
)
assert n2 == expected
n3 = flatten_fields.collect(
{
"a": ({"a": None},),
"Q": ({"x": None}, {"y": None}),
"b": {"c": None},
"d": {"e": {"f": None}, "h": ({"foo": None, "bar": None},)},
"stripme": {},
}
)
assert n3 == expected
def test_merge_skip_keys():
skipped = FrozenMapping({})
skip_only_a = skipped | flatten_fields.collect({"a"})
assert skip_only_a is FrozenMapping({"a": None})
skip_parts_of_ab = skipped | flatten_fields.collect({"a": {"b": None}, "c": {"d": None}})
assert skip_parts_of_ab is FrozenMapping({"a": {"b": None}, "c": {"d": None}})
assert (skip_parts_of_ab | flatten_fields.collect(("a", "b", "c"))) is FrozenMapping(
{"a": None, "b": None, "c": None}
)
|
StarcoderdataPython
|
4809573
|
<gh_stars>0
#! /root/anaconda3/bin/python
from threading import current_thread, Thread
import time
print('parent thread %s start' % (current_thread().getName()))
class MyThread(Thread):
def run(self):
print('child thread %s start' % current_thread().getName())
time.sleep(5)
print('child thread %s stop' % current_thread().getName())
mt = MyThread()
mt.daemon = True
# mt.setDaemon(True)
mt.start()
mt.join()
# mp.join(1)
# time.sleep(5)
# First test: comment out time.sleep(5) above to observe the execution order (sample output below)
# parent process 75851 start
# parent process 75851 stop
# child process 75852 start
# child process 75852 stop
print('parent thread %s stop' % (current_thread().getName()))
|
StarcoderdataPython
|
1640378
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Membership'
db.create_table('users_membership', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('legal_name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('preferred_name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('email_address', self.gf('django.db.models.fields.EmailField')(max_length=100)),
('city', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('region', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('country', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('postal_code', self.gf('django.db.models.fields.CharField')(max_length=20, blank=True)),
('psf_code_of_conduct', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
('psf_announcements', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('updated', self.gf('django.db.models.fields.DateTimeField')(blank=True)),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, null=True, to=orm['users.User'])),
))
db.send_create_signal('users', ['Membership'])
# Deleting field 'User.preferred_name'
db.delete_column('users_user', 'preferred_name')
# Deleting field 'User.psf_code_of_conduct'
db.delete_column('users_user', 'psf_code_of_conduct')
# Deleting field 'User.postal_code'
db.delete_column('users_user', 'postal_code')
# Deleting field 'User.region'
db.delete_column('users_user', 'region')
# Deleting field 'User.legal_name'
db.delete_column('users_user', 'legal_name')
# Deleting field 'User.psf_announcements'
db.delete_column('users_user', 'psf_announcements')
# Deleting field 'User.city'
db.delete_column('users_user', 'city')
# Deleting field 'User.country'
db.delete_column('users_user', 'country')
def backwards(self, orm):
# Deleting model 'Membership'
db.delete_table('users_membership')
# Adding field 'User.preferred_name'
db.add_column('users_user', 'preferred_name',
self.gf('django.db.models.fields.CharField')(max_length=100, default='', blank=True),
keep_default=False)
# Adding field 'User.psf_code_of_conduct'
db.add_column('users_user', 'psf_code_of_conduct',
self.gf('django.db.models.fields.NullBooleanField')(blank=True, null=True),
keep_default=False)
# Adding field 'User.postal_code'
db.add_column('users_user', 'postal_code',
self.gf('django.db.models.fields.CharField')(max_length=20, default='', blank=True),
keep_default=False)
# Adding field 'User.region'
db.add_column('users_user', 'region',
self.gf('django.db.models.fields.CharField')(max_length=100, default='', blank=True),
keep_default=False)
# Adding field 'User.legal_name'
db.add_column('users_user', 'legal_name',
self.gf('django.db.models.fields.CharField')(max_length=100, default='', blank=True),
keep_default=False)
# Adding field 'User.psf_announcements'
db.add_column('users_user', 'psf_announcements',
self.gf('django.db.models.fields.NullBooleanField')(blank=True, null=True),
keep_default=False)
# Adding field 'User.city'
db.add_column('users_user', 'city',
self.gf('django.db.models.fields.CharField')(max_length=100, default='', blank=True),
keep_default=False)
# Adding field 'User.country'
db.add_column('users_user', 'country',
self.gf('django.db.models.fields.CharField')(max_length=100, default='', blank=True),
keep_default=False)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'blank': 'True', 'to': "orm['auth.Permission']"})
},
'auth.permission': {
'Meta': {'object_name': 'Permission', 'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'contenttypes.contenttype': {
'Meta': {'object_name': 'ContentType', 'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'users.membership': {
'Meta': {'object_name': 'Membership'},
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'null': 'True', 'to': "orm['users.User']"}),
'email_address': ('django.db.models.fields.EmailField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'legal_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'preferred_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'psf_announcements': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'psf_code_of_conduct': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'})
},
'users.user': {
'Meta': {'object_name': 'User'},
'_bio_rendered': ('django.db.models.fields.TextField', [], {}),
'bio': ('markupfield.fields.MarkupField', [], {'rendered_field': 'True', 'blank': 'True'}),
'bio_markup_type': ('django.db.models.fields.CharField', [], {'max_length': '30', 'default': "'markdown'", 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_privacy': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'blank': 'True', 'to': "orm['auth.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'search_visibility': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'blank': 'True', 'to': "orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
}
}
complete_apps = ['users']
|
StarcoderdataPython
|
1784305
|
import functools
import string
import typing as t
from mypy.errorcodes import ErrorCode
from mypy.nodes import (
Expression,
FuncDef,
LambdaExpr,
NameExpr,
RefExpr,
StrExpr,
)
from mypy.options import Options
from mypy.plugin import (
MethodContext,
Plugin,
)
from mypy.types import (
get_proper_type,
ProperType,
Type,
)
import typing_extensions as te
ERROR_BAD_ARG: te.Final[ErrorCode] = ErrorCode(
'logger-arg',
'Positional argument of loguru handler is not valid for given message',
'loguru',
)
ERROR_BAD_KWARG: te.Final[ErrorCode] = ErrorCode(
'logger-kwarg',
'Named argument of loguru handler is not valid for given message',
'loguru',
)
class Opts(t.NamedTuple):
lazy: bool
DEFAULT_LAZY = False # type: te.Final
DEFAULT_OPTS = Opts(lazy=DEFAULT_LAZY) # type: te.Final
NAME_TO_BOOL = {
'False': False,
'True': True,
} # type: te.Final
def _loguru_logger_call_handler(
loggers: t.Dict[ProperType, Opts],
ctx: MethodContext,
) -> Type:
log_msg_expr = ctx.args[0][0]
logger_opts = loggers.get(ctx.type) or DEFAULT_OPTS
assert isinstance(log_msg_expr, StrExpr), type(log_msg_expr)
# collect call args/kwargs
    # due to the funky structure mypy offers here, it's easier
    # to beg for forgiveness than to ask permission
try:
call_args = ctx.args[1]
call_args_count = len(call_args)
except IndexError:
call_args = []
call_args_count = 0
try:
call_kwargs = {
kwarg_name: ctx.args[2][idx]
for idx, kwarg_name in enumerate(ctx.arg_names[2])
}
except IndexError:
call_kwargs = {}
# collect args/kwargs from string interpolation
log_msg_value: str = log_msg_expr.value
log_msg_expected_args_count = 0
log_msg_expected_kwargs = []
for res in string.Formatter().parse(log_msg_value):
if res[1] is None:
continue
elif not res[1].strip():
log_msg_expected_args_count += 1
else:
log_msg_expected_kwargs.append(res[1].strip())
if log_msg_expected_args_count > call_args_count:
ctx.api.msg.fail(
f'Missing {log_msg_expected_args_count - call_args_count} '
'positional arguments for log message',
context=log_msg_expr,
code=ERROR_BAD_ARG,
)
return ctx.default_return_type
elif log_msg_expected_args_count < call_args_count:
ctx.api.msg.note(
f'Expected {log_msg_expected_args_count} but found {call_args_count} '
'positional arguments for log message',
context=log_msg_expr,
code=ERROR_BAD_ARG,
)
return ctx.default_return_type
elif logger_opts.lazy:
for call_pos, call_arg in enumerate(call_args):
if isinstance(call_arg, LambdaExpr) and call_arg.arguments:
ctx.api.msg.fail(
f'Expected 0 arguments for <lambda>: {call_pos} arg',
context=call_arg,
code=ERROR_BAD_ARG,
)
elif isinstance(call_arg, RefExpr) and isinstance(
call_arg.node, FuncDef) and call_arg.node.arguments:
ctx.api.msg.fail(
f'Expected 0 arguments for {call_arg.fullname}: {call_pos} arg',
context=call_arg,
code=ERROR_BAD_ARG,
)
for log_msg_kwarg in log_msg_expected_kwargs:
maybe_kwarg_expr = call_kwargs.pop(log_msg_kwarg, None)
if maybe_kwarg_expr is None:
ctx.api.msg.fail(
f'{log_msg_kwarg} keyword argument is missing',
context=log_msg_expr,
code=ERROR_BAD_KWARG,
)
return ctx.default_return_type
elif logger_opts.lazy:
if isinstance(maybe_kwarg_expr, LambdaExpr) and maybe_kwarg_expr.arguments:
ctx.api.msg.fail(
f'Expected 0 arguments for <lambda>: {log_msg_kwarg} kwarg',
context=maybe_kwarg_expr,
code=ERROR_BAD_KWARG,
)
elif isinstance(maybe_kwarg_expr, RefExpr) and isinstance(
maybe_kwarg_expr.node, FuncDef) and maybe_kwarg_expr.node.arguments:
ctx.api.msg.fail(
'Expected 0 arguments for '
f'{maybe_kwarg_expr.node.fullname}: {log_msg_kwarg}',
context=maybe_kwarg_expr,
code=ERROR_BAD_KWARG,
)
for extra_kwarg_name in call_kwargs:
ctx.api.msg.fail(
f'{extra_kwarg_name} keyword argument not found in log message',
context=log_msg_expr,
code=ERROR_BAD_KWARG,
)
return ctx.default_return_type
def _loguru_opt_call_handler(
loggers: t.Dict[ProperType, Opts],
ctx: MethodContext,
) -> Type:
return_type = get_proper_type(ctx.default_return_type)
lazy_expr = _get_opt_arg('lazy', ctx=ctx)
if isinstance(lazy_expr, NameExpr):
loggers[return_type] = Opts(lazy=NAME_TO_BOOL[lazy_expr.name])
return return_type
def _get_opt_arg(
arg_name: str,
*,
ctx: MethodContext,
) -> t.Optional[Expression]:
try:
return ctx.args[ctx.callee_arg_names.index(arg_name)][0]
except IndexError:
return None
class LoguruPlugin(Plugin):
builtin_severities = (
'info',
'debug',
'warning',
'error',
'exception',
'success',
'trace',
)
def __init__(self, options: Options) -> None:
super().__init__(options)
self._known_loggers: t.Dict[ProperType, Opts] = {}
def get_method_hook(
self,
fullname: str,
) -> t.Optional[t.Callable[[MethodContext], Type]]:
if fullname.startswith('loguru'):
_, method = fullname.rsplit('.', 1)
if method in self.builtin_severities:
return functools.partial(_loguru_logger_call_handler, self._known_loggers)
elif method == 'opt':
return functools.partial(_loguru_opt_call_handler, self._known_loggers)
return super().get_method_hook(fullname)
def plugin(version: str) -> t.Type[LoguruPlugin]:
return LoguruPlugin
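# Illustrative configuration sketch: mypy loads this plugin via its `plugins`
# option; the module path below is an assumption and should match wherever
# this file is importable from, e.g.:
#
#   [mypy]
#   plugins = loguru_mypy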
|
StarcoderdataPython
|
1780577
|
<reponame>meow464/pyobjus
__version__ = '1.2.0'
from .pyobjus import *
|
StarcoderdataPython
|
150815
|
'''
URL: https://leetcode.com/problems/delete-node-in-a-linked-list/
Difficulty: Easy
Description: Delete Node in a Linked List
Write a function to delete a node in a singly-linked list. You will not be given access to the head of the list, instead you will be given access to the node to be deleted directly.
It is guaranteed that the node to be deleted is not a tail node in the list.
Example 1:
Input: head = [4,5,1,9], node = 5
Output: [4,1,9]
Explanation: You are given the second node with value 5, the linked list should become 4 -> 1 -> 9 after calling your function.
Example 2:
Input: head = [4,5,1,9], node = 1
Output: [4,5,9]
Explanation: You are given the third node with value 1, the linked list should become 4 -> 5 -> 9 after calling your function.
Example 3:
Input: head = [1,2,3,4], node = 3
Output: [1,2,4]
Example 4:
Input: head = [0,1], node = 0
Output: [1]
Example 5:
Input: head = [-3,5,-99], node = -3
Output: [5,-99]
Constraints:
The number of the nodes in the given list is in the range [2, 1000].
-1000 <= Node.val <= 1000
The value of each node in the list is unique.
The node to be deleted is in the list and is not a tail node
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def deleteNode(self, node):
"""
:type node: ListNode
:rtype: void Do not return anything, modify node in-place instead.
"""
node.val = node.next.val
node.next = node.next.next
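# Illustrative local check (not part of the LeetCode harness): a minimal
# ListNode is defined here only to exercise Solution.deleteNode.
if __name__ == '__main__':
    class ListNode:
        def __init__(self, x):
            self.val = x
            self.next = None
    head = ListNode(4)
    head.next = ListNode(5)
    head.next.next = ListNode(1)
    head.next.next.next = ListNode(9)
    Solution().deleteNode(head.next)  # delete the node holding 5
    values, node = [], head
    while node:
        values.append(node.val)
        node = node.next
    print(values)  # expected: [4, 1, 9]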
|
StarcoderdataPython
|
68618
|
<reponame>TachikakaMin/envpool
# Copyright 2021 Garena Online Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Api wrapper layer for EnvPool."""
from typing import Tuple, Type
from .dm_envpool import DMEnvPoolMeta
from .env_spec import EnvSpecMeta
from .gym_envpool import GymEnvPoolMeta
from .protocol import EnvPool, EnvSpec
def py_env(
envspec: Type[EnvSpec], envpool: Type[EnvPool]
) -> Tuple[Type[EnvSpec], Type[EnvPool], Type[EnvPool]]:
"""Initialize EnvPool for users."""
# remove the _ prefix added when registering cpp class via pybind
spec_name = envspec.__name__[1:]
pool_name = envpool.__name__[1:]
return (
EnvSpecMeta(spec_name, (envspec,), {}), # type: ignore[return-value]
DMEnvPoolMeta(pool_name.replace("EnvPool", "DMEnvPool"), (envpool,), {}),
GymEnvPoolMeta(pool_name.replace("EnvPool", "GymEnvPool"), (envpool,), {}),
)
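# Illustrative registration sketch: `_DummyEnvSpec`/`_DummyEnvPool` stand in for
# classes exported by a compiled pybind11 module (hypothetical names):
#
#   from .dummy_envpool import _DummyEnvSpec, _DummyEnvPool
#   DummyEnvSpec, DummyDMEnvPool, DummyGymEnvPool = py_env(_DummyEnvSpec, _DummyEnvPool)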
|
StarcoderdataPython
|
1642009
|
""" With the draw module new schematics ca be created using python code """
from .Dot import Dot
from .Draw import Draw
from .DrawElement import DrawElement
from .Element import Element
from .Label import Label
from .Line import Line
from .NC import NC
|
StarcoderdataPython
|
100399
|
<filename>xfel/ui/components/timeit.py
from __future__ import absolute_import, division, print_function
import time, math
def now():
return "%02d:%02d:%02d" % (time.localtime().tm_hour, time.localtime().tm_min, time.localtime().tm_sec)
def duration(t1, t2):
diff = t2 - t1
seconds = int(math.floor(diff))
frac = diff - seconds
hh = seconds // 3600
mm = seconds // 60
if hh > 0:
mm = mm % 60
ss = seconds % 60
return "%02dh %02dm %fs" % (hh, mm, ss + frac)
|
StarcoderdataPython
|
3278810
|
import numpy as np
# Import dask
import dask
# Use dask jobqueue
from dask_jobqueue import PBSCluster
def get_pbscluster(nthreads):
cluster = PBSCluster(
cores=1, # The number of cores you want
memory='10GB', # Amount of memory
processes=1, # How many processes
queue='casper', # The type of queue to utilize (/glade/u/apps/dav/opt/usr/bin/execcasper)
local_directory='$TMPDIR', # Use your local directory
resource_spec='select=1:ncpus=1:mem=10GB', # Specify resources
project='CESM0002', # Input your project ID here
walltime='04:00:00', # Amount of wall time
interface='ib0', # Interface to use
)
# Scale up
cluster.scale(nthreads)
dask.config.set({'distributed.dashboard.link':'https://jupyterhub.hpc.ucar.edu/stable/user/{USER}/proxy/{port}/status'})
return cluster
# a function to return the global mean of a dataset
# variables weighted by cos(latitude)
#
def global_mean(ds):
lat = ds['lat']
weight = np.cos(np.deg2rad(lat))
weight /= weight.mean()
other_dims = set(ds.dims) - {'year'}
return (ds * weight).mean(other_dims)
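# Illustrative sketch of global_mean() on a synthetic dataset; xarray is not
# imported above, so this example assumes it is available in the environment.
if __name__ == "__main__":
    import xarray as xr
    years = np.arange(2000, 2005)
    lats = np.linspace(-90, 90, 19)
    lons = np.linspace(0, 355, 72)
    ds = xr.Dataset(
        {"tas": (("year", "lat", "lon"),
                 np.random.rand(years.size, lats.size, lons.size))},
        coords={"year": years, "lat": lats, "lon": lons},
    )
    print(global_mean(ds))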
|
StarcoderdataPython
|
3258843
|
"""
This module provides functionality for resolving references within an instance
of `oapi.oas.model.OpenAPI`.
For example, the following will replace all references in the Open API
document `open_api_document` with the objects targeted by the `ref` property
of the reference:
```python
from urllib.request import urlopen
from oapi.oas.model import OpenAPI
with urlopen(
'https://raw.githubusercontent.com/OAI/OpenAPI-Specification/master/'
'examples/v3.0/callback-example.yaml'
) as response:
open_api_document = OpenAPI(response)
resolver = Resolver(open_api_document)
resolver.dereference()
```
"""
import sob
from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union
from urllib.error import HTTPError
from urllib.parse import ParseResult, urljoin, urlparse
from urllib.request import urlopen as _urlopen
from jsonpointer import resolve_pointer # type: ignore
from .model import OpenAPI, Reference
from ..errors import ReferenceLoopError
def _unmarshal_resolved_reference(
resolved_reference: sob.abc.MarshallableTypes,
url: Optional[str],
pointer: str,
types: Union[Sequence[Union[sob.abc.Property, type]], sob.abc.Types] = (),
) -> sob.abc.Model:
if types or (not isinstance(resolved_reference, sob.abc.Model)):
resolved_reference = sob.model.unmarshal(
resolved_reference, types=types
)
# Re-assign the URL and pointer
assert isinstance(resolved_reference, sob.abc.Model)
sob.meta.url(resolved_reference, url)
sob.meta.pointer(resolved_reference, pointer)
return resolved_reference
class _Document:
def __init__(
self,
resolver: "Resolver",
root: sob.abc.Model,
url: Optional[str] = None,
) -> None:
        assert url is None or isinstance(url, str)
assert isinstance(resolver, Resolver)
# Infer the document URL
if (url is None) and isinstance(root, sob.abc.Model):
url = sob.meta.get_url(root)
self.resolver: Resolver = resolver
self.root: sob.abc.Model = root
self.pointers: Dict[str, Optional[sob.abc.Model]] = {}
self.url: str = url
def get_url_pointer(self, pointer: str) -> Tuple[str, str]:
"""
Get an absolute URL + relative pointer
"""
url: str
separator: str
url, separator, pointer = pointer.partition("#")
url = self.get_absolute_url(url)
return url, f"{separator}{pointer}"
def get_absolute_url(self, url: str) -> str:
"""
Get an absolute URL from a (possibly relative) URL
"""
parse_result = urlparse(url)
if not parse_result.scheme:
url = urljoin(self.url, url.lstrip("/ "))
return url
def dereference(
self, model: sob.abc.Model, recursive: bool = True
) -> None:
"""
        Recursively dereference this object and all of its items/properties
"""
try:
if isinstance(model, sob.abc.Object):
self.dereference_object_properties(model, recursive=recursive)
elif isinstance(model, sob.abc.Array):
self.dereference_array_items(model, recursive=recursive)
elif isinstance(model, sob.abc.Dictionary):
self.dereference_dictionary_values(model, recursive=recursive)
else:
raise TypeError(
"The argument must be an instance of "
f"`{sob.utilities.qualified_name(sob.model.Model)}`, "
f"not {repr(model)}"
)
except ReferenceLoopError:
if not recursive:
raise
def prevent_infinite_recursion(
self, model: sob.abc.Model
) -> Tuple[Optional[str], Optional[sob.abc.Model]]:
"""
Prevent recursion errors by putting a placeholder `None` in place of
the parent object in the `pointer` cache
"""
pointer = sob.meta.get_pointer(model)
existing_value: Optional[sob.abc.Model] = None
if pointer:
if pointer in self.pointers:
existing_value = self.pointers[pointer]
self.pointers[pointer] = None
return pointer, existing_value
def reset_recursion_placeholder(
self, pointer: str, previous_value: Optional[sob.abc.Model]
) -> None:
"""
Cleanup a placeholder created by the `prevent_infinite_recursion`
method
"""
if pointer and (pointer in self.pointers):
if previous_value is None:
del self.pointers[pointer]
else:
self.pointers[pointer] = previous_value
def dereference_object_properties(
self, object_: sob.abc.Model, recursive: bool = True
) -> None:
"""
Replace all references in this object's properties with the referenced
object
"""
object_meta = sob.meta.read(object_)
# Prevent recursion errors
pointer: Optional[str]
existing: Optional[sob.abc.Model]
pointer, existing = self.prevent_infinite_recursion(object_)
for property_name, property_ in object_meta.properties.items():
value = getattr(object_, property_name)
if isinstance(value, Reference):
assert value.ref
setattr(
object_,
property_name,
self.resolve(
value.ref, types=(property_,), dereference=recursive
),
)
elif recursive and isinstance(value, sob.abc.Model):
self.dereference(value, recursive=recursive)
if pointer:
self.reset_recursion_placeholder(pointer, existing)
def dereference_array_items(
self, array: sob.abc.Array, recursive: bool = True
) -> None:
"""
Replace all references in this array with the referenced object
"""
array_meta = sob.meta.read(array)
# Prevent recursion errors
pointer: Optional[str]
existing: Optional[sob.abc.Model]
pointer, existing = self.prevent_infinite_recursion(array)
index: int
item: Any
for index, item in enumerate(array):
if isinstance(item, Reference):
assert item.ref
array[index] = self.resolve(
item.ref,
types=array_meta.item_types or (),
dereference=recursive,
)
elif recursive and isinstance(item, sob.abc.Model):
self.dereference(item, recursive=recursive)
if pointer:
self.reset_recursion_placeholder(pointer, existing)
def dereference_dictionary_values(
self, dictionary: sob.abc.Dictionary, recursive: bool = True
) -> None:
"""
Replace all references in this dictionary with the referenced object
"""
dictionary_meta = sob.meta.read(dictionary)
# Prevent recursion errors
pointer, existing = self.prevent_infinite_recursion(dictionary)
key: str
value: Any
for key, value in dictionary.items():
if isinstance(value, Reference):
assert value.ref
dictionary[key] = self.resolve(
value.ref,
types=dictionary_meta.value_types or (),
dereference=recursive,
)
elif recursive and isinstance(value, sob.abc.Model):
self.dereference(value, recursive=recursive)
if pointer:
self.reset_recursion_placeholder(pointer, existing)
def resolve(
self,
pointer: str,
types: Union[
sob.abc.Types, Sequence[Union[sob.abc.Property, type]]
] = (),
dereference: bool = False,
) -> sob.abc.Model:
"""
Return the object referenced by a pointer
"""
if pointer in self.pointers:
# This catches recursion errors
if self.pointers[pointer] is None:
raise ReferenceLoopError(pointer)
else:
self.pointers[pointer] = None
if pointer.startswith("#"):
# Resolve a reference within the same Open API document
resolved = resolve_pointer(self.root, pointer[1:])
# Cast the resolved reference as one of the given types
resolved = _unmarshal_resolved_reference(
resolved, self.url, pointer, types=types
)
if resolved is None:
raise RuntimeError()
else:
# Resolve a reference from another Open API document
url, document_pointer = self.get_url_pointer(pointer)
# Retrieve the document
document = self.resolver.get_document(
urljoin(self.url, url.lstrip("/"))
)
# Resolve the pointer, if needed
if document_pointer:
resolved = document.resolve(document_pointer, types)
else:
resolved = document.root
# Cast the resolved reference as one of the given types
resolved = _unmarshal_resolved_reference(
resolved, url, document_pointer, types=types
)
# Recursively dereference
if dereference and isinstance(resolved, sob.abc.Model):
self.dereference(resolved, recursive=dereference)
# Cache the resolved pointer
self.pointers[pointer] = resolved
model: Optional[sob.abc.Model] = self.pointers[pointer]
assert model
# The following is necessary in order to apply pointers to
# dereferenced elements
sob.meta.set_pointer(model, pointer)
return model
def dereference_all(self) -> None:
self.dereference(self.root, recursive=True)
class Resolver:
"""
This class should be used, with an instance of `oapi.oas.model.OpenAPI`, to
resolve references.
Parameters:
- root (oapi.oas.model.OpenAPI): The OpenAPI document against which
pointers will be resolved.
- url (str): The URL or file path from where `root` was retrieved. The
base URL for relative paths will be the directory above this URL.
This will not typically be needed, as it can be inferred from most
`Model` instances.
- urlopen (collections.Callable): If provided, this should be a
function taking one argument (a `str`), which can be used in lieu
of `urllib.request.urlopen` to retrieve a document and return an
instance of a sub-class of `IOBase` (such as
`http.client.HTTPResponse`). This should be used if authentication is
needed in order to retrieve external references in the document,
or if local file paths will be referenced instead of web URL's (use
`open` as the value for the `urlopen` parameter
in this case).
"""
def __init__(
self,
root: OpenAPI,
        url: Optional[str] = None,
urlopen: Callable = _urlopen,
) -> None:
# Ensure arguments are of the correct types
assert callable(urlopen)
assert isinstance(root, OpenAPI)
assert isinstance(url, (str, sob.utilities.types.NoneType))
# This is the function used to open external pointer references
self.urlopen = urlopen
# Infer the URL from the `OpenAPI` document, if not explicitly provided
if url is None:
url = sob.meta.url(root) or ""
self.url = url
# This is the primary document--the one we are resolving
document: _Document = _Document(self, root, url)
# Store the primary document both by URL and under the key "#" (for
# convenient reference)
self.documents = {url: document}
if url != "":
self.documents[""] = document
def get_document(self, url: str) -> _Document:
"""
Retrieve a document by URL, or use the cached document if previously
retrieved
"""
if url not in self.documents:
try:
with self.urlopen(url) as response:
self.documents[url] = _Document(
self, sob.model.detect_format(response)[0], url=url
)
except (HTTPError, FileNotFoundError) as error:
                sob.errors.append_exception_text(error, f": {url}")
raise error
return self.documents[url]
def dereference(self) -> None:
"""
Dereference the primary document
"""
self.documents[""].dereference_all()
def resolve(
self,
pointer: str,
types: Union[
sob.abc.Types, Sequence[Union[type, sob.abc.Property]]
] = (),
dereference: bool = False,
) -> sob.abc.Model:
"""
Retrieve an object at the specified pointer
"""
url, pointer = self.documents[""].get_url_pointer(pointer)
return self.documents[url].resolve(
pointer, types, dereference=dereference
)
def resolve_reference(
self,
reference: Reference,
types: Union[
sob.abc.Types, Sequence[Union[type, sob.abc.Property]]
] = (),
) -> sob.abc.Model:
"""
Retrieve a referenced object.
Parameters:
- reference (oapi.oas.model.Reference)
- types ([Union[type, sob.abc.Property]]) = ()
"""
url: str = sob.meta.get_url(reference) or ""
assert reference.ref
pointer: str = urljoin(
sob.meta.get_pointer(reference) or "",
reference.ref,
)
resolved_model: sob.abc.Model = self.get_document(url).resolve(
pointer, types
)
if resolved_model is reference or (
isinstance(resolved_model, Reference)
and resolved_model.ref == reference.ref
):
raise ReferenceLoopError(
f"`Reference` instance is self-referential: {pointer}"
)
if isinstance(resolved_model, Reference):
resolved_model = self.resolve_reference(
resolved_model, types=types
)
return resolved_model
def get_relative_url(self, url: str) -> str:
"""
Given a URL, return that URL relative to the base document
"""
relative_url: str = ""
if url:
parse_result: ParseResult = urlparse(url)
# Determine if the URL is absolute or relative
if parse_result.netloc or parse_result.scheme == "file":
# Only include the relative URL if it is not the root document
if url == self.url:
relative_url = ""
else:
relative_url = sob.utilities.string.url_relative_to(
url, self.url
)
else:
relative_url = url
return relative_url
|
StarcoderdataPython
|
182118
|
import os
def call_executibles(dps, run_screen=True):
    print('calling executable ... ')
for i,dp in enumerate(dps):
run_screen = 1 if run_screen else 0 # 1 - true, 0 - false
os.system('./run.sh ' + dp.model_name + ' ' + dp.run_path + ' ' + str(dp.n_mpi) + ' ' + str(run_screen) + ' ' + dp.pp_tag)
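# Illustrative call sketch: `dp` objects are assumed to expose model_name,
# run_path, n_mpi and pp_tag attributes (hypothetical values; ./run.sh must exist).
if __name__ == '__main__':
    from types import SimpleNamespace
    example_dp = SimpleNamespace(model_name='model_a', run_path='/tmp/run_a',
                                 n_mpi=4, pp_tag='tag0')
    call_executibles([example_dp], run_screen=False)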
|
StarcoderdataPython
|
3363935
|
import sys, os, re, math, random, shutil
from fife import fife
from scripts.objects.baseObject import BaseGameObject, GameObjectTypes
class BaseItem(BaseGameObject):
    def __init__(self, gameplay, layer, typeName, baseObjectName, itemType, itemName):
super(BaseItem, self).__init__(gameplay, layer, typeName, baseObjectName, itemType, itemName, True)
def _getItemType(self):
return self._name
def _getItemName(self):
return self._id
itemType = property(_getItemType)
itemName = property(_getItemName)
class PickableItem(BaseItem):
def __init__(self, gameplay, layer, typeName, baseObjectName, itemType, itemName):
super(PickableItem, self).__init__(gameplay, layer, typeName, baseObjectName, itemType, itemName)
self._type = GameObjectTypes['ITEM']
def onPickUp(self):
self._gameplay.scene.removeObjectFromScene(self)
def onDrop(self, dropX, dropY):
self._createFifeInstance(self, self._gameplay.scene.itemLayer)
self.setMapPosition(dropX, dropY)
self._gameplay.scene.addObjectToScene(self)
class GoldStack(PickableItem):
def __init__(self, gameplay, layer, typeName, baseObjectName, itemType, itemName):
super(GoldStack, self).__init__(gameplay, layer, typeName, baseObjectName, itemType, itemName)
self._value = 0
def serialize(self):
lvars = super(GoldStack, self).serialize()
lvars['value'] = self._value
return lvars
def deserialize(self, valueDict):
super(GoldStack, self).deserialize(valueDict)
if valueDict.has_key('value'):
self._value = int(valueDict['value'])
else:
self._value = 0
def _getValue(self):
return self._value
def _setValue(self, newValue):
self._value = int(newValue)
value = property(_getValue, _setValue)
class Portal(BaseItem):
def __init__(self, gameplay, layer, typeName, baseObjectName, itemType, itemName):
super(Portal, self).__init__(gameplay, layer, typeName, baseObjectName, itemType, itemName)
self._type = GameObjectTypes['PORTAL']
self._destination = None
def serialize(self):
lvars = super(Portal, self).serialize()
lvars['destination'] = self._destination
return lvars
def deserialize(self, valueDict):
super(Portal, self).deserialize(valueDict)
if valueDict.has_key('destination'):
self._destination = valueDict['destination']
else:
self._destination = "town"
def _getDestination(self):
return self._destination
def _setDestination(self, newDestination):
        self._destination = newDestination
destination = property(_getDestination, _setDestination)
|
StarcoderdataPython
|
1609843
|
<filename>WEEKS/CD_Sata-Structures/_MISC/misc-examples/python3-book-examples/sqlite3/sqlite3_memory.py<gh_stars>0
# Copyright (c) 2010 <NAME>. All rights reserved.
#
"""Working with an in-memory database
"""
# end_pymotw_header
import sqlite3
schema_filename = "todo_schema.sql"
with sqlite3.connect(":memory:") as conn:
conn.row_factory = sqlite3.Row
print("Creating schema")
with open(schema_filename, "rt") as f:
schema = f.read()
conn.executescript(schema)
print("Inserting initial data")
conn.execute(
"""
insert into project (name, description, deadline)
values ('pymotw', 'Python Module of the Week', '2010-11-01')
"""
)
data = [
("write about select", "done", "2010-10-03", "pymotw"),
("write about random", "waiting", "2010-10-10", "pymotw"),
("write about sqlite3", "active", "2010-10-17", "pymotw"),
]
conn.executemany(
"""
insert into task (details, status, deadline, project)
values (?, ?, ?, ?)
""",
data,
)
print("Looking for tasks...")
cursor = conn.cursor()
cursor.execute(
"""
select id, priority, status, deadline, details from task
where project = 'pymotw' order by deadline
"""
)
for row in cursor.fetchall():
print(
"{:2d} [{:d}] {:<25} [{:<8}] ({})".format(
row["id"],
row["priority"],
row["details"],
row["status"],
row["deadline"],
)
)
with sqlite3.connect(":memory:") as conn2:
print("\nLooking for tasks in second connection...")
cursor = conn2.cursor()
cursor.execute(
"""
select id, priority, status, deadline, details from task
where project = 'pymotw' order by deadline
"""
)
for row in cursor.fetchall():
print(
"{:2d} [{:d}] {:<25} [{:<8}] ({})".format(
row["id"],
row["priority"],
row["details"],
row["status"],
row["deadline"],
)
)
|
StarcoderdataPython
|
4825421
|
from bzt.utils import SoapUIScriptConverter
from tests.unit import BZTestCase, RESOURCES_DIR, ROOT_LOGGER
class TestSoapUIConverter(BZTestCase):
def test_minimal(self):
obj = SoapUIScriptConverter(ROOT_LOGGER)
config = obj.convert_script(RESOURCES_DIR + "soapui/project.xml")
self.assertIn("execution", config)
self.assertEqual(4, len(config["execution"]))
execution = config["execution"][1]
self.assertEqual("TestSuite 1-index", execution.get("scenario"))
self.assertEqual(60, execution.get("hold-for"))
self.assertEqual(10, execution.get("concurrency"))
self.assertIn("scenarios", config)
self.assertIn("TestSuite 1-index", config["scenarios"])
scenario = config["scenarios"]["TestSuite 1-index"]
self.assertIn("requests", scenario)
self.assertEqual(3, len(scenario["requests"]))
self.assertIn("variables", scenario)
self.assertEqual("foo", scenario["variables"].get("something"))
self.assertEqual("2", scenario["variables"].get("something_else"))
self.assertEqual("json", scenario["variables"].get("route_part"))
first_req = scenario["requests"][0]
self.assertEqual("http://blazedemo.com/reserve.php", first_req["url"])
self.assertEqual("test index", first_req["label"])
self.assertIn("headers", first_req)
self.assertEqual(first_req["headers"].get("X-Custom-Header"), "Value")
self.assertIn("assert", first_req)
self.assertEqual(2, len(first_req["assert"]))
self.assertEqual("BlazeDemo", first_req["assert"][0]["contains"][0])
self.assertFalse(first_req["assert"][0]["not"])
self.assertFalse(first_req["assert"][0]["regexp"])
self.assertEqual("BlazeDemou", first_req["assert"][1]["contains"][0])
self.assertTrue(first_req["assert"][1]["not"])
self.assertTrue(first_req["assert"][1]["regexp"])
second_req = scenario["requests"][1]
self.assertEqual("http://example.com/body", second_req["url"])
self.assertEqual("posty", second_req["label"])
self.assertEqual("POST", second_req["method"])
self.assertIn("headers", second_req)
self.assertEqual(second_req["headers"].get("X-Header"), "X-Value")
self.assertEqual(second_req["headers"].get("X-Header-2"), "X-Value-2")
self.assertIn("body", second_req)
self.assertIn("answer", second_req["body"])
self.assertEqual('42', second_req["body"]["answer"])
self.assertIn("extract-xpath", second_req)
self.assertIn("something_else", second_req["extract-xpath"])
self.assertEqual("//head", second_req["extract-xpath"]["something_else"]["xpath"])
third_req = scenario["requests"][2]
self.assertEqual("http://localhost:9999/api/${route_part}", third_req["url"])
self.assertEqual("/api/json", third_req["label"])
self.assertIn("extract-jsonpath", third_req)
self.assertIn("something", third_req["extract-jsonpath"])
self.assertEqual("$.baz", third_req["extract-jsonpath"]["something"]["jsonpath"])
def test_find_test_case(self):
obj = SoapUIScriptConverter(ROOT_LOGGER)
config = obj.convert_script(RESOURCES_DIR + "soapui/project.xml")
scenarios = config["scenarios"]
self.assertEqual(len(scenarios), 4)
target_scenario = scenarios["TestSuite 1-index"]
found_name, found_scenario = obj.find_soapui_test_case("index", scenarios)
self.assertEqual(target_scenario, found_scenario)
def test_find_test_case_empty(self):
obj = SoapUIScriptConverter(ROOT_LOGGER)
self.sniff_log(obj.log)
config = obj.convert_script(RESOURCES_DIR + "soapui/project.xml")
scenarios = config["scenarios"]
self.assertEqual(len(scenarios), 4)
target_scenario = scenarios["BlazeDemo LoadTest"]
found_name, found_scenario = obj.find_soapui_test_case(None, scenarios)
self.assertEqual(target_scenario, found_scenario)
def test_skip_if_no_requests(self):
obj = SoapUIScriptConverter(ROOT_LOGGER)
self.sniff_log(obj.log)
obj.convert_script(RESOURCES_DIR + "soapui/project.xml")
self.assertIn("No requests extracted for scenario TestSuite 1-EmptyTestCase, skipping it",
self.log_recorder.warn_buff.getvalue())
def test_rest_service_name_as_base_address(self):
obj = SoapUIScriptConverter(ROOT_LOGGER)
config = obj.convert_script(RESOURCES_DIR + "soapui/youtube-sample.xml")
scenarios = config["scenarios"]
scenario = scenarios["TestSuite-TestCase"]
self.assertEqual(len(scenario["requests"]), 5)
for request in scenario["requests"]:
self.assertTrue(request["url"].startswith("http://gdata.youtube.com/"))
def test_project_suite_case_level_properties(self):
obj = SoapUIScriptConverter(ROOT_LOGGER)
config = obj.convert_script(RESOURCES_DIR + "soapui/flickr-sample.xml")
scenarios = config["scenarios"]
scenario = scenarios["TestSuite-TestCase"]
self.assertEqual(len(scenario["variables"]), 2)
self.assertIn("#Project#ApiKey", scenario["variables"])
self.assertIn("#TestCase#temp", scenario["variables"])
def test_rest_parameters(self):
obj = SoapUIScriptConverter(ROOT_LOGGER)
config = obj.convert_script(RESOURCES_DIR + "soapui/flickr-sample.xml")
scenarios = config["scenarios"]
scenario = scenarios["TestSuite-TestCase"]
self.assertEqual(len(scenario["requests"]), 4)
first = scenario["requests"][0]
self.assertIn("body", first)
self.assertEqual(len(first["body"]), 4)
self.assertTrue(all(key in first["body"] for key in ["format", "method", "nojsoncallback", "api_key"]))
def test_soap_conversion(self):
obj = SoapUIScriptConverter(ROOT_LOGGER)
config = obj.convert_script(RESOURCES_DIR + "soapui/globalweather.xml")
self.assertEqual(len(config["scenarios"]), 4)
merged = config["scenarios"]["GWSOAPMerged-Test"]
split1 = config["scenarios"]["GWSOAPSplit-GetCities"]
split2 = config["scenarios"]["GWSOAPSplit-GetWeather"]
self.assertEqual(len(merged["requests"]), 2)
self.assertEqual(merged["requests"][0]["url"], "http://www.webservicex.com/globalweather.asmx")
self.assertEqual(merged["requests"][0]["method"], "POST")
self.assertEqual(merged["requests"][0]["headers"]["Content-Type"], "text/xml; charset=utf-8")
self.assertIn("body", merged["requests"][0])
self.assertEqual(merged["requests"][1]["url"], "http://www.webservicex.com/globalweather.asmx")
self.assertEqual(len(split1["requests"]), 1)
self.assertEqual(split1["requests"][0]["url"], "http://www.webservicex.com/globalweather.asmx")
self.assertEqual(len(split2["requests"]), 1)
self.assertEqual(split2["requests"][0]["url"], "http://www.webservicex.com/globalweather.asmx")
def test_rest_templated_params_interpolation(self):
obj = SoapUIScriptConverter(ROOT_LOGGER)
config = obj.convert_script(RESOURCES_DIR + "soapui/gmaps-sample.xml")
self.assertEqual(len(config["scenarios"]), 10)
scenario = config["scenarios"]["Directions API TestSuite-Simple Tests"]
for request in scenario["requests"]:
self.assertNotIn("{format}", request["url"])
self.assertEqual(scenario["requests"][0]["url"], "http://maps.googleapis.com/maps/api/directions/json")
self.assertEqual(scenario["requests"][1]["url"], "http://maps.googleapis.com/maps/api/directions/json")
self.assertEqual(scenario["requests"][2]["url"], "http://maps.googleapis.com/maps/api/directions/xml")
|
StarcoderdataPython
|
3218502
|
<reponame>sumanyu/ece457b<gh_stars>0
import os
import csv
from sklearn.datasets import fetch_mldata
from sklearn.cross_validation import train_test_split
from sklearn.naive_bayes import MultinomialNB
from DenoisingAutoencoder import DenoisingAutoencoder
from StackedDenoisingAutoencoders import StackedDenoisingAutoencoders
custom_data_home = os.path.join(os.path.split(__file__)[0], "data")
mnist = fetch_mldata('MNIST original', data_home=custom_data_home)
X, y = mnist.data / 255., mnist.target
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42)
# Stacked AE test
hidden_layer_one = [200,300,400]
hidden_layer_two = [50 ,100,150,200]
num_epochs = [1,2,3,4,5,6,7]
num_noise = [0.1, 0.2, 0.3,0.4]
for epoch in num_epochs:
for h_one in hidden_layer_one:
for h_two in (x for x in hidden_layer_two if x < h_one):
for noise_level in num_noise:
print "EPOCHS: %d " % epoch
print "LAYER_1: %d " % h_one
print "LAYER_2: %d " % h_two
stacked_ae = StackedDenoisingAutoencoders(hidden_layers_sizes=[h_one, h_two], corruption_level=noise_level,verbose=True, training_epochs=epoch)
stacked_ae.fit(X_train)
X_train_latent = stacked_ae.transform_latent_representation(X_train)
X_test_latent = stacked_ae.transform_latent_representation(X_test)
clf = MultinomialNB()
# Fit the model
clf.fit(X_train_latent, y_train)
# Perform the predictions
y_predicted = clf.predict(X_test_latent)
from sklearn.metrics import accuracy_score
print "Accuracy = {} %".format(accuracy_score(y_test, y_predicted)*100)
from sklearn.metrics import classification_report
print "Classification Report \n {}".format(classification_report(y_test, y_predicted, labels=range(0,10)))
with open('stackedNoiseAEs.csv', 'a') as csvfile:
outputFile = csv.writer(csvfile, delimiter=',')
outputFile.writerow([epoch, h_one, h_two, noise_level, accuracy_score(y_test, y_predicted)*100])
|
StarcoderdataPython
|
31268
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import array
import numpy as np
from numcodecs.compat import buffer_tobytes
def test_buffer_tobytes():
bufs = [
b'adsdasdas',
bytes(20),
np.arange(100),
array.array('l', b'qwertyuiqwertyui')
]
for buf in bufs:
b = buffer_tobytes(buf)
assert isinstance(b, bytes)
|
StarcoderdataPython
|
103984
|
<filename>testscripts/RDKB/component/PAM/TS_PAM_CheckLogUploadStatus.py
##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2021 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?><xml>
<id/>
<version>8</version>
<name>TS_PAM_CheckLogUploadStatus</name>
<primitive_test_id/>
<primitive_test_name>pam_Setparams</primitive_test_name>
<primitive_test_version>1</primitive_test_version>
<status>FREE</status>
<synopsis>To check if the LogUpload Status retrieved using Device.DeviceInfo.X_RDKCENTRAL-COM_xOpsDeviceMgmt.Logging.xOpsDMLogsUploadStatus is "In Progress" or "Complete" state and is the same as the contents of upload_log_status file after setting Device.DeviceInfo.X_RDKCENTRAL-COM_xOpsDeviceMgmt.Logging.xOpsDMUploadLogsNow to true.</synopsis>
<groups_id/>
<execution_time>2</execution_time>
<long_duration>false</long_duration>
<advanced_script>false</advanced_script>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>Broadband</box_type>
<box_type>RPI</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_PAM_213</test_case_id>
<test_objective>To check if the LogUpload Status retrieved using Device.DeviceInfo.X_RDKCENTRAL-COM_xOpsDeviceMgmt.Logging.xOpsDMLogsUploadStatus is "In Progress" or "Complete" state and is the same as the contents of upload_log_status file after setting Device.DeviceInfo.X_RDKCENTRAL-COM_xOpsDeviceMgmt.Logging.xOpsDMUploadLogsNow to true.</test_objective>
<test_type>Positive</test_type>
<test_setup>Broadband, RPI</test_setup>
<pre_requisite>1.Ccsp Components should be in a running state else invoke cosa_start.sh manually that includes all the ccsp components and TDK Component
2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>None</api_or_interface_used>
<input_parameters>ParamName : Device.DeviceInfo.X_RDKCENTRAL-COM_xOpsDeviceMgmt.Logging.xOpsDMUploadLogsNow
ParamValue : true
ParamType : bool
ParamName : Device.DeviceInfo.X_RDKCENTRAL-COM_xOpsDeviceMgmt.Logging.xOpsDMLogsUploadStatus
</input_parameters>
<automation_approch>1. Load the pam and sysutil modules
2. Set Device.DeviceInfo.X_RDKCENTRAL-COM_xOpsDeviceMgmt.Logging.xOpsDMUploadLogsNow to true.
3. Sleep for 60s
4. Get Device.DeviceInfo.X_RDKCENTRAL-COM_xOpsDeviceMgmt.Logging.xOpsDMLogsUploadStatus. The Status should be "In Progress" or "Complete".
5. Check if upload_log_status file is present under the tmp.
6 Compare the contents from the file with the TR181 status parameter. It should match.
7. Unload the modules</automation_approch>
<expected_output>The LogUpload Status retrieved using Device.DeviceInfo.X_RDKCENTRAL-COM_xOpsDeviceMgmt.Logging.xOpsDMLogsUploadStatus should be the same as the contents of upload_log_status file after setting Device.DeviceInfo.X_RDKCENTRAL-COM_xOpsDeviceMgmt.Logging.xOpsDMUploadLogsNow to true</expected_output>
<priority>High</priority>
<test_stub_interface>pam</test_stub_interface>
<test_script>TS_PAM_CheckLogUploadStatus</test_script>
<skipped>No</skipped>
<release_version>M92</release_version>
<remarks/>
</test_cases>
<script_tags/>
</xml>
'''
# use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
from tdkbVariables import *;
import tdkutility;
from time import sleep;
#Test component to be tested
pamobj = tdklib.TDKScriptingLibrary("pam","1");
sysobj = tdklib.TDKScriptingLibrary("sysutil","1");
#IP and Port of box, No need to change,
#This will be replaced with correspoing Box Ip and port while executing script
ip = <ipaddress>
port = <port>
pamobj.configureTestCase(ip,port,'TS_PAM_CheckLogUploadStatus');
sysobj.configureTestCase(ip,port,'TS_PAM_CheckLogUploadStatus');
#Get the result of connection with test component and DUT
pamloadmodulestatus=pamobj.getLoadModuleResult();
sysloadmodulestatus=sysobj.getLoadModuleResult();
if "SUCCESS" in sysloadmodulestatus.upper() and "SUCCESS" in pamloadmodulestatus.upper():
#Set the result status of execution
pamobj.setLoadModuleStatus("SUCCESS")
sysobj.setLoadModuleStatus("SUCCESS")
expectedresult="SUCCESS";
#Set Device.DeviceInfo.X_RDKCENTRAL-COM_xOpsDeviceMgmt.Logging.xOpsDMUploadLogsNow to true
value = "true";
step = 1;
tdkTestObj = pamobj.createTestStep('pam_Setparams');
tdkTestObj.addParameter("ParamName","Device.DeviceInfo.X_RDKCENTRAL-COM_xOpsDeviceMgmt.Logging.xOpsDMUploadLogsNow");
tdkTestObj.addParameter("ParamValue",value);
tdkTestObj.addParameter("Type","bool");
#Execute the test case in DUT
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
print "\nTEST STEP %d: Set Device.DeviceInfo.X_RDKCENTRAL-COM_xOpsDeviceMgmt.Logging.xOpsDMUploadLogsNow to %s" %(step, value);
print "EXPECTED RESULT %d : Device.DeviceInfo.X_RDKCENTRAL-COM_xOpsDeviceMgmt.Logging.xOpsDMUploadLogsNow should be set to %s successfully" %(step, value);
if expectedresult in actualresult and details != "":
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "ACTUAL RESULT %d : Device.DeviceInfo.X_RDKCENTRAL-COM_xOpsDeviceMgmt.Logging.xOpsDMUploadLogsNow is set to %s successfully; Details : %s" %(step, value, details);
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
#Check the Log Upload status with Device.DeviceInfo.X_RDKCENTRAL-COM_xOpsDeviceMgmt.Logging.xOpsDMLogsUploadStatus
sleep(60);
step = step + 1;
tdkTestObj = pamobj.createTestStep('pam_GetParameterValues');
tdkTestObj.addParameter("ParamName","Device.DeviceInfo.X_RDKCENTRAL-COM_xOpsDeviceMgmt.Logging.xOpsDMLogsUploadStatus");
#Execute the test case in DUT
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
print "\nTEST STEP %d : Get the value of Device.DeviceInfo.X_RDKCENTRAL-COM_xOpsDeviceMgmt.Logging.xOpsDMLogsUploadStatus" %step;
print "EXPECTED RESULT %d : Should get the value of Device.DeviceInfo.X_RDKCENTRAL-COM_xOpsDeviceMgmt.Logging.xOpsDMLogsUploadStatus" %step;
if expectedresult in actualresult and details != "":
status = details.strip().replace("\\n", "");
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "ACTUAL RESULT %d : Status of Device.DeviceInfo.X_RDKCENTRAL-COM_xOpsDeviceMgmt.Logging.xOpsDMLogsUploadStatus is : %s" %(step,status);
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
if "In Progress" in status or "Complete" in status:
print "The Log Upload Status has been moved to : %s" %status;
tdkTestObj.setResultStatus("SUCCESS");
#Check if upload_log_status file is found under tmp
step = step + 1;
tdkTestObj = sysobj.createTestStep('ExecuteCmd');
cmd = "[ -f /tmp/upload_log_status ] && echo \"File exist\" || echo \"File does not exist\"";
tdkTestObj.addParameter("command",cmd);
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails().strip().replace("\\n", "");
print "\nTEST STEP %d: Check if upload_log_status is present or not under tmp" %step;
print "EXPECTED RESULT %d: upload_log_status should be present under tmp" %step;
if details == "File exist":
tdkTestObj.setResultStatus("SUCCESS");
print "ACTUAL RESULT %d: upload_log_status is present under tmp" %step;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
#Check if contents to the file is same as the status including the timestamp
step = step + 1;
tdkTestObj = sysobj.createTestStep('ExecuteCmd');
cmd = "cat /tmp/upload_log_status";
tdkTestObj.addParameter("command",cmd);
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails().strip().replace("\\n", "");
print "\nTEST STEP %d: Get the contents of upload_log_status file" %step;
print "EXPECTED RESULT %d: Should get non-empty content from upload_log_status file" %step;
if expectedresult in actualresult and details != "":
tdkTestObj.setResultStatus("SUCCESS");
print "ACTUAL RESULT %d: upload_log_status file contains : %s" %(step, details);
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
step = step + 1;
print "\nTEST STEP %d: Cross check the status retrieved from TR181 parameter and from the upload_log_status file" %step;
print "EXPECTED RESULT %d: The status retrieved from TR181 parameter and from the upload_log_status file should be same" %step;
if status == details:
print "ACTUAL RESULT %d: The status of LogUpload is same as the content in upload_log_status file" %step;
tdkTestObj.setResultStatus("SUCCESS");
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
else:
print "ACTUAL RESULT %d: The status of LogUpload is not same as the content in upload_log_status file" %step;
tdkTestObj.setResultStatus("FAILURE");
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
tdkTestObj.setResultStatus("FAILURE");
print "ACTUAL RESULT %d: upload_log_status file contains : %s" %(step, details);
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
tdkTestObj.setResultStatus("FAILURE");
print "ACTUAL RESULT %d: upload_log_status is not present under tmp" %step;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else :
print "Log Upload Status : %s" %status;
tdkTestObj.setResultStatus("FAILURE");
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "ACTUAL RESULT %d : Status of Device.DeviceInfo.X_RDKCENTRAL-COM_xOpsDeviceMgmt.Logging.xOpsDMLogsUploadStatus is not retrieved" %step;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "ACTUAL RESULT %d : Device.DeviceInfo.X_RDKCENTRAL-COM_xOpsDeviceMgmt.Logging.xOpsDMUploadLogsNow is not set to %s successfully; Details : %s" %(step, value, details);
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
pamobj.unloadModule("pam");
sysobj.unloadModule("sysutil");
else:
print "Failed to load module";
pamobj.setLoadModuleStatus("FAILURE");
sysobj.setLoadModuleStatus("FAILURE");
|
StarcoderdataPython
|
3277528
|
<filename>gplay_apk_download_multidex/apk_multidex.py
#!/usr/bin/env python3
import subprocess
import os
APKANALYZER = "/Users/amitseal/Android/Sdk/tools/bin/apkanalyzer"
APKANALYZER_COMMAND = "{} dex list {}"
def is_multidex(apk_path: str):
global APKANALYZER
global APKANALYZER_COMMAND
# command = shlex.split(APKANALYZER_COMMAND.format(APKANALYZER, apk_path))
command = APKANALYZER_COMMAND.format(APKANALYZER, apk_path)
print(command)
output = subprocess.getoutput(command)
lines = output.splitlines()
count_dex = 0
for line in lines:
if line.endswith(".dex"):
count_dex += 1
if count_dex >= 2:
return True
return False
def count_multidex_in_dir(dir_path: str):
apk_count = 0
multidex_count = 0
for entry in os.scandir(dir_path):
if entry.is_file():
file_name: str = entry.name
if file_name.endswith(".apk"):
apk_count += 1
if is_multidex(dir_path+os.sep+file_name):
multidex_count += 1
return (multidex_count, apk_count)
if __name__ == "__main__":
print(count_multidex_in_dir("/Users/amitseal/workspaces/apks"))
|
StarcoderdataPython
|
112294
|
<reponame>arieltrevisan/python-3-from-scratch-practices
import unittest as ut
"""
https://docs.python.org/3/library/unittest.html#test-discovery
"""
class TestClassOne(ut.TestCase):
@classmethod
def setUpClass(cls):
# print("setupClass")
pass
@classmethod
def tearDownClass(cls):
# print("tearDownClass")
pass
# setup of each method
def setUp(self):
# print("setUp")
pass
# teardown of each method
def tearDown(self):
# print("tearDown")
pass
def test_bool(self):
value = True
self.assertTrue(value, "Not True")
def test_equals(self):
value_1 = 1
value_2 = 1
self.assertEqual(value_1, value_2)
def test_raise(self):
with self.assertRaises(ZeroDivisionError):
1 / 0
class TestClassTwo(ut.TestCase):
def test_1(self):
self.assertEqual(1, 1)
def test_2(self):
list_1 = [1, 2, 3]
list_2 = [2, 3, 1]
self.assertCountEqual(list_1, list_2)
if __name__ == '__main__':
ut.main(verbosity=2)
|
StarcoderdataPython
|
1642557
|
import numpy as np
import random
def sigmoid(x, derivative=False):
    # Logistic sigmoid and its derivative; the network class below calls this
    # helper, which was missing from the snippet (assumed standard form).
    s = 1.0 / (1.0 + np.exp(-x))
    if derivative:
        return s * (1.0 - s)
    return s
def chunks(data, n):
    # Split a list into consecutive batches of size n; assumed helper used by SGD().
    return [data[i:i + n] for i in range(0, len(data), n)]
class network():
"""
    A network class for the neural network which includes the following methods:
    - feedforward(), for using the network on a given input
    - SGD(), for applying stochastic gradient descent (i.e. training the
      network)
    - BP(), for applying backpropagation; this function is intended to be
      called via SGD()
    - cost(), for calculating the cost of a given input with respect to the
      desired output
    - eval(), for evaluating the performance of the network while training
"""
    def __init__(self, l_sizes: list, w_mu=0.0, w_sigma=1.0, b_sigma=1.0):
        """
        w_mu, w_sigma and b_sigma adjust the initial starting parameters of
        the neural network (w and b being the weights and biases,
        respectively); the defaults give N(0, 1) initialisation.
        Note that l_sizes (layer sizes) is given as a list of layer sizes.
        Note that the first layer does not contain weights and biases.
        Note that there is a good argument for initialising the biases at zero,
        following the Stanford CS231N notes:
        http://cs231n.github.io/neural-networks-2/
        (not mentioned in the assignment, its effect is not (yet) explored)
"""
self.n_layers = len(l_sizes)
self.layer_sizes = l_sizes
# Setting random biases using by default N(0, 1) (intercepts)
self.biases = [np.sqrt(b_sigma)*np.random.randn(x, 1)
for x in l_sizes[1:]]
# Setting random weights using by default N(0, 1) (beta values)
self.weights = [np.sqrt(w_sigma)*np.random.randn(y, x) + w_mu
for x, y in np.array((l_sizes[:-1], l_sizes[1:])).T]
def feedforward(self, x, save_var = False):
"""
Returns prediction of the network given the input, x
        Assumes x is a np.array of shape (n,) or (n, 1),
        where n is equal to the size of the first layer (i.e. l_sizes[0])
"""
# Used debugging and for backpropergations (BP)
if save_var == True:
xs = x
l_activation = [x] # a list of all the layer activation (with sigmoid)
x_list = [] # list of vectors, one for each layer (without sigmoid)
# Note that the calc is split up as to save variables underway
for l in range(self.n_layers-1):
x = np.dot(self.weights[l], xs) + self.biases[l]
x_list.append(x)
xs = sigmoid(x)
l_activation.append(xs)
return x_list, xs, l_activation
# Tranforming input in case of dim (n,), it does not influence (n, 1)
x = x.reshape(-1, 1)
# Note this could be optimized using matrix multiplication
# -1 since x is the input layer
for l in range(self.n_layers-1):
x = sigmoid(np.dot(self.weights[l], x) + self.biases[l])
return x
def SGD(self, train_data, epochs, batch_size, learning_rate,
test_data=None, save_performance = False):
"""
Stochastic Gradient Descent (SGD)
        Loops through the number of epochs, splitting the training data into
        evenly sized chunks of size n, where n is the batch size, then loops
        over each of these and applies backpropagation (BP).
        Lastly, if test data is given, it evaluates the network performance on
        the test data using the eval() function.
"""
        # Copying the data so as not to reorder the original data,
# keeping the same name for readability.
train_data = train_data[:]
# Save a list for performance to be saved in
if save_performance:
if not test_data:
raise Exception("Performance can't be saved if no test data is given")
self.performance = []
for epoch in range(epochs):
print(f"\n Epoch: {(epoch+1)}/{epochs}", end="")
random.shuffle(train_data) # Using a Fisher Yates Shuffle
batches = chunks(train_data, batch_size)
# Note that instead of looping through each batch, you could have
# a more effective approach would be to consider each batch as a
# vector in a matrix, and from here simply use matrix
# multiplication
for batch in batches:
                # Apply backpropagation using gradient descent for each batch
self.BP(batch, learning_rate)
if test_data:
n_correct, n = self.eval(test_data)
print(f", Obtained Accuracy: {np.round(n_correct/n, 2)}" +
f" \t ({n_correct}/{n})", end="")
if save_performance:
n_correct_train, n_t = self.eval(train_data, train_data = True)
self.performance.append((n_correct/n, n_correct_train/n_t))
print("\n Process complete")
def BP(self, batch, learning_rate):
"""
        Backpropagation (BP)
        Loops through each training sample in the batch and applies gradient
        descent. Lastly it averages the gradient vector and updates the weights
        and biases of the network.
        Each element of the batch is a tuple of length 2 of the form
        (pixels, answer), where pixels is a list of pixel activations (zero is
        black) and answer is a boolean list of length 10, indicating the digit.
        (assumes the MNIST data)
"""
n_biases = [np.zeros(bias.shape) for bias in self.biases]
n_weights = [np.zeros(weight.shape) for weight in self.weights]
# looping over each batch, applying gradient descent
for pixels, answer in batch:
### start BP
dn_biases = [np.zeros(b.shape) for b in self.biases]
dn_weights = [np.zeros(w.shape) for w in self.weights]
# feedforward - where we save relevant variables
x_list, activation, activations = self.feedforward(pixels, save_var=True)
            # update the weights and biases going backwards through the network
delta = self.cost(activations[-1],
answer) * sigmoid(x_list[-1], derivative=True)
dn_biases[-1] = delta
dn_weights[-1] = np.dot(delta, activations[-2].transpose())
            # Note that the following loop runs backwards through the layers
for l in range(2, self.n_layers):
x = x_list[-l]
s_deriv = sigmoid(x, derivative=True)
delta = s_deriv * np.dot(self.weights[-l+1].T, delta)
# Saving dn's
dn_biases[-l] = delta
dn_weights[-l] = np.dot(delta, activations[-l-1].T)
for l in range(self.n_layers-1):
n_biases[l] += dn_biases[l]
n_weights[l] += dn_weights[l]
# update weight and biases - averaged and weighted by the learning rate
for l in range(self.n_layers-1):
self.weights[l] = self.weights[l] - (learning_rate / len(batch)) * n_weights[l]
self.biases[l] = self.biases[l] - (learning_rate / len(batch)) * n_biases[l]
def cost(self, output, actual, derivative = True):
"""
A cost function, which returns the difference between the output of the
neural network (e.g. its prediction) and the actual value
        Note that this is (in part, see the note at the end) the partial
        derivative of the cost function given by (in LaTeX):
        \frac{1}{n} \sum_{x} \frac{|f(x) - a|^{2}}{2}
        where n is the number of observations, f(x) is the output of the neural
        network and a is the actual result.
"""
        # In practice only the derivative is used; the original function
        # serves only a conceptual purpose
        if derivative == False:
            return 1/2 * (output - actual) * (output - actual)
        return output - actual
def eval(self, data, train_data = False):
"""
        Evaluates the network on the given data, returning a tuple with
        the number of correct predictions and the total number of predictions.
        Assumes the MNIST database or data with a similar structure.
"""
        # Creates an n-by-2 matrix, where n is the length of the data and
        # the second column holds the right answer.
        # Note that there is a restructuring for the train_data due to the
        # different structures of train and test data
if train_data:
predictions = np.array([(np.argmax(self.feedforward(pixels)), np.argmax(answer))
for pixels, answer in data])
else:
predictions = np.array([(np.argmax(self.feedforward(pixels)), answer)
for pixels, answer in data])
n_correct = sum(predictions[:, 0] == predictions[:, 1])
return (n_correct, len(predictions))
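# --- Reference sketch (assumption, not part of the original file) ---
# The class above relies on `sigmoid` and `chunks` helpers defined earlier in
# the file; they are assumed to behave roughly like the stand-alone versions
# below (suffixed with `_sketch` so they do not clash with the real ones).
import numpy as _np

def sigmoid_sketch(x, derivative=False):
    s = 1.0 / (1.0 + _np.exp(-x))
    if derivative:
        return s * (1.0 - s)  # sigma'(x) = sigma(x) * (1 - sigma(x))
    return s

def chunks_sketch(lst, n):
    # Split a list into consecutive slices of (at most) n elements each.
    return [lst[i:i + n] for i in range(0, len(lst), n)]

# Usage sketch (names are placeholders for whatever the surrounding project uses):
#     net = <NetworkClassDefinedAbove>([784, 30, 10])
#     net.SGD(train_data, epochs=30, batch_size=10, learning_rate=3.0,
#             test_data=test_data, save_performance=True)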
|
StarcoderdataPython
|
3200640
|
<reponame>LBJ-Wade/bilby
import argparse
import logging
import os
import subprocess
from .log import logger
def set_up_command_line_arguments():
""" Sets up command line arguments that can be used to modify how scripts are run.
Returns
=======
command_line_args, command_line_parser: tuple
The command_line_args is a Namespace of the command line arguments while
the command_line_parser can be given to a new `argparse.ArgumentParser`
as a parent object from which to inherit.
Notes
=====
The command line arguments are passed initially at runtime, but this parser
does not have a `--help` option (i.e., the command line options are
available for any script which includes `import bilby`, but no help command
is available. This is done to avoid conflicts with child argparse routines
(see the example below).
Examples
========
In the following example we demonstrate how to setup a custom command line for a
project which uses bilby.
.. code-block:: python
# Here we import bilby, which initialises and parses the default command-line args
>>> import bilby
# The command line arguments can then be accessed via
>>> bilby.core.utils.command_line_args
        Namespace(clean=False, log_level=20, quiet=False)
# Next, we import argparse and define a new argparse object
>>> import argparse
>>> parser = argparse.ArgumentParser(parents=[bilby.core.utils.command_line_parser])
>>> parser.add_argument('--argument', type=int, default=1)
>>> args = parser.parse_args()
        Namespace(clean=False, log_level=20, quiet=False, argument=1)
Placing these lines into a script, you'll be able to pass in the usual bilby default
arguments, in addition to `--argument`. To see a list of all options, call the script
with `--help`.
"""
try:
parser = argparse.ArgumentParser(
description="Command line interface for bilby scripts",
add_help=False, allow_abbrev=False)
except TypeError:
parser = argparse.ArgumentParser(
description="Command line interface for bilby scripts",
add_help=False)
parser.add_argument("-v", "--verbose", action="store_true",
help=("Increase output verbosity [logging.DEBUG]." +
" Overridden by script level settings"))
parser.add_argument("-q", "--quiet", action="store_true",
help=("Decrease output verbosity [logging.WARNING]." +
" Overridden by script level settings"))
parser.add_argument("-c", "--clean", action="store_true",
help="Force clean data, never use cached data")
parser.add_argument("-u", "--use-cached", action="store_true",
help="Force cached data and do not check its validity")
parser.add_argument("--sampler-help", nargs='?', default=False,
const='None', help="Print help for given sampler")
parser.add_argument("--bilby-test-mode", action="store_true",
help=("Used for testing only: don't run full PE, but"
" just check nothing breaks"))
parser.add_argument("--bilby-zero-likelihood-mode", action="store_true",
help=("Used for testing only: don't run full PE, but"
" just check nothing breaks"))
args, unknown_args = parser.parse_known_args()
if args.quiet:
args.log_level = logging.WARNING
elif args.verbose:
args.log_level = logging.DEBUG
else:
args.log_level = logging.INFO
return args, parser
def run_commandline(cl, log_level=20, raise_error=True, return_output=True):
"""Run a string cmd as a subprocess, check for errors and return output.
Parameters
==========
cl: str
Command to run
    log_level: int
        See https://docs.python.org/2/library/logging.html#logging-levels,
        default is '20' (INFO)
    raise_error: bool
        If True (default), raise an error if the command fails
    return_output: bool
        If True (default), capture the command output and return it
    """
logger.log(log_level, 'Now executing: ' + cl)
if return_output:
try:
out = subprocess.check_output(
cl, stderr=subprocess.STDOUT, shell=True,
universal_newlines=True)
except subprocess.CalledProcessError as e:
logger.log(log_level, 'Execution failed: {}'.format(e.output))
if raise_error:
raise
else:
out = 0
os.system('\n')
        return out
else:
process = subprocess.Popen(cl, shell=True)
process.communicate()
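# --- Usage sketch (illustrative only, not part of the original module) ---
# These helpers are normally used indirectly via `import bilby`; a direct call
# would look roughly like this (no real import path is implied here):
#
#     args, parser = set_up_command_line_arguments()
#     out = run_commandline("echo hello", log_level=args.log_level)
#     print(out)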
|
StarcoderdataPython
|
3361093
|
# This function is not intended to be invoked directly. Instead it will be
# triggered by an HTTP starter function.
# Before running this sample, please:
# - create a Durable activity function (default name is "Hello")
# - create a Durable HTTP starter function
# - add azure-functions-durable to requirements.txt
# - run pip install -r requirements.txt
import logging
import json
import os
import azure.functions as func
import azure.durable_functions as df
from boxsdk import OAuth2, Client
from boxsdk import JWTAuth
from _init_ import ActivateUser, GetAllItems, RemoveItems, ListTrashedItems, PurgeTrashedItems, RollOutUserAccount
def orchestrator_function(context: df.DurableOrchestrationContext):
    args = migrationParams(context.get_input())
    yield context.call_activity('ActivateUserAccount', args.userId)
    # Fan out over the user's items and remove them
    all_items = yield context.call_activity('GetAllItems', args)
    items = itemParams(all_items)
    yield context.task_all([context.call_activity('RemoveItems', item) for item in items])
    # Then purge whatever ended up in the trash
    all_trashed = yield context.call_activity('ListAllTrashedItems', args)
    trashedItems = itemParams(all_trashed)
    yield context.task_all([context.call_activity('PurgeTrashedItems', item) for item in trashedItems])
    yield context.call_activity('RollOutUserAccount', args.userId)
main = df.Orchestrator.create(orchestrator_function)
def createBoxClient():
configJson = os.getenv('BoxConfigJson')
auth = JWTAuth.from_settings_file(configJson)
return Client(auth)
def createBoxUserClient(userId):
    configJson = os.getenv('BoxConfigJson')
    auth = JWTAuth.from_settings_file(configJson)
    user = createBoxClient().user(user_id=userId)
    # Re-authenticate the JWT app as that managed user
    auth.authenticate_user(user)
    user_client = Client(auth)
    return user_client
def getCurrentUser():
    boxClient = createBoxClient()
    current_user = boxClient.user().get()
    return current_user
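# --- Hypothetical helper sketches (assumptions, not part of the original file) ---
# The orchestrator above calls `migrationParams` and `itemParams`, which are not
# defined in this snippet. They are assumed to be thin wrappers along these lines;
# the attribute and key names are placeholders only.
class MigrationParamsSketch:
    def __init__(self, payload: dict):
        self.userId = payload.get("userId")  # hypothetical key name
        self.raw = payload

def item_params_sketch(items):
    # Assumed to normalise the activity result into a plain list of items.
    return list(items or [])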
|
StarcoderdataPython
|
1670938
|
<gh_stars>0
import RPi.GPIO as GPIO
import time
from libdw import pyrebase
#Database Set-Up
projectid = "cleanbean-9e2f5"
dburl = "https://" + projectid + ".firebaseio.com"
authdomain = projectid + ".firebaseapp.com"
apikey = "<KEY>"
email = "<EMAIL>"
password = "<PASSWORD>"
config = {
"apiKey": apikey,
"authDomain": authdomain,
"databaseURL": dburl,
}
firebase = pyrebase.initialize_app(config)
auth = firebase.auth()
user = auth.sign_in_with_email_and_password(email, password)
db = firebase.database()
#GPIO Mode (BOARD / BCM)
GPIO.setmode(GPIO.BCM)
#set GPIO Pins
GPIO_TRIGGER = 6
GPIO_ECHO = 5
#set GPIO direction (IN / OUT)
GPIO.setup(GPIO_TRIGGER, GPIO.OUT)
GPIO.setup(GPIO_ECHO, GPIO.IN)
binHeight = 18.6
def trashHeight(distance):
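    # Note: this returns the free fraction of the bin height
    # (1.0 when the bin is empty, 0.0 when it is full), since
    # 1 - (binHeight - distance)/binHeight == distance/binHeight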
return 1-(binHeight-distance)/binHeight
def distance():
# set Trigger to HIGH
GPIO.output(GPIO_TRIGGER, True)
# set Trigger after 0.01ms to LOW
time.sleep(0.00001)
GPIO.output(GPIO_TRIGGER, False)
StartTime = time.time()
StopTime = time.time()
# save StartTime
while GPIO.input(GPIO_ECHO) == 0:
StartTime = time.time()
# save time of arrival
while GPIO.input(GPIO_ECHO) == 1:
StopTime = time.time()
# time difference between start and arrival
TimeElapsed = StopTime - StartTime
# multiply with the sonic speed (34300 cm/s)
# and divide by 2, because there and back
distance = (TimeElapsed * 34300) / 2
return distance
if __name__ == '__main__':
try:
distold = binHeight
distance_list = []
trash_height_list = []
while True:
dist = distance()
distance_list.append(dist)
print ("Measured Distance = %.1f cm" % dist)
time.sleep(1)
print(trashHeight(dist))
trash_height = trashHeight(dist)
db.child("Bin_1").set(trash_height, user['idToken'])
# Reset by pressing CTRL + C
except KeyboardInterrupt:
print("Measurement stopped by User")
GPIO.cleanup()
|
StarcoderdataPython
|